#!/usr/bin/env python
'''
Evaluate VCFs against BAMSurgeon "Truth" VCFs
Adam Ewing, [email protected]
Requires PyVCF (https://github.com/jamescasbon/PyVCF)
and PySAM
'''
import sys, os
import vcf
import argparse
import pysam
from collections import OrderedDict
def match(subrec, trurec, vtype='SNV'):
assert vtype in ('SNV', 'SV', 'INDEL')
if vtype == 'SNV' and subrec.is_snp and trurec.is_snp:
if subrec.POS == trurec.POS and subrec.REF == trurec.REF and subrec.ALT == trurec.ALT:
return True
if vtype == 'INDEL' and subrec.is_indel and trurec.is_indel:
if subrec.POS == trurec.POS and subrec.REF == trurec.REF and subrec.ALT == trurec.ALT:
return True
if vtype == 'SV' and subrec.is_sv and trurec.is_sv:
trustart, truend = expand_sv_ends(trurec)
substart, subend = expand_sv_ends(subrec)
# check for overlap
if min(truend, subend) - max(trustart, substart) > 0:
return True
return False
def expand_sv_ends(rec):
''' assign start and end positions to SV calls using conf. intervals if present '''
startpos, endpos = rec.start, rec.end
assert rec.is_sv
try:
endpos = int(rec.INFO.get('END')[0])
if rec.INFO.get('CIPOS'):
            ci = list(map(int, rec.INFO.get('CIPOS')))
if ci[0] < 0:
startpos += ci[0]
if rec.INFO.get('CIEND'):
            ci = list(map(int, rec.INFO.get('CIEND')))
if ci[0] > 0:
endpos += ci[0]
except TypeError as e:
sys.stderr.write("error expanding sv interval: " + str(e) + " for record: " + str(rec) + "\n")
if startpos > endpos:
endpos, startpos = startpos, endpos
return startpos, endpos
def relevant(rec, vtype, ignorechroms):
''' Return true if a record matches the type of variant being investigated '''
rel = (rec.is_snp and vtype == 'SNV') or (rec.is_sv and vtype == 'SV') or (rec.is_indel and vtype == 'INDEL')
return rel and (ignorechroms is None or rec.CHROM not in ignorechroms)
def passfilter(rec, disabled=False):
''' Return true if a record is unfiltered or has 'PASS' in the filter field (pyvcf sets FILTER to None) '''
if disabled:
return True
if rec.FILTER is None or rec.FILTER == '.' or not rec.FILTER:
return True
return False
def svmask(rec, vcfh, truchroms):
''' mask snv calls in sv regions '''
if rec.is_snp and rec.CHROM in truchroms:
for overlap_rec in vcfh.fetch(rec.CHROM, rec.POS-1, rec.POS):
if overlap_rec.is_sv:
return True
return False
def var_dist(v1, v2):
"""compute absolute distance between two variants
"""
assert v1.CHROM == v2.CHROM
return abs(v1.POS-v2.POS)
def get_close_matches(var, vcf_fh, win, indels_only=True):
"""Find close matches for variant (PyVCF record var) in file (PyVCF
VCFReader vcf_fh) within given window win and return as list of
    (item, distance) tuples, sorted in ascending order of distance.
"""
matches = list(vcf_fh.fetch(var.CHROM, var.POS-win, var.POS+1+win))
if indels_only:
matches = [m for m in matches if m.is_indel]
if len(matches) == 0:
return []
dist_map = [(m, var_dist(m, var)) for m in matches]
return sorted(dist_map, key=lambda x: x[1])
def have_identical_haplotypes(v1, v2, ref):
"""Check if two variant produce the same haplotype / variant sequence.
- v1 and v2: PyVCF variants to compare
- ref: PySAM FastaFile
"""
assert (v1.is_indel or v1.is_snp) and (v2.is_indel or v2.is_snp)
if v1.CHROM != v2.CHROM:
return False
if v1.is_snp and v2.is_snp:
assert v1.REF.upper() == v2.REF.upper()
return str(v1.ALT[0]).upper() == str(v2.ALT[0]).upper()
if v1.is_snp or v2.is_snp:
# one snp one indel: can't produce identical results
return False
assert v1.is_indel and v2.is_indel
    # only one allele per variant allowed
    assert len(v1.ALT) == 1 and len(v2.ALT) == 1, (
        "Can't handle multi-allelic entries")
    # get the sequence context which fully overlaps both variants.
# note: pyvcf is one-based, but start and end are zero-based half-open
start = min([v1.POS, v2.POS])-1
end = max([v1.POS + max([len(v1.REF), len(v1.ALT[0])]),
v2.POS + max([len(v2.REF), len(v2.ALT[0])])
])
    chrom = v1.CHROM  # verified above that both variants are on the same chromosome
seq = list(ref.fetch(chrom, start, end).upper())
if len(seq) != end-start:
# FIXME how to handle?
sys.stderr.write("WARN: Couldn't fetch full sequence window. Skipping"
" allele-aware comparison, otherwise indices would"
" be off\n")
raise NotImplementedError
v1_offset = v1.POS-1-start
v2_offset = v2.POS-1-start
    # lower() in the replacement is for debugging purposes only
v1_seq = seq[:v1_offset] + list(str(v1.ALT[0]).lower()) + seq[v1_offset+len(v1.REF):]
v2_seq = seq[:v2_offset] + list(str(v2.ALT[0]).lower()) + seq[v2_offset+len(v2.REF):]
if False:
print("reference sequence context\t%s" % (''.join(seq)))
print("v1 (offset %d) %s\t%s" % (v1_offset, v1, ''.join(v1_seq)))
print("v2 (offset %d) %s\t%s" % (v2_offset, v2, ''.join(v2_seq)))
print("")
try:
assert seq[v1_offset] == v1.REF[0].upper()
assert seq[v2_offset] == v2.REF[0].upper()
assert len(v1_seq) == len(seq) - len(v1.REF) + len(v1.ALT[0])
assert len(v2_seq) == len(seq) - len(v2.REF) + len(v2.ALT[0])
except AssertionError:
#import pdb; pdb.set_trace()
raise
#if ''.join(v1_seq).upper() == ''.join(v2_seq).upper():
# print ''.join(v1_seq).upper()
return ''.join(v1_seq).upper() == ''.join(v2_seq).upper()
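# Illustrative note (example values, not from the original source): in the
# reference context "CAAAT", deleting one A of the homopolymer can be written
# as POS=1 REF=CA ALT=C or as POS=2 REF=AA ALT=A. Both edits produce the same
# sequence ("CAAT"), so have_identical_haplotypes() reports them as the same
# event even though a plain positional comparison would not.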
def evaluate(submission, truth, vtype='SNV', reffa=None, ignorechroms=None, ignorepass=False,
fp_vcf=None, fn_vcf=None, tp_vcf=None,
debug=False):
''' return stats on sensitivity, specificity, balanced accuracy '''
assert vtype in ('SNV', 'SV', 'INDEL')
subvcfh = vcf.Reader(filename=submission)
truvcfh = vcf.Reader(filename=truth)
fpvcfh = fnvcfh = tpvcfh = None
if fp_vcf:
fpvcfh = vcf.Writer(open(fp_vcf, 'w'), template=subvcfh)
if fn_vcf:
fnvcfh = vcf.Writer(open(fn_vcf, 'w'), template=subvcfh)
if tp_vcf:
tpvcfh = vcf.Writer(open(tp_vcf, 'w'), template=subvcfh)
reffa_fh = None
if reffa:
reffa_fh = pysam.Fastafile(reffa)
if debug:
print("DEBUG: Using haplotype aware indel comparison")
tpcount = 0
fpcount = 0
subrecs = 0
trurecs = 0
truchroms = {}
fns = OrderedDict()
''' count records in truth vcf, track contigs/chromosomes '''
for trurec in truvcfh:
if relevant(trurec, vtype, ignorechroms):
trurecs += 1
truchroms[trurec.CHROM] = True
fns[str(trurec)] = trurec
used_truth = {} # keep track of 'truth' sites used, they should only be usable once
''' parse submission vcf, compare to truth '''
for subrec in subvcfh:
if passfilter(subrec, disabled=ignorepass):
if subrec.is_snp and vtype == 'SNV':
if not svmask(subrec, truvcfh, truchroms):
subrecs += 1
if subrec.is_sv and vtype == 'SV':
subrecs += 1
if subrec.is_indel and vtype == 'INDEL':
subrecs += 1
matched = False
startpos, endpos = subrec.start, subrec.end
if vtype == 'SV' and subrec.is_sv:
startpos, endpos = expand_sv_ends(subrec)
try:
if relevant(subrec, vtype, ignorechroms) and passfilter(subrec, disabled=ignorepass) and subrec.CHROM in truchroms:
for trurec in truvcfh.fetch(subrec.CHROM, startpos, end=endpos):
if match(subrec, trurec, vtype=vtype) and str(trurec) not in used_truth:
matched = True
if not matched and subrec.is_indel and reffa_fh:# try haplotype aware comparison
window = 100
for (trurec, _) in get_close_matches(subrec, truvcfh, window, indels_only=True):
if str(trurec) in used_truth:
continue
if have_identical_haplotypes(subrec, trurec, reffa_fh):
matched = True
if debug:
print("DEBUG: Rescuing %s which has same haplotype as %s" % (subrec, trurec))
break
if matched:
used_truth[str(trurec)] = True
except ValueError as e:
sys.stderr.write("Warning: " + str(e) + "\n")
if matched:
tpcount += 1
if tpvcfh:
tpvcfh.write_record(subrec)
if str(trurec) in fns.keys():
del fns[str(trurec)]
else:
if relevant(subrec, vtype, ignorechroms) and passfilter(subrec, disabled=ignorepass) and not svmask(subrec, truvcfh, truchroms):
fpcount += 1 # FP counting method needs to change for real tumors
if fpvcfh:
fpvcfh.write_record(subrec)
if fnvcfh:
for fn in fns.values():
fnvcfh.write_record(fn)
print(f"tpcount, fpcount, subrecs, trurecs: {tpcount},{fpcount},{subrecs},{trurecs}")
recall = float(tpcount) / float(trurecs)
if tpcount+fpcount > 0:
precision = float(tpcount) / float(tpcount + fpcount)
else:
precision = 0.0
#fdr = 1.0 - float(fpcount) / float(subrecs)
f1score = 0.0 if tpcount == 0 else 2.0*(precision*recall)/(precision+recall)
for fh in [fpvcfh, fnvcfh, tpvcfh]:
if fh:
fh.close()
return precision, recall, f1score
def main(args):
chromlist = None
if args.chromlist is not None:
chromlist = args.chromlist.split(',')
if not args.subvcf.endswith('.vcf') and not args.subvcf.endswith('.vcf.gz'):
sys.stderr.write("submission VCF filename does not end in .vcf or .vcf.gz\n")
sys.exit(1)
if not os.path.exists(args.truvcf):
sys.stderr.write("truth VCF does not exist.\n")
sys.exit(1)
if not os.path.exists(args.truvcf + '.tbi'):
sys.stderr.write("truth VCF does not appear to be indexed. bgzip + tabix index required.\n")
sys.exit(1)
if args.mutype not in ('SV', 'SNV', 'INDEL'):
sys.stderr.write("-m/--mutype must be either SV, SNV, or INDEL\n")
sys.exit(1)
result = evaluate(args.subvcf, args.truvcf, vtype=args.mutype,
reffa=args.reffa, ignorechroms=chromlist, ignorepass=args.nonpass,
fp_vcf=args.fp_vcf, fn_vcf=args.fn_vcf, tp_vcf=args.tp_vcf,
debug=args.debug)
print("precision, recall, F1 score: " + ','.join(map(str, result)))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="check vcf output against a 'truth' vcf")
parser.add_argument('-v', '--vcf', dest='subvcf', required=True, help="VCF being submitted for evaluation")
parser.add_argument('-t', '--truth', dest='truvcf', required=True, help="'Truth' VCF containing true positives")
    parser.add_argument('-f', '--ref', dest='reffa', help="Reference fasta file (enables haplotype-aware indel comparison)")
parser.add_argument('-m', '--mutype', dest='mutype', required=True, help="Mutation type: must be either SNV, SV, or INDEL")
    parser.add_argument('--ignore', dest='chromlist', default=None, help="(optional) comma-separated list of chromosomes to ignore")
parser.add_argument('--nonpass', dest='nonpass', action="store_true", help="evaluate all records (not just PASS records) in VCF")
parser.add_argument('--fp', dest='fp_vcf', help="print false positive positions to this vcf-file")
parser.add_argument('--tp', dest='tp_vcf', help="print true positive positions to this file")
    parser.add_argument('--fn', dest='fn_vcf', help="print false negative positions to this file")
parser.add_argument('--debug', dest='debug', action="store_true", help=argparse.SUPPRESS)
args = parser.parse_args()
main(args)
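# Example invocation (a sketch; the script and file names below are
# hypothetical):
#
#   python evaluator.py -v submission.vcf.gz -t truth.vcf.gz -m INDEL \
#       -f reference.fa --fp fp.vcf --fn fn.vcf --tp tp.vcf
#
# The truth VCF must be bgzip-compressed and tabix-indexed (the .tbi file is
# checked for above), and supplying -f/--ref enables the haplotype-aware
# rescue of INDEL calls. Precision, recall and F1 score are printed to stdout.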
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tcav_results/results.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='tcav_results/results.proto',
package='tcav_results',
syntax='proto2',
serialized_options=None,
serialized_pb=_b('\n\x1atcav_results/results.proto\x12\x0ctcav_results\"\xcc\x03\n\x06Result\x12\x0f\n\x07\x63\x61v_key\x18\x01 \x01(\t\x12\x13\n\x0b\x63\x61v_concept\x18\x02 \x01(\t\x12\x18\n\x10negative_concept\x18\x03 \x01(\t\x12\x14\n\x0ctarget_class\x18\x04 \x01(\x05\x12:\n\x0e\x63\x61v_accuracies\x18\x05 \x01(\x0b\x32\".tcav_results.Result.CAVaccuracies\x12\x0c\n\x04i_up\x18\x06 \x01(\x02\x12%\n\x1dval_directional_dirs_abs_mean\x18\x07 \x01(\x02\x12!\n\x19val_directional_dirs_mean\x18\x08 \x01(\x02\x12 \n\x18val_directional_dirs_std\x18\t \x01(\x02\x12\x1c\n\x14val_directional_dirs\x18\n \x03(\x02\x12\x0c\n\x04note\x18\x0b \x01(\t\x12\r\n\x05\x61lpha\x18\x0c \x01(\x02\x12\x12\n\nbottleneck\x18\r \x01(\t\x1ag\n\rCAVaccuracies\x12\x1d\n\x15positive_set_accuracy\x18\x01 \x01(\x02\x12\x1d\n\x15negative_set_accuracy\x18\x02 \x01(\x02\x12\x18\n\x10overall_accuracy\x18\x03 \x01(\x02\"0\n\x07Results\x12%\n\x07results\x18\x01 \x03(\x0b\x32\x14.tcav_results.Result')
)
_RESULT_CAVACCURACIES = _descriptor.Descriptor(
name='CAVaccuracies',
full_name='tcav_results.Result.CAVaccuracies',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='positive_set_accuracy', full_name='tcav_results.Result.CAVaccuracies.positive_set_accuracy', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='negative_set_accuracy', full_name='tcav_results.Result.CAVaccuracies.negative_set_accuracy', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='overall_accuracy', full_name='tcav_results.Result.CAVaccuracies.overall_accuracy', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=402,
serialized_end=505,
)
_RESULT = _descriptor.Descriptor(
name='Result',
full_name='tcav_results.Result',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='cav_key', full_name='tcav_results.Result.cav_key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cav_concept', full_name='tcav_results.Result.cav_concept', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='negative_concept', full_name='tcav_results.Result.negative_concept', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='target_class', full_name='tcav_results.Result.target_class', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cav_accuracies', full_name='tcav_results.Result.cav_accuracies', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='i_up', full_name='tcav_results.Result.i_up', index=5,
number=6, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='val_directional_dirs_abs_mean', full_name='tcav_results.Result.val_directional_dirs_abs_mean', index=6,
number=7, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='val_directional_dirs_mean', full_name='tcav_results.Result.val_directional_dirs_mean', index=7,
number=8, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='val_directional_dirs_std', full_name='tcav_results.Result.val_directional_dirs_std', index=8,
number=9, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='val_directional_dirs', full_name='tcav_results.Result.val_directional_dirs', index=9,
number=10, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='note', full_name='tcav_results.Result.note', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='alpha', full_name='tcav_results.Result.alpha', index=11,
number=12, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='bottleneck', full_name='tcav_results.Result.bottleneck', index=12,
number=13, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_RESULT_CAVACCURACIES, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=45,
serialized_end=505,
)
_RESULTS = _descriptor.Descriptor(
name='Results',
full_name='tcav_results.Results',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='results', full_name='tcav_results.Results.results', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=507,
serialized_end=555,
)
_RESULT_CAVACCURACIES.containing_type = _RESULT
_RESULT.fields_by_name['cav_accuracies'].message_type = _RESULT_CAVACCURACIES
_RESULTS.fields_by_name['results'].message_type = _RESULT
DESCRIPTOR.message_types_by_name['Result'] = _RESULT
DESCRIPTOR.message_types_by_name['Results'] = _RESULTS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Result = _reflection.GeneratedProtocolMessageType('Result', (_message.Message,), {
'CAVaccuracies' : _reflection.GeneratedProtocolMessageType('CAVaccuracies', (_message.Message,), {
'DESCRIPTOR' : _RESULT_CAVACCURACIES,
'__module__' : 'tcav_results.results_pb2'
# @@protoc_insertion_point(class_scope:tcav_results.Result.CAVaccuracies)
})
,
'DESCRIPTOR' : _RESULT,
'__module__' : 'tcav_results.results_pb2'
# @@protoc_insertion_point(class_scope:tcav_results.Result)
})
_sym_db.RegisterMessage(Result)
_sym_db.RegisterMessage(Result.CAVaccuracies)
Results = _reflection.GeneratedProtocolMessageType('Results', (_message.Message,), {
'DESCRIPTOR' : _RESULTS,
'__module__' : 'tcav_results.results_pb2'
# @@protoc_insertion_point(class_scope:tcav_results.Results)
})
_sym_db.RegisterMessage(Results)
# @@protoc_insertion_point(module_scope)
"""Extract reference documentation from the NumPy source tree.
"""
import inspect
import textwrap
import re
import pydoc
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from warnings import warn
class Reader(object):
"""A line-based string reader.
"""
def __init__(self, data):
"""
Parameters
----------
data : str
String with lines separated by '\n'.
"""
if isinstance(data, list):
self._str = data
else:
self._str = data.split('\n') # store string as list of lines
self.reset()
def __getitem__(self, n):
return self._str[n]
def reset(self):
self._l = 0 # current line nr
def read(self):
if not self.eof():
out = self[self._l]
self._l += 1
return out
else:
return ''
def seek_next_non_empty_line(self):
for l in self[self._l:]:
if l.strip():
break
else:
self._l += 1
def eof(self):
return self._l >= len(self._str)
def read_to_condition(self, condition_func):
start = self._l
for line in self[start:]:
if condition_func(line):
return self[start:self._l]
self._l += 1
if self.eof():
return self[start:self._l + 1]
return []
def read_to_next_empty_line(self):
self.seek_next_non_empty_line()
def is_empty(line):
return not line.strip()
return self.read_to_condition(is_empty)
def read_to_next_unindented_line(self):
def is_unindented(line):
return (line.strip() and (len(line.lstrip()) == len(line)))
return self.read_to_condition(is_unindented)
def peek(self, n=0):
if self._l + n < len(self._str):
return self[self._l + n]
else:
return ''
def is_empty(self):
return not ''.join(self._str).strip()
class NumpyDocString(object):
def __init__(self, docstring, config={}):
docstring = textwrap.dedent(docstring).split('\n')
self._doc = Reader(docstring)
self._parsed_data = {
'Signature': '',
'Summary': [''],
'Extended Summary': [],
'Parameters': [],
'Returns': [],
'Raises': [],
'Warns': [],
'Other Parameters': [],
'Attributes': [],
'Methods': [],
'See Also': [],
'Notes': [],
'Warnings': [],
'References': '',
'Examples': '',
'index': {}
}
self._parse()
def __getitem__(self, key):
return self._parsed_data[key]
def __setitem__(self, key, val):
        if key not in self._parsed_data:
warn("Unknown section %s" % key)
else:
self._parsed_data[key] = val
def _is_at_section(self):
self._doc.seek_next_non_empty_line()
if self._doc.eof():
return False
l1 = self._doc.peek().strip() # e.g. Parameters
if l1.startswith('.. index::'):
return True
l2 = self._doc.peek(1).strip() # ---------- or ==========
return l2.startswith('-' * len(l1)) or l2.startswith('=' * len(l1))
def _strip(self, doc):
i = 0
j = 0
for i, line in enumerate(doc):
if line.strip():
break
for j, line in enumerate(doc[::-1]):
if line.strip():
break
return doc[i:len(doc) - j]
def _read_to_next_section(self):
section = self._doc.read_to_next_empty_line()
while not self._is_at_section() and not self._doc.eof():
if not self._doc.peek(-1).strip(): # previous line was empty
section += ['']
section += self._doc.read_to_next_empty_line()
return section
def _read_sections(self):
while not self._doc.eof():
data = self._read_to_next_section()
name = data[0].strip()
if name.startswith('..'): # index section
yield name, data[1:]
elif len(data) < 2:
yield StopIteration
else:
yield name, self._strip(data[2:])
def _parse_param_list(self, content):
r = Reader(content)
params = []
while not r.eof():
header = r.read().strip()
if ' : ' in header:
arg_name, arg_type = header.split(' : ')[:2]
else:
arg_name, arg_type = header, ''
desc = r.read_to_next_unindented_line()
desc = dedent_lines(desc)
params.append((arg_name, arg_type, desc))
return params
_name_rgx = re.compile(r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`|"
r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X)
def _parse_see_also(self, content):
"""
func_name : Descriptive text
continued text
another_func_name : Descriptive text
func_name1, func_name2, :meth:`func_name`, func_name3
"""
items = []
def parse_item_name(text):
"""Match ':role:`name`' or 'name'"""
m = self._name_rgx.match(text)
if m:
g = m.groups()
if g[1] is None:
return g[3], None
else:
return g[2], g[1]
raise ValueError("%s is not a item name" % text)
def push_item(name, rest):
if not name:
return
name, role = parse_item_name(name)
items.append((name, list(rest), role))
del rest[:]
current_func = None
rest = []
for line in content:
if not line.strip():
continue
m = self._name_rgx.match(line)
if m and line[m.end():].strip().startswith(':'):
push_item(current_func, rest)
current_func, line = line[:m.end()], line[m.end():]
rest = [line.split(':', 1)[1].strip()]
if not rest[0]:
rest = []
elif not line.startswith(' '):
push_item(current_func, rest)
current_func = None
if ',' in line:
for func in line.split(','):
push_item(func, [])
elif line.strip():
current_func = line
elif current_func is not None:
rest.append(line.strip())
push_item(current_func, rest)
return items
def _parse_index(self, section, content):
"""
.. index: default
:refguide: something, else, and more
"""
def strip_each_in(lst):
return [s.strip() for s in lst]
out = {}
section = section.split('::')
if len(section) > 1:
out['default'] = strip_each_in(section[1].split(','))[0]
for line in content:
line = line.split(':')
if len(line) > 2:
out[line[1]] = strip_each_in(line[2].split(','))
return out
def _parse_summary(self):
"""Grab signature (if given) and summary"""
if self._is_at_section():
return
summary = self._doc.read_to_next_empty_line()
summary_str = " ".join([s.strip() for s in summary]).strip()
        if re.compile(r'^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str):
self['Signature'] = summary_str
if not self._is_at_section():
self['Summary'] = self._doc.read_to_next_empty_line()
else:
self['Summary'] = summary
if not self._is_at_section():
self['Extended Summary'] = self._read_to_next_section()
def _parse(self):
self._doc.reset()
self._parse_summary()
for (section, content) in self._read_sections():
if not section.startswith('..'):
section = ' '.join([s.capitalize()
for s in section.split(' ')])
if section in ('Parameters', 'Attributes', 'Methods',
'Returns', 'Raises', 'Warns'):
self[section] = self._parse_param_list(content)
elif section.startswith('.. index::'):
self['index'] = self._parse_index(section, content)
elif section == 'See Also':
self['See Also'] = self._parse_see_also(content)
else:
self[section] = content
# string conversion routines
def _str_header(self, name, symbol='-'):
return [name, len(name) * symbol]
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' ' * indent + line]
return out
def _str_signature(self):
if self['Signature']:
return [self['Signature'].replace('*', '\*')] + ['']
else:
return ['']
def _str_summary(self):
if self['Summary']:
return self['Summary'] + ['']
else:
return []
def _str_extended_summary(self):
if self['Extended Summary']:
return self['Extended Summary'] + ['']
else:
return []
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_header(name)
for param, param_type, desc in self[name]:
out += ['%s : %s' % (param, param_type)]
out += self._str_indent(desc)
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += self[name]
out += ['']
return out
def _str_see_also(self, func_role):
if not self['See Also']:
return []
out = []
out += self._str_header("See Also")
last_had_desc = True
for func, desc, role in self['See Also']:
if role:
link = ':%s:`%s`' % (role, func)
elif func_role:
link = ':%s:`%s`' % (func_role, func)
else:
link = "`%s`_" % func
if desc or last_had_desc:
out += ['']
out += [link]
else:
out[-1] += ", %s" % link
if desc:
out += self._str_indent([' '.join(desc)])
last_had_desc = True
else:
last_had_desc = False
out += ['']
return out
def _str_index(self):
idx = self['index']
out = []
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in idx.items():
if section == 'default':
continue
out += [' :%s: %s' % (section, ', '.join(references))]
return out
def __str__(self, func_role=''):
out = []
out += self._str_signature()
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Raises'):
out += self._str_param_list(param_list)
out += self._str_section('Warnings')
out += self._str_see_also(func_role)
for s in ('Notes', 'References', 'Examples'):
out += self._str_section(s)
for param_list in ('Attributes', 'Methods'):
out += self._str_param_list(param_list)
out += self._str_index()
return '\n'.join(out)
def indent(str, indent=4):
indent_str = ' ' * indent
if str is None:
return indent_str
lines = str.split('\n')
return '\n'.join(indent_str + l for l in lines)
def dedent_lines(lines):
"""Deindent a list of lines maximally"""
return textwrap.dedent("\n".join(lines)).split("\n")
def header(text, style='-'):
return text + '\n' + style * len(text) + '\n'
class FunctionDoc(NumpyDocString):
def __init__(self, func, role='func', doc=None, config={}):
self._f = func
self._role = role # e.g. "func" or "meth"
if doc is None:
if func is None:
raise ValueError("No function or docstring given")
doc = inspect.getdoc(func) or ''
NumpyDocString.__init__(self, doc)
if not self['Signature'] and func is not None:
func, func_name = self.get_func()
try:
# try to read signature
argspec = inspect.getargspec(func)
argspec = inspect.formatargspec(*argspec)
argspec = argspec.replace('*', '\*')
signature = '%s%s' % (func_name, argspec)
except TypeError as e:
signature = '%s()' % func_name
self['Signature'] = signature
def get_func(self):
func_name = getattr(self._f, '__name__', self.__class__.__name__)
if inspect.isclass(self._f):
func = getattr(self._f, '__call__', self._f.__init__)
else:
func = self._f
return func, func_name
def __str__(self):
out = ''
func, func_name = self.get_func()
signature = self['Signature'].replace('*', '\*')
roles = {'func': 'function',
'meth': 'method'}
if self._role:
if not self._role in roles:
print("Warning: invalid role %s" % self._role)
out += '.. %s:: %s\n \n\n' % (roles.get(self._role, ''),
func_name)
out += super(FunctionDoc, self).__str__(func_role=self._role)
return out
class ClassDoc(NumpyDocString):
def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc,
config={}):
if not inspect.isclass(cls) and cls is not None:
raise ValueError("Expected a class or None, but got %r" % cls)
self._cls = cls
if modulename and not modulename.endswith('.'):
modulename += '.'
self._mod = modulename
if doc is None:
if cls is None:
raise ValueError("No class or documentation string given")
doc = pydoc.getdoc(cls)
NumpyDocString.__init__(self, doc)
if config.get('show_class_members', True):
if not self['Methods']:
self['Methods'] = [(name, '', '')
for name in sorted(self.methods)]
if not self['Attributes']:
self['Attributes'] = [(name, '', '')
for name in sorted(self.properties)]
@property
def methods(self):
if self._cls is None:
return []
return [name for name, func in inspect.getmembers(self._cls)
if not name.startswith('_') and callable(func)]
@property
def properties(self):
if self._cls is None:
return []
return [name for name, func in inspect.getmembers(self._cls)
if not name.startswith('_') and func is None]
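# Example usage (a minimal sketch; the docstring below is made up and the
# shown results are approximate):
#
#   doc = NumpyDocString(
#       """Add two numbers.
#
#       Parameters
#       ----------
#       a : int
#           First operand.
#       """)
#   doc['Summary']      # -> roughly ['Add two numbers.']
#   doc['Parameters']   # -> roughly [('a', 'int', ['First operand.'])]
#   str(doc)            # re-renders the parsed sections as text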
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests the network latency of a Firecracker guest."""
import logging
import platform
import re
import pytest
import host_tools.network as net_tools
from conftest import ARTIFACTS_COLLECTION
from framework.artifacts import ArtifactSet
from framework.matrix import TestMatrix, TestContext
from framework.builder import MicrovmBuilder
from framework.stats import core, consumer, producer, types, criteria,\
function
from framework.utils import eager_map, get_kernel_version, CpuMap
from framework.artifacts import DEFAULT_HOST_IP
from framework.utils_cpuid import get_cpu_model_name
from integration_tests.performance.utils import handle_failure
PING = "ping -c {} -i {} {}"
LATENCY_AVG_BASELINES = {
"x86_64": {
"4.14": {
"target": 0.240, # milliseconds
"delta": 0.040 # milliseconds
},
"5.10": {
"target": 0.250, # milliseconds
"delta": 0.020 # milliseconds
}
},
"aarch64": {
"4.14": {
"target": 0.039, # milliseconds
"delta": 0.020 # milliseconds
},
"5.10": {
"target": 0.034, # milliseconds
"delta": 0.020 # milliseconds
}
}
}
PKT_LOSS = "pkt_loss"
PKT_LOSS_STAT_KEY = "value"
LATENCY = "latency"
def pass_criteria():
"""Define pass criteria for the statistics."""
arch = platform.machine()
host_kernel = get_kernel_version(level=1)
return {
"Avg": criteria.EqualWith(LATENCY_AVG_BASELINES[arch][host_kernel])
}
def measurements():
"""Define measurements."""
latency = types.MeasurementDef.create_measurement(
LATENCY,
"ms",
[function.ValuePlaceholder("Avg"),
function.ValuePlaceholder("Min"),
function.ValuePlaceholder("Max"),
function.ValuePlaceholder("Stddev"),
function.ValuePlaceholder("Percentile99"),
function.ValuePlaceholder("Percentile90"),
function.ValuePlaceholder("Percentile50")],
pass_criteria())
pkt_loss = types.MeasurementDef.create_measurement(
PKT_LOSS,
"percentage",
[function.ValuePlaceholder(PKT_LOSS_STAT_KEY)])
return [latency, pkt_loss]
def consume_ping_output(cons, raw_data, requests):
"""Consume ping output.
Output example:
PING 8.8.8.8 (8.8.8.8) 56(84) bytes of data.
64 bytes from 8.8.8.8: icmp_seq=1 ttl=118 time=17.7 ms
64 bytes from 8.8.8.8: icmp_seq=2 ttl=118 time=17.7 ms
64 bytes from 8.8.8.8: icmp_seq=3 ttl=118 time=17.4 ms
64 bytes from 8.8.8.8: icmp_seq=4 ttl=118 time=17.8 ms
--- 8.8.8.8 ping statistics ---
4 packets transmitted, 4 received, 0% packet loss, time 3005ms
rtt min/avg/max/mdev = 17.478/17.705/17.808/0.210 ms
"""
eager_map(cons.set_measurement_def, measurements())
st_keys = ["Min",
"Avg",
"Max",
"Stddev"]
output = raw_data.strip().split('\n')
assert len(output) > 2
# E.g: round-trip min/avg/max/stddev = 17.478/17.705/17.808/0.210 ms
stat_values = output[-1]
pattern_stats = "min/avg/max/[a-z]+dev = (.+)/(.+)/(.+)/(.+) ms"
stat_values = re.findall(pattern_stats, stat_values)[0]
assert len(stat_values) == 4
for index, stat_value in enumerate(stat_values[:4]):
cons.consume_stat(st_name=st_keys[index],
ms_name=LATENCY,
value=float(stat_value))
# E.g: 4 packets transmitted, 4 received, 0% packet loss
packet_stats = output[-2]
pattern_packet = ".+ packet.+transmitted, .+ received," \
" (.+)% packet loss"
pkt_loss = re.findall(pattern_packet, packet_stats)[0]
assert len(pkt_loss) == 1
cons.consume_stat(st_name=PKT_LOSS_STAT_KEY,
ms_name=PKT_LOSS,
value=pkt_loss[0])
# Compute percentiles.
seqs = output[1:requests + 1]
times = []
pattern_time = ".+ bytes from .+: icmp_seq=.+ ttl=.+ time=(.+) ms"
for index, seq in enumerate(seqs):
time = re.findall(pattern_time, seq)
assert len(time) == 1
        times.append(float(time[0]))
times.sort()
cons.consume_stat(st_name="Percentile50",
ms_name=LATENCY,
value=times[int(requests * 0.5)])
cons.consume_stat(st_name="Percentile90",
ms_name=LATENCY,
value=times[int(requests * 0.9)])
cons.consume_stat(st_name="Percentile99",
ms_name=LATENCY,
value=times[int(requests * 0.99)])
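# For example, with the 1000 requests configured in the test context below,
# the percentile lookups above index the sorted per-request round-trip times
# at positions 500, 900 and 990.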
@pytest.mark.nonci
@pytest.mark.timeout(3600)
def test_network_latency(bin_cloner_path, results_file_dumper):
"""
Test network latency for multiple vm configurations.
@type: performance
"""
logger = logging.getLogger("network_latency")
microvm_artifacts = ArtifactSet(
ARTIFACTS_COLLECTION.microvms(keyword="1vcpu_1024mb")
)
kernel_artifacts = ArtifactSet(ARTIFACTS_COLLECTION.kernels())
disk_artifacts = ArtifactSet(ARTIFACTS_COLLECTION.disks(keyword="ubuntu"))
# Create a test context and add builder, logger, network.
test_context = TestContext()
test_context.custom = {
'builder': MicrovmBuilder(bin_cloner_path),
'logger': logger,
'requests': 1000,
'interval': 0.2, # Seconds.
'name': 'network_latency',
'results_file_dumper': results_file_dumper
}
# Create the test matrix.
test_matrix = TestMatrix(context=test_context,
artifact_sets=[
microvm_artifacts,
kernel_artifacts,
disk_artifacts
])
test_matrix.run_test(_g2h_send_ping)
def _g2h_send_ping(context):
"""Send ping from guest to host."""
logger = context.custom['logger']
vm_builder = context.custom['builder']
interval_between_req = context.custom['interval']
name = context.custom['name']
file_dumper = context.custom['results_file_dumper']
logger.info("Testing {} with microvm: \"{}\", kernel {}, disk {} "
.format(name,
context.microvm.name(),
context.kernel.name(),
context.disk.name()))
# Create a rw copy artifact.
rw_disk = context.disk.copy()
# Get ssh key from read-only artifact.
ssh_key = context.disk.ssh_key()
    # Create a fresh microvm from artifacts.
vm_instance = vm_builder.build(kernel=context.kernel,
disks=[rw_disk],
ssh_key=ssh_key,
config=context.microvm)
basevm = vm_instance.vm
basevm.start()
# Check if the needed CPU cores are available. We have the API thread, VMM
# thread and then one thread for each configured vCPU.
assert CpuMap.len() >= 2 + basevm.vcpus_count
# Pin uVM threads to physical cores.
current_cpu_id = 0
assert basevm.pin_vmm(current_cpu_id), \
"Failed to pin firecracker thread."
current_cpu_id += 1
assert basevm.pin_api(current_cpu_id), \
"Failed to pin fc_api thread."
for i in range(basevm.vcpus_count):
current_cpu_id += 1
        assert basevm.pin_vcpu(i, current_cpu_id), \
f"Failed to pin fc_vcpu {i} thread."
custom = {"microvm": context.microvm.name(),
"kernel": context.kernel.name(),
"disk": context.disk.name(),
"cpu_model_name": get_cpu_model_name()}
st_core = core.Core(name="network_latency", iterations=1, custom=custom)
env_id = f"{context.kernel.name()}/{context.disk.name()}/" \
f"{context.microvm.name()}"
cons = consumer.LambdaConsumer(
func=consume_ping_output,
func_kwargs={"requests": context.custom['requests']}
)
cmd = PING.format(context.custom['requests'],
interval_between_req,
DEFAULT_HOST_IP)
prod = producer.SSHCommand(cmd,
net_tools.SSHConnection(basevm.ssh_config))
st_core.add_pipe(producer=prod, consumer=cons, tag=f"{env_id}/ping")
# Gather results and verify pass criteria.
try:
result = st_core.run_exercise()
except core.CoreException as err:
handle_failure(file_dumper, err)
file_dumper.dump(result)
# util/deprecations.py
# Copyright (C) 2005-2019 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Helpers related to deprecation of functions, methods, classes, other
functionality."""
import re
import warnings
from . import compat
from .langhelpers import decorator
from .langhelpers import inject_docstring_text
from .langhelpers import inject_param_text
from .. import exc
def warn_deprecated(msg, stacklevel=3):
warnings.warn(msg, exc.SADeprecationWarning, stacklevel=stacklevel)
def warn_pending_deprecation(msg, stacklevel=3):
warnings.warn(msg, exc.SAPendingDeprecationWarning, stacklevel=stacklevel)
def deprecated_cls(version, message, constructor="__init__"):
header = ".. deprecated:: %s %s" % (version, (message or ""))
def decorate(cls):
return _decorate_cls_with_warning(
cls,
constructor,
exc.SADeprecationWarning,
message % dict(func=constructor),
header,
)
return decorate
def deprecated(version, message=None, add_deprecation_to_docstring=True):
"""Decorates a function and issues a deprecation warning on use.
:param version:
Issue version in the warning.
:param message:
If provided, issue message in the warning. A sensible default
is used if not provided.
:param add_deprecation_to_docstring:
Default True. If False, the wrapped function's __doc__ is left
as-is. If True, the 'message' is prepended to the docs if
provided, or sensible default if message is omitted.
"""
if add_deprecation_to_docstring:
header = ".. deprecated:: %s %s" % (version, (message or ""))
else:
header = None
if message is None:
message = "Call to deprecated function %(func)s"
def decorate(fn):
return _decorate_with_warning(
fn,
exc.SADeprecationWarning,
message % dict(func=fn.__name__),
header,
)
return decorate
def deprecated_params(**specs):
"""Decorates a function to warn on use of certain parameters.
e.g. ::
@deprecated_params(
weak_identity_map=(
"0.7",
"the :paramref:`.Session.weak_identity_map parameter "
"is deprecated."
)
)
"""
messages = {}
for param, (version, message) in specs.items():
messages[param] = _sanitize_restructured_text(message)
def decorate(fn):
spec = compat.inspect_getfullargspec(fn)
if spec.defaults is not None:
defaults = dict(
zip(
spec.args[(len(spec.args) - len(spec.defaults)) :],
spec.defaults,
)
)
check_defaults = set(defaults).intersection(messages)
check_kw = set(messages).difference(defaults)
else:
check_defaults = ()
check_kw = set(messages)
@decorator
def warned(fn, *args, **kwargs):
for m in check_defaults:
if kwargs[m] != defaults[m]:
warnings.warn(
messages[m], exc.SADeprecationWarning, stacklevel=3
)
for m in check_kw:
if m in kwargs:
warnings.warn(
messages[m], exc.SADeprecationWarning, stacklevel=3
)
return fn(*args, **kwargs)
doc = fn.__doc__ is not None and fn.__doc__ or ""
if doc:
doc = inject_param_text(
doc,
{
param: ".. deprecated:: %s %s" % (version, (message or ""))
for param, (version, message) in specs.items()
},
)
decorated = warned(fn)
decorated.__doc__ = doc
return decorated
return decorate
def pending_deprecation(
version, message=None, add_deprecation_to_docstring=True
):
"""Decorates a function and issues a pending deprecation warning on use.
:param version:
An approximate future version at which point the pending deprecation
will become deprecated. Not used in messaging.
:param message:
If provided, issue message in the warning. A sensible default
is used if not provided.
:param add_deprecation_to_docstring:
Default True. If False, the wrapped function's __doc__ is left
as-is. If True, the 'message' is prepended to the docs if
provided, or sensible default if message is omitted.
"""
if add_deprecation_to_docstring:
header = ".. deprecated:: %s (pending) %s" % (version, (message or ""))
else:
header = None
if message is None:
message = "Call to deprecated function %(func)s"
def decorate(fn):
return _decorate_with_warning(
fn,
exc.SAPendingDeprecationWarning,
message % dict(func=fn.__name__),
header,
)
return decorate
def deprecated_option_value(parameter_value, default_value, warning_text):
if parameter_value is None:
return default_value
else:
warn_deprecated(warning_text)
return parameter_value
def _sanitize_restructured_text(text):
def repl(m):
type_, name = m.group(1, 2)
if type_ in ("func", "meth"):
name += "()"
return name
return re.sub(r"\:(\w+)\:`~?\.?(.+?)`", repl, text)
def _decorate_cls_with_warning(
cls, constructor, wtype, message, docstring_header=None
):
doc = cls.__doc__ is not None and cls.__doc__ or ""
if docstring_header is not None:
docstring_header %= dict(func=constructor)
doc = inject_docstring_text(doc, docstring_header, 1)
if type(cls) is type:
clsdict = dict(cls.__dict__)
clsdict["__doc__"] = doc
cls = type(cls.__name__, cls.__bases__, clsdict)
constructor_fn = clsdict[constructor]
else:
cls.__doc__ = doc
constructor_fn = getattr(cls, constructor)
setattr(
cls,
constructor,
_decorate_with_warning(constructor_fn, wtype, message, None),
)
return cls
def _decorate_with_warning(func, wtype, message, docstring_header=None):
"""Wrap a function with a warnings.warn and augmented docstring."""
message = _sanitize_restructured_text(message)
@decorator
def warned(fn, *args, **kwargs):
skip_warning = kwargs.pop("_sa_skip_warning", False)
if not skip_warning:
warnings.warn(message, wtype, stacklevel=3)
return fn(*args, **kwargs)
doc = func.__doc__ is not None and func.__doc__ or ""
if docstring_header is not None:
docstring_header %= dict(func=func.__name__)
doc = inject_docstring_text(doc, docstring_header, 1)
decorated = warned(func)
decorated.__doc__ = doc
decorated._sa_warn = lambda: warnings.warn(message, wtype, stacklevel=3)
return decorated
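# Example usage (a minimal sketch; old_helper is hypothetical):
#
#   @deprecated("1.3", "The :func:`.old_helper` function is deprecated.")
#   def old_helper():
#       ...
#
# Calling old_helper() then emits an SADeprecationWarning (unless the caller
# passes _sa_skip_warning=True), and because add_deprecation_to_docstring
# defaults to True, a ".. deprecated:: 1.3 ..." header is injected into the
# wrapped function's docstring.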
from urllib import quote_plus
from django.contrib import messages
from django.http import HttpResponse, HttpResponseRedirect, Http404, JsonResponse
try:
from django.utils import simplejson as json
except ImportError:
import json
from django.shortcuts import render, get_object_or_404, redirect
from django.utils import timezone
from django.db.models import Q
from comments.forms import CommentForm
from comments.models import Comment
from .forms import PostForm
from .models import Post
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.decorators import login_required
from communities.models import Communities
from taggit.models import Tag
from django.views.decorators.http import require_POST
from django.views.generic import View
import datetime
from django.contrib.auth.models import User
from django.contrib.auth import get_user_model, authenticate
from django.views.generic import RedirectView
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import authentication, permissions
User = get_user_model()
@login_required
def post_create(request):
#FORM
form = PostForm(request.POST or None, request.FILES or None)
if form.is_valid():
instance = form.save(commit=False)
instance.user = request.user
instance.save()
form.save_m2m()
messages.success(request, "Successfully Created")
return HttpResponseRedirect(instance.get_absolute_url())
else:
messages.error(request, "Not Successfully Created")
context = {
"form": form,
}
return render(request, "stories/post_form.html", context)
@login_required
def post_detail(request, slug=None):
instance = get_object_or_404(Post, slug=slug)
share_string = quote_plus(instance.content)
initial_data = {
"content_type": instance.get_content_type,
"object_id": instance.id
}
#FORM SAVE
form = CommentForm(request.POST or None, initial=initial_data)
if form.is_valid():
c_type = form.cleaned_data.get("content_type")
content_type = ContentType.objects.get(model=c_type)
obj_id = form.cleaned_data.get('object_id')
content_data = form.cleaned_data.get("content")
parent_obj = None
try:
parent_id = int(request.POST.get("parent_id"))
        except (TypeError, ValueError):
parent_id = None
if parent_id:
parent_qs = Comment.objects.filter(id=parent_id)
if parent_qs.exists() and parent_qs.count() == 1:
parent_obj = parent_qs.first()
#COMMENTS
new_comment, created = Comment.objects.get_or_create(
user = request.user,
content_type= content_type,
object_id = obj_id,
content = content_data,
parent = parent_obj,
)
return HttpResponseRedirect(new_comment.content_object.get_absolute_url())
comments = instance.comments
#COMMUNITIES
context = {
"title": instance.title,
"instance": instance,
"share_string": share_string,
"comments": comments,
"comment_form":form,
"tags": tags,
}
extra_context={
'objects':Post.objects.all(),
}
return render(request, "stories/post_detail.html", context)
class PostLikeToggle(RedirectView):
def get_redirect_url(self, *args, **kwargs):
slug = self.kwargs.get("slug")
print(slug)
obj = get_object_or_404(Post, slug=slug)
url_ = obj.get_absolute_url()
user = self.request.user
if user.is_authenticated():
if user in obj.likes.all():
obj.likes.remove(user)
else:
obj.likes.add(user)
return url_
class PostLikeAPIToggle(APIView):
authentication_classes = (authentication.SessionAuthentication,)
permission_classes = (permissions.IsAuthenticated,)
def get(self, request, slug=None, format=None):
# slug = self.kwargs.get("slug")
obj = get_object_or_404(Post, slug=slug)
url_ = obj.get_absolute_url()
user = self.request.user
updated = False
liked = False
if user.is_authenticated():
if user in obj.likes.all():
liked = False
obj.likes.remove(user)
else:
liked = True
obj.likes.add(user)
updated = True
data = {
"updated": updated,
"liked": liked
}
return Response(data)
def tags(request, tag):
posts = Post.objects.filter(tags__name=tag)
name = Post.objects.all(),
context={
'posts': posts,
'tags':tag,
'name': tag,
}
return render(request, "stories/tags_list.html", context)
def post_list(request, slug=None):
user = User.objects.all()
today = timezone.now().date()
queryset_list = Post.objects.active()#.order_by("-timestamp")
if request.user.is_staff or request.user.is_superuser:
queryset_list = Post.objects.all()
queryset_list.user = request.user
query = request.GET.get("q")
if query:
queryset_list = queryset_list.filter(
Q(title__icontains=query)|
Q(content__icontains=query)|
Q(user__first_name__icontains=query) |
Q(user__last_name__icontains=query)
).distinct()
context = {
"object_list": queryset_list,
"user": user,
"title": "List",
"today": today,
}
return render(request, "stories/post_list.html", context)
def user(request, username):
user = User.objects.all()
context = {
"user": user,
}
return render(request, "stories/post_list.html", context)
@login_required
def post_update(request, slug=None):
instance = get_object_or_404(Post, slug =slug)
form = PostForm(request.POST or None, request.FILES or None, instance=instance)
if form.is_valid():
instance = form.save(commit=False)
instance.save()
form.save_m2m()
messages.success(request, "<a href='#'>Item</a> Saved", extra_tags='html_safe')
return HttpResponseRedirect(instance.get_absolute_url())
context = {
"title": instance.title,
"instance":instance,
"form":form,
"tags":tags
}
return render(request, "stories/post_form.html", context)
def post_delete(request, slug=None):
instance = get_object_or_404(Post, slug =slug)
messages.success(request, "Successfully Deleted")
instance.delete()
return redirect("posts:list")
def post_profile(request):
queryset_list = Post.objects.all()
context = {
"object_list": queryset_list,
}
return render(request, "profiles/profile_list.html", context)
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tarfile
import tempfile
import yaml
import mock
import unittest2
from distutils.spawn import find_executable
from st2tests.base import CleanFilesTestCase
from st2debug.cmd.submit_debug_info import create_archive
from st2debug.cmd.submit_debug_info import encrypt_archive
import st2debug.cmd.submit_debug_info
from st2debug.constants import GPG_KEY
from st2debug.constants import GPG_KEY_FINGERPRINT
from st2debug.constants import S3_BUCKET_URL
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
FIXTURES_DIR = os.path.join(BASE_DIR, 'fixtures')
GPG_INSTALLED = find_executable('gpg') is not None
SUBMIT_DEBUG_YAML_FILE = os.path.join(FIXTURES_DIR, 'submit-debug-info.yaml')
@unittest2.skipIf(not GPG_INSTALLED, 'gpg binary not available')
class SubmitDebugInfoTestCase(CleanFilesTestCase):
def setUp(self):
super(SubmitDebugInfoTestCase, self).setUp()
# Mock paths so we include mock data
logs_dir = os.path.join(FIXTURES_DIR, 'logs/')
st2debug.cmd.submit_debug_info.ST2_LOG_FILES_PATH = logs_dir + '*.log'
st2debug.cmd.submit_debug_info.LOG_FILE_PATHS = [
st2debug.cmd.submit_debug_info.ST2_LOG_FILES_PATH
]
configs_dir = os.path.join(FIXTURES_DIR, 'configs/')
st2debug.cmd.submit_debug_info.ST2_CONFIG_FILE_PATH = os.path.join(configs_dir, 'st2.conf')
st2debug.cmd.submit_debug_info.MISTRAL_CONFIG_FILE_PATH = os.path.join(configs_dir,
'mistral.conf')
st2debug.cmd.submit_debug_info.CONFIG_FILE_PATHS = [
st2debug.cmd.submit_debug_info.ST2_CONFIG_FILE_PATH,
st2debug.cmd.submit_debug_info.MISTRAL_CONFIG_FILE_PATH
]
# Mock get_packs_base_paths
content_dir = os.path.join(FIXTURES_DIR, 'content/')
return_value = [content_dir]
st2debug.cmd.submit_debug_info.get_packs_base_paths = mock.Mock()
st2debug.cmd.submit_debug_info.get_packs_base_paths.return_value = return_value
def _verify_archive(self, archive_path, extract_path, required_directories):
# Verify archive has been created
self.assertTrue(os.path.isfile(archive_path))
self.to_delete_files.append(archive_path)
self.to_delete_directories.append(extract_path)
self._extract_archive(archive_path=archive_path, extract_path=extract_path)
for directory_name in required_directories:
full_path = os.path.join(extract_path, directory_name)
self.assertTrue(os.path.isdir(full_path))
        # Verify system info file has been created
full_path = os.path.join(extract_path, 'system_info.yaml')
self.assertTrue(os.path.isfile(full_path))
# Verify logs have been copied
logs_path = os.path.join(extract_path, 'logs')
log_files = os.listdir(logs_path)
        self.assertEqual(len(log_files), 2)
# Verify configs have been copied
st2_config_path = os.path.join(extract_path, 'configs', 'st2.conf')
mistral_config_path = os.path.join(extract_path, 'configs', 'mistral.conf')
self.assertTrue(os.path.isfile(st2_config_path))
self.assertTrue(os.path.isfile(mistral_config_path))
# Verify packs have been copied
content_path = os.path.join(extract_path, 'content/dir-1')
pack_directories = os.listdir(content_path)
self.assertEqual(len(pack_directories), 1)
# Verify sensitive data has been masked in the configs
with open(st2_config_path, 'r') as fp:
st2_config_content = fp.read()
with open(mistral_config_path, 'r') as fp:
mistral_config_content = fp.read()
self.assertTrue('ponies' not in st2_config_content)
self.assertTrue('username = **removed**' in st2_config_content)
self.assertTrue('password = **removed**' in st2_config_content)
self.assertTrue('url = **removed**' in st2_config_content)
self.assertTrue('StackStorm' not in mistral_config_content)
self.assertTrue('connection = **removed**' in mistral_config_content)
        # Verify config.yaml has been removed from the content pack directories
pack_dir = os.path.join(content_path, 'twilio')
config_path = os.path.join(pack_dir, 'config.yaml')
self.assertTrue(os.path.isdir(pack_dir))
self.assertTrue(not os.path.exists(config_path))
def test_create_archive_include_all(self):
archive_path = create_archive(include_logs=True, include_configs=True,
include_content=True,
include_system_info=True)
extract_path = tempfile.mkdtemp()
self._verify_archive(archive_path=archive_path,
extract_path=extract_path,
required_directories=['logs', 'configs', 'content'])
def _create_config_yaml_file(self):
config_data = dict(
log_file_paths={'st2_log_files_path': os.path.join(FIXTURES_DIR, 'logs/st2*.log')},
conf_file_paths={
'st2_config_file_path': os.path.join(FIXTURES_DIR, 'configs/st2.conf'),
'mistral_config_file_path': os.path.join(FIXTURES_DIR, 'configs/mistral.conf')},
s3_bucket={'url': S3_BUCKET_URL},
gpg={'gpg_key_fingerprint': GPG_KEY_FINGERPRINT,
'gpg_key': GPG_KEY},
shell_commands={'cmd': 'rpm -qa'},
company_name={'name': 'MyCompany'})
with open(SUBMIT_DEBUG_YAML_FILE, 'w') as outfile:
outfile.write(yaml.dump(config_data, default_flow_style=False))
def test_create_archive_include_all_with_config_option(self):
# Create the YAML configuration file
self._create_config_yaml_file()
self.to_delete_files.append(SUBMIT_DEBUG_YAML_FILE)
# Load the submit debug info yaml file
st2debug.cmd.submit_debug_info.load_config_yaml_file(SUBMIT_DEBUG_YAML_FILE)
archive_path = create_archive(include_logs=True, include_configs=True,
include_content=True,
include_system_info=True,
include_shell_commands=True,
config_yaml=SUBMIT_DEBUG_YAML_FILE)
extract_path = tempfile.mkdtemp()
self._verify_archive(archive_path=archive_path,
extract_path=extract_path,
required_directories=['logs', 'configs', 'content', 'commands'])
# Verify commands output have been copied
commands_path = os.path.join(extract_path, 'commands')
command_files = os.listdir(commands_path)
        self.assertEqual(len(command_files), 1)
def test_create_archive_exclusion(self):
# Verify only system info file is included
archive_path = create_archive(include_logs=False, include_configs=False,
include_content=False,
include_system_info=True)
# Verify archive has been created
self.assertTrue(os.path.isfile(archive_path))
self.to_delete_files.append(archive_path)
extract_path = tempfile.mkdtemp()
self.to_delete_directories.append(extract_path)
self._extract_archive(archive_path=archive_path, extract_path=extract_path)
# Verify system info file is there and other directories are empty
directories = ['logs', 'configs', 'content']
for directory_name in directories:
full_path = os.path.join(extract_path, directory_name)
files = os.listdir(full_path)
self.assertEqual(len(files), 0)
full_path = os.path.join(extract_path, 'system_info.yaml')
self.assertTrue(os.path.isfile(full_path))
def test_encrypt_archive(self):
plaintext_archive_path = create_archive(include_logs=True, include_configs=True,
include_content=True,
include_system_info=True)
plaintext_archive_size = os.stat(plaintext_archive_path).st_size
encrypted_archive_path = encrypt_archive(archive_file_path=plaintext_archive_path)
encrypt_archive_size = os.stat(encrypted_archive_path).st_size
self.assertTrue(os.path.isfile(encrypted_archive_path))
self.assertTrue(encrypt_archive_size > plaintext_archive_size)
        self.assertRaises(Exception, self._extract_archive,
                          archive_path=encrypted_archive_path,
                          extract_path='/tmp')
def test_encrypt_archive_with_custom_gpg_key(self):
# Create the YAML configuration file
self._create_config_yaml_file()
self.to_delete_files.append(SUBMIT_DEBUG_YAML_FILE)
# Load the submit debug info yaml file
st2debug.cmd.submit_debug_info.load_config_yaml_file(SUBMIT_DEBUG_YAML_FILE)
plaintext_archive_path = create_archive(include_logs=True, include_configs=True,
include_content=True,
include_system_info=True,
include_shell_commands=True,
config_yaml=SUBMIT_DEBUG_YAML_FILE)
plaintext_archive_size = os.stat(plaintext_archive_path).st_size
encrypted_archive_path = encrypt_archive(archive_file_path=plaintext_archive_path)
encrypt_archive_size = os.stat(encrypted_archive_path).st_size
self.assertTrue(os.path.isfile(encrypted_archive_path))
self.assertTrue(encrypt_archive_size > plaintext_archive_size)
        self.assertRaises(Exception, self._extract_archive,
                          archive_path=encrypted_archive_path,
                          extract_path='/tmp')
def _extract_archive(self, archive_path, extract_path):
with tarfile.open(archive_path) as tar:
tar.extractall(path=extract_path)
|
|
#
# The Python Imaging Library.
# $Id$
#
# Windows Icon support for PIL
#
# History:
# 96-05-27 fl Created
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1996.
#
# See the README file for information on usage and redistribution.
#
# This plugin is a refactored version of Win32IconImagePlugin by Bryan Davis
# <[email protected]>.
# https://code.google.com/archive/p/casadebender/wikis/Win32IconImagePlugin.wiki
#
# Icon format references:
# * https://en.wikipedia.org/wiki/ICO_(file_format)
# * https://msdn.microsoft.com/en-us/library/ms997538.aspx
import struct
from io import BytesIO
from . import Image, ImageFile, BmpImagePlugin, PngImagePlugin
from ._binary import i8, i16le as i16, i32le as i32
from math import log, ceil
__version__ = "0.1"
#
# --------------------------------------------------------------------
_MAGIC = b"\0\0\1\0"
def _save(im, fp, filename):
fp.write(_MAGIC) # (2+2)
sizes = im.encoderinfo.get("sizes",
[(16, 16), (24, 24), (32, 32), (48, 48),
(64, 64), (128, 128), (256, 256)])
width, height = im.size
    sizes = filter(lambda x: x[0] <= width and x[1] <= height and
                             x[0] <= 256 and x[1] <= 256,
                   sizes)
sizes = list(sizes)
fp.write(struct.pack("<H", len(sizes))) # idCount(2)
offset = fp.tell() + len(sizes)*16
for size in sizes:
width, height = size
# 0 means 256
fp.write(struct.pack("B", width if width < 256 else 0)) # bWidth(1)
fp.write(struct.pack("B", height if height < 256 else 0)) # bHeight(1)
fp.write(b"\0") # bColorCount(1)
fp.write(b"\0") # bReserved(1)
fp.write(b"\0\0") # wPlanes(2)
fp.write(struct.pack("<H", 32)) # wBitCount(2)
image_io = BytesIO()
tmp = im.copy()
tmp.thumbnail(size, Image.LANCZOS)
tmp.save(image_io, "png")
image_io.seek(0)
image_bytes = image_io.read()
bytes_len = len(image_bytes)
fp.write(struct.pack("<I", bytes_len)) # dwBytesInRes(4)
fp.write(struct.pack("<I", offset)) # dwImageOffset(4)
current = fp.tell()
fp.seek(offset)
fp.write(image_bytes)
offset = offset + bytes_len
fp.seek(current)
def _accept(prefix):
return prefix[:4] == _MAGIC
class IcoFile(object):
def __init__(self, buf):
"""
Parse image from file-like object containing ico file data
"""
# check magic
s = buf.read(6)
if not _accept(s):
raise SyntaxError("not an ICO file")
self.buf = buf
self.entry = []
# Number of items in file
self.nb_items = i16(s[4:])
# Get headers for each item
for i in range(self.nb_items):
s = buf.read(16)
icon_header = {
'width': i8(s[0]),
'height': i8(s[1]),
'nb_color': i8(s[2]), # No. of colors in image (0 if >=8bpp)
'reserved': i8(s[3]),
'planes': i16(s[4:]),
'bpp': i16(s[6:]),
'size': i32(s[8:]),
'offset': i32(s[12:])
}
# See Wikipedia
for j in ('width', 'height'):
if not icon_header[j]:
icon_header[j] = 256
# See Wikipedia notes about color depth.
            # We need this just to distinguish images with equal sizes
icon_header['color_depth'] = (icon_header['bpp'] or
(icon_header['nb_color'] != 0 and
ceil(log(icon_header['nb_color'],
2))) or 256)
icon_header['dim'] = (icon_header['width'], icon_header['height'])
icon_header['square'] = (icon_header['width'] *
icon_header['height'])
self.entry.append(icon_header)
self.entry = sorted(self.entry, key=lambda x: x['color_depth'])
# ICO images are usually squares
# self.entry = sorted(self.entry, key=lambda x: x['width'])
self.entry = sorted(self.entry, key=lambda x: x['square'])
self.entry.reverse()
def sizes(self):
"""
Get a list of all available icon sizes and color depths.
"""
return {(h['width'], h['height']) for h in self.entry}
def getimage(self, size, bpp=False):
"""
Get an image from the icon
"""
for (i, h) in enumerate(self.entry):
if size == h['dim'] and (bpp is False or bpp == h['color_depth']):
return self.frame(i)
return self.frame(0)
def frame(self, idx):
"""
Get an image from frame idx
"""
header = self.entry[idx]
self.buf.seek(header['offset'])
data = self.buf.read(8)
self.buf.seek(header['offset'])
if data[:8] == PngImagePlugin._MAGIC:
# png frame
im = PngImagePlugin.PngImageFile(self.buf)
else:
# XOR + AND mask bmp frame
im = BmpImagePlugin.DibImageFile(self.buf)
# change tile dimension to only encompass XOR image
im.size = (im.size[0], int(im.size[1] / 2))
d, e, o, a = im.tile[0]
im.tile[0] = d, (0, 0) + im.size, o, a
# figure out where AND mask image starts
mode = a[0]
bpp = 8
for k, v in BmpImagePlugin.BIT2MODE.items():
if mode == v[1]:
bpp = k
break
if 32 == bpp:
# 32-bit color depth icon image allows semitransparent areas
# PIL's DIB format ignores transparency bits, recover them.
# The DIB is packed in BGRX byte order where X is the alpha
# channel.
# Back up to start of bmp data
self.buf.seek(o)
# extract every 4th byte (eg. 3,7,11,15,...)
alpha_bytes = self.buf.read(im.size[0] * im.size[1] * 4)[3::4]
# convert to an 8bpp grayscale image
mask = Image.frombuffer(
'L', # 8bpp
im.size, # (w, h)
alpha_bytes, # source chars
'raw', # raw decoder
('L', 0, -1) # 8bpp inverted, unpadded, reversed
)
else:
# get AND image from end of bitmap
w = im.size[0]
if (w % 32) > 0:
                # bitmap row data is aligned to 32-bit (4-byte) boundaries
w += 32 - (im.size[0] % 32)
# the total mask data is
# padded row size * height / bits per char
and_mask_offset = o + int(im.size[0] * im.size[1] *
(bpp / 8.0))
total_bytes = int((w * im.size[1]) / 8)
self.buf.seek(and_mask_offset)
mask_data = self.buf.read(total_bytes)
# convert raw data to image
mask = Image.frombuffer(
'1', # 1 bpp
im.size, # (w, h)
mask_data, # source chars
'raw', # raw decoder
('1;I', int(w/8), -1) # 1bpp inverted, padded, reversed
)
# now we have two images, im is XOR image and mask is AND image
# apply mask image as alpha channel
im = im.convert('RGBA')
im.putalpha(mask)
return im
##
# Image plugin for Windows Icon files.
class IcoImageFile(ImageFile.ImageFile):
"""
PIL read-only image support for Microsoft Windows .ico files.
By default the largest resolution image in the file will be loaded. This
can be changed by altering the 'size' attribute before calling 'load'.
The info dictionary has a key 'sizes' that is a list of the sizes available
in the icon file.
Handles classic, XP and Vista icon formats.
This plugin is a refactored version of Win32IconImagePlugin by Bryan Davis
<[email protected]>.
https://code.google.com/archive/p/casadebender/wikis/Win32IconImagePlugin.wiki
"""
format = "ICO"
format_description = "Windows Icon"
def _open(self):
self.ico = IcoFile(self.fp)
self.info['sizes'] = self.ico.sizes()
self.size = self.ico.entry[0]['dim']
self.load()
def load(self):
im = self.ico.getimage(self.size)
# if tile is PNG, it won't really be loaded yet
im.load()
self.im = im.im
self.mode = im.mode
self.size = im.size
def load_seek(self):
# Flag the ImageFile.Parser so that it
# just does all the decode at the end.
pass
#
# --------------------------------------------------------------------
Image.register_open(IcoImageFile.format, IcoImageFile, _accept)
Image.register_save(IcoImageFile.format, _save)
Image.register_extension(IcoImageFile.format, ".ico")
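# A minimal usage sketch (not part of the plugin): how the size handling
# described in IcoImageFile's docstring and the "sizes" save option play out.
# The file names below are hypothetical.
#
#     from PIL import Image
#     im = Image.open('app.ico')          # largest resolution is selected
#     print(im.info['sizes'])             # e.g. {(16, 16), (32, 32), (256, 256)}
#     im.size = (32, 32)                  # pick another available entry...
#     im.load()                           # ...before reloading the frame
#     im.save('app-resaved.ico', sizes=[(16, 16), (32, 32)])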
|
|
'''Wrapper for the github API.
This maintains an on-disk cache of API calls and maps github API calls onto
Python functions.
'''
import sys
import logging
import json
import re
import os
import hashlib
import cPickle
import requests
GITHUB_API_ROOT = 'https://api.github.com'
WHITESPACE_RE = re.compile(r'^[ \t\n\r]*$')
logger = logging.getLogger(__name__)
# TODO(danvk): inject a cache from the server module
#from werkzeug.contrib.cache import SimpleCache
#cache = SimpleCache()
class SimpleCache(object):
def __init__(self):
self._cache_dir = '/tmp/better-git-pr/cache'
if not os.path.exists(self._cache_dir):
os.makedirs(self._cache_dir)
def _file_for_key(self, k):
return os.path.join(self._cache_dir, hashlib.md5(k).hexdigest())
def get(self, k):
f = self._file_for_key(k)
if os.path.exists(f):
try:
return open(f).read().decode('utf-8')
except:
return None
def set(self, k, v):
f = self._file_for_key(k)
open(f, 'wb').write(v.encode('utf-8'))
def delete_multi(self, ks):
for k in ks:
f = self._file_for_key(k)
if os.path.exists(f):
os.unlink(f)
cache = SimpleCache()
def _fetch_url(token, url, extra_headers=None, bust_cache=False):
key = url + json.dumps(extra_headers)
cached = cache.get(key)
if cached is not None and not bust_cache:
return cached
logger.info('Uncached request for %s', url)
headers = {}
if token:
headers.update({'Authorization': 'token ' + token})
if extra_headers:
headers.update(extra_headers)
r = requests.get(url, headers=headers)
if not r.ok:
logger.warn('Request for %s failed: %s', url, r.text)
return False
response = r.text
cache.set(key, response)
return response
def _post_api(token, path, obj, **kwargs):
url = (GITHUB_API_ROOT + path) % kwargs
assert '%' not in url
logger.info('Posting to %s', url)
r = requests.post(url, headers={'Authorization': 'token ' + token, 'Content-type': 'application/json'}, data=json.dumps(obj))
if not r.ok:
logger.warn('Request for %s failed.', url)
logger.warn('%s', r)
logger.warn('%s', r.text)
logger.warn('Posted:\n%s', json.dumps(obj))
return False
return r.json()
def _fetch_api(token, url, bust_cache=False):
response = _fetch_url(token, url, bust_cache=bust_cache)
if response is None or response is False:
return None
if WHITESPACE_RE.match(response):
return None
try:
j = json.loads(response)
except ValueError:
logger.warn('Failed to parse as JSON:\n%s', response)
raise
return j
# caching: never
def get_current_user_info(token):
"""Returns information about the authenticated user."""
return _fetch_api(token, GITHUB_API_ROOT + '/user', bust_cache=True)
# caching: should always check after calling
def get_pull_requests(token, owner, repo, bust_cache=False):
url = (GITHUB_API_ROOT + '/repos/%(owner)s/%(repo)s/pulls') % {'owner': owner, 'repo': repo}
return _fetch_api(token, url, bust_cache=bust_cache)
def _pull_request_url(owner, repo, pull_number):
return (GITHUB_API_ROOT + '/repos/%(owner)s/%(repo)s/pulls/%(pull_number)s') % {'owner': owner, 'repo': repo, 'pull_number': pull_number}
# caching: should check after calling
def get_pull_request(token, owner, repo, pull_number, bust_cache=False):
url = _pull_request_url(owner, repo, pull_number)
return _fetch_api(token, url, bust_cache=bust_cache)
# caching: never expires
def get_commit_info(token, owner, repo, sha):
url = (GITHUB_API_ROOT + '/repos/%(owner)s/%(repo)s/commits/%(sha)s') % {'owner': owner, 'repo': repo, 'sha': sha}
return _fetch_api(token, url)
def _commits_url(owner, repo, pull_number):
return (GITHUB_API_ROOT + '/repos/%(owner)s/%(repo)s/pulls/%(pull_number)s/commits') % {
'owner': owner, 'repo': repo, 'pull_number': pull_number}
# caching: expires when pull_request's updated_at changes
def get_pull_request_commits(token, owner, repo, pull_number):
"""Returns commits from first to last."""
commits = _fetch_api(token, _commits_url(owner, repo, pull_number))
if not commits:
return None
# See https://developer.github.com/v3/pulls/#list-commits-on-a-pull-request
commits.sort(key=lambda x: x['commit']['author']['date'])
return commits
def _comments_urls(owner, repo, pull_number):
issue_url = '/repos/%(owner)s/%(repo)s/issues/%(pull_number)s/comments' % {'owner': owner, 'repo': repo, 'pull_number': pull_number}
diff_url = '/repos/%(owner)s/%(repo)s/pulls/%(pull_number)s/comments' % {'owner': owner, 'repo': repo, 'pull_number': pull_number}
return GITHUB_API_ROOT + issue_url, GITHUB_API_ROOT + diff_url
# caching: expires when pull_request's updated_at changes
def get_pull_request_comments(token, owner, repo, pull_number):
# There are two types of comments:
# 1. top level (these are issue comments)
# 2. diff-level (these are pull requests comments)
# TODO(danvk): are there also file-level comments?
issue_url, diff_url = _comments_urls(owner, repo, pull_number)
issue_comments = _fetch_api(token, issue_url) or []
pr_comments = _fetch_api(token, diff_url) or []
return {'top_level': issue_comments, 'diff_level': pr_comments}
# caching: never expires
def get_diff_info(token, owner, repo, sha1, sha2):
# https://developer.github.com/v3/repos/commits/#compare-two-commits
# Highlights include files.{filename,additions,deletions,changes}
url = (GITHUB_API_ROOT + '/repos/%(owner)s/%(repo)s/compare/%(sha1)s...%(sha2)s') % {'owner': owner, 'repo': repo, 'sha1': sha1, 'sha2': sha2}
return _fetch_api(token, url)
# caching: never expires
def get_file_diff(token, owner, repo, path, sha1, sha2):
# https://developer.github.com/v3/repos/commits/#compare-two-commits
# Highlights include files.{filename,additions,deletions,changes}
url = (GITHUB_API_ROOT + '/repos/%(owner)s/%(repo)s/compare/%(sha1)s...%(sha2)s') % {'owner': owner, 'repo': repo, 'sha1': sha1, 'sha2': sha2}
unified_diff = _fetch_url(token, url, extra_headers={'Accept': 'application/vnd.github.3.diff'})
if not unified_diff:
logger.info('Unable to get unified diff %s', url)
return None
# Parse out the bit that's relevant to the file
diff_start_re = re.compile(r'^diff --git a/(.*?) b/(.*?)$', re.MULTILINE)
ms = [m for m in diff_start_re.finditer(unified_diff)]
file_idx = -1
for idx, m in enumerate(ms):
# is it possible that m.group(1) != m.group(2)
if m.group(1) == path and m.group(2) == path:
file_idx = idx
break
if file_idx == -1:
logger.info('Unable to find diff for %s in %s', path, url)
return None
start = ms[file_idx].start()
if file_idx < len(ms) - 1:
limit = ms[file_idx + 1].start()
else:
limit = len(unified_diff)
return unified_diff[start:limit]
# caching: never expires
def get_file_at_ref(token, owner, repo, path, sha):
url = (GITHUB_API_ROOT + '/repos/%(owner)s/%(repo)s/contents/%(path)s?ref=%(sha)s') % {'owner': owner, 'repo': repo, 'path': path, 'sha': sha}
return _fetch_url(token, url, extra_headers={'Accept': 'application/vnd.github.3.raw'})
# caching: n/a
def post_comment(token, owner, repo, pull_number, comment):
    # Must have 'body', plus either 'in_reply_to' or a full position spec
    # (commit_id, path and position).
    if 'body' not in comment or not (
            'in_reply_to' in comment or
            ('commit_id' in comment and
             'path' in comment and
             'position' in comment)):
        return None
post_path = '/repos/%(owner)s/%(repo)s/pulls/%(pull_number)s/comments'
filtered_comment = {'body': comment['body']}
if 'in_reply_to' in comment:
filtered_comment['in_reply_to'] = comment['in_reply_to']
else:
filtered_comment['commit_id'] = comment['original_commit_id']
filtered_comment['position'] = comment['original_position']
filtered_comment['path'] = comment['path']
return _post_api(token, post_path, filtered_comment,
owner=owner, repo=repo, pull_number=pull_number)
# caching: n/a
def post_issue_comment(token, owner, repo, issue_number, body):
post_path = '/repos/%(owner)s/%(repo)s/issues/%(issue_number)s/comments'
return _post_api(token, post_path, {
'body': body
}, owner=owner, repo=repo, issue_number=issue_number)
# caching: never
def get_user_subscriptions(token, user):
'''Returns a list of repos to which the user subscribes.'''
# TODO(danvk): follow paginated results here.
url = (GITHUB_API_ROOT + '/users/%(user)s/subscriptions?per_page=100') % {'user': user}
subscriptions = _fetch_api(token, url)
if not subscriptions:
return None
subscriptions.sort(key=lambda repo: repo['updated_at'])
subscriptions.reverse()
return subscriptions
def _expire_urls(urls):
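    # Cache keys must mirror how _fetch_url builds them (url + json.dumps(extra_headers));
    # the plain JSON API fetches expired here are made with extra_headers=None.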
keys = [url + json.dumps(None) for url in urls]
cache.delete_multi(keys)
def expire_cache_for_pull_request_children(owner, repo, pull_number):
"""Delete all non-permanent cache entries relating to this PR."""
urls = (list(_comments_urls(owner, repo, pull_number)) +
[_commits_url(owner, repo, pull_number)])
_expire_urls(urls)
def expire_cache_for_pull_request(owner, repo, pull_number):
"""Delete the Pull Request RPC itself from the cache."""
_expire_urls([_pull_request_url(owner, repo, pull_number)])
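# A minimal usage sketch (not part of this module). The owner/repo/PR number
# below are placeholders; `token` is assumed to be a valid GitHub OAuth token.
#
#     pr = get_pull_request(token, 'some-owner', 'some-repo', 42)
#     comments = get_pull_request_comments(token, 'some-owner', 'some-repo', 42)
#     post_issue_comment(token, 'some-owner', 'some-repo', 42, 'Looks good!')
#     # After posting, drop the stale cached RPCs so the next fetch sees the update:
#     expire_cache_for_pull_request_children('some-owner', 'some-repo', 42)
#     expire_cache_for_pull_request('some-owner', 'some-repo', 42)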
|
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# TODO(nduca): Rewrite some of these tests to use mocks instead of
# actually talking to the device. This would improve our coverage quite
# a bit.
import socket
import tempfile
import unittest
import mock
from telemetry.core import cros_interface
from telemetry import decorators
from telemetry.internal import forwarders
from telemetry.internal.forwarders import cros_forwarder
from telemetry.testing import options_for_unittests
class CrOSInterfaceTest(unittest.TestCase):
def _GetCRI(self):
remote = options_for_unittests.GetCopy().cros_remote
remote_ssh_port = options_for_unittests.GetCopy().cros_remote_ssh_port
return cros_interface.CrOSInterface(
remote, remote_ssh_port,
options_for_unittests.GetCopy().cros_ssh_identity)
@decorators.Enabled('cros-chrome')
def testPushContents(self):
with self._GetCRI() as cri:
cri.RunCmdOnDevice(['rm', '-rf', '/tmp/testPushContents'])
cri.PushContents('hello world', '/tmp/testPushContents')
contents = cri.GetFileContents('/tmp/testPushContents')
self.assertEquals(contents, 'hello world')
@decorators.Enabled('cros-chrome')
def testExists(self):
with self._GetCRI() as cri:
self.assertTrue(cri.FileExistsOnDevice('/proc/cpuinfo'))
self.assertTrue(cri.FileExistsOnDevice('/etc/passwd'))
self.assertFalse(cri.FileExistsOnDevice('/etc/sdlfsdjflskfjsflj'))
@decorators.Enabled('linux')
def testExistsLocal(self):
with cros_interface.CrOSInterface() as cri:
self.assertTrue(cri.FileExistsOnDevice('/proc/cpuinfo'))
self.assertTrue(cri.FileExistsOnDevice('/etc/passwd'))
self.assertFalse(cri.FileExistsOnDevice('/etc/sdlfsdjflskfjsflj'))
@decorators.Enabled('cros-chrome')
def testGetFileContents(self): # pylint: disable=no-self-use
with self._GetCRI() as cri:
hosts = cri.GetFileContents('/etc/lsb-release')
self.assertTrue('CHROMEOS' in hosts)
@decorators.Enabled('cros-chrome')
def testGetFileContentsNonExistent(self):
with self._GetCRI() as cri:
f = tempfile.NamedTemporaryFile()
cri.PushContents('testGetFileNonExistent', f.name)
cri.RmRF(f.name)
self.assertRaises(OSError, lambda: cri.GetFileContents(f.name))
@decorators.Enabled('cros-chrome')
def testGetFile(self): # pylint: disable=no-self-use
with self._GetCRI() as cri:
f = tempfile.NamedTemporaryFile()
cri.GetFile('/etc/lsb-release', f.name)
with open(f.name, 'r') as f2:
res = f2.read()
self.assertTrue('CHROMEOS' in res)
@decorators.Enabled('cros-chrome')
def testGetFileNonExistent(self):
with self._GetCRI() as cri:
f = tempfile.NamedTemporaryFile()
cri.PushContents('testGetFileNonExistent', f.name)
cri.RmRF(f.name)
self.assertRaises(OSError, lambda: cri.GetFile(f.name))
@decorators.Enabled('cros-chrome')
def testIsServiceRunning(self):
with self._GetCRI() as cri:
self.assertTrue(cri.IsServiceRunning('openssh-server'))
@decorators.Enabled('linux')
def testIsServiceRunningLocal(self):
with cros_interface.CrOSInterface() as cri:
self.assertTrue(cri.IsServiceRunning('dbus'))
@decorators.Enabled('cros-chrome')
def testGetRemotePortAndIsHTTPServerRunningOnPort(self):
with self._GetCRI() as cri:
# Create local server.
sock = socket.socket()
sock.bind(('', 0))
port = sock.getsockname()[1]
sock.listen(0)
# Get remote port and ensure that it was unused.
remote_port = cri.GetRemotePort()
self.assertFalse(cri.IsHTTPServerRunningOnPort(remote_port))
# Forward local server's port to remote device's remote_port.
forwarder = cros_forwarder.CrOsForwarderFactory(cri).Create(
forwarders.PortPairs(http=forwarders.PortPair(port, remote_port),
https=None,
dns=None))
# At this point, remote device should be able to connect to local server.
self.assertTrue(cri.IsHTTPServerRunningOnPort(remote_port))
# Next remote port shouldn't be the same as remote_port, since remote_port
# is now in use.
self.assertTrue(cri.GetRemotePort() != remote_port)
# Close forwarder and local server ports.
forwarder.Close()
sock.close()
# Device should no longer be able to connect to remote_port since it is no
# longer in use.
self.assertFalse(cri.IsHTTPServerRunningOnPort(remote_port))
@decorators.Enabled('cros-chrome')
def testGetRemotePortReservedPorts(self):
with self._GetCRI() as cri:
# Should return 2 separate ports even though the first one isn't
# technically being used yet.
remote_port_1 = cri.GetRemotePort()
remote_port_2 = cri.GetRemotePort()
self.assertTrue(remote_port_1 != remote_port_2)
@decorators.Enabled('cros-chrome')
def testTakeScreenShot(self):
with self._GetCRI() as cri:
def _Cleanup():
cri.RmRF('/var/log/screenshots/test-prefix*')
_Cleanup()
cri.TakeScreenShot('test-prefix')
self.assertTrue(cri.FileExistsOnDevice(
'/var/log/screenshots/test-prefix-0.png'))
_Cleanup()
# TODO(tengs): It would be best if we can filter this test and other tests
# that need to be run locally based on the platform of the system browser.
@decorators.Enabled('linux')
def testEscapeCmdArguments(self):
"""Commands and their arguments that are executed through the cros
interface should follow bash syntax. This test needs to run on remotely
and locally on the device to check for consistency.
"""
options = options_for_unittests.GetCopy()
with cros_interface.CrOSInterface(options.cros_remote,
options.cros_remote_ssh_port,
options.cros_ssh_identity) as cri:
# Check arguments with no special characters
stdout, _ = cri.RunCmdOnDevice(['echo', '--arg1=value1', '--arg2=value2',
'--arg3="value3"'])
assert stdout.strip() == '--arg1=value1 --arg2=value2 --arg3=value3'
# Check argument with special characters escaped
stdout, _ = cri.RunCmdOnDevice(['echo', '--arg=A\\; echo \\"B\\"'])
assert stdout.strip() == '--arg=A; echo "B"'
# Check argument with special characters in quotes
stdout, _ = cri.RunCmdOnDevice(['echo', "--arg='$HOME;;$PATH'"])
assert stdout.strip() == "--arg=$HOME;;$PATH"
@decorators.Enabled('cros-chrome', 'linux')
@mock.patch.object(cros_interface.CrOSInterface, 'RunCmdOnDevice')
def testTryLoginSuccess(self, mock_run_cmd):
mock_run_cmd.return_value = ('root\n', '')
cri = cros_interface.CrOSInterface(
"testhostname", 22, options_for_unittests.GetCopy().cros_ssh_identity)
cri.TryLogin()
mock_run_cmd.assert_called_once_with(['echo', '$USER'], quiet=True)
@decorators.Enabled('cros-chrome', 'linux')
@mock.patch.object(cros_interface.CrOSInterface, 'RunCmdOnDevice')
def testTryLoginStderr(self, mock_run_cmd):
cri = cros_interface.CrOSInterface(
"testhostname", 22, options_for_unittests.GetCopy().cros_ssh_identity)
mock_run_cmd.return_value = ('', 'Host key verification failed')
self.assertRaises(cros_interface.LoginException, cri.TryLogin)
self.assertRaisesRegexp(cros_interface.LoginException,
r'.*host key verification failed..*', cri.TryLogin)
mock_run_cmd.return_value = ('', 'Operation timed out')
self.assertRaisesRegexp(cros_interface.LoginException,
r'Timed out while logging into.*', cri.TryLogin)
mock_run_cmd.return_value = ('', 'UNPROTECTED PRIVATE KEY FILE!')
self.assertRaisesRegexp(cros_interface.LoginException,
r'Permissions for .* are too open. To fix this.*',
cri.TryLogin)
mock_run_cmd.return_value = (
'', 'Permission denied (publickey,keyboard-interactive)')
self.assertRaisesRegexp(cros_interface.KeylessLoginRequiredException,
r'Need to set up ssh auth for .*', cri.TryLogin)
mock_run_cmd.return_value = ('', 'Fallback error case')
self.assertRaisesRegexp(cros_interface.LoginException,
r'While logging into .*, got .*', cri.TryLogin)
mock_run_cmd.return_value = ('', 'Could not resolve hostname')
self.assertRaisesRegexp(cros_interface.DNSFailureException,
r'Unable to resolve the hostname for:.*',
cri.TryLogin)
@decorators.Enabled('cros-chrome', 'linux')
@mock.patch.object(cros_interface.CrOSInterface, 'RunCmdOnDevice')
def testTryLoginStdout(self, mock_run_cmd):
mock_run_cmd.return_value = ('notrooot', '')
cri = cros_interface.CrOSInterface(
"testhostname", 22, options_for_unittests.GetCopy().cros_ssh_identity)
self.assertRaisesRegexp(cros_interface.LoginException,
r'Logged into .*, expected \$USER=root, but got .*',
cri.TryLogin)
|
|
#
# GtkHelp.py -- customized Gtk widgets
#
# Eric Jeschke ([email protected])
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from __future__ import print_function
import time
import math
from ginga.gtkw import gtksel
import gtk
import gobject
from ginga.misc import Bunch, Callback
from functools import reduce
class TabWorkspace(gtk.Notebook):
def to_next(self):
num_tabs = self.get_n_pages()
cur_idx = self.get_current_page()
new_idx = (cur_idx + 1) % num_tabs
self.set_current_page(new_idx)
def to_previous(self):
num_tabs = self.get_n_pages()
new_idx = self.get_current_page() - 1
if new_idx < 0:
new_idx = max(num_tabs - 1, 0)
self.set_current_page(new_idx)
class MDIWorkspace(gtk.Layout):
"""
This is a work in progress!
"""
def __init__(self):
super(MDIWorkspace, self).__init__()
self.children = []
self.selected_child = None
self.kbdmouse_mask = 0
self.bg_rgb = (0.5, 0.5, 0.5)
self._last_win_x = None
self._last_win_y = None
self.connect("configure-event", self.configure_event)
if not gtksel.have_gtk3:
self.connect("expose_event", self.expose_event)
## else:
## self.connect("draw", self.draw_event)
self.connect("motion_notify_event", self.motion_notify_event)
self.connect("button_press_event", self.button_press_event)
self.connect("button_release_event", self.button_release_event)
mask = self.get_events()
self.set_events(mask
| gtk.gdk.ENTER_NOTIFY_MASK
| gtk.gdk.LEAVE_NOTIFY_MASK
| gtk.gdk.FOCUS_CHANGE_MASK
| gtk.gdk.STRUCTURE_MASK
| gtk.gdk.BUTTON_PRESS_MASK
| gtk.gdk.BUTTON_RELEASE_MASK
| gtk.gdk.KEY_PRESS_MASK
| gtk.gdk.KEY_RELEASE_MASK
| gtk.gdk.POINTER_MOTION_MASK
#| gtk.gdk.POINTER_MOTION_HINT_MASK
| gtk.gdk.SCROLL_MASK)
def expose_event(self, widget, event):
x , y, width, height = event.area
win = widget.get_window()
cr = win.cairo_create()
# set clip area for exposed region
cr.rectangle(x, y, width, height)
cr.clip()
cr.set_source_rgb(*self.bg_rgb)
cr.paint()
return True
def configure_event(self, widget, event):
rect = widget.get_allocation()
x, y, width, height = rect.x, rect.y, rect.width, rect.height
# This is a workaround for a strange bug in Gtk 3
# where we get multiple configure callbacks even though
# the size hasn't changed. We avoid creating a new surface
# if there is an old surface with the exact same size.
# This prevents some flickering of the display on focus events.
wwd, wht = self.get_window_size()
if (wwd == width) and (wht == height):
return True
win = widget.get_window()
cr = win.cairo_create()
# set clip area for exposed region
cr.rectangle(0, 0, width, height)
#cr.clip()
cr.set_source_rgb(*self.bg_rgb)
cr.paint()
#self.configure(width, height)
return True
def append_page(self, widget, label):
vbox = gtk.VBox()
evbox = gtk.EventBox()
evbox.add(label)
evbox.modify_fg(gtk.STATE_NORMAL, gtk.gdk.color_parse("yellow"))
evbox.modify_bg(gtk.STATE_NORMAL, gtk.gdk.color_parse("skyblue"))
vbox.pack_start(evbox, fill=False, expand=False)
vbox.pack_start(widget, fill=True, expand=True)
fr = gtk.Frame()
fr.set_border_width(10)
fr.set_shadow_type(gtk.SHADOW_ETCHED_OUT)
fr.add(vbox)
#fr.set_resize_mode(gtk.RESIZE_IMMEDIATE)
fr.show_all()
evbox.connect("button_press_event", self.select_child_cb, fr)
bnch = Bunch.Bunch(widget=widget, window=fr)
self.children.append(bnch)
self.put(fr, 10, 10)
def set_tab_reorderable(self, w, tf):
pass
def set_tab_detachable(self, w, tf):
pass
def get_tab_label(self, w):
return None
def page_num(self, widget):
idx = 0
for bnch in self.children:
if bnch.widget == widget:
return idx
idx += 1
return -1
def set_current_page(self, idx):
bnch = self.children[idx]
window = bnch.window
window.show()
def remove_page(self, idx):
bnch = self.children[idx]
window = bnch.window
#self.remove(window)
def select_child_cb(self, layout, event, widget):
ex = event.x_root; ey = event.y_root
x, y, width, height = widget.get_allocation()
win = widget.get_window()
if win is None:
return False
x, y = win.get_position()
#dx, dy = int(ex - x), int(ey - y)
dx, dy = ex, ey
self.selected_child = Bunch.Bunch(widget=widget,
cr = self.setup_cr(self.bin_window),
x_origin=x, y_origin=y,
dx=dx, dy=dy, wd=width, ht=height)
return False
def button_press_event(self, widget, event):
# event.button, event.x, event.y
x = event.x; y = event.y
button = self.kbdmouse_mask
if event.button != 0:
button |= 0x1 << (event.button - 1)
return True
def setup_cr(self, drawable):
cr = drawable.cairo_create()
cr.set_line_width(2)
cr.set_dash([ 3.0, 4.0, 6.0, 4.0], 5.0)
return cr
def button_release_event(self, widget, event):
# event.button, event.x, event.y
x = event.x_root; y = event.y_root
button = self.kbdmouse_mask
if event.button != 0:
button |= 0x1 << (event.button - 1)
if self.selected_child is not None:
bnch = self.selected_child
            x = int(bnch.x_origin + (x - bnch.dx))
            y = int(bnch.y_origin + (y - bnch.dy))
self.move(self.selected_child.widget, x, y)
self.selected_child = None
return True
def motion_notify_event(self, widget, event):
button = self.kbdmouse_mask
if event.is_hint:
return
else:
x, y, state = event.x_root, event.y_root, event.state
if state & gtk.gdk.BUTTON1_MASK:
button |= 0x1
elif state & gtk.gdk.BUTTON2_MASK:
button |= 0x2
elif state & gtk.gdk.BUTTON3_MASK:
button |= 0x4
if (button & 0x1) and (self.selected_child is not None):
bnch = self.selected_child
            x = int(bnch.x_origin + (x - bnch.dx))
            y = int(bnch.y_origin + (y - bnch.dy))
self.move(self.selected_child.widget, x, y)
return True
def to_next(self):
pass
def to_previous(self):
pass
class GridWorkspace(gtk.Table):
def __init__(self):
super(GridWorkspace, self).__init__()
self.set_row_spacings(2)
self.set_col_spacings(2)
self.widgets = []
self.labels = {}
def _relayout(self):
# calculate number of rows and cols, try to maintain a square
# TODO: take into account the window geometry
num_widgets = len(self.widgets)
rows = int(round(math.sqrt(num_widgets)))
cols = rows
if rows**2 < num_widgets:
cols += 1
# remove all the old widgets
for w in self.widgets:
self.remove(w)
self.resize(rows, cols)
# add them back in, in a grid
for i in range(0, rows):
for j in range(0, cols):
index = i*cols + j
if index < num_widgets:
widget = self.widgets[index]
self.attach(widget, j, j+1, i, i+1,
xoptions=gtk.FILL|gtk.EXPAND,
yoptions=gtk.FILL|gtk.EXPAND,
xpadding=0, ypadding=0)
def append_page(self, widget, label):
self.widgets.append(widget)
self.labels[widget] = label
self._relayout()
def remove_page(self, idx):
widget = self.getWidget(idx)
del self.labels[widget]
self.widgets.remove(widget)
self.remove(widget)
self._relayout()
def page_num(self, widget):
try:
return self.widgets.index(widget)
except (IndexError, ValueError) as e:
return -1
def getWidget(self, index):
return self.widgets[index]
def set_tab_reorderable(self, w, tf):
pass
def set_tab_detachable(self, w, tf):
pass
def get_tab_label(self, widget):
return self.labels[widget]
def set_current_page(self, idx):
widget = self.getWidget(idx)
self.set_focus_child(widget)
def to_next(self):
pass
def to_previous(self):
pass
class WidgetMask(object):
def __init__(self, *args):
self.cb_fn = None
self.cb_args = []
self.cb_kwdargs = {}
self.connected = False
self.changed = False
def sconnect(self, signal, cb_fn, *args, **kwdargs):
self.cb_fn = cb_fn
self.cb_args = args
self.cb_kwdargs = kwdargs
self.connect(signal, self.cb)
self.connected = True
def change(self):
if self.connected:
self.changed = True
def cb(self, *args):
if self.changed:
self.changed = False
return
newargs = list(args)
newargs.extend(self.cb_args)
kwdargs = self.cb_kwdargs.copy()
return self.cb_fn(*newargs, **kwdargs)
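# A small usage sketch (not part of this module) of the WidgetMask pattern used
# by the widget subclasses below: connect via sconnect(), and programmatic
# setters call change() so the signal they emit is swallowed; only user-driven
# changes reach the callback. The callback name is hypothetical.
#
#     cb = CheckButton("Enable feature")
#     cb.sconnect("toggled", on_toggled)   # fires for user clicks
#     cb.set_active(True)                  # calls change(), so the resulting
#                                          # "toggled" emission is ignored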
class TopLevel(gtk.Window):
def __init__(self):
gtk.Window.__init__(self, gtk.WINDOW_TOPLEVEL)
class CheckButton(WidgetMask, gtk.CheckButton):
def __init__(self, *args, **kwdargs):
WidgetMask.__init__(self)
gtk.CheckButton.__init__(self, *args, **kwdargs)
def set_active(self, newval):
oldval = self.get_active()
if oldval != newval:
self.change()
super(CheckButton, self).set_active(newval)
class ToggleButton(WidgetMask, gtk.ToggleButton):
def __init__(self, *args, **kwdargs):
WidgetMask.__init__(self)
gtk.ToggleButton.__init__(self, *args, **kwdargs)
def set_active(self, newval):
oldval = self.get_active()
if oldval != newval:
self.change()
super(ToggleButton, self).set_active(newval)
def toggle(self):
oldval = self.get_active()
newval = not oldval
super(ToggleButton, self).set_active(newval)
class RadioButton(WidgetMask, gtk.RadioButton):
def __init__(self, *args, **kwdargs):
WidgetMask.__init__(self)
gtk.RadioButton.__init__(self, *args, **kwdargs)
def set_active(self, newval):
oldval = self.get_active()
if oldval != newval:
self.change()
super(RadioButton, self).set_active(newval)
def toggle(self):
oldval = self.get_active()
newval = not oldval
super(RadioButton, self).set_active(newval)
class CheckMenuItem(WidgetMask, gtk.CheckMenuItem):
def __init__(self, *args, **kwdargs):
WidgetMask.__init__(self)
gtk.CheckMenuItem.__init__(self, *args, **kwdargs)
def set_active(self, newval):
oldval = self.get_active()
if oldval != newval:
self.change()
super(CheckMenuItem, self).set_active(newval)
class SpinButton(WidgetMask, gtk.SpinButton):
def __init__(self, *args, **kwdargs):
WidgetMask.__init__(self)
gtk.SpinButton.__init__(self, *args, **kwdargs)
def set_value(self, newval):
oldval = self.get_value()
if oldval != newval:
self.change()
super(SpinButton, self).set_value(newval)
class HScale(WidgetMask, gtk.HScale):
def __init__(self, *args, **kwdargs):
WidgetMask.__init__(self)
gtk.HScale.__init__(self, *args, **kwdargs)
def set_value(self, newval):
oldval = self.get_value()
if oldval != newval:
self.change()
super(HScale, self).set_value(newval)
class VScale(WidgetMask, gtk.VScale):
def __init__(self, *args, **kwdargs):
WidgetMask.__init__(self)
gtk.VScale.__init__(self, *args, **kwdargs)
def set_value(self, newval):
oldval = self.get_value()
if oldval != newval:
self.change()
super(VScale, self).set_value(newval)
class ComboBoxMixin(object):
def set_active(self, newval):
oldval = self.get_active()
if oldval != newval:
self.change()
super(ComboBox, self).set_active(newval)
def insert_alpha(self, text):
model = self.get_model()
tup = (text, )
j = 0
for i in range(len(model)):
j = i
if model[i][0] > text:
model.insert(j, tup)
return
model.insert(j+1, tup)
def insert_text(self, idx, text):
model = self.get_model()
tup = (text, )
model.insert(idx, tup)
def delete_alpha(self, text):
model = self.get_model()
for i in range(len(model)):
if model[i][0] == text:
del model[i]
return
def clear(self):
model = self.get_model()
model.clear()
def show_text(self, text):
model = self.get_model()
for i in range(len(model)):
if model[i][0] == text:
self.set_active(i)
return
class ComboBox(WidgetMask, gtk.ComboBox, ComboBoxMixin):
def __init__(self, *args, **kwdargs):
WidgetMask.__init__(self)
gtk.ComboBox.__init__(self, *args, **kwdargs)
class ComboBoxEntry(WidgetMask, gtk.ComboBoxEntry, ComboBoxMixin):
def __init__(self, *args, **kwdargs):
WidgetMask.__init__(self)
gtk.ComboBoxEntry.__init__(self, *args, **kwdargs)
class Notebook(gtk.Notebook):
def set_group_id(self, id):
if not gtksel.have_gtk3:
super(Notebook, self).set_group_id(id)
else:
super(Notebook, self).set_group_name(str(id))
def combo_box_new_text():
liststore = gtk.ListStore(gobject.TYPE_STRING)
combobox = ComboBox()
combobox.set_model(liststore)
cell = gtk.CellRendererText()
combobox.pack_start(cell, True)
combobox.add_attribute(cell, 'text', 0)
return combobox
class Dialog(gtk.Dialog):
def __init__(self, title=None, flags=None, buttons=None,
callback=None):
button_list = []
for name, val in buttons:
button_list.extend([name, val])
super(Dialog, self).__init__(title=title, flags=flags,
buttons=tuple(button_list))
#self.w.connect("close", self.close)
if callback:
self.connect("response", callback)
class MenuBar(gtk.MenuBar):
def __init__(self):
super(MenuBar, self).__init__()
def add_name(self, name):
btn = gtk.MenuItem(label=name)
menu = gtk.Menu()
btn.set_submenu(menu)
self.append(btn)
return menu
class ToolBar(gtk.Toolbar):
def __init__(self):
super(ToolBar, self).__init__()
def add_name(self, name):
btn = gtk.Button(name)
menu = gtk.Menu()
btn.connect('button-press-event', self._mk_click_cb(menu))
## btn.connect('focus-in-event', self._focus_event, True, menu)
## btn.connect('focus-out-event', self._focus_event, False, menu)
self.pack_start(btn, fill=False, expand=False, padding=2)
return menu
def _mk_click_cb(self, menu):
def menu_up(button, event):
if event.type == gtk.gdk.BUTTON_PRESS:
if gtksel.have_gtk3:
menu.popup(None, None, None, None, event.button, event.time)
else:
menu.popup(None, None, None, event.button, event.time)
return True
return False
return menu_up
def _focus_event(self, widget, event, hasFocus, menu):
if hasFocus and self._isactive:
if gtksel.have_gtk3:
menu.popup(None, None, None, None, 1, 0)
else:
menu.popup(None, None, None, 1, 0)
return True
else:
#menu.popdown()
pass
return False
class Desktop(Callback.Callbacks):
def __init__(self):
super(Desktop, self).__init__()
# for tabs
self.tab = Bunch.caselessDict()
self.tabcount = 0
self.notebooks = Bunch.caselessDict()
self.toplevels = []
for name in ('page-switch', 'page-select'):
self.enable_callback(name)
# --- Tab Handling ---
def make_ws(self, name=None, group=1, show_tabs=True, show_border=False,
detachable=True, tabpos=None, scrollable=True, wstype='nb'):
if not name:
name = str(time.time())
#if wstype == 'mdi':
# TODO: Gtk MDI workspace not ready for prime time...
if wstype == '___':
nb = MDIWorkspace()
widget = gtk.ScrolledWindow()
widget.set_border_width(2)
widget.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
widget.add(nb)
elif wstype == 'grid':
nb = GridWorkspace()
widget = nb
else:
nb = TabWorkspace()
if tabpos is None:
tabpos = gtk.POS_TOP
# Allows drag-and-drop between notebooks
if not gtksel.have_gtk3:
nb.set_group_id(group)
if detachable:
nb.connect("create-window", self.detach_page_cb, group)
nb.connect("switch-page", self.switch_page_cb)
nb.set_tab_pos(tabpos)
nb.set_scrollable(scrollable)
nb.set_show_tabs(show_tabs)
nb.set_show_border(show_border)
#nb.set_border_width(2)
widget = nb
bnch = Bunch.Bunch(nb=nb, name=name, widget=widget, group=group)
self.notebooks[name] = bnch
return bnch
def get_nb(self, name):
return self.notebooks[name].nb
def get_size(self, widget):
alloc = widget.get_allocation()
wd, ht = alloc.width, alloc.height
return (wd, ht)
def get_ws_size(self, name):
w = self.get_nb(name)
return self.get_size(w)
def get_wsnames(self, group=1):
res = []
for name in self.notebooks.keys():
bnch = self.notebooks[name]
if group is None:
res.append(name)
elif group == bnch.group:
res.append(name)
return res
def get_tabnames(self, group=1):
res = []
for name in self.tab.keys():
bnch = self.tab[name]
if group is None:
res.append(name)
elif group == bnch.group:
res.append(name)
return res
def add_tab(self, wsname, widget, group, labelname, tabname=None,
data=None):
tab_w = self.get_nb(wsname)
self.tabcount += 1
if not tabname:
tabname = labelname
if tabname in self.tab:
tabname = 'tab%d' % self.tabcount
label = gtk.Label(labelname)
evbox = gtk.EventBox()
evbox.add(label)
evbox.show_all()
tab_w.append_page(widget, evbox)
bnch = Bunch.Bunch(widget=widget, name=labelname,
tabname=tabname, data=data)
self.tab[tabname] = bnch
evbox.connect("button-press-event", self.select_cb, labelname, data)
tab_w.set_tab_reorderable(widget, True)
tab_w.set_tab_detachable(widget, True)
widget.show()
return tabname
def _find_nb(self, tabname):
widget = self.tab[tabname].widget
for bnch in self.notebooks.values():
nb = bnch.nb
page_num = nb.page_num(widget)
if page_num < 0:
continue
return (nb, page_num)
return (None, None)
def _find_tab(self, widget):
for key, bnch in self.tab.items():
if widget == bnch.widget:
return bnch
return None
def select_cb(self, widget, event, name, data):
self.make_callback('page-select', name, data)
def raise_tab(self, tabname):
nb, page_num = self._find_nb(tabname)
if nb:
nb.set_current_page(page_num)
# bring this window to the user's attention
win = nb.get_window()
if win:
if hasattr(win, 'present'):
# gtk3 ?
win.present()
else:
# gtk2
win.show()
def remove_tab(self, tabname):
nb, page_num = self._find_nb(tabname)
if nb:
nb.remove_page(page_num)
del self.tab[tabname]
return
def highlight_tab(self, tabname, onoff):
nb, page_num = self._find_nb(tabname)
if nb:
widget = self.tab[tabname].widget
lbl = nb.get_tab_label(widget)
if lbl is None:
return
name = self.tab[tabname].name
if onoff:
lbl.modify_bg(gtk.STATE_NORMAL,
gtk.gdk.color_parse('palegreen'))
else:
lbl.modify_bg(gtk.STATE_NORMAL,
gtk.gdk.color_parse('grey'))
def add_toplevel(self, bnch, wsname, width=700, height=700):
topw = TopLevel()
topw.set_default_size(width, height)
self.toplevels.append(topw)
topw.set_title(wsname)
topw.set_border_width(0)
topw.add(bnch.widget)
topw.show_all()
return topw
def create_toplevel_ws(self, width, height, group, x=None, y=None):
# create top level workspace
root = gtk.Window(gtk.WINDOW_TOPLEVEL)
## root.set_title(title)
# TODO: this needs to be more sophisticated
root.set_border_width(2)
root.set_default_size(width, height)
root.show()
#self.update_pending()
vbox = gtk.VBox()
root.add(vbox)
menubar = MenuBar()
vbox.pack_start(menubar, fill=True, expand=False)
# create a Window pulldown menu, and add it to the menu bar
winmenu = menubar.add_name("Window")
## w = gtk.MenuItem("Take Tab")
## winmenu.append(w)
#w.connect("activate", lambda w: self.gui_take_tab())
sep = gtk.SeparatorMenuItem()
winmenu.append(sep)
quit_item = gtk.MenuItem(label="Close")
winmenu.append(quit_item)
#quit_item.connect_object ("activate", self.quit, "file.exit")
quit_item.show()
bnch = self.make_ws(group=group)
vbox.pack_start(bnch.widget, padding=2, fill=True, expand=True)
root.connect("delete_event", lambda w, e: self.close_page_cb(bnch, root))
lbl = gtk.Statusbar()
lbl.set_has_resize_grip(True)
vbox.pack_end(lbl, expand=False, fill=True, padding=2)
vbox.show_all()
root.show_all()
if x is not None:
win = root.get_window()
win.move(x, y)
return bnch
def close_page_cb(self, bnch, root):
children = bnch.nb.get_children()
if len(children) == 0:
del self.notebooks[bnch.name]
root.destroy()
return True
def detach_page_cb(self, source, widget, x, y, group):
# Detach page to new top-level workspace
## page = self.widgetToPage(widget)
## if not page:
## return None
xo, yo, width, height = widget.get_allocation()
bnch = self.create_toplevel_ws(width, height, group, x=x, y=y)
return bnch.nb
def switch_page_cb(self, nbw, gptr, page_num):
pagew = nbw.get_nth_page(page_num)
bnch = self._find_tab(pagew)
if bnch is not None:
self.make_callback('page-switch', bnch.name, bnch.data)
return False
def make_desktop(self, layout, widgetDict=None):
if widgetDict is None:
widgetDict = {}
def process_common_params(widget, inparams):
params = Bunch.Bunch(name=None, height=-1, width=-1, xpos=-1, ypos=-1)
params.update(inparams)
if params.name:
widgetDict[params.name] = widget
if (params.width >= 0) or (params.height >= 0):
widget.set_size_request(params.width, params.height)
#pass
# User wants to place window somewhere
if params.xpos >= 0:
#widget.show()
win = widget.get_window()
if win is not None:
win.move(params.xpos, params.ypos)
return params
def make_widget(kind, paramdict, args, pack):
kind = kind.lower()
# Process workspace parameters
params = Bunch.Bunch(name=None, title=None, height=-1,
width=-1, group=1, show_tabs=True,
show_border=False, scrollable=True,
detachable=True, wstype='nb',
tabpos=gtk.POS_TOP)
params.update(paramdict)
if kind == 'widget':
widget = args[0]
elif kind == 'ws':
group = int(params.group)
widget = self.make_ws(name=params.name, group=group,
show_tabs=params.show_tabs,
show_border=params.show_border,
detachable=params.detachable,
tabpos=params.tabpos,
wstype=params.wstype,
scrollable=params.scrollable).widget
# If a title was passed as a parameter, then make a frame to
# wrap the widget using the title.
if params.title:
fr = gtk.Frame(label=' '+params.title+' ')
fr.set_shadow_type(gtk.SHADOW_ETCHED_IN)
fr.set_label_align(0.10, 0.5)
fr.add(widget)
pack(fr)
else:
pack(widget)
process_common_params(widget, params)
if (kind == 'ws') and (len(args) > 0):
# <-- Notebook ws specified a sub-layout. We expect a list
# of tabname, layout pairs--iterate over these and add them
# to the workspace as tabs.
for tabname, layout in args[0]:
def pack(w):
# ?why should group be the same as parent group?
self.add_tab(params.name, w, group,
tabname, tabname.lower())
make(layout, pack)
widget.show_all()
return widget
# Horizontal adjustable panel
def horz(params, cols, pack):
if len(cols) > 2:
hpaned = gtk.HPaned()
make(cols[0], lambda w: hpaned.pack1(w, resize=True, shrink=True))
horz(params, cols[1:],
lambda w: hpaned.pack2(w, resize=True, shrink=True))
pack(hpaned)
elif len(cols) == 2:
hpaned = gtk.HPaned()
make(cols[0], lambda w: hpaned.pack1(w, resize=True, shrink=True))
make(cols[1], lambda w: hpaned.pack2(w, resize=True, shrink=True))
pack(hpaned)
elif len(cols) == 1:
hpaned = gtk.HBox()
make(cols[0], lambda w: hpaned.pack_start(w, expand=True, fill=True)) #?
pack(hpaned)
process_common_params(hpaned, params)
hpaned.show_all()
# Vertical adjustable panel
def vert(params, rows, pack):
if len(rows) > 2:
vpaned = gtk.VPaned()
make(rows[0], lambda w: vpaned.pack1(w, resize=True, shrink=True))
vert(params, rows[1:],
lambda w: vpaned.pack2(w, resize=True, shrink=True))
pack(vpaned)
elif len(rows) == 2:
vpaned = gtk.VPaned()
make(rows[0], lambda w: vpaned.pack1(w, resize=True, shrink=True))
make(rows[1], lambda w: vpaned.pack2(w, resize=True, shrink=True))
pack(vpaned)
elif len(rows) == 1:
vpaned = gtk.VBox()
make(rows[0], lambda w: vpaned.pack_start(w, expand=True, fill=True)) #?
pack(vpaned)
process_common_params(vpaned, params)
vpaned.show_all()
# Horizontal fixed array
def hbox(params, cols, pack):
widget = gtk.HBox()
for dct in cols:
if isinstance(dct, dict):
#fill = dct.get('fill', True)
#expand = dct.get('expand', True) #?
fill = True
expand = (dct.get('stretch', 1) == 1)
col = dct.get('col', None)
else:
# assume a list defining the col
fill = expand = True
col = dct
if col is not None:
make(col, lambda w: widget.pack_start(w,
fill=fill,
expand=expand))
process_common_params(widget, params)
widget.show_all()
pack(widget)
# Vertical fixed array
def vbox(params, rows, pack):
widget = gtk.VBox()
for dct in rows:
if isinstance(dct, dict):
#fill = dct.get('fill', True)
#expand = dct.get('expand', True) #?
fill = True
expand = (dct.get('stretch', 1) == 1)
row = dct.get('row', None)
else:
# assume a list defining the row
fill = expand = True
row = dct
if row is not None:
make(row, lambda w: widget.pack_start(w,
fill=fill,
expand=expand))
process_common_params(widget, params)
widget.show_all()
pack(widget)
# Sequence of separate items
def seq(params, cols, pack):
def mypack(w):
topw = TopLevel()
## topw.set_title(title)
topw.set_border_width(0)
topw.add(w)
self.toplevels.append(topw)
topw.show_all()
for dct in cols:
if isinstance(dct, dict):
#fill = dct.get('fill', True)
#expand = dct.get('expand', True) #?
fill = True
expand = (dct.get('stretch', 1) == 1)
col = dct.get('col', None)
else:
# assume a list defining the col
fill = expand = True
col = dct
if col is not None:
make(col, mypack)
widget = gtk.Label("Placeholder")
pack(widget)
def make(constituents, pack):
kind = constituents[0]
params = constituents[1]
if len(constituents) > 2:
rest = constituents[2:]
else:
rest = []
if kind == 'vpanel':
vert(params, rest, pack)
elif kind == 'hpanel':
horz(params, rest, pack)
elif kind == 'vbox':
vbox(params, rest, pack)
elif kind == 'hbox':
hbox(params, rest, pack)
elif kind == 'seq':
seq(params, rest, pack)
elif kind in ('ws', 'widget'):
make_widget(kind, params, rest, pack)
make(layout, lambda w: None)
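# A minimal layout sketch for make_desktop(), inferred from the handlers above
# ('vpanel'/'hpanel' -> paned splits, 'vbox'/'hbox' -> fixed boxes, 'ws' -> a
# workspace, 'widget' -> a pre-built widget). Names are placeholders.
#
#     layout = ['vpanel', dict(name='top'),
#               ['hpanel', dict(),
#                ['ws', dict(name='left', width=300)],
#                ['ws', dict(name='main', wstype='grid')]],
#               ['ws', dict(name='toolbar', height=100)]]
#     desktop = Desktop()
#     desktop.make_desktop(layout)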
def _name_mangle(name, pfx=''):
newname = []
for c in name.lower():
if not (c.isalpha() or c.isdigit() or (c == '_')):
newname.append('_')
else:
newname.append(c)
return pfx + ''.join(newname)
def _make_widget(tup, ns):
swap = False
title = tup[0]
if not title.startswith('@'):
name = _name_mangle(title)
w1 = gtk.Label(title + ':')
w1.set_alignment(0.95, 0.5)
else:
# invisible title
swap = True
name = _name_mangle(title[1:])
w1 = gtk.Label('')
wtype = tup[1]
if wtype == 'label':
w2 = gtk.Label('')
w2.set_alignment(0.05, 0.5)
elif wtype == 'xlabel':
w2 = gtk.Label('')
w2.set_alignment(0.05, 0.5)
name = 'xlbl_' + name
elif wtype == 'entry':
w2 = gtk.Entry()
w2.set_width_chars(12)
elif wtype == 'combobox':
w2 = combo_box_new_text()
elif wtype == 'spinbutton':
w2 = SpinButton()
elif wtype == 'vbox':
w2 = gtk.VBox()
elif wtype == 'hbox':
w2 = gtk.HBox()
elif wtype == 'hscale':
w2 = HScale()
elif wtype == 'vscale':
w2 = VScale()
elif wtype == 'checkbutton':
w1 = gtk.Label('')
w2 = CheckButton(title)
w2.set_mode(True)
swap = True
elif wtype == 'radiobutton':
w1 = gtk.Label('')
w2 = RadioButton(title)
swap = True
elif wtype == 'togglebutton':
w1 = gtk.Label('')
w2 = ToggleButton(title)
w2.set_mode(True)
swap = True
elif wtype == 'button':
w1 = gtk.Label('')
w2 = gtk.Button(title)
swap = True
elif wtype == 'spacer':
w1 = gtk.Label('')
w2 = gtk.Label('')
else:
raise ValueError("Bad wtype=%s" % wtype)
lblname = 'lbl_%s' % (name)
if swap:
w1, w2 = w2, w1
ns[name] = w1
ns[lblname] = w2
else:
ns[name] = w2
ns[lblname] = w1
return (w1, w2)
def build_info(captions):
vbox = gtk.VBox(spacing=2)
numrows = len(captions)
numcols = reduce(lambda acc, tup: max(acc, len(tup)), captions, 0)
table = gtk.Table(rows=numrows, columns=numcols)
table.set_row_spacings(2)
table.set_col_spacings(4)
vbox.pack_start(table, expand=False)
wb = Bunch.Bunch()
row = 0
for tup in captions:
col = 0
while col < numcols:
if col < len(tup):
tup1 = tup[col:col+2]
w1, w2 = _make_widget(tup1, wb)
table.attach(w1, col, col+1, row, row+1,
xoptions=gtk.FILL, yoptions=gtk.FILL,
xpadding=1, ypadding=1)
table.attach(w2, col+1, col+2, row, row+1,
xoptions=gtk.FILL, yoptions=gtk.FILL,
xpadding=1, ypadding=1)
col += 2
row += 1
vbox.show_all()
return vbox, wb
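# A small usage sketch for build_info(), inferred from _make_widget() above:
# each caption tuple is (label text, widget type[, label text, widget type, ...]),
# and the returned Bunch is keyed by the mangled label names. Names are placeholders.
#
#     captions = (('Object name', 'entry'),
#                 ('Zoom', 'spinbutton', 'Auto zoom', 'checkbutton'),
#                 ('Apply', 'button'))
#     vbox, w = build_info(captions)
#     w.object_name.set_text('NGC 1300')
#     w.apply.connect('clicked', lambda btn: None)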
#END
|
|
import json
import requests
import pytest
import demistomock as demisto
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
BASE_URL = 'https://localhost:6078'
API_TOKEN = 'apitoken'
params = {
'base_url': BASE_URL,
'token': API_TOKEN,
'insecure': True,
'mirror_direction': 'Both',
'first_fetch': '7 Days',
'max_fetch': '2'
}
@pytest.fixture(autouse=True)
def set_mocker(mocker):
mocker.patch.object(demisto, 'params', return_value=params)
mocker.patch.object(demisto, 'integrationInstance', return_value='respond_test')
mocker.patch.object(demisto, 'findUser', return_value={
'username': 'user1'})
def load_test_data(json_path):
with open(json_path) as f:
return json.load(f)
def mock_rest_client():
from RespondAnalyst import RestClient
return RestClient(
base_url=BASE_URL,
verify=False
)
def test_fetch_incidents_does_not_get_most_recent_event_again(mocker, requests_mock):
from RespondAnalyst import fetch_incidents
get_ids_response = []
get_full_incidents_response = []
client = mock_rest_client()
last_run = {
'Tenant 1': {
'time': 1593044883}
}
requests_mock.get(
f'{BASE_URL}/session/tenantIdMapping?tempId={API_TOKEN}',
json={
'dev1': 'Tenant 1',
'dev1_tenant2': 'Tenant 2'}
)
mocker.patch.object(client, 'construct_and_send_get_incident_ids_query',
return_value=get_ids_response)
mocker.patch.object(client, 'construct_and_send_full_incidents_query',
return_value=get_full_incidents_response)
next_run, incidents = fetch_incidents(client, last_run)
assert len(incidents) == 0
assert next_run['Tenant 1']['time'] == 1593044883
assert next_run['Tenant 2']['time'] is None
def test_get_incident_command(requests_mock):
from RespondAnalyst import get_incident_command
full_incidents_response = load_test_data(
'test_data/full_incidents_response_single_full_incident.json')
client = mock_rest_client()
requests_mock.get(
f'{BASE_URL}/session/tenantIdMapping?tempId={API_TOKEN}',
json={
'dev1': 'Tenant 1',
'dev1_tenant2': 'Tenant 2'}
)
requests_mock.post(
f'{BASE_URL}/graphql?tempId={API_TOKEN}&tenantId=dev1',
json=full_incidents_response
)
args = {
'tenant_id': 'Tenant 1',
'incident_id': 6
}
command_result = get_incident_command(client, args)
assert command_result
assert '### Mandiant Automated Defense Alert, Tenant 1 : 6' in getattr(command_result, 'readable_output')
def test_fetch_incidents_no_new(mocker, requests_mock):
from RespondAnalyst import fetch_incidents
get_ids_response = []
get_full_incidents_response = []
client = mock_rest_client()
last_run = {
'Tenant 1': {
'time': 1593044883}
}
requests_mock.get(
f'{BASE_URL}/session/tenantIdMapping?tempId={API_TOKEN}',
json={
'dev1': 'Tenant 1',
'dev1_tenant2': 'Tenant 2'}
)
mocker.patch.object(client, 'construct_and_send_get_incident_ids_query',
return_value=get_ids_response)
mocker.patch.object(client, 'construct_and_send_full_incidents_query',
return_value=get_full_incidents_response)
next_run, incidents = fetch_incidents(client, last_run)
assert len(incidents) == 0
assert next_run['Tenant 1']['time'] == 1593044883
assert next_run['Tenant 2']['time'] is None
def test_fetch_incidents(mocker, requests_mock):
from RespondAnalyst import fetch_incidents
get_ids_response = [{
'id': '8', 'dateCreated': '1234566789'}, {
'id': '14', 'dateCreated': '12345676789'}]
get_full_incidents_response = load_test_data('test_data/full_incidents.json')
client = mock_rest_client()
requests_mock.get(
f'{BASE_URL}/session/tenantIdMapping?tempId={API_TOKEN}',
json={
'dev1': 'Tenant 1'}
)
mocker.patch.object(client, 'construct_and_send_get_incident_ids_query',
return_value=get_ids_response)
mocker.patch.object(client, 'construct_and_send_full_incidents_query',
return_value=get_full_incidents_response)
expected_output = load_test_data('test_data/fetch_incidents_response.json')
next_run, response = fetch_incidents(client, None)
assert expected_output == response
assert next_run['Tenant 1']['time'] == '1591374028642'
def test_remove_user(mocker, requests_mock):
from RespondAnalyst import remove_user_command
rest_client = mock_rest_client()
get_all_users_response = load_test_data('test_data/users.json')
remove_user_response = {
'data': {
'removeUserFromIncident': {
'id': '5',
'userIds': []}}}
mocker.patch.object(demisto, 'info')
requests_mock.post(
f'{BASE_URL}/graphql?tempId={API_TOKEN}&tenantId=dev1',
json=remove_user_response
)
requests_mock.get(
f'{BASE_URL}/api/v0/users?tempId={API_TOKEN}',
json=get_all_users_response
)
requests_mock.get(
f'{BASE_URL}/session/tenantIdMapping?tempId={API_TOKEN}',
json={
'dev1': 'Tenant 1'}
)
requests_mock.get(
f'{BASE_URL}/session/activeUser',
json={
'userId': 'qa1-user-id',
'currentTenant': 'dev1',
'email': '[email protected]',
'firstname': 'jay',
'lastname': 'blue'}
)
args = {
'tenant_id': 'Tenant 1',
'incident_id': 5,
'username': '[email protected]'
}
res = remove_user_command(rest_client, args)
    assert res == 'user with email: [email protected] removed from incident with id 5 ' \
                  'on tenant Tenant 1'
def test_assign_user(mocker, requests_mock):
from RespondAnalyst import assign_user_command
assign_user_response = {
'data': {
'addUserToIncident': {
'id': '5',
'userIds': ['675ad53a-d8f4-4ae7-9a3a-59de6c70b912']}}}
get_all_users_response = load_test_data('test_data/users.json')
rest_client = mock_rest_client()
mocker.patch.object(demisto, 'info')
requests_mock.get(
f'{BASE_URL}/api/v0/users?tempId={API_TOKEN}',
json=get_all_users_response
)
requests_mock.get(
f'{BASE_URL}/session/tenantIdMapping?tempId={API_TOKEN}',
json={
'dev1': 'Tenant 1'}
)
requests_mock.get(
f'{BASE_URL}/session/activeUser',
json={
'userId': 'qa1-user-id',
'currentTenant': 'dev1',
'email': '[email protected]',
'firstname': 'jay',
'lastname': 'blue'}
)
requests_mock.post(
f'{BASE_URL}/graphql?tempId={API_TOKEN}&tenantId=dev1',
json=assign_user_response
)
args = {
'tenant_id': 'Tenant 1',
'incident_id': 5,
'username': '[email protected]',
}
res = assign_user_command(rest_client, args)
assert res == 'user with email: [email protected] added to incident with id 5 on ' \
'tenant Tenant 1'
# no tenant id provided
args = {
'incident_id': 5,
'username': '[email protected]',
}
res = assign_user_command(rest_client, args)
assert res == 'user with email: [email protected] added to incident with id 5 on ' \
'tenant Tenant 1'
def test_close_incident(mocker, requests_mock):
from RespondAnalyst import close_incident_command
rest_client = mock_rest_client()
mocker.patch.object(demisto, 'info')
get_all_users_response = load_test_data('test_data/users.json')
close_incident_response = load_test_data('test_data/close_incident_response.json')
single_full_incident_response = load_test_data('test_data/single_full_incident.json')
mocker.patch.object(rest_client, 'construct_and_send_full_incidents_query',
return_value=single_full_incident_response)
requests_mock.get(
f'{BASE_URL}/api/v0/users?tempId={API_TOKEN}',
json=get_all_users_response
)
requests_mock.get(
f'{BASE_URL}/session/tenantIdMapping?tempId={API_TOKEN}',
json={
'dev1': 'Tenant 1'}
)
requests_mock.get(
f'{BASE_URL}/session/activeUser',
json={
'userId': 'qa1-user-id',
'currentTenant': 'dev1',
'email': '[email protected]',
'firstname': 'jay',
'lastname': 'blue'}
)
requests_mock.post(
f'{BASE_URL}/graphql?tempId={API_TOKEN}&tenantId=dev1',
json=close_incident_response
)
args = {
'tenant_id': 'Tenant 1',
'incident_id': 5,
'incident_feedback': 'NonActionable',
'feedback_selected_options': [{
'id': '4',
'key': 'unmonitoredAssets',
'value': 'true'},
{
'id': '19',
'key': 'scopedCorrectly',
'value': 'No'}],
'incident_comments': 'new text',
}
res = close_incident_command(rest_client, args)
assert "incident closed and/or feedback updated for incident with id 5 on tenant Tenant 1" in \
res
# no tenant id
args = {
'incident_id': 6,
'incident_feedback': 'NonActionable',
'feedback_selected_options': [{
'id': '4',
'key': 'unmonitoredAssets',
'value': 'true'},
{
'id': '19',
'key': 'scopedCorrectly',
'value': 'No'}],
'incident_comments': 'new text',
}
    # not expecting a different id because of mocked responses, just expecting a successful response
    res = close_incident_command(rest_client, args)
assert 'incident closed and/or feedback updated for incident with id 5 on tenant Tenant 1' in \
res
def test_assign_user_raise_exception(mocker, requests_mock):
from RespondAnalyst import assign_user_command
rest_client = mock_rest_client()
mocker.patch.object(demisto, 'error')
get_all_users_response = load_test_data('test_data/users.json')
mocker.patch.object(rest_client, 'construct_and_send_add_user_to_incident_mutation',
return_value=Exception)
requests_mock.get(
f'{BASE_URL}/api/v0/users?tempId={API_TOKEN}',
json=get_all_users_response
)
requests_mock.get(
f'{BASE_URL}/session/tenantIdMapping?tempId={API_TOKEN}',
json={
'dev1': 'Tenant 1'}
)
requests_mock.get(
f'{BASE_URL}/session/activeUser',
json={
'userId': 'qa1-user-id',
'currentTenant': 'dev1',
'email': '[email protected]',
'firstname': 'jay',
'lastname': 'blue'}
)
args = {
'tenant_id': 'Tenant 1',
'incident_id': 5,
'username': '[email protected]',
}
with pytest.raises(Exception):
assign_user_command(rest_client, args)
demisto.error.assert_any_call(
"error adding user to incident: type object 'Exception' has no attribute 'get'")
def test_remove_user_raises_exception(mocker, requests_mock):
from RespondAnalyst import remove_user_command
rest_client = mock_rest_client()
mocker.patch.object(demisto, 'error')
get_all_users_response = load_test_data('test_data/users.json')
mocker.patch.object(rest_client, 'construct_and_send_remove_user_from_incident_mutation',
return_value=Exception)
requests_mock.get(
f'{BASE_URL}/api/v0/users?tempId={API_TOKEN}',
json=get_all_users_response
)
requests_mock.get(
f'{BASE_URL}/session/tenantIdMapping?tempId={API_TOKEN}',
json={
'dev1': 'Tenant 1'}
)
requests_mock.get(
f'{BASE_URL}/session/activeUser?tempId={API_TOKEN}',
json={
'userId': 'qa1-user-id',
'currentTenant': 'dev1',
'email': '[email protected]',
'firstname': 'jay',
'lastname': 'blue'}
)
args = {
'tenant_id': 'Tenant 1',
'incident_id': 5,
'username': '[email protected]'
}
with pytest.raises(Exception):
remove_user_command(rest_client, args)
demisto.error.assert_called_once_with(
'no user found with email [email protected]')
def test_close_incident_with_bad_responses(mocker, requests_mock):
from RespondAnalyst import close_incident_command
rest_client = mock_rest_client()
mocker.patch.object(demisto, 'error')
get_all_users_response = load_test_data('test_data/users.json')
mocker.patch.object(rest_client, 'construct_and_send_close_incident_mutation',
return_value=Exception)
mocker.patch.object(rest_client, 'construct_and_send_full_incidents_query',
return_value=Exception)
requests_mock.get(
f'{BASE_URL}/api/v0/users?tempId={API_TOKEN}',
json=get_all_users_response
)
requests_mock.get(
f'{BASE_URL}/session/tenantIdMapping?tempId={API_TOKEN}',
json={
'dev1': 'Tenant 1'}
)
requests_mock.get(
f'{BASE_URL}/session/activeUser',
json={
'userId': 'qa1-user-id',
'currentTenant': 'dev1',
'email': '[email protected]',
'firstname': 'jay',
'lastname': 'blue'}
)
args = {
'tenant_id': 'Tenant 1',
'incident_id': 5,
'incident_feedback': 'NonActionable',
'feedback_selected_options': [{
'id': '4',
'key': 'unmonitoredAssets',
'value': 'true'},
{
'id': '19',
'key': 'scopedCorrectly',
'value': 'No'}],
'incident_comments': 'new text',
}
with pytest.raises(Exception):
close_incident_command(rest_client, args)
demisto.error.assert_any_call(
"error closing incident and/or updating feedback: 'type' object is not subscriptable")
def test_get_remote_data_command(requests_mock):
from RespondAnalyst import get_remote_data_command
full_incidents_response = load_test_data(
'test_data/full_incidents_response_single_full_incident.json')
rest_client = mock_rest_client()
requests_mock.get(
f'{BASE_URL}/session/tenantIdMapping?tempId={API_TOKEN}',
json={
'dev1': 'Tenant 1'}
)
requests_mock.post(
f'{BASE_URL}/graphql?tempId={API_TOKEN}&tenantId=dev1',
json=full_incidents_response
)
args = {
'id': 'Tenant 1:1'}
res = get_remote_data_command(rest_client, args)
expected_result = [
{
"id": "Tenant 1:1",
"incidentId": "6",
"timeGenerated": "2020-06-05T16:20:21Z",
"eventCount": 24,
"firstEventTime": "2019-12-21T13:05:31Z",
"lastEventTime": "2020-06-05T08:20:17Z",
"URL": "https://localhost:6078/secure/incidents/6?tenantId=dev1",
"closeURL": "https://localhost:6078/secure/incidents/feedback/6?tenantId=dev1",
"title": "Virus Infections, Suspicious Repeated Connections and Int - Int Network IPS "
"Activity",
"description": "description of the incident",
"status": "Closed",
"severity": "Critical",
"probability": "VeryHigh",
"attackStage": "LateralMovement",
"attackTactic": None,
"assetCriticality": "Critical",
"assetCount": 1,
"assets": [{
"hostname": "host1",
"ipaddress": "10.150.0.11",
"isinternal": True}],
"escalationreasons": [
{
"label": "Multiple Network IPS Signatures Triggered by Same Internal Asset"}],
"assignedUsers": ["user1"],
"feedback": {
"timeUpdated": "1593469076049",
"userId": "[email protected]",
"outcome": "Non-Actionable",
"comments": "blah blah blah"},
"tenantIdRespond": "dev1",
"tenantId": "Tenant 1",
"respondRemoteId": "Tenant 1:6",
"dbotMirrorDirection": "Both",
"dbotMirrorInstance": "respond_test",
"owner": "user1",
'externalsystems': [{
'hostname': 'host2',
'ipaddress': '10.150.0.22',
'isinternal': False}],
'malware': [{
'name': 'name1',
'type': 'Ransomware',
'vendor': 'vendor'},
{
'name': 'name2',
'type': 'RAT',
'vendor': 'vendor'}],
"hashes": [{'hash': '44d88612fea8a8f36de82e1278abb02f'}],
'accounts': [{
'domain': None,
'name': 'svc_adminscom3'},
{
'domain': None,
'name': 'svc_adminscom'},
{
'domain': 'test',
'name': 'svc_adminscom2'},
{
'domain': None,
'name': 'svc_adminscom2'},
{
'domain': 'test',
'name': 'svc_adminscom3'},
{
'domain': 'test',
'name': 'svc_adminscom'},
{
'domain': None,
'name': 'Unknown'}],
"signatures": [],
"domains": []},
{
"Contents": {
"closeNotes": "blah blah blah",
"closeReason": "Non-Actionable",
"dbotIncidentClose": True
},
"ContentsFormat": "json",
"Type": 1
}
]
assert res == expected_result
def test_update_remote_system_command(mocker, requests_mock):
from RespondAnalyst import update_remote_system_command
args = {
"data": "tons of data",
"entries": "entries val",
"incidentChanged": True,
"remoteId": "Tenant 1:1",
"status": "status val",
"delta": {
"title": "title val",
"description": "description val"}
}
rest_client = mock_rest_client()
get_all_users_response = load_test_data('test_data/users.json')
requests_mock.get(
f'{BASE_URL}/session/tenantIdMapping?tempId={API_TOKEN}',
json={
'dev1': 'Tenant 1',
'dev1_tenant2': 'Tenant 2'}
)
requests_mock.get(
f'{BASE_URL}/api/v0/users?tempId={API_TOKEN}',
json=get_all_users_response
)
requests_mock.get(
f'{BASE_URL}/session/activeUser',
json={
'userId': 'qa1-user-id',
'currentTenant': 'dev1',
'email': '[email protected]',
'firstname': 'jay',
'lastname': 'blue'}
)
mocker.patch.object(rest_client, 'construct_and_send_update_title_mutation', return_value={})
mocker.patch.object(rest_client, 'construct_and_send_update_description_mutation',
return_value={})
title_spy = mocker.spy(rest_client, 'construct_and_send_update_title_mutation')
desc_spy = mocker.spy(rest_client, 'construct_and_send_update_description_mutation')
res = update_remote_system_command(rest_client, args)
assert title_spy.call_count == 1
assert desc_spy.call_count == 1
assert res == 'Tenant 1:1'
def test_get_mapping_fields_command():
from RespondAnalyst import get_mapping_fields_command
res = get_mapping_fields_command()
expected = {
'Respond Software Incident': {
'feedback comments': 'the user assigned outcome of a closed incident',
'title': 'incident title',
'feedback outcome': 'the outcome of the incident close'}}
assert res.extract_mapping() == expected
def test_get_escalations_no_new(requests_mock, mocker):
from RespondAnalyst import get_escalations_command
escalation_query_response = {
'data': {
'newEscalations': []}}
requests_mock.post(
f'{BASE_URL}/graphql?tempId={API_TOKEN}&tenantId=dev1',
json=escalation_query_response
)
args = {
'incident_id': '1'}
rest_client = mock_rest_client()
requests_mock.get(
f'{BASE_URL}/session/tenantIdMapping?tempId={API_TOKEN}',
json={
'dev1': 'Tenant 1'}
)
escalations_spy = mocker.spy(rest_client, 'construct_and_send_new_escalations_query')
res = get_escalations_command(rest_client, args)
assert res == [{
'Type': 1,
'Contents': 'No new escalations',
'ContentsFormat': 'text'}]
assert escalations_spy.call_count == 1
def test_get_escalations_throws_exception(requests_mock, mocker):
from RespondAnalyst import get_escalations_command
args = {
'tenant_id': 'Tenant 1',
'incident_id': '1'}
rest_client = mock_rest_client()
requests_mock.get(
f'{BASE_URL}/session/tenantIdMapping?tempId={API_TOKEN}',
json={
'dev1': 'Tenant 1',
'dev1_tenant2': 'Tenant 2'}
)
debug_spy = mocker.spy(demisto, 'debug')
mocker.patch.object(rest_client,
'construct_and_send_new_escalations_query').side_effect = Exception(
'Unauthorized')
with pytest.raises(Exception):
get_escalations_command(rest_client, args)
assert debug_spy.call_count == 1
debug_spy.assert_called_with(
"Error while getting escalation data in Respond incoming mirror for incident 1 Error "
"message: Unauthorized")
|
|
# subprocess - Subprocesses with accessible I/O streams
#
# For more information about this module, see PEP 324.
#
# Copyright (c) 2003-2005 by Peter Astrand <[email protected]>
#
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/2.4/license for licensing details.
r"""Subprocesses with accessible I/O streams
This module allows you to spawn processes, connect to their
input/output/error pipes, and obtain their return codes.
For a complete description of this module see the Python documentation.
Main API
========
call(...): Runs a command, waits for it to complete, then returns
the return code.
check_call(...): Same as call() but raises CalledProcessError()
if return code is not 0
check_output(...): Same as check_call() but returns the contents of
stdout instead of a return code
Popen(...): A class for flexibly executing a command in a new process
Constants
---------
PIPE: Special value that indicates a pipe should be created
STDOUT: Special value that indicates that stderr should go to stdout
"""
import sys
mswindows = (sys.platform == "win32")
import os
import types
import traceback
import gc
import signal
import errno
# Exception classes used by this module.
class CalledProcessError(Exception):
"""This exception is raised when a process run by check_call() or
check_output() returns a non-zero exit status.
Attributes:
cmd, returncode, output
"""
def __init__(self, returncode, cmd, output=None):
self.returncode = returncode
self.cmd = cmd
self.output = output
def __str__(self):
return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode)
if mswindows:
import threading
import msvcrt
import _subprocess
class STARTUPINFO:
dwFlags = 0
hStdInput = None
hStdOutput = None
hStdError = None
wShowWindow = 0
class pywintypes:
error = IOError
else:
import select
_has_poll = hasattr(select, 'poll')
import fcntl
import pickle
# When select or poll has indicated that the file is writable,
# we can write up to _PIPE_BUF bytes without risk of blocking.
# POSIX defines PIPE_BUF as >= 512.
_PIPE_BUF = getattr(select, 'PIPE_BUF', 512)
__all__ = ["Popen", "PIPE", "STDOUT", "call", "check_call",
"check_output", "CalledProcessError"]
if mswindows:
from _subprocess import (CREATE_NEW_CONSOLE, CREATE_NEW_PROCESS_GROUP,
STD_INPUT_HANDLE, STD_OUTPUT_HANDLE,
STD_ERROR_HANDLE, SW_HIDE,
STARTF_USESTDHANDLES, STARTF_USESHOWWINDOW)
__all__.extend(["CREATE_NEW_CONSOLE", "CREATE_NEW_PROCESS_GROUP",
"STD_INPUT_HANDLE", "STD_OUTPUT_HANDLE",
"STD_ERROR_HANDLE", "SW_HIDE",
"STARTF_USESTDHANDLES", "STARTF_USESHOWWINDOW"])
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except:
MAXFD = 256
_active = []
def _cleanup():
for inst in _active[:]:
res = inst._internal_poll(_deadstate=sys.maxint)
if res is not None:
try:
_active.remove(inst)
except ValueError:
# This can happen if two threads create a new Popen instance.
# It's harmless that it was already removed, so ignore.
pass
PIPE = -1
STDOUT = -2
def _eintr_retry_call(func, *args):
while True:
try:
return func(*args)
except (OSError, IOError) as e:
if e.errno == errno.EINTR:
continue
raise
# XXX This function is only used by multiprocessing and the test suite,
# but it's here so that it can be imported when Python is compiled without
# threads.
def _args_from_interpreter_flags():
"""Return a list of command-line arguments reproducing the current
settings in sys.flags and sys.warnoptions."""
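    # For example (a sketch): under "python -O -B" this returns ['-O', '-B'].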
flag_opt_map = {
'debug': 'd',
# 'inspect': 'i',
# 'interactive': 'i',
'optimize': 'O',
'dont_write_bytecode': 'B',
'no_user_site': 's',
'no_site': 'S',
'ignore_environment': 'E',
'verbose': 'v',
'bytes_warning': 'b',
'py3k_warning': '3',
}
args = []
for flag, opt in flag_opt_map.items():
v = getattr(sys.flags, flag)
if v > 0:
args.append('-' + opt * v)
if getattr(sys.flags, 'hash_randomization') != 0:
args.append('-R')
for opt in sys.warnoptions:
args.append('-W' + opt)
return args
def call(*popenargs, **kwargs):
"""Run command with arguments. Wait for command to complete, then
return the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
retcode = call(["ls", "-l"])
"""
return Popen(*popenargs, **kwargs).wait()
def check_call(*popenargs, **kwargs):
"""Run command with arguments. Wait for command to complete. If
the exit code was zero then return, otherwise raise
CalledProcessError. The CalledProcessError object will have the
return code in the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
check_call(["ls", "-l"])
"""
retcode = call(*popenargs, **kwargs)
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise CalledProcessError(retcode, cmd)
return 0
def check_output(*popenargs, **kwargs):
r"""Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(["ls", "-l", "/dev/null"])
'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> check_output(["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... stderr=STDOUT)
'ls: non_existent_file: No such file or directory\n'
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = Popen(stdout=PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise CalledProcessError(retcode, cmd, output=output)
return output
def list2cmdline(seq):
"""
Translate a sequence of arguments into a command line
string, using the same rules as the MS C runtime:
1) Arguments are delimited by white space, which is either a
space or a tab.
2) A string surrounded by double quotation marks is
interpreted as a single argument, regardless of white space
contained within. A quoted string can be embedded in an
argument.
3) A double quotation mark preceded by a backslash is
interpreted as a literal double quotation mark.
4) Backslashes are interpreted literally, unless they
immediately precede a double quotation mark.
5) If backslashes immediately precede a double quotation mark,
every pair of backslashes is interpreted as a literal
backslash. If the number of backslashes is odd, the last
backslash escapes the next double quotation mark as
described in rule 3.
"""
# See
# http://msdn.microsoft.com/en-us/library/17w5ykft.aspx
# or search http://msdn.microsoft.com for
# "Parsing C++ Command-Line Arguments"
result = []
needquote = False
for arg in seq:
bs_buf = []
# Add a space to separate this argument from the others
if result:
result.append(' ')
needquote = (" " in arg) or ("\t" in arg) or not arg
if needquote:
result.append('"')
for c in arg:
if c == '\\':
# Don't know if we need to double yet.
bs_buf.append(c)
elif c == '"':
# Double backslashes.
result.append('\\' * len(bs_buf)*2)
bs_buf = []
result.append('\\"')
else:
# Normal char
if bs_buf:
result.extend(bs_buf)
bs_buf = []
result.append(c)
# Add remaining backslashes, if any.
if bs_buf:
result.extend(bs_buf)
if needquote:
result.extend(bs_buf)
result.append('"')
return ''.join(result)
class Popen(object):
""" Execute a child program in a new process.
For a complete description of the arguments see the Python documentation.
Arguments:
args: A string, or a sequence of program arguments.
bufsize: supplied as the buffering argument to the open() function when
creating the stdin/stdout/stderr pipe file objects
executable: A replacement program to execute.
stdin, stdout and stderr: These specify the executed programs' standard
input, standard output and standard error file handles, respectively.
preexec_fn: (POSIX only) An object to be called in the child process
just before the child is executed.
close_fds: Controls closing or inheriting of file descriptors.
shell: If true, the command will be executed through the shell.
cwd: Sets the current directory before the child is executed.
env: Defines the environment variables for the new process.
universal_newlines: If true, use universal line endings for file
objects stdin, stdout and stderr.
startupinfo and creationflags (Windows only)
Attributes:
stdin, stdout, stderr, pid, returncode
"""
_child_created = False # Set here since __del__ checks it
def __init__(self, args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=False, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0):
"""Create new Popen instance."""
_cleanup()
if not isinstance(bufsize, (int, long)):
raise TypeError("bufsize must be an integer")
if mswindows:
if preexec_fn is not None:
raise ValueError("preexec_fn is not supported on Windows "
"platforms")
if close_fds and (stdin is not None or stdout is not None or
stderr is not None):
raise ValueError("close_fds is not supported on Windows "
"platforms if you redirect stdin/stdout/stderr")
else:
# POSIX
if startupinfo is not None:
raise ValueError("startupinfo is only supported on Windows "
"platforms")
if creationflags != 0:
raise ValueError("creationflags is only supported on Windows "
"platforms")
self.stdin = None
self.stdout = None
self.stderr = None
self.pid = None
self.returncode = None
self.universal_newlines = universal_newlines
# Input and output objects. The general principle is like
# this:
#
# Parent Child
# ------ -----
# p2cwrite ---stdin---> p2cread
# c2pread <--stdout--- c2pwrite
# errread <--stderr--- errwrite
#
# On POSIX, the child objects are file descriptors. On
# Windows, these are Windows file handles. The parent objects
# are file descriptors on both platforms. The parent objects
# are None when not using PIPEs. The child objects are None
# when not redirecting.
(p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite), to_close = self._get_handles(stdin, stdout, stderr)
try:
self._execute_child(args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines,
startupinfo, creationflags, shell, to_close,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
except Exception:
# Preserve original exception in case os.close raises.
exc_type, exc_value, exc_trace = sys.exc_info()
for fd in to_close:
try:
if mswindows:
fd.Close()
else:
os.close(fd)
except EnvironmentError:
pass
raise exc_type, exc_value, exc_trace
if mswindows:
if p2cwrite is not None:
p2cwrite = msvcrt.open_osfhandle(p2cwrite.Detach(), 0)
if c2pread is not None:
c2pread = msvcrt.open_osfhandle(c2pread.Detach(), 0)
if errread is not None:
errread = msvcrt.open_osfhandle(errread.Detach(), 0)
if p2cwrite is not None:
self.stdin = os.fdopen(p2cwrite, 'wb', bufsize)
if c2pread is not None:
if universal_newlines:
self.stdout = os.fdopen(c2pread, 'rU', bufsize)
else:
self.stdout = os.fdopen(c2pread, 'rb', bufsize)
if errread is not None:
if universal_newlines:
self.stderr = os.fdopen(errread, 'rU', bufsize)
else:
self.stderr = os.fdopen(errread, 'rb', bufsize)
def _translate_newlines(self, data):
data = data.replace("\r\n", "\n")
data = data.replace("\r", "\n")
return data
def __del__(self, _maxint=sys.maxint):
# If __init__ hasn't had a chance to execute (e.g. if it
# was passed an undeclared keyword argument), we don't
# have a _child_created attribute at all.
if not self._child_created:
# We didn't get to successfully create a child process.
return
# In case the child hasn't been waited on, check if it's done.
self._internal_poll(_deadstate=_maxint)
if self.returncode is None and _active is not None:
# Child is still running, keep us alive until we can wait on it.
_active.append(self)
def communicate(self, input=None):
"""Interact with process: Send data to stdin. Read data from
stdout and stderr, until end-of-file is reached. Wait for
process to terminate. The optional input argument should be a
string to be sent to the child process, or None, if no data
should be sent to the child.
communicate() returns a tuple (stdout, stderr)."""
# Optimization: If we are only using one pipe, or no pipe at
# all, using select() or threads is unnecessary.
if [self.stdin, self.stdout, self.stderr].count(None) >= 2:
stdout = None
stderr = None
if self.stdin:
if input:
try:
self.stdin.write(input)
except IOError as e:
if e.errno != errno.EPIPE and e.errno != errno.EINVAL:
raise
self.stdin.close()
elif self.stdout:
stdout = _eintr_retry_call(self.stdout.read)
self.stdout.close()
elif self.stderr:
stderr = _eintr_retry_call(self.stderr.read)
self.stderr.close()
self.wait()
return (stdout, stderr)
return self._communicate(input)
def poll(self):
"""Check if child process has terminated. Set and return returncode
attribute."""
return self._internal_poll()
if mswindows:
#
# Windows methods
#
def _get_handles(self, stdin, stdout, stderr):
"""Construct and return tuple with IO objects:
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
"""
to_close = set()
if stdin is None and stdout is None and stderr is None:
return (None, None, None, None, None, None), to_close
p2cread, p2cwrite = None, None
c2pread, c2pwrite = None, None
errread, errwrite = None, None
if stdin is None:
p2cread = _subprocess.GetStdHandle(_subprocess.STD_INPUT_HANDLE)
if p2cread is None:
p2cread, _ = _subprocess.CreatePipe(None, 0)
elif stdin == PIPE:
p2cread, p2cwrite = _subprocess.CreatePipe(None, 0)
elif isinstance(stdin, int):
p2cread = msvcrt.get_osfhandle(stdin)
else:
# Assuming file-like object
p2cread = msvcrt.get_osfhandle(stdin.fileno())
p2cread = self._make_inheritable(p2cread)
# We just duplicated the handle, it has to be closed at the end
to_close.add(p2cread)
if stdin == PIPE:
to_close.add(p2cwrite)
if stdout is None:
c2pwrite = _subprocess.GetStdHandle(_subprocess.STD_OUTPUT_HANDLE)
if c2pwrite is None:
_, c2pwrite = _subprocess.CreatePipe(None, 0)
elif stdout == PIPE:
c2pread, c2pwrite = _subprocess.CreatePipe(None, 0)
elif isinstance(stdout, int):
c2pwrite = msvcrt.get_osfhandle(stdout)
else:
# Assuming file-like object
c2pwrite = msvcrt.get_osfhandle(stdout.fileno())
c2pwrite = self._make_inheritable(c2pwrite)
# We just duplicated the handle, it has to be closed at the end
to_close.add(c2pwrite)
if stdout == PIPE:
to_close.add(c2pread)
if stderr is None:
errwrite = _subprocess.GetStdHandle(_subprocess.STD_ERROR_HANDLE)
if errwrite is None:
_, errwrite = _subprocess.CreatePipe(None, 0)
elif stderr == PIPE:
errread, errwrite = _subprocess.CreatePipe(None, 0)
elif stderr == STDOUT:
errwrite = c2pwrite
elif isinstance(stderr, int):
errwrite = msvcrt.get_osfhandle(stderr)
else:
# Assuming file-like object
errwrite = msvcrt.get_osfhandle(stderr.fileno())
errwrite = self._make_inheritable(errwrite)
# We just duplicated the handle, it has to be closed at the end
to_close.add(errwrite)
if stderr == PIPE:
to_close.add(errread)
return (p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite), to_close
def _make_inheritable(self, handle):
"""Return a duplicate of handle, which is inheritable"""
return _subprocess.DuplicateHandle(_subprocess.GetCurrentProcess(),
handle, _subprocess.GetCurrentProcess(), 0, 1,
_subprocess.DUPLICATE_SAME_ACCESS)
def _find_w9xpopen(self):
"""Find and return absolut path to w9xpopen.exe"""
w9xpopen = os.path.join(
os.path.dirname(_subprocess.GetModuleFileName(0)),
"w9xpopen.exe")
if not os.path.exists(w9xpopen):
# Eeek - file-not-found - possibly an embedding
# situation - see if we can locate it in sys.exec_prefix
w9xpopen = os.path.join(os.path.dirname(sys.exec_prefix),
"w9xpopen.exe")
if not os.path.exists(w9xpopen):
raise RuntimeError("Cannot locate w9xpopen.exe, which is "
"needed for Popen to work with your "
"shell or platform.")
return w9xpopen
def _execute_child(self, args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines,
startupinfo, creationflags, shell, to_close,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite):
"""Execute program (MS Windows version)"""
if not isinstance(args, types.StringTypes):
args = list2cmdline(args)
# Process startup details
if startupinfo is None:
startupinfo = STARTUPINFO()
if None not in (p2cread, c2pwrite, errwrite):
startupinfo.dwFlags |= _subprocess.STARTF_USESTDHANDLES
startupinfo.hStdInput = p2cread
startupinfo.hStdOutput = c2pwrite
startupinfo.hStdError = errwrite
if shell:
startupinfo.dwFlags |= _subprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = _subprocess.SW_HIDE
comspec = os.environ.get("COMSPEC", "cmd.exe")
                args = '{} /c "{}"'.format(comspec, args)
if (_subprocess.GetVersion() >= 0x80000000 or
os.path.basename(comspec).lower() == "command.com"):
# Win9x, or using command.com on NT. We need to
# use the w9xpopen intermediate program. For more
# information, see KB Q150956
# (http://web.archive.org/web/20011105084002/http://support.microsoft.com/support/kb/articles/Q150/9/56.asp)
w9xpopen = self._find_w9xpopen()
args = '"%s" %s' % (w9xpopen, args)
# Not passing CREATE_NEW_CONSOLE has been known to
# cause random failures on win9x. Specifically a
# dialog: "Your program accessed mem currently in
# use at xxx" and a hopeful warning about the
                    # stability of your system. The cost is that Ctrl+C
                    # won't kill children.
creationflags |= _subprocess.CREATE_NEW_CONSOLE
def _close_in_parent(fd):
fd.Close()
to_close.remove(fd)
# Start the process
try:
hp, ht, pid, tid = _subprocess.CreateProcess(executable, args,
# no special security
None, None,
int(not close_fds),
creationflags,
env,
cwd,
startupinfo)
except pywintypes.error, e:
# Translate pywintypes.error to WindowsError, which is
# a subclass of OSError. FIXME: We should really
# translate errno using _sys_errlist (or similar), but
# how can this be done from Python?
raise WindowsError(*e.args)
finally:
# Child is launched. Close the parent's copy of those pipe
# handles that only the child should have open. You need
# to make sure that no handles to the write end of the
# output pipe are maintained in this process or else the
# pipe will not close when the child process exits and the
# ReadFile will hang.
if p2cread is not None:
_close_in_parent(p2cread)
if c2pwrite is not None:
_close_in_parent(c2pwrite)
if errwrite is not None:
_close_in_parent(errwrite)
# Retain the process handle, but close the thread handle
self._child_created = True
self._handle = hp
self.pid = pid
ht.Close()
def _internal_poll(self, _deadstate=None,
_WaitForSingleObject=_subprocess.WaitForSingleObject,
_WAIT_OBJECT_0=_subprocess.WAIT_OBJECT_0,
_GetExitCodeProcess=_subprocess.GetExitCodeProcess):
"""Check if child process has terminated. Returns returncode
attribute.
This method is called by __del__, so it can only refer to objects
in its local scope.
"""
if self.returncode is None:
if _WaitForSingleObject(self._handle, 0) == _WAIT_OBJECT_0:
self.returncode = _GetExitCodeProcess(self._handle)
return self.returncode
def wait(self):
"""Wait for child process to terminate. Returns returncode
attribute."""
if self.returncode is None:
_subprocess.WaitForSingleObject(self._handle,
_subprocess.INFINITE)
self.returncode = _subprocess.GetExitCodeProcess(self._handle)
return self.returncode
def _readerthread(self, fh, buffer):
buffer.append(fh.read())
def _communicate(self, input):
stdout = None # Return
stderr = None # Return
if self.stdout:
stdout = []
stdout_thread = threading.Thread(target=self._readerthread,
args=(self.stdout, stdout))
stdout_thread.setDaemon(True)
stdout_thread.start()
if self.stderr:
stderr = []
stderr_thread = threading.Thread(target=self._readerthread,
args=(self.stderr, stderr))
stderr_thread.setDaemon(True)
stderr_thread.start()
if self.stdin:
if input is not None:
try:
self.stdin.write(input)
except IOError as e:
if e.errno == errno.EPIPE:
# communicate() should ignore broken pipe error
pass
elif (e.errno == errno.EINVAL
and self.poll() is not None):
# Issue #19612: stdin.write() fails with EINVAL
# if the process already exited before the write
pass
else:
raise
self.stdin.close()
if self.stdout:
stdout_thread.join()
if self.stderr:
stderr_thread.join()
# All data exchanged. Translate lists into strings.
if stdout is not None:
stdout = stdout[0]
if stderr is not None:
stderr = stderr[0]
# Translate newlines, if requested. We cannot let the file
# object do the translation: It is based on stdio, which is
# impossible to combine with select (unless forcing no
# buffering).
if self.universal_newlines and hasattr(file, 'newlines'):
if stdout:
stdout = self._translate_newlines(stdout)
if stderr:
stderr = self._translate_newlines(stderr)
self.wait()
return (stdout, stderr)
def send_signal(self, sig):
"""Send a signal to the process
"""
if sig == signal.SIGTERM:
self.terminate()
elif sig == signal.CTRL_C_EVENT:
os.kill(self.pid, signal.CTRL_C_EVENT)
elif sig == signal.CTRL_BREAK_EVENT:
os.kill(self.pid, signal.CTRL_BREAK_EVENT)
else:
raise ValueError("Unsupported signal: {}".format(sig))
def terminate(self):
"""Terminates the process
"""
try:
_subprocess.TerminateProcess(self._handle, 1)
except OSError as e:
# ERROR_ACCESS_DENIED (winerror 5) is received when the
# process already died.
if e.winerror != 5:
raise
rc = _subprocess.GetExitCodeProcess(self._handle)
if rc == _subprocess.STILL_ACTIVE:
raise
self.returncode = rc
kill = terminate
else:
#
# POSIX methods
#
def _get_handles(self, stdin, stdout, stderr):
"""Construct and return tuple with IO objects:
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
"""
to_close = set()
p2cread, p2cwrite = None, None
c2pread, c2pwrite = None, None
errread, errwrite = None, None
if stdin is None:
pass
elif stdin == PIPE:
p2cread, p2cwrite = self.pipe_cloexec()
to_close.update((p2cread, p2cwrite))
elif isinstance(stdin, int):
p2cread = stdin
else:
# Assuming file-like object
p2cread = stdin.fileno()
if stdout is None:
pass
elif stdout == PIPE:
c2pread, c2pwrite = self.pipe_cloexec()
to_close.update((c2pread, c2pwrite))
elif isinstance(stdout, int):
c2pwrite = stdout
else:
# Assuming file-like object
c2pwrite = stdout.fileno()
if stderr is None:
pass
elif stderr == PIPE:
errread, errwrite = self.pipe_cloexec()
to_close.update((errread, errwrite))
elif stderr == STDOUT:
if c2pwrite is not None:
errwrite = c2pwrite
else: # child's stdout is not set, use parent's stdout
errwrite = sys.__stdout__.fileno()
elif isinstance(stderr, int):
errwrite = stderr
else:
# Assuming file-like object
errwrite = stderr.fileno()
return (p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite), to_close
def _set_cloexec_flag(self, fd, cloexec=True):
try:
cloexec_flag = fcntl.FD_CLOEXEC
except AttributeError:
cloexec_flag = 1
old = fcntl.fcntl(fd, fcntl.F_GETFD)
if cloexec:
fcntl.fcntl(fd, fcntl.F_SETFD, old | cloexec_flag)
else:
fcntl.fcntl(fd, fcntl.F_SETFD, old & ~cloexec_flag)
def pipe_cloexec(self):
"""Create a pipe with FDs set CLOEXEC."""
# Pipes' FDs are set CLOEXEC by default because we don't want them
# to be inherited by other subprocesses: the CLOEXEC flag is removed
# from the child's FDs by _dup2(), between fork() and exec().
# This is not atomic: we would need the pipe2() syscall for that.
r, w = os.pipe()
self._set_cloexec_flag(r)
self._set_cloexec_flag(w)
return r, w
def _close_fds(self, but):
if hasattr(os, 'closerange'):
os.closerange(3, but)
os.closerange(but + 1, MAXFD)
else:
for i in xrange(3, MAXFD):
if i == but:
continue
try:
os.close(i)
except:
pass
def _execute_child(self, args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines,
startupinfo, creationflags, shell, to_close,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite):
"""Execute program (POSIX version)"""
if isinstance(args, types.StringTypes):
args = [args]
else:
args = list(args)
if shell:
args = ["/bin/sh", "-c"] + args
if executable:
args[0] = executable
if executable is None:
executable = args[0]
def _close_in_parent(fd):
os.close(fd)
to_close.remove(fd)
# For transferring possible exec failure from child to parent
# The first char specifies the exception type: 0 means
# OSError, 1 means some other error.
errpipe_read, errpipe_write = self.pipe_cloexec()
try:
try:
gc_was_enabled = gc.isenabled()
# Disable gc to avoid bug where gc -> file_dealloc ->
# write to stderr -> hang. http://bugs.python.org/issue1336
gc.disable()
try:
self.pid = os.fork()
except:
if gc_was_enabled:
gc.enable()
raise
self._child_created = True
if self.pid == 0:
# Child
try:
# Close parent's pipe ends
if p2cwrite is not None:
os.close(p2cwrite)
if c2pread is not None:
os.close(c2pread)
if errread is not None:
os.close(errread)
os.close(errpipe_read)
# When duping fds, if there arises a situation
# where one of the fds is either 0, 1 or 2, it
# is possible that it is overwritten (#12607).
if c2pwrite == 0:
c2pwrite = os.dup(c2pwrite)
if errwrite == 0 or errwrite == 1:
errwrite = os.dup(errwrite)
# Dup fds for child
def _dup2(a, b):
# dup2() removes the CLOEXEC flag but
# we must do it ourselves if dup2()
# would be a no-op (issue #10806).
if a == b:
self._set_cloexec_flag(a, False)
elif a is not None:
os.dup2(a, b)
_dup2(p2cread, 0)
_dup2(c2pwrite, 1)
_dup2(errwrite, 2)
# Close pipe fds. Make sure we don't close the
# same fd more than once, or standard fds.
closed = { None }
for fd in [p2cread, c2pwrite, errwrite]:
if fd not in closed and fd > 2:
os.close(fd)
closed.add(fd)
if cwd is not None:
os.chdir(cwd)
if preexec_fn:
preexec_fn()
# Close all other fds, if asked for - after
# preexec_fn(), which may open FDs.
if close_fds:
self._close_fds(but=errpipe_write)
if env is None:
os.execvp(executable, args)
else:
os.execvpe(executable, args, env)
except:
exc_type, exc_value, tb = sys.exc_info()
# Save the traceback and attach it to the exception object
exc_lines = traceback.format_exception(exc_type,
exc_value,
tb)
exc_value.child_traceback = ''.join(exc_lines)
os.write(errpipe_write, pickle.dumps(exc_value))
# This exitcode won't be reported to applications, so it
# really doesn't matter what we return.
os._exit(255)
# Parent
if gc_was_enabled:
gc.enable()
finally:
# be sure the FD is closed no matter what
os.close(errpipe_write)
# Wait for exec to fail or succeed; possibly raising exception
data = _eintr_retry_call(os.read, errpipe_read, 1048576)
pickle_bits = []
while data:
pickle_bits.append(data)
data = _eintr_retry_call(os.read, errpipe_read, 1048576)
data = "".join(pickle_bits)
finally:
if p2cread is not None and p2cwrite is not None:
_close_in_parent(p2cread)
if c2pwrite is not None and c2pread is not None:
_close_in_parent(c2pwrite)
if errwrite is not None and errread is not None:
_close_in_parent(errwrite)
# be sure the FD is closed no matter what
os.close(errpipe_read)
if data != "":
try:
_eintr_retry_call(os.waitpid, self.pid, 0)
except OSError as e:
if e.errno != errno.ECHILD:
raise
child_exception = pickle.loads(data)
raise child_exception
def _handle_exitstatus(self, sts, _WIFSIGNALED=os.WIFSIGNALED,
_WTERMSIG=os.WTERMSIG, _WIFEXITED=os.WIFEXITED,
_WEXITSTATUS=os.WEXITSTATUS):
# This method is called (indirectly) by __del__, so it cannot
# refer to anything outside of its local scope.
if _WIFSIGNALED(sts):
self.returncode = -_WTERMSIG(sts)
elif _WIFEXITED(sts):
self.returncode = _WEXITSTATUS(sts)
else:
# Should never happen
raise RuntimeError("Unknown child exit status!")
def _internal_poll(self, _deadstate=None, _waitpid=os.waitpid,
_WNOHANG=os.WNOHANG, _os_error=os.error, _ECHILD=errno.ECHILD):
"""Check if child process has terminated. Returns returncode
attribute.
This method is called by __del__, so it cannot reference anything
outside of the local scope (nor can any methods it calls).
"""
if self.returncode is None:
try:
pid, sts = _waitpid(self.pid, _WNOHANG)
if pid == self.pid:
self._handle_exitstatus(sts)
except _os_error as e:
if _deadstate is not None:
self.returncode = _deadstate
if e.errno == _ECHILD:
# This happens if SIGCLD is set to be ignored or
# waiting for child processes has otherwise been
# disabled for our process. This child is dead, we
# can't get the status.
# http://bugs.python.org/issue15756
self.returncode = 0
return self.returncode
def wait(self):
"""Wait for child process to terminate. Returns returncode
attribute."""
while self.returncode is None:
try:
pid, sts = _eintr_retry_call(os.waitpid, self.pid, 0)
except OSError as e:
if e.errno != errno.ECHILD:
raise
# This happens if SIGCLD is set to be ignored or waiting
# for child processes has otherwise been disabled for our
# process. This child is dead, we can't get the status.
pid = self.pid
sts = 0
# Check the pid and loop as waitpid has been known to return
# 0 even without WNOHANG in odd situations. issue14396.
if pid == self.pid:
self._handle_exitstatus(sts)
return self.returncode
def _communicate(self, input):
if self.stdin:
# Flush stdio buffer. This might block, if the user has
# been writing to .stdin in an uncontrolled fashion.
self.stdin.flush()
if not input:
self.stdin.close()
if _has_poll:
stdout, stderr = self._communicate_with_poll(input)
else:
stdout, stderr = self._communicate_with_select(input)
# All data exchanged. Translate lists into strings.
if stdout is not None:
stdout = ''.join(stdout)
if stderr is not None:
stderr = ''.join(stderr)
# Translate newlines, if requested. We cannot let the file
# object do the translation: It is based on stdio, which is
# impossible to combine with select (unless forcing no
# buffering).
if self.universal_newlines and hasattr(file, 'newlines'):
if stdout:
stdout = self._translate_newlines(stdout)
if stderr:
stderr = self._translate_newlines(stderr)
self.wait()
return (stdout, stderr)
def _communicate_with_poll(self, input):
stdout = None # Return
stderr = None # Return
fd2file = {}
fd2output = {}
poller = select.poll()
def register_and_append(file_obj, eventmask):
poller.register(file_obj.fileno(), eventmask)
fd2file[file_obj.fileno()] = file_obj
def close_unregister_and_remove(fd):
poller.unregister(fd)
fd2file[fd].close()
fd2file.pop(fd)
if self.stdin and input:
register_and_append(self.stdin, select.POLLOUT)
select_POLLIN_POLLPRI = select.POLLIN | select.POLLPRI
if self.stdout:
register_and_append(self.stdout, select_POLLIN_POLLPRI)
fd2output[self.stdout.fileno()] = stdout = []
if self.stderr:
register_and_append(self.stderr, select_POLLIN_POLLPRI)
fd2output[self.stderr.fileno()] = stderr = []
input_offset = 0
while fd2file:
try:
ready = poller.poll()
except select.error, e:
if e.args[0] == errno.EINTR:
continue
raise
for fd, mode in ready:
if mode & select.POLLOUT:
chunk = input[input_offset : input_offset + _PIPE_BUF]
try:
input_offset += os.write(fd, chunk)
except OSError as e:
if e.errno == errno.EPIPE:
close_unregister_and_remove(fd)
else:
raise
else:
if input_offset >= len(input):
close_unregister_and_remove(fd)
elif mode & select_POLLIN_POLLPRI:
data = os.read(fd, 4096)
if not data:
close_unregister_and_remove(fd)
fd2output[fd].append(data)
else:
# Ignore hang up or errors.
close_unregister_and_remove(fd)
return (stdout, stderr)
def _communicate_with_select(self, input):
read_set = []
write_set = []
stdout = None # Return
stderr = None # Return
if self.stdin and input:
write_set.append(self.stdin)
if self.stdout:
read_set.append(self.stdout)
stdout = []
if self.stderr:
read_set.append(self.stderr)
stderr = []
input_offset = 0
while read_set or write_set:
try:
rlist, wlist, xlist = select.select(read_set, write_set, [])
except select.error, e:
if e.args[0] == errno.EINTR:
continue
raise
if self.stdin in wlist:
chunk = input[input_offset : input_offset + _PIPE_BUF]
try:
bytes_written = os.write(self.stdin.fileno(), chunk)
except OSError as e:
if e.errno == errno.EPIPE:
self.stdin.close()
write_set.remove(self.stdin)
else:
raise
else:
input_offset += bytes_written
if input_offset >= len(input):
self.stdin.close()
write_set.remove(self.stdin)
if self.stdout in rlist:
data = os.read(self.stdout.fileno(), 1024)
if data == "":
self.stdout.close()
read_set.remove(self.stdout)
stdout.append(data)
if self.stderr in rlist:
data = os.read(self.stderr.fileno(), 1024)
if data == "":
self.stderr.close()
read_set.remove(self.stderr)
stderr.append(data)
return (stdout, stderr)
def send_signal(self, sig):
"""Send a signal to the process
"""
os.kill(self.pid, sig)
def terminate(self):
"""Terminate the process with SIGTERM
"""
self.send_signal(signal.SIGTERM)
def kill(self):
"""Kill the process with SIGKILL
"""
self.send_signal(signal.SIGKILL)
def _demo_posix():
#
# Example 1: Simple redirection: Get process list
#
plist = Popen(["ps"], stdout=PIPE).communicate()[0]
print "Process list:"
print plist
#
# Example 2: Change uid before executing child
#
if os.getuid() == 0:
p = Popen(["id"], preexec_fn=lambda: os.setuid(100))
p.wait()
#
# Example 3: Connecting several subprocesses
#
print "Looking for 'hda'..."
p1 = Popen(["dmesg"], stdout=PIPE)
p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
print repr(p2.communicate()[0])
#
# Example 4: Catch execution error
#
print
print "Trying a weird file..."
try:
print Popen(["/this/path/does/not/exist"]).communicate()
except OSError, e:
if e.errno == errno.ENOENT:
print "The file didn't exist. I thought so..."
print "Child traceback:"
print e.child_traceback
else:
print "Error", e.errno
else:
print >>sys.stderr, "Gosh. No error."
def _demo_windows():
#
# Example 1: Connecting several subprocesses
#
print "Looking for 'PROMPT' in set output..."
p1 = Popen("set", stdout=PIPE, shell=True)
p2 = Popen('find "PROMPT"', stdin=p1.stdout, stdout=PIPE)
print repr(p2.communicate()[0])
#
# Example 2: Simple execution of program
#
print "Executing calc..."
p = Popen("calc")
p.wait()
if __name__ == "__main__":
if mswindows:
_demo_windows()
else:
_demo_posix()
|
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import openerp
import openerp.tools as tools
from openerp.osv import osv
from openerp.osv import fields
from openerp.tools.safe_eval import safe_eval as eval
from openerp import SUPERUSER_ID
from openerp.tools.translate import _
class mail_group(osv.Model):
""" A mail_group is a collection of users sharing messages in a discussion
group. The group mechanics are based on the followers. """
_description = 'Discussion group'
_name = 'mail.group'
_mail_flat_thread = False
_inherit = ['mail.thread']
_inherits = {'mail.alias': 'alias_id'}
def _get_image(self, cr, uid, ids, name, args, context=None):
result = {}
for obj in self.browse(cr, uid, ids, context=context):
result[obj.id] = tools.image_get_resized_images(obj.image)
return result
def _set_image(self, cr, uid, id, name, value, args, context=None):
return self.write(cr, uid, [id], {'image': tools.image_resize_image_big(value)}, context=context)
_columns = {
'name': fields.char('Name', required=True, translate=True),
'description': fields.text('Description'),
'menu_id': fields.many2one('ir.ui.menu', string='Related Menu', required=True, ondelete="cascade"),
'public': fields.selection([('public', 'Public'), ('private', 'Private'), ('groups', 'Selected Group Only')], 'Privacy', required=True,
            help='This group is visible to non-members. \
Invisible groups can add members through the invite button.'),
'group_public_id': fields.many2one('res.groups', string='Authorized Group'),
'group_ids': fields.many2many('res.groups', rel='mail_group_res_group_rel',
id1='mail_group_id', id2='groups_id', string='Auto Subscription',
help="Members of those groups will automatically added as followers. "\
"Note that they will be able to manage their subscription manually "\
"if necessary."),
# image: all image fields are base64 encoded and PIL-supported
'image': fields.binary("Photo",
help="This field holds the image used as photo for the group, limited to 1024x1024px."),
'image_medium': fields.function(_get_image, fnct_inv=_set_image,
string="Medium-sized photo", type="binary", multi="_get_image",
store={
'mail.group': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
},
help="Medium-sized photo of the group. It is automatically "\
"resized as a 128x128px image, with aspect ratio preserved. "\
"Use this field in form views or some kanban views."),
'image_small': fields.function(_get_image, fnct_inv=_set_image,
string="Small-sized photo", type="binary", multi="_get_image",
store={
'mail.group': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
},
help="Small-sized photo of the group. It is automatically "\
"resized as a 64x64px image, with aspect ratio preserved. "\
"Use this field anywhere a small image is required."),
'alias_id': fields.many2one('mail.alias', 'Alias', ondelete="restrict", required=True,
help="The email address associated with this group. New emails received will automatically "
"create new topics."),
}
def _get_default_employee_group(self, cr, uid, context=None):
ref = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'base', 'group_user')
return ref and ref[1] or False
def _get_default_image(self, cr, uid, context=None):
image_path = openerp.modules.get_module_resource('mail', 'static/src/img', 'groupdefault.png')
return tools.image_resize_image_big(open(image_path, 'rb').read().encode('base64'))
_defaults = {
'public': 'groups',
'group_public_id': _get_default_employee_group,
'image': _get_default_image,
}
def _generate_header_description(self, cr, uid, group, context=None):
header = ''
if group.description:
header = '%s' % group.description
if group.alias_id and group.alias_name and group.alias_domain:
if header:
header = '%s<br/>' % header
return '%sGroup email gateway: %s@%s' % (header, group.alias_name, group.alias_domain)
return header
def _subscribe_users(self, cr, uid, ids, context=None):
for mail_group in self.browse(cr, uid, ids, context=context):
partner_ids = []
for group in mail_group.group_ids:
partner_ids += [user.partner_id.id for user in group.users]
self.message_subscribe(cr, uid, ids, partner_ids, context=context)
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
# get parent menu
menu_parent = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'mail', 'mail_group_root')
menu_parent = menu_parent and menu_parent[1] or False
# Create menu id
mobj = self.pool.get('ir.ui.menu')
menu_id = mobj.create(cr, SUPERUSER_ID, {'name': vals['name'], 'parent_id': menu_parent}, context=context)
vals['menu_id'] = menu_id
# Create group and alias
create_context = dict(context, alias_model_name=self._name, alias_parent_model_name=self._name, mail_create_nolog=True)
mail_group_id = super(mail_group, self).create(cr, uid, vals, context=create_context)
group = self.browse(cr, uid, mail_group_id, context=context)
self.pool.get('mail.alias').write(cr, uid, [group.alias_id.id], {"alias_force_thread_id": mail_group_id, 'alias_parent_thread_id': mail_group_id}, context)
group = self.browse(cr, uid, mail_group_id, context=context)
# Create client action for this group and link the menu to it
ref = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'mail', 'action_mail_group_feeds')
if ref:
search_ref = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'mail', 'view_message_search')
params = {
'search_view_id': search_ref and search_ref[1] or False,
'domain': [
('model', '=', 'mail.group'),
('res_id', '=', mail_group_id),
],
'context': {
'default_model': 'mail.group',
'default_res_id': mail_group_id,
},
'res_model': 'mail.message',
'thread_level': 1,
'header_description': self._generate_header_description(cr, uid, group, context=context),
'view_mailbox': True,
'compose_placeholder': 'Send a message to the group',
}
cobj = self.pool.get('ir.actions.client')
newref = cobj.copy(cr, SUPERUSER_ID, ref[1], default={'params': str(params), 'name': vals['name']}, context=context)
mobj.write(cr, SUPERUSER_ID, menu_id, {'action': 'ir.actions.client,' + str(newref), 'mail_group_id': mail_group_id}, context=context)
if vals.get('group_ids'):
self._subscribe_users(cr, uid, [mail_group_id], context=context)
return mail_group_id
def unlink(self, cr, uid, ids, context=None):
groups = self.browse(cr, uid, ids, context=context)
alias_ids = [group.alias_id.id for group in groups if group.alias_id]
menu_ids = [group.menu_id.id for group in groups if group.menu_id]
# Delete mail_group
try:
all_emp_group = self.pool['ir.model.data'].get_object_reference(cr, uid, 'mail', 'group_all_employees')[1]
except ValueError:
all_emp_group = None
if all_emp_group and all_emp_group in ids:
raise osv.except_osv(_('Warning!'), _('You cannot delete those groups, as the Whole Company group is required by other modules.'))
res = super(mail_group, self).unlink(cr, uid, ids, context=context)
# Cascade-delete mail aliases as well, as they should not exist without the mail group.
self.pool.get('mail.alias').unlink(cr, SUPERUSER_ID, alias_ids, context=context)
# Cascade-delete menu entries as well
self.pool.get('ir.ui.menu').unlink(cr, SUPERUSER_ID, menu_ids, context=context)
return res
def write(self, cr, uid, ids, vals, context=None):
result = super(mail_group, self).write(cr, uid, ids, vals, context=context)
if vals.get('group_ids'):
self._subscribe_users(cr, uid, ids, context=context)
# if description, name or alias is changed: update client action
if vals.get('description') or vals.get('name') or vals.get('alias_id') or vals.get('alias_name'):
cobj = self.pool.get('ir.actions.client')
            for group in self.browse(cr, uid, ids, context=context):
                action = group.menu_id.action
                new_params = action.params
                new_params['header_description'] = self._generate_header_description(cr, uid, group, context=context)
                cobj.write(cr, SUPERUSER_ID, [action.id], {'params': str(new_params)}, context=context)
# if name is changed: update menu
if vals.get('name'):
mobj = self.pool.get('ir.ui.menu')
mobj.write(cr, SUPERUSER_ID,
[group.menu_id.id for group in self.browse(cr, uid, ids, context=context)],
{'name': vals.get('name')}, context=context)
return result
def action_follow(self, cr, uid, ids, context=None):
""" Wrapper because message_subscribe_users take a user_ids=None
that receive the context without the wrapper. """
return self.message_subscribe_users(cr, uid, ids, context=context)
def action_unfollow(self, cr, uid, ids, context=None):
""" Wrapper because message_unsubscribe_users take a user_ids=None
that receive the context without the wrapper. """
return self.message_unsubscribe_users(cr, uid, ids, context=context)
def get_suggested_thread(self, cr, uid, removed_suggested_threads=None, context=None):
"""Show the suggestion of groups if display_groups_suggestions if the
user perference allows it."""
user = self.pool.get('res.users').browse(cr, uid, uid, context)
if not user.display_groups_suggestions:
return []
else:
return super(mail_group, self).get_suggested_thread(cr, uid, removed_suggested_threads, context)
def message_get_email_values(self, cr, uid, id, notif_mail=None, context=None):
res = super(mail_group, self).message_get_email_values(cr, uid, id, notif_mail=notif_mail, context=context)
group = self.browse(cr, uid, id, context=context)
headers = {}
if res.get('headers'):
try:
headers.update(eval(res['headers']))
except Exception:
pass
headers['Precedence'] = 'list'
# avoid out-of-office replies from MS Exchange
# http://blogs.technet.com/b/exchange/archive/2006/10/06/3395024.aspx
headers['X-Auto-Response-Suppress'] = 'OOF'
if group.alias_domain and group.alias_name:
headers['List-Id'] = '%s.%s' % (group.alias_name, group.alias_domain)
headers['List-Post'] = '<mailto:%s@%s>' % (group.alias_name, group.alias_domain)
# Avoid users thinking it was a personal message
# X-Forge-To: will replace To: after SMTP envelope is determined by ir.mail.server
list_to = '"%s" <%s@%s>' % (group.name, group.alias_name, group.alias_domain)
headers['X-Forge-To'] = list_to
res['headers'] = repr(headers)
return res
|
|
# Copyright 2016 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Jsonschema validation of cloud custodian config.
We walk through the various class registries of resource types
and assemble and generate the schema from them.
We do some specialization to reduce overall schema size via
reference usage, although in some cases we prefer copies, due to
issues with inheritance via reference (allowedProperties and enum
extension).
All filters and actions are annotated with their schema, typically
using the utils.type_schema function.
"""
from collections import Counter
import json
import logging
from jsonschema import Draft4Validator as Validator
from jsonschema.exceptions import best_match
from c7n.manager import resources
from c7n.filters import ValueFilter, EventFilter, AgeFilter
from c7n.offhours import Time as TimeFilter
def validate(data):
schema = generate()
Validator.check_schema(schema)
validator = Validator(schema)
errors = list(validator.iter_errors(data))
if not errors:
counter = Counter([p['name'] for p in data.get('policies')])
dupes = []
for k, v in counter.items():
if v > 1:
dupes.append(k)
if dupes:
return [ValueError(
"Only one policy with a given name allowed, duplicates: %s" % (
", ".join(dupes)))]
return []
try:
return [specific_error(errors[0])]
except Exception:
logging.exception(
"specific_error failed, traceback, followed by fallback")
return filter(None, [
errors[0],
best_match(validator.iter_errors(data)),
])
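# A minimal usage sketch (hypothetical policy document; assumes the resource
# registry has been populated, e.g. via c7n.resources.load_resources() as in
# the __main__ block below):
#
#   errors = validate({'policies': [
#       {'name': 'ec2-example', 'resource': 'ec2'}]})
#   if errors:
#       raise errors[0]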
def specific_error(error):
"""Try to find the best error for humans to resolve
The jsonschema.exceptions.best_match error is based purely on a
mix of a strong match (ie. not anyOf, oneOf) and schema depth,
this often yields odd results that are semantically confusing,
instead we can use a bit of structural knowledge of schema to
provide better results.
"""
if error.validator not in ('anyOf', 'oneOf'):
return error
r = t = None
if isinstance(error.instance, dict):
t = error.instance.get('type')
r = error.instance.get('resource')
if r is not None:
found = None
for idx, v in enumerate(error.validator_value):
if r in v['$ref'].rsplit('/', 2):
found = idx
if found is not None:
# error context is a flat list of all validation
# failures, we have to index back to the policy
# of interest.
for e in error.context:
# resource policies have a fixed path from
# the top of the schema
if e.absolute_schema_path[4] == found:
return specific_error(e)
return specific_error(error.context[idx])
if t is not None:
found = None
for idx, v in enumerate(error.validator_value):
if '$ref' in v and v['$ref'].endswith(t):
found = idx
if found is not None:
# Try to walk back an element/type ref to the specific
# error
spath = list(error.context[0].absolute_schema_path)
spath.reverse()
slen = len(spath)
if 'oneOf' in spath:
idx = spath.index('oneOf')
elif 'anyOf' in spath:
idx = spath.index('anyOf')
vidx = slen - idx
for e in error.context:
if e.absolute_schema_path[vidx] == found:
return e
return error
def generate(resource_types=()):
resource_defs = {}
definitions = {
'resources': resource_defs,
'filters': {
'value': ValueFilter.schema,
'event': EventFilter.schema,
'time': TimeFilter.schema,
'age': AgeFilter.schema,
# Shortcut form of value filter as k=v
'valuekv': {
'type': 'object',
'minProperties': 1,
'maxProperties': 1},
},
'policy': {
'type': 'object',
'required': ['name', 'resource'],
'additionalProperties': False,
'properties': {
'name': {
'type': 'string',
'pattern': "^[A-z][A-z0-9]*(-[A-z0-9]*[A-z][A-z0-9]*)*$"},
'region': {'type': 'string'},
'resource': {'type': 'string'},
'max-resources': {'type': 'integer'},
'comment': {'type': 'string'},
'comments': {'type': 'string'},
'description': {'type': 'string'},
'tags': {'type': 'array', 'items': {'type': 'string'}},
'mode': {'$ref': '#/definitions/policy-mode'},
'actions': {
'type': 'array',
},
'filters': {
'type': 'array'
},
#
# unclear if this should be allowed: it kills resource
# cache coherency between policies, and we need to
# generalize server-side query mechanisms; currently
# this is only used for ec2 instance queries. Limitations
# in json schema inheritance prevent us from doing this
# on a type-specific basis http://goo.gl/8UyRvQ
'query': {
'type': 'array', 'items': {
'type': 'object',
'minProperties': 1,
'maxProperties': 1}}
},
},
'policy-mode': {
'type': 'object',
'required': ['type'],
'properties': {
'type': {
'enum': [
'cloudtrail',
'ec2-instance-state',
'asg-instance-state',
'periodic'
]},
'events': {'type': 'array', 'items': {
'oneOf': [
{'type': 'string'},
{'type': 'object',
'required': ['event', 'source', 'ids'],
'properties': {
'source': {'type': 'string'},
'ids': {'type': 'string'},
'event': {'type': 'string'}}}]
}}
},
},
}
resource_refs = []
for type_name, resource_type in resources.items():
if resource_types and type_name not in resource_types:
continue
resource_refs.append(
process_resource(type_name, resource_type, resource_defs))
schema = {
'$schema': 'http://json-schema.org/schema#',
'id': 'http://schema.cloudcustodian.io/v0/custodian.json',
'definitions': definitions,
'type': 'object',
'required': ['policies'],
'additionalProperties': False,
'properties': {
'vars': {'type': 'object'},
'policies': {
'type': 'array',
'additionalItems': False,
'items': {'anyOf': resource_refs}
}
}
}
return schema
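# For orientation, the generated document looks roughly like this (illustrative
# excerpt, heavily abbreviated):
#   {'$schema': 'http://json-schema.org/schema#',
#    'id': 'http://schema.cloudcustodian.io/v0/custodian.json',
#    'definitions': {'filters': {...}, 'policy': {...}, 'policy-mode': {...},
#                    'resources': {'<type>': {'actions': {...}, 'filters': {...},
#                                             'policy': {...}}}},
#    'type': 'object', 'required': ['policies'],
#    'properties': {'vars': {...}, 'policies': {'items': {'anyOf': [...]}}}}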
def process_resource(type_name, resource_type, resource_defs):
r = resource_defs.setdefault(type_name, {'actions': {}, 'filters': {}})
seen_actions = set() # Aliases get processed once
action_refs = []
for action_name, a in resource_type.action_registry.items():
if a in seen_actions:
continue
else:
seen_actions.add(a)
r['actions'][action_name] = a.schema
action_refs.append(
{'$ref': '#/definitions/resources/%s/actions/%s' % (
type_name, action_name)})
# one word action shortcuts
action_refs.append(
{'enum': resource_type.action_registry.keys()})
nested_filter_refs = []
filters_seen = set()
for k, v in sorted(resource_type.filter_registry.items()):
if v in filters_seen:
continue
else:
filters_seen.add(v)
nested_filter_refs.append(
{'$ref': '#/definitions/resources/%s/filters/%s' % (
type_name, k)})
nested_filter_refs.append(
{'$ref': '#/definitions/filters/valuekv'})
filter_refs = []
filters_seen = set() # for aliases
for filter_name, f in sorted(resource_type.filter_registry.items()):
if f in filters_seen:
continue
else:
filters_seen.add(f)
if filter_name in ('or', 'and'):
continue
elif filter_name == 'value':
r['filters'][filter_name] = {
'$ref': '#/definitions/filters/value'}
r['filters']['valuekv'] = {
'$ref': '#/definitions/filters/valuekv'}
elif filter_name == 'event':
r['filters'][filter_name] = {
'$ref': '#/definitions/filters/event'}
elif filter_name == 'or':
r['filters'][filter_name] = {
'type': 'array',
'items': {'anyOf': nested_filter_refs}}
elif filter_name == 'and':
r['filters'][filter_name] = {
'type': 'array',
'items': {'anyOf': nested_filter_refs}}
else:
r['filters'][filter_name] = f.schema
filter_refs.append(
{'$ref': '#/definitions/resources/%s/filters/%s' % (
type_name, filter_name)})
filter_refs.append(
{'$ref': '#/definitions/filters/valuekv'})
# one word filter shortcuts
filter_refs.append(
{'enum': resource_type.filter_registry.keys()})
resource_policy = {
'allOf': [
{'$ref': '#/definitions/policy'},
{'properties': {
'resource': {'enum': [type_name]},
'filters': {
'type': 'array',
'items': {'anyOf': filter_refs}},
'actions': {
'type': 'array',
'items': {'anyOf': action_refs}}}},
]
}
if type_name == 'ec2':
resource_policy['allOf'][1]['properties']['query'] = {}
r['policy'] = resource_policy
return {'$ref': '#/definitions/resources/%s/policy' % type_name}
if __name__ == '__main__':
from c7n.resources import load_resources
load_resources()
# dump our schema
# $ python -m c7n.schema
try:
print(json.dumps(generate(), indent=2))
except:
import traceback, pdb, sys
traceback.print_exc()
pdb.post_mortem(sys.exc_info()[-1])
|
|
# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:[email protected]
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""pytest is a tool that eases test running and debugging.
To be able to use pytest, you should either write tests using
the logilab.common.testlib framework or the unittest module of
Python's standard library.
You can customize pytest's behaviour by defining a ``pytestconf.py`` file
somewhere in your test directory. In this file, you can add options or
change the way tests are run.
To add command line options, you must define an ``update_parser`` function in
your ``pytestconf.py`` file. The function must accept a single parameter
that will be the OptionParser instance to customize.
If you wish to customize the tester, you'll have to define a class named
``CustomPyTester``. This class should extend the default `PyTester` class
defined in the pytest module. Take a look at the `PyTester` and `DjangoTester`
classes for more information about what can be done.
For instance, if you wish to add a custom -l option to specify a loglevel, you
could define the following ``pytestconf.py`` file ::
import logging
from logilab.common.pytest import PyTester
def update_parser(parser):
parser.add_option('-l', '--loglevel', dest='loglevel', action='store',
choices=('debug', 'info', 'warning', 'error', 'critical'),
default='critical', help="the default log level possible choices are "
"('debug', 'info', 'warning', 'error', 'critical')")
return parser
class CustomPyTester(PyTester):
def __init__(self, cvg, options):
super(CustomPyTester, self).__init__(cvg, options)
loglevel = options.loglevel.upper()
logger = logging.getLogger('erudi')
logger.setLevel(logging.getLevelName(loglevel))
In your TestCase class you can then get the value of a specific option with
the ``optval`` method::
class MyTestCase(TestCase):
def test_foo(self):
loglevel = self.optval('loglevel')
# ...
You can also tag your tests for finer filtering
with those tags::
from logilab.common.testlib import tag, TestCase
class Exemple(TestCase):
@tag('rouge', 'carre')
def toto(self):
pass
@tag('carre', 'vert')
def tata(self):
pass
@tag('rouge')
def titi(test):
pass
You can then filter the functions with a simple python expression:
* ``toto`` and ``titi`` match ``rouge``
* ``toto``, ``tata`` and ``titi`` match ``rouge or carre``
* ``tata`` and ``titi`` match ``rouge ^ carre``
* ``titi`` matches ``rouge and not carre``
"""
__docformat__ = "restructuredtext en"
PYTEST_DOC = """%prog [OPTIONS] [testfile [testpattern]]
examples:
pytest path/to/mytests.py
pytest path/to/mytests.py TheseTests
pytest path/to/mytests.py TheseTests.test_thisone
pytest path/to/mytests.py -m '(not long and database) or regr'
pytest one (will run both test_thisone and test_thatone)
pytest path/to/mytests.py -s not (will skip test_notthisone)
pytest --coverage test_foo.py
(only if logilab.devtools is available)
"""
import os, sys, re
import os.path as osp
from time import time, clock
import warnings
from logilab.common.fileutils import abspath_listdir
from logilab.common import testlib
import doctest
import unittest
import imp
import __builtin__
try:
import django
from logilab.common.modutils import modpath_from_file, load_module_from_modpath
DJANGO_FOUND = True
except ImportError:
DJANGO_FOUND = False
CONF_FILE = 'pytestconf.py'
## coverage hacks, do not read this, do not read this, do not read this
# hey, but this is an aspect, right ?!!!
class TraceController(object):
nesting = 0
def pause_tracing(cls):
if not cls.nesting:
cls.tracefunc = staticmethod(getattr(sys, '__settrace__', sys.settrace))
cls.oldtracer = getattr(sys, '__tracer__', None)
sys.__notrace__ = True
cls.tracefunc(None)
cls.nesting += 1
pause_tracing = classmethod(pause_tracing)
def resume_tracing(cls):
cls.nesting -= 1
assert cls.nesting >= 0
if not cls.nesting:
cls.tracefunc(cls.oldtracer)
delattr(sys, '__notrace__')
resume_tracing = classmethod(resume_tracing)
pause_tracing = TraceController.pause_tracing
resume_tracing = TraceController.resume_tracing
def nocoverage(func):
if hasattr(func, 'uncovered'):
return func
func.uncovered = True
def not_covered(*args, **kwargs):
pause_tracing()
try:
return func(*args, **kwargs)
finally:
resume_tracing()
not_covered.uncovered = True
return not_covered
## end of coverage hacks
# monkeypatch unittest and doctest (ouch !)
unittest.TestCase = testlib.TestCase
unittest.main = testlib.unittest_main
unittest._TextTestResult = testlib.SkipAwareTestResult
unittest.TextTestRunner = testlib.SkipAwareTextTestRunner
unittest.TestLoader = testlib.NonStrictTestLoader
unittest.TestProgram = testlib.SkipAwareTestProgram
if sys.version_info >= (2, 4):
doctest.DocTestCase.__bases__ = (testlib.TestCase,)
else:
unittest.FunctionTestCase.__bases__ = (testlib.TestCase,)
TESTFILE_RE = re.compile("^((unit)?test.*|smoketest)\.py$")
def this_is_a_testfile(filename):
"""returns True if `filename` seems to be a test file"""
return TESTFILE_RE.match(osp.basename(filename))
TESTDIR_RE = re.compile("^(unit)?tests?$")
def this_is_a_testdir(dirpath):
"""returns True if `filename` seems to be a test directory"""
return TESTDIR_RE.match(osp.basename(dirpath))
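# For illustration, given the regexps above: "test_utils.py", "unittest_foo.py"
# and "smoketest.py" are recognised as test files, while directories named
# "test", "tests", "unittest" or "unittests" are recognised as test directories.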
def load_pytest_conf(path, parser):
"""loads a ``pytestconf.py`` file and update default parser
and / or tester.
"""
namespace = {}
execfile(path, namespace)
if 'update_parser' in namespace:
namespace['update_parser'](parser)
return namespace.get('CustomPyTester', PyTester)
def project_root(parser, projdir=os.getcwd()):
"""try to find project's root and add it to sys.path"""
curdir = osp.abspath(projdir)
previousdir = curdir
testercls = PyTester
conf_file_path = osp.join(curdir, CONF_FILE)
if osp.isfile(conf_file_path):
testercls = load_pytest_conf(conf_file_path, parser)
while this_is_a_testdir(curdir) or \
osp.isfile(osp.join(curdir, '__init__.py')):
newdir = osp.normpath(osp.join(curdir, os.pardir))
if newdir == curdir:
break
previousdir = curdir
curdir = newdir
conf_file_path = osp.join(curdir, CONF_FILE)
if osp.isfile(conf_file_path):
testercls = load_pytest_conf(conf_file_path, parser)
return previousdir, testercls
class GlobalTestReport(object):
"""this class holds global test statistics"""
def __init__(self):
self.ran = 0
self.skipped = 0
self.failures = 0
self.errors = 0
self.ttime = 0
self.ctime = 0
self.modulescount = 0
self.errmodules = []
def feed(self, filename, testresult, ttime, ctime):
"""integrates new test information into internal statistics"""
ran = testresult.testsRun
self.ran += ran
self.skipped += len(getattr(testresult, 'skipped', ()))
self.failures += len(testresult.failures)
self.errors += len(testresult.errors)
self.ttime += ttime
self.ctime += ctime
self.modulescount += 1
if not testresult.wasSuccessful():
problems = len(testresult.failures) + len(testresult.errors)
self.errmodules.append((filename[:-3], problems, ran))
def failed_to_test_module(self, filename):
"""called when the test module could not be imported by unittest
"""
self.errors += 1
self.modulescount += 1
self.ran += 1
self.errmodules.append((filename[:-3], 1, 1))
def skip_module(self, filename):
self.modulescount += 1
self.ran += 1
self.errmodules.append((filename[:-3], 0, 0))
def __str__(self):
"""this is just presentation stuff"""
line1 = ['Ran %s test cases in %.2fs (%.2fs CPU)'
% (self.ran, self.ttime, self.ctime)]
if self.errors:
line1.append('%s errors' % self.errors)
if self.failures:
line1.append('%s failures' % self.failures)
if self.skipped:
line1.append('%s skipped' % self.skipped)
modulesok = self.modulescount - len(self.errmodules)
if self.errors or self.failures:
line2 = '%s modules OK (%s failed)' % (modulesok,
len(self.errmodules))
descr = ', '.join(['%s [%s/%s]' % info for info in self.errmodules])
line3 = '\nfailures: %s' % descr
elif modulesok:
line2 = 'All %s modules OK' % modulesok
line3 = ''
else:
return ''
return '%s\n%s%s' % (', '.join(line1), line2, line3)
def remove_local_modules_from_sys(testdir):
"""remove all modules from cache that come from `testdir`
This is used to avoid strange side-effects when using the
testall() mode of pytest.
For instance, if we run pytest on this tree::
A/test/test_utils.py
B/test/test_utils.py
we **have** to clean sys.modules to make sure the correct test_utils
module is run in B
"""
for modname, mod in sys.modules.items():
if mod is None:
continue
if not hasattr(mod, '__file__'):
# this is the case of some built-in modules like sys, imp, marshal
continue
modfile = mod.__file__
# if modfile is not an absolute path, it was probably loaded locally
# during the tests
if not osp.isabs(modfile) or modfile.startswith(testdir):
del sys.modules[modname]
class PyTester(object):
"""encapsulates testrun logic"""
def __init__(self, cvg, options):
self.report = GlobalTestReport()
self.cvg = cvg
self.options = options
self.firstwrite = True
self._errcode = None
def show_report(self):
"""prints the report and returns appropriate exitcode"""
# everything has been run, print the report
print "*" * 79
print self.report
def get_errcode(self):
# errcode set explicitly
if self._errcode is not None:
return self._errcode
return self.report.failures + self.report.errors
def set_errcode(self, errcode):
self._errcode = errcode
errcode = property(get_errcode, set_errcode)
def testall(self, exitfirst=False):
"""walks through current working directory, finds something
which can be considered as a testdir and runs every test there
"""
here = os.getcwd()
for dirname, dirs, _ in os.walk(here):
for skipped in ('CVS', '.svn', '.hg'):
if skipped in dirs:
dirs.remove(skipped)
basename = osp.basename(dirname)
if this_is_a_testdir(basename):
print "going into", dirname
# we found a testdir, let's explore it !
if not self.testonedir(dirname, exitfirst):
break
dirs[:] = []
if self.report.ran == 0:
print "no test dir found testing here:", here
# if no test was found during the visit, consider
# the local directory as a test directory even if
# it doesn't have a traditional test directory name
self.testonedir(here)
def testonedir(self, testdir, exitfirst=False):
"""finds each testfile in the `testdir` and runs it
return true when all tests has been executed, false if exitfirst and
some test has failed.
"""
for filename in abspath_listdir(testdir):
if this_is_a_testfile(filename):
if self.options.exitfirst and not self.options.restart:
# overwrite restart file
try:
restartfile = open(testlib.FILE_RESTART, "w")
restartfile.close()
except Exception, e:
print >> sys.__stderr__, "Error while overwriting \
succeeded test file :", osp.join(os.getcwd(),testlib.FILE_RESTART)
raise e
# run test and collect information
prog = self.testfile(filename, batchmode=True)
if exitfirst and (prog is None or not prog.result.wasSuccessful()):
return False
self.firstwrite = True
# clean local modules
remove_local_modules_from_sys(testdir)
return True
def testfile(self, filename, batchmode=False):
"""runs every test in `filename`
:param filename: an absolute path pointing to a unittest file
"""
here = os.getcwd()
dirname = osp.dirname(filename)
if dirname:
os.chdir(dirname)
# overwrite restart file if it has not been done already
if self.options.exitfirst and not self.options.restart and self.firstwrite:
try:
restartfile = open(testlib.FILE_RESTART, "w")
restartfile.close()
except Exception, e:
print >> sys.__stderr__, "Error while overwriting \
succeeded test file :", osp.join(os.getcwd(),testlib.FILE_RESTART)
raise e
modname = osp.basename(filename)[:-3]
try:
print >> sys.stderr, (' %s ' % osp.basename(filename)).center(70, '=')
except TypeError: # < py 2.4 bw compat
print >> sys.stderr, (' %s ' % osp.basename(filename)).center(70)
try:
tstart, cstart = time(), clock()
try:
testprog = testlib.unittest_main(modname, batchmode=batchmode, cvg=self.cvg,
options=self.options, outstream=sys.stderr)
except KeyboardInterrupt:
raise
except SystemExit, exc:
self.errcode = exc.code
raise
except testlib.TestSkipped:
print "Module skipped:", filename
self.report.skip_module(filename)
return None
except Exception:
self.report.failed_to_test_module(filename)
print >> sys.stderr, 'unhandled exception occurred while testing', modname
import traceback
traceback.print_exc(file=sys.stderr)
return None
tend, cend = time(), clock()
ttime, ctime = (tend - tstart), (cend - cstart)
self.report.feed(filename, testprog.result, ttime, ctime)
return testprog
finally:
if dirname:
os.chdir(here)
class DjangoTester(PyTester):
def load_django_settings(self, dirname):
"""try to find project's setting and load it"""
curdir = osp.abspath(dirname)
previousdir = curdir
while not osp.isfile(osp.join(curdir, 'settings.py')) and \
osp.isfile(osp.join(curdir, '__init__.py')):
newdir = osp.normpath(osp.join(curdir, os.pardir))
if newdir == curdir:
raise AssertionError('could not find settings.py')
previousdir = curdir
curdir = newdir
# late django initialization
settings = load_module_from_modpath(modpath_from_file(osp.join(curdir, 'settings.py')))
from django.core.management import setup_environ
setup_environ(settings)
settings.DEBUG = False
self.settings = settings
# add settings dir to pythonpath since it's the project's root
if curdir not in sys.path:
sys.path.insert(1, curdir)
def before_testfile(self):
# Those imports must be done **after** setup_environ was called
from django.test.utils import setup_test_environment
from django.test.utils import create_test_db
setup_test_environment()
create_test_db(verbosity=0)
self.dbname = self.settings.TEST_DATABASE_NAME
def after_testfile(self):
# Those imports must be done **after** setup_environ was called
from django.test.utils import teardown_test_environment
from django.test.utils import destroy_test_db
teardown_test_environment()
print 'destroying', self.dbname
destroy_test_db(self.dbname, verbosity=0)
def testall(self, exitfirst=False):
"""walks through current working directory, finds something
which can be considered as a testdir and runs every test there
"""
for dirname, dirs, files in os.walk(os.getcwd()):
for skipped in ('CVS', '.svn', '.hg'):
if skipped in dirs:
dirs.remove(skipped)
if 'tests.py' in files:
if not self.testonedir(dirname, exitfirst):
break
dirs[:] = []
else:
basename = osp.basename(dirname)
if basename in ('test', 'tests'):
print "going into", dirname
# we found a testdir, let's explore it !
if not self.testonedir(dirname, exitfirst):
break
dirs[:] = []
def testonedir(self, testdir, exitfirst=False):
"""finds each testfile in the `testdir` and runs it
return true when all tests has been executed, false if exitfirst and
some test has failed.
"""
# special django behaviour: if tests are split into several files,
# remove the main tests.py file and test each test file separately
testfiles = [fpath for fpath in abspath_listdir(testdir)
if this_is_a_testfile(fpath)]
if len(testfiles) > 1:
try:
testfiles.remove(osp.join(testdir, 'tests.py'))
except ValueError:
pass
for filename in testfiles:
# run test and collect information
prog = self.testfile(filename, batchmode=True)
if exitfirst and (prog is None or not prog.result.wasSuccessful()):
return False
# clean local modules
remove_local_modules_from_sys(testdir)
return True
def testfile(self, filename, batchmode=False):
"""runs every test in `filename`
:param filename: an absolute path pointing to a unittest file
"""
here = os.getcwd()
dirname = osp.dirname(filename)
if dirname:
os.chdir(dirname)
self.load_django_settings(dirname)
modname = osp.basename(filename)[:-3]
print >>sys.stderr, (' %s ' % osp.basename(filename)).center(70, '=')
try:
try:
tstart, cstart = time(), clock()
self.before_testfile()
testprog = testlib.unittest_main(modname, batchmode=batchmode, cvg=self.cvg)
tend, cend = time(), clock()
ttime, ctime = (tend - tstart), (cend - cstart)
self.report.feed(filename, testprog.result, ttime, ctime)
return testprog
except SystemExit:
raise
except Exception, exc:
import traceback
traceback.print_exc()
self.report.failed_to_test_module(filename)
print 'unhandled exception occurred while testing', modname
print 'error: %s' % exc
return None
finally:
self.after_testfile()
if dirname:
os.chdir(here)
def make_parser():
"""creates the OptionParser instance
"""
from optparse import OptionParser
parser = OptionParser(usage=PYTEST_DOC)
parser.newargs = []
def rebuild_cmdline(option, opt, value, parser):
"""carry the option to unittest_main"""
parser.newargs.append(opt)
def rebuild_and_store(option, opt, value, parser):
"""carry the option to unittest_main and store
the value on current parser
"""
parser.newargs.append(opt)
setattr(parser.values, option.dest, True)
def capture_and_rebuild(option, opt, value, parser):
warnings.simplefilter('ignore', DeprecationWarning)
rebuild_cmdline(option, opt, value, parser)
# pytest options
parser.add_option('-t', dest='testdir', default=None,
help="directory where the tests will be found")
parser.add_option('-d', dest='dbc', default=False,
action="store_true", help="enable design-by-contract")
# unittest_main options provided and passed through pytest
parser.add_option('-v', '--verbose', callback=rebuild_cmdline,
action="callback", help="Verbose output")
parser.add_option('-i', '--pdb', callback=rebuild_and_store,
dest="pdb", action="callback",
help="Enable test failure inspection (conflicts with --coverage)")
parser.add_option('-x', '--exitfirst', callback=rebuild_and_store,
dest="exitfirst", default=False,
action="callback", help="Exit on first failure "
"(only make sense when pytest run one test file)")
parser.add_option('-R', '--restart', callback=rebuild_and_store,
dest="restart", default=False,
action="callback",
help="Restart tests from where it failed (implies exitfirst) "
"(only make sense if tests previously ran with exitfirst only)")
parser.add_option('-c', '--capture', callback=capture_and_rebuild,
action="callback",
help="Captures and prints standard out/err only on errors "
"(only make sense when pytest run one test file)")
parser.add_option('--color', callback=rebuild_cmdline,
action="callback",
help="colorize tracebacks")
parser.add_option('-p', '--printonly',
# XXX: I wish I could use the callback action but it
# doesn't seem to be able to get the value
# associated to the option
action="store", dest="printonly", default=None,
help="Only prints lines matching specified pattern (implies capture) "
"(only make sense when pytest run one test file)")
parser.add_option('-s', '--skip',
# XXX: I wish I could use the callback action but it
# doesn't seem to be able to get the value
# associated to the option
action="store", dest="skipped", default=None,
help="test names matching this name will be skipped "
"to skip several patterns, use commas")
parser.add_option('-q', '--quiet', callback=rebuild_cmdline,
action="callback", help="Minimal output")
parser.add_option('-P', '--profile', default=None, dest='profile',
help="Profile execution and store data in the given file")
parser.add_option('-m', '--match', default=None, dest='tags_pattern',
help="only execute test whose tag match the current pattern")
try:
from logilab.devtools.lib.coverage import Coverage
parser.add_option('--coverage', dest="coverage", default=False,
action="store_true",
help="run tests with pycoverage (conflicts with --pdb)")
except ImportError:
pass
if DJANGO_FOUND:
parser.add_option('-J', '--django', dest='django', default=False,
action="store_true",
help='use pytest for django test cases')
return parser
def parseargs(parser):
"""Parse the command line and return (options processed), (options to pass to
unittest_main()), (explicitfile or None).
"""
# parse the command line
options, args = parser.parse_args()
if options.pdb and getattr(options, 'coverage', False):
parser.error("'pdb' and 'coverage' options are exclusive")
filenames = [arg for arg in args if arg.endswith('.py')]
if filenames:
if len(filenames) > 1:
parser.error("only one filename is acceptable")
explicitfile = filenames[0]
args.remove(explicitfile)
else:
explicitfile = None
# someone wants DBC
testlib.ENABLE_DBC = options.dbc
newargs = parser.newargs
if options.printonly:
newargs.extend(['--printonly', options.printonly])
if options.skipped:
newargs.extend(['--skip', options.skipped])
# restart implies exitfirst
if options.restart:
options.exitfirst = True
# append additional args to the new sys.argv and let unittest_main
# do the rest
newargs += args
return options, explicitfile
def run():
parser = make_parser()
rootdir, testercls = project_root(parser)
options, explicitfile = parseargs(parser)
# mock a new command line
sys.argv[1:] = parser.newargs
covermode = getattr(options, 'coverage', None)
cvg = None
if not '' in sys.path:
sys.path.insert(0, '')
if covermode:
# control_import_coverage(rootdir)
from logilab.devtools.lib.coverage import Coverage
cvg = Coverage([rootdir])
cvg.erase()
cvg.start()
if DJANGO_FOUND and options.django:
tester = DjangoTester(cvg, options)
else:
tester = testercls(cvg, options)
if explicitfile:
cmd, args = tester.testfile, (explicitfile,)
elif options.testdir:
cmd, args = tester.testonedir, (options.testdir, options.exitfirst)
else:
cmd, args = tester.testall, (options.exitfirst,)
try:
try:
if options.profile:
import hotshot
prof = hotshot.Profile(options.profile)
prof.runcall(cmd, *args)
prof.close()
print 'profile data saved in', options.profile
else:
cmd(*args)
except SystemExit:
raise
except:
import traceback
traceback.print_exc()
finally:
if covermode:
cvg.stop()
cvg.save()
tester.show_report()
if covermode:
print 'coverage information stored, use it with pycoverage -ra'
sys.exit(tester.errcode)
|
|
#!/usr/bin/env python
"""
MHC Epitope analysis
Created September 2013
Copyright (C) Damien Farrell
"""
import sys, os, shutil, string, types
import csv, glob, pickle, itertools
import re
import time, random
from collections import OrderedDict
from operator import itemgetter
#import matplotlib
#matplotlib.use('agg')
import pylab as plt
import numpy as np
import pandas as pd
import subprocess
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio import SeqIO
import base, sequtils, tepitope, utilities
home = os.path.expanduser("~")
#fix paths!
genomespath = os.path.join(home, 'epitopedata')
datadir = os.path.join(home, 'testpredictions')
def plotheatmap(df, ax=None, cmap='Blues'):
if ax==None:
fig=plt.figure()
ax=fig.add_subplot(111)
else:
fig = ax.get_figure()
df = df._get_numeric_data()
hm=ax.pcolor(df,cmap=cmap)
#fig.colorbar(hm, ax=ax)
ax.set_xticks(np.arange(0.5, len(df.columns)))
ax.set_yticks(np.arange(0.5, len(df.index)))
ax.set_xticklabels(df.columns, minor=False, fontsize=10,rotation=45)
ax.set_yticklabels(df.index, minor=False, fontsize=8)
ax.set_ylim(0, len(df.index))
hm.set_clim(0,1)
plt.tight_layout()
return
def getAllBinders(path, method='tepitope', n=3, cutoff=0.95, promiscuous=True):
"""Get all promiscuous binders from a set of proteins in path"""
print 'getting binders..'
binders = []
m=method
if m=='bcell': return #not applicable
l=9
P = base.getPredictor(m)
files = glob.glob(os.path.join(path, '*.mpk'))
#get allele specific cutoffs
P.allelecutoffs = getCutoffs(path, method, cutoff, overwrite=True)
for f in files:
df = pd.read_msgpack(f)
if promiscuous== True:
b = P.getPromiscuousBinders(data=df,n=n)
else:
b = P.getBinders(data=df)
#print b[:5]
binders.append(b)
result = pd.concat(binders)
result['start'] = result.pos
result['end'] = result.pos+result.peptide.str.len()
return result
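# A minimal usage sketch (hypothetical path; assumes the directory holds *.mpk
# prediction files such as those written by testrun() below):
#   b = getAllBinders('test', method='tepitope', n=3)
#   cl = findClusters(b, 'tepitope', dist=9, minsize=3)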
def getCutoffs(path, method, q=0.98, overwrite=False):
"""Get global cutoffs for predictions in path"""
quantfile = os.path.join(path,'quantiles.csv')
if not os.path.exists(quantfile) or overwrite==True:
base.getScoreDistributions(method, path)
quantiles = pd.read_csv(quantfile,index_col=0)
cutoffs = dict(quantiles.ix[q])
return cutoffs
def getNmer(df, n=20, key='translation'):
"""Get 20mer peptide"""
def getseq(x):
n=20
size=len(x[key])
if size<n:
o = int((n-size)/2.0)+1
s = x[key][x.start-o:x.end+o][:20]
else:
s = x[key][x.start:x.end]
return s
x = df.apply(getseq,1)
return x
def getOverlappingBinders(binders1, binders2, label='overlap'):
"""Overlap for binders with any set of peptides with start/end cols"""
new=[]
def overlap(x,b):
f = b[(b.pos>x.start) & (b.pos<x.end)]
#print x.locus_tag,x.start,x.end,x.peptide,len(f) #,f.peptide,f.pos
return len(f)
for n,df in binders1.groupby('name'):
b = binders2[binders2.name==n]
df[label] = df.apply(lambda r: overlap(r,b),axis=1)
new.append(df)
result = pd.concat(new)
print '%s with overlapping binders' %len(result[result[label]>0])
return result
def getOrthologs(seq,expect=10,hitlist_size=400,equery=None):
"""Fetch orthologous sequences using blast and return the records
as a dataframe"""
from Bio.Blast import NCBIXML,NCBIWWW
from Bio import Entrez, SeqIO
Entrez.email = "[email protected]"
#entrez_query = "mycobacterium[orgn]"
#db = '/local/blast/nr'
#SeqIO.write(SeqRecord(Seq(seq)), 'tempseq.faa', "fasta")
#sequtils.doLocalBlast(db, 'tempseq.faa', output='my_blast.xml', maxseqs=100, evalue=expect)
try:
print 'running blast..'
result_handle = NCBIWWW.qblast("blastp", "nr", seq, expect=expect,
hitlist_size=hitlist_size, entrez_query=equery)
time.sleep(2)
except:
print 'blast timeout'
return
savefile = open("my_blast.xml", "w")
savefile.write(result_handle.read())
savefile.close()
result_handle = open("my_blast.xml")
df = sequtils.getBlastResults(result_handle)
df['accession'] = df.subj.apply(lambda x: x.split('|')[3])
df['definition'] = df.subj.apply(lambda x: x.split('|')[4])
df = df.drop(['subj','positive','query_length','score'],1)
print len(df)
df.drop_duplicates(subset=['definition'], inplace=True)
df = df[df['perc_ident']!=100]
print len(df)
#df = getAlignedBlastResults(df)
return df
def getAlignedBlastResults(df,aln=None,idkey='accession',productkey='definition'):
"""Get gapped alignment from blast results """
sequtils.dataframe2Fasta(df, idkey=idkey, seqkey='sequence',
productkey=productkey, outfile='blast_found.faa')
aln = sequtils.muscleAlignment("blast_found.faa")
alnrows = [[a.id,str(a.seq)] for a in aln]
alndf = pd.DataFrame(alnrows,columns=['accession','seq'])
#res = df.merge(alndf, left_index=True, right_index=True)
res = df.merge(alndf, on=['accession'])
res = res.drop('sequence',1)
#get rid of duplicate hits
#res.drop_duplicates(subset=['definition','seq'], inplace=True)
res = res.sort('identity',ascending=False)
print '%s hits, %s filtered' %(len(df), len(res))
return res
def setBlastLink(df):
def makelink(x):
return '<a href=http://www.ncbi.nlm.nih.gov/protein/%s> %s </a>' %(x,x)
df['accession'] = df.accession.apply(makelink)
return df
def alignment2Dataframe(aln):
"""Blast results alignment 2 dataframe for making tables"""
alnrows = [[a.id,str(a.seq)] for a in aln]
df = pd.DataFrame(alnrows,columns=['name','seq'])
return df
def findClusters(binders, method, dist=None, minsize=3,
genome=None):
"""Get clusters of binders for all predictions"""
C=[]
grps = list(binders.groupby('name'))
print '%s proteins with binders' %len(grps)
length = len(binders.head(1).peptide.max())
if dist==None:
dist = length+1
print 'using dist for clusters: %s' %dist
for n,b in grps:
if len(b)==0: continue
clusts = base.dbscan(b,dist=dist,minsize=minsize)
if len(clusts) == 0:
continue
for c in clusts:
gaps = [c[i]-c[i-1] for i in range(1,len(c))]
C.append([n,min(c),max(c)+length,len(c)])
if len(C)==0:
print 'no clusters'
return pd.DataFrame()
x = pd.DataFrame(C,columns=['name','start','end','binders'])
x['clustersize'] = (x.end-x.start)
x['density'] = x.binders/(x.end-x.start)
x['method'] = method
if genome is not None:
temp = x.merge(genome[['locus_tag','gene','translation']],
left_on='name',right_on='locus_tag')
x['peptide'] = getNmer(temp)
x = x.sort(['binders','density'],ascending=False)
print '%s clusters found in %s proteins' %(len(x),len(x.groupby('name')))
print
return x
def genomeAnalysis(datadir,label,gname,method):
"""this method should be made independent of web app paths etc"""
path = os.path.join(datadir, '%s/%s/%s' %(label,gname,method))
#path='test'
gfile = os.path.join(genomespath,'%s.gb' %gname)
g = sequtils.genbank2Dataframe(gfile, cds=True)
b = getAllBinders(path, method=method, n=5)
P = base.getPredictor(method)
res = b.groupby('name').agg({P.scorekey:[np.mean,np.size,np.max]}).sort()
res.columns = res.columns.get_level_values(1)
res = res.merge(g[['locus_tag','length','gene','product','order']],
left_index=True,right_on='locus_tag')
res['perc'] = res['size']/res.length*100
res = res.sort('perc',ascending=False)
top = b.groupby('peptide').agg({P.scorekey:np.mean,'allele':np.max,
'name': lambda x: x}).reset_index()
top = top.sort(P.scorekey,ascending=P.rankascending)
cl = findClusters(b, method, dist=9, minsize=3)
if cl is not None:
gc = cl.groupby('name').agg({'density':np.max})
res = res.merge(gc,left_on='locus_tag',right_index=True)
#print res[:10]
return res
def testFeatures():
"""test feature handling"""
fname = os.path.join(datadir,'MTB-H37Rv.gb')
df = sequtils.genbank2Dataframe(fname, cds=True)
df = df.set_index('locus_tag')
keys = df.index
name='Rv0011c'
row = df.ix[name]
seq = row.translation
prod = row['product']
rec = SeqRecord(Seq(seq),id=name,description=prod)
fastafmt = rec.format("fasta")
print fastafmt
print row.to_dict()
ind = keys.get_loc(name)
previous = keys[ind-1]
if ind<len(keys)-1:
next = keys[ind+1]
else:
next=None
return
def testrun(gname):
method = 'tepitope'#'iedbmhc1'#'netmhciipan'
path='test'
gfile = os.path.join(genomespath,'%s.gb' %gname)
df = sequtils.genbank2Dataframe(gfile, cds=True)
#names = list(df.locus_tag[:1])
names=['VP24']
alleles1 = ["HLA-A*02:02", "HLA-A*11:01", "HLA-A*32:07", "HLA-B*15:17", "HLA-B*51:01",
"HLA-C*04:01", "HLA-E*01:03"]
alleles2 = ["HLA-DRB1*0101", "HLA-DRB1*0305", "HLA-DRB1*0812", "HLA-DRB1*1196", "HLA-DRB1*1346",
"HLA-DRB1*1455", "HLA-DRB1*1457", "HLA-DRB1*1612", "HLA-DRB4*0107", "HLA-DRB5*0203"]
P = base.getPredictor(method)
P.iedbmethod='IEDB_recommended' #'netmhcpan'
P.predictProteins(df,length=11,alleles=alleles2,names=names,
save=True,path=path)
f = os.path.join('test', names[0]+'.mpk')
df = pd.read_msgpack(f)
P.data=df
#b = P.getBinders(data=df)
#print b[:20]
base.getScoreDistributions(method, path)
return
def testBcell(gname):
path='test'
gfile = os.path.join(genomespath,'%s.gb' %gname)
df = sequtils.genbank2Dataframe(gfile, cds=True)
names=['VP24']
P = base.getPredictor('bcell')
P.iedbmethod='Chou-Fasman'
P.predictProteins(df,names=names,save=True,path=path)
print P.data
return
def testconservation(label,gname):
"""Conservation analysis"""
tag='VP24'
pd.set_option('max_colwidth', 800)
gfile = os.path.join(genomespath,'%s.gb' %gname)
g = sequtils.genbank2Dataframe(gfile, cds=True)
res = g[g['locus_tag']==tag]
seq = res.translation.head(1).squeeze()
print seq
#alnrows = getOrthologs(seq)
#alnrows.to_csv('blast_%s.csv' %tag)
alnrows = pd.read_csv('blast_%s.csv' %tag,index_col=0)
alnrows.drop_duplicates(subset=['accession'], inplace=True)
alnrows = alnrows[alnrows['perc_ident']>=60]
seqs=[SeqRecord(Seq(a.sequence),a.accession) for i,a in alnrows.iterrows()]
print seqs[:2]
sequtils.distanceTree(seqs=seqs)#,ref=seqs[0])
#sequtils.ETETree(seqs, ref, metric)
#df = sequtils.getFastaProteins("blast_found.faa",idindex=3)
'''method='tepitope'
P = base.getPredictor(method)
P.predictSequences(df,seqkey='sequence')
b = P.getBinders()'''
return
def getLocalOrthologs(seq, db):
"""Get alignment for a protein using local blast db"""
SeqIO.write(SeqRecord(Seq(seq)), 'tempseq.faa', "fasta")
sequtils.doLocalBlast(db, 'tempseq.faa', output='my_blast.xml', maxseqs=30)
result_handle = open("my_blast.xml")
df = sequtils.getBlastResults(result_handle)
return df
def findConservedPeptide(peptide, recs):
"""Find sequences where a peptide is conserved"""
f=[]
for i,a in recs.iterrows():
seq = a.sequence.replace('-','')
found = seq.find(peptide)
f.append(found)
s = pd.DataFrame(f,columns=['found'],index=recs.accession)
s = s.replace(-1,np.nan)
#print s
res = s.count()
return s
def getPredictions(path,tag,method='tepitope',q=0.96):
"""Get predictions from file system"""
q=round(q,2)
#preds = OrderedDict()
cutoffs = {}
filename = os.path.join(path, tag+'.mpk')
if not os.path.exists(filename):
return
df = pd.read_msgpack(filename)
pred = base.getPredictor(name=method, data=df)
cutoffs = pred.allelecutoffs = getCutoffs(path, method, q)
pred = pred
return pred
def test():
gname = 'ebolavirus'
label = 'test'
testrun(gname)
#testBcell(gname)
#testgenomeanalysis(label,gname,method)
#testconservation(label,gname)
#testFeatures()
return
if __name__ == '__main__':
pd.set_option('display.width', 600)
test()
|
|
#!/usr/bin/python2.7
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for java_generator."""
__author__ = '[email protected] (Tony Aiuto)'
from google.apputils import basetest
from googleapis.codegen import data_types
from googleapis.codegen import data_value
from googleapis.codegen import java_generator
class JavaApiTest(basetest.TestCase):
def testToClassName(self):
"""Test creating safe class names from object names."""
# 'parameters': {}
api = java_generator.JavaApi({
'name': 'dummy',
'version': 'v1',
'resources': {
'foo': {
'methods': {
'bar': {
'id': 'bar',
}
}
}
}
})
self.assertEquals('Foo', api.ToClassName('foo', api))
self.assertEquals('DummyClass', api.ToClassName('class', None))
self.assertEquals('DummyDefault', api.ToClassName('default', None))
self.assertEquals('DummyImport', api.ToClassName('import', None))
self.assertEquals('DummyObject', api.ToClassName('object', None))
self.assertEquals('DummyString', api.ToClassName('string', None))
self.assertEquals('DummyTrue', api.ToClassName('true', None))
self.assertEquals('dummy', api.values['name'])
self.assertEquals('Dummy', api._class_name)
# Test the renaming of the object when it matches the API name.
self.assertEquals('Dummy', api.ToClassName('dummy', api))
foo = api._resources[0]
self.assertEquals('DummyOperations',
api.ToClassName('dummy', foo, element_type='resource'))
bar = foo._methods[0]
self.assertEquals('DummyOperation',
api.ToClassName('dummy', bar, element_type='method'))
def testToClassNameWithCanonical(self):
api = java_generator.JavaApi({
'name': 'dummy',
'canonicalName': 'Dummy Service',
'version': 'v1',
'resources': {}
})
self.assertEquals('dummy', api.values['name'])
self.assertEquals('DummyService', api._class_name)
self.assertEquals('DummyServiceClass', api.ToClassName('class', None))
def testGetCodeTypeFromDictionary(self):
"""Test mapping of JSON schema types to Java class names."""
language_model = java_generator.JavaLanguageModel()
test_cases = [
['String', {'type': 'string', 'format': 'byte'}],
['DateTime', {'type': 'string', 'format': 'date-time'}],
['Double', {'type': 'number', 'format': 'double'}],
['Float', {'type': 'number', 'format': 'float'}],
['Short', {'type': 'integer', 'format': 'int16'}],
['Integer', {'type': 'integer', 'format': 'int32'}],
['Long', {'type': 'string', 'format': 'int64'}],
['Object', {'type': 'any'}],
['Boolean', {'type': 'boolean'}],
['String', {'type': 'string'}],
['Long', {'type': 'integer', 'format': 'uint32'}],
['UnsignedLong', {'type': 'string', 'format': 'uint64'}],
]
for test_case in test_cases:
self.assertEquals(
test_case[0],
language_model.GetCodeTypeFromDictionary(test_case[1]))
class JavaGeneratorTest(basetest.TestCase):
def testImportsForArray(self):
"""Test if we get the right imports for an array.
The goal is to see that an array of a primitive type which requires an
import really works.
"""
gen = java_generator.BaseJavaGenerator({
'name': 'dummy',
'version': 'v1',
'resources': {},
'schemas': {
'Bar': {
'id': 'Bar',
'type': 'object',
'properties': {
'p1': {
'type': 'array',
'items': {
'type': 'string',
'format': 'uint64'
}
},
'p2': {
'type': 'array',
'items': {
'type': 'array',
'items': {
'type': 'string',
'format': 'date-time'
}
}
}
}
}
}
})
gen.AnnotateApiForLanguage(gen.api)
found_big_integer = False
found_date_time = False
for schema in gen._api._schemas.values():
import_manager = schema.values.get('importManager')
for import_list in import_manager.ImportLists():
for import_def in import_list:
print import_def
if import_def.find('BigInteger') >= 0:
found_big_integer = True
if import_def.find('DateTime') >= 0:
found_date_time = True
self.assertTrue(found_big_integer)
self.assertTrue(found_date_time)
class JavaLanguageModelTest(basetest.TestCase):
"""Tests for features implemented in the language model."""
def testDefaultPath(self):
"""Test the package path generation."""
def MakeGen(host):
gen = java_generator.BaseJavaGenerator({
'name': 'fake',
'version': 'v1',
'rootUrl': 'https://%s/' % host,
'servicePath': 'fake/v1',
'ownerDomain': host,
})
gen.AnnotateApiForLanguage(gen.api)
return gen
gen = MakeGen('google.com')
self.assertEquals('com/google/api/services/fake', gen.api.module.path)
self.assertEquals('com/google/api/services/fake/model',
gen.api.model_module.path)
gen = MakeGen('not-google.com')
self.assertEquals('com/not_google/fake', gen.api.module.path)
self.assertEquals('com.not_google.fake', gen.api.module.name)
gen = MakeGen('my-custom_app.appspot.com')
self.assertEquals('com/appspot/my_custom_app/fake', gen.api.module.path)
def testAllowedCharacters(self):
# make sure $ is allowed in a name and that @ is not
model = java_generator.JavaLanguageModel()
self.assertEquals('$ref',
model.TransformString(None, '$ref', model.member_policy))
self.assertEquals('set$ref',
model.TransformString(None, '$ref', model.setter_policy))
self.assertEquals('getId',
model.TransformString(None, '@id', model.getter_policy))
class JavaLanguageModelDataValueTest(basetest.TestCase):
"""Tests for DataValue integration."""
def setUp(self):
self.language_model = java_generator.JavaLanguageModel()
def _CreateDataValue(self, value, val_type):
def_dict = {
'className': 'Foo',
'type': val_type,
}
prototype = data_types.DataType(
def_dict, None, language_model=self.language_model)
dv = data_value.DataValue(value, prototype)
return dv
def testRenderBoolean(self):
dv = self._CreateDataValue(True, 'boolean')
render_method = self.language_model._SUPPORTED_TYPES['boolean']
self.assertEqual('true', render_method(dv))
dv.SetValue(False)
self.assertEqual('false', render_method(dv))
def testRenderInteger(self):
dv = self._CreateDataValue(42, 'integer')
render_method = self.language_model._SUPPORTED_TYPES['integer']
self.assertRaises(ValueError, render_method, dv)
dv.SetTemplateValue('codeType', 'Long')
self.assertEqual('42L', render_method(dv))
class Java14LanguageModelTest(basetest.TestCase):
def setUp(self):
self.language_model = java_generator.Java14LanguageModel()
def _CreateDataValue(self, value, val_type):
def_dict = {
'className': 'Foo',
'type': val_type,
}
prototype = data_types.DataType(
def_dict, None, language_model=self.language_model)
dv = data_value.DataValue(value, prototype)
return dv
def testRenderBoolean(self):
dv = self._CreateDataValue(True, 'boolean')
render_method = self.language_model._SUPPORTED_TYPES['boolean']
self.assertEqual('true', render_method(dv))
dv.SetValue(False)
self.assertEqual('false', render_method(dv))
def testRenderInteger(self):
dv = self._CreateDataValue(42, 'integer')
render_method = self.language_model._SUPPORTED_TYPES['integer']
self.assertRaises(ValueError, render_method, dv)
dv.SetTemplateValue('codeType', 'java.lang.Long')
self.assertEqual('42L', render_method(dv))
if __name__ == '__main__':
basetest.main()
|
|
import collections
import string
import numpy
import six
import cupy
from cupy import carray
from cupy import elementwise
from cupy import util
six_range = six.moves.range
six_zip = six.moves.zip
_broadcast = elementwise._broadcast
_check_args = elementwise._check_args
_decide_params_type = elementwise._decide_params_type
_get_kernel_params = elementwise._get_kernel_params
_get_args_info = elementwise._get_args_info
_get_out_args = elementwise._get_out_args
_get_out_args_with_params = elementwise._get_out_args_with_params
_get_param_info = elementwise._get_param_info
_get_typename = elementwise._get_typename
_guess_routine = elementwise._guess_routine
_reduce_dims = elementwise._reduce_dims
def _get_simple_reduction_kernel(
name, block_size, reduce_type, params, identity,
pre_map_expr, reduce_expr, post_map_expr,
type_preamble, input_expr, output_expr, preamble, options):
if identity is None:
identity = ''
module_code = string.Template('''
${type_preamble}
${preamble}
#define REDUCE(a, b) (${reduce_expr})
#define POST_MAP(a) (${post_map_expr})
typedef ${reduce_type} _type_reduce;
extern "C" __global__ void ${name}(${params}) {
if (_out_clp2_size > 256) {
CUPY_FOR(_i, _out_ind.size()) {
_type_reduce _s = _type_reduce(${identity});
for (int _j = _i, _J = 0;
_j < _in_ind.size();
_j += _out_ind.size(), _J++) {
_in_ind.set(_j);
${input_expr}
_type_reduce _a = ${pre_map_expr};
_s = REDUCE(_s, _a);
}
_out_ind.set(_i);
${output_expr}
POST_MAP(_s);
}
} else {
extern __shared__ _type_reduce _sdata_raw[];
_type_reduce *_sdata = _sdata_raw;
int _tid = threadIdx.x;
_sdata[_tid] = _type_reduce(${identity});
unsigned int _i = _tid % _out_clp2_size;
if (_i >= _out_ind.size()) return;
_type_reduce _s = _type_reduce(${identity});
int _J_offset = _tid / _out_clp2_size;
int _j_offset = _J_offset * _out_ind.size();
int _J_stride = ${block_size} / _out_clp2_size;
int _j_stride = _J_stride * _out_ind.size();
for (int _j = _i + _j_offset, _J = _J_offset;
_j < _in_ind.size();
_j += _j_stride, _J += _J_stride) {
_in_ind.set(_j);
${input_expr}
_type_reduce _a = ${pre_map_expr};
_s = REDUCE(_s, _a);
}
_sdata[_tid] = _s;
__syncthreads();
if (_tid >= 256) return;
_sdata[_tid] = REDUCE(_sdata[_tid], _sdata[_tid + 256]);
__syncthreads();
if (_out_clp2_size <= 128) {
_sdata[_tid] = REDUCE(_sdata[_tid], _sdata[_tid + 128]);
__syncthreads();
if (_out_clp2_size <= 64) {
_sdata[_tid] = REDUCE(_sdata[_tid], _sdata[_tid + 64]);
__syncthreads();
if (_out_clp2_size <= 32) {
_sdata[_tid] = REDUCE(_sdata[_tid], _sdata[_tid + 32]);
if (_out_clp2_size <= 16) {
_sdata[_tid] = REDUCE(_sdata[_tid], _sdata[_tid + 16]);
if (_out_clp2_size <= 8) {
_sdata[_tid] = REDUCE(_sdata[_tid], _sdata[_tid + 8]);
if (_out_clp2_size <= 4) {
_sdata[_tid] = REDUCE(_sdata[_tid], _sdata[_tid + 4]);
if (_out_clp2_size <= 2) {
_sdata[_tid] = REDUCE(_sdata[_tid], _sdata[_tid + 2]);
if (_out_clp2_size <= 1) {
_sdata[_tid] = REDUCE(_sdata[_tid], _sdata[_tid + 1]);
}
}
}
}
}
}
}
}
_s = _sdata[_tid];
if (_tid >= _out_ind.size()) return;
_out_ind.set(_i);
${output_expr}
POST_MAP(_s);
}
}''').substitute(
name=name,
block_size=block_size,
reduce_type=reduce_type,
params=params,
identity=identity,
reduce_expr=reduce_expr,
pre_map_expr=pre_map_expr,
post_map_expr=post_map_expr,
type_preamble=type_preamble,
input_expr=input_expr,
output_expr=output_expr,
preamble=preamble)
module = carray.compile_with_cache(module_code, options)
return module.get_function(name)
def _get_axis(axis, ndim):
if axis is None:
axis = tuple(six_range(ndim))
elif isinstance(axis, collections.Sequence):
axis = tuple(axis)
else:
axis = axis,
for dim in axis:
if dim < -ndim or dim >= ndim:
raise ValueError('Axis overrun')
axis = tuple(sorted([dim % ndim for dim in axis]))
raxis = tuple([dim for dim in six_range(ndim) if dim not in axis])
return axis, raxis
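# For illustration: with ndim=3, _get_axis(1, 3) == ((1,), (0, 2)),
# _get_axis(None, 3) == ((0, 1, 2), ()) and _get_axis(-1, 3) == ((2,), (0, 1)).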
def _get_out_shape(shape, axis, raxis, keepdims):
if keepdims:
out_shape = list(shape)
for i in axis:
out_shape[i] = 1
return tuple(out_shape)
return tuple([shape[i] for i in raxis])
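# For illustration: _get_out_shape((2, 3, 4), (1,), (0, 2), False) == (2, 4)
# and _get_out_shape((2, 3, 4), (1,), (0, 2), True) == (2, 1, 4).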
def _get_trans_args(args, trans, shape, params=None):
if trans == tuple(six_range(len(shape))):
return args, shape
if params is not None and any(p.raw for p in params):
raise NotImplementedError('Illegal conditions')
args = [cupy.transpose(a, trans) if isinstance(a, cupy.ndarray) else a
for a in args]
shape = tuple([shape[i] for i in trans])
return args, shape
def _get_inout_args(in_args, out_args, in_indexer, out_indexer, out_clp2_size,
params, reduce_dims):
if reduce_dims:
in_args, in_shape = _reduce_dims(
in_args, params, in_indexer.shape)
out_args, out_shape = _reduce_dims(
out_args, params[len(in_args):], out_indexer.shape)
in_indexer.shape = in_shape
out_indexer.shape = out_shape
args = in_args + out_args + [in_indexer, out_indexer,
numpy.int32(out_clp2_size)]
return args
@util.memoize(for_each_device=True)
def _get_simple_reduction_function(
routine, params, args_info, in_arg_dtype, out_arg_dtype, out_types,
name, block_size, identity, input_expr, output_expr, _preamble,
options):
reduce_type = routine[3]
if reduce_type is None:
reduce_type = _get_typename(out_types[0])
t = (_get_typename(in_arg_dtype), _get_typename(out_arg_dtype))
type_preamble = 'typedef %s type_in0_raw; typedef %s type_out0_raw;' % t
params = _get_kernel_params(params, args_info)
return _get_simple_reduction_kernel(
name, block_size, reduce_type, params, identity,
routine[0], routine[1], routine[2],
type_preamble, input_expr, output_expr, _preamble, options)
class simple_reduction_function(object):
def __init__(self, name, ops, identity, preamble):
self.name = name
self._ops = ops
self.identity = identity
self._preamble = preamble
self.nin = 1
self.nout = 1
in_params = _get_param_info('T in0', True)
out_params = _get_param_info('T out0', False)
self._params = (
in_params + out_params +
_get_param_info(
'CIndexer _in_ind, CIndexer _out_ind', False) +
_get_param_info('int32 _out_clp2_size', True))
self._input_expr = 'const type_in0_raw in0 = _raw_in0[_in_ind.get()];'
self._output_expr = 'type_out0_raw &out0 = _raw_out0[_out_ind.get()];'
self._routine_cache = {}
def __call__(self, a, axis=None, dtype=None, out=None, keepdims=False):
if not isinstance(a, cupy.ndarray):
raise TypeError('Input type must be cupy.ndarray')
if self.identity is None:
assert a.size != 0
if dtype is not None:
dtype = numpy.dtype(dtype).type
in_args = [a]
if out is None:
_check_args((a,))
out_args = []
else:
_check_args((a, out))
out_args = [out]
in_types, out_types, routine = _guess_routine(
self.name, self._routine_cache, self._ops, in_args, dtype)
axis, raxis = _get_axis(axis, a.ndim)
out_shape = _get_out_shape(a.shape, axis, raxis, keepdims)
out_args = _get_out_args(out_args, out_types, out_shape)
in_args, in_shape = _get_trans_args(
in_args, axis + raxis, in_args[0].shape)
in_indexer = carray.Indexer(in_shape)
out_indexer = carray.Indexer(out_shape)
out_clp2_size = 2 ** int.bit_length(int(out_indexer.size - 1))
inout_args = _get_inout_args(
in_args, out_args, in_indexer, out_indexer, out_clp2_size,
self._params, True)
args_info = _get_args_info(inout_args)
block_size = 512
kern = _get_simple_reduction_function(
routine, self._params, args_info,
in_args[0].dtype.type, out_args[0].dtype.type, out_types,
self.name, block_size, self.identity,
self._input_expr, self._output_expr, self._preamble, ())
shared_mem = 32 * block_size
if out_clp2_size > 256:
shared_mem = 0
# TODO(okuta) set actual size
kern.linear_launch(max(out_indexer.size, block_size), inout_args,
shared_mem, block_size)
if len(out_args) == 1:
return out_args[0]
return tuple(out_args)
@util.memoize(for_each_device=True)
def _get_reduction_kernel(
params, args_info, types,
name, block_size, reduce_type, identity, map_expr, reduce_expr,
post_map_expr, preamble, options):
kernel_params = _get_kernel_params(params, args_info)
arrays = [p for p, a in six_zip(params, args_info)
if not p.raw and a[0] is cupy.ndarray]
type_preamble = '\n'.join(
'typedef %s %s;' % (_get_typename(v), k)
for k, v in types)
input_expr = '\n'.join(
['const {0} {1} = _raw_{1}[_j];'.format(p.ctype, p.name)
for p in arrays if p.is_const])
output_expr = '\n'.join(
['{0} &{1} = _raw_{1}[_i];'.format(p.ctype, p.name)
for p in arrays if not p.is_const])
return _get_simple_reduction_kernel(
name, block_size, reduce_type, kernel_params, identity,
map_expr, reduce_expr, post_map_expr,
type_preamble, input_expr, output_expr, preamble, options)
class ReductionKernel(object):
"""User-defined reduction kernel.
This class can be used to define a reduction kernel with or without
broadcasting.
The kernel is compiled at an invocation of the
:meth:`~ReductionKernel.__call__` method, which is cached for each device.
The compiled binary is also cached into a file under the
``$HOME/.cupy/kernel_cache/`` directory with a hashed file name. The cached
binary is reused by other processes.
Args:
in_params (str): Input argument list.
out_params (str): Output argument list.
map_expr (str): Mapping expression for input values.
reduce_expr (str): Reduction expression.
post_map_expr (str): Mapping expression for reduced values.
identity (str): Identity value for starting the reduction.
name (str): Name of the kernel function. It should be set for
readability of the performance profiling.
reduce_type (str): Type of values to be used for reduction. This type
is used to store the special variables ``a`` and ``b``.
reduce_dims (bool): If True, input arrays are reshaped without copy to
smaller dimensions for efficiency.
preamble (str): Fragment of the CUDA-C/C++ code that is inserted at the
top of the cu file.
options (tuple of str): Additional compilation options.
"""
def __init__(self, in_params, out_params,
map_expr, reduce_expr, post_map_expr,
identity, name='reduce_kernel', reduce_type=None,
reduce_dims=True, preamble='', options=()):
self.in_params = _get_param_info(in_params, True)
self.out_params = _get_param_info(out_params, False)
self.nin = len(self.in_params)
self.nout = len(self.out_params)
self.nargs = self.nin + self.nout
self.params = (
self.in_params + self.out_params +
_get_param_info('CIndexer _in_ind, CIndexer _out_ind', False) +
_get_param_info('int32 _out_clp2_size', True))
self.identity = identity
self.reduce_expr = reduce_expr
self.map_expr = map_expr
self.name = name
self.options = options
self.reduce_dims = reduce_dims
self.post_map_expr = post_map_expr
if reduce_type is None:
self.reduce_type = self.out_params[0].ctype
else:
self.reduce_type = reduce_type
self.preamble = preamble
def __call__(self, *args, **kwargs):
"""Compiles and invokes the reduction kernel.
The compilation runs only if the kernel is not cached. Note that the
kernels with different argument dtypes, ndims, or axis are not
compatible. It means that a single ReductionKernel object may be compiled
into multiple kernel binaries.
Args:
args: Arguments of the kernel.
Returns:
Arrays are returned according to the ``out_params`` argument of the
``__init__`` method.
"""
out = kwargs.pop('out', None)
axis = kwargs.pop('axis', None)
keepdims = kwargs.pop('keepdims', False)
if kwargs:
raise TypeError('Wrong arguments %s' % kwargs)
n_args = len(args)
if n_args != self.nin and n_args != self.nargs:
raise TypeError('Wrong number of arguments for %s' % self.name)
out_args = list(args[self.nin:])
if out is not None:
if self.nout != 1:
raise NotImplementedError('')
if len(out_args) != 0:
raise ValueError("cannot specify 'out' as both "
"a positional and keyword argument")
out_args = [out]
in_args, broad_shape = _broadcast(args, self.in_params, False)
_check_args(in_args + out_args)
if self.identity is None:
assert 0 in broad_shape
cp_array = cupy.ndarray
in_ndarray_types = tuple(
[a.dtype.type if isinstance(a, cp_array) else None
for a in in_args])
out_ndarray_types = tuple(
[a.dtype.type if isinstance(a, cp_array) else None
for a in out_args])
in_types, out_types, types = _decide_params_type(
self.in_params, self.out_params,
in_ndarray_types, out_ndarray_types)
axis, raxis = _get_axis(axis, len(broad_shape))
out_shape = _get_out_shape(broad_shape, axis, raxis, keepdims)
in_args = [x if isinstance(x, cp_array) else t(x)
for x, t in six_zip(in_args, in_types)]
in_args, in_shape = _get_trans_args(
in_args, axis + raxis, broad_shape, self.in_params)
out_args = _get_out_args_with_params(
out_args, out_types, out_shape, self.out_params)
in_indexer = carray.Indexer(in_shape)
out_indexer = carray.Indexer(out_shape)
out_clp2_size = 2 ** int.bit_length(int(out_indexer.size - 1))
inout_args = _get_inout_args(
in_args, out_args, in_indexer, out_indexer, out_clp2_size,
self.params, self.reduce_dims)
args_info = _get_args_info(inout_args)
block_size = 512
kern = _get_reduction_kernel(
self.params, args_info, types,
self.name, block_size, self.reduce_type, self.identity,
self.map_expr, self.reduce_expr, self.post_map_expr,
self.preamble, self.options)
shared_mem = 32 * block_size
if out_clp2_size > 256:
shared_mem = 0
# TODO(okuta) set actual size
kern.linear_launch(max(out_indexer.size, block_size), inout_args,
shared_mem, block_size)
return out_args[0]
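# --- Hedged usage sketch (not part of the original module) ---
# A minimal illustration of how ReductionKernel is typically defined and
# invoked, following the parameter order documented in the class docstring
# above; the helper and kernel names below are made up for this sketch.
def _example_l2norm(x):
    """Return the L2 norm of the cupy.ndarray ``x`` (illustrative only)."""
    l2norm = ReductionKernel(
        'T x',                  # in_params
        'T y',                  # out_params
        'x * x',                # map_expr: square each element
        'a + b',                # reduce_expr: sum the squares
        'y = sqrt(a)',          # post_map_expr: take the square root
        '0',                    # identity value for the sum
        name='example_l2norm')
    return l2norm(x)            # reduce over all axes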
def create_reduction_func(name, ops, routine=None, identity=None,
preamble=''):
_ops = []
for t in ops:
if not isinstance(t, tuple):
typ = t
rt = routine
else:
typ, rt = t
rt = tuple(i or j for i, j in six_zip(rt, routine))
types = typ.split('->')
if len(types) == 1:
in_types = out_types = tuple(types)
else:
in_types, out_types = map(tuple, types)
in_types = tuple([numpy.dtype(t).type for t in in_types])
out_types = tuple([numpy.dtype(t).type for t in out_types])
_ops.append((in_types, out_types, rt))
return simple_reduction_function(name, _ops, identity, preamble)
_min_max_preamble = '''
struct min_max_st{
type_in0_raw value;
int index;
__device__ min_max_st() : index(-1) { }
__device__ min_max_st(type_in0_raw v) : value(v), index(0) { }
__device__ min_max_st(type_in0_raw v, int i) : value(v), index(i) { }
};
__device__ min_max_st my_min(const min_max_st& a, const min_max_st& b) {
if (a.index == -1) return b;
if (b.index == -1) return a;
return min_max_st(min(a.value, b.value));
}
__device__ min_max_st my_max(const min_max_st& a, const min_max_st& b) {
if (a.index == -1) return b;
if (b.index == -1) return a;
return min_max_st(max(a.value, b.value));
}
__device__ min_max_st my_argmin(const min_max_st& a, const min_max_st& b) {
if (a.index == -1) return b;
if (b.index == -1) return a;
return (a.value <= b.value) ? a : b;
}
__device__ min_max_st my_argmax(const min_max_st& a, const min_max_st& b) {
if (a.index == -1) return b;
if (b.index == -1) return a;
return (a.value >= b.value) ? a : b;
}'''
amin = create_reduction_func(
'cupy_min',
('?->?', 'B->B', 'h->h', 'H->H', 'i->i', 'I->I', 'l->l', 'L->L',
'q->q', 'Q->Q', 'e->e', 'f->f', 'd->d'),
('min_max_st(in0)', 'my_min(a, b)', 'out0 = a.value', 'min_max_st'),
None, _min_max_preamble)
amax = create_reduction_func(
'cupy_max',
('?->?', 'B->B', 'h->h', 'H->H', 'i->i', 'I->I', 'l->l', 'L->L',
'q->q', 'Q->Q', 'e->e', 'f->f', 'd->d'),
('min_max_st(in0)', 'my_max(a, b)', 'out0 = a.value', 'min_max_st'),
None, _min_max_preamble)
argmin = create_reduction_func(
'cupy_argmin',
('?->l', 'B->l', 'h->l', 'H->l', 'i->l', 'I->l', 'l->l', 'L->l',
'q->l', 'Q->l', 'e->l', 'f->l', 'd->l'),
('min_max_st(in0, _J)', 'my_argmin(a, b)', 'out0 = a.index', 'min_max_st'),
None, _min_max_preamble)
argmax = create_reduction_func(
'cupy_argmax',
('?->l', 'B->l', 'h->l', 'H->l', 'i->l', 'I->l', 'l->l', 'L->l',
'q->l', 'Q->l', 'e->l', 'f->l', 'd->l'),
('min_max_st(in0, _J)', 'my_argmax(a, b)', 'out0 = a.index', 'min_max_st'),
None, _min_max_preamble)
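# Hedged note (not part of the original module): the reduction functions
# created above are called like NumPy-style reductions on cupy.ndarrays,
# e.g. for an array ``x``:
#     amin(x)              # minimum over all elements
#     amax(x, axis=0)      # maximum along axis 0
#     argmax(x)            # flat index of the maximum element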
|
|
"""
parser.http package (imdb package).
This package provides the IMDbHTTPAccessSystem class used to access
IMDb's data through the web interface.
the imdb.IMDb function will return an instance of this class when
called with the 'accessSystem' argument set to "http" or "web"
or "html" (this is the default).
Copyright 2004-2012 Davide Alberani <[email protected]>
2008 H. Turgut Uyar <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import sys
import socket
import logging
from urllib import FancyURLopener, quote_plus
from codecs import lookup
from imdb import IMDbBase, imdbURL_movie_main, imdbURL_person_main, \
imdbURL_character_main, imdbURL_company_main, \
imdbURL_keyword_main, imdbURL_find, imdbURL_top250, \
imdbURL_bottom100
from imdb.utils import analyze_title
from imdb._exceptions import IMDbDataAccessError, IMDbParserError
import searchMovieParser
import searchPersonParser
import searchCharacterParser
import searchCompanyParser
import searchKeywordParser
import movieParser
import personParser
import characterParser
import companyParser
import topBottomParser
# Logger for miscellaneous functions.
_aux_logger = logging.getLogger('imdbpy.parser.http.aux')
IN_GAE = False
try:
import google.appengine
IN_GAE = True
_aux_logger.info('IMDbPY is running in the Google App Engine environment')
except ImportError:
pass
class _ModuleProxy:
"""A proxy to instantiate and access parsers."""
def __init__(self, module, defaultKeys=None, oldParsers=False,
useModule=None, fallBackToNew=False):
"""Initialize a proxy for the given module; defaultKeys, if set,
must be a dictionary of values to set on instantiated objects."""
if oldParsers or fallBackToNew:
_aux_logger.warn('The old set of parsers was removed; falling ' \
'back to the new parsers.')
self.useModule = useModule
if defaultKeys is None:
defaultKeys = {}
self._defaultKeys = defaultKeys
self._module = module
def __getattr__(self, name):
"""Called only when no look-up is found."""
_sm = self._module
# Read the _OBJECTS dictionary to build the asked parser.
if name in _sm._OBJECTS:
_entry = _sm._OBJECTS[name]
# Initialize the parser.
kwds = {}
if self.useModule:
kwds = {'useModule': self.useModule}
parserClass = _entry[0][0]
obj = parserClass(**kwds)
attrsToSet = self._defaultKeys.copy()
attrsToSet.update(_entry[1] or {})
# Set attribute to the object.
for key in attrsToSet:
setattr(obj, key, attrsToSet[key])
setattr(self, name, obj)
return obj
return getattr(_sm, name)
PY_VERSION = sys.version_info[:2]
# The cookies for the "adult" search.
# Please don't mess with these accounts.
# Old 'IMDbPY' account.
_old_cookie_id = 'boM2bYxz9MCsOnH9gZ0S9QHs12NWrNdApxsls1Vb5/NGrNdjcHx3dUas10UASoAjVEvhAbGagERgOpNkAPvxdbfKwaV2ikEj9SzXY1WPxABmDKQwdqzwRbM+12NSeJFGUEx3F8as10WwidLzVshDtxaPIbP13NdjVS9UZTYqgTVGrNcT9vyXU1'
_old_cookie_uu = '3M3AXsquTU5Gur/Svik+ewflPm5Rk2ieY3BIPlLjyK3C0Dp9F8UoPgbTyKiGtZp4x1X+uAUGKD7BM2g+dVd8eqEzDErCoYvdcvGLvVLAen1y08hNQtALjVKAe+1hM8g9QbNonlG1/t4S82ieUsBbrSIQbq1yhV6tZ6ArvSbA7rgHc8n5AdReyAmDaJ5Wm/ee3VDoCnGj/LlBs2ieUZNorhHDKK5Q=='
# New 'IMDbPYweb' account.
_cookie_id = 'rH1jNAkjTlNXvHolvBVBsgaPICNZbNdjVjzFwzas9JRmusdjVoqBs/Hs12NR+1WFxEoR9bGKEDUg6sNlADqXwkas12N131Rwdb+UQNGKN8PWrNdjcdqBQVLq8mbGDHP3hqzxhbD692NQi9D0JjpBtRaPIbP1zNdjUOqENQYv1ADWrNcT9vyXU1'
_cookie_uu = 'su4/m8cho4c6HP+W1qgq6wchOmhnF0w+lIWvHjRUPJ6nRA9sccEafjGADJ6hQGrMd4GKqLcz2X4z5+w+M4OIKnRn7FpENH7dxDQu3bQEHyx0ZEyeRFTPHfQEX03XF+yeN1dsPpcXaqjUZAw+lGRfXRQEfz3RIX9IgVEffdBAHw2wQXyf9xdMPrQELw0QNB8dsffsqcdQemjPB0w+moLcPh0JrKrHJ9hjBzdMPpcXTH7XRwwOk='
# imdbpy2010 account.
#_cookie_id = 'QrCdxVi+L+WgqOLrQJJgBgRRXGInphxiBPU/YXSFDyExMFzCp6YcYgSVXyEUhS/xMID8wqemHGID4DlntwZ49vemP5UXsAxiJ4D6goSmHGIgNT9hMXBaRSF2vMS3phxB0bVfQiQlP1RxdrzhB6YcRHFASyIhQVowwXCKtDSlD2YhgRvxBsCKtGemHBKH9mxSI='
#_cookie_uu = 'oiEo2yoJFCA2Zbn/o7Z1LAPIwotAu6QdALv3foDb1x5F/tdrFY63XkSfty4kntS8Y8jkHSDLt3406+d+JThEilPI0mtTaOQdA/t2/iErp22jaLdeVU5ya4PIREpj7HFdpzhEHadcIAngSER50IoHDpD6Bz4Qy3b+UIhE/hBbhz5Q63ceA2hEvhPo5B0FnrL9Q8jkWjDIbA0Au3d+AOtnXoCIRL4Q28c+UOtnXpP4RL4T6OQdA+6ijUCI5B0AW2d+UOtnXpPYRL4T6OQdA8jkTUOYlC0A=='
class _FakeURLOpener(object):
"""Fake URLOpener object, used to return empty strings instead of
errors.
"""
def __init__(self, url, headers):
self.url = url
self.headers = headers
def read(self, *args, **kwds): return ''
def close(self, *args, **kwds): pass
def info(self, *args, **kwds): return self.headers
class IMDbURLopener(FancyURLopener):
"""Fetch web pages and handle errors."""
_logger = logging.getLogger('imdbpy.parser.http.urlopener')
def __init__(self, *args, **kwargs):
self._last_url = u''
FancyURLopener.__init__(self, *args, **kwargs)
# Headers to add to every request.
# XXX: IMDb's web server doesn't like urllib-based programs,
# so let's pretend to be Mozilla.
# Wow! I'm shocked by my total lack of ethics! <g>
for header in ('User-Agent', 'User-agent', 'user-agent'):
self.del_header(header)
self.set_header('User-Agent', 'Mozilla/5.0')
# XXX: This class is used also to perform "Exact Primary
# [Title|Name]" searches, and so by default the cookie is set.
c_header = 'id=%s; uu=%s' % (_cookie_id, _cookie_uu)
self.set_header('Cookie', c_header)
def get_proxy(self):
"""Return the used proxy, or an empty string."""
return self.proxies.get('http', '')
def set_proxy(self, proxy):
"""Set the proxy."""
if not proxy:
if self.proxies.has_key('http'):
del self.proxies['http']
else:
if not proxy.lower().startswith('http://'):
proxy = 'http://%s' % proxy
self.proxies['http'] = proxy
def set_header(self, header, value, _overwrite=True):
"""Set a default header."""
if _overwrite:
self.del_header(header)
self.addheaders.append((header, value))
def get_header(self, header):
"""Return the first value of a header, or None
if not present."""
for index in xrange(len(self.addheaders)):
if self.addheaders[index][0] == header:
return self.addheaders[index][1]
return None
def del_header(self, header):
"""Remove a default header."""
for index in xrange(len(self.addheaders)):
if self.addheaders[index][0] == header:
del self.addheaders[index]
break
def retrieve_unicode(self, url, size=-1):
"""Retrieves the given URL, and returns a unicode string,
trying to guess the encoding of the data (assuming latin_1
by default)"""
encode = None
try:
if size != -1:
self.set_header('Range', 'bytes=0-%d' % size)
uopener = self.open(url)
kwds = {}
if PY_VERSION > (2, 3) and not IN_GAE:
kwds['size'] = size
content = uopener.read(**kwds)
self._last_url = uopener.url
# Maybe the server is nice enough to tell us the charset...
server_encode = uopener.info().getparam('charset')
# Otherwise, look at the content-type HTML meta tag.
if server_encode is None and content:
first_bytes = content[:512]
begin_h = first_bytes.find('text/html; charset=')
if begin_h != -1:
end_h = first_bytes[19+begin_h:].find('"')
if end_h != -1:
server_encode = first_bytes[19+begin_h:19+begin_h+end_h]
if server_encode:
try:
if lookup(server_encode):
encode = server_encode
except (LookupError, ValueError, TypeError):
pass
uopener.close()
if size != -1:
self.del_header('Range')
self.close()
except IOError, e:
if size != -1:
# Ensure that the Range header is removed.
self.del_header('Range')
raise IMDbDataAccessError({'errcode': e.errno,
'errmsg': str(e.strerror),
'url': url,
'proxy': self.get_proxy(),
'exception type': 'IOError',
'original exception': e})
if encode is None:
encode = 'latin_1'
# The detection of the encoding is error prone...
self._logger.warn('Unable to detect the encoding of the retrieved '
'page [%s]; falling back to default latin1.', encode)
##print unicode(content, encode, 'replace').encode('utf8')
return unicode(content, encode, 'replace')
def http_error_default(self, url, fp, errcode, errmsg, headers):
if errcode == 404:
self._logger.warn('404 code returned for %s: %s (headers: %s)',
url, errmsg, headers)
return _FakeURLOpener(url, headers)
raise IMDbDataAccessError({'url': 'http:%s' % url,
'errcode': errcode,
'errmsg': errmsg,
'headers': headers,
'error type': 'http_error_default',
'proxy': self.get_proxy()})
def open_unknown(self, fullurl, data=None):
raise IMDbDataAccessError({'fullurl': fullurl,
'data': str(data),
'error type': 'open_unknown',
'proxy': self.get_proxy()})
def open_unknown_proxy(self, proxy, fullurl, data=None):
raise IMDbDataAccessError({'proxy': str(proxy),
'fullurl': fullurl,
'error type': 'open_unknown_proxy',
'data': str(data)})
class IMDbHTTPAccessSystem(IMDbBase):
"""The class used to access IMDb's data through the web."""
accessSystem = 'http'
_http_logger = logging.getLogger('imdbpy.parser.http')
def __init__(self, isThin=0, adultSearch=1, proxy=-1, oldParsers=False,
fallBackToNew=False, useModule=None, cookie_id=-1,
timeout=30, cookie_uu=None, *arguments, **keywords):
"""Initialize the access system."""
IMDbBase.__init__(self, *arguments, **keywords)
self.urlOpener = IMDbURLopener()
# When isThin is set, we're parsing the "maindetails" page
# of a movie (instead of the "combined" page) and movie/person
# references are not collected if no defaultModFunct is provided.
#
# NOTE: httpThin was removed since IMDbPY 4.8.
self.isThin = isThin
self._getRefs = True
self._mdparse = False
if isThin:
self._http_logger.warn('"httpThin" access system no longer ' +
'supported; "http" used automatically', exc_info=False)
self.isThin = 0
if self.accessSystem in ('httpThin', 'webThin', 'htmlThin'):
self.accessSystem = 'http'
self.set_timeout(timeout)
self.do_adult_search(adultSearch)
if cookie_id != -1:
if cookie_id is None:
self.del_cookies()
elif cookie_uu is not None:
self.set_cookies(cookie_id, cookie_uu)
if proxy != -1:
self.set_proxy(proxy)
if useModule is not None:
if not isinstance(useModule, (list, tuple)) and ',' in useModule:
useModule = useModule.split(',')
_def = {'_modFunct': self._defModFunct, '_as': self.accessSystem}
# Proxy objects.
self.smProxy = _ModuleProxy(searchMovieParser, defaultKeys=_def,
oldParsers=oldParsers, useModule=useModule,
fallBackToNew=fallBackToNew)
self.spProxy = _ModuleProxy(searchPersonParser, defaultKeys=_def,
oldParsers=oldParsers, useModule=useModule,
fallBackToNew=fallBackToNew)
self.scProxy = _ModuleProxy(searchCharacterParser, defaultKeys=_def,
oldParsers=oldParsers, useModule=useModule,
fallBackToNew=fallBackToNew)
self.scompProxy = _ModuleProxy(searchCompanyParser, defaultKeys=_def,
oldParsers=oldParsers, useModule=useModule,
fallBackToNew=fallBackToNew)
self.skProxy = _ModuleProxy(searchKeywordParser, defaultKeys=_def,
oldParsers=oldParsers, useModule=useModule,
fallBackToNew=fallBackToNew)
self.mProxy = _ModuleProxy(movieParser, defaultKeys=_def,
oldParsers=oldParsers, useModule=useModule,
fallBackToNew=fallBackToNew)
self.pProxy = _ModuleProxy(personParser, defaultKeys=_def,
oldParsers=oldParsers, useModule=useModule,
fallBackToNew=fallBackToNew)
self.cProxy = _ModuleProxy(characterParser, defaultKeys=_def,
oldParsers=oldParsers, useModule=useModule,
fallBackToNew=fallBackToNew)
self.compProxy = _ModuleProxy(companyParser, defaultKeys=_def,
oldParsers=oldParsers, useModule=useModule,
fallBackToNew=fallBackToNew)
self.topBottomProxy = _ModuleProxy(topBottomParser, defaultKeys=_def,
oldParsers=oldParsers, useModule=useModule,
fallBackToNew=fallBackToNew)
def _normalize_movieID(self, movieID):
"""Normalize the given movieID."""
try:
return '%07d' % int(movieID)
except ValueError, e:
raise IMDbParserError('invalid movieID "%s": %s' % (movieID, e))
def _normalize_personID(self, personID):
"""Normalize the given personID."""
try:
return '%07d' % int(personID)
except ValueError, e:
raise IMDbParserError('invalid personID "%s": %s' % (personID, e))
def _normalize_characterID(self, characterID):
"""Normalize the given characterID."""
try:
return '%07d' % int(characterID)
except ValueError, e:
raise IMDbParserError('invalid characterID "%s": %s' % \
(characterID, e))
def _normalize_companyID(self, companyID):
"""Normalize the given companyID."""
try:
return '%07d' % int(companyID)
except ValueError, e:
raise IMDbParserError('invalid companyID "%s": %s' % \
(companyID, e))
def get_imdbMovieID(self, movieID):
"""Translate a movieID in an imdbID; in this implementation
the movieID _is_ the imdbID.
"""
return movieID
def get_imdbPersonID(self, personID):
"""Translate a personID in an imdbID; in this implementation
the personID _is_ the imdbID.
"""
return personID
def get_imdbCharacterID(self, characterID):
"""Translate a characterID in an imdbID; in this implementation
the characterID _is_ the imdbID.
"""
return characterID
def get_imdbCompanyID(self, companyID):
"""Translate a companyID in an imdbID; in this implementation
the companyID _is_ the imdbID.
"""
return companyID
def get_proxy(self):
"""Return the used proxy or an empty string."""
return self.urlOpener.get_proxy()
def set_proxy(self, proxy):
"""Set the web proxy to use.
It should be a string like 'http://localhost:8080/'; if the
string is empty, no proxy will be used.
If the HTTP_PROXY environment variable is set, its value is
used automatically.
"""
self.urlOpener.set_proxy(proxy)
def set_timeout(self, timeout):
"""Set the default timeout, in seconds, of the connection."""
try:
timeout = int(timeout)
except Exception:
timeout = 0
if timeout <= 0:
timeout = None
socket.setdefaulttimeout(timeout)
def set_cookies(self, cookie_id, cookie_uu):
"""Set a cookie to access an IMDb's account."""
c_header = 'id=%s; uu=%s' % (cookie_id, cookie_uu)
self.urlOpener.set_header('Cookie', c_header)
def del_cookies(self):
"""Remove the used cookie."""
self.urlOpener.del_header('Cookie')
def do_adult_search(self, doAdult,
cookie_id=_cookie_id, cookie_uu=_cookie_uu):
"""If doAdult is true, 'adult' movies are included in the
search results; cookie_id and cookie_uu are optional
parameters to select a specific account (see your cookie
or cookies.txt file)."""
if doAdult:
self.set_cookies(cookie_id, cookie_uu)
#c_header = 'id=%s; uu=%s' % (cookie_id, cookie_uu)
#self.urlOpener.set_header('Cookie', c_header)
else:
self.urlOpener.del_header('Cookie')
def _retrieve(self, url, size=-1, _noCookies=False):
"""Retrieve the given URL."""
##print url
_cookies = None
# XXX: quite obscene, but in some very limited
# cases (/ttXXXXXXX/epdate) if the cookies
# are set, a 500 error is returned.
if _noCookies:
_cookies = self.urlOpener.get_header('Cookie')
self.del_cookies()
self._http_logger.debug('fetching url %s (size: %d)', url, size)
try:
ret = self.urlOpener.retrieve_unicode(url, size=size)
finally:
if _noCookies and _cookies:
self.urlOpener.set_header('Cookie', _cookies)
return ret
def _get_search_content(self, kind, ton, results):
"""Retrieve the web page for a given search.
kind can be 'tt' (for titles), 'nm' (for names),
'char' (for characters) or 'co' (for companies).
ton is the title or the name to search.
results is the maximum number of results to be retrieved."""
if isinstance(ton, unicode):
try:
ton = ton.encode('iso8859-1')
except Exception, e:
try:
ton = ton.encode('utf-8')
except Exception, e:
pass
##params = 'q=%s&%s=on&mx=%s' % (quote_plus(ton), kind, str(results))
params = 'q=%s;s=%s;mx=%s' % (quote_plus(ton), kind, str(results))
if kind == 'ep':
params = params.replace('s=ep;', 's=tt;ttype=ep;', 1)
cont = self._retrieve(self.urls['find'] % params)
#print 'URL:', imdbURL_find % params
if cont.find('Your search returned more than') == -1 or \
cont.find("displayed the exact matches") == -1:
return cont
# The retrieved page contains no results, because too many
# titles or names contain the string we're looking for.
params = 'q=%s;ls=%s;lm=0' % (quote_plus(ton), kind)
size = 131072 + results * 512
return self._retrieve(self.urls['find'] % params, size=size)
def _search_movie(self, title, results):
# The URL of the query.
# XXX: To retrieve the complete results list:
# params = urllib.urlencode({'more': 'tt', 'q': title})
##params = urllib.urlencode({'tt': 'on','mx': str(results),'q': title})
##params = 'q=%s&tt=on&mx=%s' % (quote_plus(title), str(results))
##cont = self._retrieve(imdbURL_find % params)
cont = self._get_search_content('tt', title, results)
return self.smProxy.search_movie_parser.parse(cont, results=results)['data']
def _search_episode(self, title, results):
t_dict = analyze_title(title)
if t_dict['kind'] == 'episode':
title = t_dict['title']
cont = self._get_search_content('ep', title, results)
return self.smProxy.search_movie_parser.parse(cont, results=results)['data']
def get_movie_main(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'combined')
return self.mProxy.movie_parser.parse(cont, mdparse=self._mdparse)
def get_movie_full_credits(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'fullcredits')
return self.mProxy.movie_parser.parse(cont)
def get_movie_plot(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'plotsummary')
return self.mProxy.plot_parser.parse(cont, getRefs=self._getRefs)
def get_movie_awards(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'awards')
return self.mProxy.movie_awards_parser.parse(cont)
def get_movie_taglines(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'taglines')
return self.mProxy.taglines_parser.parse(cont)
def get_movie_keywords(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'keywords')
return self.mProxy.keywords_parser.parse(cont)
def get_movie_alternate_versions(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'alternateversions')
return self.mProxy.alternateversions_parser.parse(cont,
getRefs=self._getRefs)
def get_movie_crazy_credits(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'crazycredits')
return self.mProxy.crazycredits_parser.parse(cont,
getRefs=self._getRefs)
def get_movie_goofs(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'goofs')
return self.mProxy.goofs_parser.parse(cont, getRefs=self._getRefs)
def get_movie_quotes(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'quotes')
return self.mProxy.quotes_parser.parse(cont, getRefs=self._getRefs)
def get_movie_release_dates(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'releaseinfo')
ret = self.mProxy.releasedates_parser.parse(cont)
ret['info sets'] = ('release dates', 'akas')
return ret
get_movie_akas = get_movie_release_dates
get_movie_release_info = get_movie_release_dates
def get_movie_vote_details(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'ratings')
return self.mProxy.ratings_parser.parse(cont)
def get_movie_official_sites(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'officialsites')
return self.mProxy.officialsites_parser.parse(cont)
def get_movie_trivia(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'trivia')
return self.mProxy.trivia_parser.parse(cont, getRefs=self._getRefs)
def get_movie_connections(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'movieconnections')
return self.mProxy.connections_parser.parse(cont)
def get_movie_technical(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'technical')
return self.mProxy.tech_parser.parse(cont)
def get_movie_business(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'business')
return self.mProxy.business_parser.parse(cont, getRefs=self._getRefs)
def get_movie_literature(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'literature')
return self.mProxy.literature_parser.parse(cont)
def get_movie_locations(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'locations')
return self.mProxy.locations_parser.parse(cont)
def get_movie_soundtrack(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'soundtrack')
return self.mProxy.soundtrack_parser.parse(cont)
def get_movie_dvd(self, movieID):
self._http_logger.warn('dvd information no longer available', exc_info=False)
return {}
def get_movie_recommendations(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'recommendations')
return self.mProxy.rec_parser.parse(cont)
def get_movie_external_reviews(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'externalreviews')
return self.mProxy.externalrev_parser.parse(cont)
def get_movie_newsgroup_reviews(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'newsgroupreviews')
return self.mProxy.newsgrouprev_parser.parse(cont)
def get_movie_misc_sites(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'miscsites')
return self.mProxy.misclinks_parser.parse(cont)
def get_movie_sound_clips(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'soundsites')
return self.mProxy.soundclips_parser.parse(cont)
def get_movie_video_clips(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'videosites')
return self.mProxy.videoclips_parser.parse(cont)
def get_movie_photo_sites(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'photosites')
return self.mProxy.photosites_parser.parse(cont)
def get_movie_news(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'news')
return self.mProxy.news_parser.parse(cont, getRefs=self._getRefs)
def get_movie_amazon_reviews(self, movieID):
self._http_logger.warn('amazon review no longer available', exc_info=False)
return {}
def get_movie_guests(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'epcast')
return self.mProxy.episodes_cast_parser.parse(cont)
get_movie_episodes_cast = get_movie_guests
def get_movie_merchandising_links(self, movieID):
self._http_logger.warn('merchandising links no longer available',
exc_info=False)
return {}
def _purge_seasons_data(self, data_d):
if '_current_season' in data_d['data']:
del data_d['data']['_current_season']
if '_seasons' in data_d['data']:
del data_d['data']['_seasons']
return data_d
def get_movie_episodes(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'episodes')
data_d = self.mProxy.season_episodes_parser.parse(cont)
if not data_d or 'data' not in data_d:
return {}
_current_season = data_d['data'].get('_current_season', '')
_seasons = data_d['data'].get('_seasons') or []
data_d = self._purge_seasons_data(data_d)
data_d['data'].setdefault('episodes', {})
nr_eps = len(data_d['data']['episodes'].get(_current_season) or [])
for season in _seasons:
if season == _current_season:
continue
other_cont = self._retrieve(self.urls['movie_main'] % movieID + 'episodes?season=' + str(season))
other_d = self.mProxy.season_episodes_parser.parse(other_cont)
other_d = self._purge_seasons_data(other_d)
other_d['data'].setdefault('episodes', {})
if not (other_d and other_d['data'] and other_d['data']['episodes'].get(season)):
continue
nr_eps += len(other_d['data']['episodes'].get(season) or [])
data_d['data']['episodes'][season] = other_d['data']['episodes'][season]
data_d['data']['number of episodes'] = nr_eps
return data_d
def get_movie_episodes_rating(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'epdate', _noCookies=True)
data_d = self.mProxy.eprating_parser.parse(cont)
# set movie['episode of'].movieID for every episode.
if data_d.get('data', {}).has_key('episodes rating'):
for item in data_d['data']['episodes rating']:
episode = item['episode']
episode['episode of'].movieID = movieID
return data_d
def get_movie_faqs(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'faq')
return self.mProxy.movie_faqs_parser.parse(cont, getRefs=self._getRefs)
def get_movie_airing(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'tvschedule')
return self.mProxy.airing_parser.parse(cont)
get_movie_tv_schedule = get_movie_airing
def get_movie_synopsis(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'synopsis')
return self.mProxy.synopsis_parser.parse(cont)
def get_movie_parents_guide(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'parentalguide')
return self.mProxy.parentsguide_parser.parse(cont)
def _search_person(self, name, results):
# The URL of the query.
# XXX: To retrieve the complete results list:
# params = urllib.urlencode({'more': 'nm', 'q': name})
##params = urllib.urlencode({'nm': 'on', 'mx': str(results), 'q': name})
#params = 'q=%s&nm=on&mx=%s' % (quote_plus(name), str(results))
#cont = self._retrieve(imdbURL_find % params)
cont = self._get_search_content('nm', name, results)
return self.spProxy.search_person_parser.parse(cont, results=results)['data']
def get_person_main(self, personID):
cont = self._retrieve(self.urls['person_main'] % personID + 'maindetails')
ret = self.pProxy.maindetails_parser.parse(cont)
ret['info sets'] = ('main', 'filmography')
return ret
def get_person_filmography(self, personID):
return self.get_person_main(personID)
def get_person_biography(self, personID):
cont = self._retrieve(self.urls['person_main'] % personID + 'bio')
return self.pProxy.bio_parser.parse(cont, getRefs=self._getRefs)
def get_person_awards(self, personID):
cont = self._retrieve(self.urls['person_main'] % personID + 'awards')
return self.pProxy.person_awards_parser.parse(cont)
def get_person_other_works(self, personID):
cont = self._retrieve(self.urls['person_main'] % personID + 'otherworks')
return self.pProxy.otherworks_parser.parse(cont, getRefs=self._getRefs)
#def get_person_agent(self, personID):
# cont = self._retrieve(self.urls['person_main'] % personID + 'agent')
# return self.pProxy.agent_parser.parse(cont)
def get_person_publicity(self, personID):
cont = self._retrieve(self.urls['person_main'] % personID + 'publicity')
return self.pProxy.publicity_parser.parse(cont)
def get_person_official_sites(self, personID):
cont = self._retrieve(self.urls['person_main'] % personID + 'officialsites')
return self.pProxy.person_officialsites_parser.parse(cont)
def get_person_news(self, personID):
cont = self._retrieve(self.urls['person_main'] % personID + 'news')
return self.pProxy.news_parser.parse(cont)
def get_person_episodes(self, personID):
cont = self._retrieve(self.urls['person_main'] % personID + 'filmoseries')
return self.pProxy.person_series_parser.parse(cont)
def get_person_merchandising_links(self, personID):
cont = self._retrieve(self.urls['person_main'] % personID + 'forsale')
return self.pProxy.sales_parser.parse(cont)
def get_person_genres_links(self, personID):
cont = self._retrieve(self.urls['person_main'] % personID + 'filmogenre')
return self.pProxy.person_genres_parser.parse(cont)
def get_person_keywords_links(self, personID):
cont = self._retrieve(self.urls['person_main'] % personID + 'filmokey')
return self.pProxy.person_keywords_parser.parse(cont)
def _search_character(self, name, results):
cont = self._get_search_content('char', name, results)
return self.scProxy.search_character_parser.parse(cont, results=results)['data']
def get_character_main(self, characterID):
cont = self._retrieve(self.urls['character_main'] % characterID)
ret = self.cProxy.character_main_parser.parse(cont)
ret['info sets'] = ('main', 'filmography')
return ret
get_character_filmography = get_character_main
def get_character_biography(self, characterID):
cont = self._retrieve(self.urls['character_main'] % characterID + 'bio')
return self.cProxy.character_bio_parser.parse(cont,
getRefs=self._getRefs)
def get_character_episodes(self, characterID):
cont = self._retrieve(self.urls['character_main'] % characterID +
'filmoseries')
return self.cProxy.character_series_parser.parse(cont)
def get_character_quotes(self, characterID):
cont = self._retrieve(self.urls['character_main'] % characterID + 'quotes')
return self.cProxy.character_quotes_parser.parse(cont,
getRefs=self._getRefs)
def _search_company(self, name, results):
cont = self._get_search_content('co', name, results)
url = self.urlOpener._last_url
return self.scompProxy.search_company_parser.parse(cont, url=url,
results=results)['data']
def get_company_main(self, companyID):
cont = self._retrieve(self.urls['company_main'] % companyID)
ret = self.compProxy.company_main_parser.parse(cont)
return ret
def _search_keyword(self, keyword, results):
# XXX: the IMDb web server seems to have some serious problem with
# non-ascii keywords.
# E.g.: http://akas.imdb.com/keyword/fianc%E9/
# will return a 500 Internal Server Error: Redirect Recursion.
keyword = keyword.encode('utf8', 'ignore')
try:
cont = self._get_search_content('kw', keyword, results)
except IMDbDataAccessError:
self._http_logger.warn('unable to search for keyword %s', keyword,
exc_info=True)
return []
return self.skProxy.search_keyword_parser.parse(cont, results=results)['data']
def _get_keyword(self, keyword, results):
keyword = keyword.encode('utf8', 'ignore')
try:
cont = self._retrieve(self.urls['keyword_main'] % keyword)
except IMDbDataAccessError:
self._http_logger.warn('unable to get keyword %s', keyword,
exc_info=True)
return []
return self.skProxy.search_moviekeyword_parser.parse(cont, results=results)['data']
def _get_top_bottom_movies(self, kind):
if kind == 'top':
parser = self.topBottomProxy.top250_parser
url = self.urls['top250']
elif kind == 'bottom':
parser = self.topBottomProxy.bottom100_parser
url = self.urls['bottom100']
else:
return []
cont = self._retrieve(url)
return parser.parse(cont)['data']
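# --- Hedged usage sketch (not part of the original module) ---
# How this access system is normally reached through the public imdb.IMDb
# factory mentioned in the module docstring; it needs network access, so it
# is illustrative only and the helper name is made up.
def _example_http_usage(title='The Matrix'):
    from imdb import IMDb
    ia = IMDb(accessSystem='http')       # returns an IMDbHTTPAccessSystem
    results = ia.search_movie(title)     # ends up in _search_movie() above
    if not results:
        return None
    return ia.get_movie(results[0].movieID)  # ends up in get_movie_main() etc.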
|
|
from collections import Iterable
import copy
from numbers import Real, Integral
import sys
import numpy as np
from openmc import Mesh
from openmc.summary import Summary
import openmc.checkvalue as cv
if sys.version_info[0] >= 3:
basestring = str
_FILTER_TYPES = ['universe', 'material', 'cell', 'cellborn', 'surface',
'mesh', 'energy', 'energyout', 'mu', 'polar', 'azimuthal',
'distribcell', 'delayedgroup']
class Filter(object):
"""A filter used to constrain a tally to a specific criterion, e.g. only
tally events when the particle is in a certain cell and energy range.
Parameters
----------
type : str
The type of the tally filter. Acceptable values are "universe",
"material", "cell", "cellborn", "surface", "mesh", "energy",
"energyout", and "distribcell".
bins : Integral or Iterable of Integral or Iterable of Real
The bins for the filter. This takes on different meaning for different
filters. See the OpenMC online documentation for more details.
Attributes
----------
type : str
The type of the tally filter.
bins : Integral or Iterable of Integral or Iterable of Real
The bins for the filter
num_bins : Integral
The number of filter bins
mesh : Mesh or None
A Mesh object for 'mesh' type filters.
offset : Integral
A value used to index tally bins for 'distribcell' tallies.
stride : Integral
The number of filter, nuclide and score bins within each of this
filter's bins.
"""
# Initialize Filter class attributes
def __init__(self, type=None, bins=None):
self._type = None
self._num_bins = 0
self._bins = None
self._mesh = None
self._offset = -1
self._stride = None
if type is not None:
self.type = type
if bins is not None:
self.bins = bins
def __eq__(self, other):
if not isinstance(other, Filter):
return False
elif self.type != other.type:
return False
elif len(self.bins) != len(other.bins):
return False
elif not np.allclose(self.bins, other.bins):
return False
else:
return True
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(repr(self))
def __deepcopy__(self, memo):
existing = memo.get(id(self))
# If this is the first time we have tried to copy this object, create a copy
if existing is None:
clone = type(self).__new__(type(self))
clone._type = self.type
clone._bins = copy.deepcopy(self.bins, memo)
clone._num_bins = self.num_bins
clone._mesh = copy.deepcopy(self.mesh, memo)
clone._offset = self.offset
clone._stride = self.stride
memo[id(self)] = clone
return clone
# If this object has been copied before, return the first copy made
else:
return existing
def __repr__(self):
string = 'Filter\n'
string += '{0: <16}{1}{2}\n'.format('\tType', '=\t', self.type)
string += '{0: <16}{1}{2}\n'.format('\tBins', '=\t', self.bins)
string += '{0: <16}{1}{2}\n'.format('\tOffset', '=\t', self.offset)
return string
@property
def type(self):
return self._type
@property
def bins(self):
return self._bins
@property
def num_bins(self):
if self.bins is None:
return 0
elif self.type in ['energy', 'energyout']:
return len(self.bins) - 1
elif self.type in ['cell', 'cellborn', 'surface', 'universe', 'material']:
return len(self.bins)
else:
return self._num_bins
@property
def mesh(self):
return self._mesh
@property
def offset(self):
return self._offset
@property
def stride(self):
return self._stride
@type.setter
def type(self, type):
if type is None:
self._type = type
elif type not in _FILTER_TYPES:
msg = 'Unable to set Filter type to "{0}" since it is not one ' \
'of the supported types'.format(type)
raise ValueError(msg)
self._type = type
@bins.setter
def bins(self, bins):
if self.type is None:
msg = 'Unable to set bins for Filter to "{0}" since ' \
'the Filter type has not yet been set'.format(bins)
raise ValueError(msg)
# If the bin edge is a single value, it is a Cell, Material, etc. ID
if not isinstance(bins, Iterable):
bins = [bins]
# If the bins are in a collection, convert it to a list
else:
bins = list(bins)
if self.type in ['cell', 'cellborn', 'surface', 'material',
'universe', 'distribcell', 'delayedgroup']:
cv.check_iterable_type('filter bins', bins, Integral)
for edge in bins:
cv.check_greater_than('filter bin', edge, 0, equality=True)
elif self.type in ['energy', 'energyout']:
for edge in bins:
if not cv._isinstance(edge, Real):
msg = 'Unable to add bin edge "{0}" to a "{1}" Filter ' \
'since it is not an integer or floating point ' \
'value'.format(edge, self.type)
raise ValueError(msg)
elif edge < 0.:
msg = 'Unable to add bin edge "{0}" to a "{1}" Filter ' \
'since it is a negative value'.format(edge, self.type)
raise ValueError(msg)
# Check that bin edges are monotonically increasing
for index in range(len(bins)):
if index > 0 and bins[index] < bins[index-1]:
msg = 'Unable to add bin edges "{0}" to a "{1}" Filter ' \
'since they are not monotonically ' \
'increasing'.format(bins, self.type)
raise ValueError(msg)
# mesh filters
elif self.type == 'mesh':
if not len(bins) == 1:
msg = 'Unable to add bins "{0}" to a mesh Filter since ' \
'only a single mesh can be used per tally'.format(bins)
raise ValueError(msg)
elif not cv._isinstance(bins[0], Integral):
msg = 'Unable to add bin "{0}" to mesh Filter since it ' \
'is a non-integer'.format(bins[0])
raise ValueError(msg)
elif bins[0] < 0:
msg = 'Unable to add bin "{0}" to mesh Filter since it ' \
'is a negative integer'.format(bins[0])
raise ValueError(msg)
# If all error checks passed, add bin edges
self._bins = np.array(bins)
@num_bins.setter
def num_bins(self, num_bins):
cv.check_type('filter num_bins', num_bins, Integral)
cv.check_greater_than('filter num_bins', num_bins, 0, equality=True)
self._num_bins = num_bins
@mesh.setter
def mesh(self, mesh):
cv.check_type('filter mesh', mesh, Mesh)
self._mesh = mesh
self.type = 'mesh'
self.bins = self.mesh.id
@offset.setter
def offset(self, offset):
cv.check_type('filter offset', offset, Integral)
self._offset = offset
@stride.setter
def stride(self, stride):
cv.check_type('filter stride', stride, Integral)
if stride < 0:
msg = 'Unable to set stride "{0}" for a "{1}" Filter since it ' \
'is a negative value'.format(stride, self.type)
raise ValueError(msg)
self._stride = stride
def can_merge(self, filter):
"""Determine if filter can be merged with another.
Parameters
----------
filter : Filter
Filter to compare with
Returns
-------
bool
Whether the filter can be merged
"""
if not isinstance(filter, Filter):
return False
# Filters must be of the same type
elif self.type != filter.type:
return False
# Distribcell filters cannot have more than one bin
elif self.type == 'distribcell':
return False
# Mesh filters cannot have more than one bin
elif self.type == 'mesh':
return False
# Different energy bins are not mergeable
elif 'energy' in self.type:
return False
else:
return True
def merge(self, filter):
"""Merge this filter with another.
Parameters
----------
filter : Filter
Filter to merge with
Returns
-------
merged_filter : Filter
Filter resulting from the merge
"""
if not self.can_merge(filter):
msg = 'Unable to merge "{0}" with "{1}" ' \
'filters'.format(self.type, filter.type)
raise ValueError(msg)
# Create deep copy of filter to return as merged filter
merged_filter = copy.deepcopy(self)
# Merge unique filter bins
merged_bins = list(set(np.concatenate((self.bins, filter.bins))))
merged_filter.bins = merged_bins
merged_filter.num_bins = len(merged_bins)
return merged_filter
def is_subset(self, other):
"""Determine if another filter is a subset of this filter.
If all of the bins in the other filter are included as bins in this
filter, then it is a subset of this filter.
Parameters
----------
other : Filter
The filter to query as a subset of this filter
Returns
-------
bool
Whether or not the other filter is a subset of this filter
"""
if not isinstance(other, Filter):
return False
elif self.type != other.type:
return False
elif self.type in ['energy', 'energyout']:
if len(self.bins) != len(other.bins):
return False
else:
return np.allclose(self.bins, other.bins)
for bin in other.bins:
if bin not in self.bins:
return False
return True
def get_bin_index(self, filter_bin):
"""Returns the index in the Filter for some bin.
Parameters
----------
filter_bin : Integral or tuple
The bin is the integer ID for 'material', 'surface', 'cell',
'cellborn', and 'universe' Filters. The bin is an integer for the
cell instance ID for 'distribcell' Filters. The bin is a 2-tuple of
floats for 'energy' and 'energyout' filters corresponding to the
energy boundaries of the bin of interest. The bin is an (x,y,z)
3-tuple for 'mesh' filters corresponding to the mesh cell
of interest.
Returns
-------
filter_index : Integral
The index in the Tally data array for this filter bin.
See also
--------
Filter.get_bin()
"""
try:
# Filter bins for a mesh are an (x,y,z) tuple
if self.type == 'mesh':
# Convert (x,y,z) to a single bin -- this is similar to
# subroutine mesh_indices_to_bin in openmc/src/mesh.F90.
if (len(self.mesh.dimension) == 3):
nx, ny, nz = self.mesh.dimension
val = (filter_bin[0] - 1) * ny * nz + \
(filter_bin[1] - 1) * nz + \
(filter_bin[2] - 1)
else:
nx, ny = self.mesh.dimension
val = (filter_bin[0] - 1) * ny + \
(filter_bin[1] - 1)
filter_index = val
# Use the upper energy bound to locate its edge index, then subtract one to get the bin index for energy Filters
elif self.type in ['energy', 'energyout']:
deltas = np.abs(self.bins - filter_bin[1]) / filter_bin[1]
min_delta = np.min(deltas)
if min_delta < 1E-3:
filter_index = deltas.argmin() - 1
else:
raise ValueError
# Filter bins for distribcells are "IDs" of each unique placement
# of the Cell in the Geometry (integers starting at 0)
elif self.type == 'distribcell':
filter_index = filter_bin
# Use ID for all other Filters (e.g., material, cell, etc.)
else:
val = np.where(self.bins == filter_bin)[0][0]
filter_index = val
except ValueError:
msg = 'Unable to get the bin index for Filter since "{0}" ' \
'is not one of the bins'.format(filter_bin)
raise ValueError(msg)
return filter_index
def get_bin(self, bin_index):
"""Returns the filter bin for some filter bin index.
Parameters
----------
bin_index : Integral
The zero-based index into the filter's array of bins. The bin
index for 'material', 'surface', 'cell', 'cellborn', and 'universe'
filters corresponds to the ID in the filter's list of bins. For
'distribcell' tallies the bin index can only be zero
since only one cell can be tracked per tally. The bin index for
'energy' and 'energyout' filters corresponds to the energy range of
interest within the filter's energy bin edges. The bin index for 'mesh'
filters is the index into the flattened array of (x,y) or (x,y,z)
mesh cell bins.
Returns
-------
bin : 1-, 2-, or 3-tuple of Real
The bin in the Tally data array. The bin for 'material', 'surface',
'cell', 'cellborn', 'universe' and 'distribcell' filters is a
1-tuple of the ID corresponding to the appropriate filter bin.
The bin for 'energy' and 'energyout' filters is a 2-tuple of the
lower and upper energies bounding the energy interval for the filter
bin. The bin for 'mesh' tallies is a 2-tuple or 3-tuple of the x,y
or x,y,z mesh cell indices corresponding to the bin in a 2D/3D mesh.
See also
--------
Filter.get_bin_index()
"""
cv.check_type('bin_index', bin_index, Integral)
cv.check_greater_than('bin_index', bin_index, 0, equality=True)
cv.check_less_than('bin_index', bin_index, self.num_bins)
if self.type == 'mesh':
# Construct 3-tuple of x,y,z cell indices for a 3D mesh
if len(self.mesh.dimension) == 3:
nx, ny, nz = self.mesh.dimension
x = bin_index // (ny * nz)
y = (bin_index - (x * ny * nz)) // nz
z = bin_index - (x * ny * nz) - (y * nz)
filter_bin = (x, y, z)
# Construct 2-tuple of x,y cell indices for a 2D mesh
else:
nx, ny = self.mesh.dimension
x = bin_index // ny
y = bin_index - (x * ny)
filter_bin = (x, y)
# Construct 2-tuple of lower, upper energies for energy(out) filters
elif self.type in ['energy', 'energyout']:
filter_bin = (self.bins[bin_index], self.bins[bin_index+1])
# Construct 1-tuple of with the cell ID for distribcell filters
elif self.type == 'distribcell':
filter_bin = (self.bins[0],)
# Construct 1-tuple with domain ID (e.g., material) for other filters
else:
filter_bin = (self.bins[bin_index],)
return filter_bin
def get_pandas_dataframe(self, data_size, summary=None):
"""Builds a Pandas DataFrame for the Filter's bins.
This method constructs a Pandas DataFrame object for the filter with
columns annotated by filter bin information. This is a helper method
for the Tally.get_pandas_dataframe(...) method.
This capability has been tested for Pandas >=0.13.1. However, it is
recommended to use v0.16 or newer versions of Pandas since this method
uses Pandas' Multi-index functionality.
Parameters
----------
data_size : Integral
The total number of bins in the tally corresponding to this filter
summary : None or Summary
An optional Summary object to be used to construct columns for
distribcell tally filters (default is None). The geometric
information in the Summary object is embedded into a Multi-index
column with a geometric "path" to each distribcell instance.
NOTE: This option requires the OpenCG Python package.
Returns
-------
pandas.DataFrame
A Pandas DataFrame with columns of strings that characterize the
filter's bins. The number of rows in the DataFrame is the same as
the total number of bins in the corresponding tally, with the filter
bin appropriately tiled to map to the corresponding tally bins.
For 'cell', 'cellborn', 'surface', 'material', and 'universe'
filters, the DataFrame includes a single column with the cell,
surface, material or universe ID corresponding to each filter bin.
For 'distribcell' filters, the DataFrame either includes:
1. a single column with the cell instance IDs (without summary info)
2. separate columns for the cell IDs, universe IDs, and lattice IDs
and x,y,z cell indices corresponding to each (with summary info).
For 'energy' and 'energyout' filters, the DataFrame includes a single
column with each element comprising a string with the lower, upper
energy bounds for each filter bin.
For 'mesh' filters, the DataFrame includes three columns for the
x,y,z mesh cell indices corresponding to each filter bin.
Raises
------
ImportError
When Pandas is not installed, or summary info is requested but
OpenCG is not installed.
See also
--------
Tally.get_pandas_dataframe(), CrossFilter.get_pandas_dataframe()
"""
# Attempt to import Pandas
try:
import pandas as pd
except ImportError:
msg = 'The Pandas Python package must be installed on your system'
raise ImportError(msg)
# Initialize Pandas DataFrame
df = pd.DataFrame()
# mesh filters
if self.type == 'mesh':
# Initialize dictionary to build Pandas Multi-index column
filter_dict = {}
# Append Mesh ID as the outermost index of the multi-index
mesh_key = 'mesh {0}'.format(self.mesh.id)
# Find mesh dimensions - use 3D indices for simplicity
if (len(self.mesh.dimension) == 3):
nx, ny, nz = self.mesh.dimension
else:
nx, ny = self.mesh.dimension
nz = 1
# Generate multi-index sub-column for x-axis
filter_bins = np.arange(1, nx+1)
repeat_factor = ny * nz * self.stride
filter_bins = np.repeat(filter_bins, repeat_factor)
tile_factor = data_size / len(filter_bins)
filter_bins = np.tile(filter_bins, tile_factor)
filter_dict[(mesh_key, 'x')] = filter_bins
# Generate multi-index sub-column for y-axis
filter_bins = np.arange(1, ny+1)
repeat_factor = nz * self.stride
filter_bins = np.repeat(filter_bins, repeat_factor)
tile_factor = data_size / len(filter_bins)
filter_bins = np.tile(filter_bins, tile_factor)
filter_dict[(mesh_key, 'y')] = filter_bins
# Generate multi-index sub-column for z-axis
filter_bins = np.arange(1, nz+1)
repeat_factor = self.stride
filter_bins = np.repeat(filter_bins, repeat_factor)
tile_factor = data_size / len(filter_bins)
filter_bins = np.tile(filter_bins, tile_factor)
filter_dict[(mesh_key, 'z')] = filter_bins
# Initialize a Pandas DataFrame from the mesh dictionary
df = pd.concat([df, pd.DataFrame(filter_dict)])
# distribcell filters
elif self.type == 'distribcell':
level_df = None
if isinstance(summary, Summary):
# Attempt to import the OpenCG package
try:
import opencg
except ImportError:
msg = 'The OpenCG package must be installed ' \
'to use a Summary for distribcell dataframes'
raise ImportError(msg)
# Extract the OpenCG geometry from the Summary
opencg_geometry = summary.opencg_geometry
openmc_geometry = summary.openmc_geometry
# Use OpenCG to compute the number of regions
opencg_geometry.initialize_cell_offsets()
num_regions = opencg_geometry.num_regions
# Initialize a dictionary mapping OpenMC distribcell
# offsets to OpenCG LocalCoords linked lists
offsets_to_coords = {}
# Use OpenCG to compute LocalCoords linked list for
# each region and store in dictionary
for region in range(num_regions):
coords = opencg_geometry.find_region(region)
path = opencg.get_path(coords)
cell_id = path[-1]
# If this region is in Cell corresponding to the
# distribcell filter bin, store it in dictionary
if cell_id == self.bins[0]:
offset = openmc_geometry.get_offset(path, self.offset)
offsets_to_coords[offset] = coords
# Each distribcell offset is a DataFrame bin
# Unravel the paths into DataFrame columns
num_offsets = len(offsets_to_coords)
# Initialize termination condition for while loop
levels_remain = True
counter = 0
# Iterate over each level in the CSG tree hierarchy
while levels_remain:
levels_remain = False
# Initialize dictionary to build Pandas Multi-index
# column for this level in the CSG tree hierarchy
level_dict = {}
# Initialize prefix Multi-index keys
counter += 1
level_key = 'level {0}'.format(counter)
univ_key = (level_key, 'univ', 'id')
cell_key = (level_key, 'cell', 'id')
lat_id_key = (level_key, 'lat', 'id')
lat_x_key = (level_key, 'lat', 'x')
lat_y_key = (level_key, 'lat', 'y')
lat_z_key = (level_key, 'lat', 'z')
# Allocate NumPy arrays for each CSG level and
# each Multi-index column in the DataFrame
level_dict[univ_key] = np.empty(num_offsets)
level_dict[cell_key] = np.empty(num_offsets)
level_dict[lat_id_key] = np.empty(num_offsets)
level_dict[lat_x_key] = np.empty(num_offsets)
level_dict[lat_y_key] = np.empty(num_offsets)
level_dict[lat_z_key] = np.empty(num_offsets)
# Initialize Multi-index columns to NaN - this is
# necessary since some distribcell instances may
# have very different LocalCoords linked lists
level_dict[univ_key][:] = np.NAN
level_dict[cell_key][:] = np.NAN
level_dict[lat_id_key][:] = np.NAN
level_dict[lat_x_key][:] = np.NAN
level_dict[lat_y_key][:] = np.NAN
level_dict[lat_z_key][:] = np.NAN
# Iterate over all regions (distribcell instances)
for offset in range(num_offsets):
coords = offsets_to_coords[offset]
# If entire LocalCoords has been unraveled into
# Multi-index columns already, continue
if coords is None:
continue
# Assign entry to Universe Multi-index column
if coords._type == 'universe':
level_dict[univ_key][offset] = coords._universe._id
level_dict[cell_key][offset] = coords._cell._id
# Assign entry to Lattice Multi-index column
else:
level_dict[lat_id_key][offset] = coords._lattice._id
level_dict[lat_x_key][offset] = coords._lat_x
level_dict[lat_y_key][offset] = coords._lat_y
level_dict[lat_z_key][offset] = coords._lat_z
# Move to next node in LocalCoords linked list
if coords._next is None:
offsets_to_coords[offset] = None
else:
offsets_to_coords[offset] = coords._next
levels_remain = True
# Tile the Multi-index columns
for level_key, level_bins in level_dict.items():
level_bins = np.repeat(level_bins, self.stride)
tile_factor = data_size / len(level_bins)
level_bins = np.tile(level_bins, tile_factor)
level_dict[level_key] = level_bins
# Initialize a Pandas DataFrame from the level dictionary
if level_df is None:
level_df = pd.DataFrame(level_dict)
else:
level_df = pd.concat([level_df, pd.DataFrame(level_dict)], axis=1)
# Create DataFrame column for distribcell instance IDs
# NOTE: This is performed regardless of whether the user
# requests Summary geometric information
filter_bins = np.arange(self.num_bins)
filter_bins = np.repeat(filter_bins, self.stride)
tile_factor = data_size / len(filter_bins)
filter_bins = np.tile(filter_bins, tile_factor)
filter_bins = filter_bins
df = pd.DataFrame({self.type : filter_bins})
# If OpenCG level info DataFrame was created, concatenate
# with DataFrame of distribcell instance IDs
if level_df is not None:
level_df = level_df.dropna(axis=1, how='all')
level_df = level_df.astype(np.int)
df = pd.concat([level_df, df], axis=1)
# energy, energyout filters
elif 'energy' in self.type:
bins = self.bins
num_bins = self.num_bins
# Create string labels for the energy bin intervals
template = '({0:.1e} - {1:.1e})'
filter_bins = []
for i in range(num_bins):
filter_bins.append(template.format(bins[i], bins[i+1]))
# Tile the energy bins into a DataFrame column
filter_bins = np.repeat(filter_bins, self.stride)
tile_factor = data_size // len(filter_bins)
filter_bins = np.tile(filter_bins, tile_factor)
df = pd.concat([df, pd.DataFrame({self.type + ' [MeV]' : filter_bins})])
# universe, material, surface, cell, and cellborn filters
else:
filter_bins = np.repeat(self.bins, self.stride)
tile_factor = data_size // len(filter_bins)
filter_bins = np.tile(filter_bins, tile_factor)
df = pd.concat([df, pd.DataFrame({self.type : filter_bins})])
return df
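# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original tally module): how np.repeat
# and np.tile expand a small set of filter bins onto every row of the tally
# DataFrame. The bin IDs, stride and data size below are made-up values.
import numpy as np
example_bins = np.array([1, 2, 3])                   # hypothetical bin IDs
stride = 2                                           # rows per bin per tile
data_size = 12                                       # total DataFrame rows
column = np.repeat(example_bins, stride)             # -> [1 1 2 2 3 3]
column = np.tile(column, data_size // len(column))   # -> 12 entries
print(column)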
|
|
"""
.. _tut_erp:
EEG processing and Event Related Potentials (ERPs)
==================================================
.. contents:: Here we cover the specifics of EEG, namely:
:local:
:depth: 1
"""
import mne
from mne.datasets import sample
from mne.channels import combine_channels
###############################################################################
# Setup for reading the raw data
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
raw = mne.io.read_raw_fif(raw_fname)
###############################################################################
# Let's restrict the data to the EEG channels
raw.pick_types(meg=False, eeg=True, eog=True).load_data()
# This particular dataset already has an average reference projection added
# that we now want to remove for the sake of this example.
raw.set_eeg_reference([])
###############################################################################
# By looking at the measurement info you will see that we now have
# 59 EEG channels and 1 EOG channel
print(raw.info)
###############################################################################
# In practice it's quite common to have some EEG channels that are actually
# EOG channels. To change a channel type you can use the
# :func:`mne.io.Raw.set_channel_types` method. For example
# to treat an EOG channel as EEG you can change its type using
raw.set_channel_types(mapping={'EOG 061': 'eeg'})
print(raw.info)
###############################################################################
# And to change the name of the EOG channel
raw.rename_channels(mapping={'EOG 061': 'EOG'})
###############################################################################
# Let's reset the EOG channel back to EOG type.
raw.set_channel_types(mapping={'EOG': 'eog'})
###############################################################################
# The EEG channels in the sample dataset already have locations.
# These locations are available in the 'loc' of each channel description.
# For the first channel we get
print(raw.info['chs'][0]['loc'])
###############################################################################
# And it's actually possible to plot the channel locations using
# :func:`mne.io.Raw.plot_sensors`.
# In the case where your data don't have locations you can use one of the
# standard :class:`Montages <mne.channels.DigMontage>` shipped with MNE.
# See :ref:`plot_montage` and :ref:`tut-eeg-fsaverage-source-modeling`.
raw.plot_sensors()
raw.plot_sensors('3d') # in 3D
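###############################################################################
# As a sketch of that fallback (not needed here, and shown only as an
# illustration since this dataset's channel names would not match a standard
# montage), a template montage could be applied like this::
#
#     raw.set_montage(mne.channels.make_standard_montage('standard_1020'))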
###############################################################################
# Setting EEG reference
# ---------------------
#
# Let's first inspect our Raw object with its original reference that was
# applied during the recording of the data.
# We define Epochs and compute an ERP for the left auditory condition.
reject = dict(eeg=180e-6, eog=150e-6)
event_id, tmin, tmax = {'left/auditory': 1}, -0.2, 0.5
events = mne.read_events(event_fname)
epochs_params = dict(events=events, event_id=event_id, tmin=tmin, tmax=tmax,
reject=reject)
evoked_no_ref = mne.Epochs(raw, **epochs_params).average()
title = 'EEG Original reference'
evoked_no_ref.plot(titles=dict(eeg=title), time_unit='s')
evoked_no_ref.plot_topomap(times=[0.1], size=3., title=title, time_unit='s')
###############################################################################
# **Common average reference (car)**: We add back the average reference
# projection that we removed at the beginning of this example (right after
# loading the data).
raw_car, _ = mne.set_eeg_reference(raw, 'average', projection=True)
evoked_car = mne.Epochs(raw_car, **epochs_params).average()
del raw_car # save memory
title = 'EEG Average reference'
evoked_car.plot(titles=dict(eeg=title), time_unit='s')
evoked_car.plot_topomap(times=[0.1], size=3., title=title, time_unit='s')
###############################################################################
# **Custom reference**: Use the mean of channels EEG 001 and EEG 002 as
# a reference
raw_custom, _ = mne.set_eeg_reference(raw, ['EEG 001', 'EEG 002'])
evoked_custom = mne.Epochs(raw_custom, **epochs_params).average()
del raw_custom # save memory
title = 'EEG Custom reference'
evoked_custom.plot(titles=dict(eeg=title), time_unit='s')
evoked_custom.plot_topomap(times=[0.1], size=3., title=title, time_unit='s')
###############################################################################
# Evoked response averaged across channels by ROI
# -----------------------------------------------
#
# It is possible to average channels by region of interest (for example left
# and right) when studying the response to this left auditory stimulus. Here we
# use our Raw object on which the average reference projection has been added
# back.
evoked = mne.Epochs(raw, **epochs_params).average()
left_idx = mne.pick_channels(evoked.info['ch_names'],
['EEG 017', 'EEG 018', 'EEG 025', 'EEG 026'])
right_idx = mne.pick_channels(evoked.info['ch_names'],
['EEG 023', 'EEG 024', 'EEG 034', 'EEG 035'])
roi_dict = dict(Left=left_idx, Right=right_idx)
evoked_combined = combine_channels(evoked, roi_dict, method='mean')
title = 'Evoked response averaged by side'
evoked_combined.plot(titles=dict(eeg=title), time_unit='s')
###############################################################################
# Evoked arithmetic (e.g. differences)
# ------------------------------------
#
# Trial subsets from Epochs can be selected using 'tags' separated by '/'.
# Evoked objects support basic arithmetic.
# First, we create an Epochs object containing 4 conditions.
event_id = {'left/auditory': 1, 'right/auditory': 2,
'left/visual': 3, 'right/visual': 4}
epochs_params = dict(events=events, event_id=event_id, tmin=tmin, tmax=tmax,
reject=reject)
epochs = mne.Epochs(raw, **epochs_params)
print(epochs)
###############################################################################
# Next, we create averages of stimulation-left vs stimulation-right trials.
# We can use negative weights in `mne.combine_evoked` to construct difference
# ERPs.
left, right = epochs["left"].average(), epochs["right"].average()
# create and plot difference ERP
joint_kwargs = dict(ts_args=dict(time_unit='s'),
topomap_args=dict(time_unit='s'))
mne.combine_evoked([left, right], weights=[1, -1]).plot_joint(**joint_kwargs)
###############################################################################
# This is an equal-weighting difference. If you have imbalanced trial numbers,
# you could also consider either equalizing the number of events per
# condition (using
# `epochs.equalize_event_counts <mne.Epochs.equalize_event_counts>`) or
# using weights proportional to the number of trials averaged together to create
# each `~mne.Evoked` (by passing ``weights='nave'`` to `~mne.combine_evoked`).
# As an example, first, we create individual ERPs for each condition.
aud_l = epochs["auditory/left"].average()
aud_r = epochs["auditory/right"].average()
vis_l = epochs["visual/left"].average()
vis_r = epochs["visual/right"].average()
all_evokeds = [aud_l, aud_r, vis_l, vis_r]
print(all_evokeds)
###############################################################################
# This can be simplified with a Python list comprehension:
all_evokeds = [epochs[cond].average() for cond in sorted(event_id.keys())]
print(all_evokeds)
# Then, we can construct and plot an equal-weighted (rather than nave-weighted)
# difference of left vs. right trials this way, too:
mne.combine_evoked(
all_evokeds, weights=[0.5, 0.5, -0.5, -0.5]).plot_joint(**joint_kwargs)
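###############################################################################
# As a sketch of the nave-weighted alternative mentioned above, weights
# proportional to the number of averaged trials can be requested directly;
# note that this yields a weighted grand average rather than a difference wave.
print(mne.combine_evoked(all_evokeds, weights='nave'))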
###############################################################################
# Often, it makes sense to store Evoked objects in a dictionary or a list -
# either different conditions, or different subjects.
# If they are stored in a list, they can be easily averaged, for example,
# for a grand average across subjects (or conditions).
grand_average = mne.grand_average(all_evokeds)
mne.write_evokeds('/tmp/tmp-ave.fif', all_evokeds)
# If Evokeds objects are stored in a dictionary, they can be retrieved by name.
all_evokeds = dict((cond, epochs[cond].average()) for cond in event_id)
print(all_evokeds['left/auditory'])
# Besides explicit access, this can be used, for example, to set titles.
for cond in all_evokeds:
all_evokeds[cond].plot_joint(title=cond, **joint_kwargs)
|
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A library of helpers for custom decoding."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.contrib import seq2seq as contrib_seq2seq
__all__ = [
"ContinuousEmbeddingTrainingHelper",
"ScheduledContinuousEmbeddingTrainingHelper",
"GreedyContinuousEmbeddingHelper",
"FixedContinuousEmbeddingHelper",
]
def _unstack_ta(inp):
return tf.TensorArray(
dtype=inp.dtype, size=tf.shape(inp)[0],
element_shape=inp.get_shape()[1:]).unstack(inp)
class ContinuousEmbeddingTrainingHelper(contrib_seq2seq.TrainingHelper):
"""Regards previous outputs as the next input embeddings. Avoids sampling.
By doing so, the decoded sequences are differentiable throughout.
Returned sample_ids are the argmax of the RNN output logits.
"""
def sample(self, time, outputs, state, name=None):
with tf.name_scope(name, "TrainingHelperSample", [time, outputs, state]):
if isinstance(state, tuple):
# TODO(alshedivat): fix the if statement as it works only with GNMT.
sample_ids = tf.cast(tf.argmax(outputs, axis=-1), tf.int32)
else:
sample_ids = tf.cast(tf.argmax(state.alignments, axis=-1), tf.int32)
return sample_ids
def next_inputs(self, time, outputs, state, sample_ids, name=None):
del sample_ids # Unused.
with tf.name_scope(name, "TrainingHelperNextInputs", [time, outputs]):
next_time = time + 1
finished = (next_time >= self._sequence_length)
all_finished = tf.reduce_all(finished)
next_inputs = tf.cond(
all_finished, lambda: self._zero_inputs, lambda: outputs)
return finished, next_inputs, state
class ScheduledContinuousEmbeddingTrainingHelper(contrib_seq2seq.TrainingHelper):
"""Training helper that constructs next inputs using scheduled mixing.
The helper mixes previous outputs with the true ground-truth embeddings for the
previous time step using `sampling_probability` as the mixing weight for the
ground truth, i.e.:
next_inputs = weight * ground_truth + (1 - weight) * generated
"""
def __init__(self, inputs, sequence_length, mixing_concentration=1.,
time_major=False, seed=None, scheduling_seed=None, name=None):
"""Initializer.
Args:
inputs: A (structure of) input tensors.
sequence_length: An int32 vector tensor.
mixing_concentration: <float32> [] for the alpha parameter of the
Dirichlet distribution used to sample mixing weights from [0, 1].
time_major: Python bool. Whether the tensors in `inputs` are time major.
If `False` (default), they are assumed to be batch major.
seed: The sampling seed.
scheduling_seed: The schedule decision rule sampling seed.
name: Name scope for any created operations.
Raises:
ValueError: if `mixing_concentration` is not a scalar or length-1 vector.
"""
with tf.name_scope(name, "ScheduledContinuousEmbedding",
[mixing_concentration]):
self._mixing_concentration = tf.convert_to_tensor(
mixing_concentration, name="mixing_concentration")
if self._mixing_concentration.get_shape().ndims == 0:
self._mixing_concentration = tf.expand_dims(self._mixing_concentration,
0)
if (self._mixing_concentration.get_shape().ndims != 1 or
self._mixing_concentration.get_shape().as_list()[0] > 1):
raise ValueError(
"mixing_concentration must be a scalar. saw shape: %s" %
(self._mixing_concentration.get_shape()))
self._seed = seed
self._scheduling_seed = scheduling_seed
super(ScheduledContinuousEmbeddingTrainingHelper, self).__init__(
inputs=inputs,
sequence_length=sequence_length,
time_major=time_major,
name=name)
def sample(self, time, outputs, state, name=None):
with tf.name_scope(name, "TrainingHelperSample", [time, outputs, state]):
if isinstance(state, tuple):
# TODO(alshedivat): fix the if statement as it works only with GNMT.
sample_ids = tf.cast(tf.argmax(outputs, axis=-1), tf.int32)
else:
sample_ids = tf.cast(tf.argmax(state.alignments, axis=-1), tf.int32)
return sample_ids
def next_inputs(self, time, outputs, state, sample_ids, name=None):
"""Compute the next inputs and state."""
del sample_ids # Unused.
with tf.name_scope(name, "ScheduledContinuousEmbeddingNextInputs",
[time, outputs, state]):
# Get ground truth next inputs.
(finished, base_next_inputs,
state) = contrib_seq2seq.TrainingHelper.next_inputs(
self, time, outputs, state, name=name)
# Get generated next inputs.
all_finished = tf.reduce_all(finished)
generated_next_inputs = tf.cond(
all_finished,
# If we're finished, the next_inputs value doesn't matter
lambda: outputs,
lambda: outputs)
# Sample mixing weights.
weight_sampler = tf.distributions.Dirichlet(
concentration=self._mixing_concentration)
weight = weight_sampler.sample(
sample_shape=self.batch_size, seed=self._scheduling_seed)
alpha, beta = weight, 1 - weight
# Mix the inputs.
next_inputs = alpha * base_next_inputs + beta * generated_next_inputs
return finished, next_inputs, state
class GreedyContinuousEmbeddingHelper(contrib_seq2seq.GreedyEmbeddingHelper):
"""Greedy decoding helper with continuous reuse of embeddings."""
def sample(self, time, outputs, state, name=None):
del time, name # Unused.
if isinstance(state, tuple):
# TODO(alshedivat): fix the if statement as it works only with GNMT.
sample_ids = tf.cast(tf.argmax(outputs, axis=-1), tf.int32)
else:
sample_ids = tf.cast(tf.argmax(state.alignments, axis=-1), tf.int32)
return sample_ids
def next_inputs(self, time, outputs, state, sample_ids, name=None):
del time, name # Unused.
finished = tf.equal(sample_ids, self._end_token)
all_finished = tf.reduce_all(finished)
next_inputs = tf.cond(
all_finished,
# If we're finished, the next_inputs value doesn't matter
lambda: self._start_inputs,
lambda: outputs)
return finished, next_inputs, state
class FixedContinuousEmbeddingHelper(contrib_seq2seq.GreedyEmbeddingHelper):
"""Decodes for a fixed number of steps and continuously reuses embeddings."""
def __init__(self, embedding, start_tokens, end_token, num_steps):
super(FixedContinuousEmbeddingHelper, self).__init__(
embedding, start_tokens, end_token)
self._num_steps = num_steps
def sample(self, time, outputs, state, name=None):
del time, name # Unused.
if isinstance(state, tuple):
# TODO(alshedivat): fix the if statement as it works only with GNMT.
sample_ids = tf.cast(tf.argmax(outputs, axis=-1), tf.int32)
else:
sample_ids = tf.cast(tf.argmax(state.alignments, axis=-1), tf.int32)
return sample_ids
def next_inputs(self, time, outputs, state, sample_ids, name=None):
"""Compute next inputs and state."""
del sample_ids, name # Unused.
next_time = time + 1
finished = (next_time >= self._num_steps)
all_finished = tf.reduce_all(finished)
next_inputs = tf.cond(
all_finished,
# If we're finished, the next_inputs value doesn't matter
lambda: self._start_inputs,
lambda: outputs)
return finished, next_inputs, state
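# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only; `cell`, `decoder_inputs`,
# `decoder_lengths` and `initial_state` are assumed to be defined elsewhere
# and are not part of this library):
#
#   helper = ScheduledContinuousEmbeddingTrainingHelper(
#       inputs=decoder_inputs,            # [batch, time, embedding_dim]
#       sequence_length=decoder_lengths,  # [batch]
#       mixing_concentration=1.0)
#   decoder = contrib_seq2seq.BasicDecoder(cell, helper, initial_state)
#   outputs, final_state, lengths = contrib_seq2seq.dynamic_decode(decoder)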
|
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
import netaddr
import os
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
import re
import six
from neutron.agent.common import utils
from neutron.common import constants
from neutron.common import exceptions
from neutron.i18n import _LE
LOG = logging.getLogger(__name__)
OPTS = [
cfg.BoolOpt('ip_lib_force_root',
default=False,
help=_('Force ip_lib calls to use the root helper')),
]
LOOPBACK_DEVNAME = 'lo'
SYS_NET_PATH = '/sys/class/net'
DEFAULT_GW_PATTERN = re.compile(r"via (\S+)")
METRIC_PATTERN = re.compile(r"metric (\S+)")
class AddressNotReady(exceptions.NeutronException):
message = _("Failure waiting for address %(address)s to "
"become ready: %(reason)s")
class SubProcessBase(object):
def __init__(self, namespace=None,
log_fail_as_error=True):
self.namespace = namespace
self.log_fail_as_error = log_fail_as_error
try:
self.force_root = cfg.CONF.ip_lib_force_root
except cfg.NoSuchOptError:
# Only callers that need to force use of the root helper
# need to register the option.
self.force_root = False
def _run(self, options, command, args):
if self.namespace:
return self._as_root(options, command, args)
elif self.force_root:
# Force use of the root helper to ensure that commands
# will execute in dom0 when running under XenServer/XCP.
return self._execute(options, command, args, run_as_root=True,
log_fail_as_error=self.log_fail_as_error)
else:
return self._execute(options, command, args,
log_fail_as_error=self.log_fail_as_error)
def _as_root(self, options, command, args, use_root_namespace=False):
namespace = self.namespace if not use_root_namespace else None
return self._execute(options, command, args, run_as_root=True,
namespace=namespace,
log_fail_as_error=self.log_fail_as_error)
@classmethod
def _execute(cls, options, command, args, run_as_root=False,
namespace=None, log_fail_as_error=True):
opt_list = ['-%s' % o for o in options]
ip_cmd = add_namespace_to_cmd(['ip'], namespace)
cmd = ip_cmd + opt_list + [command] + list(args)
return utils.execute(cmd, run_as_root=run_as_root,
log_fail_as_error=log_fail_as_error)
def set_log_fail_as_error(self, fail_with_error):
self.log_fail_as_error = fail_with_error
def get_log_fail_as_error(self):
return self.log_fail_as_error
class IPWrapper(SubProcessBase):
def __init__(self, namespace=None):
super(IPWrapper, self).__init__(namespace=namespace)
self.netns = IpNetnsCommand(self)
def device(self, name):
return IPDevice(name, namespace=self.namespace)
def get_devices(self, exclude_loopback=False):
retval = []
if self.namespace:
# we call out manually because in order to avoid screen scraping
# iproute2 we use find to see what is in the sysfs directory, as
# suggested by Stephen Hemminger (iproute2 dev).
output = utils.execute(['ip', 'netns', 'exec', self.namespace,
'find', SYS_NET_PATH, '-maxdepth', '1',
'-type', 'l', '-printf', '%f '],
run_as_root=True,
log_fail_as_error=self.log_fail_as_error
).split()
else:
output = (
i for i in os.listdir(SYS_NET_PATH)
if os.path.islink(os.path.join(SYS_NET_PATH, i))
)
for name in output:
if exclude_loopback and name == LOOPBACK_DEVNAME:
continue
retval.append(IPDevice(name, namespace=self.namespace))
return retval
def get_device_by_ip(self, ip):
"""Get the IPDevice from system which has ip configured."""
for device in self.get_devices():
if device.addr.list(to=ip):
return device
def add_tuntap(self, name, mode='tap'):
self._as_root([], 'tuntap', ('add', name, 'mode', mode))
return IPDevice(name, namespace=self.namespace)
def add_veth(self, name1, name2, namespace2=None):
args = ['add', name1, 'type', 'veth', 'peer', 'name', name2]
if namespace2 is None:
namespace2 = self.namespace
else:
self.ensure_namespace(namespace2)
args += ['netns', namespace2]
self._as_root([], 'link', tuple(args))
return (IPDevice(name1, namespace=self.namespace),
IPDevice(name2, namespace=namespace2))
def del_veth(self, name):
"""Delete a virtual interface between two namespaces."""
self._as_root([], 'link', ('del', name))
def add_dummy(self, name):
"""Create a Linux dummy interface with the given name."""
self._as_root([], 'link', ('add', name, 'type', 'dummy'))
return IPDevice(name, namespace=self.namespace)
def ensure_namespace(self, name):
if not self.netns.exists(name):
ip = self.netns.add(name)
lo = ip.device(LOOPBACK_DEVNAME)
lo.link.set_up()
else:
ip = IPWrapper(namespace=name)
return ip
def namespace_is_empty(self):
return not self.get_devices(exclude_loopback=True)
def garbage_collect_namespace(self):
"""Conditionally destroy the namespace if it is empty."""
if self.namespace and self.netns.exists(self.namespace):
if self.namespace_is_empty():
self.netns.delete(self.namespace)
return True
return False
def add_device_to_namespace(self, device):
if self.namespace:
device.link.set_netns(self.namespace)
def add_vxlan(self, name, vni, group=None, dev=None, ttl=None, tos=None,
local=None, port=None, proxy=False):
cmd = ['add', name, 'type', 'vxlan', 'id', vni]
if group:
cmd.extend(['group', group])
if dev:
cmd.extend(['dev', dev])
if ttl:
cmd.extend(['ttl', ttl])
if tos:
cmd.extend(['tos', tos])
if local:
cmd.extend(['local', local])
if proxy:
cmd.append('proxy')
# tuple: min,max
if port and len(port) == 2:
cmd.extend(['port', port[0], port[1]])
elif port:
raise exceptions.NetworkVxlanPortRangeError(vxlan_range=port)
self._as_root([], 'link', cmd)
return (IPDevice(name, namespace=self.namespace))
@classmethod
def get_namespaces(cls):
output = cls._execute([], 'netns', ('list',))
return [l.split()[0] for l in output.splitlines()]
class IPDevice(SubProcessBase):
def __init__(self, name, namespace=None):
super(IPDevice, self).__init__(namespace=namespace)
self.name = name
self.link = IpLinkCommand(self)
self.addr = IpAddrCommand(self)
self.route = IpRouteCommand(self)
self.neigh = IpNeighCommand(self)
def __eq__(self, other):
return (other is not None and self.name == other.name
and self.namespace == other.namespace)
def __str__(self):
return self.name
def exists(self):
"""Return True if the device exists in the namespace."""
# we must save and restore this before returning
orig_log_fail_as_error = self.get_log_fail_as_error()
self.set_log_fail_as_error(False)
try:
return bool(self.link.address)
except RuntimeError:
return False
finally:
self.set_log_fail_as_error(orig_log_fail_as_error)
def delete_addr_and_conntrack_state(self, cidr):
"""Delete an address along with its conntrack state
This terminates any active connections through an IP.
:param cidr: the IP address for which state should be removed.
This can be passed as a string with or without /NN.
A netaddr.IPAddress or netaddr.Network representing the IP address
can also be passed.
"""
self.addr.delete(cidr)
ip_str = str(netaddr.IPNetwork(cidr).ip)
ip_wrapper = IPWrapper(namespace=self.namespace)
# Delete conntrack state for ingress traffic
# If 0 flow entries have been deleted
# conntrack -D will return 1
try:
ip_wrapper.netns.execute(["conntrack", "-D", "-d", ip_str],
check_exit_code=True,
extra_ok_codes=[1])
except RuntimeError:
LOG.exception(_LE("Failed deleting ingress connection state of"
" floatingip %s"), ip_str)
# Delete conntrack state for egress traffic
try:
ip_wrapper.netns.execute(["conntrack", "-D", "-q", ip_str],
check_exit_code=True,
extra_ok_codes=[1])
except RuntimeError:
LOG.exception(_LE("Failed deleting egress connection state of"
" floatingip %s"), ip_str)
class IpCommandBase(object):
COMMAND = ''
def __init__(self, parent):
self._parent = parent
def _run(self, options, args):
return self._parent._run(options, self.COMMAND, args)
def _as_root(self, options, args, use_root_namespace=False):
return self._parent._as_root(options,
self.COMMAND,
args,
use_root_namespace=use_root_namespace)
class IPRule(SubProcessBase):
def __init__(self, namespace=None):
super(IPRule, self).__init__(namespace=namespace)
self.rule = IpRuleCommand(self)
class IpRuleCommand(IpCommandBase):
COMMAND = 'rule'
@staticmethod
def _make_canonical(ip_version, settings):
"""Converts settings to a canonical representation to compare easily"""
def canonicalize_fwmark_string(fwmark_mask):
"""Reformats fwmark/mask in to a canonical form
Examples, these are all equivalent:
"0x1"
0x1
"0x1/0xfffffffff"
(0x1, 0xfffffffff)
:param fwmark_mask: The firewall mark and mask (default mask 0xffffffff)
:type fwmark_mask: A string with / as delimiter, an iterable, or a
single value.
"""
# Turn the value we were passed in to an iterable: fwmark[, mask]
if isinstance(fwmark_mask, six.string_types):
# A / separates the optional mask in a string
iterable = fwmark_mask.split('/')
else:
try:
iterable = iter(fwmark_mask)
except TypeError:
# At this point, it must be a single integer
iterable = [fwmark_mask]
def to_i(s):
if isinstance(s, six.string_types):
# Passing 0 as "base" arg to "int" causes it to determine
# the base automatically.
return int(s, 0)
# s isn't a string, can't specify base argument
return int(s)
integers = [to_i(x) for x in iterable]
# The default mask is all ones (32 bits).
if len(integers) == 1:
integers.append(0xffffffff)
# We now have two integers in a list. Convert to canonical string.
return '{0:#x}/{1:#x}'.format(*integers)
def canonicalize(item):
k, v = item
# ip rule shows these as 'any'
if k == 'from' and v == 'all':
return k, constants.IP_ANY[ip_version]
# lookup and table are interchangeable. Use table every time.
if k == 'lookup':
return 'table', v
if k == 'fwmark':
return k, canonicalize_fwmark_string(v)
return k, v
if 'type' not in settings:
settings['type'] = 'unicast'
return {k: str(v) for k, v in map(canonicalize, settings.items())}
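# Illustrative example (not from the original module): given ip_version=4,
# _make_canonical would turn {'from': 'all', 'lookup': 16, 'fwmark': '0x1'}
# into {'from': '0.0.0.0/0', 'table': '16', 'fwmark': '0x1/0xffffffff',
# 'type': 'unicast'}, so rules read back from 'ip rule show' compare equal.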
def _parse_line(self, ip_version, line):
# Typical rules from 'ip rule show':
# 4030201: from 1.2.3.4/24 lookup 10203040
# 1024: from all iif qg-c43b1928-48 lookup noscope
parts = line.split()
if not parts:
return {}
# Format of line is: "priority: <key> <value> ... [<type>]"
settings = {k: v for k, v in zip(parts[1::2], parts[2::2])}
settings['priority'] = parts[0][:-1]
if len(parts) % 2 == 0:
# When line has an even number of columns, last one is the type.
settings['type'] = parts[-1]
return self._make_canonical(ip_version, settings)
def list_rules(self, ip_version):
lines = self._as_root([ip_version], ['show']).splitlines()
return [self._parse_line(ip_version, line) for line in lines]
def _exists(self, ip_version, **kwargs):
return kwargs in self.list_rules(ip_version)
def _make__flat_args_tuple(self, *args, **kwargs):
for kwargs_item in sorted(kwargs.items(), key=lambda i: i[0]):
args += kwargs_item
return tuple(args)
def add(self, ip, **kwargs):
ip_version = get_ip_version(ip)
kwargs.update({'from': ip})
canonical_kwargs = self._make_canonical(ip_version, kwargs)
if not self._exists(ip_version, **canonical_kwargs):
args_tuple = self._make__flat_args_tuple('add', **canonical_kwargs)
self._as_root([ip_version], args_tuple)
def delete(self, ip, **kwargs):
ip_version = get_ip_version(ip)
# TODO(Carl) ip ignored in delete, okay in general?
canonical_kwargs = self._make_canonical(ip_version, kwargs)
args_tuple = self._make__flat_args_tuple('del', **canonical_kwargs)
self._as_root([ip_version], args_tuple)
class IpDeviceCommandBase(IpCommandBase):
@property
def name(self):
return self._parent.name
class IpLinkCommand(IpDeviceCommandBase):
COMMAND = 'link'
def set_address(self, mac_address):
self._as_root([], ('set', self.name, 'address', mac_address))
def set_mtu(self, mtu_size):
self._as_root([], ('set', self.name, 'mtu', mtu_size))
def set_up(self):
return self._as_root([], ('set', self.name, 'up'))
def set_down(self):
return self._as_root([], ('set', self.name, 'down'))
def set_netns(self, namespace):
self._as_root([], ('set', self.name, 'netns', namespace))
self._parent.namespace = namespace
def set_name(self, name):
self._as_root([], ('set', self.name, 'name', name))
self._parent.name = name
def set_alias(self, alias_name):
self._as_root([], ('set', self.name, 'alias', alias_name))
def delete(self):
self._as_root([], ('delete', self.name))
@property
def address(self):
return self.attributes.get('link/ether')
@property
def state(self):
return self.attributes.get('state')
@property
def mtu(self):
return self.attributes.get('mtu')
@property
def qdisc(self):
return self.attributes.get('qdisc')
@property
def qlen(self):
return self.attributes.get('qlen')
@property
def alias(self):
return self.attributes.get('alias')
@property
def attributes(self):
return self._parse_line(self._run(['o'], ('show', self.name)))
def _parse_line(self, value):
if not value:
return {}
device_name, settings = value.replace("\\", '').split('>', 1)
tokens = settings.split()
keys = tokens[::2]
values = [int(v) if v.isdigit() else v for v in tokens[1::2]]
retval = dict(zip(keys, values))
return retval
class IpAddrCommand(IpDeviceCommandBase):
COMMAND = 'addr'
def add(self, cidr, scope='global'):
net = netaddr.IPNetwork(cidr)
args = ['add', cidr,
'scope', scope,
'dev', self.name]
if net.version == 4:
args += ['brd', str(net[-1])]
self._as_root([net.version], tuple(args))
def delete(self, cidr):
ip_version = get_ip_version(cidr)
self._as_root([ip_version],
('del', cidr,
'dev', self.name))
def flush(self, ip_version):
self._as_root([ip_version], ('flush', self.name))
def list(self, scope=None, to=None, filters=None, ip_version=None):
options = [ip_version] if ip_version else []
args = ['show', self.name]
if filters:
args += filters
retval = []
if scope:
args += ['scope', scope]
if to:
args += ['to', to]
for line in self._run(options, tuple(args)).split('\n'):
line = line.strip()
if not line.startswith('inet'):
continue
parts = line.split()
if parts[0] == 'inet6':
scope = parts[3]
else:
if parts[2] == 'brd':
scope = parts[5]
else:
scope = parts[3]
retval.append(dict(cidr=parts[1],
scope=scope,
dynamic=('dynamic' == parts[-1]),
tentative=('tentative' in line),
dadfailed=('dadfailed' == parts[-1])))
return retval
def wait_until_address_ready(self, address, wait_time=30):
"""Wait until an address is no longer marked 'tentative'
raises AddressNotReady if times out or address not present on interface
"""
def is_address_ready():
try:
addr_info = self.list(to=address)[0]
except IndexError:
raise AddressNotReady(
address=address,
reason=_LE('Address not present on interface'))
if not addr_info['tentative']:
return True
if addr_info['dadfailed']:
raise AddressNotReady(
address=address, reason=_LE('Duplicate address detected'))
errmsg = _LE("Exceeded %s second limit waiting for "
"address to leave the tentative state.") % wait_time
utils.utils.wait_until_true(
is_address_ready, timeout=wait_time, sleep=0.20,
exception=AddressNotReady(address=address, reason=errmsg))
class IpRouteCommand(IpDeviceCommandBase):
COMMAND = 'route'
def __init__(self, parent, table=None):
super(IpRouteCommand, self).__init__(parent)
self._table = table
def table(self, table):
"""Return an instance of IpRouteCommand which works on given table"""
return IpRouteCommand(self._parent, table)
def _table_args(self, override=None):
if override:
return ['table', override]
return ['table', self._table] if self._table else []
def _dev_args(self):
return ['dev', self.name] if self.name else []
def add_gateway(self, gateway, metric=None, table=None):
ip_version = get_ip_version(gateway)
args = ['replace', 'default', 'via', gateway]
if metric:
args += ['metric', metric]
args += self._dev_args()
args += self._table_args(table)
self._as_root([ip_version], tuple(args))
def _run_as_root_detect_device_not_found(self, *args, **kwargs):
try:
return self._as_root(*args, **kwargs)
except RuntimeError as rte:
with excutils.save_and_reraise_exception() as ctx:
if "Cannot find device" in str(rte):
ctx.reraise = False
raise exceptions.DeviceNotFoundError(device_name=self.name)
def delete_gateway(self, gateway, table=None):
ip_version = get_ip_version(gateway)
args = ['del', 'default',
'via', gateway]
args += self._dev_args()
args += self._table_args(table)
self._run_as_root_detect_device_not_found([ip_version], tuple(args))
def _parse_routes(self, ip_version, output, **kwargs):
for line in output.splitlines():
parts = line.split()
# Format of line is: "<cidr>|default [<key> <value>] ..."
route = {k: v for k, v in zip(parts[1::2], parts[2::2])}
route['cidr'] = parts[0]
# Avoids having to explicitly pass around the IP version
if route['cidr'] == 'default':
route['cidr'] = constants.IP_ANY[ip_version]
# ip route drops things like scope and dev from the output if it
# was specified as a filter. This allows us to add them back.
if self.name:
route['dev'] = self.name
if self._table:
route['table'] = self._table
# Callers add any filters they use as kwargs
route.update(kwargs)
yield route
def list_routes(self, ip_version, **kwargs):
args = ['list']
args += self._dev_args()
args += self._table_args()
for k, v in kwargs.items():
args += [k, v]
output = self._run([ip_version], tuple(args))
return [r for r in self._parse_routes(ip_version, output, **kwargs)]
def list_onlink_routes(self, ip_version):
routes = self.list_routes(ip_version, scope='link')
return [r for r in routes if 'src' not in r]
def add_onlink_route(self, cidr):
self.add_route(cidr, scope='link')
def delete_onlink_route(self, cidr):
self.delete_route(cidr, scope='link')
def get_gateway(self, scope=None, filters=None, ip_version=None):
options = [ip_version] if ip_version else []
args = ['list']
args += self._dev_args()
args += self._table_args()
if filters:
args += filters
retval = None
if scope:
args += ['scope', scope]
route_list_lines = self._run(options, tuple(args)).split('\n')
default_route_line = next((x.strip() for x in
route_list_lines if
x.strip().startswith('default')), None)
if default_route_line:
retval = dict()
gateway = DEFAULT_GW_PATTERN.search(default_route_line)
if gateway:
retval.update(gateway=gateway.group(1))
metric = METRIC_PATTERN.search(default_route_line)
if metric:
retval.update(metric=int(metric.group(1)))
return retval
def pullup_route(self, interface_name, ip_version):
"""Ensures that the route entry for the interface is before all
others on the same subnet.
"""
options = [ip_version]
device_list = []
device_route_list_lines = self._run(options,
('list',
'proto', 'kernel',
'dev', interface_name)
).split('\n')
for device_route_line in device_route_list_lines:
try:
subnet = device_route_line.split()[0]
except Exception:
continue
subnet_route_list_lines = self._run(options,
('list',
'proto', 'kernel',
'match', subnet)
).split('\n')
for subnet_route_line in subnet_route_list_lines:
i = iter(subnet_route_line.split())
while(next(i) != 'dev'):
pass
device = next(i)
try:
while(next(i) != 'src'):
pass
src = next(i)
except Exception:
src = ''
if device != interface_name:
device_list.append((device, src))
else:
break
for (device, src) in device_list:
self._as_root(options, ('del', subnet, 'dev', device))
if (src != ''):
self._as_root(options,
('append', subnet,
'proto', 'kernel',
'src', src,
'dev', device))
else:
self._as_root(options,
('append', subnet,
'proto', 'kernel',
'dev', device))
def add_route(self, cidr, via=None, table=None, **kwargs):
ip_version = get_ip_version(cidr)
args = ['replace', cidr]
if via:
args += ['via', via]
args += self._dev_args()
args += self._table_args(table)
for k, v in kwargs.items():
args += [k, v]
self._run_as_root_detect_device_not_found([ip_version], tuple(args))
def delete_route(self, cidr, via=None, table=None, **kwargs):
ip_version = get_ip_version(cidr)
args = ['del', cidr]
if via:
args += ['via', via]
args += self._dev_args()
args += self._table_args(table)
for k, v in kwargs.items():
args += [k, v]
self._run_as_root_detect_device_not_found([ip_version], tuple(args))
class IPRoute(SubProcessBase):
def __init__(self, namespace=None, table=None):
super(IPRoute, self).__init__(namespace=namespace)
self.name = None
self.route = IpRouteCommand(self, table=table)
class IpNeighCommand(IpDeviceCommandBase):
COMMAND = 'neigh'
def add(self, ip_address, mac_address):
ip_version = get_ip_version(ip_address)
self._as_root([ip_version],
('replace', ip_address,
'lladdr', mac_address,
'nud', 'permanent',
'dev', self.name))
def delete(self, ip_address, mac_address):
ip_version = get_ip_version(ip_address)
self._as_root([ip_version],
('del', ip_address,
'lladdr', mac_address,
'dev', self.name))
def show(self, ip_version):
options = [ip_version]
return self._as_root(options,
('show',
'dev', self.name))
def flush(self, ip_version, ip_address):
"""Flush neighbour entries
The given address entry is removed from the neighbour cache (ARP or
NDP). To flush all entries, pass the string 'all' as the address.
:param ip_version: Either 4 or 6 for IPv4 or IPv6 respectively
:param ip_address: The prefix selecting the neighbours to flush
"""
self._as_root([ip_version], ('flush', 'to', ip_address))
class IpNetnsCommand(IpCommandBase):
COMMAND = 'netns'
def add(self, name):
self._as_root([], ('add', name), use_root_namespace=True)
wrapper = IPWrapper(namespace=name)
wrapper.netns.execute(['sysctl', '-w',
'net.ipv4.conf.all.promote_secondaries=1'])
return wrapper
def delete(self, name):
self._as_root([], ('delete', name), use_root_namespace=True)
def execute(self, cmds, addl_env=None, check_exit_code=True,
extra_ok_codes=None, run_as_root=False):
ns_params = []
kwargs = {'run_as_root': run_as_root}
if self._parent.namespace:
kwargs['run_as_root'] = True
ns_params = ['ip', 'netns', 'exec', self._parent.namespace]
env_params = []
if addl_env:
env_params = (['env'] +
['%s=%s' % pair for pair in addl_env.items()])
cmd = ns_params + env_params + list(cmds)
return utils.execute(cmd, check_exit_code=check_exit_code,
extra_ok_codes=extra_ok_codes, **kwargs)
def exists(self, name):
output = self._parent._execute(
['o'], 'netns', ['list'],
run_as_root=cfg.CONF.AGENT.use_helper_for_ns_read)
for line in [l.split()[0] for l in output.splitlines()]:
if name == line:
return True
return False
def vxlan_in_use(segmentation_id, namespace=None):
"""Return True if VXLAN VNID is in use by an interface, else False."""
ip_wrapper = IPWrapper(namespace=namespace)
interfaces = ip_wrapper.netns.execute(["ip", "-d", "link", "list"],
check_exit_code=True)
return 'vxlan id %s ' % segmentation_id in interfaces
def device_exists(device_name, namespace=None):
"""Return True if the device exists in the namespace."""
return IPDevice(device_name, namespace=namespace).exists()
def device_exists_with_ips_and_mac(device_name, ip_cidrs, mac, namespace=None):
"""Return True if the device with the given IP addresses and MAC address
exists in the namespace.
"""
try:
device = IPDevice(device_name, namespace=namespace)
if mac != device.link.address:
return False
device_ip_cidrs = [ip['cidr'] for ip in device.addr.list()]
for ip_cidr in ip_cidrs:
if ip_cidr not in device_ip_cidrs:
return False
except RuntimeError:
return False
else:
return True
def get_routing_table(ip_version, namespace=None):
"""Return a list of dictionaries, each representing a route.
@param ip_version: the IP version of the routes to return, for example 4
@param namespace: the namespace from which to list the routes
@return: a list of dictionaries, each representing a route.
The dictionary format is: {'destination': cidr,
'nexthop': ip,
'device': device_name,
'scope': scope}
"""
ip_wrapper = IPWrapper(namespace=namespace)
table = ip_wrapper.netns.execute(
['ip', '-%s' % ip_version, 'route'],
check_exit_code=True)
routes = []
# Example for route_lines:
# default via 192.168.3.120 dev wlp3s0 proto static metric 1024
# 10.0.0.0/8 dev tun0 proto static scope link metric 1024
# The first column is the destination, followed by key/value pairs.
# The generator splits the routing table by newline, then strips and splits
# each individual line.
route_lines = (line.split() for line in table.split('\n') if line.strip())
for route in route_lines:
network = route[0]
# Create a dict of key/value pairs (For example - 'dev': 'tun0')
# excluding the first column.
data = dict(route[i:i + 2] for i in range(1, len(route), 2))
routes.append({'destination': network,
'nexthop': data.get('via'),
'device': data.get('dev'),
'scope': data.get('scope')})
return routes
def ensure_device_is_ready(device_name, namespace=None):
dev = IPDevice(device_name, namespace=namespace)
dev.set_log_fail_as_error(False)
try:
# Ensure the device is up, even if it is already up. If the device
# doesn't exist, a RuntimeError will be raised.
dev.link.set_up()
except RuntimeError:
return False
return True
def iproute_arg_supported(command, arg):
command += ['help']
stdout, stderr = utils.execute(command, check_exit_code=False,
return_stderr=True, log_fail_as_error=False)
return any(arg in line for line in stderr.split('\n'))
def _arping(ns_name, iface_name, address, count):
# Pass -w to set timeout to ensure exit if interface removed while running
arping_cmd = ['arping', '-A', '-I', iface_name, '-c', count,
'-w', 1.5 * count, address]
try:
ip_wrapper = IPWrapper(namespace=ns_name)
ip_wrapper.netns.execute(arping_cmd, check_exit_code=True)
except Exception:
msg = _LE("Failed sending gratuitous ARP "
"to %(addr)s on %(iface)s in namespace %(ns)s")
LOG.exception(msg, {'addr': address,
'iface': iface_name,
'ns': ns_name})
def send_ip_addr_adv_notif(ns_name, iface_name, address, config):
"""Send advance notification of an IP address assignment.
If the address is in the IPv4 family, send gratuitous ARP.
If the address is in the IPv6 family, no advance notification is
necessary, since the Neighbor Discovery Protocol (NDP), Duplicate
Address Discovery (DAD), and (for stateless addresses) router
advertisements (RAs) are sufficient for address resolution and
duplicate address detection.
"""
count = config.send_arp_for_ha
def arping():
_arping(ns_name, iface_name, address, count)
if count > 0 and netaddr.IPAddress(address).version == 4:
eventlet.spawn_n(arping)
def add_namespace_to_cmd(cmd, namespace=None):
"""Add an optional namespace to the command."""
return ['ip', 'netns', 'exec', namespace] + cmd if namespace else cmd
def get_ip_version(ip_or_cidr):
return netaddr.IPNetwork(ip_or_cidr).version
def get_ipv6_lladdr(mac_addr):
return '%s/64' % netaddr.EUI(mac_addr).ipv6_link_local()
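# ---------------------------------------------------------------------------
# Usage sketch (illustrative only; the device and namespace names are made up
# and these calls require a configured Neutron agent environment with root
# access):
#
#   if device_exists('tap0', namespace='qrouter-1234'):
#       dev = IPDevice('tap0', namespace='qrouter-1234')
#       dev.addr.add('192.168.0.2/24')
#       dev.link.set_up()
#       print(dev.route.get_gateway())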
|
|
import zmq
import Queue
import threading
import msgpack
import snappy
import traceback
import time
def _unpack(data):
if data[0] == 'S':
tmp = snappy.uncompress(data[1:])
obj = msgpack.loads(tmp)
elif data[0] == '\0':
obj = msgpack.loads(data[1:])
else:
return None
#print "UNPACK", obj
return obj
def _pack(obj) :
# print "PACK", obj
tmp = msgpack.dumps(obj)
if len(tmp) > 1000:
return 'S' + snappy.compress(tmp)
else:
return '\0' + tmp
class JRpcServer :
def __init__(self) :
self._waiter_lock = threading.Lock()
self._waiter_map = {}
self._should_close = False
self._send_lock = threading.Lock()
self._callback_queue = Queue.Queue()
self._ctx = zmq.Context()
self._pull_sock = self._ctx.socket(zmq.PULL)
self._pull_sock.bind("inproc://pull_sock")
self._push_sock = self._ctx.socket(zmq.PUSH)
self._push_sock.connect("inproc://pull_sock")
self.on_call = None
t = threading.Thread(target=self._recv_run)
t.setDaemon(True)
t.start()
t = threading.Thread(target=self._callback_run)
t.setDaemon(True)
t.start()
def __del__(self):
self.close()
# def set_on_call(self, on_call):
# """def on_call(client_id, req_msg)"""
# self._on_call = on_call
def _recv_run(self):
poller = zmq.Poller()
poller.register(self._pull_sock, zmq.POLLIN)
remote_sock = None
#client_addr_map = {}
while not self._should_close:
try:
socks = dict(poller.poll(500))
if self._pull_sock in socks and socks[self._pull_sock] == zmq.POLLIN:
msgs = self._pull_sock.recv_multipart()
if len(msgs) == 2:
if remote_sock:
# [addr, data]
#print "send data", msgs[0], msgs[1]
remote_sock.send_multipart(msgs)
elif len(msgs) == 1:
cmd = msgs[0]
if cmd == "LISTEN":
if remote_sock:
poller.unregister(remote_sock)
remote_sock.close()
remote_sock = None
remote_sock = self._do_listen()
if remote_sock :
poller.register(remote_sock, zmq.POLLIN)
elif cmd == "CLOSE":
self._should_close = True
break
if remote_sock and remote_sock in socks and socks[remote_sock] == zmq.POLLIN:
msgs = remote_sock.recv_multipart()
if len(msgs) == 2:
identity = msgs[0]
data = msgs[1]
#client_id = identity.split('$')
#client_addr_map[client_id] = identity
self._on_data_arrived(identity, data)
except zmq.error.Again, e:
#print "RECV timeout: ", e
pass
except Exception, e:
print("_recv_run:", e)
def _callback_run(self):
while not self._should_close:
try:
r = self._callback_queue.get(timeout = 1)
if r :
r()
except Queue.Empty, e:
pass
except Exception, e:
traceback.print_exc(e)
print "_callback_run", type(e), e
def _async_call(self, func):
self._callback_queue.put( func )
def listen(self, addr) :
self._addr = addr
self._push_sock.send("LISTEN")
def _do_listen(self):
socket = self._ctx.socket(zmq.ROUTER)
socket.setsockopt(zmq.RCVTIMEO, 1000)
socket.setsockopt(zmq.SNDTIMEO, 1000)
socket.setsockopt(zmq.LINGER, 0)
socket.bind(self._addr)
return socket
def close(self):
self._should_close = True
self._push_sock.send("CLOSE")
def _send(self, data, addr):
self._send_lock.acquire()
#self._push_sock.send(addr, flags=zmq.SNDMORE)
#self._push_sock.send(data)
self._push_sock.send_multipart([addr, data])
self._send_lock.release()
def _on_data_arrived(self, identity, data):
try:
msg = _unpack(data)
#print "RECV", msg
if not msg:
print "wrong message format"
return
method = msg['method'] if msg.has_key('method') else None
call_id = msg['id'] if msg.has_key('id') and msg['id'] else None
if call_id and method:
if method == ".sys.heartbeat":
# Unlike the Scala implementation, reply to the client directly here
rsp_msg = { 'jsonrpc' : '2.0',
'method' : method,
'result' : { "time" : time.time() },
'id' : call_id }
self._send( _pack(rsp_msg), identity)
if self.on_call :
self._async_call( lambda : self.on_call(identity, msg))
except Exception, e:
print( "_on_data_arrived:", e)
pass
def send_rsp(self, client_id, req, result=None, error=None):
"""send response message to client
example:
send_rsp(client_id, req, result={'data': 123})
send_rsp(client_id, req, error=(-1, "wrong argument"))
"""
if req['method'] == '.sys.heartbeat':
return
rsp_msg = { 'jsonrpc' : '2.0',
'method' : req["method"],
'id' : req['id'] }
if result is not None:
rsp_msg['result'] = result
if error is not None:
rsp_msg['error'] = {'error': error[0], 'message' : error[1]}
self._send(_pack(rsp_msg), client_id)
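# ---------------------------------------------------------------------------
# Usage sketch (illustrative only; the address and handler below are made up):
#
#   def on_call(client_id, msg):
#       if msg['method'] == 'echo':
#           server.send_rsp(client_id, msg, result=msg.get('params'))
#       else:
#           server.send_rsp(client_id, msg, error=(-32601, "unknown method"))
#
#   server = JRpcServer()
#   server.on_call = on_call
#   server.listen("tcp://0.0.0.0:8888")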
|
|
"""
Bundle subcommands.
"""
import logging
import sys
import typing as ty
import click
from git_pw import api
from git_pw import utils
LOG = logging.getLogger(__name__)
_list_headers = ('ID', 'Name', 'Owner', 'Public')
_sort_fields = ('id', '-id', 'name', '-name')
def _get_bundle(bundle_id: str) -> dict:
"""Fetch bundle by ID or name.
Allow users to provide a string to search for bundles. This doesn't make
sense to expose via the API since there's no uniqueness constraint on
bundle names.
"""
if bundle_id.isdigit():
return api.detail('bundles', bundle_id)
bundles = api.index('bundles', [('q', bundle_id)])
if len(bundles) == 0:
LOG.error('No matching bundle found: %s', bundle_id)
sys.exit(1)
elif len(bundles) > 1:
LOG.error('More than one bundle found: %s', bundle_id)
sys.exit(1)
return bundles[0]
@click.command(name='apply', context_settings=dict(
ignore_unknown_options=True,
))
@click.argument('bundle_id')
@click.argument('args', nargs=-1, type=click.UNPROCESSED)
def apply_cmd(bundle_id: str, args: ty.Tuple[str]) -> None:
"""Apply bundle.
Apply a bundle locally using the 'git-am' command. Any additional ARGS
provided will be passed to the 'git-am' command.
"""
LOG.debug('Applying bundle: id=%s', bundle_id)
bundle = _get_bundle(bundle_id)
mbox = api.download(bundle['mbox'])
if mbox:
utils.git_am(mbox, args)
@click.command(name='download')
@click.argument('bundle_id')
@click.argument(
'output',
type=click.Path(file_okay=True, writable=True, readable=True),
required=False,
)
def download_cmd(bundle_id: str, output: ty.Optional[str]) -> None:
"""Download bundle in mbox format.
Download a bundle but do not apply it. ``OUTPUT`` is optional and can be a
full output file path, a directory, or ``-`` to write to ``stdout``. If
``OUTPUT`` is not provided, the output path will be chosen automatically.
"""
LOG.debug('Downloading bundle: id=%s', bundle_id)
path = None
bundle = _get_bundle(bundle_id)
path = api.download(bundle['mbox'], output=output)
if path:
LOG.info('Downloaded bundle to %s', path)
def _show_bundle(bundle: dict, fmt: str) -> None:
def _format_patch(patch):
return '%-4d %s' % (patch.get('id'), patch.get('name'))
output = [
('ID', bundle.get('id')),
('Name', bundle.get('name')),
('URL', bundle.get('web_url')),
('Owner', bundle.get('owner', {}).get('username')),
('Project', bundle.get('project', {}).get('name')),
('Public', bundle.get('public'))]
prefix = 'Patches'
for patch in bundle.get('patches', []):
output.append((prefix, _format_patch(patch)))
prefix = ''
utils.echo(output, ['Property', 'Value'], fmt=fmt)
@click.command(name='show')
@utils.format_options
@click.argument('bundle_id')
def show_cmd(fmt: str, bundle_id: str) -> None:
"""Show information about bundle.
Retrieve Patchwork metadata for a bundle.
"""
LOG.debug('Showing bundle: id=%s', bundle_id)
bundle = _get_bundle(bundle_id)
_show_bundle(bundle, fmt)
@click.command(name='list')
@click.option('--owner', 'owners', metavar='OWNER', multiple=True,
help='Show only bundles with these owners. Should be an email, '
'name or ID. Private bundles of other users will not be shown.')
@utils.pagination_options(sort_fields=_sort_fields, default_sort='name')
@utils.format_options(headers=_list_headers)
@click.argument('name', required=False)
@api.validate_multiple_filter_support
def list_cmd(owners, limit, page, sort, fmt, headers, name):
"""List bundles.
List bundles on the Patchwork instance.
"""
LOG.debug('List bundles: owners=%s, limit=%r, page=%r, sort=%r',
','.join(owners), limit, page, sort)
params = []
for owner in owners:
# we support server-side filtering by username (but not email) in 1.1
if (api.version() >= (1, 1) and '@' not in owner) or owner.isdigit():
params.append(('owner', owner))
else:
params.extend(api.retrieve_filter_ids('users', 'owner', owner))
params.extend([
('q', name),
('page', page),
('per_page', limit),
('order', sort),
])
bundles = api.index('bundles', params)
# Format and print output
output = []
for bundle in bundles:
item = [
bundle.get('id'),
utils.trim(bundle.get('name') or ''),
bundle.get('owner').get('username'),
'yes' if bundle.get('public') else 'no',
]
output.append([])
for idx, header in enumerate(_list_headers):
if header not in headers:
continue
output[-1].append(item[idx])
utils.echo_via_pager(output, headers, fmt=fmt)
@click.command(name='create')
@click.option('--public/--private', default=False,
help='Allow other users to view this bundle. If private, only '
'you will be able to see this bundle.')
@click.argument('name')
@click.argument('patch_ids', type=click.INT, nargs=-1, required=True)
@api.validate_minimum_version(
(1, 2), 'Creating bundles is only supported from API version 1.2',
)
@utils.format_options
def create_cmd(
name: str, patch_ids: ty.Tuple[int], public: bool, fmt: str,
) -> None:
"""Create a bundle.
Create a bundle with the given NAME and patches from PATCH_ID.
Requires API version 1.2 or greater.
"""
LOG.debug('Create bundle: name=%s, patches=%s, public=%s',
name, patch_ids, public)
data = [
('name', name),
('patches', patch_ids),
('public', public),
]
bundle = api.create('bundles', data)
_show_bundle(bundle, fmt)
@click.command(name='update')
@click.option('--name')
@click.option('--patch', 'patch_ids', type=click.INT, multiple=True,
help='Add the specified patch(es) to the bundle.')
@click.option('--public/--private', default=None,
help='Allow other users to view this bundle. If private, only '
'you will be able to see this bundle.')
@click.argument('bundle_id')
@api.validate_minimum_version(
(1, 2), 'Updating bundles is only supported from API version 1.2',
)
@utils.format_options
def update_cmd(
bundle_id: str, name: str, patch_ids: ty.List[int], public: bool, fmt: str,
) -> None:
"""Update a bundle.
Update bundle BUNDLE_ID. If PATCH_IDs are specified, this will overwrite
all patches in the bundle. Use 'bundle add' and 'bundle remove' to add or
remove patches.
Requires API version 1.2 or greater.
"""
LOG.debug(
'Updating bundle: id=%s, name=%s, patches=%s, public=%s',
bundle_id, name, patch_ids, public,
)
data = []
for key, value in [('name', name), ('public', public)]:
if value is None:
continue
data.append((key, value))
if patch_ids: # special case patches to ignore the empty set
data.append(('patches', patch_ids))
bundle = api.update('bundles', bundle_id, data)
_show_bundle(bundle, fmt)
@click.command(name='delete')
@click.argument('bundle_id')
@api.validate_minimum_version(
(1, 2), 'Deleting bundles is only supported from API version 1.2',
)
@utils.format_options
def delete_cmd(bundle_id: str, fmt: str) -> None:
"""Delete a bundle.
Delete bundle BUNDLE_ID.
Requires API version 1.2 or greater.
"""
LOG.debug('Delete bundle: id=%s', bundle_id)
api.delete('bundles', bundle_id)
@click.command(name='add')
@click.argument('bundle_id')
@click.argument('patch_ids', type=click.INT, nargs=-1, required=True)
@api.validate_minimum_version(
(1, 2), 'Modifying bundles is only supported from API version 1.2',
)
@utils.format_options
def add_cmd(bundle_id: str, patch_ids: ty.Tuple[int], fmt: str) -> None:
"""Add one or more patches to a bundle.
Append the provided PATCH_IDS to bundle BUNDLE_ID.
Requires API version 1.2 or greater.
"""
LOG.debug('Add to bundle: id=%s, patches=%s', bundle_id, patch_ids)
bundle = _get_bundle(bundle_id)
data = [
('patches', patch_ids + tuple([p['id'] for p in bundle['patches']])),
]
bundle = api.update('bundles', bundle_id, data)
_show_bundle(bundle, fmt)
@click.command(name='remove')
@click.argument('bundle_id')
@click.argument('patch_ids', type=click.INT, nargs=-1, required=True)
@api.validate_minimum_version(
(1, 2), 'Modifying bundles is only supported from API version 1.2',
)
@utils.format_options
def remove_cmd(
bundle_id: str, patch_ids: ty.Tuple[int], fmt: str,
) -> None:
"""Remove one or more patches from a bundle.
Remove the provided PATCH_IDS from bundle BUNDLE_ID.
Requires API version 1.2 or greater.
"""
LOG.debug('Remove from bundle: id=%s, patches=%s', bundle_id, patch_ids)
bundle = _get_bundle(bundle_id)
patches = [p['id'] for p in bundle['patches'] if p['id'] not in patch_ids]
if not patches:
LOG.error(
'Bundles cannot be empty. Consider deleting the bundle instead'
)
sys.exit(1)
data = [('patches', tuple(patches))]
bundle = api.update('bundles', bundle_id, data)
_show_bundle(bundle, fmt)
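# ---------------------------------------------------------------------------
# Command-line usage sketch (illustrative; the IDs below are placeholders and
# assume these commands are registered under the 'bundle' group of git-pw):
#
#   git-pw bundle list --owner [email protected]
#   git-pw bundle show 42
#   git-pw bundle download 42 ./bundle.mbox
#   git-pw bundle apply 42 --3way
#   git-pw bundle create my-bundle 101 102 103
#   git-pw bundle add 42 104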
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import libvirt
import logging
from ovm.configuration import Configuration
from ovm.utils.logger import logger
from ovm.vmcli.management import * # noqa
def add_subparsers(parser):
subparsers = parser.add_subparsers()
# console
subcommand = subparsers.add_parser(
'console',
help='enter in the VM console')
subcommand.add_argument('name', help='name of the VM')
subcommand.set_defaults(func=vm_console)
# create
subcommand = subparsers.add_parser('create', help='create a new VM')
subcommand.add_argument('name', help='set the name of the VM')
subcommand.add_argument('--template', required=True)
subcommand.add_argument('--network', required=True)
subcommand.add_argument('--storage', required=True)
subcommand.add_argument('--ip', nargs='?')
subcommand.add_argument('--size', nargs='?')
subcommand.add_argument('--vcpu', nargs='?')
subcommand.add_argument('--memory', nargs='?')
subcommand.set_defaults(func=vm_create)
# templates
subcommand = subparsers.add_parser(
'templates',
help='list templates')
subcommand.add_argument(
'--short',
action='store_true',
        help='print only the list of template names')
subcommand.set_defaults(func=vm_templates)
# storage
subcommand = subparsers.add_parser('storage', help='list storage')
subcommand.add_argument(
'--short',
action='store_true',
help='print only the list of storage names')
subcommand.set_defaults(func=vm_storage)
# list
subcommand = subparsers.add_parser(
'ls',
help='list VMs')
subcommand.add_argument(
'--short',
action='store_true',
help='print only the list of VM names')
subcommand.add_argument(
'--active',
action='store_true',
        help='print only the list of active VMs')
subcommand.add_argument(
'--inactive',
action='store_true',
        help='print only the list of inactive VMs')
subcommand.set_defaults(func=vm_list)
# set
    subcommand = subparsers.add_parser('set', help='set metadata on a VM')
subcommand.add_argument('name', help='name of the VM')
subcommand.add_argument(
'metadata',
nargs='+',
help='enter metadata as <key>=<value>')
subcommand.set_defaults(func=vm_set)
# unset
subcommand = subparsers.add_parser(
'unset',
        help='unset metadata on a VM')
subcommand.add_argument('name', help='name of the VM')
subcommand.add_argument('key', nargs='+')
subcommand.set_defaults(func=vm_unset)
# autostart
subcommand = subparsers.add_parser(
'autostart',
help='choose if the VM starts automatically at boot')
subcommand.add_argument('name', help='name of the VM')
subcommand.add_argument('value', choices=['on', 'off'])
subcommand.set_defaults(func=vm_autostart)
# start
subcommand = subparsers.add_parser(
'start',
help='start one or many VMs')
subcommand.add_argument('name', nargs='+', help='name of VMs')
subcommand.set_defaults(func=vm_start)
# info
subcommand = subparsers.add_parser(
'info',
help='show information about a VM')
subcommand.add_argument('name', help='name of the VM')
subcommand.set_defaults(func=vm_info)
# reboot
subcommand = subparsers.add_parser('reboot', help='reboot a VM')
subcommand.add_argument('name', help='name of the VM')
subcommand.set_defaults(func=vm_reboot)
# save
subcommand = subparsers.add_parser('save', help='save a VM')
subcommand.add_argument('name', nargs='+', help='name of VMs')
subcommand.set_defaults(func=vm_save)
# restore
subcommand = subparsers.add_parser('restore', help='restore a VM')
subcommand.add_argument('name', nargs='+', help='name of VMs')
subcommand.set_defaults(func=vm_restore)
# stop
subcommand = subparsers.add_parser('stop', help='stop one or many VMs')
subcommand.add_argument('name', nargs='+', help='name of VMs')
subcommand.add_argument(
'-f', '--force',
action='store_true',
help='force the VM shutdown')
subcommand.set_defaults(func=vm_stop)
# remove
subcommand = subparsers.add_parser(
'rm',
help='remove one or many VMs')
subcommand.add_argument('name', help='name of VMs', nargs='+')
subcommand.add_argument(
'-f', '--force',
action='store_true',
dest='force',
        help='remove VMs without asking for confirmation')
subcommand.set_defaults(func=vm_remove)
# ssh
subcommand = subparsers.add_parser('ssh', help='ssh a VM')
subcommand.add_argument('name', help='name of the VM')
subcommand.set_defaults(func=vm_ssh)
# ping
subcommand = subparsers.add_parser('ping', help='ping a VM')
subcommand.add_argument('name', help='name of the VM')
subcommand.set_defaults(func=vm_ping)
# top
subcommand = subparsers.add_parser(
'top',
help='show all VMs and their states')
subcommand.set_defaults(func=vm_top)
# networks
subcommand = subparsers.add_parser('network')
add_network_subparsers(subcommand)
def add_network_subparsers(parser):
subparsers = parser.add_subparsers()
cmd = subparsers.add_parser('list')
cmd.add_argument(
'--short',
action='store_true',
        help='print only the list of network names')
cmd.set_defaults(func=network_list)
cmd = subparsers.add_parser(
'ipv4-list',
        help='show IPv4 addresses allocated to a network')
cmd.add_argument('network')
cmd.set_defaults(func=network_ipv4_list)
cmd = subparsers.add_parser(
'ipv4-del',
        help='delete an IPv4 address associated with a network')
cmd.add_argument('network')
cmd.add_argument('address', nargs='+')
cmd.set_defaults(func=network_ipv4_delete)
cmd = subparsers.add_parser(
'ipv4-add',
help='add a new association between a domain and an IP address')
cmd.add_argument('network')
cmd.add_argument('domain')
cmd.add_argument('address', nargs='?')
cmd.set_defaults(func=network_ipv4_add)
cmd = subparsers.add_parser(
'ipv4-flush',
        help='remove all IP addresses in a network')
cmd.add_argument('network')
cmd.set_defaults(func=network_ipv4_flush)
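# Illustrative usage sketch, not from the original module: example shell
# invocations matching the subparsers defined above. The VM, template, network
# and storage names are hypothetical placeholders.
#
#   $ vm create web01 --template debian --network lan --storage default
#   $ vm ls --short --active
#   $ vm network ipv4-list lan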
def main():
    # Silence libvirt's default error reporting to stderr; the handler is
    # invoked as handler(ctx, err), so it must accept two arguments.
    libvirt.registerErrorHandler(lambda ctx, err: None, None)
parser = argparse.ArgumentParser(
description='Provide functions to create and manage VMs on KVM.',
prog='vm')
parser.add_argument('--version', action='version',
version=Configuration.VERSION)
parser.add_argument('-v', '--verbose', action='store_true')
parser.add_argument(
'--fork',
default=4,
type=int,
        help='set how many tasks to launch in parallel')
add_subparsers(parser)
args = parser.parse_args()
if args.verbose:
logger.setLevel(logging.DEBUG)
Resources(Configuration.RESOURCE_CONFIG)
if hasattr(args, 'func'):
getattr(args, 'func')(args)
else:
parser.print_help()
sys.exit(0)
if __name__ == "__main__":
main()
|
|
# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import logging
from botocore_eb.exceptions import MissingParametersError
from botocore_eb.exceptions import UnknownParameterError
from botocore_eb.paginate import DeprecatedPaginator
from botocore_eb import serialize
from botocore_eb import BotoCoreObject, xform_name
from botocore_eb.validate import ParamValidator
from botocore_eb.exceptions import ParamValidationError
logger = logging.getLogger(__name__)
class Operation(BotoCoreObject):
_DEFAULT_PAGINATOR_CLS = DeprecatedPaginator
def __init__(self, service, op_data, model, paginator_cls=None):
self.input = {}
self.output = {}
self._model = model
BotoCoreObject.__init__(self, **op_data)
self.service = service
if self.service:
self.session = self.service.session
else:
self.session = None
self.type = 'operation'
self._params = None
if paginator_cls is None:
paginator_cls = self._DEFAULT_PAGINATOR_CLS
self._paginator_cls = paginator_cls
def __repr__(self):
return 'Operation:%s' % self.name
@property
def model(self):
return self._model
@property
def output_shape(self):
return self._model.output_shape
@property
def signature_version(self):
return self.service.signature_version
def call(self, endpoint, **kwargs):
logger.debug("%s called with kwargs: %s", self, kwargs)
# It probably seems a little weird to be firing two different
# events here. The reason is that the first event is fired
# with the parameters exactly as supplied. The second event
# is fired with the built parameters. Generally, it's easier
# to manipulate the former but at times, like with ReST operations
# that build an XML or JSON payload, you have to wait for
        # build_parameters to do its job and the latter is necessary.
event = self.session.create_event('before-parameter-build',
self.service.endpoint_prefix,
self.name)
self.session.emit(event, operation=self, endpoint=endpoint,
params=kwargs)
request_dict = self.build_parameters(**kwargs)
event = self.session.create_event('before-call',
self.service.endpoint_prefix,
self.name)
self.session.emit(event, operation=self, endpoint=endpoint,
params=request_dict)
response = endpoint.make_request(self.model, request_dict)
event = self.session.create_event('after-call',
self.service.endpoint_prefix,
self.name)
self.session.emit(event, operation=self,
http_response=response[0],
model=self.model,
parsed=response[1])
return response
@property
def pagination(self):
try:
return self._load_pagination_config()
except Exception as e:
return {}
@property
def can_paginate(self):
try:
self._load_pagination_config()
except Exception as e:
return False
return True
def paginate(self, endpoint, **kwargs):
"""Iterate over the responses of an operation.
This will return an iterator with each element
being a tuple of (``http_response``, ``parsed_response``).
If the operation does not paginate, a ``TypeError`` will
be raised. You can check if an operation can be paginated
        by using the ``can_paginate`` property.
"""
if not self.can_paginate:
raise TypeError("Operation cannot be paginated: %s" % self)
config = self._load_pagination_config()
paginator = self._paginator_cls(self, config)
return paginator.paginate(endpoint, **kwargs)
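    # Illustrative usage sketch, not part of the original class; `endpoint` and
    # the keyword arguments are hypothetical. Each yielded item is an
    # (http_response, parsed_response) tuple, as described in the docstring
    # above.
    #
    #   if operation.can_paginate:
    #       for http_response, parsed in operation.paginate(endpoint, MaxItems='10'):
    #           handle(parsed)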
def _load_pagination_config(self):
loader = self.session.get_component('data_loader')
api_version = self.service.api_version
config = loader.load_data('aws/%s/%s.paginators' %
(self.service.service_name, api_version))
return config['pagination'][self.name]
@property
def params(self):
raise RuntimeError(
"Attempted to access removed parameter objects in botocore.")
if self._params is None:
self._params = self._create_parameter_objects()
return self._params
def _create_parameter_objects(self):
"""
Build the list of Parameter objects for this operation.
"""
logger.debug("Creating parameter objects for: %s", self)
params = []
return params
def _find_payload(self):
"""
Searches the parameters for an operation to find the payload
parameter, if it exists. Returns that param or None.
"""
payload = None
for param in self.params:
if hasattr(param, 'payload') and param.payload:
payload = param
break
return payload
def build_parameters(self, **kwargs):
"""
Returns a dictionary containing the kwargs for the
given operation formatted as required to pass to the service
in a request.
"""
protocol = self._model.metadata['protocol']
input_shape = self._model.input_shape
if input_shape is not None:
self._convert_kwargs_to_correct_casing(kwargs)
validator = ParamValidator()
errors = validator.validate(kwargs, self._model.input_shape)
if errors.has_errors():
raise ParamValidationError(report=errors.generate_report())
serializer = serialize.create_serializer(protocol)
request_dict = serializer.serialize_to_request(kwargs, self._model)
return request_dict
def _convert_kwargs_to_correct_casing(self, kwargs):
# XXX: This will be removed in botocore 1.0, but we should
# support snake casing for now.
# First we're going to build a map of snake_casing -> service casing
actual_casing = list(self._model.input_shape.members)
mapping = {}
for key in actual_casing:
transformed = xform_name(key)
if key != transformed:
mapping[xform_name(key)] = key
# Look for anything in the user provided kwargs that is in the mapping
# dict and convert appropriately.
for key in list(kwargs):
if key in mapping:
# TODO: add a pending deprecation warning.
value = kwargs[key]
kwargs[mapping[key]] = value
del kwargs[key]
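        # Example of the mapping above (hypothetical member name): an input
        # shape member "QueueUrl" becomes "queue_url" via xform_name(), so a
        # caller passing queue_url=... has the kwarg renamed to QueueUrl=...
        # before validation and serialization.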
def _check_for_unknown_params(self, kwargs):
valid_names = [p.py_name for p in self.params]
for key in kwargs:
if key not in valid_names:
raise UnknownParameterError(name=key, operation=self,
choices=', '.join(valid_names))
def is_streaming(self):
# TODO: add deprecation warning
return self._model.has_streaming_output
@property
def has_streaming_output(self):
return self._model.has_streaming_output
|
|
import weakref
from twisted.internet import task
from twisted.trial import unittest
from axiom import store
from imaginary import iimaginary, objects, events, action
from imaginary.test import commandutils
from examplegame import mice
from examplegame import japanese
class MouseChallengeMixin(object):
"""
A mixin meant to be used in TestCases which want to assert things
about mouse challenges.
The subclass must be sure to provide a C{player} instance
attribute, which is the L{IThing<iimaginary.IThing>} provider of
the player which observes the mouse, and a C{mouseName} attribute
which should be the mouse's name.
"""
def assertChallenge(self, concept):
"""
Assert that the given concept is a challenge from the mouse
named self.mouseName, as observed by self.player.
"""
said = commandutils.flatten(concept.plaintext(self.player))
self.failUnless(said.startswith(u"A %s says, '" % (self.mouseName,)), repr(said))
self.failUnlessIn(said[-3], japanese.hiragana)
self.failUnless(said.endswith("'\n"), repr(said))
class HiraganaMouseTestCase(MouseChallengeMixin, unittest.TestCase):
"""
Test that there is a mouse that says hiragana and stuff
"""
def setUp(self):
self.store = store.Store()
self.clock = objects.Thing(store=self.store, name=u"Clock")
self.clockContainer = objects.Container.createFor(self.clock, capacity=10)
self.mouseName = u"\N{KATAKANA LETTER PI}\N{KATAKANA LETTER SMALL YU}"
self.mouse = mice.createHiraganaMouse(
store=self.store,
name=self.mouseName)
self.mouseActor = iimaginary.IActor(self.mouse)
self.mousehood = self.mouseActor.getIntelligence()
self.mouse.moveTo(self.clock)
(self.player,
self.playerActor,
self.playerIntelligence) = commandutils.createPlayer(self.store,
u"Mean Old Man")
self.player.moveTo(self.clock)
self.reactorTime = task.Clock()
self.mousehood._callLater = self.reactorTime.callLater
def test_mouseCanSqueak(self):
"""
When explicitly told to challenge with a given romaji syllable, the
mouse should say a hiragana letter.
"""
events.runEventTransaction(
self.store,
self.mousehood.challenge,
character=u"\N{HIRAGANA LETTER A}")
self.assertEquals(len(self.playerIntelligence.concepts), 1)
event = self.playerIntelligence.concepts[0]
self.assertEquals(
commandutils.flatten(event.otherMessage.plaintext(self.player)),
u"A %s says, '\N{HIRAGANA LETTER A}'" % (self.mouseName,))
def test_randomHiragana(self):
"""
When explicitly told to challenge without specifying a syllable, the
mouse should say a random one.
"""
events.runEventTransaction(self.store, self.mousehood.challenge)
self.assertEquals(len(self.playerIntelligence.concepts), 1)
event = self.playerIntelligence.concepts[0]
self.assertChallenge(event)
def test_ji(self):
"""
Two hiragana characters map to the romaji 'ji'. Test that we do the
right thing for them.
"""
self.mousehood.challenge(character=u"\N{HIRAGANA LETTER DI}")
self.failUnless(self.mousehood.vetteChallengeResponse(u"ji"))
self.mousehood.challenge(character=u"\N{HIRAGANA LETTER ZI}")
self.failUnless(self.mousehood.vetteChallengeResponse(u"ji"))
def test_zu(self):
"""
Two hiragana characters map to the romaji 'zu'. Test that we do the
right thing for them.
"""
self.mousehood.challenge(character=u"\N{HIRAGANA LETTER DU}")
self.failUnless(self.mousehood.vetteChallengeResponse(u"zu"))
self.mousehood.challenge(character=u"\N{HIRAGANA LETTER ZU}")
self.failUnless(self.mousehood.vetteChallengeResponse(u"zu"))
def test_mouseStartsChallengingWhenPlayersArrive(self):
"""
When a player arrives, the mouse should go into the 'I am
challenging' state.
"""
# Whitebox
self.assertEquals(self.mousehood.challenging, False)
evt = events.ArrivalEvent(actor=self.player)
self.mouseActor.send(evt)
self.assertEquals(self.mousehood.challenging, True)
def test_mouseSchedulesChallenges(self):
"""
After telling a mouse to start challenging, it should schedule timed
events to say challenges.
"""
self.mousehood.startChallenging()
self.reactorTime.advance(self.mousehood.challengeInterval)
concepts = self.playerIntelligence.concepts
self.assertEquals(len(concepts), 1)
self.assertChallenge(concepts[0])
def test_mouseStopsChallengingWhenPlayersLeave(self):
"""
When the 'last' player leaves, the mouse stops challenging.
"""
# Whitebox
self.mousehood.startChallenging()
evt = events.DepartureEvent(location=self.clock,
actor=self.player)
self.player.moveTo(None)
self.mouseActor.send(evt)
self.assertEquals(self.mousehood.challenging, False)
def test_mouseStopsSchedulingChallenges(self):
"""
When a mouse is told to stop challenging, it should cancel any
challenges it had scheduled.
"""
self.mousehood.startChallenging()
self.mousehood.stopChallenging()
self.reactorTime.advance(self.mousehood.challengeInterval)
self.assertEquals(self.playerIntelligence.concepts, [])
def test_stopChallengingWhenNotChallengingFails(self):
"""
Don't stop challenging when you're not challenging.
"""
self.assertRaises(mice.ChallengeVacuum, self.mousehood.stopChallenging)
def test_startChallengingTwiceFails(self):
"""
Don't start challenging twice.
"""
self.mousehood.startChallenging()
self.assertRaises(mice.ChallengeCollision, self.mousehood.startChallenging)
def test_challengeRecurrence(self):
"""
After a challenge is issued another one should be issued later.
"""
self.mousehood.startChallenging()
self.reactorTime.advance(self.mousehood.challengeInterval)
self.assertIn(self.mousehood.getCurrentChallenge(), japanese.hiragana)
self.mousehood._currentChallenge = None # Clear his challenge evilly
self.reactorTime.advance(self.mousehood.challengeInterval)
self.assertIn(self.mousehood.getCurrentChallenge(), japanese.hiragana)
def test_twoMenEnter(self):
"""
Test that when *TWO* players join, the mouse doesn't schedule too many
challenges.
"""
otherPlayer = commandutils.createPlayer(self.store,
u"Polite Young Man")[0]
# Send an arrival event because setUp doesn't
firstEvent = events.ArrivalEvent(actor=self.player)
self.mouseActor.send(firstEvent)
otherPlayer.moveTo(self.clock, arrivalEventFactory=events.MovementArrivalEvent)
self.playerIntelligence.concepts = []
self.reactorTime.advance(self.mousehood.challengeInterval)
self.assertEquals(len(self.playerIntelligence.concepts), 1)
self.assertChallenge(self.playerIntelligence.concepts[0])
def test_twoMenLeave(self):
"""
Test that when two players are near the mouse, the mouse doesn't
unschedule its challenge until they both leave.
"""
otherPlayer = commandutils.createPlayer(self.store,
u"Polite Young Man")[0]
otherPlayer.moveTo(self.clock)
self.mousehood.startChallenging()
firstEvent = events.DepartureEvent(location=self.clock,
actor=self.player)
secondEvent = events.DepartureEvent(location=self.clock,
actor=otherPlayer)
otherPlayer.moveTo(None)
self.mouseActor.send(secondEvent)
self.playerIntelligence.concepts = []
self.reactorTime.advance(self.mousehood.challengeInterval)
self.assertEquals(len(self.playerIntelligence.concepts), 1)
self.assertChallenge(self.playerIntelligence.concepts[0])
self.player.moveTo(None)
self.mouseActor.send(firstEvent)
self.failIf(self.mousehood.challenging)
def test_getCurrentChallenge(self):
"""
Test that we can introspect the current challenge of a mouse.
"""
self.mousehood.startChallenging()
self.reactorTime.advance(self.mousehood.challengeInterval)
self.failUnlessIn(self.mousehood.getCurrentChallenge(), japanese.hiragana)
self.mousehood.stopChallenging()
self.assertIdentical(self.mousehood.getCurrentChallenge(), None)
def test_vetteChallengeResponse(self):
"""
Test that the correct response to the current challenge is accepted by
the mouse.
"""
self.mousehood.startChallenging()
self.reactorTime.advance(self.mousehood.challengeInterval)
romaji = japanese.hiragana[self.mousehood.getCurrentChallenge()]
self.failUnless(self.mousehood.vetteChallengeResponse(romaji))
for romaji in japanese.hiragana.values():
if romaji != japanese.hiragana[self.mousehood.getCurrentChallenge()]:
self.failIf(self.mousehood.vetteChallengeResponse(romaji))
def test_respondToChallengeCorrectly(self):
"""
Test that when a correct response is received, the current challenge is
expired and the mouse salutes you.
"""
self.mousehood.startChallenging()
self.reactorTime.advance(self.mousehood.challengeInterval)
correctResponse = japanese.hiragana[
self.mousehood.getCurrentChallenge()]
self.mousehood.responseReceived(self.player, correctResponse)
self.reactorTime.advance(0)
self.assertIdentical(self.mousehood.getCurrentChallenge(), None)
self.assertEquals(len(self.playerIntelligence.concepts), 2)
c = self.playerIntelligence.concepts[1]
self.assertEquals(
commandutils.flatten(c.plaintext(self.player)),
u"%s salutes you!\n" % (self.mouseName,))
    def test_respondToChallengeIncorrectly(self):
"""
Test that when an incorrect response is received, the current challenge
is not expired and the mouse bites you.
"""
self.mousehood.startChallenging()
self.reactorTime.advance(self.mousehood.challengeInterval)
correctResponse = japanese.hiragana[
self.mousehood.getCurrentChallenge()]
for ch in japanese.hiragana.values():
if ch != correctResponse:
self.mousehood.responseReceived(self.player, ch)
break
else:
self.fail("Buggy test")
self.reactorTime.advance(0)
self.assertIn(self.mousehood.getCurrentChallenge(),
japanese.romajiToHiragana[correctResponse])
self.assertEquals(len(self.playerIntelligence.concepts), 2)
c = self.playerIntelligence.concepts[1]
self.assertEquals(
commandutils.flatten(c.plaintext(self.player)),
u"%s bites you!\n" % (self.mouseName,))
def test_playerSaysCorrectThing(self):
"""
Test that when someone gives voice to the correct response to a mouse's
current challenge, the mouse acknowledges this with a salute.
"""
self.mousehood.startChallenging()
self.reactorTime.advance(self.mousehood.challengeInterval)
action.Say().do(
# http://divmod.org/trac/ticket/2917
iimaginary.IActor(self.player),
None,
japanese.hiragana[self.mousehood.getCurrentChallenge()])
self.assertIdentical(self.mousehood.getCurrentChallenge(), None)
self.reactorTime.advance(0)
self.assertEquals(len(self.playerIntelligence.concepts), 3)
c = self.playerIntelligence.concepts[2]
self.assertEquals(
commandutils.flatten(c.plaintext(self.player)),
u"%s salutes you!\n" % (self.mouseName,))
def test_playerSaysIncorrectThing(self):
"""
        Test that when someone gives voice to an incorrect response to a mouse's
        current challenge, the mouse rejects it with a bite.
"""
self.mousehood.startChallenging()
self.reactorTime.advance(self.mousehood.challengeInterval)
action.Say().do(
# http://divmod.org/trac/ticket/2917
iimaginary.IActor(self.player), None, u"lolololo pew")
self.failIfIdentical(self.mousehood.getCurrentChallenge(), None)
self.reactorTime.advance(0)
self.assertEquals(len(self.playerIntelligence.concepts), 3)
c = self.playerIntelligence.concepts[2]
self.assertEquals(
commandutils.flatten(c.plaintext(self.player)),
u"%s bites you!\n" % (self.mouseName,))
class HiraganaMouseActivationTestCase(unittest.TestCase):
"""
Test the default scheduler of the mouse.
This isn't part of HiraganaMouseTestCase because that replaces the
scheduler before the test method runs.
"""
def setUp(self):
self.store = store.Store()
self.mouseName = u"\N{KATAKANA LETTER PI}\N{KATAKANA LETTER SMALL YU}"
self.mouse = mice.createHiraganaMouse(
store=self.store,
name=self.mouseName)
def test_activationUsesReactorScheduling(self):
"""
Test that the default scheduler of the mouse is the Twisted
reactor, since that is the scheduler that needs to be used
with the actual Imaginary server.
"""
mousehood = self.store.findUnique(mice.HiraganaMouse)
from twisted.internet import reactor
self.assertEquals(mousehood._callLater, reactor.callLater)
class HiraganaMouseCommandTestCase(commandutils.CommandTestCaseMixin, unittest.TestCase):
"""
H-mouse tests which use the command system.
"""
mouseName = u"\N{KATAKANA LETTER PI}\N{KATAKANA LETTER SMALL YU}"
hiraganaCharacterPattern = u"'[" + u''.join(japanese.hiragana.keys()) + u"]'"
speechPattern = mouseName + u" says, " + hiraganaCharacterPattern
def test_oneManEnters(self):
"""
Test that when a fellow jaunts into a venue inhabited by a mouse of the
Nipponese persuasion, a hiragana allocution follows.
"""
clock = task.Clock()
closetContainer = commandutils.createLocation(
self.store, u"Closet", None)
closet = closetContainer.thing
mouse = mice.createHiraganaMouse(
store=self.store,
name=self.mouseName,
proper=True)
mouseActor = iimaginary.IActor(mouse)
mousehood = mouseActor.getIntelligence()
mousehood._callLater = clock.callLater
mouse.moveTo(closet)
objects.Exit.link(self.location, closet, u"north")
self._test(
"north",
[commandutils.E("[ Closet ]"),
commandutils.E("( south )"),
commandutils.E(u"Here, you see " + self.mouseName + u".")],
["Test Player leaves north."])
clock.advance(mousehood.challengeInterval)
self._test(None, [self.speechPattern])
def test_creation(self):
"""
Test the creation of a hiragana-speaking mouse using the thing creation
plugin system.
"""
self._test(
u"create the 'hiragana mouse' named " + self.mouseName,
[commandutils.E(u"You create " + self.mouseName + u".")],
[commandutils.E(u"Test Player creates %s." % (self.mouseName,))])
for thing in self.location.findProviders(iimaginary.IThing, 0):
if thing.name == self.mouseName:
break
else:
self.fail("Could not find the mouse! Test bug.")
clock = task.Clock()
jimhood = iimaginary.IActor(thing).getIntelligence()
jimhood._callLater = clock.callLater
self._test(
u"drop " + self.mouseName,
[commandutils.E(u"You drop %s." % (self.mouseName,))],
[commandutils.E(u"Test Player drops %s." % (self.mouseName,))])
clock.advance(jimhood.challengeInterval)
self._test(
None,
[self.speechPattern],
[self.speechPattern])
|
|
# Copyright 2013 the Neutrino authors (see AUTHORS).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
# Neutrino syntax tree definitions
from abc import abstractmethod
import data
import plankton
# Syntax tree visitor. The default behavior of each visit method is to apply
# the visitor recursively.
class Visitor(object):
def __init__(self):
pass
def visit_ast(self, that):
that.traverse(self)
def visit_literal(self, that):
self.visit_ast(that)
def visit_array(self, that):
self.visit_ast(that)
def visit_lambda(self, that):
self.visit_ast(that)
def visit_variable(self, that):
self.visit_ast(that)
def visit_variable_assignment(self, that):
self.visit_ast(that)
def visit_invocation(self, that):
self.visit_ast(that)
def visit_call_literal(self, that):
self.visit_ast(that)
def visit_call_literal_argument(self, that):
self.visit_ast(that)
def visit_signal(self, that):
self.visit_ast(that)
def visit_signal_handler(self, that):
self.visit_ast(that)
def visit_ensure(self, that):
self.visit_ast(that)
def visit_argument(self, that):
self.visit_ast(that)
def visit_sequence(self, that):
self.visit_ast(that)
def visit_local_declaration(self, that):
self.visit_ast(that)
def visit_block(self, that):
self.visit_ast(that)
def visit_with_escape(self, that):
self.visit_ast(that)
def visit_namespace_declaration(self, that):
self.visit_ast(that)
def visit_method_declaration(self, that):
self.visit_ast(that)
def visit_function_declaration(self, that):
self.visit_ast(that)
def visit_signature(self, that):
self.visit_ast(that)
def visit_parameter(self, that):
self.visit_ast(that)
def visit_method(self, that):
self.visit_ast(that)
def visit_guard(self, that):
self.visit_ast(that)
def visit_quote(self, that):
self.visit_ast(that)
def visit_import(self, that):
self.visit_ast(that)
def visit_is_declaration(self, that):
self.visit_ast(that)
def visit_current_module(self, that):
self.visit_ast(that)
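# Illustrative sketch, not part of the original module: a minimal Visitor
# subclass that counts Literal nodes, relying on the recursive default
# behavior of visit_ast/traverse described above. The class name is a
# hypothetical example.
class _ExampleLiteralCounter(Visitor):
    def __init__(self):
        Visitor.__init__(self)
        self.count = 0
    def visit_literal(self, that):
        self.count += 1
        Visitor.visit_literal(self, that)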
# A constant literal value.
@plankton.serializable("ast:Literal")
class Literal(object):
@plankton.field("value")
def __init__(self, value):
self.value = value
def accept(self, visitor):
        return visitor.visit_literal(self)
def traverse(self, visitor):
pass
def __str__(self):
return "(literal %s)" % self.value
# An array expression.
@plankton.serializable("ast:Array")
class Array(object):
@plankton.field("elements")
def __init__(self, elements):
self.elements = elements
def accept(self, visitor):
return visitor.visit_array(self)
def traverse(self, visitor):
for element in self.elements:
element.accept(visitor)
def __str__(self):
return "(array %s)" % map(str, self.elements)
# A reference to an enclosing binding. The name is used before the variable
# has been resolved, the symbol after.
@plankton.serializable()
class Variable(object):
_LOCAL_HEADER = "ast:LocalVariable"
_NAMESPACE_HEADER = "ast:NamespaceVariable"
def __init__(self, ident, symbol=None):
assert not ident is None
self.ident = ident
self.symbol = symbol
def accept(self, visitor):
return visitor.visit_variable(self)
def traverse(self, visitor):
pass
def get_name(self):
return self.ident.get_name()
def to_assignment(self, rvalue):
return VariableAssignment(self, rvalue)
@plankton.header
def get_header(self):
if self.symbol is None:
return Variable._NAMESPACE_HEADER
else:
return Variable._LOCAL_HEADER
@plankton.payload
def get_payload(self):
if self.symbol is None:
return {'name': self.ident}
else:
return {'symbol': self.symbol}
def __str__(self):
return "(var %s)" % str(self.ident)
@plankton.serializable("ast:VariableAssignment")
class VariableAssignment(object):
@plankton.field("target")
@plankton.field("value")
def __init__(self, target, value):
self.target = target
self.value = value
def accept(self, visitor):
visitor.visit_variable_assignment(self)
def traverse(self, visitor):
self.target.accept(visitor)
self.value.accept(visitor)
# A multi-method invocation.
@plankton.serializable("ast:Invocation")
class Invocation(object):
@plankton.field("arguments")
def __init__(self, arguments):
self.arguments = arguments
def accept(self, visitor):
return visitor.visit_invocation(self)
def traverse(self, visitor):
for argument in self.arguments:
argument.accept(visitor)
def to_assignment(self, rvalue):
new_args = []
max_arg_index = -1
for arg in self.arguments:
new_arg = arg
if arg.tag is data._SELECTOR:
inner_op = arg.value.value
outer_op = data.Operation.assign(inner_op)
new_arg = Argument(arg.tag, Literal(outer_op))
elif type(arg.tag) == int:
max_arg_index = max(max_arg_index, arg.tag)
new_args.append(new_arg)
new_args.append(Argument(max_arg_index + 1, rvalue))
return Invocation(new_args)
def __str__(self):
return "(call %s)" % " ".join(map(str, self.arguments))
# A reified multi-method call literal.
@plankton.serializable("ast:CallLiteral")
class CallLiteral(object):
@plankton.field("arguments")
def __init__(self, arguments):
self.arguments = arguments
def accept(self, visitor):
return visitor.visit_call_literal(self)
def traverse(self, visitor):
for argument in self.arguments:
argument.accept(visitor)
# An individual argument to an invocation.
@plankton.serializable("ast:CallLiteralArgument")
class CallLiteralArgument(object):
@plankton.field("tag")
@plankton.field("value")
def __init__(self, tag, value):
self.tag = tag
self.value = value
def accept(self, visitor):
return visitor.visit_call_literal_argument(self)
def traverse(self, visitor):
self.tag.accept(visitor)
self.value.accept(visitor)
def __str__(self):
return "(call literal argument %s %s)" % (self.tag, self.value)
@plankton.serializable("ast:Signal")
class Signal(object):
@plankton.field("escape")
@plankton.field("arguments")
@plankton.field("default")
def __init__(self, escape, arguments, default):
self.escape = escape
self.arguments = arguments
self.default = default
def accept(self, visitor):
return visitor.visit_signal(self)
def traverse(self, visitor):
for argument in self.arguments:
argument.accept(visitor)
if not self.default is None:
self.default.accept(visitor)
@plankton.serializable("ast:SignalHandler")
class SignalHandler(object):
@plankton.field("body")
@plankton.field("handlers")
def __init__(self, body, handlers):
self.body = body
self.handlers = handlers
def accept(self, visitor):
visitor.visit_signal_handler(self)
def traverse(self, visitor):
self.body.accept(visitor)
for handler in self.handlers:
handler.accept(visitor)
@plankton.serializable("ast:Ensure")
class Ensure(object):
@plankton.field("body")
@plankton.field("on_exit")
def __init__(self, body, on_exit):
self.body = body
self.on_exit = on_exit
def accept(self, visitor):
return visitor.visit_ensure(self)
def traverse(self, visitor):
self.body.accept(visitor)
self.on_exit.accept(visitor)
# An individual argument to an invocation.
@plankton.serializable("ast:Argument")
class Argument(object):
@plankton.field("tag")
@plankton.field("value")
@plankton.field("next_guard")
def __init__(self, tag, value):
self.tag = tag
if isinstance(value, NextDirective):
self.value = value.value
self.next_guard = value.guard
else:
self.value = value
self.next_guard = None
def accept(self, visitor):
return visitor.visit_argument(self)
def traverse(self, visitor):
self.value.accept(visitor)
if not self.next_guard is None:
self.next_guard.accept(visitor)
def __str__(self):
return "(argument %s %s)" % (self.tag, self.value)
class NextDirective(object):
def __init__(self, value, guard):
self.value = value
self.guard = guard
# A binding from a symbol to a value.
@plankton.serializable()
class Binding(object):
@plankton.field("symbol")
@plankton.field("value")
def __init__(self, symbol, value):
self.symbol = symbol
self.value = value
# A sequence of expressions to execute in order, yielding the value of the last
# expression.
@plankton.serializable("ast:Sequence")
class Sequence(object):
@plankton.field("values")
def __init__(self, values):
self.values = values
def accept(self, visitor):
return visitor.visit_sequence(self)
def traverse(self, visitor):
for value in self.values:
value.accept(visitor)
@staticmethod
def make(values):
if len(values) == 0:
return Literal(None)
elif len(values) == 1:
return values[0]
else:
return Sequence(values)
def __str__(self):
return "(sequence %s)" % map(str, self.values)
# A local variable declaration.
@plankton.serializable("ast:LocalDeclaration")
class LocalDeclaration(object):
@plankton.field("symbol")
@plankton.field("is_mutable")
@plankton.field("value")
@plankton.field("body")
def __init__(self, ident, is_mutable, value, body):
self.ident = ident
self.symbol = None
self.is_mutable = is_mutable
self.value = value
self.body = body
def accept(self, visitor):
return visitor.visit_local_declaration(self)
def traverse(self, visitor):
self.value.accept(visitor)
self.body.accept(visitor)
def get_name(self):
return self.ident.get_name()
def __str__(self):
if self.is_mutable:
type = "def"
else:
type = "var"
return "(%s %s := %s in %s)" % (type, self.ident, self.value, self.body)
@plankton.serializable("ast:Block")
class Block(object):
@plankton.field("symbol")
@plankton.field("methods")
@plankton.field("body")
def __init__(self, ident, methods, body):
self.ident = ident
self.symbol = None
self.methods = methods
self.body = body
def accept(self, visitor):
return visitor.visit_block(self)
def traverse(self, visitor):
for method in self.methods:
method.accept(visitor)
self.body.accept(visitor)
def get_name(self):
return self.ident.get_name()
def __str__(self):
return "(lfn %s %s in %s)" % (type, self.ident, self.methods[0], self.body)
# A local escape capture.
@plankton.serializable("ast:WithEscape")
class WithEscape(object):
@plankton.field("symbol")
@plankton.field("body")
def __init__(self, ident, body):
self.ident = ident
self.symbol = None
self.body = body
def accept(self, visitor):
return visitor.visit_with_escape(self)
def traverse(self, visitor):
self.body.accept(visitor)
def get_name(self):
return self.ident.get_name()
# A symbol that identifies a scoped binding.
@plankton.serializable("ast:Symbol")
class Symbol(object):
@plankton.field("name")
@plankton.field("origin")
def __init__(self, name, origin=None):
self.name = name
self.origin = origin
def set_origin(self, value):
assert self.origin is None
self.origin = value
return self
# An individual method parameter.
@plankton.serializable("ast:Parameter")
class Parameter(object):
@plankton.field("symbol")
@plankton.field("tags")
@plankton.field("guard")
def __init__(self, ident, tags, guard):
self.ident = ident
if self.ident is None:
self.symbol = None
else:
self.symbol = Symbol(self.get_name())
self.tags = tags
self.guard = guard
def accept(self, visitor):
visitor.visit_parameter(self)
def traverse(self, visitor):
self.guard.accept(visitor)
def get_name(self):
return self.ident.get_name()
def get_symbol(self):
return self.symbol
def __str__(self):
return "(param (tags %s) (name %s) (guard %s))" % (
", ".join(map(str, self.tags)),
self.ident,
self.guard)
@plankton.serializable("ast:Signature")
class Signature(object):
@plankton.field("parameters")
@plankton.field("allow_extra")
@plankton.field("reified")
def __init__(self, parameters, allow_extra, reified_ident):
self.parameters = parameters
self.allow_extra = allow_extra
if reified_ident is None:
self.reified_ident = None
self.reified = None
else:
self.reified_ident = reified_ident
self.reified = Symbol(reified_ident.get_name())
self.check_tags_unique()
# Assert that the same tag is only ever used once.
def check_tags_unique(self):
tags = set()
for param in self.parameters:
for tag in param.tags:
assert not tag in tags
tags.add(tag)
def accept(self, visitor):
visitor.visit_signature(self)
def traverse(self, visitor):
for param in self.parameters:
param.accept(visitor)
def to_data(self):
data_params = [param.to_data() for param in self.parameters]
return data.Signature(data_params)
def __str__(self):
return "(signature %s)" % " ".join(map(str, self.parameters))
@plankton.serializable("ast:Guard")
class Guard(object):
@plankton.field("type")
@plankton.field("value")
def __init__(self, type, value):
self.type = type
self.value = value
def accept(self, visitor):
visitor.visit_guard(self)
def traverse(self, visitor):
if not self.value is None:
self.value.accept(visitor)
# Is this an any-guard?
def is_any(self):
return self.type == data.Guard._ANY
def __str__(self):
if self.value is None:
return "%s()" % self.type
else:
return "%s(%s)" % (self.type, self.value)
@staticmethod
def any():
return Guard(data.Guard._ANY, None)
@staticmethod
def eq(value):
assert not value is None
return Guard(data.Guard._EQ, value)
    # 'is' is a reserved word in python, hence the trailing underscore.
@staticmethod
def is_(value):
assert not value is None
return Guard(data.Guard._IS, value)
# An anonymous function. These can be broken down into equivalent new-object
# and set-property calls but that's for later.
@plankton.serializable("ast:Lambda")
class Lambda(object):
@plankton.field("methods")
def __init__(self, methods):
assert isinstance(methods, list)
self.methods = methods
def accept(self, visitor):
return visitor.visit_lambda(self)
def traverse(self, visitor):
for method in self.methods:
method.accept(visitor)
def __str__(self):
return "(fn (%s) => %s)" % (self.methods[0].signature, self.methods[0].body)
# Creates a no-argument lambda with the given expression as the body.
@staticmethod
def thunk(body):
method = Lambda.method(body, data.Operation.call())
return Lambda([method])
@staticmethod
def method(body, op):
signature = Signature([
Parameter(None, [data._SUBJECT], Guard.any()),
Parameter(None, [data._SELECTOR], Guard.eq(Literal(op))),
Parameter(None, [data._TRANSPORT], Guard.eq(Literal(data._SYNC)))
], False, None)
return Method(signature, body)
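    # Illustrative sketch, not part of the original class: Lambda.thunk wraps a
    # single expression in a zero-argument lambda (Literal(42) is a placeholder
    # body expression).
    #
    #   Lambda.thunk(Literal(42))
    #   # -> Lambda([Method(<subject/selector/transport signature>, Literal(42))])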
# Yields the current bound module fragment.
@plankton.serializable("ast:CurrentModule")
class CurrentModule(object):
def accept(self, visitor):
return visitor.visit_current_module(self)
def traverse(self, visitor):
pass
def __str__(self):
return "(current-module)"
@plankton.serializable("ast:Program")
class Program(object):
@plankton.field("entry_point")
@plankton.field("module")
def __init__(self, entry_point, module):
self.entry_point = entry_point
self.module = module
def accept(self, visitor):
return visitor.visit_program(self)
def __str__(self):
return "(program %s)" % str(self.module)
# A toplevel namespace declaration.
@plankton.serializable("ast:NamespaceDeclaration")
class NamespaceDeclaration(object):
@plankton.field("annotations")
@plankton.field("path")
@plankton.field("value")
def __init__(self, annotations, ident, value):
self.annotations = annotations
self.ident = ident
self.path = ident.path
self.value = value
# Returns the stage this declaration belongs to.
def get_stage(self):
return self.ident.stage
def accept(self, visitor):
return visitor.visit_namespace_declaration(self)
def apply(self, module):
fragment = module.get_or_create_fragment(self.ident.stage)
fragment.add_element(self)
def traverse(self, visitor):
for annot in self.annotations:
annot.accept(visitor)
self.value.accept(visitor)
def __str__(self):
return "(namespace-declaration %s %s)" % (self.ident, self.value)
# Syntax of a method.
@plankton.serializable("ast:Method")
class Method(object):
@plankton.field("signature")
@plankton.field("body")
def __init__(self, signature, body):
self.signature = signature
self.body = body
def accept(self, visitor):
visitor.visit_method(self)
def traverse(self, visitor):
self.signature.accept(visitor)
self.body.accept(visitor)
# A toplevel method declaration.
@plankton.serializable("ast:MethodDeclaration")
class MethodDeclaration(object):
@plankton.field("annotations")
@plankton.field("method")
def __init__(self, stage, annotations, method):
self.stage = stage
self.annotations = annotations
self.method = method
def accept(self, visitor):
return visitor.visit_method_declaration(self)
def traverse(self, visitor):
for annot in self.annotations:
annot.accept(visitor)
self.method.accept(visitor)
def apply(self, module):
fragment = module.get_or_create_fragment(self.stage)
fragment.add_element(self)
def __str__(self):
return "(method-declaration %s %s)" % (self.method.signature, self.method.body)
# A toplevel function declaration.
class FunctionDeclaration(object):
def __init__(self, ident, method):
self.ident = ident
self.method = method
def get_stage(self):
return self.ident.stage
def accept(self, visitor):
return visitor.visit_function_declaration(self)
def traverse(self, visitor):
self.method.accept(visitor)
def apply(self, module):
stage = self.ident.stage
        value_fragment = module.get_or_create_fragment(stage - 1)
value_fragment.ensure_function_declared(self.ident)
method_fragment = module.get_or_create_fragment(stage)
method_fragment.add_element(MethodDeclaration(0, [], self.method))
def __str__(self):
return "(function-declaration %s %s)" % (self.method.signature, self.method.body)
# A toplevel type declaration
class TypeDeclaration(object):
_NEW_TYPE = data.Operation.infix("new_type")
def __init__(self, ident, supers, members):
self.ident = ident
self.supers = supers
self.members = members
def apply(self, module):
name_decl = NamespaceDeclaration([], self.ident, Invocation([
Argument(data._SUBJECT, CurrentModule()),
Argument(data._SELECTOR, Literal(TypeDeclaration._NEW_TYPE)),
Argument(data._TRANSPORT, Literal(data._SYNC)),
Argument(0, Literal(self.ident)),
]))
name_decl.apply(module)
for parent in self.supers:
sub = Variable(self.ident)
is_decl = IsDeclaration(sub, parent)
is_decl.apply(module)
for member in self.members:
member.apply(module)
# A stand-alone field declaration.
class FieldDeclaration(object):
_NEW_GLOBAL_FIELD = data.Operation.infix("new_hard_field")
_SQUARE_SAUSAGES = data.Operation.index()
_SQUARE_SAUSAGE_ASSIGN = data.Operation.assign(data.Operation.index())
def __init__(self, subject, key_name, getter, setter):
self.subject = subject
self.key_name = key_name
self.getter = getter
self.setter = setter
def apply(self, module):
# TODO: this field shouldn't be accessible through the namespace.
key_ident = data.Identifier(-1, self.key_name)
key_access = Variable(key_ident)
not_async = Parameter(None, [data._TRANSPORT], Guard.eq(Literal(data._SYNC)))
key_decl = NamespaceDeclaration([], key_ident, Invocation([
Argument(data._SUBJECT, CurrentModule()),
Argument(data._SELECTOR, Literal(FieldDeclaration._NEW_GLOBAL_FIELD)),
Argument(data._TRANSPORT, Literal(data._SYNC)),
Argument(0, Literal(self.key_name)),
]))
key_decl.apply(module)
getter = MethodDeclaration(0, [], Method(
Signature([self.subject, self.getter, not_async], False, None), Invocation([
Argument(data._SUBJECT, key_access),
Argument(data._SELECTOR, Literal(FieldDeclaration._SQUARE_SAUSAGES)),
Argument(data._TRANSPORT, Literal(data._SYNC)),
Argument(0, Variable(self.subject.ident))
])))
        value = Parameter(data.Identifier(0, data.Path(['value'])), [0],
            Guard.any())
setter = MethodDeclaration(0, [], Method(
Signature([self.subject, self.setter, not_async, value], False, None), Invocation([
Argument(data._SUBJECT, key_access),
Argument(data._SELECTOR, Literal(FieldDeclaration._SQUARE_SAUSAGE_ASSIGN)),
Argument(data._TRANSPORT, Literal(data._SYNC)),
Argument(0, Variable(self.subject.ident)),
Argument(1, Variable(value.ident))
])))
getter.apply(module)
setter.apply(module)
@plankton.serializable("core:UnboundModule")
class UnboundModule(object):
@plankton.field('path')
@plankton.field('fragments')
def __init__(self, path, fragments):
self.path = path
self.fragments = fragments
def __str__(self):
return "(module %s %s)" % (self.path, " ".join(map(str, self.fragments)))
@plankton.serializable("core:UnboundModuleFragment")
class UnboundModuleFragment(object):
@plankton.field('stage')
@plankton.field('imports')
@plankton.field('elements')
def __init__(self, stage):
self.stage = stage
self.imports = []
self.elements = []
self.functions = set()
    # Records an import of the given path for this stage.
def add_import(self, path):
self.imports.append(path)
def add_element(self, element):
self.elements.append(element)
def ensure_function_declared(self, name):
if name in self.functions:
return
self.functions.add(name)
value = Invocation([
Argument(data._SUBJECT, Variable(data.Identifier(-1, data.Path(["ctrino"])))),
Argument(data._SELECTOR, Literal(data.Operation.infix("new_function"))),
Argument(data._TRANSPORT, Literal(data._SYNC)),
Argument(0, Literal(name.path))
])
self.add_element(NamespaceDeclaration([], name, value))
def __str__(self):
return "(fragment %s %s)" % (self.stage, " ".join(map(str, self.elements)))
# A full compilation unit.
class Module(object):
def __init__(self, module_name):
self.module_name = module_name
self.entry_point = None
self.stages = {}
self.get_or_create_fragment(0)
def add_element(self, *elements):
for element in elements:
element.apply(self)
return self
def get_stages(self):
for index in sorted(self.stages.keys()):
yield (index, self.stages[index])
def set_entry_point(self, value):
self.entry_point = value
return self
# Returns the stage with the given index. If no such stage exists an exception
# is raised.
def get_stage(self, index):
return self.stages[index]
# Returns the oldest stage, the one with the lowest stage offset.
def get_oldest_stage(self):
result = None
result_stage = 100
for (stage, value) in self.stages.items():
if stage < result_stage:
result = value
result_stage = stage
return result
def get_or_create_fragment(self, index):
if not index in self.stages:
self.stages[index] = self.create_fragment(index)
return self.stages[index]
def create_fragment(self, stage):
return UnboundModuleFragment(stage)
def get_present(self):
return self.get_or_create_fragment(0)
def get_present_program(self):
module = self.as_unbound_module()
return Program(self.entry_point, module)
def get_present_module(self):
last_stage = self.get_present()
return last_stage.get_module()
def accept(self, visitor):
return visitor.visit_unit(self)
def as_unbound_module(self):
fragments = []
for (index, fragment) in self.stages.iteritems():
fragments.append(fragment)
return UnboundModule(data.Path([self.module_name]), fragments)
def __str__(self):
stage_list = list(self.get_stages())
stage_strs = ["(%s %s)" % (i, " ".join(map(str, s.elements))) for (i, s) in stage_list]
return "(unit %s)" % " ".join(stage_strs)
# A quote/unquote. The stage indicates which direction to quote in -- less than
# 0 means unquote, greater than means quote.
@plankton.serializable()
class Quote(object):
def __init__(self, stage, ast):
self.stage = stage
self.ast = ast
self.value = None
def accept(self, visitor):
return visitor.visit_quote(self)
def traverse(self, visitor):
self.ast.accept(visitor)
@plankton.replacement
def get_substitute(self):
return Literal(self.value)
def __str__(self):
return "(@ %s)" % self.ast
@plankton.serializable("ast:Unquote")
class Unquote(object):
    @plankton.field('stage')
    @plankton.field('ast')
    def __init__(self, stage, ast):
        self.stage = stage
        self.ast = ast
class Import(object):
def __init__(self, ident=None):
self.ident = ident
def get_stage(self):
return self.ident.stage
def accept(self, visitor):
visitor.visit_import(self)
def apply(self, module):
fragment = module.get_or_create_fragment(self.ident.stage)
fragment.add_import(self.ident.path)
def traverse(self, visitor):
pass
def __str__(self):
return "(import %s)" % self.ident
@plankton.serializable("ast:IsDeclaration")
class IsDeclaration(object):
@plankton.field("subtype")
@plankton.field("supertype")
def __init__(self, subtype, supertype):
self.subtype = subtype
self.supertype = supertype
def accept(self, visitor):
visitor.visit_is_declaration(self)
def traverse(self, visitor):
self.subtype.accept(visitor)
self.supertype.accept(visitor)
def apply(self, module):
# TODO: allow past is-declarations.
fragment = module.get_or_create_fragment(0)
fragment.add_element(self)
class ModuleManifest(object):
def __init__(self, ident, sources):
assert isinstance(sources, Array)
self.ident = ident
self.sources = sources
def get_path(self):
return self.ident.path
def get_sources(self):
for element in self.sources.elements:
assert isinstance(element, Literal)
yield element.value
|
|
# Copyright 2013 Rackspace Hosting Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import copy
from datetime import datetime
import json
import time
import uuid
import mock
from neutron.api.v2 import attributes as neutron_attrs
from neutron.common import exceptions as n_exc_ext
from neutron_lib import exceptions as n_exc
from oslo_config import cfg
from quark.db import api as db_api
from quark.db import models
from quark import exceptions as q_exc
from quark import network_strategy
from quark import plugin_views
from quark.tests import test_quark_plugin
class TestQuarkGetSubnetCount(test_quark_plugin.TestQuarkPlugin):
def test_get_subnet_count(self):
"""This isn't really testable."""
with mock.patch("quark.db.api.subnet_count_all"):
self.plugin.get_subnets_count(self.context, {})
class TestQuarkGetSubnets(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, subnets=None, routes=None):
if routes is None:
routes = []
route_models = []
for route in routes:
r = models.Route()
r.update(route)
route_models.append(r)
if isinstance(subnets, list):
subnet_models = []
for subnet in subnets:
s_dict = subnet.copy()
s_dict["routes"] = route_models
s = models.Subnet(network=models.Network())
s.update(s_dict)
subnet_models.append(s)
elif subnets:
mod = models.Subnet(network=models.Network())
mod.update(subnets)
mod["routes"] = route_models
subnet_models = mod
else:
subnet_models = None
with mock.patch("quark.db.api.subnet_find") as subnet_find:
subnet_find.return_value = subnet_models
yield
def test_subnets_list(self):
subnet_id = str(uuid.uuid4())
route = dict(id=1, cidr="0.0.0.0/0", gateway="192.168.0.1")
subnet = dict(id=subnet_id, network_id=1, name=subnet_id,
tenant_id=self.context.tenant_id, ip_version=4,
cidr="192.168.0.0/24", gateway_ip="192.168.0.1",
dns_nameservers=[],
enable_dhcp=None)
with self._stubs(subnets=[subnet], routes=[route]):
res = self.plugin.get_subnets(self.context, None, None, None, {},
{})
# Compare routes separately
routes = res[0].pop("host_routes")
for key in subnet.keys():
self.assertEqual(res[0][key], subnet[key])
self.assertEqual(len(routes), 0)
def test_subnets_list_two_default_routes_shows_last_one(self):
subnet_id = str(uuid.uuid4())
route = dict(id=1, cidr="0.0.0.0/0", gateway="192.168.0.1")
route2 = dict(id=1, cidr="0.0.0.0/0", gateway="192.168.0.2")
subnet = dict(id=subnet_id, network_id=1, name=subnet_id,
tenant_id=self.context.tenant_id, ip_version=4,
cidr="192.168.0.0/24", gateway_ip="192.168.0.1",
dns_nameservers=[],
enable_dhcp=None)
with self._stubs(subnets=[subnet], routes=[route, route2]):
res = self.plugin.get_subnets(self.context, None, None, None, {},
{})
# Don't want to test that LOG.info is called but we can
# know the case is covered by checking the gateway is the one
# we expect it to be
self.assertEqual(res[0]["gateway_ip"], "192.168.0.2")
self.assertEqual(len(res[0]["host_routes"]), 0)
def test_subnet_show_fail(self):
with self._stubs():
with self.assertRaises(n_exc.SubnetNotFound):
self.plugin.get_subnet(self.context, 1)
def test_subnet_show(self):
subnet_id = str(uuid.uuid4())
route = dict(id=1, cidr="0.0.0.0/0", gateway="192.168.0.1",
subnet_id=subnet_id)
subnet = dict(id=subnet_id, network_id=1, name=subnet_id,
tenant_id=self.context.tenant_id, ip_version=4,
cidr="192.168.0.0/24", gateway_ip="192.168.0.1",
dns_nameservers=[],
enable_dhcp=None)
with self._stubs(subnets=subnet, routes=[route]):
res = self.plugin.get_subnet(self.context, subnet_id)
# Compare routes separately
routes = res.pop("host_routes")
for key in subnet.keys():
self.assertEqual(res[key], subnet[key])
self.assertEqual(len(routes), 0)
class TestQuarkGetSubnetsHideAllocPools(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, subnets=None):
if isinstance(subnets, list):
subnet_models = []
for subnet in subnets:
s_dict = subnet.copy()
s = models.Subnet(network=models.Network())
s.update(s_dict)
subnet_models.append(s)
cfg.CONF.set_override('show_allocation_pools', False, "QUARK")
with mock.patch("quark.db.api.subnet_find") as subnet_find:
subnet_find.return_value = subnet_models
yield
cfg.CONF.set_override('show_allocation_pools', True, "QUARK")
def test_subnets_list(self):
subnet_id = str(uuid.uuid4())
subnet = dict(id=subnet_id, network_id=1, name=subnet_id,
tenant_id=self.context.tenant_id, ip_version=4,
cidr="192.168.0.0/24", gateway_ip="192.168.0.1",
dns_nameservers=[],
enable_dhcp=None)
with self._stubs(subnets=[subnet]):
res = self.plugin.get_subnets(self.context, None, None, None, {},
{})
self.assertEqual(res[0]["allocation_pools"], [])
class TestQuarkCreateSubnetOverlapping(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, subnets=None):
if subnets is None:
subnets = []
subnet_models = []
for subnet in subnets:
s = models.Subnet()
s.update(subnet)
subnet_models.append(s)
network = models.Network()
network.update(dict(id=1, subnets=subnet_models))
with contextlib.nested(
mock.patch("quark.db.api.network_find"),
mock.patch("quark.db.api.subnet_find"),
mock.patch("quark.db.api.subnet_create"),
mock.patch("neutron.common.rpc.get_notifier")
) as (net_find, subnet_find, subnet_create, get_notifier):
net_find.return_value = network
subnet_find.return_value = subnet_models
subnet_create.return_value = models.Subnet(
network=models.Network(),
cidr="192.168.1.1/24")
yield subnet_create
def test_create_subnet_overlapping_true(self):
cfg.CONF.set_override('allow_overlapping_ips', True)
with self._stubs() as subnet_create:
s = dict(subnet=dict(
gateway_ip=neutron_attrs.ATTR_NOT_SPECIFIED,
dns_nameservers=neutron_attrs.ATTR_NOT_SPECIFIED,
cidr="192.168.1.1/8",
network_id=1))
self.plugin.create_subnet(self.context, s)
self.assertEqual(subnet_create.call_count, 1)
def test_create_subnet_overlapping_false(self):
cfg.CONF.set_override('allow_overlapping_ips', False)
with self._stubs() as subnet_create:
s = dict(subnet=dict(
gateway_ip=neutron_attrs.ATTR_NOT_SPECIFIED,
dns_nameservers=neutron_attrs.ATTR_NOT_SPECIFIED,
cidr="192.168.1.1/8",
network_id=1))
self.plugin.create_subnet(self.context, s)
self.assertEqual(subnet_create.call_count, 1)
def test_create_subnet_overlapping_conflict(self):
cfg.CONF.set_override('allow_overlapping_ips', False)
with self._stubs(subnets=[dict(cidr="192.168.10.1/24")]):
with self.assertRaises(n_exc.InvalidInput):
s = dict(subnet=dict(cidr="192.168.1.1/8",
network_id=1))
self.plugin.create_subnet(self.context, s)
class TestQuarkCreateSubnetAllocationPools(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, subnet):
s = models.Subnet(network=models.Network(id=1, subnets=[]))
allocation_pools = subnet.pop("allocation_pools", None)
s.update(subnet)
if allocation_pools is not None:
subnet["allocation_pools"] = allocation_pools
def _allocation_pools_mock():
if allocation_pools is not None:
return mock.patch.object(models.Subnet, "allocation_pools")
return mock.MagicMock()
with contextlib.nested(
mock.patch("quark.db.api.network_find"),
mock.patch("quark.db.api.subnet_find"),
mock.patch("quark.db.api.subnet_create"),
mock.patch("neutron.common.rpc.get_notifier"),
_allocation_pools_mock(),
) as (net_find, subnet_find, subnet_create, get_notifier,
alloc_pools_method):
net_find.return_value = s["network"]
subnet_find.return_value = []
subnet_create.return_value = s
alloc_pools_method.__get__ = mock.Mock(
return_value=allocation_pools)
yield subnet_create
def setUp(self):
super(TestQuarkCreateSubnetAllocationPools, self).setUp()
def tearDown(self):
super(TestQuarkCreateSubnetAllocationPools, self).tearDown()
def test_create_subnet_allocation_pools_zero(self):
s = dict(subnet=dict(
cidr="192.168.1.1/24",
network_id=1))
with self._stubs(s["subnet"]) as (subnet_create):
resp = self.plugin.create_subnet(self.context, s)
self.assertEqual(subnet_create.call_count, 1)
self.assertEqual(resp["allocation_pools"],
[dict(start="192.168.1.1", end="192.168.1.254")])
def test_create_subnet_allocation_pools_zero_v6(self):
s = dict(subnet=dict(
cidr="2607:f0d0:1002:51::0/64",
network_id=1))
with self._stubs(s["subnet"]) as (subnet_create):
resp = self.plugin.create_subnet(self.context, s)
self.assertEqual(subnet_create.call_count, 1)
self.assertEqual(
resp["allocation_pools"],
[dict(start="2607:f0d0:1002:51::1",
end="2607:f0d0:1002:51:ffff:ffff:ffff:fffe")])
def test_create_subnet_allocation_pools_one(self):
pools = [dict(start="192.168.1.10", end="192.168.1.20")]
s = dict(subnet=dict(
allocation_pools=pools,
cidr="192.168.1.1/24",
network_id=1))
with self._stubs(s["subnet"]) as (subnet_create):
resp = self.plugin.create_subnet(self.context, s)
self.assertEqual(subnet_create.call_count, 1)
self.assertEqual(resp["allocation_pools"], pools)
def test_create_subnet_allocation_pools_gateway_conflict(self):
pools = [dict(start="192.168.1.1", end="192.168.1.20")]
s = dict(subnet=dict(allocation_pools=pools,
cidr="192.168.1.1/24",
gateway_ip="192.168.1.1",
network_id=1))
with self._stubs(s["subnet"]):
with self.assertRaises(
n_exc_ext.GatewayConflictWithAllocationPools):
self.plugin.create_subnet(self.context, s)
def test_create_subnet_allocation_pools_invalid_outside(self):
pools = [dict(start="192.168.0.10", end="192.168.0.20")]
s = dict(subnet=dict(
allocation_pools=pools,
cidr="192.168.1.1/24",
network_id=1))
with self._stubs(s["subnet"]):
with self.assertRaises(n_exc_ext.OutOfBoundsAllocationPool):
self.plugin.create_subnet(self.context, s)
def test_create_subnet_allocation_pools_invalid_overlaps(self):
pools = [dict(start="192.168.0.255", end="192.168.1.20")]
s = dict(subnet=dict(
allocation_pools=pools,
cidr="192.168.1.1/24",
network_id=1))
with self._stubs(s["subnet"]):
with self.assertRaises(n_exc_ext.OutOfBoundsAllocationPool):
self.plugin.create_subnet(self.context, s)
def test_create_subnet_allocation_pools_two(self):
pools = [dict(start="192.168.1.10", end="192.168.1.20"),
dict(start="192.168.1.40", end="192.168.1.50")]
s = dict(subnet=dict(
allocation_pools=pools,
cidr="192.168.1.1/24",
network_id=1))
with self._stubs(s["subnet"]) as (subnet_create):
resp = self.plugin.create_subnet(self.context, s)
self.assertEqual(subnet_create.call_count, 1)
self.assertEqual(resp["allocation_pools"], pools)
def test_create_subnet_allocation_pools_three(self):
pools = [dict(start="192.168.1.5", end="192.168.1.254")]
s = dict(subnet=dict(
allocation_pools=pools,
ip_version=4,
cidr="192.168.1.1/24",
network_id=1))
with self._stubs(s["subnet"]) as (subnet_create):
resp = self.plugin.create_subnet(self.context, s)
self.assertEqual(subnet_create.call_count, 1)
self.assertEqual(resp["allocation_pools"], pools)
def test_create_subnet_allocation_pools_four(self):
pools = [dict(start="2607:f0d0:1002:51::a",
end="2607:f0d0:1002:51::ffff:fffe")]
s = dict(subnet=dict(
allocation_pools=pools,
ip_version=6,
cidr="2607:f0d0:1002:51::0/64",
network_id=1))
with self._stubs(s["subnet"]) as (subnet_create):
resp = self.plugin.create_subnet(self.context, s)
self.assertEqual(subnet_create.call_count, 1)
self.assertEqual(resp["allocation_pools"], pools)
def test_create_subnet_allocation_pools_empty_list(self):
        # An empty allocation_pools list yields a subnet with no allocatable addresses.
pools = []
s = dict(subnet=dict(
allocation_pools=pools,
cidr="192.168.1.1/24",
network_id=1))
with self._stubs(s["subnet"]) as (subnet_create):
resp = self.plugin.create_subnet(self.context, s)
self.assertEqual(subnet_create.call_count, 1)
expected_pools = []
self.assertEqual(resp["allocation_pools"], expected_pools)
# TODO(amir): Refactor the tests to test individual subnet attributes.
# * copy.deepcopy was necessary to maintain tests on keys, which is a bit ugly.
# * workaround is also in place for lame ATTR_NOT_SPECIFIED object()
class TestQuarkCreateSubnet(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, subnet=None, network=True, routes=(), dns=()):
subnet_mod = models.Subnet(
network=models.Network(id=1) if network else None)
dns_ips = subnet.pop("dns_nameservers", [])
host_routes = subnet.pop("host_routes", [])
allocation_pools = subnet.pop("allocation_pools", None)
subnet_mod.update(subnet)
subnet["dns_nameservers"] = dns_ips
subnet["host_routes"] = host_routes
if allocation_pools is not None:
subnet["allocation_pools"] = allocation_pools
dns = [{"ip": x} for x in dns]
route_models = [models.Route(**r) for r in routes]
dns_models = [models.DNSNameserver(**d) for d in dns]
def _allocation_pools_mock():
if allocation_pools is not None:
return mock.patch.object(models.Subnet, "allocation_pools")
return mock.MagicMock()
with contextlib.nested(
mock.patch("quark.db.api.subnet_create"),
mock.patch("quark.db.api.network_find"),
mock.patch("quark.db.api.dns_create"),
mock.patch("quark.db.api.route_create"),
mock.patch("quark.db.api.subnet_find"),
mock.patch("neutron.common.rpc.get_notifier"),
_allocation_pools_mock()
) as (subnet_create, net_find, dns_create, route_create, subnet_find,
get_notifier, alloc_pools_method):
subnet_create.return_value = subnet_mod
net_find.return_value = network
route_create.side_effect = route_models
dns_create.side_effect = dns_models
alloc_pools_method.__get__ = mock.Mock(
return_value=allocation_pools)
yield subnet_create, dns_create, route_create
def test_create_subnet(self):
routes = [dict(cidr="0.0.0.0/0", gateway="0.0.0.0")]
subnet = dict(
subnet=dict(network_id=1,
tenant_id=self.context.tenant_id, ip_version=4,
cidr="172.16.0.0/24", gateway_ip="0.0.0.0",
dns_nameservers=neutron_attrs.ATTR_NOT_SPECIFIED,
host_routes=neutron_attrs.ATTR_NOT_SPECIFIED,
enable_dhcp=None))
with self._stubs(
subnet=subnet["subnet"],
routes=routes
) as (subnet_create, dns_create, route_create):
dns_nameservers = subnet["subnet"].pop("dns_nameservers")
host_routes = subnet["subnet"].pop("host_routes")
subnet_request = copy.deepcopy(subnet)
subnet_request["subnet"]["dns_nameservers"] = dns_nameservers
subnet_request["subnet"]["host_routes"] = host_routes
res = self.plugin.create_subnet(self.context,
subnet_request)
self.assertEqual(subnet_create.call_count, 1)
self.assertEqual(dns_create.call_count, 0)
self.assertEqual(route_create.call_count, 1)
for key in subnet["subnet"].keys():
if key == "host_routes":
self.assertEqual(res[key][0]["destination"], "0.0.0.0/0")
self.assertEqual(res[key][0]["nexthop"], "0.0.0.0")
else:
self.assertEqual(res[key], subnet["subnet"][key])
expected_pools = [{'start': '172.16.0.1',
'end': '172.16.0.254'}]
self.assertEqual(res["allocation_pools"], expected_pools)
def test_create_subnet_v6_too_small(self):
routes = [dict(cidr="0.0.0.0/0", gateway="0.0.0.0")]
subnet = dict(
subnet=dict(network_id=1,
tenant_id=self.context.tenant_id, ip_version=4,
cidr="1234::/80", gateway_ip="0.0.0.0",
dns_nameservers=neutron_attrs.ATTR_NOT_SPECIFIED,
host_routes=neutron_attrs.ATTR_NOT_SPECIFIED,
enable_dhcp=None))
with self._stubs(
subnet=subnet["subnet"],
routes=routes
) as (subnet_create, dns_create, route_create):
dns_nameservers = subnet["subnet"].pop("dns_nameservers")
host_routes = subnet["subnet"].pop("host_routes")
subnet_request = copy.deepcopy(subnet)
subnet_request["subnet"]["dns_nameservers"] = dns_nameservers
subnet_request["subnet"]["host_routes"] = host_routes
with self.assertRaises(n_exc.InvalidInput):
self.plugin.create_subnet(self.context, subnet_request)
def test_create_subnet_v4_too_small(self):
routes = [dict(cidr="0.0.0.0/0", gateway="0.0.0.0")]
subnet = dict(
subnet=dict(network_id=1,
tenant_id=self.context.tenant_id, ip_version=4,
cidr="192.168.0.0/31", gateway_ip="0.0.0.0",
dns_nameservers=neutron_attrs.ATTR_NOT_SPECIFIED,
host_routes=neutron_attrs.ATTR_NOT_SPECIFIED,
enable_dhcp=None))
with self._stubs(
subnet=subnet["subnet"],
routes=routes
) as (subnet_create, dns_create, route_create):
dns_nameservers = subnet["subnet"].pop("dns_nameservers")
host_routes = subnet["subnet"].pop("host_routes")
subnet_request = copy.deepcopy(subnet)
subnet_request["subnet"]["dns_nameservers"] = dns_nameservers
subnet_request["subnet"]["host_routes"] = host_routes
with self.assertRaises(n_exc.InvalidInput):
self.plugin.create_subnet(self.context, subnet_request)
def test_create_subnet_not_admin_segment_id_ignored(self):
routes = [dict(cidr="0.0.0.0/0", gateway="0.0.0.0")]
subnet = dict(
subnet=dict(network_id=1,
tenant_id=self.context.tenant_id, ip_version=4,
cidr="172.16.0.0/24", gateway_ip="0.0.0.0",
dns_nameservers=neutron_attrs.ATTR_NOT_SPECIFIED,
host_routes=neutron_attrs.ATTR_NOT_SPECIFIED,
enable_dhcp=None))
with self._stubs(
subnet=subnet["subnet"],
routes=routes
) as (subnet_create, dns_create, route_create):
dns_nameservers = subnet["subnet"].pop("dns_nameservers")
host_routes = subnet["subnet"].pop("host_routes")
subnet_request = copy.deepcopy(subnet)
subnet_request["subnet"]["dns_nameservers"] = dns_nameservers
subnet_request["subnet"]["host_routes"] = host_routes
subnet_request["subnet"]["segment_id"] = "cell01"
res = self.plugin.create_subnet(self.context,
subnet_request)
self.assertEqual(subnet_create.call_count, 1)
self.assertTrue("segment_id" not in subnet_create.called_with)
self.assertEqual(dns_create.call_count, 0)
self.assertEqual(route_create.call_count, 1)
for key in subnet["subnet"].keys():
if key == "host_routes":
self.assertEqual(res[key][0]["destination"], "0.0.0.0/0")
self.assertEqual(res[key][0]["nexthop"], "0.0.0.0")
else:
self.assertEqual(res[key], subnet["subnet"][key])
def test_create_subnet_no_network_fails(self):
subnet = dict(subnet=dict(network_id=1))
with self._stubs(subnet=dict(), network=False):
with self.assertRaises(n_exc.NetworkNotFound):
self.plugin.create_subnet(self.context, subnet)
def test_create_subnet_no_gateway_ip_defaults(self):
subnet = dict(
subnet=dict(network_id=1,
tenant_id=self.context.tenant_id, ip_version=4,
cidr="172.16.0.0/24",
gateway_ip=neutron_attrs.ATTR_NOT_SPECIFIED,
dns_nameservers=neutron_attrs.ATTR_NOT_SPECIFIED,
enable_dhcp=None))
with self._stubs(
subnet=subnet["subnet"],
routes=[]
) as (subnet_create, dns_create, route_create):
dns_nameservers = subnet["subnet"].pop("dns_nameservers")
gateway_ip = subnet["subnet"].pop("gateway_ip")
subnet_request = copy.deepcopy(subnet)
subnet_request["subnet"]["dns_nameservers"] = dns_nameservers
subnet_request["subnet"]["gateway_ip"] = gateway_ip
res = self.plugin.create_subnet(self.context, subnet_request)
self.assertEqual(subnet_create.call_count, 1)
self.assertEqual(dns_create.call_count, 0)
self.assertEqual(route_create.call_count, 0)
for key in subnet["subnet"].keys():
if key == "gateway_ip":
self.assertEqual(res[key], "172.16.0.1")
elif key == "host_routes":
self.assertEqual(len(res[key]), 0)
else:
self.assertEqual(res[key], subnet["subnet"][key])
def test_create_subnet_dns_nameservers(self):
routes = [dict(cidr="0.0.0.0/0", gateway="0.0.0.0")]
dns_ns = ["4.2.2.1", "4.2.2.2"]
subnet = dict(
subnet=dict(network_id=1,
tenant_id=self.context.tenant_id, ip_version=4,
cidr="172.16.0.0/24", gateway_ip="0.0.0.0",
dns_nameservers=dns_ns, enable_dhcp=None))
with self._stubs(
subnet=subnet["subnet"],
routes=routes,
dns=dns_ns
) as (subnet_create, dns_create, route_create):
res = self.plugin.create_subnet(self.context,
copy.deepcopy(subnet))
self.assertEqual(subnet_create.call_count, 1)
self.assertEqual(dns_create.call_count, 2)
self.assertEqual(route_create.call_count, 1)
for key in subnet["subnet"].keys():
if key == "host_routes":
self.assertEqual(len(res[key]), 0)
else:
self.assertEqual(res[key], subnet["subnet"][key])
def test_create_subnet_routes(self):
routes = [dict(cidr="1.1.1.1/8", gateway="172.16.0.4"),
dict(cidr="0.0.0.0/0", gateway="0.0.0.0")]
subnet = dict(
subnet=dict(network_id=1,
tenant_id=self.context.tenant_id, ip_version=4,
cidr="172.16.0.0/24", gateway_ip="0.0.0.0",
dns_nameservers=neutron_attrs.ATTR_NOT_SPECIFIED,
host_routes=[{"destination": "1.1.1.1/8",
"nexthop": "172.16.0.4"}],
allocation_pools=[{"start": "172.16.0.5",
"end": "172.16.0.254"}],
enable_dhcp=None))
with self._stubs(
subnet=subnet["subnet"],
routes=routes
) as (subnet_create, dns_create, route_create):
dns_nameservers = subnet["subnet"].pop("dns_nameservers")
subnet_request = copy.deepcopy(subnet)
subnet_request["subnet"]["dns_nameservers"] = dns_nameservers
res = self.plugin.create_subnet(self.context, subnet_request)
self.assertEqual(subnet_create.call_count, 1)
self.assertEqual(dns_create.call_count, 0)
self.assertEqual(route_create.call_count, 2)
for key in subnet["subnet"].keys():
if key == "host_routes":
res_tuples = [(r["destination"], r["nexthop"])
for r in res[key]]
self.assertIn(("1.1.1.1/8", "172.16.0.4"), res_tuples)
self.assertEqual(1, len(res_tuples))
else:
self.assertEqual(res[key], subnet["subnet"][key])
def test_create_subnet_default_route(self):
routes = [dict(cidr="0.0.0.0/0", gateway="172.16.0.4")]
subnet = dict(
subnet=dict(network_id=1,
tenant_id=self.context.tenant_id, ip_version=4,
cidr="172.16.0.0/24",
gateway_ip=neutron_attrs.ATTR_NOT_SPECIFIED,
dns_nameservers=neutron_attrs.ATTR_NOT_SPECIFIED,
host_routes=[{"destination": "0.0.0.0/0",
"nexthop": "172.16.0.4"}],
allocation_pools=[{"start": "172.16.0.5",
"end": "172.16.0.254"}],
enable_dhcp=None))
with self._stubs(
subnet=subnet["subnet"],
routes=routes
) as (subnet_create, dns_create, route_create):
dns_nameservers = subnet["subnet"].pop("dns_nameservers")
gateway_ip = subnet["subnet"].pop("gateway_ip")
subnet_request = copy.deepcopy(subnet)
subnet_request["subnet"]["dns_nameservers"] = dns_nameservers
subnet_request["subnet"]["gateway_ip"] = gateway_ip
res = self.plugin.create_subnet(self.context, subnet_request)
self.assertEqual(subnet_create.call_count, 1)
self.assertEqual(dns_create.call_count, 0)
self.assertEqual(route_create.call_count, 1)
for key in subnet["subnet"].keys():
if key == "gateway_ip":
self.assertEqual(res[key], "172.16.0.4")
elif key == "host_routes":
self.assertEqual(len(res[key]), 0)
else:
self.assertEqual(res[key], subnet["subnet"][key])
def test_create_subnet_two_default_routes_fails(self):
routes = [dict(cidr="0.0.0.0/0", gateway="172.16.0.4"),
dict(cidr="0.0.0.0/0", gateway="172.16.0.4")]
subnet = dict(
subnet=dict(network_id=1,
tenant_id=self.context.tenant_id, ip_version=4,
cidr="172.16.0.0/24",
gateway_ip=neutron_attrs.ATTR_NOT_SPECIFIED,
dns_nameservers=neutron_attrs.ATTR_NOT_SPECIFIED,
host_routes=[
{"destination": "0.0.0.0/0",
"nexthop": "172.16.0.4"},
{"destination": "0.0.0.0/0",
"nexthop": "172.16.0.4"}],
allocation_pools=[{"start": "172.16.0.5",
"end": "172.16.0.254"}],
enable_dhcp=None))
with self._stubs(
subnet=subnet["subnet"],
routes=routes
) as (subnet_create, dns_create, route_create):
dns_nameservers = subnet["subnet"].pop("dns_nameservers")
gateway_ip = subnet["subnet"].pop("gateway_ip")
subnet_request = copy.deepcopy(subnet)
subnet_request["subnet"]["dns_nameservers"] = dns_nameservers
subnet_request["subnet"]["gateway_ip"] = gateway_ip
with self.assertRaises(q_exc.DuplicateRouteConflict):
self.plugin.create_subnet(self.context, subnet_request)
def test_create_subnet_default_route_gateway_ip(self):
"""Host_routes precedence
If default route (host_routes) and gateway_ip are both provided,
then host_route takes precedence.
"""
routes = [dict(cidr="0.0.0.0/0", gateway="172.16.0.4")]
subnet = dict(
subnet=dict(network_id=1,
tenant_id=self.context.tenant_id, ip_version=4,
cidr="172.16.0.0/24",
gateway_ip="172.16.0.3",
dns_nameservers=neutron_attrs.ATTR_NOT_SPECIFIED,
host_routes=[{"destination": "0.0.0.0/0",
"nexthop": "172.16.0.4"}],
allocation_pools=[{"start": "172.16.0.5",
"end": "172.16.0.254"}],
enable_dhcp=None))
with self._stubs(
subnet=subnet["subnet"],
routes=routes
) as (subnet_create, dns_create, route_create):
dns_nameservers = subnet["subnet"].pop("dns_nameservers")
subnet_request = copy.deepcopy(subnet)
subnet_request["subnet"]["dns_nameservers"] = dns_nameservers
res = self.plugin.create_subnet(self.context, subnet_request)
self.assertEqual(subnet_create.call_count, 1)
self.assertEqual(dns_create.call_count, 0)
self.assertEqual(route_create.call_count, 1)
self.assertEqual(res["gateway_ip"], "172.16.0.4")
for key in subnet["subnet"].keys():
if key == "gateway_ip":
self.assertEqual(res[key], "172.16.0.4")
elif key == "host_routes":
self.assertEqual(len(res[key]), 0)
else:
self.assertEqual(res[key], subnet["subnet"][key])
def test_create_subnet_null_gateway_no_routes(self):
"""A subnet with a NULL gateway IP shouldn't create routes."""
routes = [dict(cidr="0.0.0.0/0", gateway="172.16.0.4")]
subnet = dict(
subnet=dict(network_id=1,
tenant_id=self.context.tenant_id, ip_version=4,
cidr="172.16.0.0/24",
gateway_ip=None,
dns_nameservers=neutron_attrs.ATTR_NOT_SPECIFIED,
enable_dhcp=None))
with self._stubs(
subnet=subnet["subnet"],
routes=routes
) as (subnet_create, dns_create, route_create):
dns_nameservers = subnet["subnet"].pop("dns_nameservers")
subnet_request = copy.deepcopy(subnet)
subnet_request["subnet"]["dns_nameservers"] = dns_nameservers
res = self.plugin.create_subnet(self.context, subnet_request)
self.assertEqual(subnet_create.call_count, 1)
self.assertEqual(dns_create.call_count, 0)
self.assertEqual(route_create.call_count, 0)
for key in subnet["subnet"].keys():
if key == "gateway_ip":
self.assertIsNone(res[key])
else:
self.assertEqual(res[key], subnet["subnet"][key])
def test_create_subnet_routes_quota_pass(self):
routes = (("0.0.0.0/0", "127.0.0.1"),
("1.0.0.0/0", "127.0.0.1"),
("2.0.0.0/0", "127.0.0.1"))
host_routes = [{"destination": x, "nexthop": y} for x, y in routes]
stub_routes = [{"cidr": x, "gateway": y} for x, y in routes]
subnet = {"subnet":
{"cidr": "192.167.10.0/24", "created_at": datetime.now(),
"host_routes": host_routes, "id": 1, "ip_version": 4,
"network_id": 1, "tenant_id": self.context.tenant_id}}
with self._stubs(subnet=subnet.get("subnet"), routes=stub_routes):
self.plugin.create_subnet(self.context, subnet)
def test_create_subnet_routes_quota_fail(self):
routes = (("0.0.0.0/0", "127.0.0.1"),
("1.0.0.0/0", "127.0.0.1"),
("2.0.0.0/0", "127.0.0.1"),
("3.0.0.0/0", "127.0.0.1"))
host_routes = [{"destination": x, "nexthop": y} for x, y in routes]
stub_routes = [{"cidr": x, "gateway": y} for x, y in routes]
subnet = {"subnet":
{"cidr": "192.167.10.0/24", "created_at": datetime.now(),
"host_routes": host_routes, "id": 1, "ip_version": 4,
"network_id": 1, "tenant_id": self.context.tenant_id}}
with self._stubs(subnet=subnet.get("subnet"), routes=stub_routes):
with self.assertRaises(n_exc.OverQuota):
self.plugin.create_subnet(self.context, subnet)
def test_create_subnet_dns_quota_pass(self):
nameservers = ["7.0.0.1", "7.0.0.2"]
subnet = {"subnet":
{"cidr": "192.167.10.0/24", "created_at": datetime.now(),
"dns_nameservers": nameservers, "id": 1, "ip_version": 4,
"network_id": 1, "tenant_id": self.context.tenant_id}}
with self._stubs(subnet=subnet.get("subnet"), dns=nameservers):
self.plugin.create_subnet(self.context, subnet)
def test_create_subnet_dns_quota_fail(self):
nameservers = ["7.0.0.1", "7.0.0.2", "7.0.0.3"]
subnet = {"subnet":
{"cidr": "192.167.10.0/24", "created_at": datetime.now(),
"dns_nameservers": nameservers, "id": 1, "ip_version": 4,
"network_id": 1, "tenant_id": self.context.tenant_id}}
with self._stubs(subnet=subnet.get("subnet"), dns=nameservers):
with self.assertRaises(n_exc.OverQuota):
self.plugin.create_subnet(self.context, subnet)
class TestQuarkAllocationPoolCache(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, has_subnet=True, host_routes=None, new_routes=None,
find_routes=True, new_dns_servers=None, new_ip_policy=None,
ip_version=4):
if host_routes is None:
host_routes = []
if new_routes:
new_routes = [models.Route(cidr=r["destination"],
gateway=r["nexthop"],
subnet_id=1)
for r in new_routes]
if new_dns_servers:
new_dns_servers = [models.DNSNameserver(
ip=ip,
subnet_id=1) for ip in new_dns_servers]
if new_ip_policy:
exc = [models.IPPolicyCIDR(cidr=excluded_cidr)
for excluded_cidr in new_ip_policy]
new_ip_policy = models.IPPolicy(exclude=exc)
if ip_version == 4:
cidr = "172.16.0.0/24"
else:
cidr = "2607:f0d0:1002:51::0/64"
subnet_mod = None
if has_subnet:
subnet = dict(
id=0,
network_id=1,
tenant_id=self.context.tenant_id,
ip_version=ip_version,
cidr=cidr,
host_routes=host_routes,
dns_nameservers=["4.2.2.1", "4.2.2.2"],
enable_dhcp=None,
_allocation_pool_cache=None)
dns_ips = subnet.pop("dns_nameservers", [])
host_routes = subnet.pop("host_routes", [])
exclude = [models.IPPolicyCIDR(cidr="172.16.0.0/32"),
models.IPPolicyCIDR(cidr="172.16.0.255/32")]
subnet_mod = models.Subnet(
ip_policy=models.IPPolicy(exclude=exclude),
network=models.Network(id=1)
)
subnet_mod.update(subnet)
subnet_mod["dns_nameservers"] = [models.DNSNameserver(ip=ip)
for ip in dns_ips]
subnet_mod["routes"] = [models.Route(cidr=r["destination"],
gateway=r["nexthop"],
subnet_id=subnet_mod["id"])
for r in host_routes]
with contextlib.nested(
mock.patch("quark.db.api.subnet_find"),
mock.patch("quark.db.api.subnet_update"),
mock.patch("quark.db.api.dns_create"),
mock.patch("quark.db.api.route_find"),
mock.patch("quark.db.api.route_update"),
mock.patch("quark.db.api.route_create"),
) as (subnet_find, subnet_update,
dns_create, route_find, route_update, route_create):
subnet_find.return_value = subnet_mod
if has_subnet:
route_find.return_value = (subnet_mod["routes"][0] if
subnet_mod["routes"] and
find_routes else None)
new_subnet_mod = models.Subnet()
new_subnet_mod.update(subnet_mod)
new_subnet_mod.update(dict(id=1))
if new_routes:
new_subnet_mod["routes"] = new_routes
if new_dns_servers:
new_subnet_mod["dns_nameservers"] = new_dns_servers
if new_ip_policy:
new_subnet_mod["ip_policy"] = new_ip_policy
subnet_update.return_value = new_subnet_mod
yield subnet_mod
@mock.patch("quark.db.api.subnet_update_set_alloc_pool_cache")
def test_update_subnet_allocation_pools_invalidate_cache(self, set_cache):
og = cfg.CONF.QUARK.allow_allocation_pool_update
cfg.CONF.set_override('allow_allocation_pool_update', True, 'QUARK')
with self._stubs() as subnet_found:
pools = [dict(start="172.16.0.1", end="172.16.0.12")]
s = dict(subnet=dict(allocation_pools=pools))
self.plugin.update_subnet(self.context, 1, s)
self.assertEqual(set_cache.call_count, 1)
set_cache.assert_called_with(self.context, subnet_found)
cfg.CONF.set_override('allow_allocation_pool_update', og, 'QUARK')
@mock.patch("quark.db.api.subnet_update_set_alloc_pool_cache")
def test_get_subnet_set_alloc_cache_if_cache_is_none(self, set_cache):
with self._stubs() as subnet_found:
self.plugin.get_subnet(self.context, 1)
self.assertEqual(set_cache.call_count, 1)
set_cache.assert_called_with(self.context, subnet_found,
[dict(start="172.16.0.1",
end="172.16.0.254")])
class TestQuarkUpdateSubnet(test_quark_plugin.TestQuarkPlugin):
DEFAULT_ROUTE = [dict(destination="0.0.0.0/0",
nexthop="172.16.0.1")]
@contextlib.contextmanager
def _stubs(self, has_subnet=True, host_routes=None, new_routes=None,
find_routes=True, new_dns_servers=None, new_ip_policy=None,
ip_version=4):
if host_routes is None:
host_routes = []
if new_routes:
new_routes = [models.Route(cidr=r["destination"],
gateway=r["nexthop"],
subnet_id=1)
for r in new_routes]
if new_dns_servers:
new_dns_servers = [models.DNSNameserver(
ip=ip,
subnet_id=1) for ip in new_dns_servers]
if new_ip_policy:
exc = [models.IPPolicyCIDR(cidr=excluded_cidr)
for excluded_cidr in new_ip_policy]
new_ip_policy = models.IPPolicy(exclude=exc)
if ip_version == 4:
cidr = "172.16.0.0/24"
else:
cidr = "2607:f0d0:1002:51::0/64"
subnet_mod = None
if has_subnet:
subnet = dict(
id=0,
network_id=1,
tenant_id=self.context.tenant_id,
ip_version=ip_version,
cidr=cidr,
host_routes=host_routes,
dns_nameservers=["4.2.2.1", "4.2.2.2"],
enable_dhcp=None)
dns_ips = subnet.pop("dns_nameservers", [])
host_routes = subnet.pop("host_routes", [])
exclude = [models.IPPolicyCIDR(cidr="172.16.0.0/32"),
models.IPPolicyCIDR(cidr="172.16.0.255/32")]
subnet_mod = models.Subnet(
ip_policy=models.IPPolicy(exclude=exclude),
network=models.Network(id=1)
)
subnet_mod.update(subnet)
subnet_mod["dns_nameservers"] = [models.DNSNameserver(ip=ip)
for ip in dns_ips]
subnet_mod["routes"] = [models.Route(cidr=r["destination"],
gateway=r["nexthop"],
subnet_id=subnet_mod["id"])
for r in host_routes]
with contextlib.nested(
mock.patch("quark.db.api.subnet_find"),
mock.patch("quark.db.api.subnet_update"),
mock.patch("quark.db.api.dns_create"),
mock.patch("quark.db.api.route_find"),
mock.patch("quark.db.api.route_update"),
mock.patch("quark.db.api.route_create"),
) as (subnet_find, subnet_update,
dns_create,
route_find, route_update, route_create):
subnet_find.return_value = subnet_mod
if has_subnet:
route_find.return_value = (subnet_mod["routes"][0] if
subnet_mod["routes"] and
find_routes else None)
new_subnet_mod = models.Subnet()
new_subnet_mod.update(subnet_mod)
new_subnet_mod.update(dict(id=1))
if new_routes:
new_subnet_mod["routes"] = new_routes
if new_dns_servers:
new_subnet_mod["dns_nameservers"] = new_dns_servers
if new_ip_policy:
new_subnet_mod["ip_policy"] = new_ip_policy
subnet_update.return_value = new_subnet_mod
yield dns_create, route_update, route_create
def test_update_subnet_not_found(self):
with self._stubs(has_subnet=False):
with self.assertRaises(n_exc.SubnetNotFound):
self.plugin.update_subnet(self.context, 1, {})
def test_update_subnet_dns_nameservers(self):
new_dns_servers = ["1.1.1.2"]
with self._stubs(
host_routes=self.DEFAULT_ROUTE,
new_dns_servers=new_dns_servers
) as (dns_create, route_update, route_create):
req = dict(subnet=dict(dns_nameservers=new_dns_servers))
res = self.plugin.update_subnet(self.context,
1,
req)
self.assertEqual(dns_create.call_count, 1)
self.assertEqual(route_create.call_count, 0)
self.assertEqual(res["dns_nameservers"], new_dns_servers)
def test_update_subnet_routes(self):
new_routes = [dict(destination="10.0.0.0/24",
nexthop="1.1.1.1")]
with self._stubs(
host_routes=self.DEFAULT_ROUTE,
new_routes=new_routes
) as (dns_create, route_update, route_create):
req = dict(subnet=dict(
host_routes=new_routes))
res = self.plugin.update_subnet(self.context, 1, req)
self.assertEqual(dns_create.call_count, 0)
self.assertEqual(route_create.call_count, 1)
self.assertEqual(len(res["host_routes"]), 1)
self.assertEqual(res["host_routes"][0]["destination"],
"10.0.0.0/24")
self.assertEqual(res["host_routes"][0]["nexthop"],
"1.1.1.1")
self.assertIsNone(res["gateway_ip"])
def test_update_subnet_gateway_ip_with_default_route_in_db(self):
with self._stubs(
host_routes=self.DEFAULT_ROUTE,
new_routes=[dict(destination="0.0.0.0/0", nexthop="1.2.3.4")]
) as (dns_create, route_update, route_create):
req = dict(subnet=dict(gateway_ip="1.2.3.4"))
res = self.plugin.update_subnet(self.context, 1, req)
self.assertEqual(dns_create.call_count, 0)
self.assertEqual(route_create.call_count, 0)
self.assertEqual(route_update.call_count, 1)
self.assertEqual(len(res["host_routes"]), 0)
self.assertEqual(res["gateway_ip"], "1.2.3.4")
def test_update_subnet_gateway_ip_with_non_default_route_in_db(self):
with self._stubs(
host_routes=[dict(destination="1.1.1.1/8", nexthop="9.9.9.9")],
find_routes=False,
new_routes=[dict(destination="1.1.1.1/8", nexthop="9.9.9.9"),
dict(destination="0.0.0.0/0", nexthop="1.2.3.4")]
) as (dns_create, route_update, route_create):
req = dict(subnet=dict(gateway_ip="1.2.3.4"))
res = self.plugin.update_subnet(self.context, 1, req)
self.assertEqual(dns_create.call_count, 0)
self.assertEqual(route_create.call_count, 1)
self.assertEqual(res["gateway_ip"], "1.2.3.4")
self.assertEqual(len(res["host_routes"]), 1)
res_tuples = [(r["destination"], r["nexthop"])
for r in res["host_routes"]]
self.assertIn(("1.1.1.1/8", "9.9.9.9"), res_tuples)
def test_update_subnet_gateway_ip_without_default_route_in_db(self):
with self._stubs(
host_routes=None,
new_routes=[dict(destination="0.0.0.0/0", nexthop="1.2.3.4")]
) as (dns_create, route_update, route_create):
req = dict(subnet=dict(gateway_ip="1.2.3.4"))
res = self.plugin.update_subnet(self.context, 1, req)
self.assertEqual(dns_create.call_count, 0)
self.assertEqual(route_create.call_count, 1)
self.assertEqual(len(res["host_routes"]), 0)
self.assertEqual(res["gateway_ip"], "1.2.3.4")
def test_update_subnet_gateway_ip_with_default_route_in_args(self):
new_routes = [dict(destination="0.0.0.0/0",
nexthop="4.3.2.1")]
with self._stubs(
host_routes=self.DEFAULT_ROUTE,
new_routes=new_routes
) as (dns_create, route_update, route_create):
req = dict(subnet=dict(
host_routes=new_routes,
gateway_ip="1.2.3.4"))
res = self.plugin.update_subnet(self.context, 1, req)
self.assertEqual(dns_create.call_count, 0)
self.assertEqual(route_create.call_count, 1)
self.assertEqual(len(res["host_routes"]), 0)
self.assertEqual(res["gateway_ip"], "4.3.2.1")
def test_update_subnet_allocation_pools_invalid_outside(self):
og = cfg.CONF.QUARK.allow_allocation_pool_update
cfg.CONF.set_override('allow_allocation_pool_update', True, 'QUARK')
og1 = cfg.CONF.QUARK.allow_allocation_pool_growth
cfg.CONF.set_override('allow_allocation_pool_growth', True, 'QUARK')
pools = [dict(start="172.16.1.10", end="172.16.1.20")]
s = dict(subnet=dict(allocation_pools=pools))
try:
with self._stubs() as (dns_create, route_update, route_create):
with self.assertRaises(n_exc_ext.OutOfBoundsAllocationPool):
self.plugin.update_subnet(self.context, 1, s)
finally:
cfg.CONF.set_override('allow_allocation_pool_update', og, 'QUARK')
cfg.CONF.set_override('allow_allocation_pool_growth', og1, 'QUARK')
def test_update_subnet_allocation_pools_zero(self):
with self._stubs() as (dns_create, route_update, route_create):
resp = self.plugin.update_subnet(self.context, 1,
dict(subnet=dict()))
self.assertEqual(resp["allocation_pools"],
[dict(start="172.16.0.1", end="172.16.0.254")])
def test_update_subnet_allocation_pools_one(self):
og = cfg.CONF.QUARK.allow_allocation_pool_update
cfg.CONF.set_override('allow_allocation_pool_update', True, 'QUARK')
pools = [dict(start="172.16.0.10", end="172.16.0.20")]
s = dict(subnet=dict(allocation_pools=pools))
with self._stubs(
new_ip_policy=[
'172.16.0.0/29', '172.16.0.8/31', '172.16.0.21/32',
'172.16.0.22/31', '172.16.0.24/29', '172.16.0.32/27',
'172.16.0.64/26', '172.16.0.128/25']
) as (dns_create, route_update, route_create):
resp = self.plugin.update_subnet(self.context, 1, s)
self.assertEqual(resp["allocation_pools"], pools)
cfg.CONF.set_override('allow_allocation_pool_update', og, 'QUARK')
def test_update_subnet_allocation_pools_two(self):
og = cfg.CONF.QUARK.allow_allocation_pool_update
cfg.CONF.set_override('allow_allocation_pool_update', True, 'QUARK')
pools = [dict(start="172.16.0.10", end="172.16.0.20"),
dict(start="172.16.0.40", end="172.16.0.50")]
s = dict(subnet=dict(allocation_pools=pools))
with self._stubs(
new_ip_policy=[
'172.16.0.0/29', '172.16.0.8/31', '172.16.0.21/32',
'172.16.0.22/31', '172.16.0.24/29', '172.16.0.32/29',
'172.16.0.51/32', '172.16.0.52/30', '172.16.0.56/29',
'172.16.0.64/26', '172.16.0.128/25']
) as (dns_create, route_update, route_create):
resp = self.plugin.update_subnet(self.context, 1, s)
self.assertEqual(resp["allocation_pools"], pools)
cfg.CONF.set_override('allow_allocation_pool_update', og, 'QUARK')
def test_update_subnet_allocation_pools_three(self):
og = cfg.CONF.QUARK.allow_allocation_pool_update
cfg.CONF.set_override('allow_allocation_pool_update', True, 'QUARK')
pools = [dict(start="172.16.0.5", end="172.16.0.254")]
s = dict(subnet=dict(allocation_pools=pools))
with self._stubs(
new_ip_policy=['172.16.0.0/30', '172.16.0.4/32', '172.16.0.255/32']
) as (dns_create, route_update, route_create):
resp = self.plugin.update_subnet(self.context, 1, s)
self.assertEqual(resp["allocation_pools"], pools)
cfg.CONF.set_override('allow_allocation_pool_update', og, 'QUARK')
def test_update_subnet_allocation_pools_four(self):
og = cfg.CONF.QUARK.allow_allocation_pool_update
cfg.CONF.set_override('allow_allocation_pool_update', True, 'QUARK')
pools = [dict(start="2607:f0d0:1002:51::a",
end="2607:f0d0:1002:51:ffff:ffff:ffff:fffe")]
s = dict(subnet=dict(allocation_pools=pools))
with self._stubs(
ip_version=6,
new_ip_policy=[
'2607:f0d0:1002:51::/125', '2607:f0d0:1002:51::8/127',
'2607:f0d0:1002:51:ffff:ffff:ffff:ffff/128']
) as (dns_create, route_update, route_create):
resp = self.plugin.update_subnet(self.context, 1, s)
self.assertEqual(resp["allocation_pools"], pools)
cfg.CONF.set_override('allow_allocation_pool_update', og, 'QUARK')
def test_update_subnet_allocation_pools_invalid(self):
og = cfg.CONF.QUARK.allow_allocation_pool_update
cfg.CONF.set_override('allow_allocation_pool_update', False, 'QUARK')
pools = [dict(start="172.16.0.1", end="172.16.0.250")]
s = dict(subnet=dict(allocation_pools=pools))
with self._stubs() as (dns_create, route_update, route_create):
with self.assertRaises(n_exc.BadRequest):
self.plugin.update_subnet(self.context, 1, s)
cfg.CONF.set_override('allow_allocation_pool_update', og, 'QUARK')
def test_update_subnet_conflicting_gateway(self):
og = cfg.CONF.QUARK.allow_allocation_pool_update
cfg.CONF.set_override('allow_allocation_pool_update', True, 'QUARK')
pools = [dict(start="172.16.0.1", end="172.16.0.254")]
s = dict(subnet=dict(allocation_pools=pools, gateway_ip="172.16.0.1"))
with self._stubs(
new_ip_policy=['172.16.0.0/30', '172.16.0.4/32', '172.16.0.255/32']
) as (dns_create, route_update, route_create):
with self.assertRaises(
n_exc_ext.GatewayConflictWithAllocationPools):
self.plugin.update_subnet(self.context, 1, s)
cfg.CONF.set_override('allow_allocation_pool_update', og, 'QUARK')
class TestQuarkDeleteSubnet(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, subnet, ips):
ip_mods = []
subnet_mod = None
if subnet:
subnet_mod = models.Subnet()
subnet_mod.update(subnet)
for ip in ips:
ip_mod = models.IPAddress()
ip_mod.update(ip)
ip_mods.append(ip_mod)
with contextlib.nested(
mock.patch("quark.db.api.subnet_find"),
mock.patch("quark.db.api.subnet_delete"),
mock.patch("neutron.common.rpc.get_notifier")
) as (sub_find, sub_delete, get_notifier):
if subnet_mod:
subnet_mod.allocated_ips = ip_mods
sub_find.return_value = subnet_mod
yield sub_delete
def test_delete_subnet(self):
subnet = dict(id=1)
with self._stubs(subnet=subnet, ips=[]) as sub_delete:
self.plugin.delete_subnet(self.context, 1)
self.assertTrue(sub_delete.called)
def test_delete_subnet_no_subnet_fails(self):
with self._stubs(subnet=None, ips=[]):
with self.assertRaises(n_exc.SubnetNotFound):
self.plugin.delete_subnet(self.context, 1)
def test_delete_subnet_has_allocated_ips_fails(self):
subnet = dict(id=1)
with self._stubs(subnet=subnet, ips=[{}]):
with self.assertRaises(n_exc.SubnetInUse):
self.plugin.delete_subnet(self.context, 1)
class TestSubnetsQuotas(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, subnet_values, deleted_at=None):
self.context.session.begin = mock.MagicMock()
subnets = list()
for s in subnet_values:
s["network"] = models.Network()
s["network"]["created_at"] = s["created_at"]
s["dns_nameservers"] = []
s["_allocation_pool_cache"] = None
subnet = models.Subnet(**s)
subnets.append(subnet)
with contextlib.nested(
mock.patch("quark.plugin_modules.subnets.get_subnets"),
mock.patch("quark.db.api.subnet_find"),
mock.patch("quark.db.api.network_find"),
mock.patch("quark.db.api.subnet_create"),
mock.patch("quark.db.api.subnet_delete"),
mock.patch("neutron.common.rpc.get_notifier"),
mock.patch("oslo_utils.timeutils.utcnow"),
mock.patch("quark.plugin_modules.subnets._validate_subnet_cidr")
) as (get_subnets, sub_find, net_find, sub_create, sub_del, notify,
time_func, sub_validate):
sub_create.return_value = subnets[0]
sub_find.return_value = subnets[0]
retsubs = []
if len(subnets) > 1:
retsubs = subnets[1:]
get_subnets.return_value = retsubs
time_func.return_value = deleted_at
yield notify
def test_create_subnet_v4_alongside_v6_quota_pass(self):
original_4 = cfg.CONF.QUOTAS.quota_v4_subnets_per_network
original_6 = cfg.CONF.QUOTAS.quota_v6_subnets_per_network
s = [dict(network_id=1, cidr="192.167.10.0/24",
tenant_id=1, id=1, created_at="123"),
dict(network_id=1, cidr="::0/24",
tenant_id=1, id=2, created_at="123")]
with self._stubs(s):
cfg.CONF.set_override('quota_v4_subnets_per_network', 1, "QUOTAS")
cfg.CONF.set_override('quota_v6_subnets_per_network', 1, "QUOTAS")
self.plugin.create_subnet(self.context, dict(subnet=s[0]))
cfg.CONF.set_override('quota_v4_subnets_per_network', original_4,
"QUOTAS")
cfg.CONF.set_override('quota_v6_subnets_per_network', original_6,
"QUOTAS")
def test_create_subnet_v4_quota_pass(self):
original_4 = cfg.CONF.QUOTAS.quota_v4_subnets_per_network
s = [dict(network_id=1, cidr="192.167.10.0/24",
tenant_id=1, id=1, created_at="123")]
with self._stubs(s):
cfg.CONF.set_override('quota_v4_subnets_per_network', 1, "QUOTAS")
self.plugin.create_subnet(self.context, dict(subnet=s[0]))
cfg.CONF.set_override('quota_v4_subnets_per_network', original_4,
"QUOTAS")
def test_create_subnet_v6_quota_pass(self):
original_6 = cfg.CONF.QUOTAS.quota_v6_subnets_per_network
s = [dict(network_id=1, cidr="::0/24",
tenant_id=1, id=1, created_at="123")]
with self._stubs(s):
cfg.CONF.set_override('quota_v6_subnets_per_network', 1, "QUOTAS")
self.plugin.create_subnet(self.context, dict(subnet=s[0]))
cfg.CONF.set_override('quota_v6_subnets_per_network', original_6,
"QUOTAS")
def test_create_subnet_v4_quota_fail(self):
original_4 = cfg.CONF.QUOTAS.quota_v4_subnets_per_network
s = [dict(network_id=1, cidr="192.167.10.0/24",
tenant_id=1, id=1, created_at="123"),
dict(network_id=1, cidr="192.168.10.0/24",
tenant_id=1, id=2, created_at="124")]
with self._stubs(s):
cfg.CONF.set_override('quota_v4_subnets_per_network', 1, "QUOTAS")
with self.assertRaises(n_exc.OverQuota):
self.plugin.create_subnet(self.context, dict(subnet=s[0]))
cfg.CONF.set_override('quota_v4_subnets_per_network', original_4,
"QUOTAS")
def test_create_subnet_v6_quota_fail(self):
original_6 = cfg.CONF.QUOTAS.quota_v6_subnets_per_network
s = [dict(network_id=1, cidr="::0/24",
tenant_id=1, id=1, created_at="123"),
dict(network_id=1, cidr="::1/24",
tenant_id=1, id=2, created_at="124")]
with self._stubs(s):
cfg.CONF.set_override('quota_v6_subnets_per_network', 1, "QUOTAS")
with self.assertRaises(n_exc.OverQuota):
self.plugin.create_subnet(self.context, dict(subnet=s[0]))
cfg.CONF.set_override('quota_v6_subnets_per_network', original_6,
"QUOTAS")
def test_create_subnet_zero_quota_fail(self):
original_4 = cfg.CONF.QUOTAS.quota_v4_subnets_per_network
s = [dict(network_id=1, cidr="192.167.10.0/24",
tenant_id=1, id=1, created_at="123")]
with self._stubs(s):
cfg.CONF.set_override('quota_v4_subnets_per_network', 0, "QUOTAS")
with self.assertRaises(n_exc.OverQuota):
self.plugin.create_subnet(self.context, dict(subnet=s[0]))
cfg.CONF.set_override('quota_v4_subnets_per_network', original_4,
"QUOTAS")
def test_create_subnet_negative_one_quota_pass(self):
original_4 = cfg.CONF.QUOTAS.quota_v4_subnets_per_network
s = [dict(network_id=1, cidr="192.167.10.0/24",
tenant_id=1, id=1, created_at="123")]
with self._stubs(s):
cfg.CONF.set_override('quota_v4_subnets_per_network', 0, "QUOTAS")
with self.assertRaises(n_exc.OverQuota):
self.plugin.create_subnet(self.context, dict(subnet=s[0]))
cfg.CONF.set_override('quota_v4_subnets_per_network', -1, "QUOTAS")
self.plugin.create_subnet(self.context, dict(subnet=s[0]))
cfg.CONF.set_override('quota_v4_subnets_per_network', original_4,
"QUOTAS")
class TestSubnetsNotification(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, s, deleted_at=None):
class FakeContext(object):
def __enter__(*args, **kwargs):
pass
def __exit__(*args, **kwargs):
pass
self.context.session.begin = FakeContext
s["network"] = models.Network()
s["network"]["created_at"] = s["created_at"]
subnet = models.Subnet(**s)
with contextlib.nested(
mock.patch("quark.plugin_modules.subnets.get_subnets"),
mock.patch("quark.db.api.subnet_find"),
mock.patch("quark.db.api.network_find"),
mock.patch("quark.db.api.subnet_create"),
mock.patch("quark.db.api.subnet_delete"),
mock.patch("neutron.common.rpc.get_notifier"),
mock.patch("neutron.quota.QUOTAS"),
mock.patch("oslo_utils.timeutils.utcnow"),
mock.patch("quark.plugin_modules.subnets._validate_subnet_cidr")
) as (get_subnets, sub_find, net_find, sub_create, sub_del, notify,
quota_engine, time_func, sub_validate):
sub_create.return_value = subnet
get_subnets.return_value = []
sub_find.return_value = subnet
time_func.return_value = deleted_at
yield notify
def test_create_subnet_notification(self):
s = dict(network_id=1, cidr="192.168.10.0/24",
tenant_id=1, id=1, created_at="123")
with self._stubs(s) as notify:
admin_ctx = self.context.elevated()
self.plugin.create_subnet(admin_ctx, dict(subnet=s))
notify.assert_called_once_with("network")
notify.return_value.info.assert_called_once_with(
admin_ctx,
"ip_block.create",
dict(tenant_id=s["tenant_id"],
ip_block_id=s["id"],
created_at=s["created_at"]))
def test_delete_subnet_notification(self):
now = time.strftime('%Y-%m-%d %H:%M:%S')
later = time.strftime('%Y-%m-%d %H:%M:%S')
s = dict(tenant_id=1, id=1, created_at=now)
with self._stubs(s, deleted_at=later) as notify:
self.plugin.delete_subnet(self.context, 1)
notify.assert_called_once_with("network")
notify.return_value.info.assert_called_once_with(
self.context,
"ip_block.delete",
dict(tenant_id=s["tenant_id"],
created_at=s["created_at"],
ip_block_id=s["id"],
deleted_at=later))
class TestQuarkDiagnoseSubnets(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, subnets=None, routes=None):
if routes is None:
routes = []
route_models = []
for route in routes:
r = models.Route()
r.update(route)
route_models.append(r)
if isinstance(subnets, list):
subnet_models = []
for subnet in subnets:
s_dict = subnet.copy()
s_dict["routes"] = route_models
s = models.Subnet(network=models.Network())
s.update(s_dict)
subnet_models.append(s)
elif subnets:
mod = models.Subnet(network=models.Network())
mod.update(subnets)
mod["routes"] = route_models
subnet_models = mod
else:
subnet_models = None
with mock.patch("quark.db.api.subnet_find") as subnet_find:
subnet_find.return_value = subnet_models
yield
def test_diagnose_subnet_with_wildcard_id_no_existing_subnets(self):
with self._stubs(subnets=[], routes=[]):
expected = {'subnets': []}
actual = self.plugin.diagnose_subnet(self.context.elevated(), "*",
None)
self.assertEqual(expected, actual)
def test_diagnose_subnet_not_authorized(self):
with self._stubs(subnets=[], routes=[]):
with self.assertRaises(n_exc.NotAuthorized):
self.plugin.diagnose_subnet(self.context, "*", None)
def test_diagnose_subnet_with_wildcard_with_existing_subnets(self):
subnet_id = str(uuid.uuid4())
route = dict(id=1, cidr="0.0.0.0/0", gateway="192.168.0.1")
subnet = dict(id=subnet_id, network_id=1, name=subnet_id,
tenant_id=self.context.tenant_id, ip_version=4,
cidr="192.168.0.0/24", gateway_ip="192.168.0.1",
dns_nameservers=[],
enable_dhcp=None)
with self._stubs(subnets=[subnet], routes=[route]):
actual = self.plugin.diagnose_subnet(self.context.elevated(), "*",
None)
self.maxDiff = None
self.assertEqual(subnet["id"], actual["subnets"][0]["id"])
def test_diagnose_subnet_with_regular_id(self):
subnet_id = "12345"
route = dict(id=1, cidr="0.0.0.0/0", gateway="192.168.0.1")
subnet = dict(id=subnet_id, network_id=1, name=subnet_id,
tenant_id=self.context.tenant_id, ip_version=4,
cidr="192.168.0.0/24", gateway_ip="192.168.0.1",
dns_nameservers=[],
enable_dhcp=None)
with self._stubs(subnets=subnet, routes=[route]):
actual = self.plugin.diagnose_subnet(self.context.elevated(),
subnet_id, None)
self.assertEqual(subnet["id"], actual["subnets"]["id"])
class TestQuarkCreateSubnetAttrFilters(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self):
with contextlib.nested(
mock.patch("quark.db.api.subnet_create"),
mock.patch("quark.db.api.network_find"),
mock.patch("quark.db.api.dns_create"),
mock.patch("quark.db.api.route_create"),
mock.patch("quark.plugin_views._make_subnet_dict"),
mock.patch("quark.db.api.subnet_find"),
mock.patch("neutron.common.rpc.get_notifier")
) as (subnet_create, net_find, dns_create, route_create, sub_dict,
subnet_find, get_notifier):
route_create.return_value = models.Route()
yield subnet_create, net_find
def test_create_subnet(self):
subnet = {"subnet": {
"network_id": 1, "tenant_id": self.context.tenant_id,
"ip_version": 4, "cidr": "172.16.0.0/24",
"gateway_ip": "0.0.0.0",
"dns_nameservers": neutron_attrs.ATTR_NOT_SPECIFIED,
"host_routes": neutron_attrs.ATTR_NOT_SPECIFIED,
"enable_dhcp": None, "first_ip": 0, "last_ip": 1,
"next_auto_assign_ip": 10}}
with self._stubs() as (subnet_create, net_find):
subnet_create.return_value = models.Subnet(
cidr=subnet["subnet"]["cidr"])
self.plugin.create_subnet(self.context, subnet)
self.assertEqual(subnet_create.call_count, 1)
subnet_create.assert_called_once_with(
self.context, network_id=subnet["subnet"]["network_id"],
tenant_id=subnet["subnet"]["tenant_id"],
cidr=subnet["subnet"]["cidr"], network=net_find())
def test_create_subnet_admin(self):
subnet = {"subnet": {
"network_id": 1, "tenant_id": self.context.tenant_id,
"ip_version": 4, "cidr": "172.16.0.0/24",
"gateway_ip": "0.0.0.0",
"dns_nameservers": neutron_attrs.ATTR_NOT_SPECIFIED,
"host_routes": neutron_attrs.ATTR_NOT_SPECIFIED,
"enable_dhcp": None, "first_ip": 0, "last_ip": 1,
"next_auto_assign_ip": 10}}
admin_ctx = self.context.elevated()
with self._stubs() as (subnet_create, net_find):
subnet_create.return_value = models.Subnet(
cidr=subnet["subnet"]["cidr"])
self.plugin.create_subnet(admin_ctx, subnet)
self.assertEqual(subnet_create.call_count, 1)
subnet_create.assert_called_once_with(
admin_ctx, network_id=subnet["subnet"]["network_id"],
tenant_id=subnet["subnet"]["tenant_id"],
cidr=subnet["subnet"]["cidr"], network=net_find(),
next_auto_assign_ip=subnet["subnet"]["next_auto_assign_ip"])
class TestQuarkUpdateSubnetAttrFilters(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self):
pool_mod = "quark.allocation_pool.AllocationPools"
with contextlib.nested(
mock.patch("quark.db.api.subnet_find"),
mock.patch("quark.db.api.subnet_update"),
mock.patch("quark.db.api.dns_create"),
mock.patch("quark.db.api.route_find"),
mock.patch("quark.db.api.route_update"),
mock.patch("quark.db.api.route_create"),
mock.patch(pool_mod),
mock.patch("quark.plugin_views._make_subnet_dict")
) as (subnet_find, subnet_update, dns_create, route_find,
route_update, route_create, make_subnet, gateway_exclude):
yield subnet_update, subnet_find
def test_update_subnet_attr_filters(self):
subnet = {"subnet": {
"network_id": 1, "tenant_id": self.context.tenant_id,
"ip_version": 4, "cidr": "172.16.0.0/24",
"gateway_ip": "0.0.0.0",
"dns_nameservers": neutron_attrs.ATTR_NOT_SPECIFIED,
"host_routes": neutron_attrs.ATTR_NOT_SPECIFIED,
"enable_dhcp": None, "first_ip": 0, "last_ip": 1,
"next_auto_assign_ip": 10, "do_not_use": False}}
with self._stubs() as (subnet_update, subnet_find):
self.plugin.update_subnet(self.context, 1, subnet)
# NOTE(mdietz): the assertion here shows that, without admin,
# all of the attributes passed above are stripped
# from the request body. Otherwise, the attributes
# above would be passed as keyword arguments to the
# subnet_update db api call.
subnet_update.assert_called_once_with(
self.context, subnet_find())
def test_update_subnet_attr_filters_admin(self):
subnet = {"subnet": {
"network_id": 1, "tenant_id": self.context.tenant_id,
"ip_version": 4, "cidr": "172.16.0.0/24",
"gateway_ip": "0.0.0.0",
"dns_nameservers": neutron_attrs.ATTR_NOT_SPECIFIED,
"host_routes": neutron_attrs.ATTR_NOT_SPECIFIED,
"enable_dhcp": False, "first_ip": 0, "last_ip": 1,
"next_auto_assign_ip": 10, "do_not_use": True}}
admin_ctx = self.context.elevated()
with self._stubs() as (subnet_update, subnet_find):
self.plugin.update_subnet(admin_ctx, 1, subnet)
subnet_update.assert_called_once_with(
admin_ctx, subnet_find(),
next_auto_assign_ip=subnet["subnet"]["next_auto_assign_ip"],
tenant_id=subnet["subnet"]["tenant_id"],
enable_dhcp=subnet["subnet"]["enable_dhcp"],
do_not_use=subnet["subnet"]["do_not_use"])
class TestQuarkGetSubnetsShared(test_quark_plugin.TestQuarkPlugin):
def setUp(self):
super(TestQuarkGetSubnetsShared, self).setUp()
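        # Provider ("shared") networks are described by a JSON strategy mapping
        # a well-known network id to its default v4/v6 subnet ids; the tests
        # below expect "public_network" to resolve to public_v4/public_v6.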
self.strategy = {"public_network":
{"bridge": "xenbr0",
"subnets": {"4": "public_v4",
"6": "public_v6"}}}
self.strategy_json = json.dumps(self.strategy)
self.old = plugin_views.STRATEGY
plugin_views.STRATEGY = network_strategy.JSONStrategy(
self.strategy_json)
cfg.CONF.set_override("default_net_strategy", self.strategy_json,
"QUARK")
def tearDown(self):
plugin_views.STRATEGY = self.old
@contextlib.contextmanager
def _stubs(self, subnets=None):
subnet_mods = []
if isinstance(subnets, list):
for sub in subnets:
subnet_mod = models.Subnet()
subnet_mod.update(sub)
subnet_mods.append(subnet_mod)
db_mod = "quark.db.api"
db_api.STRATEGY = network_strategy.JSONStrategy(self.strategy_json)
network_strategy.STRATEGY = network_strategy.JSONStrategy(
self.strategy_json)
with mock.patch("%s._subnet_find" % db_mod) as subnet_find:
subnet_find.return_value = subnet_mods
yield subnet_find
def test_get_subnets_shared(self):
sub0 = dict(id='public_v4', tenant_id="provider", name="public_v4",
_cidr="0.0.0.0/0", network_id="public_network")
sub1 = dict(id='public_v6', tenant_id="provider", name="public_v6",
_cidr="::/0", network_id="public_network")
with self._stubs(subnets=[sub0, sub1]) as subnet_find:
ret = self.plugin.get_subnets(self.context, None, None, None,
False, {"shared": [True]})
for sub in ret:
self.assertEqual("public_network", sub["network_id"])
subnet_find.assert_called_with(self.context, None, None, False,
None, None,
join_routes=True,
defaults=["public_v4", "public_v6"],
join_dns=True,
join_pool=True,
provider_query=False)
def test_get_subnets_shared_false(self):
sub0 = dict(id='public_v4', tenant_id="provider", name="public_v4",
_cidr="0.0.0.0/0", network_id="public_network")
sub1 = dict(id='public_v6', tenant_id="provider", name="public_v6",
_cidr="::/0", network_id="public_network")
with self._stubs(subnets=[sub0, sub1]) as subnet_find:
self.plugin.get_subnets(self.context, None, None, None,
False, {"shared": [False]})
invert = db_api.INVERT_DEFAULTS
subnet_find.assert_called_with(self.context, None, None, False,
None, None,
defaults=[invert, "public_v4",
"public_v6"],
provider_query=False,
join_routes=True, join_dns=True,
join_pool=True)
def test_get_subnets_no_shared(self):
sub0 = dict(id='public_v4', tenant_id="provider", name="public_v4",
_cidr="0.0.0.0/0", network_id="public_network")
sub1 = dict(id='tenant_v4', tenant_id="tenant", name="tenant_v4",
_cidr="0.0.0.0/0", network_id="tenant_network")
with self._stubs(subnets=[sub0, sub1]) as subnet_find:
self.plugin.get_subnets(self.context, None, None, None,
False)
subnet_find.assert_called_with(self.context, None, None, False,
None, None,
defaults=[],
provider_query=False,
join_routes=True, join_dns=True,
join_pool=True)
|
|
# -*- coding: utf-8 -*-
from scipy.interpolate import griddata
import numpy as np
import logging
from .helpers import get_latex_table_frame
class Comparison(object):
def __init__(self, parent):
self.parent = parent
self._logger = logging.getLogger("chainconsumer")
def dic(self):
r""" Returns the corrected Deviance Information Criterion (DIC) for all chains loaded into ChainConsumer.
If a chain does not have a posterior, this method will return `None` for that chain. **Note that
the DIC metric is only valid on posterior surfaces which closely resemble multivariate normals!**
Formally, we follow Liddle (2007) and first define *Bayesian complexity* as
.. math::
p_D = \bar{D}(\theta) - D(\bar{\theta}),
where :math:`D(\theta) = -2\ln(P(\theta)) + C` is the deviance, where :math:`P` is the posterior
and :math:`C` a constant. From here the DIC is defined as
.. math::
DIC \equiv D(\bar{\theta}) + 2p_D = \bar{D}(\theta) + p_D.
Returns
-------
list[float]
A list of all the DIC values - one per chain, in the order in which the chains were added.
References
----------
[1] Andrew R. Liddle, "Information criteria for astrophysical model selection", MNRAS (2007)
"""
dics = []
dics_bool = []
for i, chain in enumerate(self.parent.chains):
p = chain.posterior
if p is None:
dics_bool.append(False)
self._logger.warning("You need to set the posterior for chain %s to get the DIC" % chain.name)
else:
dics_bool.append(True)
num_params = chain.chain.shape[1]
means = np.array([np.average(chain.chain[:, ii], weights=chain.weights) for ii in range(num_params)])
d = -2 * p
d_of_mean = griddata(chain.chain, d, means, method="nearest")[0]
mean_d = np.average(d, weights=chain.weights)
p_d = mean_d - d_of_mean
dic = mean_d + p_d
dics.append(dic)
if len(dics) > 0:
dics -= np.min(dics)
dics_fin = []
i = 0
for b in dics_bool:
if not b:
dics_fin.append(None)
else:
dics_fin.append(dics[i])
i += 1
return dics_fin
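    # Hedged numeric sketch of the bookkeeping above (values are illustrative):
    # if the weighted mean deviance is mean_d = 210.0 and the deviance at the
    # posterior mean is d_of_mean = 204.0, then p_d = 6.0 (roughly the
    # effective number of fitted parameters) and DIC = mean_d + p_d = 216.0,
    # before subtracting the minimum DIC across chains.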
def bic(self):
r""" Returns the corrected Bayesian Information Criterion (BIC) for all chains loaded into ChainConsumer.
If a chain does not have a posterior, number of data points, and number of free parameters
loaded, this method will return `None` for that chain. Formally, the BIC is defined as
.. math::
BIC \equiv -2\ln(P) + k \ln(N),
where :math:`P` represents the posterior, :math:`k` the number of model parameters and :math:`N`
the number of independent data points used in the model fitting.
Returns
-------
list[float]
A list of all the BIC values - one per chain, in the order in which the chains were added.
"""
bics = []
bics_bool = []
for i, chain in enumerate(self.parent.chains):
p, n_data, n_free = chain.posterior, chain.num_eff_data_points, chain.num_free_params
if p is None or n_data is None or n_free is None:
bics_bool.append(False)
missing = ""
if p is None:
missing += "posterior, "
if n_data is None:
missing += "num_eff_data_points, "
if n_free is None:
missing += "num_free_params, "
self._logger.warning("You need to set %s for chain %s to get the BIC" % (missing[:-2], chain.name))
else:
bics_bool.append(True)
bics.append(n_free * np.log(n_data) - 2 * np.max(p))
if len(bics) > 0:
bics -= np.min(bics)
bics_fin = []
i = 0
for b in bics_bool:
if not b:
bics_fin.append(None)
else:
bics_fin.append(bics[i])
i += 1
return bics_fin
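    # Hedged numeric sketch of the formula above (values are illustrative):
    # with np.max(p) = -100.0, n_free = 3 and n_data = 1000,
    # BIC = 3 * ln(1000) - 2 * (-100.0) ≈ 220.7, before subtracting the
    # minimum BIC across chains.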
def aic(self):
r""" Returns the corrected Akaike Information Criterion (AICc) for all chains loaded into ChainConsumer.
If a chain does not have a posterior, number of data points, and number of free parameters
loaded, this method will return `None` for that chain. Formally, the AIC is defined as
.. math::
AIC \equiv -2\ln(P) + 2k,
where :math:`P` represents the posterior, and :math:`k` the number of model parameters. The AICc
is then defined as
.. math::
AIC_c \equiv AIC + \frac{2k(k+1)}{N-k-1},
where :math:`N` represents the number of independent data points used in the model fitting.
The AICc is a correction for the AIC to take into account finite chain sizes.
Returns
-------
list[float]
A list of all the AICc values - one per chain, in the order in which the chains were added.
"""
aics = []
aics_bool = []
for i, chain in enumerate(self.parent.chains):
p, n_data, n_free = chain.posterior, chain.num_eff_data_points, chain.num_free_params
if p is None or n_data is None or n_free is None:
aics_bool.append(False)
missing = ""
if p is None:
missing += "posterior, "
if n_data is None:
missing += "num_eff_data_points, "
if n_free is None:
missing += "num_free_params, "
self._logger.warning("You need to set %s for chain %s to get the AIC" % (missing[:-2], chain.name))
else:
aics_bool.append(True)
c_cor = 1.0 * n_free * (n_free + 1) / (n_data - n_free - 1)
aics.append(2.0 * (n_free + c_cor - np.max(p)))
if len(aics) > 0:
aics -= np.min(aics)
aics_fin = []
i = 0
for b in aics_bool:
if not b:
aics_fin.append(None)
else:
aics_fin.append(aics[i])
i += 1
return aics_fin
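    # Hedged numeric sketch of the correction above (values are illustrative):
    # with n_free = 3 and n_data = 1000, c_cor = 3 * 4 / 996 ≈ 0.012 and AICc
    # exceeds the plain AIC by 2 * c_cor ≈ 0.024; with n_data = 10 the same
    # term is 2 * (3 * 4 / 6) = 4.0, a real penalty for small samples.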
def comparison_table(
self, caption=None, label="tab:model_comp", hlines=True, aic=True, bic=True, dic=True, sort="bic", descending=True
): # pragma: no cover
"""
Return a LaTeX ready table of model comparisons.
Parameters
----------
caption : str, optional
The table caption to insert.
label : str, optional
The table label to insert.
hlines : bool, optional
Whether to insert hlines in the table or not.
aic : bool, optional
Whether to include a column for AICc or not.
bic : bool, optional
Whether to include a column for BIC or not.
dic : bool, optional
Whether to include a column for DIC or not.
sort : str, optional
How to sort the models. Should be one of "bic", "aic" or "dic".
descending : bool, optional
The sort order.
Returns
-------
str
A LaTeX table to be copied into your document.
"""
if sort == "bic":
assert bic, "You cannot sort by BIC if you turn it off"
if sort == "aic":
assert aic, "You cannot sort by AIC if you turn it off"
if sort == "dic":
assert dic, "You cannot sort by DIC if you turn it off"
if caption is None:
caption = ""
if label is None:
label = ""
base_string = get_latex_table_frame(caption, label)
end_text = " \\\\ \n"
        num_cols = 1 + (1 if aic else 0) + (1 if bic else 0) + (1 if dic else 0)
        column_text = "c" * num_cols
center_text = ""
hline_text = "\\hline\n"
if hlines:
center_text += hline_text
        center_text += "\tModel" + (" & AIC" if aic else "") + (" & BIC" if bic else "") + (" & DIC" if dic else "") + end_text
if hlines:
center_text += "\t" + hline_text
if aic:
aics = self.aic()
else:
aics = np.zeros(len(self.parent.chains))
if bic:
bics = self.bic()
else:
bics = np.zeros(len(self.parent.chains))
if dic:
dics = self.dic()
else:
dics = np.zeros(len(self.parent.chains))
if sort == "bic":
to_sort = bics
elif sort == "aic":
to_sort = aics
elif sort == "dic":
to_sort = dics
else:
            raise ValueError("sort %s not recognised, must be bic, aic or dic" % sort)
good = [i for i, t in enumerate(to_sort) if t is not None]
names = [self.parent.chains[g].name for g in good]
        aics = [aics[g] for g in good]
        bics = [bics[g] for g in good]
        dics = [dics[g] for g in good]
        to_sort = {"bic": bics, "aic": aics, "dic": dics}[sort]
indexes = np.argsort(to_sort)
if descending:
indexes = indexes[::-1]
for i in indexes:
line = "\t" + names[i]
if aic:
line += " & %5.1f " % aics[i]
if bic:
line += " & %5.1f " % bics[i]
if dic:
line += " & %5.1f " % dics[i]
line += end_text
center_text += line
if hlines:
center_text += "\t" + hline_text
return base_string % (column_text, center_text)
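# Illustrative usage sketch: the calculations above are usually reached through a
# ChainConsumer instance (commonly exposed as ``c.comparison``); ``samples`` and
# ``log_post`` below are placeholder names, and the exact attribute path is an
# assumption rather than something defined in this file.
#
#   c = ChainConsumer()
#   c.add_chain(samples, posterior=log_post, name="model A",
#               num_free_params=3, num_eff_data_points=1000)
#   c.add_chain(samples2, posterior=log_post2, name="model B",
#               num_free_params=5, num_eff_data_points=1000)
#   print(c.comparison.aic())   # e.g. [0.0, 2.7] -- values are offset so the best model is 0
#   print(c.comparison.bic())
#   print(c.comparison.comparison_table(caption="Model comparison"))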
|
|
from __future__ import print_function, absolute_import
from ctypes import (c_char_p, byref, POINTER, c_bool, create_string_buffer,
c_void_p, c_size_t, cast, string_at)
from . import ffi
from .linker import link_modules
from .common import _decode_string, _encode_string
from .value import ValueRef
def parse_assembly(llvmir):
"""
Create Module from a LLVM IR string
"""
context = ffi.lib.LLVMPY_GetGlobalContext()
llvmir = _encode_string(llvmir)
strbuf = c_char_p(llvmir)
with ffi.OutputString() as errmsg:
mod = ModuleRef(ffi.lib.LLVMPY_ParseAssembly(context, strbuf, errmsg))
if errmsg:
mod.close()
raise RuntimeError("LLVM IR parsing error\n{0}".format(errmsg))
return mod
def parse_bitcode(bitcode):
"""
Create Module from a LLVM *bitcode* (a bytes object).
"""
context = ffi.lib.LLVMPY_GetGlobalContext()
buf = c_char_p(bitcode)
bufsize = len(bitcode)
with ffi.OutputString() as errmsg:
mod = ModuleRef(ffi.lib.LLVMPY_ParseBitcode(context, buf, bufsize, errmsg))
if errmsg:
mod.close()
raise RuntimeError("LLVM bitcode parsing error\n{0}".format(errmsg))
return mod
class ModuleRef(ffi.ObjectRef):
"""
A reference to a LLVM module.
"""
def __str__(self):
with ffi.OutputString() as outstr:
ffi.lib.LLVMPY_PrintModuleToString(self, outstr)
return str(outstr)
def as_bitcode(self):
"""
Return the module's LLVM bitcode, as a bytes object.
"""
ptr = c_char_p(None)
size = c_size_t(-1)
ffi.lib.LLVMPY_WriteBitcodeToString(self, byref(ptr), byref(size))
if not ptr:
raise MemoryError
try:
assert size.value >= 0
return string_at(ptr, size.value)
finally:
ffi.lib.LLVMPY_DisposeString(ptr)
def _dispose(self):
self._capi.LLVMPY_DisposeModule(self)
def get_function(self, name):
"""
Get a ValueRef pointing to the function named *name*.
NameError is raised if the symbol isn't found.
"""
p = ffi.lib.LLVMPY_GetNamedFunction(self, _encode_string(name))
if not p:
raise NameError(name)
return ValueRef(p, module=self)
def get_global_variable(self, name):
"""
Get a ValueRef pointing to the global variable named *name*.
NameError is raised if the symbol isn't found.
"""
p = ffi.lib.LLVMPY_GetNamedGlobalVariable(self, _encode_string(name))
if not p:
raise NameError(name)
return ValueRef(p, module=self)
def verify(self):
"""
Verify the module IR's correctness. RuntimeError is raised on error.
"""
with ffi.OutputString() as outmsg:
if ffi.lib.LLVMPY_VerifyModule(self, outmsg):
raise RuntimeError(str(outmsg))
@property
def name(self):
"""
The module's identifier.
"""
return _decode_string(ffi.lib.LLVMPY_GetModuleName(self))
@name.setter
def name(self, value):
ffi.lib.LLVMPY_SetModuleName(self, _encode_string(value))
@property
def data_layout(self):
"""
This module's data layout specification, as a string.
"""
# LLVMGetDataLayout() points inside a std::string managed by LLVM.
with ffi.OutputString(owned=False) as outmsg:
ffi.lib.LLVMPY_GetDataLayout(self, outmsg)
return str(outmsg)
@data_layout.setter
def data_layout(self, strrep):
ffi.lib.LLVMPY_SetDataLayout(self,
create_string_buffer(
strrep.encode('utf8')))
@property
def triple(self):
"""
This module's target "triple" specification, as a string.
"""
# LLVMGetTarget() points inside a std::string managed by LLVM.
with ffi.OutputString(owned=False) as outmsg:
ffi.lib.LLVMPY_GetTarget(self, outmsg)
return str(outmsg)
@triple.setter
def triple(self, strrep):
ffi.lib.LLVMPY_SetTarget(self,
create_string_buffer(
strrep.encode('utf8')))
def link_in(self, other, preserve=False):
link_modules(self, other, preserve)
if not preserve:
other.close()
@property
def global_variables(self):
"""
Return an iterator over this module's global variables.
The iterator will yield a ValueRef for each global variable.
Note that global variables don't include functions
(a function is a "global value" but not a "global variable" in
LLVM parlance)
"""
it = ffi.lib.LLVMPY_ModuleGlobalsIter(self)
return _GlobalsIterator(it, module=self)
@property
def functions(self):
"""
Return an iterator over this module's functions.
The iterator will yield a ValueRef for each function.
"""
it = ffi.lib.LLVMPY_ModuleFunctionsIter(self)
return _FunctionsIterator(it, module=self)
def clone(self):
return ModuleRef(ffi.lib.LLVMPY_CloneModule(self))
class _Iterator(ffi.ObjectRef):
def __init__(self, ptr, module):
ffi.ObjectRef.__init__(self, ptr)
# Keep Module alive
self._module = module
def __next__(self):
vp = self._next()
if vp:
return ValueRef(vp, self._module)
else:
raise StopIteration
next = __next__
def __iter__(self):
return self
class _GlobalsIterator(_Iterator):
def _dispose(self):
self._capi.LLVMPY_DisposeGlobalsIter(self)
def _next(self):
return ffi.lib.LLVMPY_GlobalsIterNext(self)
class _FunctionsIterator(_Iterator):
def _dispose(self):
self._capi.LLVMPY_DisposeFunctionsIter(self)
def _next(self):
return ffi.lib.LLVMPY_FunctionsIterNext(self)
# =============================================================================
# Set function FFI
ffi.lib.LLVMPY_ParseAssembly.argtypes = [ffi.LLVMContextRef,
c_char_p,
POINTER(c_char_p)]
ffi.lib.LLVMPY_ParseAssembly.restype = ffi.LLVMModuleRef
ffi.lib.LLVMPY_ParseBitcode.argtypes = [ffi.LLVMContextRef,
c_char_p, c_size_t,
POINTER(c_char_p)]
ffi.lib.LLVMPY_ParseBitcode.restype = ffi.LLVMModuleRef
ffi.lib.LLVMPY_GetGlobalContext.restype = ffi.LLVMContextRef
ffi.lib.LLVMPY_DisposeModule.argtypes = [ffi.LLVMModuleRef]
ffi.lib.LLVMPY_PrintModuleToString.argtypes = [ffi.LLVMModuleRef,
POINTER(c_char_p)]
ffi.lib.LLVMPY_WriteBitcodeToString.argtypes = [ffi.LLVMModuleRef,
POINTER(c_char_p),
POINTER(c_size_t)]
ffi.lib.LLVMPY_GetNamedFunction.argtypes = [ffi.LLVMModuleRef,
c_char_p]
ffi.lib.LLVMPY_GetNamedFunction.restype = ffi.LLVMValueRef
ffi.lib.LLVMPY_VerifyModule.argtypes = [ffi.LLVMModuleRef,
POINTER(c_char_p)]
ffi.lib.LLVMPY_VerifyModule.restype = c_bool
ffi.lib.LLVMPY_GetDataLayout.argtypes = [ffi.LLVMModuleRef, POINTER(c_char_p)]
ffi.lib.LLVMPY_SetDataLayout.argtypes = [ffi.LLVMModuleRef, c_char_p]
ffi.lib.LLVMPY_GetTarget.argtypes = [ffi.LLVMModuleRef, POINTER(c_char_p)]
ffi.lib.LLVMPY_SetTarget.argtypes = [ffi.LLVMModuleRef, c_char_p]
ffi.lib.LLVMPY_GetNamedGlobalVariable.argtypes = [ffi.LLVMModuleRef, c_char_p]
ffi.lib.LLVMPY_GetNamedGlobalVariable.restype = ffi.LLVMValueRef
ffi.lib.LLVMPY_ModuleGlobalsIter.argtypes = [ffi.LLVMModuleRef]
ffi.lib.LLVMPY_ModuleGlobalsIter.restype = ffi.LLVMGlobalsIterator
ffi.lib.LLVMPY_DisposeGlobalsIter.argtypes = [ffi.LLVMGlobalsIterator]
ffi.lib.LLVMPY_GlobalsIterNext.argtypes = [ffi.LLVMGlobalsIterator]
ffi.lib.LLVMPY_GlobalsIterNext.restype = ffi.LLVMValueRef
ffi.lib.LLVMPY_ModuleFunctionsIter.argtypes = [ffi.LLVMModuleRef]
ffi.lib.LLVMPY_ModuleFunctionsIter.restype = ffi.LLVMFunctionsIterator
ffi.lib.LLVMPY_DisposeFunctionsIter.argtypes = [ffi.LLVMFunctionsIterator]
ffi.lib.LLVMPY_FunctionsIterNext.argtypes = [ffi.LLVMFunctionsIterator]
ffi.lib.LLVMPY_FunctionsIterNext.restype = ffi.LLVMValueRef
ffi.lib.LLVMPY_CloneModule.argtypes = [ffi.LLVMModuleRef]
ffi.lib.LLVMPY_CloneModule.restype = ffi.LLVMModuleRef
ffi.lib.LLVMPY_GetModuleName.argtypes = [ffi.LLVMModuleRef]
ffi.lib.LLVMPY_GetModuleName.restype = c_char_p
ffi.lib.LLVMPY_SetModuleName.argtypes = [ffi.LLVMModuleRef, c_char_p]
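# Illustrative usage sketch of the API defined above, assuming the binding layer
# has already been initialised elsewhere (e.g. llvmlite.binding.initialize() and
# the native target/asm printer); the IR string is just an example.
#
#   ir = """
#   define i32 @add(i32 %a, i32 %b) {
#     %s = add i32 %a, %b
#     ret i32 %s
#   }
#   """
#   mod = parse_assembly(ir)
#   mod.verify()                    # raises RuntimeError if the IR is invalid
#   fn = mod.get_function("add")    # ValueRef; NameError if the symbol is missing
#   bc = mod.as_bitcode()           # bytes
#   mod2 = parse_bitcode(bc)        # new ModuleRef parsed back from the bitcode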
|
|
# Copyright 2014 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from security_monkey.tests import SecurityMonkeyTestCase
from mock import patch
from mock import MagicMock
from security_monkey.auditors.sns import SNSAuditor
from security_monkey.constants import Constants
from security_monkey.exceptions import InvalidARN
#from security_monkey.exceptions import InvalidAWSJSON
from security_monkey.exceptions import InvalidSourceOwner
from security_monkey.constants import TROUBLE_REGIONS
from security_monkey.watchers.sns import SNSItem
class SNSTestCase(SecurityMonkeyTestCase):
@patch('security_monkey.common.sts_connect.connect')
def test_0_sns_slurp(self, test_patch):
"""Should add an exception to the exception map when
slurping accounts that don't exist."""
from security_monkey.watchers.sns import SNS
import boto.sns
test_patch.return_value = None
accounts = ['doesntexist1', 'doesntexist2']
cw = SNS(accounts=accounts, debug=True)
#with self.assertRaises(BotoConnectionIssue):
(items, el) = cw.slurp()
for account in accounts:
for region in boto.sns.regions():
if region.name not in TROUBLE_REGIONS:
self.assertIn(('sns', account, region.name), el)
@patch('security_monkey.common.sts_connect.connect')
def test_1_sns_slurp(self, test_patch):
"""Should add an exception to the exception map when provided with invalid JSON."""
class MockSNS(object):
def get_all_topics(self, next_token=None):
return {'ListTopicsResponse':
{'ListTopicsResult':
{'NextToken': False,
'Topics':
[
{'TopicArn': 'arn:aws:sns:us-west-2:000000000000:NameZero'
},
{'TopicArn': 'arn:aws:sns:us-east-1:111111111111:NameOne'
}
]
}
}
}
def get_topic_attributes(self, arn):
return {'GetTopicAttributesResponse':
{'GetTopicAttributesResult':
{'Attributes':
{'Policy': '{"json": "that": "won\'t": "parse"}'
}
}
}
}
from security_monkey.watchers.sns import SNS
import boto.sns
test_patch.return_value = MockSNS()
accounts = ['testaccount']
cw = SNS(accounts=accounts, debug=True)
(items, el) = cw.slurp()
for account in accounts:
for region in boto.sns.regions():
if region.name not in TROUBLE_REGIONS:
self.assertIn(('sns', account, region.name, 'arn:aws:sns:us-west-2:000000000000:NameZero'), el)
self.assertIn(('sns', account, region.name, 'arn:aws:sns:us-east-1:111111111111:NameOne'), el)
@patch('security_monkey.common.sts_connect.connect')
def test_2_sns_slurp(self, test_patch):
class MockSNS(object):
def get_all_topics(self, next_token=None):
return {'ListTopicsResponse':
{'ListTopicsResult':
{'NextToken': False,
'Topics':
[
{'TopicArn': 'arn:aws:sns:us-west-2:000000000000:NameZero'
}, # Invalid ARN is missing region:
{'TopicArn': 'arn:aws:sns::111111111111:NameOne'
}
]
}
}
}
def get_topic_attributes(self, arn):
return {'GetTopicAttributesResponse':
{'GetTopicAttributesResult':
{'Attributes':
{'Policy': '{"json": "is_fun"}'
}
}
}
}
from security_monkey.watchers.sns import SNS
import boto.sns
test_patch.return_value = MockSNS()
accounts = ['testaccount']
cw = SNS(accounts=accounts, debug=True)
(items, el) = cw.slurp()
for account in accounts:
for region in boto.sns.regions():
if region.name not in TROUBLE_REGIONS:
self.assertIn(('sns', account, region.name, 'arn:aws:sns::111111111111:NameOne'), el)
@patch('security_monkey.common.sts_connect.connect')
def test_3_sns_slurp(self, test_patch):
class MockSNS(object):
def get_all_topics(self):
return {'ListTopicsResponse':
{'ListTopicsResult':
{'Topics':
[
{'TopicArn': 'arn:aws:sns:us-west-2:000000000000:NameZero'
}
]
}
}
}
def get_topic_attributes(self, arn):
return {'GetTopicAttributesResponse':
{'GetTopicAttributesResult':
{'Attributes':
{'Policy': '{"json": "value"}'
}
}
}
}
from security_monkey.watchers.sns import SNS
test_patch.return_value = MockSNS()
cw = SNS(debug=True)
(items, el) = cw.slurp()
for item in items:
name = item.config['Name']['Name']
self.assertEqual(name, 'NameZero')
policy = item.config['SNSPolicy']
self.assertDictEqual(policy, {"json": "value"})
def test_empty_snstopicpolicy(self):
au = SNSAuditor(debug=True)
obj = SNSItem(region='test-region', account='test-account', name='test-name', config={'SNSPolicy': {}})
au.check_snstopicpolicy_empty(obj)
self.assertEquals(len(obj.audit_issues), 1)
if len(obj.audit_issues) == 1:
for issue in obj.audit_issues:
self.assertEquals(issue.score, 1)
self.assertEquals(issue.issue, "SNS Topic Policy is empty")
self.assertIsNone(issue.notes)
def test_crossaccount_snstopicpolicy_method_1(self):
au = SNSAuditor(debug=True)
data = {
'SNSPolicy': {
'Statement': [
{
'Principal': {
'AWS': '*'
},
'Condition': {
'StringEquals': {
'AWS:SourceOwner': '000000000000'
}
}
}
]
}
}
obj = SNSItem(region='test-region', account='test-account', name='test-name', config=data)
au.check_snstopicpolicy_crossaccount(obj)
self.assertEquals(len(obj.audit_issues), 1)
if len(obj.audit_issues) == 1:
for issue in obj.audit_issues:
self.assertEquals(issue.score, 10)
self.assertRegexpMatches(issue.issue, "Unknown Cross Account Access from .*")
self.assertIsNone(issue.notes)
def test_crossaccount_snstopicpolicy_method_2(self):
obj = self.check_arn('arn:aws:iam::000000000000:')
self.assertEquals(len(obj.audit_issues), 1)
if len(obj.audit_issues) == 1:
for issue in obj.audit_issues:
self.assertEquals(issue.score, 10)
self.assertRegexpMatches(issue.issue, "Unknown Cross Account Access from .*")
self.assertIsNone(issue.notes)
def test_crossaccount_snstopicpolicy_method_3(self):
friend_name = 'friendly'
Constants.account_by_number = MagicMock(return_value=friend_name)
obj = self.check_arn('arn:aws:iam::010101010101:')
self.assertEquals(len(obj.audit_issues), 1)
if len(obj.audit_issues) == 1:
for issue in obj.audit_issues:
self.assertEquals(issue.score, 5)
expected = "Friendly Cross Account Access from " + friend_name + " to test-account"
self.assertEqual(expected, issue.issue, "\n" + expected + "\n" + issue.issue)
self.assertIsNone(issue.notes)
def test_crossaccount_snstopicpolicy_method_4(self):
# Bad ARN
with self.assertRaises(InvalidARN):
self.check_arn('arn::aws:iam:-:010101010101:')
def test_crossaccount_snstopicpolicy_method_5(self):
au = SNSAuditor(debug=True)
data = {
'SNSPolicy': {
'Statement': [
{
'Principal': {
'AWS': '*'
},
'Condition': {
'StringEquals': {
# Missing SourceOwner
}
}
}
]
}
}
obj = SNSItem(region='test-region', account='test-account', name='test-name', config=data)
au.check_snstopicpolicy_crossaccount(obj)
self.assertEquals(len(obj.audit_issues), 1)
issue = obj.audit_issues[0]
self.assertEqual(issue.score, 10)
self.assertEqual(issue.issue, "SNS Topic open to everyone")
def test_crossaccount_snstopicpolicy_method_6(self):
au = SNSAuditor(debug=True)
data = {
'SNSPolicy': {
'Statement': [
{
'Principal': {
'AWS': '*'
},
'Condition': {
'StringEquals': {
'AWS:SourceOwner': 'BADDEADBEEF'
}
}
}
]
}
}
obj = SNSItem(region='test-region', account='test-account', name='test-name', config=data)
with self.assertRaises(InvalidSourceOwner):
au.check_snstopicpolicy_crossaccount(obj)
def check_arn(self, arn):
au = SNSAuditor(debug=True)
data = {
'SNSPolicy': {
'Statement': [
{
'Principal': {
'AWS': arn
}
}
]
}
}
obj = SNSItem(region='test-region', account='test-account', name='test-name', config=data)
au.check_snstopicpolicy_crossaccount(obj)
return obj
|
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
import matplotlib
from matplotlib.testing.decorators import image_comparison, knownfailureif, cleanup
import matplotlib.pyplot as plt
from matplotlib import patches, path, transforms
from nose.tools import raises
import io
nan = np.nan
Path = path.Path
# NOTE: All of these tests assume that path.simplify is set to True
# (the default)
@image_comparison(baseline_images=['clipping'], remove_text=True)
def test_clipping():
t = np.arange(0.0, 2.0, 0.01)
s = np.sin(2*np.pi*t)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(t, s, linewidth=1.0)
ax.set_ylim((-0.20, -0.28))
@image_comparison(baseline_images=['overflow'], remove_text=True)
def test_overflow():
x = np.array([1.0,2.0,3.0,2.0e5])
y = np.arange(len(x))
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x,y)
ax.set_xlim(xmin=2,xmax=6)
@image_comparison(baseline_images=['clipping_diamond'], remove_text=True)
def test_diamond():
x = np.array([0.0, 1.0, 0.0, -1.0, 0.0])
y = np.array([1.0, 0.0, -1.0, 0.0, 1.0])
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x, y)
ax.set_xlim(xmin=-0.6, xmax=0.6)
ax.set_ylim(ymin=-0.6, ymax=0.6)
@cleanup
def test_noise():
np.random.seed(0)
x = np.random.uniform(size=(5000,)) * 50
fig = plt.figure()
ax = fig.add_subplot(111)
p1 = ax.plot(x, solid_joinstyle='round', linewidth=2.0)
path = p1[0].get_path()
transform = p1[0].get_transform()
path = transform.transform_path(path)
simplified = list(path.iter_segments(simplify=(800, 600)))
assert len(simplified) == 3884
@cleanup
def test_sine_plus_noise():
np.random.seed(0)
x = np.sin(np.linspace(0, np.pi * 2.0, 1000)) + np.random.uniform(size=(1000,)) * 0.01
fig = plt.figure()
ax = fig.add_subplot(111)
p1 = ax.plot(x, solid_joinstyle='round', linewidth=2.0)
path = p1[0].get_path()
transform = p1[0].get_transform()
path = transform.transform_path(path)
simplified = list(path.iter_segments(simplify=(800, 600)))
assert len(simplified) == 876
@image_comparison(baseline_images=['simplify_curve'], remove_text=True)
def test_simplify_curve():
pp1 = patches.PathPatch(
Path([(0, 0), (1, 0), (1, 1), (nan, 1), (0, 0), (2, 0), (2, 2), (0, 0)],
[Path.MOVETO, Path.CURVE3, Path.CURVE3, Path.CURVE3, Path.CURVE3, Path.CURVE3, Path.CURVE3, Path.CLOSEPOLY]),
fc="none")
fig = plt.figure()
ax = fig.add_subplot(111)
ax.add_patch(pp1)
ax.set_xlim((0, 2))
ax.set_ylim((0, 2))
@image_comparison(baseline_images=['hatch_simplify'], remove_text=True)
def test_hatch():
fig = plt.figure()
ax = fig.add_subplot(111)
ax.add_patch(plt.Rectangle((0, 0), 1, 1, fill=False, hatch="/"))
ax.set_xlim((0.45, 0.55))
ax.set_ylim((0.45, 0.55))
@image_comparison(baseline_images=['fft_peaks'], remove_text=True)
def test_fft_peaks():
fig = plt.figure()
t = np.arange(65536)
ax = fig.add_subplot(111)
p1 = ax.plot(abs(np.fft.fft(np.sin(2*np.pi*.01*t)*np.blackman(len(t)))))
path = p1[0].get_path()
transform = p1[0].get_transform()
path = transform.transform_path(path)
simplified = list(path.iter_segments(simplify=(800, 600)))
assert len(simplified) == 20
@cleanup
def test_start_with_moveto():
# Should be entirely clipped away to a single MOVETO
data = b"""
ZwAAAAku+v9UAQAA+Tj6/z8CAADpQ/r/KAMAANlO+v8QBAAAyVn6//UEAAC6ZPr/2gUAAKpv+v+8
BgAAm3r6/50HAACLhfr/ewgAAHyQ+v9ZCQAAbZv6/zQKAABepvr/DgsAAE+x+v/lCwAAQLz6/7wM
AAAxx/r/kA0AACPS+v9jDgAAFN36/zQPAAAF6Pr/AxAAAPfy+v/QEAAA6f36/5wRAADbCPv/ZhIA
AMwT+/8uEwAAvh77//UTAACwKfv/uRQAAKM0+/98FQAAlT/7/z0WAACHSvv//RYAAHlV+/+7FwAA
bGD7/3cYAABea/v/MRkAAFF2+//pGQAARIH7/6AaAAA3jPv/VRsAACmX+/8JHAAAHKL7/7ocAAAP
rfv/ah0AAAO4+/8YHgAA9sL7/8QeAADpzfv/bx8AANzY+/8YIAAA0OP7/78gAADD7vv/ZCEAALf5
+/8IIgAAqwT8/6kiAACeD/z/SiMAAJIa/P/oIwAAhiX8/4QkAAB6MPz/HyUAAG47/P+4JQAAYkb8
/1AmAABWUfz/5SYAAEpc/P95JwAAPmf8/wsoAAAzcvz/nCgAACd9/P8qKQAAHIj8/7cpAAAQk/z/
QyoAAAWe/P/MKgAA+aj8/1QrAADus/z/2isAAOO+/P9eLAAA2Mn8/+AsAADM1Pz/YS0AAMHf/P/g
LQAAtur8/10uAACr9fz/2C4AAKEA/f9SLwAAlgv9/8ovAACLFv3/QDAAAIAh/f+1MAAAdSz9/ycx
AABrN/3/mDEAAGBC/f8IMgAAVk39/3UyAABLWP3/4TIAAEFj/f9LMwAANm79/7MzAAAsef3/GjQA
ACKE/f9+NAAAF4/9/+E0AAANmv3/QzUAAAOl/f+iNQAA+a/9/wA2AADvuv3/XDYAAOXF/f+2NgAA
29D9/w83AADR2/3/ZjcAAMfm/f+7NwAAvfH9/w44AACz/P3/XzgAAKkH/v+vOAAAnxL+//04AACW
Hf7/SjkAAIwo/v+UOQAAgjP+/905AAB5Pv7/JDoAAG9J/v9pOgAAZVT+/606AABcX/7/7zoAAFJq
/v8vOwAASXX+/207AAA/gP7/qjsAADaL/v/lOwAALZb+/x48AAAjof7/VTwAABqs/v+LPAAAELf+
/788AAAHwv7/8TwAAP7M/v8hPQAA9df+/1A9AADr4v7/fT0AAOLt/v+oPQAA2fj+/9E9AADQA///
+T0AAMYO//8fPgAAvRn//0M+AAC0JP//ZT4AAKsv//+GPgAAojr//6U+AACZRf//wj4AAJBQ///d
PgAAh1v///c+AAB+Zv//Dz8AAHRx//8lPwAAa3z//zk/AABih///TD8AAFmS//9dPwAAUJ3//2w/
AABHqP//ej8AAD6z//+FPwAANb7//48/AAAsyf//lz8AACPU//+ePwAAGt///6M/AAAR6v//pj8A
AAj1//+nPwAA/////w=="""
import base64
if hasattr(base64, 'encodebytes'):
# Python 3 case
decodebytes = base64.decodebytes
else:
# Python 2 case
decodebytes = base64.decodestring
verts = np.fromstring(decodebytes(data), dtype='<i4')
verts = verts.reshape((len(verts) // 2, 2))
path = Path(verts)
segs = path.iter_segments(transforms.IdentityTransform(), clip=(0.0, 0.0, 100.0, 100.0))
segs = list(segs)
assert len(segs) == 1
assert segs[0][1] == Path.MOVETO
@cleanup
@raises(OverflowError)
def test_throw_rendering_complexity_exceeded():
plt.rcParams['path.simplify'] = False
xx = np.arange(200000)
yy = np.random.rand(200000)
yy[1000] = np.nan
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(xx, yy)
try:
fig.savefig(io.BytesIO())
finally:
plt.rcParams['path.simplify'] = True
@image_comparison(baseline_images=['clipper_edge'], remove_text=True)
def test_clipper():
dat = (0, 1, 0, 2, 0, 3, 0, 4, 0, 5)
fig = plt.figure(figsize=(2, 1))
fig.subplots_adjust(left = 0, bottom = 0, wspace = 0, hspace = 0)
ax = fig.add_axes((0, 0, 1.0, 1.0), ylim = (0, 5), autoscale_on = False)
ax.plot(dat)
ax.xaxis.set_major_locator(plt.MultipleLocator(1))
ax.yaxis.set_major_locator(plt.MultipleLocator(1))
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.set_xlim(5, 9)
@image_comparison(baseline_images=['para_equal_perp'], remove_text=True)
def test_para_equal_perp():
x = np.array([0, 1, 2, 1, 0, -1, 0, 1] + [1] * 128)
y = np.array([1, 1, 2, 1, 0, -1, 0, 0] + [0] * 128)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x + 1, y + 1)
ax.plot(x + 1, y + 1, 'ro')
@image_comparison(baseline_images=['clipping_with_nans'])
def test_clipping_with_nans():
x = np.linspace(0, 3.14 * 2, 3000)
y = np.sin(x)
x[::100] = np.nan
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x, y)
ax.set_ylim(-0.25, 0.25)
def test_clipping_full():
p = path.Path([[1e30, 1e30]] * 5)
simplified = list(p.iter_segments(clip=[0, 0, 100, 100]))
assert simplified == []
p = path.Path([[50, 40], [75, 65]], [1, 2])
simplified = list(p.iter_segments(clip=[0, 0, 100, 100]))
assert ([(list(x), y) for x, y in simplified] ==
[([50, 40], 1), ([75, 65], 2)])
p = path.Path([[50, 40]], [1])
simplified = list(p.iter_segments(clip=[0, 0, 100, 100]))
assert ([(list(x), y) for x, y in simplified] ==
[([50, 40], 1)])
if __name__=='__main__':
import nose
nose.runmodule(argv=['-s','--with-doctest'], exit=False)
|
|
"""
work queue specification
"""
import re
import types
from six import iteritems
from pandaserver.taskbuffer.GlobalShares import Share
RESOURCE = 'Resource'
ACTIVE_FUNCTIONS = [RESOURCE]
class WorkQueue(object):
# attributes
_attributes = ('queue_id', 'queue_name', 'queue_type', 'VO', 'queue_share', 'queue_order',
'criteria', 'variables', 'partitionID', 'stretchable', 'status', 'queue_function')
# parameters for selection criteria
_paramsForSelection = ('prodSourceLabel', 'workingGroup', 'processingType', 'coreCount',
'site', 'eventService', 'splitRule', 'campaign')
# correspondence with Global Shares attributes and parameters
_attributes_gs_conversion_dic = {'name': 'queue_name',
'value': 'queue_share',
'prodsourcelabel': 'queue_type',
'queue_id': 'queue_id',
'vo': 'VO'}
_params_gs_conversion_dic = {'prodsourcelabel': 'prodSourceLabel',
'workinggroup': 'workingGroup',
'campaign': 'campaign',
'processingtype': 'processingType'}
def __init__(self):
"""
Constructor
"""
# install attributes
for attr in self._attributes:
setattr(self, attr, None)
# global share is by default false
self.is_global_share = False
# throttled is set to True by default. Some Global Shares will overwrite it to False
self.throttled = True
def __str__(self):
"""
String representation of a workqueue
:return: string with the representation of the work queue
"""
return str(self.queue_name)
def dump(self):
"""
Creates a human-friendly string with the work queue information
:return: string representation of the work queue
"""
dump_str = 'id:{0} order:{1} name:{2} share:{3} '.format(self.queue_id, self.queue_order,
self.queue_name, self.queue_share)
# normal queue
if self.is_global_share:
dump_str += 'gs_name:{0} (global share)'.format(self.queue_name)
else:
dump_str += 'criteria:{0} var:{1} eval:{2}'.format(self.criteria, str(self.variables), self.evalString)
return dump_str
def getID(self):
"""
get ID
:return: returns a list with the ID of the work queue
"""
return self.queue_id
def pack(self, values):
"""
Packs tuple into the object
:param values: list with the values in the order declared in the attributes section
:return: nothing
"""
for i, attr in enumerate(self._attributes):
val = values[i]
setattr(self, attr, val)
# disallow negative share
if self.queue_share is not None and self.queue_share < 0:
self.queue_share = 0
# convert variables string to a map of bind-variables
tmp_map = {}
try:
for item in self.variables.split(','):
# look for key: value
item = item.strip()
items = item.split(':')
if len(items) != 2:
continue
# add
tmp_map[':%s' % items[0]] = items[1]
except Exception:
pass
# assign map
self.variables = tmp_map
# make a python statement for eval
if self.criteria in ['', None]:
# catch all
self.evalString = 'True'
else:
tmp_eval_str = self.criteria
# replace IN/OR/AND to in/or/and
            tmp_eval_str = re.sub(' IN ', ' in ', tmp_eval_str, flags=re.I)
            tmp_eval_str = re.sub(' OR ', ' or ', tmp_eval_str, flags=re.I)
            tmp_eval_str = re.sub(' AND ', ' and ', tmp_eval_str, flags=re.I)
            # replace = to ==
            tmp_eval_str = tmp_eval_str.replace('=', '==')
            # replace LIKE
            tmp_eval_str = re.sub(r'(?P<var>[^ \(]+)\s+LIKE\s+(?P<pat>[^ \(]+)',
                                  "re.search(\g<pat>,\g<var>,re.I) is not None",
                                  tmp_eval_str, flags=re.I)
            # NULL
            tmp_eval_str = re.sub(' IS NULL', '==None', tmp_eval_str)
            tmp_eval_str = re.sub(' IS NOT NULL', "!=None", tmp_eval_str)
            # replace NOT to not
            tmp_eval_str = re.sub(' NOT ', ' not ', tmp_eval_str, flags=re.I)
            # normalise parameter-name casing (re.sub's fourth positional argument
            # is count, so the IGNORECASE flag has to be passed by keyword)
            for tmp_param in self._paramsForSelection:
                tmp_eval_str = re.sub(tmp_param, tmp_param, tmp_eval_str, flags=re.I)
# replace bind-variables
for tmp_key, tmp_val in iteritems(self.variables):
if '%' in tmp_val:
# wildcard
tmp_val = tmp_val.replace('%', '.*')
tmp_val = "'^%s$'" % tmp_val
else:
# normal variable
tmp_val = "'%s'" % tmp_val
tmp_eval_str = tmp_eval_str.replace(tmp_key, tmp_val)
# assign
self.evalString = tmp_eval_str
def pack_gs(self, gshare):
"""
Packs tuple into the object
:param gshare: global share
:return: nothing
"""
# the object becomes a global share wq
self.is_global_share = True
try:
tmp_map = {}
for i, attr in enumerate(gshare._attributes):
# global share attributes can be mapped to a wq attribute(1), to a wq param(2), or to none of both
# 1. if the gs attribute is mapped to a wq attribute, do a get and a set
if attr in self._attributes_gs_conversion_dic:
attr_wq = self._attributes_gs_conversion_dic[attr]
val = getattr(gshare, attr)
setattr(self, attr_wq, val)
# 2. if the gs attribute is mapped to a wq param, add it to the bind variables dictionary
# Probably we don't need this, we just care about matching the gs name
if attr in self._params_gs_conversion_dic:
param_wq = self._params_gs_conversion_dic[attr]
val = getattr(gshare, attr)
tmp_map[':{0}'.format(param_wq)] = val
# 3. Special case for throttled. This is defined additionally, since it's not present in WQs
if attr == 'throttled' and gshare.throttled == 'N':
self.throttled = False
self.variables = tmp_map
except Exception:
pass
# evaluate in python
def evaluate(self, param_map):
# only active queues are evaluated
if self.isActive():
# normal queue
            # expand parameters into the global namespace used by the exec/eval below
for tmp_param_key, tmp_param_val in iteritems(param_map):
if isinstance(tmp_param_val, str):
# add quotes for string
exec('{0}="{1}"'.format(tmp_param_key, tmp_param_val), globals())
else:
exec('{0}={1}'.format(tmp_param_key, tmp_param_val), globals())
# add default parameters if missing
for tmp_param in self._paramsForSelection:
if tmp_param not in param_map:
exec('{0}=None'.format(tmp_param), globals())
# evaluate
exec("ret_var = {0}".format(self.evalString), globals())
return self, ret_var
# return False
return self, False
# check if active
def isActive(self):
if self.status != 'inactive': # and self.queue_function in ACTIVE_FUNCTIONS:
return True
return False
# check if its eligible after global share alignment
def isAligned(self):
if self.queue_function == RESOURCE or self.is_global_share:
return True
return False
# check if there is workload
def hasWorkload(self, queue_list):
# inactive
if not self.isActive():
return False
if self.queue_id in queue_list:
return True
# not found
return False
# return column names for INSERT
def column_names(cls):
ret = ""
for attr in cls._attributes:
if ret != "":
ret += ','
ret += attr
return ret
column_names = classmethod(column_names)
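# Illustrative usage sketch: pack() expects a tuple in the same order as
# _attributes, rewrites the SQL-like criteria into self.evalString, and
# evaluate() executes that string against a parameter map. The queue definition
# and parameter values below are made up for illustration.
#
#   wq = WorkQueue()
#   wq.pack((1, 'managed_queue', 'managed', 'atlas', 100, 1,
#            'prodSourceLabel=:lab AND workingGroup IS NOT NULL',
#            'lab:managed', None, 'N', 'active', RESOURCE))
#   # wq.evalString is now roughly:  prodSourceLabel=='managed' and workingGroup!=None
#   _, matched = wq.evaluate({'prodSourceLabel': 'managed', 'workingGroup': 'AP_Higgs'})
#   # matched -> True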
|
|
# encoding=utf8
"""
Module for computing various vector and matrix norms. Vector norms define a measure
of distance for vectors. Matrix norms are measures of distance in the space of the
matrix. The most common vector norm is the 2-norm, the equivalent matrix norm
is the Frobenius norm.
"""
import numpy as np
import pandas as pd
def norm(x, order=None):
r"""
Interface function for computing vector and matrix norms. The default method is
    the 2-norm for vectors and the Frobenius norm for matrices.
The following vector norm calculations can be performed with the corresponding
argument to the order parameter.
================= ===========
order vector norm
================= ===========
None 2-norm
2 or '2' 2-norm
1 or '1' 1-norm
np.inf or 'inf' inf-norm
-np.inf or '-inf' -inf-norm
other p-norm
================= ===========
The following matrix norm calculations are also available.
=============== ==============
order matrix norm
=============== ==============
None Frobenius norm
'fro' Frobenius norm
'frobenius' Frobenius norm
'1' or 1 1-norm
np.inf or 'inf' inf-norm
=============== ==============
Parameters
----------
x : array_like
Accepts a numpy array, nested list, dictionary, or
pandas DataFrame. The private function _create_array
is called to create a copy of x as a numpy array.
order : int or str, optional
Defaults to the 2-norm computation for vectors and the
Frobenius norm for matrices. Please refer to the above
tables for the available norm calculations.
Returns
-------
v : float
vector or matrix norm of the input
Notes
-----
Please see the respective implementations of the vector and matrix norms
in the _VectorNorm class.
Examples
--------
>>> v = np.array([5, 2, 1])
>>> norm(v)
5.477225575051661
>>> norm(v, 2)
5.477225575051661
>>> norm([5, 2, 1], 'inf')
5.0
>>> m = np.array([[2, 1], [1, 2]])
>>> norm(m)
3.1622776601683795
>>> b = np.array([[5, -4, 2], [-1, 2, 3], [-2, 1, 0]])
>>> norm(b, '1')
8
References
----------
Golub, G., & Van Loan, C. (2013). Matrix computations (3rd ed.). Baltimore (MD): Johns Hopkins U.P.
"""
vec_norm = {
None: 'norm2',
2: 'norm2',
'2': 'norm2',
'1': 'norm1',
1: 'norm1',
'inf': 'norminf',
np.inf: 'norminf',
'-inf': 'norm_neg_inf',
-np.inf: 'norm_neg_inf'
}
matrix_norm = {
None: 'frobenius',
'fro': 'frobenius',
'frobenius': 'frobenius',
1: 'norm1',
'1': 'norm1',
np.inf: 'norminf',
'inf': 'norminf'
}
x = np.array(x)
if x.ndim == 1:
x = _VectorNorm(x)
try:
v = getattr(x, vec_norm[order])
except KeyError:
if isinstance(order, str):
try:
order = int(order)
except ValueError:
print('unknown norm, ' + str(order) + ' given.')
raise
return x.pnorm(order)
else:
x = _MatrixNorm(x)
        try:
            v = getattr(x, matrix_norm[order])
        except KeyError:
            print('unknown norm, ' + str(order) + ' given.')
            raise
return float(v())
class _VectorNorm(object):
r"""
Class containing implementations of vector norms used in the
front-end interface function 'norm'.
Parameters
----------
x : np.ndarray, array_like
Conversion is attempted if passed matrix representation is not a numpy array.
Methods
-------
pnorm()
Calculates the p-norm of a vector.
norm1()
Computes the 1-norm of a vector.
norminf()
Implements the inf-norm computation of a vector.
norm2()
Implements the 2-norm computation.
Notes
-----
Given a vector space :math:`\mathbb{R}^n`, a vector norm is defined as a function :math:`f: \mathbb{R}^n
\rightarrow \mathbb{R}`. Norms are represented by double-bar notation, for example, a norm :math:`x` would be
denoted :math:`||x||`. Vector norms have the following properties:
    - :math:`||x|| > 0` for a vector :math:`x \in \mathbb{R}^n`
+ :math:`||x|| = 0` if the vector :math:`x = 0`
- :math:`||\alpha x|| = |\alpha| ||x||` for a vector :math:`x \in \mathbb{R}^n` and a scalar
:math:`\alpha \in \mathbb{R}`
- :math:`||x + y|| \leq ||x|| + ||y||` for vectors :math:`x,y \in \mathbb{R}^n`
References
----------
Golub, G., & Van Loan, C. (2013). Matrix computations (3rd ed.). Baltimore (MD): Johns Hopkins U.P.
"""
def __init__(self, x):
if isinstance(x, pd.DataFrame):
self.x = x.values
elif isinstance(x, np.ndarray) is False:
self.x = np.array(x)
else:
self.x = x
self.order = 'norm2'
def norm2(self):
r"""
Computes the 2-norm of a vector.
Returns
-------
l2 : float
The 2-norm of the vector
Notes
-----
The 2-norm of a vector :math:`x` is defined as:
.. math::
||x||_2 = \sqrt{x_1^2 + x_2^2 + \cdots + x_n^2} = \sqrt{x^T x}
References
----------
Golub, G., & Van Loan, C. (2013). Matrix computations (3rd ed.). Baltimore (MD): Johns Hopkins U.P.
"""
l2 = np.sqrt(np.sum(np.power(self.x, 2)))
return l2
def pnorm(self, p):
r"""
Calculates the p-norm of a vector.
Parameters
----------
p : int
Used in computing the p-norm. This should only be set
when calculating the pnorm of a vector is desired.
Returns
-------
pn : float
The p-norm of the vector.
Notes
-----
The p-norm, which is considered a class of vector norms is defined as:
.. math::
||x||_p = \sqrt[p]{|x_1|^p + |x_2|^p + \cdots + |x_n|^p} \qquad p \geq 1
References
----------
Golub, G., & Van Loan, C. (2013). Matrix computations (3rd ed.). Baltimore (MD): Johns Hopkins U.P.
"""
if p != np.floor(p):
p = np.floor(p)
if np.iscomplex(p) or p < 1:
raise ValueError('p must be at least 1 and real')
pn = np.sum(np.absolute(self.x) ** p) ** (1. / p)
return pn
def norm1(self):
r"""
Calculates the 1-norm of a vector.
Returns
-------
l1 : float
The 1-norm of the vector.
Notes
-----
The 1-norm of a vector :math:`x` is defined as:
.. math::
||x||_1 = |x_1| + |x_2| + \cdots + |x_n|
References
----------
Golub, G., & Van Loan, C. (2013). Matrix computations (3rd ed.). Baltimore (MD): Johns Hopkins U.P.
"""
l1 = np.sum(np.absolute(self.x))
return l1
def norminf(self):
r"""
Calculates the :math:`\inf` norm of a vector.
Returns
-------
ninf : float
The :math:`\inf` of the vector.
Notes
-----
The :math:`\inf` norm of a vector :math:`x` is defined as:
.. math::
||x||_\inf = max_{1 \leq i \leq n} |x_i|
References
----------
Golub, G., & Van Loan, C. (2013). Matrix computations (3rd ed.). Baltimore (MD): Johns Hopkins U.P.
"""
ninf = np.max(np.absolute(self.x))
return ninf
def norm_neg_inf(self):
neg_inf = np.min(np.absolute(self.x))
return neg_inf
class _MatrixNorm(object):
r"""
Class containing implementations of matrix norms for front-end function `norm`.
Attributes
----------
order : string
Defines the default norm calculation used by the front-end norm function `norm`.
n : int
Row-wise length of array passed in initialization of class
m : int
Column-wise length of array passed in initialization of class
Parameters
----------
x : np.ndarray, array_like
Conversion is attempted if passed matrix representation is not a numpy array.
Methods
-------
frobenius()
Calculates the Frobenius norm of a matrix.
norm1()
Calculates the 1-norm of a matrix.
norminf()
Calculates the inf-norm of a matrix.
Notes
-----
Matrix norms are an extension of vector norms to matrices and are used to define a
measure of distance on the space of a matrix. More specifically, a matrix norm is
defined as a function :math:`f: \mathbb{R}^{m \times n} \rightarrow \mathbb{R}`. The double bar
notation used to denote vector norms are also used for matrix norms. The properties
of a matrix norm are similar to those of a vector norm.
- :math:`||A|| \geq 0` for any matrix :math:`A \in \mathbb{R}^{m \times n}`
+ :math:`||A|| = 0` if the matrix :math:`A = 0`
- :math:`||\alpha A|| = |\alpha| ||A||` for a :math:`m \times n` matrix and scalar :math:`\alpha`
- :math:`||A + B|| \leq ||A|| + ||B||` for :math:`m \times n` matrices :math:`A` and :math:`B`
References
----------
Golub, G., & Van Loan, C. (2013). Matrix computations (3rd ed.). Baltimore (MD): Johns Hopkins U.P.
"""
def __init__(self, x):
if isinstance(x, pd.DataFrame):
self.x = x.values
elif isinstance(x, np.ndarray) is False:
self.x = np.array(x)
else:
self.x = x
self.order = 'frobenius'
self.n, self.m = self.x.shape
def frobenius(self):
r"""
Calculates the Frobenius norm of a matrix.
Returns
-------
f : float
The Frobenius norm of a matrix
Notes
-----
The Frobenius norm is one of the most commonly employed matrix norms and is the default
norm calculation of the front-end function `norm` as designated by the class
attribute `method`. The Frobenius norm is defined as:
.. math::
            ||A||_F = \sqrt{\sum_{i=1}^m \sum_{j=1}^n |a_{ij}|^2}
References
----------
Golub, G., & Van Loan, C. (2013). Matrix computations (3rd ed.). Baltimore (MD): Johns Hopkins U.P.
"""
f = 0
        for i in np.arange(self.n):
            for j in np.arange(self.m):
                f = f + np.sum(np.power(np.absolute(self.x[i, j]), 2))
return np.sqrt(f)
def norm1(self):
r"""
Calculates the 1-norm of a matrix.
Returns
-------
v : float
The 1-norm of the matrix
Notes
-----
The matrix 1-norm is defined as the maximum absolute column sum of a matrix.
.. math::
||A||_1 = \underset{1 \leq j \leq n}{max}\left( \sum^n_{i=1} |a_{ij}| \right)
References
----------
Golub, G., & Van Loan, C. (2013). Matrix computations (3rd ed.). Baltimore (MD): Johns Hopkins U.P.
"""
colsums = []
for i in np.arange(self.m):
v = np.sum(np.absolute(self.x[:, i]))
colsums.append(v)
return np.max(colsums)
def norminf(self):
r"""
Calculates the inf-norm of a matrix.
Returns
-------
v : float
The inf-norm of the matrix
Notes
-----
The inf-norm of a matrix is defined as the maximum absolute sum of the matrix rows.
.. math::
||A||_\inf = \underset{1 \leq i \leq n}{max} \left(\sum^n_{j=1} |a_{ij}| \right)
References
----------
Golub, G., & Van Loan, C. (2013). Matrix computations (3rd ed.). Baltimore (MD): Johns Hopkins U.P.
"""
rowsums = []
for i in np.arange(self.n):
v = np.sum(np.absolute(self.x[i, :]))
rowsums.append(v)
return np.max(rowsums)
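# Quick sanity check (illustrative sketch, not part of the library API): the
# implementations above should agree with NumPy's reference routines for the
# norms they share. Running this module directly executes the comparison.
if __name__ == '__main__':
    b = np.array([[5., -4., 2.], [-1., 2., 3.], [-2., 1., 0.]])
    v = np.array([5., 2., 1.])
    # matrix norms: Frobenius, 1-norm (max column sum), inf-norm (max row sum)
    assert np.isclose(norm(b), np.linalg.norm(b))
    assert np.isclose(norm(b, '1'), np.linalg.norm(b, 1))
    assert np.isclose(norm(b, 'inf'), np.linalg.norm(b, np.inf))
    # vector norms: 2-norm, 1-norm, inf-norm
    assert np.isclose(norm(v), np.linalg.norm(v))
    assert np.isclose(norm(v, 1), np.linalg.norm(v, 1))
    assert np.isclose(norm(v, 'inf'), np.linalg.norm(v, np.inf))
    print('all norm sanity checks passed')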
|
|
#!/usr/bin/env python
"""Convert SeqAn profiling information into PDF graphic.
USAGE: profile2pdf.py <program.profile.txt> <out.pdf>
"""
from __future__ import with_statement
__author__ = 'Manuel Holtgrewe <[email protected]>'
import math
import sys
import cairo
IGNORE_LIMIT = 0.00001
# Automatic legend colors.
COLORS = [
"#3366FF",
"#FF33CC",
"#FF6633",
"#CCFF33",
"#33FF66",
"#33CCFF",
"#002EB8",
"#B88A00",
"#CC33FF",
"#FF3366",
"#FFCC33",
"#66FF33",
"#33FFCC",
"#003DF5",
"#F5B800"
]
def htmlColorToRgb(colorstring):
colorstring = colorstring.strip()
if colorstring[0] == '#': colorstring = colorstring[1:]
if len(colorstring) != 6:
raise ValueError, "input #%s is not in #RRGGBB format" % colorstring
r, g, b = colorstring[:2], colorstring[2:4], colorstring[4:]
r, g, b = [int(n, 16) for n in (r, g, b)]
return (r / 255.0, g / 255.0, b / 255.0)
COLORS = map(htmlColorToRgb, COLORS)
class Meta(object):
def __init__(self, beginTimestamp, endTimestamp):
self.beginTimestamp = beginTimestamp
self.endTimestamp = endTimestamp
class JobType(object):
"""Describe a job type."""
def __init__(self, identifier, shortName, longName=None, color=None):
self.identifier = identifier
self.shortName = shortName
self.longName = longName or shortName
self.color = color or COLORS[identifier % len(COLORS)]
@classmethod
def fromString(klass, s):
columns = s.split('\t')
if columns[0] != '@EVENT':
            print >>sys.stderr, 'First column\'s value was not "@EVENT"'
sys.exit(1)
identifier = int(columns[1])
shortName = columns[2]
longName = columns[3]
# Read in optional arguments.
color = None
for col in columns[4:]:
key, value = col.split(':')
if key == 'COLOR':
color = value
return JobType(identifier, shortName, longName=longName, color=color)
class Event(object):
"""Describes an event."""
def __init__(self, threadId, isBegin, jobType, timestamp):
self.threadId = threadId
self.isBegin = isBegin
self.jobType = jobType
self.timestamp = timestamp
@classmethod
def fromString(klass, s):
columns = s.split('\t')
threadId = int(columns[0])
if columns[1] not in ['BEGIN', 'END']:
print >>sys.stderr, 'Second column\'s value was not BEGIN or END'
sys.exit(1)
isBegin = columns[1] == 'BEGIN'
jobType = int(columns[2])
timestamp = float(columns[3])
return Event(threadId, isBegin, jobType, timestamp)
class Section(object):
"""Describe a section in the program run."""
def __init__(self, jobType, beginTime, endTime, parent=None):
self.children = []
self.jobType = jobType
self.beginTime = beginTime
self.endTime = endTime
self.parent = parent
if self.parent:
self.parent.children.append(self)
def buildSections(events):
forest = []
sections = []
stack = []
for e in events:
if e.isBegin:
if not stack:
section = Section(e.jobType, e.timestamp, endTime=None, parent=None)
forest.append(section)
else:
section = Section(e.jobType, e.timestamp, endTime=None, parent=stack[-1])
sections.append(section)
stack.append(section)
        else:  # not e.isBegin, i.e. an END event
assert stack
assert stack[-1].jobType == e.jobType
section = stack[-1]
section.endTime = e.timestamp
stack.pop()
return forest, sections
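# Worked example (illustrative): buildSections() pairs BEGIN/END events into
# nested Section objects. For a single thread emitting
#   BEGIN A @ 0.0, BEGIN B @ 0.5, END B @ 1.0, END A @ 2.0
# the returned forest holds one root section (A, 0.0-2.0) whose only child is
# (B, 0.5-1.0), and `sections` is the flat list [A, B] in BEGIN order.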
def printSection(section, jobTypes, offset, level=0):
span = section.endTime - section.beginTime
print '%s%s %f (%f to %f)' % ('\t' * level, jobTypes[section.jobType].shortName, span, section.beginTime - offset, section.endTime - offset)
for s in section.children:
printSection(s, jobTypes, offset, level+1)
def loadFile(path):
with open(path, 'r') as f:
line = f.readline()
if line.strip() != '@SQN:PROFILE':
print >>sys.stderr, 'Invalid file, does not start with "@SQN:PROFILE"'
sys.exit(1)
line = f.readline()
if not line.startswith('@TIME'):
print >>sys.stderr, 'Invalid file, second line does not start with "@TIME"'
sys.exit(1)
meta = Meta(*map(float, line.strip().split('\t')[1:]))
# Load job types.
jobTypes = []
while True:
line = f.readline()
if not line or not line.startswith('@EVENT'):
break # End of file, no more job types.
jobTypes.append(JobType.fromString(line.strip()))
# Events.
events = []
while True:
if not line:
break
events.append(Event.fromString(line.strip()))
line = f.readline()
# Remove redundant entries.
if False:
events2 = []
for i in range(len(events)):
if not events2 or not i + 1 < len(events):
events2.append(events[i])
continue
while True:
if not events2[-1].isBegin and events[i].isBegin and events[-1].jobType == events[i].jobType and events[-1].threadId == events[i].threadId and events[-1].timestamp - events[i].timestamp < 0.0001 and not events[i + 1].isBegin and events[i].jobType == events[i + 1].jobType and events[i].threadId == events[i + 1].threadId:
i += 2
else:
break
events2.append(events[i])
return meta, jobTypes, events
POINTS_SPACE_OUTER = 10
POINTS_PER_SECOND = 10
POINTS_BAR_HEIGHT = 5
POINTS_SPACE = 2
POINTS_KEY_ENTRY_HEIGHT = 5;
def drawBox(cr, jobTypes, section, offset, threadId, level):
assert level < 10
x = POINTS_SPACE_OUTER + (section.beginTime - offset) * POINTS_PER_SECOND
y = POINTS_SPACE_OUTER + POINTS_SPACE * threadId + POINTS_BAR_HEIGHT * threadId + level * 0.1 * POINTS_BAR_HEIGHT
width = (section.endTime - section.beginTime) * POINTS_PER_SECOND
height = (1.0 - 0.1 * level) * POINTS_BAR_HEIGHT
#print 'rectangle(%s, %s, %s, %s), level = %s' % (x, y, width, height, level)
cr.set_source_rgb(*jobTypes[section.jobType].color)
cr.rectangle(x, y, width, height)
cr.fill()
cr.set_line_width(0.2)
cr.set_source_rgb(0, 0, 0)
cr.move_to(x + width, y)
cr.line_to(x + width, y + height)
cr.stroke()
def drawBoxesForSection(cr, jobTypes, section, offset, threadId, level=0):
drawBox(cr, jobTypes, section, offset, threadId, level)
for s in section.children:
drawBoxesForSection(cr, jobTypes, s, offset, threadId, level + 1)
def drawKey(cr, jobTypes, threadCount):
for i, jobType in enumerate(jobTypes):
x = POINTS_SPACE_OUTER
y = POINTS_BAR_HEIGHT * threadCount + POINTS_SPACE_OUTER + POINTS_SPACE * (threadCount + 1) + POINTS_KEY_ENTRY_HEIGHT * i
width = POINTS_KEY_ENTRY_HEIGHT * 2
height = POINTS_KEY_ENTRY_HEIGHT
cr.set_source_rgb(*jobTypes[i].color)
cr.rectangle(x, y, width, height)
cr.fill()
cr.set_source_rgb(0, 0, 0)
cr.set_font_size(POINTS_KEY_ENTRY_HEIGHT)
cr.move_to(x + 3 * POINTS_KEY_ENTRY_HEIGHT, y + 0.8 * POINTS_KEY_ENTRY_HEIGHT)
cr.show_text(jobType.shortName)
def drawScale(cr, totalBegin, totalEnd, threadCount):
cr.set_line_width(0.2)
cr.set_font_size(POINTS_KEY_ENTRY_HEIGHT * 0.5)
for i in range(0, int(totalEnd-totalBegin) + 1):
# Draw ticks at top.
cr.set_source_rgb(0, 0, 0)
cr.move_to(POINTS_SPACE_OUTER + POINTS_PER_SECOND * i, POINTS_SPACE_OUTER)
if i % 5: # small tick
cr.line_to(POINTS_SPACE_OUTER + POINTS_PER_SECOND * i, 0.9 * POINTS_SPACE_OUTER);
else: # large tick
cr.line_to(POINTS_SPACE_OUTER + POINTS_PER_SECOND * i, 0.8 * POINTS_SPACE_OUTER);
cr.stroke()
# Draw grid.
cr.set_source_rgba(.3, .3, .3, 0.5)
cr.move_to(POINTS_SPACE_OUTER + POINTS_PER_SECOND * i, POINTS_SPACE_OUTER)
cr.line_to(POINTS_SPACE_OUTER + POINTS_PER_SECOND * i, POINTS_SPACE_OUTER + POINTS_SPACE * (threadCount - 1) + POINTS_BAR_HEIGHT * threadCount);
cr.stroke()
if not i % 5: # at large ticks
# Draw seconds display.
cr.set_source_rgb(0, 0, 0)
extents = cr.text_extents(str(i))
cr.move_to(POINTS_SPACE_OUTER + POINTS_PER_SECOND * i - extents[2] / 2.0, 0.75 * POINTS_SPACE_OUTER);
cr.show_text(str(i))
def breakDownTimesHelper(counter, section):
counter[section.jobType] = counter.get(section.jobType, 0) + section.endTime - section.beginTime
if section.parent:
counter[section.parent.jobType] -= section.endTime - section.beginTime
for child in section.children:
breakDownTimesHelper(counter, child)
def breakDownTimes(jobTypes, forests):
for threadId in sorted(forests.keys()):
print 'Breakdown for thread #%d' % threadId
counter = {}
for section in forests[threadId]:
breakDownTimesHelper(counter, section)
for jobType in jobTypes:
print ' %20s %10.5f' % (jobType.shortName, counter.get(jobType.identifier, 0))
def createDiagram(meta, jobTypes, forests, path):
totalBegin = meta.beginTimestamp
totalEnd = meta.endTimestamp
totalTime = totalEnd - totalBegin
# Create Cairo PDF surface.
width = math.ceil(totalTime) * POINTS_PER_SECOND + POINTS_SPACE + 2 * POINTS_SPACE_OUTER
height = POINTS_BAR_HEIGHT * len(forests) + POINTS_SPACE_OUTER + POINTS_SPACE * (len(forests) + 2) + POINTS_KEY_ENTRY_HEIGHT * len(jobTypes)
cs = cairo.PDFSurface(path, width, height)
cr = cairo.Context(cs)
for threadId, forest in forests.iteritems():
for section in forest:
drawBoxesForSection(cr, jobTypes, section, totalBegin, threadId)
drawKey(cr, jobTypes, len(forests))
drawScale(cr, totalBegin, totalEnd, len(forests))
cr.show_page()
cs.finish()
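# Worked example (illustrative): with 4 threads, 3 job types and a 12.3 s run,
# the PDF surface created above would measure
#   width  = ceil(12.3) * POINTS_PER_SECOND + POINTS_SPACE + 2 * POINTS_SPACE_OUTER
#          = 13 * 10 + 2 + 20 = 152 points
#   height = POINTS_BAR_HEIGHT * 4 + POINTS_SPACE_OUTER + POINTS_SPACE * (4 + 2)
#            + POINTS_KEY_ENTRY_HEIGHT * 3
#          = 20 + 10 + 12 + 15 = 57 points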
def main(args):
if len(args) != 3:
print >>sys.stderr, 'Invalid number of arguments!'
print >>sys.stderr, 'USAGE: profile2pdf.py <program.profile.txt> <out.pdf>'
return 1
# Load input file.
print >>sys.stderr, 'Loading file', args[1]
meta, jobTypes, events = loadFile(args[1])
# Partition events by thread id.
print >>sys.stderr, 'Partition events'
eventsForThread = {}
for e in events:
eventsForThread.setdefault(e.threadId, []).append(e)
# Build sections list and forest for each thread.
print >>sys.stderr, 'Build sections'
forests = {}
sections = {}
for threadId in sorted(eventsForThread.keys()):
events = eventsForThread[threadId]
f, s = buildSections(events)
forests[threadId], sections[threadId] = f, s
# Print sections (debug only):
#print 'SECTIONS, threadId =', threadId
#for x in f:
# printSection(x, jobTypes, s[0].beginTime)
# Build diagram.
print >>sys.stderr, 'Create diagram'
createDiagram(meta, jobTypes, forests, args[2])
# Show how much time each thread spent in each job type.
breakDownTimes(jobTypes, forests)
print 'TOTAL TIME: %f s' % (meta.endTimestamp - meta.beginTimestamp)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
|
###############################################################################
#
# Copyright (c) 2011 Ruslan Spivak
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
__author__ = 'Ruslan Spivak <[email protected]>'
class Node(object):
def __init__(self, children=None):
self._children_list = [] if children is None else children
def __iter__(self):
for child in self.children():
if child is not None:
yield child
def children(self):
return self._children_list
def to_ecma(self):
# Can't import at module level as ecmavisitor depends
# on ast module...
from slimit.visitors.ecmavisitor import ECMAVisitor
visitor = ECMAVisitor()
return visitor.visit(self)
class Program(Node):
pass
class Block(Node):
pass
class Boolean(Node):
def __init__(self, value):
self.value = value
def children(self):
return []
class Null(Node):
def __init__(self, value):
self.value = value
def children(self):
return []
class Number(Node):
def __init__(self, value):
self.value = value
def children(self):
return []
class Identifier(Node):
def __init__(self, value):
self.value = value
def children(self):
return []
class String(Node):
def __init__(self, value):
self.value = value
def children(self):
return []
class Regex(Node):
def __init__(self, value):
self.value = value
def children(self):
return []
class Array(Node):
def __init__(self, items):
self.items = items
def children(self):
return self.items
class Object(Node):
def __init__(self, properties=None):
self.properties = [] if properties is None else properties
def children(self):
return self.properties
class NewExpr(Node):
def __init__(self, identifier, args=None):
self.identifier = identifier
self.args = [] if args is None else args
def children(self):
return [self.identifier, self.args]
class FunctionCall(Node):
def __init__(self, identifier, args=None):
self.identifier = identifier
self.args = [] if args is None else args
def children(self):
return [self.identifier] + self.args
class BracketAccessor(Node):
def __init__(self, node, expr):
self.node = node
self.expr = expr
def children(self):
return [self.node, self.expr]
class DotAccessor(Node):
def __init__(self, node, identifier):
self.node = node
self.identifier = identifier
def children(self):
return [self.node, self.identifier]
class Assign(Node):
def __init__(self, op, left, right):
self.op = op
self.left = left
self.right = right
def children(self):
return [self.left, self.right]
class GetPropAssign(Node):
def __init__(self, prop_name, elements):
"""elements - function body"""
self.prop_name = prop_name
self.elements = elements
def children(self):
return [self.prop_name] + self.elements
class SetPropAssign(Node):
def __init__(self, prop_name, parameters, elements):
"""elements - function body"""
self.prop_name = prop_name
self.parameters = parameters
self.elements = elements
def children(self):
return [self.prop_name] + self.parameters + self.elements
class VarStatement(Node):
pass
class VarDecl(Node):
def __init__(self, identifier, initializer=None):
self.identifier = identifier
self.identifier._mangle_candidate = True
self.initializer = initializer
def children(self):
return [self.identifier, self.initializer]
class UnaryOp(Node):
def __init__(self, op, value, postfix=False):
self.op = op
self.value = value
self.postfix = postfix
def children(self):
return [self.value]
class BinOp(Node):
def __init__(self, op, left, right):
self.op = op
self.left = left
self.right = right
def children(self):
return [self.left, self.right]
class Conditional(Node):
"""Conditional Operator ( ? : )"""
def __init__(self, predicate, consequent, alternative):
self.predicate = predicate
self.consequent = consequent
self.alternative = alternative
def children(self):
return [self.predicate, self.consequent, self.alternative]
class If(Node):
def __init__(self, predicate, consequent, alternative=None):
self.predicate = predicate
self.consequent = consequent
self.alternative = alternative
def children(self):
return [self.predicate, self.consequent, self.alternative]
class DoWhile(Node):
def __init__(self, predicate, statement):
self.predicate = predicate
self.statement = statement
def children(self):
return [self.predicate, self.statement]
class While(Node):
def __init__(self, predicate, statement):
self.predicate = predicate
self.statement = statement
def children(self):
return [self.predicate, self.statement]
class For(Node):
def __init__(self, init, cond, count, statement):
self.init = init
self.cond = cond
self.count = count
self.statement = statement
def children(self):
return [self.init, self.cond, self.count, self.statement]
class ForIn(Node):
def __init__(self, item, iterable, statement):
self.item = item
self.iterable = iterable
self.statement = statement
def children(self):
return [self.item, self.iterable, self.statement]
class Continue(Node):
def __init__(self, identifier=None):
self.identifier = identifier
def children(self):
return [self.identifier]
class Break(Node):
def __init__(self, identifier=None):
self.identifier = identifier
def children(self):
return [self.identifier]
class Return(Node):
def __init__(self, expr=None):
self.expr = expr
def children(self):
return [self.expr]
class With(Node):
def __init__(self, expr, statement):
self.expr = expr
self.statement = statement
def children(self):
return [self.expr, self.statement]
class Switch(Node):
def __init__(self, expr, cases, default=None):
self.expr = expr
self.cases = cases
self.default = default
def children(self):
return [self.expr] + self.cases + [self.default]
class Case(Node):
def __init__(self, expr, elements):
self.expr = expr
self.elements = elements if elements is not None else []
def children(self):
return [self.expr] + self.elements
class Default(Node):
def __init__(self, elements):
self.elements = elements if elements is not None else []
def children(self):
return self.elements
class Label(Node):
def __init__(self, identifier, statement):
self.identifier = identifier
self.statement = statement
def children(self):
return [self.identifier, self.statement]
class Throw(Node):
def __init__(self, expr):
self.expr = expr
def children(self):
return [self.expr]
class Try(Node):
def __init__(self, statements, catch=None, fin=None):
self.statements = statements
self.catch = catch
self.fin = fin
def children(self):
return [self.statements] + [self.catch, self.fin]
class Catch(Node):
def __init__(self, identifier, elements):
self.identifier = identifier
# CATCH identifiers are subject to name mangling. we need to mark them.
self.identifier._mangle_candidate = True
self.elements = elements
def children(self):
return [self.identifier, self.elements]
class Finally(Node):
def __init__(self, elements):
self.elements = elements
def children(self):
return self.elements
class Debugger(Node):
def __init__(self, value):
self.value = value
def children(self):
return []
class FuncBase(Node):
def __init__(self, identifier, parameters, elements):
self.identifier = identifier
self.parameters = parameters if parameters is not None else []
self.elements = elements if elements is not None else []
self._init_ids()
def _init_ids(self):
# function declaration/expression name and parameters are identifiers
# and therefore are subject to name mangling. we need to mark them.
if self.identifier is not None:
self.identifier._mangle_candidate = True
for param in self.parameters:
param._mangle_candidate = True
def children(self):
return [self.identifier] + self.parameters + self.elements
class FuncDecl(FuncBase):
pass
# The only difference is that function expression might not have an identifier
class FuncExpr(FuncBase):
pass
class Comma(Node):
def __init__(self, left, right):
self.left = left
self.right = right
def children(self):
return [self.left, self.right]
class EmptyStatement(Node):
def __init__(self, value):
self.value = value
def children(self):
return []
class ExprStatement(Node):
def __init__(self, expr):
self.expr = expr
def children(self):
return [self.expr]
class Elision(Node):
def __init__(self, value):
self.value = value
def children(self):
return []
class This(Node):
def __init__(self):
pass
def children(self):
return []
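# --- illustrative helper (a hedged sketch, not part of the original module) ---
# Every node above exposes children(), so a generic depth-first walk needs no
# per-class logic.  `Node` is assumed to be the base class defined earlier in
# this module; the callback name `visit` is arbitrary.
def walk(node, visit):
    """Apply ``visit`` to ``node`` and recurse into all child nodes."""
    visit(node)
    for child in node.children():
        # children() may contain None (e.g. a missing else-branch) or plain
        # values, so only recurse into actual Node instances.
        if isinstance(child, Node):
            walk(child, visit)
# Usage sketch: collect the operators of every BinOp under a parsed tree.
#     ops = []
#     walk(tree, lambda n: ops.append(n.op) if isinstance(n, BinOp) else None)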
|
|
import json
import boto3
import sure # noqa # pylint: disable=unused-import
from botocore.exceptions import ClientError
import pytest
from moto import mock_iam, mock_ec2, mock_s3, mock_sts, mock_elbv2, mock_rds
from moto.core import set_initial_no_auth_action_count
from moto.core import ACCOUNT_ID
from uuid import uuid4
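# The @set_initial_no_auth_action_count(n) decorator used below lets the first
# n API calls run without authentication so that fixtures (users, access keys,
# policies, roles) can be created; every call after that is checked against
# the IAM policies attached to the calling principal.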
@mock_iam
def create_user_with_access_key(user_name="test-user"):
client = boto3.client("iam", region_name="us-east-1")
client.create_user(UserName=user_name)
return client.create_access_key(UserName=user_name)["AccessKey"]
@mock_iam
def create_user_with_access_key_and_inline_policy(
user_name, policy_document, policy_name="policy1"
):
client = boto3.client("iam", region_name="us-east-1")
client.create_user(UserName=user_name)
client.put_user_policy(
UserName=user_name,
PolicyName=policy_name,
PolicyDocument=json.dumps(policy_document),
)
return client.create_access_key(UserName=user_name)["AccessKey"]
@mock_iam
def create_user_with_access_key_and_attached_policy(
user_name, policy_document, policy_name="policy1"
):
client = boto3.client("iam", region_name="us-east-1")
client.create_user(UserName=user_name)
policy_arn = client.create_policy(
PolicyName=policy_name, PolicyDocument=json.dumps(policy_document)
)["Policy"]["Arn"]
client.attach_user_policy(UserName=user_name, PolicyArn=policy_arn)
return client.create_access_key(UserName=user_name)["AccessKey"]
@mock_iam
def create_user_with_access_key_and_multiple_policies(
user_name,
inline_policy_document,
attached_policy_document,
inline_policy_name="policy1",
attached_policy_name="policy1",
):
client = boto3.client("iam", region_name="us-east-1")
client.create_user(UserName=user_name)
policy_arn = client.create_policy(
PolicyName=attached_policy_name,
PolicyDocument=json.dumps(attached_policy_document),
)["Policy"]["Arn"]
client.attach_user_policy(UserName=user_name, PolicyArn=policy_arn)
client.put_user_policy(
UserName=user_name,
PolicyName=inline_policy_name,
PolicyDocument=json.dumps(inline_policy_document),
)
return client.create_access_key(UserName=user_name)["AccessKey"]
def create_group_with_attached_policy_and_add_user(
user_name, policy_document, group_name="test-group", policy_name=None
):
if not policy_name:
policy_name = str(uuid4())
client = boto3.client("iam", region_name="us-east-1")
client.create_group(GroupName=group_name)
policy_arn = client.create_policy(
PolicyName=policy_name, PolicyDocument=json.dumps(policy_document)
)["Policy"]["Arn"]
client.attach_group_policy(GroupName=group_name, PolicyArn=policy_arn)
client.add_user_to_group(GroupName=group_name, UserName=user_name)
def create_group_with_inline_policy_and_add_user(
user_name, policy_document, group_name="test-group", policy_name="policy1"
):
client = boto3.client("iam", region_name="us-east-1")
client.create_group(GroupName=group_name)
client.put_group_policy(
GroupName=group_name,
PolicyName=policy_name,
PolicyDocument=json.dumps(policy_document),
)
client.add_user_to_group(GroupName=group_name, UserName=user_name)
def create_group_with_multiple_policies_and_add_user(
user_name,
inline_policy_document,
attached_policy_document,
group_name="test-group",
inline_policy_name="policy1",
attached_policy_name=None,
):
if not attached_policy_name:
attached_policy_name = str(uuid4())
client = boto3.client("iam", region_name="us-east-1")
client.create_group(GroupName=group_name)
client.put_group_policy(
GroupName=group_name,
PolicyName=inline_policy_name,
PolicyDocument=json.dumps(inline_policy_document),
)
policy_arn = client.create_policy(
PolicyName=attached_policy_name,
PolicyDocument=json.dumps(attached_policy_document),
)["Policy"]["Arn"]
client.attach_group_policy(GroupName=group_name, PolicyArn=policy_arn)
client.add_user_to_group(GroupName=group_name, UserName=user_name)
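# The two helpers below create a role, give it the supplied policy (attached or
# inline), assume the role via STS and return the temporary credentials
# (AccessKeyId/SecretAccessKey/SessionToken) used by the temporary-credential
# tests further down.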
@mock_iam
@mock_sts
def create_role_with_attached_policy_and_assume_it(
role_name,
trust_policy_document,
policy_document,
session_name="session1",
policy_name="policy1",
):
iam_client = boto3.client("iam", region_name="us-east-1")
sts_client = boto3.client("sts", region_name="us-east-1")
role_arn = iam_client.create_role(
RoleName=role_name, AssumeRolePolicyDocument=json.dumps(trust_policy_document)
)["Role"]["Arn"]
policy_arn = iam_client.create_policy(
PolicyName=policy_name, PolicyDocument=json.dumps(policy_document)
)["Policy"]["Arn"]
iam_client.attach_role_policy(RoleName=role_name, PolicyArn=policy_arn)
return sts_client.assume_role(RoleArn=role_arn, RoleSessionName=session_name)[
"Credentials"
]
@mock_iam
@mock_sts
def create_role_with_inline_policy_and_assume_it(
role_name,
trust_policy_document,
policy_document,
session_name="session1",
policy_name="policy1",
):
iam_client = boto3.client("iam", region_name="us-east-1")
sts_client = boto3.client("sts", region_name="us-east-1")
role_arn = iam_client.create_role(
RoleName=role_name, AssumeRolePolicyDocument=json.dumps(trust_policy_document)
)["Role"]["Arn"]
iam_client.put_role_policy(
RoleName=role_name,
PolicyName=policy_name,
PolicyDocument=json.dumps(policy_document),
)
return sts_client.assume_role(RoleArn=role_arn, RoleSessionName=session_name)[
"Credentials"
]
@set_initial_no_auth_action_count(0)
@mock_iam
def test_invalid_client_token_id():
client = boto3.client(
"iam",
region_name="us-east-1",
aws_access_key_id="invalid",
aws_secret_access_key="invalid",
)
with pytest.raises(ClientError) as ex:
client.get_user()
ex.value.response["Error"]["Code"].should.equal("InvalidClientTokenId")
ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403)
ex.value.response["Error"]["Message"].should.equal(
"The security token included in the request is invalid."
)
@set_initial_no_auth_action_count(0)
@mock_ec2
def test_auth_failure():
client = boto3.client(
"ec2",
region_name="us-east-1",
aws_access_key_id="invalid",
aws_secret_access_key="invalid",
)
with pytest.raises(ClientError) as ex:
client.describe_instances()
ex.value.response["Error"]["Code"].should.equal("AuthFailure")
ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(401)
ex.value.response["Error"]["Message"].should.equal(
"AWS was not able to validate the provided access credentials"
)
@set_initial_no_auth_action_count(2)
@mock_iam
def test_signature_does_not_match():
access_key = create_user_with_access_key()
client = boto3.client(
"iam",
region_name="us-east-1",
aws_access_key_id=access_key["AccessKeyId"],
aws_secret_access_key="invalid",
)
with pytest.raises(ClientError) as ex:
client.get_user()
ex.value.response["Error"]["Code"].should.equal("SignatureDoesNotMatch")
ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403)
ex.value.response["Error"]["Message"].should.equal(
"The request signature we calculated does not match the signature you provided. Check your AWS Secret Access Key and signing method. Consult the service documentation for details."
)
@set_initial_no_auth_action_count(2)
@mock_ec2
def test_auth_failure_with_valid_access_key_id():
access_key = create_user_with_access_key()
client = boto3.client(
"ec2",
region_name="us-east-1",
aws_access_key_id=access_key["AccessKeyId"],
aws_secret_access_key="invalid",
)
with pytest.raises(ClientError) as ex:
client.describe_instances()
ex.value.response["Error"]["Code"].should.equal("AuthFailure")
ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(401)
ex.value.response["Error"]["Message"].should.equal(
"AWS was not able to validate the provided access credentials"
)
@set_initial_no_auth_action_count(2)
@mock_ec2
def test_access_denied_with_no_policy():
user_name = "test-user"
access_key = create_user_with_access_key(user_name)
client = boto3.client(
"ec2",
region_name="us-east-1",
aws_access_key_id=access_key["AccessKeyId"],
aws_secret_access_key=access_key["SecretAccessKey"],
)
with pytest.raises(ClientError) as ex:
client.describe_instances()
ex.value.response["Error"]["Code"].should.equal("AccessDenied")
ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403)
ex.value.response["Error"]["Message"].should.equal(
"User: arn:aws:iam::{account_id}:user/{user_name} is not authorized to perform: {operation}".format(
account_id=ACCOUNT_ID,
user_name=user_name,
operation="ec2:DescribeInstances",
)
)
@set_initial_no_auth_action_count(3)
@mock_ec2
def test_access_denied_with_not_allowing_policy():
user_name = "test-user"
inline_policy_document = {
"Version": "2012-10-17",
"Statement": [{"Effect": "Allow", "Action": ["ec2:Run*"], "Resource": "*"}],
}
access_key = create_user_with_access_key_and_inline_policy(
user_name, inline_policy_document
)
client = boto3.client(
"ec2",
region_name="us-east-1",
aws_access_key_id=access_key["AccessKeyId"],
aws_secret_access_key=access_key["SecretAccessKey"],
)
with pytest.raises(ClientError) as ex:
client.describe_instances()
ex.value.response["Error"]["Code"].should.equal("AccessDenied")
ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403)
ex.value.response["Error"]["Message"].should.equal(
"User: arn:aws:iam::{account_id}:user/{user_name} is not authorized to perform: {operation}".format(
account_id=ACCOUNT_ID,
user_name=user_name,
operation="ec2:DescribeInstances",
)
)
@set_initial_no_auth_action_count(3)
@mock_ec2
def test_access_denied_for_run_instances():
# https://github.com/spulec/moto/issues/2774
# The run-instances method was broken between botocore versions 1.15.8 and 1.15.12
# This was due to the inclusion of '"idempotencyToken":true' in the response, somehow altering the signature and breaking the authentication
# Keeping this test in place in case botocore decides to break again
user_name = "test-user"
inline_policy_document = {
"Version": "2012-10-17",
"Statement": [
{"Effect": "Allow", "Action": ["ec2:Describe*"], "Resource": "*"}
],
}
access_key = create_user_with_access_key_and_inline_policy(
user_name, inline_policy_document
)
client = boto3.client(
"ec2",
region_name="us-east-1",
aws_access_key_id=access_key["AccessKeyId"],
aws_secret_access_key=access_key["SecretAccessKey"],
)
with pytest.raises(ClientError) as ex:
client.run_instances(MaxCount=1, MinCount=1)
ex.value.response["Error"]["Code"].should.equal("AccessDenied")
ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403)
ex.value.response["Error"]["Message"].should.equal(
"User: arn:aws:iam::{account_id}:user/{user_name} is not authorized to perform: {operation}".format(
account_id=ACCOUNT_ID, user_name=user_name, operation="ec2:RunInstances",
)
)
@set_initial_no_auth_action_count(3)
@mock_ec2
def test_access_denied_with_denying_policy():
user_name = "test-user"
inline_policy_document = {
"Version": "2012-10-17",
"Statement": [
{"Effect": "Allow", "Action": ["ec2:*"], "Resource": "*"},
{"Effect": "Deny", "Action": "ec2:CreateVpc", "Resource": "*"},
],
}
access_key = create_user_with_access_key_and_inline_policy(
user_name, inline_policy_document
)
client = boto3.client(
"ec2",
region_name="us-east-1",
aws_access_key_id=access_key["AccessKeyId"],
aws_secret_access_key=access_key["SecretAccessKey"],
)
with pytest.raises(ClientError) as ex:
client.create_vpc(CidrBlock="10.0.0.0/16")
ex.value.response["Error"]["Code"].should.equal("AccessDenied")
ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403)
ex.value.response["Error"]["Message"].should.equal(
"User: arn:aws:iam::{account_id}:user/{user_name} is not authorized to perform: {operation}".format(
account_id=ACCOUNT_ID, user_name=user_name, operation="ec2:CreateVpc"
)
)
@set_initial_no_auth_action_count(3)
@mock_sts
def test_get_caller_identity_allowed_with_denying_policy():
user_name = "test-user"
inline_policy_document = {
"Version": "2012-10-17",
"Statement": [
{"Effect": "Deny", "Action": "sts:GetCallerIdentity", "Resource": "*"}
],
}
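    # As in real AWS, sts:GetCallerIdentity cannot be blocked by an IAM policy,
    # so the explicit Deny above has no effect and the call below succeeds.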
access_key = create_user_with_access_key_and_inline_policy(
user_name, inline_policy_document
)
client = boto3.client(
"sts",
region_name="us-east-1",
aws_access_key_id=access_key["AccessKeyId"],
aws_secret_access_key=access_key["SecretAccessKey"],
)
client.get_caller_identity().should.be.a(dict)
@set_initial_no_auth_action_count(3)
@mock_ec2
def test_allowed_with_wildcard_action():
user_name = "test-user"
inline_policy_document = {
"Version": "2012-10-17",
"Statement": [{"Effect": "Allow", "Action": "ec2:Describe*", "Resource": "*"}],
}
access_key = create_user_with_access_key_and_inline_policy(
user_name, inline_policy_document
)
client = boto3.client(
"ec2",
region_name="us-east-1",
aws_access_key_id=access_key["AccessKeyId"],
aws_secret_access_key=access_key["SecretAccessKey"],
)
client.describe_tags()["Tags"].should.be.empty
@set_initial_no_auth_action_count(4)
@mock_iam
def test_allowed_with_explicit_action_in_attached_policy():
user_name = "test-user"
attached_policy_document = {
"Version": "2012-10-17",
"Statement": [{"Effect": "Allow", "Action": "iam:ListGroups", "Resource": "*"}],
}
access_key = create_user_with_access_key_and_attached_policy(
user_name, attached_policy_document
)
client = boto3.client(
"iam",
region_name="us-east-1",
aws_access_key_id=access_key["AccessKeyId"],
aws_secret_access_key=access_key["SecretAccessKey"],
)
client.list_groups()["Groups"].should.be.empty
@set_initial_no_auth_action_count(8)
@mock_s3
@mock_iam
def test_s3_access_denied_with_denying_attached_group_policy():
user_name = "test-user"
attached_policy_document = {
"Version": "2012-10-17",
"Statement": [
{"Effect": "Allow", "Action": "s3:ListAllMyBuckets", "Resource": "*"}
],
}
group_attached_policy_document = {
"Version": "2012-10-17",
"Statement": [{"Effect": "Deny", "Action": "s3:List*", "Resource": "*"}],
}
access_key = create_user_with_access_key_and_attached_policy(
user_name, attached_policy_document, policy_name="policy1"
)
create_group_with_attached_policy_and_add_user(
user_name, group_attached_policy_document, policy_name="policy2"
)
client = boto3.client(
"s3",
region_name="us-east-1",
aws_access_key_id=access_key["AccessKeyId"],
aws_secret_access_key=access_key["SecretAccessKey"],
)
with pytest.raises(ClientError) as ex:
client.list_buckets()
ex.value.response["Error"]["Code"].should.equal("AccessDenied")
ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403)
ex.value.response["Error"]["Message"].should.equal("Access Denied")
@set_initial_no_auth_action_count(6)
@mock_s3
@mock_iam
def test_s3_access_denied_with_denying_inline_group_policy():
user_name = "test-user"
bucket_name = "test-bucket"
inline_policy_document = {
"Version": "2012-10-17",
"Statement": [{"Effect": "Allow", "Action": "*", "Resource": "*"}],
}
group_inline_policy_document = {
"Version": "2012-10-17",
"Statement": [{"Effect": "Deny", "Action": "s3:GetObject", "Resource": "*"}],
}
access_key = create_user_with_access_key_and_inline_policy(
user_name, inline_policy_document
)
create_group_with_inline_policy_and_add_user(
user_name, group_inline_policy_document
)
client = boto3.client(
"s3",
region_name="us-east-1",
aws_access_key_id=access_key["AccessKeyId"],
aws_secret_access_key=access_key["SecretAccessKey"],
)
client.create_bucket(Bucket=bucket_name)
with pytest.raises(ClientError) as ex:
client.get_object(Bucket=bucket_name, Key="sdfsdf")
ex.value.response["Error"]["Code"].should.equal("AccessDenied")
ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403)
ex.value.response["Error"]["Message"].should.equal("Access Denied")
@set_initial_no_auth_action_count(10)
@mock_iam
@mock_ec2
def test_access_denied_with_many_irrelevant_policies():
user_name = "test-user"
inline_policy_document = {
"Version": "2012-10-17",
"Statement": [{"Effect": "Allow", "Action": "ec2:Describe*", "Resource": "*"}],
}
attached_policy_document = {
"Version": "2012-10-17",
"Statement": [{"Effect": "Allow", "Action": "s3:*", "Resource": "*"}],
}
group_inline_policy_document = {
"Version": "2012-10-17",
"Statement": [{"Effect": "Deny", "Action": "iam:List*", "Resource": "*"}],
}
group_attached_policy_document = {
"Version": "2012-10-17",
"Statement": [{"Effect": "Deny", "Action": "lambda:*", "Resource": "*"}],
}
access_key = create_user_with_access_key_and_multiple_policies(
user_name,
inline_policy_document,
attached_policy_document,
attached_policy_name="policy1",
)
create_group_with_multiple_policies_and_add_user(
user_name,
group_inline_policy_document,
group_attached_policy_document,
attached_policy_name="policy2",
)
client = boto3.client(
"ec2",
region_name="us-east-1",
aws_access_key_id=access_key["AccessKeyId"],
aws_secret_access_key=access_key["SecretAccessKey"],
)
with pytest.raises(ClientError) as ex:
client.create_key_pair(KeyName="TestKey")
ex.value.response["Error"]["Code"].should.equal("AccessDenied")
ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403)
ex.value.response["Error"]["Message"].should.equal(
"User: arn:aws:iam::{account_id}:user/{user_name} is not authorized to perform: {operation}".format(
account_id=ACCOUNT_ID, user_name=user_name, operation="ec2:CreateKeyPair"
)
)
@set_initial_no_auth_action_count(4)
@mock_iam
@mock_sts
@mock_ec2
@mock_elbv2
def test_allowed_with_temporary_credentials():
role_name = "test-role"
trust_policy_document = {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::{account_id}:root".format(account_id=ACCOUNT_ID)
},
"Action": "sts:AssumeRole",
},
}
attached_policy_document = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"elasticloadbalancing:CreateLoadBalancer",
"ec2:DescribeSubnets",
],
"Resource": "*",
}
],
}
credentials = create_role_with_attached_policy_and_assume_it(
role_name, trust_policy_document, attached_policy_document
)
elbv2_client = boto3.client(
"elbv2",
region_name="us-east-1",
aws_access_key_id=credentials["AccessKeyId"],
aws_secret_access_key=credentials["SecretAccessKey"],
aws_session_token=credentials["SessionToken"],
)
ec2_client = boto3.client(
"ec2",
region_name="us-east-1",
aws_access_key_id=credentials["AccessKeyId"],
aws_secret_access_key=credentials["SecretAccessKey"],
aws_session_token=credentials["SessionToken"],
)
subnets = ec2_client.describe_subnets()["Subnets"]
len(subnets).should.be.greater_than(1)
elbv2_client.create_load_balancer(
Name="test-load-balancer",
Subnets=[subnets[0]["SubnetId"], subnets[1]["SubnetId"]],
)["LoadBalancers"].should.have.length_of(1)
@set_initial_no_auth_action_count(3)
@mock_iam
@mock_sts
@mock_rds
def test_access_denied_with_temporary_credentials():
role_name = "test-role"
session_name = "test-session"
trust_policy_document = {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::{account_id}:root".format(account_id=ACCOUNT_ID)
},
"Action": "sts:AssumeRole",
},
}
attached_policy_document = {
"Version": "2012-10-17",
"Statement": [
{"Effect": "Allow", "Action": ["rds:Describe*"], "Resource": "*"}
],
}
credentials = create_role_with_inline_policy_and_assume_it(
role_name, trust_policy_document, attached_policy_document, session_name
)
client = boto3.client(
"rds",
region_name="us-east-1",
aws_access_key_id=credentials["AccessKeyId"],
aws_secret_access_key=credentials["SecretAccessKey"],
aws_session_token=credentials["SessionToken"],
)
with pytest.raises(ClientError) as ex:
client.create_db_instance(
DBInstanceIdentifier="test-db-instance",
DBInstanceClass="db.t3",
Engine="aurora-postgresql",
)
ex.value.response["Error"]["Code"].should.equal("AccessDenied")
ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403)
ex.value.response["Error"]["Message"].should.equal(
"User: arn:aws:sts::{account_id}:assumed-role/{role_name}/{session_name} is not authorized to perform: {operation}".format(
account_id=ACCOUNT_ID,
role_name=role_name,
session_name=session_name,
operation="rds:CreateDBInstance",
)
)
@set_initial_no_auth_action_count(3)
@mock_iam
def test_get_user_from_credentials():
user_name = "new-test-user"
inline_policy_document = {
"Version": "2012-10-17",
"Statement": [{"Effect": "Allow", "Action": "iam:*", "Resource": "*"}],
}
access_key = create_user_with_access_key_and_inline_policy(
user_name, inline_policy_document
)
client = boto3.client(
"iam",
region_name="us-east-1",
aws_access_key_id=access_key["AccessKeyId"],
aws_secret_access_key=access_key["SecretAccessKey"],
)
client.get_user()["User"]["UserName"].should.equal(user_name)
@set_initial_no_auth_action_count(0)
@mock_s3
def test_s3_invalid_access_key_id():
client = boto3.client(
"s3",
region_name="us-east-1",
aws_access_key_id="invalid",
aws_secret_access_key="invalid",
)
with pytest.raises(ClientError) as ex:
client.list_buckets()
ex.value.response["Error"]["Code"].should.equal("InvalidAccessKeyId")
ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403)
ex.value.response["Error"]["Message"].should.equal(
"The AWS Access Key Id you provided does not exist in our records."
)
@set_initial_no_auth_action_count(3)
@mock_s3
@mock_iam
def test_s3_signature_does_not_match():
bucket_name = "test-bucket"
access_key = create_user_with_access_key()
client = boto3.client(
"s3",
region_name="us-east-1",
aws_access_key_id=access_key["AccessKeyId"],
aws_secret_access_key="invalid",
)
client.create_bucket(Bucket=bucket_name)
with pytest.raises(ClientError) as ex:
client.put_object(Bucket=bucket_name, Key="abc")
ex.value.response["Error"]["Code"].should.equal("SignatureDoesNotMatch")
ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403)
ex.value.response["Error"]["Message"].should.equal(
"The request signature we calculated does not match the signature you provided. Check your key and signing method."
)
@set_initial_no_auth_action_count(7)
@mock_s3
@mock_iam
def test_s3_access_denied_not_action():
user_name = "test-user"
bucket_name = "test-bucket"
inline_policy_document = {
"Version": "2012-10-17",
"Statement": [{"Effect": "Allow", "Action": "*", "Resource": "*"}],
}
group_inline_policy_document = {
"Version": "2012-10-17",
"Statement": [{"Effect": "Deny", "NotAction": "iam:GetUser", "Resource": "*"}],
}
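    # A Deny statement with NotAction matches every action *except* iam:GetUser,
    # so the group policy above denies s3:DeleteObject and the request below
    # fails with AccessDenied despite the user's own Allow-all inline policy.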
access_key = create_user_with_access_key_and_inline_policy(
user_name, inline_policy_document
)
create_group_with_inline_policy_and_add_user(
user_name, group_inline_policy_document
)
client = boto3.client(
"s3",
region_name="us-east-1",
aws_access_key_id=access_key["AccessKeyId"],
aws_secret_access_key=access_key["SecretAccessKey"],
)
client.create_bucket(Bucket=bucket_name)
with pytest.raises(ClientError) as ex:
client.delete_object(Bucket=bucket_name, Key="sdfsdf")
ex.value.response["Error"]["Code"].should.equal("AccessDenied")
ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403)
ex.value.response["Error"]["Message"].should.equal("Access Denied")
@set_initial_no_auth_action_count(4)
@mock_iam
@mock_sts
@mock_s3
def test_s3_invalid_token_with_temporary_credentials():
role_name = "test-role"
session_name = "test-session"
bucket_name = "test-bucket-888"
trust_policy_document = {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::{account_id}:root".format(account_id=ACCOUNT_ID)
},
"Action": "sts:AssumeRole",
},
}
attached_policy_document = {
"Version": "2012-10-17",
"Statement": [{"Effect": "Allow", "Action": ["*"], "Resource": "*"}],
}
credentials = create_role_with_inline_policy_and_assume_it(
role_name, trust_policy_document, attached_policy_document, session_name
)
client = boto3.client(
"s3",
region_name="us-east-1",
aws_access_key_id=credentials["AccessKeyId"],
aws_secret_access_key=credentials["SecretAccessKey"],
aws_session_token="invalid",
)
client.create_bucket(Bucket=bucket_name)
with pytest.raises(ClientError) as ex:
client.list_bucket_metrics_configurations(Bucket=bucket_name)
ex.value.response["Error"]["Code"].should.equal("InvalidToken")
ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
ex.value.response["Error"]["Message"].should.equal(
"The provided token is malformed or otherwise invalid."
)
|
|
import json
import os
from subprocess import PIPE
from subprocess import Popen
from time import sleep
from unittest import TestCase
import requests
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from api.decorator import threaded, retry_on_fail
from database.dictionary import Base, Definition, Word
from database.exceptions.http import InvalidJsonReceivedException
from database.exceptions.http import WordAlreadyExistsException
URL_HEAD = 'http://0.0.0.0:8001'
DB_PATH = '/tmp/test.db'
# Temporarily patch requests API in case of refused connections
# (connecting while service is not ready)
possible_errors = [requests.exceptions.ConnectionError]
requests.post = retry_on_fail(
possible_errors,
retries=5,
time_between_retries=.4)(
requests.post)
requests.get = retry_on_fail(
possible_errors,
retries=5,
time_between_retries=.4)(
requests.get)
requests.put = retry_on_fail(
possible_errors,
retries=5,
time_between_retries=.4)(
requests.put)
requests.delete = retry_on_fail(
possible_errors,
retries=5,
time_between_retries=.4)(
requests.delete)
class TestDictionaryRestService(TestCase):
def setUp(self):
self.ENGINE = create_engine('sqlite:///%s' % DB_PATH)
Base.metadata.create_all(self.ENGINE)
SessionClass = sessionmaker(bind=self.ENGINE)
session = SessionClass()
definition = Definition('rikizimai', 'de')
word = Word(word='tehanu',
language='jm',
part_of_speech='ana',
definitions=[definition])
session.add(definition)
session.add(word)
self.launch_service()
self.wait_until_ready()
session.commit()
session.flush()
def tearDown(self):
self.kill_service()
sleep(.4)
@staticmethod
@threaded
def launch_service():
global DICTIONARY_SERVICE
DICTIONARY_SERVICE = Popen(["python3",
"dictionary_service.py",
'--db-file',
DB_PATH],
stdin=PIPE,
stdout=PIPE,
stderr=PIPE)
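        # communicate() blocks until the service process exits; the @threaded
        # decorator presumably runs this launcher in a background thread so
        # that setUp() can continue while the service starts up.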
DICTIONARY_SERVICE.communicate()
@retry_on_fail([Exception], retries=10, time_between_retries=.4)
def wait_until_ready(self):
resp = requests.get(URL_HEAD + '/ping')
assert resp.status_code == 200
@staticmethod
def kill_service():
DICTIONARY_SERVICE.kill()
os.system('rm %s' % DB_PATH)
@retry_on_fail([Exception], 10, .3)
def create_entry(self, word, language, pos, definition, def_language):
resp = requests.post(
URL_HEAD + '/entry/%s/create' % language,
json=json.dumps({
'definitions': [{
'definition': definition,
'definition_language': def_language
}],
'word': word,
'part_of_speech': pos,
})
)
self.assertEquals(resp.status_code, 200)
j = resp.json()
        # TODO: also verify in the database that the definition was actually added
def test_disable_autocommit(self):
requests.put(
URL_HEAD + '/configure',
json=json.dumps({
'autocommit': False
})
)
for i in range(20):
            self.create_entry('nanaika%d' % i, 'ka', 'ana',
                              'tarameguni%d' % i, 'de')
requests.post(URL_HEAD + '/rollback')
for i in range(20):
resp = requests.get(URL_HEAD + '/entry/ka/nanaika%d' % i)
self.assertEquals(resp.status_code, 404)
def test_enable_autocommit(self):
requests.put(
URL_HEAD + '/configure',
json=json.dumps({
'autocommit': True
})
)
for i in range(20):
            self.create_entry('nanaika%d' % i, 'ka', 'ana',
                              'tarameguni%d' % i, 'de')
for i in range(20):
resp = requests.get(URL_HEAD + '/entry/ka/nanaika%d' % i)
self.assertEquals(resp.status_code, 200)
def test_get_entry(self):
resp = requests.get(URL_HEAD + '/entry/jm/tehanu')
data = resp.json()
self.assertEquals(resp.status_code, 200)
for datum in data:
self.assertEquals(datum['word'], 'tehanu')
self.assertEquals(datum['language'], 'jm')
self.assertEquals(datum['part_of_speech'], 'ana')
self.assertEquals(len(datum['definitions']), 1)
def test_get_entry_404(self):
resp = requests.get(URL_HEAD + '/entry/jm/teklkhanu')
self.assertEquals(resp.status_code, 404)
def test_create_entry(self):
self.create_entry('nakaina', 'jm', 'ana', 'tarameguni', 'de')
def test_add_batch(self):
batch = [
{
'language': 'mnh',
'part_of_speech': 'ana',
'word': 'mbala',
'definitions': [{
'definition': 'elefanta',
'definition_language': 'mg'
}],
},
{
'language': 'kg',
'part_of_speech': 'ana',
'word': 'mbala',
'definitions': [{
'definition': 'ovy',
'definition_language': 'mg'
}],
},
{
'language': 'ln',
'part_of_speech': 'adv',
'word': 'mbala',
'definitions': [{
'definition': 'indray mandeha',
'definition_language': 'mg'
}],
}
]
resp = requests.post(URL_HEAD + '/entry/batch', json=batch)
self.assertEquals(resp.status_code, 200, 'Batch posting failed!')
# committing data
requests.get(URL_HEAD + '/commit')
entries = [
('ln', 'mbala'),
('kg', 'mbala'),
('mnh', 'mbala'),
]
for language, word in entries:
resp = requests.get(URL_HEAD +
'/entry/{}/{}'.format(language, word))
self.assertEquals(resp.status_code, 200, 'Entry check failed!')
data = resp.json()
self.assertEqual(data[0]['language'], language)
self.assertEqual(data[0]['word'], word)
def test_create_existing_entry(self):
sleep(1)
resp = requests.post(
URL_HEAD + '/entry/jm/create',
json=json.dumps({
'definitions': [{
'definition': 'rikizimai',
'definition_language': 'de'
}],
'word': 'tehanu',
'part_of_speech': 'ana',
})
)
self.assertEquals(
resp.status_code,
WordAlreadyExistsException.status_code)
def test_append_to_existing_entry(self):
resp = requests.post(
URL_HEAD + '/entry/jm/create',
json=json.dumps({
'definitions': [{
'definition': 'nanganasla',
'definition_language': 'mg'
}],
'word': 'tehanu',
'part_of_speech': 'ana',
})
)
self.assertEquals(resp.status_code, 200)
def test_create_entry_invalid_json(self):
resp = requests.post(
URL_HEAD + '/entry/jm/create',
json=json.dumps({
'word': 'tehanu',
'part_of_speech': 'ana',
})
)
self.assertEquals(
resp.status_code,
InvalidJsonReceivedException.status_code)
def test_edit_entry(self):
resp = requests.get(URL_HEAD + '/entry/jm/tehanu')
data = resp.json()
word_id = data[0]['id']
resp = requests.put(
URL_HEAD + '/entry/%d/edit' % word_id,
json={
'definitions': [{
'definition': 'tarameguni',
'definition_language': 'mg'
}],
'part_of_speech': 'aojs',
}
)
self.assertEquals(resp.status_code, 200)
def test_read_after_write_get_after_post(self):
self.create_entry('nanaika', 'ka', 'ana', 'tarameguni', 'de')
resp = requests.get(URL_HEAD + '/entry/ka/nanaika')
data = resp.json()
self.assertEquals(resp.status_code, 200)
for datum in data:
self.assertEquals(datum['word'], 'nanaika')
self.assertEquals(datum['language'], 'ka')
self.assertEquals(datum['part_of_speech'], 'ana')
self.assertEquals(len(datum['definitions']), 1)
def test_read_after_write_get_after_put(self):
self.test_edit_entry()
resp = requests.get(URL_HEAD + '/entry/jm/tehanu')
data = resp.json()
self.assertEquals(resp.status_code, 200)
for datum in data:
self.assertEquals(datum['word'], 'tehanu')
self.assertEquals(datum['language'], 'jm')
self.assertEquals(datum['part_of_speech'], 'aojs')
self.assertEquals(len(datum['definitions']), 1)
def test_delete_entry(self):
resp = requests.get(URL_HEAD + '/entry/jm/tehanu')
self.assertEquals(resp.status_code, 200)
data = resp.json()[0]
del_url = URL_HEAD + '/entry/%d/delete' % data['id']
resp = requests.delete(del_url)
self.assertEquals(resp.status_code, 204)
def test_read_after_write_get_after_delete(self):
self.test_delete_entry()
resp = requests.get(URL_HEAD + '/entry/jm/tehanu')
self.assertEquals(resp.status_code, 404)
def test_get_definition(self):
resp = requests.get(URL_HEAD + '/definition/1')
self.assertEquals(resp.status_code, 200)
def test_search_definition(self):
search_params = {
'definition': 'rikizimai'
}
resp = requests.post(
URL_HEAD + '/definition/search',
json=json.dumps(search_params)
)
j = resp.json()
self.assertEquals(len(j), 1)
self.assertEquals(resp.status_code, 200)
def test_search_definition_wildcard(self):
search_params = {
'definition': 'rik%'
}
resp = requests.post(
URL_HEAD + '/definition/search',
json=json.dumps(search_params)
)
j = resp.json()
print(j)
self.assertEquals(len(j), 1)
self.assertEquals(resp.status_code, 200)
def test_delete_definition(self):
resp = requests.get(URL_HEAD + '/entry/jm/tehanu')
self.assertEquals(resp.status_code, 200)
data = resp.json()[0]
del_url = URL_HEAD + '/entry/%d/delete' % data['id']
resp = requests.delete(del_url)
self.assertEquals(resp.status_code, 204)
def test_get_translation(self):
self.create_entry('toki', 'tpo', 'ana', 'Sprach', 'de')
self.create_entry('pona', 'tpo', 'ana', 'gut', 'de')
self.create_entry('alks', 'tpo', 'ana', 'pals', 'fr')
resp = requests.get(URL_HEAD + '/translations/tpo/de/toki')
print(resp)
j = resp.json()
print(j)
self.assertEquals(len(j), 1)
self.assertEquals(resp.status_code, 200)
def test_get_all_translations(self):
self.create_entry('toki', 'tpo', 'ana', 'Sprach', 'de')
self.create_entry('pona', 'tpo', 'ana', 'gut', 'de')
resp = requests.get(URL_HEAD + '/translations/jm/tehanu')
j = resp.json()
self.assertEquals(len(j), 1)
self.assertEquals(resp.status_code, 200)
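# Minimal entry point (an assumption: the suite is normally driven by a test
# runner, but this mirrors the other test modules in this collection and lets
# the file be executed directly).
if __name__ == '__main__':
    import unittest
    unittest.main()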
|
|
# coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
# TEST SCENARIO COVERAGE
# ----------------------
# Methods Total : 26
# Methods Covered : 26
# Examples Total : 26
# Examples Tested : 20
# Coverage % : 77
# ----------------------
# Current Operation Coverage:
# Operations: 1/1
# Redis: 10/13
# FirewallRules: 4/4
# PatchSchedules: 4/4
# LinkedServer: 1/4
import time
import unittest
import azure.mgmt.redis
from devtools_testutils import AzureMgmtTestCase, RandomNameResourceGroupPreparer
AZURE_LOCATION = 'eastus'
class MgmtRedisTest(AzureMgmtTestCase):
def setUp(self):
super(MgmtRedisTest, self).setUp()
self.mgmt_client = self.create_mgmt_client(
azure.mgmt.redis.RedisManagementClient
)
if self.is_live:
from azure.mgmt.network import NetworkManagementClient
self.network_client = self.create_mgmt_client(
NetworkManagementClient
)
def create_virtual_network(self, group_name, location, network_name, subnet_name):
azure_operation_poller = self.network_client.virtual_networks.begin_create_or_update(
group_name,
network_name,
{
'location': location,
'address_space': {
'address_prefixes': ['10.0.0.0/16']
}
},
)
result_create = azure_operation_poller.result()
async_subnet_creation = self.network_client.subnets.begin_create_or_update(
group_name,
network_name,
subnet_name,
{'address_prefix': '10.0.0.0/24'}
)
subnet_info = async_subnet_creation.result()
return subnet_info
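    # The subnet created above is what the Premium-tier cache below points at
    # through its "subnet_id" property; it is only created when the test runs
    # live (self.is_live), since recorded runs replay the network calls.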
@unittest.skip('hard to test')
@RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
def test_redis(self, resource_group):
# UNIQUE = resource_group.name[-4:]
SUBSCRIPTION_ID = self.settings.SUBSCRIPTION_ID
TENANT_ID = self.settings.TENANT_ID
RESOURCE_GROUP = resource_group.name
NAME = "myosgkxy"
VIRTUAL_NETWORK_NAME = "myVirtualNetwork"
SUBNET_NAME = "mySubnet"
# CACHE_NAME = "myCache"
CACHE_NAME = NAME
RULE_NAME = "myRule"
DEFAULT = "default"
LINKED_SERVER_NAME = "myLinkedServer"
REDIS_NAME = "myRedis"
if self.is_live:
self.create_virtual_network(RESOURCE_GROUP, AZURE_LOCATION, VIRTUAL_NETWORK_NAME, SUBNET_NAME)
#--------------------------------------------------------------------------
# /Redis/put/RedisCacheCreate[put]
#--------------------------------------------------------------------------
BODY = {
"location": AZURE_LOCATION,
"zones": [
"1"
],
"sku": {
"name": "Premium",
"family": "P",
"capacity": "1"
},
"enable_non_ssl_port": True,
"shard_count": "2",
# "replicas_per_master": "2",
"redis_configuration": {
"maxmemory-policy": "allkeys-lru"
},
"subnet_id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/virtualNetworks/" + VIRTUAL_NETWORK_NAME + "/subnets/" + SUBNET_NAME,
"static_ip": "10.0.0.5",
"minimum_tls_version": "1.2"
}
result = self.mgmt_client.redis.begin_create(resource_group_name=RESOURCE_GROUP, name=NAME, parameters=BODY)
result = result.result()
#--------------------------------------------------------------------------
# /PatchSchedules/put/RedisCachePatchSchedulesCreateOrUpdate[put]
#--------------------------------------------------------------------------
BODY = {
"schedule_entries": [
{
"day_of_week": "Monday",
"start_hour_utc": "12",
"maintenance_window": "PT5H"
},
{
"day_of_week": "Tuesday",
"start_hour_utc": "12"
}
]
}
result = self.mgmt_client.patch_schedules.create_or_update(resource_group_name=RESOURCE_GROUP, name=NAME, default=DEFAULT, parameters=BODY)
if self.is_live:
time.sleep(1800)
#--------------------------------------------------------------------------
# /FirewallRules/put/RedisCacheFirewallRuleCreate[put]
#--------------------------------------------------------------------------
BODY = {
"start_ip": "10.0.1.1",
"end_ip": "10.0.1.4"
}
result = self.mgmt_client.firewall_rules.create_or_update(resource_group_name=RESOURCE_GROUP, cache_name=CACHE_NAME, rule_name=RULE_NAME, parameters=BODY)
#--------------------------------------------------------------------------
# /LinkedServer/put/LinkedServer_Create[put]
#--------------------------------------------------------------------------
BODY = {
"linked_redis_cache_id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Cache/Redis/" + REDIS_NAME,
"linked_redis_cache_location": "West US",
"server_role": "Secondary"
}
# result = self.mgmt_client.linked_server.begin_create(resource_group_name=RESOURCE_GROUP, name=NAME, linked_server_name=LINKED_SERVER_NAME, parameters=BODY)
# result = result.result()
#--------------------------------------------------------------------------
# /LinkedServer/get/LinkedServer_Get[get]
#--------------------------------------------------------------------------
# result = self.mgmt_client.linked_server.get(resource_group_name=RESOURCE_GROUP, name=NAME, linked_server_name=LINKED_SERVER_NAME)
#--------------------------------------------------------------------------
# /FirewallRules/get/RedisCacheFirewallRuleGet[get]
#--------------------------------------------------------------------------
result = self.mgmt_client.firewall_rules.get(resource_group_name=RESOURCE_GROUP, cache_name=CACHE_NAME, rule_name=RULE_NAME)
#--------------------------------------------------------------------------
# /PatchSchedules/get/RedisCachePatchSchedulesGet[get]
#--------------------------------------------------------------------------
# result = self.mgmt_client.patch_schedules.get(resource_group_name=RESOURCE_GROUP, name=NAME, default=DEFAULT)
#--------------------------------------------------------------------------
        # /Redis/get/RedisCacheListUpgradeNotifications[get]
#--------------------------------------------------------------------------
# result = self.mgmt_client.redis.list_upgrade_notifications(resource_group_name=RESOURCE_GROUP, name=NAME, history="5000")
#--------------------------------------------------------------------------
# /PatchSchedules/get/RedisCachePatchSchedulesList[get]
#--------------------------------------------------------------------------
result = self.mgmt_client.patch_schedules.list_by_redis_resource(resource_group_name=RESOURCE_GROUP, cache_name=CACHE_NAME)
#--------------------------------------------------------------------------
# /FirewallRules/get/RedisCacheFirewallRulesList[get]
#--------------------------------------------------------------------------
result = self.mgmt_client.firewall_rules.list_by_redis_resource(resource_group_name=RESOURCE_GROUP, cache_name=CACHE_NAME)
#--------------------------------------------------------------------------
# /LinkedServer/get/LinkedServer_List[get]
#--------------------------------------------------------------------------
result = self.mgmt_client.linked_server.list(resource_group_name=RESOURCE_GROUP, name=NAME)
#--------------------------------------------------------------------------
# /Redis/get/RedisCacheGet[get]
#--------------------------------------------------------------------------
result = self.mgmt_client.redis.get(resource_group_name=RESOURCE_GROUP, name=NAME)
#--------------------------------------------------------------------------
# /Redis/get/RedisCacheListByResourceGroup[get]
#--------------------------------------------------------------------------
result = self.mgmt_client.redis.list_by_resource_group(resource_group_name=RESOURCE_GROUP)
#--------------------------------------------------------------------------
# /Redis/get/RedisCacheList[get]
#--------------------------------------------------------------------------
result = self.mgmt_client.redis.list()
#--------------------------------------------------------------------------
# /Operations/get/Operations_List[get]
#--------------------------------------------------------------------------
result = self.mgmt_client.operations.list()
#--------------------------------------------------------------------------
# /Redis/post/RedisCacheRegenerateKey[post]
#--------------------------------------------------------------------------
BODY = {
"key_type": "Primary"
}
result = self.mgmt_client.redis.regenerate_key(resource_group_name=RESOURCE_GROUP, name=NAME, parameters=BODY)
#--------------------------------------------------------------------------
# /Redis/post/RedisCacheForceReboot[post]
#--------------------------------------------------------------------------
BODY = {
"shard_id": "0",
"reboot_type": "AllNodes"
}
result = self.mgmt_client.redis.force_reboot(resource_group_name=RESOURCE_GROUP, name=NAME, parameters=BODY)
#--------------------------------------------------------------------------
# /Redis/post/RedisCacheListKeys[post]
#--------------------------------------------------------------------------
result = self.mgmt_client.redis.list_keys(resource_group_name=RESOURCE_GROUP, name=NAME)
#--------------------------------------------------------------------------
# /Redis/post/RedisCacheImport[post]
#--------------------------------------------------------------------------
BODY = {
"format": "RDB",
"files": [
"http://fileuris.contoso.com/pathtofile1"
]
}
# result = self.mgmt_client.redis.begin_import_data(resource_group_name=RESOURCE_GROUP, name=NAME, parameters=BODY)
# result = result.result()
#--------------------------------------------------------------------------
# /Redis/post/RedisCacheExport[post]
#--------------------------------------------------------------------------
BODY = {
"format": "RDB",
"prefix": "datadump1",
"container": "https://contosostorage.blob.core.window.net/urltoBlobContainer?sasKeyParameters"
}
# result = self.mgmt_client.redis.begin_export_data(resource_group_name=RESOURCE_GROUP, name=NAME, parameters=BODY)
# result = result.result()
#--------------------------------------------------------------------------
# /Redis/patch/RedisCacheUpdate[patch]
#--------------------------------------------------------------------------
BODY = {
"enable_non_ssl_port": True
}
result = self.mgmt_client.redis.update(resource_group_name=RESOURCE_GROUP, name=NAME, parameters=BODY)
#--------------------------------------------------------------------------
        # /Redis/post/RedisCacheCheckNameAvailability[post]
#--------------------------------------------------------------------------
BODY = {
"type": "Microsoft.Cache/Redis",
"name": "cacheName"
}
result = self.mgmt_client.redis.check_name_availability(parameters=BODY)
#--------------------------------------------------------------------------
# /LinkedServer/delete/LinkedServerDelete[delete]
#--------------------------------------------------------------------------
# result = self.mgmt_client.linked_server.delete(resource_group_name=RESOURCE_GROUP, name=NAME, linked_server_name=LINKED_SERVER_NAME)
#--------------------------------------------------------------------------
# /FirewallRules/delete/RedisCacheFirewallRuleDelete[delete]
#--------------------------------------------------------------------------
result = self.mgmt_client.firewall_rules.delete(resource_group_name=RESOURCE_GROUP, cache_name=CACHE_NAME, rule_name=RULE_NAME)
#--------------------------------------------------------------------------
# /PatchSchedules/delete/RedisCachePatchSchedulesDelete[delete]
#--------------------------------------------------------------------------
result = self.mgmt_client.patch_schedules.delete(resource_group_name=RESOURCE_GROUP, name=NAME, default=DEFAULT)
#--------------------------------------------------------------------------
# /Redis/delete/RedisCacheDelete[delete]
#--------------------------------------------------------------------------
result = self.mgmt_client.redis.begin_delete(resource_group_name=RESOURCE_GROUP, name=NAME)
result = result.result()
#------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
|
|
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
import copy
import os
import pkgutil
import pipes
import re
import shlex
import shutil
import sys
from libcxx.compiler import CXXCompiler
from libcxx.test.target_info import make_target_info
import libcxx.util
import libcxx.test.features
import libcxx.test.newconfig
import libcxx.test.params
def loadSiteConfig(lit_config, config, param_name, env_name):
# We haven't loaded the site specific configuration (the user is
# probably trying to run on a test file directly, and either the site
# configuration hasn't been created by the build system, or we are in an
# out-of-tree build situation).
site_cfg = lit_config.params.get(param_name,
os.environ.get(env_name))
if not site_cfg:
lit_config.warning('No site specific configuration file found!'
' Running the tests in the default configuration.')
elif not os.path.isfile(site_cfg):
lit_config.fatal(
"Specified site configuration file does not exist: '%s'" %
site_cfg)
else:
lit_config.note('using site specific configuration at %s' % site_cfg)
ld_fn = lit_config.load_config
# Null out the load_config function so that lit.site.cfg doesn't
# recursively load a config even if it tries.
# TODO: This is one hell of a hack. Fix it.
def prevent_reload_fn(*args, **kwargs):
pass
lit_config.load_config = prevent_reload_fn
ld_fn(config, site_cfg)
lit_config.load_config = ld_fn
# Extract the value of a numeric macro such as __cplusplus or a feature-test
# macro.
def intMacroValue(token):
return int(token.rstrip('LlUu'))
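# e.g. intMacroValue('201703L') == 201703 and intMacroValue('1U') == 1; the
# integer-suffix letters are stripped before conversion.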
class Configuration(object):
# pylint: disable=redefined-outer-name
def __init__(self, lit_config, config):
self.lit_config = lit_config
self.config = config
self.cxx = None
self.cxx_is_clang_cl = None
self.cxx_stdlib_under_test = None
self.project_obj_root = None
self.libcxx_src_root = None
self.libcxx_obj_root = None
self.cxx_library_root = None
self.cxx_runtime_root = None
self.abi_library_root = None
self.link_shared = self.get_lit_bool('enable_shared', default=True)
self.debug_build = self.get_lit_bool('debug_build', default=False)
self.exec_env = dict()
self.use_system_cxx_lib = self.get_lit_bool('use_system_cxx_lib', False)
self.use_clang_verify = False
def get_lit_conf(self, name, default=None):
val = self.lit_config.params.get(name, None)
if val is None:
val = getattr(self.config, name, None)
if val is None:
val = default
return val
def get_lit_bool(self, name, default=None, env_var=None):
def check_value(value, var_name):
if value is None:
return default
if isinstance(value, bool):
return value
if not isinstance(value, str):
raise TypeError('expected bool or string')
if value.lower() in ('1', 'true'):
return True
if value.lower() in ('', '0', 'false'):
return False
self.lit_config.fatal(
"parameter '{}' should be true or false".format(var_name))
conf_val = self.get_lit_conf(name)
if env_var is not None and env_var in os.environ and \
os.environ[env_var] is not None:
val = os.environ[env_var]
if conf_val is not None:
self.lit_config.warning(
'Environment variable %s=%s is overriding explicit '
'--param=%s=%s' % (env_var, val, name, conf_val))
return check_value(val, env_var)
return check_value(conf_val, name)
def make_static_lib_name(self, name):
"""Return the full filename for the specified library name"""
if self.target_info.is_windows():
assert name == 'c++' # Only allow libc++ to use this function for now.
return 'lib' + name + '.lib'
else:
return 'lib' + name + '.a'
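        # e.g. make_static_lib_name('c++') yields 'libc++.lib' on Windows and
        # 'libc++.a' everywhere else.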
def configure(self):
self.target_info = make_target_info(self)
self.executor = self.get_lit_conf('executor')
self.configure_cxx()
self.configure_triple()
self.configure_src_root()
self.configure_obj_root()
self.cxx_stdlib_under_test = self.get_lit_conf('cxx_stdlib_under_test', 'libc++')
self.cxx_library_root = self.get_lit_conf('cxx_library_root', self.libcxx_obj_root)
self.abi_library_root = self.get_lit_conf('abi_library_path', None)
self.cxx_runtime_root = self.get_lit_conf('cxx_runtime_root', self.cxx_library_root)
self.configure_compile_flags()
self.configure_link_flags()
self.configure_env()
self.configure_debug_mode()
self.configure_warnings()
self.configure_sanitizer()
self.configure_coverage()
self.configure_modules()
self.configure_substitutions()
self.configure_features()
libcxx.test.newconfig.configure(
libcxx.test.params.DEFAULT_PARAMETERS,
libcxx.test.features.DEFAULT_FEATURES,
self.config,
self.lit_config
)
self.lit_config.note("All available features: {}".format(self.config.available_features))
def print_config_info(self):
if self.cxx.use_modules:
self.lit_config.note('Using modules flags: %s' %
self.cxx.modules_flags)
if len(self.cxx.warning_flags):
self.lit_config.note('Using warnings: %s' % self.cxx.warning_flags)
show_env_vars = {}
for k,v in self.exec_env.items():
if k not in os.environ or os.environ[k] != v:
show_env_vars[k] = v
self.lit_config.note('Adding environment variables: %r' % show_env_vars)
self.lit_config.note("Linking against the C++ Library at {}".format(self.cxx_library_root))
self.lit_config.note("Running against the C++ Library at {}".format(self.cxx_runtime_root))
sys.stderr.flush() # Force flushing to avoid broken output on Windows
def get_test_format(self):
from libcxx.test.format import LibcxxTestFormat
return LibcxxTestFormat(
self.cxx,
self.use_clang_verify,
self.executor,
exec_env=self.exec_env)
def configure_cxx(self):
# Gather various compiler parameters.
cxx = self.get_lit_conf('cxx_under_test')
self.cxx_is_clang_cl = cxx is not None and \
os.path.basename(cxx) == 'clang-cl.exe'
# If no specific cxx_under_test was given, attempt to infer it as
# clang++.
if cxx is None or self.cxx_is_clang_cl:
search_paths = self.config.environment['PATH']
if cxx is not None and os.path.isabs(cxx):
search_paths = os.path.dirname(cxx)
clangxx = libcxx.util.which('clang++', search_paths)
if clangxx:
cxx = clangxx
self.lit_config.note(
"inferred cxx_under_test as: %r" % cxx)
elif self.cxx_is_clang_cl:
self.lit_config.fatal('Failed to find clang++ substitution for'
' clang-cl')
if not cxx:
self.lit_config.fatal('must specify user parameter cxx_under_test '
'(e.g., --param=cxx_under_test=clang++)')
self.cxx = CXXCompiler(self, cxx) if not self.cxx_is_clang_cl else \
self._configure_clang_cl(cxx)
self.cxx.compile_env = dict(os.environ)
def _configure_clang_cl(self, clang_path):
def _split_env_var(var):
return [p.strip() for p in os.environ.get(var, '').split(';') if p.strip()]
def _prefixed_env_list(var, prefix):
from itertools import chain
return list(chain.from_iterable((prefix, path) for path in _split_env_var(var)))
assert self.cxx_is_clang_cl
flags = []
compile_flags = _prefixed_env_list('INCLUDE', '-isystem')
link_flags = _prefixed_env_list('LIB', '-L')
for path in _split_env_var('LIB'):
self.add_path(self.exec_env, path)
return CXXCompiler(self, clang_path, flags=flags,
compile_flags=compile_flags,
link_flags=link_flags)
def configure_src_root(self):
self.libcxx_src_root = self.get_lit_conf(
'libcxx_src_root', os.path.dirname(self.config.test_source_root))
def configure_obj_root(self):
self.project_obj_root = self.get_lit_conf('project_obj_root')
self.libcxx_obj_root = self.get_lit_conf('libcxx_obj_root')
if not self.libcxx_obj_root and self.project_obj_root is not None:
possible_roots = [
os.path.join(self.project_obj_root, 'libcxx'),
os.path.join(self.project_obj_root, 'projects', 'libcxx'),
os.path.join(self.project_obj_root, 'runtimes', 'libcxx'),
]
for possible_root in possible_roots:
if os.path.isdir(possible_root):
self.libcxx_obj_root = possible_root
break
else:
self.libcxx_obj_root = self.project_obj_root
def configure_features(self):
additional_features = self.get_lit_conf('additional_features')
if additional_features:
for f in additional_features.split(','):
self.config.available_features.add(f.strip())
# Write an "available feature" that combines the triple when
# use_system_cxx_lib is enabled. This is so that we can easily write
# XFAIL markers for tests that are known to fail with versions of
# libc++ as were shipped with a particular triple.
if self.use_system_cxx_lib:
(arch, vendor, platform) = self.config.target_triple.split('-', 2)
(sysname, version) = re.match(r'([^0-9]+)([0-9\.]*)', platform).groups()
self.config.available_features.add('with_system_cxx_lib={}-{}-{}{}'.format(arch, vendor, sysname, version))
self.config.available_features.add('with_system_cxx_lib={}{}'.format(sysname, version))
self.config.available_features.add('with_system_cxx_lib={}'.format(sysname))
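            # e.g. for the triple 'x86_64-apple-macosx10.15' this adds:
            #   with_system_cxx_lib=x86_64-apple-macosx10.15
            #   with_system_cxx_lib=macosx10.15
            #   with_system_cxx_lib=macosx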
if self.target_info.is_windows():
if self.cxx_stdlib_under_test == 'libc++':
# LIBCXX-WINDOWS-FIXME is the feature name used to XFAIL the
# initial Windows failures until they can be properly diagnosed
# and fixed. This allows easier detection of new test failures
# and regressions. Note: New failures should not be suppressed
# using this feature. (Also see llvm.org/PR32730)
self.config.available_features.add('LIBCXX-WINDOWS-FIXME')
libcxx_gdb = self.get_lit_conf('libcxx_gdb')
if libcxx_gdb and 'NOTFOUND' not in libcxx_gdb:
self.config.available_features.add('libcxx_gdb')
self.cxx.libcxx_gdb = libcxx_gdb
def configure_compile_flags(self):
self.configure_default_compile_flags()
# Configure extra flags
compile_flags_str = self.get_lit_conf('compile_flags', '')
self.cxx.compile_flags += shlex.split(compile_flags_str)
if self.target_info.is_windows():
# FIXME: Can we remove this?
self.cxx.compile_flags += ['-D_CRT_SECURE_NO_WARNINGS']
# Required so that tests using min/max don't fail on Windows,
# and so that those tests don't have to be changed to tolerate
# this insanity.
self.cxx.compile_flags += ['-DNOMINMAX']
additional_flags = self.get_lit_conf('test_compiler_flags')
if additional_flags:
self.cxx.compile_flags += shlex.split(additional_flags)
def configure_default_compile_flags(self):
# Configure include paths
self.configure_compile_flags_header_includes()
self.target_info.add_cxx_compile_flags(self.cxx.compile_flags)
self.target_info.add_cxx_flags(self.cxx.flags)
# Configure feature flags.
enable_32bit = self.get_lit_bool('enable_32bit', False)
if enable_32bit:
self.cxx.flags += ['-m32']
# Use verbose output for better errors
self.cxx.flags += ['-v']
sysroot = self.get_lit_conf('sysroot')
if sysroot:
self.cxx.flags += ['--sysroot=' + sysroot]
gcc_toolchain = self.get_lit_conf('gcc_toolchain')
if gcc_toolchain:
self.cxx.flags += ['--gcc-toolchain=' + gcc_toolchain]
# NOTE: the _DEBUG definition must precede the triple check because for
# the Windows build of libc++, the forced inclusion of a header requires
# that _DEBUG is defined. Incorrect ordering will result in -target
# being elided.
if self.target_info.is_windows() and self.debug_build:
self.cxx.compile_flags += ['-D_DEBUG']
if not self.cxx.addFlagIfSupported(['--target=' + self.config.target_triple]):
self.lit_config.warning('Not adding any target triple -- the compiler does '
'not support --target=<triple>')
# Add includes for support headers used in the tests.
support_path = os.path.join(self.libcxx_src_root, 'test/support')
self.cxx.compile_flags += ['-I' + support_path]
# If we're testing the upstream LLVM libc++, disable availability markup,
# which is not relevant for non-shipped flavors of libc++.
if not self.use_system_cxx_lib:
self.cxx.compile_flags += ['-D_LIBCPP_DISABLE_AVAILABILITY']
# Add includes for the PSTL headers
pstl_src_root = self.get_lit_conf('pstl_src_root')
pstl_obj_root = self.get_lit_conf('pstl_obj_root')
if pstl_src_root is not None and pstl_obj_root is not None:
self.cxx.compile_flags += ['-I' + os.path.join(pstl_src_root, 'include')]
self.cxx.compile_flags += ['-I' + os.path.join(pstl_obj_root, 'generated_headers')]
self.cxx.compile_flags += ['-I' + os.path.join(pstl_src_root, 'test')]
self.config.available_features.add('parallel-algorithms')
def configure_compile_flags_header_includes(self):
support_path = os.path.join(self.libcxx_src_root, 'test', 'support')
self.configure_config_site_header()
if self.cxx_stdlib_under_test != 'libstdc++' and \
not self.target_info.is_windows():
self.cxx.compile_flags += [
'-include', os.path.join(support_path, 'nasty_macros.h')]
if self.cxx_stdlib_under_test == 'msvc':
self.cxx.compile_flags += [
'-include', os.path.join(support_path,
'msvc_stdlib_force_include.h')]
pass
if self.target_info.is_windows() and self.debug_build and \
self.cxx_stdlib_under_test != 'msvc':
self.cxx.compile_flags += [
'-include', os.path.join(support_path,
'set_windows_crt_report_mode.h')
]
cxx_headers = self.get_lit_conf('cxx_headers')
if cxx_headers == '' or (cxx_headers is None
and self.cxx_stdlib_under_test != 'libc++'):
self.lit_config.note('using the system cxx headers')
return
self.cxx.compile_flags += ['-nostdinc++']
if cxx_headers is None:
cxx_headers = os.path.join(self.libcxx_src_root, 'include')
if not os.path.isdir(cxx_headers):
self.lit_config.fatal("cxx_headers='%s' is not a directory."
% cxx_headers)
self.cxx.compile_flags += ['-I' + cxx_headers]
if self.libcxx_obj_root is not None:
cxxabi_headers = os.path.join(self.libcxx_obj_root, 'include',
'c++build')
if os.path.isdir(cxxabi_headers):
self.cxx.compile_flags += ['-I' + cxxabi_headers]
def configure_config_site_header(self):
# Check for a possible __config_site in the build directory. We
# use this if it exists.
if self.libcxx_obj_root is None:
return
config_site_header = os.path.join(self.libcxx_obj_root, '__config_site')
if not os.path.isfile(config_site_header):
return
self.cxx.compile_flags += ['-include', config_site_header]
def configure_link_flags(self):
# Configure library path
self.configure_link_flags_cxx_library_path()
self.configure_link_flags_abi_library_path()
# Configure libraries
if self.cxx_stdlib_under_test == 'libc++':
self.cxx.link_flags += ['-nodefaultlibs']
# FIXME: Handle MSVCRT as part of the ABI library handling.
if self.target_info.is_windows():
self.cxx.link_flags += ['-nostdlib']
self.configure_link_flags_cxx_library()
self.configure_link_flags_abi_library()
self.configure_extra_library_flags()
elif self.cxx_stdlib_under_test == 'libstdc++':
self.cxx.link_flags += ['-lstdc++fs', '-lm', '-pthread']
elif self.cxx_stdlib_under_test == 'msvc':
# FIXME: Correctly setup debug/release flags here.
pass
elif self.cxx_stdlib_under_test == 'cxx_default':
self.cxx.link_flags += ['-pthread']
else:
self.lit_config.fatal('invalid stdlib under test')
link_flags_str = self.get_lit_conf('link_flags', '')
self.cxx.link_flags += shlex.split(link_flags_str)
def configure_link_flags_cxx_library_path(self):
if self.cxx_library_root:
self.cxx.link_flags += ['-L' + self.cxx_library_root]
if self.target_info.is_windows() and self.link_shared:
self.add_path(self.cxx.compile_env, self.cxx_library_root)
if self.cxx_runtime_root:
if not self.target_info.is_windows():
self.cxx.link_flags += ['-Wl,-rpath,' +
self.cxx_runtime_root]
elif self.target_info.is_windows() and self.link_shared:
self.add_path(self.exec_env, self.cxx_runtime_root)
additional_flags = self.get_lit_conf('test_linker_flags')
if additional_flags:
self.cxx.link_flags += shlex.split(additional_flags)
def configure_link_flags_abi_library_path(self):
# Configure ABI library paths.
if self.abi_library_root:
self.cxx.link_flags += ['-L' + self.abi_library_root]
if not self.target_info.is_windows():
self.cxx.link_flags += ['-Wl,-rpath,' + self.abi_library_root]
else:
self.add_path(self.exec_env, self.abi_library_root)
def configure_link_flags_cxx_library(self):
if self.link_shared:
self.cxx.link_flags += ['-lc++']
else:
if self.cxx_library_root:
libname = self.make_static_lib_name('c++')
abs_path = os.path.join(self.cxx_library_root, libname)
assert os.path.exists(abs_path), \
"static libc++ library does not exist"
self.cxx.link_flags += [abs_path]
else:
self.cxx.link_flags += ['-lc++']
def configure_link_flags_abi_library(self):
cxx_abi = self.get_lit_conf('cxx_abi', 'libcxxabi')
if cxx_abi == 'libstdc++':
self.cxx.link_flags += ['-lstdc++']
elif cxx_abi == 'libsupc++':
self.cxx.link_flags += ['-lsupc++']
elif cxx_abi == 'libcxxabi':
# If the C++ library requires explicitly linking to libc++abi, or
# if we're testing libc++abi itself (the test configs are shared),
# then link it.
testing_libcxxabi = self.get_lit_conf('name', '') == 'libc++abi'
if self.target_info.allow_cxxabi_link() or testing_libcxxabi:
libcxxabi_shared = self.get_lit_bool('libcxxabi_shared', default=True)
if libcxxabi_shared:
self.cxx.link_flags += ['-lc++abi']
else:
if self.abi_library_root:
libname = self.make_static_lib_name('c++abi')
abs_path = os.path.join(self.abi_library_root, libname)
self.cxx.link_libcxxabi_flag = abs_path
self.cxx.link_flags += [abs_path]
else:
self.cxx.link_flags += ['-lc++abi']
elif cxx_abi == 'libcxxrt':
self.cxx.link_flags += ['-lcxxrt']
elif cxx_abi == 'vcruntime':
debug_suffix = 'd' if self.debug_build else ''
self.cxx.link_flags += ['-l%s%s' % (lib, debug_suffix) for lib in
['vcruntime', 'ucrt', 'msvcrt']]
elif cxx_abi == 'none' or cxx_abi == 'default':
if self.target_info.is_windows():
debug_suffix = 'd' if self.debug_build else ''
self.cxx.link_flags += ['-lmsvcrt%s' % debug_suffix]
else:
self.lit_config.fatal(
'C++ ABI setting %s unsupported for tests' % cxx_abi)
def configure_extra_library_flags(self):
if self.get_lit_bool('cxx_ext_threads', default=False):
self.cxx.link_flags += ['-lc++external_threads']
self.target_info.add_cxx_link_flags(self.cxx.link_flags)
def configure_debug_mode(self):
debug_level = self.get_lit_conf('debug_level', None)
if not debug_level:
return
if debug_level not in ['0', '1']:
self.lit_config.fatal('Invalid value for debug_level "%s".'
% debug_level)
self.cxx.compile_flags += ['-D_LIBCPP_DEBUG=%s' % debug_level]
def configure_warnings(self):
# Turn on warnings by default for Clang based compilers
default_enable_warnings = self.cxx.type in ['clang', 'apple-clang']
enable_warnings = self.get_lit_bool('enable_warnings',
default_enable_warnings)
self.cxx.useWarnings(enable_warnings)
self.cxx.warning_flags += ['-Werror', '-Wall', '-Wextra']
# On GCC, the libc++ headers cause errors due to throw() decorators
# on operator new clashing with those from the test suite, so we
# don't enable warnings in system headers on GCC.
if self.cxx.type != 'gcc':
self.cxx.warning_flags += ['-D_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER']
self.cxx.addWarningFlagIfSupported('-Wshadow')
self.cxx.addWarningFlagIfSupported('-Wno-unused-command-line-argument')
self.cxx.addWarningFlagIfSupported('-Wno-attributes')
self.cxx.addWarningFlagIfSupported('-Wno-pessimizing-move')
self.cxx.addWarningFlagIfSupported('-Wno-c++11-extensions')
self.cxx.addWarningFlagIfSupported('-Wno-user-defined-literals')
self.cxx.addWarningFlagIfSupported('-Wno-noexcept-type')
self.cxx.addWarningFlagIfSupported('-Wno-aligned-allocation-unavailable')
self.cxx.addWarningFlagIfSupported('-Wno-atomic-alignment')
# These warnings should be enabled in order to support the MSVC
# team using the test suite; they enable the warnings below and
# expect the test suite to be clean.
self.cxx.addWarningFlagIfSupported('-Wsign-compare')
self.cxx.addWarningFlagIfSupported('-Wunused-variable')
self.cxx.addWarningFlagIfSupported('-Wunused-parameter')
self.cxx.addWarningFlagIfSupported('-Wunreachable-code')
self.cxx.addWarningFlagIfSupported('-Wno-unused-local-typedef')
def configure_sanitizer(self):
san = self.get_lit_conf('use_sanitizer', '').strip()
if san:
# Search for llvm-symbolizer along the compiler path first
# and then along the PATH env variable.
symbolizer_search_paths = os.environ.get('PATH', '')
cxx_path = libcxx.util.which(self.cxx.path)
if cxx_path is not None:
symbolizer_search_paths = (
os.path.dirname(cxx_path) +
os.pathsep + symbolizer_search_paths)
llvm_symbolizer = libcxx.util.which('llvm-symbolizer',
symbolizer_search_paths)
def add_ubsan():
self.cxx.flags += ['-fsanitize=undefined',
'-fno-sanitize=float-divide-by-zero',
'-fno-sanitize-recover=all']
self.exec_env['UBSAN_OPTIONS'] = 'print_stacktrace=1'
self.config.available_features.add('ubsan')
# Setup the sanitizer compile flags
self.cxx.flags += ['-g', '-fno-omit-frame-pointer']
if san == 'Address' or san == 'Address;Undefined' or san == 'Undefined;Address':
self.cxx.flags += ['-fsanitize=address']
if llvm_symbolizer is not None:
self.exec_env['ASAN_SYMBOLIZER_PATH'] = llvm_symbolizer
# FIXME: Turn ODR violation back on after PR28391 is resolved
# https://bugs.llvm.org/show_bug.cgi?id=28391
self.exec_env['ASAN_OPTIONS'] = 'detect_odr_violation=0'
self.config.available_features.add('asan')
self.config.available_features.add('sanitizer-new-delete')
self.cxx.compile_flags += ['-O1']
if san == 'Address;Undefined' or san == 'Undefined;Address':
add_ubsan()
elif san == 'Memory' or san == 'MemoryWithOrigins':
self.cxx.flags += ['-fsanitize=memory']
if san == 'MemoryWithOrigins':
self.cxx.compile_flags += [
'-fsanitize-memory-track-origins']
if llvm_symbolizer is not None:
self.exec_env['MSAN_SYMBOLIZER_PATH'] = llvm_symbolizer
self.config.available_features.add('msan')
self.config.available_features.add('sanitizer-new-delete')
self.cxx.compile_flags += ['-O1']
elif san == 'Undefined':
add_ubsan()
self.cxx.compile_flags += ['-O2']
elif san == 'Thread':
self.cxx.flags += ['-fsanitize=thread']
self.config.available_features.add('tsan')
self.config.available_features.add('sanitizer-new-delete')
elif san == 'DataFlow':
self.cxx.flags += ['-fsanitize=dataflow']
else:
self.lit_config.fatal('unsupported value for '
'use_sanitizer: {0}'.format(san))
san_lib = self.get_lit_conf('sanitizer_library')
if san_lib:
self.cxx.link_flags += [
san_lib, '-Wl,-rpath,%s' % os.path.dirname(san_lib)]
def configure_coverage(self):
self.generate_coverage = self.get_lit_bool('generate_coverage', False)
if self.generate_coverage:
self.cxx.flags += ['-g', '--coverage']
self.cxx.compile_flags += ['-O0']
def configure_modules(self):
modules_flags = ['-fmodules', '-Xclang', '-fmodules-local-submodule-visibility']
supports_modules = self.cxx.hasCompileFlag(modules_flags)
enable_modules = self.get_lit_bool('enable_modules', default=False,
env_var='LIBCXX_ENABLE_MODULES')
if enable_modules and not supports_modules:
self.lit_config.fatal(
'-fmodules is enabled but not supported by the compiler')
if not supports_modules:
return
module_cache = os.path.join(self.config.test_exec_root,
'modules.cache')
module_cache = os.path.realpath(module_cache)
if os.path.isdir(module_cache):
shutil.rmtree(module_cache)
os.makedirs(module_cache)
self.cxx.modules_flags += modules_flags + \
['-fmodules-cache-path=' + module_cache]
if enable_modules:
self.config.available_features.add('-fmodules')
self.cxx.useModules()
def configure_substitutions(self):
sub = self.config.substitutions
sub.append(('%{cxx}', pipes.quote(self.cxx.path)))
flags = self.cxx.flags + (self.cxx.modules_flags if self.cxx.use_modules else [])
compile_flags = self.cxx.compile_flags + (self.cxx.warning_flags if self.cxx.use_warnings else [])
sub.append(('%{flags}', ' '.join(map(pipes.quote, flags))))
sub.append(('%{compile_flags}', ' '.join(map(pipes.quote, compile_flags))))
sub.append(('%{link_flags}', ' '.join(map(pipes.quote, self.cxx.link_flags))))
sub.append(('%{link_libcxxabi}', pipes.quote(self.cxx.link_libcxxabi_flag)))
codesign_ident = self.get_lit_conf('llvm_codesign_identity', '')
env_vars = ' '.join('%s=%s' % (k, pipes.quote(v)) for (k, v) in self.exec_env.items())
exec_args = [
'--execdir %T',
'--codesign_identity "{}"'.format(codesign_ident),
'--env {}'.format(env_vars)
]
sub.append(('%{exec}', '{} {} -- '.format(self.executor, ' '.join(exec_args))))
if self.get_lit_conf('libcxx_gdb'):
sub.append(('%{libcxx_gdb}', self.get_lit_conf('libcxx_gdb')))
def configure_triple(self):
# Get or infer the target triple.
target_triple = self.get_lit_conf('target_triple')
# If no target triple was given, try to infer it from the compiler
# under test.
if not target_triple:
self.lit_config.note('Trying to infer the target_triple because none was specified')
target_triple = self.cxx.getTriple()
# Drop sub-major version components from the triple, because the
# current XFAIL handling expects exact matches for feature checks.
# Example: x86_64-apple-darwin14.0.0 -> x86_64-apple-darwin14
# The 5th group handles triples greater than 3 parts
# (ex x86_64-pc-linux-gnu).
target_triple = re.sub(r'([^-]+)-([^-]+)-([^.]+)([^-]*)(.*)',
r'\1-\2-\3\5', target_triple)
# linux-gnu is needed in the triple to properly identify linuxes
# that use GLIBC. Handle redhat and opensuse triples as special
# cases and append the missing `-gnu` portion.
if (target_triple.endswith('redhat-linux') or
target_triple.endswith('suse-linux')):
target_triple += '-gnu'
# Save the triple
self.lit_config.note("Setting target_triple to {}".format(target_triple))
self.config.target_triple = target_triple
def configure_env(self):
self.config.environment = dict(os.environ)
def add_path(self, dest_env, new_path):
self.target_info.add_path(dest_env, new_path)
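# Hedged usage sketch (not part of the original file): the lit parameters read above
# via get_lit_conf()/get_lit_bool() -- e.g. use_sanitizer, enable_warnings, debug_level,
# target_triple -- are normally supplied on the llvm-lit command line; the test path
# below is a placeholder.
#
#   llvm-lit -sv --param use_sanitizer=Address \
#       --param enable_warnings=True \
#       --param debug_level=1 \
#       <build-dir>/projects/libcxx/test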
|
|
#!/usr/bin/env python
import os.path
import argparse
import math
import random
import numpy as np
import sympy as sp
import matplotlib.pyplot as plt
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
import stats.methods as methods
from stats.utils import *
################
# Declarations #
################
SYM_X, SYM_Y = SYM_VALUES = sp.symbols('x y')
SYM_A, SYM_ALPHA = SYM_PARAMS = sp.symbols('a alpha')
# SYM_EXPR = sp.sympify('a * exp(-alpha*x)')
# SYM_EXPR_DELTA = sp.sympify('y - a * exp(-alpha*x)')
SYM_EXPR = sp.sympify('a * exp(alpha*x)')
SYM_EXPR_DELTA = sp.sympify('y - a * exp(alpha*x)')
# linear function
# SYM_EXPR = sp.sympify('a + alpha*x')
# SYM_EXPR_DELTA = sp.sympify('y - a - alpha*x')
# quadratic function
# SYM_EXPR = sp.sympify('a*(x**2) + alpha*x')
# SYM_EXPR_DELTA = sp.sympify('y - a*(x**2) - alpha*x')
# logarithmic function
# SYM_EXPR = sp.sympify('a + alpha*log(x)')
# SYM_EXPR_DELTA = sp.sympify('y - a - alpha*log(x)')
# sinusoidal function
# SYM_EXPR = sp.sympify('a + alpha*sin(x)')
# SYM_EXPR_DELTA = sp.sympify('y - (a + alpha*sin(x))')
MIN_X = 0
MAX_X = 10
NUM_VALS = 20 # number of source values
REAL_A = 31 # real 'a' value of source distribution
REAL_ALPHA = 0.5 # real 'alpha' value of source distribution
ERR_X_AVG = 0 # average of X error values
ERR_X_STD = 0.02 # std of X error values
ERR_Y_AVG = 0 # average of Y error values
ERR_Y_STD = 7 # std of Y error values
NUM_ITER = 10 # number of realizations
MNK_NUM_ITER = 1 # number of MNK iterations
################
# Program code #
################
DESCRIPTION = 'Use this script to determine estimates accuracy'
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument('-w', '--write-to', metavar='PATH',
type=str, help='file to write plot in')
# parse cli options
args = parser.parse_args()
# real X values without errors
real_x = np.linspace(MIN_X, MAX_X, NUM_VALS, dtype=float)  # note: np.float is removed in recent NumPy
# real Y values without errors
real_y = np.vectorize(
sp.lambdify(
SYM_X,
SYM_EXPR.subs({SYM_A: REAL_A, SYM_ALPHA: REAL_ALPHA}),
'numpy'
)
)(real_x)
print('Expression: {}'.format(SYM_EXPR))
print('Real A: {}'.format(REAL_A))
print('Real ALPHA: {}'.format(REAL_ALPHA))
print('Error X std: {}'.format(ERR_X_STD))
print('Error Y std: {}'.format(ERR_Y_STD))
print('Number of iterations: {}'.format(NUM_ITER))
print('-' * 40, '\n')
# plot real parameters
plt.figure(0)
plt.plot(REAL_A, REAL_ALPHA,
color='m', linestyle=' ',
marker='x', markersize=10,
mfc='r')
# accumulated accuracies for the error stds above
cur_basic_acc = 0
cur_mnk_acc = 0
cur_mrt_acc = 0
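# Note: each accuracy accumulated below is the Euclidean distance in (a, alpha)
# parameter space, sqrt((a_hat - REAL_A)**2 + (alpha_hat - REAL_ALPHA)**2), summed
# over the NUM_ITER realizations; smaller totals indicate better estimates.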
# iterate over noise realizations
for iter_i in range(NUM_ITER):
print('Iteration #{}:'.format(iter_i + 1))
# add X errors with current normal distribution
x = np.vectorize(
lambda v: v + random.gauss(ERR_X_AVG, ERR_X_STD)
)(real_x)
half_len = len(x) // 2  # integer index used to split the sample in half
# add Y errors with current normal distribution
y = np.vectorize(
lambda v: v + random.gauss(ERR_Y_AVG, ERR_Y_STD)
)(real_y)
################################
# Base values for basic search #
################################
# get base values as first pairs of values
base_values_first = {
SYM_X: [x[0], x[1]],
SYM_Y: [y[0], y[1]]
}
# get base values as half-distant pairs of values
base_values_half_dist = {
SYM_X: [x[0], x[half_len]],
SYM_Y: [y[0], y[half_len]]
}
# get base values as maximal distant pairs of values
base_values_max_dist = {
SYM_X: [x[0], x[-1]],
SYM_Y: [y[0], y[-1]]
}
# get base values as averages of two half-length subgroups
base_values_avg = {
SYM_X: [avg(x[:half_len]), avg(x[half_len:])],
SYM_Y: [avg(y[:half_len]), avg(y[half_len:])]
}
################
# Basic search #
################
# find params with basic method
basic_a, basic_alpha = methods.search_basic(
delta_expression=SYM_EXPR_DELTA,
parameters=(SYM_A, SYM_ALPHA),
values=base_values_max_dist
)
print('Basic a: {}'.format(basic_a))
print('Basic alpha: {}'.format(basic_alpha))
# add distance between estimates and real values
cur_basic_dst = (basic_a - REAL_A)**2 + (basic_alpha - REAL_ALPHA)**2
cur_basic_acc += math.sqrt(cur_basic_dst)
plt.figure(0)
plt.plot(basic_a, basic_alpha,
color='g', linestyle=' ',
marker='.', markersize=10,
mfc='g', label='values')
##############
# MNK search #
##############
# use basic estimates as init estimates for MNK
for i, (mnk_a, mnk_alpha) in methods.search_mnk(
expression=SYM_EXPR,
parameters=(SYM_A, SYM_ALPHA),
values={SYM_X: x},
result_values={SYM_Y: y},
init_estimates={SYM_A: basic_a, SYM_ALPHA: basic_alpha},
num_iter=MNK_NUM_ITER
):
mnk_y = np.vectorize(
sp.lambdify(
SYM_X,
SYM_EXPR.subs({SYM_A: mnk_a,
SYM_ALPHA: mnk_alpha}),
'numpy'
)
)(real_x)
print('MNK({}) a: {}'.format(i, mnk_a))
print('MNK({}) alpha: {}'.format(i, mnk_alpha))
# add distance between estimates and real values
cur_mnk_dst = (mnk_a - REAL_A)**2 + (mnk_alpha - REAL_ALPHA)**2
cur_mnk_acc += math.sqrt(cur_mnk_dst)
plt.figure(0)
plt.plot(mnk_a, mnk_alpha,
color='b', linestyle=' ',
marker='.', markersize=10,
mfc='b')
#################
# Mrt search #
#################
# find params with mrt method
mrt_a, mrt_alpha = methods.search_mrt(
delta_expression=SYM_EXPR_DELTA,
parameters=(SYM_A, SYM_ALPHA),
values={SYM_X: x, SYM_Y: y},
err_stds={SYM_X: ERR_X_STD, SYM_Y: ERR_Y_STD}
)
print('MRT a: {}'.format(mrt_a))
print('MRT alpha: {}'.format(mrt_alpha))
# add distance between estimates and real values
cur_mrt_dst = (mrt_a - REAL_A)**2 + (mrt_alpha - REAL_ALPHA)**2
cur_mrt_acc += math.sqrt(cur_mrt_dst)
plt.figure(0)
plt.plot(mrt_a, mrt_alpha,
color='r', linestyle=' ',
marker='.', markersize=10,
mfc='r')
print('-' * 40, '\n')
print('Basic accuracy: {}'.format(cur_basic_acc))
print('MNK accuracy: {}'.format(cur_mnk_acc))
print('MRT accuracy: {}'.format(cur_mrt_acc))
plt.figure(0)
plt.xlabel('$ a $')
plt.ylabel('$ \\alpha $')
plt.grid(True)
if args.write_to:
plt.savefig(args.write_to, dpi=100)
plt.show()
|
|
""" Selective Kernel Networks (ResNet base)
Paper: Selective Kernel Networks (https://arxiv.org/abs/1903.06586)
This was inspired by reading 'Compounding the Performance Improvements...' (https://arxiv.org/abs/2001.06268)
and a streamlined impl at https://github.com/clovaai/assembled-cnn but I ended up building something closer
to the original paper with some modifications of my own to better balance param count vs accuracy.
Hacked together by / Copyright 2020 Ross Wightman
"""
import math
from torch import nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from .helpers import build_model_with_cfg
from .layers import SelectiveKernel, ConvBnAct, create_attn
from .registry import register_model
from .resnet import ResNet
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'conv1', 'classifier': 'fc',
**kwargs
}
default_cfgs = {
'skresnet18': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/skresnet18_ra-4eec2804.pth'),
'skresnet34': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/skresnet34_ra-bdc0ccde.pth'),
'skresnet50': _cfg(),
'skresnet50d': _cfg(
first_conv='conv1.0'),
'skresnext50_32x4d': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/skresnext50_ra-f40e40bf.pth'),
}
class SelectiveKernelBasic(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, cardinality=1, base_width=64,
sk_kwargs=None, reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU,
norm_layer=nn.BatchNorm2d, attn_layer=None, aa_layer=None, drop_block=None, drop_path=None):
super(SelectiveKernelBasic, self).__init__()
sk_kwargs = sk_kwargs or {}
conv_kwargs = dict(drop_block=drop_block, act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer)
assert cardinality == 1, 'BasicBlock only supports cardinality of 1'
assert base_width == 64, 'BasicBlock does not support changing base width'
first_planes = planes // reduce_first
outplanes = planes * self.expansion
first_dilation = first_dilation or dilation
self.conv1 = SelectiveKernel(
inplanes, first_planes, stride=stride, dilation=first_dilation, **conv_kwargs, **sk_kwargs)
conv_kwargs['act_layer'] = None
self.conv2 = ConvBnAct(
first_planes, outplanes, kernel_size=3, dilation=dilation, **conv_kwargs)
self.se = create_attn(attn_layer, outplanes)
self.act = act_layer(inplace=True)
self.downsample = downsample
self.stride = stride
self.dilation = dilation
self.drop_block = drop_block
self.drop_path = drop_path
def zero_init_last_bn(self):
nn.init.zeros_(self.conv2.bn.weight)
def forward(self, x):
shortcut = x
x = self.conv1(x)
x = self.conv2(x)
if self.se is not None:
x = self.se(x)
if self.drop_path is not None:
x = self.drop_path(x)
if self.downsample is not None:
shortcut = self.downsample(shortcut)
x += shortcut
x = self.act(x)
return x
class SelectiveKernelBottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None,
cardinality=1, base_width=64, sk_kwargs=None, reduce_first=1, dilation=1, first_dilation=None,
act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, attn_layer=None, aa_layer=None,
drop_block=None, drop_path=None):
super(SelectiveKernelBottleneck, self).__init__()
sk_kwargs = sk_kwargs or {}
conv_kwargs = dict(drop_block=drop_block, act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer)
width = int(math.floor(planes * (base_width / 64)) * cardinality)
first_planes = width // reduce_first
outplanes = planes * self.expansion
first_dilation = first_dilation or dilation
self.conv1 = ConvBnAct(inplanes, first_planes, kernel_size=1, **conv_kwargs)
self.conv2 = SelectiveKernel(
first_planes, width, stride=stride, dilation=first_dilation, groups=cardinality,
**conv_kwargs, **sk_kwargs)
conv_kwargs['act_layer'] = None
self.conv3 = ConvBnAct(width, outplanes, kernel_size=1, **conv_kwargs)
self.se = create_attn(attn_layer, outplanes)
self.act = act_layer(inplace=True)
self.downsample = downsample
self.stride = stride
self.dilation = dilation
self.drop_block = drop_block
self.drop_path = drop_path
def zero_init_last_bn(self):
nn.init.zeros_(self.conv3.bn.weight)
def forward(self, x):
shortcut = x
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
if self.se is not None:
x = self.se(x)
if self.drop_path is not None:
x = self.drop_path(x)
if self.downsample is not None:
shortcut = self.downsample(shortcut)
x += shortcut
x = self.act(x)
return x
def _create_skresnet(variant, pretrained=False, **kwargs):
return build_model_with_cfg(
ResNet, variant, pretrained,
default_cfg=default_cfgs[variant],
**kwargs)
@register_model
def skresnet18(pretrained=False, **kwargs):
"""Constructs a Selective Kernel ResNet-18 model.
Different from the configs in the Selective Kernel paper or "Compounding the Performance Improvements...",
this variation splits the input channels to the selective convolutions to keep the param count down.
"""
sk_kwargs = dict(rd_ratio=1 / 8, rd_divisor=16, split_input=True)
model_args = dict(
block=SelectiveKernelBasic, layers=[2, 2, 2, 2], block_args=dict(sk_kwargs=sk_kwargs),
zero_init_last_bn=False, **kwargs)
return _create_skresnet('skresnet18', pretrained, **model_args)
@register_model
def skresnet34(pretrained=False, **kwargs):
"""Constructs a Selective Kernel ResNet-34 model.
Different from the configs in the Selective Kernel paper or "Compounding the Performance Improvements...",
this variation splits the input channels to the selective convolutions to keep the param count down.
"""
sk_kwargs = dict(rd_ratio=1 / 8, rd_divisor=16, split_input=True)
model_args = dict(
block=SelectiveKernelBasic, layers=[3, 4, 6, 3], block_args=dict(sk_kwargs=sk_kwargs),
zero_init_last_bn=False, **kwargs)
return _create_skresnet('skresnet34', pretrained, **model_args)
@register_model
def skresnet50(pretrained=False, **kwargs):
"""Constructs a Select Kernel ResNet-50 model.
Different from configs in Select Kernel paper or "Compounding the Performance Improvements..." this
variation splits the input channels to the selective convolutions to keep param count down.
"""
sk_kwargs = dict(split_input=True)
model_args = dict(
block=SelectiveKernelBottleneck, layers=[3, 4, 6, 3], block_args=dict(sk_kwargs=sk_kwargs),
zero_init_last_bn=False, **kwargs)
return _create_skresnet('skresnet50', pretrained, **model_args)
@register_model
def skresnet50d(pretrained=False, **kwargs):
"""Constructs a Select Kernel ResNet-50-D model.
Different from configs in Select Kernel paper or "Compounding the Performance Improvements..." this
variation splits the input channels to the selective convolutions to keep param count down.
"""
sk_kwargs = dict(split_input=True)
model_args = dict(
block=SelectiveKernelBottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True,
block_args=dict(sk_kwargs=sk_kwargs), zero_init_last_bn=False, **kwargs)
return _create_skresnet('skresnet50d', pretrained, **model_args)
@register_model
def skresnext50_32x4d(pretrained=False, **kwargs):
"""Constructs a Select Kernel ResNeXt50-32x4d model. This should be equivalent to
the SKNet-50 model in the Select Kernel Paper
"""
sk_kwargs = dict(rd_ratio=1/16, rd_divisor=32, split_input=False)
model_args = dict(
block=SelectiveKernelBottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4,
block_args=dict(sk_kwargs=sk_kwargs), zero_init_last_bn=False, **kwargs)
return _create_skresnet('skresnext50_32x4d', pretrained, **model_args)
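# Hedged usage sketch (illustrative, not part of the original file): once this module is
# imported as part of timm, the registered variants can be built through the factory;
# pretrained=False avoids any weight download.
#
#   import torch, timm
#   model = timm.create_model('skresnet18', pretrained=False)
#   logits = model(torch.randn(1, 3, 224, 224))   # expected shape: (1, 1000)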
|
|
"""
.. module:: pdf
:synopsis: This module implements Probability Density Functions (PDFs).
.. moduleauthor:: Martin Hunt <[email protected]>
This file is part of PUQ
Copyright (c) 2013 PUQ Authors
See LICENSE file for terms.
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import math
from scipy import trapz, interpolate
import scipy.stats
from puq.options import options
from scipy.stats import gaussian_kde
from logging import info, debug, exception, warning, critical
import sys, matplotlib
if sys.platform == 'darwin':
matplotlib.use('macosx', warn=False)
else:
matplotlib.use('tkagg', warn=False)
import matplotlib.pyplot as plt
# Python 3
if sys.version[0] == "3":
import builtins
else:
import __builtin__ as builtins
"""
Class implementing a PDF (Probability Density Function).
"""
class PDF(object):
"""
Create a PDF (Probability Density Function) object.
Use this to create a PDF object given a list or array
of x values and the corresponding PDF values.
Args:
xvals (1D array or list): x values
yvals (1D array or list): values for PDF(x)
"""
def __init__(self, xvals, yvals):
# if order is reversed, flip it
if xvals[0] > xvals[-1]:
xvals = xvals[::-1]
yvals = yvals[::-1]
# number of intervals to partition our range
nsamp = options['pdf']['numpart']
# for pdfs with tails, set the range for sampling
_range = options['pdf']['range']
range = [(1.0 - _range)/2.0, (1.0 + _range)/2.0]
self.x = xvals
if len(xvals) == 1 or xvals[0] == xvals[-1]:
self.x = [xvals[0]]
self.y = [1]
self.cdfy = [1]
self.mean = xvals[0]
self.dev = 0
return
self.cdfy = np.append([0.0], np.cumsum((np.diff(yvals)/2.0 + yvals[:-1])*np.diff(xvals)))
self.cdfy /= self.cdfy[-1]
# Trim tails that have grown to 10% of the range of the PDF
resample = False
mmin, mmax = self.ppf([0, 1])
dist = mmax - mmin
if dist == 0.0:
self.x = [xvals[0]]
self.y = [1]
self.cdfy = [1]
self.mean = xvals[0]
self.dev = 0
return
# print "range of pdf = [%s - %s]" % (mmin, mmax)
# print "range of PDF = [%s - %s]" % (xvals[0], xvals[-1])
# print "dist=%s" % (dist)
# print "proposed range = [%s - %s]" % (self.ppf(range[0]), self.ppf(range[1]))
if np.isnan(mmin) or abs((mmin - self.ppf(range[0])) / dist) > .1:
mmin = self.ppf(range[0])
resample = True
else:
mmin = xvals[0]
if np.isnan(mmax) or abs((mmax - self.ppf(range[1])) / dist) > .1:
mmax = self.ppf(range[1])
resample = True
else:
mmax = xvals[-1]
# resample if not even spacing
if not resample:
resample = not np.allclose(np.diff(xvals)[0], np.diff(xvals))
# resample if number of intervals is 10% too large or small
if not resample:
resample = np.abs(len(xvals) - nsamp) > (nsamp * .1)
if resample:
self.x = np.linspace(mmin, mmax, nsamp)
self.y = np.interp(self.x, xvals, yvals)
self.y = np.abs(self.y / trapz(self.y, self.x))
self.cdfy = np.append([0.0], np.cumsum((np.diff(self.y)/2.0 + self.y[:-1])*np.diff(self.x)))
else:
# normalize (integral must be 1.0)
self.y = yvals / trapz(yvals, self.x)
self.mean = trapz(self.x * self.y, self.x)
self.dev = np.sqrt(np.abs(trapz(self.y * (self.x - self.mean)**2, self.x)))
@property
def range(self):
"""
The range for the PDF. For PDFs with long tails,
it is truncated to 99.99% by default. You can
customize this by setting options['pdf']['range'].
Returns:
A tuple containing the min and max.
"""
return (self.x[0], self.x[-1])
@property
def srange(self):
"""
The small range for the PDF. For PDFs with long tails,
it is truncated to 99.8% by default. You can
customize this by setting options['pdf']['srange'].
Returns:
A tuple containing the min and max.
"""
_range = options['pdf']['srange']
range = [(1.0 - _range)/2.0, (1.0 + _range)/2.0]
mmin, mmax = self.ppf([0, 1])
dist = mmax - mmin
if np.isnan(mmin) or abs((mmin - self.ppf(range[0])) / dist) > .1:
mmin = self.ppf(range[0])
else:
mmin = self.x[0]
if np.isnan(mmax) or abs((mmax - self.ppf(range[1])) / dist) > .1:
mmax = self.ppf(range[1])
else:
mmax = self.x[-1]
return (mmin, mmax)
def _cdf(self, yvals, delta):
y = (yvals + yvals[0]) / 2.0
return delta * (yvals.cumsum() - y)
def pdf(self, arr):
"""
Computes the Probability Density Function (PDF) for some values.
Args:
arr: Array of x values.
Returns:
Array of pdf(x).
"""
return np.interp(arr, self.x, self.y, left=0.0, right=0.0)
def cdf(self, arr):
"""
Computes the Cumulative Density Function (CDF) for some values.
Args:
arr: Array of x values.
Returns:
Array of cdf(x).
"""
return np.interp(arr, self.x, self.cdfy, left=0.0, right=1.0)
def ppf(self, arr):
"""
Percent Point Function (inverse CDF)
Args:
arr: Array of x values.
Returns:
Array of ppf(x).
"""
return np.interp(arr, self.cdfy, self.x)
def lhs1(self, num):
"""
Latin Hypercube Sample in [-1,1] for this distribution.
The order of the numbers
in the array is random, so it can be combined with other arrays
to form a latin hypercube. Note that this can return values
outside the range [-1,1] for distributions with long tails.
This method is used by :mod:`puq.Smolyak`.
Args:
num: Number of samples to generate.
Returns:
1D array of length *num*.
"""
pmin, pmax = self.range
return (2. * self.lhs(num) - (pmax + pmin)) / (pmax - pmin)
def ds1(self, num):
'''
Generates a descriptive sample in [-1,1] for this distribution.
The order of the numbers
in the array is random, so it can be combined with other arrays
to form a latin hypercube. Note that this *can* return values
outside the range [-1,1] for distributions with long tails.
This method is used by :mod:`puq.Smolyak`.
:param num: Number of samples to generate.
:returns: 1D array of length *num*.
'''
pmin, pmax = self.range
return (2. * self.ds(num) - (pmax + pmin)) / (pmax - pmin)
def lhs(self, num):
'''
Latin Hypercube Sample for this distribution.
The order of the numbers in the array is random, so it can be
combined with other arrays to form a latin hypercube.
This method is used by :class:`LHS`.
:param num: Number of samples to generate.
:returns: 1D array of length *num*.
'''
return np.random.permutation(self.ppf((np.arange(0, num) + np.random.uniform(0, 1, num))/num))
def ds(self, num):
'''
Generates a descriptive sample for this distribution.
The order of the numbers
in the array is random, so it can be combined with other arrays
to form a latin hypercube.
This method is used by :class:`LHS`.
:param num: Number of samples to generate.
:returns: 1D array of length *num*.
'''
return np.random.permutation(self.ppf(np.arange(0.5, num)/num))
def random(self, num):
"""
Generate random numbers fitting this parameter's distribution.
This method is used by :class:`MonteCarlo`.
:param num: Number of samples to generate.
:returns: 1D array of length *num*.
"""
return self.ppf(np.random.uniform(0, 1, num))
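# Hedged usage sketch: the three samplers above draw from the same distribution with
# different stratification. For example, with a PDF p:
#   p.random(100)  # plain Monte Carlo: ppf of uniform draws
#   p.ds(100)      # descriptive sample: ppf of stratum midpoints, shuffled
#   p.lhs(100)     # Latin Hypercube: ppf of a random point per stratum, shuffled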
def __neg__(self):
return PDF(-self.x[::-1], self.y[::-1])
def __radd__(self, b):
# print "__radd %s %s" % (self,b)
return self._nadd(b)
def _nadd(self, b):
# print "_nadd %s" % (b)
# add a scalar to a PDF
return PDF(b + self.x, self.y)
def __add__(self, b):
"Add two PDFs, returning a new one."
# print "__add__ %s %s" % (self,b)
if isinstance(b, int) or isinstance(b, float):
return self._nadd(b)
if sys.version[0] == "2" and isinstance(b, long):
return self._nadd(b)
a = self
if (a.x[-1] - a.x[0] < b.x[-1] - b.x[0]):
a, b = b, a
a0, a1 = a.x[0], a.x[-1]
b0, b1 = b.x[0], b.x[-1]
ar = a1 - a0
br = b1 - b0
nsamp = options['pdf']['numpart']
cx = np.linspace(0, ar, nsamp)
dx = ar/(nsamp-1.0)
blen = int(math.ceil(br/dx))
c = np.convolve(a.pdf(cx + a0), b.pdf(cx[:blen] + b0))
cx = np.linspace(a0 + b0, a1 + b1, num=len(c), endpoint=False)
return PDF(cx, c)
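# Hedged example: __add__ convolves the two densities numerically, so summing two
# independent UniformPDF(0, 1) variables yields (approximately) a triangular PDF on
# [0, 2] with its mode near 1:
#   s = UniformPDF(0, 1) + UniformPDF(0, 1)
#   s.mode   # ~1.0
#   s.range  # ~(0.0, 2.0)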
def __rsub__(self, b):
return PDF(b-self.x[::-1], self.y[::-1])
def __sub__(self, b):
'Subtract two PDFs, returning a new PDF'
return self.__add__(-b)
def __rmul__(self, b):
return self._nmul(b)
def _nmul(self, b):
if b == 0:
raise ValueError("Multiplying by 0 does not produce a PDF.")
return PDF(b * self.x, self.y)
def __mul__(self, b):
"Multiply two PDFs, returning a new PDF"
if isinstance(b, int) or isinstance(b, float):
return self._nmul(b)
if sys.version[0] == "2" and isinstance(b, long):
return self._nmul(b)
a = self
# if second variable crosses 0, swap the order for best results
if b.x[0] < 0 and b.x[-1] > 0:
a, b = b, a
extremes = np.outer([a.x[0], a.x[-1]], [b.x[0], b.x[-1]])
zmin, zmax = np.min(extremes), np.max(extremes)
bx = b.x
by = b.y.reshape(-1, 1)
if zmin * zmax <= 0:
# if the range crosses 0, do not evaluate at 0
by = by[bx != 0.0]
bx = bx[bx != 0.0]
cx = np.linspace(zmin, zmax, options['pdf']['numpart'])
cy = np.sum(np.abs([a.pdf(cx/x)/x for x in bx]) * by, 0)
return PDF(cx, cy)
def _ndiv(self, b):
if b == 0:
raise ValueError("Cannot divide a PDF by 0.")
return PDF(self.x/b, self.y)
def __rdiv__(self, b):
if self.x[0]*self.x[-1] <= 0:
raise ValueError("Cannot divide by PDFs that include 0")
if b == 0:
raise ValueError("Dividing 0 by a PDF does not return a PDF")
extremes = [b/self.x[0], b/self.x[-1]]
zmin, zmax = np.min(extremes), np.max(extremes)
nsamp = options['pdf']['numpart']
cx = np.linspace(zmin, zmax, nsamp)
return PDF(cx, self.pdf(b/cx)/cx**2)
def __truediv__(self, b):
return self.__div__(b)
def __rtruediv__(self, b):
return self.__rdiv__(b)
def __div__(self, b):
"Divide two PDFs, returning a new PDF"
if isinstance(b, int) or isinstance(b, float):
return self._ndiv(b)
if sys.version[0] == "2" and isinstance(b, long):
return self._ndiv(b)
if b.x[0]*b.x[-1] <= 0:
raise ValueError("Cannot divide by PDFs that include 0")
a = self
extremes = np.outer([a.x[0], a.x[-1]], [1.0/b.x[0], 1.0/b.x[-1]])
zmin, zmax = np.min(extremes), np.max(extremes)
bx = b.x
by = b.y.reshape(-1, 1)
nsamp = options['pdf']['numpart']
cx = np.linspace(zmin, zmax, nsamp)
cy = np.sum([a.pdf(x * cx)*x for x in bx] * by, 0)
return PDF(cx, cy)
@property
def mode(self):
"""
Find the mode of the PDF. The mode is the x value at which pdf(x)
is at its maximum. It is the peak of the PDF.
"""
if len(self.x) == 1:
return self.x[0]
mode = None
maxy = None
for x, y in zip(self.x, self.y):
if mode is None or y > maxy:
mode = x
maxy = y
return mode
def __str__(self):
_str = "PDF [%.3g - %.3g] " % (self.x[0], self.x[-1])
_str += "mean=%.3g dev=%.3g mode=%.3g" % (self.mean, self.dev, self.mode)
return _str
def plot(self, color='', fig=False):
"""
Plot a PDF.
:param color: Optional color for the plot.
:type color: String.
:param fig: Create a new matplotlib figure to hold the plot.
:type fig: Boolean.
:returns: A list of lines that were added.
"""
if fig:
plt.figure()
if color:
plt.plot([self.x[0], self.x[0]], [0, self.y[0]], color=color)
plt.plot([self.x[-1], self.x[-1]], [0, self.y[-1]], color=color)
return plt.plot(self.x, self.y, color=color)
else:
plt.plot([self.x[0], self.x[0]], [0, self.y[0]], color='g')
plt.plot([self.x[-1], self.x[-1]], [0, self.y[-1]], color='g')
return plt.plot(self.x, self.y, color='g')
# ipython pretty print method
def _repr_pretty_(self, p, cycle):
if cycle:
return
self.plot()
p.text(self.__str__())
def _get_range(sfunc, min, max):
" Truncate PDFs with long tails"
num_tails = int(sfunc.ppf(0) == np.NINF) + int(sfunc.ppf(1) == np.PINF)
_range = options['pdf']['range']
if num_tails:
if num_tails == 2:
range = [(1.0 - _range)/2, (1.0 + _range)/2]
else:
range = [1.0 - _range, _range]
mmin = sfunc.ppf(0)
if mmin == np.NINF:
mmin = sfunc.ppf(range[0])
mmax = sfunc.ppf(1)
if mmax == np.PINF:
mmax = sfunc.ppf(range[1])
if min is not None:
min = builtins.max(min, mmin)
else:
min = mmin
if max is not None:
max = builtins.min(max, mmax)
else:
max = mmax
return min, max
def ExponPDF(rate):
"""
Creates Exponential Probability Density Function.
:param rate: The rate parameter for the distribution. Must be > 0.
:returns: A PDF object
See http://en.wikipedia.org/wiki/Exponential_distribution
"""
if rate <= 0:
raise ValueError("Rate must be greater than 0.")
sfunc = scipy.stats.expon(loc=0, scale=1.0/rate)
nsamp = options['pdf']['numpart']
min, max = _get_range(sfunc, None, None)
x = np.linspace(min, max, nsamp)
return PDF(x, sfunc.pdf(x))
def RayleighPDF(scale):
"""
Creates Rayleigh Probability Density Function.
:param scale: The scale. Must be > 0.
:returns: A PDF object
See http://en.wikipedia.org/wiki/Rayleigh_distribution
"""
if scale <= 0:
raise ValueError("Scale must be greater than 0.")
sfunc = scipy.stats.rayleigh(loc=0, scale=scale)
nsamp = options['pdf']['numpart']
min, max = _get_range(sfunc, None, None)
x = np.linspace(min, max, nsamp)
return PDF(x, sfunc.pdf(x))
def WeibullPDF(shape, scale):
"""
Creates Weibull Probability Density Function.
:param shape: The shape. Must be > 0.
:param scale: The scale. Must be > 0.
:returns: A PDF object
See http://en.wikipedia.org/wiki/Weibull_distribution
"""
if shape <= 0 or scale <= 0:
raise ValueError("Shape and Scale must be greater than 0.")
sfunc = scipy.stats.exponweib(1, shape, scale=scale)
nsamp = options['pdf']['numpart']
mmin = None
if sfunc.pdf(0) == np.PINF:
mmin = .01
min, max = _get_range(sfunc, mmin, None)
x = np.linspace(min, max, nsamp)
return PDF(x, sfunc.pdf(x))
def NormalPDF(mean, dev, min=None, max=None):
"""
Creates a normal (gaussian) Probability Density Function.
:param mean: The mean.
:param dev: The standard deviation.
:param min: A minimum value for the PDF (default None).
:param max: A maximum value for the PDF (default None).
:returns: A PDF object
For the normal distribution, you must specify **mean** and **dev**.
:Example:
>>> n = NormalPDF(10,1)
>>> n = NormalPDF(mean=10, dev=1)
>>> n = NormalPDF(mean=10, dev=1, min=10)
"""
if dev <= 0:
raise ValueError("Deviation must be positive.")
sfunc = scipy.stats.norm(loc=mean, scale=dev)
min, max = _get_range(sfunc, min, max)
dev = float(dev)
a = (min - mean) / dev
b = (max - mean) / dev
sfunc = scipy.stats.truncnorm(a, b, loc=mean, scale=dev)
nsamp = options['pdf']['numpart']
x = np.linspace(min, max, nsamp)
return PDF(x, sfunc.pdf(x))
def NetPDF(addr):
"""
Retrieves a PDF from a remote address.
:param addr: URI. PDF must be stored in JSON format
:returns: A PDF object
:Example:
>>> u = NetPDF('http://foo.com/myproject/parameters/density')
"""
from jpickle import NetObj
p = NetObj(addr)
if not isinstance(p, PDF):
raise Exception('Link is not a PDF')
return p
def UniformPDF(min=None, max=None, mean=None):
"""
Creates a uniform Probability Density Function.
:param min: The minimum value
:param max: The maximum value
:param mean: The mean value
:returns: A PDF object
For the uniform distribution, you must specify two of (min, max, and mean).
The third parameter will be calculated automatically.
:Example:
>>> u = UniformPDF(10,20)
>>> u = UniformPDF(min=10, max=20)
>>> u = UniformPDF(min=10, mean=15)
"""
def usage(match=0):
if match:
raise ValueError("mean must be (min+max)/2. Try specifying just min and max.")
raise ValueError("For uniform distribution, you must specify two of (min, max, and mean).")
if min is not None and max is not None and mean is not None:
# check agreement
if not np.allclose(mean, (min + max)/2.0, atol=1e-6):
usage(1)
if mean is None:
if max is None or min is None:
usage()
mean = (max + min) / 2.0
if max is None:
if mean is None or min is None:
usage()
max = mean + (mean - min)
if min is None:
min = mean - (max - mean)
if min > max:
raise ValueError("min must not be > mean or max!")
return PDF([min, max], [1, 1])
def TrianglePDF(min, mode, max):
"""
Creates a triangle Probability Density Function.
See http://en.wikipedia.org/wiki/Triangular_distribution
:param min: The minimum value
:param mode: The mode
:param max: The maximum value
:returns: A PDF object
You can enter the parameters in any order. They will be sorted so that the mode
is the middle value.
"""
min, mode, max = np.sort([min, mode, max])
return PDF([min, mode, max], [0, 1, 0])
def JeffreysPDF(min, max):
# untested
min = float(min)
max = float(max)
return PDF([min, max], [1.0 / (min * np.log(max/min)), 1.0 / (max * np.log(max/min))])
def ExperimentalPDF(data, min=None, max=None, fit=False, bw=None, nbins=0, prior=None, error=None, force=False):
"""
Create an experimental PDF.
An experimental PDF is derived from the results of an experiment or
measurement of some parameter. It has actual data attached to it.
That data is then used to create a PDF by one of three different methods.
The PDF can be built by binning the data and linearly
interpolating, using a Gaussian KDE, or using Bayesian inference.
:param data: Our quantity of interest.
:type data: Array of scalars
:param nbins: Number of bins (used if fit is false). By default the bin
width follows the Freedman-Diaconis rule, 2*IQR/n^(1/3), where IQR
is the interquartile range of the data.
:type nbins: int
:param fit: Use Gaussian KDE (default=False)
:type fit: True or "Gaussian"
:param bw: Bandwidth for Gaussian KDE (default=None)
:type bw: string or float. String must be 'scott' or 'silverman'
:param prior: Prior PDF to use for Bayesian Inference.
[default=None (uninformative)]
:type prior: PDF
:param error: Error in the data. For example, the measurement error.
Required for Bayesian.
:type error: PDF. Typically a NormalPDF with a mean of 0.
"""
data = np.array(data).astype(np.float64)
if not force and min is not None and min > np.min(data):
raise ValueError('min cannot be set to more than minimum value in the data.')
if not force and max is not None and max < np.max(data):
raise ValueError('max cannot be set to less than maximum value in the data.')
if nbins and nbins <= 1:
raise ValueError("ERROR: invalid number of bins: %s" % nbins)
# constant
if np.min(data) == np.max(data) and not error:
p = PDF([np.min(data)], [1])
p.data = data
return p
if len(data) < 1 or (len(data) == 1 and not error):
raise ValueError("ERROR: need at least two data points to build a PDF, or a prior and 1 data point.")
if error:
# Bayesian parameter estimation
if not isinstance(error, PDF):
raise ValueError("ERROR: error is not a PDF")
data = data + error
p = posterior(data, prior)
elif fit is True or (type(fit) is str and fit.lower() == 'gaussian'):
# Gaussian KDE
if np.min(data) == np.max(data):
raise ValueError("Cannot generate PDF fron non-variable data.")
gkde = gaussian_kde(data, bw_method=bw)
dev = np.std(data)
mean = np.mean(data)
if min is None:
min = mean - 5 * dev
if max is None:
max = mean + 5 * dev
x = np.linspace(float(min), float(max), options['pdf']['numpart'])
p = PDF(x, gkde.evaluate(x))
else:
# linear interpolation from histograms
if nbins == 0:
iqr = scipy.stats.scoreatpercentile(data, 75) - scipy.stats.scoreatpercentile(data, 25)
if iqr == 0.0:
# constant
p = PDF([np.min(data)], [1])
p.data = data
return p
nbins = int((np.max(data) - np.min(data)) / (2*iqr/len(data)**(1.0/3)) + .5)
y, bins = np.histogram(data, nbins, normed=True)
if len(bins) > 2:
x = bins[:-1] + np.diff(bins) / 2.0
sp = interpolate.splrep(x, y, s=0, k=1)
mmin = bins[0]
mmax = bins[-1]
if min is not None:
mmin = min
if max is not None:
mmax = max
x = np.linspace(float(mmin), float(mmax), options['pdf']['numpart'])
y = interpolate.splev(x, sp, der=0)
if np.isnan(np.sum(y)):
# interpolate failed. constant pdf
p = PDF([np.min(data)], [1])
p.data = [data[0]]
return p
y[y < 0] = 0 # if the extrapolation goes negative...
p = PDF(x, y)
else:
# not enough data. assume uniform over range
p = PDF([np.min(data), np.max(data)], [1, 1])
p.data = data
return p
def HPDF(data, min=None, max=None):
"""
Histogram PDF - initialized with points from a histogram.
This function creates a PDF from a histogram. This is useful when some other software has
generated a PDF from your data.
:param data: A two dimensional array. The first column is the histogram interval mean,
and the second column is the probability. The probability values do not need to be
normalized.
:param min: A minimum value for the PDF range. If your histogram has values very close
to 0, and you know values of 0 are impossible, then you should set the ***min*** parameter.
:param max: A maximum value for the PDF range.
:type data: 2D numpy array
:returns: A PDF object.
"""
x = data[:, 0]
y = data[:, 1]
sp = interpolate.splrep(x, y)
dx = (x[1] - x[0]) / 2.0
mmin = x[0] - dx
mmax = x[-1] + dx
if min is not None:
mmin = builtins.max(min, mmin)
if max is not None:
mmax = builtins.min(max, mmax)
x = np.linspace(mmin, mmax, options['pdf']['numpart'])
y = interpolate.splev(x, sp)
y[y < 0] = 0 # if the extrapolation goes negative...
return PDF(x, y)
def posterior(data, prior=None):
"""
Computes posterior PDF.
:param data: A PDF or list or array of PDFs.
:param prior: If no prior is specified, a noninformative prior is used.
:returns: A posterior PDF object.
"""
if prior:
if not isinstance(prior, PDF):
raise ValueError("ERROR: prior is not a PDF")
data = np.append(data, prior)
else:
data = np.array(data)
# The X range needs to be constrained to where all
# the input PDFS are defined.
rmin = max([c.x[0] for c in data])
rmax = min([c.x[-1] for c in data])
x = np.linspace(rmin, rmax, options['pdf']['numpart'])
y = np.prod([c.pdf(x) for c in data], 0)
return PDF(x, y)
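# Hedged usage sketch (illustrative only): a Bayesian update combining a prior with a
# measurement PDF via the posterior() helper above. The values are arbitrary.
#
#   prior = NormalPDF(10.0, 2.0)
#   measurement = NormalPDF(9.5, 0.5)
#   post = posterior([measurement], prior)
#   print(post)   # reports range, mean, dev and mode of the posterior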
|
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Contains the core classes and functionality that makes Horizon what it is.
This module is considered internal, and should not be relied on directly.
Public APIs are made available through the :mod:`horizon` module and
the classes contained therein.
"""
import collections
import copy
import inspect
import logging
import os
import django
from django.conf import settings
from django.conf.urls import include
from django.conf.urls import url
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.utils.encoding import python_2_unicode_compatible
from django.utils.functional import empty
from django.utils.functional import SimpleLazyObject
from django.utils.module_loading import module_has_submodule
from django.utils.translation import ugettext_lazy as _
from importlib import import_module
import six
from horizon import conf
from horizon.decorators import _current_component
from horizon.decorators import require_auth
from horizon.decorators import require_perms
from horizon import loaders
from horizon.utils import settings as utils_settings
# Name of the panel group for panels to be displayed without a group.
DEFAULT_PANEL_GROUP = 'default'
LOG = logging.getLogger(__name__)
def _decorate_urlconf(urlpatterns, decorator, *args, **kwargs):
for pattern in urlpatterns:
if getattr(pattern, 'callback', None):
decorated = decorator(pattern.callback, *args, **kwargs)
if django.VERSION >= (1, 10):
pattern.callback = decorated
else:
# prior to 1.10 callback was a property and we had
# to modify the private attribute behind the property
pattern._callback = decorated
if getattr(pattern, 'url_patterns', []):
_decorate_urlconf(pattern.url_patterns, decorator, *args, **kwargs)
# FIXME(lhcheng): We need to find a better way to cache the result.
# Rather than storing it in the session, we could leverage the Django
# cache framework. Currently, this has been causing issues with the
# cookie backend, adding 1600+ bytes to the cookie size.
def access_cached(func):
def inner(self, context):
session = context['request'].session
try:
if session['allowed']['valid_for'] != session.get('token'):
raise KeyError()
except KeyError:
session['allowed'] = {"valid_for": session.get('token')}
key = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
if key not in session['allowed']:
session['allowed'][key] = func(self, context)
session.modified = True
return session['allowed'][key]
return inner
class NotRegistered(Exception):
pass
@python_2_unicode_compatible
class HorizonComponent(object):
policy_rules = None
def __init__(self):
super(HorizonComponent, self).__init__()
if not self.slug:
raise ImproperlyConfigured('Every %s must have a slug.'
% self.__class__)
def __str__(self):
name = getattr(self, 'name', u"Unnamed %s" % self.__class__.__name__)
return name
def _get_default_urlpatterns(self):
package_string = '.'.join(self.__module__.split('.')[:-1])
if getattr(self, 'urls', None):
try:
mod = import_module('.%s' % self.urls, package_string)
except ImportError:
mod = import_module(self.urls)
urlpatterns = mod.urlpatterns
else:
# Try importing a urls.py from the dashboard package
if module_has_submodule(import_module(package_string), 'urls'):
urls_mod = import_module('.urls', package_string)
urlpatterns = urls_mod.urlpatterns
else:
urlpatterns = []
return urlpatterns
# FIXME(lhcheng): Removed the access_cached decorator for now until
# a better implementation has been figured out. It has been causing
# issues with the cookie backend, adding 1600+ bytes to the cookie size.
# @access_cached
def can_access(self, context):
"""Return whether the user has role based access to this component.
This method is not intended to be overridden.
The result of the method is stored in per-session cache.
"""
return self.allowed(context)
def allowed(self, context):
"""Checks if the user is allowed to access this component.
This method should be overridden to return the result of
any policy checks required for the user to access this component
when more complex checks are required.
"""
return self._can_access(context['request'])
def _can_access(self, request):
policy_check = utils_settings.import_setting("POLICY_CHECK_FUNCTION")
# this check is an OR check rather than an AND check that is the
# default in the policy engine, so calling each rule individually
if policy_check and self.policy_rules:
for rule in self.policy_rules:
rule_param = rule
if not any(isinstance(r, (list, tuple)) for r in rule):
rule_param = (rule,)
if policy_check(rule_param, request):
return True
return False
# default to allowed
return True
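# Hedged example (not from the original source): policy_rules is evaluated with OR
# semantics above, so a component declaring, e.g.
#   policy_rules = (("identity", "identity:get_user"),
#                   ("identity", "identity:get_project"))
# is accessible as soon as either rule passes the configured POLICY_CHECK_FUNCTION.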
class Registry(object):
def __init__(self):
self._registry = {}
if not getattr(self, '_registerable_class', None):
raise ImproperlyConfigured('Subclasses of Registry must set a '
'"_registerable_class" property.')
def _register(self, cls):
"""Registers the given class.
If the specified class is already registered then it is ignored.
"""
if not inspect.isclass(cls):
raise ValueError('Only classes may be registered.')
elif not issubclass(cls, self._registerable_class):
raise ValueError('Only %s classes or subclasses may be registered.'
% self._registerable_class.__name__)
if cls not in self._registry:
cls._registered_with = self
self._registry[cls] = cls()
return self._registry[cls]
def _unregister(self, cls):
"""Unregisters the given class.
If the specified class isn't registered, ``NotRegistered`` will
be raised.
"""
if not issubclass(cls, self._registerable_class):
raise ValueError('Only %s classes or subclasses may be '
'unregistered.' % self._registerable_class)
if cls not in self._registry.keys():
raise NotRegistered('%s is not registered' % cls)
del self._registry[cls]
return True
def _registered(self, cls):
if inspect.isclass(cls) and issubclass(cls, self._registerable_class):
found = self._registry.get(cls, None)
if found:
return found
else:
# Allow for fetching by slugs as well.
for registered in self._registry.values():
if registered.slug == cls:
return registered
class_name = self._registerable_class.__name__
if hasattr(self, "_registered_with"):
parent = self._registered_with._registerable_class.__name__
raise NotRegistered('%(type)s with slug "%(slug)s" is not '
'registered with %(parent)s "%(name)s".'
% {"type": class_name,
"slug": cls,
"parent": parent,
"name": self.slug})
else:
slug = getattr(cls, "slug", cls)
raise NotRegistered('%(type)s with slug "%(slug)s" is not '
'registered.' % {"type": class_name,
"slug": slug})
class Panel(HorizonComponent):
"""A base class for defining Horizon dashboard panels.
All Horizon dashboard panels should extend from this class. It provides
the appropriate hooks for automatically constructing URLconfs, and
providing permission-based access control.
.. attribute:: name
The name of the panel. This will be displayed in the
auto-generated navigation and various other places.
Default: ``''``.
.. attribute:: slug
A unique "short name" for the panel. The slug is used as
a component of the URL path for the panel. Default: ``''``.
.. attribute:: permissions
A list of permission names, all of which a user must possess in order
to access any view associated with this panel. This attribute
is combined cumulatively with any permissions required on the
``Dashboard`` class with which it is registered.
.. attribute:: urls
Path to a URLconf of views for this panel using dotted Python
notation. If no value is specified, a file called ``urls.py``
living in the same package as the ``panel.py`` file is used.
Default: ``None``.
.. attribute:: nav
.. method:: nav(context)
The ``nav`` attribute can be either a boolean value or a callable
which accepts a ``RequestContext`` object as a single argument
to control whether or not this panel should appear in
automatically-generated navigation. Default: ``True``.
.. attribute:: index_url_name
The ``name`` argument for the URL pattern which corresponds to
the index view for this ``Panel``. This is the view that
:meth:`.Panel.get_absolute_url` will attempt to reverse.
.. staticmethod:: can_register
This optional static method can be used to specify conditions that
need to be satisfied to load this panel. Unlike ``permissions`` and
``allowed`` this method is intended to handle settings based
conditions rather than user based permission and policy checks.
The return value is boolean. If the method returns ``True``, then the
panel will be registered and available to user (if ``permissions`` and
``allowed`` runtime checks are also satisfied). If the method returns
``False``, then the panel will not be registered and will not be
available via normal navigation or direct URL access.
"""
name = ''
slug = ''
urls = None
nav = True
index_url_name = "index"
def __repr__(self):
return "<Panel: %s>" % self.slug
def get_absolute_url(self):
"""Returns the default URL for this panel.
The default URL is defined as the URL pattern with ``name="index"`` in
the URLconf for this panel.
"""
try:
return reverse('horizon:%s:%s:%s' % (self._registered_with.slug,
self.slug,
self.index_url_name))
except Exception as exc:
# Logging here since this will often be called in a template
# where the exception would be hidden.
LOG.info("Error reversing absolute URL for %(self)s: %(exc)s",
{'self': self, 'exc': exc})
raise
@property
def _decorated_urls(self):
urlpatterns = self._get_default_urlpatterns()
# Apply access controls to all views in the patterns
permissions = getattr(self, 'permissions', [])
_decorate_urlconf(urlpatterns, require_perms, permissions)
_decorate_urlconf(urlpatterns, _current_component, panel=self)
# Return the three arguments to django.conf.urls.include
return urlpatterns, self.slug, self.slug
@six.python_2_unicode_compatible
class PanelGroup(object):
"""A container for a set of :class:`~horizon.Panel` classes.
When iterated, it will yield each of the ``Panel`` instances it
contains.
.. attribute:: slug
A unique string to identify this panel group. Required.
.. attribute:: name
A user-friendly name which will be used as the group heading in
places such as the navigation. Default: ``None``.
.. attribute:: panels
A list of panel module names which should be contained within this
grouping.
"""
def __init__(self, dashboard, slug=None, name=None, panels=None):
self.dashboard = dashboard
self.slug = slug or getattr(self, "slug", DEFAULT_PANEL_GROUP)
self.name = name or getattr(self, "name", None)
# Our panels must be mutable so they can be extended by others.
self.panels = list(panels or getattr(self, "panels", []))
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.slug)
def __str__(self):
return self.name
def __iter__(self):
panel_instances = []
for name in self.panels:
try:
panel_instances.append(self.dashboard.get_panel(name))
except NotRegistered as e:
LOG.debug(e)
return iter(panel_instances)
class Dashboard(Registry, HorizonComponent):
"""A base class for defining Horizon dashboards.
All Horizon dashboards should extend from this base class. It provides the
appropriate hooks for automatic discovery of :class:`~horizon.Panel`
modules, automatically constructing URLconfs, and providing
permission-based access control.
.. attribute:: name
The name of the dashboard. This will be displayed in the
auto-generated navigation and various other places.
Default: ``''``.
.. attribute:: slug
A unique "short name" for the dashboard. The slug is used as
a component of the URL path for the dashboard. Default: ``''``.
.. attribute:: panels
The ``panels`` attribute can be either a flat list containing the name
of each panel **module** which should be loaded as part of this
dashboard, or a list of :class:`~horizon.PanelGroup` classes which
define groups of panels as in the following example::
class SystemPanels(horizon.PanelGroup):
slug = "syspanel"
name = _("System")
panels = ('overview', 'instances', ...)
class Syspanel(horizon.Dashboard):
panels = (SystemPanels,)
Automatically generated navigation will use the order of the
modules in this attribute.
Default: ``[]``.
.. warning::
The values for this attribute should not correspond to the
:attr:`~.Panel.name` attributes of the ``Panel`` classes.
They should be the names of the Python modules in which the
``panel.py`` files live. This is used for the automatic
loading and registration of ``Panel`` classes much like
Django's ``ModelAdmin`` machinery.
Panel modules must be listed in ``panels`` in order to be
discovered by the automatic registration mechanism.
.. attribute:: default_panel
The name of the panel which should be treated as the default
panel for the dashboard, i.e. when you visit the root URL
for this dashboard, that's the panel that is displayed.
Default: ``None``.
.. attribute:: permissions
A list of permission names, all of which a user must possess in order
to access any panel registered with this dashboard. This attribute
is combined cumulatively with any permissions required on individual
:class:`~horizon.Panel` classes.
.. attribute:: urls
Optional path to a URLconf of additional views for this dashboard
which are not connected to specific panels. Default: ``None``.
.. attribute:: nav
.. method:: nav(context)
The ``nav`` attribute can be either a boolean value or a callable
which accepts a ``RequestContext`` object as a single argument
to control whether or not this dashboard should appear in
automatically-generated navigation. Default: ``True``.
.. attribute:: public
Boolean value to determine whether this dashboard can be viewed
without being logged in. Defaults to ``False``.
"""
_registerable_class = Panel
name = ''
slug = ''
urls = None
panels = []
default_panel = None
nav = True
public = False
def __repr__(self):
return "<Dashboard: %s>" % self.slug
def __init__(self, *args, **kwargs):
super(Dashboard, self).__init__(*args, **kwargs)
self._panel_groups = None
def get_panel(self, panel):
"""Returns the specified :class:`~horizon.Panel` instance registered
with this dashboard.
"""
return self._registered(panel)
def get_panels(self):
"""Returns the :class:`~horizon.Panel` instances registered with this
dashboard in order, without any panel groupings.
"""
all_panels = []
panel_groups = self.get_panel_groups()
for panel_group in panel_groups.values():
all_panels.extend(panel_group)
return all_panels
def get_panel_group(self, slug):
"""Returns the specified :class:~horizon.PanelGroup
or None if not registered
"""
return self._panel_groups.get(slug)
def get_panel_groups(self):
registered = copy.copy(self._registry)
panel_groups = []
# Gather our known panels
if self._panel_groups is not None:
for panel_group in self._panel_groups.values():
for panel in panel_group:
registered.pop(panel.__class__)
panel_groups.append((panel_group.slug, panel_group))
# Deal with leftovers (such as add-on registrations)
if len(registered):
slugs = [panel.slug for panel in registered.values()]
new_group = PanelGroup(self,
slug="other",
name=_("Other"),
panels=slugs)
panel_groups.append((new_group.slug, new_group))
return collections.OrderedDict(panel_groups)
def get_absolute_url(self):
"""Returns the default URL for this dashboard.
The default URL is defined as the URL pattern with ``name="index"``
in the URLconf for the :class:`~horizon.Panel` specified by
:attr:`~horizon.Dashboard.default_panel`.
"""
try:
return self._registered(self.default_panel).get_absolute_url()
except Exception:
# Logging here since this will often be called in a template
# where the exception would be hidden.
LOG.exception("Error reversing absolute URL for %s.", self)
raise
@property
def _decorated_urls(self):
urlpatterns = self._get_default_urlpatterns()
default_panel = None
# Add in each panel's views except for the default view.
for panel in self._registry.values():
if panel.slug == self.default_panel:
default_panel = panel
continue
url_slug = panel.slug.replace('.', '/')
urlpatterns.append(url(r'^%s/' % url_slug,
include(panel._decorated_urls)))
# Now the default view, which should come last
if not default_panel:
raise NotRegistered('The default panel "%s" is not registered.'
% self.default_panel)
urlpatterns.append(url(r'', include(default_panel._decorated_urls)))
# Require login if not public.
if not self.public:
_decorate_urlconf(urlpatterns, require_auth)
# Apply access controls to all views in the patterns
permissions = getattr(self, 'permissions', [])
_decorate_urlconf(urlpatterns, require_perms, permissions)
_decorate_urlconf(urlpatterns, _current_component, dashboard=self)
# Return the three arguments to django.conf.urls.include
return urlpatterns, self.slug, self.slug
def _autodiscover(self):
"""Discovers panels to register from the current dashboard module."""
if getattr(self, "_autodiscover_complete", False):
return
panels_to_discover = []
panel_groups = []
# If we have a flat iterable of panel names, wrap it again so
# we have a consistent structure for the next step.
if all([isinstance(i, six.string_types) for i in self.panels]):
self.panels = [self.panels]
# Now iterate our panel sets.
default_created = False
for panel_set in self.panels:
# Instantiate PanelGroup classes.
if not isinstance(panel_set, collections.Iterable) and \
issubclass(panel_set, PanelGroup):
panel_group = panel_set(self)
# Check for nested tuples, and convert them to PanelGroups
elif not isinstance(panel_set, PanelGroup):
panel_group = PanelGroup(self, panels=panel_set)
# Put our results into their appropriate places
panels_to_discover.extend(panel_group.panels)
panel_groups.append((panel_group.slug, panel_group))
if panel_group.slug == DEFAULT_PANEL_GROUP:
default_created = True
# Plugin panels can be added to a default panel group. Make sure such a
# default group exists.
if not default_created:
default_group = PanelGroup(self)
panel_groups.insert(0, (default_group.slug, default_group))
self._panel_groups = collections.OrderedDict(panel_groups)
# Do the actual discovery
package = '.'.join(self.__module__.split('.')[:-1])
mod = import_module(package)
for panel in panels_to_discover:
try:
before_import_registry = copy.copy(self._registry)
import_module('.%s.panel' % panel, package)
except Exception:
self._registry = before_import_registry
if module_has_submodule(mod, panel):
raise
self._autodiscover_complete = True
@classmethod
def register(cls, panel):
"""Registers a :class:`~horizon.Panel` with this dashboard."""
panel_class = Horizon.register_panel(cls, panel)
# Support template loading from panel template directories.
panel_mod = import_module(panel.__module__)
panel_dir = os.path.dirname(panel_mod.__file__)
template_dir = os.path.join(panel_dir, "templates")
if os.path.exists(template_dir):
key = os.path.join(cls.slug, panel.slug)
loaders.panel_template_dirs[key] = template_dir
return panel_class
@classmethod
def unregister(cls, panel):
"""Unregisters a :class:`~horizon.Panel` from this dashboard."""
success = Horizon.unregister_panel(cls, panel)
if success:
# Remove the panel's template directory.
key = os.path.join(cls.slug, panel.slug)
if key in loaders.panel_template_dirs:
del loaders.panel_template_dirs[key]
return success
def allowed(self, context):
"""Checks for role based access for this dashboard.
Checks for access to any panels in the dashboard and of the
dashboard itself.
This method should be overridden to return the result of
any policy checks required for the user to access this dashboard
when more complex checks are required.
"""
# if the dashboard has policy rules, honor those above individual
# panels
if not self._can_access(context['request']):
return False
# check if access is allowed to a single panel,
# the default for each panel is True
for panel in self.get_panels():
if panel.can_access(context):
return True
return False
class Workflow(object):
pass
class LazyURLPattern(SimpleLazyObject):
def __iter__(self):
if self._wrapped is empty:
self._setup()
return iter(self._wrapped)
def __reversed__(self):
if self._wrapped is empty:
self._setup()
return reversed(self._wrapped)
def __len__(self):
if self._wrapped is empty:
self._setup()
return len(self._wrapped)
def __getitem__(self, idx):
if self._wrapped is empty:
self._setup()
return self._wrapped[idx]
class Site(Registry, HorizonComponent):
"""The overarching class which encompasses all dashboards and panels."""
# Required for registry
_registerable_class = Dashboard
name = "Horizon"
namespace = 'horizon'
slug = 'horizon'
urls = 'horizon.site_urls'
def __repr__(self):
return u"<Site: %s>" % self.slug
@property
def _conf(self):
return conf.HORIZON_CONFIG
@property
def dashboards(self):
return self._conf['dashboards']
@property
def default_dashboard(self):
return self._conf['default_dashboard']
def register(self, dashboard):
"""Registers a :class:`~horizon.Dashboard` with Horizon."""
return self._register(dashboard)
def unregister(self, dashboard):
"""Unregisters a :class:`~horizon.Dashboard` from Horizon."""
return self._unregister(dashboard)
def registered(self, dashboard):
return self._registered(dashboard)
def register_panel(self, dashboard, panel):
dash_instance = self.registered(dashboard)
return dash_instance._register(panel)
def unregister_panel(self, dashboard, panel):
dash_instance = self.registered(dashboard)
if not dash_instance:
raise NotRegistered("The dashboard %s is not registered."
% dashboard)
return dash_instance._unregister(panel)
def get_dashboard(self, dashboard):
"""Returns the specified :class:`~horizon.Dashboard` instance."""
return self._registered(dashboard)
def get_dashboards(self):
"""Returns an ordered tuple of :class:`~horizon.Dashboard` modules.
Orders dashboards according to the ``"dashboards"`` key in
``HORIZON_CONFIG`` or else returns all registered dashboards
in alphabetical order.
Any remaining :class:`~horizon.Dashboard` classes registered with
Horizon but not listed in ``HORIZON_CONFIG['dashboards']``
will be appended to the end of the list alphabetically.
"""
if self.dashboards:
registered = copy.copy(self._registry)
dashboards = []
for item in self.dashboards:
dashboard = self._registered(item)
dashboards.append(dashboard)
registered.pop(dashboard.__class__)
if len(registered):
extra = sorted(registered.values())
dashboards.extend(extra)
return dashboards
else:
return sorted(self._registry.values())
def get_default_dashboard(self):
"""Returns the default :class:`~horizon.Dashboard` instance.
If ``"default_dashboard"`` is specified in ``HORIZON_CONFIG``
then that dashboard will be returned. If not, the first dashboard
returned by :func:`~horizon.get_dashboards` will be returned.
"""
if self.default_dashboard:
return self._registered(self.default_dashboard)
elif len(self._registry):
return self.get_dashboards()[0]
else:
raise NotRegistered("No dashboard modules have been registered.")
def get_user_home(self, user):
"""Returns the default URL for a particular user.
This method can be used to customize where a user is sent when
they log in, etc. By default it returns the value of
:meth:`get_absolute_url`.
An alternative function can be supplied to customize this behavior
by specifying a either a URL or a function which returns a URL via
the ``"user_home"`` key in ``HORIZON_CONFIG``. Each of these
would be valid::
{"user_home": "/home",} # A URL
{"user_home": "my_module.get_user_home",} # Path to a function
{"user_home": lambda user: "/" + user.name,} # A function
{"user_home": None,} # Will always return the default dashboard
This can be useful if the default dashboard may not be accessible
to all users. When user_home is missing from HORIZON_CONFIG,
it will default to the settings.LOGIN_REDIRECT_URL value.
"""
user_home = self._conf['user_home']
if user_home:
if callable(user_home):
return user_home(user)
elif isinstance(user_home, six.string_types):
# Assume we've got a URL if there's a slash in it
if '/' in user_home:
return user_home
else:
mod, func = user_home.rsplit(".", 1)
return getattr(import_module(mod), func)(user)
# If it's not callable and not a string, it's wrong.
raise ValueError('The user_home setting must be either a string '
'or a callable object (e.g. a function).')
else:
return self.get_absolute_url()
def get_absolute_url(self):
"""Returns the default URL for Horizon's URLconf.
The default URL is determined by calling
:meth:`~horizon.Dashboard.get_absolute_url`
on the :class:`~horizon.Dashboard` instance returned by
:meth:`~horizon.get_default_dashboard`.
"""
return self.get_default_dashboard().get_absolute_url()
@property
def _lazy_urls(self):
"""Lazy loading for URL patterns.
This method avoids problems associated with attempting to evaluate
the URLconf before the settings module has been loaded.
"""
def url_patterns():
return self._urls()[0]
return LazyURLPattern(url_patterns), self.namespace, self.slug
def _urls(self):
"""Constructs the URLconf for Horizon from registered Dashboards."""
urlpatterns = self._get_default_urlpatterns()
self._autodiscover()
# Discover each dashboard's panels.
for dash in self._registry.values():
dash._autodiscover()
# Load the plugin-based panel configuration
self._load_panel_customization()
# Allow for override modules
if self._conf.get("customization_module", None):
customization_module = self._conf["customization_module"]
bits = customization_module.split('.')
mod_name = bits.pop()
package = '.'.join(bits)
mod = import_module(package)
try:
before_import_registry = copy.copy(self._registry)
import_module('%s.%s' % (package, mod_name))
except Exception:
self._registry = before_import_registry
if module_has_submodule(mod, mod_name):
raise
# Compile the dynamic urlconf.
for dash in self._registry.values():
urlpatterns.append(url(r'^%s/' % dash.slug,
include(dash._decorated_urls)))
# Return the three arguments to django.conf.urls.include
return urlpatterns, self.namespace, self.slug
def _autodiscover(self):
"""Discovers modules to register from ``settings.INSTALLED_APPS``.
This makes sure that the appropriate modules get imported to register
themselves with Horizon.
"""
if not getattr(self, '_registerable_class', None):
raise ImproperlyConfigured('You must set a '
'"_registerable_class" property '
'in order to use autodiscovery.')
# Discover both dashboards and panels, in that order
for mod_name in ('dashboard', 'panel'):
for app in settings.INSTALLED_APPS:
mod = import_module(app)
try:
before_import_registry = copy.copy(self._registry)
import_module('%s.%s' % (app, mod_name))
except Exception:
self._registry = before_import_registry
if module_has_submodule(mod, mod_name):
raise
def _load_panel_customization(self):
"""Applies the plugin-based panel configurations.
This method parses the panel customization from the ``HORIZON_CONFIG``
and makes changes to the dashboard accordingly.
It supports adding, removing and setting default panels on the
dashboard. It also supports registering a panel group.
"""
panel_customization = self._conf.get("panel_customization", [])
# Process all the panel groups first so that they exist before panels
# are added to them and Dashboard._autodiscover() doesn't wipe out any
# panels previously added when its panel groups are instantiated.
panel_configs = []
for config in panel_customization:
if config.get('PANEL'):
panel_configs.append(config)
elif config.get('PANEL_GROUP'):
self._process_panel_group_configuration(config)
else:
LOG.warning("Skipping %s because it doesn't have PANEL or "
"PANEL_GROUP defined.", config.__name__)
# Now process the panels.
for config in panel_configs:
self._process_panel_configuration(config)
def _process_panel_configuration(self, config):
"""Add, remove and set default panels on the dashboard."""
try:
dashboard = config.get('PANEL_DASHBOARD')
if not dashboard:
LOG.warning("Skipping %s because it doesn't have "
"PANEL_DASHBOARD defined.", config.__name__)
return
panel_slug = config.get('PANEL')
dashboard_cls = self.get_dashboard(dashboard)
panel_group = config.get('PANEL_GROUP')
default_panel = config.get('DEFAULT_PANEL')
# Set the default panel
if default_panel:
dashboard_cls.default_panel = default_panel
# Remove the panel
if config.get('REMOVE_PANEL', False):
for panel in dashboard_cls.get_panels():
if panel_slug == panel.slug:
dashboard_cls.unregister(panel.__class__)
elif config.get('ADD_PANEL', None):
# Add the panel to the dashboard
panel_path = config['ADD_PANEL']
mod_path, panel_cls = panel_path.rsplit(".", 1)
try:
mod = import_module(mod_path)
except ImportError:
LOG.warning("Could not load panel: %s", mod_path)
return
panel = getattr(mod, panel_cls)
# Test if the can_register method is present and, if so, call it to
# determine whether the panel should be loaded.
if hasattr(panel, 'can_register') and \
callable(getattr(panel, 'can_register')):
if not panel.can_register():
LOG.debug("Load condition failed for panel: %(panel)s",
{'panel': panel_slug})
return
dashboard_cls.register(panel)
if panel_group:
dashboard_cls.get_panel_group(panel_group).\
panels.append(panel.slug)
else:
panels = list(dashboard_cls.panels)
panels.append(panel)
dashboard_cls.panels = tuple(panels)
except Exception as e:
LOG.warning('Could not process panel %(panel)s: %(exc)s',
{'panel': panel_slug, 'exc': e})
def _process_panel_group_configuration(self, config):
"""Adds a panel group to the dashboard."""
panel_group_slug = config.get('PANEL_GROUP')
try:
dashboard = config.get('PANEL_GROUP_DASHBOARD')
if not dashboard:
LOG.warning("Skipping %s because it doesn't have "
"PANEL_GROUP_DASHBOARD defined.", config.__name__)
return
dashboard_cls = self.get_dashboard(dashboard)
panel_group_name = config.get('PANEL_GROUP_NAME')
if not panel_group_name:
LOG.warning("Skipping %s because it doesn't have "
"PANEL_GROUP_NAME defined.", config.__name__)
return
# Create the panel group class
panel_group = type(panel_group_slug,
(PanelGroup, ),
{'slug': panel_group_slug,
'name': panel_group_name,
'panels': []},)
# Add the panel group to dashboard
panels = list(dashboard_cls.panels)
panels.append(panel_group)
dashboard_cls.panels = tuple(panels)
# Trigger the autodiscovery to completely load the new panel group
dashboard_cls._autodiscover_complete = False
dashboard_cls._autodiscover()
except Exception as e:
LOG.warning('Could not process panel group %(panel_group)s: '
'%(exc)s',
{'panel_group': panel_group_slug, 'exc': e})
class HorizonSite(Site):
"""A singleton implementation of Site such that all dealings with horizon
get the same instance no matter what. There can be only one.
"""
_instance = None
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = super(Site, cls).__new__(cls, *args, **kwargs)
return cls._instance
# The one true Horizon
Horizon = HorizonSite()
|
|
from __future__ import unicode_literals
import logging
from PIL import Image
from os import path
import os.path
try:
from io import BytesIO
except ImportError:
from cStringIO import StringIO as BytesIO
from django.db import models
from django.db.models import signals
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import force_text, smart_str, python_2_unicode_compatible
from django.contrib.sites.models import Site
from django.core.files.base import ContentFile
from django.conf import settings
from django.template.defaultfilters import slugify
from app_data import AppDataField
from ella.core.models.main import Author, Source
from ella.core.cache import CachedForeignKey, get_cached_object
from ella.photos.conf import photos_settings
from ella.utils.timezone import now
from .formatter import Formatter
__all__ = ("Format", "FormatedPhoto", "Photo")
log = logging.getLogger('ella.photos')
redis = None
REDIS_PHOTO_KEY = 'photo:%s'
REDIS_FORMATTED_PHOTO_KEY = 'photo:%s:%s'
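# Illustrative sketch (not part of the original module): for a Photo with
# pk=42 and a Format with pk=3, the templates above expand to the Redis hash
# keys 'photo:42' (original image info) and 'photo:42:3' (formatted image
# info); the hashes are filled by the signal handlers at the end of this
# module.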
if hasattr(settings, 'PHOTOS_REDIS'):
try:
from redis import Redis
except ImportError:
log.error('Redis support requested but Redis client not installed.')
redis = None
else:
redis = Redis(**getattr(settings, 'PHOTOS_REDIS'))
def upload_to(instance, filename):
name, ext = os.path.splitext(filename)
if instance.slug:
name = instance.slug
ext = photos_settings.TYPE_EXTENSION.get(instance._get_image().format, ext.lower())
instance.image.file.seek(0)
return os.path.join(
force_text(now().strftime(smart_str(photos_settings.UPLOAD_TO))),
name + ext
)
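# Illustrative sketch (not part of the original module): assuming
# photos_settings.UPLOAD_TO is a strftime pattern such as 'photos/%Y/%m/%d'
# and the photo's slug is '42-bridges-in-england', upload_to() yields a path
# like 'photos/2014/05/20/42-bridges-in-england.jpg', with the extension
# taken from the detected image format when available.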
@python_2_unicode_compatible
class Photo(models.Model):
"""
Represents the original (unformatted) photo uploaded by a user. Used as the source
object for all the formatting stuff and to keep the metadata common to
all related ``FormatedPhoto`` objects.
"""
title = models.CharField(_('Title'), max_length=200)
description = models.TextField(_('Description'), blank=True)
slug = models.SlugField(_('Slug'), max_length=255)
# save it to YYYY/MM/DD structure
image = models.ImageField(_('Image'), upload_to=upload_to,
max_length=255, height_field='height', width_field='width')
width = models.PositiveIntegerField(editable=False)
height = models.PositiveIntegerField(editable=False)
# important area
important_top = models.PositiveIntegerField(null=True, blank=True)
important_left = models.PositiveIntegerField(null=True, blank=True)
important_bottom = models.PositiveIntegerField(null=True, blank=True)
important_right = models.PositiveIntegerField(null=True, blank=True)
# Authors and Sources
authors = models.ManyToManyField(Author, verbose_name=_('Authors'), related_name='photo_set')
source = CachedForeignKey(Source, blank=True, null=True, verbose_name=_('Source'), on_delete=models.SET_NULL)
created = models.DateTimeField(auto_now_add=True)
# generic JSON field to store app specific data
app_data = AppDataField()
class Meta:
verbose_name = _('Photo')
verbose_name_plural = _('Photos')
def __str__(self):
return self.title
def get_absolute_url(self):
return self.image.url
def get_image_info(self):
return {
'url': self.image.url,
'width': self.width,
'height': self.height,
}
def _get_image(self):
if not hasattr(self, '_pil_image'):
self.image.open()
self._pil_image = Image.open(self.image)
return self._pil_image
def save(self, **kwargs):
"""Overrides models.Model.save.
- Generates slug.
- Saves image file.
"""
if not self.width or not self.height:
self.width, self.height = self.image.width, self.image.height
# prefill the slug with the ID; this requires a double save
if not self.id:
img = self.image
# store dummy values first...
w, h = self.width, self.height
self.image = ''
self.width, self.height = w, h
self.slug = ''
super(Photo, self).save(force_insert=True)
# ... so that we can generate the slug
self.slug = str(self.id) + '-' + slugify(self.title)
# truncate slug in order to fit in an ImageField and/or paths in Redirects
self.slug = self.slug[:64]
# ... that will be used in the image's upload_to function
self.image = img
# and the image will be saved properly
super(Photo, self).save(force_update=True)
else:
try:
old = Photo.objects.get(pk=self.pk)
force_update = True
# delete formatedphotos if new image was uploaded
if old.image != self.image:
for f_photo in self.formatedphoto_set.all():
f_photo.delete()
except Photo.DoesNotExist:
# somebody is just trying to create new model with given PK
force_update = False
super(Photo, self).save(force_update=force_update)
def ratio(self):
"Return photo's width to height ratio"
if self.height:
return float(self.width) / self.height
else:
return None
def get_formated_photo(self, format):
"Return formated photo"
return FormatedPhoto.objects.get_photo_in_format(self, format)
FORMAT_CACHE = {}
class FormatManager(models.Manager):
def get_for_name(self, name):
try:
return FORMAT_CACHE[name]
except KeyError:
FORMAT_CACHE[name] = format = get_cached_object(Format, name=name, sites__id=settings.SITE_ID)
return format
@python_2_unicode_compatible
class Format(models.Model):
"""
Defines per-site photo sizes together with rules on how to adhere to them.
This includes:
* maximum width and height
* cropping settings
* stretch (rescale) settings
* sample quality
"""
name = models.CharField(_('Name'), max_length=80)
max_width = models.PositiveIntegerField(_('Max width'))
max_height = models.PositiveIntegerField(_('Max height'))
flexible_height = models.BooleanField(_('Flexible height'), default=False, help_text=_((
'Determines whether max_height is an absolute maximum, or the formatted '
'photo can vary from max_height to flexible_max_height.')))
flexible_max_height = models.PositiveIntegerField(_('Flexible max height'),
blank=True, null=True)
stretch = models.BooleanField(_('Stretch'), default=False)
nocrop = models.BooleanField(_('Do not crop'), default=False)
resample_quality = models.IntegerField(_('Resample quality'),
choices=photos_settings.FORMAT_QUALITY, default=85)
sites = models.ManyToManyField(Site, verbose_name=_('Sites'))
master = CachedForeignKey('self', verbose_name=_('Master'), null=True, blank=True, help_text=_((
'When generating a formatted image, use the image formatted to the master format instead of the original. '
'Useful when editors crop certain formats by hand and you wish to re-use those coordinates automatically.'
)))
objects = FormatManager()
class Meta:
verbose_name = _('Format')
verbose_name_plural = _('Formats')
def __str__(self):
return "%s (%sx%s) " % (self.name, self.max_width, self.max_height)
def get_blank_img(self):
"""
Return fake ``FormatedPhoto`` object to be used in templates when an error
occurs in image generation.
"""
if photos_settings.DEBUG:
return self.get_placeholder_img()
out = {
'blank': True,
'width': self.max_width,
'height': self.max_height,
'url': photos_settings.EMPTY_IMAGE_SITE_PREFIX + 'img/empty/%s.png' % (self.name),
}
return out
def get_placeholder_img(self):
"""
Returns a fake ``FormatedPhoto`` object grabbed from an image placeholder
generator service, for the purpose of debugging when images
are not available but we still want to see something.
"""
pars = {
'width': self.max_width,
'height': self.max_height
}
out = {
'placeholder': True,
'width': self.max_width,
'height': self.max_height,
'url': photos_settings.DEBUG_PLACEHOLDER_PROVIDER_TEMPLATE % pars
}
return out
def ratio(self):
"""Return photo's width to height ratio"""
return float(self.max_width) / self.max_height
def save(self, **kwargs):
"""Overrides models.Model.save.
- Deletes all related formatted photos when an existing format is saved
(because its parameters may have changed).
"""
if self.id:
for f_photo in self.formatedphoto_set.all():
f_photo.delete()
super(Format, self).save(**kwargs)
class FormatedPhotoManager(models.Manager):
def get_photo_in_format(self, photo, format, include_original=True):
if isinstance(photo, Photo):
photo_id = photo.id
else:
photo_id = photo
photo = None
if not isinstance(format, Format):
format = Format.objects.get_for_name(format)
if redis:
p = redis.pipeline()
p.hgetall(REDIS_PHOTO_KEY % photo_id)
p.hgetall(REDIS_FORMATTED_PHOTO_KEY % (photo_id, format.id))
original, formatted = p.execute()
if formatted:
if include_original:
formatted['original'] = original
return formatted
if not photo:
try:
photo = get_cached_object(Photo, pk=photo_id)
except Photo.DoesNotExist:
return format.get_blank_img()
try:
formated_photo = get_cached_object(FormatedPhoto, photo=photo, format=format)
except FormatedPhoto.DoesNotExist:
try:
# use get_or_create because there is a possible race condition here;
# we don't use get_or_create alone because we want to hit the cache 99.9% of the time
formated_photo, _ = self.get_or_create(photo=photo, format=format)
except (IOError, SystemError) as e:
log.warning("Cannot create formatted photo due to %s.", e)
return format.get_blank_img()
info = {
'url': formated_photo.url,
'width': formated_photo.width,
'height': formated_photo.height,
}
if include_original:
info['original'] = photo.get_image_info()
return info
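# Illustrative sketch (not part of the original module): typical use of the
# manager method above, assuming a Format named 'listing' exists for the
# current site:
#
#     info = FormatedPhoto.objects.get_photo_in_format(photo, 'listing')
#     # -> {'url': ..., 'width': ..., 'height': ..., 'original': {...}}
#
# When the photo cannot be found or the image cannot be generated, a blank
# image dict from Format.get_blank_img() is returned instead.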
@python_2_unicode_compatible
class FormatedPhoto(models.Model):
"""
Cache-like container of a specific photo in a specific format. Besides
the path to the generated image file, crop used is also stored together
with new ``width`` and ``height`` attributes.
"""
photo = CachedForeignKey(Photo)
format = CachedForeignKey(Format)
# save it to YYYY/MM/DD structure
image = models.ImageField(upload_to=photos_settings.UPLOAD_TO,
height_field='height', width_field='width', max_length=300)
crop_left = models.IntegerField()
crop_top = models.IntegerField()
crop_width = models.IntegerField()
crop_height = models.IntegerField()
width = models.PositiveIntegerField(editable=False)
height = models.PositiveIntegerField(editable=False)
objects = FormatedPhotoManager()
class Meta:
verbose_name = _('Formated photo')
verbose_name_plural = _('Formated photos')
unique_together = (('photo', 'format'),)
def __str__(self):
return "%s - %s" % (self.photo, self.format)
@property
def url(self):
"Returns url of the photo file."
return self.image.url
def _generate_img(self):
crop_box = None
if self.crop_left:
crop_box = (self.crop_left, self.crop_top, \
self.crop_left + self.crop_width, self.crop_top + self.crop_height)
important_box = None
if self.photo.important_top is not None:
p = self.photo
important_box = (p.important_left, p.important_top, p.important_right, p.important_bottom)
image = None
if crop_box is None and self.format.master_id:
try:
fp = FormatedPhoto.objects.get(format=self.format.master_id, photo=self.photo)
image = Image.open(fp.image)
except FormatedPhoto.DoesNotExist:
pass
if image is None:
image = self.photo._get_image()
formatter = Formatter(image, self.format, crop_box=crop_box, important_box=important_box)
return formatter.format()
def generate(self, save=True):
"""
Generates photo file in current format.
If ``save`` is ``True``, file is saved too.
"""
stretched_photo, crop_box = self._generate_img()
# set crop_box to (0,0,0,0) if photo not cropped
if not crop_box:
crop_box = 0, 0, 0, 0
self.crop_left, self.crop_top, right, bottom = crop_box
self.crop_width = right - self.crop_left
self.crop_height = bottom - self.crop_top
self.width, self.height = stretched_photo.size
f = BytesIO()
imgf = (self.photo._get_image().format or
Image.EXTENSION[path.splitext(self.photo.image.name)[1]])
stretched_photo.save(f, format=imgf, quality=self.format.resample_quality)
f.seek(0)
self.image.save(self.file(), ContentFile(f.read()), save)
def save(self, **kwargs):
"""Overrides models.Model.save
- Removes old file from the FS
- Generates new file.
"""
self.remove_file()
if not self.image:
self.generate(save=False)
else:
self.image.name = self.file()
super(FormatedPhoto, self).save(**kwargs)
def delete(self):
try:
self.remove_file()
except Exception:
log.warning('Error deleting FormatedPhoto %d-%s (%s).', self.photo_id, self.format.name, self.image.name)
super(FormatedPhoto, self).delete()
def remove_file(self):
if self.image.name:
self.image.delete()
def file(self):
""" Method returns formated photo path - derived from format.id and source Photo filename """
if photos_settings.FORMATED_PHOTO_FILENAME is not None:
return photos_settings.FORMATED_PHOTO_FILENAME(self)
source_file = path.split(self.photo.image.name)
return path.join(source_file[0], str(self.format.id) + '-' + source_file[1])
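# Illustrative sketch (not part of the original module): with the default
# behaviour (photos_settings.FORMATED_PHOTO_FILENAME is None), a source image
# stored at 'photos/2014/05/20/42-bridges-in-england.jpg' formatted with a
# Format whose id is 3 ends up at
# 'photos/2014/05/20/3-42-bridges-in-england.jpg'.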
if redis:
def store_photo(instance, **kwargs):
if instance.image:
redis.hmset(REDIS_PHOTO_KEY % instance.pk, instance.get_image_info())
def remove_photo(instance, **kwargs):
redis.delete(REDIS_PHOTO_KEY % instance.id)
def store_formated_photo(instance, **kwargs):
redis.hmset(
REDIS_FORMATTED_PHOTO_KEY % (instance.photo_id, instance.format.id),
{
'url': instance.url,
'width': instance.width,
'height': instance.height,
}
)
def remove_formated_photo(instance, **kwargs):
redis.delete(REDIS_FORMATTED_PHOTO_KEY % (instance.photo_id, instance.format.id))
signals.post_save.connect(store_photo, sender=Photo)
signals.post_delete.connect(remove_photo, sender=Photo)
signals.post_save.connect(store_formated_photo, sender=FormatedPhoto)
signals.post_delete.connect(remove_formated_photo, sender=FormatedPhoto)
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2012 Canonical Ltd.
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
"""Generic SSO client using Twisted Perspective Broker.
This module should never import from the multiplatform one (main/__init__.py),
but the other way around. Likewise, this module should *not* have any logic
regarding error processing or decision making about when to send a given
signal.
Also, most of the logging is being made in the main module to avoid
duplication between the different platform implementations.
"""
from twisted.internet.task import LoopingCall
from twisted.python.failure import Failure
from ubuntu_sso.logger import setup_logging
from ubuntu_sso.utils.ipc import (
BaseService,
RemoteClient,
RemoteService,
signal,
)
logger = setup_logging("ubuntu_sso.main.perspective_broker")
SSO_SERVICE_NAME = "ubuntu-sso-client"
# Invalid name for signals that are CamelCase
# pylint: disable=C0103
class SSOLoginProxy(RemoteService):
"""Login thru the Single Sign On service."""
remote_calls = [
'generate_captcha',
'register_user',
'login',
'login_and_ping',
'validate_email',
'validate_email_and_ping',
'request_password_reset_token',
'set_new_password',
]
def __init__(self, root, *args, **kwargs):
super(SSOLoginProxy, self).__init__(*args, **kwargs)
self.root = root
# generate_captcha signals
@signal
def CaptchaGenerated(self, app_name, result):
"""Signal thrown after the captcha is generated."""
@signal
def CaptchaGenerationError(self, app_name, error):
"""Signal thrown when there's a problem generating the captcha."""
def generate_captcha(self, app_name, filename):
"""Call the matching method in the processor."""
self.root.sso_login.generate_captcha(app_name, filename)
# register_user signals
@signal
def UserRegistered(self, app_name, result):
"""Signal thrown when the user is registered."""
@signal
def UserRegistrationError(self, app_name, error):
"""Signal thrown when there's a problem registering the user."""
def register_user(self, app_name, email, password, name,
captcha_id, captcha_solution):
"""Call the matching method in the processor."""
self.root.sso_login.register_user(app_name, email, password, name,
captcha_id, captcha_solution)
# login signals
@signal
def LoggedIn(self, app_name, result):
"""Signal thrown when the user is logged in."""
@signal
def LoginError(self, app_name, error):
"""Signal thrown when there is a problem in the login."""
@signal
def UserNotValidated(self, app_name, result):
"""Signal thrown when the user is not validated."""
def login(self, app_name, email, password, ping_url=None):
"""Call the matching method in the processor."""
self.root.sso_login.login(app_name, email, password, ping_url)
login_and_ping = login
# validate_email signals
@signal
def EmailValidated(self, app_name, result):
"""Signal thrown after the email is validated."""
@signal
def EmailValidationError(self, app_name, error):
"""Signal thrown when there's a problem validating the email."""
def validate_email(self, app_name, email, password, email_token,
ping_url=None):
"""Call the matching method in the processor."""
self.root.sso_login.validate_email(app_name,
email, password, email_token, ping_url)
validate_email_and_ping = validate_email
# request_password_reset_token signals
@signal
def PasswordResetTokenSent(self, app_name, result):
"""Signal thrown when the token is succesfully sent."""
@signal
def PasswordResetError(self, app_name, error):
"""Signal thrown when there's a problem sending the token."""
def request_password_reset_token(self, app_name, email):
"""Call the matching method in the processor."""
self.root.sso_login.request_password_reset_token(app_name, email)
# set_new_password signals
@signal
def PasswordChanged(self, app_name, result):
"""Signal thrown when the token is succesfully sent."""
@signal
def PasswordChangeError(self, app_name, error):
"""Signal thrown when there's a problem sending the token."""
def set_new_password(self, app_name, email, token, new_password):
"""Call the matching method in the processor."""
self.root.sso_login.set_new_password(app_name,
email, token, new_password)
class CredentialsManagementProxy(RemoteService):
"""Object that manages credentials.
Every exposed method in this class requires one mandatory argument:
- 'app_name': the name of the application. Will be displayed in the
GUI header, plus it will be used to find/build/clear tokens.
And accepts another parameter named 'args', which is a dictionary that
can contain the following:
- 'help_text': an explanatory text for the end-users, will be
shown below the header. This is an optional free text field.
- 'ping_url': the url to open after successful token retrieval. If
defined, the email will be attached to the url and will be pinged
with an OAuth-signed request.
- 'tc_url': the link to the Terms and Conditions page. If defined,
the checkbox to agree to the terms will link to it.
- 'window_id': the id of the window which will be set as a parent
of the GUI. If not defined, no parent will be set.
"""
remote_calls = [
'find_credentials',
'clear_credentials',
'store_credentials',
'register',
'login',
'login_email_password',
]
def __init__(self, root, *args, **kwargs):
super(CredentialsManagementProxy, self).__init__(*args, **kwargs)
self.root = root
@signal
def AuthorizationDenied(self, app_name):
"""Signal thrown when the user denies the authorization."""
@signal
def CredentialsFound(self, app_name, credentials):
"""Signal thrown when the credentials are found."""
@signal
def CredentialsNotFound(self, app_name):
"""Signal thrown when the credentials are not found."""
@signal
def CredentialsCleared(self, app_name):
"""Signal thrown when the credentials were cleared."""
@signal
def CredentialsStored(self, app_name):
"""Signal thrown when the credentials were cleared."""
@signal
def CredentialsError(self, app_name, error_dict):
"""Signal thrown when there is a problem getting the credentials."""
def find_credentials(self, app_name, args):
"""Look for the credentials for an application.
- 'app_name': the name of the application which credentials are
going to be removed.
- 'args' is a dictionary, currently not used.
"""
self.root.cred_manager.find_credentials(app_name, args)
def clear_credentials(self, app_name, args):
"""Clear the credentials for an application.
- 'app_name': the name of the application which credentials are
going to be removed.
- 'args' is a dictionary, currently not used.
"""
self.root.cred_manager.clear_credentials(app_name, args)
def store_credentials(self, app_name, args):
"""Store the token for an application.
- 'app_name': the name of the application which credentials are
going to be stored.
- 'args' is the dictionary holding the credentials. Needs to provide
the following mandatory keys: 'token', 'token_key', 'consumer_key',
'consumer_secret'.
"""
self.root.cred_manager.store_credentials(app_name, args)
def register(self, app_name, args):
"""Get credentials if found else prompt GUI to register."""
self.root.cred_manager.register(app_name, args)
def login(self, app_name, args):
"""Get credentials if found else prompt GUI to login."""
self.root.cred_manager.login(app_name, args)
def login_email_password(self, app_name, args):
"""Get credentials if found, else login using email and password.
- 'args' should contain at least the following keys: 'email' and
'password'. Those will be used to issue a new SSO token, which will be
returned through the CredentialsFound signal.
"""
self.root.cred_manager.login_email_password(app_name, args)
class UbuntuSSOProxyBase(BaseService):
"""Object that exposes the diff referenceable objects."""
services = {
'sso_login': SSOLoginProxy,
'cred_manager': CredentialsManagementProxy,
}
name = SSO_SERVICE_NAME
# ============================== client classes ==============================
class SSOLoginClient(RemoteClient):
"""Client that can perform calls to the remote SSOLogin object."""
call_remote_functions = SSOLoginProxy.remote_calls
signal_handlers = [
'CaptchaGenerated',
'CaptchaGenerationError',
'UserRegistered',
'UserRegistrationError',
'LoggedIn',
'LoginError',
'UserNotValidated',
'EmailValidated',
'EmailValidationError',
'PasswordResetTokenSent',
'PasswordResetError',
'PasswordChanged',
'PasswordChangeError',
]
class CredentialsManagementClient(RemoteClient):
"""Client that can perform calls to the remote CredManagement object."""
call_remote_functions = CredentialsManagementProxy.remote_calls
signal_handlers = [
'AuthorizationDenied',
'CredentialsFound',
'CredentialsNotFound',
'CredentialsCleared',
'CredentialsStored',
'CredentialsError',
]
def add_timeout(interval, callback, *args, **kwargs):
"""Add a timeout callback as a task."""
time_out_task = LoopingCall(callback, *args, **kwargs)
time_out_task.start(interval / 1000, now=False)
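# Illustrative sketch (not part of the original module): the interval is given
# in milliseconds and converted to the seconds expected by LoopingCall.start,
# e.g.:
#
#     add_timeout(5000, my_callback)   # run my_callback every 5 seconds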
timeout_func = add_timeout
start_setup = lambda *a, **kw: None
# the reactor does have run and stop methods
# pylint: disable=E1101
def shutdown_func():
"""Stop the reactor."""
from twisted.internet import reactor
reactor.stop()
def finish_setup(result, loop):
"""Stop the reactor if a failure ocurred."""
if isinstance(result, Failure):
shutdown_func()
def main():
"""Run the specific mainloop."""
from twisted.internet import reactor
reactor.run()
# pylint: enable=E1101
|
|
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for feedback-related services."""
import json
from core.domain import email_services
from core.domain import event_services
from core.domain import feedback_domain
from core.domain import feedback_jobs_continuous
from core.domain import feedback_services
from core.domain import subscription_services
from core.domain import user_services
from core.platform import models
from core.tests import test_utils
import feconf
(feedback_models, email_models) = models.Registry.import_models([
models.NAMES.feedback, models.NAMES.email])
taskqueue_services = models.Registry.import_taskqueue_services()
class FeedbackServicesUnitTests(test_utils.GenericTestBase):
"""Test functions in feedback_services."""
def test_feedback_ids(self):
"""Test various conventions for thread and message ids."""
exp_id = '0'
feedback_services.create_thread(
'exploration', exp_id, None, 'a subject', 'some text')
threadlist = feedback_services.get_all_threads(
'exploration', exp_id, False)
self.assertEqual(len(threadlist), 1)
thread_id = threadlist[0].id
messages = feedback_services.get_messages(thread_id)
self.assertEqual(len(messages), 1)
message_id = messages[0].message_id
self.assertTrue(isinstance(message_id, int))
# Retrieve the message instance from the storage layer.
datastore_id = feedback_models.GeneralFeedbackMessageModel.get_messages(
thread_id)[0].id
# The datastore id should be the thread id, followed by a full stop and
# then the message id.
self.assertEqual(datastore_id, '%s.%s' % (thread_id, message_id))
def test_create_message_fails_if_invalid_thread_id(self):
with self.assertRaises(
feedback_models.GeneralFeedbackMessageModel.EntityNotFoundError
):
feedback_services.create_message(
'invalid_thread_id', 'user_id', None, None, 'Hello')
def test_status_of_newly_created_thread_is_open(self):
exp_id = '0'
feedback_services.create_thread(
'exploration', exp_id, None, 'a subject', 'some text')
threadlist = feedback_services.get_all_threads(
'exploration', exp_id, False)
thread_status = threadlist[0].status
self.assertEqual(thread_status, feedback_models.STATUS_CHOICES_OPEN)
def test_get_exp_id_from_thread_id(self):
thread_id = 'exploration.exp1.1234'
self.assertEqual(
feedback_services.get_exp_id_from_thread_id(thread_id), 'exp1')
class MockFeedbackAnalyticsAggregator(
feedback_jobs_continuous.FeedbackAnalyticsAggregator):
"""A modified FeedbackAnalyticsAggregator that does not start a new batch
job when the previous one has finished.
"""
@classmethod
def _get_batch_job_manager_class(cls):
return MockFeedbackAnalyticsMRJobManager
@classmethod
def _kickoff_batch_job_after_previous_one_ends(cls):
pass
class MockFeedbackAnalyticsMRJobManager(
feedback_jobs_continuous.FeedbackAnalyticsMRJobManager):
@classmethod
def _get_continuous_computation_class(cls):
return MockFeedbackAnalyticsAggregator
class FeedbackThreadUnitTests(test_utils.GenericTestBase):
EXP_ID_1 = 'eid1'
EXP_ID_2 = 'eid2'
EXP_ID_3 = 'eid3'
THREAD_ID = 'thread_id'
EXPECTED_THREAD_DICT = {
'status': u'open',
'summary': None,
'original_author_username': None,
'subject': u'a subject'
}
EXPECTED_THREAD_DICT_VIEWER = {
'status': u'open',
'summary': None,
'original_author_username': None,
'subject': u'a subject second'
}
USER_EMAIL = '[email protected]'
USER_USERNAME = 'user'
def setUp(self):
super(FeedbackThreadUnitTests, self).setUp()
self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
self.signup(self.USER_EMAIL, self.USER_USERNAME)
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.user_id = self.get_user_id_from_email(self.USER_EMAIL)
self.viewer_id = self.get_user_id_from_email(self.VIEWER_EMAIL)
self.save_new_valid_exploration(
self.EXP_ID_1, self.owner_id, title='Bridges in England',
category='Architecture', language_code='en')
self.save_new_valid_exploration(
self.EXP_ID_2, self.owner_id, title='Sillat Suomi',
category='Architecture', language_code='fi')
self.save_new_valid_exploration(
self.EXP_ID_3, self.owner_id, title='Leaning tower of Pisa',
category='Architecture', language_code='fi')
def _get_all_messages_read(self, user_id, thread_id):
"""Returns the list of the ids of all the messages corresponding to the
given thread id read by the user.
"""
feedback_thread_user_model = (
feedback_models.GeneralFeedbackThreadUserModel.get(
user_id, thread_id))
return (
feedback_thread_user_model.message_ids_read_by_user if
feedback_thread_user_model else [])
def _run_computation(self):
"""Runs the MockFeedbackAnalyticsAggregator computation."""
MockFeedbackAnalyticsAggregator.start_computation()
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_CONTINUOUS_JOBS), 1)
self.process_and_flush_pending_tasks()
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_CONTINUOUS_JOBS), 0)
self.process_and_flush_pending_tasks()
def test_get_threads_single_exploration(self):
threads = feedback_services.get_threads('exploration', self.EXP_ID_1)
self.assertEqual(len(threads), 0)
feedback_services.create_thread(
'exploration', self.EXP_ID_1, None,
self.EXPECTED_THREAD_DICT['subject'], 'not used here')
threads = feedback_services.get_threads('exploration', self.EXP_ID_1)
self.assertEqual(1, len(threads))
self.assertDictContainsSubset(
self.EXPECTED_THREAD_DICT, threads[0].to_dict())
def test_get_all_threads(self):
# Create an anonymous feedback thread.
feedback_services.create_thread(
'exploration', self.EXP_ID_1, None,
self.EXPECTED_THREAD_DICT['subject'], 'not used here')
threads = feedback_services.get_all_threads(
'exploration', self.EXP_ID_1, False)
self.assertEqual(1, len(threads))
self.assertDictContainsSubset(
self.EXPECTED_THREAD_DICT, threads[0].to_dict())
self.EXPECTED_THREAD_DICT_VIEWER['original_author_username'] = (
self.VIEWER_USERNAME)
# Viewer creates feedback thread.
feedback_services.create_thread(
'exploration', self.EXP_ID_1, self.viewer_id,
self.EXPECTED_THREAD_DICT_VIEWER['subject'], 'not used here')
threads = feedback_services.get_all_threads(
'exploration', self.EXP_ID_1, False)
self.assertEqual(2, len(threads))
self.assertDictContainsSubset(
self.EXPECTED_THREAD_DICT_VIEWER, threads[0].to_dict())
def test_get_total_open_threads_before_job_run(self):
self.assertEqual(feedback_services.get_total_open_threads(
feedback_services.get_thread_analytics_multi([self.EXP_ID_1])), 0)
feedback_services.create_thread(
'exploration', self.EXP_ID_1, None,
self.EXPECTED_THREAD_DICT['subject'], 'not used here')
threads = feedback_services.get_all_threads(
'exploration', self.EXP_ID_1, False)
self.assertEqual(1, len(threads))
self.assertEqual(feedback_services.get_total_open_threads(
feedback_services.get_thread_analytics_multi(
[self.EXP_ID_1])), 0)
def test_get_total_open_threads_for_single_exploration(self):
feedback_services.create_thread(
'exploration', self.EXP_ID_1, None,
self.EXPECTED_THREAD_DICT['subject'], 'not used here')
threads = feedback_services.get_all_threads(
'exploration', self.EXP_ID_1, False)
self.assertEqual(1, len(threads))
self._run_computation()
self.assertEqual(feedback_services.get_total_open_threads(
feedback_services.get_thread_analytics_multi(
[self.EXP_ID_1])), 1)
def test_get_total_open_threads_for_multiple_explorations(self):
feedback_services.create_thread(
'exploration', self.EXP_ID_1, None,
self.EXPECTED_THREAD_DICT['subject'], 'not used here')
feedback_services.create_thread(
'exploration', self.EXP_ID_2, None,
self.EXPECTED_THREAD_DICT['subject'], 'not used here')
threads_exp_1 = feedback_services.get_all_threads(
'exploration', self.EXP_ID_1, False)
self.assertEqual(1, len(threads_exp_1))
threads_exp_2 = feedback_services.get_all_threads(
'exploration', self.EXP_ID_2, False)
self.assertEqual(1, len(threads_exp_2))
def _close_thread(thread_id):
"""Closes the thread corresponding to its thread id by updating its
status.
"""
thread = feedback_models.GeneralFeedbackThreadModel.get_by_id(
thread_id)
thread.status = feedback_models.STATUS_CHOICES_FIXED
thread.put()
_close_thread(threads_exp_1[0].id)
self.assertEqual(len(feedback_services.get_closed_threads(
'exploration', self.EXP_ID_1, False)), 1)
self._run_computation()
self.assertEqual(feedback_services.get_total_open_threads(
feedback_services.get_thread_analytics_multi(
[self.EXP_ID_1, self.EXP_ID_2])), 1)
def test_get_thread_summaries(self):
feedback_services.create_thread(
'exploration', self.EXP_ID_1, self.user_id,
self.EXPECTED_THREAD_DICT['subject'], 'not used here')
feedback_services.create_thread(
'exploration', self.EXP_ID_2, self.user_id,
self.EXPECTED_THREAD_DICT['subject'], 'not used here')
# The message count parameter is missing for this thread. The thread
# summaries function should account for this and still work correctly.
thread_3 = feedback_models.GeneralFeedbackThreadModel(
id='exploration.' + self.EXP_ID_3 + '.' + self.THREAD_ID,
entity_type='exploration', entity_id=self.EXP_ID_3,
original_author_id=self.user_id, subject='Feedback',
status=feedback_models.STATUS_CHOICES_OPEN, message_count=0,
has_suggestion=False)
thread_3.put()
feedback_services.create_message(
'exploration.' + self.EXP_ID_3 + '.' + self.THREAD_ID,
self.user_id, None, None, 'not used here')
thread_ids = subscription_services.get_all_threads_subscribed_to(
self.user_id)
thread_ids.append('exploration.' + self.EXP_ID_3 + '.' + self.THREAD_ID)
thread_summaries, number_of_unread_threads = (
feedback_services.get_thread_summaries(
self.user_id, thread_ids))
exploration_titles = (
['Bridges in England', 'Sillat Suomi', 'Leaning tower of Pisa'])
# Fetch the threads.
threads = []
threads.append(feedback_services.get_thread(thread_ids[0]))
threads.append(feedback_services.get_thread(thread_ids[1]))
threads.append(feedback_services.get_thread(
'exploration.' + self.EXP_ID_3 + '.' + self.THREAD_ID))
# Check that the number of unread threads matches the expected value.
self.assertEqual(number_of_unread_threads, 0)
for index, thread in enumerate(threads):
thread_summary = {
'status': thread.status,
'original_author_id': thread.original_author_id,
'last_updated': thread_summaries[index]['last_updated'],
'last_message_text': 'not used here',
'total_message_count': 1,
'last_message_is_read': True,
'second_last_message_is_read': False,
'author_last_message': user_services.get_username(
self.user_id),
'author_second_last_message': None,
'exploration_title': exploration_titles[index]
}
# Check if the summaries match.
self.assertDictContainsSubset(
thread_summary, thread_summaries[index])
feedback_services.create_message(
threads[0].id, self.owner_id, None, None, 'editor message')
_, number_of_unread_threads = (
feedback_services.get_thread_summaries(self.user_id, thread_ids))
# Check that the number of unread threads is now equal to 1.
self.assertEqual(number_of_unread_threads, 1)
def test_get_thread_summaries_returns_correct_message_count(self):
thread_id_1 = feedback_services.create_thread(
'exploration', self.EXP_ID_1, None,
self.EXPECTED_THREAD_DICT['subject'], 'not used here')
thread = feedback_models.GeneralFeedbackThreadModel.get(thread_id_1)
thread.message_count = 0
thread.put()
thread_id_2 = feedback_services.create_thread(
'exploration', self.EXP_ID_2, None,
self.EXPECTED_THREAD_DICT['subject'], 'not used here')
thread = feedback_models.GeneralFeedbackThreadModel.get(thread_id_2)
thread.message_count = 0
thread.put()
thread_summaries, _ = feedback_services.get_thread_summaries(
self.owner_id, [thread_id_1, thread_id_2])
self.assertEqual(len(thread_summaries), 2)
self.assertEqual(thread_summaries[0]['total_message_count'], 1)
self.assertEqual(thread_summaries[1]['total_message_count'], 1)
def test_update_messages_read_by_the_user(self):
feedback_services.create_thread(
'exploration', self.EXP_ID_1, self.user_id,
self.EXPECTED_THREAD_DICT['subject'], 'not used here')
threads = feedback_services.get_all_threads(
'exploration', self.EXP_ID_1, False)
thread_id = threads[0].id
messages = feedback_services.get_messages(thread_id)
message_ids = [message.message_id for message in messages]
# The viewer has not read any messages yet.
self.assertEqual(self._get_all_messages_read(
self.viewer_id, thread_id), [])
feedback_services.update_messages_read_by_the_user(
self.viewer_id, thread_id, message_ids)
# Check that the messages are added to the viewer's read list.
self.assertEqual(self._get_all_messages_read(
self.viewer_id, thread_id), message_ids)
def test_only_exploration_threads_trigger_events(self):
exp_id = 'eid'
self.save_new_valid_exploration(exp_id, 'owner')
event_handler_call_counter_exploration = test_utils.CallCounter(
event_services.FeedbackThreadCreatedEventHandler.record)
with self.swap(
event_services.FeedbackThreadCreatedEventHandler, 'record',
event_handler_call_counter_exploration):
feedback_services.create_thread(
feconf.ENTITY_TYPE_EXPLORATION, exp_id,
None, 'a subject', 'some text')
self.assertEqual(
event_handler_call_counter_exploration.times_called, 1)
event_handler_call_counter_non_exploration = (
test_utils.CallCounter(
event_services.FeedbackThreadCreatedEventHandler.record))
with self.swap(
event_services.FeedbackThreadCreatedEventHandler, 'record',
event_handler_call_counter_non_exploration):
feedback_services.create_thread(
'topic', 'topic_id', None, 'a subject',
'some text')
self.assertEqual(
event_handler_call_counter_non_exploration.times_called, 0)
def test_create_message_increments_message_count(self):
thread_id = feedback_services.create_thread(
'exploration', self.EXP_ID_1, self.user_id,
self.EXPECTED_THREAD_DICT['subject'], 'not used here')
thread = feedback_models.GeneralFeedbackThreadModel.get(thread_id)
self.assertEqual(thread.message_count, 1)
feedback_services.create_message(
thread_id, self.user_id,
feedback_models.STATUS_CHOICES_FIXED, None, 'editor message')
thread = feedback_models.GeneralFeedbackThreadModel.get(thread_id)
self.assertEqual(thread.message_count, 2)
def test_get_thread_returns_correct_message_count(self):
thread_id = feedback_services.create_thread(
'exploration', self.EXP_ID_1, self.user_id,
self.EXPECTED_THREAD_DICT['subject'], 'not used here')
thread = feedback_models.GeneralFeedbackThreadModel.get(thread_id)
thread.message_count = 0
thread.put()
thread = feedback_services.get_thread(thread_id)
self.assertEqual(thread.message_count, 1)
class EmailsTaskqueueTests(test_utils.GenericTestBase):
"""Tests for tasks in emails taskqueue."""
def test_create_new_batch_task(self):
user_id = 'user'
feedback_services.enqueue_feedback_message_batch_email_task(user_id)
self.assertEqual(
self.count_jobs_in_taskqueue(taskqueue_services.QUEUE_NAME_EMAILS),
1)
tasks = self.get_pending_tasks(
queue_name=taskqueue_services.QUEUE_NAME_EMAILS)
self.assertEqual(
tasks[0].url, feconf.TASK_URL_FEEDBACK_MESSAGE_EMAILS)
def test_create_new_instant_task(self):
user_id = 'user'
reference_dict = {
'entity_type': 'exploration',
'entity_id': 'eid',
'thread_id': 'tid',
'message_id': 'mid'
}
reference = feedback_domain.FeedbackMessageReference(
reference_dict['entity_type'], reference_dict['entity_id'],
reference_dict['thread_id'], reference_dict['message_id'])
feedback_services.enqueue_feedback_message_instant_email_task(
user_id, reference)
self.assertEqual(
self.count_jobs_in_taskqueue(taskqueue_services.QUEUE_NAME_EMAILS),
1)
tasks = self.get_pending_tasks(
queue_name=taskqueue_services.QUEUE_NAME_EMAILS)
payload = json.loads(tasks[0].payload)
self.assertEqual(
tasks[0].url, feconf.TASK_URL_INSTANT_FEEDBACK_EMAILS)
self.assertDictEqual(payload['reference_dict'], reference_dict)
class FeedbackMessageEmailTests(test_utils.GenericTestBase):
"""Tests for feedback message emails."""
def setUp(self):
super(FeedbackMessageEmailTests, self).setUp()
self.signup('[email protected]', 'A')
self.user_id_a = self.get_user_id_from_email('[email protected]')
self.signup('[email protected]', 'B')
self.user_id_b = self.get_user_id_from_email('[email protected]')
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
self.exploration = self.save_new_default_exploration(
'A', self.editor_id, title='Title')
self.can_send_emails_ctx = self.swap(
feconf, 'CAN_SEND_EMAILS', True)
self.can_send_feedback_email_ctx = self.swap(
feconf, 'CAN_SEND_FEEDBACK_MESSAGE_EMAILS', True)
def test_pop_feedback_message_references(self):
with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
feedback_services.create_thread(
'exploration', self.exploration.id,
self.user_id_a, 'a subject', 'some text')
threadlist = feedback_services.get_all_threads(
'exploration', self.exploration.id, False)
thread_id = threadlist[0].id
messagelist = feedback_services.get_messages(thread_id)
self.assertEqual(len(messagelist), 1)
feedback_services.pop_feedback_message_references(self.editor_id, 0)
model = feedback_models.UnsentFeedbackEmailModel.get(
self.editor_id, strict=False)
self.assertEqual(
len(model.feedback_message_references), 1)
self.assertEqual(
model.feedback_message_references[0]['thread_id'], thread_id)
feedback_services.pop_feedback_message_references(self.editor_id, 1)
model = feedback_models.UnsentFeedbackEmailModel.get(
self.editor_id, strict=False)
self.assertIsNone(model)
def test_update_feedback_message_references(self):
with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
# There are no feedback message references to remove.
self.assertIsNone(
feedback_services.clear_feedback_message_references(
self.editor_id, self.exploration.id, 'thread_id'))
feedback_services.create_thread(
'exploration', self.exploration.id,
self.user_id_a, 'a subject', 'some text')
threadlist = feedback_services.get_all_threads(
'exploration', self.exploration.id, False)
thread_id = threadlist[0].id
messagelist = feedback_services.get_messages(thread_id)
self.assertEqual(len(messagelist), 1)
model = feedback_models.UnsentFeedbackEmailModel.get(
self.editor_id)
self.assertEqual(
len(model.feedback_message_references), 1)
self.assertEqual(
model.feedback_message_references[0]['thread_id'], thread_id)
feedback_services.clear_feedback_message_references(
self.editor_id, self.exploration.id, 'new_thread_id')
model = feedback_models.UnsentFeedbackEmailModel.get(
self.editor_id)
self.assertEqual(
len(model.feedback_message_references), 1)
self.assertEqual(
model.feedback_message_references[0]['thread_id'],
thread_id)
def test_update_feedback_email_retries(self):
with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
feedback_services.create_thread(
'exploration', self.exploration.id,
self.user_id_a, 'a subject', 'some text')
model = feedback_models.UnsentFeedbackEmailModel.get(
self.editor_id)
self.assertEqual(model.retries, 0)
with self.swap(
feconf, 'DEFAULT_FEEDBACK_MESSAGE_EMAIL_COUNTDOWN_SECS', -1):
feedback_services.update_feedback_email_retries(self.editor_id)
model = feedback_models.UnsentFeedbackEmailModel.get(
self.editor_id)
self.assertEqual(model.retries, 1)
def test_send_feedback_message_email(self):
with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
feedback_services.create_thread(
'exploration', self.exploration.id,
self.user_id_a, 'a subject', 'some text')
threadlist = feedback_services.get_all_threads(
'exploration', self.exploration.id, False)
thread_id = threadlist[0].id
messagelist = feedback_services.get_messages(thread_id)
self.assertEqual(len(messagelist), 1)
expected_feedback_message_dict = {
'entity_type': 'exploration',
'entity_id': self.exploration.id,
'thread_id': thread_id,
'message_id': messagelist[0].message_id
}
# There are two jobs in the taskqueue: one for the realtime
# event associated with creating a thread, and one for sending
# the email.
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EVENTS), 1)
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EMAILS), 1)
model = feedback_models.UnsentFeedbackEmailModel.get(self.editor_id)
self.assertEqual(len(model.feedback_message_references), 1)
self.assertDictEqual(
model.feedback_message_references[0],
expected_feedback_message_dict)
self.assertEqual(model.retries, 0)
def test_add_new_feedback_message(self):
with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
feedback_services.create_thread(
'exploration', self.exploration.id,
self.user_id_a, 'a subject', 'some text')
threadlist = feedback_services.get_all_threads(
'exploration', self.exploration.id, False)
thread_id = threadlist[0].id
feedback_services.create_message(
thread_id, self.user_id_a, None, None, 'editor message')
# There are two jobs in the taskqueue: one for the realtime
# event associated with creating a thread, and one for sending
# the email.
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EVENTS), 1)
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EMAILS), 1)
messagelist = feedback_services.get_messages(thread_id)
self.assertEqual(len(messagelist), 2)
expected_feedback_message_dict1 = {
'entity_type': 'exploration',
'entity_id': self.exploration.id,
'thread_id': thread_id,
'message_id': messagelist[0].message_id
}
expected_feedback_message_dict2 = {
'entity_type': 'exploration',
'entity_id': self.exploration.id,
'thread_id': thread_id,
'message_id': messagelist[1].message_id
}
model = feedback_models.UnsentFeedbackEmailModel.get(self.editor_id)
self.assertEqual(len(model.feedback_message_references), 2)
self.assertDictEqual(
model.feedback_message_references[0],
expected_feedback_message_dict1)
self.assertDictEqual(
model.feedback_message_references[1],
expected_feedback_message_dict2)
self.assertEqual(model.retries, 0)
def test_email_is_not_sent_recipient_has_muted_emails_globally(self):
user_services.update_email_preferences(
self.editor_id, True, False, False, False)
with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
feedback_services.create_thread(
'exploration', self.exploration.id,
self.user_id_a, 'a subject', 'some text')
# Note: the job in the taskqueue represents the realtime
# event emitted by create_thread().
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EVENTS), 1)
self.process_and_flush_pending_tasks()
messages = self.mail_stub.get_sent_messages(to=self.EDITOR_EMAIL)
self.assertEqual(len(messages), 0)
def test_email_is_not_sent_recipient_has_muted_this_exploration(self):
user_services.set_email_preferences_for_exploration(
self.editor_id, self.exploration.id,
mute_feedback_notifications=True)
with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
feedback_services.create_thread(
'exploration', self.exploration.id,
self.user_id_a, 'a subject', 'some text')
# Note: the job in the taskqueue represents the realtime
# event emitted by create_thread().
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EVENTS), 1)
self.process_and_flush_pending_tasks()
messages = self.mail_stub.get_sent_messages(to=self.EDITOR_EMAIL)
self.assertEqual(len(messages), 0)
def test_that_emails_are_not_sent_for_anonymous_user(self):
with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
feedback_services.create_thread(
'exploration', self.exploration.id, None,
'a subject', 'some text')
# Note: the job in the taskqueue represents the realtime
# event emitted by create_thread().
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EVENTS), 1)
self.process_and_flush_pending_tasks()
messages = self.mail_stub.get_sent_messages(to=self.EDITOR_EMAIL)
self.assertEqual(len(messages), 0)
def test_that_emails_are_sent_for_registered_user(self):
with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
feedback_services.create_thread(
'exploration', self.exploration.id,
self.user_id_a, 'a subject', 'some text')
# There are two jobs in the taskqueue: one for the realtime
# event associated with creating a thread, and one for sending
# the email.
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EVENTS), 1)
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EMAILS), 1)
tasks = self.get_pending_tasks(
queue_name=taskqueue_services.QUEUE_NAME_EMAILS)
self.assertEqual(
tasks[0].url, feconf.TASK_URL_FEEDBACK_MESSAGE_EMAILS)
self.process_and_flush_pending_tasks()
messages = self.mail_stub.get_sent_messages(to=self.EDITOR_EMAIL)
self.assertEqual(len(messages), 1)
def test_that_emails_are_not_sent_if_service_is_disabled(self):
cannot_send_emails_ctx = self.swap(
feconf, 'CAN_SEND_EMAILS', False)
cannot_send_feedback_message_email_ctx = self.swap(
feconf, 'CAN_SEND_FEEDBACK_MESSAGE_EMAILS', False)
with cannot_send_emails_ctx, cannot_send_feedback_message_email_ctx:
feedback_services.create_thread(
'exploration', self.exploration.id,
self.user_id_a, 'a subject', 'some text')
# Note: the job in the taskqueue represents the realtime
# event emitted by create_thread().
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EVENTS), 1)
self.process_and_flush_pending_tasks()
messages = self.mail_stub.get_sent_messages(to=self.EDITOR_EMAIL)
self.assertEqual(len(messages), 0)
def test_that_emails_are_not_sent_for_thread_status_changes(self):
with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
feedback_services.create_thread(
'exploration', self.exploration.id,
self.user_id_a, 'a subject', '')
# Note: the job in the taskqueue represents the realtime
# event emitted by create_thread().
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EVENTS), 1)
self.process_and_flush_pending_tasks()
messages = self.mail_stub.get_sent_messages(to=self.EDITOR_EMAIL)
self.assertEqual(len(messages), 0)
def test_that_email_are_not_sent_to_author_himself(self):
with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
feedback_services.create_thread(
'exploration', self.exploration.id,
self.editor_id, 'a subject', 'A message')
# Note: the job in the taskqueue represents the realtime
# event emitted by create_thread().
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EVENTS), 1)
self.process_and_flush_pending_tasks()
messages = self.mail_stub.get_sent_messages(to=self.EDITOR_EMAIL)
self.assertEqual(len(messages), 0)
def test_that_email_is_sent_for_reply_on_feedback(self):
with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
feedback_services.create_thread(
'exploration', self.exploration.id,
self.user_id_a, 'a subject', 'A message')
# There are two jobs in the taskqueue: one for the realtime
# event associated with creating a thread, and one for sending
# the email.
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EMAILS), 1)
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EVENTS), 1)
self.process_and_flush_pending_tasks()
threadlist = feedback_services.get_all_threads(
'exploration', self.exploration.id, False)
thread_id = threadlist[0].id
feedback_services.create_message(
thread_id, self.editor_id, None, None, 'editor message')
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EMAILS), 1)
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EVENTS), 0)
self.process_and_flush_pending_tasks()
def test_that_email_is_sent_for_changing_status_of_thread(self):
with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
feedback_services.create_thread(
'exploration', self.exploration.id,
self.user_id_a, 'a subject', 'A message')
# There are two jobs in the taskqueue: one for the realtime
# event associated with creating a thread, and one for sending
# the email.
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EMAILS), 1)
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EVENTS), 1)
self.process_and_flush_pending_tasks()
threadlist = feedback_services.get_all_threads(
'exploration', self.exploration.id, False)
thread_id = threadlist[0].id
feedback_services.create_message(
thread_id, self.editor_id,
feedback_models.STATUS_CHOICES_FIXED, None, '')
# There are two jobs in the taskqueue: one for the realtime
# event associated with changing the status of the thread, and one
# for sending the email.
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EMAILS), 1)
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EVENTS), 1)
self.process_and_flush_pending_tasks()
def test_that_email_is_sent_for_each_feedback_message(self):
with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
feedback_services.create_thread(
'exploration', self.exploration.id,
self.user_id_a, 'a subject', 'A message')
threadlist = feedback_services.get_all_threads(
'exploration', self.exploration.id, False)
thread_id = threadlist[0].id
# There are two jobs in the taskqueue: one for the realtime
# event associated with creating a thread, and one for sending
# the email.
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EMAILS), 1)
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EVENTS), 1)
self.process_and_flush_pending_tasks()
feedback_services.create_message(
thread_id, self.editor_id, None, None, 'editor message')
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EMAILS), 1)
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EVENTS), 0)
self.process_and_flush_pending_tasks()
feedback_services.create_message(
thread_id, self.editor_id, None, None, 'editor message2')
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EMAILS), 1)
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EVENTS), 0)
self.process_and_flush_pending_tasks()
def test_that_reply_to_id_is_created(self):
with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
feedback_services.create_thread(
'exploration', self.exploration.id,
self.user_id_a, 'a subject', 'A message')
threadlist = feedback_services.get_all_threads(
'exploration', self.exploration.id, False)
thread_id = threadlist[0].id
feedback_services.create_message(
thread_id, self.user_id_b, None, None, 'user b message')
# Check that reply_to id is created for user A.
queried_object = (
email_services
.get_feedback_thread_reply_info_by_user_and_thread_ids(
self.user_id_a, thread_id))
self.assertEqual(queried_object.user_id, self.user_id_a)
self.assertEqual(queried_object.thread_id, thread_id)
feedback_services.create_message(
thread_id, self.user_id_a, None, None, 'user a message')
# Check that reply_to id is created for user B.
queried_object = (
email_services
.get_feedback_thread_reply_info_by_user_and_thread_ids(
self.user_id_b, thread_id))
self.assertEqual(queried_object.user_id, self.user_id_b)
self.assertEqual(queried_object.thread_id, thread_id)
class FeedbackMessageBatchEmailHandlerTests(test_utils.GenericTestBase):
def setUp(self):
super(FeedbackMessageBatchEmailHandlerTests, self).setUp()
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
self.signup(self.NEW_USER_EMAIL, self.NEW_USER_USERNAME)
self.new_user_id = self.get_user_id_from_email(self.NEW_USER_EMAIL)
self.exploration = self.save_new_default_exploration(
'A', self.editor_id, title='Title')
self.can_send_emails_ctx = self.swap(
feconf, 'CAN_SEND_EMAILS', True)
self.can_send_feedback_email_ctx = self.swap(
feconf, 'CAN_SEND_FEEDBACK_MESSAGE_EMAILS', True)
def test_that_emails_are_sent(self):
expected_email_html_body = (
'Hi editor,<br>'
'<br>'
'You\'ve received a new message on your Oppia explorations:<br>'
'<ul>'
'<li><a href="https://www.oppia.org/create/A#/feedback">Title</a>:'
'<br>'
'<ul><li>some text<br></li>'
'</ul></li></ul>'
'You can view and reply to your messages from your '
'<a href="https://www.oppia.org/creator_dashboard">dashboard</a>.'
'<br>'
'<br>Thanks, and happy teaching!<br>'
'<br>'
'Best wishes,<br>'
'The Oppia Team<br>'
'<br>'
'You can change your email preferences via the '
'<a href="https://www.example.com">Preferences</a> page.')
expected_email_text_body = (
'Hi editor,\n'
'\n'
'You\'ve received a new message on your Oppia explorations:\n'
'- Title:\n'
'- some text\n'
'You can view and reply to your messages from your dashboard.\n'
'\n'
'Thanks, and happy teaching!\n'
'\n'
'Best wishes,\n'
'The Oppia Team\n'
'\n'
'You can change your email preferences via the Preferences page.')
with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
feedback_services.create_thread(
'exploration', self.exploration.id,
self.new_user_id, 'a subject', 'some text')
threadlist = feedback_services.get_all_threads(
'exploration', self.exploration.id, False)
thread_id = threadlist[0].id
messagelist = feedback_services.get_messages(thread_id)
self.assertEqual(len(messagelist), 1)
self.process_and_flush_pending_tasks()
messages = self.mail_stub.get_sent_messages(to=self.EDITOR_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(
messages[0].html.decode(), expected_email_html_body)
self.assertEqual(
messages[0].body.decode(), expected_email_text_body)
def test_that_correct_emails_are_sent_for_multiple_feedback(self):
expected_email_html_body = (
'Hi editor,<br>'
'<br>'
'You\'ve received 2 new messages on your Oppia explorations:<br>'
'<ul>'
'<li><a href="https://www.oppia.org/create/A#/feedback">Title</a>:'
'<br>'
'<ul><li>some text<br></li>'
'<li>more text<br></li>'
'</ul></li></ul>'
'You can view and reply to your messages from your '
'<a href="https://www.oppia.org/creator_dashboard">dashboard</a>.'
'<br>'
'<br>Thanks, and happy teaching!<br>'
'<br>'
'Best wishes,<br>'
'The Oppia Team<br>'
'<br>'
'You can change your email preferences via the '
'<a href="https://www.example.com">Preferences</a> page.')
expected_email_text_body = (
'Hi editor,\n'
'\n'
'You\'ve received 2 new messages on your Oppia explorations:\n'
'- Title:\n'
'- some text\n'
'- more text\n'
'You can view and reply to your messages from your dashboard.\n'
'\n'
'Thanks, and happy teaching!\n'
'\n'
'Best wishes,\n'
'The Oppia Team\n'
'\n'
'You can change your email preferences via the Preferences page.')
with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
feedback_services.create_thread(
'exploration', self.exploration.id,
self.new_user_id, 'a subject', 'some text')
threadlist = feedback_services.get_all_threads(
'exploration', self.exploration.id, False)
thread_id = threadlist[0].id
feedback_services.create_message(
thread_id, self.new_user_id,
feedback_models.STATUS_CHOICES_OPEN, 'subject', 'more text')
messagelist = feedback_services.get_messages(thread_id)
self.assertEqual(len(messagelist), 2)
self.process_and_flush_pending_tasks()
messages = self.mail_stub.get_sent_messages(
to=self.EDITOR_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(
messages[0].html.decode(), expected_email_html_body)
self.assertEqual(
messages[0].body.decode(), expected_email_text_body)
def test_that_emails_are_not_sent_if_already_seen(self):
with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
feedback_services.create_thread(
'exploration', self.exploration.id,
self.new_user_id, 'a subject', 'some text')
threadlist = feedback_services.get_all_threads(
'exploration', self.exploration.id, False)
thread_id = threadlist[0].id
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
self.post_json(
'%s/%s' % (
feconf.FEEDBACK_THREAD_VIEW_EVENT_URL, thread_id),
{'thread_id': thread_id}, csrf_token=csrf_token)
self.process_and_flush_pending_tasks()
messages = self.mail_stub.get_sent_messages(to=self.EDITOR_EMAIL)
self.assertEqual(len(messages), 0)
class FeedbackMessageInstantEmailHandlerTests(test_utils.GenericTestBase):
def setUp(self):
super(FeedbackMessageInstantEmailHandlerTests, self).setUp()
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
self.signup(self.NEW_USER_EMAIL, self.NEW_USER_USERNAME)
self.new_user_id = self.get_user_id_from_email(self.NEW_USER_EMAIL)
self.exploration = self.save_new_default_exploration(
'A', self.editor_id, title='Title')
self.can_send_emails_ctx = self.swap(
feconf, 'CAN_SEND_EMAILS', True)
self.can_send_feedback_email_ctx = self.swap(
feconf, 'CAN_SEND_FEEDBACK_MESSAGE_EMAILS', True)
def test_that_emails_are_sent_for_feedback_message(self):
expected_email_html_body = (
'Hi newuser,<br><br>'
'New update to thread "a subject" on '
'<a href="https://www.oppia.org/create/A#/feedback">Title</a>:<br>'
'<ul><li>editor: editor message<br></li></ul>'
'(You received this message because you are a '
'participant in this thread.)<br><br>'
'Best wishes,<br>'
'The Oppia team<br>'
'<br>'
'You can change your email preferences via the '
'<a href="https://www.example.com">Preferences</a> page.')
expected_email_text_body = (
'Hi newuser,\n'
'\n'
'New update to thread "a subject" on Title:\n'
'- editor: editor message\n'
'(You received this message because you are a'
' participant in this thread.)\n'
'\n'
'Best wishes,\n'
'The Oppia team\n'
'\n'
'You can change your email preferences via the Preferences page.')
with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
feedback_services.create_thread(
'exploration', self.exploration.id,
self.new_user_id, 'a subject', 'some text')
self.process_and_flush_pending_tasks()
threadlist = feedback_services.get_all_threads(
'exploration', self.exploration.id, False)
thread_id = threadlist[0].id
feedback_services.create_message(
thread_id, self.editor_id, None, None, 'editor message')
self.process_and_flush_pending_tasks()
messages = self.mail_stub.get_sent_messages(to=self.NEW_USER_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(
messages[0].html.decode(), expected_email_html_body)
self.assertEqual(
messages[0].body.decode(), expected_email_text_body)
def test_that_emails_are_sent_for_status_change(self):
expected_email_html_body = (
'Hi newuser,<br><br>'
'New update to thread "a subject" on '
'<a href="https://www.oppia.org/create/A#/feedback">Title</a>:<br>'
'<ul><li>editor: changed status from open to fixed<br></li></ul>'
'(You received this message because you are a '
'participant in this thread.)<br><br>'
'Best wishes,<br>'
'The Oppia team<br>'
'<br>'
'You can change your email preferences via the '
'<a href="https://www.example.com">Preferences</a> page.')
expected_email_text_body = (
'Hi newuser,\n'
'\n'
'New update to thread "a subject" on Title:\n'
'- editor: changed status from open to fixed\n'
'(You received this message because you are a'
' participant in this thread.)\n'
'\n'
'Best wishes,\n'
'The Oppia team\n'
'\n'
'You can change your email preferences via the Preferences page.')
with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
feedback_services.create_thread(
'exploration', self.exploration.id,
self.new_user_id, 'a subject', 'some text')
self.process_and_flush_pending_tasks()
threadlist = feedback_services.get_all_threads(
'exploration', self.exploration.id, False)
thread_id = threadlist[0].id
feedback_services.create_message(
thread_id, self.editor_id,
feedback_models.STATUS_CHOICES_FIXED, None, '')
self.process_and_flush_pending_tasks()
messages = self.mail_stub.get_sent_messages(to=self.NEW_USER_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(
messages[0].html.decode(), expected_email_html_body)
self.assertEqual(
messages[0].body.decode(), expected_email_text_body)
def test_that_emails_are_sent_for_both_status_change_and_message(self):
expected_email_html_body_message = (
'Hi newuser,<br><br>'
'New update to thread "a subject" on '
'<a href="https://www.oppia.org/create/A#/feedback">Title</a>:<br>'
'<ul><li>editor: editor message<br></li></ul>'
'(You received this message because you are a '
'participant in this thread.)<br><br>'
'Best wishes,<br>'
'The Oppia team<br>'
'<br>'
'You can change your email preferences via the '
'<a href="https://www.example.com">Preferences</a> page.')
expected_email_text_body_message = (
'Hi newuser,\n'
'\n'
'New update to thread "a subject" on Title:\n'
'- editor: editor message\n'
'(You received this message because you are a'
' participant in this thread.)\n'
'\n'
'Best wishes,\n'
'The Oppia team\n'
'\n'
'You can change your email preferences via the Preferences page.')
expected_email_html_body_status = (
'Hi newuser,<br><br>'
'New update to thread "a subject" on '
'<a href="https://www.oppia.org/create/A#/feedback">Title</a>:<br>'
'<ul><li>editor: changed status from open to fixed<br></li></ul>'
'(You received this message because you are a '
'participant in this thread.)<br><br>'
'Best wishes,<br>'
'The Oppia team<br>'
'<br>'
'You can change your email preferences via the '
'<a href="https://www.example.com">Preferences</a> page.')
expected_email_text_body_status = (
'Hi newuser,\n'
'\n'
'New update to thread "a subject" on Title:\n'
'- editor: changed status from open to fixed\n'
'(You received this message because you are a'
' participant in this thread.)\n'
'\n'
'Best wishes,\n'
'The Oppia team\n'
'\n'
'You can change your email preferences via the Preferences page.')
with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
feedback_services.create_thread(
'exploration', self.exploration.id,
self.new_user_id, 'a subject', 'some text')
self.process_and_flush_pending_tasks()
threadlist = feedback_services.get_all_threads(
'exploration', self.exploration.id, False)
thread_id = threadlist[0].id
feedback_services.create_message(
thread_id, self.editor_id,
feedback_models.STATUS_CHOICES_FIXED, None,
'editor message')
self.process_and_flush_pending_tasks()
messages = self.mail_stub.get_sent_messages(to=self.NEW_USER_EMAIL)
self.assertEqual(len(messages), 2)
self.assertEqual(
messages[0].html.decode(), expected_email_html_body_status)
self.assertEqual(
messages[0].body.decode(), expected_email_text_body_status)
self.assertEqual(
messages[1].html.decode(), expected_email_html_body_message)
self.assertEqual(
messages[1].body.decode(), expected_email_text_body_message)
def test_that_emails_are_not_sent_to_anonymous_user(self):
with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
# Create the thread as an anonymous user.
feedback_services.create_thread(
'exploration', self.exploration.id,
None, 'a subject', 'some text')
self.process_and_flush_pending_tasks()
threadlist = feedback_services.get_all_threads(
'exploration', self.exploration.id, False)
thread_id = threadlist[0].id
feedback_services.create_message(
thread_id, self.editor_id,
feedback_models.STATUS_CHOICES_FIXED, None,
'editor message')
self.process_and_flush_pending_tasks()
messages = self.mail_stub.get_sent_messages()
self.assertEqual(len(messages), 0)
"""
HTML Widget classes
"""
from __future__ import unicode_literals
import copy
import datetime
import re
from itertools import chain
from django.conf import settings
from django.forms.utils import flatatt, to_current_timezone
from django.utils import datetime_safe, formats, six
from django.utils.datastructures import MultiValueDict
from django.utils.dates import MONTHS
from django.utils.encoding import (
force_str, force_text, python_2_unicode_compatible,
)
from django.utils.formats import get_format
from django.utils.html import conditional_escape, format_html, html_safe
from django.utils.safestring import mark_safe
from django.utils.six.moves import range
from django.utils.six.moves.urllib.parse import urljoin
from django.utils.translation import ugettext_lazy
__all__ = (
'Media', 'MediaDefiningClass', 'Widget', 'TextInput', 'NumberInput',
'EmailInput', 'URLInput', 'PasswordInput', 'HiddenInput',
'MultipleHiddenInput', 'FileInput', 'ClearableFileInput', 'Textarea',
'DateInput', 'DateTimeInput', 'TimeInput', 'CheckboxInput', 'Select',
'NullBooleanSelect', 'SelectMultiple', 'RadioSelect',
'CheckboxSelectMultiple', 'MultiWidget', 'SplitDateTimeWidget',
'SplitHiddenDateTimeWidget', 'SelectDateWidget',
)
MEDIA_TYPES = ('css', 'js')
@html_safe
@python_2_unicode_compatible
class Media(object):
def __init__(self, media=None, **kwargs):
if media:
media_attrs = media.__dict__
else:
media_attrs = kwargs
self._css = {}
self._js = []
for name in MEDIA_TYPES:
getattr(self, 'add_' + name)(media_attrs.get(name, None))
def __str__(self):
return self.render()
def render(self):
return mark_safe('\n'.join(chain(*[getattr(self, 'render_' + name)() for name in MEDIA_TYPES])))
def render_js(self):
return [
format_html(
'<script type="text/javascript" src="{}"></script>',
self.absolute_path(path)
) for path in self._js
]
def render_css(self):
# To keep rendering order consistent, we can't just iterate over items().
# We need to sort the keys, and iterate over the sorted list.
media = sorted(self._css.keys())
return chain(*[[
format_html(
'<link href="{}" type="text/css" media="{}" rel="stylesheet" />',
self.absolute_path(path), medium
) for path in self._css[medium]
] for medium in media])
def absolute_path(self, path, prefix=None):
if path.startswith(('http://', 'https://', '/')):
return path
if prefix is None:
if settings.STATIC_URL is None:
# backwards compatibility
prefix = settings.MEDIA_URL
else:
prefix = settings.STATIC_URL
return urljoin(prefix, path)
def __getitem__(self, name):
"Returns a Media object that only contains media of the given type"
if name in MEDIA_TYPES:
return Media(**{str(name): getattr(self, '_' + name)})
raise KeyError('Unknown media type "%s"' % name)
def add_js(self, data):
if data:
for path in data:
if path not in self._js:
self._js.append(path)
def add_css(self, data):
if data:
for medium, paths in data.items():
for path in paths:
if not self._css.get(medium) or path not in self._css[medium]:
self._css.setdefault(medium, []).append(path)
def __add__(self, other):
combined = Media()
for name in MEDIA_TYPES:
getattr(combined, 'add_' + name)(getattr(self, '_' + name, None))
getattr(combined, 'add_' + name)(getattr(other, '_' + name, None))
return combined
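# --- Illustrative sketch (not part of Django): a minimal example of how two
# Media objects combine and filter by asset type. The file names are
# hypothetical, and rendering relative paths assumes STATIC_URL is configured.
def _media_usage_sketch():
    base = Media(css={'all': ['base.css']}, js=['base.js'])
    extra = Media(js=['extra.js', 'base.js'])
    combined = base + extra          # js is deduplicated: base.js, extra.js
    return combined['js'].render()   # renders only the <script> tags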
def media_property(cls):
def _media(self):
# Get the media property of the superclass, if it exists
sup_cls = super(cls, self)
try:
base = sup_cls.media
except AttributeError:
base = Media()
# Get the media definition for this class
definition = getattr(cls, 'Media', None)
if definition:
extend = getattr(definition, 'extend', True)
if extend:
if extend is True:
m = base
else:
m = Media()
for medium in extend:
m = m + base[medium]
return m + Media(definition)
else:
return Media(definition)
else:
return base
return property(_media)
class MediaDefiningClass(type):
"""
Metaclass for classes that can have media definitions.
"""
def __new__(mcs, name, bases, attrs):
new_class = (super(MediaDefiningClass, mcs)
.__new__(mcs, name, bases, attrs))
if 'media' not in attrs:
new_class.media = media_property(new_class)
return new_class
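# --- Illustrative sketch (hypothetical subclass, not part of Django): with
# MediaDefiningClass as the metaclass, an inner ``class Media`` is wired into a
# merged ``media`` property by media_property(). Rendering assumes STATIC_URL
# is configured; TextInput is defined further down in this module.
def _media_defining_class_sketch():
    class CalendarWidget(TextInput):
        class Media:
            css = {'all': ['calendar.css']}
            js = ['calendar.js']
    # The asset lists are merged with those of the parent classes unless the
    # inner class sets ``extend = False``.
    return CalendarWidget().media.render()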
@html_safe
@python_2_unicode_compatible
class SubWidget(object):
"""
Some widgets are made of multiple HTML elements -- namely, RadioSelect.
This is a class that represents the "inner" HTML element of a widget.
"""
def __init__(self, parent_widget, name, value, attrs, choices):
self.parent_widget = parent_widget
self.name, self.value = name, value
self.attrs, self.choices = attrs, choices
def __str__(self):
args = [self.name, self.value, self.attrs]
if self.choices:
args.append(self.choices)
return self.parent_widget.render(*args)
class Widget(six.with_metaclass(MediaDefiningClass)):
needs_multipart_form = False  # Determines whether this widget needs a multipart form.
is_localized = False
is_required = False
def __init__(self, attrs=None):
if attrs is not None:
self.attrs = attrs.copy()
else:
self.attrs = {}
def __deepcopy__(self, memo):
obj = copy.copy(self)
obj.attrs = self.attrs.copy()
memo[id(self)] = obj
return obj
@property
def is_hidden(self):
return self.input_type == 'hidden' if hasattr(self, 'input_type') else False
def subwidgets(self, name, value, attrs=None, choices=()):
"""
Yields all "subwidgets" of this widget. Used only by RadioSelect to
allow template access to individual <input type="radio"> buttons.
Arguments are the same as for render().
"""
yield SubWidget(self, name, value, attrs, choices)
def render(self, name, value, attrs=None):
"""
Returns this Widget rendered as HTML, as a Unicode string.
The 'value' given is not guaranteed to be valid input, so subclass
implementations should program defensively.
"""
raise NotImplementedError('subclasses of Widget must provide a render() method')
def build_attrs(self, extra_attrs=None, **kwargs):
"Helper function for building an attribute dictionary."
attrs = dict(self.attrs, **kwargs)
if extra_attrs:
attrs.update(extra_attrs)
return attrs
def value_from_datadict(self, data, files, name):
"""
Given a dictionary of data and this widget's name, returns the value
of this widget. Returns None if it's not provided.
"""
return data.get(name, None)
def id_for_label(self, id_):
"""
Returns the HTML ID attribute of this Widget for use by a <label>,
given the ID of the field. Returns None if no ID is available.
This hook is necessary because some widgets have multiple HTML
elements and, thus, multiple IDs. In that case, this method should
return an ID value that corresponds to the first ID in the widget's
tags.
"""
return id_
class Input(Widget):
"""
Base class for all <input> widgets (except type='checkbox' and
type='radio', which are special).
"""
input_type = None # Subclasses must define this.
def _format_value(self, value):
if self.is_localized:
return formats.localize_input(value)
return value
def render(self, name, value, attrs=None):
if value is None:
value = ''
final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)
if value != '':
# Only add the 'value' attribute if a value is non-empty.
final_attrs['value'] = force_text(self._format_value(value))
return format_html('<input{} />', flatatt(final_attrs))
class TextInput(Input):
input_type = 'text'
def __init__(self, attrs=None):
if attrs is not None:
self.input_type = attrs.pop('type', self.input_type)
super(TextInput, self).__init__(attrs)
class NumberInput(TextInput):
input_type = 'number'
class EmailInput(TextInput):
input_type = 'email'
class URLInput(TextInput):
input_type = 'url'
class PasswordInput(TextInput):
input_type = 'password'
def __init__(self, attrs=None, render_value=False):
super(PasswordInput, self).__init__(attrs)
self.render_value = render_value
def render(self, name, value, attrs=None):
if not self.render_value:
value = None
return super(PasswordInput, self).render(name, value, attrs)
class HiddenInput(Input):
input_type = 'hidden'
class MultipleHiddenInput(HiddenInput):
"""
A widget that handles <input type="hidden"> for fields that have a list
of values.
"""
def __init__(self, attrs=None, choices=()):
super(MultipleHiddenInput, self).__init__(attrs)
# choices can be any iterable
self.choices = choices
def render(self, name, value, attrs=None, choices=()):
if value is None:
value = []
final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)
id_ = final_attrs.get('id', None)
inputs = []
for i, v in enumerate(value):
input_attrs = dict(value=force_text(v), **final_attrs)
if id_:
# An ID attribute was given. Add a numeric index as a suffix
# so that the inputs don't all have the same ID attribute.
input_attrs['id'] = '%s_%s' % (id_, i)
inputs.append(format_html('<input{} />', flatatt(input_attrs)))
return mark_safe('\n'.join(inputs))
def value_from_datadict(self, data, files, name):
if isinstance(data, MultiValueDict):
return data.getlist(name)
return data.get(name, None)
class FileInput(Input):
input_type = 'file'
needs_multipart_form = True
def render(self, name, value, attrs=None):
return super(FileInput, self).render(name, None, attrs=attrs)
def value_from_datadict(self, data, files, name):
"File widgets take data from FILES, not POST"
return files.get(name, None)
FILE_INPUT_CONTRADICTION = object()
class ClearableFileInput(FileInput):
initial_text = ugettext_lazy('Currently')
input_text = ugettext_lazy('Change')
clear_checkbox_label = ugettext_lazy('Clear')
template_with_initial = (
'%(initial_text)s: <a href="%(initial_url)s">%(initial)s</a> '
'%(clear_template)s<br />%(input_text)s: %(input)s'
)
template_with_clear = '%(clear)s <label for="%(clear_checkbox_id)s">%(clear_checkbox_label)s</label>'
def clear_checkbox_name(self, name):
"""
Given the name of the file input, return the name of the clear checkbox
input.
"""
return name + '-clear'
def clear_checkbox_id(self, name):
"""
Given the name of the clear checkbox input, return the HTML id for it.
"""
return name + '_id'
def is_initial(self, value):
"""
Return whether value is considered to be initial value.
"""
# hasattr() masks exceptions on Python 2.
if six.PY2:
try:
getattr(value, 'url')
except AttributeError:
return False
else:
return bool(value)
return bool(value and hasattr(value, 'url'))
def get_template_substitution_values(self, value):
"""
Return value-related substitutions.
"""
return {
'initial': conditional_escape(value),
'initial_url': conditional_escape(value.url),
}
def render(self, name, value, attrs=None):
substitutions = {
'initial_text': self.initial_text,
'input_text': self.input_text,
'clear_template': '',
'clear_checkbox_label': self.clear_checkbox_label,
}
template = '%(input)s'
substitutions['input'] = super(ClearableFileInput, self).render(name, value, attrs)
if self.is_initial(value):
template = self.template_with_initial
substitutions.update(self.get_template_substitution_values(value))
if not self.is_required:
checkbox_name = self.clear_checkbox_name(name)
checkbox_id = self.clear_checkbox_id(checkbox_name)
substitutions['clear_checkbox_name'] = conditional_escape(checkbox_name)
substitutions['clear_checkbox_id'] = conditional_escape(checkbox_id)
substitutions['clear'] = CheckboxInput().render(checkbox_name, False, attrs={'id': checkbox_id})
substitutions['clear_template'] = self.template_with_clear % substitutions
return mark_safe(template % substitutions)
def value_from_datadict(self, data, files, name):
upload = super(ClearableFileInput, self).value_from_datadict(data, files, name)
if not self.is_required and CheckboxInput().value_from_datadict(
data, files, self.clear_checkbox_name(name)):
if upload:
# If the user contradicts themselves (uploads a new file AND
# checks the "clear" checkbox), we return a unique marker
# object that FileField will turn into a ValidationError.
return FILE_INPUT_CONTRADICTION
# False signals to clear any existing value, as opposed to just None
return False
return upload
class Textarea(Widget):
def __init__(self, attrs=None):
# Use slightly better defaults than HTML's 20x2 box
default_attrs = {'cols': '40', 'rows': '10'}
if attrs:
default_attrs.update(attrs)
super(Textarea, self).__init__(default_attrs)
def render(self, name, value, attrs=None):
if value is None:
value = ''
final_attrs = self.build_attrs(attrs, name=name)
return format_html('<textarea{}>\r\n{}</textarea>',
flatatt(final_attrs),
force_text(value))
class DateTimeBaseInput(TextInput):
format_key = ''
supports_microseconds = False
def __init__(self, attrs=None, format=None):
super(DateTimeBaseInput, self).__init__(attrs)
self.format = format if format else None
def _format_value(self, value):
return formats.localize_input(value,
self.format or formats.get_format(self.format_key)[0])
class DateInput(DateTimeBaseInput):
format_key = 'DATE_INPUT_FORMATS'
class DateTimeInput(DateTimeBaseInput):
format_key = 'DATETIME_INPUT_FORMATS'
class TimeInput(DateTimeBaseInput):
format_key = 'TIME_INPUT_FORMATS'
# Defined at module level so that CheckboxInput is picklable (#17976)
def boolean_check(v):
return not (v is False or v is None or v == '')
class CheckboxInput(Widget):
def __init__(self, attrs=None, check_test=None):
super(CheckboxInput, self).__init__(attrs)
# check_test is a callable that takes a value and returns True
# if the checkbox should be checked for that value.
self.check_test = boolean_check if check_test is None else check_test
def render(self, name, value, attrs=None):
final_attrs = self.build_attrs(attrs, type='checkbox', name=name)
if self.check_test(value):
final_attrs['checked'] = 'checked'
if not (value is True or value is False or value is None or value == ''):
# Only add the 'value' attribute if a value is non-empty.
final_attrs['value'] = force_text(value)
return format_html('<input{} />', flatatt(final_attrs))
def value_from_datadict(self, data, files, name):
if name not in data:
# A missing value means False because HTML form submission does not
# send results for unselected checkboxes.
return False
value = data.get(name)
# Translate true and false strings to boolean values.
values = {'true': True, 'false': False}
if isinstance(value, six.string_types):
value = values.get(value.lower(), value)
return bool(value)
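# --- Illustrative sketch (not part of Django): value_from_datadict() above
# treats a missing key as False and maps the strings 'true'/'false' back to
# booleans, so checkbox state survives a round trip through serialized data.
def _checkbox_value_sketch():
    box = CheckboxInput()
    assert box.value_from_datadict({}, {}, 'agree') is False            # unchecked
    assert box.value_from_datadict({'agree': 'on'}, {}, 'agree') is True
    assert box.value_from_datadict({'agree': 'false'}, {}, 'agree') is False
    return box.render('agree', True)  # renders with checked="checked"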
class Select(Widget):
allow_multiple_selected = False
def __init__(self, attrs=None, choices=()):
super(Select, self).__init__(attrs)
# choices can be any iterable, but we may need to render this widget
# multiple times. Thus, collapse it into a list so it can be consumed
# more than once.
self.choices = list(choices)
def render(self, name, value, attrs=None, choices=()):
if value is None:
value = ''
final_attrs = self.build_attrs(attrs, name=name)
output = [format_html('<select{}>', flatatt(final_attrs))]
options = self.render_options(choices, [value])
if options:
output.append(options)
output.append('</select>')
return mark_safe('\n'.join(output))
def render_option(self, selected_choices, option_value, option_label):
if option_value is None:
option_value = ''
option_value = force_text(option_value)
if option_value in selected_choices:
selected_html = mark_safe(' selected="selected"')
if not self.allow_multiple_selected:
# Only allow for a single selection.
selected_choices.remove(option_value)
else:
selected_html = ''
return format_html('<option value="{}"{}>{}</option>',
option_value,
selected_html,
force_text(option_label))
def render_options(self, choices, selected_choices):
# Normalize to strings.
selected_choices = set(force_text(v) for v in selected_choices)
output = []
for option_value, option_label in chain(self.choices, choices):
if isinstance(option_label, (list, tuple)):
output.append(format_html('<optgroup label="{}">', force_text(option_value)))
for option in option_label:
output.append(self.render_option(selected_choices, *option))
output.append('</optgroup>')
else:
output.append(self.render_option(selected_choices, option_value, option_label))
return '\n'.join(output)
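# --- Illustrative sketch (not part of Django): a (value, label) pair whose
# label is itself a list of pairs is rendered as an <optgroup>, as handled by
# render_options() above. The choice values here are hypothetical.
def _select_optgroup_sketch():
    widget = Select(choices=[
        ('', '---'),
        ('Audio', [('vinyl', 'Vinyl'), ('cd', 'CD')]),
    ])
    return widget.render('disc', 'cd')  # 'cd' comes out selected inside the group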
class NullBooleanSelect(Select):
"""
A Select Widget intended to be used with NullBooleanField.
"""
def __init__(self, attrs=None):
choices = (('1', ugettext_lazy('Unknown')),
('2', ugettext_lazy('Yes')),
('3', ugettext_lazy('No')))
super(NullBooleanSelect, self).__init__(attrs, choices)
def render(self, name, value, attrs=None, choices=()):
try:
value = {True: '2', False: '3', '2': '2', '3': '3'}[value]
except KeyError:
value = '1'
return super(NullBooleanSelect, self).render(name, value, attrs, choices)
def value_from_datadict(self, data, files, name):
value = data.get(name, None)
return {'2': True,
True: True,
'True': True,
'3': False,
'False': False,
False: False}.get(value, None)
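# --- Illustrative sketch (not part of Django): NullBooleanSelect represents
# the three states as the strings '1' (unknown), '2' (yes) and '3' (no), and
# value_from_datadict() maps submitted data back to None/True/False.
def _null_boolean_sketch():
    widget = NullBooleanSelect()
    unknown = widget.value_from_datadict({}, {}, 'flag')             # None
    yes = widget.value_from_datadict({'flag': '2'}, {}, 'flag')      # True
    no = widget.value_from_datadict({'flag': '3'}, {}, 'flag')       # False
    return unknown, yes, no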
class SelectMultiple(Select):
allow_multiple_selected = True
def render(self, name, value, attrs=None, choices=()):
if value is None:
value = []
final_attrs = self.build_attrs(attrs, name=name)
output = [format_html('<select multiple="multiple"{}>', flatatt(final_attrs))]
options = self.render_options(choices, value)
if options:
output.append(options)
output.append('</select>')
return mark_safe('\n'.join(output))
def value_from_datadict(self, data, files, name):
if isinstance(data, MultiValueDict):
return data.getlist(name)
return data.get(name, None)
@html_safe
@python_2_unicode_compatible
class ChoiceInput(SubWidget):
"""
An object used by ChoiceFieldRenderer that represents a single
<input type='$input_type'>.
"""
input_type = None # Subclasses must define this
def __init__(self, name, value, attrs, choice, index):
self.name = name
self.value = value
self.attrs = attrs
self.choice_value = force_text(choice[0])
self.choice_label = force_text(choice[1])
self.index = index
if 'id' in self.attrs:
self.attrs['id'] += "_%d" % self.index
def __str__(self):
return self.render()
def render(self, name=None, value=None, attrs=None, choices=()):
if self.id_for_label:
label_for = format_html(' for="{}"', self.id_for_label)
else:
label_for = ''
attrs = dict(self.attrs, **attrs) if attrs else self.attrs
return format_html(
'<label{}>{} {}</label>', label_for, self.tag(attrs), self.choice_label
)
def is_checked(self):
return self.value == self.choice_value
def tag(self, attrs=None):
attrs = attrs or self.attrs
final_attrs = dict(attrs, type=self.input_type, name=self.name, value=self.choice_value)
if self.is_checked():
final_attrs['checked'] = 'checked'
return format_html('<input{} />', flatatt(final_attrs))
@property
def id_for_label(self):
return self.attrs.get('id', '')
class RadioChoiceInput(ChoiceInput):
input_type = 'radio'
def __init__(self, *args, **kwargs):
super(RadioChoiceInput, self).__init__(*args, **kwargs)
self.value = force_text(self.value)
class CheckboxChoiceInput(ChoiceInput):
input_type = 'checkbox'
def __init__(self, *args, **kwargs):
super(CheckboxChoiceInput, self).__init__(*args, **kwargs)
self.value = set(force_text(v) for v in self.value)
def is_checked(self):
return self.choice_value in self.value
@html_safe
@python_2_unicode_compatible
class ChoiceFieldRenderer(object):
"""
An object used by RadioSelect to enable customization of radio widgets.
"""
choice_input_class = None
outer_html = '<ul{id_attr}>{content}</ul>'
inner_html = '<li>{choice_value}{sub_widgets}</li>'
def __init__(self, name, value, attrs, choices):
self.name = name
self.value = value
self.attrs = attrs
self.choices = choices
def __getitem__(self, idx):
choice = self.choices[idx] # Let the IndexError propagate
return self.choice_input_class(self.name, self.value, self.attrs.copy(), choice, idx)
def __str__(self):
return self.render()
def render(self):
"""
Outputs a <ul> for this set of choice fields.
If an id was given to the field, it is applied to the <ul> (each
item in the list will get an id of `$id_$i`).
"""
id_ = self.attrs.get('id', None)
output = []
for i, choice in enumerate(self.choices):
choice_value, choice_label = choice
if isinstance(choice_label, (tuple, list)):
attrs_plus = self.attrs.copy()
if id_:
attrs_plus['id'] += '_{}'.format(i)
sub_ul_renderer = ChoiceFieldRenderer(name=self.name,
value=self.value,
attrs=attrs_plus,
choices=choice_label)
sub_ul_renderer.choice_input_class = self.choice_input_class
output.append(format_html(self.inner_html, choice_value=choice_value,
sub_widgets=sub_ul_renderer.render()))
else:
w = self.choice_input_class(self.name, self.value,
self.attrs.copy(), choice, i)
output.append(format_html(self.inner_html,
choice_value=force_text(w), sub_widgets=''))
return format_html(self.outer_html,
id_attr=format_html(' id="{}"', id_) if id_ else '',
content=mark_safe('\n'.join(output)))
class RadioFieldRenderer(ChoiceFieldRenderer):
choice_input_class = RadioChoiceInput
class CheckboxFieldRenderer(ChoiceFieldRenderer):
choice_input_class = CheckboxChoiceInput
class RendererMixin(object):
renderer = None # subclasses must define this
_empty_value = None
def __init__(self, *args, **kwargs):
# Override the default renderer if we were passed one.
renderer = kwargs.pop('renderer', None)
if renderer:
self.renderer = renderer
super(RendererMixin, self).__init__(*args, **kwargs)
def subwidgets(self, name, value, attrs=None, choices=()):
for widget in self.get_renderer(name, value, attrs, choices):
yield widget
def get_renderer(self, name, value, attrs=None, choices=()):
"""Returns an instance of the renderer."""
if value is None:
value = self._empty_value
final_attrs = self.build_attrs(attrs)
choices = list(chain(self.choices, choices))
return self.renderer(name, value, final_attrs, choices)
def render(self, name, value, attrs=None, choices=()):
return self.get_renderer(name, value, attrs, choices).render()
def id_for_label(self, id_):
# Widgets using this RendererMixin are made of a collection of
# subwidgets, each with their own <label>, and distinct ID.
# The IDs are made distinct by a "_X" suffix, where X is the zero-based
# index of the choice field. Thus, the label for the main widget should
# reference the first subwidget, hence the "_0" suffix.
if id_:
id_ += '_0'
return id_
class RadioSelect(RendererMixin, Select):
renderer = RadioFieldRenderer
_empty_value = ''
class CheckboxSelectMultiple(RendererMixin, SelectMultiple):
renderer = CheckboxFieldRenderer
_empty_value = []
class MultiWidget(Widget):
"""
A widget that is composed of multiple widgets.
Its render() method is different from other widgets', because it has to
figure out how to split a single value for display in multiple widgets.
The ``value`` argument can be one of two things:
* A list.
* A normal value (e.g., a string) that has been "compressed" from
a list of values.
In the second case -- i.e., if the value is NOT a list -- render() will
first "decompress" the value into a list before rendering it. It does so by
calling the decompress() method, which MultiWidget subclasses must
implement. This method takes a single "compressed" value and returns a
list.
When render() does its HTML rendering, each value in the list is rendered
with the corresponding widget -- the first value is rendered in the first
widget, the second value is rendered in the second widget, etc.
Subclasses may implement format_output(), which takes the list of rendered
widgets and returns a string of HTML that formats them any way you'd like.
You'll probably want to use this class with MultiValueField.
"""
def __init__(self, widgets, attrs=None):
self.widgets = [w() if isinstance(w, type) else w for w in widgets]
super(MultiWidget, self).__init__(attrs)
@property
def is_hidden(self):
return all(w.is_hidden for w in self.widgets)
def render(self, name, value, attrs=None):
if self.is_localized:
for widget in self.widgets:
widget.is_localized = self.is_localized
# value is a list of values, each corresponding to a widget
# in self.widgets.
if not isinstance(value, list):
value = self.decompress(value)
output = []
final_attrs = self.build_attrs(attrs)
id_ = final_attrs.get('id', None)
for i, widget in enumerate(self.widgets):
try:
widget_value = value[i]
except IndexError:
widget_value = None
if id_:
final_attrs = dict(final_attrs, id='%s_%s' % (id_, i))
output.append(widget.render(name + '_%s' % i, widget_value, final_attrs))
return mark_safe(self.format_output(output))
def id_for_label(self, id_):
# See the comment for RadioSelect.id_for_label()
if id_:
id_ += '_0'
return id_
def value_from_datadict(self, data, files, name):
return [widget.value_from_datadict(data, files, name + '_%s' % i) for i, widget in enumerate(self.widgets)]
def format_output(self, rendered_widgets):
"""
Given a list of rendered widgets (as strings), returns a Unicode string
representing the HTML for the whole lot.
This hook allows you to format the HTML design of the widgets, if
needed.
"""
return ''.join(rendered_widgets)
def decompress(self, value):
"""
Returns a list of decompressed values for the given compressed value.
The given value can be assumed to be valid, but not necessarily
non-empty.
"""
raise NotImplementedError('Subclasses must implement this method.')
def _get_media(self):
"Media for a multiwidget is the combination of all media of the subwidgets"
media = Media()
for w in self.widgets:
media = media + w.media
return media
media = property(_get_media)
def __deepcopy__(self, memo):
obj = super(MultiWidget, self).__deepcopy__(memo)
obj.widgets = copy.deepcopy(self.widgets)
return obj
@property
def needs_multipart_form(self):
return any(w.needs_multipart_form for w in self.widgets)
class SplitDateTimeWidget(MultiWidget):
"""
A Widget that splits datetime input into two <input type="text"> boxes.
"""
supports_microseconds = False
def __init__(self, attrs=None, date_format=None, time_format=None):
widgets = (DateInput(attrs=attrs, format=date_format),
TimeInput(attrs=attrs, format=time_format))
super(SplitDateTimeWidget, self).__init__(widgets, attrs)
def decompress(self, value):
if value:
value = to_current_timezone(value)
return [value.date(), value.time().replace(microsecond=0)]
return [None, None]
class SplitHiddenDateTimeWidget(SplitDateTimeWidget):
"""
A Widget that splits datetime input into two <input type="hidden"> inputs.
"""
def __init__(self, attrs=None, date_format=None, time_format=None):
super(SplitHiddenDateTimeWidget, self).__init__(attrs, date_format, time_format)
for widget in self.widgets:
widget.input_type = 'hidden'
class SelectDateWidget(Widget):
"""
A Widget that splits date input into three <select> boxes.
This also serves as an example of a Widget that has more than one HTML
element and hence implements value_from_datadict.
"""
none_value = (0, '---')
month_field = '%s_month'
day_field = '%s_day'
year_field = '%s_year'
date_re = re.compile(r'(\d{4})-(\d\d?)-(\d\d?)$')
def __init__(self, attrs=None, years=None, months=None, empty_label=None):
self.attrs = attrs or {}
# Optional list or tuple of years to use in the "year" select box.
if years:
self.years = years
else:
this_year = datetime.date.today().year
self.years = range(this_year, this_year + 10)
# Optional dict of months to use in the "month" select box.
if months:
self.months = months
else:
self.months = MONTHS
# Optional string, list, or tuple to use as empty_label.
if isinstance(empty_label, (list, tuple)):
if not len(empty_label) == 3:
raise ValueError('empty_label list/tuple must have 3 elements.')
self.year_none_value = (0, empty_label[0])
self.month_none_value = (0, empty_label[1])
self.day_none_value = (0, empty_label[2])
else:
if empty_label is not None:
self.none_value = (0, empty_label)
self.year_none_value = self.none_value
self.month_none_value = self.none_value
self.day_none_value = self.none_value
@staticmethod
def _parse_date_fmt():
fmt = get_format('DATE_FORMAT')
escaped = False
for char in fmt:
if escaped:
escaped = False
elif char == '\\':
escaped = True
elif char in 'Yy':
yield 'year'
elif char in 'bEFMmNn':
yield 'month'
elif char in 'dj':
yield 'day'
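    # For example, with a DATE_FORMAT such as 'N j, Y' this generator yields
    # 'month', 'day', 'year', which is the order in which render() below emits
    # the three <select> boxes.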
def render(self, name, value, attrs=None):
try:
year_val, month_val, day_val = value.year, value.month, value.day
except AttributeError:
year_val = month_val = day_val = None
if isinstance(value, six.string_types):
if settings.USE_L10N:
try:
input_format = get_format('DATE_INPUT_FORMATS')[0]
v = datetime.datetime.strptime(force_str(value), input_format)
year_val, month_val, day_val = v.year, v.month, v.day
except ValueError:
pass
if year_val is None:
match = self.date_re.match(value)
if match:
year_val, month_val, day_val = [int(val) for val in match.groups()]
html = {}
choices = [(i, i) for i in self.years]
html['year'] = self.create_select(name, self.year_field, value, year_val, choices, self.year_none_value)
choices = list(self.months.items())
html['month'] = self.create_select(name, self.month_field, value, month_val, choices, self.month_none_value)
choices = [(i, i) for i in range(1, 32)]
html['day'] = self.create_select(name, self.day_field, value, day_val, choices, self.day_none_value)
output = []
for field in self._parse_date_fmt():
output.append(html[field])
return mark_safe('\n'.join(output))
def id_for_label(self, id_):
for first_select in self._parse_date_fmt():
return '%s_%s' % (id_, first_select)
else:
return '%s_month' % id_
def value_from_datadict(self, data, files, name):
y = data.get(self.year_field % name)
m = data.get(self.month_field % name)
d = data.get(self.day_field % name)
if y == m == d == "0":
return None
if y and m and d:
if settings.USE_L10N:
input_format = get_format('DATE_INPUT_FORMATS')[0]
try:
date_value = datetime.date(int(y), int(m), int(d))
except ValueError:
return '%s-%s-%s' % (y, m, d)
else:
date_value = datetime_safe.new_date(date_value)
return date_value.strftime(input_format)
else:
return '%s-%s-%s' % (y, m, d)
return data.get(name, None)
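    # For example, for name='bday' this reads data['bday_year'],
    # data['bday_month'] and data['bday_day']; a "0" selection in all three
    # boxes is treated as "no date" and returns None.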
def create_select(self, name, field, value, val, choices, none_value):
if 'id' in self.attrs:
id_ = self.attrs['id']
else:
id_ = 'id_%s' % name
if not self.is_required:
choices.insert(0, none_value)
local_attrs = self.build_attrs(id=field % id_)
s = Select(choices=choices)
select_html = s.render(field % name, val, local_attrs)
return select_html
|
|
from flask import Flask, render_template, request, redirect, url_for, flash, jsonify
from flask_pagination import Pagination
from db import *
from flask_login import LoginManager, login_user
import consts
import md5
# system consts
records_per_page = 10
md5_salt = 'coersive control'
app = Flask(__name__)
app.secret_key = 'the manipulative man'
login_manager = LoginManager()
login_manager.init_app(app)
@login_manager.user_loader
def load_user(userid):
return User.get(User.id==userid)
@app.route("/")
def index():
return render_template('index.html')
@app.route("/search", methods=["GET"])
def search():
if 'page' in request.args:
page = request.args['page']
else:
page = 1
query_object = PoliceLog.select()
if 'keyword' in request.args:
keyword = request.args['keyword']
sql_key = "%"+keyword+"%"
query_object = query_object.where((PoliceLog.location ** sql_key) | (PoliceLog.content ** sql_key) | (PoliceLog.type ** sql_key))
else:
keyword = ''
if 'source' in request.args and request.args['source'] != '':
query_object = query_object.where(PoliceLog.source == request.args['source'])
if 'tag' in request.args and request.args['tag'] != '':
query_object = query_object.join(PoliceLogTag).join(Tag).where(Tag.name == request.args['tag'])
records_num = query_object.count()
    # get counts broken down by source
source_count_map = {}
source_count_result = query_object.select(fn.Count(PoliceLog.id).alias('cnt'), PoliceLog.source).group_by(PoliceLog.source)
for line in source_count_result:
source_count_map[line.source] = line.cnt
# get all tags
tags = Tag.select(fn.Count(PoliceLog.id).alias('cnt'), Tag.name).join(PoliceLogTag).join(PoliceLog).group_by(Tag.id)
result = query_object.paginate(int(page), records_per_page)
pagination = Pagination(total=records_num, page=int(page), bs_version=3)
return render_template('result.html', records=result, pagination=pagination, keyword=keyword, page=page, source_count_map=source_count_map, tags=tags )
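# Example request (illustrative values): GET /search?keyword=phone&source=reported&page=2
# matches PoliceLog rows whose location, content or type contains "phone",
# restricts them to source == "reported", and renders page 2 of the results
# (10 records per page, per records_per_page above).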
@app.route("/decisions", methods=["GET"])
def decisions():
if 'page' in request.args:
page = request.args['page']
else:
page = 1
query_object = Decision.select()
if 'keyword' in request.args:
keyword = request.args['keyword']
sql_key = "%"+keyword+"%"
query_object = query_object.where((Decision.title ** sql_key) | (Decision.content ** sql_key))
else:
keyword = ''
if 'source' in request.args and request.args['source'] != '':
query_object = query_object.where(Decision.source == request.args['source'])
if 'tag' in request.args and request.args['tag'] != '':
query_object = query_object.join(DecisionTag).join(Tag).where(Tag.name == request.args['tag'])
records_num = query_object.count()
    # get counts broken down by source
source_count_map = {}
source_count_result = query_object.select(fn.Count(Decision.id).alias('cnt'), Decision.source).group_by(Decision.source)
for line in source_count_result:
source_count_map[line.source] = line.cnt
tags = Tag.select(fn.Count(Decision.id).alias('cnt'), Tag.name).join(DecisionTag).join(Decision).group_by(Tag.id)
result = query_object.paginate(int(page), records_per_page)
pagination = Pagination(total=records_num, page=int(page), bs_version=3)
return render_template('decision_result.html', records=result, pagination=pagination, keyword=keyword, page=page, source_count_map=source_count_map, tags=tags )
@app.route("/search_data", methods=["GET"])
def search_data():
# percentage = count_result / count_base
aggregate_base = PoliceLog.select()
count_base = PoliceLog.select(fn.Date_format(PoliceLog.report_time, '%Y-%m').alias('x'), fn.Count(fn.Date_format(PoliceLog.report_time, '%Y-%m')).alias('date_count')).where(~(PoliceLog.report_time >> None)).group_by(fn.Date_format(PoliceLog.report_time, '%Y-%m'))
count_result = count_base
if 'keyword' in request.args:
keyword = request.args['keyword']
sql_key = "%"+keyword+"%"
count_result = count_result.where((PoliceLog.location ** sql_key) | (PoliceLog.content ** sql_key) | (PoliceLog.type ** sql_key))
start_year = aggregate_base.aggregate(fn.Min(fn.Year(PoliceLog.report_time)))
start_month = aggregate_base.aggregate(fn.Min(fn.Month(PoliceLog.report_time)))
end_year = aggregate_base.aggregate(fn.Max(fn.Year(PoliceLog.report_time)))
end_month = aggregate_base.aggregate(fn.Max(fn.Month(PoliceLog.report_time)))
result_map = {}
base_map = {}
count_list = []
percentage_list = []
for item in count_result:
result_map[str(item.x)] = item.date_count
for item in count_base:
base_map[str(item.x)] = item.date_count
while start_year*100+start_month <= end_year*100+end_month:
date_string = str(start_year)+'-'+str(start_month).zfill(2)
if date_string in base_map and date_string in result_map:
count = int(result_map[date_string])
percentage = float(count) / float(base_map[date_string])
else:
count = 0
percentage = 0.0
count_list.append([date_string, count])
percentage_list.append([date_string, percentage])
start_month = start_month + 1
if start_month > 12:
start_year = start_year + 1
start_month = 1
# for item in count_base:
# if str(item.x) in result_map:
# count = int(result_map[str(item.x)])
# else:
# count = 0
# percentage = float(count) / float(item.date_count)
# count_list.append([str(item.x), count])
# percentage_list.append([str(item.x), percentage])
return jsonify(data = [{'key':'count', 'bar':True, 'values':count_list}, {'key':'percentage', 'bar':False, 'values':percentage_list}])
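# The JSON built above has the shape (values illustrative):
#   {"data": [{"key": "count",      "bar": true,  "values": [["2014-01", 3], ...]},
#             {"key": "percentage", "bar": false, "values": [["2014-01", 0.25], ...]}]}
# i.e. a bar series of monthly keyword-match counts plus a series giving the
# matching fraction of all records in each month.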
@app.route("/decision_data", methods=["GET"])
def decision_data():
# percentage = count_result / count_base
aggregate_base = Decision.select()
count_base = Decision.select(fn.Concat(Decision.year, '-', Decision.month).alias('x'), fn.Count(Decision.id).alias('date_count')).group_by(fn.Concat(Decision.year, '-', Decision.month))
count_result = count_base
if 'keyword' in request.args:
keyword = request.args['keyword']
sql_key = "%"+keyword+"%"
count_result = count_result.where((Decision.title ** sql_key) | (Decision.content ** sql_key))
start_year = int(aggregate_base.where(Decision.year > '1000').aggregate(fn.Min(Decision.year)))
start_month = int(aggregate_base.where(Decision.month > '1000').aggregate(fn.Min(Decision.month)))
end_year = int(aggregate_base.aggregate(fn.Max(Decision.year)))
end_month = int(aggregate_base.aggregate(fn.Max(Decision.month)))
result_map = {}
base_map = {}
count_list = []
percentage_list = []
for item in count_result:
result_map[str(item.x)] = item.date_count
for item in count_base:
base_map[str(item.x)] = item.date_count
while start_year*100+start_month <= end_year*100+end_month:
date_string = str(start_year)+'-'+str(start_month).zfill(2)
if date_string in base_map and date_string in result_map:
count = int(result_map[date_string])
percentage = float(count) / float(base_map[date_string])
else:
count = 0
percentage = 0.0
count_list.append([date_string, count])
percentage_list.append([date_string, percentage])
start_month = start_month + 1
if start_month > 12:
start_year = start_year + 1
start_month = 1
# for item in count_base:
# if str(item.x) in result_map:
# count = int(result_map[str(item.x)])
# else:
# count = 0
# percentage = float(count) / float(item.date_count)
# count_list.append([str(item.x), count])
# percentage_list.append([str(item.x), percentage])
return jsonify(data = [{'key':'count', 'bar':True, 'values':count_list}, {'key':'percentage', 'bar':False, 'values':percentage_list}])
@app.route("/case/<case_id>")
def view_case(case_id):
result = PoliceLog.get(PoliceLog.id==case_id)
tags = Tag.select().join(PoliceLogTag).join(PoliceLog).where(PoliceLog.id==case_id)
case_tags = []
device_tags = []
for tag in tags:
if tag.type == 'case':
case_tags.append(tag.name)
elif tag.type == 'device':
device_tags.append(tag.name)
return render_template('case.html', case=result, suggested_types=consts.incident_types, suggested_devices=consts.incident_devices, case_tags=','.join(case_tags), device_tag=','.join(device_tags))
@app.route("/decision/<case_id>")
def decision(case_id):
result = Decision.get(Decision.id==case_id)
tags = Tag.select().join(DecisionTag).join(Decision).where(Decision.id==case_id)
case_tags = []
device_tags = []
for tag in tags:
if tag.type == 'case':
case_tags.append(tag.name)
elif tag.type == 'device':
device_tags.append(tag.name)
return render_template('decision.html', case=result, suggested_types=consts.incident_types, suggested_devices=consts.incident_devices, case_tags=','.join(case_tags), device_tags=','.join(device_tags))
@app.route("/report")
def new_incident():
return render_template('report.html', suggested_types=consts.incident_types, suggested_devices=consts.incident_devices)
@app.route("/insert", methods=["POST"])
def insert_incident():
log = PoliceLog()
log.type = request.form['type']
log.location = request.form['location']
log.content = request.form['content']
log.source = "reported"
log.save()
print request.form
return "something"
@app.route("/login_form")
def login_form():
return render_template('login.html')
@app.route("/login", methods=["POST"])
def login():
return "hello"
@app.route("/register", methods=["POST"])
def register():
user_count = User.select().where(User.username==request.form['username']).count()
if user_count > 0:
flash("the username is already exist", "danger")
return redirect(url_for("login_form"))
user = User()
user.name = request.form['name']
user.username = request.form['username']
user.password = md5.new(md5_salt+request.form['password']).hexdigest()
user.email = ''
user.is_masteruser = False
user.save()
login_user(user)
return redirect(url_for("index"))
@app.route("/save_decision_tags", methods=['GET'])
def save_decision_tags():
tags = request.args['type_tags'].lower().split(',')
DecisionTag.delete().where(DecisionTag.decision_id==request.args['id']).execute()
decision = Decision.get(Decision.id==request.args['id'])
for tag in tags:
if tag == '':
continue
clause = Tag.select().where((Tag.name==tag) & (Tag.type=='case'))
if clause.count()>0:
item = clause.get()
dt = DecisionTag()
dt.decision = decision
dt.tag = item
dt.save()
else:
item = Tag()
item.name = tag
item.type = 'case'
item.save()
dt = DecisionTag()
dt.decision = decision
dt.tag = item
dt.save()
tags = request.args['device_tags'].lower().split(',')
for tag in tags:
if tag == '':
continue
clause = Tag.select().where((Tag.name==tag) & (Tag.type=='device'))
if clause.count()>0:
item = clause.get()
dt = DecisionTag()
dt.decision = decision
dt.tag = item
dt.save()
else:
item = Tag()
item.name = tag
item.type = 'device'
item.save()
dt = DecisionTag()
dt.decision = decision
dt.tag = item
dt.save()
return "ok"
@app.route("/save_tags", methods=['GET'])
def save_tags():
tags = request.args['type_tags'].lower().split(',')
PoliceLogTag.delete().where(PoliceLogTag.policeLog_id==request.args['id']).execute()
policelog = PoliceLog.get(PoliceLog.id==request.args['id'])
for tag in tags:
if tag == '':
continue
clause = Tag.select().where((Tag.name==tag) & (Tag.type=='case'))
if clause.count()>0:
item = clause.get()
dt = PoliceLogTag()
dt.policeLog = policelog
dt.tag = item
dt.save()
else:
item = Tag()
item.name = tag
item.type = 'case'
item.save()
dt = PoliceLogTag()
dt.policeLog = policelog
dt.tag = item
dt.save()
tags = request.args['device_tags'].lower().split(',')
for tag in tags:
if tag == '':
continue
clause = Tag.select().where((Tag.name==tag) & (Tag.type=='device'))
if clause.count()>0:
item = clause.get()
dt = PoliceLogTag()
dt.policeLog = policelog
dt.tag = item
dt.save()
else:
item = Tag()
item.name = tag
item.type = 'device'
item.save()
dt = PoliceLogTag()
dt.policeLog = policelog
dt.tag = item
dt.save()
return "ok"
@app.route("/whats_this")
def whats_this():
return render_template('whats_this.html')
if __name__ == "__main__":
app.run(debug=True)
|
|
from __future__ import absolute_import, unicode_literals
import datetime
import os
from decimal import Decimal
from django import forms
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.validators import ValidationError
from django.db import connection
from django.forms.models import model_to_dict
from django.utils.unittest import skipUnless
from django.test import TestCase
from django.utils import six
from .models import (Article, ArticleStatus, BetterWriter, BigInt, Book,
Category, CommaSeparatedInteger, CustomFieldForExclusionModel, DerivedBook,
DerivedPost, ExplicitPK, FlexibleDatePost, ImprovedArticle,
ImprovedArticleWithParentLink, Inventory, PhoneNumber, Post, Price,
Product, TextFile, Writer, WriterProfile, test_images)
if test_images:
from .models import ImageFile, OptionalImageFile
class ImageFileForm(forms.ModelForm):
class Meta:
model = ImageFile
class OptionalImageFileForm(forms.ModelForm):
class Meta:
model = OptionalImageFile
class ProductForm(forms.ModelForm):
class Meta:
model = Product
class PriceForm(forms.ModelForm):
class Meta:
model = Price
class BookForm(forms.ModelForm):
class Meta:
model = Book
class DerivedBookForm(forms.ModelForm):
class Meta:
model = DerivedBook
class ExplicitPKForm(forms.ModelForm):
class Meta:
model = ExplicitPK
fields = ('key', 'desc',)
class PostForm(forms.ModelForm):
class Meta:
model = Post
class DerivedPostForm(forms.ModelForm):
class Meta:
model = DerivedPost
class CustomWriterForm(forms.ModelForm):
name = forms.CharField(required=False)
class Meta:
model = Writer
class FlexDatePostForm(forms.ModelForm):
class Meta:
model = FlexibleDatePost
class BaseCategoryForm(forms.ModelForm):
class Meta:
model = Category
class ArticleForm(forms.ModelForm):
class Meta:
model = Article
class PartialArticleForm(forms.ModelForm):
class Meta:
model = Article
fields = ('headline','pub_date')
class RoykoForm(forms.ModelForm):
class Meta:
model = Writer
class TestArticleForm(forms.ModelForm):
class Meta:
model = Article
class PartialArticleFormWithSlug(forms.ModelForm):
class Meta:
model = Article
fields=('headline', 'slug', 'pub_date')
class ArticleStatusForm(forms.ModelForm):
class Meta:
model = ArticleStatus
class InventoryForm(forms.ModelForm):
class Meta:
model = Inventory
class SelectInventoryForm(forms.Form):
items = forms.ModelMultipleChoiceField(Inventory.objects.all(), to_field_name='barcode')
class CustomFieldForExclusionForm(forms.ModelForm):
class Meta:
model = CustomFieldForExclusionModel
fields = ['name', 'markup']
class ShortCategory(forms.ModelForm):
name = forms.CharField(max_length=5)
slug = forms.CharField(max_length=5)
url = forms.CharField(max_length=3)
class ImprovedArticleForm(forms.ModelForm):
class Meta:
model = ImprovedArticle
class ImprovedArticleWithParentLinkForm(forms.ModelForm):
class Meta:
model = ImprovedArticleWithParentLink
class BetterWriterForm(forms.ModelForm):
class Meta:
model = BetterWriter
class WriterProfileForm(forms.ModelForm):
class Meta:
model = WriterProfile
class PhoneNumberForm(forms.ModelForm):
class Meta:
model = PhoneNumber
class TextFileForm(forms.ModelForm):
class Meta:
model = TextFile
class BigIntForm(forms.ModelForm):
class Meta:
model = BigInt
class ModelFormWithMedia(forms.ModelForm):
class Media:
js = ('/some/form/javascript',)
css = {
'all': ('/some/form/css',)
}
class Meta:
model = PhoneNumber
class CommaSeparatedIntegerForm(forms.ModelForm):
class Meta:
model = CommaSeparatedInteger
class PriceFormWithoutQuantity(forms.ModelForm):
class Meta:
model = Price
exclude = ('quantity',)
class ModelFormBaseTest(TestCase):
def test_base_form(self):
self.assertEqual(BaseCategoryForm.base_fields.keys(),
['name', 'slug', 'url'])
def test_extra_fields(self):
class ExtraFields(BaseCategoryForm):
some_extra_field = forms.BooleanField()
self.assertEqual(ExtraFields.base_fields.keys(),
['name', 'slug', 'url', 'some_extra_field'])
def test_replace_field(self):
class ReplaceField(forms.ModelForm):
url = forms.BooleanField()
class Meta:
model = Category
self.assertTrue(isinstance(ReplaceField.base_fields['url'],
forms.fields.BooleanField))
def test_override_field(self):
class WriterForm(forms.ModelForm):
book = forms.CharField(required=False)
class Meta:
model = Writer
wf = WriterForm({'name': 'Richard Lockridge'})
self.assertTrue(wf.is_valid())
def test_limit_fields(self):
class LimitFields(forms.ModelForm):
class Meta:
model = Category
fields = ['url']
self.assertEqual(LimitFields.base_fields.keys(),
['url'])
def test_exclude_fields(self):
class ExcludeFields(forms.ModelForm):
class Meta:
model = Category
exclude = ['url']
self.assertEqual(ExcludeFields.base_fields.keys(),
['name', 'slug'])
def test_confused_form(self):
class ConfusedForm(forms.ModelForm):
""" Using 'fields' *and* 'exclude'. Not sure why you'd want to do
this, but uh, "be liberal in what you accept" and all.
"""
class Meta:
model = Category
fields = ['name', 'url']
exclude = ['url']
self.assertEqual(ConfusedForm.base_fields.keys(),
['name'])
def test_mixmodel_form(self):
class MixModelForm(BaseCategoryForm):
""" Don't allow more than one 'model' definition in the
inheritance hierarchy. Technically, it would generate a valid
form, but the fact that the resulting save method won't deal with
multiple objects is likely to trip up people not familiar with the
mechanics.
"""
class Meta:
model = Article
# MixModelForm is now an Article-related thing, because MixModelForm.Meta
# overrides BaseCategoryForm.Meta.
self.assertEqual(
MixModelForm.base_fields.keys(),
['headline', 'slug', 'pub_date', 'writer', 'article', 'categories', 'status']
)
def test_article_form(self):
self.assertEqual(
ArticleForm.base_fields.keys(),
['headline', 'slug', 'pub_date', 'writer', 'article', 'categories', 'status']
)
def test_bad_form(self):
        # First class with a Meta class wins...
class BadForm(ArticleForm, BaseCategoryForm):
pass
self.assertEqual(
BadForm.base_fields.keys(),
['headline', 'slug', 'pub_date', 'writer', 'article', 'categories', 'status']
)
def test_subcategory_form(self):
class SubCategoryForm(BaseCategoryForm):
""" Subclassing without specifying a Meta on the class will use
the parent's Meta (or the first parent in the MRO if there are
multiple parent classes).
"""
pass
self.assertEqual(SubCategoryForm.base_fields.keys(),
['name', 'slug', 'url'])
def test_subclassmeta_form(self):
class SomeCategoryForm(forms.ModelForm):
checkbox = forms.BooleanField()
class Meta:
model = Category
class SubclassMeta(SomeCategoryForm):
""" We can also subclass the Meta inner class to change the fields
list.
"""
class Meta(SomeCategoryForm.Meta):
exclude = ['url']
self.assertHTMLEqual(
str(SubclassMeta()),
"""<tr><th><label for="id_name">Name:</label></th><td><input id="id_name" type="text" name="name" maxlength="20" /></td></tr>
<tr><th><label for="id_slug">Slug:</label></th><td><input id="id_slug" type="text" name="slug" maxlength="20" /></td></tr>
<tr><th><label for="id_checkbox">Checkbox:</label></th><td><input type="checkbox" name="checkbox" id="id_checkbox" /></td></tr>"""
)
def test_orderfields_form(self):
class OrderFields(forms.ModelForm):
class Meta:
model = Category
fields = ['url', 'name']
self.assertEqual(OrderFields.base_fields.keys(),
['url', 'name'])
self.assertHTMLEqual(
str(OrderFields()),
"""<tr><th><label for="id_url">The URL:</label></th><td><input id="id_url" type="text" name="url" maxlength="40" /></td></tr>
<tr><th><label for="id_name">Name:</label></th><td><input id="id_name" type="text" name="name" maxlength="20" /></td></tr>"""
)
def test_orderfields2_form(self):
class OrderFields2(forms.ModelForm):
class Meta:
model = Category
fields = ['slug', 'url', 'name']
exclude = ['url']
self.assertEqual(OrderFields2.base_fields.keys(),
['slug', 'name'])
class TestWidgetForm(forms.ModelForm):
class Meta:
model = Category
fields = ['name', 'url', 'slug']
widgets = {
'name': forms.Textarea,
'url': forms.TextInput(attrs={'class': 'url'})
}
class TestWidgets(TestCase):
def test_base_widgets(self):
frm = TestWidgetForm()
self.assertHTMLEqual(
str(frm['name']),
'<textarea id="id_name" rows="10" cols="40" name="name"></textarea>'
)
self.assertHTMLEqual(
str(frm['url']),
'<input id="id_url" type="text" class="url" name="url" maxlength="40" />'
)
self.assertHTMLEqual(
str(frm['slug']),
'<input id="id_slug" type="text" name="slug" maxlength="20" />'
)
class IncompleteCategoryFormWithFields(forms.ModelForm):
"""
A form that replaces the model's url field with a custom one. This should
prevent the model field's validation from being called.
"""
url = forms.CharField(required=False)
class Meta:
fields = ('name', 'slug')
model = Category
class IncompleteCategoryFormWithExclude(forms.ModelForm):
"""
A form that replaces the model's url field with a custom one. This should
prevent the model field's validation from being called.
"""
url = forms.CharField(required=False)
class Meta:
exclude = ['url']
model = Category
class ValidationTest(TestCase):
def test_validates_with_replaced_field_not_specified(self):
form = IncompleteCategoryFormWithFields(data={'name': 'some name', 'slug': 'some-slug'})
assert form.is_valid()
def test_validates_with_replaced_field_excluded(self):
form = IncompleteCategoryFormWithExclude(data={'name': 'some name', 'slug': 'some-slug'})
assert form.is_valid()
def test_notrequired_overrides_notblank(self):
form = CustomWriterForm({})
assert form.is_valid()
# unique/unique_together validation
class UniqueTest(TestCase):
def setUp(self):
self.writer = Writer.objects.create(name='Mike Royko')
def test_simple_unique(self):
form = ProductForm({'slug': 'teddy-bear-blue'})
self.assertTrue(form.is_valid())
obj = form.save()
form = ProductForm({'slug': 'teddy-bear-blue'})
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['slug'], ['Product with this Slug already exists.'])
form = ProductForm({'slug': 'teddy-bear-blue'}, instance=obj)
self.assertTrue(form.is_valid())
def test_unique_together(self):
"""ModelForm test of unique_together constraint"""
form = PriceForm({'price': '6.00', 'quantity': '1'})
self.assertTrue(form.is_valid())
form.save()
form = PriceForm({'price': '6.00', 'quantity': '1'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['__all__'], ['Price with this Price and Quantity already exists.'])
def test_unique_null(self):
title = 'I May Be Wrong But I Doubt It'
form = BookForm({'title': title, 'author': self.writer.pk})
self.assertTrue(form.is_valid())
form.save()
form = BookForm({'title': title, 'author': self.writer.pk})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['__all__'], ['Book with this Title and Author already exists.'])
form = BookForm({'title': title})
self.assertTrue(form.is_valid())
form.save()
form = BookForm({'title': title})
self.assertTrue(form.is_valid())
def test_inherited_unique(self):
title = 'Boss'
Book.objects.create(title=title, author=self.writer, special_id=1)
form = DerivedBookForm({'title': 'Other', 'author': self.writer.pk, 'special_id': '1', 'isbn': '12345'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['special_id'], ['Book with this Special id already exists.'])
def test_inherited_unique_together(self):
title = 'Boss'
form = BookForm({'title': title, 'author': self.writer.pk})
self.assertTrue(form.is_valid())
form.save()
form = DerivedBookForm({'title': title, 'author': self.writer.pk, 'isbn': '12345'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['__all__'], ['Book with this Title and Author already exists.'])
def test_abstract_inherited_unique(self):
title = 'Boss'
isbn = '12345'
dbook = DerivedBook.objects.create(title=title, author=self.writer, isbn=isbn)
form = DerivedBookForm({'title': 'Other', 'author': self.writer.pk, 'isbn': isbn})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['isbn'], ['Derived book with this Isbn already exists.'])
def test_abstract_inherited_unique_together(self):
title = 'Boss'
isbn = '12345'
dbook = DerivedBook.objects.create(title=title, author=self.writer, isbn=isbn)
form = DerivedBookForm({
'title': 'Other',
'author': self.writer.pk,
'isbn': '9876',
'suffix1': '0',
'suffix2': '0'
})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['__all__'],
['Derived book with this Suffix1 and Suffix2 already exists.'])
def test_explicitpk_unspecified(self):
"""Test for primary_key being in the form and failing validation."""
form = ExplicitPKForm({'key': '', 'desc': '' })
self.assertFalse(form.is_valid())
def test_explicitpk_unique(self):
"""Ensure keys and blank character strings are tested for uniqueness."""
form = ExplicitPKForm({'key': 'key1', 'desc': ''})
self.assertTrue(form.is_valid())
form.save()
form = ExplicitPKForm({'key': 'key1', 'desc': ''})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 3)
self.assertEqual(form.errors['__all__'], ['Explicit pk with this Key and Desc already exists.'])
self.assertEqual(form.errors['desc'], ['Explicit pk with this Desc already exists.'])
self.assertEqual(form.errors['key'], ['Explicit pk with this Key already exists.'])
def test_unique_for_date(self):
p = Post.objects.create(title="Django 1.0 is released",
slug="Django 1.0", subtitle="Finally", posted=datetime.date(2008, 9, 3))
form = PostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['title'], ['Title must be unique for Posted date.'])
form = PostForm({'title': "Work on Django 1.1 begins", 'posted': '2008-09-03'})
self.assertTrue(form.is_valid())
form = PostForm({'title': "Django 1.0 is released", 'posted': '2008-09-04'})
self.assertTrue(form.is_valid())
form = PostForm({'slug': "Django 1.0", 'posted': '2008-01-01'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['slug'], ['Slug must be unique for Posted year.'])
form = PostForm({'subtitle': "Finally", 'posted': '2008-09-30'})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['subtitle'], ['Subtitle must be unique for Posted month.'])
form = PostForm({'subtitle': "Finally", "title": "Django 1.0 is released",
"slug": "Django 1.0", 'posted': '2008-09-03'}, instance=p)
self.assertTrue(form.is_valid())
form = PostForm({'title': "Django 1.0 is released"})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['posted'], ['This field is required.'])
def test_inherited_unique_for_date(self):
p = Post.objects.create(title="Django 1.0 is released",
slug="Django 1.0", subtitle="Finally", posted=datetime.date(2008, 9, 3))
form = DerivedPostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['title'], ['Title must be unique for Posted date.'])
form = DerivedPostForm({'title': "Work on Django 1.1 begins", 'posted': '2008-09-03'})
self.assertTrue(form.is_valid())
form = DerivedPostForm({'title': "Django 1.0 is released", 'posted': '2008-09-04'})
self.assertTrue(form.is_valid())
form = DerivedPostForm({'slug': "Django 1.0", 'posted': '2008-01-01'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['slug'], ['Slug must be unique for Posted year.'])
form = DerivedPostForm({'subtitle': "Finally", 'posted': '2008-09-30'})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['subtitle'], ['Subtitle must be unique for Posted month.'])
form = DerivedPostForm({'subtitle': "Finally", "title": "Django 1.0 is released",
"slug": "Django 1.0", 'posted': '2008-09-03'}, instance=p)
self.assertTrue(form.is_valid())
def test_unique_for_date_with_nullable_date(self):
p = FlexibleDatePost.objects.create(title="Django 1.0 is released",
slug="Django 1.0", subtitle="Finally", posted=datetime.date(2008, 9, 3))
form = FlexDatePostForm({'title': "Django 1.0 is released"})
self.assertTrue(form.is_valid())
form = FlexDatePostForm({'slug': "Django 1.0"})
self.assertTrue(form.is_valid())
form = FlexDatePostForm({'subtitle': "Finally"})
self.assertTrue(form.is_valid())
form = FlexDatePostForm({'subtitle': "Finally", "title": "Django 1.0 is released",
"slug": "Django 1.0"}, instance=p)
self.assertTrue(form.is_valid())
class OldFormForXTests(TestCase):
def test_base_form(self):
self.assertEqual(Category.objects.count(), 0)
f = BaseCategoryForm()
self.assertHTMLEqual(
str(f),
"""<tr><th><label for="id_name">Name:</label></th><td><input id="id_name" type="text" name="name" maxlength="20" /></td></tr>
<tr><th><label for="id_slug">Slug:</label></th><td><input id="id_slug" type="text" name="slug" maxlength="20" /></td></tr>
<tr><th><label for="id_url">The URL:</label></th><td><input id="id_url" type="text" name="url" maxlength="40" /></td></tr>"""
)
self.assertHTMLEqual(
str(f.as_ul()),
"""<li><label for="id_name">Name:</label> <input id="id_name" type="text" name="name" maxlength="20" /></li>
<li><label for="id_slug">Slug:</label> <input id="id_slug" type="text" name="slug" maxlength="20" /></li>
<li><label for="id_url">The URL:</label> <input id="id_url" type="text" name="url" maxlength="40" /></li>"""
)
self.assertHTMLEqual(
str(f["name"]),
"""<input id="id_name" type="text" name="name" maxlength="20" />""")
def test_auto_id(self):
f = BaseCategoryForm(auto_id=False)
self.assertHTMLEqual(
str(f.as_ul()),
"""<li>Name: <input type="text" name="name" maxlength="20" /></li>
<li>Slug: <input type="text" name="slug" maxlength="20" /></li>
<li>The URL: <input type="text" name="url" maxlength="40" /></li>"""
)
def test_with_data(self):
self.assertEqual(Category.objects.count(), 0)
f = BaseCategoryForm({'name': 'Entertainment',
'slug': 'entertainment',
'url': 'entertainment'})
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data['name'], 'Entertainment')
self.assertEqual(f.cleaned_data['slug'], 'entertainment')
self.assertEqual(f.cleaned_data['url'], 'entertainment')
c1 = f.save()
        # Testing whether the same object is returned from the
# ORM... not the fastest way...
self.assertEqual(c1, Category.objects.all()[0])
self.assertEqual(c1.name, "Entertainment")
self.assertEqual(Category.objects.count(), 1)
f = BaseCategoryForm({'name': "It's a test",
'slug': 'its-test',
'url': 'test'})
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data['name'], "It's a test")
self.assertEqual(f.cleaned_data['slug'], 'its-test')
self.assertEqual(f.cleaned_data['url'], 'test')
c2 = f.save()
        # Testing whether the same object is returned from the
# ORM... not the fastest way...
self.assertEqual(c2, Category.objects.get(pk=c2.pk))
self.assertEqual(c2.name, "It's a test")
self.assertEqual(Category.objects.count(), 2)
# If you call save() with commit=False, then it will return an object that
# hasn't yet been saved to the database. In this case, it's up to you to call
# save() on the resulting model instance.
f = BaseCategoryForm({'name': 'Third test', 'slug': 'third-test', 'url': 'third'})
self.assertEqual(f.is_valid(), True)
self.assertEqual(f.cleaned_data['url'], 'third')
self.assertEqual(f.cleaned_data['name'], 'Third test')
self.assertEqual(f.cleaned_data['slug'], 'third-test')
c3 = f.save(commit=False)
self.assertEqual(c3.name, "Third test")
self.assertEqual(Category.objects.count(), 2)
c3.save()
self.assertEqual(Category.objects.count(), 3)
# If you call save() with invalid data, you'll get a ValueError.
f = BaseCategoryForm({'name': '', 'slug': 'not a slug!', 'url': 'foo'})
self.assertEqual(f.errors['name'], ['This field is required.'])
self.assertEqual(f.errors['slug'], ["Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens."])
self.assertEqual(f.cleaned_data, {'url': 'foo'})
with self.assertRaises(ValueError):
f.save()
f = BaseCategoryForm({'name': '', 'slug': '', 'url': 'foo'})
with self.assertRaises(ValueError):
f.save()
# Create a couple of Writers.
w_royko = Writer(name='Mike Royko')
w_royko.save()
w_woodward = Writer(name='Bob Woodward')
w_woodward.save()
# ManyToManyFields are represented by a MultipleChoiceField, ForeignKeys and any
# fields with the 'choices' attribute are represented by a ChoiceField.
f = ArticleForm(auto_id=False)
self.assertHTMLEqual(six.text_type(f), '''<tr><th>Headline:</th><td><input type="text" name="headline" maxlength="50" /></td></tr>
<tr><th>Slug:</th><td><input type="text" name="slug" maxlength="50" /></td></tr>
<tr><th>Pub date:</th><td><input type="text" name="pub_date" /></td></tr>
<tr><th>Writer:</th><td><select name="writer">
<option value="" selected="selected">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></td></tr>
<tr><th>Article:</th><td><textarea rows="10" cols="40" name="article"></textarea></td></tr>
<tr><th>Categories:</th><td><select multiple="multiple" name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select><br /><span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></td></tr>
<tr><th>Status:</th><td><select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></td></tr>''' % (w_woodward.pk, w_royko.pk, c1.pk, c2.pk, c3.pk))
# You can restrict a form to a subset of the complete list of fields
# by providing a 'fields' argument. If you try to save a
# model created with such a form, you need to ensure that the fields
# that are _not_ on the form have default values, or are allowed to have
# a value of None. If a field isn't specified on a form, the object created
# from the form can't provide a value for that field!
f = PartialArticleForm(auto_id=False)
self.assertHTMLEqual(six.text_type(f), '''<tr><th>Headline:</th><td><input type="text" name="headline" maxlength="50" /></td></tr>
<tr><th>Pub date:</th><td><input type="text" name="pub_date" /></td></tr>''')
# When the ModelForm is passed an instance, that instance's current values are
# inserted as 'initial' data in each Field.
w = Writer.objects.get(name='Mike Royko')
f = RoykoForm(auto_id=False, instance=w)
self.assertHTMLEqual(six.text_type(f), '''<tr><th>Name:</th><td><input type="text" name="name" value="Mike Royko" maxlength="50" /><br /><span class="helptext">Use both first and last names.</span></td></tr>''')
art = Article(
headline='Test article',
slug='test-article',
pub_date=datetime.date(1988, 1, 4),
writer=w,
article='Hello.'
)
art.save()
art_id_1 = art.id
self.assertEqual(art_id_1 is not None, True)
f = TestArticleForm(auto_id=False, instance=art)
self.assertHTMLEqual(f.as_ul(), '''<li>Headline: <input type="text" name="headline" value="Test article" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" value="test-article" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" value="1988-01-04" /></li>
<li>Writer: <select name="writer">
<option value="">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s" selected="selected">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article">Hello.</textarea></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select> <span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (w_woodward.pk, w_royko.pk, c1.pk, c2.pk, c3.pk))
f = TestArticleForm({
'headline': 'Test headline',
'slug': 'test-headline',
'pub_date': '1984-02-06',
'writer': six.text_type(w_royko.pk),
'article': 'Hello.'
}, instance=art)
self.assertEqual(f.errors, {})
self.assertEqual(f.is_valid(), True)
test_art = f.save()
self.assertEqual(test_art.id == art_id_1, True)
test_art = Article.objects.get(id=art_id_1)
self.assertEqual(test_art.headline, 'Test headline')
# You can create a form over a subset of the available fields
# by specifying a 'fields' argument to form_for_instance.
f = PartialArticleFormWithSlug({
'headline': 'New headline',
'slug': 'new-headline',
'pub_date': '1988-01-04'
}, auto_id=False, instance=art)
self.assertHTMLEqual(f.as_ul(), '''<li>Headline: <input type="text" name="headline" value="New headline" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" value="new-headline" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" value="1988-01-04" /></li>''')
self.assertEqual(f.is_valid(), True)
new_art = f.save()
self.assertEqual(new_art.id == art_id_1, True)
new_art = Article.objects.get(id=art_id_1)
self.assertEqual(new_art.headline, 'New headline')
# Add some categories and test the many-to-many form output.
self.assertEqual(map(lambda o: o.name, new_art.categories.all()), [])
new_art.categories.add(Category.objects.get(name='Entertainment'))
self.assertEqual(map(lambda o: o.name, new_art.categories.all()), ["Entertainment"])
f = TestArticleForm(auto_id=False, instance=new_art)
self.assertHTMLEqual(f.as_ul(), '''<li>Headline: <input type="text" name="headline" value="New headline" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" value="new-headline" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" value="1988-01-04" /></li>
<li>Writer: <select name="writer">
<option value="">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s" selected="selected">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article">Hello.</textarea></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="%s" selected="selected">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select> <span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (w_woodward.pk, w_royko.pk, c1.pk, c2.pk, c3.pk))
# Initial values can be provided for model forms
f = TestArticleForm(
auto_id=False,
initial={
'headline': 'Your headline here',
'categories': [str(c1.id), str(c2.id)]
})
self.assertHTMLEqual(f.as_ul(), '''<li>Headline: <input type="text" name="headline" value="Your headline here" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" /></li>
<li>Writer: <select name="writer">
<option value="" selected="selected">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article"></textarea></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="%s" selected="selected">Entertainment</option>
<option value="%s" selected="selected">It's a test</option>
<option value="%s">Third test</option>
</select> <span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (w_woodward.pk, w_royko.pk, c1.pk, c2.pk, c3.pk))
f = TestArticleForm({
'headline': 'New headline',
'slug': 'new-headline',
'pub_date': '1988-01-04',
'writer': six.text_type(w_royko.pk),
'article': 'Hello.',
'categories': [six.text_type(c1.id), six.text_type(c2.id)]
}, instance=new_art)
new_art = f.save()
self.assertEqual(new_art.id == art_id_1, True)
new_art = Article.objects.get(id=art_id_1)
self.assertEqual(map(lambda o: o.name, new_art.categories.order_by('name')),
["Entertainment", "It's a test"])
# Now, submit form data with no categories. This deletes the existing categories.
f = TestArticleForm({'headline': 'New headline', 'slug': 'new-headline', 'pub_date': '1988-01-04',
'writer': six.text_type(w_royko.pk), 'article': 'Hello.'}, instance=new_art)
new_art = f.save()
self.assertEqual(new_art.id == art_id_1, True)
new_art = Article.objects.get(id=art_id_1)
self.assertEqual(map(lambda o: o.name, new_art.categories.all()), [])
# Create a new article, with categories, via the form.
f = ArticleForm({'headline': 'The walrus was Paul', 'slug': 'walrus-was-paul', 'pub_date': '1967-11-01',
'writer': six.text_type(w_royko.pk), 'article': 'Test.', 'categories': [six.text_type(c1.id), six.text_type(c2.id)]})
new_art = f.save()
art_id_2 = new_art.id
self.assertEqual(art_id_2 not in (None, art_id_1), True)
new_art = Article.objects.get(id=art_id_2)
self.assertEqual(map(lambda o: o.name, new_art.categories.order_by('name')), ["Entertainment", "It's a test"])
# Create a new article, with no categories, via the form.
f = ArticleForm({'headline': 'The walrus was Paul', 'slug': 'walrus-was-paul', 'pub_date': '1967-11-01',
'writer': six.text_type(w_royko.pk), 'article': 'Test.'})
new_art = f.save()
art_id_3 = new_art.id
self.assertEqual(art_id_3 not in (None, art_id_1, art_id_2), True)
new_art = Article.objects.get(id=art_id_3)
self.assertEqual(map(lambda o: o.name, new_art.categories.all()), [])
# Create a new article, with categories, via the form, but use commit=False.
# The m2m data won't be saved until save_m2m() is invoked on the form.
f = ArticleForm({'headline': 'The walrus was Paul', 'slug': 'walrus-was-paul', 'pub_date': '1967-11-01',
'writer': six.text_type(w_royko.pk), 'article': 'Test.', 'categories': [six.text_type(c1.id), six.text_type(c2.id)]})
new_art = f.save(commit=False)
# Manually save the instance
new_art.save()
art_id_4 = new_art.id
self.assertEqual(art_id_4 not in (None, art_id_1, art_id_2, art_id_3), True)
# The instance doesn't have m2m data yet
new_art = Article.objects.get(id=art_id_4)
self.assertEqual(map(lambda o: o.name, new_art.categories.all()), [])
# Save the m2m data on the form
f.save_m2m()
self.assertEqual(map(lambda o: o.name, new_art.categories.order_by('name')), ["Entertainment", "It's a test"])
# Here, we define a custom ModelForm. Because it happens to have the same fields as
# the Category model, we can just call the form's save() to apply its changes to an
# existing Category instance.
cat = Category.objects.get(name='Third test')
self.assertEqual(cat.name, "Third test")
self.assertEqual(cat.id == c3.id, True)
form = ShortCategory({'name': 'Third', 'slug': 'third', 'url': '3rd'}, instance=cat)
self.assertEqual(form.save().name, 'Third')
self.assertEqual(Category.objects.get(id=c3.id).name, 'Third')
# Here, we demonstrate that choices for a ForeignKey ChoiceField are determined
# at runtime, based on the data in the database when the form is displayed, not
# the data in the database when the form is instantiated.
f = ArticleForm(auto_id=False)
self.assertHTMLEqual(f.as_ul(), '''<li>Headline: <input type="text" name="headline" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" /></li>
<li>Writer: <select name="writer">
<option value="" selected="selected">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article"></textarea></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third</option>
</select> <span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (w_woodward.pk, w_royko.pk, c1.pk, c2.pk, c3.pk))
c4 = Category.objects.create(name='Fourth', url='4th')
self.assertEqual(c4.name, 'Fourth')
w_bernstein = Writer.objects.create(name='Carl Bernstein')
self.assertEqual(w_bernstein.name, 'Carl Bernstein')
self.assertHTMLEqual(f.as_ul(), '''<li>Headline: <input type="text" name="headline" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" /></li>
<li>Writer: <select name="writer">
<option value="" selected="selected">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Carl Bernstein</option>
<option value="%s">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article"></textarea></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third</option>
<option value="%s">Fourth</option>
</select> <span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (w_woodward.pk, w_bernstein.pk, w_royko.pk, c1.pk, c2.pk, c3.pk, c4.pk))
# ModelChoiceField ############################################################
f = forms.ModelChoiceField(Category.objects.all())
self.assertEqual(list(f.choices), [
('', '---------'),
(c1.pk, 'Entertainment'),
(c2.pk, "It's a test"),
(c3.pk, 'Third'),
(c4.pk, 'Fourth')])
with self.assertRaises(ValidationError):
f.clean('')
with self.assertRaises(ValidationError):
f.clean(None)
with self.assertRaises(ValidationError):
f.clean(0)
self.assertEqual(f.clean(c3.id).name, 'Third')
self.assertEqual(f.clean(c2.id).name, "It's a test")
# Add a Category object *after* the ModelChoiceField has already been
# instantiated. This proves clean() checks the database during clean() rather
# than caching it at time of instantiation.
c5 = Category.objects.create(name='Fifth', url='5th')
self.assertEqual(c5.name, 'Fifth')
self.assertEqual(f.clean(c5.id).name, 'Fifth')
# Delete a Category object *after* the ModelChoiceField has already been
# instantiated. This proves clean() checks the database during clean() rather
# than caching it at time of instantiation.
Category.objects.get(url='5th').delete()
with self.assertRaises(ValidationError):
f.clean(c5.id)
f = forms.ModelChoiceField(Category.objects.filter(pk=c1.id), required=False)
self.assertEqual(f.clean(''), None)
f.clean('')
self.assertEqual(f.clean(str(c1.id)).name, "Entertainment")
with self.assertRaises(ValidationError):
f.clean('100')
# queryset can be changed after the field is created.
f.queryset = Category.objects.exclude(name='Fourth')
self.assertEqual(list(f.choices), [
('', '---------'),
(c1.pk, 'Entertainment'),
(c2.pk, "It's a test"),
(c3.pk, 'Third')])
self.assertEqual(f.clean(c3.id).name, 'Third')
with self.assertRaises(ValidationError):
f.clean(c4.id)
# check that we can safely iterate choices repeatedly
gen_one = list(f.choices)
gen_two = f.choices
self.assertEqual(gen_one[2], (c2.pk, "It's a test"))
self.assertEqual(list(gen_two), [
('', '---------'),
(c1.pk, 'Entertainment'),
(c2.pk, "It's a test"),
(c3.pk, 'Third')])
# check that we can override the label_from_instance method to print custom labels (#4620)
f.queryset = Category.objects.all()
f.label_from_instance = lambda obj: "category " + str(obj)
self.assertEqual(list(f.choices), [
('', '---------'),
(c1.pk, 'category Entertainment'),
(c2.pk, "category It's a test"),
(c3.pk, 'category Third'),
(c4.pk, 'category Fourth')])
# ModelMultipleChoiceField ####################################################
f = forms.ModelMultipleChoiceField(Category.objects.all())
self.assertEqual(list(f.choices), [
(c1.pk, 'Entertainment'),
(c2.pk, "It's a test"),
(c3.pk, 'Third'),
(c4.pk, 'Fourth')])
with self.assertRaises(ValidationError):
f.clean(None)
with self.assertRaises(ValidationError):
f.clean([])
self.assertEqual(map(lambda o: o.name, f.clean([c1.id])), ["Entertainment"])
self.assertEqual(map(lambda o: o.name, f.clean([c2.id])), ["It's a test"])
self.assertEqual(map(lambda o: o.name, f.clean([str(c1.id)])), ["Entertainment"])
self.assertEqual(map(lambda o: o.name, f.clean([str(c1.id), str(c2.id)])), ["Entertainment", "It's a test"])
self.assertEqual(map(lambda o: o.name, f.clean([c1.id, str(c2.id)])), ["Entertainment", "It's a test"])
self.assertEqual(map(lambda o: o.name, f.clean((c1.id, str(c2.id)))), ["Entertainment", "It's a test"])
with self.assertRaises(ValidationError):
f.clean(['100'])
with self.assertRaises(ValidationError):
f.clean('hello')
with self.assertRaises(ValidationError):
f.clean(['fail'])
# Add a Category object *after* the ModelMultipleChoiceField has already been
# instantiated. This proves clean() checks the database during clean() rather
# than caching it at time of instantiation.
c6 = Category.objects.create(id=6, name='Sixth', url='6th')
self.assertEqual(c6.name, 'Sixth')
self.assertEqual(map(lambda o: o.name, f.clean([c6.id])), ["Sixth"])
# Delete a Category object *after* the ModelMultipleChoiceField has already been
# instantiated. This proves clean() checks the database during clean() rather
# than caching it at time of instantiation.
Category.objects.get(url='6th').delete()
with self.assertRaises(ValidationError):
f.clean([c6.id])
f = forms.ModelMultipleChoiceField(Category.objects.all(), required=False)
self.assertEqual(f.clean([]), [])
self.assertEqual(f.clean(()), [])
with self.assertRaises(ValidationError):
f.clean(['10'])
with self.assertRaises(ValidationError):
f.clean([str(c3.id), '10'])
with self.assertRaises(ValidationError):
f.clean([str(c1.id), '10'])
# queryset can be changed after the field is created.
f.queryset = Category.objects.exclude(name='Fourth')
self.assertEqual(list(f.choices), [
(c1.pk, 'Entertainment'),
(c2.pk, "It's a test"),
(c3.pk, 'Third')])
self.assertEqual(map(lambda o: o.name, f.clean([c3.id])), ["Third"])
with self.assertRaises(ValidationError):
f.clean([c4.id])
with self.assertRaises(ValidationError):
f.clean([str(c3.id), str(c4.id)])
f.queryset = Category.objects.all()
f.label_from_instance = lambda obj: "multicategory " + str(obj)
self.assertEqual(list(f.choices), [
(c1.pk, 'multicategory Entertainment'),
(c2.pk, "multicategory It's a test"),
(c3.pk, 'multicategory Third'),
(c4.pk, 'multicategory Fourth')])
# OneToOneField ###############################################################
self.assertEqual(ImprovedArticleForm.base_fields.keys(), ['article'])
self.assertEqual(ImprovedArticleWithParentLinkForm.base_fields.keys(), [])
bw = BetterWriter(name='Joe Better', score=10)
bw.save()
self.assertEqual(sorted(model_to_dict(bw).keys()),
['id', 'name', 'score', 'writer_ptr'])
form = BetterWriterForm({'name': 'Some Name', 'score': 12})
self.assertEqual(form.is_valid(), True)
bw2 = form.save()
bw2.delete()
form = WriterProfileForm()
self.assertHTMLEqual(form.as_p(), '''<p><label for="id_writer">Writer:</label> <select name="writer" id="id_writer">
<option value="" selected="selected">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Carl Bernstein</option>
<option value="%s">Joe Better</option>
<option value="%s">Mike Royko</option>
</select></p>
<p><label for="id_age">Age:</label> <input type="text" name="age" id="id_age" /></p>''' % (w_woodward.pk, w_bernstein.pk, bw.pk, w_royko.pk))
data = {
'writer': six.text_type(w_woodward.pk),
'age': '65',
}
form = WriterProfileForm(data)
instance = form.save()
self.assertEqual(six.text_type(instance), 'Bob Woodward is 65')
form = WriterProfileForm(instance=instance)
self.assertHTMLEqual(form.as_p(), '''<p><label for="id_writer">Writer:</label> <select name="writer" id="id_writer">
<option value="">---------</option>
<option value="%s" selected="selected">Bob Woodward</option>
<option value="%s">Carl Bernstein</option>
<option value="%s">Joe Better</option>
<option value="%s">Mike Royko</option>
</select></p>
<p><label for="id_age">Age:</label> <input type="text" name="age" value="65" id="id_age" /></p>''' % (w_woodward.pk, w_bernstein.pk, bw.pk, w_royko.pk))
def test_phone_number_field(self):
f = PhoneNumberForm({'phone': '(312) 555-1212', 'description': 'Assistance'})
self.assertEqual(f.is_valid(), True)
self.assertEqual(f.cleaned_data['phone'], '312-555-1212')
self.assertEqual(f.cleaned_data['description'], 'Assistance')
def test_file_field(self):
# Test conditions when files is either not given or empty.
f = TextFileForm(data={'description': 'Assistance'})
self.assertEqual(f.is_valid(), False)
f = TextFileForm(data={'description': 'Assistance'}, files={})
self.assertEqual(f.is_valid(), False)
# Upload a file and ensure it all works as expected.
f = TextFileForm(
data={'description': 'Assistance'},
files={'file': SimpleUploadedFile('test1.txt', b'hello world')})
self.assertEqual(f.is_valid(), True)
self.assertEqual(type(f.cleaned_data['file']), SimpleUploadedFile)
instance = f.save()
self.assertEqual(instance.file.name, 'tests/test1.txt')
instance.file.delete()
f = TextFileForm(
data={'description': 'Assistance'},
files={'file': SimpleUploadedFile('test1.txt', b'hello world')})
self.assertEqual(f.is_valid(), True)
self.assertEqual(type(f.cleaned_data['file']), SimpleUploadedFile)
instance = f.save()
self.assertEqual(instance.file.name, 'tests/test1.txt')
# Check if the max_length attribute has been inherited from the model.
f = TextFileForm(
data={'description': 'Assistance'},
files={'file': SimpleUploadedFile('test-maxlength.txt', b'hello world')})
self.assertEqual(f.is_valid(), False)
# Edit an instance that already has the file defined in the model. This will not
# save the file again, but leave it exactly as it is.
f = TextFileForm(
data={'description': 'Assistance'},
instance=instance)
self.assertEqual(f.is_valid(), True)
self.assertEqual(f.cleaned_data['file'].name, 'tests/test1.txt')
instance = f.save()
self.assertEqual(instance.file.name, 'tests/test1.txt')
# Delete the current file since this is not done by Django.
instance.file.delete()
# Override the file by uploading a new one.
f = TextFileForm(
data={'description': 'Assistance'},
files={'file': SimpleUploadedFile('test2.txt', b'hello world')}, instance=instance)
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.file.name, 'tests/test2.txt')
# Delete the current file since this is not done by Django.
instance.file.delete()
f = TextFileForm(
data={'description': 'Assistance'},
files={'file': SimpleUploadedFile('test2.txt', b'hello world')})
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.file.name, 'tests/test2.txt')
# Delete the current file since this is not done by Django.
instance.file.delete()
instance.delete()
# Test the non-required FileField
f = TextFileForm(data={'description': 'Assistance'})
f.fields['file'].required = False
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.file.name, '')
f = TextFileForm(
data={'description': 'Assistance'},
files={'file': SimpleUploadedFile('test3.txt', b'hello world')}, instance=instance)
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.file.name, 'tests/test3.txt')
# Instance can be edited w/out re-uploading the file and existing file should be preserved.
f = TextFileForm(
data={'description': 'New Description'},
instance=instance)
f.fields['file'].required = False
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.description, 'New Description')
self.assertEqual(instance.file.name, 'tests/test3.txt')
# Delete the current file since this is not done by Django.
instance.file.delete()
instance.delete()
f = TextFileForm(
data={'description': 'Assistance'},
files={'file': SimpleUploadedFile('test3.txt', b'hello world')})
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.file.name, 'tests/test3.txt')
# Delete the current file since this is not done by Django.
instance.file.delete()
instance.delete()
def test_big_integer_field(self):
bif = BigIntForm({'biggie': '-9223372036854775808'})
self.assertEqual(bif.is_valid(), True)
bif = BigIntForm({'biggie': '-9223372036854775809'})
self.assertEqual(bif.is_valid(), False)
self.assertEqual(bif.errors, {'biggie': ['Ensure this value is greater than or equal to -9223372036854775808.']})
bif = BigIntForm({'biggie': '9223372036854775807'})
self.assertEqual(bif.is_valid(), True)
bif = BigIntForm({'biggie': '9223372036854775808'})
self.assertEqual(bif.is_valid(), False)
self.assertEqual(bif.errors, {'biggie': ['Ensure this value is less than or equal to 9223372036854775807.']})
@skipUnless(test_images, "PIL not installed")
def test_image_field(self):
# ImageField and FileField are nearly identical, but they differ slightly when
# it comes to validation. This specifically tests that #6302 is fixed for
# both file fields and image fields.
with open(os.path.join(os.path.dirname(__file__), "test.png"), 'rb') as fp:
image_data = fp.read()
with open(os.path.join(os.path.dirname(__file__), "test2.png"), 'rb') as fp:
image_data2 = fp.read()
f = ImageFileForm(
data={'description': 'An image'},
files={'image': SimpleUploadedFile('test.png', image_data)})
self.assertEqual(f.is_valid(), True)
self.assertEqual(type(f.cleaned_data['image']), SimpleUploadedFile)
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test.png')
self.assertEqual(instance.width, 16)
self.assertEqual(instance.height, 16)
# Delete the current file since this is not done by Django, but don't save
# because the dimension fields are not null=True.
instance.image.delete(save=False)
f = ImageFileForm(
data={'description': 'An image'},
files={'image': SimpleUploadedFile('test.png', image_data)})
self.assertEqual(f.is_valid(), True)
self.assertEqual(type(f.cleaned_data['image']), SimpleUploadedFile)
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test.png')
self.assertEqual(instance.width, 16)
self.assertEqual(instance.height, 16)
# Edit an instance that already has the (required) image defined in the model. This will not
# save the image again, but leave it exactly as it is.
f = ImageFileForm(data={'description': 'Look, it changed'}, instance=instance)
self.assertEqual(f.is_valid(), True)
self.assertEqual(f.cleaned_data['image'].name, 'tests/test.png')
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test.png')
self.assertEqual(instance.height, 16)
self.assertEqual(instance.width, 16)
# Delete the current file since this is not done by Django, but don't save
# because the dimension fields are not null=True.
instance.image.delete(save=False)
# Override the file by uploading a new one.
f = ImageFileForm(
data={'description': 'Changed it'},
files={'image': SimpleUploadedFile('test2.png', image_data2)}, instance=instance)
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test2.png')
self.assertEqual(instance.height, 32)
self.assertEqual(instance.width, 48)
# Delete the current file since this is not done by Django, but don't save
# because the dimension fields are not null=True.
instance.image.delete(save=False)
instance.delete()
f = ImageFileForm(
data={'description': 'Changed it'},
files={'image': SimpleUploadedFile('test2.png', image_data2)})
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test2.png')
self.assertEqual(instance.height, 32)
self.assertEqual(instance.width, 48)
# Delete the current file since this is not done by Django, but don't save
# because the dimension fields are not null=True.
instance.image.delete(save=False)
instance.delete()
# Test the non-required ImageField
# Note: In Oracle, we expect a null ImageField to return '' instead of
# None.
if connection.features.interprets_empty_strings_as_nulls:
expected_null_imagefield_repr = ''
else:
expected_null_imagefield_repr = None
f = OptionalImageFileForm(data={'description': 'Test'})
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.image.name, expected_null_imagefield_repr)
self.assertEqual(instance.width, None)
self.assertEqual(instance.height, None)
f = OptionalImageFileForm(
data={'description': 'And a final one'},
files={'image': SimpleUploadedFile('test3.png', image_data)}, instance=instance)
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test3.png')
self.assertEqual(instance.width, 16)
self.assertEqual(instance.height, 16)
# Editing the instance without re-uploading the image should not affect the image or its width/height properties
f = OptionalImageFileForm(
data={'description': 'New Description'},
instance=instance)
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.description, 'New Description')
self.assertEqual(instance.image.name, 'tests/test3.png')
self.assertEqual(instance.width, 16)
self.assertEqual(instance.height, 16)
# Delete the current file since this is not done by Django.
instance.image.delete()
instance.delete()
f = OptionalImageFileForm(
data={'description': 'And a final one'},
files={'image': SimpleUploadedFile('test4.png', image_data2)}
)
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test4.png')
self.assertEqual(instance.width, 48)
self.assertEqual(instance.height, 32)
instance.delete()
# Test callable upload_to behavior that's dependent on the value of another field in the model
f = ImageFileForm(
data={'description': 'And a final one', 'path': 'foo'},
files={'image': SimpleUploadedFile('test4.png', image_data)})
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.image.name, 'foo/test4.png')
instance.delete()
def test_media_on_modelform(self):
# Similar to a regular Form class you can define custom media to be used on
# the ModelForm.
f = ModelFormWithMedia()
self.assertHTMLEqual(six.text_type(f.media), '''<link href="/some/form/css" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/form/javascript"></script>''')
f = CommaSeparatedIntegerForm({'field': '1,2,3'})
self.assertEqual(f.is_valid(), True)
self.assertEqual(f.cleaned_data, {'field': '1,2,3'})
f = CommaSeparatedIntegerForm({'field': '1a,2'})
self.assertEqual(f.errors, {'field': ['Enter only digits separated by commas.']})
f = CommaSeparatedIntegerForm({'field': ',,,,'})
self.assertEqual(f.is_valid(), True)
self.assertEqual(f.cleaned_data, {'field': ',,,,'})
f = CommaSeparatedIntegerForm({'field': '1.2'})
self.assertEqual(f.errors, {'field': ['Enter only digits separated by commas.']})
f = CommaSeparatedIntegerForm({'field': '1,a,2'})
self.assertEqual(f.errors, {'field': ['Enter only digits separated by commas.']})
f = CommaSeparatedIntegerForm({'field': '1,,2'})
self.assertEqual(f.is_valid(), True)
self.assertEqual(f.cleaned_data, {'field': '1,,2'})
f = CommaSeparatedIntegerForm({'field': '1'})
self.assertEqual(f.is_valid(), True)
self.assertEqual(f.cleaned_data, {'field': '1'})
# This Price instance generated by this form is not valid because the quantity
# field is required, but the form is valid because the field is excluded from
# the form. This is for backwards compatibility.
form = PriceFormWithoutQuantity({'price': '6.00'})
self.assertEqual(form.is_valid(), True)
price = form.save(commit=False)
with self.assertRaises(ValidationError):
price.full_clean()
# The form should not validate fields that it doesn't contain even if they are
# specified using 'fields', not 'exclude'.
class PriceFormWithoutQuantity(forms.ModelForm):
class Meta:
model = Price
fields = ('price',)
form = PriceFormWithoutQuantity({'price': '6.00'})
self.assertEqual(form.is_valid(), True)
# The form should still have an instance of a model that is not complete and
# not saved into a DB yet.
self.assertEqual(form.instance.price, Decimal('6.00'))
self.assertEqual(form.instance.quantity is None, True)
self.assertEqual(form.instance.pk is None, True)
# Choices on CharField and IntegerField
f = ArticleForm()
with self.assertRaises(ValidationError):
f.fields['status'].clean('42')
f = ArticleStatusForm()
with self.assertRaises(ValidationError):
f.fields['status'].clean('z')
def test_foreignkeys_which_use_to_field(self):
apple = Inventory.objects.create(barcode=86, name='Apple')
pear = Inventory.objects.create(barcode=22, name='Pear')
core = Inventory.objects.create(barcode=87, name='Core', parent=apple)
field = forms.ModelChoiceField(Inventory.objects.all(), to_field_name='barcode')
self.assertEqual(tuple(field.choices), (
('', '---------'),
(86, 'Apple'),
(87, 'Core'),
(22, 'Pear')))
form = InventoryForm(instance=core)
self.assertHTMLEqual(six.text_type(form['parent']), '''<select name="parent" id="id_parent">
<option value="">---------</option>
<option value="86" selected="selected">Apple</option>
<option value="87">Core</option>
<option value="22">Pear</option>
</select>''')
data = model_to_dict(core)
data['parent'] = '22'
form = InventoryForm(data=data, instance=core)
core = form.save()
self.assertEqual(core.parent.name, 'Pear')
class CategoryForm(forms.ModelForm):
description = forms.CharField()
class Meta:
model = Category
fields = ['description', 'url']
self.assertEqual(CategoryForm.base_fields.keys(),
['description', 'url'])
self.assertHTMLEqual(six.text_type(CategoryForm()), '''<tr><th><label for="id_description">Description:</label></th><td><input type="text" name="description" id="id_description" /></td></tr>
<tr><th><label for="id_url">The URL:</label></th><td><input id="id_url" type="text" name="url" maxlength="40" /></td></tr>''')
# to_field_name should also work on ModelMultipleChoiceField ##################
field = forms.ModelMultipleChoiceField(Inventory.objects.all(), to_field_name='barcode')
self.assertEqual(tuple(field.choices), ((86, 'Apple'), (87, 'Core'), (22, 'Pear')))
self.assertEqual([o.name for o in field.clean([86])], ['Apple'])
form = SelectInventoryForm({'items': [87, 22]})
self.assertEqual(form.is_valid(), True)
self.assertEqual(len(form.cleaned_data), 1)
self.assertEqual([o.name for o in form.cleaned_data['items']], ['Core', 'Pear'])
def test_model_field_that_returns_none_to_exclude_itself_with_explicit_fields(self):
self.assertEqual(CustomFieldForExclusionForm.base_fields.keys(), ['name'])
self.assertHTMLEqual(six.text_type(CustomFieldForExclusionForm()),
'''<tr><th><label for="id_name">Name:</label></th><td><input id="id_name" type="text" name="name" maxlength="10" /></td></tr>''')
|
|
'''
Alternate strategies to find IAC+SE in a bytestring.
The payload looks like a 4+ byte blob:
IAC SB <zero or more payload bytes> IAC SE
The tricky part is that an IAC in the payload is escaped to IAC IAC.
So the terminating IAC+SE is really the first SE that is preceded by
an *odd* number of IACs.
These functions do very little work per iteration of a loop that may run a very large number of times.
The original dumb function took ~100ms to parse a 1 megabyte string.
Adding one function call per byte adds another ~80ms.
Adding one comparison operation per byte adds another ~30ms.
So look at as few bytes as possible and do as little as possible for each one.
Easier said than done -- if the whole payload is IACs you have to count them all.
In real life the payload will be small, at most tens of bytes.
But there was a unittest that passed in a 1 megabyte string and took 99% of the time
for the whole suite; so I wrote these to get that test down to ~0ms.
'''
from __future__ import print_function
from itertools import izip
import re
import time
from telnetlib import IAC, SB, SE, STATUS
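# A tiny example of the framing (illustration only; not used by the code
# below): in this blob the first IAC+SE is the tail of an escaped IAC IAC
# pair, so every finder in this module should return the index of the
# second one, i.e. len(_EXAMPLE) - 2.
_EXAMPLE = IAC + SB + STATUS + 'x' + IAC + IAC + SE + 'y' + IAC + SE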
all_finders = []
def register_finder(func):
all_finders.append(func)
return func
# RFC 855
# if parameters in an option "subnegotiation" include a byte
# with a value of IAC, it is necessary to double this byte in
# accordance the general TELNET rules.
@register_finder
def find_simple(data):
''' The original (and slow) function.
Scan from the left.
- if the next two bytes are IAC+SE we are done.
- elif the next two bytes are IAC+IAC i+=2
- else i+=1
'''
# this turns out to be really slow, ~100ms on a 1M string.
i = 0
try:
while True:
if data[i] == IAC:
if data[i+1] == SE:
return i # found it!
elif data[i+1] == IAC: # escaped IAC
i += 2
else:
i += 1
else:
i += 1
except IndexError:
return -1
@register_finder
def find_simple2(data):
i = 0
iacs = 0
try:
while True:
if iacs % 2 and data[i] == SE:
return i-1
elif data[i] == IAC:
iacs += 1
else:
iacs = 0
i += 1
except IndexError:
return -1
return -1
@register_finder
def find_simple3(data):
# find the IAC+SE then increment backwards
iacse_i = data.find(IAC+SE)
if iacse_i == 0:
return iacse_i
try:
while iacse_i >= 0:
end = i = iacse_i - 1
while data[i] == IAC:
i -= 1
if not (end - i) % 2: # even number of preceding IACS
return iacse_i
else:
iacse_i = data.find(IAC+SE, iacse_i+1)
except IndexError:
pass
return -1
@register_finder
def find_regexp(haystack):
# regexps speed things up, but not enough. ~25ms on a 1M string.
# not an IAC followed by zero or an even number of IACs followed by IAC+SE
want = re.compile('(?<!%s)(%s%s)*(%s%s)' % (IAC, IAC, IAC, IAC, SE))
m = want.search(haystack)
if not m:
return -1
return m.end() - 2
@register_finder
def find_regexp2(haystack):
# regexps speed things up, but not enough. ~25ms on a 1M string.
want = re.compile('%s+%s' % (IAC, SE))
m = None
for m in want.finditer(haystack):
if (m.end() - m.start()) % 2 == 0: # even number of chars
return m.end() - 2
return -1
@register_finder
def find_find(haystack):
# simple bytes.find() works pretty well for the normal case. ~1ms on a 1M string.
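# Two cursors: iaciac_i is the next escaped IAC pair, iacse_i the next
# IAC+SE candidate.  An escaped pair that ends before the candidate starts
# is simply skipped; one that overlaps the candidate's IAC means the
# candidate is escaped data, so move on to the next IAC+SE.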
iaciac_i = haystack.find(IAC+IAC)
iacse_i = haystack.find(IAC+SE)
while iacse_i != -1:
if iaciac_i == -1 or iaciac_i > iacse_i:
break
if iaciac_i+2 <= iacse_i:
iaciac_i = haystack.find(IAC+IAC, iaciac_i+2)
else:
iacse_i = haystack.find(IAC+SE, iacse_i+2)
return iacse_i
@register_finder
def find_hybrid(haystack):
# hybrid of bytes.find() and regexp
iaciac_i = haystack.find(IAC+IAC)
iacse_i = haystack.find(IAC+SE)
loops = 0
while iacse_i != -1:
if iaciac_i == -1 or iaciac_i > iacse_i:
break
if iaciac_i+2 <= iacse_i:
iaciac_i = haystack.find(IAC+IAC, iaciac_i+2)
else:
iacse_i = haystack.find(IAC+SE, iacse_i+2)
loops += 1
if loops > 100:
# pathologically weird case, the regexp version is good for those
return find_regexp2(haystack)
return iacse_i
@register_finder
def find_rfind(haystack):
''' the most complicated strategy (which is why there are lots of comments) '''
iacse_i = haystack.find(IAC+SE)
iaciac_j = haystack.rfind(IAC+IAC, 0, iacse_i+1)
iaciac_pairs = set()
maybe_iaciac_pairs = set()
while iacse_i != -1:
if (iacse_i == -1 or # IACSE not found
iaciac_j == -1 or # IACSE found, no IACIAC before it
iaciac_j != iacse_i-1): # IACIAC not overlapping IACSE
return iacse_i
while (iaciac_j not in iaciac_pairs and # IACIAC not already tested
haystack[iaciac_j:iaciac_j+2] == IAC+IAC): # possibly blocking
maybe_iaciac_pairs.add(iaciac_j)
assert iaciac_j >= 0, (iacse_i, iaciac_j, iaciac_pairs)
iaciac_j -= 2
# odd number of IACs followed by SE means the IAC+SE is good
# even number of IACs followed by SE means the IACs are all escaped
if (iaciac_j == -1 or # decremented by two from 1 to -1, only one IAC
haystack[iaciac_j:iaciac_j+2].count(IAC) == 1):
return iacse_i
# it was an even numbered block of IACS (i.e. all escaped)
iaciac_pairs |= maybe_iaciac_pairs
maybe_iaciac_pairs = set()
# find the next candidate IACSE
iacse_i = haystack.find(IAC+SE, iacse_i+1)
# rfind an IACIAC before it
iaciac_j = haystack.rfind(IAC+IAC, 0, iacse_i+1)
if iacse_i == -1: # no new IACSE found
return -1
elif iaciac_j in iaciac_pairs: # already tested and found acceptable
return iacse_i
assert iaciac_j >= -1 and iacse_i >= -1
return -1
@register_finder
def find_replace(data):
''' extremely simple and fast search (at the expense of a full in memory copy) '''
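# Replacing each escaped IAC pair with a two-byte placeholder (which
# contains neither IAC nor SE) keeps every remaining byte at its original
# offset, so the index found in the copy is valid for the original data.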
ndata = data.replace(IAC+IAC, ':)')
return ndata.find(IAC+SE)
@register_finder
def find_itertools(data):
it1 = iter(data)
it2 = iter(data)
try:
next(it2)
enumerated_pairs = enumerate(izip(it1, it2))
for i, pair in enumerated_pairs:
if pair == (IAC, IAC):
# skip ahead an extra byte each to avoid IAC+IAC+SE
next(enumerated_pairs)
elif pair == (IAC, SE):
return i
except StopIteration:
pass
return -1
def speed_regressions():
# for large SB payloads it is easy to do a very bad & slow parse
# this test compares all the IAC+SE parsers.
SIZE = 1024
big_plain = 'x' * 1024 * SIZE
big_iacs = ('xxxxxx' + IAC + IAC) * 128 * SIZE
all_iacs = (IAC+IAC) * 512 * SIZE
fake_iacses = ('xxxxx' + IAC + IAC + SE) * 128 * SIZE
all_ses = SE*1024*SIZE
blobs = [big_plain, big_iacs, all_iacs, fake_iacses, all_ses]
print(map(len, blobs))
results = []
answers = []
for func in all_finders:
times_row = []
answer_row = []
for blob in blobs:
start = time.time()
ans = func(IAC+SB+STATUS + blob + IAC+SE)
end = time.time()
answer_row.append(ans)
times_row.append('\t%6.3f' % (end - start))
results.append(times_row)
answers.append(answer_row)
correct = answers[0]
i = 1
for also_correct in answers[1:]:
i += 1
assert correct == also_correct, (correct, also_correct)
print(' ' * 18, ''.join(['\txxxxxx', '\txxxxII', '\tIIIIII', '\tIISIIS', '\tSSSSSS']))
for func, times in zip(all_finders, results):
print(func.__name__.ljust(20, ' '), ''.join(times))
if __name__ == '__main__':
speed_regressions()
|
|
###############################################################################
##
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
import errno
import json
import os
import shutil
import subprocess
import sys
from vistrails.core.modules.vistrails_module import Module, ModuleError, IncompleteImplementation, new_module
import vistrails.core.modules.module_registry
from vistrails.core import debug
from vistrails.core.packagemanager import get_package_manager
import vistrails.core.system
from vistrails.core.system import packages_directory, vistrails_root_directory
import identifiers
cl_tools = {}
class CLTools(Module):
""" CLTools is the base Module.
A Module subclass is created for each command line tool described by
a .clt file.
"""
def compute(self):
raise IncompleteImplementation # pragma: no cover
SUFFIX = '.clt'
DEFAULTFILESUFFIX = '.cld'
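# A minimal .clt file, for illustration only (field names inferred from the
# parsing code in _add_tool below; the wizard normally writes these files):
#   {
#     "command": "cat",
#     "args": [["input", "f_in", "file", {"required": ""}]],
#     "stdout": ["out", "string", {}]
#   }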
def _eintr_retry_call(func, *args):
"""Fixes OSErrors and IOErrors
From: http://code.google.com/p/seascope/source/detail?spec=svn8dbe5e23d41db673727ce90fd338e9a43f8877e8&name=8dbe5e23d41d&r=8dbe5e23d41db673727ce90fd338e9a43f8877e8
IOError added
"""
while True:
try:
return func(*args)
except (OSError, IOError), e: # pragma: no cover
if e.errno == errno.EINTR:
continue
raise
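# Used below to guard blocking calls against EINTR, e.g.
#   stdout, stderr = _eintr_retry_call(process.communicate, stdin)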
def _add_tool(path):
# first create classes
tool_name = os.path.basename(path)
if not tool_name.endswith(SUFFIX): # pragma: no cover
return
(tool_name, _) = os.path.splitext(tool_name)
if tool_name in cl_tools: # pragma: no cover
debug.critical("Package CLTools already added: '%s'" % tool_name)
try:
conf = json.load(open(path))
except ValueError as exc: # pragma: no cover
debug.critical("Package CLTools could not parse '%s'" % path, exc)
return
def compute(self):
""" 1. read inputs
2. call with inputs
3. set outputs
"""
# add all arguments as an unordered list
args = [self.conf['command']]
file_std = 'options' in self.conf and 'std_using_files' in self.conf['options']
fail_with_cmd = 'options' in self.conf and 'fail_with_cmd' in self.conf['options']
setOutput = [] # (name, File) - set File contents as output for name
open_files = []
stdin = None
kwargs = {}
for type, name, klass, options in self.conf['args']:
type = type.lower()
klass = klass.lower()
if "constant" == type:
flag = 'flag' in options and options['flag']
if flag:
args.append(flag)
if name:
# if flag==name we assume user tried to name a constant
if not name == flag:
args.append('%s%s' % (options.get('prefix', ''), name))
elif "input" == type:
# handle multiple inputs
values = self.force_get_input_list(name)
if values and 'list' == klass:
values = values[0]
klass = options['type'].lower() \
if 'type' in options else 'string'
for value in values:
if 'flag' == klass:
if not value:
continue
if 'flag' in options and options['flag']:
value = options['flag']
else:
# use name as flag
value = name
elif klass in ('file', 'directory', 'path'):
value = value.name
# check for flag and append file name
if not 'flag' == klass and 'flag' in options:
args.append(options['flag'])
value = '%s%s' % (options.get('prefix', ''),
value)
args.append(value)
elif "output" == type:
# output must be a filename but we may convert the result to a string
# create new file
file = self.interpreter.filePool.create_file(
suffix=options.get('suffix', DEFAULTFILESUFFIX))
fname = file.name
if 'prefix' in options:
fname = options['prefix'] + fname
if 'flag' in options:
args.append(options['flag'])
args.append(fname)
if "file" == klass:
self.set_output(name, file)
elif "string" == klass:
setOutput.append((name, file))
else:
raise ValueError
elif "inputoutput" == type:
# handle single file that is both input and output
value = self.get_input(name)
# create copy of infile to operate on
outfile = self.interpreter.filePool.create_file(
suffix=options.get('suffix', DEFAULTFILESUFFIX))
try:
shutil.copyfile(value.name, outfile.name)
except IOError, e: # pragma: no cover
raise ModuleError(self,
"Error copying file '%s': %s" %
(value.name, debug.format_exception(e)))
value = '%s%s' % (options.get('prefix', ''), outfile.name)
# check for flag and append file name
if 'flag' in options:
args.append(options['flag'])
args.append(value)
self.set_output(name, outfile)
if "stdin" in self.conf:
name, type, options = self.conf["stdin"]
type = type.lower()
if self.has_input(name):
value = self.get_input(name)
if "file" == type:
if file_std:
f = open(value.name, 'rb')
else:
f = open(value.name, 'rb')
stdin = f.read()
f.close()
elif "string" == type:
if file_std:
file = self.interpreter.filePool.create_file()
f = open(file.name, 'wb')
f.write(value)
f.close()
f = open(file.name, 'rb')
else:
stdin = value
else: # pragma: no cover
raise ValueError
if file_std:
open_files.append(f)
kwargs['stdin'] = f.fileno()
else:
kwargs['stdin'] = subprocess.PIPE
if "stdout" in self.conf:
if file_std:
name, type, options = self.conf["stdout"]
type = type.lower()
file = self.interpreter.filePool.create_file(
suffix=DEFAULTFILESUFFIX)
if "file" == type:
self.set_output(name, file)
elif "string" == type:
setOutput.append((name, file))
else: # pragma: no cover
raise ValueError
f = open(file.name, 'wb')
open_files.append(f)
kwargs['stdout'] = f.fileno()
else:
kwargs['stdout'] = subprocess.PIPE
if "stderr" in self.conf:
if file_std:
name, type, options = self.conf["stderr"]
type = type.lower()
file = self.interpreter.filePool.create_file(
suffix=DEFAULTFILESUFFIX)
if "file" == type:
self.set_output(name, file)
elif "string" == type:
setOutput.append((name, file))
else: # pragma: no cover
raise ValueError
f = open(file.name, 'wb')
open_files.append(f)
kwargs['stderr'] = f.fileno()
else:
kwargs['stderr'] = subprocess.PIPE
if fail_with_cmd:
return_code = 0
else:
return_code = self.conf.get('return_code', None)
env = {}
# 0. add defaults
# 1. add from configuration
# 2. add from module env
# 3. add from env port
if configuration.check('env'):
try:
for var in configuration.env.split(";"):
key, value = var.split('=')
key = key.strip()
value = value.strip()
if key:
env[key] = value
except Exception, e: # pragma: no cover
raise ModuleError(self,
"Error parsing configuration env: %s" % (
debug.format_exception(e)))
if 'options' in self.conf and 'env' in self.conf['options']:
try:
for var in self.conf['options']['env'].split(";"):
key, value = var.split('=')
key = key.strip()
value = value.strip()
if key:
env[key] = value
except Exception, e: # pragma: no cover
raise ModuleError(self,
"Error parsing module env: %s" % (
debug.format_exception(e)))
if 'options' in self.conf and 'env_port' in self.conf['options']:
for e in self.force_get_input_list('env'):
try:
for var in e.split(';'):
if not var:
continue
key, value = var.split('=')
key = key.strip()
value = value.strip()
if key:
env[key] = value
except Exception, e: # pragma: no cover
raise ModuleError(self,
"Error parsing env port: %s" % (
debug.format_exception(e)))
if env:
kwargs['env'] = dict(os.environ)
kwargs['env'].update(env)
# write to execution provenance
env = ';'.join(['%s=%s'%(k,v) for k,v in env.iteritems()])
self.annotate({'execution_env': env})
if 'dir' in self.conf:
kwargs['cwd'] = self.conf['dir']
process = subprocess.Popen(args, **kwargs)
if file_std:
process.wait()
else:
#if stdin:
# print "stdin:", len(stdin), stdin[:30]
stdout, stderr = _eintr_retry_call(process.communicate, stdin)
#stdout, stderr = process.communicate(stdin)
#if stdout:
# print "stdout:", len(stdout), stdout[:30]
#if stderr:
# print "stderr:", len(stderr), stderr[:30]
if return_code is not None:
if process.returncode != return_code:
raise ModuleError(self, "Command returned %d (!= %d)" % (
process.returncode, return_code))
self.set_output('return_code', process.returncode)
for f in open_files:
f.close()
for name, file in setOutput:
f = open(file.name, 'rb')
self.set_output(name, f.read())
f.close()
if not file_std:
if "stdout" in self.conf:
name, type, options = self.conf["stdout"]
type = type.lower()
if "file" == type:
file = self.interpreter.filePool.create_file(
suffix=DEFAULTFILESUFFIX)
f = open(file.name, 'wb')
f.write(stdout)
f.close()
self.set_output(name, file)
elif "string" == type:
self.set_output(name, stdout)
else: # pragma: no cover
raise ValueError
if "stderr" in self.conf:
name, type, options = self.conf["stderr"]
type = type.lower()
if "file" == type:
file = self.interpreter.filePool.create_file(
suffix=DEFAULTFILESUFFIX)
f = open(file.name, 'wb')
f.write(stderr)
f.close()
self.set_output(name, file)
elif "string" == type:
self.set_output(name, stderr)
else: # pragma: no cover
raise ValueError
# create docstring
d = """This module is a wrapper for the command line tool '%s'""" % \
conf['command']
# create module
M = new_module(CLTools, tool_name,{"compute": compute,
"conf": conf,
"tool_name": tool_name,
"__doc__": d})
reg = vistrails.core.modules.module_registry.get_module_registry()
reg.add_module(M, package=identifiers.identifier,
package_version=identifiers.version)
def to_vt_type(s):
# add recognized types here - default is String
return '(basic:%s)' % \
{'file':'File', 'path':'Path', 'directory': 'Directory',
'flag':'Boolean', 'list':'List',
'float':'Float','integer':'Integer'
}.get(s.lower(), 'String')
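# For example, to_vt_type('file') returns '(basic:File)', while an
# unrecognized value such as 'text' falls back to '(basic:String)'.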
# add module ports
if 'stdin' in conf:
name, type, options = conf['stdin']
optional = 'required' not in options
reg.add_input_port(M, name, to_vt_type(type), optional=optional)
if 'stdout' in conf:
name, type, options = conf['stdout']
optional = 'required' not in options
reg.add_output_port(M, name, to_vt_type(type), optional=optional)
if 'stderr' in conf:
name, type, options = conf['stderr']
optional = 'required' not in options
reg.add_output_port(M, name, to_vt_type(type), optional=optional)
if 'options' in conf and 'env_port' in conf['options']:
reg.add_input_port(M, 'env', to_vt_type('string'))
for type, name, klass, options in conf['args']:
optional = 'required' not in options
if 'input' == type.lower():
reg.add_input_port(M, name, to_vt_type(klass), optional=optional)
elif 'output' == type.lower():
reg.add_output_port(M, name, to_vt_type(klass), optional=optional)
elif 'inputoutput' == type.lower():
reg.add_input_port(M, name, to_vt_type('file'), optional=optional)
reg.add_output_port(M, name, to_vt_type('file'), optional=optional)
reg.add_output_port(M, 'return_code', to_vt_type('integer'))
cl_tools[tool_name] = M
def add_tool(path):
try:
_add_tool(path)
except Exception as exc: # pragma: no cover
import traceback
debug.critical("Package CLTools failed to create module "
"from '%s': %s" % (path, exc),
traceback.format_exc())
def initialize(*args, **keywords):
reload_scripts(initial=True)
def remove_all_scripts():
reg = vistrails.core.modules.module_registry.get_module_registry()
for tool_name in cl_tools.keys():
del cl_tools[tool_name]
reg.delete_module(identifiers.identifier, tool_name)
def reload_scripts(initial=False, name=None):
reg = vistrails.core.modules.module_registry.get_module_registry()
if not initial:
if name is None:
remove_all_scripts()
else:
del cl_tools[name]
reg.delete_module(identifiers.identifier, name)
if "CLTools" == identifiers.name:
# this is the original package
location = os.path.join(vistrails.core.system.current_dot_vistrails(),
"CLTools")
# make sure the directory exists
if not os.path.isdir(location): # pragma: no cover # pragma: no branch
try:
debug.log("Creating CLTools directory...")
os.mkdir(location)
except Exception, e:
debug.critical("Could not create CLTools directory. Make "
"sure '%s' does not exist and parent directory "
"is writable" % location,
e)
sys.exit(1)
else: # pragma: no cover
# this is a standalone package so modules are placed in this directory
location = os.path.dirname(__file__)
if initial:
reg.add_module(CLTools, abstract=True)
if name is None:
for path in os.listdir(location):
if path.endswith(SUFFIX): # pragma: no branch
add_tool(os.path.join(location, path))
else:
path = os.path.join(location, name + SUFFIX)
if os.path.exists(path):
add_tool(path)
if not initial:
from vistrails.core.interpreter.cached import CachedInterpreter
CachedInterpreter.clear_package(identifiers.identifier)
from vistrails.gui.vistrails_window import _app
_app.invalidate_pipelines()
wizards_list = []
def menu_items():
"""menu_items() -> tuple of (str,function)
It returns a list of pairs containing text for the menu and a
callback function that will be executed when that menu item is selected.
"""
try:
from wizard import QCLToolsWizardWindow
except Exception, e: # pragma: no cover
if "CLTools" == identifiers.name:
debug.unexpected_exception(e)
raise
else:
return
lst = []
if "CLTools" == identifiers.name: # pragma: no branch
def open_wizard():
window = QCLToolsWizardWindow(reload_scripts=reload_scripts)
wizards_list.append(window)
window.show()
lst.append(("Open Wizard", open_wizard))
lst.append(("Reload All Scripts", reload_scripts))
return tuple(lst)
def finalize():
pass
def contextMenuName(name):
if "CLTools" == name:
return "Reload All Scripts"
else:
return "Reload Script"
def callContextMenu(name):
if "CLTools" == name:
reload_scripts()
else:
reload_scripts(name=name)
###############################################################################
import unittest
from vistrails.tests.utils import execute, intercept_results
class TestCLTools(unittest.TestCase):
@classmethod
def setUpClass(cls):
# first make sure CLTools is loaded
pm = get_package_manager()
if 'CLTools' not in pm._package_list: # pragma: no cover # pragma: no branch
pm.late_enable_package('CLTools')
remove_all_scripts()
cls.testdir = os.path.join(packages_directory(), 'CLTools', 'test_files')
cls._tools = {}
for name in os.listdir(cls.testdir):
if not name.endswith(SUFFIX):
continue
_add_tool(os.path.join(cls.testdir, name))
toolname = os.path.splitext(name)[0]
cls._tools[toolname] = cl_tools[toolname]
cls._old_dir = os.getcwd()
os.chdir(vistrails_root_directory())
@classmethod
def tearDownClass(cls):
os.chdir(cls._old_dir)
reload_scripts()
def do_the_test(self, toolname):
with intercept_results(
self._tools[toolname],
'return_code', 'f_out', 'stdout') as (
return_code, f_out, stdout):
self.assertFalse(execute([
(toolname, 'org.vistrails.vistrails.cltools', [
('f_in', [('File', self.testdir + '/test_1.cltest')]),
('chars', [('List', '["a", "b", "c"]')]),
('false', [('Boolean', 'False')]),
('true', [('Boolean', 'True')]),
('nb', [('Integer', '42')]),
('stdin', [('String', 'some line\nignored')]),
]),
]))
self.assertEqual(return_code, [0])
self.assertEqual(f_out, ['ok\nmessage received'])
self.assertEqual(stdout, ['program output here'])
def test_with_pipes(self):
"""Without std_using_files: use pipes instead of files.
"""
self.do_the_test('intern_cltools_1')
def test_with_files(self):
"""With std_using_files: use files instead of pipes.
"""
self.do_the_test('intern_cltools_2')
|
|
# -*- coding: utf-8 -*-
"""
celery.backends.base
~~~~~~~~~~~~~~~~~~~~
Result backend base classes.
- :class:`BaseBackend` defines the interface.
- :class:`KeyValueStoreBackend` is a common base class
using K/V semantics like _get and _put.
"""
from __future__ import absolute_import
import time
import sys
from datetime import timedelta
from billiard.einfo import ExceptionInfo
from kombu.serialization import (
dumps, loads, prepare_accept_content,
registry as serializer_registry,
)
from kombu.utils.encoding import bytes_to_str, ensure_bytes, from_utf8
from celery import states
from celery.app import current_task
from celery.exceptions import ChordError, TimeoutError, TaskRevokedError
from celery.five import items
from celery.result import result_from_tuple, GroupResult
from celery.utils import timeutils
from celery.utils.functional import LRUCache
from celery.utils.serialization import (
get_pickled_exception,
get_pickleable_exception,
create_exception_cls,
)
__all__ = ['BaseBackend', 'KeyValueStoreBackend', 'DisabledBackend']
EXCEPTION_ABLE_CODECS = frozenset(['pickle', 'yaml'])
PY3 = sys.version_info >= (3, 0)
def unpickle_backend(cls, args, kwargs):
"""Return an unpickled backend."""
from celery import current_app
return cls(*args, app=current_app._get_current_object(), **kwargs)
class BaseBackend(object):
READY_STATES = states.READY_STATES
UNREADY_STATES = states.UNREADY_STATES
EXCEPTION_STATES = states.EXCEPTION_STATES
TimeoutError = TimeoutError
#: Time to sleep between polling each individual item
#: in `ResultSet.iterate`, as opposed to the `interval`
#: argument which is for each pass.
subpolling_interval = None
#: If true the backend must implement :meth:`get_many`.
supports_native_join = False
#: If true the backend must automatically expire results.
#: The daily backend_cleanup periodic task will not be triggered
#: in this case.
supports_autoexpire = False
#: Set to true if the backend is persistent by default.
persistent = True
def __init__(self, app, serializer=None,
max_cached_results=None, accept=None, **kwargs):
self.app = app
conf = self.app.conf
self.serializer = serializer or conf.CELERY_RESULT_SERIALIZER
(self.content_type,
self.content_encoding,
self.encoder) = serializer_registry._encoders[self.serializer]
self._cache = LRUCache(
limit=max_cached_results or conf.CELERY_MAX_CACHED_RESULTS,
)
self.accept = prepare_accept_content(
conf.CELERY_ACCEPT_CONTENT if accept is None else accept,
)
def mark_as_started(self, task_id, **meta):
"""Mark a task as started"""
return self.store_result(task_id, meta, status=states.STARTED)
def mark_as_done(self, task_id, result, request=None):
"""Mark task as successfully executed."""
return self.store_result(task_id, result,
status=states.SUCCESS, request=request)
def mark_as_failure(self, task_id, exc, traceback=None, request=None):
"""Mark task as executed with failure. Stores the execption."""
return self.store_result(task_id, exc, status=states.FAILURE,
traceback=traceback, request=request)
def fail_from_current_stack(self, task_id, exc=None):
type_, real_exc, tb = sys.exc_info()
try:
exc = real_exc if exc is None else exc
ei = ExceptionInfo((type_, exc, tb))
self.mark_as_failure(task_id, exc, ei.traceback)
return ei
finally:
del(tb)
def mark_as_retry(self, task_id, exc, traceback=None, request=None):
"""Mark task as being retries. Stores the current
exception (if any)."""
return self.store_result(task_id, exc, status=states.RETRY,
traceback=traceback, request=request)
def mark_as_revoked(self, task_id, reason='', request=None):
return self.store_result(task_id, TaskRevokedError(reason),
status=states.REVOKED, traceback=None,
request=request)
def prepare_exception(self, exc):
"""Prepare exception for serialization."""
if self.serializer in EXCEPTION_ABLE_CODECS:
return get_pickleable_exception(exc)
return {'exc_type': type(exc).__name__, 'exc_message': str(exc)}
def exception_to_python(self, exc):
"""Convert serialized exception to Python exception."""
if self.serializer in EXCEPTION_ABLE_CODECS:
return get_pickled_exception(exc)
return create_exception_cls(
from_utf8(exc['exc_type']), __name__)(exc['exc_message'])
def prepare_value(self, result):
"""Prepare value for storage."""
if isinstance(result, GroupResult):
return result.as_tuple()
return result
def encode(self, data):
_, _, payload = dumps(data, serializer=self.serializer)
return payload
def decode(self, payload):
payload = PY3 and payload or str(payload)
return loads(payload,
content_type=self.content_type,
content_encoding=self.content_encoding,
accept=self.accept)
def wait_for(self, task_id, timeout=None, propagate=True, interval=0.5):
"""Wait for task and return its result.
If the task raises an exception, this exception
will be re-raised by :func:`wait_for`.
If `timeout` is not :const:`None`, this raises the
:class:`celery.exceptions.TimeoutError` exception if the operation
takes longer than `timeout` seconds.
"""
time_elapsed = 0.0
while 1:
status = self.get_status(task_id)
if status == states.SUCCESS:
return self.get_result(task_id)
elif status in states.PROPAGATE_STATES:
result = self.get_result(task_id)
if propagate:
raise result
return result
# avoid hammering the CPU checking status.
time.sleep(interval)
time_elapsed += interval
if timeout and time_elapsed >= timeout:
raise TimeoutError('The operation timed out.')
def prepare_expires(self, value, type=None):
if value is None:
value = self.app.conf.CELERY_TASK_RESULT_EXPIRES
if isinstance(value, timedelta):
value = timeutils.timedelta_seconds(value)
if value is not None and type:
return type(value)
return value
def prepare_persistent(self, enabled=None):
if enabled is not None:
return enabled
p = self.app.conf.CELERY_RESULT_PERSISTENT
return self.persistent if p is None else p
def encode_result(self, result, status):
if status in self.EXCEPTION_STATES and isinstance(result, Exception):
return self.prepare_exception(result)
else:
return self.prepare_value(result)
def is_cached(self, task_id):
return task_id in self._cache
def store_result(self, task_id, result, status,
traceback=None, request=None, **kwargs):
"""Update task state and result."""
result = self.encode_result(result, status)
self._store_result(task_id, result, status, traceback,
request=request, **kwargs)
return result
def forget(self, task_id):
self._cache.pop(task_id, None)
self._forget(task_id)
def _forget(self, task_id):
raise NotImplementedError('backend does not implement forget.')
def get_status(self, task_id):
"""Get the status of a task."""
return self.get_task_meta(task_id)['status']
def get_traceback(self, task_id):
"""Get the traceback for a failed task."""
return self.get_task_meta(task_id).get('traceback')
def get_result(self, task_id):
"""Get the result of a task."""
meta = self.get_task_meta(task_id)
if meta['status'] in self.EXCEPTION_STATES:
return self.exception_to_python(meta['result'])
else:
return meta['result']
def get_children(self, task_id):
"""Get the list of subtasks sent by a task."""
try:
return self.get_task_meta(task_id)['children']
except KeyError:
pass
def get_task_meta(self, task_id, cache=True):
if cache:
try:
return self._cache[task_id]
except KeyError:
pass
meta = self._get_task_meta_for(task_id)
if cache and meta.get('status') == states.SUCCESS:
self._cache[task_id] = meta
return meta
def reload_task_result(self, task_id):
"""Reload task result, even if it has been previously fetched."""
self._cache[task_id] = self.get_task_meta(task_id, cache=False)
def reload_group_result(self, group_id):
"""Reload group result, even if it has been previously fetched."""
self._cache[group_id] = self.get_group_meta(group_id, cache=False)
def get_group_meta(self, group_id, cache=True):
if cache:
try:
return self._cache[group_id]
except KeyError:
pass
meta = self._restore_group(group_id)
if cache and meta is not None:
self._cache[group_id] = meta
return meta
def restore_group(self, group_id, cache=True):
"""Get the result for a group."""
meta = self.get_group_meta(group_id, cache=cache)
if meta:
return meta['result']
def save_group(self, group_id, result):
"""Store the result of an executed group."""
return self._save_group(group_id, result)
def delete_group(self, group_id):
self._cache.pop(group_id, None)
return self._delete_group(group_id)
def cleanup(self):
"""Backend cleanup. Is run by
:class:`celery.task.DeleteExpiredTaskMetaTask`."""
pass
def process_cleanup(self):
"""Cleanup actions to do at the end of a task worker process."""
pass
def on_task_call(self, producer, task_id):
return {}
def on_chord_part_return(self, task, propagate=False):
pass
def fallback_chord_unlock(self, group_id, body, result=None,
countdown=1, **kwargs):
kwargs['result'] = [r.as_tuple() for r in result]
self.app.tasks['celery.chord_unlock'].apply_async(
(group_id, body, ), kwargs, countdown=countdown,
)
on_chord_apply = fallback_chord_unlock
def current_task_children(self, request=None):
request = request or getattr(current_task(), 'request', None)
if request:
return [r.as_tuple() for r in getattr(request, 'children', [])]
def __reduce__(self, args=(), kwargs={}):
return (unpickle_backend, (self.__class__, args, kwargs))
BaseDictBackend = BaseBackend # XXX compat
class KeyValueStoreBackend(BaseBackend):
task_keyprefix = ensure_bytes('celery-task-meta-')
group_keyprefix = ensure_bytes('celery-taskset-meta-')
chord_keyprefix = ensure_bytes('chord-unlock-')
implements_incr = False
def get(self, key):
raise NotImplementedError('Must implement the get method.')
def mget(self, keys):
raise NotImplementedError('Does not support get_many')
def set(self, key, value):
raise NotImplementedError('Must implement the set method.')
def delete(self, key):
raise NotImplementedError('Must implement the delete method')
def incr(self, key):
raise NotImplementedError('Does not implement incr')
def expire(self, key, value):
pass
def get_key_for_task(self, task_id):
"""Get the cache key for a task by id."""
return self.task_keyprefix + ensure_bytes(task_id)
def get_key_for_group(self, group_id):
"""Get the cache key for a group by id."""
return self.group_keyprefix + ensure_bytes(group_id)
def get_key_for_chord(self, group_id):
"""Get the cache key for the chord waiting on group with given id."""
return self.chord_keyprefix + ensure_bytes(group_id)
def _strip_prefix(self, key):
"""Takes bytes, emits string."""
key = ensure_bytes(key)
for prefix in self.task_keyprefix, self.group_keyprefix:
if key.startswith(prefix):
return bytes_to_str(key[len(prefix):])
return bytes_to_str(key)
def _mget_to_results(self, values, keys):
if hasattr(values, 'items'):
# client returns dict so mapping preserved.
return dict((self._strip_prefix(k), self.decode(v))
for k, v in items(values)
if v is not None)
else:
# client returns list so need to recreate mapping.
return dict((bytes_to_str(keys[i]), self.decode(value))
for i, value in enumerate(values)
if value is not None)
def get_many(self, task_ids, timeout=None, interval=0.5,
READY_STATES=states.READY_STATES):
interval = 0.5 if interval is None else interval
ids = task_ids if isinstance(task_ids, set) else set(task_ids)
cached_ids = set()
cache = self._cache
for task_id in ids:
try:
cached = cache[task_id]
except KeyError:
pass
else:
if cached['status'] in READY_STATES:
yield bytes_to_str(task_id), cached
cached_ids.add(task_id)
ids.difference_update(cached_ids)
iterations = 0
while ids:
keys = list(ids)
r = self._mget_to_results(self.mget([self.get_key_for_task(k)
for k in keys]), keys)
cache.update(r)
ids.difference_update(set(bytes_to_str(v) for v in r))
for key, value in items(r):
yield bytes_to_str(key), value
if timeout and iterations * interval >= timeout:
raise TimeoutError('Operation timed out ({0})'.format(timeout))
time.sleep(interval) # don't busy loop.
iterations += 1
def _forget(self, task_id):
self.delete(self.get_key_for_task(task_id))
def _store_result(self, task_id, result, status,
traceback=None, request=None, **kwargs):
meta = {'status': status, 'result': result, 'traceback': traceback,
'children': self.current_task_children(request)}
self.set(self.get_key_for_task(task_id), self.encode(meta))
return result
def _save_group(self, group_id, result):
self.set(self.get_key_for_group(group_id),
self.encode({'result': result.as_tuple()}))
return result
def _delete_group(self, group_id):
self.delete(self.get_key_for_group(group_id))
def _get_task_meta_for(self, task_id):
"""Get task metadata for a task by id."""
meta = self.get(self.get_key_for_task(task_id))
if not meta:
return {'status': states.PENDING, 'result': None}
return self.decode(meta)
def _restore_group(self, group_id):
"""Get task metadata for a task by id."""
meta = self.get(self.get_key_for_group(group_id))
# previously this was always pickled, but later this
# was extended to support other serializers, so the
# structure is kind of weird.
if meta:
meta = self.decode(meta)
result = meta['result']
meta['result'] = result_from_tuple(result, self.app)
return meta
def on_chord_apply(self, group_id, body, result=None, **kwargs):
if self.implements_incr:
self.save_group(group_id, self.app.GroupResult(group_id, result))
else:
self.fallback_chord_unlock(group_id, body, result, **kwargs)
def on_chord_part_return(self, task, propagate=None):
if not self.implements_incr:
return
from celery import maybe_signature
from celery.result import GroupResult
app = self.app
if propagate is None:
propagate = self.app.conf.CELERY_CHORD_PROPAGATES
gid = task.request.group
if not gid:
return
key = self.get_key_for_chord(gid)
try:
deps = GroupResult.restore(gid, backend=task.backend)
except Exception as exc:
callback = maybe_signature(task.request.chord, app=self.app)
return app._tasks[callback.task].backend.fail_from_current_stack(
callback.id,
exc=ChordError('Cannot restore group: {0!r}'.format(exc)),
)
if deps is None:
try:
raise ValueError(gid)
except ValueError as exc:
callback = maybe_signature(task.request.chord, app=self.app)
task = app._tasks[callback.task]
return task.backend.fail_from_current_stack(
callback.id,
exc=ChordError('GroupResult {0} no longer exists'.format(
gid,
))
)
val = self.incr(key)
if val >= len(deps):
callback = maybe_signature(task.request.chord, app=self.app)
j = deps.join_native if deps.supports_native_join else deps.join
try:
ret = j(propagate=propagate)
except Exception as exc:
try:
culprit = next(deps._failed_join_report())
reason = 'Dependency {0.id} raised {1!r}'.format(
culprit, exc,
)
except StopIteration:
reason = repr(exc)
app._tasks[callback.task].backend.fail_from_current_stack(
callback.id, exc=ChordError(reason),
)
else:
try:
callback.delay(ret)
except Exception as exc:
app._tasks[callback.task].backend.fail_from_current_stack(
callback.id,
exc=ChordError('Callback error: {0!r}'.format(exc)),
)
finally:
deps.delete()
self.client.delete(key)
else:
self.expire(key, 86400)
class DisabledBackend(BaseBackend):
_cache = {} # need this attribute to reset cache in tests.
def store_result(self, *args, **kwargs):
pass
def _is_disabled(self, *args, **kwargs):
raise NotImplementedError(
'No result backend configured. '
'Please see the documentation for more information.')
wait_for = get_status = get_result = get_traceback = _is_disabled
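# Illustrative sketch only (not part of Celery): a minimal in-memory backend
# showing the K/V semantics that KeyValueStoreBackend expects subclasses to
# provide -- get/mget/set/delete (incr/expire are only needed when
# implements_incr is True).  Like any other backend it would be constructed
# with a Celery app, e.g. _DictBackend(app=current_app).
class _DictBackend(KeyValueStoreBackend):
    _store = {}  # shared class-level store; good enough for a sketch

    def get(self, key):
        return self._store.get(key)

    def mget(self, keys):
        return [self._store.get(key) for key in keys]

    def set(self, key, value):
        self._store[key] = value

    def delete(self, key):
        self._store.pop(key, None)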
|
|
# adding this to suppress sklearn DeprecationWarnings...
def warn(*args, **kwargs):
pass
import warnings
warnings.warn = warn
import numpy as np
import os
import time
import pickle
import datetime
from datetime import timedelta
from optparse import OptionParser
from sklearn.model_selection import cross_val_score, train_test_split
from multiprocessing.dummy import Pool as ThreadPool
import model
import model.feature_util
import model.sentiment_analysis
import model.topic_extractor
import math
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from model.linear_regression_model import LinearRegressionModel
from model.regression_model import RegressionModel
from model.MLPRegressionModel import MLPRegressionModel
from model.keras_lstm_regression_model import LSTMRegressionModel
from preprocess_text.load_corpora import load_corpora
from preprocess_text.setup_corpus import setup_corpus
from util.config import Config, Paths, RegressionModels
from preprocess_text.document import Document
current_milli_time = lambda: int(round(time.time() * 1000))
plt.style.use("ggplot")
def doc_to_text(doc, max_sentences=-1):
sentences = ""
num_sentences = 1
for sent in doc.sents:
if max_sentences > 0 and num_sentences > max_sentences:
break
sentences += str(sent).strip()
num_sentences += 1
return sentences
def corpus_to_day_features(date, corpus_for_day, output):
print("Starting build for date: " + str(date))
topic_extraction_cache_filename = "_".join([str(date), Config.CORPUS_NAME, Config.TOPIC_EXTRACTION_METHOD.name])
one_hot_topic_extraction_cache_filename = "_".join([str(date), Config.CORPUS_NAME, Config.SENTIMENT_ANALYSIS_METHOD.name, "ONE_HOT"])
sentiment_analysis_cache_filename = "_".join([str(date), Config.CORPUS_NAME, Config.SENTIMENT_ANALYSIS_METHOD.name])
one_hot_sentiment_analysis_cache_filename = "_".join([str(date), Config.CORPUS_NAME, Config.SENTIMENT_ANALYSIS_METHOD.name, "ONE_HOT"]) # gives the sentiment to the most relevant topic per doc
topic_extraction_cache_filename = os.path.join(Config.FEATURE_CACHE_DIR, topic_extraction_cache_filename)
sentiment_analysis_cache_filename = os.path.join(Config.FEATURE_CACHE_DIR, sentiment_analysis_cache_filename)
one_hot_topic_extraction_cache_filename = os.path.join(Config.FEATURE_CACHE_DIR, one_hot_topic_extraction_cache_filename)
one_hot_sentiment_analysis_cache_filename = os.path.join(Config.FEATURE_CACHE_DIR, one_hot_sentiment_analysis_cache_filename)
topics_precomputed = os.path.exists(topic_extraction_cache_filename)
one_hot_topics_precomputed = os.path.exists(one_hot_topic_extraction_cache_filename)
sentiments_precomputed = os.path.exists(sentiment_analysis_cache_filename)
one_hot_sentiments_precomputed = os.path.exists(one_hot_sentiment_analysis_cache_filename)
day_feature_vector = [0.0] * (2 * Config.NUM_TOPICS) # features are topic labels plus sentiment for each topic
day_sentiments = [0.0] * (Config.NUM_TOPICS)
one_hot_day_sentiments = [0.0] * (Config.NUM_TOPICS)
day_topics = [0.0] * (Config.NUM_TOPICS)
one_hot_day_topics = [0.0] * (Config.NUM_TOPICS)
if topics_precomputed and not Config.OVERWRITE:
#print("Loading topics from cache...")
day_topics = pickle.load(open(topic_extraction_cache_filename, "rb"))
if sentiments_precomputed and not Config.OVERWRITE:
#print("Loading sentiments from cache...")
day_sentiments = pickle.load(open(sentiment_analysis_cache_filename, "rb"))
if one_hot_topics_precomputed and not Config.OVERWRITE:
one_hot_day_topics = pickle.load(open(one_hot_topic_extraction_cache_filename, "rb"))
if one_hot_sentiments_precomputed and not Config.OVERWRITE:
one_hot_day_sentiments = pickle.load(open(one_hot_sentiment_analysis_cache_filename, "rb"))
if not topics_precomputed or not sentiments_precomputed or Config.OVERWRITE:
doc_num = 0
for doc in corpus_for_day:
doc_num += 1
doc_topic = []
doc_sentiment = []
            identifying_topic = -1  # index of the topic the article identifies with most strongly
print("Computing doc # " + str(doc_num))
if not topics_precomputed or Config.OVERWRITE:
doc_topic = Config.TOPIC_EXTRACTION_METHOD.value(doc_to_text(doc, max_sentences=Config.MAX_SENTENCES))
max_topic_val = -1.0
for i in range(len(doc_topic)):
day_topics[i] += doc_topic[i]
if doc_topic[i] > max_topic_val and doc_topic[i] > 0.0:
                        identifying_topic = i
max_topic_val = doc_topic[i]
one_hot_day_topics[identifying_topic] += 1.0
if not sentiments_precomputed or Config.OVERWRITE:
doc_sentiment = Config.SENTIMENT_ANALYSIS_METHOD.value(doc)
for i in range(len(day_sentiments)):
day_sentiments[i] += doc_sentiment * 1.0 * doc_topic[i]
one_hot_day_sentiments[identifying_topic] += doc_sentiment * 1.0
print("Finished doc #" + str(doc_num))
if not topics_precomputed or Config.OVERWRITE:
for i in range(len(day_topics)):
day_topics[i] = day_topics[i] / len(corpus_for_day)
pickle.dump(day_topics, open(topic_extraction_cache_filename, "wb"))
if not sentiments_precomputed or Config.OVERWRITE:
for i in range(len(day_sentiments)):
day_sentiments[i] = day_sentiments[i] / len(corpus_for_day)
pickle.dump(day_sentiments, open(sentiment_analysis_cache_filename, "wb"))
if not one_hot_topics_precomputed or Config.OVERWRITE:
for i in range(len(one_hot_day_topics)):
one_hot_day_topics[i] = one_hot_day_topics[i] / len(corpus_for_day)
pickle.dump(one_hot_day_topics, open(one_hot_topic_extraction_cache_filename, "wb"))
if not one_hot_sentiments_precomputed or Config.OVERWRITE:
for i in range(len(one_hot_day_sentiments)):
one_hot_day_sentiments[i] = one_hot_day_sentiments[i] / len(corpus_for_day)
pickle.dump(one_hot_day_sentiments, open(one_hot_sentiment_analysis_cache_filename, "wb"))
for i in range(Config.NUM_TOPICS):
day_feature_vector[2 * i] = day_topics[i]
day_feature_vector[2 * i + 1] = day_sentiments[i]
output[date] = day_feature_vector
print("Finished date: " + str(date))
# run topic extraction/sentiment analysis on the corpora
# to build feature vectors per day
# we expect corpora to be a map of {datetime: corpus}
def corpora_to_day_features(corpora):
output = {}
for date, corpus_for_day in corpora.items():
corpus_to_day_features(date, corpus_for_day, output)
"""threadpool = ThreadPool(4)
arg_list = [(item[0], item[1], output) for item in corpora.items()]
threadpool.starmap(corpus_to_day_features, arg_list)
threadpool.close()
threadpool.join()"""
return output
def init_corpora():
print("Loading daily approval ratings...")
approval_ratings = model.feature_util.get_approval_poll_data()
print("done.")
print("Loading corpus of political articles...")
num_articles = 100
corpus_name = Config.CORPUS_NAME
article_corpora = load_corpora(corpus_name, "/opt/nlp_shared/corpora/{}/".format(Config.CORPUS_SUBDIR), Config.CORPUS_YEARS)
print("done.")
return (approval_ratings, article_corpora)
# takes the features for individual days and does a running average for
# a shifting range of days (specified in config)
def combine_day_ranges(features_by_day, approval_ratings):
output = {}
for date, features in features_by_day.items():
range_features = [0.0] * (2 * Config.NUM_TOPICS)
days_with_data = 0 # count how many days in this range actually provided us data
# TODO: this might be biased since days with different # of articles are weighted the same
for i in range(0, Config.DAY_RANGE):
days_away = timedelta(days=i)
target_day = date - days_away
curr_day_features = features_by_day.get(target_day)
if curr_day_features is not None:
days_with_data += 1
                for j in range(len(curr_day_features)):
                    range_features[j] += curr_day_features[j]
for i in range(len(range_features)):
range_features[i] = range_features[i] / days_with_data
'''prev_label = approval_ratings.get(date - timedelta(days=1)) # get yesterday's labels
if prev_label is not None:
range_features.append(prev_label[0])
range_features.append(prev_label[1])
else:
# no information so just provide a 50/50 guess...
range_features.append(50.0)
range_features.append(50.0)'''
output[date] = range_features
return output
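# Hedged sketch (standalone): the windowed averaging above can be pictured with plain dicts keyed
# by date. DAY_RANGE = 3 and two-element feature vectors are assumptions for the demo only.
def _demo_day_range_average():
    from datetime import date
    feats = {date(2017, 1, d): [float(d), float(d) * 2] for d in range(1, 6)}
    day_range = 3
    out = {}
    for day in feats:
        window = [feats[day - timedelta(days=i)] for i in range(day_range) if (day - timedelta(days=i)) in feats]
        out[day] = [sum(col) / len(window) for col in zip(*window)]
    return out  # e.g. out[date(2017, 1, 5)] == [4.0, 8.0], the mean over Jan 3-5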
def multid_combine_day_ranges(features_by_day):
output = {}
for date, features in features_by_day.items():
ranged_features = []
for i in range(0, Config.DAY_RANGE):
days_away = timedelta(days=i)
target_day = date - days_away
curr_day_features = features_by_day.get(target_day)
if curr_day_features is not None:
ranged_features.insert(0, curr_day_features)
else:
# if we're missing a day, put in a default -1.0 value vector
ranged_features.insert(0, ([-1.0] * (2 * Config.NUM_TOPICS)))
output[date] = ranged_features
return output
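# Hedged sketch: for the LSTM path each date maps to a chronologically ordered list of DAY_RANGE
# per-day vectors, with a -1.0 vector standing in for missing days. DAY_RANGE = 3 and two-element
# vectors are assumed only for this illustration.
def _demo_sequence_features():
    from datetime import date
    feats = {date(2017, 1, 2): [0.1, 0.2], date(2017, 1, 3): [0.3, 0.4]}
    target, day_range = date(2017, 1, 3), 3
    seq = []
    for i in range(day_range):
        seq.insert(0, feats.get(target - timedelta(days=i), [-1.0, -1.0]))
    return seq  # [[-1.0, -1.0], [0.1, 0.2], [0.3, 0.4]]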
def match_features_to_labels(features_by_range, approval_ratings):
X = []
Y = []
# match up inputs (range features) w/ output label
for date, features in sorted(features_by_range.items(), key=lambda pair: pair[0]):
print(date)
actual_date = date + timedelta(days=Config.POLL_DELAY)
approval_label = approval_ratings.get(actual_date.date()) # approval label should be 'poll_lag' days into the future
if approval_label is not None:
X.append(features)
Y.append(approval_label[:-1]) # remove count of number of polls contributing to daily rating
else:
pass #print("UNABLE TO FIND APPROVAL RATINGS FOR DAY {}".format(str(actual_date.date())))
return (X, Y)
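# Hedged sketch of the label alignment above: the feature vector for date D is paired with the
# approval rating POLL_DELAY days later, and the trailing poll count is dropped from the label.
# POLL_DELAY = 2 and the rating values are assumptions for the demo only.
def _demo_poll_delay_alignment():
    from datetime import date
    ratings = {date(2017, 1, 5): [41.0, 52.0, 7]}  # (approve %, disapprove %, number of polls)
    feature_date = date(2017, 1, 3)
    label = ratings.get(feature_date + timedelta(days=2))
    return label[:-1]  # [41.0, 52.0]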
def split_data(X, Y):
print("Length of data samples: {}".format(len(X)))
print("Length of labels: {}".format(len(Y)))
np_X = np.array(X)
np_Y = np.array(Y)
first_split = Config.TRAINING_PARTITION
second_split = first_split + Config.TEST_PARTITION
first_split = int(first_split * len(np_X))
second_split = int(second_split * len(np_X))
train_X, test_X, val_X = np.split(np_X, [first_split, second_split])
train_Y, test_Y, val_Y = np.split(np_Y, [first_split, second_split])
return ([train_X, test_X, val_X], [train_Y, test_Y, val_Y])
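# Hedged sketch: np.split with two cut points yields the consecutive train/test/validation
# partition used above. The 60/20/20 fractions are assumed for the demo; the real ones come from
# Config.TRAINING_PARTITION and Config.TEST_PARTITION.
def _demo_consecutive_split():
    data = np.arange(10)
    first = int(0.6 * len(data))
    second = int(0.8 * len(data))
    train, test, val = np.split(data, [first, second])
    return train, test, val  # [0..5], [6, 7], [8, 9]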
# print the given message to console
# and write it to file
def pw(output_file, message):
output_file.write(message + "\n")
print(message)
if __name__ == '__main__':
# add command-line flags
# NOTE: Set hyper-parameters in util/Config.py
parser = OptionParser()
parser.add_option("-s", "--save", dest="save", action="store_true", help="save the model to disk with a default name")
parser.add_option("-l", "--load", dest="load_file", help="load the model from the given file", metavar="MODEL_FILE")
parser.add_option("-p", "--plot", dest="plot_results", action="store_true", help="plot the eval results")
parser.add_option("-d", "--dump_predictions", dest="dump_predictions", action="store_true", help="print feature vectors and prediction vectors for test set")
parser.add_option("-e", "--evaluate", dest="evaluate", action="store_true", help="run k-fold cross validation on the data")
parser.add_option("-m", "--model", dest="model_type", help="run with the given model type", metavar="MODEL_TYPE")
(options, args) = parser.parse_args()
# load various corpora and labels
approval_ratings, political_article_corpora = init_corpora()
print(len(political_article_corpora.keys()))
features_by_day = corpora_to_day_features(political_article_corpora)
print("Number of days of data: " + str(len(features_by_day.items())))
features_by_range = combine_day_ranges(features_by_day, approval_ratings)
X, Y = match_features_to_labels(features_by_range, approval_ratings)
print("Number of feature vectors (ideally this is # days - moving_range_size + 1): " + str(len(X)))
X_train = []
X_test = []
X_val = []
Y_train = []
Y_test = []
Y_val = []
if not Config.TRAIN_TEST_CONSECUTIVE:
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=Config.TRAINING_PARTITION)
else:
X_data, Y_data = split_data(X, Y)
X_train, X_test, X_val = X_data
Y_train, Y_test, Y_val = Y_data
# setup model and configurations
model = None
model_type = ""
# train new model (only if no model is loaded)
if options.model_type is None or options.model_type == RegressionModels.LINEAR_REGRESSION.value:
model_type = "Linear Regression"
if not options.evaluate:
model = LinearRegressionModel([X, Y]) # when not evaluating, use entire data
else:
model = LinearRegressionModel([X_train, Y_train])
elif options.model_type == RegressionModels.MLP.value:
model_type = "MLP Regression"
if not options.evaluate:
model = MLPRegressionModel([X, Y]) # when not evaluating, use entire data
else:
model = MLPRegressionModel([X_train, Y_train])
elif options.model_type == RegressionModels.LSTM.value:
# we need to re-match our data since LSTM expects sequential data per day
ranged_features = multid_combine_day_ranges(features_by_day)
X, Y = match_features_to_labels(ranged_features, approval_ratings)
X_data, Y_data = split_data(X, Y)
X_train, X_test, X_val = X_data
Y_train, Y_test, Y_val = Y_data
model_type = "LSTM Regression"
lstm_file = "lstm_models/LSTM_EPOCH_195"
if not options.evaluate:
model = LSTMRegressionModel([X, Y], [X, Y], saved_model=lstm_file)
else:
model = LSTMRegressionModel(train_data=[X_train, Y_train], test_data=[X_test, Y_test], saved_model=lstm_file)
model_name = "TEMP_MODEL_" + str(datetime.datetime.now()).replace(" ", "_")
if options.load_file is not None:
model = RegressionModel.load(options.load_file)
model_name = options.load_file
elif options.model_type != RegressionModels.LSTM.value:
print("Training model...")
model.train()
print("Done.")
model_name = "TEMP_MODEL_" + str(datetime.datetime.now()).replace(" ", "_")
if options.save:
model_name = "TEMP_MODEL_" + str(datetime.datetime.now()).replace(" ", "_")
model.save(model_name)
if options.evaluate:
eval_file = open(Paths.EVAL_RESULTS_PATH.value + model_name + ".txt", 'w')
pw(eval_file, "BEGIN MODEL ANALYSIS FOR: " + model_name + " of type " + model_type)
pw(eval_file, "================================================")
pw(eval_file, "")
pw(eval_file, "")
pw(eval_file, "")
MSE_approval = 0.0
MSE_disapproval = 0.0
total_diff_approval = 0.0
total_diff_disapproval = 0.0
first_approval_group_count = 0
second_approval_group_count = 0
third_approval_group_count = 0
fourth_approval_group_count = 0
fifth_approval_group_count = 0
first_disapproval_group_count = 0
second_disapproval_group_count = 0
third_disapproval_group_count = 0
fourth_disapproval_group_count = 0
fifth_disapproval_group_count = 0
approval_over_count = 0
approval_under_count = 0
disapproval_over_count = 0
disapproval_under_count = 0
# keep track of outliers: tuples of the form (feature_vector, label, prediction)
approval_outliers = []
disapproval_outliers = []
X_test = X_val
Y_test = Y_val
for i in range(len(X_train)):
prediction = model.predict([X_train[i]])[0]
diff_approval_signed = prediction[0] - Y_train[i][0]
diff_disapproval_signed = prediction[1] - Y_train[i][1]
diff_approval = math.fabs(diff_approval_signed)
diff_disapproval = math.fabs(diff_disapproval_signed)
percent_diff_approval_signed = diff_approval / Y_train[i][0]
percent_diff_disapproval_signed = diff_disapproval / Y_train[i][1]
percent_diff_approval = math.fabs(percent_diff_approval_signed)
percent_diff_disapproval = math.fabs(percent_diff_disapproval_signed)
MSE_approval += math.pow(diff_approval, 2)
MSE_disapproval += math.pow(diff_disapproval, 2)
total_diff_approval += diff_approval
total_diff_disapproval += diff_disapproval
            # count which 'percentile' bucket the approval result falls in
if percent_diff_approval < Config.FIRST_CUTOFF:
first_approval_group_count += 1
if percent_diff_approval < Config.SECOND_CUTOFF:
second_approval_group_count += 1
if percent_diff_approval < Config.THIRD_CUTOFF:
third_approval_group_count += 1
if percent_diff_approval < Config.FOURTH_CUTOFF:
fourth_approval_group_count += 1
if percent_diff_approval < Config.FIFTH_CUTOFF:
fifth_approval_group_count += 1
            # count which 'percentile' bucket the disapproval result falls in
if percent_diff_disapproval < Config.FIRST_CUTOFF:
first_disapproval_group_count += 1
if percent_diff_disapproval < Config.SECOND_CUTOFF:
second_disapproval_group_count += 1
if percent_diff_disapproval < Config.THIRD_CUTOFF:
third_disapproval_group_count += 1
if percent_diff_disapproval < Config.FOURTH_CUTOFF:
fourth_disapproval_group_count += 1
if percent_diff_disapproval < Config.FIFTH_CUTOFF:
fifth_disapproval_group_count += 1
            # count over- and under-estimates
if diff_approval > Config.LENIENCY:
if diff_approval_signed > 0:
approval_over_count += 1
else:
approval_under_count += 1
if diff_disapproval > Config.LENIENCY:
if diff_disapproval_signed > 0:
disapproval_over_count += 1
else:
disapproval_under_count += 1
# handle outliers
if diff_approval >= Config.OUTLIER_THRESHOLD_HARD:
approval_outliers.append((X_train[i], Y_train[i], prediction[0]))
if diff_disapproval >= Config.OUTLIER_THRESHOLD_HARD:
disapproval_outliers.append((X_train[i], Y_train[i], prediction[1]))
#TODO: Check trend matching (does the directionality/magnitude change correlate with the actual labels)
# This might be difficult given random partitioning
RMSE_approval = math.sqrt(MSE_approval / len(Y_train))
RMSE_disapproval = math.sqrt(MSE_disapproval / len(Y_train))
avg_diff_approval = total_diff_approval / len(Y_train)
avg_diff_disapproval = total_diff_disapproval / len(Y_train)
# print out results:
pw(eval_file, "Evaluation results on {} points of Training Data".format(len(X_train)))
pw(eval_file, "==================================================")
pw(eval_file, "Root Mean Squared Error (Approval): " + str(RMSE_approval))
pw(eval_file, "Root Mean Squared Error (Disapproval): " + str(RMSE_disapproval))
pw(eval_file, "")
pw(eval_file, "Average distance (Approval): " + str(avg_diff_approval))
pw(eval_file, "Average distance (Disapproval): " + str(avg_diff_disapproval))
pw(eval_file, "")
pw(eval_file, "# of approval data points within " + str(Config.FIRST_CUTOFF * 100) + "% of actual: " + str(first_approval_group_count))
pw(eval_file, "# of approval data points within " + str(Config.SECOND_CUTOFF * 100) + "% of actual: " + str(second_approval_group_count))
pw(eval_file, "# of approval data points within " + str(Config.THIRD_CUTOFF * 100) + "% of actual: " + str(third_approval_group_count))
pw(eval_file, "# of approval data points within " + str(Config.FOURTH_CUTOFF * 100) + "% of actual: " + str(fourth_approval_group_count))
pw(eval_file, "# of approval data points within " + str(Config.FIFTH_CUTOFF * 100) + "% of actual: " + str(fifth_approval_group_count))
pw(eval_file, "")
pw(eval_file, "# of disapproval data points within " + str(Config.FIRST_CUTOFF * 100) + "% of actual: " + str(first_disapproval_group_count))
pw(eval_file, "# of disapproval data points within " + str(Config.SECOND_CUTOFF * 100) + "% of actual: " + str(second_disapproval_group_count))
pw(eval_file, "# of disapproval data points within " + str(Config.THIRD_CUTOFF * 100) + "% of actual: " + str(third_disapproval_group_count))
pw(eval_file, "# of disapproval data points within " + str(Config.FOURTH_CUTOFF * 100) + "% of actual: " + str(fourth_disapproval_group_count))
pw(eval_file, "# of disapproval data points within " + str(Config.FIFTH_CUTOFF * 100) + "% of actual: " + str(fifth_disapproval_group_count))
pw(eval_file, "")
pw(eval_file, "# of approval over-estimates: " + str(approval_over_count))
pw(eval_file, "# of approval under-estimates: " + str(approval_under_count))
pw(eval_file, "Ratio of over to under (Approval): " + str(approval_over_count * 1.0 / approval_under_count))
pw(eval_file, "# of disapproval over-estimates: " + str(disapproval_over_count))
pw(eval_file, "# of disapproval under-estimates: " + str(disapproval_under_count))
pw(eval_file, "Ratio of over to under (Disapproval): " + str(disapproval_over_count * 1.0 / disapproval_under_count))
pw(eval_file, "")
pw(eval_file, "# of Outliers (Approval): " + str(len(approval_outliers)))
pw(eval_file, "---------------------------------------------------")
for i in range(len(approval_outliers)):
features, actual, predicted = approval_outliers[i]
pw(eval_file, "Outlier " + str(i) + ": " + str(features) + " => " + str(predicted) + "(when actual is " + str(actual) + ")")
pw(eval_file, "")
pw(eval_file, "# of Outliers (Disapproval): " + str(len(approval_outliers)))
pw(eval_file, "---------------------------------------------------")
for i in range(len(disapproval_outliers)):
features, actual, predicted = disapproval_outliers[i]
pw(eval_file, "Outlier " + str(i) + ": " + str(features) + " => " + str(predicted) + "(when actual is " + str(actual) + ")")
MSE_approval = 0.0
MSE_disapproval = 0.0
total_diff_approval = 0.0
total_diff_disapproval = 0.0
first_approval_group_count = 0
second_approval_group_count = 0
third_approval_group_count = 0
fourth_approval_group_count = 0
fifth_approval_group_count = 0
first_disapproval_group_count = 0
second_disapproval_group_count = 0
third_disapproval_group_count = 0
fourth_disapproval_group_count = 0
fifth_disapproval_group_count = 0
approval_over_count = 0
approval_under_count = 0
disapproval_over_count = 0
disapproval_under_count = 0
# keep track of outliers: tuples of the form (feature_vector, label, prediction)
approval_outliers = []
disapproval_outliers = []
for i in range(len(X_test)):
prediction = model.predict([X_test[i]])[0]
diff_approval_signed = prediction[0] - Y_test[i][0]
diff_disapproval_signed = prediction[1] - Y_test[i][1]
diff_approval = math.fabs(diff_approval_signed)
diff_disapproval = math.fabs(diff_disapproval_signed)
percent_diff_approval_signed = diff_approval / Y_test[i][0]
percent_diff_disapproval_signed = diff_disapproval / Y_test[i][1]
percent_diff_approval = math.fabs(percent_diff_approval_signed)
percent_diff_disapproval = math.fabs(percent_diff_disapproval_signed)
MSE_approval += math.pow(diff_approval, 2)
MSE_disapproval += math.pow(diff_disapproval, 2)
total_diff_approval += diff_approval
total_diff_disapproval += diff_disapproval
            # count which 'percentile' bucket the approval result falls in
if percent_diff_approval < Config.FIRST_CUTOFF:
first_approval_group_count += 1
if percent_diff_approval < Config.SECOND_CUTOFF:
second_approval_group_count += 1
if percent_diff_approval < Config.THIRD_CUTOFF:
third_approval_group_count += 1
if percent_diff_approval < Config.FOURTH_CUTOFF:
fourth_approval_group_count += 1
if percent_diff_approval < Config.FIFTH_CUTOFF:
fifth_approval_group_count += 1
            # count which 'percentile' bucket the disapproval result falls in
if percent_diff_disapproval < Config.FIRST_CUTOFF:
first_disapproval_group_count += 1
if percent_diff_disapproval < Config.SECOND_CUTOFF:
second_disapproval_group_count += 1
if percent_diff_disapproval < Config.THIRD_CUTOFF:
third_disapproval_group_count += 1
if percent_diff_disapproval < Config.FOURTH_CUTOFF:
fourth_disapproval_group_count += 1
if percent_diff_disapproval < Config.FIFTH_CUTOFF:
fifth_disapproval_group_count += 1
            # count over- and under-estimates
if diff_approval > Config.LENIENCY:
if diff_approval_signed > 0:
approval_over_count += 1
else:
approval_under_count += 1
if diff_disapproval > Config.LENIENCY:
if diff_disapproval_signed > 0:
disapproval_over_count += 1
else:
disapproval_under_count += 1
# handle outliers
if diff_approval >= Config.OUTLIER_THRESHOLD_HARD:
approval_outliers.append((X_test[i], Y_test[i], prediction[0]))
if diff_disapproval >= Config.OUTLIER_THRESHOLD_HARD:
disapproval_outliers.append((X_test[i], Y_test[i], prediction[1]))
#TODO: Check trend matching (does the directionality/magnitude change correlate with the actual labels)
# This might be difficult given random partitioning
RMSE_approval = math.sqrt(MSE_approval / len(Y_test))
RMSE_disapproval = math.sqrt(MSE_disapproval / len(Y_test))
avg_diff_approval = total_diff_approval / len(Y_test)
avg_diff_disapproval = total_diff_disapproval / len(Y_test)
# print out results:
pw(eval_file, "Evaluation results on {} points of Test Data".format(len(X_test)))
pw(eval_file, "==================================================")
pw(eval_file, "Root Mean Squared Error (Approval): " + str(RMSE_approval))
pw(eval_file, "Root Mean Squared Error (Disapproval): " + str(RMSE_disapproval))
pw(eval_file, "")
pw(eval_file, "Average distance (Approval): " + str(avg_diff_approval))
pw(eval_file, "Average distance (Disapproval): " + str(avg_diff_disapproval))
pw(eval_file, "")
pw(eval_file, "# of approval data points within " + str(Config.FIRST_CUTOFF * 100) + "% of actual: " + str(first_approval_group_count))
pw(eval_file, "# of approval data points within " + str(Config.SECOND_CUTOFF * 100) + "% of actual: " + str(second_approval_group_count))
pw(eval_file, "# of approval data points within " + str(Config.THIRD_CUTOFF * 100) + "% of actual: " + str(third_approval_group_count))
pw(eval_file, "# of approval data points within " + str(Config.FOURTH_CUTOFF * 100) + "% of actual: " + str(fourth_approval_group_count))
pw(eval_file, "# of approval data points within " + str(Config.FIFTH_CUTOFF * 100) + "% of actual: " + str(fifth_approval_group_count))
pw(eval_file, "")
pw(eval_file, "# of disapproval data points within " + str(Config.FIRST_CUTOFF * 100) + "% of actual: " + str(first_disapproval_group_count))
pw(eval_file, "# of disapproval data points within " + str(Config.SECOND_CUTOFF * 100) + "% of actual: " + str(second_disapproval_group_count))
pw(eval_file, "# of disapproval data points within " + str(Config.THIRD_CUTOFF * 100) + "% of actual: " + str(third_disapproval_group_count))
pw(eval_file, "# of disapproval data points within " + str(Config.FOURTH_CUTOFF * 100) + "% of actual: " + str(fourth_disapproval_group_count))
pw(eval_file, "# of disapproval data points within " + str(Config.FIFTH_CUTOFF * 100) + "% of actual: " + str(fifth_disapproval_group_count))
pw(eval_file, "")
pw(eval_file, "# of approval over-estimates: " + str(approval_over_count))
pw(eval_file, "# of approval under-estimates: " + str(approval_under_count))
pw(eval_file, "Ratio of over to under (Approval): " + str(approval_over_count * 1.0 / approval_under_count))
pw(eval_file, "# of disapproval over-estimates: " + str(disapproval_over_count))
pw(eval_file, "# of disapproval under-estimates: " + str(disapproval_under_count))
pw(eval_file, "Ratio of over to under (Disapproval): " + str(disapproval_over_count * 1.0 / disapproval_under_count))
pw(eval_file, "")
pw(eval_file, "# of Outliers (Approval): " + str(len(approval_outliers)))
pw(eval_file, "---------------------------------------------------")
for i in range(len(approval_outliers)):
features, actual, predicted = approval_outliers[i]
pw(eval_file, "Outlier " + str(i) + ": " + str(features) + " => " + str(predicted) + "(when actual is " + str(actual) + ")")
pw(eval_file, "")
pw(eval_file, "# of Outliers (Disapproval): " + str(len(approval_outliers)))
pw(eval_file, "---------------------------------------------------")
for i in range(len(disapproval_outliers)):
features, actual, predicted = disapproval_outliers[i]
pw(eval_file, "Outlier " + str(i) + ": " + str(features) + " => " + str(predicted) + "(when actual is " + str(actual) + ")")
pw(eval_file, "")
pw(eval_file, "")
pw(eval_file, "========================================================")
if options.model_type != RegressionModels.LSTM.value:
pw(eval_file, "K-fold cross validation scores: ")
k_fold_scores = cross_val_score(model.model, X, Y, n_jobs=-1, cv=4, scoring="neg_mean_squared_error")
pw(eval_file, str(k_fold_scores))
eval_file.close()
# ------------------------ Plotting Results ----------------------------------------
if options.plot_results:
actual_approval = []
actual_disapproval = []
predict_approval = []
predict_disapproval = []
axis_vals = []
for label in Y_test:
actual_approval.append(label[0])
actual_disapproval.append(label[1])
for i in range(len(X_test)):
prediction = model.predict([X_test[i]])[0]
if options.dump_predictions:
print("Predicting day " + str(i) + " given: " + str(X_test[i]))
print("Output: " + str(prediction))
predict_approval.append(prediction[0])
predict_disapproval.append(prediction[1])
if options.plot_results:
axis_vals.append(i)
plt.figure(1)
# red is actual, blue is predicted
print("RED VALUES ARE ACTUAL - BLUE VALUES ARE PREDICTED") # just a nice console reminder
plt.subplot(211)
approval_actual, = plt.plot(axis_vals, actual_approval, 'ro', markersize=4)
approval_predicted, = plt.plot(axis_vals, predict_approval, 'bo', markersize=4)
plt.legend([approval_actual, approval_predicted], ["Actual", "Predicted"], loc=2, borderaxespad=0.)
plt.ylabel('Approval percentage')
axes = plt.gca()
axes.set_ylim([20, 70])
plt.subplot(212)
disapproval_actual, = plt.plot(axis_vals, actual_disapproval, 'ro', markersize=4)
disapproval_predicted, = plt.plot(axis_vals, predict_disapproval, 'bo', markersize=4)
        plt.legend([disapproval_actual, disapproval_predicted], ["Actual", "Predicted"], loc=2, borderaxespad=0.)
plt.ylabel('Disapproval percentage')
axes = plt.gca()
axes.set_ylim([20, 70])
plt.show()
config_params = [
"CORPUS_NAME",
"POLL_DELAY",
"DAY_RANGE",
"SENTIMENT_ANALYSIS_METHOD",
"TOPIC_EXTRACTION_METHOD",
"NUM_TOPICS",
"REGRESSION_MODEL",
"NUM_LAYERS",
"YEARS"
]
plt.savefig(os.path.join(Config.PLOT_DIR, (Config.dump_config(config_params) + ".png")))
        if options.evaluate and options.model_type != RegressionModels.LSTM.value:
            # k_fold_scores only exists when cross-validation was run above
            pickle.dump(k_fold_scores, open(os.path.join(Config.PLOT_DIR, Config.dump_config(config_params) + "_k_fold_scores_negmse.txt"), "wb"))
|
|
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Generate template values for attributes.
Extends IdlType with property |constructor_type_name|.
Design doc: http://www.chromium.org/developers/design-documents/idl-compiler
"""
import idl_types
from idl_types import inherits_interface
from v8_globals import includes, interfaces
import v8_types
import v8_utilities
from v8_utilities import (cpp_name_or_partial, capitalize, cpp_name, has_extended_attribute,
has_extended_attribute_value, scoped_name, strip_suffix,
uncapitalize, extended_attribute_value_as_list)
def attribute_context(interface, attribute):
idl_type = attribute.idl_type
base_idl_type = idl_type.base_type
extended_attributes = attribute.extended_attributes
idl_type.add_includes_for_type()
# [CheckSecurity]
is_check_security_for_node = 'CheckSecurity' in extended_attributes
if is_check_security_for_node:
includes.add('bindings/core/v8/BindingSecurity.h')
# [CustomElementCallbacks], [Reflect]
is_custom_element_callbacks = 'CustomElementCallbacks' in extended_attributes
is_reflect = 'Reflect' in extended_attributes
if is_custom_element_callbacks or is_reflect:
includes.add('core/dom/custom/CustomElementProcessingStack.h')
# [PerWorldBindings]
if 'PerWorldBindings' in extended_attributes:
assert idl_type.is_wrapper_type or 'LogActivity' in extended_attributes, '[PerWorldBindings] should only be used with wrapper types: %s.%s' % (interface.name, attribute.name)
# [TypeChecking]
has_type_checking_unrestricted = (
(has_extended_attribute_value(interface, 'TypeChecking', 'Unrestricted') or
has_extended_attribute_value(attribute, 'TypeChecking', 'Unrestricted')) and
idl_type.name in ('Float', 'Double'))
# [ImplementedInPrivateScript]
is_implemented_in_private_script = 'ImplementedInPrivateScript' in extended_attributes
if is_implemented_in_private_script:
includes.add('bindings/core/v8/PrivateScriptRunner.h')
includes.add('core/frame/LocalFrame.h')
includes.add('platform/ScriptForbiddenScope.h')
# [OnlyExposedToPrivateScript]
is_only_exposed_to_private_script = 'OnlyExposedToPrivateScript' in extended_attributes
if (base_idl_type == 'EventHandler' and
interface.name in ['Window', 'WorkerGlobalScope'] and
attribute.name == 'onerror'):
includes.add('bindings/core/v8/V8ErrorHandler.h')
context = {
'access_control_list': access_control_list(attribute),
'activity_logging_world_list_for_getter': v8_utilities.activity_logging_world_list(attribute, 'Getter'), # [ActivityLogging]
'activity_logging_world_list_for_setter': v8_utilities.activity_logging_world_list(attribute, 'Setter'), # [ActivityLogging]
'activity_logging_world_check': v8_utilities.activity_logging_world_check(attribute), # [ActivityLogging]
'argument_cpp_type': idl_type.cpp_type_args(used_as_rvalue_type=True),
'cached_attribute_validation_method': extended_attributes.get('CachedAttribute'),
'conditional_string': v8_utilities.conditional_string(attribute),
'constructor_type': idl_type.constructor_type_name
if is_constructor_attribute(attribute) else None,
'cpp_name': cpp_name(attribute),
'cpp_type': idl_type.cpp_type,
'cpp_type_initializer': idl_type.cpp_type_initializer,
'deprecate_as': v8_utilities.deprecate_as(attribute), # [DeprecateAs]
'enum_validation_expression': idl_type.enum_validation_expression,
'exposed_test': v8_utilities.exposed(attribute, interface), # [Exposed]
'has_custom_getter': has_custom_getter(attribute),
'has_custom_setter': has_custom_setter(attribute),
'has_type_checking_unrestricted': has_type_checking_unrestricted,
'idl_type': str(idl_type), # need trailing [] on array for Dictionary::ConversionContext::setConversionType
'is_call_with_execution_context': v8_utilities.has_extended_attribute_value(attribute, 'CallWith', 'ExecutionContext'),
'is_call_with_script_state': v8_utilities.has_extended_attribute_value(attribute, 'CallWith', 'ScriptState'),
'is_check_security_for_node': is_check_security_for_node,
'is_custom_element_callbacks': is_custom_element_callbacks,
'is_expose_js_accessors': 'ExposeJSAccessors' in extended_attributes,
'is_getter_raises_exception': # [RaisesException]
'RaisesException' in extended_attributes and
extended_attributes['RaisesException'] in (None, 'Getter'),
'is_implemented_in_private_script': is_implemented_in_private_script,
'is_initialized_by_event_constructor':
'InitializedByEventConstructor' in extended_attributes,
'is_keep_alive_for_gc': is_keep_alive_for_gc(interface, attribute),
'is_nullable': idl_type.is_nullable,
'is_explicit_nullable': idl_type.is_explicit_nullable,
'is_partial_interface_member':
'PartialInterfaceImplementedAs' in extended_attributes,
'is_per_world_bindings': 'PerWorldBindings' in extended_attributes,
'is_read_only': attribute.is_read_only,
'is_reflect': is_reflect,
'is_replaceable': 'Replaceable' in attribute.extended_attributes,
'is_static': attribute.is_static,
'is_url': 'URL' in extended_attributes,
'is_unforgeable': 'Unforgeable' in extended_attributes,
'use_output_parameter_for_result': idl_type.use_output_parameter_for_result,
'measure_as': v8_utilities.measure_as(attribute), # [MeasureAs]
'name': attribute.name,
'only_exposed_to_private_script': is_only_exposed_to_private_script,
'per_context_enabled_function': v8_utilities.per_context_enabled_function_name(attribute), # [PerContextEnabled]
'private_script_v8_value_to_local_cpp_value': idl_type.v8_value_to_local_cpp_value(
extended_attributes, 'v8Value', 'cppValue', isolate='scriptState->isolate()', used_in_private_script=True),
'property_attributes': property_attributes(attribute),
'put_forwards': 'PutForwards' in extended_attributes,
'reflect_empty': extended_attributes.get('ReflectEmpty'),
'reflect_invalid': extended_attributes.get('ReflectInvalid', ''),
'reflect_missing': extended_attributes.get('ReflectMissing'),
'reflect_only': extended_attribute_value_as_list(attribute, 'ReflectOnly'),
'runtime_enabled_function': v8_utilities.runtime_enabled_function_name(attribute), # [RuntimeEnabled]
'setter_callback': setter_callback_name(interface, attribute),
'should_be_exposed_to_script': not (is_implemented_in_private_script and is_only_exposed_to_private_script),
'world_suffixes': ['', 'ForMainWorld']
if 'PerWorldBindings' in extended_attributes
else [''], # [PerWorldBindings]
}
if is_constructor_attribute(attribute):
constructor_getter_context(interface, attribute, context)
return context
if not has_custom_getter(attribute):
getter_context(interface, attribute, context)
if (not has_custom_setter(attribute) and
(not attribute.is_read_only or 'PutForwards' in extended_attributes)):
setter_context(interface, attribute, context)
return context
################################################################################
# Getter
################################################################################
def getter_context(interface, attribute, context):
idl_type = attribute.idl_type
base_idl_type = idl_type.base_type
extended_attributes = attribute.extended_attributes
cpp_value = getter_expression(interface, attribute, context)
# Normally we can inline the function call into the return statement to
# avoid the overhead of using a Ref<> temporary, but for some cases
# (nullable types, EventHandler, [CachedAttribute], or if there are
# exceptions), we need to use a local variable.
# FIXME: check if compilers are smart enough to inline this, and if so,
# always use a local variable (for readability and CG simplicity).
release = False
if 'ImplementedInPrivateScript' in extended_attributes:
if (not idl_type.is_wrapper_type and
not idl_type.is_basic_type and
not idl_type.is_enum):
            raise Exception('Private scripts support only primitive types and DOM wrappers.')
context['cpp_value_original'] = cpp_value
cpp_value = 'result'
# EventHandler has special handling
if base_idl_type != 'EventHandler':
release = idl_type.release
elif (idl_type.is_explicit_nullable or
base_idl_type == 'EventHandler' or
'CachedAttribute' in extended_attributes or
'ReflectOnly' in extended_attributes or
context['is_keep_alive_for_gc'] or
context['is_getter_raises_exception']):
context['cpp_value_original'] = cpp_value
cpp_value = 'cppValue'
# EventHandler has special handling
if base_idl_type != 'EventHandler':
release = idl_type.release
def v8_set_return_value_statement(for_main_world=False):
if context['is_keep_alive_for_gc']:
return 'v8SetReturnValue(info, wrapper)'
return idl_type.v8_set_return_value(cpp_value, extended_attributes=extended_attributes, script_wrappable='impl', release=release, for_main_world=for_main_world)
context.update({
'cpp_value': cpp_value,
'cpp_value_to_v8_value': idl_type.cpp_value_to_v8_value(
cpp_value=cpp_value, creation_context='info.Holder()',
extended_attributes=extended_attributes),
'v8_set_return_value_for_main_world': v8_set_return_value_statement(for_main_world=True),
'v8_set_return_value': v8_set_return_value_statement(),
})
def getter_expression(interface, attribute, context):
arguments = []
this_getter_base_name = getter_base_name(interface, attribute, arguments)
getter_name = scoped_name(interface, attribute, this_getter_base_name)
if 'ImplementedInPrivateScript' in attribute.extended_attributes:
arguments.append('toFrameIfNotDetached(info.GetIsolate()->GetCurrentContext())')
arguments.append('impl')
arguments.append('&result')
arguments.extend(v8_utilities.call_with_arguments(
attribute.extended_attributes.get('CallWith')))
# Members of IDL partial interface definitions are implemented in C++ as
# static member functions, which for instance members (non-static members)
# take *impl as their first argument
if ('PartialInterfaceImplementedAs' in attribute.extended_attributes and
not 'ImplementedInPrivateScript' in attribute.extended_attributes and
not attribute.is_static):
arguments.append('*impl')
if attribute.idl_type.is_explicit_nullable:
arguments.append('isNull')
if context['is_getter_raises_exception']:
arguments.append('exceptionState')
if attribute.idl_type.use_output_parameter_for_result:
arguments.append('result')
return '%s(%s)' % (getter_name, ', '.join(arguments))
CONTENT_ATTRIBUTE_GETTER_NAMES = {
'boolean': 'fastHasAttribute',
'long': 'getIntegralAttribute',
'unsigned long': 'getUnsignedIntegralAttribute',
}
def getter_base_name(interface, attribute, arguments):
extended_attributes = attribute.extended_attributes
if 'ImplementedInPrivateScript' in extended_attributes:
return '%sAttributeGetter' % uncapitalize(cpp_name(attribute))
if 'Reflect' not in extended_attributes:
return uncapitalize(cpp_name(attribute))
content_attribute_name = extended_attributes['Reflect'] or attribute.name.lower()
if content_attribute_name in ['class', 'id', 'name']:
# Special-case for performance optimization.
return 'get%sAttribute' % content_attribute_name.capitalize()
arguments.append(scoped_content_attribute_name(interface, attribute))
base_idl_type = attribute.idl_type.base_type
if base_idl_type in CONTENT_ATTRIBUTE_GETTER_NAMES:
return CONTENT_ATTRIBUTE_GETTER_NAMES[base_idl_type]
if 'URL' in attribute.extended_attributes:
return 'getURLAttribute'
return 'fastGetAttribute'
def is_keep_alive_for_gc(interface, attribute):
idl_type = attribute.idl_type
base_idl_type = idl_type.base_type
extended_attributes = attribute.extended_attributes
return (
# For readonly attributes, for performance reasons we keep the attribute
# wrapper alive while the owner wrapper is alive, because the attribute
# never changes.
(attribute.is_read_only and
idl_type.is_wrapper_type and
# There are some exceptions, however:
not(
# Node lifetime is managed by object grouping.
inherits_interface(interface.name, 'Node') or
inherits_interface(base_idl_type, 'Node') or
# A self-reference is unnecessary.
attribute.name == 'self' or
# FIXME: Remove these hard-coded hacks.
base_idl_type in ['EventTarget', 'Window'] or
base_idl_type.startswith(('HTML', 'SVG')))))
################################################################################
# Setter
################################################################################
def setter_context(interface, attribute, context):
if 'PutForwards' in attribute.extended_attributes:
# Use target interface and attribute in place of original interface and
# attribute from this point onwards.
target_interface_name = attribute.idl_type.base_type
target_attribute_name = attribute.extended_attributes['PutForwards']
interface = interfaces[target_interface_name]
try:
attribute = next(candidate
for candidate in interface.attributes
if candidate.name == target_attribute_name)
except StopIteration:
            raise Exception('[PutForwards] target not found:\n'
'Attribute "%s" is not present in interface "%s"' %
(target_attribute_name, target_interface_name))
extended_attributes = attribute.extended_attributes
idl_type = attribute.idl_type
# [RaisesException], [RaisesException=Setter]
is_setter_raises_exception = (
'RaisesException' in extended_attributes and
extended_attributes['RaisesException'] in [None, 'Setter'])
# [TypeChecking=Interface]
has_type_checking_interface = (
(has_extended_attribute_value(interface, 'TypeChecking', 'Interface') or
has_extended_attribute_value(attribute, 'TypeChecking', 'Interface')) and
idl_type.is_wrapper_type)
context.update({
'has_setter_exception_state':
is_setter_raises_exception or has_type_checking_interface or
context['has_type_checking_unrestricted'] or
idl_type.v8_conversion_needs_exception_state,
'has_type_checking_interface': has_type_checking_interface,
'is_setter_call_with_execution_context': v8_utilities.has_extended_attribute_value(
attribute, 'SetterCallWith', 'ExecutionContext'),
'is_setter_raises_exception': is_setter_raises_exception,
'private_script_cpp_value_to_v8_value': idl_type.cpp_value_to_v8_value(
'cppValue', isolate='scriptState->isolate()',
creation_context='scriptState->context()->Global()'),
'v8_value_to_local_cpp_value': idl_type.v8_value_to_local_cpp_value(
extended_attributes, 'v8Value', 'cppValue'),
})
# setter_expression() depends on context values we set above.
context['cpp_setter'] = setter_expression(interface, attribute, context)
def setter_expression(interface, attribute, context):
extended_attributes = attribute.extended_attributes
arguments = v8_utilities.call_with_arguments(
extended_attributes.get('SetterCallWith') or
extended_attributes.get('CallWith'))
this_setter_base_name = setter_base_name(interface, attribute, arguments)
setter_name = scoped_name(interface, attribute, this_setter_base_name)
# Members of IDL partial interface definitions are implemented in C++ as
# static member functions, which for instance members (non-static members)
# take *impl as their first argument
if ('PartialInterfaceImplementedAs' in extended_attributes and
not 'ImplementedInPrivateScript' in extended_attributes and
not attribute.is_static):
arguments.append('*impl')
idl_type = attribute.idl_type
if 'ImplementedInPrivateScript' in extended_attributes:
arguments.append('toFrameIfNotDetached(info.GetIsolate()->GetCurrentContext())')
arguments.append('impl')
arguments.append('cppValue')
elif idl_type.base_type == 'EventHandler':
getter_name = scoped_name(interface, attribute, cpp_name(attribute))
context['event_handler_getter_expression'] = '%s(%s)' % (
getter_name, ', '.join(arguments))
if (interface.name in ['Window', 'WorkerGlobalScope'] and
attribute.name == 'onerror'):
includes.add('bindings/core/v8/V8ErrorHandler.h')
arguments.append('V8EventListenerList::findOrCreateWrapper<V8ErrorHandler>(v8Value, true, ScriptState::current(info.GetIsolate()))')
else:
arguments.append('V8EventListenerList::getEventListener(ScriptState::current(info.GetIsolate()), v8Value, true, ListenerFindOrCreate)')
elif idl_type.is_interface_type:
# FIXME: should be able to eliminate WTF::getPtr in most or all cases
arguments.append('WTF::getPtr(cppValue)')
else:
arguments.append('cppValue')
if context['is_setter_raises_exception']:
arguments.append('exceptionState')
return '%s(%s)' % (setter_name, ', '.join(arguments))
CONTENT_ATTRIBUTE_SETTER_NAMES = {
'boolean': 'setBooleanAttribute',
'long': 'setIntegralAttribute',
'unsigned long': 'setUnsignedIntegralAttribute',
}
def setter_base_name(interface, attribute, arguments):
if 'ImplementedInPrivateScript' in attribute.extended_attributes:
return '%sAttributeSetter' % uncapitalize(cpp_name(attribute))
if 'Reflect' not in attribute.extended_attributes:
return 'set%s' % capitalize(cpp_name(attribute))
arguments.append(scoped_content_attribute_name(interface, attribute))
base_idl_type = attribute.idl_type.base_type
if base_idl_type in CONTENT_ATTRIBUTE_SETTER_NAMES:
return CONTENT_ATTRIBUTE_SETTER_NAMES[base_idl_type]
return 'setAttribute'
def scoped_content_attribute_name(interface, attribute):
content_attribute_name = attribute.extended_attributes['Reflect'] or attribute.name.lower()
if interface.name.startswith('SVG'):
# SVG's xmlbase/xmlspace/xmllang need special behavior, i.e.
# it is in XMLNames namespace and the generated attribute has no xml prefix.
if attribute.name.startswith('xml'):
namespace = 'XMLNames'
content_attribute_name = content_attribute_name[3:]
else:
namespace = 'SVGNames'
else:
namespace = 'HTMLNames'
includes.add('core/%s.h' % namespace)
return '%s::%sAttr' % (namespace, content_attribute_name)
################################################################################
# Attribute configuration
################################################################################
# [Replaceable]
def setter_callback_name(interface, attribute):
cpp_class_name = cpp_name(interface)
cpp_class_name_or_partial = cpp_name_or_partial(interface)
extended_attributes = attribute.extended_attributes
if (('Replaceable' in extended_attributes and
'PutForwards' not in extended_attributes) or
is_constructor_attribute(attribute)):
return '%sV8Internal::%sForceSetAttributeOnThisCallback' % (
cpp_class_name_or_partial, cpp_class_name)
if attribute.is_read_only and 'PutForwards' not in extended_attributes:
return '0'
return '%sV8Internal::%sAttributeSetterCallback' % (cpp_class_name_or_partial, attribute.name)
# [DoNotCheckSecurity], [Unforgeable]
def access_control_list(attribute):
extended_attributes = attribute.extended_attributes
access_control = []
if 'DoNotCheckSecurity' in extended_attributes:
do_not_check_security = extended_attributes['DoNotCheckSecurity']
if do_not_check_security == 'Setter':
access_control.append('v8::ALL_CAN_WRITE')
else:
access_control.append('v8::ALL_CAN_READ')
if (not attribute.is_read_only or
'Replaceable' in extended_attributes):
access_control.append('v8::ALL_CAN_WRITE')
if 'Unforgeable' in extended_attributes:
access_control.append('v8::PROHIBITS_OVERWRITING')
return access_control or ['v8::DEFAULT']
# [NotEnumerable], [Unforgeable]
def property_attributes(attribute):
extended_attributes = attribute.extended_attributes
property_attributes_list = []
if ('NotEnumerable' in extended_attributes or
is_constructor_attribute(attribute)):
property_attributes_list.append('v8::DontEnum')
if 'Unforgeable' in extended_attributes:
property_attributes_list.append('v8::DontDelete')
return property_attributes_list or ['v8::None']
# [Custom], [Custom=Getter]
def has_custom_getter(attribute):
extended_attributes = attribute.extended_attributes
return ('Custom' in extended_attributes and
extended_attributes['Custom'] in [None, 'Getter'])
# [Custom], [Custom=Setter]
def has_custom_setter(attribute):
extended_attributes = attribute.extended_attributes
return (not attribute.is_read_only and
'Custom' in extended_attributes and
extended_attributes['Custom'] in [None, 'Setter'])
################################################################################
# Constructors
################################################################################
idl_types.IdlType.constructor_type_name = property(
# FIXME: replace this with a [ConstructorAttribute] extended attribute
lambda self: strip_suffix(self.base_type, 'Constructor'))
def is_constructor_attribute(attribute):
# FIXME: replace this with [ConstructorAttribute] extended attribute
return attribute.idl_type.name.endswith('Constructor')
def constructor_getter_context(interface, attribute, context):
context['needs_constructor_getter_callback'] = context['measure_as'] or context['deprecate_as']
|
|
# swift_build_support/cmake.py - Detect host machine's CMake -*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ----------------------------------------------------------------------------
#
# Find the path to a CMake executable on the host machine.
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import
import os
import platform
import re
from numbers import Number
from . import shell
class CMakeOptions(object):
"""List like object used to define cmake options
"""
def __init__(self, initial_options=None):
self._options = []
if initial_options is not None:
self.extend(initial_options)
def define(self, var, value):
"""Utility to define cmake options in this object.
opts.define("FOO", "BAR") # -> -DFOO=BAR
opts.define("FLAG:BOOL", True) # -> -FLAG:BOOL=TRUE
"""
if var.endswith(':BOOL') or isinstance(value, bool):
value = self.true_false(value)
if value is None:
value = ""
elif not isinstance(value, (str, Number)):
raise ValueError('define: invalid value for key %s: %s (%s)' %
(var, value, type(value)))
self._options.append('-D%s=%s' % (var, value))
def extend(self, tuples_or_options):
if isinstance(tuples_or_options, CMakeOptions):
self += tuples_or_options
else:
for (variable, value) in tuples_or_options:
self.define(variable, value)
@staticmethod
def true_false(value):
if hasattr(value, 'lower'):
value = value.lower()
if value in [True, 1, 'true', 'yes', '1']:
return 'TRUE'
if value in [False, 0, 'false', 'no', '0']:
return 'FALSE'
raise ValueError("true_false: invalid value: %s" % value)
def __len__(self):
return self._options.__len__()
def __iter__(self):
return self._options.__iter__()
def __contains__(self, item):
return self._options.__contains__(item)
def __add__(self, other):
ret = CMakeOptions()
ret._options += self._options
ret._options += list(other)
return ret
def __iadd__(self, other):
self._options += list(other)
return self
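# Hedged usage sketch for CMakeOptions; the variable names and generator below are illustrative,
# not real build settings.
def _example_cmake_options():
    opts = CMakeOptions([('CMAKE_BUILD_TYPE', 'Release')])
    opts.define('LLVM_ENABLE_ASSERTIONS:BOOL', True)
    opts += ['-G', 'Ninja']
    # -> ['-DCMAKE_BUILD_TYPE=Release', '-DLLVM_ENABLE_ASSERTIONS:BOOL=TRUE', '-G', 'Ninja']
    return list(opts)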
class CMake(object):
def __init__(self, args, toolchain):
self.args = args
self.toolchain = toolchain
def common_options(self):
"""Return options used for all products, including LLVM/Clang
"""
args = self.args
toolchain = self.toolchain
options = CMakeOptions()
define = options.define
options += ['-G', args.cmake_generator]
sanitizers = []
if args.enable_asan:
sanitizers.append('Address')
if args.enable_ubsan:
sanitizers.append('Undefined')
if args.enable_tsan:
sanitizers.append('Thread')
if args.enable_lsan:
sanitizers.append('Leaks')
if sanitizers:
define("LLVM_USE_SANITIZER", ";".join(sanitizers))
if args.enable_sanitize_coverage:
define("LLVM_USE_SANITIZE_COVERAGE", "ON")
if args.export_compile_commands:
define("CMAKE_EXPORT_COMPILE_COMMANDS", "ON")
if args.distcc:
define("CMAKE_C_COMPILER_LAUNCHER:PATH", toolchain.distcc)
define("CMAKE_CXX_COMPILER_LAUNCHER:PATH", toolchain.distcc)
if args.cmake_c_launcher:
define("CMAKE_C_COMPILER_LAUNCHER:PATH", args.cmake_c_launcher)
if args.cmake_cxx_launcher:
define("CMAKE_CXX_COMPILER_LAUNCHER:PATH", args.cmake_cxx_launcher)
define("CMAKE_C_COMPILER:PATH", toolchain.cc)
define("CMAKE_CXX_COMPILER:PATH", toolchain.cxx)
define("CMAKE_LIBTOOL:PATH", toolchain.libtool)
if args.cmake_generator == 'Xcode':
define("CMAKE_CONFIGURATION_TYPES",
"Debug;Release;MinSizeRel;RelWithDebInfo")
if args.clang_user_visible_version:
major, minor, patch = \
args.clang_user_visible_version.components[0:3]
define("LLVM_VERSION_MAJOR:STRING", major)
define("LLVM_VERSION_MINOR:STRING", minor)
define("LLVM_VERSION_PATCH:STRING", patch)
define("CLANG_VERSION_MAJOR:STRING", major)
define("CLANG_VERSION_MINOR:STRING", minor)
define("CLANG_VERSION_PATCH:STRING", patch)
if args.build_ninja and args.cmake_generator == 'Ninja':
define('CMAKE_MAKE_PROGRAM', toolchain.ninja)
elif args.cmake_generator == 'Ninja' and toolchain.ninja is not None:
define('CMAKE_MAKE_PROGRAM', toolchain.ninja)
return options
def build_args(self):
"""Return arguments to the build tool used for all products
"""
args = self.args
toolchain = self.toolchain
jobs = args.build_jobs
if args.distcc:
jobs = shell.capture([toolchain.distcc, '-j'],
dry_run=False, echo=False).rstrip()
build_args = list(args.build_args)
if args.cmake_generator == 'Ninja':
build_args += ['-j%s' % jobs]
if args.verbose_build:
build_args += ['-v']
elif args.cmake_generator == 'Unix Makefiles':
build_args += ['-j%s' % jobs]
if args.verbose_build:
build_args += ['VERBOSE=1']
elif args.cmake_generator == 'Xcode':
build_args += ['-parallelizeTargets',
'-jobs', str(jobs)]
return build_args
# Determine the version of the installed CMake binary.
def installed_cmake_version(self, cmake_binary):
version = shell.capture([cmake_binary, '--version'], dry_run=False,
echo=True, optional=True)
(c_major, c_minor, c_patch) = (0, 0, 0)
if version is not None:
x = re.findall(r'cmake version (\d+)\.(\d+)\.(\d+)',
version.rstrip())
if len(x) == 1:
(c_major, c_minor, c_patch) = map(int, x[0])
return (c_major, c_minor, c_patch)
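    # Hedged sketch of the parse above (the sample string is illustrative):
    #   re.findall(r'cmake version (\d+)\.(\d+)\.(\d+)', 'cmake version 3.16.5')
    #   -> [('3', '16', '5')], i.e. (3, 16, 5) after map(int, ...)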
# Determine the version of the checked out CMake source.
def cmake_source_version(self, cmake_source_dir):
cmake_version_file = os.path.join(cmake_source_dir, 'Source',
'CMakeVersion.cmake')
major = -1
minor = -1
patch = -1
        with open(cmake_version_file, "r") as version_file:
            for line in version_file.readlines():
                m = re.findall(r'set\(CMake_VERSION_MAJOR (\d+)\)', line)
                if len(m) == 1:
                    major = int(m[0])
                    continue
                m = re.findall(r'set\(CMake_VERSION_MINOR (\d+)\)', line)
                if len(m) == 1:
                    minor = int(m[0])
                    continue
                m = re.findall(r'set\(CMake_VERSION_PATCH (\d+)\)', line)
                if len(m) == 1:
                    patch = int(m[0])
                    continue
if major == -1 or minor == -1 or patch == -1:
raise RuntimeError("Cant determine CMake version from %s"
% cmake_version_file)
return (major, minor, patch)
# Build CMake from source.
def build_cmake(self, source_root, build_root):
cmake_bootstrap = os.path.join(source_root, 'cmake', 'bootstrap')
if hasattr(self.args, 'build_script_impl_args'):
for opt in self.args.build_script_impl_args:
m = re.findall('--build-dir=(.*)', opt)
if len(m) == 1:
build_root = m[0]
cmake_build_dir = os.path.join(build_root, 'cmake-%s' %
self.args.host_target)
if not os.path.isdir(cmake_build_dir):
os.makedirs(cmake_build_dir)
cwd = os.getcwd()
os.chdir(cmake_build_dir)
shell.call_without_sleeping([cmake_bootstrap, '--no-qt-gui'],
echo=True)
shell.call_without_sleeping(['make', '-j%s' % self.args.build_jobs],
echo=True)
os.chdir(cwd)
return os.path.join(cmake_build_dir, 'bin', 'cmake')
# For Linux only, determine the version of the installed CMake compared to
# the source and build the source if necessary. Returns the path to the
# cmake binary.
def check_cmake_version(self, source_root, build_root):
if platform.system() != 'Linux':
return
cmake_source_dir = os.path.join(source_root, 'cmake')
# If the source is not checked out then don't attempt to build cmake.
if not os.path.isdir(cmake_source_dir):
return
cmake_binary = 'cmake'
try:
if self.args.cmake is not None:
cmake_binary = self.args.cmake
except AttributeError:
cmake_binary = 'cmake'
installed_ver = self.installed_cmake_version(cmake_binary)
if installed_ver > self.cmake_source_version(cmake_source_dir):
return
else:
# Build CMake from source and return the path to the executable.
return self.build_cmake(source_root, build_root)
|
|
"""Support for UK public transport data provided by transportapi.com."""
from datetime import datetime, timedelta
import logging
import re
import requests
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import CONF_MODE, HTTP_OK, TIME_MINUTES
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
ATTR_ATCOCODE = "atcocode"
ATTR_LOCALITY = "locality"
ATTR_STOP_NAME = "stop_name"
ATTR_REQUEST_TIME = "request_time"
ATTR_NEXT_BUSES = "next_buses"
ATTR_STATION_CODE = "station_code"
ATTR_CALLING_AT = "calling_at"
ATTR_NEXT_TRAINS = "next_trains"
CONF_API_APP_KEY = "app_key"
CONF_API_APP_ID = "app_id"
CONF_QUERIES = "queries"
CONF_ORIGIN = "origin"
CONF_DESTINATION = "destination"
_QUERY_SCHEME = vol.Schema(
{
vol.Required(CONF_MODE): vol.All(cv.ensure_list, [vol.In(["bus", "train"])]),
vol.Required(CONF_ORIGIN): cv.string,
vol.Required(CONF_DESTINATION): cv.string,
}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_API_APP_ID): cv.string,
vol.Required(CONF_API_APP_KEY): cv.string,
vol.Required(CONF_QUERIES): [_QUERY_SCHEME],
}
)
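# Hedged example configuration for this platform (comment only; all values are placeholders,
# not real credentials, stop codes, or station codes):
#
#   sensor:
#     - platform: uk_transport
#       app_id: YOUR_APP_ID
#       app_key: YOUR_APP_KEY
#       queries:
#         - mode: [bus]
#           origin: 340000368SHE      # atcocode of the bus stop
#           destination: Wantage      # direction name used to filter departures
#         - mode: [train]
#           origin: WAT               # CRS code of the origin station
#           destination: CLJ          # a station the train should call at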
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Get the uk_transport sensor."""
sensors = []
number_sensors = len(config.get(CONF_QUERIES))
interval = timedelta(seconds=87 * number_sensors)
for query in config.get(CONF_QUERIES):
if "bus" in query.get(CONF_MODE):
stop_atcocode = query.get(CONF_ORIGIN)
bus_direction = query.get(CONF_DESTINATION)
sensors.append(
UkTransportLiveBusTimeSensor(
config.get(CONF_API_APP_ID),
config.get(CONF_API_APP_KEY),
stop_atcocode,
bus_direction,
interval,
)
)
elif "train" in query.get(CONF_MODE):
station_code = query.get(CONF_ORIGIN)
calling_at = query.get(CONF_DESTINATION)
sensors.append(
UkTransportLiveTrainTimeSensor(
config.get(CONF_API_APP_ID),
config.get(CONF_API_APP_KEY),
station_code,
calling_at,
interval,
)
)
add_entities(sensors, True)
class UkTransportSensor(SensorEntity):
"""
Sensor that reads the UK transport web API.
    transportapi.com provides comprehensive data for UK train, tube and bus
    travel via a simple JSON API. Subclasses of this base class access
    specific types of information.
"""
TRANSPORT_API_URL_BASE = "https://transportapi.com/v3/uk/"
_attr_icon = "mdi:train"
_attr_native_unit_of_measurement = TIME_MINUTES
def __init__(self, name, api_app_id, api_app_key, url):
"""Initialize the sensor."""
self._data = {}
self._api_app_id = api_app_id
self._api_app_key = api_app_key
self._url = self.TRANSPORT_API_URL_BASE + url
self._name = name
self._state = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def native_value(self):
"""Return the state of the sensor."""
return self._state
def _do_api_request(self, params):
"""Perform an API request."""
request_params = dict(
{"app_id": self._api_app_id, "app_key": self._api_app_key}, **params
)
response = requests.get(self._url, params=request_params)
if response.status_code != HTTP_OK:
_LOGGER.warning("Invalid response from API")
elif "error" in response.json():
if "exceeded" in response.json()["error"]:
self._state = "Usage limits exceeded"
if "invalid" in response.json()["error"]:
self._state = "Credentials invalid"
else:
self._data = response.json()
class UkTransportLiveBusTimeSensor(UkTransportSensor):
"""Live bus time sensor from UK transportapi.com."""
_attr_icon = "mdi:bus"
def __init__(self, api_app_id, api_app_key, stop_atcocode, bus_direction, interval):
"""Construct a live bus time sensor."""
self._stop_atcocode = stop_atcocode
self._bus_direction = bus_direction
self._next_buses = []
self._destination_re = re.compile(f"{bus_direction}", re.IGNORECASE)
sensor_name = f"Next bus to {bus_direction}"
stop_url = f"bus/stop/{stop_atcocode}/live.json"
UkTransportSensor.__init__(self, sensor_name, api_app_id, api_app_key, stop_url)
self.update = Throttle(interval)(self._update)
def _update(self):
"""Get the latest live departure data for the specified stop."""
params = {"group": "route", "nextbuses": "no"}
self._do_api_request(params)
if self._data != {}:
self._next_buses = []
for (route, departures) in self._data["departures"].items():
for departure in departures:
if self._destination_re.search(departure["direction"]):
self._next_buses.append(
{
"route": route,
"direction": departure["direction"],
"scheduled": departure["aimed_departure_time"],
"estimated": departure["best_departure_estimate"],
}
)
if self._next_buses:
self._state = min(
_delta_mins(bus["scheduled"]) for bus in self._next_buses
)
else:
self._state = None
@property
def extra_state_attributes(self):
"""Return other details about the sensor state."""
attrs = {}
if self._data is not None:
for key in (
ATTR_ATCOCODE,
ATTR_LOCALITY,
ATTR_STOP_NAME,
ATTR_REQUEST_TIME,
):
attrs[key] = self._data.get(key)
attrs[ATTR_NEXT_BUSES] = self._next_buses
return attrs
class UkTransportLiveTrainTimeSensor(UkTransportSensor):
"""Live train time sensor from UK transportapi.com."""
_attr_icon = "mdi:train"
def __init__(self, api_app_id, api_app_key, station_code, calling_at, interval):
"""Construct a live bus time sensor."""
self._station_code = station_code
self._calling_at = calling_at
self._next_trains = []
sensor_name = f"Next train to {calling_at}"
query_url = f"train/station/{station_code}/live.json"
UkTransportSensor.__init__(
self, sensor_name, api_app_id, api_app_key, query_url
)
self.update = Throttle(interval)(self._update)
def _update(self):
"""Get the latest live departure data for the specified stop."""
params = {
"darwin": "false",
"calling_at": self._calling_at,
"train_status": "passenger",
}
self._do_api_request(params)
self._next_trains = []
if self._data != {}:
if self._data["departures"]["all"] == []:
self._state = "No departures"
else:
for departure in self._data["departures"]["all"]:
self._next_trains.append(
{
"origin_name": departure["origin_name"],
"destination_name": departure["destination_name"],
"status": departure["status"],
"scheduled": departure["aimed_departure_time"],
"estimated": departure["expected_departure_time"],
"platform": departure["platform"],
"operator_name": departure["operator_name"],
}
)
if self._next_trains:
self._state = min(
_delta_mins(train["scheduled"]) for train in self._next_trains
)
else:
self._state = None
@property
def extra_state_attributes(self):
"""Return other details about the sensor state."""
attrs = {}
if self._data is not None:
attrs[ATTR_STATION_CODE] = self._station_code
attrs[ATTR_CALLING_AT] = self._calling_at
if self._next_trains:
attrs[ATTR_NEXT_TRAINS] = self._next_trains
return attrs
def _delta_mins(hhmm_time_str):
"""Calculate time delta in minutes to a time in hh:mm format."""
now = dt_util.now()
hhmm_time = datetime.strptime(hhmm_time_str, "%H:%M")
hhmm_datetime = now.replace(hour=hhmm_time.hour, minute=hhmm_time.minute)
if hhmm_datetime < now:
hhmm_datetime += timedelta(days=1)
delta_mins = (hhmm_datetime - now).total_seconds() // 60
return delta_mins
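# Example of the rollover above (illustrative): at 23:50 a timetabled "00:05"
# would resolve to a time in the past, so a day is added and
# _delta_mins("00:05") comes out at roughly 15 minutes rather than a large
# negative number.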
|
|
#!/usr/bin/env python
"""
Based on waveletanalysis.py
For P. Stabeno
Using Anaconda packaged Python
modifications for confidence intervals based on wave_matlab at
http://paos.colorado.edu/research/wavelets/
### License ###
The MIT License (MIT)
Copyright (c) 2013 Aaron O'Leary ([email protected])
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies
or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
OR OTHER DEALINGS IN THE SOFTWARE.
"""
#Standard packages
import os, datetime
#Science packages
import numpy as np
from netCDF4 import Dataset
#User Packages
import wave_sig
from general_utilities.wavelets_bams.wavelets import WaveletAnalysis
from utilities import ncutilities as ncutil
from utilities import utilities as util
from utilities import constants
import wavelet_analy_plot
__author__ = 'Shaun Bell'
__email__ = '[email protected]'
__created__ = datetime.datetime(2014, 01, 13)
__modified__ = datetime.datetime(2014, 01, 13)
__version__ = "0.1.0"
__status__ = "Development"
"""---------------------------Read in Data File----------------------------------------"""
def reanaly_sfc_press(file_in):
ncfile = file_in
###nc readin/out
nchandle = Dataset(ncfile,'r')
params = ['time','lat', 'lon', 'pres']
time = nchandle.variables[params[0]][:]
lat = nchandle.variables[params[1]][:]
lon = nchandle.variables[params[2]][:]
data = nchandle.variables[params[3]][:,:]
ncutil.ncclose(nchandle)
site_loc = [52.5, 190.0] #52.8, -169.3 for M6 Samalga mooring
ind_lat = np.where(lat == site_loc[0])[0][0]
ind_lon = np.where(lon == site_loc[1])[0][0]
    xx = data[:,ind_lat,ind_lon] / 100 #pa to hPa/mbar
    dt = 1. #data is daily, as in reanaly_sfc_press_multifile below
    time_base = 'days'
# convert time to serial date
base_date = datetime.datetime.fromordinal(1) #time is in hours since 1-1-1
time_delta = datetime.timedelta(hours = 1)
    # there is a -2 day (48 hour) correction factor in the following
    # conversion to calculate the date (unknown why it's needed)
time = [base_date + (int(t - 48) * time_delta) for t in time] #convert to integer for datetime calculation
time = [t.toordinal() for t in time ]
variance = np.var(xx)
#normalize
print 'Variance = %s ' % (variance)
x = (xx - np.mean(xx)) / np.sqrt(variance)
variance = np.var(x)
return (xx, x,dt,np.array(time), variance, time_base)
def reanaly_sfc_press_multifile(files_in):
time = []
xx = []
for ncfile in files_in:
###nc readin/out
nchandle = Dataset(ncfile,'r')
params = ['time','lat', 'lon', 'pres']
time = np.hstack((time , nchandle.variables[params[0]][:]))
lat = nchandle.variables[params[1]][:]
lon = nchandle.variables[params[2]][:]
data = nchandle.variables[params[3]][:,:]
ncutil.ncclose(nchandle)
site_loc = [52.5, 190.0] #52.8, -169.3 for M6 Samalga mooring
ind_lat = np.where(lat == site_loc[0])[0][0]
ind_lon = np.where(lon == site_loc[1])[0][0]
xx = np.hstack((xx, data[:,ind_lat,ind_lon] / 100)) #pa to hPa/mbar
dt = 1. #data is daily
time_base = 'days'
# convert time to serial date
base_date = datetime.datetime.fromordinal(1) #time is in hours since 1-1-1
time_delta = datetime.timedelta(hours = 1)
    # there is a -2 day (48 hour) correction factor in the following
    # conversion to calculate the date (unknown why it's needed)
time = [base_date + (int(t - 48) * time_delta) for t in time] #convert to integer for datetime calculation
time = [t.toordinal() for t in time ]
variance = np.var(xx)
#normalize
print 'Variance = %s ' % (variance)
x = (xx - np.mean(xx)) / np.sqrt(variance)
variance = np.var(x)
return (xx, x,dt,np.array(time), variance, time_base)
def mooring_btm_press(file_in):
""" TODO: make file choice and var choice more transparent"""
#dir_path = os.path.dirname(os.path.abspath(__file__))
ncfile = file_in
###nc readin/out
nchandle = ncutil.ncopen(ncfile)
params = constants.nc_vars_moor()
ncdata = ncutil.ncreadfile(nchandle, params)
ncutil.ncclose(nchandle)
###data massaging
#time_all = ncdata[:,0] + ncdata[:,1]
xx = ncutil.nc_missing(ncdata[:,9], flag=1e35, setfill='Zero')
pytime = util.EPICdate2udunits(ncdata[:,0], ncdata[:,1])
dt = 1. / pytime['interval_min'] #data is 4 times daily
time_base = 'days'
time = pytime['timeint']
#time = util.subsample(time, int(pytime.get('interval_min')) / 4)
variance = np.var(xx)
#normalize
print 'Variance = %s ' % (variance)
x = (xx - np.mean(xx)) / np.sqrt(variance)
variance = np.var(x)
return (xx, x,dt,np.array(time), variance, time_base)
def example():
"""
Data file from http://paos.colorado.edu/research/wavelets/software.html
Used to validate program retrievals.
    Should be consistent with the website as well as the Torrence and Compo 1998 BAMS article.
"""
###text readin
dir_path = os.path.dirname(os.path.abspath(__file__))
infile = '/data/sst_nino3.dat'
SSTfile = dir_path +infile
anom = np.loadtxt(SSTfile, unpack=True)
###data massaging
dt = 1. / 4. #data is 4 times yearly
time_base = 'years'
sl = len(anom)
time = (np.arange(0,sl ,1) * dt) +1871.
# signal
x = anom
variance = np.var(x)
#normalize
print 'Variance = %s ' % (variance)
x = (x - np.mean(x)) / np.sqrt(variance)
variance = np.var(x)
return (anom, x,dt,np.array(time), variance, time_base)
"""---------------------------------modules--------------------------------------------"""
def acf(series):
"""Determine autocorrelation factors"""
n = len(series)
data = np.asarray(series)
mean = np.mean(data)
c0 = np.sum((data - mean) ** 2) / float(n)
def r(h):
acf_lag = ((data[:n - h] - mean) * (data[h:] - mean)).sum() / float(n) / c0
return round(acf_lag, 3)
x = np.arange(n) # Avoiding lag 0 calculation
acf_coeffs = map(r, x)
return acf_coeffs
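# Minimal sketch (illustrative, not called below): the lag-1 coefficient from
# acf() characterises the red-noise (AR(1)) background passed to
# wave_sig.wave_signif(); for a pure AR(1) series the estimate should sit
# close to the generating coefficient.
def _example_lag1(series):
    """Return the lag-1 autocorrelation of a 1-D series."""
    return acf(series)[1]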
"""------------------------------ Data Selection --------------------------------------"""
if __name__ == "__main__":
#(raw_data, x, dt, time, variance, time_base) = mooring_btm_press('/Users/bell/Data_Local/FOCI/Mooring/2003/samalga5/03sg5a_sg_0112m.nc')
#fig_name_base = 'images/M6_P_1wtide'
#(raw_data, x, dt, time, variance, time_base) = mooring_btm_press('/Users/bell/Data_Local/from_phyllis/03sg5j_0114m_detide_f35.nc')
#fig_name_base = 'images/M6_P_1'
(raw_data, x, dt, time, variance, time_base) = example()
fig_name_base = 'images/Nino3SST'
"""-----------------------------wavelet analysis ---------------------------"""
wa = WaveletAnalysis(x, time=time, dt=dt, dj=0.125)
# wavelet power spectrum
power = wa.wavelet_power
transform = wa.wavelet_transform
# scales
scales = wa.scales
# associated time vector
t = wa.time
# reconstruction of the original data
rx = wa.reconstruction()
# determine acor factor for red noise
acorr = acf(x)
lagf = (acorr[1]+np.sqrt(acorr[2]))/2
print 'acorr lagf is %s' % lagf
# determine significance levels
(signif, fft_theory) = wave_sig.wave_signif(x,dt,scales,lag1=lagf)
sig95 = np.ones_like(power) * np.array([signif] * len(t)).transpose()
sig95 = power / sig95 # where ratio > 1, power is significant
# Global wavelet spectrum & significance levels:
global_int = variance*(np.sum(power, axis=0) ) / x.shape[0] # time-average over all times
gs = ((np.sum(power, axis=1) ) / x.shape[0]) / variance #assume var=1
gswa = wa.global_wavelet_spectrum
# Global wavelet significance
(signif_g, fft_theory_g) = wave_sig.global_wave_signif(x,dt,scales,lag1=lagf,sigtest=1, dof=len(x))
"""----------------------------- plot setup ------------------------------------------"""
T, S = np.meshgrid(t, scales)
"""----------- plotting WaveTransform Power with confidence interval contour ----------"""
plt, fig = wavelet_analy_plot.plot_wavetransf(wa, T, S, sig95, time_base, plot_percentile=True)
plt.savefig(fig_name_base + '_wave' + '.png', bbox_inches='tight', dpi = (100))
plt.close()
"""----------------- plotting contours w/global and timeseries ----------"""
plt, fig = wavelet_analy_plot.plot_wavetransf_time(x, wa, T, S, sig95, gs, signif_g, time_base, plot_percentile=True)
plt.savefig(fig_name_base + '_wave2' + '.png', bbox_inches='tight', dpi = (100))
plt.close()
"""---------------------------------------- scaleogram --------------------------------"""
(plt, fig) = wavelet_analy_plot.scaleogram(wa)
plt.savefig(fig_name_base + '_scale' + '.png', bbox_inches='tight', dpi = (100))
plt.close()
"""----------------------- plotting power spectrum FFT --------------------------------"""
(plt, fig) = wavelet_analy_plot.fft_power_spec(x, time_base)
plt.savefig(fig_name_base + '_FFTspec' + '.png', bbox_inches='tight', dpi = (100))
plt.close()
"""
# Do FFT analysis of array
sp = np.fft.fft(x)
# Getting the related frequencies
freq = np.fft.fftfreq(t.shape[-1], d=.25)
pyy = sp*np.conj(sp)
"""
"""----------------- plot scale averaged power timeseries -----------------------------"""
#Need to know what scales to plot
scales_bin = [2, 8]
#find indices
indices = np.where((scales >2) & (scales < 8))
#scales[indices]
scale_ave1 = power[indices].mean(axis=0)
(plt, fig) = wavelet_analy_plot.scale_ave_timeseries(scale_ave1, wa.time, scales_bin)
plt.savefig(fig_name_base + '_scaleave' + '.png', bbox_inches='tight', dpi = (100))
plt.close()
"""----------------- Compare against second data set -----------------------------"""
files = ['/Users/bell/Data_Local/from_phyllis/pres.sfc.2003.nc', '/Users/bell/Data_Local/from_phyllis/pres.sfc.2004.nc']
(raw_data, x, dt, time, variance, time_base) = reanaly_sfc_press_multifile(files)
fig_name_base = 'images/Reanalyisis'
wa = WaveletAnalysis(x, time=time, dt=dt, dj=0.125)
#find indices
indices = np.where((wa.scales >2) & (wa.scales < 8))
#scales[indices]
scale_ave2 = wa.wavelet_power[indices].mean(axis=0)
(plt, fig) = wavelet_analy_plot.scale_ave_timeseries2D(scale_ave1, scale_ave2, t, wa.time, scales_bin)
plt.savefig(fig_name_base + '_scaleave_comp' + '.png', bbox_inches='tight', dpi = (100))
plt.close()
|
|
import scores_db
import mock
import redis_client
import control
from twisted.internet import defer
import yaml
# Unit tests
## ScoresDB access
def test_set_scores():
fake_connection = mock.Mock()
fake_connection.set = mock.Mock()
with mock.patch('redis_client.connection', fake_connection):
scores_db.scores.set_match_score(1, 'ABC', 12)
fake_connection.set.assert_called_once_with('match:scores:1:ABC:game', 12)
def test_set_league_points():
fake_connection = mock.Mock()
fake_connection.mset = mock.Mock()
with mock.patch('redis_client.connection', fake_connection):
raw_data = {'ABC':1.0, 'DEF':2.0}
scores_db.scores.set_league_points(1, raw_data)
call_data = {'match:scores:1:ABC:league':1.0, 'match:scores:1:DEF:league':2.0}
fake_connection.mset.assert_called_once_with(call_data)
def test_get_league_points():
fake_connection = mock.Mock()
fake_keys = defer.Deferred()
keys = ['match:scores:1:ABC:league', 'match:scores:2:ABC:league']
fake_keys.callback(keys)
fake_connection.keys = mock.Mock(return_value = fake_keys)
fake_points = defer.Deferred()
fake_points.callback([2.0, 3.0])
fake_connection.mget = mock.Mock(return_value = fake_points)
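    # The Deferreds above have already fired, so when get_league_points()
    # chains onto them the addCallback() below runs synchronously and
    # did_complete receives the summed points (2.0 + 3.0 == 5.0).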
did_complete = mock.Mock()
with mock.patch('redis_client.connection', fake_connection):
# Get the value
points = scores_db.scores.get_league_points('ABC')
# Assert that the right things were called
fake_connection.keys.assert_called_once_with('match:scores:*:ABC:league')
fake_connection.mget.assert_called_once_with(*keys)
# Check that the right result was output
points.addCallback(did_complete)
did_complete.assert_called_once_with(5.0)
def test_get_league_points_empty():
fake_connection = mock.Mock()
fake_keys = defer.Deferred()
fake_keys.callback([])
fake_connection.keys = mock.Mock(return_value = fake_keys)
fake_points = defer.Deferred()
fake_points.callback([])
fake_connection.mget = mock.Mock(return_value = fake_points)
did_complete = mock.Mock()
with mock.patch('redis_client.connection', fake_connection):
# Get the value
points = scores_db.scores.get_league_points('ABC')
# Assert that the right things were called (or not)
fake_connection.keys.assert_called_once_with('match:scores:*:ABC:league')
assert not fake_connection.mget.called, "Should not call mget when no matches"
# Check that the right result was output
points.addCallback(did_complete)
did_complete.assert_called_once_with(None)
def test_disqualify():
fake_connection = mock.Mock()
fake_connection.set = mock.Mock()
with mock.patch('redis_client.connection', fake_connection):
scores_db.scores.disqualify(1, 'ABC')
# Assert that the right things were called
fake_connection.set.assert_called_once_with('match:scores:1:ABC:dsq', True)
def test_re_qualify():
fake_connection = mock.Mock()
fake_connection.delete = mock.Mock()
with mock.patch('redis_client.connection', fake_connection):
scores_db.scores.re_qualify(1, 'ABC')
# Assert that the right things were called
fake_connection.delete.assert_called_once_with('match:scores:1:ABC:dsq')
def test_teams_in_match():
fake_connection = mock.Mock()
fake_keys = defer.Deferred()
fake_keys.callback(['match:scores:1:ABC:game', 'match:scores:1:DEF:game'])
fake_connection.keys = mock.Mock(return_value = fake_keys)
did_complete = mock.Mock()
with mock.patch('redis_client.connection', fake_connection):
# Get the value
info = scores_db.scores.teams_in_match(1)
# Assert that the right things were called
fake_connection.keys.assert_called_once_with('match:scores:1:*:game')
# Check that the right result was output
info.addCallback(did_complete)
did_complete.assert_called_once_with(['ABC', 'DEF'])
def test_teams_in_match_empty():
fake_connection = mock.Mock()
fake_keys = defer.Deferred()
fake_keys.callback([])
fake_connection.keys = mock.Mock(return_value = fake_keys)
did_complete = mock.Mock()
with mock.patch('redis_client.connection', fake_connection):
# Get the value
info = scores_db.scores.teams_in_match(1)
# Assert that the right things were called
fake_connection.keys.assert_called_once_with('match:scores:1:*:game')
# Check that the right result was output
info.addCallback(did_complete)
did_complete.assert_called_once_with([])
def test_teams_disqualified_in_match():
fake_connection = mock.Mock()
fake_keys = defer.Deferred()
fake_keys.callback(['match:scores:1:ABC:dsq', 'match:scores:1:DEF:dsq'])
fake_connection.keys = mock.Mock(return_value = fake_keys)
did_complete = mock.Mock()
with mock.patch('redis_client.connection', fake_connection):
# Get the value
info = scores_db.scores.teams_disqualified_in_match(1)
# Assert that the right things were called
fake_connection.keys.assert_called_once_with('match:scores:1:*:dsq')
# Check that the right result was output
info.addCallback(did_complete)
did_complete.assert_called_once_with(['ABC', 'DEF'])
def test_teams_disqualified_in_match_empty():
fake_connection = mock.Mock()
fake_keys = defer.Deferred()
fake_keys.callback([])
fake_connection.keys = mock.Mock(return_value = fake_keys)
did_complete = mock.Mock()
with mock.patch('redis_client.connection', fake_connection):
# Get the value
info = scores_db.scores.teams_disqualified_in_match(1)
# Assert that the right things were called
fake_connection.keys.assert_called_once_with('match:scores:1:*:dsq')
# Check that the right result was output
info.addCallback(did_complete)
did_complete.assert_called_once_with([])
def test_get_match_score():
fake_connection = mock.Mock()
fake_score = defer.Deferred()
fake_score.callback(2)
fake_connection.get = mock.Mock(return_value = fake_score)
did_complete = mock.Mock()
with mock.patch('redis_client.connection', fake_connection):
# Get the value
info = scores_db.scores.get_match_score(1, 'ABC')
# Assert that the right things were called
fake_connection.get.assert_called_once_with('match:scores:1:ABC:game')
# Check that the right result was output
info.addCallback(did_complete)
did_complete.assert_called_once_with(2)
def test_get_match_scores():
fake_connection = mock.Mock()
fake_teams = defer.Deferred()
fake_teams.callback(['ABC', 'DEF'])
fake_get_teams = mock.Mock(return_value = fake_teams)
fake_score = defer.Deferred()
fake_score.callback([2,3])
fake_connection.mget = mock.Mock(return_value = fake_score)
did_complete = mock.Mock()
with mock.patch('scores_db.scores.teams_in_match', fake_get_teams), \
mock.patch('redis_client.connection', fake_connection):
# Get the value
info = scores_db.scores.get_match_scores(1)
# Assert that the right things were called
fake_get_teams.assert_called_once_with(1)
fake_connection.mget.assert_called_once_with(*['match:scores:1:ABC:game',
'match:scores:1:DEF:game'])
# Check that the right result was output
info.addCallback(did_complete)
did_complete.assert_called_once_with({'ABC':2, 'DEF':3})
def test_get_match_scores_empty():
fake_connection = mock.Mock()
fake_teams = defer.Deferred()
fake_teams.callback([])
fake_get_teams = mock.Mock(return_value = fake_teams)
fake_score = defer.Deferred()
fake_score.callback([2,3])
fake_connection.mget = mock.Mock(return_value = fake_score)
did_complete = mock.Mock()
with mock.patch('scores_db.scores.teams_in_match', fake_get_teams), \
mock.patch('redis_client.connection', fake_connection):
# Get the value
info = scores_db.scores.get_match_scores(1)
# Assert that the right things were called
fake_get_teams.assert_called_once_with(1)
assert not fake_connection.mget.called, "Should not query scores we don't have"
# Check that the right result was output
info.addCallback(did_complete)
did_complete.assert_called_once_with(None)
## Commands
def test_perform_set_score():
fake_set_score = mock.Mock()
fake_responder = mock.Mock()
with mock.patch('scores_db.scores.set_match_score', fake_set_score):
options = { '<match-id>': 1,
'<tla>': 'ABC',
'<score>': 3 }
# Run the command
scores_db.perform_set_score(fake_responder, options)
# Assert that the right things were called
fake_set_score.assert_called_once_with(1, 'ABC', 3)
# Check that the right text was output
fake_responder.assert_called_once_with('Scored 3 points for ABC in match 1')
def test_perform_get_score():
fake_score = defer.Deferred()
fake_score.callback(3)
fake_get_score = mock.Mock(return_value = fake_score)
fake_responder = mock.Mock()
with mock.patch('scores_db.scores.get_match_score', fake_get_score):
options = { '<match-id>': 1,
'<tla>': 'ABC' }
# Run the command
scores_db.perform_get_score(fake_responder, options)
# Assert that the right things were called
fake_get_score.assert_called_once_with(1, 'ABC')
# Check that the right text was output
fake_responder.assert_called_once_with('Team ABC scored 3 in match 1')
def test_perform_get_score_yaml():
fake_score = defer.Deferred()
fake_score.callback(3)
fake_get_score = mock.Mock(return_value = fake_score)
fake_responder = mock.Mock()
with mock.patch('scores_db.scores.get_match_score', fake_get_score):
options = { '<match-id>': 1,
'<tla>': 'ABC',
'--yaml': True }
# Run the command
scores_db.perform_get_score(fake_responder, options)
# Assert that the right things were called
fake_get_score.assert_called_once_with(1, 'ABC')
# Check that the right text was output
fake_responder.assert_called_once_with(yaml.dump({'score':3}))
def test_perform_get_scores():
fake_scores = defer.Deferred()
results = {'ABC':1, 'DEF':4}
fake_scores.callback(results)
fake_get_scores = mock.Mock(return_value = fake_scores)
fake_responder = mock.Mock()
with mock.patch('scores_db.scores.get_match_scores', fake_get_scores):
options = { '<match-id>': 1,
'<tla>': 'ABC' }
# Run the command
scores_db.perform_get_scores(fake_responder, options)
# Assert that the right things were called
fake_get_scores.assert_called_once_with(1)
# Check that the right text was output
fake_responder.assert_has_calls([mock.call('Team ABC scored 1 in match 1'),
mock.call('Team DEF scored 4 in match 1')],
any_order = True)
def test_perform_get_scores_empty():
fake_scores = defer.Deferred()
results = None
fake_scores.callback(results)
fake_get_scores = mock.Mock(return_value = fake_scores)
fake_responder = mock.Mock()
with mock.patch('scores_db.scores.get_match_scores', fake_get_scores):
options = { '<match-id>': 1,
'<tla>': 'ABC' }
# Run the command
scores_db.perform_get_scores(fake_responder, options)
# Assert that the right things were called
fake_get_scores.assert_called_once_with(1)
# Check that the right text was output
        fake_responder.assert_called_once_with('No scores available for match 1')
def test_perform_get_scores_yaml():
fake_scores = defer.Deferred()
results = {'ABC':1, 'DEF':4}
fake_scores.callback(results)
fake_get_scores = mock.Mock(return_value = fake_scores)
fake_responder = mock.Mock()
with mock.patch('scores_db.scores.get_match_scores', fake_get_scores):
options = { '<match-id>': 1,
'<tla>': 'ABC',
'--yaml': True }
# Run the command
scores_db.perform_get_scores(fake_responder, options)
# Assert that the right things were called
fake_get_scores.assert_called_once_with(1)
# Check that the right text was output
fake_responder.assert_called_once_with(yaml.dump({'scores':results}))
def test_perform_get_scores_empty_yaml():
fake_scores = defer.Deferred()
results = None
fake_scores.callback(results)
fake_get_scores = mock.Mock(return_value = fake_scores)
fake_responder = mock.Mock()
with mock.patch('scores_db.scores.get_match_scores', fake_get_scores):
options = { '<match-id>': 1,
'<tla>': 'ABC',
'--yaml': True }
# Run the command
scores_db.perform_get_scores(fake_responder, options)
# Assert that the right things were called
fake_get_scores.assert_called_once_with(1)
# Check that the right text was output
fake_responder.assert_called_once_with(yaml.dump({'scores':results}))
def test_calc_league_points():
fake_scores = defer.Deferred()
match_results = {'ABC':2, 'FED':4, 'GHI':18}
dsqs = ['GHI']
fake_scores.callback(match_results)
fake_get_scores = mock.Mock(return_value = fake_scores)
fake_dsqs = defer.Deferred()
fake_dsqs.callback(dsqs)
fake_get_dsqs = mock.Mock(return_value = fake_dsqs)
results = {'ABC':3.0, 'FED': 4.0}
fake_ranker = mock.Mock(return_value = results)
fake_set_league_pts = mock.Mock()
fake_responder = mock.Mock()
with mock.patch('ranker.get_ranked_points', fake_ranker), \
mock.patch('scores_db.scores.get_match_scores', fake_get_scores), \
mock.patch('scores_db.scores.set_league_points', fake_set_league_pts), \
mock.patch('scores_db.scores.teams_disqualified_in_match', fake_get_dsqs):
options = { '<match-id>': 1 }
# Run the command
scores_db.perform_calc_league_points(fake_responder, options)
# Assert that the right things were called
fake_get_scores.assert_called_once_with(1)
fake_get_dsqs.assert_called_once_with(1)
fake_ranker.assert_called_once_with(match_results, dsqs)
fake_set_league_pts.assert_called_once_with(1, results)
# Check that the right text was output
fake_responder.assert_has_calls([mock.call('Team ABC earned 3.0 points from match 1'),
mock.call('Team FED earned 4.0 points from match 1')],
any_order = True)
def test_calc_league_points_yaml():
fake_scores = defer.Deferred()
match_results = {'ABC':2, 'FED':4, 'GHI':18}
dsqs = ['GHI']
fake_scores.callback(match_results)
fake_get_scores = mock.Mock(return_value = fake_scores)
fake_dsqs = defer.Deferred()
fake_dsqs.callback(dsqs)
fake_get_dsqs = mock.Mock(return_value = fake_dsqs)
results = {'ABC':3.0, 'FED': 4.0}
fake_ranker = mock.Mock(return_value = results)
fake_set_league_pts = mock.Mock()
fake_responder = mock.Mock()
with mock.patch('ranker.get_ranked_points', fake_ranker), \
mock.patch('scores_db.scores.get_match_scores', fake_get_scores), \
mock.patch('scores_db.scores.set_league_points', fake_set_league_pts), \
mock.patch('scores_db.scores.teams_disqualified_in_match', fake_get_dsqs):
options = { '<match-id>': 1,
'--yaml': True }
# Run the command
scores_db.perform_calc_league_points(fake_responder, options)
# Assert that the right things were called
fake_get_scores.assert_called_once_with(1)
fake_get_dsqs.assert_called_once_with(1)
fake_ranker.assert_called_once_with(match_results, dsqs)
fake_set_league_pts.assert_called_once_with(1, results)
# Check that the right text was output
fake_responder.assert_called_once_with(yaml.dump({'points':results}))
def test_calc_league_points_empty():
fake_scores = defer.Deferred()
match_results = None
fake_scores.callback(match_results)
fake_get_scores = mock.Mock(return_value = fake_scores)
fake_dsqs = defer.Deferred()
fake_dsqs.callback([])
fake_get_dsqs = mock.Mock(return_value = fake_dsqs)
results = {'ABC':3.0, 'FED': 4.0}
fake_ranker = mock.Mock(return_value = results)
fake_set_league_pts = mock.Mock()
fake_responder = mock.Mock()
with mock.patch('ranker.get_ranked_points', fake_ranker), \
mock.patch('scores_db.scores.get_match_scores', fake_get_scores), \
mock.patch('scores_db.scores.set_league_points', fake_set_league_pts), \
mock.patch('scores_db.scores.teams_disqualified_in_match', fake_get_dsqs):
options = { '<match-id>': 1 }
# Run the command
scores_db.perform_calc_league_points(fake_responder, options)
# Assert that the right things were called
fake_get_scores.assert_called_once_with(1)
assert not fake_get_dsqs.called
assert not fake_ranker.called
assert not fake_set_league_pts.called
# Check that the right text was output
fake_responder.assert_called_once_with('No scores available for match 1')
def test_calc_league_points_empty_yaml():
fake_scores = defer.Deferred()
match_results = None
fake_scores.callback(match_results)
fake_get_scores = mock.Mock(return_value = fake_scores)
fake_dsqs = defer.Deferred()
fake_dsqs.callback([])
fake_get_dsqs = mock.Mock(return_value = fake_dsqs)
results = {'ABC':3.0, 'FED': 4.0}
fake_ranker = mock.Mock(return_value = results)
fake_set_league_pts = mock.Mock()
fake_responder = mock.Mock()
with mock.patch('ranker.get_ranked_points', fake_ranker), \
mock.patch('scores_db.scores.get_match_scores', fake_get_scores), \
mock.patch('scores_db.scores.set_league_points', fake_set_league_pts), \
mock.patch('scores_db.scores.teams_disqualified_in_match', fake_get_dsqs):
options = { '<match-id>': 1,
'--yaml': True }
# Run the command
scores_db.perform_calc_league_points(fake_responder, options)
# Assert that the right things were called
fake_get_scores.assert_called_once_with(1)
assert not fake_get_dsqs.called
assert not fake_ranker.called
assert not fake_set_league_pts.called
# Check that the right text was output
fake_responder.assert_called_once_with(yaml.dump({'points':None}))
def test_perform_get_league_points():
pts = 7.0
fake_get_league_pts = mock.Mock(return_value = pts)
fake_responder = mock.Mock()
with mock.patch('scores_db.scores.get_league_points', fake_get_league_pts):
options = { '<tla>': 'ABC' }
# Run the command
scores_db.perform_get_league_points(fake_responder, options)
# Assert that the right things were called
fake_get_league_pts.assert_called_once_with('ABC')
# Check that the right text was output
fake_responder.assert_called_once_with('Team ABC have 7.0 league points')
def test_get_league_points_yaml():
pts = 7.0
fake_get_league_pts = mock.Mock(return_value = pts)
fake_responder = mock.Mock()
with mock.patch('scores_db.scores.get_league_points', fake_get_league_pts):
options = { '<tla>': 'ABC',
'--yaml': True }
# Run the command
scores_db.perform_get_league_points(fake_responder, options)
# Assert that the right things were called
fake_get_league_pts.assert_called_once_with('ABC')
# Check that the right text was output
fake_responder.assert_called_once_with(yaml.dump({'points':pts}))
def test_perform_get_league_points_empty():
pts = None
fake_get_league_pts = mock.Mock(return_value = pts)
fake_responder = mock.Mock()
with mock.patch('scores_db.scores.get_league_points', fake_get_league_pts):
options = { '<tla>': 'ABC' }
# Run the command
scores_db.perform_get_league_points(fake_responder, options)
# Assert that the right things were called
fake_get_league_pts.assert_called_once_with('ABC')
# Check that the right text was output
fake_responder.assert_called_once_with('No scores available for team ABC')
def test_get_league_points_empty_yaml():
pts = None
fake_get_league_pts = mock.Mock(return_value = pts)
fake_responder = mock.Mock()
with mock.patch('scores_db.scores.get_league_points', fake_get_league_pts):
options = { '<tla>': 'ABC',
'--yaml': True }
# Run the command
scores_db.perform_get_league_points(fake_responder, options)
# Assert that the right things were called
fake_get_league_pts.assert_called_once_with('ABC')
# Check that the right text was output
fake_responder.assert_called_once_with(yaml.dump({'points':pts}))
def test_get_dsqs():
dsqs = ['ABC', 'DEF']
fake_dsqs = defer.Deferred()
fake_dsqs.callback(dsqs)
fake_get_dsqs = mock.Mock(return_value = fake_dsqs)
fake_responder = mock.Mock()
with mock.patch('scores_db.scores.teams_disqualified_in_match', fake_get_dsqs):
options = { '<match-id>': 1 }
# Run the command
scores_db.perform_get_dsqs(fake_responder, options)
# Assert that the right things were called
fake_get_dsqs.assert_called_once_with(1)
# Check that the right text was output
fake_responder.assert_called_once_with('Team(s) ABC, DEF were disqualified from match 1')
def test_get_dsqs_yaml():
dsqs = ['ABC', 'DEF']
fake_dsqs = defer.Deferred()
fake_dsqs.callback(dsqs)
fake_get_dsqs = mock.Mock(return_value = fake_dsqs)
fake_responder = mock.Mock()
with mock.patch('scores_db.scores.teams_disqualified_in_match', fake_get_dsqs):
options = { '<match-id>': 1,
'--yaml': True }
# Run the command
scores_db.perform_get_dsqs(fake_responder, options)
# Assert that the right things were called
fake_get_dsqs.assert_called_once_with(1)
# Check that the right text was output
fake_responder.assert_called_once_with(yaml.dump({'dsqs': dsqs}))
def test_get_dsqs_empty():
dsqs = []
fake_dsqs = defer.Deferred()
fake_dsqs.callback(dsqs)
fake_get_dsqs = mock.Mock(return_value = fake_dsqs)
fake_responder = mock.Mock()
with mock.patch('scores_db.scores.teams_disqualified_in_match', fake_get_dsqs):
options = { '<match-id>': 1 }
# Run the command
scores_db.perform_get_dsqs(fake_responder, options)
# Assert that the right things were called
fake_get_dsqs.assert_called_once_with(1)
# Check that the right text was output
fake_responder.assert_called_once_with('No teams were disqualified from match 1')
def test_get_dsqs_empty_yaml():
dsqs = []
fake_dsqs = defer.Deferred()
fake_dsqs.callback(dsqs)
fake_get_dsqs = mock.Mock(return_value = fake_dsqs)
fake_responder = mock.Mock()
with mock.patch('scores_db.scores.teams_disqualified_in_match', fake_get_dsqs):
options = { '<match-id>': 1,
'--yaml': True }
# Run the command
scores_db.perform_get_dsqs(fake_responder, options)
# Assert that the right things were called
fake_get_dsqs.assert_called_once_with(1)
# Check that the right text was output
fake_responder.assert_called_once_with(yaml.dump({'dsqs': dsqs}))
def test_perform_disqualify():
fake_disqualify = mock.Mock()
fake_responder = mock.Mock()
with mock.patch('scores_db.scores.disqualify', fake_disqualify):
options = { '<match-id>': 1,
'<tla>': 'ABC' }
# Run the command
scores_db.perform_disqualify(fake_responder, options)
# Assert that the right things were called
fake_disqualify.assert_called_once_with(1, 'ABC')
# Check that the right text was output
fake_responder.assert_called_once_with('Disqualified ABC in match 1')
def test_perform_re_qualify():
fake_re_qualify = mock.Mock()
fake_responder = mock.Mock()
with mock.patch('scores_db.scores.re_qualify', fake_re_qualify):
options = { '<match-id>': 1,
'<tla>': 'ABC' }
# Run the command
scores_db.perform_re_qualify(fake_responder, options)
# Assert that the right things were called
fake_re_qualify.assert_called_once_with(1, 'ABC')
# Check that the right text was output
fake_responder.assert_called_once_with('Re-qualified ABC in match 1')
|
|
import mock
import os
import pytest
import tempfile
from datetime import datetime
from django.conf import settings
from django.core import mail
from django.test.utils import override_settings
from freezegun import freeze_time
from olympia import amo
from olympia.activity.models import ActivityLog
from olympia.addons import cron
from olympia.addons.models import Addon, AddonCategory, MigratedLWT
from olympia.addons.tasks import (
add_static_theme_from_lwt, create_persona_preview_images,
migrate_legacy_dictionary_to_webextension, migrate_lwts_to_static_themes,
migrate_webextensions_to_git_storage,
recreate_theme_previews, save_persona_image)
from olympia.amo.storage_utils import copy_stored_file
from olympia.amo.tests import (
addon_factory, collection_factory, TestCase, user_factory, version_factory)
from olympia.amo.tests.test_helpers import get_image_path
from olympia.amo.utils import image_size
from olympia.applications.models import AppVersion
from olympia.constants import licenses
from olympia.constants.categories import CATEGORIES
from olympia.files.models import FileUpload
from olympia.files.utils import id_to_path
from olympia.ratings.models import Rating
from olympia.stats.models import ThemeUpdateCount, UpdateCount
from olympia.tags.models import Tag
from olympia.users.models import UserProfile
from olympia.versions.models import License, VersionPreview
from olympia.lib.git import AddonGitRepository
class TestPersonaImageFunctions(TestCase):
@mock.patch('olympia.addons.tasks.pngcrush_image')
def test_create_persona_preview_image(self, pngcrush_image_mock):
addon = addon_factory()
addon.modified = self.days_ago(41)
        # Given an image, 680x100 and 32x32 thumbnails need to be generated
# and processed with pngcrush.
expected_dst1 = tempfile.NamedTemporaryFile(
mode='wb', suffix=".png", delete=False, dir=settings.TMP_PATH)
expected_dst2 = tempfile.NamedTemporaryFile(
mode='wb', suffix=".png", delete=False, dir=settings.TMP_PATH)
create_persona_preview_images(
src=get_image_path('persona-header.jpg'),
full_dst=[expected_dst1.name, expected_dst2.name],
set_modified_on=addon.serializable_reference(),
)
# pngcrush_image should have been called twice, once for each
# destination thumbnail.
assert pngcrush_image_mock.call_count == 2
assert pngcrush_image_mock.call_args_list[0][0][0] == (
expected_dst1.name)
assert pngcrush_image_mock.call_args_list[1][0][0] == (
expected_dst2.name)
assert image_size(expected_dst1.name) == (680, 100)
assert image_size(expected_dst2.name) == (32, 32)
addon.reload()
self.assertCloseToNow(addon.modified)
@mock.patch('olympia.addons.tasks.pngcrush_image')
def test_save_persona_image(self, pngcrush_image_mock):
# save_persona_image() simply saves an image as a png to the
# destination file. The image should be processed with pngcrush.
expected_dst = tempfile.NamedTemporaryFile(
mode='wb', suffix=".png", delete=False, dir=settings.TMP_PATH)
save_persona_image(
get_image_path('persona-header.jpg'),
expected_dst.name
)
# pngcrush_image should have been called once.
assert pngcrush_image_mock.call_count == 1
assert pngcrush_image_mock.call_args_list[0][0][0] == expected_dst.name
@mock.patch('olympia.addons.tasks.pngcrush_image')
def test_save_persona_image_not_an_image(self, pngcrush_image_mock):
# If the source is not an image, save_persona_image() should just
# return early without writing the destination or calling pngcrush.
expected_dst = tempfile.NamedTemporaryFile(
mode='wb', suffix=".png", delete=False, dir=settings.TMP_PATH)
save_persona_image(
get_image_path('non-image.png'),
expected_dst.name
)
# pngcrush_image should not have been called.
assert pngcrush_image_mock.call_count == 0
# the destination file should not have been written to.
assert os.stat(expected_dst.name).st_size == 0
@pytest.mark.django_db
@mock.patch('olympia.addons.tasks.add_static_theme_from_lwt')
def test_migrate_lwts_to_static_themes(add_static_theme_from_lwt_mock):
    # Include two LWTs that won't get migrated, sandwiched between some good LWTs
persona_a = addon_factory(type=amo.ADDON_PERSONA, slug='theme_a')
persona_none = addon_factory(type=amo.ADDON_PERSONA, slug='theme_none')
persona_b = addon_factory(type=amo.ADDON_PERSONA, slug='theme_b')
persona_raise = addon_factory(type=amo.ADDON_PERSONA, slug='theme_raise')
persona_c = addon_factory(type=amo.ADDON_PERSONA, slug='theme_c')
addon_a = addon_factory(type=amo.ADDON_STATICTHEME)
addon_b = addon_factory(type=amo.ADDON_STATICTHEME)
addon_c = addon_factory(type=amo.ADDON_STATICTHEME)
add_static_theme_from_lwt_mock.side_effect = [
addon_a, False, addon_b, Exception('foo'), addon_c]
# call the migration task, as the command would:
migrate_lwts_to_static_themes(
[persona_a.id, persona_none.id, persona_b.id, persona_raise.id,
persona_c.id])
assert MigratedLWT.objects.all().count() == 3
assert Addon.objects.filter(type=amo.ADDON_PERSONA).count() == 2
persona_a.reload()
addon_a.reload()
assert persona_a.status == amo.STATUS_DELETED
assert MigratedLWT.objects.get(
lightweight_theme=persona_a).static_theme == addon_a
assert addon_a.slug == 'theme_a'
persona_b.reload()
addon_b.reload()
assert persona_b.status == amo.STATUS_DELETED
assert MigratedLWT.objects.get(
lightweight_theme=persona_b).static_theme == addon_b
assert addon_b.slug == 'theme_b'
persona_c.reload()
addon_c.reload()
assert persona_c.status == amo.STATUS_DELETED
assert MigratedLWT.objects.get(
lightweight_theme=persona_c).static_theme == addon_c
assert addon_c.slug == 'theme_c'
assert len(mail.outbox) == 0
@override_settings(ENABLE_ADDON_SIGNING=True)
class TestAddStaticThemeFromLwt(TestCase):
create_date = datetime(2000, 1, 1, 1, 1, 1)
modify_date = datetime(2008, 8, 8, 8, 8, 8)
update_date = datetime(2009, 9, 9, 9, 9, 9)
def setUp(self):
super(TestAddStaticThemeFromLwt, self).setUp()
self.call_signing_mock = self.patch(
'olympia.lib.crypto.signing.call_signing')
self.build_mock = self.patch(
'olympia.addons.tasks.build_static_theme_xpi_from_lwt')
self.build_mock.side_effect = self._mock_xpi_side_effect
self.call_signing_mock.return_value = 'abcdefg1234'
AppVersion.objects.get_or_create(
application=amo.FIREFOX.id, version='53.0')
AppVersion.objects.get_or_create(
application=amo.FIREFOX.id, version='*')
user_factory(id=settings.TASK_USER_ID, email='[email protected]')
def _mock_xpi_side_effect(self, lwt, upload_path):
xpi_path = os.path.join(
settings.ROOT,
'src/olympia/devhub/tests/addons/mozilla_static_theme.zip')
copy_stored_file(xpi_path, upload_path)
assert not os.path.isdir(upload_path)
return mock.DEFAULT
def _check_result(self, static_theme, authors, tags, categories, license_,
ratings, collection):
# metadata is correct
assert list(static_theme.authors.all()) == authors
assert list(static_theme.tags.all()) == tags
assert len(categories) == 1
lwt_cat = categories[0]
static_theme_cats = [
(cat.name, cat.application) for cat in static_theme.all_categories]
assert static_theme_cats == [
(lwt_cat.name, amo.FIREFOX.id), (lwt_cat.name, amo.ANDROID.id)]
assert static_theme.current_version.license.builtin == license_
# status is good
assert static_theme.status == amo.STATUS_PUBLIC
current_file = static_theme.current_version.files.get()
assert current_file.status == amo.STATUS_PUBLIC
# Ratings were migrated
assert list(Rating.unfiltered.filter(addon=static_theme)) == ratings
log_entries = ActivityLog.objects.filter(
action=amo.LOG.ADD_RATING.id, addonlog__addon=static_theme)
assert log_entries.count() == len(ratings)
for rating, log_entry in zip(ratings, log_entries):
arguments = log_entry.arguments
assert rating in arguments
assert static_theme in arguments
# The collection has the new theme
if collection:
assert static_theme in list(collection.addons.all())
assert collection.addons.filter(
type=amo.ADDON_PERSONA).count() == 0
assert collection.addons.filter(
type=amo.ADDON_STATICTHEME).count() == 1
assert collection.addons.count() == 2
# UpdateCounts were copied.
assert UpdateCount.objects.filter(
addon_id=static_theme.id).count() == 2
# xpi was signed
self.call_signing_mock.assert_called_with(current_file)
assert current_file.cert_serial_num == 'abcdefg1234'
assert static_theme.created == self.create_date
assert static_theme.modified == self.modify_date
cron.addon_last_updated() # Make sure the last_updated change stuck.
assert static_theme.reload().last_updated == self.update_date
def test_add_static_theme_from_lwt(self):
author = user_factory()
persona = addon_factory(
type=amo.ADDON_PERSONA, users=[author], name='Firefox Theme')
persona.update(
created=self.create_date, modified=self.modify_date,
last_updated=self.update_date)
persona.persona.license = licenses.LICENSE_CC_BY_ND.id
Tag.objects.create(tag_text='themey').save_tag(persona)
License.objects.create(builtin=licenses.LICENSE_CC_BY_ND.builtin)
rating_user = user_factory()
rating = Rating.objects.create(
addon=persona, version=persona.current_version, user=rating_user,
rating=2, body=u'fooooo', user_responsible=rating_user)
ThemeUpdateCount.objects.create(
addon_id=persona.id, date=datetime(2018, 1, 1), count=123)
ThemeUpdateCount.objects.create(
addon_id=persona.id, date=datetime(2018, 2, 1), count=456)
# Create a count for an addon that shouldn't be migrated too.
ThemeUpdateCount.objects.create(
addon_id=addon_factory().id, date=datetime(2018, 2, 1), count=45)
# And add it to a collection
collection = collection_factory()
collection.add_addon(persona)
collection.add_addon(addon_factory())
static_theme = add_static_theme_from_lwt(persona)
self._check_result(
static_theme, [author], list(persona.tags.all()),
persona.all_categories, licenses.LICENSE_CC_BY_ND.builtin,
[rating], collection)
def test_add_static_theme_broken_lwt(self):
"""What if no author or license or category?"""
persona = addon_factory(type=amo.ADDON_PERSONA)
persona.update(
created=self.create_date, modified=self.modify_date,
last_updated=self.update_date)
assert list(persona.authors.all()) == [] # no author
persona.persona.license = None # no license
AddonCategory.objects.filter(addon=persona).delete()
assert persona.all_categories == [] # no category
License.objects.create(builtin=licenses.LICENSE_COPYRIGHT_AR.builtin)
rating_user = user_factory()
rating = Rating.objects.create(
addon=persona, version=persona.current_version, user=rating_user,
rating=2, body=u'fooooo', user_responsible=rating_user)
rating.delete() # delete the rating - should still be migrated.
# Add 2 more Ratings for different addons that shouldn't be copied.
Rating.objects.create(
addon=addon_factory(), user=rating_user,
rating=3, body=u'tgd', user_responsible=rating_user)
Rating.objects.create(
addon=addon_factory(), user=rating_user,
rating=4, body=u'tgffd', user_responsible=rating_user)
ThemeUpdateCount.objects.create(
addon_id=persona.id, date=datetime(2018, 1, 1), count=123)
ThemeUpdateCount.objects.create(
addon_id=persona.id, date=datetime(2018, 2, 1), count=456)
# Create a count for an addon that shouldn't be migrated too.
ThemeUpdateCount.objects.create(
addon_id=addon_factory().id, date=datetime(2018, 2, 1), count=45)
static_theme = add_static_theme_from_lwt(persona)
default_author = UserProfile.objects.get(
email=settings.MIGRATED_LWT_DEFAULT_OWNER_EMAIL)
desktop_default_category = (
CATEGORIES[amo.FIREFOX.id][amo.ADDON_STATICTHEME]['other'])
android_default_category = (
CATEGORIES[amo.ANDROID.id][amo.ADDON_STATICTHEME]['other'])
self._check_result(
static_theme, [default_author], [], [desktop_default_category],
licenses.LICENSE_COPYRIGHT_AR.builtin, [rating], None)
        # Double check it's the exact categories we want.
assert static_theme.all_categories == [
desktop_default_category, android_default_category]
@override_settings(ENABLE_ADDON_SIGNING=True)
class TestMigrateLegacyDictionaryToWebextension(TestCase):
def setUp(self):
self.user = user_factory(
id=settings.TASK_USER_ID, username='taskuser',
email='[email protected]')
with freeze_time('2017-07-27 07:00'):
self.addon = addon_factory(
type=amo.ADDON_DICT,
guid='@my-dict', # Same id used in dict-webext.xpi.
version_kw={'version': '6.3'})
AppVersion.objects.get_or_create(
application=amo.FIREFOX.id, version='61.0')
AppVersion.objects.get_or_create(
application=amo.FIREFOX.id, version='*')
self.call_signing_mock = self.patch(
'olympia.lib.crypto.signing.call_signing')
self.call_signing_mock.return_value = 'abcdefg1234'
self.build_mock = self.patch(
'olympia.addons.tasks.build_webext_dictionary_from_legacy')
self.build_mock.side_effect = self._mock_xpi_side_effect
def _mock_xpi_side_effect(self, addon, destination):
xpi_path = os.path.join(
settings.ROOT, 'src/olympia/files/fixtures/files/dict-webext.xpi')
copy_stored_file(xpi_path, destination)
assert not os.path.isdir(destination)
return mock.DEFAULT
def test_basic(self):
assert not FileUpload.objects.exists()
assert not ActivityLog.objects.filter(
action=amo.LOG.ADD_VERSION.id).exists()
old_version = self.addon.current_version
self.build_mock.return_value = 'fake-locale'
with freeze_time('2018-08-28 08:00'):
self.migration_date = datetime.now()
migrate_legacy_dictionary_to_webextension(self.addon)
self.build_mock.assert_called_once_with(self.addon, mock.ANY)
assert FileUpload.objects.exists()
self.addon.reload()
assert self.addon.target_locale == 'fake-locale'
assert self.addon.current_version != old_version
activity_log = ActivityLog.objects.filter(
action=amo.LOG.ADD_VERSION.id).get()
assert activity_log.arguments == [
self.addon.current_version, self.addon
]
assert self.addon.last_updated == self.migration_date
current_file = self.addon.current_version.all_files[0]
assert current_file.datestatuschanged == self.migration_date
assert current_file.status == amo.STATUS_PUBLIC
self.call_signing_mock.assert_called_with(current_file)
assert current_file.cert_serial_num == 'abcdefg1234'
class TestMigrateWebextensionsToGitStorage(TestCase):
def test_basic(self):
addon = addon_factory(file_kw={'filename': 'webextension_no_id.xpi'})
migrate_webextensions_to_git_storage([addon.pk])
repo = AddonGitRepository(addon.pk)
assert repo.git_repository_path == os.path.join(
settings.GIT_FILE_STORAGE_PATH, id_to_path(addon.id), 'package')
assert os.listdir(repo.git_repository_path) == ['.git']
@mock.patch('olympia.versions.tasks.extract_version_to_git')
def test_no_files(self, extract_mock):
addon = addon_factory()
addon.current_version.files.all().delete()
migrate_webextensions_to_git_storage([addon.pk])
extract_mock.assert_not_called()
@mock.patch('olympia.versions.tasks.extract_version_to_git')
def test_skip_already_migrated_versions(self, extract_mock):
addon = addon_factory(file_kw={'filename': 'webextension_no_id.xpi'})
version_to_migrate = addon.current_version
already_migrated_version = version_factory(
addon=addon, file_kw={'filename': 'webextension_no_id.xpi'})
already_migrated_version.update(git_hash='already migrated...')
migrate_webextensions_to_git_storage([addon.pk])
# Only once instead of twice
extract_mock.assert_called_once_with(version_to_migrate.pk)
@mock.patch('olympia.versions.tasks.extract_version_to_git')
def test_migrate_versions_from_old_to_new(self, extract_mock):
addon = addon_factory(file_kw={'filename': 'webextension_no_id.xpi'})
oldest_version = addon.current_version
oldest_version.update(created=self.days_ago(6))
older_version = version_factory(
created=self.days_ago(5),
addon=addon, file_kw={'filename': 'webextension_no_id.xpi'})
most_recent = version_factory(
created=self.days_ago(2),
addon=addon, file_kw={'filename': 'webextension_no_id.xpi'})
migrate_webextensions_to_git_storage([addon.pk])
        # All three versions should be extracted, in order from oldest to newest
assert extract_mock.call_count == 3
assert extract_mock.call_args_list[0][0][0] == oldest_version.pk
assert extract_mock.call_args_list[1][0][0] == older_version.pk
assert extract_mock.call_args_list[2][0][0] == most_recent.pk
@pytest.mark.django_db
def test_recreate_theme_previews():
xpi_path = os.path.join(
settings.ROOT,
'src/olympia/devhub/tests/addons/mozilla_static_theme.zip')
addon_without_previews = addon_factory(type=amo.ADDON_STATICTHEME)
copy_stored_file(
xpi_path,
addon_without_previews.current_version.all_files[0].file_path)
addon_with_previews = addon_factory(type=amo.ADDON_STATICTHEME)
copy_stored_file(
xpi_path,
addon_with_previews.current_version.all_files[0].file_path)
VersionPreview.objects.create(
version=addon_with_previews.current_version,
sizes={'image': [123, 456], 'thumbnail': [34, 45]})
assert addon_without_previews.current_previews.count() == 0
assert addon_with_previews.current_previews.count() == 1
recreate_theme_previews(
[addon_without_previews.id, addon_with_previews.id])
assert addon_without_previews.reload().current_previews.count() == 3
assert addon_with_previews.reload().current_previews.count() == 3
sizes = addon_without_previews.current_previews.values_list(
'sizes', flat=True)
assert list(sizes) == [
{'image': list(amo.THEME_PREVIEW_SIZES['header']['full']),
'thumbnail': list(amo.THEME_PREVIEW_SIZES['header']['thumbnail'])},
{'image': list(amo.THEME_PREVIEW_SIZES['list']['full']),
'thumbnail': list(amo.THEME_PREVIEW_SIZES['list']['thumbnail'])},
{'image': list(amo.THEME_PREVIEW_SIZES['single']['full']),
'thumbnail': list(amo.THEME_PREVIEW_SIZES['single']['thumbnail'])}]
@pytest.mark.django_db
@mock.patch('olympia.addons.tasks.parse_addon')
def test_create_missing_theme_previews(parse_addon_mock):
parse_addon_mock.return_value = {}
theme = addon_factory(type=amo.ADDON_STATICTHEME)
preview = VersionPreview.objects.create(
version=theme.current_version,
sizes={'image': [123, 456], 'thumbnail': [34, 45]})
VersionPreview.objects.create(
version=theme.current_version,
sizes={'image': [123, 456], 'thumbnail': [34, 45]})
VersionPreview.objects.create(
version=theme.current_version,
sizes={'image': [123, 456], 'thumbnail': [34, 45]})
# addon has 3 complete previews already so skip when only_missing=True
with mock.patch('olympia.addons.tasks.generate_static_theme_preview') as p:
recreate_theme_previews([theme.id], only_missing=True)
assert p.call_count == 0
recreate_theme_previews([theme.id], only_missing=False)
assert p.call_count == 1
# break one of the previews
preview.update(sizes={})
with mock.patch('olympia.addons.tasks.generate_static_theme_preview') as p:
recreate_theme_previews([theme.id], only_missing=True)
assert p.call_count == 1
# And delete it so the addon only has 2 previews
preview.delete()
with mock.patch('olympia.addons.tasks.generate_static_theme_preview') as p:
recreate_theme_previews([theme.id], only_missing=True)
assert p.call_count == 1
|
|
# -*- coding: utf-8 -
#
# This file is part of restkit released under the MIT license.
# See the NOTICE for more information.
import base64
import urllib
import time
import random
import urlparse
import hmac
import binascii
try:
from urlparse import parse_qs, parse_qsl
except ImportError:
from cgi import parse_qs, parse_qsl
from restkit.util import to_bytestring
try:
from hashlib import sha1
sha = sha1
except ImportError:
# hashlib was added in Python 2.5
import sha
from restkit.version import __version__
OAUTH_VERSION = '1.0' # Hi Blaine!
HTTP_METHOD = 'GET'
SIGNATURE_METHOD = 'PLAINTEXT'
class Error(RuntimeError):
"""Generic exception class."""
def __init__(self, message='OAuth error occurred.'):
self._message = message
@property
def message(self):
"""A hack to get around the deprecation errors in 2.6."""
return self._message
def __str__(self):
return self._message
class MissingSignature(Error):
pass
def build_authenticate_header(realm=''):
"""Optional WWW-Authenticate header (401 error)"""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
def build_xoauth_string(url, consumer, token=None):
"""Build an XOAUTH string for use in SMTP/IMPA authentication."""
request = Request.from_consumer_and_token(consumer, token,
"GET", url)
signing_method = SignatureMethod_HMAC_SHA1()
request.sign_request(signing_method, consumer, token)
params = []
for k, v in sorted(request.iteritems()):
if v is not None:
params.append('%s="%s"' % (k, escape(v)))
return "%s %s %s" % ("GET", url, ','.join(params))
def to_unicode(s):
""" Convert to unicode, raise exception with instructive error
message if s is not unicode, ascii, or utf-8. """
if not isinstance(s, unicode):
if not isinstance(s, str):
raise TypeError('You are required to pass either unicode or string here, not: %r (%s)' % (type(s), s))
try:
s = s.decode('utf-8')
except UnicodeDecodeError, le:
raise TypeError('You are required to pass either a unicode object or a utf-8 string here. You passed a Python string object which contained non-utf-8: %r. The UnicodeDecodeError that resulted from attempting to interpret it as utf-8 was: %s' % (s, le,))
return s
def to_utf8(s):
return to_unicode(s).encode('utf-8')
def to_unicode_if_string(s):
if isinstance(s, basestring):
return to_unicode(s)
else:
return s
def to_utf8_if_string(s):
if isinstance(s, basestring):
return to_utf8(s)
else:
return s
def to_unicode_optional_iterator(x):
"""
Raise TypeError if x is a str containing non-utf8 bytes or if x is
an iterable which contains such a str.
"""
if isinstance(x, basestring):
return to_unicode(x)
try:
l = list(x)
except TypeError, e:
assert 'is not iterable' in str(e)
return x
else:
return [ to_unicode(e) for e in l ]
def to_utf8_optional_iterator(x):
"""
    Raise TypeError if x is a str containing non-utf8 bytes or if x is
    an iterable which contains such a str.
"""
if isinstance(x, basestring):
return to_utf8(x)
try:
l = list(x)
except TypeError, e:
assert 'is not iterable' in str(e)
return x
else:
return [ to_utf8_if_string(e) for e in l ]
def escape(s):
"""Escape a URL including any /."""
return urllib.quote(s.encode('utf-8'), safe='~')
def generate_timestamp():
"""Get seconds since epoch (UTC)."""
return int(time.time())
def generate_nonce(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
def generate_verifier(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
class Consumer(object):
"""A consumer of OAuth-protected services.
The OAuth consumer is a "third-party" service that wants to access
protected resources from an OAuth service provider on behalf of an end
user. It's kind of the OAuth client.
Usually a consumer must be registered with the service provider by the
developer of the consumer software. As part of that process, the service
provider gives the consumer a *key* and a *secret* with which the consumer
software can identify itself to the service. The consumer will include its
key in each request to identify itself, but will use its secret only when
signing requests, to prove that the request is from that particular
registered consumer.
Once registered, the consumer can then use its consumer credentials to ask
the service provider for a request token, kicking off the OAuth
authorization process.
"""
key = None
secret = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
if self.key is None or self.secret is None:
raise ValueError("Key and secret must be set.")
def __str__(self):
data = {'oauth_consumer_key': self.key,
'oauth_consumer_secret': self.secret}
return urllib.urlencode(data)
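# Hedged usage sketch (not part of the original restkit source): a Consumer is
# built from the key/secret pair issued at registration; the values below are
# placeholders, not real credentials.
def _example_consumer():
    consumer = Consumer(key='my-app-key', secret='my-app-secret')
    # __str__ urlencodes the credentials, e.g.
    # 'oauth_consumer_key=my-app-key&oauth_consumer_secret=my-app-secret'
    return str(consumer)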
class Token(object):
"""An OAuth credential used to request authorization or a protected
resource.
Tokens in OAuth comprise a *key* and a *secret*. The key is included in
requests to identify the token being used, but the secret is used only in
the signature, to prove that the requester is who the server gave the
token to.
When first negotiating the authorization, the consumer asks for a *request
token* that the live user authorizes with the service provider. The
consumer then exchanges the request token for an *access token* that can
be used to access protected resources.
"""
key = None
secret = None
callback = None
callback_confirmed = None
verifier = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
if self.key is None or self.secret is None:
raise ValueError("Key and secret must be set.")
def set_callback(self, callback):
self.callback = callback
self.callback_confirmed = 'true'
def set_verifier(self, verifier=None):
if verifier is not None:
self.verifier = verifier
else:
self.verifier = generate_verifier()
def get_callback_url(self):
if self.callback and self.verifier:
# Append the oauth_verifier.
parts = urlparse.urlparse(self.callback)
scheme, netloc, path, params, query, fragment = parts[:6]
if query:
query = '%s&oauth_verifier=%s' % (query, self.verifier)
else:
query = 'oauth_verifier=%s' % self.verifier
return urlparse.urlunparse((scheme, netloc, path, params,
query, fragment))
return self.callback
def to_string(self):
"""Returns this token as a plain string, suitable for storage.
The resulting string includes the token's secret, so you should never
send or store this string where a third party can read it.
"""
data = {
'oauth_token': self.key,
'oauth_token_secret': self.secret,
}
if self.callback_confirmed is not None:
data['oauth_callback_confirmed'] = self.callback_confirmed
return urllib.urlencode(data)
@staticmethod
def from_string(s):
"""Deserializes a token from a string like one returned by
`to_string()`."""
if not len(s):
raise ValueError("Invalid parameter string.")
params = parse_qs(s, keep_blank_values=False)
if not len(params):
raise ValueError("Invalid parameter string.")
try:
key = params['oauth_token'][0]
except Exception:
raise ValueError("'oauth_token' not found in OAuth request.")
try:
secret = params['oauth_token_secret'][0]
except Exception:
raise ValueError("'oauth_token_secret' not found in "
"OAuth request.")
token = Token(key, secret)
try:
token.callback_confirmed = params['oauth_callback_confirmed'][0]
except KeyError:
pass # 1.0, no callback confirmed.
return token
def __str__(self):
return self.to_string()
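# Hedged sketch (not part of the original restkit source): to_string() and
# from_string() round-trip a token through the urlencoded form used for
# storage; the key/secret/callback below are placeholders.
def _example_token_roundtrip():
    token = Token(key='request-token', secret='request-secret')
    token.set_callback('http://example.com/cb')
    serialized = token.to_string()      # 'oauth_token=...&oauth_token_secret=...'
    restored = Token.from_string(serialized)
    return restored.key == token.key and restored.secret == token.secret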
def setter(attr):
name = attr.__name__
def getter(self):
try:
return self.__dict__[name]
except KeyError:
raise AttributeError(name)
def deleter(self):
del self.__dict__[name]
return property(getter, attr, deleter)
class Request(dict):
"""The parameters and information for an HTTP request, suitable for
authorizing with OAuth credentials.
When a consumer wants to access a service's protected resources, it does
so using a signed HTTP request identifying itself (the consumer) with its
key, and providing an access token authorized by the end user to access
those resources.
"""
version = OAUTH_VERSION
def __init__(self, method=HTTP_METHOD, url=None, parameters=None,
body='', is_form_encoded=False):
if url is not None:
self.url = to_unicode(url)
self.method = method
if parameters is not None:
for k, v in parameters.iteritems():
k = to_unicode(k)
v = to_unicode_optional_iterator(v)
self[k] = v
self.body = body
self.is_form_encoded = is_form_encoded
@setter
def url(self, value):
self.__dict__['url'] = value
if value is not None:
scheme, netloc, path, params, query, fragment = urlparse.urlparse(value)
# Exclude default port numbers.
if scheme == 'http' and netloc[-3:] == ':80':
netloc = netloc[:-3]
elif scheme == 'https' and netloc[-4:] == ':443':
netloc = netloc[:-4]
if scheme not in ('http', 'https'):
raise ValueError("Unsupported URL %s (%s)." % (value, scheme))
# Normalized URL excludes params, query, and fragment.
self.normalized_url = urlparse.urlunparse((scheme, netloc, path, None, None, None))
else:
self.normalized_url = None
self.__dict__['url'] = None
@setter
def method(self, value):
self.__dict__['method'] = value.upper()
def _get_timestamp_nonce(self):
return self['oauth_timestamp'], self['oauth_nonce']
def get_nonoauth_parameters(self):
"""Get any non-OAuth parameters."""
return dict([(k, v) for k, v in self.iteritems()
if not k.startswith('oauth_')])
def to_header(self, realm=''):
"""Serialize as a header for an HTTPAuth request."""
oauth_params = ((k, v) for k, v in self.items()
if k.startswith('oauth_'))
stringy_params = ((k, escape(str(v))) for k, v in oauth_params)
header_params = ('%s="%s"' % (k, v) for k, v in stringy_params)
params_header = ', '.join(header_params)
auth_header = 'OAuth realm="%s"' % realm
if params_header:
auth_header = "%s, %s" % (auth_header, params_header)
return {'Authorization': auth_header}
def to_postdata(self):
"""Serialize as post data for a POST request."""
d = {}
for k, v in self.iteritems():
d[k.encode('utf-8')] = to_utf8_optional_iterator(v)
# tell urlencode to deal with sequence values and map them correctly
# to resulting querystring. for example self["k"] = ["v1", "v2"] will
# result in 'k=v1&k=v2' and not k=%5B%27v1%27%2C+%27v2%27%5D
return urllib.urlencode(d, True).replace('+', '%20')
def to_url(self):
"""Serialize as a URL for a GET request."""
base_url = urlparse.urlparse(self.url)
try:
query = base_url.query
except AttributeError:
# must be python <2.5
query = base_url[4]
query = parse_qs(query)
for k, v in self.items():
query.setdefault(k, []).append(v)
try:
scheme = base_url.scheme
netloc = base_url.netloc
path = base_url.path
params = base_url.params
fragment = base_url.fragment
except AttributeError:
# must be python <2.5
scheme = base_url[0]
netloc = base_url[1]
path = base_url[2]
params = base_url[3]
fragment = base_url[5]
url = (scheme, netloc, path, params,
urllib.urlencode(query, True), fragment)
return urlparse.urlunparse(url)
def get_parameter(self, parameter):
ret = self.get(parameter)
if ret is None:
raise Error('Parameter not found: %s' % parameter)
return ret
def get_normalized_parameters(self):
"""Return a string that contains the parameters that must be signed."""
items = []
for key, value in self.iteritems():
if key == 'oauth_signature':
continue
# 1.0a/9.1.1 states that kvp must be sorted by key, then by value,
# so we unpack sequence values into multiple items for sorting.
if isinstance(value, basestring):
items.append((to_utf8_if_string(key), to_utf8(value)))
else:
try:
value = list(value)
except TypeError, e:
assert 'is not iterable' in str(e)
items.append((to_utf8_if_string(key), to_utf8_if_string(value)))
else:
items.extend((to_utf8_if_string(key), to_utf8_if_string(item)) for item in value)
# Include any query string parameters from the provided URL
query = urlparse.urlparse(self.url)[4]
url_items = self._split_url_string(query).items()
url_items = [(to_utf8(k), to_utf8(v)) for k, v in url_items if k != 'oauth_signature' ]
items.extend(url_items)
items.sort()
encoded_str = urllib.urlencode(items)
        # Encode signature parameters per OAuth Core 1.0 protocol
# spec draft 7, section 3.6
# (http://tools.ietf.org/html/draft-hammer-oauth-07#section-3.6)
# Spaces must be encoded with "%20" instead of "+"
return encoded_str.replace('+', '%20').replace('%7E', '~')
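    # Hedged illustration (not in the original source): with parameters such as
    # {'a': '1', 'b': 'two words', 'oauth_nonce': '4572616e'} the normalized
    # string is sorted by key and percent-encoded as
    # 'a=1&b=two%20words&oauth_nonce=4572616e'; this is the string that gets
    # signed, not the literal request URL.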
def sign_request(self, signature_method, consumer, token):
"""Set the signature parameter to the result of sign."""
if not self.is_form_encoded:
# according to
# http://oauth.googlecode.com/svn/spec/ext/body_hash/1.0/oauth-bodyhash.html
# section 4.1.1 "OAuth Consumers MUST NOT include an
# oauth_body_hash parameter on requests with form-encoded
# request bodies."
self['oauth_body_hash'] = base64.b64encode(sha(self.body).digest())
if 'oauth_consumer_key' not in self:
self['oauth_consumer_key'] = consumer.key
if token and 'oauth_token' not in self:
self['oauth_token'] = token.key
self['oauth_signature_method'] = signature_method.name
self['oauth_signature'] = signature_method.sign(self, consumer, token)
@classmethod
def make_timestamp(cls):
"""Get seconds since epoch (UTC)."""
return str(int(time.time()))
@classmethod
def make_nonce(cls):
"""Generate pseudorandom number."""
return str(random.randint(0, 100000000))
@classmethod
def from_request(cls, http_method, http_url, headers=None, parameters=None,
query_string=None):
"""Combines multiple parameter sources."""
if parameters is None:
parameters = {}
# Headers
if headers and 'Authorization' in headers:
auth_header = headers['Authorization']
# Check that the authorization header is OAuth.
if auth_header[:6] == 'OAuth ':
auth_header = auth_header[6:]
try:
# Get the parameters from the header.
header_params = cls._split_header(auth_header)
parameters.update(header_params)
except:
raise Error('Unable to parse OAuth parameters from '
'Authorization header.')
# GET or POST query string.
if query_string:
query_params = cls._split_url_string(query_string)
parameters.update(query_params)
# URL parameters.
param_str = urlparse.urlparse(http_url)[4] # query
url_params = cls._split_url_string(param_str)
parameters.update(url_params)
if parameters:
return cls(http_method, http_url, parameters)
return None
@classmethod
def from_consumer_and_token(cls, consumer, token=None,
http_method=HTTP_METHOD, http_url=None, parameters=None,
body='', is_form_encoded=False):
if not parameters:
parameters = {}
defaults = {
'oauth_consumer_key': consumer.key,
'oauth_timestamp': cls.make_timestamp(),
'oauth_nonce': cls.make_nonce(),
'oauth_version': cls.version,
}
defaults.update(parameters)
parameters = defaults
if token:
parameters['oauth_token'] = token.key
if token.verifier:
parameters['oauth_verifier'] = token.verifier
return Request(http_method, http_url, parameters, body=body,
is_form_encoded=is_form_encoded)
@classmethod
def from_token_and_callback(cls, token, callback=None,
http_method=HTTP_METHOD, http_url=None, parameters=None):
if not parameters:
parameters = {}
parameters['oauth_token'] = token.key
if callback:
parameters['oauth_callback'] = callback
return cls(http_method, http_url, parameters)
@staticmethod
def _split_header(header):
"""Turn Authorization: header into parameters."""
params = {}
parts = header.split(',')
for param in parts:
# Ignore realm parameter.
if param.find('realm') > -1:
continue
# Remove whitespace.
param = param.strip()
# Split key-value.
param_parts = param.split('=', 1)
# Remove quotes and unescape the value.
params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
return params
@staticmethod
def _split_url_string(param_str):
"""Turn URL string into parameters."""
parameters = parse_qs(param_str.encode('utf-8'), keep_blank_values=True)
for k, v in parameters.iteritems():
parameters[k] = urllib.unquote(v[0])
return parameters
class SignatureMethod(object):
"""A way of signing requests.
The OAuth protocol lets consumers and service providers pick a way to sign
requests. This interface shows the methods expected by the other `oauth`
modules for signing requests. Subclass it and implement its methods to
provide a new way to sign requests.
"""
def signing_base(self, request, consumer, token):
"""Calculates the string that needs to be signed.
This method returns a 2-tuple containing the starting key for the
signing and the message to be signed. The latter may be used in error
messages to help clients debug their software.
"""
raise NotImplementedError
def sign(self, request, consumer, token):
"""Returns the signature for the given request, based on the consumer
and token also provided.
You should use your implementation of `signing_base()` to build the
message to sign. Otherwise it may be less useful for debugging.
"""
raise NotImplementedError
def check(self, request, consumer, token, signature):
"""Returns whether the given signature is the correct signature for
the given consumer and token signing the given request."""
built = self.sign(request, consumer, token)
return built == signature
class SignatureMethod_HMAC_SHA1(SignatureMethod):
name = 'HMAC-SHA1'
def signing_base(self, request, consumer, token):
if not hasattr(request, 'normalized_url') or request.normalized_url is None:
raise ValueError("Base URL for request is not set.")
sig = (
escape(request.method),
escape(request.normalized_url),
escape(request.get_normalized_parameters()),
)
key = '%s&' % escape(consumer.secret)
if token:
key += escape(token.secret)
raw = '&'.join(sig)
return to_bytestring(key), raw
def sign(self, request, consumer, token):
"""Builds the base signature string."""
key, raw = self.signing_base(request, consumer, token)
hashed = hmac.new(to_bytestring(key), raw, sha)
# Calculate the digest base 64.
return binascii.b2a_base64(hashed.digest())[:-1]
class SignatureMethod_PLAINTEXT(SignatureMethod):
name = 'PLAINTEXT'
def signing_base(self, request, consumer, token):
"""Concatenates the consumer key and secret with the token's
secret."""
sig = '%s&' % escape(consumer.secret)
if token:
sig = sig + escape(token.secret)
return sig, sig
def sign(self, request, consumer, token):
key, raw = self.signing_base(request, consumer, token)
return raw
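# Hedged end-to-end sketch (not part of the original restkit source): how the
# classes above are typically combined to sign a GET request with HMAC-SHA1.
# The URL and credentials below are placeholders.
def _example_sign_get_request():
    consumer = Consumer(key='my-app-key', secret='my-app-secret')
    token = Token(key='access-token', secret='access-secret')
    request = Request.from_consumer_and_token(
        consumer, token=token, http_method='GET',
        http_url='http://example.com/api/resource?page=1')
    request.sign_request(SignatureMethod_HMAC_SHA1(), consumer, token)
    # The signed parameters can travel as an Authorization header or in the URL.
    return request.to_header(realm='example'), request.to_url()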
|
|
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import json
from mock.mock import MagicMock, call, patch
from resource_management.libraries.functions import version
from stacks.utils.RMFTestCase import *
import os
from resource_management.libraries import functions
origin_exists = os.path.exists
@patch("platform.linux_distribution", new = MagicMock(return_value="Linux"))
@patch.object(os.path, "exists", new=MagicMock(
side_effect=lambda *args: origin_exists(args[0])
if args[0][-2:] == "j2" else True))
@patch.object(functions, "get_stack_version", new = MagicMock(return_value="2.2.0.0-1234"))
class TestMapReduce2Client(RMFTestCase):
COMMON_SERVICES_PACKAGE_DIR = "YARN/2.1.0.2.0/package"
STACK_VERSION = "2.0.6"
CONFIG_OVERRIDES = {"serviceName":"MAPREDUCE2", "role":"MAPREDUCE2_CLIENT"}
def test_configure_default(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/mapreduce2_client.py",
classname = "MapReduce2Client",
command = "configure",
config_file="default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('Directory', '/var/run/hadoop-yarn',
owner = 'yarn',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/run/hadoop-yarn/yarn',
owner = 'yarn',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/log/hadoop-yarn/yarn',
owner = 'yarn',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce',
owner = 'mapred',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce/mapred',
owner = 'mapred',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce',
owner = 'mapred',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce/mapred',
owner = 'mapred',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/log/hadoop-yarn',
owner = 'yarn',
group = 'hadoop',
create_parents = True,
ignore_failures = True,
cd_access = 'a',
)
self.assertResourceCalled('XmlConfig', 'core-site.xml',
owner = 'hdfs',
group = 'hadoop',
mode = 0644,
conf_dir = '/etc/hadoop/conf',
configurations = self.getConfig()['configurations']['core-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['core-site']
)
self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
owner = 'hdfs',
group = 'hadoop',
mode = 0644,
conf_dir = '/etc/hadoop/conf',
configurations = self.getConfig()['configurations']['hdfs-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site']
)
self.assertResourceCalled('XmlConfig', 'mapred-site.xml',
owner = 'yarn',
group = 'hadoop',
mode = 0644,
conf_dir = '/etc/hadoop/conf',
configurations = self.getConfig()['configurations']['mapred-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['mapred-site']
)
self.assertResourceCalled('XmlConfig', 'yarn-site.xml',
owner = 'yarn',
group = 'hadoop',
mode = 0644,
conf_dir = '/etc/hadoop/conf',
configurations = self.getConfig()['configurations']['yarn-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['yarn-site']
)
self.assertResourceCalled('XmlConfig', 'capacity-scheduler.xml',
owner = 'yarn',
group = 'hadoop',
mode = 0644,
conf_dir = '/etc/hadoop/conf',
configurations = self.getConfig()['configurations']['capacity-scheduler'],
configuration_attributes = self.getConfig()['configuration_attributes']['capacity-scheduler']
)
self.assertResourceCalled('File', '/etc/security/limits.d/yarn.conf',
content = Template('yarn.conf.j2'),
mode = 0644,
)
self.assertResourceCalled('File', '/etc/security/limits.d/mapreduce.conf',
content = Template('mapreduce.conf.j2'),
mode = 0644,
)
self.assertResourceCalled('File', '/etc/hadoop/conf/yarn-env.sh',
content = InlineTemplate(self.getConfig()['configurations']['yarn-env']['content']),
owner = 'yarn',
group = 'hadoop',
mode = 0755,
)
self.assertResourceCalled('File', '/usr/lib/hadoop-yarn/bin/container-executor',
group = 'hadoop',
mode = 02050,
)
self.assertResourceCalled('File', '/etc/hadoop/conf/container-executor.cfg',
content = Template('container-executor.cfg.j2'),
group = 'hadoop',
mode = 0644,
)
self.assertResourceCalled('Directory', '/cgroups_test/cpu',
group = 'hadoop',
create_parents = True,
mode = 0755,
cd_access="a"
)
self.assertResourceCalled('File', '/etc/hadoop/conf/mapred-env.sh',
content = InlineTemplate(self.getConfig()['configurations']['mapred-env']['content']),
mode = 0755,
owner = 'hdfs',
)
self.assertResourceCalled('File', '/etc/hadoop/conf/taskcontroller.cfg',
content = Template('taskcontroller.cfg.j2'),
owner = 'hdfs',
)
self.assertResourceCalled('XmlConfig', 'mapred-site.xml',
owner = 'mapred',
group = 'hadoop',
conf_dir = '/etc/hadoop/conf',
configurations = self.getConfig()['configurations']['mapred-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['mapred-site']
)
self.assertResourceCalled('XmlConfig', 'capacity-scheduler.xml',
owner = 'hdfs',
group = 'hadoop',
conf_dir = '/etc/hadoop/conf',
configurations = self.getConfig()['configurations']['capacity-scheduler'],
configuration_attributes = self.getConfig()['configuration_attributes']['capacity-scheduler']
)
self.assertResourceCalled('File', '/etc/hadoop/conf/fair-scheduler.xml',
owner = 'mapred',
group = 'hadoop',
)
self.assertResourceCalled('File', '/etc/hadoop/conf/ssl-client.xml.example',
owner = 'mapred',
group = 'hadoop',
)
self.assertResourceCalled('File', '/etc/hadoop/conf/ssl-server.xml.example',
owner = 'mapred',
group = 'hadoop',
)
self.assertNoMoreResources()
def test_configure_secured(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/mapreduce2_client.py",
classname = "MapReduce2Client",
command = "configure",
config_file="secured.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('Directory', '/var/run/hadoop-yarn',
owner = 'yarn',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/run/hadoop-yarn/yarn',
owner = 'yarn',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/log/hadoop-yarn/yarn',
owner = 'yarn',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce',
owner = 'mapred',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce/mapred',
owner = 'mapred',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce',
owner = 'mapred',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce/mapred',
owner = 'mapred',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/log/hadoop-yarn',
owner = 'yarn',
group = 'hadoop',
create_parents = True,
ignore_failures = True,
cd_access = 'a',
)
self.assertResourceCalled('XmlConfig', 'core-site.xml',
owner = 'hdfs',
group = 'hadoop',
mode = 0644,
conf_dir = '/etc/hadoop/conf',
configurations = self.getConfig()['configurations']['core-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['core-site']
)
self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
owner = 'hdfs',
group = 'hadoop',
mode = 0644,
conf_dir = '/etc/hadoop/conf',
configurations = self.getConfig()['configurations']['hdfs-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site']
)
self.assertResourceCalled('XmlConfig', 'mapred-site.xml',
owner = 'yarn',
group = 'hadoop',
mode = 0644,
conf_dir = '/etc/hadoop/conf',
configurations = self.getConfig()['configurations']['mapred-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['mapred-site']
)
self.assertResourceCalled('XmlConfig', 'yarn-site.xml',
owner = 'yarn',
group = 'hadoop',
mode = 0644,
conf_dir = '/etc/hadoop/conf',
configurations = self.getConfig()['configurations']['yarn-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['yarn-site']
)
self.assertResourceCalled('XmlConfig', 'capacity-scheduler.xml',
owner = 'yarn',
group = 'hadoop',
mode = 0644,
conf_dir = '/etc/hadoop/conf',
configurations = self.getConfig()['configurations']['capacity-scheduler'],
configuration_attributes = self.getConfig()['configuration_attributes']['capacity-scheduler']
)
self.assertResourceCalled('File', '/etc/security/limits.d/yarn.conf',
content = Template('yarn.conf.j2'),
mode = 0644,
)
self.assertResourceCalled('File', '/etc/security/limits.d/mapreduce.conf',
content = Template('mapreduce.conf.j2'),
mode = 0644,
)
self.assertResourceCalled('File', '/etc/hadoop/conf/yarn-env.sh',
content = InlineTemplate(self.getConfig()['configurations']['yarn-env']['content']),
owner = 'yarn',
group = 'hadoop',
mode = 0755,
)
self.assertResourceCalled('File', '/usr/lib/hadoop-yarn/bin/container-executor',
group = 'hadoop',
mode = 06050,
)
self.assertResourceCalled('File', '/etc/hadoop/conf/container-executor.cfg',
content = Template('container-executor.cfg.j2'),
group = 'hadoop',
mode = 0644,
)
self.assertResourceCalled('Directory', '/cgroups_test/cpu',
group = 'hadoop',
create_parents = True,
mode = 0755,
cd_access="a"
)
self.assertResourceCalled('File', '/etc/hadoop/conf/mapred-env.sh',
content = InlineTemplate(self.getConfig()['configurations']['mapred-env']['content']),
mode = 0755,
owner = 'root',
)
self.assertResourceCalled('File', '/usr/lib/hadoop/sbin/task-controller',
owner = 'root',
group = 'hadoop',
mode = 06050,
)
self.assertResourceCalled('File', '/etc/hadoop/conf/taskcontroller.cfg',
content = Template('taskcontroller.cfg.j2'),
owner = 'root',
group = 'hadoop',
mode = 0644,
)
self.assertResourceCalled('File', '/etc/hadoop/conf/yarn_jaas.conf',
content = Template('yarn_jaas.conf.j2'),
owner = 'yarn',
group = 'hadoop',
)
self.assertResourceCalled('File', '/etc/hadoop/conf/yarn_nm_jaas.conf',
content = Template('yarn_nm_jaas.conf.j2'),
owner = 'yarn',
group = 'hadoop',
)
self.assertResourceCalled('File', '/etc/hadoop/conf/mapred_jaas.conf',
content = Template('mapred_jaas.conf.j2'),
owner = 'mapred',
group = 'hadoop',
)
self.assertResourceCalled('XmlConfig', 'mapred-site.xml',
owner = 'mapred',
group = 'hadoop',
conf_dir = '/etc/hadoop/conf',
configurations = self.getConfig()['configurations']['mapred-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['mapred-site']
)
self.assertResourceCalled('XmlConfig', 'capacity-scheduler.xml',
owner = 'hdfs',
group = 'hadoop',
conf_dir = '/etc/hadoop/conf',
configurations = self.getConfig()['configurations']['capacity-scheduler'],
configuration_attributes = self.getConfig()['configuration_attributes']['capacity-scheduler']
)
self.assertResourceCalled('File', '/etc/hadoop/conf/fair-scheduler.xml',
owner = 'mapred',
group = 'hadoop',
)
self.assertResourceCalled('File', '/etc/hadoop/conf/ssl-client.xml.example',
owner = 'mapred',
group = 'hadoop',
)
self.assertResourceCalled('File', '/etc/hadoop/conf/ssl-server.xml.example',
owner = 'mapred',
group = 'hadoop',
)
self.assertNoMoreResources()
@patch.object(functions, "get_stack_version", new=MagicMock(return_value="2.2.0.0-2041"))
def test_upgrade(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/mapreduce2_client.py",
classname = "MapReduce2Client",
command = "restart",
config_file="client-upgrade.json",
config_overrides = self.CONFIG_OVERRIDES,
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
# for now, it's enough that <stack-selector-tool> is confirmed
self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hadoop-client', '2.2.1.0-2067'), sudo=True)
def test_pre_upgrade_restart_23(self):
config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/client-upgrade.json"
with open(config_file, "r") as f:
json_content = json.load(f)
version = '2.3.0.0-1234'
json_content['commandParams']['version'] = version
mocks_dict = {}
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/mapreduce2_client.py",
classname = "MapReduce2Client",
command = "pre_upgrade_restart",
config_dict = json_content,
config_overrides = self.CONFIG_OVERRIDES,
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES,
mocks_dict = mocks_dict)
self.assertResourceCalledIgnoreEarlier('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hadoop-client', version), sudo=True)
self.assertNoMoreResources()
@patch("resource_management.core.sudo.path_isdir", new = MagicMock(return_value = True))
def test_stack_upgrade_save_new_config(self):
config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/client-upgrade.json"
with open(config_file, "r") as f:
json_content = json.load(f)
version = '2.3.0.0-1234'
json_content['commandParams']['version'] = version
mocks_dict = {}
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/mapreduce2_client.py",
classname = "MapReduce2Client",
command = "stack_upgrade_save_new_config",
config_dict = json_content,
config_overrides = self.CONFIG_OVERRIDES,
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES,
call_mocks = [(0, None, ''), (0, None)],
mocks_dict = mocks_dict)
# for now, it's enough to know the method didn't fail
|
|
# Copyright (c) 2014 Adafruit Industries
# Author: Tony DiCola
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import unittest
from mock import Mock, patch
import Adafruit_GPIO as GPIO
import Adafruit_GPIO.SPI as SPI
from MockGPIO import MockGPIO
class TestBaseGPIO(unittest.TestCase):
def test_set_high_and_set_low(self):
gpio = MockGPIO()
gpio.set_high(1)
gpio.set_low(1)
self.assertDictEqual(gpio.pin_written, {1: [1, 0]})
def test_is_high_and_is_low(self):
gpio = MockGPIO()
gpio.pin_read[1] = [0, 0, 1, 1]
self.assertTrue(gpio.is_low(1))
self.assertFalse(gpio.is_high(1))
self.assertFalse(gpio.is_low(1))
self.assertTrue(gpio.is_high(1))
def test_output_pins(self):
gpio = MockGPIO()
gpio.output_pins({0: True, 1: False, 7: True})
self.assertDictEqual(gpio.pin_written, {0: [1], 1: [0], 7: [1]})
class TestRPiGPIOAdapter(unittest.TestCase):
def test_setup(self):
rpi_gpio = Mock()
adapter = GPIO.RPiGPIOAdapter(rpi_gpio)
adapter.setup(1, GPIO.OUT)
rpi_gpio.setup.assert_called_with(1, rpi_gpio.OUT, pull_up_down=rpi_gpio.PUD_OFF)
adapter.setup(1, GPIO.IN)
rpi_gpio.setup.assert_called_with(1, rpi_gpio.IN, pull_up_down=rpi_gpio.PUD_OFF)
adapter.setup(1, GPIO.IN, GPIO.PUD_DOWN)
rpi_gpio.setup.assert_called_with(1, rpi_gpio.IN, pull_up_down=rpi_gpio.PUD_DOWN)
adapter.setup(1, GPIO.IN, GPIO.PUD_UP)
rpi_gpio.setup.assert_called_with(1, rpi_gpio.IN, pull_up_down=rpi_gpio.PUD_UP)
def test_output(self):
rpi_gpio = Mock()
adapter = GPIO.RPiGPIOAdapter(rpi_gpio)
adapter.output(1, True)
rpi_gpio.output.assert_called_with(1, True)
adapter.output(1, False)
rpi_gpio.output.assert_called_with(1, False)
def test_input(self):
rpi_gpio = Mock()
adapter = GPIO.RPiGPIOAdapter(rpi_gpio)
rpi_gpio.input = Mock(return_value=True)
val = adapter.input(1)
self.assertTrue(val)
rpi_gpio.input.assert_called_with(1)
def test_setmode(self):
rpi_gpio = Mock()
adapter = GPIO.RPiGPIOAdapter(rpi_gpio, mode=rpi_gpio.BCM)
rpi_gpio.setmode.assert_called_with(rpi_gpio.BCM)
adapter = GPIO.RPiGPIOAdapter(rpi_gpio, mode=rpi_gpio.BOARD)
rpi_gpio.setmode.assert_called_with(rpi_gpio.BOARD)
adapter = GPIO.RPiGPIOAdapter(rpi_gpio)
rpi_gpio.setmode.assert_called_with(rpi_gpio.BCM)
def test_add_event_detect(self):
rpi_gpio = Mock()
adapter = GPIO.RPiGPIOAdapter(rpi_gpio)
adapter.add_event_detect(1, GPIO.RISING)
rpi_gpio.add_event_detect.assert_called_with(1, rpi_gpio.RISING)
def test_remove_event_detect(self):
rpi_gpio = Mock()
adapter = GPIO.RPiGPIOAdapter(rpi_gpio)
adapter.remove_event_detect(1)
rpi_gpio.remove_event_detect.assert_called_with(1)
def test_add_event_callback(self):
rpi_gpio = Mock()
adapter = GPIO.RPiGPIOAdapter(rpi_gpio)
adapter.add_event_callback(1, callback=self.test_add_event_callback)
rpi_gpio.add_event_callback.assert_called_with(1, self.test_add_event_callback)
def test_event_detected(self):
rpi_gpio = Mock()
adapter = GPIO.RPiGPIOAdapter(rpi_gpio)
adapter.event_detected(1)
rpi_gpio.event_detected.assert_called_with(1)
def test_wait_for_edge(self):
rpi_gpio = Mock()
adapter = GPIO.RPiGPIOAdapter(rpi_gpio)
adapter.wait_for_edge(1, GPIO.FALLING)
rpi_gpio.wait_for_edge.assert_called_with(1, rpi_gpio.FALLING)
def test_cleanup(self):
rpi_gpio = Mock()
        adapter = GPIO.RPiGPIOAdapter(rpi_gpio)
adapter.cleanup()
rpi_gpio.cleanup.assert_called()
def test_cleanup_pin(self):
rpi_gpio = Mock()
        adapter = GPIO.RPiGPIOAdapter(rpi_gpio)
adapter.cleanup(1)
rpi_gpio.cleanup.assert_called_with(1)
class TestAdafruitBBIOAdapter(unittest.TestCase):
def test_setup(self):
bbio_gpio = Mock()
adapter = GPIO.AdafruitBBIOAdapter(bbio_gpio)
adapter.setup(1, GPIO.OUT)
bbio_gpio.setup.assert_called_with(1, bbio_gpio.OUT, pull_up_down=bbio_gpio.PUD_OFF)
adapter.setup(1, GPIO.IN)
bbio_gpio.setup.assert_called_with(1, bbio_gpio.IN, pull_up_down=bbio_gpio.PUD_OFF)
adapter.setup(1, GPIO.IN, GPIO.PUD_DOWN)
bbio_gpio.setup.assert_called_with(1, bbio_gpio.IN, pull_up_down=bbio_gpio.PUD_DOWN)
adapter.setup(1, GPIO.IN, GPIO.PUD_UP)
bbio_gpio.setup.assert_called_with(1, bbio_gpio.IN, pull_up_down=bbio_gpio.PUD_UP)
def test_output(self):
bbio_gpio = Mock()
adapter = GPIO.AdafruitBBIOAdapter(bbio_gpio)
adapter.output(1, True)
bbio_gpio.output.assert_called_with(1, True)
adapter.output(1, False)
bbio_gpio.output.assert_called_with(1, False)
def test_input(self):
bbio_gpio = Mock()
adapter = GPIO.AdafruitBBIOAdapter(bbio_gpio)
bbio_gpio.input = Mock(return_value=True)
val = adapter.input(1)
self.assertTrue(val)
bbio_gpio.input.assert_called_with(1)
def test_add_event_detect(self):
bbio_gpio = Mock()
adapter = GPIO.AdafruitBBIOAdapter(bbio_gpio)
adapter.add_event_detect(1, GPIO.RISING)
bbio_gpio.add_event_detect.assert_called_with(1, bbio_gpio.RISING)
def test_remove_event_detect(self):
bbio_gpio = Mock()
adapter = GPIO.AdafruitBBIOAdapter(bbio_gpio)
adapter.remove_event_detect(1)
bbio_gpio.remove_event_detect.assert_called_with(1)
def test_add_event_callback(self):
bbio_gpio = Mock()
adapter = GPIO.AdafruitBBIOAdapter(bbio_gpio)
adapter.add_event_callback(1, callback=self.test_add_event_callback)
bbio_gpio.add_event_callback.assert_called_with(1, self.test_add_event_callback)
def test_event_detected(self):
bbio_gpio = Mock()
adapter = GPIO.AdafruitBBIOAdapter(bbio_gpio)
adapter.event_detected(1)
bbio_gpio.event_detected.assert_called_with(1)
def test_wait_for_edge(self):
bbio_gpio = Mock()
adapter = GPIO.AdafruitBBIOAdapter(bbio_gpio)
adapter.wait_for_edge(1, GPIO.FALLING)
bbio_gpio.wait_for_edge.assert_called_with(1, bbio_gpio.FALLING)
def test_cleanup(self):
bbio_gpio = Mock()
adapter = GPIO.AdafruitBBIOAdapter(bbio_gpio)
adapter.cleanup()
bbio_gpio.cleanup.assert_called()
def test_cleanup_pin(self):
bbio_gpio = Mock()
adapter = GPIO.AdafruitBBIOAdapter(bbio_gpio)
adapter.cleanup(1)
bbio_gpio.cleanup.assert_called_with(1)
class TestGetPlatformGPIO(unittest.TestCase):
@patch.dict('sys.modules', {'RPi': Mock(), 'RPi.GPIO': Mock()})
@patch('platform.platform', Mock(return_value='Linux-3.10.25+-armv6l-with-debian-7.4'))
def test_raspberrypi(self):
gpio = GPIO.get_platform_gpio()
self.assertIsInstance(gpio, GPIO.RPiGPIOAdapter)
@patch.dict('sys.modules', {'Adafruit_BBIO': Mock(), 'Adafruit_BBIO.GPIO': Mock()})
@patch('platform.platform', Mock(return_value='Linux-3.8.13-bone47-armv7l-with-debian-7.4'))
def test_beagleboneblack(self):
gpio = GPIO.get_platform_gpio()
self.assertIsInstance(gpio, GPIO.AdafruitBBIOAdapter)
@patch('platform.platform', Mock(return_value='Darwin-13.2.0-x86_64-i386-64bit'))
def test_otherplatform(self):
self.assertRaises(RuntimeError, GPIO.get_platform_gpio)
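# Hedged usage note (not part of the original tests): application code normally
# obtains a platform-appropriate adapter via GPIO.get_platform_gpio() and then
# drives pins through the common interface exercised above; pin 18 below is a
# placeholder.
#   gpio = GPIO.get_platform_gpio()
#   gpio.setup(18, GPIO.OUT)
#   gpio.set_high(18)
#   gpio.cleanup()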
|
|
# -*- test-case-name: pyflakes -*-
# (c) 2005-2010 Divmod, Inc.
# See LICENSE file for details
import __builtin__
import os.path
import _ast
import re
from pyflakes import messages
interpol = re.compile(r'%(\([a-zA-Z0-9_]+\))?[-#0 +]*([0-9]+|[*])?'
r'(\.([0-9]+|[*]))?[hlL]?[diouxXeEfFgGcrs%]')
# utility function to iterate over an AST node's children, adapted
# from Python 2.6's standard ast module
try:
import ast
iter_child_nodes = ast.iter_child_nodes
except (ImportError, AttributeError):
def iter_child_nodes(node, astcls=_ast.AST):
"""
Yield all direct child nodes of *node*, that is, all fields that are nodes
and all items of fields that are lists of nodes.
"""
for name in node._fields:
field = getattr(node, name, None)
if isinstance(field, astcls):
yield field
elif isinstance(field, list):
for item in field:
yield item
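# Hedged illustration (not part of the original source): for a node parsed from
# "x = f(1)", iter_child_nodes(assign_node) yields the Name target and the Call
# value, which is how handleChildren below walks the tree one level at a time.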
class Binding(object):
"""
Represents the binding of a value to a name.
The checker uses this to keep track of which names have been bound and
which names have not. See L{Assignment} for a special type of binding that
is checked with stricter rules.
@ivar used: pair of (L{Scope}, line-number) indicating the scope and
line number that this binding was last used
"""
def __init__(self, name, source):
self.name = name
self.source = source
self.used = False
def __str__(self):
return self.name
def __repr__(self):
return '<%s object %r from line %r at 0x%x>' % (self.__class__.__name__,
self.name,
self.source.lineno,
id(self))
class UnBinding(Binding):
'''Created by the 'del' operator.'''
class Importation(Binding):
"""
A binding created by an import statement.
@ivar fullName: The complete name given to the import statement,
possibly including multiple dotted components.
@type fullName: C{str}
"""
def __init__(self, name, source):
self.fullName = name
name = name.split('.')[0]
super(Importation, self).__init__(name, source)
class Argument(Binding):
"""
Represents binding a name as an argument.
"""
class Assignment(Binding):
"""
Represents binding a name with an explicit assignment.
The checker will raise warnings for any Assignment that isn't used. Also,
the checker does not consider assignments in tuple/list unpacking to be
Assignments, rather it treats them as simple Bindings.
"""
class FunctionDefinition(Binding):
is_property = False
class ExportBinding(Binding):
"""
A binding created by an C{__all__} assignment. If the names in the list
can be determined statically, they will be treated as names for export and
additional checking applied to them.
The only C{__all__} assignment that can be recognized is one which takes
the value of a literal list containing literal strings. For example::
__all__ = ["foo", "bar"]
Names which are imported and not otherwise used but appear in the value of
C{__all__} will not have an unused import warning reported for them.
"""
def names(self):
"""
Return a list of the names referenced by this binding.
"""
names = []
if isinstance(self.source, (_ast.Tuple, _ast.List)):
for node in self.source.elts:
if isinstance(node, _ast.Str):
names.append(node.s)
return names
class Scope(dict):
importStarred = False # set to True when import * is found
def __repr__(self):
return '<%s at 0x%x %s>' % (self.__class__.__name__, id(self), dict.__repr__(self))
def __init__(self):
super(Scope, self).__init__()
def of_type(self, type):
return isinstance(self, type)
class ClassScope(Scope):
pass
class FunctionScope(Scope):
"""
I represent a name scope for a function.
@ivar globals: Names declared 'global' in this function.
"""
def __init__(self):
super(FunctionScope, self).__init__()
self.globals = {}
class ConditionScope(Scope):
    #: set to True when control flow leaves this scope (e.g. via a return);
    #: such a scope may be discarded instead of being promoted
escapes = False
#XXX: maybe handle in the conditions
def _get_import_starred(self):
return self.parent.importStarred
def _set_import_starred(self, value):
self.parent.importStarred = value
importStarred = property(_get_import_starred, _set_import_starred)
def __init__(self, parent):
super(ConditionScope, self).__init__()
self.parent = parent
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
return self.parent[key]
@property
def globals(self):
return self.parent.globals
def of_type(self, type):
return self.parent.of_type(type) or isinstance(self, type)
class ModuleScope(Scope):
pass
# Globally defined names which are not attributes of the __builtin__ module.
_MAGIC_GLOBALS = ['__file__', '__builtins__']
class Checker(object):
"""
I check the cleanliness and sanity of Python code.
@ivar _deferredFunctions: Tracking list used by L{deferFunction}. Elements
of the list are two-tuples. The first element is the callable passed
to L{deferFunction}. The second element is a copy of the scope stack
at the time L{deferFunction} was called.
@ivar _deferredAssignments: Similar to C{_deferredFunctions}, but for
callables which are deferred assignment checks.
"""
nodeDepth = 0
traceTree = False
def __init__(self, tree, filename='(none)', traceTree=False):
self._deferredFunctions = []
self._deferredAssignments = []
self.dead_scopes = []
self.messages = []
self.filename = filename
self.scopeStack = [ModuleScope()]
self.traceTree = traceTree
self.futuresAllowed = True
self.handleChildren(tree)
self._runDeferred(self._deferredFunctions)
# Set _deferredFunctions to None so that deferFunction will fail
# noisily if called after we've run through the deferred functions.
self._deferredFunctions = None
self._runDeferred(self._deferredAssignments)
# Set _deferredAssignments to None so that deferAssignment will fail
        # noisily if called after we've run through the deferred assignments.
self._deferredAssignments = None
del self.scopeStack[1:]
self.popScope()
self.check_dead_scopes()
def deferFunction(self, callable):
'''
Schedule a function handler to be called just before completion.
This is used for handling function bodies, which must be deferred
because code later in the file might modify the global scope. When
`callable` is called, the scope at the time this is called will be
restored, however it will contain any new bindings added to it.
'''
self._deferredFunctions.append((callable, self.scopeStack[:]))
def deferAssignment(self, callable):
"""
Schedule an assignment handler to be called just after deferred
function handlers.
"""
self._deferredAssignments.append((callable, self.scopeStack[:]))
def _runDeferred(self, deferred):
"""
Run the callables in C{deferred} using their associated scope stack.
"""
for handler, scope in deferred:
self.scopeStack = scope
handler()
def scope(self):
return self.scopeStack[-1]
scope = property(scope)
def popScope(self):
scope = self.scopeStack.pop()
# dirty hack
if isinstance(scope, ConditionScope):
self.scopeStack.append(scope.parent)
self.dead_scopes.append(scope)
return scope
def check_dead_scopes(self):
"""
Look at scopes which have been fully examined and report names in them
which were imported but unused.
"""
for scope in self.dead_scopes:
export = isinstance(scope.get('__all__'), ExportBinding)
if export:
all = scope['__all__'].names()
if os.path.split(self.filename)[1] != '__init__.py':
# Look for possible mistakes in the export list
undefined = set(all) - set(scope)
for name in undefined:
self.report(
messages.UndefinedExport,
scope['__all__'].source,
name)
else:
all = []
# Look for imported names that aren't used.
for importation in scope.itervalues():
if isinstance(importation, Importation):
if not importation.used and importation.name not in all:
self.report(
messages.UnusedImport,
importation.source,
importation.name)
def pushFunctionScope(self):
self.scopeStack.append(FunctionScope())
def pushClassScope(self):
self.scopeStack.append(ClassScope())
def pushConditionScope(self):
#XXX:hack
self.scopeStack[-1] = ConditionScope(self.scope)
def report(self, messageClass, *args, **kwargs):
msg = messageClass(self.filename, *args, **kwargs)
self.messages.append(msg)
def handleChildren(self, tree):
for node in iter_child_nodes(tree):
self.handleNode(node, tree)
def isDocstring(self, node):
"""
Determine if the given node is a docstring, as long as it is at the
correct place in the node tree.
"""
return isinstance(node, _ast.Str) or \
(isinstance(node, _ast.Expr) and
isinstance(node.value, _ast.Str))
def handleNode(self, node, parent):
node.parent = parent
if self.traceTree:
print ' ' * self.nodeDepth + node.__class__.__name__
self.nodeDepth += 1
if self.futuresAllowed and not \
(isinstance(node, _ast.ImportFrom) or self.isDocstring(node)):
self.futuresAllowed = False
nodeType = node.__class__.__name__.upper()
try:
handler = getattr(self, nodeType)
handler(node)
finally:
self.nodeDepth -= 1
if self.traceTree:
print ' ' * self.nodeDepth + 'end ' + node.__class__.__name__
def ignore(self, node):
pass
# "stmt" type nodes
DELETE = PRINT = WHILE = WITH = \
TRYFINALLY = ASSERT = EXEC = EXPR = handleChildren
CONTINUE = BREAK = PASS = ignore
# "expr" type nodes
BOOLOP = UNARYOP = IFEXP = DICT = SET = YIELD = COMPARE = \
REPR = SUBSCRIPT = LIST = TUPLE = handleChildren
NUM = STR = ELLIPSIS = ignore
# "slice" type nodes
SLICE = EXTSLICE = INDEX = handleChildren
# expression contexts are node instances too, though being constants
LOAD = STORE = DEL = AUGLOAD = AUGSTORE = PARAM = ignore
# same for operators
AND = OR = ADD = SUB = MULT = DIV = MOD = POW = LSHIFT = RSHIFT = \
BITOR = BITXOR = BITAND = FLOORDIV = INVERT = NOT = UADD = USUB = \
EQ = NOTEQ = LT = LTE = GT = GTE = IS = ISNOT = IN = NOTIN = ignore
# additional node types
COMPREHENSION = EXCEPTHANDLER = KEYWORD = handleChildren
def hasParent(self, node, kind):
parent = getattr(node, 'parent', None)
while True:
if not parent:
return False
elif isinstance(parent, kind):
return True
parent = getattr(parent, 'parent', None)
def addBinding(self, node, value, reportRedef=True):
'''Called when a binding is altered.
        - `node` is the statement responsible for the change
        - `value` is the new value, a Binding instance, associated with the
          binding; an UnBinding instance removes the name from the scope.
- if `reportRedef` is True (default), rebinding while unused will be
reported.
'''
if (isinstance(self.scope.get(value.name), FunctionDefinition)
and isinstance(value, FunctionDefinition)
and not self.scope.get(value.name).is_property
and not value.is_property):
self.report(messages.RedefinedFunction,
node, value.name, self.scope[value.name].source.lineno)
redefinedWhileUnused = False
if not isinstance(self.scope, ClassScope):
for scope in self.scopeStack[::-1]:
existing = scope.get(value.name)
if (isinstance(existing, Importation)
and not existing.used
and (not isinstance(value, Importation) or value.fullName == existing.fullName)
and reportRedef):
redefinedWhileUnused = True
self.report(messages.RedefinedWhileUnused,
node, value.name, scope[value.name].source.lineno)
if (not redefinedWhileUnused and
self.hasParent(value.source, _ast.ListComp)):
existing = self.scope.get(value.name)
if (existing and
not self.hasParent(existing.source, (_ast.For, _ast.ListComp))
and reportRedef):
self.report(messages.RedefinedInListComp, node, value.name,
self.scope[value.name].source.lineno)
if isinstance(value, UnBinding):
try:
self.scope[value.name]
if self.scope.pop(value.name, None) is None:
#XXX: del in condition scope
pass
except KeyError:
self.report(messages.UndefinedName, node, value.name)
else:
self.scope[value.name] = value
def GLOBAL(self, node):
"""
Keep track of globals declarations.
"""
if isinstance(self.scope, FunctionScope):
self.scope.globals.update(dict.fromkeys(node.names))
def LISTCOMP(self, node):
# handle generators before element
for gen in node.generators:
self.handleNode(gen, node)
self.handleNode(node.elt, node)
GENERATOREXP = SETCOMP = LISTCOMP
# dictionary comprehensions; introduced in Python 2.7
def DICTCOMP(self, node):
for gen in node.generators:
self.handleNode(gen, node)
self.handleNode(node.key, node)
self.handleNode(node.value, node)
def FOR(self, node):
"""
Process bindings for loop variables.
"""
vars = []
def collectLoopVars(n):
if isinstance(n, _ast.Name):
vars.append(n.id)
elif isinstance(n, _ast.expr_context):
return
else:
for c in iter_child_nodes(n):
collectLoopVars(c)
collectLoopVars(node.target)
for varn in vars:
if (isinstance(self.scope.get(varn), Importation)
# unused ones will get an unused import warning
and self.scope[varn].used):
self.report(messages.ImportShadowedByLoopVar,
node, varn, self.scope[varn].source.lineno)
self.handleChildren(node)
def BINOP(self, node):
if isinstance(node.op, _ast.Mod) and isinstance(node.left, _ast.Str):
dictfmt = ('%(' in node.left.s and '%%(' not in node.left.s)
nplaces = 0
for m in interpol.finditer(node.left.s):
if m.group()[-1] != '%':
nplaces += 1 + m.group().count('*')
if isinstance(node.right, _ast.Dict):
if not dictfmt:
self.report(messages.StringFormattingProblem,
node, 'tuple', 'dict')
else:
if isinstance(node.right, _ast.Tuple):
if dictfmt:
self.report(messages.StringFormattingProblem,
node, 'dict', 'tuple')
else:
nobjects = len(node.right.elts)
if nobjects != nplaces:
self.report(messages.StringFormattingProblem,
node, nplaces, nobjects)
self.handleNode(node.right, node)
else:
self.handleNode(node.left, node)
self.handleNode(node.right, node)
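    # Hedged illustration (not part of the original source): BINOP above counts
    # interpolation placeholders in a literal left-hand format string, so code
    # like "'%s %s' % (1,)" (two placeholders, one value) or "'%(k)s' % (1, 2)"
    # (dict-style format applied to a tuple) would be reported as a
    # StringFormattingProblem.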
def CALL(self, node):
if isinstance(node.func, _ast.Tuple):
self.report(messages.TupleCall, node)
self.handleChildren(node)
def ATTRIBUTE(self, node):
if isinstance(node.value, _ast.Str) and node.attr == 'format' and \
isinstance(node.parent, _ast.Call) and node is node.parent.func:
try:
num = 0
maxnum = -1
kwds = set()
for lit, fn, fs, conv in node.value.s._formatter_parser():
if lit:
continue
fn = fn.partition('.')[0].partition('[')[0]
if not fn:
num += 1
elif fn.isdigit():
maxnum = max(maxnum, int(fn))
else:
kwds.add(fn)
except ValueError, err:
self.report(messages.StringFormatProblem,
node, str(err))
else:
callnode = node.parent
# can only really check if no *args or **kwds are used
if not (callnode.starargs or callnode.kwargs):
nargs = len(node.parent.args)
kwdset = set(kwd.arg for kwd in node.parent.keywords)
if nargs < num:
self.report(messages.StringFormatProblem, node,
'not enough positional args (need %s)' % num)
elif nargs < maxnum+1:
self.report(messages.StringFormatProblem, node,
'not enough positional args (need %s)' %
(maxnum+1))
missing = kwds - kwdset
if missing:
self.report(messages.StringFormatProblem, node,
'keyword args missing: %s' % ', '.join(missing))
else:
self.handleNode(node.value, node)
def NAME(self, node):
"""
Handle occurrence of Name (which can be a load/store/delete access.)
"""
# Locate the name in locals / function / globals scopes.
if isinstance(node.ctx, (_ast.Load, _ast.AugLoad)):
# try local scope
importStarred = self.scope.importStarred
try:
self.scope[node.id].used = (self.scope, node)
except KeyError:
pass
else:
return
# try enclosing function scopes
for scope in self.scopeStack[-2:0:-1]:
importStarred = importStarred or scope.importStarred
if not scope.of_type(FunctionScope):
continue
try:
scope[node.id].used = (self.scope, node)
except KeyError:
pass
else:
return
# try global scope
importStarred = importStarred or self.scopeStack[0].importStarred
try:
self.scopeStack[0][node.id].used = (self.scope, node)
except KeyError:
if ((not hasattr(__builtin__, node.id))
and node.id not in _MAGIC_GLOBALS
and not importStarred):
if (os.path.basename(self.filename) == '__init__.py' and
node.id == '__path__'):
# the special name __path__ is valid only in packages
pass
else:
self.report(messages.UndefinedName, node, node.id)
elif isinstance(node.ctx, (_ast.Store, _ast.AugStore)):
# if the name hasn't already been defined in the current scope
if isinstance(self.scope, FunctionScope) and node.id not in self.scope:
# for each function or module scope above us
for scope in self.scopeStack[:-1]:
if not isinstance(scope, (FunctionScope, ModuleScope)):
continue
# if the name was defined in that scope, and the name has
# been accessed already in the current scope, and hasn't
# been declared global
if (node.id in scope
and scope[node.id].used
and scope[node.id].used[0] is self.scope
and node.id not in self.scope.globals):
# then it's probably a mistake
self.report(messages.UndefinedLocal,
scope[node.id].used[1],
node.id,
scope[node.id].source.lineno)
                        # Kevin's fork passed the source info instead of the
                        # line number here, but the message constructor simply
                        # reverted that.
break
if isinstance(node.parent,
(_ast.For, _ast.comprehension, _ast.Tuple, _ast.List)):
binding = Binding(node.id, node)
elif (node.id == '__all__' and
isinstance(self.scope, ModuleScope)):
binding = ExportBinding(node.id, node.parent.value)
else:
binding = Assignment(node.id, node)
if node.id in self.scope:
binding.used = self.scope[node.id].used
self.addBinding(node, binding)
elif isinstance(node.ctx, _ast.Del):
if isinstance(self.scope, FunctionScope) and \
node.id in self.scope.globals:
del self.scope.globals[node.id]
else:
self.addBinding(node, UnBinding(node.id, node))
else:
# must be a Param context -- this only happens for names in function
# arguments, but these aren't dispatched through here
raise RuntimeError(
"Got impossible expression context: %r" % (node.ctx,))
def FUNCTIONDEF(self, node):
# the decorators attribute is called decorator_list as of Python 2.6
is_property = False
if hasattr(node, 'decorators'):
decorators = node.decorators
else:
decorators = node.decorator_list
for deco in decorators:
self.handleNode(deco, node)
if getattr(deco, 'id', None) == 'property':
is_property = True
if getattr(deco, 'attr', None) in ('setter', 'deleter'):
is_property = True
funcdef = FunctionDefinition(node.name, node)
funcdef.is_property = is_property
self.addBinding(node, funcdef)
self.LAMBDA(node)
def LAMBDA(self, node):
for default in node.args.defaults:
self.handleNode(default, node)
def runFunction():
args = []
def addArgs(arglist):
for arg in arglist:
if isinstance(arg, _ast.Tuple):
addArgs(arg.elts)
else:
if arg.id in args:
self.report(messages.DuplicateArgument, node, arg.id)
args.append(arg.id)
self.pushFunctionScope()
addArgs(node.args.args)
# vararg/kwarg identifiers are not Name nodes
if node.args.vararg:
args.append(node.args.vararg)
if node.args.kwarg:
args.append(node.args.kwarg)
for name in args:
self.addBinding(node, Argument(name, node), reportRedef=False)
if isinstance(node.body, list):
# case for FunctionDefs
for stmt in node.body:
self.handleNode(stmt, node)
else:
# case for Lambdas
self.handleNode(node.body, node)
def checkUnusedAssignments():
"""
Check to see if any assignments have not been used.
"""
for name, binding in self.scope.iteritems():
if name == '__tracebackhide__':
# used to hide frames in pytest
continue
if (not binding.used and not name in self.scope.globals
and isinstance(binding, Assignment)):
self.report(messages.UnusedVariable,
binding.source, name)
self.deferAssignment(checkUnusedAssignments)
self.popScope()
self.deferFunction(runFunction)
def CLASSDEF(self, node):
"""
Check names used in a class definition, including its decorators, base
classes, and the body of its definition. Additionally, add its name to
the current scope.
"""
# decorator_list is present as of Python 2.6
for deco in getattr(node, 'decorator_list', []):
self.handleNode(deco, node)
for baseNode in node.bases:
self.handleNode(baseNode, node)
self.pushClassScope()
for stmt in node.body:
self.handleNode(stmt, node)
self.popScope()
self.addBinding(node, Binding(node.name, node))
def ASSIGN(self, node):
self.handleNode(node.value, node)
for target in node.targets:
self.handleNode(target, node)
def AUGASSIGN(self, node):
# AugAssign is awkward: must set the context explicitly and visit twice,
# once with AugLoad context, once with AugStore context
node.target.ctx = _ast.AugLoad()
self.handleNode(node.target, node)
self.handleNode(node.value, node)
node.target.ctx = _ast.AugStore()
self.handleNode(node.target, node)
def IMPORT(self, node):
for alias in node.names:
name = alias.asname or alias.name
importation = Importation(name, node)
self.addBinding(node, importation)
def IMPORTFROM(self, node):
if node.module == '__future__':
if not self.futuresAllowed:
self.report(messages.LateFutureImport, node,
[n.name for n in node.names])
else:
self.futuresAllowed = False
for alias in node.names:
if alias.name == '*':
self.scope.importStarred = True
self.report(messages.ImportStarUsed, node, node.module)
continue
name = alias.asname or alias.name
importation = Importation(name, node)
if node.module == '__future__':
importation.used = (self.scope, node)
self.addBinding(node, importation)
def RETURN(self, node):
self.scope.escapes = True
if not node.value:
return
self.handleNode(node.value, node)
if isinstance(node.value, _ast.Name):
name = node.value.id
elif isinstance(node.value, _ast.Call) and \
isinstance(node.value.func, _ast.Name):
name = node.value.func.id
else:
return
if name.endswith('Error') or name.endswith('Exception'):
self.report(messages.ExceptionReturn, node, name)
def TRYEXCEPT(self, node):
"""
Handle C{try}-C{except}. In particular, do not report redefinitions
when occurring in an "except ImportError" block.
"""
self.pushConditionScope()
for stmt in node.body:
self.handleNode(stmt, node)
body_scope = self.popScope()
handler_scopes = [body_scope]
for handler in node.handlers:
if handler.type:
self.handleNode(handler.type, node)
if handler.name:
self.handleNode(handler.name, node)
self.pushConditionScope()
for stmt in handler.body:
self.handleNode(stmt, node)
handler_scopes.append(self.popScope())
#XXX complicated logic, check
valid_scopes = [scope for scope in handler_scopes if not scope.escapes]
if valid_scopes:
common = set(valid_scopes[0])
for scope in valid_scopes[1:]:
common.intersection_update(scope)
            # when the body scope doesn't raise, it's currently best to
            # consider its names available for the orelse part
if not body_scope.escapes:
common.update(body_scope)
for name in common:
#XXX: really ok?
self.scope[name] = valid_scopes[0].pop(name)
for scope in valid_scopes[1:]:
scope.pop(name, None) # might not exist when body is ok
for scope in valid_scopes:
for key, binding in scope.items():
if key not in self.scope and not binding.used:
                    # bubble up all unused variables; ideally this would use
                    # the possible flowgraphs instead
self.scope[key] = binding
for stmt in node.orelse:
self.handleNode(stmt, node)
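    # Illustrative note: the per-handler condition scopes above are what allow
    # the common fallback-import idiom (hypothetical input code) to pass
    # without a redefinition warning:
    #
    #     try:
    #         import simplejson as json
    #     except ImportError:
    #         import json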
def RAISE(self, node):
"""
mark a scope if a exception is raised in it
"""
self.scope.escapes = True
self.handleChildren(node)
def IF(self, node):
"""
handle if statements,
use subscopes, and reconcile them in the parent scope
special conditions for raising
"""
self.handleNode(node.test, node)
# special case to handle modules with execnet channels
if isinstance(self.scope, ModuleScope) \
and isinstance(node.test, _ast.Compare) \
and len(node.test.ops) == 1 \
and isinstance(node.test.ops[0], _ast.Eq) \
and isinstance(node.test.left, _ast.Name) \
and node.test.left.id == '__name__' \
and isinstance(node.test.comparators[0], _ast.Str) \
and node.test.comparators[0].s == '__channelexec__':
#XXX: is that semantically valid?
self.addBinding(node, Binding('channel', node))
self.pushConditionScope()
for stmt in node.body:
self.handleNode(stmt, node)
body_scope = self.popScope()
self.pushConditionScope()
for stmt in node.orelse:
self.handleNode(stmt, node)
else_scope = self.popScope()
if body_scope.escapes and else_scope.escapes:
pass
elif body_scope.escapes:
self.scope.update(else_scope)
elif else_scope.escapes:
self.scope.update(body_scope)
else:
#XXX: better scheme for unsure bindings
common = set(body_scope) & set(else_scope)
for key in common:
self.scope[key] = body_scope[key]
for key, binding in body_scope.items():
if key not in self.scope and not binding.used:
#XXX: wrap it?
self.scope[key] = binding
for key, binding in else_scope.items():
if key not in self.scope and not binding.used:
#XXX: wrap it?
self.scope[key] = binding
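    # Illustrative note on the module-level special case above: execnet
    # remote-execution modules are conventionally written as below
    # (hypothetical input code), and inside such a module the name `channel`
    # is treated as defined rather than reported as undefined:
    #
    #     if __name__ == '__channelexec__':
    #         for item in channel:
    #             channel.send(item)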
|
|
"""Storage for auth models."""
import asyncio
from collections import OrderedDict
from datetime import timedelta
import hmac
from logging import getLogger
from typing import Any, Dict, List, Optional
from homeassistant.auth.const import ACCESS_TOKEN_EXPIRATION
from homeassistant.core import HomeAssistant, callback
from homeassistant.util import dt as dt_util
from . import models
from .const import GROUP_ID_ADMIN, GROUP_ID_USER, GROUP_ID_READ_ONLY
from .permissions import PermissionLookup, system_policies
from .permissions.types import PolicyType
STORAGE_VERSION = 1
STORAGE_KEY = "auth"
GROUP_NAME_ADMIN = "Administrators"
GROUP_NAME_USER = "Users"
GROUP_NAME_READ_ONLY = "Read Only"
class AuthStore:
"""Stores authentication info.
Any mutation to an object should happen inside the auth store.
The auth store is lazy. It won't load the data from disk until a method is
called that needs it.
"""
def __init__(self, hass: HomeAssistant) -> None:
"""Initialize the auth store."""
self.hass = hass
self._users: Optional[Dict[str, models.User]] = None
self._groups: Optional[Dict[str, models.Group]] = None
self._perm_lookup: Optional[PermissionLookup] = None
self._store = hass.helpers.storage.Store(
STORAGE_VERSION, STORAGE_KEY, private=True
)
self._lock = asyncio.Lock()
async def async_get_groups(self) -> List[models.Group]:
"""Retrieve all users."""
if self._groups is None:
await self._async_load()
assert self._groups is not None
return list(self._groups.values())
async def async_get_group(self, group_id: str) -> Optional[models.Group]:
"""Retrieve all users."""
if self._groups is None:
await self._async_load()
assert self._groups is not None
return self._groups.get(group_id)
async def async_get_users(self) -> List[models.User]:
"""Retrieve all users."""
if self._users is None:
await self._async_load()
assert self._users is not None
return list(self._users.values())
async def async_get_user(self, user_id: str) -> Optional[models.User]:
"""Retrieve a user by id."""
if self._users is None:
await self._async_load()
assert self._users is not None
return self._users.get(user_id)
async def async_create_user(
self,
name: Optional[str],
is_owner: Optional[bool] = None,
is_active: Optional[bool] = None,
system_generated: Optional[bool] = None,
credentials: Optional[models.Credentials] = None,
group_ids: Optional[List[str]] = None,
) -> models.User:
"""Create a new user."""
if self._users is None:
await self._async_load()
assert self._users is not None
assert self._groups is not None
groups = []
for group_id in group_ids or []:
group = self._groups.get(group_id)
if group is None:
raise ValueError(f"Invalid group specified {group_id}")
groups.append(group)
kwargs: Dict[str, Any] = {
"name": name,
# Until we get group management, we just put everyone in the
# same group.
"groups": groups,
"perm_lookup": self._perm_lookup,
}
if is_owner is not None:
kwargs["is_owner"] = is_owner
if is_active is not None:
kwargs["is_active"] = is_active
if system_generated is not None:
kwargs["system_generated"] = system_generated
new_user = models.User(**kwargs)
self._users[new_user.id] = new_user
if credentials is None:
self._async_schedule_save()
return new_user
# Saving is done inside the link.
await self.async_link_user(new_user, credentials)
return new_user
async def async_link_user(
self, user: models.User, credentials: models.Credentials
) -> None:
"""Add credentials to an existing user."""
user.credentials.append(credentials)
self._async_schedule_save()
credentials.is_new = False
async def async_remove_user(self, user: models.User) -> None:
"""Remove a user."""
if self._users is None:
await self._async_load()
assert self._users is not None
self._users.pop(user.id)
self._async_schedule_save()
async def async_update_user(
self,
user: models.User,
name: Optional[str] = None,
is_active: Optional[bool] = None,
group_ids: Optional[List[str]] = None,
) -> None:
"""Update a user."""
assert self._groups is not None
if group_ids is not None:
groups = []
for grid in group_ids:
group = self._groups.get(grid)
if group is None:
raise ValueError("Invalid group specified.")
groups.append(group)
user.groups = groups
user.invalidate_permission_cache()
for attr_name, value in (("name", name), ("is_active", is_active)):
if value is not None:
setattr(user, attr_name, value)
self._async_schedule_save()
async def async_activate_user(self, user: models.User) -> None:
"""Activate a user."""
user.is_active = True
self._async_schedule_save()
async def async_deactivate_user(self, user: models.User) -> None:
"""Activate a user."""
user.is_active = False
self._async_schedule_save()
async def async_remove_credentials(self, credentials: models.Credentials) -> None:
"""Remove credentials."""
if self._users is None:
await self._async_load()
assert self._users is not None
for user in self._users.values():
found = None
for index, cred in enumerate(user.credentials):
if cred is credentials:
found = index
break
if found is not None:
user.credentials.pop(found)
break
self._async_schedule_save()
async def async_create_refresh_token(
self,
user: models.User,
client_id: Optional[str] = None,
client_name: Optional[str] = None,
client_icon: Optional[str] = None,
token_type: str = models.TOKEN_TYPE_NORMAL,
access_token_expiration: timedelta = ACCESS_TOKEN_EXPIRATION,
) -> models.RefreshToken:
"""Create a new token for a user."""
kwargs: Dict[str, Any] = {
"user": user,
"client_id": client_id,
"token_type": token_type,
"access_token_expiration": access_token_expiration,
}
if client_name:
kwargs["client_name"] = client_name
if client_icon:
kwargs["client_icon"] = client_icon
refresh_token = models.RefreshToken(**kwargs)
user.refresh_tokens[refresh_token.id] = refresh_token
self._async_schedule_save()
return refresh_token
async def async_remove_refresh_token(
self, refresh_token: models.RefreshToken
) -> None:
"""Remove a refresh token."""
if self._users is None:
await self._async_load()
assert self._users is not None
for user in self._users.values():
if user.refresh_tokens.pop(refresh_token.id, None):
self._async_schedule_save()
break
async def async_get_refresh_token(
self, token_id: str
) -> Optional[models.RefreshToken]:
"""Get refresh token by id."""
if self._users is None:
await self._async_load()
assert self._users is not None
for user in self._users.values():
refresh_token = user.refresh_tokens.get(token_id)
if refresh_token is not None:
return refresh_token
return None
async def async_get_refresh_token_by_token(
self, token: str
) -> Optional[models.RefreshToken]:
"""Get refresh token by token."""
if self._users is None:
await self._async_load()
assert self._users is not None
found = None
for user in self._users.values():
for refresh_token in user.refresh_tokens.values():
if hmac.compare_digest(refresh_token.token, token):
found = refresh_token
return found
@callback
def async_log_refresh_token_usage(
self, refresh_token: models.RefreshToken, remote_ip: Optional[str] = None
) -> None:
"""Update refresh token last used information."""
refresh_token.last_used_at = dt_util.utcnow()
refresh_token.last_used_ip = remote_ip
self._async_schedule_save()
async def _async_load(self) -> None:
"""Load the users."""
async with self._lock:
if self._users is not None:
return
await self._async_load_task()
async def _async_load_task(self) -> None:
"""Load the users."""
[ent_reg, dev_reg, data] = await asyncio.gather(
self.hass.helpers.entity_registry.async_get_registry(),
self.hass.helpers.device_registry.async_get_registry(),
self._store.async_load(),
)
# Make sure that we're not overriding data if 2 loads happened at the
# same time
if self._users is not None:
return
self._perm_lookup = perm_lookup = PermissionLookup(ent_reg, dev_reg)
if data is None:
self._set_defaults()
return
users: Dict[str, models.User] = OrderedDict()
groups: Dict[str, models.Group] = OrderedDict()
# Soft-migrating data as we load. We are going to make sure we have a
# read only group and an admin group. There are two states that we can
# migrate from:
# 1. Data from a recent version which has a single group without policy
# 2. Data from old version which has no groups
has_admin_group = False
has_user_group = False
has_read_only_group = False
group_without_policy = None
# When creating objects we mention each attribute explicitly. This
# prevents crashing if user rolls back HA version after a new property
# was added.
for group_dict in data.get("groups", []):
policy: Optional[PolicyType] = None
if group_dict["id"] == GROUP_ID_ADMIN:
has_admin_group = True
name = GROUP_NAME_ADMIN
policy = system_policies.ADMIN_POLICY
system_generated = True
elif group_dict["id"] == GROUP_ID_USER:
has_user_group = True
name = GROUP_NAME_USER
policy = system_policies.USER_POLICY
system_generated = True
elif group_dict["id"] == GROUP_ID_READ_ONLY:
has_read_only_group = True
name = GROUP_NAME_READ_ONLY
policy = system_policies.READ_ONLY_POLICY
system_generated = True
else:
name = group_dict["name"]
policy = group_dict.get("policy")
system_generated = False
# We don't want groups without a policy that are not system groups
# This is part of migrating from state 1
if policy is None:
group_without_policy = group_dict["id"]
continue
groups[group_dict["id"]] = models.Group(
id=group_dict["id"],
name=name,
policy=policy,
system_generated=system_generated,
)
# If there are no groups, add all existing users to the admin group.
# This is part of migrating from state 2
migrate_users_to_admin_group = not groups and group_without_policy is None
        # If we find a group without a policy, we need to migrate its users to
        # the admin group. We only do this if there are no other groups, as
        # that is the expected state. Otherwise we do not mark anyone as admin.
# This is part of migrating from state 1
if groups and group_without_policy is not None:
group_without_policy = None
# This is part of migrating from state 1 and 2
if not has_admin_group:
admin_group = _system_admin_group()
groups[admin_group.id] = admin_group
# This is part of migrating from state 1 and 2
if not has_read_only_group:
read_only_group = _system_read_only_group()
groups[read_only_group.id] = read_only_group
if not has_user_group:
user_group = _system_user_group()
groups[user_group.id] = user_group
for user_dict in data["users"]:
# Collect the users group.
user_groups = []
for group_id in user_dict.get("group_ids", []):
# This is part of migrating from state 1
if group_id == group_without_policy:
group_id = GROUP_ID_ADMIN
user_groups.append(groups[group_id])
# This is part of migrating from state 2
if not user_dict["system_generated"] and migrate_users_to_admin_group:
user_groups.append(groups[GROUP_ID_ADMIN])
users[user_dict["id"]] = models.User(
name=user_dict["name"],
groups=user_groups,
id=user_dict["id"],
is_owner=user_dict["is_owner"],
is_active=user_dict["is_active"],
system_generated=user_dict["system_generated"],
perm_lookup=perm_lookup,
)
for cred_dict in data["credentials"]:
users[cred_dict["user_id"]].credentials.append(
models.Credentials(
id=cred_dict["id"],
is_new=False,
auth_provider_type=cred_dict["auth_provider_type"],
auth_provider_id=cred_dict["auth_provider_id"],
data=cred_dict["data"],
)
)
for rt_dict in data["refresh_tokens"]:
# Filter out the old keys that don't have jwt_key (pre-0.76)
if "jwt_key" not in rt_dict:
continue
created_at = dt_util.parse_datetime(rt_dict["created_at"])
if created_at is None:
getLogger(__name__).error(
"Ignoring refresh token %(id)s with invalid created_at "
"%(created_at)s for user_id %(user_id)s",
rt_dict,
)
continue
token_type = rt_dict.get("token_type")
if token_type is None:
if rt_dict["client_id"] is None:
token_type = models.TOKEN_TYPE_SYSTEM
else:
token_type = models.TOKEN_TYPE_NORMAL
            # Old refresh tokens don't have last_used_at (pre-0.78)
last_used_at_str = rt_dict.get("last_used_at")
if last_used_at_str:
last_used_at = dt_util.parse_datetime(last_used_at_str)
else:
last_used_at = None
token = models.RefreshToken(
id=rt_dict["id"],
user=users[rt_dict["user_id"]],
client_id=rt_dict["client_id"],
# use dict.get to keep backward compatibility
client_name=rt_dict.get("client_name"),
client_icon=rt_dict.get("client_icon"),
token_type=token_type,
created_at=created_at,
access_token_expiration=timedelta(
seconds=rt_dict["access_token_expiration"]
),
token=rt_dict["token"],
jwt_key=rt_dict["jwt_key"],
last_used_at=last_used_at,
last_used_ip=rt_dict.get("last_used_ip"),
)
users[rt_dict["user_id"]].refresh_tokens[token.id] = token
self._groups = groups
self._users = users
@callback
def _async_schedule_save(self) -> None:
"""Save users."""
if self._users is None:
return
self._store.async_delay_save(self._data_to_save, 1)
@callback
def _data_to_save(self) -> Dict:
"""Return the data to store."""
assert self._users is not None
assert self._groups is not None
users = [
{
"id": user.id,
"group_ids": [group.id for group in user.groups],
"is_owner": user.is_owner,
"is_active": user.is_active,
"name": user.name,
"system_generated": user.system_generated,
}
for user in self._users.values()
]
groups = []
for group in self._groups.values():
g_dict: Dict[str, Any] = {
"id": group.id,
# Name not read for sys groups. Kept here for backwards compat
"name": group.name,
}
if not group.system_generated:
g_dict["policy"] = group.policy
groups.append(g_dict)
credentials = [
{
"id": credential.id,
"user_id": user.id,
"auth_provider_type": credential.auth_provider_type,
"auth_provider_id": credential.auth_provider_id,
"data": credential.data,
}
for user in self._users.values()
for credential in user.credentials
]
refresh_tokens = [
{
"id": refresh_token.id,
"user_id": user.id,
"client_id": refresh_token.client_id,
"client_name": refresh_token.client_name,
"client_icon": refresh_token.client_icon,
"token_type": refresh_token.token_type,
"created_at": refresh_token.created_at.isoformat(),
"access_token_expiration": refresh_token.access_token_expiration.total_seconds(),
"token": refresh_token.token,
"jwt_key": refresh_token.jwt_key,
"last_used_at": refresh_token.last_used_at.isoformat()
if refresh_token.last_used_at
else None,
"last_used_ip": refresh_token.last_used_ip,
}
for user in self._users.values()
for refresh_token in user.refresh_tokens.values()
]
return {
"users": users,
"groups": groups,
"credentials": credentials,
"refresh_tokens": refresh_tokens,
}
def _set_defaults(self) -> None:
"""Set default values for auth store."""
self._users = OrderedDict()
groups: Dict[str, models.Group] = OrderedDict()
admin_group = _system_admin_group()
groups[admin_group.id] = admin_group
user_group = _system_user_group()
groups[user_group.id] = user_group
read_only_group = _system_read_only_group()
groups[read_only_group.id] = read_only_group
self._groups = groups
def _system_admin_group() -> models.Group:
"""Create system admin group."""
return models.Group(
name=GROUP_NAME_ADMIN,
id=GROUP_ID_ADMIN,
policy=system_policies.ADMIN_POLICY,
system_generated=True,
)
def _system_user_group() -> models.Group:
"""Create system user group."""
return models.Group(
name=GROUP_NAME_USER,
id=GROUP_ID_USER,
policy=system_policies.USER_POLICY,
system_generated=True,
)
def _system_read_only_group() -> models.Group:
"""Create read only group."""
return models.Group(
name=GROUP_NAME_READ_ONLY,
id=GROUP_ID_READ_ONLY,
policy=system_policies.READ_ONLY_POLICY,
system_generated=True,
)
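# Illustrative sketch (not part of Home Assistant): based on _data_to_save
# above, the payload persisted under the "auth" storage key has roughly this
# shape; every identifier and value below is made up for documentation only.
#
#   {
#       "users": [
#           {"id": "abc123", "group_ids": ["system-admin"], "is_owner": True,
#            "is_active": True, "name": "Example User",
#            "system_generated": False},
#       ],
#       "groups": [
#           {"id": "system-admin", "name": "Administrators"},
#           {"id": "custom-1", "name": "Custom", "policy": {...}},
#       ],
#       "credentials": [
#           {"id": "...", "user_id": "abc123", "auth_provider_type": "...",
#            "auth_provider_id": None, "data": {...}},
#       ],
#       "refresh_tokens": [
#           {"id": "...", "user_id": "abc123", "client_id": None,
#            "token_type": "normal", "created_at": "<isoformat>",
#            "access_token_expiration": 1800.0, "token": "...",
#            "jwt_key": "...", "last_used_at": None, "last_used_ip": None},
#       ],
#   }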
|
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Transformer-based models."""
import typing
import flax
from flax import linen as nn
import jax.numpy as jnp
from language.gscan.xattn_model.model import layers
from language.gscan.xattn_model.model import model_utils
@flax.struct.dataclass
class TransformerConfig:
"""Global model hyperparameters."""
vocab_size: int
target_vocab_size: int
type_vocab_size: int = 2
dtype: typing.Any = jnp.float32
bi_hidden_dim: int = 128
l_hidden_dim: int = 128
v_hidden_dim: int = 128
l_intermediate_dim: int = 256
v_intermediate_dim: int = 256
bi_num_heads: int = 8
l_num_heads: int = 8
v_num_heads: int = 8
decode_num_heads: int = 8
l_num_layers: int = 6
v_num_layers: int = 6
bi_num_layers: int = 6
decode_num_layers: int = 6
max_position_embeddings: int = 512
dropout_rate: float = 0.1
attention_dropout_rate: float = 0.1
hidden_act: layers.ActFn = nn.gelu
deterministic: bool = True
kernel_init: layers.InitFn = layers.default_kernel_init
bias_init: layers.InitFn = layers.default_bias_init
embedding_init: layers.InitFn = layers.default_embedding_init
layer_norm_eps: float = 1e-12
cross_attn: bool = True
num_conv_channels: int = 50
conv_kernel_sizes: typing.Sequence[int] = (1, 5, 7)
max_decode_step: int = 50
decode: bool = False
beam_size: int = 1
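# Illustrative sketch: a config is typically built by overriding only the
# required vocabulary sizes and letting everything else fall back to the
# defaults above; the sizes here are placeholders, not dataset values.
#
#   train_config = TransformerConfig(
#       vocab_size=20, target_vocab_size=10, deterministic=False)
#   eval_config = train_config.replace(deterministic=True, decode=True)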
class CNNImageEncoder(nn.Module):
"""CNN-based image encoder."""
config: TransformerConfig
@nn.compact
def __call__(self, x):
cfg = self.config
feats = []
for i, kernel_size in enumerate(cfg.conv_kernel_sizes):
feat = nn.Conv(
cfg.num_conv_channels,
kernel_size=(kernel_size, kernel_size),
name=f'conv_{i}')(
x)
feats.append(feat)
img = jnp.concatenate(feats, axis=-1)
img = img.reshape(img.shape[0], -1, img.shape[-1])
img = nn.Dense(
cfg.v_hidden_dim,
kernel_init=cfg.kernel_init,
bias_init=cfg.bias_init,
name='dense')(
img)
img = nn.relu(img)
img = nn.Dropout(rate=cfg.dropout_rate)(
img, deterministic=cfg.deterministic)
return img
class TransformerEncoder(nn.Module):
"""The generatic transformer-based input encoder.
It should be inherited with other transformer-based encoders, e.g. the
encoder with or without cross-modal attention.
"""
config: TransformerConfig
def encode_txt(self, batch):
cfg = self.config
x = batch['token']
mask = batch.get('txt_mask', jnp.ones(x.shape[:2], dtype=jnp.int32))
assert x.ndim == 2, 'Inputs shape must be (batch_size, seq_len).'
x = layers.TransformerEmbeddings(
hidden_size=cfg.l_hidden_dim,
vocab_size=cfg.vocab_size,
type_vocab_size=cfg.type_vocab_size,
max_position_embeddings=cfg.max_position_embeddings,
hidden_dropout_rate=cfg.dropout_rate,
layer_norm_eps=cfg.layer_norm_eps,
deterministic=cfg.deterministic,
embedding_init=cfg.embedding_init,
name='embeddings')(x, batch.get('pos_ids'), batch.get('seg_ids'))
mask = mask[:, None, None, :]
return x, mask
def encode_image(self, batch):
img = CNNImageEncoder(self.config, name='img_enc')(batch['image'])
img_mask = jnp.ones(img.shape[:2], dtype=jnp.int32)
img_mask = img_mask[:, None, None, :]
return img, img_mask
class CrossModalEncoder(TransformerEncoder):
"""Transformer-based encoder with cross-modal attention."""
config: TransformerConfig
@nn.compact
def __call__(self, batch):
cfg = self.config
txt, txt_mask = self.encode_txt(batch)
img, img_mask = self.encode_image(batch)
for i in range(cfg.bi_num_layers):
txt, img = layers.TransformerCrossLayer(
bi_num_heads=cfg.bi_num_heads,
bi_hidden_size=cfg.bi_hidden_dim,
hidden_size1=cfg.l_hidden_dim,
hidden_size2=cfg.v_hidden_dim,
intermediate_size1=cfg.l_intermediate_dim,
intermediate_size2=cfg.v_intermediate_dim,
attention_dropout_rate=cfg.attention_dropout_rate,
hidden_dropout_rate=cfg.dropout_rate,
layer_norm_eps=cfg.layer_norm_eps,
deterministic=cfg.deterministic,
kernel_init=cfg.kernel_init,
bias_init=cfg.bias_init,
hidden_act=cfg.hidden_act,
name=f'transformer_cross_layer_{i}')(txt, img, txt_mask, img_mask)
    # Only the cross-attended image features are returned; the text stream has
    # already been fused into them by the cross-modal layers above.
    encoded = img
    encoded_mask = img_mask.squeeze(1).squeeze(1)
return encoded, encoded_mask
class NonCrossModalEncoder(TransformerEncoder):
"""Transformer-based encoder without cross-modal attention."""
config: TransformerConfig
@nn.compact
def __call__(self, batch):
cfg = self.config
txt, txt_mask = self.encode_txt(batch)
img, img_mask = self.encode_image(batch)
for i in range(cfg.l_num_layers):
txt = layers.TransformerLayer(
num_heads=cfg.l_num_heads,
hidden_size=cfg.l_hidden_dim,
intermediate_size=cfg.l_intermediate_dim,
attention_dropout_rate=cfg.attention_dropout_rate,
hidden_dropout_rate=cfg.dropout_rate,
layer_norm_eps=cfg.layer_norm_eps,
deterministic=cfg.deterministic,
kernel_init=cfg.kernel_init,
bias_init=cfg.bias_init,
hidden_act=cfg.hidden_act,
name=f'txt_transformer_layer_{i}')(
txt, txt, mask=txt_mask)
for i in range(cfg.v_num_layers):
img = layers.TransformerLayer(
num_heads=cfg.v_num_heads,
hidden_size=cfg.v_hidden_dim,
intermediate_size=cfg.v_intermediate_dim,
attention_dropout_rate=cfg.attention_dropout_rate,
hidden_dropout_rate=cfg.dropout_rate,
layer_norm_eps=cfg.layer_norm_eps,
deterministic=cfg.deterministic,
kernel_init=cfg.kernel_init,
bias_init=cfg.bias_init,
hidden_act=cfg.hidden_act,
name=f'img_transformer_layer_{i}')(
img, img, mask=img_mask)
encoded = jnp.concatenate((txt, img), axis=1)
encoded_mask = jnp.concatenate(
(txt_mask.squeeze(1).squeeze(1), img_mask.squeeze(1).squeeze(1)),
axis=1)
return encoded, encoded_mask
class TransformerDecoder(nn.Module):
"""Transformer decoder."""
config: TransformerConfig
@nn.compact
def __call__(self,
x,
encoded,
pos_ids=None,
token_type_ids=None,
decoder_mask=None,
encoder_decoder_mask=None):
cfg = self.config
x = layers.TransformerEmbeddings(
hidden_size=cfg.l_hidden_dim,
vocab_size=cfg.target_vocab_size,
type_vocab_size=cfg.type_vocab_size,
max_position_embeddings=cfg.max_position_embeddings,
hidden_dropout_rate=cfg.dropout_rate,
layer_norm_eps=cfg.layer_norm_eps,
deterministic=cfg.deterministic,
embedding_init=cfg.embedding_init,
decode=cfg.decode,
name='embeddings')(x, pos_ids, token_type_ids)
for i in range(cfg.decode_num_layers):
x = layers.TransformerEncoderDecoderLayer(
num_heads=cfg.decode_num_heads,
hidden_size=cfg.l_hidden_dim,
intermediate_size=cfg.l_intermediate_dim,
attention_dropout_rate=cfg.attention_dropout_rate,
hidden_dropout_rate=cfg.dropout_rate,
layer_norm_eps=cfg.layer_norm_eps,
deterministic=cfg.deterministic,
kernel_init=cfg.kernel_init,
bias_init=cfg.bias_init,
hidden_act=cfg.hidden_act,
decode=cfg.decode,
name=f'transformer_encoder_decoder_layer_{i}')(x, encoded,
decoder_mask,
encoder_decoder_mask)
x = nn.Dense(
cfg.target_vocab_size,
kernel_init=cfg.kernel_init,
bias_init=cfg.bias_init,
name='dense')(
x)
return x
def get_attention_masks(self, inputs, targets):
cfg = self.config
if cfg.decode:
decoder_mask = None
encoder_decoder_mask = nn.make_attention_mask(
jnp.ones_like(targets) > 0, inputs > 0)
else:
decoder_mask = nn.combine_masks(
nn.make_attention_mask(targets > 0, targets > 0, dtype=cfg.dtype),
nn.make_causal_mask(targets, dtype=cfg.dtype))
encoder_decoder_mask = nn.make_attention_mask(
targets > 0, inputs > 0, dtype=cfg.dtype)
return decoder_mask, encoder_decoder_mask
class Model(nn.Module):
"""The main model class."""
config: TransformerConfig
def setup(self):
cfg = self.config
if cfg.cross_attn:
self.encoder = CrossModalEncoder(cfg)
else:
self.encoder = NonCrossModalEncoder(cfg)
self.decoder = TransformerDecoder(cfg)
def encode(self, batch):
return self.encoder(batch)
def decode(self, targets, encoded, targets_mask, inputs_mask):
if not self.config.decode:
targets = model_utils.shift_left(targets)
targets_mask = model_utils.shift_left(targets_mask)
decoder_mask, encoder_decoder_mask = self.decoder.get_attention_masks(
inputs_mask, targets_mask)
decoder_logits = self.decoder(
targets,
encoded,
decoder_mask=decoder_mask,
encoder_decoder_mask=encoder_decoder_mask)
return decoder_logits
@nn.compact
def __call__(self, batch):
encoded, encoded_mask = self.encode(batch)
decoder_logits = self.decode(batch['target_token'], encoded,
batch['target_txt_mask'], encoded_mask)
return decoder_logits
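# Illustrative sketch (assumptions: the batch keys follow encode_txt,
# encode_image and __call__ above, and all shapes are placeholders):
#
#   import jax
#   cfg = TransformerConfig(vocab_size=20, target_vocab_size=10)
#   model = Model(cfg)
#   batch = {
#       'token': jnp.zeros((2, 8), jnp.int32),           # input command tokens
#       'image': jnp.zeros((2, 6, 6, 16), jnp.float32),  # grid-world features
#       'target_token': jnp.zeros((2, 12), jnp.int32),   # target action tokens
#       'target_txt_mask': jnp.ones((2, 12), jnp.int32),
#   }
#   variables = model.init(jax.random.PRNGKey(0), batch)
#   logits = model.apply(variables, batch)   # (2, 12, target_vocab_size)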
|
|
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import StringIO
import sys
from unittest import TestCase
from mock.mock import patch, MagicMock
from only_for_platform import not_for_platform, PLATFORM_WINDOWS
from ambari_commons.os_check import OSCheck, OSConst
utils = __import__('ambari_server.utils').utils
@not_for_platform(PLATFORM_WINDOWS)
class TestUtils(TestCase):
@patch.object(OSCheck, "get_os_family")
@patch('os.listdir')
@patch('os.path.isdir')
def test_get_ubuntu_pg_version(self, path_isdir_mock, os_listdir_mock, get_os_family_mock):
get_os_family_mock.return_value = OSConst.UBUNTU_FAMILY
path_isdir_mock.return_value = True
os_listdir_mock.return_value = ['8.4', '9.1']
self.assertEqual('9.1', utils.get_ubuntu_pg_version())
@patch.object(OSCheck, "is_suse_family")
@patch.object(OSCheck, "is_ubuntu_family")
@patch.object(OSCheck, "is_redhat_family")
@patch('ambari_server.utils.get_ubuntu_pg_version')
def test_get_postgre_running_status(self, get_ubuntu_pg_version_mock, is_redhat_family, is_ubuntu_family, is_suse_family):
is_redhat_family.return_value = False
is_ubuntu_family.return_value = True
is_suse_family.return_value = False
utils.PG_STATUS_RUNNING_DEFAULT = "red_running"
get_ubuntu_pg_version_mock.return_value = '9.1'
self.assertEqual('9.1/main', utils.get_postgre_running_status())
is_redhat_family.return_value = True
is_ubuntu_family.return_value = False
is_suse_family.return_value = False
self.assertEqual('red_running', utils.get_postgre_running_status())
@patch('os.path.isfile')
def test_locate_file(self, isfile_mock):
utils.ENV_PATH = ['/test']
# File was found in the path
isfile_mock.return_value = True
self.assertEquals('/test/myfile', utils.locate_file('myfile'))
# File not found in the path
isfile_mock.return_value = False
self.assertEquals('myfile', utils.locate_file('myfile'))
    # Testing default value
isfile_mock.return_value = False
self.assertEquals('/tmp/myfile', utils.locate_file('myfile', '/tmp'))
@patch('os.path.exists')
@patch('os.path.join')
def test_pid_exists(self, path_join_mock, path_exists_mock):
path_join_mock.return_value = '/test'
path_exists_mock.return_value = True
self.assertTrue(utils.pid_exists('1'))
@patch('time.time')
@patch('__builtin__.open')
@patch('time.sleep')
@patch('os.listdir')
@patch('os.path.join')
@patch.object(utils, 'get_symlink_path')
def test_looking_for_pid(self, get_symlink_path_mock, path_join_mock,
listdir_mock, sleep_mock, open_mock, time_mock):
def test_read():
return "test args"
def test_obj():
pass
test_obj.read = test_read
path_join_mock.return_value = '/'
open_mock.return_value = test_obj
listdir_mock.return_value = ['1000']
get_symlink_path_mock.return_value = "/symlinkpath"
time_mock.side_effect = [0, 0, 0, 0, 0, 0, 6]
out = StringIO.StringIO()
sys.stdout = out
r = utils.looking_for_pid("test args", 5)
self.assertEqual(".....", out.getvalue())
sys.stdout = sys.__stdout__
self.assertEquals(len(r), 1)
self.assertEquals(r[0], {
"pid": "1000",
"exe": "/symlinkpath",
"cmd": "test args"
})
@patch('os.path.normpath')
@patch('os.path.join')
@patch('os.path.dirname')
@patch('os.readlink')
def test_get_symlink_path(self, readlink_mock, dirname_mock, join_mock,
normpath_mock):
normpath_mock.return_value = "test value"
self.assertEquals(utils.get_symlink_path("/"), "test value")
@patch.object(utils, 'pid_exists')
@patch('__builtin__.open')
@patch('os.kill')
def test_save_main_pid_ex(self, kill_mock, open_mock, pid_exists_mock):
def test_write(data):
self.assertEquals(data, "222\n")
def test_close():
pass
def test_obj():
pass
test_obj.write = test_write
test_obj.close = test_close
open_mock.return_value = test_obj
pid_exists_mock.return_value = True
utils.save_main_pid_ex([{"pid": "111",
"exe": "/exe1",
"cmd": ""
},
{"pid": "222",
"exe": "/exe2",
"cmd": ""
},
], "/pidfile", ["/exe1"])
self.assertEquals(open_mock.call_count, 1)
self.assertEquals(pid_exists_mock.call_count, 4)
self.assertEquals(kill_mock.call_count, 1)
@patch('os.path.isfile')
@patch('__builtin__.open')
@patch('os.remove')
def test_check_exitcode(self, remove_mock, open_mock, isfile_mock):
def test_read():
return "777"
def test_close():
pass
def test_obj():
pass
test_obj.read = test_read
test_obj.close = test_close
open_mock.return_value = test_obj
isfile_mock.return_value = True
self.assertEquals(utils.check_exitcode("/tmp/nofile"), 777)
def test_format_with_reload(self):
from resource_management.libraries.functions import format
from resource_management.libraries.functions.format import ConfigurationFormatter
from resource_management.core.environment import Environment
env = Environment()
with env:
# declare some environment variables
env_params = {}
env_params["envfoo"] = "env-foo1"
env_params["envbar"] = "env-bar1"
env.config.params = env_params
# declare some local variables
foo = "foo1"
bar = "bar1"
# make sure local variables and env variables work
message = "{foo} {bar} {envfoo} {envbar}"
formatted_message = format(message)
self.assertEquals("foo1 bar1 env-foo1 env-bar1", formatted_message)
# try the same thing with an instance; we pass in keyword args to be
# combined with the env params
formatter = ConfigurationFormatter()
formatted_message = formatter.format(message, foo="foo2", bar="bar2")
self.assertEquals("foo2 bar2 env-foo1 env-bar1", formatted_message)
# now supply keyword args to override env params
formatted_message = formatter.format(message, envfoo="foobar", envbar="foobarbaz", foo="foo3", bar="bar3")
self.assertEquals("foo3 bar3 foobar foobarbaz", formatted_message)
def test_compare_versions(self):
self.assertEquals(utils.compare_versions("1.7.0", "2.0.0"), -1)
self.assertEquals(utils.compare_versions("2.0.0", "2.0.0"), 0)
self.assertEquals(utils.compare_versions("2.1.0", "2.0.0"), 1)
self.assertEquals(utils.compare_versions("1.7.0_abc", "2.0.0-abc"), -1)
self.assertEquals(utils.compare_versions("2.0.0.abc", "2.0.0_abc"), 0)
self.assertEquals(utils.compare_versions("2.1.0-abc", "2.0.0.abc"), 1)
self.assertEquals(utils.compare_versions("2.1.0-1","2.0.0-2"),1)
self.assertEquals(utils.compare_versions("2.0.0_1","2.0.0-2"),0)
self.assertEquals(utils.compare_versions("2.0.0-1","2.0.0-2"),0)
self.assertEquals(utils.compare_versions("2.0.0_1","2.0.0_2"),0)
self.assertEquals(utils.compare_versions("2.0.0-abc","2.0.0_abc"),0)
class FakeProperties(object):
def __init__(self, prop_map):
self.prop_map = prop_map
def get_property(self, prop_name):
return self.prop_map[prop_name]
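# Example usage of the stub above (the property name and value are arbitrary):
#
#   props = FakeProperties({'server.jdbc.database': 'postgres'})
#   props.get_property('server.jdbc.database')  # -> 'postgres'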
|
|
"""Adds support for generic hygrostat units."""
import asyncio
import logging
from homeassistant.components.humidifier import PLATFORM_SCHEMA, HumidifierEntity
from homeassistant.components.humidifier.const import (
ATTR_HUMIDITY,
DEVICE_CLASS_DEHUMIDIFIER,
DEVICE_CLASS_HUMIDIFIER,
MODE_AWAY,
MODE_NORMAL,
SUPPORT_MODES,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_MODE,
CONF_NAME,
EVENT_HOMEASSISTANT_START,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import DOMAIN as HA_DOMAIN, callback
from homeassistant.helpers import condition
from homeassistant.helpers.event import (
async_track_state_change,
async_track_time_interval,
)
from homeassistant.helpers.restore_state import RestoreEntity
from . import (
CONF_AWAY_FIXED,
CONF_AWAY_HUMIDITY,
CONF_DEVICE_CLASS,
CONF_DRY_TOLERANCE,
CONF_HUMIDIFIER,
CONF_INITIAL_STATE,
CONF_KEEP_ALIVE,
CONF_MAX_HUMIDITY,
CONF_MIN_DUR,
CONF_MIN_HUMIDITY,
CONF_SENSOR,
CONF_STALE_DURATION,
CONF_TARGET_HUMIDITY,
CONF_WET_TOLERANCE,
HYGROSTAT_SCHEMA,
)
_LOGGER = logging.getLogger(__name__)
ATTR_SAVED_HUMIDITY = "saved_humidity"
SUPPORT_FLAGS = 0
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(HYGROSTAT_SCHEMA.schema)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the generic hygrostat platform."""
if discovery_info:
config = discovery_info
name = config[CONF_NAME]
switch_entity_id = config[CONF_HUMIDIFIER]
sensor_entity_id = config[CONF_SENSOR]
min_humidity = config.get(CONF_MIN_HUMIDITY)
max_humidity = config.get(CONF_MAX_HUMIDITY)
target_humidity = config.get(CONF_TARGET_HUMIDITY)
device_class = config.get(CONF_DEVICE_CLASS)
min_cycle_duration = config.get(CONF_MIN_DUR)
sensor_stale_duration = config.get(CONF_STALE_DURATION)
dry_tolerance = config[CONF_DRY_TOLERANCE]
wet_tolerance = config[CONF_WET_TOLERANCE]
keep_alive = config.get(CONF_KEEP_ALIVE)
initial_state = config.get(CONF_INITIAL_STATE)
away_humidity = config.get(CONF_AWAY_HUMIDITY)
away_fixed = config.get(CONF_AWAY_FIXED)
async_add_entities(
[
GenericHygrostat(
name,
switch_entity_id,
sensor_entity_id,
min_humidity,
max_humidity,
target_humidity,
device_class,
min_cycle_duration,
dry_tolerance,
wet_tolerance,
keep_alive,
initial_state,
away_humidity,
away_fixed,
sensor_stale_duration,
)
]
)
class GenericHygrostat(HumidifierEntity, RestoreEntity):
"""Representation of a Generic Hygrostat device."""
def __init__(
self,
name,
switch_entity_id,
sensor_entity_id,
min_humidity,
max_humidity,
target_humidity,
device_class,
min_cycle_duration,
dry_tolerance,
wet_tolerance,
keep_alive,
initial_state,
away_humidity,
away_fixed,
sensor_stale_duration,
):
"""Initialize the hygrostat."""
self._name = name
self._switch_entity_id = switch_entity_id
self._sensor_entity_id = sensor_entity_id
self._device_class = device_class
self._min_cycle_duration = min_cycle_duration
self._dry_tolerance = dry_tolerance
self._wet_tolerance = wet_tolerance
self._keep_alive = keep_alive
self._state = initial_state
self._saved_target_humidity = away_humidity or target_humidity
self._active = False
self._cur_humidity = None
self._humidity_lock = asyncio.Lock()
self._min_humidity = min_humidity
self._max_humidity = max_humidity
self._target_humidity = target_humidity
self._support_flags = SUPPORT_FLAGS
if away_humidity:
self._support_flags = SUPPORT_FLAGS | SUPPORT_MODES
self._away_humidity = away_humidity
self._away_fixed = away_fixed
self._sensor_stale_duration = sensor_stale_duration
self._remove_stale_tracking = None
self._is_away = False
if not self._device_class:
self._device_class = DEVICE_CLASS_HUMIDIFIER
async def async_added_to_hass(self):
"""Run when entity about to be added."""
await super().async_added_to_hass()
# Add listener
async_track_state_change(
self.hass, self._sensor_entity_id, self._async_sensor_changed
)
async_track_state_change(
self.hass, self._switch_entity_id, self._async_switch_changed
)
if self._keep_alive:
async_track_time_interval(self.hass, self._async_operate, self._keep_alive)
@callback
async def _async_startup(event):
"""Init on startup."""
sensor_state = self.hass.states.get(self._sensor_entity_id)
await self._async_sensor_changed(self._sensor_entity_id, None, sensor_state)
self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, _async_startup)
if (old_state := await self.async_get_last_state()) is not None:
if old_state.attributes.get(ATTR_MODE) == MODE_AWAY:
self._is_away = True
self._saved_target_humidity = self._target_humidity
self._target_humidity = self._away_humidity or self._target_humidity
if old_state.attributes.get(ATTR_HUMIDITY):
self._target_humidity = int(old_state.attributes[ATTR_HUMIDITY])
if old_state.attributes.get(ATTR_SAVED_HUMIDITY):
self._saved_target_humidity = int(
old_state.attributes[ATTR_SAVED_HUMIDITY]
)
if old_state.state:
self._state = old_state.state == STATE_ON
if self._target_humidity is None:
if self._device_class == DEVICE_CLASS_HUMIDIFIER:
self._target_humidity = self.min_humidity
else:
self._target_humidity = self.max_humidity
_LOGGER.warning(
"No previously saved humidity, setting to %s", self._target_humidity
)
if self._state is None:
self._state = False
await _async_startup(None) # init the sensor
@property
def available(self):
"""Return True if entity is available."""
return self._active
@property
def extra_state_attributes(self):
"""Return the optional state attributes."""
if self._saved_target_humidity:
return {ATTR_SAVED_HUMIDITY: self._saved_target_humidity}
return None
@property
def should_poll(self):
"""Return the polling state."""
return False
@property
def name(self):
"""Return the name of the hygrostat."""
return self._name
@property
def is_on(self):
"""Return true if the hygrostat is on."""
return self._state
@property
def target_humidity(self):
"""Return the humidity we try to reach."""
return self._target_humidity
@property
def mode(self):
"""Return the current mode."""
if self._away_humidity is None:
return None
if self._is_away:
return MODE_AWAY
return MODE_NORMAL
@property
def available_modes(self):
"""Return a list of available modes."""
if self._away_humidity:
return [MODE_NORMAL, MODE_AWAY]
return None
@property
def device_class(self):
"""Return the device class of the humidifier."""
return self._device_class
async def async_turn_on(self, **kwargs):
"""Turn hygrostat on."""
if not self._active:
return
self._state = True
await self._async_operate(force=True)
await self.async_update_ha_state()
async def async_turn_off(self, **kwargs):
"""Turn hygrostat off."""
if not self._active:
return
self._state = False
if self._is_device_active:
await self._async_device_turn_off()
await self.async_update_ha_state()
async def async_set_humidity(self, humidity: int):
"""Set new target humidity."""
if humidity is None:
return
if self._is_away and self._away_fixed:
self._saved_target_humidity = humidity
await self.async_update_ha_state()
return
self._target_humidity = humidity
await self._async_operate(force=True)
await self.async_update_ha_state()
@property
def min_humidity(self):
"""Return the minimum humidity."""
if self._min_humidity:
return self._min_humidity
# get default humidity from super class
return super().min_humidity
@property
def max_humidity(self):
"""Return the maximum humidity."""
if self._max_humidity:
return self._max_humidity
# Get default humidity from super class
return super().max_humidity
@callback
async def _async_sensor_changed(self, entity_id, old_state, new_state):
"""Handle ambient humidity changes."""
if new_state is None:
return
if self._sensor_stale_duration:
if self._remove_stale_tracking:
self._remove_stale_tracking()
self._remove_stale_tracking = async_track_time_interval(
self.hass,
self._async_sensor_not_responding,
self._sensor_stale_duration,
)
await self._async_update_humidity(new_state.state)
await self._async_operate()
await self.async_update_ha_state()
@callback
async def _async_sensor_not_responding(self, now=None):
"""Handle sensor stale event."""
_LOGGER.debug(
"Sensor has not been updated for %s",
now - self.hass.states.get(self._sensor_entity_id).last_updated,
)
_LOGGER.warning("Sensor is stalled, call the emergency stop")
await self._async_update_humidity("Stalled")
@callback
def _async_switch_changed(self, entity_id, old_state, new_state):
"""Handle humidifier switch state changes."""
if new_state is None:
return
self.async_schedule_update_ha_state()
async def _async_update_humidity(self, humidity):
"""Update hygrostat with latest state from sensor."""
try:
self._cur_humidity = float(humidity)
except ValueError as ex:
_LOGGER.warning("Unable to update from sensor: %s", ex)
self._cur_humidity = None
self._active = False
if self._is_device_active:
await self._async_device_turn_off()
async def _async_operate(self, time=None, force=False):
"""Check if we need to turn humidifying on or off."""
async with self._humidity_lock:
if not self._active and None not in (
self._cur_humidity,
self._target_humidity,
):
self._active = True
force = True
_LOGGER.info(
"Obtained current and target humidity. "
"Generic hygrostat active. %s, %s",
self._cur_humidity,
self._target_humidity,
)
if not self._active or not self._state:
return
if not force and time is None:
# If the `force` argument is True, we
# ignore `min_cycle_duration`.
                # If the `time` argument is not None, we were invoked for
# keep-alive purposes, and `min_cycle_duration` is irrelevant.
if self._min_cycle_duration:
if self._is_device_active:
current_state = STATE_ON
else:
current_state = STATE_OFF
long_enough = condition.state(
self.hass,
self._switch_entity_id,
current_state,
self._min_cycle_duration,
)
if not long_enough:
return
if force:
# Ignore the tolerance when switched on manually
dry_tolerance = 0
wet_tolerance = 0
else:
dry_tolerance = self._dry_tolerance
wet_tolerance = self._wet_tolerance
too_dry = self._target_humidity - self._cur_humidity >= dry_tolerance
too_wet = self._cur_humidity - self._target_humidity >= wet_tolerance
if self._is_device_active:
if (self._device_class == DEVICE_CLASS_HUMIDIFIER and too_wet) or (
self._device_class == DEVICE_CLASS_DEHUMIDIFIER and too_dry
):
_LOGGER.info("Turning off humidifier %s", self._switch_entity_id)
await self._async_device_turn_off()
elif time is not None:
# The time argument is passed only in keep-alive case
await self._async_device_turn_on()
else:
if (self._device_class == DEVICE_CLASS_HUMIDIFIER and too_dry) or (
self._device_class == DEVICE_CLASS_DEHUMIDIFIER and too_wet
):
_LOGGER.info("Turning on humidifier %s", self._switch_entity_id)
await self._async_device_turn_on()
elif time is not None:
# The time argument is passed only in keep-alive case
await self._async_device_turn_off()
@property
def _is_device_active(self):
"""If the toggleable device is currently active."""
return self.hass.states.is_state(self._switch_entity_id, STATE_ON)
@property
def supported_features(self):
"""Return the list of supported features."""
return self._support_flags
async def _async_device_turn_on(self):
"""Turn humidifier toggleable device on."""
data = {ATTR_ENTITY_ID: self._switch_entity_id}
await self.hass.services.async_call(HA_DOMAIN, SERVICE_TURN_ON, data)
async def _async_device_turn_off(self):
"""Turn humidifier toggleable device off."""
data = {ATTR_ENTITY_ID: self._switch_entity_id}
await self.hass.services.async_call(HA_DOMAIN, SERVICE_TURN_OFF, data)
async def async_set_mode(self, mode: str):
"""Set new mode.
This method must be run in the event loop and returns a coroutine.
"""
if self._away_humidity is None:
return
if mode == MODE_AWAY and not self._is_away:
self._is_away = True
if not self._saved_target_humidity:
self._saved_target_humidity = self._away_humidity
self._saved_target_humidity, self._target_humidity = (
self._target_humidity,
self._saved_target_humidity,
)
await self._async_operate(force=True)
elif mode == MODE_NORMAL and self._is_away:
self._is_away = False
self._saved_target_humidity, self._target_humidity = (
self._target_humidity,
self._saved_target_humidity,
)
await self._async_operate(force=True)
await self.async_update_ha_state()
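# Illustrative note on the tolerance logic in _async_operate above (the
# numbers are arbitrary examples, not defaults): with device_class
# "humidifier", target_humidity 50, dry_tolerance 3 and wet_tolerance 3,
#
#   current humidity 46 -> 50 - 46 >= 3, "too dry", the switch is turned on
#   current humidity 54 -> 54 - 50 >= 3, "too wet", the switch is turned off
#   current humidity 48..52 -> inside the tolerance band, no switching occurs
#
# When force=True (manual turn-on or a changed target) both tolerances are
# treated as 0, so the device reacts immediately.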
|
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import itertools
import json
import logging
import numbers
import re
from django.core.urlresolvers import reverse
from django.utils.html import escape
from django.utils.translation import ugettext as _
from desktop.lib.i18n import smart_unicode, smart_str
from desktop.models import get_data_link
from dashboard.dashboard_api import get_engine
LOG = logging.getLogger(__name__)
class Collection2(object):
def __init__(self, user, name='Default', data=None, document=None, engine='solr'):
self.document = document
if document is not None:
self.data = json.loads(document.data)
elif data is not None:
self.data = json.loads(data)
else:
self.data = {
'collection': self.get_default(user, name, engine),
'layout': []
}
def get_json(self, user):
return json.dumps(self.get_props(user))
def get_props(self, user):
props = self.data
if self.document is not None:
props['collection']['id'] = self.document.id
props['collection']['label'] = self.document.name
props['collection']['description'] = self.document.description
# For backward compatibility
if 'rows' not in props['collection']['template']:
props['collection']['template']['rows'] = 25
if 'showGrid' not in props['collection']['template']:
props['collection']['template']['showGrid'] = True
if 'showChart' not in props['collection']['template']:
props['collection']['template']['showChart'] = False
if 'chartSettings' not in props['collection']['template']:
props['collection']['template']['chartSettings'] = {
'chartType': 'bars',
'chartSorting': 'none',
'chartScatterGroup': None,
'chartScatterSize': None,
'chartScope': 'world',
'chartX': None,
'chartYSingle': None,
'chartYMulti': [],
'chartData': [],
'chartMapLabel': None,
}
if 'enabled' not in props['collection']:
props['collection']['enabled'] = True
if 'engine' not in props['collection']:
props['collection']['engine'] = 'solr'
if 'leafletmap' not in props['collection']['template']:
props['collection']['template']['leafletmap'] = {'latitudeField': None, 'longitudeField': None, 'labelField': None}
if 'timeFilter' not in props['collection']:
props['collection']['timeFilter'] = {
'field': '',
'type': 'rolling',
'value': 'all',
'from': '',
'to': '',
'truncate': True
}
if 'suggest' not in props['collection']:
props['collection']['suggest'] = {'enabled': False, 'dictionary': ''}
for field in props['collection']['template']['fieldsAttributes']:
if 'type' not in field:
field['type'] = 'string'
if 'nested' not in props['collection']:
props['collection']['nested'] = {
'enabled': False,
'schema': []
}
for facet in props['collection']['facets']:
properties = facet['properties']
if 'gap' in properties and not 'initial_gap' in properties:
properties['initial_gap'] = properties['gap']
if 'start' in properties and not 'initial_start' in properties:
properties['initial_start'] = properties['start']
if 'end' in properties and not 'initial_end' in properties:
properties['initial_end'] = properties['end']
if 'domain' not in properties:
properties['domain'] = {'blockParent': [], 'blockChildren': []}
if facet['widgetType'] == 'histogram-widget':
if 'timelineChartType' not in properties:
properties['timelineChartType'] = 'bar'
if 'enableSelection' not in properties:
properties['enableSelection'] = True
if 'extraSeries' not in properties:
properties['extraSeries'] = []
if facet['widgetType'] == 'map-widget' and facet['type'] == 'field':
facet['type'] = 'pivot'
properties['facets'] = []
properties['facets_form'] = {'field': '', 'mincount': 1, 'limit': 5}
if 'qdefinitions' not in props['collection']:
props['collection']['qdefinitions'] = []
return props
def get_default(self, user, name, engine='solr'):
fields = self.fields_data(user, name, engine)
id_field = [field['name'] for field in fields if field.get('isId')]
if id_field:
id_field = id_field[0]
else:
id_field = '' # Schemaless might not have an id
TEMPLATE = {
"extracode": escape("<style type=\"text/css\">\nem {\n font-weight: bold;\n background-color: yellow;\n}</style>\n\n<script>\n</script>"),
"highlighting": [""],
"properties": {"highlighting_enabled": True},
"template": """
<div class="row-fluid">
<div class="row-fluid">
<div class="span12">%s</div>
</div>
<br/>
</div>""" % ' '.join(['{{%s}}' % field['name'] for field in fields]),
"isGridLayout": True,
"showFieldList": True,
"showGrid": True,
"showChart": False,
"chartSettings" : {
'chartType': 'bars',
'chartSorting': 'none',
'chartScatterGroup': None,
'chartScatterSize': None,
'chartScope': 'world',
'chartX': None,
'chartYSingle': None,
'chartYMulti': [],
'chartData': [],
'chartMapLabel': None,
},
"fieldsAttributes": [self._make_gridlayout_header_field(field) for field in fields],
"fieldsSelected": [],
"leafletmap": {'latitudeField': None, 'longitudeField': None, 'labelField': None},
"rows": 25,
}
FACETS = []
return {
'id': None,
'name': name,
'engine': engine,
'label': name,
'enabled': False,
'template': TEMPLATE,
'facets': FACETS,
'fields': fields,
'idField': id_field,
}
@classmethod
def _make_field(cls, field, attributes):
return {
'name': str(escape(field)),
'type': str(attributes.get('type', '')),
'isId': attributes.get('required') and attributes.get('uniqueKey'),
'isDynamic': 'dynamicBase' in attributes
}
@classmethod
def _make_gridlayout_header_field(cls, field, isDynamic=False):
return {'name': field['name'], 'type': field['type'], 'sort': {'direction': None}, 'isDynamic': isDynamic}
@classmethod
def _make_luke_from_schema_fields(cls, schema_fields):
return dict([
(f['name'], {
'copySources': [],
'type': f['type'],
'required': True,
'uniqueKey': f.get('uniqueKey'),
'flags': u'%s-%s-----OF-----l' % ('I' if f['indexed'] else '-', 'S' if f['stored'] else '-'), u'copyDests': []
})
for f in schema_fields['fields']
])
def get_absolute_url(self):
return reverse('search:index') + '?collection=%s' % self.id
def fields(self, user):
return sorted([str(field.get('name', '')) for field in self.fields_data(user)])
def fields_data(self, user, name, engine='solr'):
api = get_engine(user, engine)
try:
schema_fields = api.fields(name)
schema_fields = schema_fields['schema']['fields']
except Exception, e:
LOG.warn('/luke call did not succeed: %s' % e)
fields = api.schema_fields(name)
schema_fields = Collection2._make_luke_from_schema_fields(fields)
return sorted([self._make_field(field, attributes) for field, attributes in schema_fields.iteritems()])
def update_data(self, post_data):
data_dict = self.data
data_dict.update(post_data)
self.data = data_dict
@property
def autocomplete(self):
return self.data['autocomplete']
@autocomplete.setter
def autocomplete(self, autocomplete):
properties_ = self.data
properties_['autocomplete'] = autocomplete
self.data = json.dumps(properties_)
@classmethod
def get_field_list(cls, collection):
if collection['template']['fieldsSelected'] and collection['template']['isGridLayout']:
fields = set(collection['template']['fieldsSelected'] + ([collection['idField']] if collection['idField'] else []))
# Add field if needed
if collection['template']['leafletmap'].get('latitudeField'):
fields.add(collection['template']['leafletmap']['latitudeField'])
if collection['template']['leafletmap'].get('longitudeField'):
fields.add(collection['template']['leafletmap']['longitudeField'])
if collection['template']['leafletmap'].get('labelField'):
fields.add(collection['template']['leafletmap']['labelField'])
return list(fields)
else:
return ['*']
def get_facet_field(category, field, facets):
if category in ('nested', 'function'):
id_pattern = '%(id)s'
else:
id_pattern = '%(field)s-%(id)s'
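# Illustrative note (made-up values): a 'field' facet on 'year' with id
# '123abc' is looked up under the name 'year-123abc', whereas 'nested' and
# 'function' facets are looked up by their id alone.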
facets = filter(lambda facet: facet['type'] == category and id_pattern % facet == field, facets)
if facets:
return facets[0]
else:
return None
def pairwise2(field, fq_filter, iterable):
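# Rough sketch of the transformation (invented numbers): Solr returns field
# facets as a flat [value, count, value, count, ...] list, e.g.
# ['2014', 100, '2015', 80], which becomes
# [{'cat': field, 'value': '2014', 'count': 100, ...},
#  {'cat': field, 'value': '2015', 'count': 80, ...}].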
pairs = []
selected_values = [f['value'] for f in fq_filter]
a, b = itertools.tee(iterable)
for element in a:
pairs.append({
'cat': field,
'value': element,
'count': next(a),
'selected': element in selected_values,
'exclude': all([f['exclude'] for f in fq_filter if f['value'] == element])
})
return pairs
def range_pair(field, cat, fq_filter, iterable, end, collection_facet):
# e.g. counts":["0",17430,"1000",1949,"2000",671,"3000",404,"4000",243,"5000",165],"gap":1000,"start":0,"end":6000}
pairs = []
selected_values = [f['value'] for f in fq_filter]
is_single_unit_gap = re.match('^[\+\-]?1[A-Za-z]*$', str(collection_facet['properties']['gap'])) is not None
is_up = collection_facet['properties']['sort'] == 'asc'
if collection_facet['properties']['sort'] == 'asc' and (collection_facet['type'] == 'range-up' or collection_facet['properties'].get('type') == 'range-up'):
prev = None
n = []
for e in iterable:
if prev is not None:
n.append(e)
n.append(prev)
prev = None
else:
prev = e
iterable = n
iterable.reverse()
a, to = itertools.tee(iterable)
next(to, None)
counts = iterable[1::2]
total_counts = counts.pop(0) if collection_facet['properties']['sort'] == 'asc' else 0
for element in a:
next(to, None)
to_value = next(to, end)
count = next(a)
pairs.append({
'field': field, 'from': element, 'value': count, 'to': to_value, 'selected': element in selected_values,
'exclude': all([f['exclude'] for f in fq_filter if f['value'] == element]),
'is_single_unit_gap': is_single_unit_gap,
'total_counts': total_counts,
'is_up': is_up
})
total_counts += counts.pop(0) if counts else 0
if collection_facet['properties']['sort'] == 'asc' and collection_facet['type'] != 'range-up' and collection_facet['properties'].get('type') != 'range-up':
pairs.reverse()
return pairs
def augment_solr_response(response, collection, query):
augmented = response
augmented['normalized_facets'] = []
NAME = '%(field)s-%(id)s'
normalized_facets = []
selected_values = dict([(fq['id'], fq['filter']) for fq in query['fqs']])
if response and response.get('facet_counts'):
for facet in collection['facets']:
category = facet['type']
if category == 'field' and response['facet_counts']['facet_fields']:
name = NAME % facet
collection_facet = get_facet_field(category, name, collection['facets'])
counts = pairwise2(facet['field'], selected_values.get(facet['id'], []), response['facet_counts']['facet_fields'][name])
if collection_facet['properties']['sort'] == 'asc':
counts.reverse()
facet = {
'id': collection_facet['id'],
'field': facet['field'],
'type': category,
'label': collection_facet['label'],
'counts': counts,
}
normalized_facets.append(facet)
elif (category == 'range' or category == 'range-up') and response['facet_counts']['facet_ranges']:
name = NAME % facet
collection_facet = get_facet_field(category, name, collection['facets'])
counts = response['facet_counts']['facet_ranges'][name]['counts']
end = response['facet_counts']['facet_ranges'][name]['end']
counts = range_pair(facet['field'], name, selected_values.get(facet['id'], []), counts, end, collection_facet)
facet = {
'id': collection_facet['id'],
'field': facet['field'],
'type': category,
'label': collection_facet['label'],
'counts': counts,
'extraSeries': []
}
normalized_facets.append(facet)
elif category == 'query' and response['facet_counts']['facet_queries']:
for name, value in response['facet_counts']['facet_queries'].iteritems():
collection_facet = get_facet_field(category, name, collection['facets'])
facet = {
'id': collection_facet['id'],
'query': name,
'type': category,
'label': name,
'counts': value,
}
normalized_facets.append(facet)
elif category == 'pivot':
name = NAME % facet
if 'facet_pivot' in response['facet_counts'] and name in response['facet_counts']['facet_pivot']:
if facet['properties']['scope'] == 'stack':
count = _augment_pivot_2d(name, facet['id'], response['facet_counts']['facet_pivot'][name], selected_values)
else:
count = response['facet_counts']['facet_pivot'][name]
_augment_pivot_nd(facet['id'], count, selected_values)
else:
count = []
facet = {
'id': facet['id'],
'field': name,
'type': category,
'label': name,
'counts': count,
}
normalized_facets.append(facet)
if response and response.get('facets'):
for facet in collection['facets']:
category = facet['type']
name = facet['id'] # Nested facets can only have one name
if category == 'function' and name in response['facets']:
value = response['facets'][name]
collection_facet = get_facet_field(category, name, collection['facets'])
facet = {
'id': collection_facet['id'],
'query': name,
'type': category,
'label': name,
'counts': value,
}
normalized_facets.append(facet)
elif category == 'nested' and name in response['facets']:
value = response['facets'][name]
collection_facet = get_facet_field(category, name, collection['facets'])
extraSeries = []
counts = response['facets'][name]['buckets']
cols = ['%(field)s' % facet, 'count(%(field)s)' % facet]
last_x_col = 0
last_xx_col = 0
for i, f in enumerate(facet['properties']['facets']):
if f['aggregate']['function'] == 'count':
cols.append(f['field'])
last_xx_col = last_x_col
last_x_col = i + 2
from libsolr.api import SolrApi
cols.append(SolrApi._get_aggregate_function(f))
rows = []
# For dim in dimensions
# Number or Date range
if collection_facet['properties']['canRange'] and not facet['properties'].get('type') == 'field':
dimension = 3 if collection_facet['properties']['isDate'] else 1
# Single dimension or dimension 2 with analytics
if not collection_facet['properties']['facets'] or (collection_facet['properties']['facets'][0]['aggregate']['function'] != 'count' and len(collection_facet['properties']['facets']) == 1):
column = 'count'
if len(collection_facet['properties']['facets']) == 1:
agg_keys = [key for key, value in counts[0].items() if key.lower().startswith('agg_')]
legend = agg_keys[0].split(':', 2)[1]
column = agg_keys[0]
else:
legend = facet['field'] # 'count(%s)' % legend
agg_keys = [column]
_augment_stats_2d(name, facet, counts, selected_values, agg_keys, rows)
counts = [_v for _f in counts for _v in (_f['val'], _f[column])]
counts = range_pair(facet['field'], name, selected_values.get(facet['id'], []), counts, 1, collection_facet)
else:
# Dimension 1 with counts and 2 with analytics
agg_keys = [key for key, value in counts[0].items() if key.lower().startswith('agg_') or key.lower().startswith('dim_')]
agg_keys.sort(key=lambda a: a[4:])
if len(agg_keys) == 1 and agg_keys[0].lower().startswith('dim_'):
agg_keys.insert(0, 'count')
counts = _augment_stats_2d(name, facet, counts, selected_values, agg_keys, rows)
_series = collections.defaultdict(list)
for row in rows:
for i, cell in enumerate(row):
if i > last_x_col:
legend = cols[i]
if last_xx_col != last_x_col:
legend = '%s %s' % (cols[i], row[last_x_col])
_series[legend].append(row[last_xx_col])
_series[legend].append(cell)
for _name, val in _series.iteritems():
_c = range_pair(facet['field'], _name, selected_values.get(facet['id'], []), val, 1, collection_facet)
extraSeries.append({'counts': _c, 'label': _name})
counts = []
elif collection_facet['properties'].get('isOldPivot'):
facet_fields = [collection_facet['field']] + [f['field'] for f in collection_facet['properties'].get('facets', []) if f['aggregate']['function'] == 'count']
column = 'count'
agg_keys = [key for key, value in counts[0].items() if key.lower().startswith('agg_') or key.lower().startswith('dim_')]
agg_keys.sort(key=lambda a: a[4:])
if len(agg_keys) == 1 and agg_keys[0].lower().startswith('dim_'):
agg_keys.insert(0, 'count')
counts = _augment_stats_2d(name, facet, counts, selected_values, agg_keys, rows)
#_convert_nested_to_augmented_pivot_nd(facet_fields, facet['id'], count, selected_values, dimension=2)
dimension = len(facet_fields)
elif not collection_facet['properties']['facets'] or (collection_facet['properties']['facets'][0]['aggregate']['function'] != 'count' and len(collection_facet['properties']['facets']) == 1):
# Dimension 1 with 1 count or agg
dimension = 1
column = 'count'
agg_keys = [key for key, value in counts[0].items() if key.lower().startswith('agg_')]
if len(collection_facet['properties']['facets']) == 1 and agg_keys:
legend = agg_keys[0].split(':', 2)[1]
column = agg_keys[0]
else:
legend = facet['field']
agg_keys = [column]
_augment_stats_2d(name, facet, counts, selected_values, agg_keys, rows)
counts = [_v for _f in counts for _v in (_f['val'], _f[column])]
counts = pairwise2(legend, selected_values.get(facet['id'], []), counts)
else:
# Dimension 2 with analytics or 1 with N aggregates
dimension = 2
agg_keys = [key for key, value in counts[0].items() if key.lower().startswith('agg_') or key.lower().startswith('dim_')]
agg_keys.sort(key=lambda a: a[4:])
if len(agg_keys) == 1 and agg_keys[0].lower().startswith('dim_'):
agg_keys.insert(0, 'count')
counts = _augment_stats_2d(name, facet, counts, selected_values, agg_keys, rows)
actual_dimension = 1 + sum([_f['aggregate']['function'] == 'count' for _f in collection_facet['properties']['facets']])
counts = filter(lambda a: len(a['fq_fields']) == actual_dimension, counts)
num_bucket = response['facets'][name]['numBuckets'] if 'numBuckets' in response['facets'][name] else len(response['facets'][name])
facet = {
'id': collection_facet['id'],
'field': facet['field'],
'type': category,
'label': collection_facet['label'],
'counts': counts,
'extraSeries': extraSeries,
'dimension': dimension,
'response': {'response': {'start': 0, 'numFound': num_bucket}}, # Todo * nested buckets + offsets
'docs': [dict(zip(cols, row)) for row in rows],
'fieldsAttributes': [Collection2._make_gridlayout_header_field({'name': col, 'type': 'aggr' if '(' in col else 'string'}) for col in cols]
}
normalized_facets.append(facet)
# Remove unnecessary facet data
if response:
response.pop('facet_counts')
response.pop('facets')
augment_response(collection, query, response)
if normalized_facets:
augmented['normalized_facets'].extend(normalized_facets)
return augmented
def augment_response(collection, query, response):
# HTML escaping
if not query.get('download'):
id_field = collection.get('idField', '')
for doc in response['response']['docs']:
link = None
if 'link-meta' in doc:
meta = json.loads(doc['link-meta'])
link = get_data_link(meta)
elif 'link' in doc:
meta = {'type': 'link', 'link': doc['link']}
link = get_data_link(meta)
for field, value in doc.iteritems():
if isinstance(value, numbers.Number):
escaped_value = value
elif field == '_childDocuments_': # Nested documents
escaped_value = value
elif isinstance(value, list): # Multivalue field
escaped_value = [smart_unicode(escape(val), errors='replace') for val in value]
else:
value = smart_unicode(value, errors='replace')
escaped_value = escape(value)
doc[field] = escaped_value
doc['externalLink'] = link
doc['details'] = []
doc['hueId'] = smart_unicode(doc.get(id_field, ''))
highlighted_fields = response.get('highlighting', {}).keys()
if highlighted_fields and not query.get('download'):
id_field = collection.get('idField')
if id_field:
for doc in response['response']['docs']:
if id_field in doc and smart_unicode(doc[id_field]) in highlighted_fields:
highlighting = response['highlighting'][smart_unicode(doc[id_field])]
if highlighting:
escaped_highlighting = {}
for field, hls in highlighting.iteritems():
_hls = [escape(smart_unicode(hl, errors='replace')).replace('&lt;em&gt;', '<em>').replace('&lt;/em&gt;', '</em>') for hl in hls]
escaped_highlighting[field] = _hls[0] if len(_hls) == 1 else _hls
doc.update(escaped_highlighting)
else:
response['warning'] = _("The Solr schema requires an id field for performing the result highlighting")
def _augment_pivot_2d(name, facet_id, counts, selected_values):
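# Rough sketch (invented data): a 2D pivot such as
# [{'field': 'year', 'value': 2014, 'pivot': [{'field': 'make', 'value': 'ford', 'count': 3}]}]
# yields one augmented cell per (year, make) combination, with a zero count
# filled in for makes that do not appear under a given year.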
values = set()
for dimension in counts:
for pivot in dimension['pivot']:
values.add(pivot['value'])
values = sorted(list(values))
augmented = []
for dimension in counts:
count = {}
pivot_field = ''
for pivot in dimension['pivot']:
count[pivot['value']] = pivot['count']
pivot_field = pivot['field']
for val in values:
fq_values = [dimension['value'], val]
fq_fields = [dimension['field'], pivot_field]
fq_filter = selected_values.get(facet_id, [])
_selected_values = [f['value'] for f in fq_filter]
augmented.append({
"count": count.get(val, 0),
"value": val,
"cat": dimension['value'],
'selected': fq_values in _selected_values,
'exclude': all([f['exclude'] for f in fq_filter if f['value'] == val]),
'fq_fields': fq_fields,
'fq_values': fq_values,
})
return augmented
def _augment_stats_2d(name, facet, counts, selected_values, agg_keys, rows):
fq_fields = []
fq_values = []
fq_filter = []
_selected_values = [f['value'] for f in selected_values.get(facet['id'], [])]
_fields = [facet['field']] + [_f['field'] for _f in facet['properties']['facets']]
return __augment_stats_2d(counts, facet['field'], fq_fields, fq_values, fq_filter, _selected_values, _fields, agg_keys, rows)
# Clear one dimension
def __augment_stats_2d(counts, label, fq_fields, fq_values, fq_filter, _selected_values, _fields, agg_keys, rows):
augmented = []
for bucket in counts: # For each dimension, go through each bucket and pick up the counts or aggregates, then go recursively in the next dimension
val = bucket['val']
count = bucket['count']
dim_row = [val]
_fq_fields = fq_fields + _fields[0:1]
_fq_values = fq_values + [val]
for agg_key in agg_keys:
if agg_key == 'count':
dim_row.append(count)
augmented.append(_get_augmented(count, val, label, _fq_values, _fq_fields, fq_filter, _selected_values))
elif agg_key.startswith('agg_'):
label = fq_values[0] if len(_fq_fields) >= 2 else agg_key.split(':', 2)[1]
if agg_keys.index(agg_key) == 0: # One count by dimension
dim_row.append(count)
if agg_key not in bucket: # No key if value is 0
bucket[agg_key] = 0
dim_row.append(bucket[agg_key])
augmented.append(_get_augmented(bucket[agg_key], val, label, _fq_values, _fq_fields, fq_filter, _selected_values))
else:
augmented.append(_get_augmented(count, val, label, _fq_values, _fq_fields, fq_filter, _selected_values)) # Needed?
# Go rec
_agg_keys = [key for key, value in bucket[agg_key]['buckets'][0].items() if key.lower().startswith('agg_') or key.lower().startswith('dim_')]
_agg_keys.sort(key=lambda a: a[4:])
if not _agg_keys or len(_agg_keys) == 1 and _agg_keys[0].lower().startswith('dim_'):
_agg_keys.insert(0, 'count')
next_dim = []
new_rows = []
augmented += __augment_stats_2d(bucket[agg_key]['buckets'], val, _fq_fields, _fq_values, fq_filter, _selected_values, _fields[1:], _agg_keys, next_dim)
for row in next_dim:
new_rows.append(dim_row + row)
dim_row = new_rows
if dim_row and type(dim_row[0]) == list:
rows.extend(dim_row)
else:
rows.append(dim_row)
return augmented
def _get_augmented(count, val, label, fq_values, fq_fields, fq_filter, _selected_values):
return {
"count": count,
"value": val,
"cat": label,
'selected': fq_values in _selected_values,
'exclude': all([f['exclude'] for f in fq_filter if f['value'] == val]),
'fq_fields': fq_fields,
'fq_values': fq_values
}
def _augment_pivot_nd(facet_id, counts, selected_values, fields='', values=''):
for c in counts:
fq_fields = (fields if fields else []) + [c['field']]
fq_values = (values if values else []) + [smart_str(c['value'])]
if 'pivot' in c:
_augment_pivot_nd(facet_id, c['pivot'], selected_values, fq_fields, fq_values)
fq_filter = selected_values.get(facet_id, [])
_selected_values = [f['value'] for f in fq_filter]
c['selected'] = fq_values in _selected_values
c['exclude'] = False
c['fq_fields'] = fq_fields
c['fq_values'] = fq_values
def _convert_nested_to_augmented_pivot_nd(facet_fields, facet_id, counts, selected_values, fields='', values='', dimension=2):
for c in counts['buckets']:
c['field'] = facet_fields[0]
fq_fields = (fields if fields else []) + [c['field']]
fq_values = (values if values else []) + [smart_str(c['val'])]
c['value'] = c.pop('val')
bucket = 'd%s' % dimension
if bucket in c:
next_dimension = facet_fields[1:]
if next_dimension:
_convert_nested_to_augmented_pivot_nd(next_dimension, facet_id, c[bucket], selected_values, fq_fields, fq_values, dimension=dimension+1)
c['pivot'] = c.pop(bucket)['buckets']
else:
c['count'] = c.pop(bucket)
fq_filter = selected_values.get(facet_id, [])
_selected_values = [f['value'] for f in fq_filter]
c['selected'] = fq_values in _selected_values
c['exclude'] = False
c['fq_fields'] = fq_fields
c['fq_values'] = fq_values
def augment_solr_exception(response, collection):
response.update(
{
"facet_counts": {
},
"highlighting": {
},
"normalized_facets": [
{
"field": facet['field'],
"counts": [],
"type": facet['type'],
"label": facet['label']
}
for facet in collection['facets']
],
"responseHeader": {
"status": -1,
"QTime": 0,
"params": {
}
},
"response": {
"start": 0,
"numFound": 0,
"docs": [
]
}
})
|
|
"""Tests of http client with custom Connector"""
import asyncio
import http.cookies
import gc
import socket
import unittest
import ssl
import tempfile
import shutil
import os.path
from unittest import mock
import aiohttp
from aiohttp import web
from aiohttp import client
from aiohttp import helpers
from aiohttp.client import ClientResponse
from aiohttp.connector import Connection, host_is_ip
class TestBaseConnector(unittest.TestCase):
def setUp(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
self.transport = unittest.mock.Mock()
self.stream = aiohttp.StreamParser()
self.response = ClientResponse('get', 'http://base-conn.org')
self.response._post_init(self.loop)
def tearDown(self):
self.response.close()
self.loop.close()
gc.collect()
def test_del(self):
conn = aiohttp.BaseConnector(loop=self.loop)
transp = unittest.mock.Mock()
conn._conns['a'] = [(transp, 'proto', 123)]
conns_impl = conn._conns
exc_handler = unittest.mock.Mock()
self.loop.set_exception_handler(exc_handler)
with self.assertWarns(ResourceWarning):
del conn
gc.collect()
self.assertFalse(conns_impl)
transp.close.assert_called_with()
msg = {'connector': unittest.mock.ANY, # conn was deleted
'connections': unittest.mock.ANY,
'message': 'Unclosed connector'}
if self.loop.get_debug():
msg['source_traceback'] = unittest.mock.ANY
exc_handler.assert_called_with(self.loop, msg)
def test_del_with_scheduled_cleanup(self):
conn = aiohttp.BaseConnector(loop=self.loop, keepalive_timeout=0.01)
transp = unittest.mock.Mock()
conn._conns['a'] = [(transp, 'proto', 123)]
conns_impl = conn._conns
conn._start_cleanup_task()
exc_handler = unittest.mock.Mock()
self.loop.set_exception_handler(exc_handler)
with self.assertWarns(ResourceWarning):
del conn
self.loop.run_until_complete(asyncio.sleep(0.01, loop=self.loop))
gc.collect()
self.assertFalse(conns_impl)
transp.close.assert_called_with()
msg = {'connector': unittest.mock.ANY, # conn was deleted
'message': 'Unclosed connector'}
if self.loop.get_debug():
msg['source_traceback'] = unittest.mock.ANY
exc_handler.assert_called_with(self.loop, msg)
def test_del_with_closed_loop(self):
conn = aiohttp.BaseConnector(loop=self.loop)
transp = unittest.mock.Mock()
conn._conns['a'] = [(transp, 'proto', 123)]
conns_impl = conn._conns
conn._start_cleanup_task()
exc_handler = unittest.mock.Mock()
self.loop.set_exception_handler(exc_handler)
self.loop.close()
with self.assertWarns(ResourceWarning):
del conn
gc.collect()
self.assertFalse(conns_impl)
self.assertFalse(transp.close.called)
self.assertTrue(exc_handler.called)
def test_del_empty_connector(self):
conn = aiohttp.BaseConnector(loop=self.loop)
exc_handler = unittest.mock.Mock()
self.loop.set_exception_handler(exc_handler)
del conn
self.assertFalse(exc_handler.called)
def test_create_conn(self):
def go():
conn = aiohttp.BaseConnector(loop=self.loop)
with self.assertRaises(NotImplementedError):
yield from conn._create_connection(object())
self.loop.run_until_complete(go())
@unittest.mock.patch('aiohttp.connector.asyncio')
def test_ctor_loop(self, asyncio):
session = aiohttp.BaseConnector()
self.assertIs(session._loop, asyncio.get_event_loop.return_value)
def test_close(self):
tr = unittest.mock.Mock()
conn = aiohttp.BaseConnector(loop=self.loop)
self.assertFalse(conn.closed)
conn._conns[1] = [(tr, object(), object())]
conn.close()
self.assertFalse(conn._conns)
self.assertTrue(tr.close.called)
self.assertTrue(conn.closed)
def test_get(self):
conn = aiohttp.BaseConnector(loop=self.loop)
self.assertEqual(conn._get(1), (None, None))
tr, proto = unittest.mock.Mock(), unittest.mock.Mock()
conn._conns[1] = [(tr, proto, self.loop.time())]
self.assertEqual(conn._get(1), (tr, proto))
conn.close()
def test_get_expired(self):
conn = aiohttp.BaseConnector(loop=self.loop)
self.assertEqual(conn._get(1), (None, None))
tr, proto = unittest.mock.Mock(), unittest.mock.Mock()
conn._conns[1] = [(tr, proto, self.loop.time() - 1000)]
self.assertEqual(conn._get(1), (None, None))
self.assertFalse(conn._conns)
conn.close()
def test_release(self):
self.loop.time = mock.Mock(return_value=10)
conn = aiohttp.BaseConnector(loop=self.loop)
conn._start_cleanup_task = unittest.mock.Mock()
req = unittest.mock.Mock()
resp = req.response = unittest.mock.Mock()
resp._should_close = False
tr, proto = unittest.mock.Mock(), unittest.mock.Mock()
key = 1
conn._acquired[key].add(tr)
conn._release(key, req, tr, proto)
self.assertEqual(conn._conns[1][0], (tr, proto, 10))
self.assertTrue(conn._start_cleanup_task.called)
conn.close()
def test_release_close(self):
with self.assertWarns(DeprecationWarning):
conn = aiohttp.BaseConnector(share_cookies=True, loop=self.loop)
req = unittest.mock.Mock()
resp = unittest.mock.Mock()
resp.message.should_close = True
req.response = resp
cookies = resp.cookies = http.cookies.SimpleCookie()
cookies['c1'] = 'cookie1'
cookies['c2'] = 'cookie2'
tr, proto = unittest.mock.Mock(), unittest.mock.Mock()
key = 1
conn._acquired[key].add(tr)
conn._release(key, req, tr, proto)
self.assertFalse(conn._conns)
self.assertTrue(tr.close.called)
def test_get_pop_empty_conns(self):
# see issue #473
conn = aiohttp.BaseConnector(loop=self.loop)
key = ('127.0.0.1', 80, False)
conn._conns[key] = []
tr, proto = conn._get(key)
self.assertEqual((None, None), (tr, proto))
self.assertFalse(conn._conns)
def test_release_close_do_not_add_to_pool(self):
# see issue #473
conn = aiohttp.BaseConnector(loop=self.loop)
req = unittest.mock.Mock()
resp = unittest.mock.Mock()
resp.message.should_close = True
req.response = resp
key = ('127.0.0.1', 80, False)
tr, proto = unittest.mock.Mock(), unittest.mock.Mock()
conn._acquired[key].add(tr)
conn._release(key, req, tr, proto)
self.assertFalse(conn._conns)
def test_release_close_do_not_delete_existing_connections(self):
key = ('127.0.0.1', 80, False)
tr1, proto1 = unittest.mock.Mock(), unittest.mock.Mock()
with self.assertWarns(DeprecationWarning):
conn = aiohttp.BaseConnector(share_cookies=True, loop=self.loop)
conn._conns[key] = [(tr1, proto1, 1)]
req = unittest.mock.Mock()
resp = unittest.mock.Mock()
resp.message.should_close = True
req.response = resp
tr, proto = unittest.mock.Mock(), unittest.mock.Mock()
conn._acquired[key].add(tr1)
conn._release(key, req, tr, proto)
self.assertEqual(conn._conns[key], [(tr1, proto1, 1)])
self.assertTrue(tr.close.called)
conn.close()
def test_release_not_started(self):
self.loop.time = mock.Mock(return_value=10)
conn = aiohttp.BaseConnector(loop=self.loop)
req = unittest.mock.Mock()
req.response = None
tr, proto = unittest.mock.Mock(), unittest.mock.Mock()
key = 1
conn._acquired[key].add(tr)
conn._release(key, req, tr, proto)
self.assertEqual(conn._conns, {1: [(tr, proto, 10)]})
self.assertFalse(tr.close.called)
conn.close()
def test_release_not_opened(self):
conn = aiohttp.BaseConnector(loop=self.loop)
req = unittest.mock.Mock()
req.response = unittest.mock.Mock()
req.response.message = None
tr, proto = unittest.mock.Mock(), unittest.mock.Mock()
key = 1
conn._acquired[key].add(tr)
conn._release(key, req, tr, proto)
self.assertTrue(tr.close.called)
def test_connect(self):
tr, proto = unittest.mock.Mock(), unittest.mock.Mock()
proto.is_connected.return_value = True
class Req:
host = 'host'
port = 80
ssl = False
response = unittest.mock.Mock()
conn = aiohttp.BaseConnector(loop=self.loop)
key = ('host', 80, False)
conn._conns[key] = [(tr, proto, self.loop.time())]
conn._create_connection = unittest.mock.Mock()
conn._create_connection.return_value = helpers.create_future(self.loop)
conn._create_connection.return_value.set_result((tr, proto))
connection = self.loop.run_until_complete(conn.connect(Req()))
self.assertFalse(conn._create_connection.called)
self.assertEqual(connection._transport, tr)
self.assertEqual(connection._protocol, proto)
self.assertIsInstance(connection, Connection)
connection.close()
def test_connect_timeout(self):
conn = aiohttp.BaseConnector(loop=self.loop)
conn._create_connection = unittest.mock.Mock()
conn._create_connection.return_value = helpers.create_future(self.loop)
conn._create_connection.return_value.set_exception(
asyncio.TimeoutError())
with self.assertRaises(aiohttp.ClientTimeoutError):
req = unittest.mock.Mock()
self.loop.run_until_complete(conn.connect(req))
def test_connect_oserr(self):
conn = aiohttp.BaseConnector(loop=self.loop)
conn._create_connection = unittest.mock.Mock()
conn._create_connection.return_value = helpers.create_future(self.loop)
err = OSError(1, 'permission error')
conn._create_connection.return_value.set_exception(err)
with self.assertRaises(aiohttp.ClientOSError) as ctx:
req = unittest.mock.Mock()
self.loop.run_until_complete(conn.connect(req))
self.assertEqual(1, ctx.exception.errno)
self.assertTrue(ctx.exception.strerror.startswith('Cannot connect to'))
self.assertTrue(ctx.exception.strerror.endswith('[permission error]'))
def test_start_cleanup_task(self):
loop = unittest.mock.Mock()
loop.time.return_value = 1.5
conn = aiohttp.BaseConnector(loop=loop, keepalive_timeout=10)
self.assertIsNone(conn._cleanup_handle)
conn._start_cleanup_task()
self.assertIsNotNone(conn._cleanup_handle)
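# With loop.time() == 1.5 and keepalive_timeout == 10 the next cleanup is
# expected at 12, assuming the connector rounds the scheduling deadline up
# to a whole second.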
loop.call_at.assert_called_with(
12, conn._cleanup)
def test_cleanup(self):
testset = {
1: [(unittest.mock.Mock(), unittest.mock.Mock(), 10),
(unittest.mock.Mock(), unittest.mock.Mock(), 300),
(None, unittest.mock.Mock(), 300)],
}
testset[1][0][1].is_connected.return_value = True
testset[1][1][1].is_connected.return_value = False
loop = unittest.mock.Mock()
loop.time.return_value = 300
conn = aiohttp.BaseConnector(loop=loop)
conn._conns = testset
existing_handle = conn._cleanup_handle = unittest.mock.Mock()
conn._cleanup()
self.assertTrue(existing_handle.cancel.called)
self.assertEqual(conn._conns, {})
self.assertIsNone(conn._cleanup_handle)
def test_cleanup2(self):
testset = {1: [(unittest.mock.Mock(), unittest.mock.Mock(), 300)]}
testset[1][0][1].is_connected.return_value = True
loop = unittest.mock.Mock()
loop.time.return_value = 300.1
conn = aiohttp.BaseConnector(loop=loop, keepalive_timeout=10)
conn._conns = testset
conn._cleanup()
self.assertEqual(conn._conns, testset)
self.assertIsNotNone(conn._cleanup_handle)
loop.call_at.assert_called_with(
310, conn._cleanup)
conn.close()
def test_cleanup3(self):
testset = {1: [(unittest.mock.Mock(), unittest.mock.Mock(), 290.1),
(unittest.mock.Mock(), unittest.mock.Mock(), 305.1)]}
testset[1][0][1].is_connected.return_value = True
loop = unittest.mock.Mock()
loop.time.return_value = 308.5
conn = aiohttp.BaseConnector(loop=loop, keepalive_timeout=10)
conn._conns = testset
conn._cleanup()
self.assertEqual(conn._conns, {1: [testset[1][1]]})
self.assertIsNotNone(conn._cleanup_handle)
loop.call_at.assert_called_with(
316, conn._cleanup)
conn.close()
def test_tcp_connector_ctor(self):
conn = aiohttp.TCPConnector(loop=self.loop)
self.assertTrue(conn.verify_ssl)
self.assertIs(conn.fingerprint, None)
with self.assertWarns(DeprecationWarning):
self.assertFalse(conn.resolve)
self.assertFalse(conn.use_dns_cache)
self.assertEqual(conn.family, 0)
with self.assertWarns(DeprecationWarning):
self.assertEqual(conn.resolved_hosts, {})
self.assertEqual(conn.resolved_hosts, {})
def test_tcp_connector_ctor_fingerprint_valid(self):
valid = b'\xa2\x06G\xad\xaa\xf5\xd8\\J\x99^by;\x06='
conn = aiohttp.TCPConnector(loop=self.loop, fingerprint=valid)
self.assertEqual(conn.fingerprint, valid)
def test_tcp_connector_fingerprint_invalid(self):
invalid = b'\x00'
with self.assertRaises(ValueError):
aiohttp.TCPConnector(loop=self.loop, fingerprint=invalid)
def test_tcp_connector_clear_resolved_hosts(self):
conn = aiohttp.TCPConnector(loop=self.loop)
info = object()
conn._cached_hosts[('localhost', 123)] = info
conn._cached_hosts[('localhost', 124)] = info
conn.clear_resolved_hosts('localhost', 123)
self.assertEqual(
conn.resolved_hosts, {('localhost', 124): info})
conn.clear_resolved_hosts('localhost', 123)
self.assertEqual(
conn.resolved_hosts, {('localhost', 124): info})
with self.assertWarns(DeprecationWarning):
conn.clear_resolved_hosts()
self.assertEqual(conn.resolved_hosts, {})
def test_tcp_connector_clear_dns_cache(self):
conn = aiohttp.TCPConnector(loop=self.loop)
info = object()
conn._cached_hosts[('localhost', 123)] = info
conn._cached_hosts[('localhost', 124)] = info
conn.clear_dns_cache('localhost', 123)
self.assertEqual(
conn.cached_hosts, {('localhost', 124): info})
conn.clear_dns_cache('localhost', 123)
self.assertEqual(
conn.cached_hosts, {('localhost', 124): info})
conn.clear_dns_cache()
self.assertEqual(conn.cached_hosts, {})
def test_tcp_connector_clear_dns_cache_bad_args(self):
conn = aiohttp.TCPConnector(loop=self.loop)
with self.assertRaises(ValueError):
conn.clear_dns_cache('localhost')
def test_ambiguous_verify_ssl_and_ssl_context(self):
with self.assertRaises(ValueError):
aiohttp.TCPConnector(
verify_ssl=False,
ssl_context=ssl.SSLContext(ssl.PROTOCOL_SSLv23),
loop=self.loop)
def test_dont_recreate_ssl_context(self):
conn = aiohttp.TCPConnector(loop=self.loop)
ctx = conn.ssl_context
self.assertIs(ctx, conn.ssl_context)
def test_respect_precreated_ssl_context(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
conn = aiohttp.TCPConnector(loop=self.loop, ssl_context=ctx)
self.assertIs(ctx, conn.ssl_context)
def test_close_twice(self):
tr = unittest.mock.Mock()
conn = aiohttp.BaseConnector(loop=self.loop)
conn._conns[1] = [(tr, object(), object())]
conn.close()
self.assertFalse(conn._conns)
self.assertTrue(tr.close.called)
self.assertTrue(conn.closed)
conn._conns = 'Invalid' # fill with garbage
conn.close()
self.assertTrue(conn.closed)
def test_close_cancels_cleanup_handle(self):
conn = aiohttp.BaseConnector(loop=self.loop)
conn._start_cleanup_task()
self.assertIsNotNone(conn._cleanup_handle)
conn.close()
self.assertIsNone(conn._cleanup_handle)
def test_ctor_with_default_loop(self):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
self.addCleanup(loop.close)
self.addCleanup(asyncio.set_event_loop, None)
conn = aiohttp.BaseConnector()
self.assertIs(loop, conn._loop)
def test_connect_with_limit(self):
@asyncio.coroutine
def go():
tr, proto = unittest.mock.Mock(), unittest.mock.Mock()
proto.is_connected.return_value = True
class Req:
host = 'host'
port = 80
ssl = False
response = unittest.mock.Mock()
conn = aiohttp.BaseConnector(loop=self.loop, limit=1)
key = ('host', 80, False)
conn._conns[key] = [(tr, proto, self.loop.time())]
conn._create_connection = unittest.mock.Mock()
conn._create_connection.return_value = helpers.create_future(
self.loop)
conn._create_connection.return_value.set_result((tr, proto))
connection1 = yield from conn.connect(Req())
self.assertEqual(connection1._transport, tr)
self.assertEqual(1, len(conn._acquired[key]))
acquired = False
@asyncio.coroutine
def f():
nonlocal acquired
connection2 = yield from conn.connect(Req())
acquired = True
self.assertEqual(1, len(conn._acquired[key]))
connection2.release()
task = asyncio.async(f(), loop=self.loop)
yield from asyncio.sleep(0.01, loop=self.loop)
self.assertFalse(acquired)
connection1.release()
yield from asyncio.sleep(0, loop=self.loop)
self.assertTrue(acquired)
yield from task
conn.close()
self.loop.run_until_complete(go())
def test_connect_with_limit_cancelled(self):
@asyncio.coroutine
def go():
tr, proto = unittest.mock.Mock(), unittest.mock.Mock()
proto.is_connected.return_value = True
class Req:
host = 'host'
port = 80
ssl = False
response = unittest.mock.Mock()
conn = aiohttp.BaseConnector(loop=self.loop, limit=1)
key = ('host', 80, False)
conn._conns[key] = [(tr, proto, self.loop.time())]
conn._create_connection = unittest.mock.Mock()
conn._create_connection.return_value = helpers.create_future(
self.loop)
conn._create_connection.return_value.set_result((tr, proto))
connection = yield from conn.connect(Req())
self.assertEqual(connection._transport, tr)
self.assertEqual(1, len(conn._acquired[key]))
with self.assertRaises(asyncio.TimeoutError):
# limit exhausted
yield from asyncio.wait_for(conn.connect(Req()), 0.01,
loop=self.loop)
connection.close()
self.loop.run_until_complete(go())
def test_connect_with_limit_release_waiters(self):
def check_with_exc(err):
conn = aiohttp.BaseConnector(limit=1, loop=self.loop)
conn._create_connection = unittest.mock.Mock()
conn._create_connection.return_value = \
helpers.create_future(self.loop)
conn._create_connection.return_value.set_exception(err)
with self.assertRaises(Exception):
req = unittest.mock.Mock()
self.loop.run_until_complete(conn.connect(req))
key = (req.host, req.port, req.ssl)
self.assertFalse(conn._waiters[key])
check_with_exc(OSError(1, 'permission error'))
check_with_exc(RuntimeError())
check_with_exc(asyncio.TimeoutError())
def test_connect_with_limit_concurrent(self):
@asyncio.coroutine
def go():
proto = unittest.mock.Mock()
proto.is_connected.return_value = True
class Req:
host = 'host'
port = 80
ssl = False
response = unittest.mock.Mock(_should_close=False)
max_connections = 2
num_connections = 0
conn = aiohttp.BaseConnector(limit=max_connections, loop=self.loop)
# Use a real coroutine for _create_connection; a mock would mask
# problems that only happen when the method yields.
@asyncio.coroutine
def create_connection(req):
nonlocal num_connections
num_connections += 1
yield from asyncio.sleep(0, loop=self.loop)
# Make a new transport mock each time because acquired
# transports are stored in a set. Reusing the same object
# messes with the count.
tr = unittest.mock.Mock()
return tr, proto
conn._create_connection = create_connection
# Simulate something like a crawler. It opens a connection, does
# something with it, closes it, then creates tasks that make more
# connections and waits for them to finish. The crawler is started
# with multiple concurrent requests and stops when it hits a
# predefined maximum number of requests.
max_requests = 10
num_requests = 0
start_requests = max_connections + 1
@asyncio.coroutine
def f(start=True):
nonlocal num_requests
if num_requests == max_requests:
return
num_requests += 1
if not start:
connection = yield from conn.connect(Req())
yield from asyncio.sleep(0, loop=self.loop)
connection.release()
tasks = [
asyncio.async(f(start=False), loop=self.loop)
for i in range(start_requests)
]
yield from asyncio.wait(tasks, loop=self.loop)
yield from f()
conn.close()
self.assertEqual(max_connections, num_connections)
self.loop.run_until_complete(go())
def test_close_with_acquired_connection(self):
@asyncio.coroutine
def go():
tr, proto = unittest.mock.Mock(), unittest.mock.Mock()
proto.is_connected.return_value = True
class Req:
host = 'host'
port = 80
ssl = False
response = unittest.mock.Mock()
conn = aiohttp.BaseConnector(loop=self.loop, limit=1)
key = ('host', 80, False)
conn._conns[key] = [(tr, proto, self.loop.time())]
conn._create_connection = unittest.mock.Mock()
conn._create_connection.return_value = helpers.create_future(
self.loop)
conn._create_connection.return_value.set_result((tr, proto))
connection = yield from conn.connect(Req())
self.assertEqual(1, len(conn._acquired))
conn.close()
self.assertEqual(0, len(conn._acquired))
self.assertTrue(conn.closed)
tr.close.assert_called_with()
self.assertFalse(connection.closed)
connection.close()
self.assertTrue(connection.closed)
self.loop.run_until_complete(go())
def test_default_force_close(self):
connector = aiohttp.BaseConnector(loop=self.loop)
self.assertFalse(connector.force_close)
def test_limit_property(self):
conn = aiohttp.BaseConnector(loop=self.loop, limit=15)
self.assertEqual(15, conn.limit)
conn.close()
def test_limit_property_default(self):
conn = aiohttp.BaseConnector(loop=self.loop)
self.assertIsNone(conn.limit)
conn.close()
def test_force_close_and_explicit_keep_alive(self):
with self.assertRaises(ValueError):
aiohttp.BaseConnector(loop=self.loop, keepalive_timeout=30,
force_close=True)
conn = aiohttp.BaseConnector(loop=self.loop, force_close=True,
keepalive_timeout=None)
assert conn
conn = aiohttp.BaseConnector(loop=self.loop, force_close=True)
assert conn
class TestHttpClientConnector(unittest.TestCase):
def setUp(self):
self.handler = None
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
def tearDown(self):
if self.handler:
self.loop.run_until_complete(self.handler.finish_connections())
self.loop.stop()
self.loop.run_forever()
self.loop.close()
gc.collect()
def find_unused_port(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('127.0.0.1', 0))
port = s.getsockname()[1]
s.close()
return port
@asyncio.coroutine
def create_server(self, method, path, handler):
app = web.Application(loop=self.loop)
app.router.add_route(method, path, handler)
port = self.find_unused_port()
self.handler = app.make_handler(keep_alive_on=False)
srv = yield from self.loop.create_server(
self.handler, '127.0.0.1', port)
url = "http://127.0.0.1:{}".format(port) + path
self.addCleanup(srv.close)
return app, srv, url
@asyncio.coroutine
def create_unix_server(self, method, path, handler):
tmpdir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tmpdir)
app = web.Application(loop=self.loop)
app.router.add_route(method, path, handler)
self.handler = app.make_handler(keep_alive_on=False, access_log=None)
sock_path = os.path.join(tmpdir, 'socket.sock')
srv = yield from self.loop.create_unix_server(
self.handler, sock_path)
url = "http://127.0.0.1" + path
self.addCleanup(srv.close)
return app, srv, url, sock_path
def test_tcp_connector(self):
@asyncio.coroutine
def handler(request):
return web.HTTPOk()
app, srv, url = self.loop.run_until_complete(
self.create_server('get', '/', handler))
conn = aiohttp.TCPConnector(loop=self.loop)
r = self.loop.run_until_complete(
aiohttp.request(
'get', url,
connector=conn,
loop=self.loop))
self.loop.run_until_complete(r.release())
self.assertEqual(r.status, 200)
r.close()
conn.close()
def test_tcp_connector_uses_provided_local_addr(self):
@asyncio.coroutine
def handler(request):
return web.HTTPOk()
app, srv, url = self.loop.run_until_complete(
self.create_server('get', '/', handler)
)
port = self.find_unused_port()
conn = aiohttp.TCPConnector(loop=self.loop,
local_addr=('127.0.0.1', port))
r = self.loop.run_until_complete(
aiohttp.request(
'get', url,
connector=conn
))
self.loop.run_until_complete(r.release())
first_conn = next(iter(conn._conns.values()))[0][0]
self.assertEqual(first_conn._sock.getsockname(), ('127.0.0.1', port))
r.close()
conn.close()
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'requires unix')
def test_unix_connector(self):
@asyncio.coroutine
def handler(request):
return web.HTTPOk()
app, srv, url, sock_path = self.loop.run_until_complete(
self.create_unix_server('get', '/', handler))
connector = aiohttp.UnixConnector(sock_path, loop=self.loop)
self.assertEqual(sock_path, connector.path)
r = self.loop.run_until_complete(
client.request(
'get', url,
connector=connector,
loop=self.loop))
self.assertEqual(r.status, 200)
r.close()
def test_connector_cookie_deprecation(self):
with self.assertWarnsRegex(DeprecationWarning,
"^Using `share_cookies` is deprecated"):
conn = aiohttp.TCPConnector(share_cookies=True, loop=self.loop)
conn.close()
def test_ambiguous_ctor_params(self):
with self.assertRaises(ValueError):
aiohttp.TCPConnector(resolve=True, use_dns_cache=False,
loop=self.loop)
def test_both_resolve_and_use_dns_cache(self):
conn = aiohttp.TCPConnector(resolve=True, use_dns_cache=True,
loop=self.loop)
self.assertTrue(conn.use_dns_cache)
with self.assertWarns(DeprecationWarning):
self.assertTrue(conn.resolve)
def test_both_use_dns_cache_only(self):
conn = aiohttp.TCPConnector(use_dns_cache=True,
loop=self.loop)
self.assertTrue(conn.use_dns_cache)
with self.assertWarns(DeprecationWarning):
self.assertTrue(conn.resolve)
def test_resolver_not_called_with_address_is_ip(self):
resolver = unittest.mock.MagicMock()
connector = aiohttp.TCPConnector(resolver=resolver, loop=self.loop)
class Req:
host = '127.0.0.1'
port = 80
ssl = False
response = unittest.mock.Mock()
with self.assertRaises(OSError):
self.loop.run_until_complete(connector.connect(Req()))
resolver.resolve.assert_not_called()
def test_ip_addresses(self):
ip_addresses = [
'0.0.0.0',
'127.0.0.1',
'255.255.255.255',
'0:0:0:0:0:0:0:0',
'FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF',
'00AB:0002:3008:8CFD:00AB:0002:3008:8CFD',
'00ab:0002:3008:8cfd:00ab:0002:3008:8cfd',
'AB:02:3008:8CFD:AB:02:3008:8CFD',
'AB:02:3008:8CFD::02:3008:8CFD',
'::',
'1::1',
]
for address in ip_addresses:
assert host_is_ip(address) is True
def test_host_addresses(self):
hosts = [
'www.four.part.host',
'www.python.org',
'foo.bar',
'localhost',
]
for host in hosts:
assert host_is_ip(host) is False
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.compute_v1.types import compute
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-cloud-compute",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class RegionAutoscalersTransport(abc.ABC):
"""Abstract transport class for RegionAutoscalers."""
AUTH_SCOPES = (
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
)
DEFAULT_HOST: str = "compute.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
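# Both the caller-provided scopes and the library defaults are forwarded so
# that google-auth can fall back to the defaults when no scopes are given.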
scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# If the credentials are service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(service_account.Credentials, "with_always_use_jwt_access")
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.delete: gapic_v1.method.wrap_method(
self.delete, default_timeout=None, client_info=client_info,
),
self.get: gapic_v1.method.wrap_method(
self.get, default_timeout=None, client_info=client_info,
),
self.insert: gapic_v1.method.wrap_method(
self.insert, default_timeout=None, client_info=client_info,
),
self.list: gapic_v1.method.wrap_method(
self.list, default_timeout=None, client_info=client_info,
),
self.patch: gapic_v1.method.wrap_method(
self.patch, default_timeout=None, client_info=client_info,
),
self.update: gapic_v1.method.wrap_method(
self.update, default_timeout=None, client_info=client_info,
),
}
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def delete(
self,
) -> Callable[
[compute.DeleteRegionAutoscalerRequest],
Union[compute.Operation, Awaitable[compute.Operation]],
]:
raise NotImplementedError()
@property
def get(
self,
) -> Callable[
[compute.GetRegionAutoscalerRequest],
Union[compute.Autoscaler, Awaitable[compute.Autoscaler]],
]:
raise NotImplementedError()
@property
def insert(
self,
) -> Callable[
[compute.InsertRegionAutoscalerRequest],
Union[compute.Operation, Awaitable[compute.Operation]],
]:
raise NotImplementedError()
@property
def list(
self,
) -> Callable[
[compute.ListRegionAutoscalersRequest],
Union[compute.RegionAutoscalerList, Awaitable[compute.RegionAutoscalerList]],
]:
raise NotImplementedError()
@property
def patch(
self,
) -> Callable[
[compute.PatchRegionAutoscalerRequest],
Union[compute.Operation, Awaitable[compute.Operation]],
]:
raise NotImplementedError()
@property
def update(
self,
) -> Callable[
[compute.UpdateRegionAutoscalerRequest],
Union[compute.Operation, Awaitable[compute.Operation]],
]:
raise NotImplementedError()
__all__ = ("RegionAutoscalersTransport",)
|
|
# Author: Travis Oliphant 2001
# Author: Nathan Woods 2013 (nquad &c)
import sys
import warnings
from functools import partial
from . import _quadpack
import numpy
from numpy import Inf
__all__ = ['quad', 'dblquad', 'tplquad', 'nquad', 'quad_explain',
'IntegrationWarning']
error = _quadpack.error
class IntegrationWarning(UserWarning):
"""
Warning on issues during integration.
"""
pass
def quad_explain(output=sys.stdout):
"""
Print extra information about integrate.quad() parameters and returns.
Parameters
----------
output : instance with "write" method, optional
Information about `quad` is passed to ``output.write()``.
Default is ``sys.stdout``.
Returns
-------
None
"""
output.write(quad.__doc__)
def quad(func, a, b, args=(), full_output=0, epsabs=1.49e-8, epsrel=1.49e-8,
limit=50, points=None, weight=None, wvar=None, wopts=None, maxp1=50,
limlst=50):
"""
Compute a definite integral.
Integrate func from `a` to `b` (possibly infinite interval) using a
technique from the Fortran library QUADPACK.
Parameters
----------
func : {function, scipy.LowLevelCallable}
A Python function or method to integrate. If `func` takes many
arguments, it is integrated along the axis corresponding to the
first argument.
If the user desires improved integration performance, then `func` may
be a `scipy.LowLevelCallable` with one of the signatures::
double func(double x)
double func(double x, void *user_data)
double func(int n, double *xx)
double func(int n, double *xx, void *user_data)
The ``user_data`` is the data contained in the `scipy.LowLevelCallable`.
In the call forms with ``xx``, ``n`` is the length of the ``xx``
array which contains ``xx[0] == x`` and the rest of the items are
numbers contained in the ``args`` argument of quad.
In addition, certain ctypes call signatures are supported for
backward compatibility, but those should not be used in new code.
a : float
Lower limit of integration (use -numpy.inf for -infinity).
b : float
Upper limit of integration (use numpy.inf for +infinity).
args : tuple, optional
Extra arguments to pass to `func`.
full_output : int, optional
Non-zero to return a dictionary of integration information.
If non-zero, warning messages are also suppressed and the
message is appended to the output tuple.
Returns
-------
y : float
The integral of func from `a` to `b`.
abserr : float
An estimate of the absolute error in the result.
infodict : dict
A dictionary containing additional information.
Run scipy.integrate.quad_explain() for more information.
message
A convergence message.
explain
Appended only with 'cos' or 'sin' weighting and infinite
integration limits, it contains an explanation of the codes in
infodict['ierlst']
Other Parameters
----------------
epsabs : float or int, optional
Absolute error tolerance. Default is 1.49e-8. `quad` tries to obtain
an accuracy of ``abs(i-result) <= max(epsabs, epsrel*abs(i))``
where ``i`` = integral of `func` from `a` to `b`, and ``result`` is the
numerical approximation. See `epsrel` below.
epsrel : float or int, optional
Relative error tolerance. Default is 1.49e-8.
If ``epsabs <= 0``, `epsrel` must be greater than both 5e-29
and ``50 * (machine epsilon)``. See `epsabs` above.
limit : float or int, optional
An upper bound on the number of subintervals used in the adaptive
algorithm.
points : (sequence of floats,ints), optional
A sequence of break points in the bounded integration interval
where local difficulties of the integrand may occur (e.g.,
singularities, discontinuities). The sequence does not have
to be sorted. Note that this option cannot be used in conjunction
with ``weight``.
weight : float or int, optional
String indicating weighting function. Full explanation for this
and the remaining arguments can be found below.
wvar : optional
Variables for use with weighting functions.
wopts : optional
Optional input for reusing Chebyshev moments.
maxp1 : float or int, optional
An upper bound on the number of Chebyshev moments.
limlst : int, optional
Upper bound on the number of cycles (>=3) for use with a sinusoidal
weighting and an infinite end-point.
See Also
--------
dblquad : double integral
tplquad : triple integral
nquad : n-dimensional integrals (uses `quad` recursively)
fixed_quad : fixed-order Gaussian quadrature
quadrature : adaptive Gaussian quadrature
odeint : ODE integrator
ode : ODE integrator
simps : integrator for sampled data
romb : integrator for sampled data
scipy.special : for coefficients and roots of orthogonal polynomials
Notes
-----
**Extra information for quad() inputs and outputs**
If full_output is non-zero, then the third output argument
(infodict) is a dictionary with entries as tabulated below. For
infinite limits, the range is transformed to (0,1) and the
optional outputs are given with respect to this transformed range.
Let M be the input argument limit and let K be infodict['last'].
The entries are:
'neval'
The number of function evaluations.
'last'
The number, K, of subintervals produced in the subdivision process.
'alist'
A rank-1 array of length M, the first K elements of which are the
left end points of the subintervals in the partition of the
integration range.
'blist'
A rank-1 array of length M, the first K elements of which are the
right end points of the subintervals.
'rlist'
A rank-1 array of length M, the first K elements of which are the
integral approximations on the subintervals.
'elist'
A rank-1 array of length M, the first K elements of which are the
moduli of the absolute error estimates on the subintervals.
'iord'
A rank-1 integer array of length M, the first L elements of
which are pointers to the error estimates over the subintervals
with ``L=K`` if ``K<=M/2+2`` or ``L=M+1-K`` otherwise. Let I be the
sequence ``infodict['iord']`` and let E be the sequence
``infodict['elist']``. Then ``E[I[1]], ..., E[I[L]]`` forms a
decreasing sequence.
If the input argument points is provided (i.e., it is not None),
the following additional outputs are placed in the output
dictionary. Assume the points sequence is of length P.
'pts'
A rank-1 array of length P+2 containing the integration limits
and the break points of the intervals in ascending order.
This is an array giving the subintervals over which integration
will occur.
'level'
A rank-1 integer array of length M (=limit), containing the
subdivision levels of the subintervals, i.e., if (aa,bb) is a
subinterval of ``(pts[1], pts[2])`` where ``pts[0]`` and ``pts[2]``
are adjacent elements of ``infodict['pts']``, then (aa,bb) has level l
if ``|bb-aa| = |pts[2]-pts[1]| * 2**(-l)``.
'ndin'
A rank-1 integer array of length P+2. After the first integration
over the intervals (pts[1], pts[2]), the error estimates over some
of the intervals may have been increased artificially in order to
put their subdivision forward. This array has ones in slots
corresponding to the subintervals for which this happens.
**Weighting the integrand**
The input variables, *weight* and *wvar*, are used to weight the
integrand by a select list of functions. Different integration
methods are used to compute the integral with these weighting
functions, and these do not support specifying break points. The
possible values of weight and the corresponding weighting functions are.
========== =================================== =====================
``weight`` Weight function used ``wvar``
========== =================================== =====================
'cos' cos(w*x) wvar = w
'sin' sin(w*x) wvar = w
'alg' g(x) = ((x-a)**alpha)*((b-x)**beta) wvar = (alpha, beta)
'alg-loga' g(x)*log(x-a) wvar = (alpha, beta)
'alg-logb' g(x)*log(b-x) wvar = (alpha, beta)
'alg-log' g(x)*log(x-a)*log(b-x) wvar = (alpha, beta)
'cauchy' 1/(x-c) wvar = c
========== =================================== =====================
wvar holds the parameter w, (alpha, beta), or c depending on the weight
selected. In these expressions, a and b are the integration limits.
For the 'cos' and 'sin' weighting, additional inputs and outputs are
available.
For finite integration limits, the integration is performed using a
Clenshaw-Curtis method which uses Chebyshev moments. For repeated
calculations, these moments are saved in the output dictionary:
'momcom'
The maximum level of Chebyshev moments that have been computed,
i.e., if ``M_c`` is ``infodict['momcom']`` then the moments have been
computed for intervals of length ``|b-a| * 2**(-l)``,
``l=0,1,...,M_c``.
'nnlog'
A rank-1 integer array of length M(=limit), containing the
subdivision levels of the subintervals, i.e., an element of this
array is equal to l if the corresponding subinterval is
``|b-a|* 2**(-l)``.
'chebmo'
A rank-2 array of shape (25, maxp1) containing the computed
Chebyshev moments. These can be passed on to an integration
over the same interval by passing this array as the second
element of the sequence wopts and passing infodict['momcom'] as
the first element.
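A minimal sketch of reusing these moments, assuming a first call with
``full_output=1`` over the same finite interval and weighting::
from scipy import integrate
import numpy as np
res1, err1, info = integrate.quad(np.exp, 0, 1, weight='cos', wvar=10, full_output=1)
# reuse the Chebyshev moments for another integrand on the same interval
wopts = (info['momcom'], info['chebmo'])
res2, err2 = integrate.quad(np.sqrt, 0, 1, weight='cos', wvar=10, wopts=wopts)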
If one of the integration limits is infinite, then a Fourier integral is
computed (assuming ``w != 0``). If full_output is 1 and a numerical error
is encountered, besides the error message attached to the output tuple,
a dictionary is also appended to the output tuple which translates the
error codes in the array ``info['ierlst']`` to English messages. The
output information dictionary contains the following entries instead of
'last', 'alist', 'blist', 'rlist', and 'elist':
'lst'
The number of subintervals needed for the integration (call it ``K_f``).
'rslst'
A rank-1 array of length M_f=limlst, whose first ``K_f`` elements
contain the integral contribution over the interval
``(a+(k-1)c, a+kc)`` where ``c = (2*floor(|w|) + 1) * pi / |w|``
and ``k=1,2,...,K_f``.
'erlst'
A rank-1 array of length ``M_f`` containing the error estimate
corresponding to the interval in the same position in
``infodict['rslst']``.
'ierlst'
A rank-1 integer array of length ``M_f`` containing an error flag
corresponding to the interval in the same position in
``infodict['rslst']``. See the explanation dictionary (last entry
in the output tuple) for the meaning of the codes.
Examples
--------
Calculate :math:`\\int^4_0 x^2 dx` and compare with an analytic result
>>> from scipy import integrate
>>> x2 = lambda x: x**2
>>> integrate.quad(x2, 0, 4)
(21.333333333333332, 2.3684757858670003e-13)
>>> print(4**3 / 3.) # analytical result
21.3333333333
Calculate :math:`\\int^\\infty_0 e^{-x} dx`
>>> import numpy as np
>>> invexp = lambda x: np.exp(-x)
>>> integrate.quad(invexp, 0, np.inf)
(1.0, 5.842605999138044e-11)
>>> f = lambda x,a : a*x
>>> y, err = integrate.quad(f, 0, 1, args=(1,))
>>> y
0.5
>>> y, err = integrate.quad(f, 0, 1, args=(3,))
>>> y
1.5
Calculate :math:`\\int^1_0 x^2 + y^2 dx` with ctypes, holding
y parameter as 1::
testlib.c =>
double func(int n, double args[n]){
return args[0]*args[0] + args[1]*args[1];}
compile to library testlib.*
::
from scipy import integrate
import ctypes
lib = ctypes.CDLL('/home/.../testlib.*') #use absolute path
lib.func.restype = ctypes.c_double
lib.func.argtypes = (ctypes.c_int,ctypes.c_double)
integrate.quad(lib.func,0,1,(1))
#(1.3333333333333333, 1.4802973661668752e-14)
print((1.0**3/3.0 + 1.0) - (0.0**3/3.0 + 0.0)) #Analytic result
# 1.3333333333333333
Be aware that pulse shapes and other sharp features that are narrow compared
to the size of the integration interval may not be integrated correctly using
this method. A simplified example of this limitation is integrating a
y-axis reflected step function with many zero values within the integral's
bounds.
>>> y = lambda x: 1 if x<=0 else 0
>>> integrate.quad(y, -1, 1)
(1.0, 1.1102230246251565e-14)
>>> integrate.quad(y, -1, 100)
(1.0000000002199108, 1.0189464580163188e-08)
>>> integrate.quad(y, -1, 10000)
(0.0, 0.0)
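As a further sketch, a known discontinuity can be flagged with ``points`` and
a cosine weight applied through ``weight`` and ``wvar``::
from scipy import integrate
# step function with a jump at x = 0.5; exact value of the integral is 1.5
step = lambda x: 1.0 if x < 0.5 else 2.0
integrate.quad(step, 0, 1, points=[0.5])
# integral of cos(2*x) over [0, 1]; exact value is sin(2)/2, about 0.4546
integrate.quad(lambda x: 1.0, 0, 1, weight='cos', wvar=2)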
"""
if not isinstance(args, tuple):
args = (args,)
# check the limits of integration: \int_a^b, expect a < b
flip, a, b = b < a, min(a, b), max(a, b)
if weight is None:
retval = _quad(func, a, b, args, full_output, epsabs, epsrel, limit,
points)
else:
if points is not None:
msg = ("Break points cannot be specified when using weighted integrand.\n"
"Continuing, ignoring specified points.")
warnings.warn(msg, IntegrationWarning, stacklevel=2)
retval = _quad_weight(func, a, b, args, full_output, epsabs, epsrel,
limlst, limit, maxp1, weight, wvar, wopts)
if flip:
retval = (-retval[0],) + retval[1:]
ier = retval[-1]
if ier == 0:
return retval[:-1]
msgs = {80: "A Python error occurred possibly while calling the function.",
1: "The maximum number of subdivisions (%d) has been achieved.\n If increasing the limit yields no improvement it is advised to analyze \n the integrand in order to determine the difficulties. If the position of a \n local difficulty can be determined (singularity, discontinuity) one will \n probably gain from splitting up the interval and calling the integrator \n on the subranges. Perhaps a special-purpose integrator should be used." % limit,
2: "The occurrence of roundoff error is detected, which prevents \n the requested tolerance from being achieved. The error may be \n underestimated.",
3: "Extremely bad integrand behavior occurs at some points of the\n integration interval.",
4: "The algorithm does not converge. Roundoff error is detected\n in the extrapolation table. It is assumed that the requested tolerance\n cannot be achieved, and that the returned result (if full_output = 1) is \n the best which can be obtained.",
5: "The integral is probably divergent, or slowly convergent.",
6: "The input is invalid.",
7: "Abnormal termination of the routine. The estimates for result\n and error are less reliable. It is assumed that the requested accuracy\n has not been achieved.",
'unknown': "Unknown error."}
if weight in ['cos','sin'] and (b == Inf or a == -Inf):
msgs[1] = "The maximum number of cycles allowed has been achieved., e.e.\n of subintervals (a+(k-1)c, a+kc) where c = (2*int(abs(omega)+1))\n *pi/abs(omega), for k = 1, 2, ..., lst. One can allow more cycles by increasing the value of limlst. Look at info['ierlst'] with full_output=1."
msgs[4] = "The extrapolation table constructed for convergence acceleration\n of the series formed by the integral contributions over the cycles, \n does not converge to within the requested accuracy. Look at \n info['ierlst'] with full_output=1."
msgs[7] = "Bad integrand behavior occurs within one or more of the cycles.\n Location and type of the difficulty involved can be determined from \n the vector info['ierlist'] obtained with full_output=1."
explain = {1: "The maximum number of subdivisions (= limit) has been \n achieved on this cycle.",
2: "The occurrence of roundoff error is detected and prevents\n the tolerance imposed on this cycle from being achieved.",
3: "Extremely bad integrand behavior occurs at some points of\n this cycle.",
4: "The integral over this cycle does not converge (to within the required accuracy) due to roundoff in the extrapolation procedure invoked on this cycle. It is assumed that the result on this interval is the best which can be obtained.",
5: "The integral over this cycle is probably divergent or slowly convergent."}
try:
msg = msgs[ier]
except KeyError:
msg = msgs['unknown']
if ier in [1,2,3,4,5,7]:
if full_output:
if weight in ['cos', 'sin'] and (b == Inf or a == -Inf):
return retval[:-1] + (msg, explain)
else:
return retval[:-1] + (msg,)
else:
warnings.warn(msg, IntegrationWarning, stacklevel=2)
return retval[:-1]
elif ier == 6: # Forensic decision tree when QUADPACK throws ier=6
if epsabs <= 0: # Small error tolerance - applies to all methods
if epsrel < max(50 * sys.float_info.epsilon, 5e-29):
msg = ("If 'epsabs'<=0, 'epsrel' must be greater than both"
" 5e-29 and 50*(machine epsilon).")
elif weight in ['sin', 'cos'] and (abs(a) + abs(b) == Inf):
msg = ("Sine or cosine weighted intergals with infinite domain"
" must have 'epsabs'>0.")
elif weight is None:
if points is None: # QAGSE/QAGIE
msg = ("Invalid 'limit' argument. There must be"
" at least one subinterval")
else: # QAGPE
if not (min(a, b) <= min(points) <= max(points) <= max(a, b)):
msg = ("All break points in 'points' must lie within the"
" integration limits.")
elif len(points) >= limit:
msg = ("Number of break points ({:d})"
" must be less than subinterval"
" limit ({:d})").format(len(points), limit)
else:
if maxp1 < 1:
msg = "Chebyshev moment limit maxp1 must be >=1."
elif weight in ('cos', 'sin') and abs(a+b) == Inf: # QAWFE
msg = "Cycle limit limlst must be >=3."
elif weight.startswith('alg'): # QAWSE
if min(wvar) < -1:
msg = "wvar parameters (alpha, beta) must both be >= -1."
if b < a:
msg = "Integration limits a, b must satistfy a<b."
elif weight == 'cauchy' and wvar in (a, b):
msg = ("Parameter 'wvar' must not equal"
" integration limits 'a' or 'b'.")
raise ValueError(msg)
def _quad(func,a,b,args,full_output,epsabs,epsrel,limit,points):
infbounds = 0
if (b != Inf and a != -Inf):
pass # standard integration
elif (b == Inf and a != -Inf):
infbounds = 1
bound = a
elif (b == Inf and a == -Inf):
infbounds = 2
bound = 0 # ignored
elif (b != Inf and a == -Inf):
infbounds = -1
bound = b
else:
raise RuntimeError("Infinity comparisons don't work for you.")
if points is None:
if infbounds == 0:
return _quadpack._qagse(func,a,b,args,full_output,epsabs,epsrel,limit)
else:
return _quadpack._qagie(func,bound,infbounds,args,full_output,epsabs,epsrel,limit)
else:
if infbounds != 0:
raise ValueError("Infinity inputs cannot be used with break points.")
else:
# Duplicate break points are removed; duplicates would force function evaluation at singular points
the_points = numpy.unique(points)
the_points = the_points[a < the_points]
the_points = the_points[the_points < b]
the_points = numpy.concatenate((the_points, (0., 0.)))
return _quadpack._qagpe(func,a,b,the_points,args,full_output,epsabs,epsrel,limit)
def _quad_weight(func,a,b,args,full_output,epsabs,epsrel,limlst,limit,maxp1,weight,wvar,wopts):
if weight not in ['cos','sin','alg','alg-loga','alg-logb','alg-log','cauchy']:
raise ValueError("%s not a recognized weighting function." % weight)
strdict = {'cos':1,'sin':2,'alg':1,'alg-loga':2,'alg-logb':3,'alg-log':4}
if weight in ['cos','sin']:
integr = strdict[weight]
if (b != Inf and a != -Inf): # finite limits
if wopts is None: # no precomputed Chebyshev moments
return _quadpack._qawoe(func, a, b, wvar, integr, args, full_output,
epsabs, epsrel, limit, maxp1,1)
else: # precomputed Chebyshev moments
momcom = wopts[0]
chebcom = wopts[1]
return _quadpack._qawoe(func, a, b, wvar, integr, args, full_output,
epsabs, epsrel, limit, maxp1, 2, momcom, chebcom)
elif (b == Inf and a != -Inf):
return _quadpack._qawfe(func, a, wvar, integr, args, full_output,
epsabs,limlst,limit,maxp1)
elif (b != Inf and a == -Inf): # remap function and interval
if weight == 'cos':
def thefunc(x,*myargs):
y = -x
func = myargs[0]
myargs = (y,) + myargs[1:]
return func(*myargs)
else:
def thefunc(x,*myargs):
y = -x
func = myargs[0]
myargs = (y,) + myargs[1:]
return -func(*myargs)
args = (func,) + args
return _quadpack._qawfe(thefunc, -b, wvar, integr, args,
full_output, epsabs, limlst, limit, maxp1)
else:
raise ValueError("Cannot integrate with this weight from -Inf to +Inf.")
else:
if a in [-Inf,Inf] or b in [-Inf,Inf]:
raise ValueError("Cannot integrate with this weight over an infinite interval.")
if weight.startswith('alg'):
integr = strdict[weight]
return _quadpack._qawse(func, a, b, wvar, integr, args,
full_output, epsabs, epsrel, limit)
else: # weight == 'cauchy'
return _quadpack._qawce(func, a, b, wvar, args, full_output,
epsabs, epsrel, limit)
def dblquad(func, a, b, gfun, hfun, args=(), epsabs=1.49e-8, epsrel=1.49e-8):
"""
Compute a double integral.
Return the double (definite) integral of ``func(y, x)`` from ``x = a..b``
and ``y = gfun(x)..hfun(x)``.
Parameters
----------
func : callable
A Python function or method of at least two variables: y must be the
first argument and x the second argument.
a, b : float
The limits of integration in x: `a` < `b`
gfun : callable or float
The lower boundary curve in y which is a function taking a single
floating point argument (x) and returning a floating point result
or a float indicating a constant boundary curve.
hfun : callable or float
The upper boundary curve in y (same requirements as `gfun`).
args : sequence, optional
Extra arguments to pass to `func`.
epsabs : float, optional
Absolute tolerance passed directly to the inner 1-D quadrature
integration. Default is 1.49e-8. `dblquad` tries to obtain
an accuracy of ``abs(i-result) <= max(epsabs, epsrel*abs(i))``
where ``i`` = inner integral of ``func(y, x)`` from ``gfun(x)``
to ``hfun(x)``, and ``result`` is the numerical approximation.
See `epsrel` below.
epsrel : float, optional
Relative tolerance of the inner 1-D integrals. Default is 1.49e-8.
If ``epsabs <= 0``, `epsrel` must be greater than both 5e-29
and ``50 * (machine epsilon)``. See `epsabs` above.
Returns
-------
y : float
The resultant integral.
abserr : float
An estimate of the error.
See also
--------
quad : single integral
tplquad : triple integral
nquad : N-dimensional integrals
fixed_quad : fixed-order Gaussian quadrature
quadrature : adaptive Gaussian quadrature
odeint : ODE integrator
ode : ODE integrator
simps : integrator for sampled data
romb : integrator for sampled data
scipy.special : for coefficients and roots of orthogonal polynomials
Examples
--------
Compute the double integral of ``x * y**2`` over the box
``x`` ranging from 0 to 2 and ``y`` ranging from 0 to 1.
>>> from scipy import integrate
>>> f = lambda y, x: x*y**2
>>> integrate.dblquad(f, 0, 2, lambda x: 0, lambda x: 1)
(0.6666666666666667, 7.401486830834377e-15)
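Constant boundaries may also be given directly as floats, as described under
`gfun` and `hfun` above; a minimal sketch of the same integral:
>>> area = integrate.dblquad(lambda y, x: x*y**2, 0, 2, 0, 1)
>>> round(area[0], 6)
0.666667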
"""
def temp_ranges(*args):
return [gfun(args[0]) if callable(gfun) else gfun,
hfun(args[0]) if callable(hfun) else hfun]
return nquad(func, [temp_ranges, [a, b]], args=args,
opts={"epsabs": epsabs, "epsrel": epsrel})
def tplquad(func, a, b, gfun, hfun, qfun, rfun, args=(), epsabs=1.49e-8,
epsrel=1.49e-8):
"""
Compute a triple (definite) integral.
Return the triple integral of ``func(z, y, x)`` from ``x = a..b``,
``y = gfun(x)..hfun(x)``, and ``z = qfun(x,y)..rfun(x,y)``.
Parameters
----------
func : function
A Python function or method of at least three variables in the
order (z, y, x).
a, b : float
The limits of integration in x: `a` < `b`
gfun : function or float
The lower boundary curve in y which is a function taking a single
floating point argument (x) and returning a floating point result
or a float indicating a constant boundary curve.
hfun : function or float
The upper boundary curve in y (same requirements as `gfun`).
qfun : function or float
The lower boundary surface in z. It must be a function that takes
two floats in the order (x, y) and returns a float or a float
indicating a constant boundary surface.
rfun : function or float
The upper boundary surface in z. (Same requirements as `qfun`.)
args : tuple, optional
Extra arguments to pass to `func`.
epsabs : float, optional
Absolute tolerance passed directly to the innermost 1-D quadrature
integration. Default is 1.49e-8.
epsrel : float, optional
Relative tolerance of the innermost 1-D integrals. Default is 1.49e-8.
Returns
-------
y : float
The resultant integral.
abserr : float
An estimate of the error.
See Also
--------
quad: Adaptive quadrature using QUADPACK
quadrature: Adaptive Gaussian quadrature
fixed_quad: Fixed-order Gaussian quadrature
dblquad: Double integrals
nquad : N-dimensional integrals
romb: Integrators for sampled data
simps: Integrators for sampled data
ode: ODE integrators
odeint: ODE integrators
scipy.special: For coefficients and roots of orthogonal polynomials
Examples
--------
Compute the triple integral of ``x * y * z``, over ``x`` ranging
from 1 to 2, ``y`` ranging from 2 to 3, ``z`` ranging from 0 to 1.
>>> from scipy import integrate
>>> f = lambda z, y, x: x*y*z
>>> integrate.tplquad(f, 1, 2, lambda x: 2, lambda x: 3,
... lambda x, y: 0, lambda x, y: 1)
(1.8750000000000002, 3.324644794257407e-14)
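Constant boundary surfaces may likewise be given as plain floats; a minimal
sketch of the same integral:
>>> vol = integrate.tplquad(lambda z, y, x: x*y*z, 1, 2, 2, 3, 0, 1)
>>> round(vol[0], 6)
1.875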
"""
# f(z, y, x)
# qfun/rfun (x, y)
# gfun/hfun(x)
# nquad will hand (y, x, t0, ...) to ranges0
# nquad will hand (x, t0, ...) to ranges1
# Stupid different API...
def ranges0(*args):
return [qfun(args[1], args[0]) if callable(qfun) else qfun,
rfun(args[1], args[0]) if callable(rfun) else rfun]
def ranges1(*args):
return [gfun(args[0]) if callable(gfun) else gfun,
hfun(args[0]) if callable(hfun) else hfun]
ranges = [ranges0, ranges1, [a, b]]
return nquad(func, ranges, args=args,
opts={"epsabs": epsabs, "epsrel": epsrel})
def nquad(func, ranges, args=None, opts=None, full_output=False):
"""
Integration over multiple variables.
Wraps `quad` to enable integration over multiple variables.
Various options allow improved integration of discontinuous functions, as
well as the use of weighted integration, and generally finer control of the
integration process.
Parameters
----------
func : {callable, scipy.LowLevelCallable}
The function to be integrated. Has arguments of ``x0, ... xn``,
``t0, ... tm``, where integration is carried out over ``x0, ... xn``, which
must be floats. Function signature should be
``func(x0, x1, ..., xn, t0, t1, ..., tm)``. Integration is carried out
in order. That is, integration over ``x0`` is the innermost integral,
and ``xn`` is the outermost.
If the user desires improved integration performance, then `func` may
be a `scipy.LowLevelCallable` with one of the signatures::
double func(int n, double *xx)
double func(int n, double *xx, void *user_data)
where ``n`` is the number of extra parameters and args is an array
of doubles of the additional parameters; the ``xx`` array contains the
coordinates. The ``user_data`` is the data contained in the
`scipy.LowLevelCallable`.
ranges : iterable object
Each element of ranges may be either a sequence of 2 numbers, or else
a callable that returns such a sequence. ``ranges[0]`` corresponds to
integration over x0, and so on. If an element of ranges is a callable,
then it will be called with all of the integration arguments available,
as well as any parametric arguments. e.g., if
``func = f(x0, x1, x2, t0, t1)``, then ``ranges[0]`` may be defined as
either ``(a, b)`` or else as ``(a, b) = range0(x1, x2, t0, t1)``.
args : iterable object, optional
Additional arguments ``t0, ..., tn``, required by `func`, `ranges`, and
``opts``.
opts : iterable object or dict, optional
Options to be passed to `quad`. May be empty, a dict, or
a sequence of dicts or functions that return a dict. If empty, the
default options from scipy.integrate.quad are used. If a dict, the same
options are used for all levels of integration. If a sequence, then each
element of the sequence corresponds to a particular integration. e.g.,
opts[0] corresponds to integration over x0, and so on. If a callable,
the signature must be the same as for ``ranges``. The available
options together with their default values are:
- epsabs = 1.49e-08
- epsrel = 1.49e-08
- limit = 50
- points = None
- weight = None
- wvar = None
- wopts = None
For more information on these options, see `quad` and `quad_explain`.
full_output : bool, optional
Partial implementation of ``full_output`` from scipy.integrate.quad.
The number of integrand function evaluations ``neval`` can be obtained
by setting ``full_output=True`` when calling nquad.
Returns
-------
result : float
The result of the integration.
abserr : float
The maximum of the estimates of the absolute error in the various
integration results.
out_dict : dict, optional
A dict containing additional information on the integration.
See Also
--------
quad : 1-D numerical integration
dblquad, tplquad : double and triple integrals
fixed_quad : fixed-order Gaussian quadrature
quadrature : adaptive Gaussian quadrature
Examples
--------
>>> from scipy import integrate
>>> import numpy as np
>>> func = lambda x0,x1,x2,x3 : x0**2 + x1*x2 - x3**3 + np.sin(x0) + (
... 1 if (x0-.2*x3-.5-.25*x1>0) else 0)
>>> points = [[lambda x1,x2,x3 : 0.2*x3 + 0.5 + 0.25*x1], [], [], []]
>>> def opts0(*args, **kwargs):
... return {'points':[0.2*args[2] + 0.5 + 0.25*args[0]]}
>>> integrate.nquad(func, [[0,1], [-1,1], [.13,.8], [-.15,1]],
... opts=[opts0,{},{},{}], full_output=True)
(1.5267454070738633, 2.9437360001402324e-14, {'neval': 388962})
>>> scale = .1
>>> def func2(x0, x1, x2, x3, t0, t1):
... return x0*x1*x3**2 + np.sin(x2) + 1 + (1 if x0+t1*x1-t0>0 else 0)
>>> def lim0(x1, x2, x3, t0, t1):
... return [scale * (x1**2 + x2 + np.cos(x3)*t0*t1 + 1) - 1,
... scale * (x1**2 + x2 + np.cos(x3)*t0*t1 + 1) + 1]
>>> def lim1(x2, x3, t0, t1):
... return [scale * (t0*x2 + t1*x3) - 1,
... scale * (t0*x2 + t1*x3) + 1]
>>> def lim2(x3, t0, t1):
... return [scale * (x3 + t0**2*t1**3) - 1,
... scale * (x3 + t0**2*t1**3) + 1]
>>> def lim3(t0, t1):
... return [scale * (t0+t1) - 1, scale * (t0+t1) + 1]
>>> def opts0(x1, x2, x3, t0, t1):
... return {'points' : [t0 - t1*x1]}
>>> def opts1(x2, x3, t0, t1):
... return {}
>>> def opts2(x3, t0, t1):
... return {}
>>> def opts3(t0, t1):
... return {}
>>> integrate.nquad(func2, [lim0, lim1, lim2, lim3], args=(0,0),
... opts=[opts0, opts1, opts2, opts3])
(25.066666666666666, 2.7829590483937256e-13)
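For comparison, the `dblquad` example from earlier in this module can be
written directly with `nquad`; a minimal sketch where ``ranges[0]`` is the
innermost (``y``) integration:
>>> res, err = integrate.nquad(lambda y, x: x*y**2, [[0, 1], [0, 2]])
>>> round(res, 6)
0.666667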
"""
depth = len(ranges)
ranges = [rng if callable(rng) else _RangeFunc(rng) for rng in ranges]
if args is None:
args = ()
if opts is None:
opts = [dict([])] * depth
if isinstance(opts, dict):
opts = [_OptFunc(opts)] * depth
else:
opts = [opt if callable(opt) else _OptFunc(opt) for opt in opts]
return _NQuad(func, ranges, opts, full_output).integrate(*args)
class _RangeFunc(object):
def __init__(self, range_):
self.range_ = range_
def __call__(self, *args):
"""Return stored value.
*args needed because range_ can be float or func, and is called with
variable number of parameters.
"""
return self.range_
class _OptFunc(object):
def __init__(self, opt):
self.opt = opt
def __call__(self, *args):
"""Return stored dict."""
return self.opt
class _NQuad(object):
def __init__(self, func, ranges, opts, full_output):
self.abserr = 0
self.func = func
self.ranges = ranges
self.opts = opts
self.maxdepth = len(ranges)
self.full_output = full_output
if self.full_output:
self.out_dict = {'neval': 0}
def integrate(self, *args, **kwargs):
depth = kwargs.pop('depth', 0)
if kwargs:
raise ValueError('unexpected kwargs')
# Get the integration range and options for this depth.
ind = -(depth + 1)
fn_range = self.ranges[ind]
low, high = fn_range(*args)
fn_opt = self.opts[ind]
opt = dict(fn_opt(*args))
if 'points' in opt:
opt['points'] = [x for x in opt['points'] if low <= x <= high]
if depth + 1 == self.maxdepth:
f = self.func
else:
f = partial(self.integrate, depth=depth+1)
quad_r = quad(f, low, high, args=args, full_output=self.full_output,
**opt)
value = quad_r[0]
abserr = quad_r[1]
if self.full_output:
infodict = quad_r[2]
# The 'neval' parameter in full_output returns the total
# number of times the integrand function was evaluated.
# Therefore, only the innermost integration loop counts.
if depth + 1 == self.maxdepth:
self.out_dict['neval'] += infodict['neval']
self.abserr = max(self.abserr, abserr)
if depth > 0:
return value
else:
# Final result of N-D integration with error
if self.full_output:
return value, self.abserr, self.out_dict
else:
return value, self.abserr
|
|
# encoding: utf-8
import socket
from functools import partial
from pprint import pformat
from marrow.server.http.testing import HTTPTestCase, CRLF, EOH
from marrow.util.compat import unicode
log = __import__('logging').getLogger(__name__)
from applications import *
class TestHTTP11Protocol(HTTPTestCase):
arguments = dict(application=partial(echo, False))
def test_headers(self):
response = self.request(headers=[(b'Connection', b'close')])
self.assertEquals(response.protocol, b"HTTP/1.1")
self.assertEquals(response.code, b"200")
self.assertEquals(response.status, b"OK")
self.assertEquals(response[b'content-type'], b"text/plain; charset=utf8")
# self.assertEquals(response[b'content-length'], b"468")
def test_request(self):
response = self.request(headers=[(b'Connection', b'close')])
self.assertEquals(response.protocol, b"HTTP/1.1")
self.assertEquals(response.code, b"200")
self.assertEquals(response.status, b"OK")
self.assertEquals(response[b'content-type'], b"text/plain; charset=utf8")
# self.assertEquals(response[b'content-length'], b"468")
request = eval(response.body)
expect = {
'CONTENT_LENGTH': None,
'CONTENT_TYPE': None,
'FRAGMENT': '',
'HTTP_CONNECTION': 'close',
'HTTP_HOST': 'localhost',
'PARAMETERS': unicode(),
'PATH_INFO': b'/'.decode('iso-8859-1'),
'QUERY_STRING': unicode(),
'REMOTE_ADDR': '127.0.0.1',
'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': unicode(),
'SERVER_ADDR': '127.0.0.1',
'SERVER_PROTOCOL': 'HTTP/1.1',
'wsgi.multiprocess': False,
'wsgi.multithread': False,
'wsgi.run_once': False,
'wsgi.url_scheme': 'http',
'wsgi.version': (2, 0),
'REQUEST_URI': b'http://localhost/',
'wsgi.async': False,
'wsgi.uri_encoding': 'utf8'
}
self.assertEquals(request, expect)
def test_single(self):
self.request(headers=[(b'Connection', b'close')])
def try_again():
self.request(headers=[(b'Connection', b'close')])
self.assertRaises((socket.error, IOError), try_again)
def test_keepalive(self):
one = self.request()
two = self.request()
self.assertEquals(one, two)
class TestChunkedHTTP11Protocol(HTTPTestCase):
arguments = dict(application=partial(echo, True))
maxDiff = None
def test_chunked(self):
response = self.request()
self.assertEquals(response.protocol, b"HTTP/1.1")
self.assertEquals(response.code, b"200")
self.assertEquals(response.status, b"OK")
self.assertEquals(response[b'content-type'], b"text/plain; charset=utf8")
self.assertEquals(response[b'transfer-encoding'], b"chunked")
request = eval(response.body)
expect = {
'CONTENT_LENGTH': None,
'CONTENT_TYPE': None,
'FRAGMENT': '',
'HTTP_HOST': 'localhost',
'PARAMETERS': unicode(),
'PATH_INFO': b'/'.decode('iso-8859-1'),
'QUERY_STRING': unicode(),
'REMOTE_ADDR': '127.0.0.1',
'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': unicode(),
'SERVER_ADDR': '127.0.0.1',
'SERVER_PROTOCOL': 'HTTP/1.1',
'wsgi.multiprocess': False,
'wsgi.multithread': False,
'wsgi.run_once': False,
'wsgi.url_scheme': 'http',
'wsgi.version': (2, 0),
'REQUEST_URI': b'http://localhost/',
'wsgi.async': False,
'wsgi.uri_encoding': 'utf8'
}
self.assertEquals(request, expect)
class TestHTTP11BodyProtocol(HTTPTestCase):
arguments = dict(application=partial(echo, True))
maxDiff = None
def test_normal(self):
body = b"Hello world!"
response = self.request(b"PUT", headers=[(b'Content-Length', unicode(len(body)).encode('ascii'))], body=[body])
self.assertEquals(response.protocol, b"HTTP/1.1")
self.assertEquals(response.code, b"200")
self.assertEquals(response.status, b"OK")
self.assertEquals(response[b'content-type'], b"text/plain; charset=utf8")
#self.assertEquals(response[b'transfer-encoding'], b"chunked")
request = eval(response.body)
expect = {
'CONTENT_LENGTH': "12",
'CONTENT_TYPE': None,
'FRAGMENT': '',
'HTTP_HOST': 'localhost',
'PARAMETERS': unicode(),
'PATH_INFO': b'/'.decode('iso-8859-1'),
'QUERY_STRING': unicode(),
'REMOTE_ADDR': '127.0.0.1',
'REQUEST_METHOD': 'PUT',
'SCRIPT_NAME': unicode(),
'SERVER_ADDR': '127.0.0.1',
'SERVER_PROTOCOL': 'HTTP/1.1',
'wsgi.multiprocess': False,
'wsgi.multithread': False,
'wsgi.run_once': False,
'wsgi.url_scheme': 'http',
'wsgi.version': (2, 0),
'REQUEST_URI': b'http://localhost/',
'wsgi.async': False,
'wsgi.uri_encoding': 'utf8',
'wsgi.input': b"Hello world!"
}
self.assertEquals(request, expect)
def test_chunked(self):
body = b"Hello world!"
response = self.request(b"PUT", body=[body])
self.assertEquals(response.protocol, b"HTTP/1.1")
self.assertEquals(response.code, b"200")
self.assertEquals(response.status, b"OK")
self.assertEquals(response[b'content-type'], b"text/plain; charset=utf8")
self.assertEquals(response[b'transfer-encoding'], b"chunked")
request = eval(response.body)
expect = {
'CONTENT_LENGTH': None,
'CONTENT_TYPE': None,
'FRAGMENT': '',
'HTTP_TRANSFER_ENCODING': 'chunked',
'HTTP_HOST': 'localhost',
'PARAMETERS': unicode(),
'PATH_INFO': b'/'.decode('iso-8859-1'),
'QUERY_STRING': unicode(),
'REMOTE_ADDR': '127.0.0.1',
'REQUEST_METHOD': 'PUT',
'SCRIPT_NAME': unicode(),
'SERVER_ADDR': '127.0.0.1',
'SERVER_PROTOCOL': 'HTTP/1.1',
'wsgi.multiprocess': False,
'wsgi.multithread': False,
'wsgi.run_once': False,
'wsgi.url_scheme': 'http',
'wsgi.version': (2, 0),
'REQUEST_URI': b'http://localhost/',
'wsgi.async': False,
'wsgi.uri_encoding': 'utf8',
'wsgi.input': b'Hello world!'
}
self.assertEquals(request, expect)
|
|
import sys, re, os, time, math
from datetime import datetime
from dateutil.parser import parse as dtparse
import pickle, json
from pprint import pformat
import asyncio
from functools import partial
import biothings.utils.mongo as mongo
import biothings.utils.aws as aws
from biothings.utils.common import timesofar
from biothings.utils.loggers import HipchatHandler, get_logger
from biothings.utils.manager import BaseManager
from biothings.utils.es import ESIndexer
from biothings.utils.backend import DocESBackend
from biothings import config as btconfig
from biothings.utils.mongo import doc_feeder, id_feeder
from config import LOG_FOLDER, logger as logging, HUB_ENV
from biothings.utils.hub import publish_data_version
class IndexerException(Exception):
pass
class IndexerManager(BaseManager):
def __init__(self, pindexer, *args, **kwargs):
super(IndexerManager,self).__init__(*args, **kwargs)
self.pindexer = pindexer
self.src_build = mongo.get_src_build()
self.target_db = mongo.get_target_db()
self.t0 = time.time()
self.prepared = False
self.log_folder = LOG_FOLDER
self.timestamp = datetime.now()
self.setup()
def setup(self):
self.setup_log()
def setup_log(self):
import logging as logging_mod
if not os.path.exists(self.log_folder):
os.makedirs(self.log_folder)
self.logfile = os.path.join(self.log_folder, 'indexmanager_%s.log' % time.strftime("%Y%m%d",self.timestamp.timetuple()))
fh = logging_mod.FileHandler(self.logfile)
fmt = logging_mod.Formatter('%(asctime)s [%(process)d:%(threadName)s] - %(name)s - %(levelname)s -- %(message)s',datefmt="%H:%M:%S")
fh.setFormatter(fmt)
fh.name = "logfile"
nh = HipchatHandler(btconfig.HIPCHAT_CONFIG)
nh.setFormatter(fmt)
nh.name = "hipchat"
self.logger = logging_mod.getLogger("indexmanager")
self.logger.setLevel(logging_mod.DEBUG)
if not fh.name in [h.name for h in self.logger.handlers]:
self.logger.addHandler(fh)
if not nh.name in [h.name for h in self.logger.handlers]:
self.logger.addHandler(nh)
return self.logger
def __getitem__(self,build_name):
"""
Return an instance of an indexer for the build named 'build_name'
Note: each call returns a different instance (factory call behind the scenes...)
"""
# we'll get a partial class but will return an instance
pclass = BaseManager.__getitem__(self,build_name)
return pclass()
def configure(self):
self.register_indexer("default")
def register_indexer(self, conf):
def create():
idxer = self.pindexer()
return idxer
self.register[conf] = partial(create)
def index(self, target_name=None, index_name=None, ids=None, **kwargs):
"""
Trigger an index creation to index the collection target_name and create an
index named index_name (or target_name if None). Optional list of IDs can be
passed to index specific documents.
"""
t0 = time.time()
def indexed(f):
res = f.result()
try:
self.logger.info("Done indexing target '%s' to index '%s': %s" % (target_name,index_name,res))
except Exception as e:
import traceback
self.logger.error("Error while running merge job, %s:\n%s" % (e,traceback.format_exc()))
raise
idx = self["default"]
idx.target_name = target_name
index_name = index_name or target_name
job = idx.index(target_name, index_name, ids=ids, job_manager=self.job_manager, **kwargs)
job = asyncio.ensure_future(job)
job.add_done_callback(indexed)
return job
def snapshot(self, index, snapshot=None, mode=None, steps=["snapshot","meta"]):
# check what to do
if type(steps) == str:
steps = [steps]
if "meta" in steps:
assert getattr(btconfig,"BIOTHINGS_ROLE",None) == "master","Hub needs to be master to publish metadata about snapshots"
assert hasattr(btconfig,"URL_SNAPSHOT_REPOSITORY"), "URL_SNAPSHOT_REPOSITORY must be defined to publish metadata about snapshots"
snapshot = snapshot or index
es_snapshot_host = getattr(btconfig,"ES_SNAPSHOT_HOST",btconfig.ES_HOST)
idxr = ESIndexer(index=index,doc_type=btconfig.ES_DOC_TYPE,es_host=es_snapshot_host)
# will hold the overall result
fut = asyncio.Future()
def get_status():
try:
res = idxr.get_snapshot_status(btconfig.SNAPSHOT_REPOSITORY, snapshot)
assert "snapshots" in res, "Can't find snapshot '%s' in repository '%s'" % (snapshot,btconfig.SNAPSHOT_REPOSITORY)
# assuming only one index in the snapshot, so only check first elem
state = res["snapshots"][0].get("state")
assert state, "Can't find state in snapshot '%s'" % snapshot
return state
except Exception as e:
# something went wrong, report as failure
return "FAILED"
@asyncio.coroutine
def do(index):
def snapshot_launched(f):
try:
self.logger.info("Snapshot launched: %s" % f.result())
except Exception as e:
self.logger.error("Error while lauching snapshot: %s" % e)
fut.set_exception(e)
if "snapshot" in steps:
pinfo = {"category" : "index",
"source" : index,
"step" : "snapshot",
"description" : es_snapshot_host}
self.logger.info("Creating snapshot for index '%s' on host '%s', repository '%s'" % (index,es_snapshot_host,btconfig.SNAPSHOT_REPOSITORY))
job = yield from self.job_manager.defer_to_thread(pinfo,
partial(idxr.snapshot,btconfig.SNAPSHOT_REPOSITORY,snapshot, mode=mode))
job.add_done_callback(snapshot_launched)
yield from job
while True:
state = get_status()
if state in ["INIT","IN_PROGRESS","STARTED"]:
yield from asyncio.sleep(getattr(btconfig,"MONITOR_SNAPSHOT_DELAY",60))
else:
if state == "SUCCESS":
# if "meta" is required, it will set the result later
if not "meta" in steps:
fut.set_result(state)
self.logger.info("Snapshot '%s' successfully created (host: '%s', repository: '%s')" % \
(snapshot,es_snapshot_host,btconfig.SNAPSHOT_REPOSITORY),extra={"notify":True})
else:
e = IndexerException("Snapshot '%s' failed: %s" % (snapshot,state))
fut.set_exception(e)
self.logger.error("Failed creating snapshot '%s' (host: %s, repository: %s), state: %s" % \
(snapshot,es_snapshot_host,btconfig.SNAPSHOT_REPOSITORY,state),extra={"notify":True})
raise e
break
if "meta" in steps:
try:
esb = DocESBackend(idxr)
self.logger.info("Generating JSON metadata for full release '%s'" % esb.version)
repo = idxr._es.snapshot.get_repository(btconfig.URL_SNAPSHOT_REPOSITORY)
# generate json metadata about this diff release
full_meta = {
"type": "full",
"build_version": esb.version,
"app_version": None,
"metadata" : {"repository" : repo,
"snapshot_name" : snapshot}
}
assert esb.version, "Can't retrieve a version from index '%s'" % index
build_info = "%s.json" % esb.version
build_info_path = os.path.join(btconfig.DIFF_PATH,build_info)
json.dump(full_meta,open(build_info_path,"w"))
# override lastmodified header with our own timestamp
local_ts = dtparse(idxr.get_mapping_meta()["_meta"]["timestamp"])
utc_epoch = str(int(time.mktime(local_ts.timetuple())))
# it's a full release, but all build info metadata (full, incremental) goes
# to the diff bucket (this is the main entry)
s3key = os.path.join(btconfig.S3_DIFF_FOLDER,build_info)
aws.send_s3_file(build_info_path,s3key,
aws_key=btconfig.AWS_KEY,aws_secret=btconfig.AWS_SECRET,
s3_bucket=btconfig.S3_DIFF_BUCKET,metadata={"lastmodified":utc_epoch},
overwrite=True)
url = aws.get_s3_url(s3key,aws_key=btconfig.AWS_KEY,aws_secret=btconfig.AWS_SECRET,
s3_bucket=btconfig.S3_DIFF_BUCKET)
self.logger.info("Full release metadata published for version: '%s'" % url)
publish_data_version(esb.version)
self.logger.info("Registered version '%s'" % (esb.version))
fut.set_result("SUCCESS")
except Exception as e:
self.logger.error("Error while publishing metadata for snapshot '%s': %s" % (snapshot,e))
fut.set_exception(e)
task = asyncio.ensure_future(do(index))
return fut
class Indexer(object):
def __init__(self, es_host, target_name=None):
self.host = es_host
self.log_folder = LOG_FOLDER
self.timestamp = datetime.now()
self.build_name = None
self.build_doc = None
self.target_name = None
self.index_name = None
self.doc_type = None
self.num_shards = None
self.num_replicas = None
def get_pinfo(self):
"""
Return dict containing information about the current process
(used to report in the hub)
"""
return {"category" : "indexer",
"source" : "%s:%s" % (self.build_name,self.index_name),
"step" : "",
"description" : ""}
@asyncio.coroutine
def index(self, target_name, index_name, job_manager, steps=["index","post"], batch_size=10000, ids=None, mode="index"):
"""
Build an index named "index_name" with data from collection
"target_name". "ids" can be passed to selectively index documents. "mode" can have the following
values:
- 'purge': will delete the index if it exists
- 'resume': will use the existing index and add documents. "ids" can be passed as a list of missing IDs,
or, if not passed, ES will be queried to identify which IDs are missing for each batch in
order to complete the index.
- 'index' (default) or None: will create a new index, assuming it doesn't already exist
"""
assert job_manager
# check what to do
if type(steps) == str:
steps = [steps]
self.target_name = target_name
self.index_name = index_name
self.setup_log()
self.load_build()
got_error = False
cnt = 0
if "index" in steps:
_db = mongo.get_target_db()
target_collection = _db[target_name]
_mapping = self.get_mapping()
_extra = self.get_index_creation_settings()
_meta = {}
# partially instantiated indexer instance for process workers
partial_idxer = partial(ESIndexer,doc_type=self.doc_type,
index=index_name,
es_host=self.host,
step=batch_size,
number_of_shards=self.num_shards,
number_of_replicas=self.num_replicas)
# instantiate one here for index creation
es_idxer = partial_idxer()
if es_idxer.exists_index():
if mode == "purge":
es_idxer.delete_index()
elif mode != "resume":
raise IndexerException("Index already '%s' exists, (use mode='purge' to auto-delete it or mode='resume' to add more documents)" % index_name)
if mode != "resume":
es_idxer.create_index({self.doc_type:_mapping},_extra)
jobs = []
total = target_collection.count()
btotal = math.ceil(total/batch_size)
bnum = 1
if ids:
self.logger.info("Indexing from '%s' with specific list of _ids, create indexer job with batch_size=%d" % (target_name, batch_size))
id_provider = [ids]
else:
self.logger.info("Fetch _ids from '%s', and create indexer job with batch_size=%d" % (target_name, batch_size))
id_provider = id_feeder(target_collection, batch_size=batch_size,logger=self.logger)
for ids in id_provider:
yield from asyncio.sleep(0.0)
cnt += len(ids)
pinfo = self.get_pinfo()
pinfo["step"] = self.target_name
pinfo["description"] = "#%d/%d (%.1f%%)" % (bnum,btotal,(cnt/total*100))
self.logger.info("Creating indexer job #%d/%d, to index '%s' %d/%d (%.1f%%)" % \
(bnum,btotal,target_name,cnt,total,(cnt/total*100.)))
job = yield from job_manager.defer_to_process(
pinfo,
partial(indexer_worker,
self.target_name,
ids,
partial_idxer,
bnum,
mode))
def batch_indexed(f,batch_num):
nonlocal got_error
res = f.result()
if type(res) != tuple or type(res[0]) != int:
got_error = Exception("Batch #%s failed while indexing collection '%s' [result:%s]" % (batch_num,self.target_name,repr(f.result())))
job.add_done_callback(partial(batch_indexed,batch_num=bnum))
jobs.append(job)
bnum += 1
# raise error as soon as we know
if got_error:
raise got_error
self.logger.info("%d jobs created for indexing step" % len(jobs))
tasks = asyncio.gather(*jobs)
def done(f):
nonlocal got_error
if None in f.result():
got_error = Exception("Some batches failed")
return
# compute overall inserted/updated records
# returned values looks like [(num,[]),(num,[]),...]
cnt = sum([val[0] for val in f.result()])
self.logger.info("Index '%s' successfully created" % index_name,extra={"notify":True})
tasks.add_done_callback(done)
yield from tasks
if "post" in steps:
self.logger.info("Running post-index process for index '%s'" % index_name)
pinfo = self.get_pinfo()
pinfo["step"] = "post_index"
# For some reason (like maintaining an object's state across pickling)
# we can't use a process there; we need a thread to maintain that state
# without building an unmaintainable monster
job = yield from job_manager.defer_to_thread(pinfo, partial(self.post_index, target_name, index_name,
job_manager, steps=steps, batch_size=batch_size, ids=ids, mode=mode))
def posted(f):
try:
res = f.result()
self.logger.info("Post-index process done for index '%s': %s" % (index_name,res))
except Exception as e:
got_error = e
self.logger.error("Post-index process failed for index '%s': %s" % (index_name,e),extra={"notify":True})
raise
job.add_done_callback(posted)
yield from asyncio.gather(job) # consume future
if got_error:
raise got_error
else:
return {"%s" % self.target_name : cnt}
def post_index(self, target_name, index_name, job_manager, steps=["index","post"], batch_size=10000, ids=None, mode=None):
"""
Override in sub-class to add a post-index process. Method's signature is the same as index() to get
the full context. This method will run in a thread (using job_manager.defer_to_thread())
"""
pass
def setup_log(self):
import logging as logging_mod
if not os.path.exists(self.log_folder):
os.makedirs(self.log_folder)
self.logfile = os.path.join(self.log_folder, 'index_%s_%s.log' % (self.index_name,time.strftime("%Y%m%d",self.timestamp.timetuple())))
fh = logging_mod.FileHandler(self.logfile)
fmt = logging_mod.Formatter('%(asctime)s [%(process)d:%(threadName)s] - %(name)s - %(levelname)s -- %(message)s',datefmt="%H:%M:%S")
fh.setFormatter(fmt)
fh.name = "logfile"
nh = HipchatHandler(btconfig.HIPCHAT_CONFIG)
nh.setFormatter(fmt)
nh.name = "hipchat"
self.logger = logging_mod.getLogger("%s_index" % self.build_name)
self.logger.setLevel(logging_mod.DEBUG)
if not fh.name in [h.name for h in self.logger.handlers]:
self.logger.addHandler(fh)
if not nh.name in [h.name for h in self.logger.handlers]:
self.logger.addHandler(nh)
return self.logger
def get_index_creation_settings(self):
"""
Override to return a dict containing some extra settings
for index creation. Dict will be merged with mandatory settings,
see biothings.utils.es.ESIndexer.create_index for more.
"""
return {}
def get_mapping(self, enable_timestamp=True):
'''collect mapping data from data sources.
This is for GeneDocESBackend only.
'''
mapping = self.build_doc.get("mapping",{})
mapping = {"properties": mapping,
"dynamic": "false",
"include_in_all": "false"}
if enable_timestamp:
mapping['_timestamp'] = {
"enabled": True,
}
mapping["_meta"] = self.get_metadata()
return mapping
def get_metadata(self):
return self.build_doc.get("_meta",{})
def get_build(self,target_name=None):
target_name = target_name or self.target_name
assert target_name, "target_name must be defined first before searching for builds"
builds = [b for b in self.build_config["build"] if b == target_name]
assert len(builds) == 1, "Can't find build for config '%s' and target_name '%s'" % (self.build_name,self.target_name)
return self.build_config["build"][builds[0]]
def get_src_versions(self):
build = self.get_build()
return build["src_version"]
def get_stats(self):
build = self.get_build()
return build["stats"]
def get_timestamp(self):
build = self.get_build()
return build["started_at"]
def get_build_version(self):
build = self.get_build()
return build["build_version"]
def load_build(self, target_name=None):
'''Load build info from src_build collection.'''
target_name = target_name or self.target_name
src_build = mongo.get_src_build()
self.build_doc = src_build.find_one({'_id': target_name})
assert self.build_doc, "Can't find build document associated to '%s'" % target_name
_cfg = self.build_doc.get("build_config")
if _cfg:
self.build_config = _cfg
if not "doc_type" in _cfg:
raise ValueError("Missing 'doc_type' in build config")
self.doc_type = _cfg["doc_type"]
self.num_shards = _cfg.get("num_shards",10) # optional
self.num_shards = self.num_shards and int(self.num_shards) or self.num_shards
self.num_replicas = _cfg.get("num_replicas",0) # optional
self.num_replicas = self.num_replicas and int(self.num_replicas) or self.num_replicas
self.build_name = _cfg["name"]
else:
raise ValueError("Cannot find build config associated to '%s'" % target_name)
return _cfg
def do_index_worker(col_name,ids,pindexer,batch_num):
tgt = mongo.get_target_db()
col = tgt[col_name]
idxer = pindexer()
cur = doc_feeder(col, step=len(ids), inbatch=False, query={'_id': {'$in': ids}})
cnt = idxer.index_bulk(cur)
return cnt
def indexer_worker(col_name,ids,pindexer,batch_num,mode="index"):
try:
if mode == "index":
return do_index_worker(col_name,ids,pindexer,batch_num)
elif mode == "resume":
idxr = pindexer()
es_ids = idxr.mexists(ids)
missing_ids = [e[0] for e in es_ids if e[1] == False]
if missing_ids:
return do_index_worker(col_name,missing_ids,pindexer,batch_num)
else:
# fake indexer result; it has to be a tuple whose first element is the number of indexed docs
return (0,None)
except Exception as e:
logger_name = "index_%s_%s_batch_%s" % (pindexer.keywords.get("index","index"),col_name,batch_num)
logger = get_logger(logger_name, btconfig.LOG_FOLDER)
logger.exception(e)
exc_fn = os.path.join(btconfig.LOG_FOLDER,"%s.pick" % logger_name)
pickle.dump({"exc":e,"ids":ids},open(exc_fn,"wb"))
logger.info("Exception and IDs were dumped in pickle file '%s'" % exc_fn)
raise
|
|
"""
Test module for Flask-htpasswd extension
"""
from __future__ import absolute_import, unicode_literals
import base64
import os
import unittest
from flask import request, Flask, g
from itsdangerous import JSONWebSignatureSerializer as Serializer
import mock
from flask_htpasswd import HtPasswdAuth
class TestAuth(unittest.TestCase):
"""
Verify each piece of our authentication module using
the htpasswd in tests/config/
"""
TEST_USER = 'foo'
TEST_PASS = 'bar'
NOT_USER = 'notuser'
def setUp(self):
super(TestAuth, self).setUp()
self.app = Flask(__name__)
self.app.config['FLASK_SECRET'] = 'dummy'
self.app.debug = True
self.htpasswd = None
def _setup_normal_extension(self, auth_all=False, realm=None):
"""Setup the extension with the test htpasswd file."""
self.app.config['FLASK_HTPASSWD_PATH'] = os.path.join(
os.path.dirname(
os.path.abspath(__file__)
),
'config',
'test_htpasswd'
)
if auth_all:
self.app.config['FLASK_AUTH_ALL'] = True
if realm:
self.app.config['FLASK_AUTH_REALM'] = realm
self.htpasswd = HtPasswdAuth(self.app)
def _get_requires_auth_decorator(self):
"""
Returns decorated mock function.
"""
wrapped = mock.Mock()
wrapped.__name__ = str('foo')
decorated = self.htpasswd.required(wrapped)
return wrapped, decorated
def test_app_factory(self):
"""Verify we work fine even without an app in __init__"""
htpasswd = HtPasswdAuth()
htpasswd.init_app(self.app)
@mock.patch('flask_htpasswd.log')
def test_no_htpasswd_file(self, mocked_log):
"""Verify that we are just fine without an htpasswd file"""
HtPasswdAuth(self.app)
mocked_log.critical.assert_called_with(
'No htpasswd file loaded, please set `FLASK_HTPASSWD`'
'or `FLASK_HTPASSWD_PATH` environment variable to a '
'valid apache htpasswd file.'
)
def test_check_basic_auth(self):
"""
Validate a test user works with the correct password
and doesn't with a bad one
"""
self._setup_normal_extension()
with self.app.app_context():
self.assertTrue(self.TEST_USER in self.htpasswd.users.users())
# Verify positive case
valid, username = self.htpasswd.check_basic_auth(
self.TEST_USER, self.TEST_PASS
)
self.assertTrue(valid)
self.assertEqual(username, self.TEST_USER)
# Verify negative password case
valid, username = self.htpasswd.check_basic_auth(
self.TEST_USER, 'blah'
)
self.assertFalse(valid)
self.assertEqual(self.TEST_USER, username)
# Verify negative user case
not_user = self.NOT_USER
self.assertTrue(not_user not in self.htpasswd.users.users())
valid, username = self.htpasswd.check_basic_auth(not_user, 'blah')
self.assertFalse(valid)
self.assertEqual(not_user, username)
def test_token_generation(self):
"""
Verify token generation using known hashes and signature
"""
test_user = self.TEST_USER
not_user = self.NOT_USER
known_hashhash = ('5106273f7789f1e26b4a212789992f75c15433f402f3e94a'
'd18e7c80aee80faf')
self._setup_normal_extension()
with self.app.app_context():
token = self.htpasswd.generate_token(test_user)
# Verify hashhash against known value
hashhash = self.htpasswd.get_hashhash(test_user)
self.assertEqual(hashhash, known_hashhash)
# Now that we verified our hashhash, independently verify
# the data with a serializer from config (not trusting
# get_signature here).
serializer = Serializer(self.app.config['FLASK_SECRET'])
self.assertEqual(serializer.loads(token)['hashhash'], hashhash)
# Now go ahead and verify the reverse, trusting, and
# verifying get_signature.
serializer = self.htpasswd.get_signature()
data = serializer.loads(token)
self.assertEqual(data['username'], test_user)
self.assertEqual(data['hashhash'], hashhash)
# Verify no user handling (don't really care what
# exception gets raised).
with self.assertRaises(Exception):
token = self.htpasswd.generate_token(not_user)
@mock.patch('flask_htpasswd.log')
def test_token_auth(self, log):
"""
Validate authentication by token works properly
"""
self._setup_normal_extension()
with self.app.app_context():
# Test bad token
valid, username = self.htpasswd.check_token_auth(
'asdfasdf.asdfasdf'
)
self.assertEqual(False, valid)
self.assertEqual(None, username)
log.warning.assert_called_with('Received bad token signature')
# Test bad username, but valid signature for users that have
# been deleted
sig = self.htpasswd.get_signature()
token = sig.dumps({
'username': self.NOT_USER,
})
valid, username = self.htpasswd.check_token_auth(token)
self.assertEqual(False, valid)
self.assertEqual(None, username)
log.warning.assert_called_with(
'Token auth signed message, but invalid user %s',
self.NOT_USER
)
# Test that a different password invalidates token
token = sig.dumps({
'username': self.TEST_USER,
'hashhash': self.htpasswd.get_hashhash('norm')
})
valid, username = self.htpasswd.check_token_auth(token)
self.assertEqual(False, valid)
self.assertEqual(None, username)
log.warning.assert_called_with(
'Token and password do not match, '
'%s needs to regenerate token',
self.TEST_USER
)
# Test valid case
token = self.htpasswd.generate_token(self.TEST_USER)
valid, username = self.htpasswd.check_token_auth(token)
self.assertEqual(True, valid)
self.assertEqual(self.TEST_USER, username)
def test_requires_auth(self):
"""
Verify full auth with both token and basic auth.
"""
self._setup_normal_extension()
# Test successful basic auth
with self.app.test_request_context(headers={
'Authorization': 'Basic {0}'.format(
base64.b64encode(
'{0}:{1}'.format(
self.TEST_USER, self.TEST_PASS
).encode('ascii')
).decode('ascii')
)
}):
wrapped, decorated = self._get_requires_auth_decorator()
decorated()
wrapped.assert_called_with(user=self.TEST_USER)
# Test successful token header auth
with self.app.app_context():
with self.app.test_request_context(headers={
'Authorization': 'token {0}'.format(
self.htpasswd.generate_token(self.TEST_USER)
)
}):
wrapped, decorated = self._get_requires_auth_decorator()
decorated()
wrapped.assert_called_with(user=self.TEST_USER)
# Test successful token param auth
with self.app.app_context():
with self.app.test_request_context():
wrapped = mock.Mock()
request.args = {
'access_token': self.htpasswd.generate_token(
self.TEST_USER
)
}
wrapped, decorated = self._get_requires_auth_decorator()
decorated()
wrapped.assert_called_with(user=self.TEST_USER)
# Test unsuccessful auth
with self.app.test_request_context(headers={
'Authorization': 'token blah blah'
}):
wrapped, decorated = self._get_requires_auth_decorator()
response = decorated()
self.assertEqual(401, response.status_code)
def test_auth_all_views_disabled(self):
"""Verify that with ``FLASK_AUTH_ALL`` turned off, views are normal"""
self._setup_normal_extension()
@self.app.route('/')
def _():
"""Simple view to verify we aren't protected."""
return 'Hi'
response = self.app.test_client().get('/')
self.assertEqual(200, response.status_code)
self.assertEqual('Hi', response.data.decode('UTF-8'))
def test_auth_all_views_enabled(self):
"""Verify that with ``FLASK_AUTH_ALL`` turned on, views need auth"""
self._setup_normal_extension(auth_all=True)
@self.app.route('/')
def _():
"""Simple view to verify we are protected."""
# Validate we have the user available in g
self.assertEqual(g.user, self.TEST_USER)
return 'Hi'
response = self.app.test_client().get('/')
self.assertEqual(401, response.status_code)
# Make sure we can properly authenticate as well
response = self.app.test_client().get(
'/',
headers={
'Authorization': 'Basic {0}'.format(
base64.b64encode(
'{0}:{1}'.format(
self.TEST_USER, self.TEST_PASS
).encode('ascii')
).decode('ascii')
)
}
)
self.assertEqual(200, response.status_code)
self.assertEqual('Hi', response.data.decode('UTF-8'))
def test_basic_auth_realm_config(self):
"""Verify that the auth realm returned is configurable"""
realm = 'Foomanchubars'
self._setup_normal_extension(auth_all=True, realm=realm)
@self.app.route('/')
def _():
"""Simple view to prompt for authentication."""
self.fail(
'This view should not have been called'
) # pragma: no cover
response = self.app.test_client().get('/')
self.assertEqual(401, response.status_code)
self.assertEqual(
'Basic realm="{0}"'.format(realm),
response.headers['WWW-Authenticate']
)
def test_decorator_syntax(self):
"""Verify that the auth realm returned is configurable"""
self._setup_normal_extension()
@self.app.route('/')
@self.htpasswd.required
def _():
"""Simple view to validate authentication."""
self.fail(
'This view should not have been called'
) # pragma: no cover
response = self.app.test_client().get('/')
self.assertEqual(401, response.status_code)
|
|
#!/usr/bin/env python
"""
Integration (not unit) tests for pylast.py
"""
import time
import unittest
import pylast
from .test_pylast import TestPyLastWithLastFm
class TestPyLastNetwork(TestPyLastWithLastFm):
def test_scrobble(self):
# Arrange
artist = "test artist"
title = "test title"
timestamp = self.unix_timestamp()
lastfm_user = self.network.get_user(self.username)
# Act
self.network.scrobble(artist=artist, title=title, timestamp=timestamp)
# Assert
# limit=2 to ignore now-playing:
last_scrobble = lastfm_user.get_recent_tracks(limit=2)[0]
self.assertEqual(str(last_scrobble.track.artist).lower(), artist)
self.assertEqual(str(last_scrobble.track.title).lower(), title)
self.assertEqual(str(last_scrobble.timestamp), str(timestamp))
def test_update_now_playing(self):
# Arrange
artist = "Test Artist"
title = "test title"
album = "Test Album"
track_number = 1
lastfm_user = self.network.get_user(self.username)
# Act
self.network.update_now_playing(
artist=artist, title=title, album=album, track_number=track_number
)
# Assert
current_track = lastfm_user.get_now_playing()
self.assertIsNotNone(current_track)
self.assertEqual(str(current_track.title).lower(), "test title")
self.assertEqual(str(current_track.artist).lower(), "test artist")
def test_enable_rate_limiting(self):
# Arrange
self.assertFalse(self.network.is_rate_limited())
# Act
self.network.enable_rate_limit()
then = time.time()
# Make some network call, limit not applied first time
self.network.get_user(self.username)
# Make a second network call, limiting should be applied
self.network.get_top_artists()
now = time.time()
# Assert
self.assertTrue(self.network.is_rate_limited())
self.assertGreaterEqual(now - then, 0.2)
def test_disable_rate_limiting(self):
# Arrange
self.network.enable_rate_limit()
self.assertTrue(self.network.is_rate_limited())
# Act
self.network.disable_rate_limit()
# Make some network call, limit not applied first time
self.network.get_user(self.username)
# Make a second network call, limiting should be applied
self.network.get_top_artists()
# Assert
self.assertFalse(self.network.is_rate_limited())
def test_lastfm_network_name(self):
# Act
name = str(self.network)
# Assert
self.assertEqual(name, "Last.fm Network")
def test_geo_get_top_artists(self):
# Arrange
# Act
artists = self.network.get_geo_top_artists(country="United Kingdom", limit=1)
# Assert
self.assertEqual(len(artists), 1)
self.assertIsInstance(artists[0], pylast.TopItem)
self.assertIsInstance(artists[0].item, pylast.Artist)
def test_geo_get_top_tracks(self):
# Arrange
# Act
tracks = self.network.get_geo_top_tracks(
country="United Kingdom", location="Manchester", limit=1
)
# Assert
self.assertEqual(len(tracks), 1)
self.assertIsInstance(tracks[0], pylast.TopItem)
self.assertIsInstance(tracks[0].item, pylast.Track)
def test_network_get_top_artists_with_limit(self):
# Arrange
# Act
artists = self.network.get_top_artists(limit=1)
# Assert
self.helper_only_one_thing_in_top_list(artists, pylast.Artist)
def test_network_get_top_tags_with_limit(self):
# Arrange
# Act
tags = self.network.get_top_tags(limit=1)
# Assert
self.helper_only_one_thing_in_top_list(tags, pylast.Tag)
def test_network_get_top_tags_with_no_limit(self):
# Arrange
# Act
tags = self.network.get_top_tags()
# Assert
self.helper_at_least_one_thing_in_top_list(tags, pylast.Tag)
def test_network_get_top_tracks_with_limit(self):
# Arrange
# Act
tracks = self.network.get_top_tracks(limit=1)
# Assert
self.helper_only_one_thing_in_top_list(tracks, pylast.Track)
def test_country_top_tracks(self):
# Arrange
country = self.network.get_country("Croatia")
# Act
things = country.get_top_tracks(limit=2)
# Assert
self.helper_two_different_things_in_top_list(things, pylast.Track)
def test_country_network_top_tracks(self):
# Arrange
# Act
things = self.network.get_geo_top_tracks("Croatia", limit=2)
# Assert
self.helper_two_different_things_in_top_list(things, pylast.Track)
def test_tag_top_tracks(self):
# Arrange
tag = self.network.get_tag("blues")
# Act
things = tag.get_top_tracks(limit=2)
# Assert
self.helper_two_different_things_in_top_list(things, pylast.Track)
def test_album_data(self):
# Arrange
thing = self.network.get_album("Test Artist", "Test Album")
# Act
stringed = str(thing)
rep = thing.__repr__()
title = thing.get_title()
name = thing.get_name()
playcount = thing.get_playcount()
url = thing.get_url()
# Assert
self.assertEqual(stringed, "Test Artist - Test Album")
self.assertIn("pylast.Album('Test Artist', 'Test Album',", rep)
self.assertEqual(title, name)
self.assertIsInstance(playcount, int)
self.assertGreater(playcount, 1)
self.assertEqual("https://www.last.fm/music/test%2bartist/test%2balbum", url)
def test_track_data(self):
# Arrange
thing = self.network.get_track("Test Artist", "test title")
# Act
stringed = str(thing)
rep = thing.__repr__()
title = thing.get_title()
name = thing.get_name()
playcount = thing.get_playcount()
url = thing.get_url(pylast.DOMAIN_FRENCH)
# Assert
self.assertEqual(stringed, "Test Artist - test title")
self.assertIn("pylast.Track('Test Artist', 'test title',", rep)
self.assertEqual(title, "test title")
self.assertEqual(title, name)
self.assertIsInstance(playcount, int)
self.assertGreater(playcount, 1)
self.assertEqual(
"https://www.last.fm/fr/music/test%2bartist/_/test%2btitle", url
)
def test_country_top_artists(self):
# Arrange
country = self.network.get_country("Ukraine")
# Act
artists = country.get_top_artists(limit=1)
# Assert
self.helper_only_one_thing_in_top_list(artists, pylast.Artist)
def test_caching(self):
# Arrange
user = self.network.get_user("RJ")
# Act
self.network.enable_caching()
tags1 = user.get_top_tags(limit=1, cacheable=True)
tags2 = user.get_top_tags(limit=1, cacheable=True)
# Assert
self.assertTrue(self.network.is_caching_enabled())
self.assertEqual(tags1, tags2)
self.network.disable_caching()
self.assertFalse(self.network.is_caching_enabled())
def test_album_mbid(self):
# Arrange
mbid = "a6a265bf-9f81-4055-8224-f7ac0aa6b937"
# Act
album = self.network.get_album_by_mbid(mbid)
album_mbid = album.get_mbid()
# Assert
self.assertIsInstance(album, pylast.Album)
self.assertEqual(album.title.lower(), "test")
self.assertEqual(album_mbid, mbid)
def test_artist_mbid(self):
# Arrange
mbid = "7e84f845-ac16-41fe-9ff8-df12eb32af55"
# Act
artist = self.network.get_artist_by_mbid(mbid)
# Assert
self.assertIsInstance(artist, pylast.Artist)
self.assertEqual(artist.name, "MusicBrainz Test Artist")
def test_track_mbid(self):
# Arrange
mbid = "ebc037b1-cc9c-44f2-a21f-83c219f0e1e0"
# Act
track = self.network.get_track_by_mbid(mbid)
track_mbid = track.get_mbid()
# Assert
self.assertIsInstance(track, pylast.Track)
self.assertEqual(track.title, "first")
self.assertEqual(track_mbid, mbid)
def test_init_with_token(self):
# Arrange/Act
msg = None
try:
pylast.LastFMNetwork(
api_key=self.__class__.secrets["api_key"],
api_secret=self.__class__.secrets["api_secret"],
token="invalid",
)
except pylast.WSError as exc:
msg = str(exc)
# Assert
self.assertEqual(msg, "Unauthorized Token - This token has not been issued")
def test_proxy(self):
# Arrange
host = "https://example.com"
port = 1234
# Act / Assert
self.network.enable_proxy(host, port)
self.assertTrue(self.network.is_proxy_enabled())
self.assertEqual(self.network._get_proxy(), ["https://example.com", 1234])
self.network.disable_proxy()
self.assertFalse(self.network.is_proxy_enabled())
def test_album_search(self):
# Arrange
album = "Nevermind"
# Act
search = self.network.search_for_album(album)
results = search.get_next_page()
# Assert
self.assertIsInstance(results, list)
self.assertIsInstance(results[0], pylast.Album)
def test_album_search_images(self):
# Arrange
album = "Nevermind"
search = self.network.search_for_album(album)
# Act
results = search.get_next_page()
images = results[0].info["image"]
# Assert
self.assertEqual(len(images), 4)
self.assert_startswith(images[pylast.SIZE_SMALL], "https://")
self.assert_endswith(images[pylast.SIZE_SMALL], ".png")
self.assertIn("/34s/", images[pylast.SIZE_SMALL])
self.assert_startswith(images[pylast.SIZE_EXTRA_LARGE], "https://")
self.assert_endswith(images[pylast.SIZE_EXTRA_LARGE], ".png")
self.assertIn("/300x300/", images[pylast.SIZE_EXTRA_LARGE])
def test_artist_search(self):
# Arrange
artist = "Nirvana"
# Act
search = self.network.search_for_artist(artist)
results = search.get_next_page()
# Assert
self.assertIsInstance(results, list)
self.assertIsInstance(results[0], pylast.Artist)
def test_artist_search_images(self):
# Arrange
artist = "Nirvana"
search = self.network.search_for_artist(artist)
# Act
results = search.get_next_page()
images = results[0].info["image"]
# Assert
self.assertEqual(len(images), 5)
self.assert_startswith(images[pylast.SIZE_SMALL], "https://")
self.assert_endswith(images[pylast.SIZE_SMALL], ".png")
self.assertIn("/34s/", images[pylast.SIZE_SMALL])
self.assert_startswith(images[pylast.SIZE_EXTRA_LARGE], "https://")
self.assert_endswith(images[pylast.SIZE_EXTRA_LARGE], ".png")
self.assertIn("/300x300/", images[pylast.SIZE_EXTRA_LARGE])
def test_track_search(self):
# Arrange
artist = "Nirvana"
track = "Smells Like Teen Spirit"
# Act
search = self.network.search_for_track(artist, track)
results = search.get_next_page()
# Assert
self.assertIsInstance(results, list)
self.assertIsInstance(results[0], pylast.Track)
def test_track_search_images(self):
# Arrange
artist = "Nirvana"
track = "Smells Like Teen Spirit"
search = self.network.search_for_track(artist, track)
# Act
results = search.get_next_page()
images = results[0].info["image"]
# Assert
self.assertEqual(len(images), 4)
self.assert_startswith(images[pylast.SIZE_SMALL], "https://")
self.assert_endswith(images[pylast.SIZE_SMALL], ".png")
self.assertIn("/34s/", images[pylast.SIZE_SMALL])
self.assert_startswith(images[pylast.SIZE_EXTRA_LARGE], "https://")
self.assert_endswith(images[pylast.SIZE_EXTRA_LARGE], ".png")
self.assertIn("/300x300/", images[pylast.SIZE_EXTRA_LARGE])
def test_search_get_total_result_count(self):
# Arrange
artist = "Nirvana"
track = "Smells Like Teen Spirit"
search = self.network.search_for_track(artist, track)
# Act
total = search.get_total_result_count()
# Assert
self.assertGreater(int(total), 10000)
if __name__ == "__main__":
unittest.main(failfast=True)
|
|
############################################################################
# Copyright (c) Wolf Vollprecht, Johan Mabille and Sylvain Corlay #
# Copyright (c) QuantStack #
# #
# Distributed under the terms of the BSD 3-Clause License. #
# #
# The full license is in the file LICENSE, distributed with this software. #
############################################################################
import os
import sys
import subprocess
# Build the test extension
here = os.path.abspath(os.path.dirname(__file__))
subprocess.check_call([sys.executable, os.path.join(here, 'setup.py'), 'build_ext', '--inplace'], cwd=here)
# Test it!
from unittest import TestCase
import xtensor_python_test as xt
import numpy as np
class XtensorTest(TestCase):
def test_rm(self):
xt.test_rm(np.array([10], dtype=int))
def test_example1(self):
self.assertEqual(4, xt.example1([4, 5, 6]))
def test_example2(self):
x = np.array([[0., 1.], [2., 3.]])
res = np.array([[2., 3.], [4., 5.]])
y = xt.example2(x)
np.testing.assert_allclose(y, res, 1e-12)
def test_example3(self):
x = np.arange(2 * 3).reshape(2, 3)
xc = np.asfortranarray(x)
y = np.arange(2 * 3 * 4).reshape(2, 3, 4)
v = y[1:, 1:, 0]
z = np.arange(2 * 3 * 4 * 5).reshape(2, 3, 4, 5)
np.testing.assert_array_equal(xt.example3_xarray(x), x.T + 2)
np.testing.assert_array_equal(xt.example3_xarray_colmajor(xc), xc.T + 2)
np.testing.assert_array_equal(xt.example3_xtensor3(y), y.T + 2)
np.testing.assert_array_equal(xt.example3_xtensor2(x), x.T + 2)
np.testing.assert_array_equal(xt.example3_xtensor2(y[1:, 1:, 0]), v.T + 2)
np.testing.assert_array_equal(xt.example3_xtensor2_colmajor(xc), xc.T + 2)
np.testing.assert_array_equal(xt.example3_xfixed3(y), y.T + 2)
np.testing.assert_array_equal(xt.example3_xfixed2(x), x.T + 2)
np.testing.assert_array_equal(xt.example3_xfixed2_colmajor(xc), xc.T + 2)
with self.assertRaises(TypeError):
xt.example3_xtensor3(x)
with self.assertRaises(TypeError):
xt.example3_xfixed3(x)
with self.assertRaises(TypeError):
x = np.arange(3*2).reshape(3, 2)
xt.example3_xfixed2(x)
def test_vectorize(self):
x1 = np.array([[0, 1], [2, 3]])
x2 = np.array([0, 1])
res = np.array([[0, 2], [2, 4]])
y = xt.vectorize_example1(x1, x2)
np.testing.assert_array_equal(y, res)
def test_readme_example1(self):
v = np.arange(15).reshape(3, 5)
y = xt.readme_example1(v)
np.testing.assert_allclose(y, 1.2853996391883833, 1e-12)
def test_complex_overload_reg(self):
a = 23.23
c = 2.0 + 3.1j
self.assertEqual(xt.complex_overload_reg(a), a)
self.assertEqual(xt.complex_overload_reg(c), c)
def test_complex_overload(self):
a = np.random.rand(3, 3)
b = np.random.rand(3, 3)
c = a + b * 1j
y = xt.complex_overload(c)
np.testing.assert_allclose(np.imag(y), np.imag(c))
np.testing.assert_allclose(np.real(y), np.real(c))
x = xt.complex_overload(b)
self.assertEqual(x.dtype, b.dtype)
np.testing.assert_allclose(x, b)
def test_readme_example2(self):
x = np.arange(15).reshape(3, 5)
y = [1, 2, 3, 4, 5]
z = xt.readme_example2(x, y)
np.testing.assert_allclose(z,
[[-0.540302, 1.257618, 1.89929 , 0.794764, -1.040465],
[-1.499227, 0.136731, 1.646979, 1.643002, 0.128456],
[-1.084323, -0.583843, 0.45342 , 1.073811, 0.706945]], 1e-5)
def test_rect_to_polar(self):
x = np.ones(10, dtype=complex)
        z = xt.rect_to_polar(x[::2])
np.testing.assert_allclose(z, np.ones(5, dtype=float), 1e-5)
def test_shape_comparison(self):
x = np.ones([4, 4])
y = np.ones([5, 5])
z = np.zeros([4, 4])
self.assertFalse(xt.compare_shapes(x, y))
self.assertTrue(xt.compare_shapes(x, z))
def test_int_overload(self):
for dtype in [np.uint8, np.int8, np.uint16, np.int16, np.uint32, np.int32, np.uint64, np.int64]:
b = xt.int_overload(np.ones((10), dtype))
self.assertEqual(str(dtype.__name__), b)
def test_dtype(self):
var = xt.dtype_to_python()
self.assertEqual(var.dtype.names, ('a', 'b', 'c', 'x'))
exp_dtype = {
'a': (np.dtype('float64'), 0),
'b': (np.dtype('int32'), 8),
'c': (np.dtype('int8'), 12),
'x': (np.dtype(('<f8', (3,))), 16)
}
self.assertEqual(var.dtype.fields, exp_dtype)
self.assertEqual(var[0]['a'], 123)
self.assertEqual(var[0]['b'], 321)
self.assertEqual(var[0]['c'], ord('a'))
self.assertTrue(np.all(var[0]['x'] == [1, 2, 3]))
self.assertEqual(var[1]['a'], 111)
self.assertEqual(var[1]['b'], 222)
self.assertEqual(var[1]['c'], ord('x'))
self.assertTrue(np.all(var[1]['x'] == [5, 5, 5]))
d_dtype = np.dtype({'names':['a','b'], 'formats':['<f8','<i4'], 'offsets':[0,8], 'itemsize':16})
darr = np.array([(1, ord('p')), (123, ord('c'))], dtype=d_dtype)
self.assertEqual(darr[0]['a'], 1)
res = xt.dtype_from_python(darr)
self.assertEqual(res[0]['a'], 123.)
self.assertEqual(darr[0]['a'], 123.)
def test_char_array(self):
        var = np.array(['hello', 'from', 'python'], dtype=np.dtype('|S20'))
self.assertEqual(var[0], b'hello')
xt.char_array(var)
self.assertEqual(var[0], b'hello')
self.assertEqual(var[1], b'from')
self.assertEqual(var[2], b'c++')
def test_col_row_major(self):
var = np.arange(50, dtype=float).reshape(2, 5, 5)
with self.assertRaises(RuntimeError):
xt.col_major_array(var)
with self.assertRaises(TypeError):
xt.row_major_tensor(var.T)
with self.assertRaises(TypeError):
xt.row_major_tensor(var[:, ::2, ::2])
with self.assertRaises(TypeError):
# raise for wrong dimension
xt.row_major_tensor(var[0, 0, :])
xt.row_major_tensor(var)
varF = np.arange(50, dtype=float).reshape(2, 5, 5, order='F')
xt.col_major_array(varF)
xt.col_major_array(varF[:, :, 0]) # still col major!
def test_xscalar(self):
var = np.arange(50, dtype=int)
self.assertTrue(np.sum(var) == xt.xscalar(var))
def test_bad_argument_call(self):
with self.assertRaises(TypeError):
xt.simple_array("foo")
with self.assertRaises(TypeError):
xt.simple_tensor("foo")
def test_diff_shape_overload(self):
self.assertEqual(1, xt.diff_shape_overload(np.ones(2)))
self.assertEqual(2, xt.diff_shape_overload(np.ones((2, 2))))
with self.assertRaises(TypeError):
# FIXME: the TypeError information is not informative
xt.diff_shape_overload(np.ones((2, 2, 2)))
def test_native_casters(self):
import gc
# check keep alive policy for get_strided_view()
gc.collect()
obj = xt.test_native_casters()
a = obj.get_strided_view()
obj = None
gc.collect()
_ = np.zeros((100, 100))
self.assertEqual(a.sum(), a.size)
# check keep alive policy for get_array_adapter()
gc.collect()
obj = xt.test_native_casters()
a = obj.get_array_adapter()
obj = None
gc.collect()
_ = np.zeros((100, 100))
self.assertEqual(a.sum(), a.size)
        # check keep alive policy for get_tensor_adapter()
gc.collect()
obj = xt.test_native_casters()
a = obj.get_tensor_adapter()
obj = None
gc.collect()
_ = np.zeros((100, 100))
self.assertEqual(a.sum(), a.size)
# check keep alive policy for get_owning_array_adapter()
gc.collect()
obj = xt.test_native_casters()
a = obj.get_owning_array_adapter()
gc.collect()
_ = np.zeros((100, 100))
self.assertEqual(a.sum(), a.size)
# check keep alive policy for view_keep_alive_member_function()
gc.collect()
a = np.ones((100, 100))
b = obj.view_keep_alive_member_function(a)
obj = None
a = None
gc.collect()
_ = np.zeros((100, 100))
self.assertEqual(b.sum(), b.size)
        # check shared buffer (ensure that no copy is done)
obj = xt.test_native_casters()
arr = obj.get_array()
strided_view = obj.get_strided_view()
strided_view[0, 1] = -1
self.assertEqual(strided_view.shape, (1, 2))
self.assertEqual(arr[0, 2], -1)
adapter = obj.get_array_adapter()
self.assertEqual(adapter.shape, (2, 2))
adapter[1, 1] = -2
self.assertEqual(arr[0, 5], -2)
adapter = obj.get_tensor_adapter()
self.assertEqual(adapter.shape, (2, 2))
adapter[1, 1] = -3
self.assertEqual(arr[0, 5], -3)
class AttributeTest(TestCase):
def setUp(self):
self.c = xt.C()
def test_copy(self):
arr = self.c.copy
arr[0] = 1
self.assertEqual([0.]*4, self.c.copy.tolist())
def test_reference(self):
arr = self.c.ref
arr[0] = 1
self.assertEqual([1.] + [0.]*3, self.c.ref.tolist())
|
|
##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import unittest
import Gaffer
import GafferTest
import GafferScene
import GafferSceneTest
class ScenePathTest( GafferSceneTest.SceneTestCase ) :
def test( self ) :
a = GafferScene.AlembicSource()
a["fileName"].setValue( os.path.dirname( __file__ ) + "/alembicFiles/cube.abc" )
p = GafferScene.ScenePath( a["out"], Gaffer.Context(), "/" )
c = p.children()
self.assertEqual( len( c ), 1 )
self.assertEqual( str( c[0] ), "/group1" )
def testRelative( self ) :
a = GafferScene.AlembicSource()
a["fileName"].setValue( os.path.dirname( __file__ ) + "/alembicFiles/cube.abc" )
p = GafferScene.ScenePath( a["out"], Gaffer.Context(), "group1" )
self.assertEqual( str( p ), "group1" )
self.assertEqual( p.root(), "" )
self.assertEqual( [ str( c ) for c in p.children() ], [ "group1/pCube1" ] )
p2 = p.copy()
self.assertEqual( str( p2 ), "group1" )
self.assertEqual( p2.root(), "" )
self.assertEqual( [ str( c ) for c in p2.children() ], [ "group1/pCube1" ] )
def testIsValid( self ) :
plane = GafferScene.Plane()
group = GafferScene.Group()
group["in"][0].setInput( plane["out"] )
p = GafferScene.ScenePath( group["out"], Gaffer.Context(), "/" )
self.assertTrue( p.isValid() )
p.setFromString( "/group" )
self.assertTrue( p.isValid() )
p.setFromString( "/group/plane" )
self.assertTrue( p.isValid() )
p.setFromString( "/group/plane2" )
self.assertFalse( p.isValid() )
p.setFromString( "/group2/plane" )
self.assertFalse( p.isValid() )
p.setFromString( "" )
self.assertFalse( p.isValid() )
def testContextSignals( self ) :
plane = GafferScene.Plane()
context = Gaffer.Context()
self.assertEqual( context.changedSignal().num_slots(), 0 )
p = GafferScene.ScenePath( plane["out"], context, "/" )
# The path shouldn't connect to the context changed signal
		# until it really needs to - when something is connected
# to the path's own changed signal.
self.assertEqual( context.changedSignal().num_slots(), 0 )
cs = GafferTest.CapturingSlot( p.pathChangedSignal() )
self.assertEqual( context.changedSignal().num_slots(), 1 )
self.assertEqual( len( cs ), 0 )
context["test"] = 10
		self.assertEqual( len( cs ), 1 )
# Changing the context should disconnect from the old one
# and reconnect to the new one.
context2 = Gaffer.Context()
self.assertEqual( context2.changedSignal().num_slots(), 0 )
p.setContext( context2 )
self.assertEqual( context.changedSignal().num_slots(), 0 )
self.assertEqual( context2.changedSignal().num_slots(), 1 )
context["test"] = 20
self.assertTrue( len( cs ), 1 )
context["test"] = 10
self.assertTrue( len( cs ), 2 )
def testSignallingAfterDestruction( self ) :
plane = GafferScene.Plane()
context = Gaffer.Context()
path = GafferScene.ScenePath( plane["out"], context, "/" )
# force path to connect to signals
path.pathChangedSignal()
# destroy path
del path
# force emission of signals on scene and context
plane["name"].setValue( "dontCrashNow" )
context["dontCrashNow"] = 10
def testPlugRemovedFromNode( self ) :
box = Gaffer.Box()
box["p"] = Gaffer.IntPlug()
box["out"] = GafferScene.ScenePlug( direction = Gaffer.Plug.Direction.Out )
context = Gaffer.Context()
path = GafferScene.ScenePath( box["out"], context, "/" )
# force path to connect to signals
path.pathChangedSignal()
# mess things up
del box["out"]
del path
# trigger plug dirtied on the Box
box["p"].setValue( 10 )
def testSceneAccessors( self ) :
s1 = GafferScene.Plane()
s2 = GafferScene.Plane()
path = GafferScene.ScenePath( s1["out"], Gaffer.Context(), "/" )
self.assertTrue( path.getScene().isSame( s1["out"] ) )
cs = GafferTest.CapturingSlot( path.pathChangedSignal() )
s1["name"].setValue( "p" )
self.assertEqual( len( cs ), 1 )
del cs[:]
path.setScene( s1["out"] )
self.assertEqual( len( cs ), 0 )
self.assertTrue( path.getScene().isSame( s1["out"] ) )
s1["name"].setValue( "pp" )
self.assertEqual( len( cs ), 1 )
del cs[:]
path.setScene( s2["out"] )
self.assertEqual( len( cs ), 1 )
self.assertTrue( path.getScene().isSame( s2["out"] ) )
s2["name"].setValue( "a" )
self.assertEqual( len( cs ), 2 )
del cs[:]
s1["name"].setValue( "b" )
self.assertEqual( len( cs ), 0 )
def testStandardFilter( self ) :
camera = GafferScene.Camera()
plane = GafferScene.Plane()
parent = GafferScene.Parent()
parent["in"].setInput( camera["out"] )
parent["child"].setInput( plane["out"] )
parent["parent"].setValue( "/" )
path = GafferScene.ScenePath( parent["out"], Gaffer.Context(), "/" )
self.assertEqual( { str( c ) for c in path.children() }, { "/camera", "/plane" } )
path.setFilter( GafferScene.ScenePath.createStandardFilter( [ "__cameras" ] ) )
self.assertEqual( { str( c ) for c in path.children() }, { "/camera" } )
if __name__ == "__main__":
unittest.main()
|
|
#!/usr/bin/env python3
# Written by Devon Mack April-May 2017
# This program takes 5 parameters from the config file and runs SNVPhyl in Galaxy fully automatically
# Parameters (when in doubt, delete the config.json file and the program will create it for you with defaults;
# an example config sketch follows below):
# config.json
# api_key: The API key which must be generated in galaxy
# workflow_id: The ID of the SNVPhyl workflow
# ip: The ip of galaxy (with port)
# name: The prefix of the history name
# nasmnt: The directory of the NAS mount
# TODO Combined sequences folder to index
# TODO SNVPhyl Renamer
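# A minimal sketch of what config.json might contain (all values below are illustrative
# placeholders, not real credentials; defaults are written automatically if the file is missing):
# {
#     "api_key": "0123456789abcdef0123456789abcdef",
#     "workflow_id": "f2db41e1fa331b3e",
#     "ip": "http://192.168.1.3:48888/",
#     "name": "AutoSNVPhyl",
#     "nasmnt": "/mnt/nas/"
# }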
import os
import sys
import re
import time
import requests
from bioblend.galaxy import GalaxyInstance
from bioblend.galaxy import dataset_collections as collections
from bioblend import ConnectionError
from pyaccessories.TimeLog import Timer
import zipfile
class AutoSNVPhylError(ValueError):
"""Raise when a specific subset of values in context of app is wrong"""
def __init__(self, message, *args):
self.message = message # without this you may get DeprecationWarning
        # allow users to initialize misc. arguments as with any other builtin Error
super(AutoSNVPhylError, self).__init__(message, *args)
class AutoSNVPhyl(object):
def run(self):
try:
self.load() # Load from config
self.gi = GalaxyInstance(self.IP, key=self.API_KEY)
if not self.manual and self.reference is None:
# No reference and it isn't using files in upload folder
self.t.time_print("No reference file specified with -r, please input one or use the --manual"
" flag to use a reference file that you put in the upload folder.")
exit(1)
if self.noextract and not self.manual:
self.t.time_print("[Warning] Using manual flag since noextract was specified without manual.")
self.manual = True
return self.main() # Return the path to the results zip
except:
import traceback
# Print error to file
self.t.time_print("[Error Dump]\n" + traceback.format_exc())
raise
def main(self):
if self.inputs is not None:
if len(self.inputs['rename']) > 0:
self.rename = True
print(self.rename)
print(self.inputs['rename'])
# Create history in Galaxy
self.t.time_print("Creating history " + self.NAME)
while True:
try:
self.history_id = self.gi.histories.create_history(self.NAME)['id']
break
except (ConnectionError, requests.exceptions.ConnectionError):
self.wait_for_problem()
self.t.time_print(self.history_id)
# Begin uploading files to Galaxy
self.t.time_print("Uploading files to galaxy...")
# Upload files from the NAS based on the SEQ-ID list given
if not self.noextract:
self.t.time_print("Finding files on the NAS...")
# Get list of files to retrieve
to_upload = self.extract_files()
# Upload to galaxy
self.t.time_print("Uploading files from the NAS...")
n = 1
nfiles = len(to_upload)
for file in to_upload:
self.t.time_print("%d of %d: Uploading %s" % (n, nfiles, file))
self.upload_file(file)
n += 1
# Upload files from the upload folder if the manual flag is used
if self.manual:
self.t.time_print("Using files in upload folder since -m was used")
n = 1
upload_folder = os.path.join(self.script_dir, "upload")
files = os.listdir(upload_folder)
nfiles = len(files)
for file in files:
self.t.time_print("%d of %d: Uploading %s from %s directory." % (n, nfiles, file, upload_folder))
self.upload_file(os.path.join(upload_folder, file))
n += 1
self.t.time_print("Waiting for files to finish uploading...")
while True:
try:
while self.gi.histories.show_history(self.history_id)["state"] != "ok":
time.sleep(10)
break
except (ConnectionError, requests.exceptions.ConnectionError):
self.wait_for_problem()
# Check if all the files are on galaxy and that there are no duplicate/extra files there
# Create list that stores all the files on galaxy
on_galaxy = []
while True:
try:
datasets = self.gi.histories.show_history(self.history_id, contents=True)
break
except (ConnectionError, requests.exceptions.ConnectionError):
self.wait_for_problem()
for dataset in datasets:
on_galaxy.append(dataset['name'])
# Check for duplicate files
        count = {}
        for file in on_galaxy:
            if file in count:
                # The file is already in the dictionary, so it is a duplicate on galaxy
                self.t.time_print("[Error] Duplicate file %s on galaxy!" % file)
                raise AutoSNVPhylError("Duplicate file on galaxy!")
            # If it isn't already in the dictionary add it to the dictionary
            count[file] = True
# Print all the files that weren't successfully uploaded.
for file in self.uploaded:
if file not in on_galaxy:
# It wasn't decompressed
if file + ".gz" in on_galaxy:
if 'R1' in file:
n = 'R1'
else:
n = 'R2'
# Re-upload the file
self.t.time_print("[Warning] File %s wasn't automatically decompressed by galaxy,"
" re-uploading..." % file + '.gz')
self.upload_file(self.extractor.retrieve_file(file.split('_')[0], filetype="fastq_" + n,
getpathonly=True))
if not self.upload_check(file):
errmsg = "[Error] File %s wasn't automatically decompressed by galaxy again, something is " \
"wrong with the file?" % file + '.gz'
self.t.time_print(errmsg)
raise AutoSNVPhylError(errmsg)
else:
n = ""
if 'R1' in file:
n = 'R1'
                    elif 'R2' in file:
n = 'R2'
self.t.time_print("[Warning] File %s wasn't uploaded to galaxy! Attempting to re-upload" % file)
if n == "":
if file.endswith('.fasta'):
self.upload_file(self.extractor.retrieve_file(file.split('.')[0], filetype="fasta",
getpathonly=True))
else:
self.upload_file(self.extractor.retrieve_file(file.split('_')[0], filetype="fastq_" + n,
getpathonly=True))
if not self.upload_check(file):
errmsg = "[Error] File %s couldn't be uploaded to galaxy!" % file
self.t.time_print(errmsg)
raise AutoSNVPhylError(errmsg)
self.t.time_print("Finished uploading.")
self.t.time_print("Building list of dataset pairs...")
self.build_list()
self.t.time_print("Starting workflow...")
self.run_workflow()
time.sleep(10) # Give it a bit of time to start the workflow
# Wait for workflow to finish
self.t.time_print("Waiting for workflow to finish.")
wait = 0
        longwait = 0  # counts the ten-minute blocks waited so far
while True:
try:
history_state = self.gi.histories.show_history(self.history_id)["state"]
break
except (ConnectionError, requests.exceptions.ConnectionError):
self.wait_for_problem()
while history_state != "ok":
wait += 1
if wait > 60: # 10 minutes
self.t.time_print("Still waiting for workflow to finish.")
wait = 0
longwait += 1
if longwait > 23:
raise AutoSNVPhylError("SNVPhyl took to long, please check galaxy history called %s" %
str(self.NAME))
time.sleep(10)
while True:
try:
history_state = self.gi.histories.show_history(self.history_id)["state"]
break
except (ConnectionError, requests.exceptions.ConnectionError):
self.wait_for_problem()
if history_state == "error":
self.t.time_print("Something went wrong with your SNVPhyl! Check the galaxy history called %s" % self.NAME)
raise AutoSNVPhylError("Something went wrong with your SNVPhyl! "
"Check the galaxy history called %s" % self.NAME)
self.t.time_print("Workflow finished, downloading files...")
to_download = [
"snvMatrix.tsv",
"phylogeneticTreeStats.txt",
"phylogeneticTree.newick",
"filterStats.txt",
"snvAlignment.phy",
"vcf2core.tsv",
"snvTable.tsv"
]
self.t.time_print("Creating directory %s." % self.NAME)
folder = os.path.join(self.script_dir, 'results', self.NAME)
if not os.path.exists(folder):
os.makedirs(folder)
self.t.time_print("Downloading files:")
        not_downloaded = list(to_download)
while True:
try:
datasets = self.gi.histories.show_history(self.history_id, contents=True)
break
except (ConnectionError, requests.exceptions.ConnectionError):
self.wait_for_problem()
for dataset in datasets:
# Renames and downloads
if dataset["name"] in to_download:
self.t.time_print(" Downloading %s to %s" % (dataset["name"], os.path.join(folder, dataset["name"])))
while True:
try:
self.gi.datasets.download_dataset(dataset["id"], os.path.join(folder, dataset["name"]),
wait_for_completion=True, use_default_filename=False)
break
except (ConnectionError, requests.exceptions.ConnectionError):
self.wait_for_problem()
not_downloaded.remove(dataset["name"])
if len(not_downloaded) > 0:
self.t.time_print("[Warning] Can't find some results files on Galaxy!,"
" these will not be included in the zip file: ")
for missing in to_download:
self.t.time_print(" %s" % missing)
self.zip_results(folder)
self.t.time_print("Completed")
return os.path.join(self.script_dir, folder, self.NAME + '.zip')
def upload_check(self, filename):
self.t.time_print("Waiting for upload to finish...")
while True:
try:
while self.gi.histories.show_history(self.history_id)["state"] != "ok":
time.sleep(10)
break
except (ConnectionError, requests.exceptions.ConnectionError):
self.wait_for_problem()
# Check if the file is on galaxy
on_galaxy = []
while True:
try:
datasets = self.gi.histories.show_history(self.history_id, contents=True)
break
except (ConnectionError, requests.exceptions.ConnectionError):
self.wait_for_problem()
for dataset in datasets:
on_galaxy.append(dataset['name'])
if filename in on_galaxy:
return True
else:
return False
def zip_results(self, r_folder):
f_list = [
"snvMatrix.tsv",
"phylogeneticTreeStats.txt",
"phylogeneticTree.newick",
"filterStats.txt",
"snvAlignment.phy",
"vcf2core.tsv",
"snvTable.tsv"
]
# Zip all the files
results_zip = os.path.join(self.script_dir, r_folder, self.NAME + '.zip')
self.t.time_print("Creating zip file %s" % results_zip)
try:
os.remove(results_zip)
except OSError:
pass
zipf = zipfile.ZipFile(results_zip, 'w', zipfile.ZIP_DEFLATED)
for to_zip in f_list:
try:
zipf.write(os.path.join(r_folder, to_zip), arcname=to_zip)
self.t.time_print("Zipped %s" % to_zip)
            except FileNotFoundError:
                # Skip missing files so the rest of the results can still be zipped
                self.t.time_print("[Warning] Can't find %s, will leave it out of .zip." % to_zip)
zipf.close()
def upload_file(self, path):
        # TODO I removed some stuff; needs testing before being run permanently.
from bioblend import ConnectionError as bioblendConnectionError
import time
attempts = 0
while True:
try:
if self.rename:
if path.endswith('.fasta'):
ending = '.fasta'
seqid = os.path.split(path)[-1].split('.')[0]
else:
if 'r2' in os.path.split(path)[-1].lower():
ending = '_R2.fastq'
elif 'r1' in os.path.split(path)[-1].lower():
ending = '_R1.fastq'
else:
ending = '.fastq'
seqid = os.path.split(path)[-1].split('_')[0]
nfilename = self.inputs['rename'][seqid] + ending
self.t.time_print("Uploading as %s..." % nfilename)
else:
self.t.time_print('Uploading...')
if self.inputs is not None: # Automated
if self.rename:
try:
self.uploaded.append(nfilename)
self.gi.tools.upload_file(path, self.history_id,
file_name=nfilename)
except KeyError:
self.gi.tools.upload_file(path, self.history_id)
else:
self.gi.tools.upload_file(path, self.history_id)
else:
self.gi.tools.upload_file(path, self.history_id)
break
except bioblendConnectionError:
if attempts < self.max_attempts:
attempts += 1
self.t.time_print("[Warning] Failed to upload %s, retrying (attempt %d of %d)" %
(path, attempts, self.max_attempts))
time.sleep(5)
continue
else:
self.t.time_print("[Error] Failed to upload %s, after %d attempts." %
(path, self.max_attempts))
raise
except requests.exceptions.ConnectionError:
if attempts < self.max_attempts:
attempts += 1
self.t.time_print("Galaxy isn't responding...")
self.wait_for_problem()
self.t.time_print("[Warning] Failed to upload %s, retrying (attempt %d of %d)" %
(path, attempts, self.max_attempts))
continue
else:
self.t.time_print("[Error] Failed to upload %s, after %d attempts." %
(path, self.max_attempts))
raise
def extract_files(self):
from sequence_getter import SequenceGetter
from sequence_getter import ExtractionError
self.extractor = SequenceGetter(nasmnt=self.NASMNT, output=False)
if self.inputs is None:
path_to_list = os.path.join(self.script_dir, "retrieve.txt")
try:
f = open(path_to_list, "r")
# Get all of the ids in the file
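                # (the pattern below matches SEQ-IDs of the form year-text-number,
                #  e.g. "2017-SEQ-0123"; this example ID is purely illustrative)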
ids = re.findall(r"(2\d{3}-\w{2,10}-\d{3,4})", f.read())
f.close()
except FileNotFoundError:
# create blank file
open(path_to_list, "w").close()
print("Please enter SEQids in the retrieve.txt file")
exit(1)
            # Find the invalid lines and output them
            for line in open(path_to_list, "r"):
if line.rstrip("\n") not in ids and len(line.rstrip("\n")) > 2:
self.t.time_print("Invalid seqid: \"%s\"" % line.rstrip("\n"))
else:
ids = self.inputs['fastqs']
# Get paths of fastq's
path_list = []
err = ""
for seqid in ids:
for i in [1, 2]:
try:
path_list.append(self.extractor.retrieve_file(seqid.rstrip("\n"), filetype="fastq_R" + str(i),
getpathonly=True))
except ExtractionError as e:
err += e.message + '\n'
if self.reference is not None:
# Get fasta
try:
refpath = self.extractor.retrieve_file(self.reference, "fasta", getpathonly=True)
except ExtractionError as e:
err += e.message + '\n'
if len(err) > 0:
raise AutoSNVPhylError(err)
path_list.append(refpath)
else:
# Since there is no reference specified, check for one in the upload directory
self.t.time_print("No reference file specified, using the one in the upload directory")
found_ref = False
for file in os.listdir(os.path.join(self.script_dir, 'upload')):
if file.endswith(".fasta"):
if not found_ref:
self.t.time_print("Found " + file + ", using it as a reference...")
found_ref = True
else:
self.t.time_print("[Error] Found another reference file in upload folder, please only use one.")
exit(1)
if not found_ref:
self.t.time_print("[Error] No reference file(fasta) found. Cannot run.")
exit(1)
return path_list
def run_workflow(self):
while True:
try:
contents = self.gi.histories.show_history(self.history_id, contents=True)
break
except (ConnectionError, requests.exceptions.ConnectionError):
self.wait_for_problem()
datamap = dict()
found_ref = False
        found_collection = False
# Find the reference file
for item in contents:
if item["history_content_type"] == "dataset" and item["extension"] == "fasta":
datamap['1'] = {
'src': 'hda',
'id': item['id']
}
found_ref = True
if item["name"] == "pair_list":
datamap['0'] = {
'src': 'hdca',
'id': item['id']
}
found_collection = True
if not found_ref:
self.t.time_print("[Error] Can't find a reference on Galaxy.")
raise AutoSNVPhylError("Can't find a reference on Galaxy.")
if not found_collection:
self.t.time_print("[Error] Can't find list of dataset pairs on Galaxy.")
raise AutoSNVPhylError("Can't find list of dataset pairs on Galaxy.")
min_coverage = "10"
min_mean_mapping = "30"
alternative_allele_proportion = "0.75"
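        # Note: the numeric keys in 'params' below are presumably step indices of the
        # SNVPhyl workflow (the tool steps that consume the coverage / mapping /
        # allele-proportion settings); they are specific to the workflow in WORKFLOW_ID.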
params = { # Don't change this, it works
'5': {
'mindepth': min_coverage
},
'11': {
'coverage': min_coverage,
'mean_mapping': min_mean_mapping,
'ao': alternative_allele_proportion
},
}
while True:
try:
self.gi.workflows.invoke_workflow(self.WORKFLOW_ID, inputs=datamap,
params=params, history_id=self.history_id)
break
except (ConnectionError, requests.exceptions.ConnectionError):
self.wait_for_problem()
def build_list(self):
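        # Pair the uploaded R1/R2 fastq datasets by their shared name prefix and register
        # them on Galaxy as a "list:paired" dataset collection named "pair_list", which
        # run_workflow() later passes to the SNVPhyl workflow as its paired-reads input.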
while True:
try:
contents = self.gi.histories.show_history(self.history_id, contents=True)
break
except (ConnectionError, requests.exceptions.ConnectionError):
self.wait_for_problem()
fastqs = []
# get fastq files
for item in contents:
if item["history_content_type"] == "dataset" and item["extension"] == "fastq":
fastqs.append(item)
# pair fastq files
r1s = []
r2s = []
for fastq in fastqs:
result1 = re.findall(r"(.+)_[Rr]1", fastq["name"], flags=0)
result2 = re.findall(r"(.+)_[Rr]2", fastq["name"], flags=0)
if len(result1) >= 1:
fastq["name"] = result1[0]
r1s.append(fastq)
if len(result2) >= 1:
fastq["name"] = result2[0]
r2s.append(fastq)
if len(r1s) != len(r2s):
self.t.time_print("[WARNING] There are different amounts of R1 and R2 files,"
" will only use ones that can be paired.")
pairs = []
done = []
for sequence in r1s:
for compare in r2s:
if sequence["name"] == compare["name"] and sequence["name"] not in done:
# Pair them
elements = [
collections.HistoryDatasetElement(name="forward", id=sequence["id"]),
collections.HistoryDatasetElement(name="reverse", id=compare["id"])
]
done.append(sequence["name"])
pairs.append(collections.CollectionElement(sequence["name"], type="paired", elements=elements))
collection_description = collections.CollectionDescription("pair_list", type="list:paired", elements=pairs)
while True:
try:
self.gi.histories.create_dataset_collection(self.history_id, collection_description)
break
except (ConnectionError, requests.exceptions.ConnectionError):
self.wait_for_problem()
def load(self):
from pyaccessories.SaveLoad import SaveLoad as SaveLoad
config = SaveLoad(os.path.join(self.script_dir, "config.json"), create=True)
self.API_KEY = config.get('api_key')
if not re.match(r"^\w{32}$", self.API_KEY):
self.t.time_print("Invalid Galaxy API key.")
exit(1)
self.WORKFLOW_ID = config.get('workflow_id', default='f2db41e1fa331b3e') # SNVPhyl paired end
if not re.match(r"^\w{16}$", self.WORKFLOW_ID):
self.t.time_print("Invalid workflow ID format.")
exit(1)
self.IP = config.get('ip', default="http://192.168.1.3:48888/")
self.NASMNT = os.path.normpath(config.get('nasmnt', default="/mnt/nas/"))
def wait_for_problem(self):
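        # Poll Galaxy until it responds again: retry roughly every 30 seconds a few times,
        # then fall back to hourly checks, and give up (re-raise) once time_until_giveup
        # hourly attempts have been exhausted.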
import time
short_wait = 5
time_until_giveup = 36
problem = True
while problem:
problem = False
try:
self.gi.histories.get_histories()
return
except (ConnectionError, requests.exceptions.ConnectionError) as e:
if e.status_code == 403: # Invalid API key
self.t.time_print("Invalid Galaxy API Key!")
exit(1)
elif 'Max retries exceeded' in str(e.args[0]):
self.t.time_print("Error: Galaxy isn't running/connection error.")
problem = True
if short_wait > 1:
self.t.time_print("Waiting 30 seconds...")
time.sleep(30)
short_wait -= 1
else:
self.t.time_print("Waiting 1 hour...")
time_until_giveup -= 1
if time_until_giveup < 1:
raise
time.sleep(3600)
else:
raise
def __init__(self, args_in, inputs=None):
self.max_attempts = 10
self.uploaded = [] # A list of all uploaded files
# constants sort of
self.IP = None
self.API_KEY = None
self.WORKFLOW_ID = None
self.NASMNT = None
self.inputs = inputs
self.gi = None
# Add arguments
self.reference = args_in.reference
self.noextract = args_in.noextract
self.NAME = args_in.history_name if args_in.history_name is not None else "AutoSNVPhyl_%s"\
% time.strftime("%d-%m-%Y")
self.manual = args_in.manual
self.script_dir = sys.path[0]
if not os.path.exists(os.path.join(self.script_dir, 'galaxy_logs')):
os.makedirs(os.path.join(self.script_dir, 'galaxy_logs'))
import datetime
self.t = Timer(log_file=os.path.join(self.script_dir, 'galaxy_logs',
                                             datetime.datetime.now().strftime("%d-%m-%Y_%H:%M:%S")
+ "_%s.txt" % self.NAME))
self.t.set_colour(32)
self.rename = False
self.extractor = None
self.history_id = None
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-r", "--reference",
help="Input the seqid of the reference file. "
"Also tells the program to extract the fastqs in your retrieve.txt. "
"If this parameter is not given then it will use the files in your "
"upload folder, it will autodetect the reference file as long as it's "
"a fasta. ", type=str)
parser.add_argument("-e", "--noextract", action="store_true",
help="Use if you don't want any fastq files to be extracted from the nas.")
parser.add_argument("-n", "--history_name", type=str,
help="Name of the history to create")
parser.add_argument("-m", "--manual", action="store_true",
help="Use the files in your upload directory (can use this in addition to the files extracted)."
" If this flag is not used then it will clear the files in your upload directory.")
# If no arguments
if len(sys.argv) == 1:
parser.print_help()
exit(1)
args = parser.parse_args()
runner = AutoSNVPhyl(args)
runner.run()
|
|
__author__ = 'woolly_sammoth'
from kivy.config import Config
Config.set('graphics', 'borderless', '0')
Config.set('graphics', 'resizable', '1')
Config.set('graphics', 'fullscreen', '0')
Config.set('input', 'mouse', 'mouse,disable_multitouch')
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.label import Label
from kivy.uix.button import Button
from kivy.uix.screenmanager import ScreenManager
from kivy.uix.actionbar import ActionBar
from kivy.uix.screenmanager import SlideTransition
from kivy.uix.popup import Popup
from kivy.lang import Builder
from kivy.clock import Clock
from kivy.core.window import Window
import logging
import time
import utils
import os
import json
import sys
import screens.HomeScreen as HomeScreen
import overrides
class TopActionBar(ActionBar):
def __init__(self, PlungeApp, **kwargs):
super(TopActionBar, self).__init__(**kwargs)
self.PlungeApp = PlungeApp
self.PlungeApp.logger.debug("Build Top Action Bar")
self.top_action_view = self.ids.top_action_view.__self__
self.top_action_previous = self.ids.top_action_previous.__self__
self.top_settings_button = self.ids.top_settings_button.__self__
self.top_size_button = self.ids.top_size_button.__self__
self.standard_height = self.height
self.top_action_previous.bind(on_release=self.PlungeApp.open_settings)
self.top_settings_button.bind(on_release=self.PlungeApp.open_settings)
return
def minimise(self, override=None):
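        # Toggle between the compact "minimised" monitor view and the full window:
        # resize the window, swap the home screen layout, and rebind the action bar
        # buttons so they either reopen settings or toggle the size again.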
        mode = self.top_size_button.text if override is None else override
        if mode == self.PlungeApp.get_string("Minimise"):
self.PlungeApp.logger.debug("set to min")
Window.size = (300, 180)
height = self.height
self.height = 0.5 * height if height == self.standard_height else height
self.top_size_button.text = self.PlungeApp.get_string("Maximise")
self.top_action_previous.title = ''
if self.PlungeApp.config.getint('standard', 'monitor') == 0:
if self.PlungeApp.client_running is True:
self.top_action_previous.title = self.PlungeApp.get_string("Running")
self.top_action_previous.color = (0, 1, 0.28235, 1)
else:
self.top_action_previous.title = self.PlungeApp.get_string("Stopped")
self.top_action_previous.color = (0.93725, 0.21176, 0.07843, 1)
self.top_action_previous.bind(on_release=self.minimise)
self.top_action_previous.unbind(on_release=self.PlungeApp.open_settings)
self.top_settings_button.text = ''
self.top_settings_button.bind(on_release=self.minimise)
self.top_settings_button.unbind(on_release=self.PlungeApp.open_settings)
self.PlungeApp.homeScreen.clear_widgets()
self.PlungeApp.homeScreen.add_widget(self.PlungeApp.homeScreen.min_layout)
self.PlungeApp.is_min = True
else:
self.PlungeApp.logger.debug("set to max")
if self.PlungeApp.config.getint('standard', 'monitor') == 1:
Window.size = (1000, 800)
else:
Window.size = (1000, 1000)
height = self.height
self.height = 2 * height if height != self.standard_height else height
self.top_size_button.text = self.PlungeApp.get_string("Minimise")
self.top_action_previous.title = self.PlungeApp.get_string('Main_Title')
self.top_action_previous.color = (1, 1, 1, 1)
self.top_action_previous.bind(on_release=self.PlungeApp.open_settings)
self.top_action_previous.unbind(on_release=self.minimise)
self.top_settings_button.text = self.PlungeApp.get_string("Settings")
self.top_settings_button.bind(on_release=self.PlungeApp.open_settings)
self.top_settings_button.unbind(on_release=self.minimise)
self.PlungeApp.homeScreen.clear_widgets()
self.PlungeApp.homeScreen.add_widget(self.PlungeApp.homeScreen.max_layout)
self.PlungeApp.is_min = False
return
class PlungeApp(App):
def __init__(self, **kwargs):
super(PlungeApp, self).__init__(**kwargs)
self.isPopup = False
self.use_kivy_settings = False
self.settings_cls = overrides.SettingsWithCloseButton
self.utils = utils.utils(self)
self.exchanges = ['ccedk', 'poloniex', 'bitcoincoid', 'bter', 'bittrex']
self.active_exchanges = []
self.currencies = ['btc', 'ltc', 'eur', 'usd', 'ppc']
self.active_currencies = []
self.client_running = False
self.is_min = False
if not os.path.isdir('logs'):
os.makedirs('logs')
if not os.path.isfile('api_keys.json'):
api_keys = []
with open('api_keys.json', 'a+') as api_file:
api_file.write(json.dumps(api_keys))
api_file.close()
if not os.path.isfile('user_data.json'):
user_data = {exchange: [] for exchange in self.exchanges}
with open('user_data.json', 'a+') as user_file:
user_file.write(json.dumps(user_data))
user_file.close()
self.first_run = True
self.logger = logging.getLogger('Plunge')
self.logger.setLevel(logging.DEBUG)
fh = logging.FileHandler('logs/%s_%d.log' % ('Plunge', time.time()))
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
fh_formatter = logging.Formatter(fmt='%(asctime)s %(levelname)s %(threadName)s %(funcName)s: %(message)s',
datefmt="%Y/%m/%d-%H:%M:%S")
ch_formatter = logging.Formatter(fmt='%(asctime)s %(levelname)s: %(message)s', datefmt="%Y/%m/%d-%H:%M:%S")
fh.setFormatter(fh_formatter)
ch.setFormatter(ch_formatter)
self.logger.addHandler(fh)
self.logger.addHandler(ch)
self.logger.debug("finished init on app")
# self.logger_socket = socketlogger.start_logging_receiver('Client')
# sys.excepthook = self.log_uncaught_exceptions
return
def log_uncaught_exceptions(self, exctype, value, tb):
self.logger.exception('\n===================\nException Caught\n\n%s\n===================\n' % value)
return
def build(self):
self.logger.info("Fetching language from config")
self.language = self.config.get('standard', 'language')
try:
self.lang = json.load(open('res/json/languages/' + self.language.lower() + '.json', 'r'))
except (ValueError, IOError) as e:
self.logger.error('')
self.logger.error('##################################################################')
self.logger.error('')
self.logger.error('There was an Error loading the ' + self.language + ' language file.')
self.logger.error('')
self.logger.error(str(e))
self.logger.error('')
self.logger.error('##################################################################')
raise SystemExit
self.logger.debug("build app")
self.root = BoxLayout(orientation='vertical')
self.mainScreenManager = ScreenManager(transition=SlideTransition(direction='left'))
Builder.load_file('screens/HomeScreen.kv')
self.homeScreen = HomeScreen.HomeScreen(self)
self.mainScreenManager.add_widget(self.homeScreen)
self.topActionBar = TopActionBar(self)
self.root.add_widget(self.topActionBar)
self.root.add_widget(self.mainScreenManager)
self.homeScreen.clear_widgets()
if self.config.getint('standard', 'start_min') == 1:
self.topActionBar.minimise(self.get_string("Minimise"))
self.is_min = True
else:
self.topActionBar.minimise(self.get_string("Maximise"))
self.is_min = False
self.set_monitor()
if self.config.getint('standard', 'show_disclaimer') == 1:
Clock.schedule_once(self.show_disclaimer, 1)
self.logger.debug("finished building app")
return self.root
def show_disclaimer(self, dt):
self.logger.debug("build disclaimer window")
content = BoxLayout(orientation='vertical')
content.add_widget(Label(text=self.get_string('Disclaimer_Text'), size_hint=(1, 0.8), font_size=16,
text_size=(500, 250)))
content.add_widget(BoxLayout(size_hint=(1, 0.1)))
button_layout = BoxLayout(size_hint=(1, 0.1), spacing='20dp')
ok_button = Button(text=self.get_string('OK'), size_hint=(None, None), size=(250, 50))
cancel_button = Button(text=self.get_string('Cancel'), size_hint=(None, None), size=(250, 50))
ok_button.bind(on_press=self.close_popup)
cancel_button.bind(on_press=self.exit)
button_layout.add_widget(ok_button)
button_layout.add_widget(cancel_button)
content.add_widget(button_layout)
self.popup = Popup(title=self.get_string('Disclaimer'), content=content, auto_dismiss=False,
size_hint=(None, None), size=(550, 450))
self.popup.open()
self.logger.debug("disclaimer is open")
return
    def exit(self, instance=None):
self.logger.debug("exit")
sys.exit()
def set_monitor(self):
if self.is_min is False:
self.logger.debug("set monitor window size")
self.homeScreen.max_layout.remove_widget(self.homeScreen.run_layout)
if self.config.getint('standard', 'monitor') == 1:
Window.size = (1000, 800)
else:
self.homeScreen.max_layout.add_widget(self.homeScreen.run_layout)
Window.size = (1000, 1000)
def get_string(self, text):
try:
self.logger.debug("Getting string for %s" % text)
return_string = self.lang[text]
except (ValueError, KeyError):
self.logger.error("No string found for %s in %s language file" % (text, self.language))
return_string = 'Language Error'
return return_string
def build_config(self, config):
self.logger.debug("building default config")
config.setdefaults('server', {'host': "", 'port': 80})
config.setdefaults('exchanges', {'ccedk': 0, 'poloniex': 0, 'bitcoincoid': 0, 'bter': 0, 'bittrex': 0})
config.setdefaults('standard', {'language': 'English', 'period': 30, 'monitor': 0, 'start_min': 0, 'data': 0,
'show_disclaimer': 1, 'smooth_line': 1})
def build_settings(self, settings):
self.logger.debug("build settings page")
settings.register_type('string', overrides.SettingStringFocus)
settings.register_type('numeric', overrides.SettingNumericFocus)
settings.register_type('string_exchange', overrides.SettingStringExchange)
        with open('user_data.json', 'a+') as user_data:
            try:
                # 'a+' leaves the file pointer at the end of the file, so rewind before reading
                user_data.seek(0)
                saved_data = json.load(user_data)
            except ValueError:
                self.logger.debug("failed to get saved data")
                saved_data = []
for exchange in self.exchanges:
if exchange not in saved_data:
self.config.set('exchanges', exchange, 0)
continue
self.config.set('exchanges', exchange, len(saved_data[exchange]))
settings.add_json_panel(self.get_string('Plunge_Configuration'), self.config, 'settings/plunge.json')
def on_config_change(self, config, section, key, value):
self.logger.debug("config changed %s > %s" % (key, value))
if section == "standard":
if key == "period":
Clock.unschedule(self.homeScreen.get_stats)
self.logger.info("Setting refresh Period to %s" % self.config.get('standard', 'period'))
Clock.schedule_interval(self.homeScreen.get_stats, self.config.getint('standard', 'period'))
if key == "monitor":
self.set_monitor()
self.active_exchanges = self.utils.get_active_exchanges()
self.homeScreen.set_exchange_spinners()
self.homeScreen.get_stats(0)
def show_popup(self, title, text):
self.logger.debug("show pop up %s" % text)
content = BoxLayout(orientation='vertical')
content.add_widget(Label(text=text, size_hint=(1, 0.8), font_size=16))
content.add_widget(BoxLayout(size_hint=(1, 0.1)))
button_layout = BoxLayout(size_hint=(1, 0.1))
button = Button(text=self.get_string('OK'), size_hint=(None, None), size=(250, 50))
button.bind(on_press=self.close_popup)
button_layout.add_widget(button)
content.add_widget(button_layout)
self.popup = Popup(title=title, content=content, auto_dismiss=False, size_hint=(None, None), size=(500, 300))
self.popup.open()
padding = ((self.popup.width - button.width) / 2)
button_layout.padding = (padding, 0, padding, 0)
self.isPopup = True
self.logger.debug("popup is open")
return
def close_popup(self, instance, value=False):
self.logger.debug("close pop up")
self.popup.dismiss()
self.isPopup = False
return
if __name__ == '__main__':
Plunge = PlungeApp()
Plunge.run()
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""TensorRT supported operators."""
import logging
import numpy as np
import tvm
from tvm import relay
from tvm.relay import transform
from tvm.relay.build_module import bind_params_by_name
from tvm.relay.expr import Call, Constant, Tuple, GlobalVar
from tvm.relay.expr_functor import ExprMutator
logger = logging.getLogger("TensorRT")
def is_tensorrt_runtime_enabled():
"""Check if the TensorRT graph runtime is present.
Returns
-------
ret: bool
True if present, False if not.
"""
check_enabled = tvm.get_global_func("relay.op.is_tensorrt_runtime_enabled", True)
if check_enabled:
return check_enabled()
return False
def get_tensorrt_version():
"""Gets the version of TensorRT that TVM is built against or is targeting.
Returns
-------
ret: Tuple[int, int, int]
TensorRT version as a tuple of major, minor, and patch number. If TVM
is not built with TensorRT, the value set by set_tensorrt_version() is returned instead.
"""
pass_ctx = tvm.transform.PassContext.current()
if "relay.ext.tensorrt.options" in pass_ctx.config:
return tuple(pass_ctx.config["relay.ext.tensorrt.options"].tensorrt_version)
return tuple(tvm.get_global_func("relay.op.get_tensorrt_version")())
def get_tensorrt_use_implicit_batch_mode():
pass_ctx = tvm.transform.PassContext.current()
if "relay.ext.tensorrt.options" in pass_ctx.config:
return pass_ctx.config["relay.ext.tensorrt.options"].use_implicit_batch
logger.warning(
"PassContext has no relay.ext.tensorrt.options config, using default value "
"use_implicit_batch=True."
)
return True
def get_tensorrt_remove_no_mac_subgraphs():
pass_ctx = tvm.transform.PassContext.current()
if "relay.ext.tensorrt.options" in pass_ctx.config:
return pass_ctx.config["relay.ext.tensorrt.options"].remove_no_mac_subgraphs
logger.warning(
"PassContext has no relay.ext.tensorrt.options config, using default value "
"remove_no_mac_subgraphs=False."
)
return False
def partition_for_tensorrt(
mod,
params=None,
version=None,
use_implicit_batch=True,
remove_no_mac_subgraphs=False,
max_workspace_size=1 << 30,
):
"""Partition the graph greedily offloading supported operators to TensorRT.
Parameters
----------
mod : Module
The module to run passes on.
params : Optional[Dict[str, NDArray]]
Constant input parameters.
version : Optional[Tuple[int, int, int]]
TensorRT version to target as tuple of (major, minor, patch). If TVM is compiled with
USE_TENSORRT_RUNTIME=ON, the linked TensorRT version will be used instead.
use_implicit_batch : Optional[bool]
Use TensorRT implicit batch mode (default true). Setting to false will enable explicit batch
mode which will widen supported operators to include those which modify the batch dimension,
but may reduce performance for some models.
remove_no_mac_subgraphs : Optional[bool]
Removes subgraphs which have been partitioned for TensorRT if they do not have any
multiply-accumulate operations. The removed subgraphs will go through TVM's standard
compilation instead. Can improve performance.
max_workspace_size : Optional[int]
How many bytes of workspace size to allow each subgraph to use for TensorRT engine creation.
See TensorRT documentation for more info.
Returns
-------
mod_and_config : Tuple[Module, Dict[str, Any]]
A tuple of 1) annotated and partitioned module and 2) "relay.ext.tensorrt.options"
configuration which should be given to PassContext when building.
"""
config = {
"use_implicit_batch": use_implicit_batch,
"max_workspace_size": max_workspace_size,
"remove_no_mac_subgraphs": remove_no_mac_subgraphs,
}
if version:
assert isinstance(version, tuple) and len(version) == 3
config["tensorrt_version"] = version
else:
linked_version = tuple(tvm.get_global_func("relay.op.get_tensorrt_version")())
if not linked_version:
logger.warning(
"TVM was not built against TensorRT and no version was provided to "
"partition_for_tensorrt. Defaulting to 6.0.1"
)
linked_version = (6, 0, 1)
config["tensorrt_version"] = linked_version
if params:
mod["main"] = bind_params_by_name(mod["main"], params)
seq = tvm.transform.Sequential(
[
transform.InferType(),
RemoveDropoutPass(),
transform.RemoveUnusedFunctions(),
transform.ConvertLayout(
{"nn.conv2d": ["NCHW", "default"], "nn.conv3d": ["NCDHW", "default"]}
),
transform.FoldConstant(),
transform.AnnotateTarget("tensorrt"),
transform.MergeCompilerRegions(),
transform.PartitionGraph(),
transform.InferType(),
]
)
with tvm.transform.PassContext(opt_level=3, config={"relay.ext.tensorrt.options": config}):
mod = seq(mod)
mod = prune_tensorrt_subgraphs(mod)
return mod, config
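# A minimal usage sketch (assuming an existing Relay module `mod` and a `params` dict;
# the target string and variable names here are illustrative, not part of this module):
#
#     mod, config = partition_for_tensorrt(mod, params=params)
#     with tvm.transform.PassContext(opt_level=3,
#                                    config={"relay.ext.tensorrt.options": config}):
#         lib = relay.build(mod, target="cuda", params=params)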
def _register_external_op_helper_with_checker(op_name, checker):
@tvm.ir.register_op_attr(op_name, "target.tensorrt")
def _func_wrapper(attrs, args):
if any([x.checked_type.dtype != "float32" for x in args]):
logger.info("Only float32 inputs are supported for TensorRT.")
return False
return checker(attrs, args, op_name)
return _func_wrapper
def _register_external_op_helper(op_name, supported=True):
return _register_external_op_helper_with_checker(
op_name, lambda attrs, args, op_name: supported
)
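# The helpers above attach a "target.tensorrt" attribute to each op; the
# AnnotateTarget("tensorrt") pass used by partition_for_tensorrt queries these
# predicates to decide which calls can be offloaded to TensorRT.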
# Ops which are always supported
_register_external_op_helper("nn.relu")
_register_external_op_helper("sigmoid")
_register_external_op_helper("tanh")
_register_external_op_helper("subtract")
_register_external_op_helper("multiply")
_register_external_op_helper("divide")
_register_external_op_helper("power")
_register_external_op_helper("maximum")
_register_external_op_helper("minimum")
_register_external_op_helper("exp")
_register_external_op_helper("log")
_register_external_op_helper("sqrt")
_register_external_op_helper("abs")
_register_external_op_helper("negative")
_register_external_op_helper("nn.batch_flatten")
_register_external_op_helper("clip")
@tvm.ir.register_op_attr("add", "target.tensorrt")
def add_annotate_fn(attrs, args): # pylint: disable=unused-variable
"""Check if add is supported by TensorRT."""
if any([x.checked_type.dtype != "float32" for x in args]):
logger.info("Only float32 inputs are supported for TensorRT.")
return False
if (
not get_tensorrt_use_implicit_batch_mode()
and (isinstance(args[0], Constant) or isinstance(args[1], Constant))
and args[0].checked_type.shape[0] == args[1].checked_type.shape[0]
and args[0].checked_type.shape[0] != 1
and (len(args[0].checked_type.shape) > 3 or len(args[1].checked_type.shape) > 3)
):
logger.info("add: bug in TRT with adding batched constants.")
return False
return True
@tvm.ir.register_op_attr("nn.batch_norm", "target.tensorrt")
def batch_norm_annotate_fn(attrs, args): # pylint: disable=unused-variable
"""Check if nn.batch_norm is supported by TensorRT."""
if any([x.checked_type.dtype != "float32" for x in args]):
logger.info("Only float32 inputs are supported for TensorRT.")
return False
if int(attrs.axis) not in (1, 3):
logger.info("nn.batch_norm: axis is %d but must be 1 or 3.", int(attrs.axis))
return False
return True
@tvm.ir.register_op_attr("nn.softmax", "target.tensorrt")
def softmax_annotate_fn(attrs, args): # pylint: disable=unused-variable
"""Check if nn.softmax is supported by TensorRT."""
if any([x.checked_type.dtype != "float32" for x in args]):
logger.info("Only float32 inputs are supported for TensorRT.")
return False
if get_tensorrt_use_implicit_batch_mode() and int(attrs.axis) == 0:
logger.info("nn.softmax: can't modify batch dimension.")
return False
return True
@tvm.ir.register_op_attr("nn.conv2d", "target.tensorrt")
def conv2d_annotate_fn(attrs, args): # pylint: disable=unused-variable
"""Check if nn.conv2d is supported by TensorRT."""
if any([x.checked_type.dtype != "float32" for x in args]):
logger.info("Only float32 inputs are supported for TensorRT.")
return False
if attrs.data_layout != "NCHW":
logger.info("nn.conv2d: data_layout is %s but must be NCHW.", attrs.data_layout)
return False
if attrs.kernel_layout != "OIHW":
logger.info("nn.conv2d: kernel_layout is %s but must be OIHW.", attrs.kernel_layout)
return False
if attrs.out_layout and attrs.out_layout != "NCHW":
logger.info("nn.conv2d: out_layout is %s but must be NCHW.", attrs.out_layout)
return False
return True
@tvm.ir.register_op_attr("nn.dense", "target.tensorrt")
def dense_annotate_fn(attrs, args): # pylint: disable=unused-variable
"""Check if dense is supported by TensorRT."""
if any([x.checked_type.dtype != "float32" for x in args]):
logger.info("Only float32 inputs are supported for TensorRT.")
return False
input_rank = len(args[0].checked_type.shape)
weight_rank = len(args[1].checked_type.shape)
if input_rank not in (2, 3, 4):
logger.info("nn.dense: input has rank %d but must be 2, 3 or 4.", input_rank)
return False
if weight_rank != 2:
logger.info("nn.dense: weight has rank %d but must be 2.", weight_rank)
return False
return True
@tvm.ir.register_op_attr("nn.bias_add", "target.tensorrt")
def bias_add_annotate_fn(attrs, args): # pylint: disable=unused-variable
"""Check if nn.bias_add is supported by TensorRT."""
if any([x.checked_type.dtype != "float32" for x in args]):
logger.info("Only float32 inputs are supported for TensorRT.")
return False
input_rank = len(args[0].checked_type.shape)
if input_rank not in (2, 3, 4):
logger.info("nn.bias_add: input rank is %d but must be 2, 3 or 4.", input_rank)
return False
return True
@tvm.ir.register_op_attr("nn.max_pool2d", "target.tensorrt")
def max_pool_2d_annotate_fn(attrs, args): # pylint: disable=unused-variable
"""Check if nn.max_pool2d is supported by TensorRT."""
if any([x.checked_type.dtype != "float32" for x in args]):
logger.info("Only float32 inputs are supported for TensorRT.")
return False
if attrs.layout != "NCHW":
logger.info("nn.max_pool2d: layout is %s but must be NCHW.", attrs.layout)
return False
if attrs.ceil_mode and get_tensorrt_version() < (5, 1, 5):
logger.info("nn.avg_pool2d: ceil_mode=True requires TensorRT 5.1.5 or greater.")
return False
return True
@tvm.ir.register_op_attr("nn.avg_pool2d", "target.tensorrt")
def avg_pool_2d_annotate_fn(attrs, args): # pylint: disable=unused-variable
"""Check if nn.avg_pool2d is supported by TensorRT."""
if any([x.checked_type.dtype != "float32" for x in args]):
logger.info("Only float32 inputs are supported for TensorRT.")
return False
if attrs.layout != "NCHW":
logger.info("nn.avg_pool2d: layout is %d but must be NCHW.", attrs.layout)
return False
if (
attrs.count_include_pad
and len(attrs.padding) == 4
and (
int(attrs.padding[0]) != int(attrs.padding[2])
or int(attrs.padding[1]) != int(attrs.padding[3])
)
):
logger.info(
"nn.avg_pool2d: inclusive-counted blended or average "
"pooling is not supported in combination with asymmetric padding"
)
return False
if attrs.ceil_mode and get_tensorrt_version() < (5, 1, 5):
logger.info("nn.avg_pool2d: ceil_mode=True requires TensorRT 5.1.5 or greater.")
return False
return True
@tvm.ir.register_op_attr("nn.global_max_pool2d", "target.tensorrt")
def global_max_pool_2d_annotate_fn(attrs, args): # pylint: disable=unused-variable
"""Check if nn.global_max_pool2d is supported by TensorRT."""
if any([x.checked_type.dtype != "float32" for x in args]):
logger.info("Only float32 inputs are supported for TensorRT.")
return False
if attrs.layout != "NCHW":
logger.info("nn.global_max_pool2d: layout is %s but must be NCHW.", attrs.layout)
return False
return True
@tvm.ir.register_op_attr("nn.global_avg_pool2d", "target.tensorrt")
def global_avg_pool_2d_annotate_fn(attrs, args): # pylint: disable=unused-variable
"""Check if nn.global_avg_pool2d is supported by TensorRT."""
if any([x.checked_type.dtype != "float32" for x in args]):
logger.info("Only float32 inputs are supported for TensorRT.")
return False
if attrs.layout != "NCHW":
logger.info("nn.global_avg_pool2d: layout is %s but must be NCHW.", attrs.layout)
return False
return True
@tvm.ir.register_op_attr("expand_dims", "target.tensorrt")
def expand_dims_annotate_fn(attrs, args): # pylint: disable=unused-variable
"""Check if expand_dims is supported by TensorRT."""
if any([x.checked_type.dtype != "float32" for x in args]):
logger.info("Only float32 inputs are supported for TensorRT.")
return False
if get_tensorrt_use_implicit_batch_mode() and int(attrs.axis) == 0:
logger.info("expand_dims: can't modify batch dimension.")
return False
return True
@tvm.ir.register_op_attr("squeeze", "target.tensorrt")
def squeeze_annotate_fn(attrs, args): # pylint: disable=unused-variable
"""Check if squeeze is supported by TensorRT."""
if any([x.checked_type.dtype != "float32" for x in args]):
logger.info("Only float32 inputs are supported for TensorRT.")
return False
if not attrs.axis:
logger.info("squeeze: must explicitly set axis.")
return False
if get_tensorrt_use_implicit_batch_mode() and any([axis == 0 for axis in map(int, attrs.axis)]):
logger.info("squeeze: can't modify batch dimension.")
return False
return True
@tvm.ir.register_op_attr("concatenate", "target.tensorrt")
def concatenate_annotate_fn(attrs, args): # pylint: disable=unused-variable
"""Check if concatenate is supported by TensorRT."""
if any([x.dtype != "float32" for x in args[0].checked_type.fields]):
logger.info("Only float32 inputs are supported for TensorRT.")
return False
if not get_tensorrt_use_implicit_batch_mode():
return True
if int(attrs.axis) == 0:
logger.info("concatenate: can't modify batch dimension.")
return False
if isinstance(args[0], Tuple):
for tuple_input in args[0].fields:
if isinstance(tuple_input, Constant):
logger.info("concatenate: can't concatenate tensors with constants.")
return False
return True
@tvm.ir.register_op_attr("nn.conv2d_transpose", "target.tensorrt")
def conv2d_transpose_annotate_fn(attrs, args): # pylint: disable=unused-variable
"""Check if nn.conv2d_transpose is supported by TensorRT."""
if any([x.checked_type.dtype != "float32" for x in args]):
logger.info("Only float32 inputs are supported for TensorRT.")
return False
if attrs.data_layout != "NCHW":
logger.info("nn.conv2d_transpose: data_layout is %s but must be NCHW.", attrs.data_layout)
return False
if attrs.kernel_layout != "OIHW":
logger.info(
"nn.conv2d_transpose: kernel_layout is %s but must be OIHW.", attrs.kernel_layout
)
return False
if attrs.out_layout and attrs.out_layout != "NCHW":
logger.info("nn.conv2d_transpose: out_layout is %s but must be NCHW.", attrs.out_layout)
return False
if attrs.dilation and any([rate != 1 for rate in map(int, attrs.dilation)]):
logger.info("nn.conv2d_transpose: dilation rate must be 1.")
return False
return True
@tvm.ir.register_op_attr("transpose", "target.tensorrt")
def transpose_annotate_fn(attrs, args): # pylint: disable=unused-variable
"""Check if transpose is supported by TensorRT."""
if any([x.checked_type.dtype != "float32" for x in args]):
logger.info("Only float32 inputs are supported for TensorRT.")
return False
if get_tensorrt_use_implicit_batch_mode() and int(attrs.axes[0]) != 0:
logger.info("transpose: can't modify batch dimension.")
return False
return True
@tvm.ir.register_op_attr("layout_transform", "target.tensorrt")
def layout_transform_annotate_fn(attrs, args): # pylint: disable=unused-variable
"""Check if layout_transform is supported by TensorRT."""
if any([x.checked_type.dtype != "float32" for x in args]):
logger.info("Only float32 inputs are supported for TensorRT.")
return False
if (attrs.src_layout, attrs.dst_layout) not in [
("NCHW", "NHWC"),
("NHWC", "NCHW"),
("NDHWC", "NCDHW"),
("NCDHW", "NDHWC"),
]:
logger.info(
"layout_transform: %s to %s is not supported.", attrs.src_layout, attrs.dst_layout
)
return False
return True
@tvm.ir.register_op_attr("reshape", "target.tensorrt")
def reshape_annotate_fn(attrs, args): # pylint: disable=unused-variable
"""Check if reshape is supported by TensorRT."""
if args[0].checked_type.dtype != "float32":
logger.info("Only float32 inputs are supported for TensorRT.")
return False
if any([x < -1 for x in map(int, attrs.newshape)]):
logger.info("reshape: new shape dims must be explicit.")
return False
if get_tensorrt_use_implicit_batch_mode():
shape = list(map(int, args[0].checked_type.shape))
new_shape = list(map(int, attrs.newshape))
if len(new_shape) == 0 or len(shape) == 0:
logger.info("reshape: Can't reshape to or from scalar.")
return False
# TRT cannot modify batch dimension.
original_volume = np.prod(shape)
# First, resolve 0.
for i, value in enumerate(new_shape):
if value == 0:
new_shape[i] = shape[i]
# Resolve -1.
for i, value in enumerate(new_shape):
if value == -1:
new_shape[i] = original_volume // np.prod([x for x in new_shape if x != -1])
if shape[0] != new_shape[0]:
logger.info("reshape: can't modify batch dimension.")
return False
return True
@tvm.ir.register_op_attr("nn.pad", "target.tensorrt")
def pad_annotate_fn(attrs, args): # pylint: disable=unused-variable
"""Check if nn.pad is supported by TensorRT."""
if any([x.checked_type.dtype != "float32" for x in args]):
logger.info("Only float32 inputs are supported for TensorRT.")
return False
if attrs.pad_mode != "constant":
logger.info("nn.pad: pad mode is %s but must be constant.", attrs.pad_mode)
return False
if float(attrs.pad_value) != 0.0:
logger.info("nn.pad: pad value is %f but must be 0.0.", float(attrs.pad_value))
return False
if any([x != 0 for x in attrs.pad_width[0]]) or any([x != 0 for x in attrs.pad_width[1]]):
logger.info("nn.pad: can't pad batch or channel dimensions.")
return False
    if len(attrs.pad_width) == 5 and any([x != 0 for x in attrs.pad_width[2]]):
        logger.info("nn.pad: can only pad last two dimensions for 5D inputs.")
        return False
    return True
def reduce_annotate_fn(attrs, args, op_name):
"""Helper for reduce operations."""
if not attrs.axis or len(attrs.axis) == 0:
logger.info("%s: cannot reduce to scalar.", op_name)
return False
if attrs.exclude:
logger.info("%s: exclude not supported.", op_name)
return False
if get_tensorrt_use_implicit_batch_mode() and any([x == 0 for x in map(int, attrs.axis)]):
logger.info("%s: can't modify batch dimension.", op_name)
return False
return True
_register_external_op_helper_with_checker("sum", reduce_annotate_fn)
_register_external_op_helper_with_checker("prod", reduce_annotate_fn)
_register_external_op_helper_with_checker("max", reduce_annotate_fn)
_register_external_op_helper_with_checker("min", reduce_annotate_fn)
_register_external_op_helper_with_checker("mean", reduce_annotate_fn)
def trt_version_annotate_fn(version):
"""Helper for ops which require a minimum TRT version"""
def _func_wrapper(attrs, args, op_name):
if get_tensorrt_version() < version:
logger.info(
"%s: requires TensorRT version %s or higher.", op_name, ".".join(map(str, version))
)
return False
return True
return _func_wrapper
_register_external_op_helper_with_checker("nn.leaky_relu", trt_version_annotate_fn((5, 1, 5)))
_register_external_op_helper_with_checker("sin", trt_version_annotate_fn((5, 1, 5)))
_register_external_op_helper_with_checker("cos", trt_version_annotate_fn((5, 1, 5)))
_register_external_op_helper_with_checker("atan", trt_version_annotate_fn((5, 1, 5)))
_register_external_op_helper_with_checker("ceil", trt_version_annotate_fn((5, 1, 5)))
@tvm.ir.register_op_attr("strided_slice", "target.tensorrt")
def strided_slice_annotate_fn(attrs, args): # pylint: disable=unused-variable
"""Check if strided_slice is supported by TensorRT."""
if args[0].checked_type.dtype != "float32":
logger.info("Only float32 inputs are supported for TensorRT.")
return False
if not trt_version_annotate_fn((5, 1, 5))(attrs, args, "strided_slice"):
return False
if get_tensorrt_use_implicit_batch_mode():
batch_dim_begin_modified = attrs.begin[0] is not None and int(attrs.begin[0]) != 0
batch_dim_end_modified = (
attrs.end[0] is not None
and int(attrs.end[0]) != -1
and int(attrs.end[0]) != int(args[0].checked_type.shape[0])
)
if batch_dim_begin_modified or batch_dim_end_modified:
logger.info("strided_slice: can't modify batch dimension.")
return False
if any([x is not None and x <= 0 for x in attrs.strides]):
logger.info("strided_slice: stride must be positive")
return False
return True
@tvm.ir.register_op_attr("nn.adaptive_max_pool2d", "target.tensorrt")
def adapative_max_pool2d_annotate_fn(attrs, args): # pylint: disable=unused-variable
"""Check if nn.adaptive_max_pool2d is supported by TensorRT."""
if any([x.checked_type.dtype != "float32" for x in args]):
logger.info("Only float32 inputs are supported for TensorRT.")
return False
if len(attrs.output_size) == 0 or any([size != 1 for size in map(int, attrs.output_size)]):
logger.info("nn.adaptive_max_pool2d: output size must be (1, 1).")
return False
return True
@tvm.ir.register_op_attr("nn.adaptive_avg_pool2d", "target.tensorrt")
def adapative_avg_pool2d_annotate_fn(attrs, args): # pylint: disable=unused-variable
"""Check if nn.adaptive_avg_pool2d is supported by TensorRT."""
if any([x.checked_type.dtype != "float32" for x in args]):
logger.info("Only float32 inputs are supported for TensorRT.")
return False
if len(attrs.output_size) == 0 or any([size != 1 for size in map(int, attrs.output_size)]):
logger.info("nn.adaptive_avg_pool2d: output size must be (1, 1).")
return False
return True
@tvm.ir.register_op_attr("nn.conv3d", "target.tensorrt")
def conv3d_annotate_fn(attrs, args): # pylint: disable=unused-variable
"""Check if nn.conv3d is supported by TensorRT."""
if any([x.checked_type.dtype != "float32" for x in args]):
logger.info("Only float32 inputs are supported for TensorRT.")
return False
if not trt_version_annotate_fn((6, 0, 1))(attrs, args, "nn.conv3d"):
return False
if attrs.data_layout != "NCDHW":
logger.info("nn.conv3d: data_layout is %s but must be NCDHW.", attrs.data_layout)
return False
if attrs.kernel_layout != "OIDHW":
logger.info("nn.conv3d: kernel_layout is %s but must be OIDHW.", attrs.kernel_layout)
return False
if attrs.out_layout and attrs.out_layout != "NCDHW":
logger.info("nn.conv3d: out_layout is %s but must be NCDHW.", attrs.out_layout)
return False
return True
@tvm.ir.register_op_attr("nn.max_pool3d", "target.tensorrt")
def max_pool_3d_annotate_fn(attrs, args): # pylint: disable=unused-variable
"""Check if nn.max_pool3d is supported by TensorRT."""
if any([x.checked_type.dtype != "float32" for x in args]):
logger.info("Only float32 inputs are supported for TensorRT.")
return False
if not trt_version_annotate_fn((6, 0, 1))(attrs, args, "nn.max_pool3d"):
return False
if attrs.layout != "NCDHW":
logger.info("nn.max_pool3d: layout is %s but must be NCDHW.", attrs.layout)
return False
return True
@tvm.ir.register_op_attr("nn.avg_pool3d", "target.tensorrt")
def avg_pool_3d_annotate_fn(attrs, args): # pylint: disable=unused-variable
"""Check if nn.avg_pool3d is supported by TensorRT."""
if any([x.checked_type.dtype != "float32" for x in args]):
logger.info("Only float32 inputs are supported for TensorRT.")
return False
if not trt_version_annotate_fn((6, 0, 1))(attrs, args, "nn.avg_pool3d"):
return False
if attrs.layout != "NCDHW":
logger.info("nn.avg_pool3d: layout is %s but must be NCDHW.", attrs.layout)
return False
return True
@tvm.ir.register_op_attr("nn.conv3d_transpose", "target.tensorrt")
def conv3d_transpose_annotate_fn(attrs, args): # pylint: disable=unused-variable
"""Check if nn.conv3d_transpose is supported by TensorRT."""
if any([x.checked_type.dtype != "float32" for x in args]):
logger.info("Only float32 inputs are supported for TensorRT.")
return False
if not trt_version_annotate_fn((6, 0, 1))(attrs, args, "nn.conv3d_transpose"):
return False
if attrs.data_layout != "NCDHW":
logger.info("nn.conv3d_transpose: data_layout is %s but must be NCDHW.", attrs.data_layout)
return False
if attrs.kernel_layout != "OIDHW":
logger.info(
"nn.conv3d_transpose: kernel_layout is %s but must be OIDHW.", attrs.kernel_layout
)
return False
if attrs.out_layout and attrs.out_layout != "NCDHW":
logger.info("nn.conv3d_transpose: out_layout is %s but must be NCDHW.", attrs.out_layout)
return False
if attrs.dilation and any([rate != 1 for rate in map(int, attrs.dilation)]):
logger.info("nn.conv3d_transpose: dilation rate must be 1.")
return False
if attrs.output_padding and any([x != 0 for x in map(int, attrs.output_padding)]):
logger.info("nn.conv3d_transpose: output padding is not supported.")
return False
return True
def is_valid_subgraph(params, body):
"""Final check on whether the subgraph is valid and should be offloaded to TensorRT."""
# Remove invalid subgraphs for implicit batch mode.
if get_tensorrt_use_implicit_batch_mode():
input_batch_sizes = []
for var in params:
# In implicit batch mode, all inputs must have same batch size
if isinstance(var.checked_type, relay.TupleType):
for tupe_type in var.checked_type.fields:
# Scalar inputs not allowed
if len(tupe_type.shape) == 0:
logger.info("tensorrt: scalar inputs not supported")
return False
input_batch_sizes.append(int(tupe_type.shape[0]))
else:
# Scalar inputs not allowed
if len(var.checked_type.shape) == 0:
logger.info("tensorrt: scalar inputs not supported")
return False
input_batch_sizes.append(int(var.checked_type.shape[0]))
if len(input_batch_sizes) > 1 and len(set(input_batch_sizes)) != 1:
logger.info("tensorrt: inputs have different batch sizes")
return False
# Remove subgraphs with no multiply-accumulates
if get_tensorrt_remove_no_mac_subgraphs() and relay.analysis.get_total_mac_number(body) == 0:
return False
return True
def prune_tensorrt_subgraphs(mod):
"""
    Removes invalid subgraphs and those with no multiply-accumulates (if remove_no_mac_subgraphs
    is set).
"""
class SubgraphRemover(ExprMutator):
"""
Reverts subgraphs in subgraphs_to_remove back to TVM instead of using an external codegen.
"""
def __init__(self, subgraphs_to_remove, mod, new_mod):
ExprMutator.__init__(self)
self.subgraphs_to_remove = subgraphs_to_remove
self.mod = mod
self.new_mod = new_mod
def visit_call(self, call):
if isinstance(call.op, GlobalVar):
name = call.op.name_hint
if name in self.subgraphs_to_remove:
# "Inline" the subgraph back into new main function.
func = self.mod[name]
var_map = {}
for arg, param in zip(call.args, func.params):
var_map[param] = super().visit(arg)
new_body = relay.bind(func.body, var_map)
return new_body
if name != "main":
# Copy the GlobalVar (subgraph function) to the new module and call.
args = []
for arg in call.args:
args.append(super().visit(arg))
subgraph_gv = relay.GlobalVar(name)
self.new_mod[subgraph_gv] = self.mod[name]
return subgraph_gv(*args)
return super().visit_call(call)
subgraphs_to_remove = []
# Remove invalid subgraphs
for subgraph in mod.get_global_vars():
name = subgraph.name_hint
if not mod[name].attrs or mod[name].attrs["Compiler"] != "tensorrt":
continue
if not is_valid_subgraph(mod[name].params, mod[name].body):
subgraphs_to_remove.append(name)
# Create new pruned module
new_mod = tvm.IRModule()
new_mod["main"] = SubgraphRemover(subgraphs_to_remove, mod, new_mod).visit(mod["main"])
return new_mod
class RemoveDropout(ExprMutator):
"""
Removes all nn.dropout from an expr.
"""
def visit_tuple_getitem(self, op):
visit = super().visit_tuple_getitem(op)
if (
isinstance(visit.tuple_value, Call)
and visit.tuple_value.op.name == "nn.dropout"
and visit.index == 0
):
return visit.tuple_value.args[0]
return visit
@transform.function_pass(opt_level=0)
class RemoveDropoutPass:
def transform_function(self, func, mod, _):
return RemoveDropout().visit(func)
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2014, 2015 Mitch Garnaat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import logging
import os
import shutil
import time
import uuid
import zipfile
from botocore.exceptions import ClientError
import kappa.awsclient
import kappa.log
LOG = logging.getLogger(__name__)
class Function(object):
DEFAULT_EXCLUDED_DIRS = ['boto3', 'botocore', 's3transfer', 'concurrent', 'dateutil', 'docutils', 'futures',
'jmespath', 'python_dateutil']
DEFAULT_EXCLUDED_FILES = ['.gitignore']
def __init__(self, context, config):
self._context = context
self._config = config
self._lambda_client = kappa.awsclient.create_client(
'lambda', context.session)
self._response = None
self._log = None
@property
def name(self):
return self._context.name
@property
def environment_variables(self):
try:
if (self._context.environment_variables is not None):
return {'Variables': self._context.environment_variables}
except AttributeError:
pass
return None
@property
def runtime(self):
return self._config['runtime']
@property
def handler(self):
return self._config['handler']
@property
def dependencies(self):
return self._config.get('dependencies', list())
@property
def description(self):
return self._config['description']
@property
def timeout(self):
return self._config['timeout']
@property
def memory_size(self):
return self._config['memory_size']
@property
def vpc_config(self):
vpc_config = {}
if 'vpc_config' in self._config:
if 'security_group_ids' in self._config['vpc_config']:
sgids = self._config['vpc_config']['security_group_ids']
vpc_config['SecurityGroupIds'] = sgids
if 'subnet_ids' in self._config['vpc_config']:
snids = self._config['vpc_config']['subnet_ids']
vpc_config['SubnetIds'] = snids
return vpc_config
@property
def zipfile_name(self):
return '{}.zip'.format(self._context.name)
@property
def excluded_dirs(self):
excluded_dirs = self._config.get('excluded_dirs', 'default')
if excluded_dirs == 'default':
excluded_dirs = self.DEFAULT_EXCLUDED_DIRS
elif excluded_dirs == 'none':
excluded_dirs = list()
return excluded_dirs
@property
def excluded_files(self):
return self._config.get('excluded_files', self.DEFAULT_EXCLUDED_FILES)
@property
def tests(self):
return self._config.get('tests', '_tests')
@property
def permissions(self):
return self._config.get('permissions', list())
@property
def log_retention_policy(self):
return self._config.get('log_retention_policy', "")
@property
def log(self):
if self._log is None:
log_group_name = '/aws/lambda/%s' % self.name
self._log = kappa.log.Log(self._context, log_group_name)
return self._log
@property
def code_sha_256(self):
return self._get_response_configuration('CodeSha256')
@property
def arn(self):
return self._get_response_configuration('FunctionArn')
@property
def alias_arn(self):
return self.arn + ':{}'.format(self._context.environment)
@property
def repository_type(self):
return self._get_response_code('RepositoryType')
@property
def location(self):
return self._get_response_code('Location')
@property
def version(self):
return self._get_response_configuration('Version')
@property
def deployment_uri(self):
return 'https://{}.execute-api.{}.amazonaws.com/{}'.format(
self.api_id, self._apigateway_client.region_name,
self._context.environment)
def _get_response(self):
if self._response is None:
try:
self._response = self._lambda_client.call(
'get_function',
FunctionName=self.name)
LOG.debug(self._response)
except Exception:
LOG.debug('Unable to find ARN for function: %s', self.name)
return self._response
def _get_response_configuration(self, key, default=None):
value = None
response = self._get_response()
if response:
if 'Configuration' in response:
value = response['Configuration'].get(key, default)
return value
def _get_response_code(self, key, default=None):
value = None
        response = self._get_response()
if response:
            if 'Code' in response:
                value = response['Code'].get(key, default)
return value
def _check_function_md5(self):
# Zip up the source code and then compute the MD5 of that.
# If the MD5 does not match the cached MD5, the function has
# changed and needs to be updated so return True.
changed = True
self._copy_config_file()
files = [] + self.dependencies + [self._context.source_dir]
self.zip_lambda_function(self.zipfile_name, files)
m = hashlib.md5()
with open(self.zipfile_name, 'rb') as fp:
m.update(fp.read())
zip_md5 = m.hexdigest()
cached_md5 = self._context.get_cache_value('zip_md5')
LOG.debug('zip_md5: %s', zip_md5)
LOG.debug('cached md5: %s', cached_md5)
if zip_md5 != cached_md5:
self._context.set_cache_value('zip_md5', zip_md5)
else:
changed = False
LOG.info('function unchanged')
return changed
def _check_config_md5(self):
# Compute the MD5 of all of the components of the configuration.
# If the MD5 does not match the cached MD5, the configuration has
# changed and needs to be updated so return True.
m = hashlib.md5()
m.update(self.description.encode('utf-8'))
m.update(self.handler.encode('utf-8'))
m.update(str(self.memory_size).encode('utf-8'))
m.update(self._context.exec_role_arn.encode('utf-8'))
m.update(str(self.timeout).encode('utf-8'))
m.update(str(self.vpc_config).encode('utf-8'))
m.update(str(self.environment_variables).encode('utf-8'))
config_md5 = m.hexdigest()
cached_md5 = self._context.get_cache_value('config_md5')
LOG.debug('config_md5: %s', config_md5)
LOG.debug('cached_md5: %s', cached_md5)
if config_md5 != cached_md5:
self._context.set_cache_value('config_md5', config_md5)
changed = True
else:
changed = False
return changed
def _copy_config_file(self):
config_name = '{}_config.json'.format(self._context.environment)
config_path = os.path.join(self._context.source_dir, config_name)
if os.path.exists(config_path):
dest_path = os.path.join(self._context.source_dir, 'config.json')
LOG.debug('copy %s to %s', config_path, dest_path)
shutil.copy2(config_path, dest_path)
def _zip_lambda_dir(self, zipfile_name, lambda_dir):
LOG.debug('_zip_lambda_dir: lambda_dir=%s', lambda_dir)
LOG.debug('zipfile_name=%s', zipfile_name)
LOG.debug('excluded_dirs={}'.format(self.excluded_dirs))
relroot = os.path.abspath(lambda_dir)
with zipfile.ZipFile(zipfile_name, 'a',
compression=zipfile.ZIP_DEFLATED) as zf:
for root, subdirs, files in os.walk(lambda_dir):
excluded_dirs = []
for subdir in subdirs:
for excluded in self.excluded_dirs:
if subdir.startswith(excluded):
excluded_dirs.append(subdir)
for excluded in excluded_dirs:
subdirs.remove(excluded)
try:
dir_path = os.path.relpath(root, relroot)
dir_path = os.path.normpath(
os.path.splitdrive(dir_path)[1]
)
while dir_path[0] in (os.sep, os.altsep):
dir_path = dir_path[1:]
dir_path += '/'
zf.getinfo(dir_path)
except KeyError:
zf.write(root, dir_path)
for filename in files:
if filename not in self.excluded_files:
filepath = os.path.join(root, filename)
if os.path.isfile(filepath):
arcname = os.path.join(
os.path.relpath(root, relroot), filename)
try:
zf.getinfo(arcname)
except KeyError:
zf.write(filepath, arcname)
def _zip_lambda_file(self, zipfile_name, lambda_file):
LOG.debug('_zip_lambda_file: lambda_file=%s', lambda_file)
LOG.debug('zipfile_name=%s', zipfile_name)
with zipfile.ZipFile(zipfile_name, 'a',
compression=zipfile.ZIP_DEFLATED) as zf:
try:
zf.getinfo(lambda_file)
except KeyError:
zf.write(lambda_file)
def zip_lambda_function(self, zipfile_name, files):
try:
os.remove(zipfile_name)
except OSError:
pass
for f in files:
LOG.debug('adding file %s', f)
if os.path.isdir(f):
self._zip_lambda_dir(zipfile_name, f)
else:
self._zip_lambda_file(zipfile_name, f)
def exists(self):
return self._get_response()
def tail(self, attempt=0):
try:
LOG.debug('tailing function: %s', self.name)
return self.log.tail()
except Exception as e:
if attempt > 10:
return e
time.sleep(attempt)
return self.tail(attempt + 1)
def list_aliases(self):
LOG.info('listing aliases of %s', self.name)
try:
response = self._lambda_client.call(
'list_aliases',
FunctionName=self.name)
LOG.debug(response)
except Exception:
LOG.exception('Unable to list aliases')
        return response.get('Aliases', list())
def find_latest_version(self):
# Find the current (latest) version by version number
# First find the SHA256 of $LATEST
versions = self.list_versions()
latest_sha256 = next(v['CodeSha256'] for v in versions
if v['Version'] == '$LATEST')
        # Loop in reverse since the latest version number is usually at the last index
return next(v['Version'] for v in reversed(versions)
if v['Version'] != '$LATEST' and v['CodeSha256'] == latest_sha256)
def create_alias(self, name, description, version=None):
if not version:
version = self.find_latest_version()
try:
LOG.debug('creating alias %s=%s', name, version)
response = self._lambda_client.call(
'create_alias',
FunctionName=self.name,
Description=description,
FunctionVersion=version,
Name=name)
LOG.debug(response)
except Exception:
LOG.exception('Unable to create alias')
def update_alias(self, name, description, version=None):
# Find the current (latest) version by version number
# First find the SHA256 of $LATEST
if not version:
version = self.find_latest_version()
try:
LOG.debug('updating alias %s=%s', name, version)
response = self._lambda_client.call(
'update_alias',
FunctionName=self.name,
Description=description,
FunctionVersion=version,
Name=name)
LOG.debug(response)
except ClientError as e:
if 'ResourceNotFoundException' in str(e):
LOG.debug('Alias not found, creating it...')
self.create_alias(name, description, version)
else:
LOG.error('Unexpected error while update_alias: %s', e)
except Exception:
LOG.exception('Unable to update alias')
def add_permission(self, action, principal,
source_arn=None, source_account=None):
try:
kwargs = {
'FunctionName': self.name,
'Qualifier': self._context.environment,
'StatementId': str(uuid.uuid4()),
'Action': action,
'Principal': principal}
if source_arn:
kwargs['SourceArn'] = source_arn
if source_account:
kwargs['SourceAccount'] = source_account
response = self._lambda_client.call(
'add_permission', **kwargs)
LOG.debug(response)
except Exception:
LOG.exception('Unable to add permission')
def add_permissions(self):
if self.permissions:
time.sleep(5)
for permission in self.permissions:
self.add_permission(
permission['action'],
permission['principal'],
permission.get('source_arn'),
permission.get('source_account'))
def add_log_retention_policy(self):
if self.log_retention_policy:
self.log.add_log_retention_policy(self.log_retention_policy)
def create(self):
LOG.info('creating function %s', self.name)
self._check_function_md5()
self._check_config_md5()
# There is a consistency problem here.
# Sometimes the role is not ready to be used by the function.
ready = False
while not ready:
with open(self.zipfile_name, 'rb') as fp:
exec_role = self._context.exec_role_arn
LOG.debug('exec_role=%s', exec_role)
try:
zipdata = fp.read()
response = self._lambda_client.call(
'create_function',
FunctionName=self.name,
Code={'ZipFile': zipdata},
Runtime=self.runtime,
Role=exec_role,
Handler=self.handler,
Environment=self.environment_variables,
Description=self.description,
Timeout=self.timeout,
MemorySize=self.memory_size,
VpcConfig=self.vpc_config,
Publish=True)
LOG.debug(response)
description = 'For stage {}'.format(
self._context.environment)
self.create_alias(self._context.environment, description)
ready = True
except ClientError as e:
if 'InvalidParameterValueException' in str(e):
LOG.debug('Role is not ready, waiting')
time.sleep(2)
else:
LOG.debug(str(e))
ready = True
except Exception:
LOG.exception('Unable to upload zip file')
ready = True
self.add_permissions()
self.add_log_retention_policy()
def update(self):
LOG.info('updating function %s', self.name)
if self._check_function_md5():
self._response = None
with open(self.zipfile_name, 'rb') as fp:
try:
LOG.info('uploading new function zipfile %s',
self.zipfile_name)
zipdata = fp.read()
response = self._lambda_client.call(
'update_function_code',
FunctionName=self.name,
ZipFile=zipdata,
Publish=True)
LOG.debug(response)
except Exception:
LOG.exception('unable to update zip file')
self.update_alias(
self._context.environment,
'For the {} stage'.format(self._context.environment))
self.add_log_retention_policy()
def update_configuration(self):
if self._check_config_md5():
self._response = None
LOG.info('updating configuration for %s', self.name)
exec_role = self._context.exec_role_arn
LOG.debug('exec_role=%s', exec_role)
try:
response = self._lambda_client.call(
'update_function_configuration',
FunctionName=self.name,
VpcConfig=self.vpc_config,
Role=exec_role,
Handler=self.handler,
Environment=self.environment_variables,
Description=self.description,
Timeout=self.timeout,
MemorySize=self.memory_size)
LOG.debug(response)
except Exception:
LOG.exception('unable to update function configuration')
else:
LOG.info('function configuration has not changed')
def deploy(self):
if self.exists():
self.update_configuration()
return self.update()
return self.create()
def list_versions(self):
try:
response = self._lambda_client.call(
'list_versions_by_function',
FunctionName=self.name)
LOG.debug(response)
versions = response['Versions']
while (response.get('NextMarker', None)):
                response = self._lambda_client.call(
'list_versions_by_function',
FunctionName=self.name,
Marker=response['NextMarker'])
versions += response['Versions']
except Exception:
LOG.exception('Unable to list versions')
return versions
def tag(self, name, description):
self.create_alias(name, description)
def delete(self):
LOG.info('deleting function %s', self.name)
response = None
try:
response = self._lambda_client.call(
'delete_function',
FunctionName=self.name)
LOG.debug(response)
except ClientError:
LOG.debug('function %s: not found', self.name)
return response
def status(self):
try:
response = self._lambda_client.call(
'get_function',
FunctionName=self.name)
LOG.debug(response)
except ClientError:
LOG.debug('function %s not found', self.name)
response = None
return response
def _invoke(self, data, invocation_type):
LOG.debug('invoke %s as %s', self.name, invocation_type)
response = self._lambda_client.call(
'invoke',
FunctionName=self.name,
InvocationType=invocation_type,
LogType='Tail',
Payload=data)
LOG.debug(response)
return response
def invoke(self, test_data=None):
return self._invoke(test_data, 'RequestResponse')
def invoke_async(self, test_data=None):
return self._invoke(test_data, 'Event')
def dryrun(self, test_data=None):
return self._invoke(test_data, 'DryRun')
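# Illustrative config shape consumed by the properties of Function above. The
# key names are taken from the lookups in this class; the concrete values are
# assumptions for exposition only:
#
#   {
#       'runtime': 'python2.7',
#       'handler': 'index.handler',
#       'description': 'example function',
#       'timeout': 30,
#       'memory_size': 128,
#       'dependencies': [],
#       'vpc_config': {
#           'security_group_ids': ['sg-0123456789abcdef0'],
#           'subnet_ids': ['subnet-0123456789abcdef0'],
#       },
#       'excluded_dirs': 'default',
#       'excluded_files': ['.gitignore'],
#       'permissions': [],
#       'log_retention_policy': 14,
#   }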
|
|
'''tzinfo timezone information for Africa/Cairo.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Cairo(DstTzInfo):
'''Africa/Cairo timezone definition. See datetime.tzinfo for details'''
zone = 'Africa/Cairo'
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1940,7,14,22,0,0),
d(1940,9,30,21,0,0),
d(1941,4,14,22,0,0),
d(1941,9,15,21,0,0),
d(1942,3,31,22,0,0),
d(1942,10,26,21,0,0),
d(1943,3,31,22,0,0),
d(1943,10,31,21,0,0),
d(1944,3,31,22,0,0),
d(1944,10,31,21,0,0),
d(1945,4,15,22,0,0),
d(1945,10,31,21,0,0),
d(1957,5,9,22,0,0),
d(1957,9,30,21,0,0),
d(1958,4,30,22,0,0),
d(1958,9,30,21,0,0),
d(1959,4,30,23,0,0),
d(1959,9,30,0,0,0),
d(1960,4,30,23,0,0),
d(1960,9,30,0,0,0),
d(1961,4,30,23,0,0),
d(1961,9,30,0,0,0),
d(1962,4,30,23,0,0),
d(1962,9,30,0,0,0),
d(1963,4,30,23,0,0),
d(1963,9,30,0,0,0),
d(1964,4,30,23,0,0),
d(1964,9,30,0,0,0),
d(1965,4,30,23,0,0),
d(1965,9,30,0,0,0),
d(1966,4,30,23,0,0),
d(1966,10,1,0,0,0),
d(1967,4,30,23,0,0),
d(1967,10,1,0,0,0),
d(1968,4,30,23,0,0),
d(1968,10,1,0,0,0),
d(1969,4,30,23,0,0),
d(1969,10,1,0,0,0),
d(1970,4,30,23,0,0),
d(1970,10,1,0,0,0),
d(1971,4,30,23,0,0),
d(1971,10,1,0,0,0),
d(1972,4,30,23,0,0),
d(1972,10,1,0,0,0),
d(1973,4,30,23,0,0),
d(1973,10,1,0,0,0),
d(1974,4,30,23,0,0),
d(1974,10,1,0,0,0),
d(1975,4,30,23,0,0),
d(1975,10,1,0,0,0),
d(1976,4,30,23,0,0),
d(1976,10,1,0,0,0),
d(1977,4,30,23,0,0),
d(1977,10,1,0,0,0),
d(1978,4,30,23,0,0),
d(1978,10,1,0,0,0),
d(1979,4,30,23,0,0),
d(1979,10,1,0,0,0),
d(1980,4,30,23,0,0),
d(1980,10,1,0,0,0),
d(1981,4,30,23,0,0),
d(1981,10,1,0,0,0),
d(1982,7,24,23,0,0),
d(1982,10,1,0,0,0),
d(1983,7,11,23,0,0),
d(1983,10,1,0,0,0),
d(1984,4,30,23,0,0),
d(1984,10,1,0,0,0),
d(1985,4,30,23,0,0),
d(1985,10,1,0,0,0),
d(1986,4,30,23,0,0),
d(1986,10,1,0,0,0),
d(1987,4,30,23,0,0),
d(1987,10,1,0,0,0),
d(1988,4,30,23,0,0),
d(1988,10,1,0,0,0),
d(1989,5,5,23,0,0),
d(1989,10,1,0,0,0),
d(1990,4,30,23,0,0),
d(1990,10,1,0,0,0),
d(1991,4,30,23,0,0),
d(1991,10,1,0,0,0),
d(1992,4,30,23,0,0),
d(1992,10,1,0,0,0),
d(1993,4,30,23,0,0),
d(1993,10,1,0,0,0),
d(1994,4,30,23,0,0),
d(1994,10,1,0,0,0),
d(1995,4,27,22,0,0),
d(1995,9,28,21,0,0),
d(1996,4,25,22,0,0),
d(1996,9,26,21,0,0),
d(1997,4,24,22,0,0),
d(1997,9,25,21,0,0),
d(1998,4,23,22,0,0),
d(1998,9,24,21,0,0),
d(1999,4,29,22,0,0),
d(1999,9,30,21,0,0),
d(2000,4,27,22,0,0),
d(2000,9,28,21,0,0),
d(2001,4,26,22,0,0),
d(2001,9,27,21,0,0),
d(2002,4,25,22,0,0),
d(2002,9,26,21,0,0),
d(2003,4,24,22,0,0),
d(2003,9,25,21,0,0),
d(2004,4,29,22,0,0),
d(2004,9,30,21,0,0),
d(2005,4,28,22,0,0),
d(2005,9,29,21,0,0),
d(2006,4,27,22,0,0),
d(2006,9,21,21,0,0),
d(2007,4,26,22,0,0),
d(2007,9,27,21,0,0),
d(2008,4,24,22,0,0),
d(2008,9,25,21,0,0),
d(2009,4,23,22,0,0),
d(2009,9,24,21,0,0),
d(2010,4,29,22,0,0),
d(2010,9,30,21,0,0),
d(2011,4,28,22,0,0),
d(2011,9,29,21,0,0),
d(2012,4,26,22,0,0),
d(2012,9,27,21,0,0),
d(2013,4,25,22,0,0),
d(2013,9,26,21,0,0),
d(2014,4,24,22,0,0),
d(2014,9,25,21,0,0),
d(2015,4,23,22,0,0),
d(2015,9,24,21,0,0),
d(2016,4,28,22,0,0),
d(2016,9,29,21,0,0),
d(2017,4,27,22,0,0),
d(2017,9,28,21,0,0),
d(2018,4,26,22,0,0),
d(2018,9,27,21,0,0),
d(2019,4,25,22,0,0),
d(2019,9,26,21,0,0),
d(2020,4,23,22,0,0),
d(2020,9,24,21,0,0),
d(2021,4,29,22,0,0),
d(2021,9,30,21,0,0),
d(2022,4,28,22,0,0),
d(2022,9,29,21,0,0),
d(2023,4,27,22,0,0),
d(2023,9,28,21,0,0),
d(2024,4,25,22,0,0),
d(2024,9,26,21,0,0),
d(2025,4,24,22,0,0),
d(2025,9,25,21,0,0),
d(2026,4,23,22,0,0),
d(2026,9,24,21,0,0),
d(2027,4,29,22,0,0),
d(2027,9,30,21,0,0),
d(2028,4,27,22,0,0),
d(2028,9,28,21,0,0),
d(2029,4,26,22,0,0),
d(2029,9,27,21,0,0),
d(2030,4,25,22,0,0),
d(2030,9,26,21,0,0),
d(2031,4,24,22,0,0),
d(2031,9,25,21,0,0),
d(2032,4,29,22,0,0),
d(2032,9,30,21,0,0),
d(2033,4,28,22,0,0),
d(2033,9,29,21,0,0),
d(2034,4,27,22,0,0),
d(2034,9,28,21,0,0),
d(2035,4,26,22,0,0),
d(2035,9,27,21,0,0),
d(2036,4,24,22,0,0),
d(2036,9,25,21,0,0),
d(2037,4,23,22,0,0),
d(2037,9,24,21,0,0),
]
_transition_info = [
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
i(10800,3600,'EEST'),
i(7200,0,'EET'),
]
Cairo = Cairo()
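# Illustrative usage (assumption, for exposition only): the module-level Cairo
# instance behaves like any pytz DstTzInfo, e.g.
#
#   from datetime import datetime
#   Cairo.localize(datetime(2005, 7, 1, 12, 0)).tzname()   # 'EEST' (DST)
#   Cairo.localize(datetime(2005, 12, 1, 12, 0)).tzname()  # 'EET'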
|
|
# This script runs the non-iterative and iterative forms of the A and B
# versions of the trace simplification algorithm, where:
#   A - blocks are considered alone
#   B - blocks of like thread segments are condensed into single blocks
# This aims to simplify even in the case of incremental failure.
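# Illustrative example (assumption, for exposition only): for a schedule that
# picks threads [T1, T1, T2, T1], version A treats each of the four scheduling
# points as its own block, while version B coalesces the leading run of T1 and
# sees three blocks: [T1, T1], [T2], [T1].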
import sys
import os
import copy
import noniterativeAA
import noniterativeBB
import shutil
tr = os.environ.get('THRILLE_ROOT')
if tr != None:
sys.path.append(os.path.join(tr, "scripts/simpl/blockremoval"))
import blockremoval
sys.path.append(os.path.join(tr, "scripts/simpl/preemptremoval"))
import preemptremoval
thrille_input_schedule = "thrille-sched"
thrille_output_schedule = "my-schedule"
def checkEnvironment():
if len(sys.argv) < 4:
print "usage: python multiexp.py [times to execute] [results dir]",
print "[binary under test] [binary flags]"
print
print "purpose: performs the block removal AND",
print "preemption simplification algorithm",
print "on a generated input schedule in both their iterative",
print "and non iterative forms, the specified number of times",
print "saving the results to [results dir]"
print
print "intermediary/output files:"
print "\tsimple-sched-NAA: output of noniterative AA algorithms"
print "\tsimple-sched-NBB: output of noniterative BB algorithms"
print "\tsimple-sched-IAA: output of iterative AA algorithms"
print "\tsimple-sched-IBB: output of iterative BB algorithms"
print
print "definitions:\n\tA algorithm - each scheduling point is treated",
print "as its own block, regardless of the blocks around it"
print "\tB algorithm - all scheduling points choosing the same thread",
print "in a row are coalesced and treated as one block"
print
sys.exit(1)
assert os.environ.get('THRILLE_ROOT')!= None, \
"Thrille root environment variable not defined"
assert int(sys.argv[1]) > 0, "Nonsensical execution time"
assert os.path.exists(sys.argv[2]), "Results directory does not exist"
assert os.path.exists(sys.argv[3]), "binary does not exist"
def clearOldThrilleSchedule():
open(thrille_input_schedule, "w").write("begin_addr_list\nend_addr_list\n")
def outputScheduleInformation(str, sched, enabled, condlist):
blocks = blockremoval.getTotalBlocks(sched)
contexts = preemptremoval.countContextSwitches(sched, enabled, condlist)
npcs = preemptremoval.countNonpreemptiveCS(sched, enabled, condlist)
preemptions = preemptremoval.countPreemptions(sched, enabled, condlist)
assert (npcs + preemptions) == contexts, "sanity fail"
print str, ":"
print "\tblocks:", blocks,
print "\n\tcontext switches:", contexts, "\n\t\tpreemptions:",
print preemptions, "\n\t\tnon-preemptive switches:", npcs
def executePreload(thrille_root, preload, binary, binflags):
os.environ['LD_PRELOAD'] = \
os.path.join(thrille_root, "bin", preload)
binarydir, bin = os.path.split(binary)
thing = os.spawnve(os.P_NOWAIT, binary, binflags, os.environ)
pid, exit = os.waitpid(thing, 0)
del os.environ['LD_PRELOAD']
return exit
def doRaceDetect(binary_file, binflags, thrille_root):
clearOldThrilleSchedule()
executePreload(thrille_root, "liblockrace.so", binary_file, binflags)
def getNewErrorSchedule(binary_file, thrille_root):
assert os.path.exists(os.path.join(thrille_root, "bin/liblockrace.so"))
assert os.path.exists(os.path.join(thrille_root, "bin/librandact.so"))
binarydir, bin = os.path.split(binary_file)
curr_dir = os.getcwd()
if binarydir != '':
os.chdir(binarydir)
binflags = blockremoval.getBinaryFlags()
binflags.insert(0, bin)
print "NOTE: automatic race detection is disabled"
#doRaceDetect(binary_file, binflags, thrille_root)
assert os.path.exists("./thrille-randomactive")
exit_status = 0
sched = []
enabled = []
error = ""
    count = 0
while True:
clearOldThrilleSchedule()
count += 1
if count > 1000:
raw_input("100 iterations with no error--continue?")
count = 0
exit_status = executePreload(thrille_root, "librandact.so", \
binary_file, binflags)
print "Thrille Random Active Exit Status:", exit_status
sched, enabled, addrlist, condlist = \
blockremoval.readInNewSchedule(thrille_output_schedule)
error = blockremoval.recordFailure(thrille_output_schedule)
if error != None:
if blockremoval.testSchedule(sched, error, addrlist,\
binary_file, thrille_root):
os.chdir(curr_dir)
return sched, enabled, addrlist, condlist, error
else:
assert False, "Error in Thrille makes replay impossible"
def runAlgoNAA(save, bin, thrille, sched, en, addrlist, err):
#NAA
simpsched_NAA = noniterativeAA.noniterativeAA(save,\
bin,\
thrille, sched,\
en, addrlist, err)
blockremoval.outputResult(simpsched_NAA, err, addrlist,\
bin, thrille, save)
NAAsched, NAAenabled, NAAaddrlist, NAAcondlist = \
blockremoval.readInNewSchedule(save)
assert simpsched_NAA == NAAsched
return NAAsched, NAAenabled, NAAaddrlist, NAAcondlist
def runAlgoNBB(save, bin, thrille, sched, en, addrlist, err):
#NBB
simpsched_NBB = noniterativeBB.noniterativeBB(save,\
bin,\
thrille, sched,\
en, addrlist, err)
blockremoval.outputResult(simpsched_NBB, err, addrlist,\
bin, thrille, save)
NBBsched, NBBenabled, NBBaddrlist, NBBcondlist = \
blockremoval.readInNewSchedule(save)
assert simpsched_NBB == NBBsched
return NBBsched, NBBenabled, NBBaddrlist, NBBcondlist
def runAlgoIAA(save, bin, thrille, sched, en, addrlist, err):
prev_IAAsched = sched
prev_IAAenabled = en
IAAsched = []
IAAenabled = []
IAAaddrlist = []
IAAcondlist = []
while True:
simpsched_IAA = noniterativeAA.noniterativeAA(save,\
bin,\
thrille, prev_IAAsched,\
prev_IAAenabled, addrlist, err)
blockremoval.outputResult(simpsched_IAA, err, addrlist,\
bin, thrille, save)
IAAsched, IAAenabled, IAAaddrlist, IAAcondlist = \
blockremoval.readInNewSchedule(save)
assert simpsched_IAA == IAAsched
assert IAAaddrlist == addrlist
if IAAsched == prev_IAAsched:
break
else:
prev_IAAsched = IAAsched
prev_IAAenabled = IAAenabled
return IAAsched, IAAenabled, IAAaddrlist, IAAcondlist
def runAlgoIBB(save, bin, thrille, sched, en, addrlist, err):
prev_IBBsched = sched
prev_IBBenabled = en
IBBsched = []
IBBenabled = []
IBBaddrlist = []
IBBcondlist = []
while True:
simpsched_IBB = noniterativeBB.noniterativeBB(save,\
bin,\
thrille, prev_IBBsched,\
prev_IBBenabled, addrlist, err)
blockremoval.outputResult(simpsched_IBB, err, addrlist,\
bin, thrille, save)
IBBsched, IBBenabled, IBBaddrlist, IBBcondlist = \
blockremoval.readInNewSchedule(save)
assert simpsched_IBB == IBBsched
assert IBBaddrlist == addrlist
if IBBsched == prev_IBBsched:
break
else:
prev_IBBsched = IBBsched
prev_IBBenabled = IBBenabled
return IBBsched, IBBenabled, IBBaddrlist, IBBcondlist
def main():
checkEnvironment()
times_to_repeat = int(sys.argv[1])
save_directory = sys.argv[2]
binary_file = sys.argv[3]
thrille_root = os.environ.get('THRILLE_ROOT')
fout = open(os.path.join(save_directory, "simpl-runstat"), "w")
my_bin_save = os.path.join(save_directory, "bin")
os.mkdir(my_bin_save)
shutil.copy(os.path.join(thrille_root, "bin", "libserializer.so"), \
os.path.join(my_bin_save, "libserializer.so"))
shutil.copy(os.path.join(thrille_root, "bin", "libstrictserial.so"), \
os.path.join(my_bin_save, "libstrictserial.so"))
shutil.copy(os.path.join(thrille_root, "bin", "librandomschedule.so"), \
os.path.join(my_bin_save, "librandomschedule.so"))
shutil.copy(os.path.join(thrille_root, "bin", "librandact.so"), \
os.path.join(my_bin_save, "librandact.so"))
shutil.copy(os.path.join(thrille_root, "bin", "librace.so"), \
os.path.join(my_bin_save, "librace.so"))
shutil.copy(os.path.join(thrille_root, "bin", "liblockrace.so"), \
os.path.join(my_bin_save, "liblockrace.so"))
#figure out how to remove svn
os.mkdir(os.path.join(save_directory, "src"))
shutil.copytree(os.path.join(thrille_root, "src"), \
            os.path.join(save_directory,"src","src"))
shutil.copytree(os.path.join(thrille_root, "scripts"), \
os.path.join(save_directory,"src","scripts"))
fout.write("Command that was run:\n")
for x in sys.argv:
fout.write(x + " ")
fout.write("\n\n")
#lists for tracking statistics
start_list = []
naa_list = []
nbb_list = []
iaa_list = []
ibb_list = []
i = 0
while i < times_to_repeat:
my_save_dir = ""
if (i < 10):
my_save_dir = os.path.join(save_directory, "run0" + str(i))
else:
my_save_dir = os.path.join(save_directory, "run" + str(i))
os.mkdir(my_save_dir)
startsched, startenabled, startaddrlist, startcondlist, error = \
getNewErrorSchedule(binary_file, thrille_root)
#save the error schedule we are starting with
#and ensure it's legitimate
blockremoval.outputResult(startsched, error, startaddrlist, \
binary_file, thrille_root, \
os.path.join(my_save_dir, "start-sched"))
start_list.append((startsched, startenabled, startcondlist))
#NAA
output_schedule = os.path.join(my_save_dir, "simp-sched-NAA")
NAAsched, NAAenabled, NAAaddrlist, NAAcondlist = \
runAlgoNAA(output_schedule, binary_file, thrille_root, \
startsched, startenabled, startaddrlist, error)
assert NAAaddrlist == startaddrlist
naa_list.append((NAAsched, NAAenabled, NAAcondlist))
#NBB
output_schedule = os.path.join(my_save_dir, "simp-sched-NBB")
NBBsched, NBBenabled, NBBaddrlist, NBBcondlist = \
runAlgoNBB(output_schedule, binary_file, thrille_root, \
startsched, startenabled, startaddrlist, error)
assert NBBaddrlist == startaddrlist
nbb_list.append((NBBsched, NBBenabled, NBBcondlist))
#IAA - iterate until fixed point
output_schedule = os.path.join(my_save_dir, "simp-sched-IAA")
IAAsched, IAAenabled, IAAaddrlist, IAAcondlist = \
runAlgoIAA(output_schedule, binary_file, thrille_root, \
startsched, startenabled, startaddrlist, error)
assert IAAaddrlist == startaddrlist
assert len(IAAsched) <= len(NAAsched)
iaa_list.append((IAAsched, IAAenabled, IAAcondlist))
#IBB - iterate until fixed point
output_schedule = os.path.join(my_save_dir, "simp-sched-IBB")
IBBsched, IBBenabled, IBBaddrlist, IBBcondlist = \
runAlgoIBB(output_schedule, binary_file, thrille_root, \
startsched, startenabled, startaddrlist, error)
assert IBBaddrlist == startaddrlist
assert len(IBBsched) <= len(NBBsched)
ibb_list.append((IBBsched, IBBenabled, IBBcondlist))
assert len(start_list) == len(naa_list)
assert len(naa_list) == len(nbb_list)
assert len(nbb_list) == len(iaa_list)
assert len(iaa_list) == len(ibb_list)
sched, en, cond = start_list[-1]
outputScheduleInformation("Start", sched, en, cond)
sched, en, cond = naa_list[-1]
outputScheduleInformation("NAA", sched, en, cond)
sched, en, cond = nbb_list[-1]
outputScheduleInformation("NBB", sched, en, cond)
sched, en, cond = iaa_list[-1]
outputScheduleInformation("IAA", sched, en, cond)
sched, en, cond = ibb_list[-1]
outputScheduleInformation("IBB", sched, en, cond)
tmpout = open(os.path.join(my_save_dir, "README"), "w")
sys.stdout = tmpout
sched, en, cond = start_list[-1]
outputScheduleInformation("Start", sched, en, cond)
sched, en, cond = naa_list[-1]
outputScheduleInformation("NAA", sched, en, cond)
sched, en, cond = nbb_list[-1]
outputScheduleInformation("NBB", sched, en, cond)
sched, en, cond = iaa_list[-1]
outputScheduleInformation("IAA", sched, en, cond)
sched, en, cond = ibb_list[-1]
outputScheduleInformation("IBB", sched, en, cond)
tmpout.write("\n")
sys.stdout.flush()
sys.stdout = sys.__stdout__
tmpout.close()
fout.write("**RUN " + str(i) + "\n")
sys.stdout = fout
sched, en, cond = start_list[-1]
outputScheduleInformation("Start", sched, en, cond)
sched, en, cond = naa_list[-1]
outputScheduleInformation("NAA", sched, en, cond)
sched, en, cond = nbb_list[-1]
outputScheduleInformation("NBB", sched, en, cond)
sched, en, cond = iaa_list[-1]
outputScheduleInformation("IAA", sched, en, cond)
sched, en, cond = ibb_list[-1]
outputScheduleInformation("IBB", sched, en, cond)
fout.write("\n")
sys.stdout.flush()
sys.stdout = sys.__stdout__
i+= 1
#output statistics
if __name__ == "__main__":
main()
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.client.graph_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import math_ops # pylint: disable=unused-import
from tensorflow.python.ops import math_ops as math_ops_lib
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
# Utility device function to use for testing
def test_device_func_pin_variable_to_cpu(op):
if op.device:
return op.device
return "/cpu:0" if op.node_def.op in ["Variable", "VariableV2"] else op.device
class DeviceFunctionsTest(test.TestCase):
def testTwoDeviceFunctions(self):
with ops.Graph().as_default() as g:
var_0 = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="var_0",
container="",
shared_name="")
with g.device(test_device_func_pin_variable_to_cpu):
var_1 = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="var_1",
container="",
shared_name="")
var_2 = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="var_2",
container="",
shared_name="")
var_3 = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="var_3",
container="",
shared_name="")
with g.device(test_device_func_pin_variable_to_cpu):
var_4 = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="var_4",
container="",
shared_name="")
with g.device("/device:GPU:0"):
var_5 = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="var_5",
container="",
shared_name="")
var_6 = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="var_6",
container="",
shared_name="")
self.assertDeviceEqual(var_0.device, None)
self.assertDeviceEqual(var_1.device, "/device:CPU:0")
self.assertDeviceEqual(var_2.device, None)
self.assertDeviceEqual(var_3.device, None)
self.assertDeviceEqual(var_4.device, "/device:CPU:0")
self.assertDeviceEqual(var_5.device, "/device:GPU:0")
self.assertDeviceEqual(var_6.device, "/device:CPU:0")
def testNestedDeviceFunctions(self):
with ops.Graph().as_default():
var_0 = variables.Variable(0)
with ops.device(test_device_func_pin_variable_to_cpu):
var_1 = variables.Variable(1)
with ops.device(lambda op: "/device:GPU:0"):
var_2 = variables.Variable(2)
with ops.device("/device:GPU:0"): # Implicit merging device function.
var_3 = variables.Variable(3)
self.assertDeviceEqual(var_0.device, None)
self.assertDeviceEqual(var_1.device, "/device:CPU:0")
self.assertDeviceEqual(var_2.device, "/device:GPU:0")
self.assertDeviceEqual(var_3.device, "/device:GPU:0")
def testExplicitDevice(self):
with ops.Graph().as_default() as g:
const_0 = constant_op.constant(5.0)
with g.device("/device:GPU:0"):
const_1 = constant_op.constant(5.0)
with g.device("/device:GPU:1"):
const_2 = constant_op.constant(5.0)
with g.device("/device:CPU:0"):
const_3 = constant_op.constant(5.0)
with g.device("/device:CPU:1"):
const_4 = constant_op.constant(5.0)
with g.device("/job:ps"):
const_5 = constant_op.constant(5.0)
self.assertDeviceEqual(const_0.device, None)
self.assertDeviceEqual(const_1.device, "/device:GPU:0")
self.assertDeviceEqual(const_2.device, "/device:GPU:1")
self.assertDeviceEqual(const_3.device, "/device:CPU:0")
self.assertDeviceEqual(const_4.device, "/device:CPU:1")
self.assertDeviceEqual(const_5.device, "/job:ps")
def testDefaultDevice(self):
with ops.Graph().as_default() as g, g.device(
test_device_func_pin_variable_to_cpu):
with g.device("/job:ps"):
const_0 = constant_op.constant(5.0)
with g.device("/device:GPU:0"):
const_1 = constant_op.constant(5.0)
with g.device("/device:GPU:1"):
const_2 = constant_op.constant(5.0)
with g.device("/device:CPU:0"):
const_3 = constant_op.constant(5.0)
with g.device("/device:CPU:1"):
const_4 = constant_op.constant(5.0)
with g.device("/replica:0"):
const_5 = constant_op.constant(5.0)
self.assertDeviceEqual(const_0.device, "/job:ps")
self.assertDeviceEqual(const_1.device, "/device:GPU:0")
self.assertDeviceEqual(const_2.device, "/device:GPU:1")
self.assertDeviceEqual(const_3.device, "/device:CPU:0")
self.assertDeviceEqual(const_4.device, "/device:CPU:1")
self.assertDeviceEqual(const_5.device, "/replica:0")
def testExtractSubGraph(self):
graph_def = graph_pb2.GraphDef()
n1 = graph_def.node.add()
n1.name = "n1"
n1.input.extend(["n5"])
n2 = graph_def.node.add()
n2.name = "n2"
# Take the first output of the n1 node as the input.
n2.input.extend(["n1:0"])
n3 = graph_def.node.add()
n3.name = "n3"
# Add a control input (which isn't really needed by the kernel, but
# rather to enforce execution order between nodes).
n3.input.extend(["^n2"])
n4 = graph_def.node.add()
n4.name = "n4"
# It is fine to have loops in the graph as well.
n5 = graph_def.node.add()
n5.name = "n5"
n5.input.extend(["n1"])
sub_graph = graph_util.extract_sub_graph(graph_def, ["n3"])
self.assertEqual("n1", sub_graph.node[0].name)
self.assertEqual("n2", sub_graph.node[1].name)
self.assertEqual("n3", sub_graph.node[2].name)
self.assertEqual("n5", sub_graph.node[3].name)
def testExtractSubGraphWithInvalidDestNodes(self):
graph_def = graph_pb2.GraphDef()
n1 = graph_def.node.add()
n1.name = "n1"
with self.assertRaisesRegexp(TypeError, "must be a list"):
graph_util.extract_sub_graph(graph_def, "n1")
def testConvertVariablesToConstsWithFunctions(self):
@function.Defun(dtypes.float32)
def plus_one(x):
return x + 1.0
with ops.Graph().as_default():
variable_node = variables.Variable(1.0, name="variable_node")
_ = variables.Variable(1.0, name="unused_variable_node")
defun_node = plus_one(variable_node)
output_node = math_ops_lib.multiply(
defun_node, 2.0, name="output_node")
with session.Session() as sess:
init = variables.initialize_variables([variable_node])
sess.run(init)
output = sess.run(output_node)
self.assertNear(4.0, output, 0.00001)
variable_graph_def = sess.graph.as_graph_def()
# First get the constant_graph_def when variable_names_whitelist is set,
# note that if variable_names_whitelist is not set an error will be
# thrown because unused_variable_node is not initialized.
constant_graph_def = graph_util.convert_variables_to_constants(
sess,
variable_graph_def, ["output_node"],
variable_names_whitelist=set(["variable_node"]))
self.assertEqual(variable_graph_def.library,
constant_graph_def.library)
def testConvertVariablesToConsts(self):
self._test_variable_to_const_conversion(use_resource=False)
def testConvertResourceVariablesToConsts(self):
self._test_variable_to_const_conversion(use_resource=True)
def _test_variable_to_const_conversion(self, use_resource):
with ops.Graph().as_default():
with variable_scope.variable_scope("", use_resource=use_resource):
variable_node = variable_scope.get_variable(
"variable_node", initializer=1.0)
another_variable = variable_scope.get_variable(
"unused_variable_node", initializer=1.0)
output_node = math_ops_lib.multiply(
variable_node, 2.0, name="output_node")
with session.Session() as sess:
sess.run(variable_node.initializer)
output = sess.run(output_node)
self.assertNear(2.0, output, 0.00001)
variable_graph_def = sess.graph.as_graph_def()
# First get the constant_graph_def when variable_names_whitelist is
# set, note that if variable_names_whitelist is not set an error will
# be thrown because unused_variable_node is not initialized.
constant_graph_def = graph_util.convert_variables_to_constants(
sess,
variable_graph_def, ["output_node"],
variable_names_whitelist=set(["variable_node"]))
# Then initialize the unused variable, and get another
# constant_graph_def when variable_names_whitelist is not set.
sess.run(another_variable.initializer)
constant_graph_def_without_variable_whitelist = (
graph_util.convert_variables_to_constants(
sess, variable_graph_def, ["output_node"]))
# The unused variable should be cleared so the two graphs should be
# equivalent.
self.assertEqual(
str(constant_graph_def),
str(constant_graph_def_without_variable_whitelist))
# Test variable name black list. This should result in the variable
# not being a const.
constant_graph_def_with_blacklist = (
graph_util.convert_variables_to_constants(
sess,
variable_graph_def, ["output_node"],
variable_names_blacklist=set(["variable_node"])))
variable_node = None
for node in constant_graph_def_with_blacklist.node:
if node.name == "variable_node":
variable_node = node
self.assertIsNotNone(variable_node)
if use_resource:
self.assertEqual(variable_node.op, "VarHandleOp")
else:
self.assertEqual(variable_node.op, "VariableV2")
# Now make sure the variable has become a constant, and that the graph still
# produces the expected result.
with ops.Graph().as_default():
_ = importer.import_graph_def(constant_graph_def, name="")
self.assertEqual(4, len(constant_graph_def.node))
for node in constant_graph_def.node:
self.assertNotIn(
node.op,
["Variable", "VariableV2", "VarHandleOp", "ReadVariableOp"])
with session.Session() as sess:
output_node = sess.graph.get_tensor_by_name("output_node:0")
output = sess.run(output_node)
self.assertNear(2.0, output, 0.00001)
def create_node_def(self, op, name, inputs):
new_node = node_def_pb2.NodeDef()
new_node.op = op
new_node.name = name
for input_name in inputs:
new_node.input.extend([input_name])
return new_node
def create_constant_node_def(self, name, value, dtype, shape=None):
node = self.create_node_def("Const", name, [])
self.set_attr_dtype(node, "dtype", dtype)
self.set_attr_tensor(node, "value", value, dtype, shape)
return node
def set_attr_dtype(self, node, key, value):
node.attr[key].CopyFrom(
attr_value_pb2.AttrValue(type=value.as_datatype_enum))
def set_attr_tensor(self, node, key, value, dtype, shape=None):
node.attr[key].CopyFrom(
attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto(
value, dtype=dtype, shape=shape)))
def testRemoveTrainingNodes(self):
a_constant_name = "a_constant"
b_constant_name = "b_constant"
a_check_name = "a_check"
b_check_name = "b_check"
a_identity_name = "a_identity"
b_identity_name = "b_identity"
add_name = "add"
graph_def = graph_pb2.GraphDef()
a_constant = self.create_constant_node_def(
a_constant_name, value=1, dtype=dtypes.float32, shape=[])
graph_def.node.extend([a_constant])
a_check_node = self.create_node_def("CheckNumerics", a_check_name,
[a_constant_name])
graph_def.node.extend([a_check_node])
a_identity_node = self.create_node_def(
"Identity", a_identity_name, [a_constant_name, "^" + a_check_name])
graph_def.node.extend([a_identity_node])
b_constant = self.create_constant_node_def(
b_constant_name, value=1, dtype=dtypes.float32, shape=[])
graph_def.node.extend([b_constant])
b_check_node = self.create_node_def("CheckNumerics", b_check_name,
[b_constant_name])
graph_def.node.extend([b_check_node])
b_identity_node = self.create_node_def(
"Identity", b_identity_name, [b_constant_name, "^" + b_check_name])
graph_def.node.extend([b_identity_node])
add_node = self.create_node_def("Add", add_name,
[a_identity_name, b_identity_name])
self.set_attr_dtype(add_node, "T", dtypes.float32)
graph_def.node.extend([add_node])
expected_output = graph_pb2.GraphDef()
a_constant = self.create_constant_node_def(
a_constant_name, value=1, dtype=dtypes.float32, shape=[])
expected_output.node.extend([a_constant])
b_constant = self.create_constant_node_def(
b_constant_name, value=1, dtype=dtypes.float32, shape=[])
expected_output.node.extend([b_constant])
add_node = self.create_node_def("Add", add_name,
[a_constant_name, b_constant_name])
self.set_attr_dtype(add_node, "T", dtypes.float32)
expected_output.node.extend([add_node])
output = graph_util.remove_training_nodes(graph_def)
self.assertProtoEquals(expected_output, output)
def testRemoveIdentityChains(self):
"""Check that chains of Identity nodes are correctly pruned.
Create a chain of four nodes, A, B, C, and D where A inputs B, B inputs C,
and C inputs D. Nodes B and C are "Identity" and should be pruned, resulting
in the nodes A and D, where A inputs D.
"""
graph_def = graph_pb2.GraphDef()
graph_def.node.extend([
self.create_node_def("Aop", "A", ["B"]), self.create_node_def(
"Identity", "B", ["C"]), self.create_node_def(
"Identity", "C", ["D"]), self.create_node_def("Dop", "D", [])
])
expected_graph_def = graph_pb2.GraphDef()
expected_graph_def.node.extend([
self.create_node_def("Aop", "A", ["D"]), self.create_node_def(
"Dop", "D", [])
])
self.assertProtoEquals(expected_graph_def,
graph_util.remove_training_nodes(graph_def))
if __name__ == "__main__":
test.main()
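# Illustrative sketch (not part of the original test file): the "freeze graph"
# pattern exercised by the tests above. Names such as "v" and "output_node" are
# placeholders.
#
#   with ops.Graph().as_default():
#       v = variables.Variable(1.0, name="v")
#       out = math_ops_lib.multiply(v, 2.0, name="output_node")
#       with session.Session() as sess:
#           sess.run(v.initializer)
#           frozen = graph_util.convert_variables_to_constants(
#               sess, sess.graph.as_graph_def(), ["output_node"])
#   # "frozen" is a GraphDef in which the matched variables are now Const nodes.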
|
|
# (C) Datadog, Inc. 2013-2017
# (C) Brett Langdon <[email protected]> 2013
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
import re
import time
import urllib
import urlparse
from collections import defaultdict
# 3p
import requests
from requests.exceptions import RequestException
# project
from checks import AgentCheck
from config import _is_affirmative
EVENT_TYPE = SOURCE_TYPE_NAME = 'rabbitmq'
EXCHANGE_TYPE = 'exchanges'
QUEUE_TYPE = 'queues'
NODE_TYPE = 'nodes'
CONNECTION_TYPE = 'connections'
MAX_DETAILED_EXCHANGES = 50
MAX_DETAILED_QUEUES = 200
MAX_DETAILED_NODES = 100
# Post an event in the stream when the number of queues or nodes to
# collect is above 90% of the limit:
ALERT_THRESHOLD = 0.9
EXCHANGE_ATTRIBUTES = [
# Path, Name, Operation
('message_stats/ack', 'messages.ack.count', float),
('message_stats/ack_details/rate', 'messages.ack.rate', float),
('message_stats/confirm', 'messages.confirm.count', float),
('message_stats/confirm_details/rate', 'messages.confirm.rate', float),
('message_stats/deliver_get', 'messages.deliver_get.count', float),
('message_stats/deliver_get_details/rate', 'messages.deliver_get.rate', float),
('message_stats/publish', 'messages.publish.count', float),
('message_stats/publish_details/rate', 'messages.publish.rate', float),
('message_stats/publish_in', 'messages.publish_in.count', float),
('message_stats/publish_in_details/rate', 'messages.publish_in.rate', float),
('message_stats/publish_out', 'messages.publish_out.count', float),
('message_stats/publish_out_details/rate', 'messages.publish_out.rate', float),
('message_stats/return_unroutable', 'messages.return_unroutable.count', float),
('message_stats/return_unroutable_details/rate', 'messages.return_unroutable.rate', float),
('message_stats/redeliver', 'messages.redeliver.count', float),
('message_stats/redeliver_details/rate', 'messages.redeliver.rate', float),
]
QUEUE_ATTRIBUTES = [
# Path, Name, Operation
('active_consumers', 'active_consumers', float),
('consumers', 'consumers', float),
('consumer_utilisation', 'consumer_utilisation', float),
('memory', 'memory', float),
('messages', 'messages', float),
('messages_details/rate', 'messages.rate', float),
('messages_ready', 'messages_ready', float),
('messages_ready_details/rate', 'messages_ready.rate', float),
('messages_unacknowledged', 'messages_unacknowledged', float),
('messages_unacknowledged_details/rate', 'messages_unacknowledged.rate', float),
('message_stats/ack', 'messages.ack.count', float),
('message_stats/ack_details/rate', 'messages.ack.rate', float),
('message_stats/deliver', 'messages.deliver.count', float),
('message_stats/deliver_details/rate', 'messages.deliver.rate', float),
('message_stats/deliver_get', 'messages.deliver_get.count', float),
('message_stats/deliver_get_details/rate', 'messages.deliver_get.rate', float),
('message_stats/publish', 'messages.publish.count', float),
('message_stats/publish_details/rate', 'messages.publish.rate', float),
('message_stats/redeliver', 'messages.redeliver.count', float),
('message_stats/redeliver_details/rate', 'messages.redeliver.rate', float),
]
NODE_ATTRIBUTES = [
('fd_used', 'fd_used', float),
('disk_free', 'disk_free', float),
('mem_used', 'mem_used', float),
('run_queue', 'run_queue', float),
('sockets_used', 'sockets_used', float),
('partitions', 'partitions', len),
('running', 'running', float),
('mem_alarm', 'mem_alarm', float),
('disk_free_alarm', 'disk_alarm', float),
]
ATTRIBUTES = {
EXCHANGE_TYPE: EXCHANGE_ATTRIBUTES,
QUEUE_TYPE: QUEUE_ATTRIBUTES,
NODE_TYPE: NODE_ATTRIBUTES,
}
TAG_PREFIX = 'rabbitmq'
TAGS_MAP = {
EXCHANGE_TYPE: {
'name': 'exchange',
'vhost': 'vhost',
'exchange_family': 'exchange_family',
},
QUEUE_TYPE: {
'node': 'node',
'name': 'queue',
'vhost': 'vhost',
'policy': 'policy',
'queue_family': 'queue_family',
},
NODE_TYPE: {
'name': 'node',
}
}
METRIC_SUFFIX = {
EXCHANGE_TYPE: "exchange",
QUEUE_TYPE: "queue",
NODE_TYPE: "node",
}
class RabbitMQException(Exception):
pass
class RabbitMQ(AgentCheck):
"""This check is for gathering statistics from the RabbitMQ
Management Plugin (http://www.rabbitmq.com/management.html)
"""
def __init__(self, name, init_config, agentConfig, instances=None):
AgentCheck.__init__(self, name, init_config, agentConfig, instances)
self.already_alerted = []
self.cached_vhosts = {} # this is used to send CRITICAL rabbitmq.aliveness check if the server goes down
def _get_config(self, instance):
# make sure 'rabbitmq_api_url' is present and get parameters
base_url = instance.get('rabbitmq_api_url', None)
if not base_url:
raise Exception('Missing "rabbitmq_api_url" in RabbitMQ config.')
if not base_url.endswith('/'):
base_url += '/'
username = instance.get('rabbitmq_user', 'guest')
password = instance.get('rabbitmq_pass', 'guest')
custom_tags = instance.get('tags', [])
parsed_url = urlparse.urlparse(base_url)
if not parsed_url.scheme or "://" not in parsed_url.geturl():
self.log.warning('The rabbit url did not include a protocol, assuming http')
# urlparse.urljoin cannot add a protocol to the rest of the url for some reason.
# This still leaves the potential for errors, but such urls would never have been valid anyway,
# and it's not likely to be useful to attempt to catch all possible mistakes people could make.
# urlparse also has a known issue when parsing a url with no scheme but a port in the host
# section, mistakenly taking the host for the scheme, hence the additional validation.
base_url = 'http://' + base_url
parsed_url = urlparse.urlparse(base_url)
ssl_verify = _is_affirmative(instance.get('ssl_verify', True))
if not ssl_verify and parsed_url.scheme == 'https':
self.log.warning('Skipping SSL cert validation for %s based on configuration.' % (base_url))
# Limit of queues/nodes to collect metrics from
max_detailed = {
EXCHANGE_TYPE: int(instance.get('max_detailed_exchanges', MAX_DETAILED_EXCHANGES)),
QUEUE_TYPE: int(instance.get('max_detailed_queues', MAX_DETAILED_QUEUES)),
NODE_TYPE: int(instance.get('max_detailed_nodes', MAX_DETAILED_NODES)),
}
# List of queues/nodes to collect metrics from
specified = {
EXCHANGE_TYPE: {
'explicit': instance.get('exchanges', []),
'regexes': instance.get('exchanges_regexes', []),
},
QUEUE_TYPE: {
'explicit': instance.get('queues', []),
'regexes': instance.get('queues_regexes', []),
},
NODE_TYPE: {
'explicit': instance.get('nodes', []),
'regexes': instance.get('nodes_regexes', []),
},
}
for object_type, filters in specified.iteritems():
for filter_type, filter_objects in filters.iteritems():
if type(filter_objects) != list:
raise TypeError(
"{0} / {0}_regexes parameter must be a list".format(object_type))
auth = (username, password)
return base_url, max_detailed, specified, auth, ssl_verify, custom_tags
def _get_vhosts(self, instance, base_url, auth=None, ssl_verify=True):
vhosts = instance.get('vhosts')
if not vhosts:
# Fetch a list of _all_ vhosts from the API.
vhosts_url = urlparse.urljoin(base_url, 'vhosts')
vhost_proxy = self.get_instance_proxy(instance, vhosts_url)
vhosts_response = self._get_data(vhosts_url, auth=auth, ssl_verify=ssl_verify, proxies=vhost_proxy)
vhosts = [v['name'] for v in vhosts_response]
return vhosts
def check(self, instance):
base_url, max_detailed, specified, auth, ssl_verify, custom_tags = self._get_config(instance)
try:
vhosts = self._get_vhosts(instance, base_url, auth=auth, ssl_verify=ssl_verify)
self.cached_vhosts[base_url] = vhosts
limit_vhosts = []
if self._limit_vhosts(instance):
limit_vhosts = vhosts
# Generate metrics from the status API.
self.get_stats(instance, base_url, EXCHANGE_TYPE, max_detailed[EXCHANGE_TYPE], specified[EXCHANGE_TYPE],
limit_vhosts, custom_tags, auth=auth, ssl_verify=ssl_verify)
self.get_stats(instance, base_url, QUEUE_TYPE, max_detailed[QUEUE_TYPE], specified[QUEUE_TYPE],
limit_vhosts, custom_tags, auth=auth, ssl_verify=ssl_verify)
self.get_stats(instance, base_url, NODE_TYPE, max_detailed[NODE_TYPE], specified[NODE_TYPE],
limit_vhosts, custom_tags, auth=auth, ssl_verify=ssl_verify)
self.get_connections_stat(instance, base_url, CONNECTION_TYPE, vhosts, limit_vhosts, custom_tags,
auth=auth, ssl_verify=ssl_verify)
# Generate a service check from the aliveness API. In the case of an invalid response
# code or unparseable JSON this check will send no data.
self._check_aliveness(instance, base_url, vhosts, custom_tags, auth=auth, ssl_verify=ssl_verify)
# Generate a service check for the service status.
self.service_check('rabbitmq.status', AgentCheck.OK, custom_tags)
self._check_cluster_status(instance, base_url, custom_tags, auth=auth, ssl_verify=ssl_verify)
except RabbitMQException as e:
msg = "Error executing check: {}".format(e)
self.service_check('rabbitmq.status', AgentCheck.CRITICAL, custom_tags, message=msg)
self.log.error(msg)
# tag every vhost as CRITICAL, otherwise they would keep their latest value (OK) if the RabbitMQ server goes down
self.log.error("error while contacting rabbitmq (%s), setting aliveness to CRITICAL for vhosts: %s" % (base_url, self.cached_vhosts))
for vhost in self.cached_vhosts.get(base_url, []):
self.service_check('rabbitmq.aliveness', AgentCheck.CRITICAL, ['vhost:%s' % vhost] + custom_tags, message=u"Could not contact aliveness API")
def _get_data(self, url, auth=None, ssl_verify=True, proxies={}):
try:
r = requests.get(url, auth=auth, proxies=proxies, timeout=self.default_integration_http_timeout, verify=ssl_verify)
r.raise_for_status()
return r.json()
except RequestException as e:
raise RabbitMQException('Cannot open RabbitMQ API url: {} {}'.format(url, str(e)))
except ValueError as e:
raise RabbitMQException('Cannot parse JSON response from API url: {} {}'.format(url, str(e)))
def _filter_list(self, data, explicit_filters, regex_filters, object_type, tag_families):
if explicit_filters or regex_filters:
matching_lines = []
for data_line in data:
name = data_line.get("name")
if name in explicit_filters:
matching_lines.append(data_line)
explicit_filters.remove(name)
continue
match_found = False
for p in regex_filters:
match = re.search(p, name)
if match:
if _is_affirmative(tag_families) and match.groups():
if object_type == QUEUE_TYPE:
data_line["queue_family"] = match.groups()[0]
if object_type == EXCHANGE_TYPE:
data_line["exchange_family"] = match.groups()[0]
matching_lines.append(data_line)
match_found = True
break
if match_found:
continue
# Absolute names work only for queues and exchanges
if object_type != QUEUE_TYPE and object_type != EXCHANGE_TYPE:
continue
absolute_name = '%s/%s' % (data_line.get("vhost"), name)
if absolute_name in explicit_filters:
matching_lines.append(data_line)
explicit_filters.remove(absolute_name)
continue
for p in regex_filters:
match = re.search(p, absolute_name)
if match:
if _is_affirmative(tag_families) and match.groups():
if object_type == QUEUE_TYPE:
data_line["queue_family"] = match.groups()[0]
if object_type == EXCHANGE_TYPE:
data_line["exchange_family"] = match.groups()[0]
matching_lines.append(data_line)
match_found = True
break
if match_found:
continue
return matching_lines
return data
def _get_tags(self, data, object_type, custom_tags):
tags = []
tag_list = TAGS_MAP[object_type]
for t in tag_list:
tag = data.get(t)
if tag:
# FIXME 6.x: remove this suffix or unify (sc doesn't have it)
tags.append('%s_%s:%s' % (TAG_PREFIX, tag_list[t], tag))
return tags + custom_tags
def get_stats(self, instance, base_url, object_type, max_detailed, filters, limit_vhosts, custom_tags, auth=None, ssl_verify=True):
"""
instance: the check instance
base_url: the url of the rabbitmq management api (e.g. http://localhost:15672/api)
object_type: either QUEUE_TYPE or NODE_TYPE or EXCHANGE_TYPE
max_detailed: the limit of objects to collect for this type
filters: explicit or regexes filters of specified queues or nodes (specified in the yaml file)
"""
instance_proxy = self.get_instance_proxy(instance, base_url)
# Make a copy of this list as we will remove items from it at each
# iteration
explicit_filters = list(filters['explicit'])
regex_filters = filters['regexes']
data = []
# only do this if vhosts were specified,
# otherwise it'll just be making more queries for the same data
if self._limit_vhosts(instance) and object_type == QUEUE_TYPE:
for vhost in limit_vhosts:
url = '{}/{}'.format(object_type, urllib.quote_plus(vhost))
try:
data += self._get_data(urlparse.urljoin(base_url, url), auth=auth,
ssl_verify=ssl_verify, proxies=instance_proxy)
except Exception as e:
self.log.debug("Couldn't grab queue data from vhost, {}: {}".format(vhost, e))
else:
data = self._get_data(urlparse.urljoin(base_url, object_type), auth=auth,
ssl_verify=ssl_verify, proxies=instance_proxy)
""" data is a list of nodes or queues:
data = [
{'status': 'running', 'node': 'rabbit@host', 'name': 'queue1', 'consumers': 0, 'vhost': '/', 'backing_queue_status': {'q1': 0, 'q3': 0, 'q2': 0, 'q4': 0, 'avg_ack_egress_rate': 0.0, 'ram_msg_count': 0, 'ram_ack_count': 0, 'len': 0, 'persistent_count': 0, 'target_ram_count': 'infinity', 'next_seq_id': 0, 'delta': ['delta', 'undefined', 0, 'undefined'], 'pending_acks': 0, 'avg_ack_ingress_rate': 0.0, 'avg_egress_rate': 0.0, 'avg_ingress_rate': 0.0}, 'durable': True, 'idle_since': '2013-10-03 13:38:18', 'exclusive_consumer_tag': '', 'arguments': {}, 'memory': 10956, 'policy': '', 'auto_delete': False},
{'status': 'running', 'node': 'rabbit@host', 'name': 'queue10', 'consumers': 0, 'vhost': '/', 'backing_queue_status': {'q1': 0, 'q3': 0, 'q2': 0, 'q4': 0, 'avg_ack_egress_rate': 0.0, 'ram_msg_count': 0, 'ram_ack_count': 0, 'len': 0, 'persistent_count': 0, 'target_ram_count': 'infinity', 'next_seq_id': 0, 'delta': ['delta', 'undefined', 0, 'undefined'], 'pending_acks': 0, 'avg_ack_ingress_rate': 0.0, 'avg_egress_rate': 0.0, 'avg_ingress_rate': 0.0}, 'durable': True, 'idle_since': '2013-10-03 13:38:18', 'exclusive_consumer_tag': '', 'arguments': {}, 'memory': 10956, 'policy': '', 'auto_delete': False},
{'status': 'running', 'node': 'rabbit@host', 'name': 'queue11', 'consumers': 0, 'vhost': '/', 'backing_queue_status': {'q1': 0, 'q3': 0, 'q2': 0, 'q4': 0, 'avg_ack_egress_rate': 0.0, 'ram_msg_count': 0, 'ram_ack_count': 0, 'len': 0, 'persistent_count': 0, 'target_ram_count': 'infinity', 'next_seq_id': 0, 'delta': ['delta', 'undefined', 0, 'undefined'], 'pending_acks': 0, 'avg_ack_ingress_rate': 0.0, 'avg_egress_rate': 0.0, 'avg_ingress_rate': 0.0}, 'durable': True, 'idle_since': '2013-10-03 13:38:18', 'exclusive_consumer_tag': '', 'arguments': {}, 'memory': 10956, 'policy': '', 'auto_delete': False},
...
]
"""
if len(explicit_filters) > max_detailed:
raise Exception(
"The maximum number of %s you can specify is %d." % (object_type, max_detailed))
# a list of queues/nodes is specified. We process only those
data = self._filter_list(data, explicit_filters, regex_filters, object_type, instance.get("tag_families", False))
# if no filters are specified, check everything according to the limits
if len(data) > ALERT_THRESHOLD * max_detailed:
# Post a message on the dogweb stream to warn
self.alert(base_url, max_detailed, len(data), object_type, custom_tags)
if len(data) > max_detailed:
# Display a warning in the info page
self.warning(
"Too many items to fetch. You must choose the %s you are interested in by editing the rabbitmq.yaml configuration file or get in touch with Server Density Support" % object_type)
for data_line in data[:max_detailed]:
# We truncate the list if it's above the limit
self._get_metrics(data_line, object_type, custom_tags)
# get a list of the number of bindings on a given queue
# /api/queues/vhost/name/bindings
if object_type is QUEUE_TYPE:
self._get_queue_bindings_metrics(base_url, custom_tags, data, instance_proxy,
instance, object_type, auth, ssl_verify)
def _get_metrics(self, data, object_type, custom_tags):
tags = self._get_tags(data, object_type, custom_tags)
for attribute, metric_name, operation in ATTRIBUTES[object_type]:
# Walk down through the data path, e.g. foo/bar => d['foo']['bar']
root = data
keys = attribute.split('/')
for path in keys[:-1]:
root = root.get(path, {})
value = root.get(keys[-1], None)
if value is not None:
try:
self.gauge('rabbitmq.%s.%s' % (
METRIC_SUFFIX[object_type], metric_name), operation(value), tags=tags)
except ValueError:
self.log.debug("Caught ValueError for %s %s = %s with tags: %s" % (
METRIC_SUFFIX[object_type], attribute, value, tags))
def _get_queue_bindings_metrics(self, base_url, custom_tags, data, instance_proxy,
instance, object_type, auth=None, ssl_verify=True):
for item in data:
vhost = item['vhost']
tags = self._get_tags(item, object_type, custom_tags)
url = '{}/{}/{}/bindings'.format(QUEUE_TYPE, urllib.quote_plus(vhost), item['name'])
bindings_count = len(self._get_data(urlparse.urljoin(base_url, url), auth=auth,
ssl_verify=ssl_verify, proxies=instance_proxy))
self.gauge('rabbitmq.queue.bindings.count', bindings_count, tags)
def get_connections_stat(self, instance, base_url, object_type, vhosts, limit_vhosts, custom_tags, auth=None, ssl_verify=True):
"""
Collect metrics on currently open connection per vhost.
"""
instance_proxy = self.get_instance_proxy(instance, base_url)
grab_all_data = True
if self._limit_vhosts(instance):
grab_all_data = False
data = []
for vhost in vhosts:
url = "vhosts/{}/{}".format(urllib.quote_plus(vhost), object_type)
try:
data += self._get_data(urlparse.urljoin(base_url, url), auth=auth,
ssl_verify=ssl_verify, proxies=instance_proxy)
except Exception as e:
# This will happen if there is no connection data to grab
self.log.debug("Couldn't grab connection data from vhost, {}: {}".format(vhost, e))
# sometimes it seems to need to fall back to this
if grab_all_data or not len(data):
data = self._get_data(urlparse.urljoin(base_url, object_type), auth=auth,
ssl_verify=ssl_verify, proxies=instance_proxy)
stats = {vhost: 0 for vhost in vhosts}
connection_states = defaultdict(int)
for conn in data:
if conn['vhost'] in vhosts:
stats[conn['vhost']] += 1
# 'state' does not exist for direct type connections.
connection_states[conn.get('state', 'direct')] += 1
for vhost, nb_conn in stats.iteritems():
self.gauge('rabbitmq.connections', nb_conn, tags=['%s_vhost:%s' % (TAG_PREFIX, vhost)] + custom_tags)
for conn_state, nb_conn in connection_states.iteritems():
self.gauge('rabbitmq.connections.state', nb_conn, tags=['%s_conn_state:%s' % (TAG_PREFIX, conn_state)] + custom_tags)
def alert(self, base_url, max_detailed, size, object_type, custom_tags):
key = "%s%s" % (base_url, object_type)
if key in self.already_alerted:
# We have already posted an event
return
self.already_alerted.append(key)
title = "RabbitMQ integration is approaching the limit on the number of %s that can be collected from on %s" % (
object_type, self.hostname)
msg = """%s %s are present. The limit is %s.
Please get in touch with Server Density support to increase the limit.""" % (size, object_type, max_detailed)
event = {
"timestamp": int(time.time()),
"event_type": EVENT_TYPE,
"msg_title": title,
"msg_text": msg,
"alert_type": 'warning',
"source_type_name": SOURCE_TYPE_NAME,
"host": self.hostname,
"tags": ["base_url:%s" % base_url, "host:%s" % self.hostname] + custom_tags,
"event_object": "rabbitmq.limit.%s" % object_type,
}
self.event(event)
def _limit_vhosts(self, instance):
"""
Check to see if vhosts were specified in the instance
it will return a boolean, True if they were.
This allows the check to only query the wanted vhosts.
"""
vhosts = instance.get('vhosts', [])
return len(vhosts) > 0
def _check_aliveness(self, instance, base_url, vhosts, custom_tags, auth=None, ssl_verify=True):
"""
Check the aliveness API against all or a subset of vhosts. The API
will return {"status": "ok"} and a 200 response code in the case
that the check passes.
"""
for vhost in vhosts:
tags = ['vhost:%s' % vhost] + custom_tags
# We need to urlencode the vhost because it can be '/'.
path = u'aliveness-test/%s' % (urllib.quote_plus(vhost))
aliveness_url = urlparse.urljoin(base_url, path)
aliveness_proxy = self.get_instance_proxy(instance, aliveness_url)
aliveness_response = self._get_data(aliveness_url, auth=auth, ssl_verify=ssl_verify, proxies=aliveness_proxy)
message = u"Response from aliveness API: %s" % aliveness_response
if aliveness_response.get('status') == 'ok':
status = AgentCheck.OK
else:
status = AgentCheck.CRITICAL
self.service_check('rabbitmq.aliveness', status, tags, message=message)
def _check_cluster_status(self, instance, base_url, custom_tags, auth=None, ssl_verify=True):
"""
Check the cluster API for all running nodes. If any of the nodes
are not running it will return their number in the case that
the check fails.
"""
cluster_url = urlparse.urljoin(base_url, u'nodes')
cluster_proxy = self.get_instance_proxy(instance, cluster_url)
cluster_response = self._get_data(cluster_url, auth=auth, ssl_verify=ssl_verify, proxies=cluster_proxy)
running_nodes = 0
for node in cluster_response:
if node[u'running']:
running_nodes += 1
self.gauge(u'rabbitmq.running_nodes', running_nodes, tags=custom_tags)
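# Illustrative sketch (not part of the original check): a minimal instance
# configuration using the keys read by _get_config, _get_vhosts and get_stats
# above. The YAML layout and all values are placeholders.
#
#   instances:
#     - rabbitmq_api_url: http://localhost:15672/api/
#       rabbitmq_user: guest
#       rabbitmq_pass: guest
#       ssl_verify: true
#       vhosts: ['/']              # limit collection to these vhosts
#       queues: ['my_queue']       # explicit queue filter
#       queues_regexes: ['tmp-.*']
#       max_detailed_queues: 200
#       tag_families: true         # tag by the first regex capture group
#       tags: ['env:dev']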
|
|
# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Parts of this code were copied or derived from sample code supplied by AWS.
# The following notice applies to that code.
#
# This software code is made available "AS IS" without warranties of any
# kind. You may copy, display, modify and redistribute the software
# code either by itself or as incorporated into your code; provided that
# you do not remove any proprietary notices. Your use of this software
# code is at your own risk and you waive any claim against Amazon
# Digital Services, Inc. or its affiliates with respect to your use of
# this software code. (c) 2006 Amazon Digital Services, Inc. or its
# affiliates.
"""
Some handy utility functions used by several classes.
"""
import socket
import urllib
import urllib2
import imp
import subprocess
import StringIO
import time
import logging.handlers
import boto
import boto.provider
import tempfile
import smtplib
import datetime
import re
import email.mime.multipart
import email.mime.base
import email.mime.text
import email.utils
import email.encoders
import gzip
import base64
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
import hashlib
_hashfn = hashlib.sha512
except ImportError:
import md5
_hashfn = md5.md5
from boto.compat import json
# List of Query String Arguments of Interest
qsa_of_interest = ['acl', 'cors', 'defaultObjectAcl', 'location', 'logging',
'partNumber', 'policy', 'requestPayment', 'torrent',
'versioning', 'versionId', 'versions', 'website',
'uploads', 'uploadId', 'response-content-type',
'response-content-language', 'response-expires',
'response-cache-control', 'response-content-disposition',
'response-content-encoding', 'delete', 'lifecycle',
'tagging', 'restore',
# storageClass is a QSA for buckets in Google Cloud Storage.
# (StorageClass is associated to individual keys in S3, but
# having it listed here should cause no problems because
# GET bucket?storageClass is not part of the S3 API.)
'storageClass',
# websiteConfig is a QSA for buckets in Google Cloud Storage.
'websiteConfig']
_first_cap_regex = re.compile('(.)([A-Z][a-z]+)')
_number_cap_regex = re.compile('([a-z])([0-9]+)')
_end_cap_regex = re.compile('([a-z0-9])([A-Z])')
def unquote_v(nv):
if len(nv) == 1:
return nv
else:
return (nv[0], urllib.unquote(nv[1]))
def canonical_string(method, path, headers, expires=None,
provider=None):
"""
Generates the aws canonical string for the given parameters
"""
if not provider:
provider = boto.provider.get_default()
interesting_headers = {}
for key in headers:
lk = key.lower()
if headers[key] != None and (lk in ['content-md5', 'content-type', 'date'] or
lk.startswith(provider.header_prefix)):
interesting_headers[lk] = str(headers[key]).strip()
# these keys get empty strings if they don't exist
if 'content-type' not in interesting_headers:
interesting_headers['content-type'] = ''
if 'content-md5' not in interesting_headers:
interesting_headers['content-md5'] = ''
# just in case someone used this. it's not necessary in this lib.
if provider.date_header in interesting_headers:
interesting_headers['date'] = ''
# if you're using expires for query string auth, then it trumps date
# (and provider.date_header)
if expires:
interesting_headers['date'] = str(expires)
sorted_header_keys = sorted(interesting_headers.keys())
buf = "%s\n" % method
for key in sorted_header_keys:
val = interesting_headers[key]
if key.startswith(provider.header_prefix):
buf += "%s:%s\n" % (key, val)
else:
buf += "%s\n" % val
# don't include anything after the first ? in the resource...
# unless it is one of the QSA of interest, defined above
t = path.split('?')
buf += t[0]
if len(t) > 1:
qsa = t[1].split('&')
qsa = [a.split('=', 1) for a in qsa]
qsa = [unquote_v(a) for a in qsa if a[0] in qsa_of_interest]
if len(qsa) > 0:
qsa.sort(cmp=lambda x, y:cmp(x[0], y[0]))
qsa = ['='.join(a) for a in qsa]
buf += '?'
buf += '&'.join(qsa)
return buf
def merge_meta(headers, metadata, provider=None):
if not provider:
provider = boto.provider.get_default()
metadata_prefix = provider.metadata_prefix
final_headers = headers.copy()
for k in metadata.keys():
if k.lower() in ['cache-control', 'content-md5', 'content-type',
'content-encoding', 'content-disposition',
'date', 'expires']:
final_headers[k] = metadata[k]
else:
final_headers[metadata_prefix + k] = metadata[k]
return final_headers
def get_aws_metadata(headers, provider=None):
if not provider:
provider = boto.provider.get_default()
metadata_prefix = provider.metadata_prefix
metadata = {}
for hkey in headers.keys():
if hkey.lower().startswith(metadata_prefix):
val = urllib.unquote_plus(headers[hkey])
try:
metadata[hkey[len(metadata_prefix):]] = unicode(val, 'utf-8')
except UnicodeDecodeError:
metadata[hkey[len(metadata_prefix):]] = val
del headers[hkey]
return metadata
def retry_url(url, retry_on_404=True, num_retries=10):
"""
Retry a url. This is specifically used for accessing the metadata
service on an instance. Since this address should never be proxied
(for security reasons), we create a ProxyHandler with a NULL
dictionary to override any proxy settings in the environment.
"""
for i in range(0, num_retries):
try:
proxy_handler = urllib2.ProxyHandler({})
opener = urllib2.build_opener(proxy_handler)
req = urllib2.Request(url)
r = opener.open(req)
result = r.read()
return result
except urllib2.HTTPError, e:
# in 2.6 you use getcode(), in 2.5 and earlier you use code
if hasattr(e, 'getcode'):
code = e.getcode()
else:
code = e.code
if code == 404 and not retry_on_404:
return ''
except urllib2.URLError, e:
raise e
except Exception, e:
pass
boto.log.exception('Caught exception reading instance data')
time.sleep(2 ** i)
boto.log.error('Unable to read instance data, giving up')
return ''
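# Illustrative sketch (not part of the original module): retry_url is used below
# to read the EC2 metadata service, e.g.
#
#   >>> retry_url('http://169.254.169.254/latest/meta-data/', num_retries=3)
#   'ami-id\nhostname\ninstance-id\n...'
#
# The empty ProxyHandler above ensures the link-local address is never proxied.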
def _get_instance_metadata(url, num_retries):
return LazyLoadMetadata(url, num_retries)
class LazyLoadMetadata(dict):
def __init__(self, url, num_retries):
self._url = url
self._num_retries = num_retries
self._leaves = {}
self._dicts = []
data = boto.utils.retry_url(self._url, num_retries=self._num_retries)
if data:
fields = data.split('\n')
for field in fields:
if field.endswith('/'):
key = field[0:-1]
self._dicts.append(key)
else:
p = field.find('=')
if p > 0:
key = field[p + 1:]
resource = field[0:p] + '/openssh-key'
else:
key = resource = field
self._leaves[key] = resource
self[key] = None
def _materialize(self):
for key in self:
self[key]
def __getitem__(self, key):
if key not in self:
# allow dict to throw the KeyError
return super(LazyLoadMetadata, self).__getitem__(key)
# already loaded
val = super(LazyLoadMetadata, self).__getitem__(key)
if val is not None:
return val
if key in self._leaves:
resource = self._leaves[key]
val = boto.utils.retry_url(self._url + urllib.quote(resource,
safe="/:"),
num_retries=self._num_retries)
if val and val[0] == '{':
val = json.loads(val)
else:
p = val.find('\n')
if p > 0:
val = val.split('\n')
self[key] = val
elif key in self._dicts:
self[key] = LazyLoadMetadata(self._url + key + '/',
self._num_retries)
return super(LazyLoadMetadata, self).__getitem__(key)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def values(self):
self._materialize()
return super(LazyLoadMetadata, self).values()
def items(self):
self._materialize()
return super(LazyLoadMetadata, self).items()
def __str__(self):
self._materialize()
return super(LazyLoadMetadata, self).__str__()
def __repr__(self):
self._materialize()
return super(LazyLoadMetadata, self).__repr__()
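# Illustrative sketch (not part of the original module): LazyLoadMetadata only
# fetches a key's value from the metadata service when that key is first read.
#
#   >>> md = get_instance_metadata()   # returns a LazyLoadMetadata
#   >>> md.keys()                      # keys are known, values not yet fetched
#   >>> md['instance-id']              # triggers the HTTP fetch for this key
#   >>> md['public-keys']              # nested keys load another LazyLoadMetadata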
def get_instance_metadata(version='latest', url='http://169.254.169.254',
timeout=None, num_retries=5):
"""
Returns the instance metadata as a nested Python dictionary.
Simple values (e.g. local_hostname, hostname, etc.) will be
stored as string values. Values such as ancestor-ami-ids will
be stored in the dict as a list of string values. More complex
fields such as public-keys will be stored as nested dicts.
If the timeout is specified, the connection to the specified url
will time out after the specified number of seconds.
"""
if timeout is not None:
original = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
try:
return _get_instance_metadata('%s/%s/meta-data/' % (url, version),
num_retries=num_retries)
except urllib2.URLError, e:
return None
finally:
if timeout is not None:
socket.setdefaulttimeout(original)
def get_instance_identity(version='latest', url='http://169.254.169.254',
timeout=None, num_retries=5):
"""
Returns the instance identity as a nested Python dictionary.
"""
iid = {}
base_url = 'http://169.254.169.254/latest/dynamic/instance-identity'
if timeout is not None:
original = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
try:
data = retry_url(base_url, num_retries=num_retries)
fields = data.split('\n')
for field in fields:
val = retry_url(base_url + '/' + field + '/')
if val[0] == '{':
val = json.loads(val)
if field:
iid[field] = val
return iid
except urllib2.URLError, e:
return None
finally:
if timeout is not None:
socket.setdefaulttimeout(original)
def get_instance_userdata(version='latest', sep=None,
url='http://169.254.169.254'):
ud_url = '%s/%s/user-data' % (url, version)
user_data = retry_url(ud_url, retry_on_404=False)
if user_data:
if sep:
l = user_data.split(sep)
user_data = {}
for nvpair in l:
t = nvpair.split('=')
user_data[t[0].strip()] = t[1].strip()
return user_data
ISO8601 = '%Y-%m-%dT%H:%M:%SZ'
ISO8601_MS = '%Y-%m-%dT%H:%M:%S.%fZ'
def get_ts(ts=None):
if not ts:
ts = time.gmtime()
return time.strftime(ISO8601, ts)
def parse_ts(ts):
ts = ts.strip()
try:
dt = datetime.datetime.strptime(ts, ISO8601)
return dt
except ValueError:
dt = datetime.datetime.strptime(ts, ISO8601_MS)
return dt
def find_class(module_name, class_name=None):
if class_name:
module_name = "%s.%s" % (module_name, class_name)
modules = module_name.split('.')
c = None
try:
for m in modules[1:]:
if c:
c = getattr(c, m)
else:
c = getattr(__import__(".".join(modules[0:-1])), m)
return c
except:
return None
def update_dme(username, password, dme_id, ip_address):
"""
Update your Dynamic DNS record with DNSMadeEasy.com
"""
dme_url = 'https://www.dnsmadeeasy.com/servlet/updateip'
dme_url += '?username=%s&password=%s&id=%s&ip=%s'
s = urllib2.urlopen(dme_url % (username, password, dme_id, ip_address))
return s.read()
def fetch_file(uri, file=None, username=None, password=None):
"""
Fetch a file based on the URI provided. If you do not pass in a file pointer,
a tempfile.NamedTemporaryFile is used and returned; None is returned if the
file could not be retrieved.
The URI can be either an HTTP url, or "s3://bucket_name/key_name"
"""
boto.log.info('Fetching %s' % uri)
if file == None:
file = tempfile.NamedTemporaryFile()
try:
if uri.startswith('s3://'):
bucket_name, key_name = uri[len('s3://'):].split('/', 1)
c = boto.connect_s3(aws_access_key_id=username,
aws_secret_access_key=password)
bucket = c.get_bucket(bucket_name)
key = bucket.get_key(key_name)
key.get_contents_to_file(file)
else:
if username and password:
passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
passman.add_password(None, uri, username, password)
authhandler = urllib2.HTTPBasicAuthHandler(passman)
opener = urllib2.build_opener(authhandler)
urllib2.install_opener(opener)
s = urllib2.urlopen(uri)
file.write(s.read())
file.seek(0)
except:
boto.log.exception('Problem Retrieving file: %s' % uri)
file = None
return file
class ShellCommand(object):
def __init__(self, command, wait=True, fail_fast=False, cwd=None):
self.exit_code = 0
self.command = command
self.log_fp = StringIO.StringIO()
self.wait = wait
self.fail_fast = fail_fast
self.run(cwd=cwd)
def run(self, cwd=None):
boto.log.info('running:%s' % self.command)
self.process = subprocess.Popen(self.command, shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=cwd)
if(self.wait):
while self.process.poll() == None:
time.sleep(1)
t = self.process.communicate()
self.log_fp.write(t[0])
self.log_fp.write(t[1])
boto.log.info(self.log_fp.getvalue())
self.exit_code = self.process.returncode
if self.fail_fast and self.exit_code != 0:
raise Exception("Command " + self.command + " failed with status " + self.exit_code)
return self.exit_code
def setReadOnly(self, value):
raise AttributeError
def getStatus(self):
return self.exit_code
status = property(getStatus, setReadOnly, None, 'The exit code for the command')
def getOutput(self):
return self.log_fp.getvalue()
output = property(getOutput, setReadOnly, None, 'The STDOUT and STDERR output of the command')
class AuthSMTPHandler(logging.handlers.SMTPHandler):
"""
This class extends the SMTPHandler in the standard Python logging module
to accept a username and password on the constructor and to then use those
credentials to authenticate with the SMTP server. To use this, you could
add something like this in your boto config file:
[handler_hand07]
class=boto.utils.AuthSMTPHandler
level=WARN
formatter=form07
args=('localhost', 'username', 'password', 'from@abc', ['user1@abc', 'user2@xyz'], 'Logger Subject')
"""
def __init__(self, mailhost, username, password,
fromaddr, toaddrs, subject):
"""
Initialize the handler.
We have extended the constructor to accept a username/password
for SMTP authentication.
"""
logging.handlers.SMTPHandler.__init__(self, mailhost, fromaddr,
toaddrs, subject)
self.username = username
self.password = password
def emit(self, record):
"""
Emit a record.
Format the record and send it to the specified addressees.
It would be really nice if I could add authorization to this class
without having to resort to cut and paste inheritance but, no.
"""
try:
port = self.mailport
if not port:
port = smtplib.SMTP_PORT
smtp = smtplib.SMTP(self.mailhost, port)
smtp.login(self.username, self.password)
msg = self.format(record)
msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % (
self.fromaddr,
','.join(self.toaddrs),
self.getSubject(record),
email.utils.formatdate(), msg)
smtp.sendmail(self.fromaddr, self.toaddrs, msg)
smtp.quit()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
class LRUCache(dict):
"""A dictionary-like object that stores only a certain number of items, and
discards its least recently used item when full.
>>> cache = LRUCache(3)
>>> cache['A'] = 0
>>> cache['B'] = 1
>>> cache['C'] = 2
>>> len(cache)
3
>>> cache['A']
0
Adding new items to the cache does not increase its size. Instead, the least
recently used item is dropped:
>>> cache['D'] = 3
>>> len(cache)
3
>>> 'B' in cache
False
Iterating over the cache returns the keys, starting with the most recently
used:
>>> for key in cache:
... print key
D
A
C
This code is based on the LRUCache class from Genshi which is based on
`Myghty <http://www.myghty.org>`_'s LRUCache from ``myghtyutils.util``,
written by Mike Bayer and released under the MIT license (Genshi uses the
BSD License).
"""
class _Item(object):
def __init__(self, key, value):
self.previous = self.next = None
self.key = key
self.value = value
def __repr__(self):
return repr(self.value)
def __init__(self, capacity):
self._dict = dict()
self.capacity = capacity
self.head = None
self.tail = None
def __contains__(self, key):
return key in self._dict
def __iter__(self):
cur = self.head
while cur:
yield cur.key
cur = cur.next
def __len__(self):
return len(self._dict)
def __getitem__(self, key):
item = self._dict[key]
self._update_item(item)
return item.value
def __setitem__(self, key, value):
item = self._dict.get(key)
if item is None:
item = self._Item(key, value)
self._dict[key] = item
self._insert_item(item)
else:
item.value = value
self._update_item(item)
self._manage_size()
def __repr__(self):
return repr(self._dict)
def _insert_item(self, item):
item.previous = None
item.next = self.head
if self.head is not None:
self.head.previous = item
else:
self.tail = item
self.head = item
self._manage_size()
def _manage_size(self):
while len(self._dict) > self.capacity:
del self._dict[self.tail.key]
if self.tail != self.head:
self.tail = self.tail.previous
self.tail.next = None
else:
self.head = self.tail = None
def _update_item(self, item):
if self.head == item:
return
previous = item.previous
previous.next = item.next
if item.next is not None:
item.next.previous = previous
else:
self.tail = previous
item.previous = None
item.next = self.head
self.head.previous = self.head = item
class Password(object):
"""
Password object that stores itself as hashed.
Hash defaults to SHA512 if available, MD5 otherwise.
"""
hashfunc = _hashfn
def __init__(self, str=None, hashfunc=None):
"""
Load the string from an initial value, this should be the
raw hashed password.
"""
self.str = str
if hashfunc:
self.hashfunc = hashfunc
def set(self, value):
self.str = self.hashfunc(value).hexdigest()
def __str__(self):
return str(self.str)
def __eq__(self, other):
if other == None:
return False
return str(self.hashfunc(other).hexdigest()) == str(self.str)
def __len__(self):
if self.str:
return len(self.str)
else:
return 0
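# Illustrative sketch (not part of the original module): Password stores only
# the hex digest and compares plaintext against it.
#
#   >>> p = Password()
#   >>> p.set('secret')
#   >>> p == 'secret'
#   True
#   >>> len(p)   # length of the stored digest (128 with the SHA512 default)
#   128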
def notify(subject, body=None, html_body=None, to_string=None,
attachments=None, append_instance_id=True):
attachments = attachments or []
if append_instance_id:
subject = "[%s] %s" % (boto.config.get_value("Instance", "instance-id"), subject)
if not to_string:
to_string = boto.config.get_value('Notification', 'smtp_to', None)
if to_string:
try:
from_string = boto.config.get_value('Notification', 'smtp_from', 'boto')
msg = email.mime.multipart.MIMEMultipart()
msg['From'] = from_string
msg['Reply-To'] = from_string
msg['To'] = to_string
msg['Date'] = email.utils.formatdate(localtime=True)
msg['Subject'] = subject
if body:
msg.attach(email.mime.text.MIMEText(body))
if html_body:
part = email.mime.base.MIMEBase('text', 'html')
part.set_payload(html_body)
email.encoders.encode_base64(part)
msg.attach(part)
for part in attachments:
msg.attach(part)
smtp_host = boto.config.get_value('Notification', 'smtp_host', 'localhost')
# Alternate port support
if boto.config.get_value("Notification", "smtp_port"):
server = smtplib.SMTP(smtp_host, int(boto.config.get_value("Notification", "smtp_port")))
else:
server = smtplib.SMTP(smtp_host)
# TLS support
if boto.config.getbool("Notification", "smtp_tls"):
server.ehlo()
server.starttls()
server.ehlo()
smtp_user = boto.config.get_value('Notification', 'smtp_user', '')
smtp_pass = boto.config.get_value('Notification', 'smtp_pass', '')
if smtp_user:
server.login(smtp_user, smtp_pass)
server.sendmail(from_string, to_string, msg.as_string())
server.quit()
except:
boto.log.exception('notify failed')
def get_utf8_value(value):
if not isinstance(value, str) and not isinstance(value, unicode):
value = str(value)
if isinstance(value, unicode):
return value.encode('utf-8')
else:
return value
def mklist(value):
if not isinstance(value, list):
if isinstance(value, tuple):
value = list(value)
else:
value = [value]
return value
def pythonize_name(name):
"""Convert camel case to a "pythonic" name.
Examples::
pythonize_name('CamelCase') -> 'camel_case'
pythonize_name('already_pythonized') -> 'already_pythonized'
pythonize_name('HTTPRequest') -> 'http_request'
pythonize_name('HTTPStatus200Ok') -> 'http_status_200_ok'
pythonize_name('UPPER') -> 'upper'
pythonize_name('') -> ''
"""
s1 = _first_cap_regex.sub(r'\1_\2', name)
s2 = _number_cap_regex.sub(r'\1_\2', s1)
return _end_cap_regex.sub(r'\1_\2', s2).lower()
def write_mime_multipart(content, compress=False, deftype='text/plain', delimiter=':'):
"""Description:
:param content: A list of tuples of name-content pairs. This is used
instead of a dict to ensure that scripts run in order
:type list of tuples:
:param compress: Use gzip to compress the scripts, defaults to no compression
:type bool:
:param deftype: The type that should be assumed if nothing else can be figured out
:type str:
:param delimiter: mime delimiter
:type str:
:return: Final mime multipart
:rtype: str:
"""
wrapper = email.mime.multipart.MIMEMultipart()
for name, con in content:
definite_type = guess_mime_type(con, deftype)
maintype, subtype = definite_type.split('/', 1)
if maintype == 'text':
mime_con = email.mime.text.MIMEText(con, _subtype=subtype)
else:
mime_con = email.mime.base.MIMEBase(maintype, subtype)
mime_con.set_payload(con)
# Encode the payload using Base64
email.encoders.encode_base64(mime_con)
mime_con.add_header('Content-Disposition', 'attachment', filename=name)
wrapper.attach(mime_con)
rcontent = wrapper.as_string()
if compress:
buf = StringIO.StringIO()
gz = gzip.GzipFile(mode='wb', fileobj=buf)
try:
gz.write(rcontent)
finally:
gz.close()
rcontent = buf.getvalue()
return rcontent
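# Illustrative sketch (not part of the original module): content is a list of
# (name, text) pairs; each part is typed via guess_mime_type below. The part
# names and texts here are placeholders.
#
#   >>> parts = [('bootstrap.sh', '#!/bin/bash\necho hello'),
#   ...          ('config.txt', '#cloud-config\nhostname: demo')]
#   >>> mime = write_mime_multipart(parts)                 # multipart string
#   >>> gz = write_mime_multipart(parts, compress=True)    # gzip-compressed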
def guess_mime_type(content, deftype):
"""Description: Guess the mime type of a block of text
:param content: content we're finding the type of
:type str:
:param deftype: Default mime type
:type str:
:rtype: <type>:
:return: <description>
"""
#Mappings recognized by cloudinit
starts_with_mappings = {
'#include': 'text/x-include-url',
'#!': 'text/x-shellscript',
'#cloud-config': 'text/cloud-config',
'#upstart-job': 'text/upstart-job',
'#part-handler': 'text/part-handler',
'#cloud-boothook': 'text/cloud-boothook'
}
rtype = deftype
for possible_type, mimetype in starts_with_mappings.items():
if content.startswith(possible_type):
rtype = mimetype
break
return(rtype)
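# Illustrative sketch (not part of the original module):
#
#   >>> guess_mime_type('#!/bin/bash\necho hi', 'text/plain')
#   'text/x-shellscript'
#   >>> guess_mime_type('just some text', 'text/plain')
#   'text/plain'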
def compute_md5(fp, buf_size=8192, size=None):
"""
Compute MD5 hash on passed file and return results in a tuple of values.
:type fp: file
:param fp: File pointer to the file to MD5 hash. The file pointer
will be reset to its current location before the
method returns.
:type buf_size: integer
:param buf_size: Number of bytes per read request.
:type size: int
:param size: (optional) The maximum number of bytes to read from
the file pointer (fp). This is useful when uploading
a file in multiple parts where the file is being
split in place into different parts. Fewer bytes may
be available.
:rtype: tuple
:return: A tuple containing the hex digest version of the MD5 hash
as the first element, the base64 encoded version of the
plain digest as the second element and the data size as
the third element.
"""
return compute_hash(fp, buf_size, size, hash_algorithm=md5)
def compute_hash(fp, buf_size=8192, size=None, hash_algorithm=md5):
hash_obj = hash_algorithm()
spos = fp.tell()
if size and size < buf_size:
s = fp.read(size)
else:
s = fp.read(buf_size)
while s:
hash_obj.update(s)
if size:
size -= len(s)
if size <= 0:
break
if size and size < buf_size:
s = fp.read(size)
else:
s = fp.read(buf_size)
hex_digest = hash_obj.hexdigest()
base64_digest = base64.encodestring(hash_obj.digest())
if base64_digest[-1] == '\n':
base64_digest = base64_digest[0:-1]
# data_size based on bytes read.
data_size = fp.tell() - spos
fp.seek(spos)
return (hex_digest, base64_digest, data_size)
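# Usage sketch (illustrative): hashing an in-memory file object, using the
# StringIO module already used above in write_mime_multipart. The unpacked
# tuple mirrors the documented return value of compute_md5.
#   hex_md5, b64_md5, size = compute_md5(StringIO.StringIO('payload'))
#   # hex_md5 -> hex digest string, b64_md5 -> base64 digest, size -> 7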
|
|
import json
import locale
import logging
import os
import re
import string
from zipfile import ZipFile, ZipInfo
from tarfile import TarFile, TarInfo
from pyp2rpm import utils
logger = logging.getLogger(__name__)
def generator_to_list(fn):
"""This decorator is for flat_list function.
It converts returned generator to list.
"""
def wrapper(*args, **kw):
return list(fn(*args, **kw))
return wrapper
@generator_to_list
def flat_list(lst):
"""This function flatten given nested list.
Argument:
nested list
Returns:
flat list
"""
if isinstance(lst, list):
for item in lst:
for i in flat_list(item):
yield i
else:
yield lst
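# Example: thanks to the generator_to_list decorator, flat_list returns a plain list:
#   flat_list([1, [2, [3, 4]], 5]) -> [1, 2, 3, 4, 5]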
class Archive(object):
"""Class representing package archive. All the operations must be run using with statement.
For example:
archive = Archive('/spam/beans.egg')
with archive as a:
a.get_contents_of_file('spam.py')
"""
monkey_patched_zip = False
@classmethod
def monkey_patch_zip(cls):
if not cls.monkey_patched_zip:
# monkey patch ZipFile to behave like TarFile
ZipFile.getmembers = ZipFile.infolist
ZipFile.extractfile = ZipFile.open
ZipFile.open = ZipFile
ZipInfo.name = ZipInfo.filename
cls.monkey_patched_zip = True
def __init__(self, local_file):
self.file = local_file
self.name, self.suffix = os.path.splitext(local_file)
self.handle = None
self.monkey_patch_zip()
@property
def is_zip(self):
return self.suffix in ['.egg', '.zip', '.whl']
@property
def is_tar(self):
return self.suffix in ['.tar', '.gz', '.bz2']
@property
def is_egg(self):
return self.suffix == '.egg'
@property
def is_wheel(self):
return self.suffix == '.whl'
def open(self):
try:
self.handle = self.extractor_cls.open(self.file)
except BaseException:
self.handle = None
logger.error('Failed to open archive: {0}.'.format(self.file), exc_info=True)
return self
def close(self):
if self.handle:
self.handle.close()
def __enter__(self):
return self.open()
def __exit__(self, type, value, traceback): # TODO: handle exceptions here
self.close()
@property
def extractor_cls(self):
"""Returns the class that can read this archive based on archive suffix.
Returns:
Class that can read this archive, or None if no such class exists.
"""
file_cls = None
# only catches ".gz", even from ".tar.gz"
if self.is_tar:
file_cls = TarFile
elif self.is_zip:
file_cls = ZipFile
else:
logger.info("Couldn't recognize archive suffix: {0}.".format(self.suffix))
return file_cls
@utils.memoize_by_args
def get_content_of_file(self, name, full_path=False): # TODO: log if file can't be opened
"""Returns content of file from archive.
If full_path is set to False and two files with given name exist,
content of one is returned (it is not specified which one that is).
If set to True, returns content of exactly that file.
Args:
name: name of the file to get content of
Returns:
Content of the file with the given name, or None if no such file exists.
"""
if self.handle:
for member in self.handle.getmembers():
if (full_path and member.name == name)\
or (not full_path and os.path.basename(member.name) == name):
extracted = self.handle.extractfile(member)
return extracted.read().decode(locale.getpreferredencoding())
return None
def has_file_with_suffix(self, suffixes): # TODO: maybe implement this using get_files_re
"""Finds out if there is a file with one of suffixes in the archive.
Args:
suffixes: list of suffixes or single suffix to look for
Returns:
True if there is at least one file with at least one given suffix in the archive,
False otherwise (or if the archive can't be opened)
"""
if not isinstance(suffixes, list):
suffixes = [suffixes]
if self.handle:
for member in self.handle.getmembers():
if os.path.splitext(member.name)[1] in suffixes:
return True
else:
# hack for .zip files, where directories are not returned
# themselves, therefore we can't find e.g. .egg-info
for suffix in suffixes:
if '{0}/'.format(suffix) in member.name:
return True
return False
def get_files_re(self, file_re, full_path=False, ignorecase=False):
"""Finds all files that match file_re and returns their list.
Doesn't return directories, only files.
Args:
file_re: raw string to match files against (gets compiled into re)
full_path: whether to match against full path inside the archive or just the filenames
ignorecase: whether to ignore case when using the given re
Returns:
List of full paths of files inside the archive that match the given file_re.
"""
if ignorecase:
compiled_re = re.compile(file_re, re.I)
else:
compiled_re = re.compile(file_re)
found = []
if self.handle:
for member in self.handle.getmembers():
if isinstance(member, TarInfo) and member.isdir():
pass # for TarInfo files, filter out directories
elif (full_path and compiled_re.search(member.name))\
or (not full_path and compiled_re.search(os.path.basename(member.name))):
found.append(member.name)
return found
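# Usage sketch (illustrative): inside a "with Archive('/spam/beans.egg') as a:"
# block, a.get_files_re(r'\.py$') returns the full in-archive paths of all
# files whose basename ends with ".py"; pass full_path=True to match the
# whole path instead.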
def get_directories_re(self, directory_re, full_path=False, ignorecase=False):
"""Same as get_files_re, but for directories"""
if ignorecase:
compiled_re = re.compile(directory_re, re.I)
else:
compiled_re = re.compile(directory_re)
found = set()
if self.handle:
for member in self.handle.getmembers():
if isinstance(member, ZipInfo): # zipfiles only list directories => have to work around that
to_match = os.path.dirname(member.name)
elif isinstance(member, TarInfo) and member.isdir(): # tarfiles => only match directories
to_match = member.name
else:
to_match = None
if to_match:
if (full_path and compiled_re.search(to_match)) or (not full_path and compiled_re.search(os.path.basename(to_match))):
found.add(to_match)
return list(found)
def find_list_argument(self, setup_argument):
"""A simple method that gets setup() function from setup.py list argument
like install_requires.
Will not work in all cases and might need a smarter approach.
On the other hand, it's so stupid that it's actually smart - it gets this:
'console_scripts': [
'xtermcolor = xtermcolor.Main:Cli'
]
as 'scripts', which is very nice :)
Args:
setup_argument: name of the argument of setup() function to get value of
Returns:
The requested setup() argument, or an empty list if setup.py can't be opened (or the argument is not present).
"""
argument = []
cont = False
setup_cfg = self.get_content_of_file('setup.cfg')
if setup_cfg:
argument_re = re.compile(r'\b' + format(setup_argument) + '\s*=')
for line in setup_cfg.splitlines():
if line.find("#") != -1:
line = line.split("#")[0]
if argument_re.search(line):
args = line.split("=")
if len(args) > 1:
argument.append(args[1])
cont = True
continue
if cont and len(line) and line[0] in string.whitespace:
argument.append(line.strip())
continue
if cont:
return argument
setup_py = self.get_content_of_file(
'setup.py') # TODO: construct absolute path here?
if not setup_py:
return []
start_braces = end_braces = 0
cont = False
for line in setup_py.splitlines():
if setup_argument in line or cont:
if line.find("#") != -1:
line = line.split("#")[0]
start_braces += line.count('[')
end_braces += line.count(']')
cont = True
argument.append(line)
if start_braces == end_braces:
break
if not argument or start_braces == 0:
return []
else:
argument[0] = argument[0][argument[0].find('['):]
argument[-1] = argument[-1][:argument[-1].rfind(']') + 1]
argument[-1] = argument[-1].rstrip().rstrip(',')
try:
return flat_list(eval(' '.join(argument).strip()))
except: # something unparsable in the list - different errors can come out - function undefined, syntax error, ...
logger.warn('Something unparsable in the list.', exc_info=True)
return []
def has_argument(self, argument):
"""A simple method that finds out if setup() function from setup.py
is called with given argument.
Args:
argument: argument to look for
Returns:
True if argument is used, False otherwise
"""
setup_cfg = self.get_content_of_file('setup.cfg')
if setup_cfg:
argument_re = re.compile(r'\b' + format(argument) + '\s*=')
if argument_re.search(setup_cfg):
return True
setup_py = self.get_content_of_file('setup.py')
if not setup_py:
return False
argument_re = re.compile(
r'setup\(.*(?<!\w){0}.*\)'.format(argument), re.DOTALL)
return True if argument_re.search(setup_py) else False
@property
def json_wheel_metadata(self):
"""Simple getter that get content of pydist.json file in .whl archive
Returns:
metadata from pydist.json in json format
"""
return json.loads(self.get_content_of_file('pydist.json'))
@property
def record(self):
"""Getter that get content of RECORD file in .whl archive
Returns:
dict with keys `modules` and `scripts`
"""
modules = []
scripts = []
if self.get_content_of_file('RECORD'):
lines = self.get_content_of_file('RECORD').splitlines()
for line in lines:
if 'dist-info' in line:
continue
elif '.data/scripts' in line:
script = line.split(',', 1)[0]
# strip Name.version.data/scripts/
scripts.append(re.sub('.*/.*/', '', script))
else:
# strip everything from the first occurrence of a slash
modules.append(re.sub('/.*', '', line))
return {'modules': set(modules), 'scripts': set(scripts)}
|
|
import calendar
import time
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple
from django.conf import settings
from django.http import HttpRequest
from django.utils import translation
from two_factor.utils import default_device
from zerver.context_processors import get_apps_page_url
from zerver.lib.events import do_events_register
from zerver.lib.i18n import (
get_and_set_request_language,
get_language_list,
get_language_translation_data,
)
from zerver.lib.realm_description import get_realm_rendered_description
from zerver.lib.request import RequestNotes
from zerver.models import Message, Realm, Stream, UserProfile
from zerver.views.message_flags import get_latest_update_message_flag_activity
@dataclass
class BillingInfo:
show_billing: bool
show_plans: bool
@dataclass
class UserPermissionInfo:
color_scheme: int
is_guest: bool
is_realm_admin: bool
is_realm_owner: bool
show_webathena: bool
def get_furthest_read_time(user_profile: Optional[UserProfile]) -> Optional[float]:
if user_profile is None:
return time.time()
user_activity = get_latest_update_message_flag_activity(user_profile)
if user_activity is None:
return None
return calendar.timegm(user_activity.last_visit.utctimetuple())
def get_bot_types(user_profile: Optional[UserProfile]) -> List[Dict[str, object]]:
bot_types: List[Dict[str, object]] = []
if user_profile is None:
return bot_types
for type_id, name in UserProfile.BOT_TYPES.items():
bot_types.append(
dict(
type_id=type_id,
name=name,
allowed=type_id in user_profile.allowed_bot_types,
)
)
return bot_types
def promote_sponsoring_zulip_in_realm(realm: Realm) -> bool:
if not settings.PROMOTE_SPONSORING_ZULIP:
return False
# If PROMOTE_SPONSORING_ZULIP is enabled, advertise sponsoring
# Zulip in the gear menu of non-paying organizations.
return realm.plan_type in [Realm.PLAN_TYPE_STANDARD_FREE, Realm.PLAN_TYPE_SELF_HOSTED]
def get_billing_info(user_profile: Optional[UserProfile]) -> BillingInfo:
show_billing = False
show_plans = False
if settings.CORPORATE_ENABLED and user_profile is not None:
if user_profile.has_billing_access:
from corporate.models import CustomerPlan, get_customer_by_realm
customer = get_customer_by_realm(user_profile.realm)
if customer is not None:
if customer.sponsorship_pending:
show_billing = True
elif CustomerPlan.objects.filter(customer=customer).exists():
show_billing = True
if not user_profile.is_guest and user_profile.realm.plan_type == Realm.PLAN_TYPE_LIMITED:
show_plans = True
return BillingInfo(
show_billing=show_billing,
show_plans=show_plans,
)
def get_user_permission_info(user_profile: Optional[UserProfile]) -> UserPermissionInfo:
if user_profile is not None:
return UserPermissionInfo(
color_scheme=user_profile.color_scheme,
is_guest=user_profile.is_guest,
is_realm_owner=user_profile.is_realm_owner,
is_realm_admin=user_profile.is_realm_admin,
show_webathena=user_profile.realm.webathena_enabled,
)
else:
return UserPermissionInfo(
color_scheme=UserProfile.COLOR_SCHEME_AUTOMATIC,
is_guest=False,
is_realm_admin=False,
is_realm_owner=False,
show_webathena=False,
)
def build_page_params_for_home_page_load(
request: HttpRequest,
user_profile: Optional[UserProfile],
realm: Realm,
insecure_desktop_app: bool,
narrow: List[List[str]],
narrow_stream: Optional[Stream],
narrow_topic: Optional[str],
first_in_realm: bool,
prompt_for_invites: bool,
needs_tutorial: bool,
) -> Tuple[int, Dict[str, Any]]:
"""
This function computes page_params for when we load the home page.
The page_params data structure gets sent to the client.
"""
client_capabilities = {
"notification_settings_null": True,
"bulk_message_deletion": True,
"user_avatar_url_field_optional": True,
"stream_typing_notifications": False, # Set this to True when frontend support is implemented.
"user_settings_object": True,
}
if user_profile is not None:
client = RequestNotes.get_notes(request).client
assert client is not None
register_ret = do_events_register(
user_profile,
client,
apply_markdown=True,
client_gravatar=True,
slim_presence=True,
client_capabilities=client_capabilities,
narrow=narrow,
include_streams=False,
)
else:
# Since events for spectators are not implemented, we only fetch the data
# at the time of the request and don't register for any events.
# TODO: Implement events for spectators.
from zerver.lib.events import fetch_initial_state_data, post_process_state
register_ret = fetch_initial_state_data(
user_profile,
realm=realm,
event_types=None,
queue_id=None,
client_gravatar=False,
user_avatar_url_field_optional=client_capabilities["user_avatar_url_field_optional"],
user_settings_object=client_capabilities["user_settings_object"],
slim_presence=False,
include_subscribers=False,
include_streams=False,
)
post_process_state(user_profile, register_ret, False)
furthest_read_time = get_furthest_read_time(user_profile)
request_language = get_and_set_request_language(
request,
register_ret["user_settings"]["default_language"],
translation.get_language_from_path(request.path_info),
)
two_fa_enabled = settings.TWO_FACTOR_AUTHENTICATION_ENABLED and user_profile is not None
billing_info = get_billing_info(user_profile)
user_permission_info = get_user_permission_info(user_profile)
# Pass parameters to the client-side JavaScript code.
# These end up in a JavaScript Object named 'page_params'.
page_params = dict(
## Server settings.
test_suite=settings.TEST_SUITE,
insecure_desktop_app=insecure_desktop_app,
login_page=settings.HOME_NOT_LOGGED_IN,
warn_no_email=settings.WARN_NO_EMAIL,
search_pills_enabled=settings.SEARCH_PILLS_ENABLED,
# Only show marketing email settings if on Zulip Cloud
corporate_enabled=settings.CORPORATE_ENABLED,
## Misc. extra data.
language_list=get_language_list(),
needs_tutorial=needs_tutorial,
first_in_realm=first_in_realm,
prompt_for_invites=prompt_for_invites,
furthest_read_time=furthest_read_time,
bot_types=get_bot_types(user_profile),
two_fa_enabled=two_fa_enabled,
apps_page_url=get_apps_page_url(),
show_billing=billing_info.show_billing,
promote_sponsoring_zulip=promote_sponsoring_zulip_in_realm(realm),
show_plans=billing_info.show_plans,
show_webathena=user_permission_info.show_webathena,
# Adding two_fa_enabled as condition saves us 3 queries when
# 2FA is not enabled.
two_fa_enabled_user=two_fa_enabled and bool(default_device(user_profile)),
is_spectator=user_profile is None,
# There is no event queue for spectators since
# event support for spectators is not implemented yet.
no_event_queue=user_profile is None,
)
for field_name in register_ret.keys():
page_params[field_name] = register_ret[field_name]
if narrow_stream is not None:
# In narrow_stream context, initial pointer is just latest message
recipient = narrow_stream.recipient
try:
max_message_id = (
Message.objects.filter(recipient=recipient).order_by("id").reverse()[0].id
)
except IndexError:
max_message_id = -1
page_params["narrow_stream"] = narrow_stream.name
if narrow_topic is not None:
page_params["narrow_topic"] = narrow_topic
page_params["narrow"] = [dict(operator=term[0], operand=term[1]) for term in narrow]
page_params["max_message_id"] = max_message_id
assert isinstance(page_params["user_settings"], dict)
page_params["user_settings"]["enable_desktop_notifications"] = False
page_params["translation_data"] = get_language_translation_data(request_language)
if user_profile is None:
# Get rendered version of realm description which is displayed in right
# sidebar for spectator.
page_params["realm_description"] = get_realm_rendered_description(realm)
return register_ret["queue_id"], page_params
|
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import logging
import os
import re
from datetime import datetime
import cPickle
import tempfile
from contextlib import closing
from string import Template
import wsgiref.util
import cgi
from urllib import quote_plus, unquote_plus
import hashlib
import random
class RequestContext(object):
def __init__(self, environ, start_response):
self.environ = environ
self.response = start_response
app_uri = wsgiref.util.application_uri(environ)
self.app_uri = app_uri if not app_uri.endswith('/') \
else app_uri[0:len(app_uri) - 1]
self.path = self.environ.get('PATH_INFO', '/')
self.method = self.environ['REQUEST_METHOD'].upper()
class Blog(object):
_statuses = {404: '404 Not Found', 200: '200 OK', 303: '303 See Other',
400: '400 Bad Request'}
_tpl_header = Template('<!DOCTYPE html>\n'
'<html xmlns="http://www.w3.org/1999/html">\n'
'<head lang="en">\n'
'\t<meta charset="${encoding}"/>\n'
'\t<link rel="stylesheet" href="${base}/styles.css" '
'type="text/css" media="screen"/>\n'
'\t<script type="text/javascript" '
'src="${base}/script.js"></script>\n'
'\t<link type="application/atom+xml" rel="alternate"'
' title="${title}" href="${feed_url}" />\n'
'\t<title>${title}</title>\n'
'</head>\n'
'<body${body_tag}>\n'
'\t<header>\n'
'\t\t<h1>${title}</h1>\n'
'\t</header>\n'
'\t<main>\n')
_tpl_link = Template('<a href="${link}">${title}</a>')
_tpl_link_wth_cls = Template('<a href="${link}" class="${cls}">${title}'
'</a>')
_tpl_entries_begin = '\t\t<section>\n'
_tpl_entry = Template('\t\t\t<article>\n'
'\t\t\t<header>\n'
'\t\t\t\t<h2>${title}</h2></header>\n'
'\t\t\t<time>${time}</time>\n'
'\t\t\t<p>${text}</p>\n'
'\t\t\t<footer>\n'
'\t\t\t\t<div>posted in ${categories}</div>\n'
'\t\t\t\t<div>${comments}</div>\n'
'\t\t\t</footer>\n'
'\t\t\t</article>\n')
_tpl_view_full = Template('<a href="#">View full post →</a>')
_tpl_entries_end = '\t\t</section>\n'
_tpl_aside = Template('\t\t<aside>\n'
'\t\t\t<nav><h2>Categories</h2>\n'
'\t\t\t\t<ul>\n'
'${categories}'
'\t\t\t\t</ul>\n'
'\t\t\t</nav>\n'
'\t\t\t<nav><h2>Archive</h2>\n'
'\t\t\t\t<ul>\n'
'${archive}'
'\t\t\t\t</ul>\n'
'\t\t\t</nav>\n'
'\t\t</aside>\n')
_tpl_aside_entry = Template('\t\t\t\t\t<li><a href="${link}">${title}</a>'
'</li>\n')
_tpl_footer = Template('\t</main>\n'
'\t<footer>\n'
'\t\t<nav>${links}</nav>\n'
'\t</footer>\n'
'</body>\n'
'</html>\n')
_tpl_post = Template('\t\t\t<article>\n'
'\t\t\t\t<header><h2>${title}</h2></header>\n'
'\t\t\t\t<time>${time}</time>\n'
'\t\t\t\t<p>${text}</p>\n'
'\t\t\t\t<footer>\n'
'\t\t\t\t<div>posted in ${categories}</div>\n'
'\t\t\t\t<div class="comments" id="comments">\n'
'\t\t\t\t<header><h3>${comments_title}</h3>'
'<a onclick="toggleReplyForm(\'reply-form\');'
'return false;" href="#comments">Reply</a></header>\n'
'\t\t\t\t<div class="reply-wrapper" id="reply-form" '
'style="display:none;">\n'
'\t\t\t\t<form method="post" class="reply-form"'
'action="${reply_url}">\n'
'\t\t\t\t<div><input type="text" class="cobweb" '
'name="cobweb" placeholder="Please, paste ${token} '
'value into this field" value=""/></div>\n'
'\t\t\t\t<div><input name="email" '
'type="text" placeholder="Email" value=""/></div>\n'
'\t\t\t\t<div><input name="name" '
'type="text" placeholder="Name" value=""/></div>\n'
'\t\t\t\t<div><textarea rows="4" placeholder="Comment"'
' name="comment"></textarea></div>\n'
'\t\t\t\t<div><input type="submit" value="Send"/>'
'</div>\n'
'\t\t\t\t</form>\n'
'\t\t\t\t</div>\n'
'\t\t\t\t\t${comments}\n'
'\t\t\t\t</div>\n'
'\t\t\t\t</footer>\n'
'\t\t\t</article>\n')
_tpl_comment = Template('\t\t\t\t<div class="comment">\n'
'\t\t\t\t<div class="comment_body">\n'
'\t\t\t\t<header><h3>${name}</h3>'
'<time>${time}</time><span class="delete">'
'${delete_url}</span></header>\n'
'\t\t\t\t<p>${comment}</p>\n'
'\t\t\t\t<footer><a href="#" '
'onclick="toggleReplyForm(\'reply-form-${id}\');'
'return false;">Reply</a></footer>\n'
'\t\t\t\t<div class="reply-wrapper" '
'id="reply-form-${id}" style="display:none;">\n'
'\t\t\t\t<form method="post" class="reply-form"'
'action="${reply_url}">\n'
'\t\t\t\t<div><input type="text" class="cobweb" '
'name="cobweb" placeholder="Please, paste ${token} '
'value into this field" value=""/></div>\n'
'\t\t\t\t<input name="comment_no" type="hidden" '
'value="${id}"/>\n'
'\t\t\t\t<div><input name="email" '
'type="text" placeholder="Email" value=""/></div>\n'
'\t\t\t\t<div><input name="name" '
'type="text" placeholder="Name" value=""/></div>\n'
'\t\t\t\t<div><textarea rows="4" '
'placeholder="Comment" name="comment"></textarea>'
'</div>\n'
'\t\t\t\t<div><input type="submit" value="Send"/>'
'</div>\n'
'\t\t\t\t</form>\n'
'\t\t\t\t</div>\n'
'\t\t\t\t</div>\n'
'\t\t\t\t<div class="reply_comments">${comments}'
'</div>\n'
'\t\t\t\t</div>\n')
_tpl_delete_comment = Template('\t\t\t<form method="post" '
'action="${url}/${ids}">\n'
'\t\t\t\t<input name="password" '
'type="password" placeholder="Password"/>\n'
'\t\t\t\t<input type="submit" '
'value="Delete"/>\n'
'\t\t\t</form>\n')
_tpl_feed_begin = Template('<?xml version="1.0" encoding="${encoding}"?>\n'
'<feed xmlns="http://www.w3.org/2005/Atom">\n'
'\t<title>${title}</title>\n'
'\t<link rel="self" type="text/xml" '
'href="${self_url}"/>\n'
'\t<link type="text/html" rel="alternate" '
'href="${url}"/>\n'
'\t<updated>${updated}</updated>\n'
'\t<author><name>${author}</name></author>\n'
'\t<id>urn:${id}</id>\n')
_tpl_feed_entry = Template('\t<entry>\n'
'\t\t<id>urn:${id}</id>\n'
'\t\t<title>${title}</title>\n'
'\t\t<link type="text/html" rel="alternate" '
'href="${url}"/>\n'
'${categories}'
'\t\t<updated>${updated}</updated>\n'
'\t\t<content type="text/html">${content}'
'</content>\n'
'\t</entry>\n')
_tpl_feed_category = Template('\t\t<category term="${category}"/>\n')
_tpl_feed_end = '</feed>'
def __init__(self):
self._encoding = 'UTF-8'
script_path, _ = os.path.split(os.path.realpath(__file__))
conf = dict()
conf_path = os.path.join(script_path, 'index.conf')
try:
execfile(conf_path, conf)
except IOError:
print 'I wasn\'t able to read configuration file [%s]. Default ' \
'settings will be used' % conf_path
logging.basicConfig(level=logging.DEBUG)
self._logger = logging.getLogger(__name__)
self.entries_dir = conf.get('entries_path', os.path.join(script_path,
'entries'))
self.indices_dir = conf.get('indices_path', os.path.join(script_path,
'indices'))
self.comments_dir = conf.get('comments_path', os.path.join(script_path,
'comments'))
self.file_name_sep = conf.get('file_name_separator', '-')
self.title = conf.get('title', '')
try:
self.items_per_page = int(conf.get('items_per_page', 7))
except ValueError:
self.items_per_page = 7
try:
self.items_per_feed = int(conf.get('items_per_feed', 7))
except ValueError:
self.items_per_feed = 7
try:
self.comments_nesting = int(conf.get('comments_nesting', 7))
except ValueError:
self.comments_nesting = 7
self.index = self._try_main_index(os.path.join(self.indices_dir,
'main.index'))
self.author = conf.get('author', 'anonymous')
self.categories = self.list_categories()
self.archive = self.list_archive()
password = conf.get('password')
if password:
m = hashlib.md5()
m.update(password)
self.password = m.digest()
else:
self.password = None
self._salt = conf.get('salt', ''.join(random.choice('0123456789ABCDEF')
for _ in range(16)))
def _serialize_object(self, obj, file_path, force=False):
tmp_fd, tmp_path = tempfile.mkstemp(dir=self.indices_dir)
with closing(os.fdopen(tmp_fd, 'wb')) as tmp_file:
cPickle.dump(obj, tmp_file, protocol=cPickle.HIGHEST_PROTOCOL)
try:
if not os.path.exists(file_path) or force:
os.rename(tmp_path, file_path)
self._logger.debug('an object was serialized into file [%s]',
file_path)
finally:
if os.path.exists(tmp_path):
os.remove(tmp_path)
def _try_main_index(self, main_index_path):
if not os.path.exists(main_index_path):
return self._create_main_index(main_index_path)
else:
with open(main_index_path, 'rb') as f:
return cPickle.load(f)
def _create_main_index(self, main_index_path):
entries = list()
re_file_name = re.compile('^(.+)' + self.file_name_sep +
'(\d{4}-\d{2}-\d{2})\.txt$')
for file_name in os.listdir(self.entries_dir):
if os.path.isfile(os.path.join(self.entries_dir, file_name)):
matched = re_file_name.match(file_name)
if matched:
try:
date = datetime.strptime(matched.group(2), '%Y-%m-%d')
entries.append((date, matched.group(1),
self._read_categories(file_name)))
except ValueError:
continue
entries.sort(reverse=True, key=lambda entry: (entry[0], entry[1]))
self._serialize_object(entries, main_index_path)
return entries
def _read_categories(self, file_name):
re_categories = re.compile('^categories:([^,]+)(?:,([^,]+))*$')
categories = set()
with open(os.path.join(self.entries_dir, file_name), mode='r') as f:
for line in f:
matched = re_categories.match(line)
if matched:
for category in matched.groups():
categories.add(category.strip())
break
return categories
def status(self, rc, code, response):
rc.response(self._statuses[code], [('Content-Type',
'text/plain; charset=%s' %
self._encoding)])
return response
def redirect(self, rc, url):
rc.response(self._statuses[303], [('Location', rc.app_uri + url)])
def list_file_names(self):
main_index_path = os.path.join(self.indices_dir, 'main.index')
if not os.path.exists(main_index_path):
self._create_main_index(main_index_path)
with open(main_index_path, 'rb') as f:
return cPickle.load(f)
def filter_entries(self, category, archive):
if category:
return [entry for entry in self.index if category in entry[2]]
elif archive:
archive = datetime.strptime(archive, '%Y-%m')
return [entry for entry in self.index if (archive.year,
archive.month) ==
(entry[0].year,
entry[0].month)]
return self.index
def read_post(self, entry):
date, pid, cats = entry
post = dict()
post['date'] = date
post['id'] = pid
with open(os.path.join(self.entries_dir,
self.build_file_name(entry))) as f:
preview, full = False, False
for line in f:
if line.startswith('categories:') and 'categories' not in post:
post['categories'] = [category.strip() for category in
line.lstrip('categories:').split(",")]
preview, full = False, False
elif line.startswith('title:') and 'title' not in post:
post['title'] = line.lstrip('title:').strip()
preview, full = False, False
elif line.startswith('preview:') and not preview \
and 'preview' not in post:
preview, full = True, False
post['preview'] = line.lstrip('preview:').lstrip()
elif line.startswith('full:') and not full \
and 'full' not in post:
preview, full = False, True
post['full'] = line.lstrip('full:').lstrip()
elif preview:
post['preview'] += line
elif full:
post['full'] += line
if 'categories' not in post:
post['categories'] = []
return post
def list_archive(self):
archive = list()
year, month = None, None
for date, _, _ in self.index:
if (date.year, date.month) != (year, month):
archive.append(date.strftime('%Y-%m'))
year, month = date.year, date.month
return archive
def list_categories(self):
categories = set()
for _, _, cats in self.index:
categories.update(cats)
categories = list(categories)
categories.sort()
return categories
@staticmethod
def build_base_uri(app_uri, category, archive, page):
uri = app_uri
if category:
uri += '/category/' + category
elif archive:
uri += '/archive/' + archive
if page:
uri += '/page/' + str(page)
return uri
def build_file_name(self, entry):
date, pid, _ = entry
return pid + self.file_name_sep + date.strftime('%Y-%m-%d') + '.txt'
def find_entry(self, archive, pid):
try:
date = datetime.strptime(archive, '%Y-%m-%d')
entry = next((entry for entry in self.index
if (date, pid) == entry[0:2]), None)
return entry
except ValueError:
return None
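# get_comment walks the nested comment structure: comments_num is a path of
# indices, one per nesting level (e.g. [2, 0] means "first reply of the third
# top-level comment"); traversal is capped at self.comments_nesting levels and
# returns None if any index is out of range.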
def get_comment(self, comments, comments_num):
comment = None
level = 0
while comments_num:
index = comments_num[0]
if level == self.comments_nesting:
return comment
if index < len(comments):
comment = comments[index]
comments, comments_num = comment[4], comments_num[1:]
else:
comment, comments_num = None, None
level += 1
return comment
def gather_comments(self, app_uri, comments, archive, pid, token, admin):
reply_url = app_uri + '/post/' + archive + '/' + pid
delete_url = app_uri + '/delete/' + archive + '/' + pid
def _gather_comments(_comments, _buf, _count, ids):
for idx, comment in enumerate(_comments):
date, _, name, text, replies = comment
_ids = list(ids)
_ids.append(str(idx))
comments_str, comments_count = \
_gather_comments(replies, [], 0, _ids)
ids_str = "-".join(_ids)
_buf.append(self._tpl_comment.
substitute(name=cgi.escape(name) or 'anonymous',
time=date.strftime('%Y/%m/%d @ %H:%M'),
reply_url=reply_url, id=ids_str,
comment=cgi.escape(text),
delete_url=self._tpl_link.
substitute(link=delete_url + '/' +
ids_str,
title='X') if admin else '',
comments=comments_str, token=token))
_count += comments_count + 1
return "".join(_buf), _count
buf, count = _gather_comments(comments, [], 0, [])
return "".join(buf), count
def load_comments(self, archive, pid):
comments_path = os.path.join(self.comments_dir,
pid + self.file_name_sep + archive +
'.comments')
if os.path.exists(comments_path):
with open(comments_path, 'rb') as f:
return cPickle.load(f)
else:
return list()
def count_comments(self, comments):
if comments:
count = len(comments)
for _, _, _, _, replies in comments:
count += self.count_comments(replies)
return count
return 0
def configure(self):
main_index_path = os.path.join(self.indices_dir, 'main.index')
if not os.path.exists(main_index_path):
self.index = self._try_main_index(main_index_path)
self.categories = self.list_categories()
self.archive = self.list_archive()
def get_list(self, rc, category=None, archive=None, page=1):
if page > 0:
if category and category not in self.categories:
yield self.status(rc, 404, 'Category %s not found' % category)
elif archive and archive not in self.archive:
yield self.status(rc, 404, 'Archive %s not found' % archive)
else:
rc.response(self._statuses[200], [('Content-Type',
'text/html; charset=%s' %
self._encoding)])
yield self._tpl_header.\
substitute(base=rc.app_uri, feed_url=rc.app_uri + '/rss'
+ ('/' + category if category else ''),
title=cgi.escape(self.title, quote=True),
encoding=self._encoding.lower(), body_tag='')
yield self._tpl_entries_begin
entries = self.filter_entries(category, archive)
items_to = self.items_per_page * page
for entry in entries[items_to - self.items_per_page:items_to]:
post = self.read_post(entry)
date_for_link = post['date'].strftime('%Y-%m-%d')
fmt_categories = ", ".join(
[self._tpl_link.substitute(link=rc.app_uri +
'/category/' +
quote_plus(cat), title=cat)
for cat in post['categories']])
if 'preview' in post:
post_text = post['preview']
if 'full' in post:
post_text += self._tpl_link.substitute(
link=rc.app_uri + '/post/' + date_for_link +
'/' + post['id'], title='View full post →')
elif 'full' in post:
post_text = post['full']
else:
post_text = ''
title = self.\
_tpl_link.substitute(link=rc.app_uri + '/post/' +
date_for_link + '/' + post['id'],
title=post['title'])
comments_count = self.\
count_comments(self.load_comments(date_for_link,
post['id']))
comments_str = 'No comments'
if comments_count == 1:
comments_str = '1 comment'
elif comments_count > 1:
comments_str = '%d comments' % comments_count
comments_str = self._tpl_link.\
substitute(link=rc.app_uri + '/post/' +
date_for_link + '/' + post['id'] +
'#comments', title=comments_str)
yield self._tpl_entry.\
substitute(title=title, categories=fmt_categories,
time=post['date'].strftime('%Y/%m/%d'),
text=post_text, comments=comments_str)
yield self._tpl_entries_end
fmt_categories = "".join(
[self._tpl_aside_entry.substitute(link=rc.app_uri +
'/category/' +
quote_plus(cat),
title=cat)
for cat in self.categories])
fmt_archive = "".join(
[self._tpl_aside_entry.substitute(link=rc.app_uri +
'/archive/' +
quote_plus(arc),
title=arc)
for arc in self.archive])
yield self._tpl_aside.substitute(categories=fmt_categories,
archive=fmt_archive)
older_newer = ''
if entries:
if items_to < len(entries):
older_newer = self.\
_tpl_link_wth_cls.\
substitute(link=Blog.build_base_uri(rc.app_uri,
category,
archive,
page + 1),
cls='older',
title='◄ Older')
if page > 1 and items_to - self.items_per_page < \
len(entries):
older_newer += self.\
_tpl_link_wth_cls.\
substitute(link=Blog.build_base_uri(rc.app_uri,
category,
archive,
page - 1),
cls='newer',
title='Newer ►')
yield self._tpl_footer.substitute(links=older_newer)
else:
yield self.status(rc, 404, 'Page %d not found' % page)
def get_post(self, rc, archive, pid, admin=False):
entry = self.find_entry(archive, pid)
if entry:
post = self.read_post(entry)
rc.response(self._statuses[200], [('Content-Type',
'text/html; charset=%s' %
self._encoding)])
m = hashlib.sha1()
m.update(archive + pid + self._salt)
token = m.hexdigest()
yield self._tpl_header.\
substitute(base=rc.app_uri, encoding=self._encoding,
feed_url=rc.app_uri + '/rss',
body_tag=' onload="setToken(\'' + token + '\');"',
title=cgi.escape(self.title, quote=True))
yield self._tpl_entries_begin
fmt_categories = ", ".join(
[self._tpl_link.substitute(link=rc.app_uri + '/category/' +
quote_plus(cat), title=cat)
for cat in post['categories']])
if 'full' in post:
post_text = post['full']
elif 'preview' in post:
post_text = post['preview']
else:
post_text = ''
comments_path = os.path.join(self.comments_dir, pid +
self.file_name_sep + archive +
'.comments')
comments_title = 'No comments'
comments_str = ''
if os.path.exists(comments_path):
with open(comments_path, 'rb') as f:
comments = cPickle.load(f)
comments_str, count = self.gather_comments(rc.app_uri, comments,
archive, pid, token,
admin)
if count == 1:
comments_title = '1 comment'
elif count > 1:
comments_title = '%d comments' % count
yield self._tpl_post.\
substitute(title=post['title'], categories=fmt_categories,
time=post['date'].strftime('%Y/%m/%d'),
text=post_text, comments_title=comments_title,
comments=comments_str, reply_url=rc.app_uri +
'/post/' + archive + '/' + pid, token=token)
yield self._tpl_entries_end
fmt_categories = "".join(
[self._tpl_aside_entry.substitute(link=rc.app_uri +
'/category/' +
quote_plus(cat), title=cat)
for cat in self.categories])
fmt_archive = "".join(
[self._tpl_aside_entry.substitute(link=rc.app_uri +
'/archive/' + quote_plus(arc),
title=arc)
for arc in self.archive])
yield self._tpl_aside.substitute(categories=fmt_categories,
archive=fmt_archive)
else:
yield self.status(rc, 404, 'Post %s not found' % archive + '/' +
pid)
def get_delete_comment(self, rc, archive, pid, ids_str):
entry = self.find_entry(archive, pid)
if entry:
rc.response(self._statuses[200], [('Content-Type',
'text/html; charset=%s' %
self._encoding)])
yield self._tpl_header.\
substitute(base=rc.app_uri, encoding=self._encoding,
feed_url=rc.app_uri + '/rss', body_tag='',
title=cgi.escape(self.title, quote=True))
yield self._tpl_entries_begin
yield self.\
_tpl_delete_comment.\
substitute(url=rc.app_uri + '/delete/' + archive + '/' + pid,
ids=ids_str)
yield self._tpl_entries_end
fmt_categories = "".join(
[self._tpl_aside_entry.substitute(link=rc.app_uri +
'/category/' +
quote_plus(cat), title=cat)
for cat in self.categories])
fmt_archive = "".join(
[self._tpl_aside_entry.substitute(link=rc.app_uri +
'/archive/' + quote_plus(arc),
title=arc)
for arc in self.archive])
yield self._tpl_aside.substitute(categories=fmt_categories,
archive=fmt_archive)
else:
yield self.status(rc, 404, 'Post %s not found' % archive + '/' +
pid)
def get_rss(self, rc, category=None):
if category and category not in self.categories:
yield self.status(rc, 404, 'Category %s not found' % category)
else:
rc.response(self._statuses[200], [('Content-Type',
'application/atom+xml; '
'charset=%s' %
self._encoding)])
datetime_format = '%Y-%m-%dT%H:%M:%SZ'
entries = self.filter_entries(category, None)
updated = datetime(1986, 4, 26).strftime(datetime_format)
if entries:
updated = entries[0][0].strftime(datetime_format)
yield self._tpl_feed_begin.\
substitute(encoding=self._encoding.lower(),
self_url=rc.app_uri + '/rss' +
('/' + category if category else ''),
title=self.title,
author=self.author, url=rc.app_uri +
('/category/' + category if category else ''),
id=rc.app_uri + ('/category/' +
category if category else ''), updated=updated)
for entry in entries[:self.items_per_feed]:
post = self.read_post(entry)
date_for_link = post['date'].strftime('%Y-%m-%d')
post_text = ''
if 'preview' in post:
post_text = post['preview']
elif 'full' in post:
post_text = post['full']
fmt_categories = "".join(
[self._tpl_feed_category.substitute(category=cat)
for cat in post['categories']])
yield self.\
_tpl_feed_entry.\
substitute(id=date_for_link + ':' + post['id'],
title=post['title'], url=rc.app_uri + '/post/'
+ date_for_link + '/' + post['id'],
updated=post['date'].strftime(datetime_format),
categories=fmt_categories, content=post_text)
yield self._tpl_feed_end
def post_comment(self, rc, archive, pid):
entry = self.find_entry(archive, pid)
if entry:
fs = cgi.FieldStorage(keep_blank_values=1,
fp=rc.environ['wsgi.input'],
environ=rc.environ)
email = fs.getvalue('email', '')
name = fs.getvalue('name', '')
comment = fs.getvalue('comment', '')
comments_no_str = fs.getvalue('comment_no')
cobweb = fs.getvalue('cobweb', '')
m = hashlib.sha1()
m.update(archive + pid + self._salt)
if m.hexdigest() == cobweb:
try:
comments_no = [int(comment_no) for comment_no
in comments_no_str.split("-")] if \
comments_no_str else []
path_comment_file = os.path.\
join(self.comments_dir, pid + self.file_name_sep +
archive + '.comments')
comments = list()
if os.path.exists(path_comment_file):
with open(path_comment_file, 'rb') as f:
comments = cPickle.load(f)
parent_comment = self.get_comment(comments, comments_no)
replies = parent_comment[4] if parent_comment else comments
replies.append((datetime.now(), email, name, comment, []))
replies.sort(key=lambda c: c[0], reverse=True)
self._serialize_object(comments, path_comment_file,
force=True)
self.redirect(rc, '/post/' + archive + '/' + pid)
except ValueError:
yield self.status(rc, 400, 'I cannot understand comment_no '
'[%s] parameter' %
comments_no_str)
except IOError:
self._logger.error("IOError occurred while adding comment",
exc_info=1)
self.redirect(rc, '/post/' + archive + '/' + pid)
else:
yield self.status(rc, 400, 'Token for preventing spam was not '
'valid during comment submission. '
'Please, try again')
else:
yield self.status(rc, 404, 'Post %s not found' % archive + '/' +
pid)
def post_delete_comment(self, rc, archive, pid, ids_str):
entry = self.find_entry(archive, pid)
if entry:
fs = cgi.FieldStorage(keep_blank_values=1,
fp=rc.environ['wsgi.input'],
environ=rc.environ)
password = fs.getvalue('password', '')
m = hashlib.md5()
m.update(password)
if m.digest() == self.password:
try:
ids = [int(id_str) for id_str
in ids_str.split("-")] if ids_str else []
if not ids:
raise ValueError()
path_comment_file = os.path.\
join(self.comments_dir, pid + self.file_name_sep +
archive + '.comments')
if os.path.exists(path_comment_file):
with open(path_comment_file, 'rb') as f:
comments = cPickle.load(f)
id_to_delete = ids.pop()
parent_comment = self.get_comment(comments, ids)
replies = comments if not ids else \
(parent_comment[4] if parent_comment else [])
if id_to_delete < len(replies):
del replies[id_to_delete]
self._serialize_object(comments, path_comment_file,
force=True)
else:
self._logger.warn('Comment was not deleted. '
'comment_no is [%s]', ids_str)
self.redirect(rc, '/post/' + archive + '/' + pid)
except ValueError:
yield self.status(rc, 400, 'I cannot understand ids [%s] '
'parameter' % ids_str)
else:
self._logger.warn('Wrong password was provided in order '
'to delete comment %s/%s/%s', archive, pid,
ids_str)
self.redirect(rc, '/post/' + archive + '/' + pid)
else:
yield self.status(rc, 404, 'Post %s not found' % archive + '/' +
pid)
def __call__(self, environ, start_response):
self.configure()
rc = RequestContext(environ, start_response)
if rc.method == 'GET':
if not rc.path or rc.path == '/':
return self.get_list(rc)
elif re.match('^/page/\d+/?$', rc.path):
return self.get_list(rc, page=int(rc.path.split('/')[2]))
elif re.match('^/category/[^/]+/?$', rc.path):
return self.get_list(rc, category=unquote_plus(
rc.path.split('/')[2]))
elif re.match('^/category/[^/]+/page/\d+/?$', rc.path):
path_els = rc.path.split('/')
return self.get_list(rc, category=unquote_plus(path_els[2]),
page=int(path_els[4]))
elif re.match('^/archive/\d{4}-\d{2}/?$', rc.path):
return self.get_list(rc, archive=rc.path.split('/')[2])
elif re.match('^/archive/\d{4}-\d{2}/page/\d+/?$', rc.path):
path_els = rc.path.split('/')
return self.get_list(rc, archive=path_els[2],
page=int(path_els[4]))
elif re.match('^/post/\d{4}-\d{2}-\d{2}/[^/]+/?$', rc.path):
path_els = rc.path.split('/')
return self.get_post(rc, archive=path_els[2],
pid=unquote_plus(path_els[3]))
elif re.match('^/post/\d{4}-\d{2}-\d{2}/[^/]+/admin/?$', rc.path):
path_els = rc.path.split('/')
return self.get_post(rc, archive=path_els[2],
pid=unquote_plus(path_els[3]),
admin=True)
elif re.match('^/delete/\d{4}-\d{2}-\d{2}/[^/]+/\d+(-\d+)*/?$',
rc.path):
path_els = rc.path.split('/')
return self.get_delete_comment(rc, archive=path_els[2],
pid=unquote_plus(path_els[3]),
ids_str=path_els[4])
elif re.match('^/rss/?$', rc.path):
return self.get_rss(rc)
elif re.match('^/rss/[^/]+/?$', rc.path):
return self.get_rss(rc, category=unquote_plus(rc.path.split('/')[2]))
elif rc.method == 'POST':
if re.match('^/post/\d{4}-\d{2}-\d{2}/[^/]+/?$', rc.path):
path_els = rc.path.split('/')
return self.post_comment(rc, archive=path_els[2],
pid=unquote_plus(path_els[3]))
elif re.match('^/delete/\d{4}-\d{2}-\d{2}/[^/]+/\d+(-\d+)*/?$',
rc.path):
path_els = rc.path.split('/')
return self.post_delete_comment(rc, path_els[2],
unquote_plus(path_els[3]),
path_els[4])
return self.status(rc, 404, 'Page %s not found' % rc.path)
application = Blog()
|
|
"""
The contents of this file are taken from
https://github.com/niwinz/django-jinja/blob/master/django_jinja/management/commands/makemessages.py
Jinja2's i18n functionality is not exactly the same as Django's.
In particular, the tag names and their syntax are different:
1. The Django ``trans`` tag is replaced by a _() global.
2. The Django ``blocktrans`` tag is called ``trans``.
(1) isn't an issue, since the whole ``makemessages`` process is based on
converting the template tags to ``_()`` calls. However, (2) means that
those Jinja2 ``trans`` tags will not be picked up by Django's
``makemessages`` command.
There aren't any nice solutions here. While Jinja2's i18n extension does
come with extraction capabilities built in, the code behind ``makemessages``
unfortunately isn't extensible, so we can:
* Duplicate the command + code behind it.
* Offer a separate command for Jinja2 extraction.
* Try to get Django to offer hooks into makemessages().
* Monkey-patch.
We are currently doing that last thing. It turns out we are lucky
for once: it's simply a matter of extending two regular expressions.
Credit for the approach goes to:
https://stackoverflow.com/questions/2090717
"""
import glob
import itertools
import json
import os
import re
from argparse import ArgumentParser
from typing import Any, Dict, Iterable, List, Mapping
from django.core.management.commands import makemessages
from django.template.base import BLOCK_TAG_END, BLOCK_TAG_START
from django.utils.translation import template
strip_whitespace_right = re.compile(f"({BLOCK_TAG_START}-?\\s*(trans|pluralize).*?-{BLOCK_TAG_END})\\s+", re.U)
strip_whitespace_left = re.compile(f"\\s+({BLOCK_TAG_START}-\\s*(endtrans|pluralize).*?-?{BLOCK_TAG_END})", re.U)
regexes = [r'{{#tr .*?}}([\s\S]*?){{/tr}}', # '.' doesn't match '\n' by default
r'{{\s*t "(.*?)"\W*}}',
r"{{\s*t '(.*?)'\W*}}",
r'=\(t "(.*?)"\)(?=[^{]*}})',
r"=\(t '(.*?)'\)(?=[^{]*}})",
r"i18n\.t\('([^']*?)'\)",
r"i18n\.t\('(.*?)',\s*.*?[^,]\)",
r'i18n\.t\("([^"]*?)"\)',
r'i18n\.t\("(.*?)",\s*.*?[^,]\)',
]
tags = [('err_', "error"),
]
frontend_compiled_regexes = [re.compile(regex) for regex in regexes]
multiline_js_comment = re.compile(r"/\*.*?\*/", re.DOTALL)
singleline_js_comment = re.compile("//.*?\n")
def strip_whitespaces(src: str) -> str:
src = strip_whitespace_left.sub('\\1', src)
src = strip_whitespace_right.sub('\\1', src)
return src
class Command(makemessages.Command):
xgettext_options = makemessages.Command.xgettext_options
for func, tag in tags:
xgettext_options += [f'--keyword={func}:1,"{tag}"']
def add_arguments(self, parser: ArgumentParser) -> None:
super().add_arguments(parser)
parser.add_argument('--frontend-source', type=str,
default='static/templates',
help='Name of the Handlebars template directory')
parser.add_argument('--frontend-output', type=str,
default='locale',
help='Name of the frontend messages output directory')
parser.add_argument('--frontend-namespace', type=str,
default='translations.json',
help='Namespace of the frontend locale file')
def handle(self, *args: Any, **options: Any) -> None:
self.handle_django_locales(*args, **options)
self.handle_frontend_locales(**options)
def handle_frontend_locales(self, *,
frontend_source: str,
frontend_output: str,
frontend_namespace: str,
locale: List[str],
exclude: List[str],
all: bool,
**options: Any) -> None:
self.frontend_source = frontend_source
self.frontend_output = frontend_output
self.frontend_namespace = frontend_namespace
self.frontend_locale = locale
self.frontend_exclude = exclude
self.frontend_all = all
translation_strings = self.get_translation_strings()
self.write_translation_strings(translation_strings)
def handle_django_locales(self, *args: Any, **options: Any) -> None:
old_endblock_re = template.endblock_re
old_block_re = template.block_re
old_constant_re = template.constant_re
old_templatize = template.templatize
# Extend the regular expressions that are used to detect
# translation blocks with an "OR jinja-syntax" clause.
template.endblock_re = re.compile(
template.endblock_re.pattern + '|' + r"""^-?\s*endtrans\s*-?$""")
template.block_re = re.compile(
template.block_re.pattern + '|' + r"""^-?\s*trans(?:\s+(?!'|")(?=.*?=.*?)|\s*-?$)""")
template.plural_re = re.compile(
template.plural_re.pattern + '|' + r"""^-?\s*pluralize(?:\s+.+|-?$)""")
template.constant_re = re.compile(r"""_\(((?:".*?")|(?:'.*?')).*\)""")
def my_templatize(src: str, *args: Any, **kwargs: Any) -> str:
new_src = strip_whitespaces(src)
return old_templatize(new_src, *args, **kwargs)
template.templatize = my_templatize
try:
ignore_patterns = options.get('ignore_patterns', [])
ignore_patterns.append('docs/*')
ignore_patterns.append('var/*')
options['ignore_patterns'] = ignore_patterns
super().handle(*args, **options)
finally:
template.endblock_re = old_endblock_re
template.block_re = old_block_re
template.templatize = old_templatize
template.constant_re = old_constant_re
def extract_strings(self, data: str) -> List[str]:
translation_strings: List[str] = []
for regex in frontend_compiled_regexes:
for match in regex.findall(data):
match = match.strip()
match = ' '.join(line.strip() for line in match.splitlines())
match = match.replace('\n', '\\n')
translation_strings.append(match)
return translation_strings
def ignore_javascript_comments(self, data: str) -> str:
# Removes multi line comments.
data = multiline_js_comment.sub('', data)
# Removes single line (//) comments.
data = singleline_js_comment.sub('', data)
return data
def get_translation_strings(self) -> List[str]:
translation_strings: List[str] = []
dirname = self.get_template_dir()
for dirpath, dirnames, filenames in os.walk(dirname):
for filename in [f for f in filenames if f.endswith(".hbs")]:
if filename.startswith('.'):
continue
with open(os.path.join(dirpath, filename)) as reader:
data = reader.read()
translation_strings.extend(self.extract_strings(data))
for dirpath, dirnames, filenames in itertools.chain(os.walk("static/js"),
os.walk("static/shared/js")):
for filename in [f for f in filenames if f.endswith(".js") or f.endswith(".ts")]:
if filename.startswith('.'):
continue
with open(os.path.join(dirpath, filename)) as reader:
data = reader.read()
data = self.ignore_javascript_comments(data)
translation_strings.extend(self.extract_strings(data))
return list(set(translation_strings))
def get_template_dir(self) -> str:
return self.frontend_source
def get_namespace(self) -> str:
return self.frontend_namespace
def get_locales(self) -> Iterable[str]:
locale = self.frontend_locale
exclude = self.frontend_exclude
process_all = self.frontend_all
paths = glob.glob(f'{self.default_locale_path}/*')
all_locales = [os.path.basename(path) for path in paths if os.path.isdir(path)]
# Account for excluded locales
if process_all:
return all_locales
else:
locales = locale or all_locales
return set(locales) - set(exclude)
def get_base_path(self) -> str:
return self.frontend_output
def get_output_paths(self) -> Iterable[str]:
base_path = self.get_base_path()
locales = self.get_locales()
for path in [os.path.join(base_path, locale) for locale in locales]:
if not os.path.exists(path):
os.makedirs(path)
yield os.path.join(path, self.get_namespace())
def get_new_strings(self, old_strings: Mapping[str, str],
translation_strings: List[str], locale: str) -> Dict[str, str]:
"""
Missing strings are removed, new strings are added and already
translated strings are not touched.
"""
new_strings: Dict[str, str] = {}
for k in translation_strings:
k = k.replace('\\n', '\n')
if locale == 'en':
# For English language, translation is equal to the key.
new_strings[k] = old_strings.get(k, k)
else:
new_strings[k] = old_strings.get(k, "")
plurals = {k: v for k, v in old_strings.items() if k.endswith('_plural')}
for plural_key, value in plurals.items():
components = plural_key.split('_')
singular_key = '_'.join(components[:-1])
if singular_key in new_strings:
new_strings[plural_key] = value
return new_strings
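# Illustrative example of the merge semantics above: with
#   old_strings = {"Hello": "Hola", "Unused": "No usado"}
#   translation_strings = ["Hello", "Brand new"]
# a non-English locale yields {"Hello": "Hola", "Brand new": ""};
# "Unused" is dropped because it no longer appears in the templates.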
def write_translation_strings(self, translation_strings: List[str]) -> None:
for locale, output_path in zip(self.get_locales(), self.get_output_paths()):
self.stdout.write(f"[frontend] processing locale {locale}")
try:
with open(output_path) as reader:
old_strings = json.load(reader)
except (OSError, ValueError):
old_strings = {}
new_strings = {
k: v
for k, v in self.get_new_strings(old_strings,
translation_strings,
locale).items()
}
with open(output_path, 'w') as writer:
json.dump(new_strings, writer, indent=2, sort_keys=True)
|
|
# SPDX-FileCopyrightText: 2015 Eric Larson
#
# SPDX-License-Identifier: Apache-2.0
"""
The httplib2 algorithms ported for use with requests.
"""
import logging
import re
import calendar
import time
from email.utils import parsedate_tz
from pip._vendor.requests.structures import CaseInsensitiveDict
from .cache import DictCache
from .serialize import Serializer
logger = logging.getLogger(__name__)
URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")
PERMANENT_REDIRECT_STATUSES = (301, 308)
def parse_uri(uri):
"""Parses a URI using the regex given in Appendix B of RFC 3986.
(scheme, authority, path, query, fragment) = parse_uri(uri)
"""
groups = URI.match(uri).groups()
return (groups[1], groups[3], groups[4], groups[6], groups[8])
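# For example:
#   parse_uri("http://example.com/a/b?x=1#frag")
#   -> ('http', 'example.com', '/a/b', 'x=1', 'frag')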
class CacheController(object):
"""An interface to see if request should cached or not.
"""
def __init__(
self, cache=None, cache_etags=True, serializer=None, status_codes=None
):
self.cache = DictCache() if cache is None else cache
self.cache_etags = cache_etags
self.serializer = serializer or Serializer()
self.cacheable_status_codes = status_codes or (200, 203, 300, 301, 308)
@classmethod
def _urlnorm(cls, uri):
"""Normalize the URL to create a safe key for the cache"""
(scheme, authority, path, query, fragment) = parse_uri(uri)
if not scheme or not authority:
raise Exception("Only absolute URIs are allowed. uri = %s" % uri)
scheme = scheme.lower()
authority = authority.lower()
if not path:
path = "/"
# Could do syntax based normalization of the URI before
# computing the digest. See Section 6.2.2 of Std 66.
request_uri = query and "?".join([path, query]) or path
defrag_uri = scheme + "://" + authority + request_uri
return defrag_uri
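# For example (illustrative):
#   CacheController._urlnorm("HTTP://Example.COM/path?x=1#frag")
#   -> "http://example.com/path?x=1"
# Relative URIs (missing scheme or authority) raise an Exception instead.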
@classmethod
def cache_url(cls, uri):
return cls._urlnorm(uri)
def parse_cache_control(self, headers):
known_directives = {
# https://tools.ietf.org/html/rfc7234#section-5.2
"max-age": (int, True),
"max-stale": (int, False),
"min-fresh": (int, True),
"no-cache": (None, False),
"no-store": (None, False),
"no-transform": (None, False),
"only-if-cached": (None, False),
"must-revalidate": (None, False),
"public": (None, False),
"private": (None, False),
"proxy-revalidate": (None, False),
"s-maxage": (int, True),
}
cc_headers = headers.get("cache-control", headers.get("Cache-Control", ""))
retval = {}
for cc_directive in cc_headers.split(","):
if not cc_directive.strip():
continue
parts = cc_directive.split("=", 1)
directive = parts[0].strip()
try:
typ, required = known_directives[directive]
except KeyError:
logger.debug("Ignoring unknown cache-control directive: %s", directive)
continue
if not typ or not required:
retval[directive] = None
if typ:
try:
retval[directive] = typ(parts[1].strip())
except IndexError:
if required:
logger.debug(
"Missing value for cache-control " "directive: %s",
directive,
)
except ValueError:
logger.debug(
"Invalid value for cache-control directive " "%s, must be %s",
directive,
typ.__name__,
)
return retval
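# For example, a header of "max-age=3600, no-cache" parses to
#   {'max-age': 3600, 'no-cache': None}
# while unknown directives are ignored with a debug log message.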
def cached_request(self, request):
"""
Return a cached response if it exists in the cache, otherwise
return False.
"""
cache_url = self.cache_url(request.url)
logger.debug('Looking up "%s" in the cache', cache_url)
cc = self.parse_cache_control(request.headers)
# Bail out if the request insists on fresh data
if "no-cache" in cc:
logger.debug('Request header has "no-cache", cache bypassed')
return False
if "max-age" in cc and cc["max-age"] == 0:
logger.debug('Request header has "max_age" as 0, cache bypassed')
return False
# Request allows serving from the cache, let's see if we find something
cache_data = self.cache.get(cache_url)
if cache_data is None:
logger.debug("No cache entry available")
return False
# Check whether it can be deserialized
resp = self.serializer.loads(request, cache_data)
if not resp:
logger.warning("Cache entry deserialization failed, entry ignored")
return False
# If we have a cached permanent redirect, return it immediately. We
# don't need to test our response for other headers b/c it is
# intrinsically "cacheable" as it is Permanent.
#
# See:
# https://tools.ietf.org/html/rfc7231#section-6.4.2
#
# Client can try to refresh the value by repeating the request
# with cache busting headers as usual (ie no-cache).
if int(resp.status) in PERMANENT_REDIRECT_STATUSES:
msg = (
"Returning cached permanent redirect response "
"(ignoring date and etag information)"
)
logger.debug(msg)
return resp
headers = CaseInsensitiveDict(resp.headers)
if not headers or "date" not in headers:
if "etag" not in headers:
# Without date or etag, the cached response can never be used
# and should be deleted.
logger.debug("Purging cached response: no date or etag")
self.cache.delete(cache_url)
logger.debug("Ignoring cached response: no date")
return False
now = time.time()
date = calendar.timegm(parsedate_tz(headers["date"]))
current_age = max(0, now - date)
logger.debug("Current age based on date: %i", current_age)
# TODO: There is an assumption that the result will be a
# urllib3 response object. This may not be best since we
# could probably avoid instantiating or constructing the
# response until we know we need it.
resp_cc = self.parse_cache_control(headers)
# determine freshness
freshness_lifetime = 0
# Check the max-age pragma in the cache control header
if "max-age" in resp_cc:
freshness_lifetime = resp_cc["max-age"]
logger.debug("Freshness lifetime from max-age: %i", freshness_lifetime)
# If there isn't a max-age, check for an expires header
elif "expires" in headers:
expires = parsedate_tz(headers["expires"])
if expires is not None:
expire_time = calendar.timegm(expires) - date
freshness_lifetime = max(0, expire_time)
logger.debug("Freshness lifetime from expires: %i", freshness_lifetime)
# Determine if we are setting freshness limit in the
# request. Note, this overrides what was in the response.
if "max-age" in cc:
freshness_lifetime = cc["max-age"]
logger.debug(
"Freshness lifetime from request max-age: %i", freshness_lifetime
)
if "min-fresh" in cc:
min_fresh = cc["min-fresh"]
# adjust our current age by our min fresh
current_age += min_fresh
logger.debug("Adjusted current age from min-fresh: %i", current_age)
# Return entry if it is fresh enough
if freshness_lifetime > current_age:
logger.debug('The response is "fresh", returning cached response')
logger.debug("%i > %i", freshness_lifetime, current_age)
return resp
# we're not fresh. If we don't have an Etag, clear it out
if "etag" not in headers:
logger.debug('The cached response is "stale" with no etag, purging')
self.cache.delete(cache_url)
# return the original handler
return False
def conditional_headers(self, request):
cache_url = self.cache_url(request.url)
resp = self.serializer.loads(request, self.cache.get(cache_url))
new_headers = {}
if resp:
headers = CaseInsensitiveDict(resp.headers)
if "etag" in headers:
new_headers["If-None-Match"] = headers["ETag"]
if "last-modified" in headers:
new_headers["If-Modified-Since"] = headers["Last-Modified"]
return new_headers
def cache_response(self, request, response, body=None, status_codes=None):
"""
Algorithm for caching requests.
This assumes a requests Response object.
"""
# From httplib2: Don't cache 206's since we aren't going to
# handle byte range requests
cacheable_status_codes = status_codes or self.cacheable_status_codes
if response.status not in cacheable_status_codes:
logger.debug(
"Status code %s not in %s", response.status, cacheable_status_codes
)
return
response_headers = CaseInsensitiveDict(response.headers)
if "date" in response_headers:
date = calendar.timegm(parsedate_tz(response_headers["date"]))
else:
date = 0
# If we've been given a body, our response has a Content-Length, that
# Content-Length is valid then we can check to see if the body we've
# been given matches the expected size, and if it doesn't we'll just
# skip trying to cache it.
if (
body is not None
and "content-length" in response_headers
and response_headers["content-length"].isdigit()
and int(response_headers["content-length"]) != len(body)
):
return
cc_req = self.parse_cache_control(request.headers)
cc = self.parse_cache_control(response_headers)
cache_url = self.cache_url(request.url)
logger.debug('Updating cache with response from "%s"', cache_url)
# Delete it from the cache if we happen to have it stored there
no_store = False
if "no-store" in cc:
no_store = True
logger.debug('Response header has "no-store"')
if "no-store" in cc_req:
no_store = True
logger.debug('Request header has "no-store"')
if no_store and self.cache.get(cache_url):
logger.debug('Purging existing cache entry to honor "no-store"')
self.cache.delete(cache_url)
if no_store:
return
# https://tools.ietf.org/html/rfc7234#section-4.1:
# A Vary header field-value of "*" always fails to match.
# Storing such a response leads to a deserialization warning
# during cache lookup and is not allowed to ever be served,
# so storing it can be avoided.
if "*" in response_headers.get("vary", ""):
logger.debug('Response header has "Vary: *"')
return
# If we've been given an etag, then keep the response
if self.cache_etags and "etag" in response_headers:
expires_time = 0
if response_headers.get("expires"):
expires = parsedate_tz(response_headers["expires"])
if expires is not None:
expires_time = calendar.timegm(expires) - date
expires_time = max(expires_time, 14 * 86400)
logger.debug("etag object cached for {0} seconds".format(expires_time))
logger.debug("Caching due to etag")
self.cache.set(
cache_url,
self.serializer.dumps(request, response, body),
expires=expires_time,
)
# Add to the cache any permanent redirects. We do this before looking
# at the Date headers.
elif int(response.status) in PERMANENT_REDIRECT_STATUSES:
logger.debug("Caching permanent redirect")
self.cache.set(cache_url, self.serializer.dumps(request, response, b""))
# Add to the cache if the response headers demand it. If there
# is no date header then we can't do anything about expiring
# the cache.
elif "date" in response_headers:
date = calendar.timegm(parsedate_tz(response_headers["date"]))
# cache when there is a max-age > 0
if "max-age" in cc and cc["max-age"] > 0:
logger.debug("Caching b/c date exists and max-age > 0")
expires_time = cc["max-age"]
self.cache.set(
cache_url,
self.serializer.dumps(request, response, body),
expires=expires_time,
)
# If the request can expire, it means we should cache it
# in the meantime.
elif "expires" in response_headers:
if response_headers["expires"]:
expires = parsedate_tz(response_headers["expires"])
if expires is not None:
expires_time = calendar.timegm(expires) - date
else:
expires_time = None
logger.debug(
"Caching b/c of expires header. expires in {0} seconds".format(
expires_time
)
)
self.cache.set(
cache_url,
self.serializer.dumps(request, response, body=body),
expires=expires_time,
)
def update_cached_response(self, request, response):
"""On a 304 we will get a new set of headers that we want to
update our cached value with, assuming we have one.
This should only ever be called when we've sent an ETag and
gotten a 304 as the response.
"""
cache_url = self.cache_url(request.url)
cached_response = self.serializer.loads(request, self.cache.get(cache_url))
if not cached_response:
# we didn't have a cached response
return response
# Let's update our headers with the headers from the new response:
# http://tools.ietf.org/html/draft-ietf-httpbis-p4-conditional-26#section-4.1
#
# The server isn't supposed to send headers that would make
# the cached body invalid. But... just in case, we'll be sure
# to strip out ones we know might be problematic due to
# typical assumptions.
excluded_headers = ["content-length"]
cached_response.headers.update(
dict(
(k, v)
for k, v in response.headers.items()
if k.lower() not in excluded_headers
)
)
# we want a 200 b/c we have content via the cache
cached_response.status = 200
# update our cache
self.cache.set(cache_url, self.serializer.dumps(request, cached_response))
return cached_response
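# Editor's note: a self-contained sketch (not part of the library above) of the
# freshness arithmetic that cached_request() applies: a cached entry is servable
# while its freshness lifetime (max-age, falling back to Expires) exceeds its
# current age (now minus the Date header). Header values used with it are purely
# illustrative, and the lower-cased header keys are an assumption of this sketch.
import calendar
import time
from email.utils import parsedate_tz

def _example_is_fresh(headers, now=None):
    """Return True if a response with these (lower-cased) headers is still fresh."""
    now = time.time() if now is None else now
    date = calendar.timegm(parsedate_tz(headers["date"]))
    current_age = max(0, now - date)
    freshness_lifetime = 0
    for directive in headers.get("cache-control", "").split(","):
        name, _, value = directive.strip().partition("=")
        if name == "max-age" and value.strip().isdigit():
            freshness_lifetime = int(value)
    if freshness_lifetime == 0 and "expires" in headers:
        expires = parsedate_tz(headers["expires"])
        if expires is not None:
            freshness_lifetime = max(0, calendar.timegm(expires) - date)
    return freshness_lifetime > current_age

# e.g. _example_is_fresh({"date": "Mon, 01 Jan 2024 00:00:00 GMT",
#                         "cache-control": "max-age=600"})
# is True only within ten minutes of that Date value.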
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2021 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license (see the COPYING file).
""" Test QiPkg """
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import os
import sys
import mock
import pytest
import platform
import qipkg.builder
import qipkg.package
import qisys.qixml
import qisys.command
import qisys.script
import qisys.remote
from qisys.qixml import etree
from qisys.test.conftest import record_messages
from qibuild.test.test_qibuild_deploy import get_ssh_url
TARGET = "{}-{}".format(platform.system().lower(),
platform.processor().lower())
def test_ls_package(qipkg_action, record_messages):
""" Test Ls Package """
pkg_path = os.path.join(os.path.dirname(__file__), "projects", "python_services.pkg")
qipkg_action("ls-package", pkg_path)
assert record_messages.find("lib/my_service.py")
assert record_messages.find("manifest.xml")
def test_make_package(qipkg_action, qipy_action):
""" Test Make Package """
tmpdir = qipy_action.worktree.tmpdir
qipkg_action.add_test_project("a_cpp")
qipkg_action.add_test_project("b_py")
c_pkg_proj = qipkg_action.add_test_project("c_pkg")
# ipython 5 is the last version compatible with Python 2.7
qipy_action("bootstrap", "pip", "virtualenv", "ipython<=5")
pml = os.path.join(c_pkg_proj.path, "c_pkg.pml")
qipkg_action("configure", pml)
qipkg_action("build", pml)
pkg = qipkg_action("make-package", pml)
qipkg_action("extract-package", pkg)
expected_paths = [
"manifest.xml",
"lib/libfoo.so",
"lib/python2.7/site-packages/b.py",
"c_behavior/behavior.xar",
]
for path in expected_paths:
full_path = tmpdir.join("c-0.1", path)
assert full_path.check(file=True)
def test_make_package_empty_uuid(qipkg_action):
""" Test Package Empty UUID """
pml = os.path.join(os.path.dirname(__file__), "projects", "empty_uuid", "empty.pml")
error = qipkg_action("make-package", pml, raises=True)
assert "uuid" in error
def test_make_package_empty_version(qipkg_action):
""" Test Make Package Empty Version """
pml = os.path.join(os.path.dirname(__file__), "projects", "empty_version", "empty.pml")
error = qipkg_action("make-package", pml, raises=True)
assert "version" in error
def test_breakpad_symbols(qipkg_action):
""" Test Breakpad Symbols """
dump_syms = qisys.command.find_program("dump_syms", raises=False)
if not dump_syms:
return
a_cpp_proj = qipkg_action.add_test_project("a_cpp")
pml = os.path.join(a_cpp_proj.path, "a_cpp.pml")
qipkg_action("configure", "--release", "--with-debug-info", pml)
qipkg_action("build", pml)
_pkg, symbols_archive = qipkg_action("make-package", "--with-breakpad", pml)
assert os.path.exists(symbols_archive)
def test_meta(qipkg_action):
""" Test Meta """
_tmpdir = qipkg_action.worktree.tmpdir
qipkg_action.add_test_project("a_cpp")
qipkg_action.add_test_project("d_pkg")
meta_pkg_proj = qipkg_action.add_test_project("meta_pkg")
meta_pml = os.path.join(meta_pkg_proj.path, "meta_pkg.mpml")
qipkg_action("configure", meta_pml)
qipkg_action("build", meta_pml)
pkgs = qipkg_action("make-package", meta_pml)
expected_paths = [
"a-0.1-{}.pkg".format(TARGET),
"d-0.1-{}.pkg".format(TARGET)
]
actual_paths = [os.path.basename(x) for x in pkgs]
assert actual_paths == expected_paths
def test_no_worktree_pure_pml(tmpdir, monkeypatch):
""" Test No Worktree Pure Pml """
project = tmpdir.mkdir("project")
project.ensure("behavior_1", "behavior.xar", file=True)
manifest_path = project.join("manifest.xml")
manifest_path.write("""
<package version="0.1" uuid="fooproject">
<names>
<name lang="en_US">fooproject</name>
</names>
<supportedLanguages>
<language>en_US</language>
</supportedLanguages>
<requirements>
<naoqiRequirement minVersion="1.22"/>
<robotRequirement model="NAO"/>
</requirements>
</package>
""")
pml_path = project.join("project.pml")
pml_path.write("""
<Package name="project">
<BehaviorDescriptions>
<BehaviorDescription name="behavior" src="behavior_1" xar="behavior.xar" />
</BehaviorDescriptions>
</Package>
""")
monkeypatch.chdir(tmpdir)
package = qisys.script.run_action("qipkg.actions.make_package", [pml_path.strpath])
dest = tmpdir.mkdir("dest")
monkeypatch.chdir(dest)
qisys.script.run_action("qipkg.actions.extract_package", [package])
assert dest.join("fooproject-0.1", "manifest.xml").check(file=True)
assert dest.join("fooproject-0.1", "behavior_1", "behavior.xar").check(file=True)
def test_no_worktree_bad_pml(tmpdir, monkeypatch):
""" Test No Worktree Bad Pml """
project = tmpdir.mkdir("project")
manifest_path = project.join("manifest.xml")
manifest_path.write("""
<package version="0.1" uuid="fooproject">
<names>
<name lang="en_US">fooproject</name>
</names>
<supportedLanguages>
<language>en_US</language>
</supportedLanguages>
<requirements>
<naoqiRequirement minVersion="1.22"/>
<robotRequirement model="NAO"/>
</requirements>
</package>
""")
pml_path = project.join("project.pml")
pml_path.write("""
<Package name="project">
<qibuild name="foo" />
</Package>
""")
monkeypatch.chdir(tmpdir)
with pytest.raises(Exception) as error:
_package = qisys.script.run_action("qipkg.actions.make_package", [pml_path.strpath])
assert "not in a worktree" in str(error)
def test_translations(qipkg_action, tmpdir):
""" Test Translation """
tr_project = qipkg_action.add_test_project("tr_project")
pml_path = os.path.join(tr_project.path, "tr.pml")
package = qipkg_action("make-package", pml_path)
dest = tmpdir.mkdir("dest")
qipkg_action.chdir(dest)
qipkg_action("extract-package", package)
assert dest.join("tr-0.1", "translations", "tr_fr_FR.qm").check(file=True)
def test_validate_package(qipkg_action):
""" Test Validate Package """
pkg_path = os.path.join(os.path.dirname(__file__), "projects", "python_services.pkg")
qipkg_action("validate_package", pkg_path)
def test_validate_package_exception(qipkg_action):
""" Test Validate Package Extension """
pkg_path = os.path.join(os.path.dirname(__file__), "projects", "invalid_package.pkg")
error = qipkg_action("validate_package", pkg_path, raises=True)
assert error == "Given package does not satisfy default package requirements"
def test_release_package(qipkg_action, tmpdir):
""" Test Release Package """
pkg_path = os.path.join(os.path.dirname(__file__), "projects", "python_services.pkg")
output_path = tmpdir.join("output.pkg")
qipkg_action("release-package", pkg_path, "--output", str(output_path))
dest = tmpdir.mkdir("dest")
qipkg_action.chdir(dest)
qipkg_action("extract-package", str(output_path))
package = dest.join("python_services-0.0.2")
assert package.join("lib", "my_service.pyc").check(file=True)
assert package.join("lib", "my_service.py").check(file=False)
tree = qisys.qixml.read(str(package.join("manifest.xml")))
services = tree.getroot().findall("services/service")
assert services[0].attrib["execStart"] == "/usr/bin/python2.7 lib/my_service.pyc"
assert services[1].attrib["execStart"] == "/usr/bin/python2.7 lib/my_service.pyc '127.0.0.1'"
# it was already pointing to a *.pyc file, nothing should have changed
assert services[2].attrib["execStart"] == "/usr/bin/python2.7 lib/my_service.pyc"
# it is not pointing to a file of the package, nothing should have changed
assert services[3].attrib["execStart"] == "/usr/bin/python2.7 tata.py"
def test_qipkg_in_wrong_directory(qipkg_action):
""" Test QiPkg In Wrong Directory """
error = qipkg_action("make-package", "foo.pml", raises=True)
assert "foo.pml" in error
def test_qipkg_no_such_project(qipkg_action, tmpdir):
""" Test QiPkg No Such Project """
d_project = qipkg_action.add_test_project("d_pkg")
pml_path = os.path.join(d_project.path, "d_pkg.pml")
root = qisys.qixml.read(pml_path).getroot()
elem = etree.SubElement(root, "qipython")
elem.set("name", "foo")
qisys.qixml.write(root, pml_path)
error = qipkg_action("make-package", pml_path, raises=True)
assert "No such python project: foo" in error
assert pml_path in error
def test_bump_version(qipkg_action):
""" Test Bump Version """
d_proj = qipkg_action.add_test_project("d_pkg")
manifest_xml = os.path.join(d_proj.path, "manifest.xml")
name = qipkg.builder.pkg_name(manifest_xml)
assert name == "d-0.1"
qipkg_action("bump-version", manifest_xml)
name = qipkg.builder.pkg_name(manifest_xml)
assert name == "d-0.2"
qipkg_action("bump-version", manifest_xml, "2.0")
name = qipkg.builder.pkg_name(manifest_xml)
assert name == "d-2.0"
def test_install(qipkg_action, tmpdir):
""" Test Install """
d_proj = qipkg_action.add_test_project("d_pkg")
pml_path = os.path.join(d_proj.path, "d_pkg.pml")
d_package = qipkg_action("make-package", pml_path)
_url = get_ssh_url(tmpdir)
qipkg_action("install", pml_path, tmpdir.strpath)
assert tmpdir.join("manifest.xml").check(file=True)
def test_deploy(qipkg_action, tmpdir):
""" Test Deploy """
d_proj = qipkg_action.add_test_project("d_pkg")
pml_path = os.path.join(d_proj.path, "d_pkg.pml")
d_package = qipkg_action("make-package", pml_path)
url = get_ssh_url(tmpdir)
qipkg_action("deploy", pml_path, "--url", url)
assert tmpdir.join("manifest.xml").check(file=True)
def test_deploy_package_no_qi(qipkg_action, tmpdir, record_messages):
""" Test Deploy Package """
d_proj = qipkg_action.add_test_project("d_pkg")
pml_path = os.path.join(d_proj.path, "d_pkg.pml")
d_package = qipkg_action("make-package", pml_path)
url = get_ssh_url(tmpdir)
record_messages.reset()
qipkg_action("deploy-package", d_package, "--url", url)
try:
qipkg_action("deploy-package", d_package, "--url", url)
except ImportError:
assert record_messages("Unable to install pkg, please install qi from pip and retry.")
def test_deploy_package(qipkg_action, tmpdir, record_messages):
""" Test Deploy Package """
try:
import qi
d_proj = qipkg_action.add_test_project("d_pkg")
pml_path = os.path.join(d_proj.path, "d_pkg.pml")
d_package = qipkg_action("make-package", pml_path)
url = get_ssh_url(tmpdir)
parsed = qisys.remote.URL(url)
username = parsed.user
fake_qi = mock.Mock()
fake_qi.Application = mock.Mock()
fake_app = mock.Mock()
fake_qi.Application.return_value = fake_app
session = fake_qi.Session()
mock_connect = session.connect
fake_pm = mock.Mock()
session.service.return_value = fake_pm
remove_mock = fake_pm.removePkg
install_mock = fake_pm.install
install_mock.return_value = True
sys.modules["qi"] = fake_qi
record_messages.reset()
qipkg_action("deploy-package", d_package, "--url", url)
assert mock_connect.call_args_list == [mock.call("tcp://localhost:9559")]
assert session.service.call_args_list == [mock.call("PackageManager")]
assert remove_mock.call_args_list == [mock.call("d")]
assert install_mock.call_args_list == [mock.call("/home/%s/d-0.1.pkg" % username)]
assert record_messages.find("PackageManager returned: True")
del sys.modules["qi"]
except ImportError:
pass
def test_deploy_package_from_pml_no_qi(qipkg_action, tmpdir, record_messages):
""" Test Deploy Package From Pml """
d_proj = qipkg_action.add_test_project("d_pkg")
pml_path = os.path.join(d_proj.path, "d_pkg.pml")
url = get_ssh_url(tmpdir)
record_messages.reset()
try:
qipkg_action("deploy-package", pml_path, "--url", url)
except ImportError:
assert record_messages("Unable to install pkg, please install qi from pip and retry.")
def test_deploy_package_from_pml(qipkg_action, tmpdir, record_messages):
""" Test Deploy Package From Pml """
try:
import qi
d_proj = qipkg_action.add_test_project("d_pkg")
pml_path = os.path.join(d_proj.path, "d_pkg.pml")
url = get_ssh_url(tmpdir)
parsed = qisys.remote.URL(url)
username = parsed.user
fake_qi = mock.Mock()
fake_qi.Application = mock.Mock()
fake_app = mock.Mock()
fake_qi.Application.return_value = fake_app
session = fake_qi.Session()
mock_connect = session.connect
fake_pm = mock.Mock()
session.service.return_value = fake_pm
remove_mock = fake_pm.removePkg
install_mock = fake_pm.install
install_mock.return_value = True
sys.modules["qi"] = fake_qi
record_messages.reset()
qipkg_action("deploy-package", pml_path, "--url", url)
assert mock_connect.call_args_list == [mock.call("tcp://localhost:9559")]
assert session.service.call_args_list == [mock.call("PackageManager")]
assert remove_mock.call_args_list == [mock.call("d")]
assert install_mock.call_args_list == [mock.call("/home/%s/d-0.1.pkg" % username)]
assert record_messages.find("PackageManager returned: True")
del sys.modules["qi"]
except ImportError:
pass
|
|
# Copyright (c) 2015 Xilinx Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import urlparse
from hopper.utils.logger import *
class LocalSource:
def __init__(self, name, path = None):
self.name = name
self.path = path
def canFetch(self):
return True
def getPath(self, environment):
if self.path:
return self.path
if self.name:
for local in [environment.getLocalLayerPath()]:
if local and os.path.exists(local):
valid = [self.name, self.name + ".git"]
for i in os.listdir(local):
if i in valid:
return os.path.join(local, i)
return None
def __repr__(self):
return "LocalSource (%s@%s)" % (self.name, self.path)
class GitSource:
def __init__(self, remote, ref):
self.remote = remote
self.ref = ref
def canFetch(self):
if self.remote != None:
return True
return False
def getPath(self, environment):
if self.remote:
return os.path.join(environment.getWorkingSourcesPath(),
GitSource.getUriCloneName(self.remote))
return None
def __repr__(self):
return "GitSource (%s@%s)" % (self.ref, self.remote)
@staticmethod
def getUriCloneName(uri):
url = urlparse.urlparse(uri)
clonename = url.path.strip("/").split("/")[-1]
if clonename.endswith(".git"):
clonename = clonename[0:len(clonename) - len(".git")]
return clonename
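# Editor's note: illustrative values (URLs invented, not from the original
# source) showing how GitSource.getUriCloneName() turns a repository URI into
# a local clone directory name by keeping the last path component and dropping
# a trailing ".git".
def _example_clone_names():
    assert GitSource.getUriCloneName("https://github.com/Xilinx/meta-xilinx.git") == "meta-xilinx"
    assert GitSource.getUriCloneName("git://example.com/layers/meta-foo") == "meta-foo"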
class Layer:
def __init__(self, name, path = None, source = None):
self.name = name
self.path = path
self.source = source
def getName(self):
return self.name
def getFullName(self):
if self.path:
return self.name + "/" + self.path
return self.name
def getPath(self):
return self.path
def getSourcePath(self, environment):
if self.source:
sourceroot = self.getRootSourcePath(environment)
if self.path:
sourceroot = os.path.join(sourceroot, self.path)
return sourceroot
return None
def getRootSourcePath(self, environment):
if self.source:
return self.source.getPath(environment)
return None
def isBitBake(self):
if self.getName() == "bitbake":
return True
return False
def __repr__(self):
return "Meta '%s' {%s} (source = %s)" % (self.name, self.path, self.source)
# Handles parsing and filling in data about layers and repos
class LayerCollection:
def __init__(self, defaultversion = None):
self.indexes = []
self.layers = []
self.defaultversion = GitSource(None, defaultversion)
def __iter__(self):
return self.layers.__iter__()
def __len__(self):
return len(self.layers)
def addIndex(self, index):
self.indexes.append(index)
# validates that the layers and bitbake are available
def validate(self, warnonly = False):
hasBitbake = False
for i in self.layers:
if i.isBitBake():
hasBitbake = True
break
if not hasBitbake:
if warnonly:
warning("BitBake is missing from the described layers.")
else:
warning("BitBake is missing from the described layers, adding BitBake.")
bblayer = self.__findlayer__(
LayerCollection.getLayerNameTriple("bitbake", False),
GitSource(None, "master"))
if bblayer:
self.layers.append(bblayer)
else:
error("Could not find BitBake")
return False
# TODO: check dependencies
return True
# Will parse an input set of repos and layers in the args form and produce a list
def parse(self, layers):
for l in layers.iteritems():
names = LayerCollection.getLayerNameTriple(l[0], False)
revision = LayerCollection.parseRevisionInfo(names[0],
l[1]["path"], l[1]["ref"],
self.defaultversion.ref)
clayer = self.__findlayer__(names, revision)
if clayer:
self.layers.append(clayer)
def add(self, name, version = None):
names = LayerCollection.getLayerNameTriple(name, False)
revision = version or self.defaultversion
layer = self.__findlayer__(names, revision)
if layer:
if layer not in self.layers:
self.layers.append(layer)
return layer
return None
def __findlayer__(self, names, revision = None):
layer = None
# check the collection first
if self.layers:
for i in self.layers:
if i.getName() == names[1]:
return i
subpath = names[2] or ""
if revision != None and revision.canFetch():
# enough info to create the layer
layer = Layer(names[1], subpath, revision)
else:
# find layer in index and or create
# TODO: refactor into a "searchLayerUri" function
layerinfo = self.search(names[1])
if layerinfo:
fullrevision = revision
if fullrevision == None:
fullrevision = GitSource(layerinfo["remote"], None)
elif isinstance(fullrevision, GitSource):
fullrevision = GitSource(layerinfo["remote"], fullrevision.ref)
else:
warning("Unable to fill in source information for layer '%s'." % names[1])
raise Exception("Unable to fill in source information for layer '%s'." % names[1])
if "subpath" in layerinfo:
subpath = layerinfo["subpath"]
layer = Layer(names[1], subpath, fullrevision)
else:
warning("Unable to fill in source information for layer '%s'." % names[1])
raise Exception("Unable to fill in source information for layer '%s'." % names[1])
return layer
def search(self, name):
# search the index for a layer with the same name and repo (if specified)
for i in self.indexes:
found = i.find(name)
if found:
return found
return None
def hash(layers):
import hashlib
hashstrings = []
for i in layers:
fullname = i.getFullName()
revstring = ""
if isinstance(i.source, GitSource):
revstring = "%s@%s" % (i.source.ref, i.source.remote)
m = hashlib.md5()
m.update(fullname)
m.update(revstring)
hashstrings.append((fullname, m.hexdigest()))
# sort the strings according to fullname order
m = hashlib.md5()
for i in sorted(hashstrings, key = lambda k : k[0]):
m.update(i[1])
return m.hexdigest()[0:16]
@staticmethod
def parseRevisionInfo(name, remote, ref, defaultref = None):
if ref == "local" and name != None:
# Symbolic Local Source
return LocalSource(name, remote)
if (ref != None and len(ref) != 0):
# assumes git repo
return GitSource(remote, ref)
if defaultref:
return GitSource(remote, defaultref)
return None
@staticmethod
def getLayerNameTriple(name, repospecifier = False):
repo = None
path = None
layer = None
if repospecifier:
repo = name
else:
nameparts = name.split("/")
repo = nameparts[0]
layer = nameparts[-1]
path = "/".join(nameparts[1:])
if len(path) <= 0:
path = None
return (repo, layer, path)
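# Editor's note: illustrative values (layer names invented) for
# LayerCollection.getLayerNameTriple(): a "repo/sub/path/layer" style name is
# split into (repo, layer, path-within-repo).
def _example_layer_name_triples():
    assert LayerCollection.getLayerNameTriple("meta-openembedded/meta-oe") == \
        ("meta-openembedded", "meta-oe", "meta-oe")
    assert LayerCollection.getLayerNameTriple("bitbake") == ("bitbake", "bitbake", None)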
|
|
import collections
import copy
import multiprocessing as mp
from operator import gt, lt
import random
from random import randint, uniform, shuffle
import pysal as ps
from pysal.region.components import check_contiguity
import numpy as np
from numpy.random import RandomState
class LocalSearch(mp.Process):
"""
Class Attributes
----------------
cbest float the current best known solution
Instance Attributes
--------------------
failures int the current number of failures for this iteration
intensificationsize int the size of the soln space to propagate the best soln
tabulist deque of tuples in the form (unit, move to region)
"""
def __init__(self, attribute, w, nregions, lock=None, pid=None, floor=3,
maxfailures=100, maxiterations=15, intensification=0.5):
mp.Process.__init__(self)
self.index = pid
self.lock = lock
self.w = w
self.z = attribute
self.floor = floor
self.maxiterations = maxiterations
self.wss = 0
self.cbest = float('inf')
#Shared memory setup
self.solnspace = np.frombuffer(shared_solnspace.get_obj(), dtype=np.float32)
self.solnspace.shape = (-1, self.w.n + 1)
self.solnspacesize = self.solnspace[:,0].size
self.nregions = nregions
#Work on a copy of the shared memory space.
self.solncolumn = np.empty(self.w.n)
self.unitchooser = np.arange(len(self.solncolumn))
#Setup for intensification and diversification
self.intensificationsize = int(self.solnspacesize * intensification)
#Tabu parameters
self.failures = 0
self.maxfailures = maxfailures + int(maxfailures * uniform(-1.1, 1.2))
self.maxiterations = 15
self.tabulength = self.computetabulength()
self.tabulist = collections.deque(maxlen=self.tabulength)
#Seed the python random number generator
self.randstate = RandomState(self.index)
random.seed(self.index)
def __repr__(self):
return """
The current state of index {} is:
working on soln {} / {}
current region membership:
{}
current tabu list length: {}
current maximum number of failures: {}
current obj. func value: {}
current iteration: {}
""".format(self.index, self.index, self.solnspacesize,
self.solncolumn.reshape(8,8), self.tabulength, self.maxfailures, self.wss,
self.maxiterations)
def localsearch(self):
'''
'''
swapping = True
swap_iteration = 0
total_moves = 0
sln = self.solncolumn
ids = np.arange(len(sln))
k = int(self.nregions)
changed_regions = [1] * k
nr = range(k)
while swapping:
moves_made = 0
regionIds = [r+1 for r in nr if changed_regions[r]]
shuffle(regionIds)
changed_regions = [0] * k
for seed in regionIds:
local_swapping = True
local_attempts = 0
while local_swapping:
local_moves = 0
members = ids[sln == seed].tolist()
neighbors = set()
for member in members:
neighbors |= set(self.w.neighbors[member])
neighbors -= set(members)
candidates = []
for neighbor in neighbors:
rid = sln[neighbor]
block = ids[sln == rid].tolist()
if len(block) <= self.floor:
continue
if check_contiguity(self.w, block, neighbor):
candidates.append(neighbor)
if not candidates:
local_swapping = False
else:
nc = len(candidates)
best = None
cv = 0.0 #TODO Needs to be a high positive number with an aspiration func
for area in candidates:
current_internal = members
rid = sln[area]
current_outter = ids[sln == rid].tolist()
currentwss = self.objective_func([current_internal, current_outter])
new_internal = copy.copy(current_internal)
new_outter = copy.copy(current_outter)
new_internal.append(area)
new_outter.remove(area)
newwss = self.objective_func([new_internal, new_outter])
change = newwss - currentwss
old_region = int(sln[area])
if (area, old_region) in self.tabulist:
continue
elif change < cv:
best = area
cv = change
else:
pass
#Aspiration function here
if best:
area = best
moves_made += 1
old_region = int(sln[area])
#changed_regions is 0 based
changed_regions[seed - 1] = 1
changed_regions[old_region - 1] = 1
#print "Moving area: {} from {} to {}".format(area, old_region, seed)
sln[area] = seed
self.tabulist.appendleft((area, old_region))
self.failures = 0
else:
self.failures += 1
local_swapping = False
#swapping = False
if self.failures >= self.maxfailures:
swapping = False
if moves_made == 0:
swapping = False
new_obj = self.objective_func(sln=sln)
diversify = False
#print sln.reshape(8,8), globalobj
with self.lock:
current_obj_vector = self.solnspace[:,0]
if (new_obj < current_obj_vector).all():
sortedidx = np.argsort(current_obj_vector)[::-1]
idx = sortedidx[0:self.intensificationsize]
for i in idx:
self.solnspace[i][1:] = sln
self.solnspace[i][0] = new_obj
elif new_obj < current_obj_vector[self.index]:
self.solnspace[self.index][1:] = sln
self.solnspace[self.index][0] = new_obj
else:
diversify = True
if diversify:
pass
#Manual 1 iteration breaker
#self.maxiterations = 0
return
def run(self):
#Populate the initial objective function value
with self.lock:
self.wss = self.solncolumn[0]
while self.maxiterations > 0:
with self.lock:
#Populate the local working space
self.solncolumn[:] = self.solnspace[self.index][1:]
#Compute the current objective function value
self.wss = self.solnspace[self.index,0]
self.failures = 0 #Reset the failure counter before each iteration
self.localsearch()
'''
#This is a constant contiguity check that can be removed once validated.
cont = True
for i in range(1, int(self.nregions) + 1):
region = np.where(self.solncolumn == i)[0].tolist()
if test_region(self.w, region) == False:
cont = False
if cont == False:
print "ERROR: ", self.__repr__()
'''
# Uncomment to enable cycling around the soln space
#Increment the index counter to step around the solution space
self.index += 1
if self.index >= self.solnspacesize:
self.index = 0
self.maxiterations -= 1
def computetabulength(self):
'''Taillard 1990'''
smin = (self.nregions - 1) * 0.9
smax = (self.nregions - 1) * 1.1
tabu_length = 6 + (randint(0, int(smax - smin)))
return tabu_length
def objective_func(self, regions=None, sln=None):
"""
Computes the objective function value
Parameters
----------
regions list of regionsids, if regions is none, computed for all regions
Returns
-------
wss float wss
"""
wss = 0
if regions == None:
computespace = range(1, int(self.nregions) + 1)
for r in computespace:
ids = np.where(sln == r)[0]
m = self.z[ids]
var = m.var()
wss += np.sum(var * len(ids))
else:
computespace = regions
for r in computespace:
m = self.z[r]
var = m.var()
wss += np.sum(var * len(m))
return wss
def initshared_localsoln(_solnspace):
global shared_solnspace
shared_solnspace = _solnspace
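# Editor's note: a small, self-contained sketch (not part of the original
# module) of the within-region sum of squares that LocalSearch.objective_func
# computes above: each region contributes its attribute variance times its
# member count.
def _example_wss(z, labels, nregions):
    import numpy as np
    total = 0.0
    for r in range(1, int(nregions) + 1):
        ids = np.where(labels == r)[0]
        if len(ids):
            total += z[ids].var() * len(ids)
    return total
# e.g. _example_wss(np.array([1., 2., 10., 11.]), np.array([1, 1, 2, 2]), 2)
# gives 0.25*2 + 0.25*2 == 1.0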
def test_region(w,neighbors):
d={}
g=Graph()
for i in neighbors:
d[i]=[j for j in w.neighbors[i] if (j in neighbors)]
for i in d:
for j in d[i]:
g.add_edge(i,j,1.0)
cc=g.connected_components(op=gt)
if len(cc)==1:
return True
else:
return False
class Graph(object):
def __init__(self):
self.nodes=set()
self.edges={}
self.cluster_lookup={}
self.no_link={}
def add_edge(self,n1,n2,w):
self.nodes.add(n1)
self.nodes.add(n2)
self.edges.setdefault(n1,{}).update({n2:w})
self.edges.setdefault(n2,{}).update({n1:w})
def connected_components(self,threshold=0.9, op=lt):
nodes = set(self.nodes)
components,visited =[], set()
while len(nodes) > 0:
connected, visited = self.dfs(nodes.pop(), visited, threshold, op)
connected = set(connected)
for node in connected:
if node in nodes:
nodes.remove(node)
subgraph=Graph()
subgraph.nodes = connected
subgraph.no_link = self.no_link
for s in subgraph.nodes:
for k,v in self.edges.get(s,{}).iteritems():
if k in subgraph.nodes:
subgraph.edges.setdefault(s,{}).update({k:v})
if s in self.cluster_lookup:
subgraph.cluster_lookup[s] = self.cluster_lookup[s]
components.append(subgraph)
return components
def dfs(self, v, visited, threshold, op=lt, first=None):
aux=[v]
visited.add(v)
if first is None:
first = v
for i in (n for n, w in self.edges.get(v,{}).iteritems() \
if op(w, threshold) and n not in visited):
x,y=self.dfs(i,visited,threshold,op,first)
aux.extend(x)
visited=visited.union(y)
return aux, visited
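# Editor's note: an illustrative, hypothetical use of test_region()/Graph above.
# _FakeW mimics only the `.neighbors` mapping of a PySAL W object, here for a
# 2x2 rook-contiguity grid; it is not part of the original module.
class _FakeW(object):
    def __init__(self, neighbors):
        self.neighbors = neighbors

def _example_contiguity_check():
    grid = _FakeW({0: [1, 2], 1: [0, 3], 2: [0, 3], 3: [1, 2]})
    assert test_region(grid, [0, 1, 3])        # L-shaped region is connected
    assert not test_region(grid, [0, 3])       # diagonal cells are not contiguous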
|
|
"""A multi-producer, multi-consumer queue."""
from _pydev_imps._pydev_time import time as _time
from _pydev_imps import _pydev_thread
try:
from _pydev_imps import _pydev_threading as _threading
except ImportError:
import dummy_threading as _threading
from collections import deque
import heapq
__all__ = ['Empty', 'Full', 'Queue', 'PriorityQueue', 'LifoQueue']
class Empty(Exception):
"Exception raised by Queue.get(block=0)/get_nowait()."
pass
class Full(Exception):
"Exception raised by Queue.put(block=0)/put_nowait()."
pass
class Queue:
"""Create a queue object with a given maximum size.
If maxsize is <= 0, the queue size is infinite.
"""
def __init__(self, maxsize=0):
self.maxsize = maxsize
self._init(maxsize)
# mutex must be held whenever the queue is mutating. All methods
# that acquire mutex must release it before returning. mutex
# is shared between the three conditions, so acquiring and
# releasing the conditions also acquires and releases mutex.
self.mutex = _pydev_thread.allocate_lock()
# Notify not_empty whenever an item is added to the queue; a
# thread waiting to get is notified then.
self.not_empty = _threading.Condition(self.mutex) # @UndefinedVariable
# Notify not_full whenever an item is removed from the queue;
# a thread waiting to put is notified then.
self.not_full = _threading.Condition(self.mutex) # @UndefinedVariable
# Notify all_tasks_done whenever the number of unfinished tasks
# drops to zero; thread waiting to join() is notified to resume
self.all_tasks_done = _threading.Condition(self.mutex) # @UndefinedVariable
self.unfinished_tasks = 0
def task_done(self):
"""Indicate that a formerly enqueued task is complete.
Used by Queue consumer threads. For each get() used to fetch a task,
a subsequent call to task_done() tells the queue that the processing
on the task is complete.
If a join() is currently blocking, it will resume when all items
have been processed (meaning that a task_done() call was received
for every item that had been put() into the queue).
Raises a ValueError if called more times than there were items
placed in the queue.
"""
self.all_tasks_done.acquire()
try:
unfinished = self.unfinished_tasks - 1
if unfinished <= 0:
if unfinished < 0:
raise ValueError('task_done() called too many times')
self.all_tasks_done.notify_all()
self.unfinished_tasks = unfinished
finally:
self.all_tasks_done.release()
def join(self):
"""Blocks until all items in the Queue have been gotten and processed.
The count of unfinished tasks goes up whenever an item is added to the
queue. The count goes down whenever a consumer thread calls task_done()
to indicate the item was retrieved and all work on it is complete.
When the count of unfinished tasks drops to zero, join() unblocks.
"""
self.all_tasks_done.acquire()
try:
while self.unfinished_tasks:
self.all_tasks_done.wait()
finally:
self.all_tasks_done.release()
def qsize(self):
"""Return the approximate size of the queue (not reliable!)."""
self.mutex.acquire()
n = self._qsize()
self.mutex.release()
return n
def empty(self):
"""Return True if the queue is empty, False otherwise (not reliable!)."""
self.mutex.acquire()
n = not self._qsize()
self.mutex.release()
return n
def full(self):
"""Return True if the queue is full, False otherwise (not reliable!)."""
self.mutex.acquire()
n = 0 < self.maxsize == self._qsize()
self.mutex.release()
return n
def put(self, item, block=True, timeout=None):
"""Put an item into the queue.
If optional args 'block' is true and 'timeout' is None (the default),
block if necessary until a free slot is available. If 'timeout' is
a positive number, it blocks at most 'timeout' seconds and raises
the Full exception if no free slot was available within that time.
Otherwise ('block' is false), put an item on the queue if a free slot
is immediately available, else raise the Full exception ('timeout'
is ignored in that case).
"""
self.not_full.acquire()
try:
if self.maxsize > 0:
if not block:
if self._qsize() == self.maxsize:
raise Full
elif timeout is None:
while self._qsize() == self.maxsize:
self.not_full.wait()
elif timeout < 0:
raise ValueError("'timeout' must be a positive number")
else:
endtime = _time() + timeout
while self._qsize() == self.maxsize:
remaining = endtime - _time()
if remaining <= 0.0:
raise Full
self.not_full.wait(remaining)
self._put(item)
self.unfinished_tasks += 1
self.not_empty.notify()
finally:
self.not_full.release()
def put_nowait(self, item):
"""Put an item into the queue without blocking.
Only enqueue the item if a free slot is immediately available.
Otherwise raise the Full exception.
"""
return self.put(item, False)
def get(self, block=True, timeout=None):
"""Remove and return an item from the queue.
If optional args 'block' is true and 'timeout' is None (the default),
block if necessary until an item is available. If 'timeout' is
a positive number, it blocks at most 'timeout' seconds and raises
the Empty exception if no item was available within that time.
Otherwise ('block' is false), return an item if one is immediately
available, else raise the Empty exception ('timeout' is ignored
in that case).
"""
self.not_empty.acquire()
try:
if not block:
if not self._qsize():
raise Empty
elif timeout is None:
while not self._qsize():
self.not_empty.wait()
elif timeout < 0:
raise ValueError("'timeout' must be a positive number")
else:
endtime = _time() + timeout
while not self._qsize():
remaining = endtime - _time()
if remaining <= 0.0:
raise Empty
self.not_empty.wait(remaining)
item = self._get()
self.not_full.notify()
return item
finally:
self.not_empty.release()
def get_nowait(self):
"""Remove and return an item from the queue without blocking.
Only get an item if one is immediately available. Otherwise
raise the Empty exception.
"""
return self.get(False)
# Override these methods to implement other queue organizations
# (e.g. stack or priority queue).
# These will only be called with appropriate locks held
# Initialize the queue representation
def _init(self, maxsize):
self.queue = deque()
def _qsize(self, len=len):
return len(self.queue)
# Put a new item in the queue
def _put(self, item):
self.queue.append(item)
# Get an item from the queue
def _get(self):
return self.queue.popleft()
class PriorityQueue(Queue):
'''Variant of Queue that retrieves open entries in priority order (lowest first).
Entries are typically tuples of the form: (priority number, data).
'''
def _init(self, maxsize):
self.queue = []
def _qsize(self, len=len):
return len(self.queue)
def _put(self, item, heappush=heapq.heappush):
heappush(self.queue, item)
def _get(self, heappop=heapq.heappop):
return heappop(self.queue)
class LifoQueue(Queue):
'''Variant of Queue that retrieves most recently added entries first.'''
def _init(self, maxsize):
self.queue = []
def _qsize(self, len=len):
return len(self.queue)
def _put(self, item):
self.queue.append(item)
def _get(self):
return self.queue.pop()
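# Editor's note: a minimal, single-threaded usage sketch (not part of the
# original module) of the put/get/task_done/join protocol and of the ordering
# behaviour of the three queue flavours defined above.
def _example_queue_usage():
    q = Queue()
    for item in (1, 2, 3):
        q.put(item)
    while not q.empty():
        q.get()
        q.task_done()              # one task_done() per successful get()
    q.join()                       # returns at once: no unfinished tasks remain
    pq = PriorityQueue()
    pq.put((2, 'second'))
    pq.put((1, 'first'))
    assert pq.get() == (1, 'first')   # lowest priority value comes out first
    lq = LifoQueue()
    lq.put('a')
    lq.put('b')
    assert lq.get() == 'b'            # most recently added comes out first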
|
|
# $Id: TemplateExpand.py 1053 2008-07-30 12:03:29Z landrgr1 $
#
# Created by Greg Landrum August, 2006
#
#
from __future__ import print_function
from rdkit import RDLogger as logging
logger = logging.logger()
logger.setLevel(logging.INFO)
from rdkit import Chem
from rdkit.Chem import Crippen
from rdkit.Chem import AllChem
from rdkit.Chem.ChemUtils.AlignDepict import AlignDepict
import sys
_version="0.8.0"
_greet="This is TemplateExpand version %s"%_version
_usage="""
Usage: TemplateExpand [options] template <sidechains>
Unless otherwise indicated, the template and sidechains are assumed to be
Smiles
Each sidechain entry should be:
[-r] SMARTS filename
The SMARTS pattern is used to recognize the attachment point,
if the -r argument is not provided, then atoms matching the pattern
will be removed from the sidechains.
or
-n filename
where the attachment atom is the first atom in each molecule
The filename provides the list of potential sidechains.
options:
-o filename.sdf: provides the name of the output file, otherwise
stdout is used
--sdf : expect the sidechains to be in SD files
--moltemplate: the template(s) are in a mol/SD file, new depiction(s)
will not be generated unless the --redraw argument is also
provided
--smilesFileTemplate: extract the template(s) from a SMILES file instead of
expecting SMILES on the command line.
--redraw: generate a new depiction for the molecular template(s)
--useall:
or
--useallmatches: generate a product for each possible match of the attachment
pattern to each sidechain. If this is not provided, the first
match (not canonically defined) will be used.
--force: by default, the program prompts the user if the library is
going to contain more than 1000 compounds. This argument
disables the prompt.
--templateSmarts="smarts": provides a space-delimited list containing the SMARTS
patterns to be used to recognize attachment points in
the template
--autoNames: when set this toggle causes the resulting compounds to be named
based on their sequence id in the file, e.g.
"TemplateEnum: Mol_1", "TemplateEnum: Mol_2", etc.
otherwise the names of the template and building blocks (from
the input files) will be combined to form a name for each
product molecule.
--3D : Generate 3d coordinates for the product molecules instead of 2d coordinates,
requires the --moltemplate option
--tether : refine the 3d conformations using a tethered minimization
"""
def Usage():
print(_usage, file=sys.stderr)
sys.exit(-1)
#pylint: disable=C0111,C0103,C0322,C0324,C0323
nDumped=0
def _exploder(mol,depth,sidechains,core,chainIndices,autoNames=True,templateName='',
resetCounter=True,do3D=False,useTethers=False):
global nDumped
if resetCounter:
nDumped=0
ourChains = sidechains[depth]
patt = '[%d*]'%(depth+1)
patt = Chem.MolFromSmiles(patt)
for i,(chainIdx,chain) in enumerate(ourChains):
tchain = chainIndices[:]
tchain.append((i,chainIdx))
rs = Chem.ReplaceSubstructs(mol,patt,chain,replaceAll=True)
if rs:
r = rs[0]
if depth<len(sidechains)-1:
for entry in _exploder(r,depth+1,sidechains,core,tchain,
autoNames=autoNames,templateName=templateName,
resetCounter=0,do3D=do3D,useTethers=useTethers):
yield entry
else:
try:
Chem.SanitizeMol(r)
except ValueError:
import traceback
traceback.print_exc()
continue
if not do3D:
if r.HasSubstructMatch(core):
try:
AlignDepict(r,core)
except:
import traceback
traceback.print_exc()
print(Chem.MolToSmiles(r), file=sys.stderr)
else:
print('>>> no match', file=sys.stderr)
AllChem.Compute2DCoords(r)
else:
r = Chem.AddHs(r)
AllChem.ConstrainedEmbed(r,core,useTethers)
Chem.Kekulize(r)
if autoNames:
tName = "TemplateEnum: Mol_%d"%(nDumped+1)
else:
tName = templateName
for bbI,bb in enumerate(tchain):
bbMol = sidechains[bbI][bb[0]][1]
if bbMol.HasProp('_Name'):
bbNm = bbMol.GetProp('_Name')
else:
bbNm = str(bb[1])
tName += '_' + bbNm
r.SetProp("_Name",tName)
r.SetProp('seq_num',str(nDumped+1))
r.SetProp('reagent_indices','_'.join([str(x[1]) for x in tchain]))
for bbI,bb in enumerate(tchain):
bbMol = sidechains[bbI][bb[0]][1]
if bbMol.HasProp('_Name'):
bbNm = bbMol.GetProp('_Name')
else:
bbNm = str(bb[1])
r.SetProp('building_block_%d'%(bbI+1),bbNm)
for propN in bbMol.GetPropNames():
r.SetProp('building_block_%d_%s'%(bbI+1,propN),bbMol.GetProp(propN))
nDumped += 1
if not nDumped%100:
logger.info('Done %d molecules'%nDumped)
yield r
def Explode(template,sidechains,outF,autoNames=True,do3D=False,useTethers=False):
chainIndices=[]
core = Chem.DeleteSubstructs(template,Chem.MolFromSmiles('[*]'))
try:
templateName = template.GetProp('_Name')
except KeyError:
templateName="template"
for mol in _exploder(template,0,sidechains,core,chainIndices,autoNames=autoNames,
templateName=templateName,do3D=do3D,useTethers=useTethers):
outF.write(Chem.MolToMolBlock(mol))
for pN in mol.GetPropNames():
print('> <%s>\n%s\n'%(pN,mol.GetProp(pN)), file=outF)
print('$$$$', file=outF)
def MoveDummyNeighborsToBeginning(mol,useAll=False):
dummyPatt=Chem.MolFromSmiles('[*]')
matches = mol.GetSubstructMatches(dummyPatt)
res = []
for match in matches:
matchIdx = match[0]
smi = Chem.MolToSmiles(mol,True,rootedAtAtom=matchIdx)
entry = Chem.MolFromSmiles(smi)
# entry now has [*] as atom 0 and the neighbor
# as atom 1. Cleave the [*]:
entry = Chem.DeleteSubstructs(entry,dummyPatt)
for propN in mol.GetPropNames():
entry.SetProp(propN,mol.GetProp(propN));
# now we have a molecule with the atom to be joined
# in position zero; Keep that:
res.append(entry)
if not useAll:
break
return res
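# Editor's note: an illustrative call (the input SMILES is invented) showing
# what MoveDummyNeighborsToBeginning() does: the fragment is re-rooted so that
# the atom bonded to the [*] attachment point becomes atom 0, and the dummy
# atom itself is cleaved off.
def _example_move_dummy_neighbor():
    frag = Chem.MolFromSmiles('[*]CCO')     # dummy marks the attachment point
    entry = MoveDummyNeighborsToBeginning(frag)[0]
    assert entry.GetNumAtoms() == frag.GetNumAtoms() - 1   # dummy removed
    return Chem.MolToSmiles(entry)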
def ConstructSidechains(suppl,sma=None,replace=True,useAll=False):
if sma:
try:
patt = Chem.MolFromSmarts(sma)
except:
logger.error('could not construct pattern from smarts: %s'%sma,
exc_info=True)
return None
else:
patt = None
if replace:
replacement = Chem.MolFromSmiles('[*]')
res = []
for idx,mol in enumerate(suppl):
if not mol:
continue
if patt:
if not mol.HasSubstructMatch(patt):
logger.warning('The substructure pattern did not match sidechain %d. This may result in errors.'%(idx+1))
if replace:
tmp = list(Chem.ReplaceSubstructs(mol,patt,replacement))
if not useAll: tmp = [tmp[0]]
for i,entry in enumerate(tmp):
entry = MoveDummyNeighborsToBeginning(entry)
if not entry:
continue
entry = entry[0]
for propN in mol.GetPropNames():
entry.SetProp(propN,mol.GetProp(propN));
# now we have a molecule with the atom to be joined
# in position zero; Keep that:
tmp[i] = (idx+1,entry)
else:
# no replacement, use the pattern to reorder
# atoms only:
matches = mol.GetSubstructMatches(patt)
if matches:
tmp = []
for match in matches:
smi = Chem.MolToSmiles(mol,True,rootedAtAtom=match[0])
entry = Chem.MolFromSmiles(smi)
for propN in mol.GetPropNames():
entry.SetProp(propN,mol.GetProp(propN));
# now we have a molecule with the atom to be joined
# in position zero; Keep that:
tmp.append((idx+1,entry))
else:
tmp = None
else:
tmp = [(idx+1,mol)]
if tmp:
res.extend(tmp)
return res
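# Editor's note: a hypothetical invocation (molecules and SMARTS chosen only
# for illustration) of ConstructSidechains(): here chlorine atoms mark the
# attachment points, get replaced by [*], and each result comes back as an
# (input index, re-rooted molecule) pair ready to be consumed by Explode().
def _example_construct_sidechains():
    suppl = [Chem.MolFromSmiles('ClCCO'), Chem.MolFromSmiles('ClCCN')]
    chains = ConstructSidechains(suppl, sma='[Cl]', replace=True)
    assert [idx for idx, _mol in chains] == [1, 2]   # 1-based input indices
    return chains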
if __name__=='__main__':
import getopt
print(_greet, file=sys.stderr)
try:
args,extras = getopt.getopt(sys.argv[1:],'o:h',[
'sdf',
'moltemplate',
'molTemplate',
'smilesFileTemplate',
'templateSmarts=',
'redraw',
'force',
'useall',
'useallmatches',
'autoNames',
'3D','3d',
'tethers',
'tether',
])
except:
import traceback
traceback.print_exc()
Usage()
if len(extras)<3:
Usage()
tooLong=1000
sdLigands=False
molTemplate=False
redrawTemplate=False
outF=None
forceIt=False
useAll=False
templateSmarts=[]
smilesFileTemplate=False
autoNames=False
do3D=False
useTethers=False
for arg,val in args:
if arg=='-o':
outF=val
elif arg=='--sdf':
sdLigands=True
elif arg in ('--moltemplate','--molTemplate'):
molTemplate=True
elif arg=='--smilesFileTemplate':
smilesFileTemplate=True
elif arg=='--templateSmarts':
templateSmarts = val
elif arg=='--redraw':
redrawTemplate=True
elif arg=='--force':
forceIt=True
elif arg=='--autoNames':
autoNames=True
elif arg in ('--useall','--useallmatches'):
useAll=True
elif arg in ('--3D','--3d'):
do3D=True
elif arg in ('--tethers','--tether'):
useTethers=True
elif arg=='-h':
Usage()
sys.exit(0)
if do3D:
if not molTemplate:
raise ValueError('the --3D option is only useable in combination with --moltemplate')
if redrawTemplate:
logger.warning('--redraw does not make sense in combination with --3D, removing it')
redrawTemplate=False
if templateSmarts:
splitL = templateSmarts.split(' ') #pylint: disable=E1103
templateSmarts = []
for i,sma in enumerate(splitL):
patt = Chem.MolFromSmarts(sma)
if not patt:
raise ValueError('could not convert smarts "%s" to a query'%sma)
if i>=4:
i+=1
replace = Chem.MolFromSmiles('[%d*]'%(i+1))
templateSmarts.append((patt,replace))
if molTemplate:
removeHs = not do3D
try:
s = Chem.SDMolSupplier(extras[0],removeHs=removeHs)
templates = [x for x in s]
except:
logger.error('Could not construct templates from input file: %s'%extras[0],
exc_info=True)
sys.exit(1)
if redrawTemplate:
for template in templates:
AllChem.Compute2DCoords(template)
else:
if not smilesFileTemplate:
try:
templates = [Chem.MolFromSmiles(extras[0])]
except:
logger.error('Could not construct template from smiles: %s'%extras[0],
exc_info=True)
sys.exit(1)
else:
try:
s = Chem.SmilesMolSupplier(extras[0],titleLine=False)
templates = [x for x in s]
except:
logger.error('Could not construct templates from input file: %s'%extras[0],
exc_info=True)
sys.exit(1)
for template in templates:
AllChem.Compute2DCoords(template)
if templateSmarts:
finalTs = []
for i,template in enumerate(templates):
for j,(patt,replace) in enumerate(templateSmarts):
if not template.HasSubstructMatch(patt):
logger.error('template %d did not match sidechain pattern %d, skipping it'%(i+1,j+1))
template =None
break
template = Chem.ReplaceSubstructs(template,patt,replace)[0]
if template:
Chem.SanitizeMol(template)
finalTs.append(template)
templates = finalTs
sidechains = []
pos = 1
while pos<len(extras):
if extras[pos]=='-r':
replaceIt=False
pos += 1
else:
replaceIt=True
if extras[pos]=='-n':
sma = None
else:
sma = extras[pos]
pos += 1
try:
dat = extras[pos]
except IndexError:
logger.error('missing a sidechain filename')
sys.exit(-1)
pos += 1
if sdLigands:
try:
suppl = Chem.SDMolSupplier(dat)
except:
logger.error('could not construct supplier from SD file: %s'%dat,
exc_info=True)
suppl = []
else:
tmpF = open(dat,'r')
inL = tmpF.readline()
if len(inL.split(' '))<2:
nmCol=-1
else:
nmCol=1
try:
suppl = Chem.SmilesMolSupplier(dat,nameColumn=nmCol)
except:
logger.error('could not construct supplier from smiles file: %s'%dat,
exc_info=True)
suppl = []
suppl = [x for x in suppl]
chains = ConstructSidechains(suppl,sma=sma,replace=replaceIt,useAll=useAll)
if chains:
sidechains.append(chains)
count = 1
for chain in sidechains:
count *= len(chain)
count *= len(templates)
if not sidechains or not count:
print("No molecules to be generated.", file=sys.stderr)
sys.exit(0)
if not forceIt and count>tooLong:
print("This will generate %d molecules."%count, file=sys.stderr)
print("Continue anyway? [no] ", file=sys.stderr, end='')
sys.stderr.flush()
ans = sys.stdin.readline().strip()
if ans not in ('y','yes','Y','YES'):
sys.exit(0)
if outF and outF!="-":
try:
outF = open(outF,'w+')
except IOError:
logger.error('could not open file %s for writing'%(outF),
exc_info=True)
else:
outF = sys.stdout
for template in templates:
Explode(template,sidechains,outF,autoNames=autoNames,do3D=do3D,
useTethers=useTethers)
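# Editor's note: an illustrative command line (file names invented) combining
# the options documented in _usage above: expand a mol-file template against a
# chlorine-marked sidechain file and a first-atom-attachment sidechain file,
# writing the products to an SD file.
#
#   python TemplateExpand.py --moltemplate --redraw -o products.sdf \
#       template.mol '[Cl]' chains1.smi -n chains2.smi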
|