text          stringlengths   89 to 104k
code_tokens   list
avg_line_len  float64         7.91 to 980
score         float64         0 to 630
def _default(self, obj):
    """
    return a serialized version of obj or raise a TypeError

    :param obj:
    :return: Serialized version of obj
    """
    return obj.__dict__ if isinstance(obj, JsonObj) else json.JSONDecoder().decode(obj)
[ "def", "_default", "(", "self", ",", "obj", ")", ":", "return", "obj", ".", "__dict__", "if", "isinstance", "(", "obj", ",", "JsonObj", ")", "else", "json", ".", "JSONDecoder", "(", ")", ".", "decode", "(", "obj", ")" ]
36.285714
18.571429
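The record above is a ``default`` hook for json.dumps. A minimal sketch of how such a hook gets wired up; the JsonObj class here is a stand-in for the one the record assumes:

import json

class JsonObj:
    """Stand-in for the JsonObj base class the record assumes."""

class Point(JsonObj):
    def __init__(self, x, y):
        self.x, self.y = x, y

def default(obj):
    # mirror the record: serialize JsonObj instances via their __dict__
    return obj.__dict__ if isinstance(obj, JsonObj) else json.JSONDecoder().decode(obj)

print(json.dumps(Point(1, 2), default=default))   # {"x": 1, "y": 2}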
def verifytablecell(self, window_name, object_name, row_index,
                    column_index, row_text):
    """
    Verify table cell value with given text

    @param window_name: Window name to type in, either full name,
    LDTP's name convention, or a Unix glob.
    @type window_name: string
    @param object_name: Object name to type in, either full name,
    LDTP's name convention, or a Unix glob.
    @type object_name: string
    @param row_index: Row index to get
    @type row_index: integer
    @param column_index: Column index to get, default value 0
    @type column_index: integer
    @param row_text: Row text to match
    @type row_text: string

    @return: 1 on success, 0 on failure.
    @rtype: integer
    """
    try:
        value = getcellvalue(window_name, object_name, row_index, column_index)
        if re.match(row_text, value):
            return 1
    except LdtpServerException:
        pass
    return 0
[ "def", "verifytablecell", "(", "self", ",", "window_name", ",", "object_name", ",", "row_index", ",", "column_index", ",", "row_text", ")", ":", "try", ":", "value", "=", "getcellvalue", "(", "window_name", ",", "object_name", ",", "row_index", ",", "column_index", ")", "if", "re", ".", "match", "(", "row_text", ",", "value", ")", ":", "return", "1", "except", "LdtpServerException", ":", "pass", "return", "0" ]
35.964286
15.25
def _station(self) -> str:
    """Extract station name."""
    return str(self.obj.SBRes.SBReq.Start.Station.HafasName.Text.pyval)
[ "def", "_station", "(", "self", ")", "->", "str", ":", "return", "str", "(", "self", ".", "obj", ".", "SBRes", ".", "SBReq", ".", "Start", ".", "Station", ".", "HafasName", ".", "Text", ".", "pyval", ")" ]
45.333333
16.333333
def makeDataFiles(prefix, dir):
    """ Create distutils data_files structure from dir

    distutil will copy all files rooted under dir into prefix, excluding
    dir itself, just like 'ditto src dst' works, and unlike 'cp -r src
    dst', which copies src into dst.

    Typical usage:
        # install the contents of 'wiki' under sys.prefix+'share/moin'
        data_files = makeDataFiles('share/moin', 'wiki')

    For this directory structure:
        root
            file1
            file2
            dir
                file
                subdir
                    file
    makeDataFiles('prefix', 'root') will create this distutil data_files structure:
        [('prefix', ['file1', 'file2']),
         ('prefix/dir', ['file']),
         ('prefix/dir/subdir', ['file'])]
    """
    # Strip 'dir/' off the path before joining with prefix
    dir = dir.rstrip('/')
    strip = len(dir) + 1
    found = []
    os.path.walk(dir, visit, (prefix, strip, found))
    #print found[0]
    return found[0]
[ "def", "makeDataFiles", "(", "prefix", ",", "dir", ")", ":", "# Strip 'dir/' from of path before joining with prefix", "dir", "=", "dir", ".", "rstrip", "(", "'/'", ")", "strip", "=", "len", "(", "dir", ")", "+", "1", "found", "=", "[", "]", "os", ".", "path", ".", "walk", "(", "dir", ",", "visit", ",", "(", "prefix", ",", "strip", ",", "found", ")", ")", "#print found[0]", "return", "found", "[", "0", "]" ]
35.035714
17.285714
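os.path.walk and its visit callback are Python-2-only (os.path.walk was removed in Python 3). A hedged sketch of the same helper on top of os.walk; the name make_data_files and the exact traversal order are assumptions:

import os

def make_data_files(prefix, root):
    # Hypothetical Python 3 port of makeDataFiles above, using os.walk
    root = root.rstrip('/')
    strip = len(root) + 1
    data_files = []
    for dirpath, dirnames, filenames in os.walk(root):
        if filenames:
            # map 'root/sub' -> 'prefix/sub', and 'root' itself -> 'prefix'
            target = os.path.join(prefix, dirpath[strip:]).rstrip('/')
            data_files.append(
                (target, [os.path.join(dirpath, f) for f in filenames]))
    return data_files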
def loads(data, wrapper=dict):
    """
    Loads Appinfo content into a Python object.
    :param data: A byte-like object with the contents of an Appinfo file.
    :param wrapper: A wrapping object for key-value pairs.
    :return: An Ordered Dictionary with Appinfo data.
    """
    if not isinstance(data, (bytes, bytearray)):
        raise TypeError('can only load a bytes-like object as an Appinfo but got '
                        + type(data).__name__)
    return AppinfoDecoder(data, wrapper=wrapper).decode()
[ "def", "loads", "(", "data", ",", "wrapper", "=", "dict", ")", ":", "if", "not", "isinstance", "(", "data", ",", "(", "bytes", ",", "bytearray", ")", ")", ":", "raise", "TypeError", "(", "'can only load a bytes-like object as an Appinfo but got '", "+", "type", "(", "data", ")", ".", "__name__", ")", "return", "AppinfoDecoder", "(", "data", ",", "wrapper", "=", "wrapper", ")", ".", "decode", "(", ")" ]
44.090909
19.181818
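A usage sketch for the loader above; the appinfo.vdf path is a hypothetical input file, and only the loads signature shown in the record is assumed:

from collections import OrderedDict

with open('appinfo.vdf', 'rb') as f:        # hypothetical input path
    raw = f.read()

info = loads(raw)                           # key-value pairs as plain dicts
ordered = loads(raw, wrapper=OrderedDict)   # preserve on-disk key order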
def to_bucket(self, timestamp, steps=0):
    '''
    Calculate the bucket from a timestamp.
    '''
    dt = datetime.utcfromtimestamp(timestamp)

    if steps != 0:
        if self._step == 'daily':
            dt = dt + timedelta(days=steps)
        elif self._step == 'weekly':
            dt = dt + timedelta(weeks=steps)
        elif self._step == 'monthly':
            dt = dt + MonthDelta(steps)
        elif self._step == 'yearly':
            year = int(dt.strftime(self.FORMATS[self._step]))
            year += steps
            dt = datetime(year=year, month=1, day=1)

    return int(dt.strftime(self.FORMATS[self._step]))
[ "def", "to_bucket", "(", "self", ",", "timestamp", ",", "steps", "=", "0", ")", ":", "dt", "=", "datetime", ".", "utcfromtimestamp", "(", "timestamp", ")", "if", "steps", "!=", "0", ":", "if", "self", ".", "_step", "==", "'daily'", ":", "dt", "=", "dt", "+", "timedelta", "(", "days", "=", "steps", ")", "elif", "self", ".", "_step", "==", "'weekly'", ":", "dt", "=", "dt", "+", "timedelta", "(", "weeks", "=", "steps", ")", "elif", "self", ".", "_step", "==", "'monthly'", ":", "dt", "=", "dt", "+", "MonthDelta", "(", "steps", ")", "elif", "self", ".", "_step", "==", "'yearly'", ":", "year", "=", "int", "(", "dt", ".", "strftime", "(", "self", ".", "FORMATS", "[", "self", ".", "_step", "]", ")", ")", "year", "+=", "steps", "dt", "=", "datetime", "(", "year", "=", "year", ",", "month", "=", "1", ",", "day", "=", "1", ")", "return", "int", "(", "dt", ".", "strftime", "(", "self", ".", "FORMATS", "[", "self", ".", "_step", "]", ")", ")" ]
31.052632
14.315789
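A small usage sketch of the bucketing idea; the FORMATS mapping here is a stand-in for the one the class defines:

from datetime import datetime, timedelta

# Hypothetical strftime formats; the real class defines its own FORMATS
FORMATS = {'daily': '%Y%m%d', 'monthly': '%Y%m', 'yearly': '%Y'}

ts = 1325419200                                  # 2012-01-01 12:00:00 UTC
dt = datetime.utcfromtimestamp(ts)
print(int(dt.strftime(FORMATS['daily'])))        # 20120101
print(int((dt + timedelta(days=1)).strftime(FORMATS['daily'])))  # 20120102 (steps=1)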
def folderitem(self, obj, item, index):
    """Service triggered each time an item is iterated in folderitems.

    The use of this service prevents the extra-loops in child objects.

    :obj: the instance of the class to be foldered
    :item: dict containing the properties of the object to be used by
        the template
    :index: current index of the item
    """
    # ensure we have an object and not a brain
    obj = api.get_object(obj)
    url = api.get_url(obj)
    title = api.get_title(obj)
    keyword = obj.getKeyword()

    # get the category
    if self.show_categories_enabled():
        category = obj.getCategoryTitle()
        if category not in self.categories:
            self.categories.append(category)
        item["category"] = category

    item["Title"] = title
    item["replace"]["Title"] = get_link(url, value=title)
    item["choices"]["min_operator"] = self.min_operator_choices
    item["choices"]["max_operator"] = self.max_operator_choices
    item["allow_edit"] = self.get_editable_columns()
    item["required"] = self.get_required_columns()

    spec = self.specification.get(keyword, {})
    item["selected"] = spec and True or False
    item["min_operator"] = spec.get("min_operator", "geq")
    item["min"] = spec.get("min", "")
    item["max_operator"] = spec.get("max_operator", "leq")
    item["max"] = spec.get("max", "")
    item["warn_min"] = spec.get("warn_min", "")
    item["warn_max"] = spec.get("warn_max", "")
    item["hidemin"] = spec.get("hidemin", "")
    item["hidemax"] = spec.get("hidemax", "")
    item["rangecomment"] = spec.get("rangecomment", "")

    # Add methods
    methods = obj.getMethods()
    if methods:
        links = map(
            lambda m: get_link(
                m.absolute_url(), value=m.Title(), css_class="link"),
            methods)
        item["replace"]["Methods"] = ", ".join(links)
    else:
        item["methods"] = ""

    # Icons
    after_icons = ""
    if obj.getAccredited():
        after_icons += get_image(
            "accredited.png", title=_("Accredited"))
    if obj.getAttachmentOption() == "r":
        after_icons += get_image(
            "attach_reqd.png", title=_("Attachment required"))
    if obj.getAttachmentOption() == "n":
        after_icons += get_image(
            "attach_no.png", title=_("Attachment not permitted"))
    if after_icons:
        item["after"]["Title"] = after_icons

    return item
[ "def", "folderitem", "(", "self", ",", "obj", ",", "item", ",", "index", ")", ":", "# ensure we have an object and not a brain", "obj", "=", "api", ".", "get_object", "(", "obj", ")", "url", "=", "api", ".", "get_url", "(", "obj", ")", "title", "=", "api", ".", "get_title", "(", "obj", ")", "keyword", "=", "obj", ".", "getKeyword", "(", ")", "# get the category", "if", "self", ".", "show_categories_enabled", "(", ")", ":", "category", "=", "obj", ".", "getCategoryTitle", "(", ")", "if", "category", "not", "in", "self", ".", "categories", ":", "self", ".", "categories", ".", "append", "(", "category", ")", "item", "[", "\"category\"", "]", "=", "category", "item", "[", "\"Title\"", "]", "=", "title", "item", "[", "\"replace\"", "]", "[", "\"Title\"", "]", "=", "get_link", "(", "url", ",", "value", "=", "title", ")", "item", "[", "\"choices\"", "]", "[", "\"min_operator\"", "]", "=", "self", ".", "min_operator_choices", "item", "[", "\"choices\"", "]", "[", "\"max_operator\"", "]", "=", "self", ".", "max_operator_choices", "item", "[", "\"allow_edit\"", "]", "=", "self", ".", "get_editable_columns", "(", ")", "item", "[", "\"required\"", "]", "=", "self", ".", "get_required_columns", "(", ")", "spec", "=", "self", ".", "specification", ".", "get", "(", "keyword", ",", "{", "}", ")", "item", "[", "\"selected\"", "]", "=", "spec", "and", "True", "or", "False", "item", "[", "\"min_operator\"", "]", "=", "spec", ".", "get", "(", "\"min_operator\"", ",", "\"geq\"", ")", "item", "[", "\"min\"", "]", "=", "spec", ".", "get", "(", "\"min\"", ",", "\"\"", ")", "item", "[", "\"max_operator\"", "]", "=", "spec", ".", "get", "(", "\"max_operator\"", ",", "\"leq\"", ")", "item", "[", "\"max\"", "]", "=", "spec", ".", "get", "(", "\"max\"", ",", "\"\"", ")", "item", "[", "\"warn_min\"", "]", "=", "spec", ".", "get", "(", "\"warn_min\"", ",", "\"\"", ")", "item", "[", "\"warn_max\"", "]", "=", "spec", ".", "get", "(", "\"warn_max\"", ",", "\"\"", ")", "item", "[", "\"hidemin\"", "]", "=", "spec", ".", "get", "(", "\"hidemin\"", ",", "\"\"", ")", "item", "[", "\"hidemax\"", "]", "=", "spec", ".", "get", "(", "\"hidemax\"", ",", "\"\"", ")", "item", "[", "\"rangecomment\"", "]", "=", "spec", ".", "get", "(", "\"rangecomment\"", ",", "\"\"", ")", "# Add methods", "methods", "=", "obj", ".", "getMethods", "(", ")", "if", "methods", ":", "links", "=", "map", "(", "lambda", "m", ":", "get_link", "(", "m", ".", "absolute_url", "(", ")", ",", "value", "=", "m", ".", "Title", "(", ")", ",", "css_class", "=", "\"link\"", ")", ",", "methods", ")", "item", "[", "\"replace\"", "]", "[", "\"Methods\"", "]", "=", "\", \"", ".", "join", "(", "links", ")", "else", ":", "item", "[", "\"methods\"", "]", "=", "\"\"", "# Icons", "after_icons", "=", "\"\"", "if", "obj", ".", "getAccredited", "(", ")", ":", "after_icons", "+=", "get_image", "(", "\"accredited.png\"", ",", "title", "=", "_", "(", "\"Accredited\"", ")", ")", "if", "obj", ".", "getAttachmentOption", "(", ")", "==", "\"r\"", ":", "after_icons", "+=", "get_image", "(", "\"attach_reqd.png\"", ",", "title", "=", "_", "(", "\"Attachment required\"", ")", ")", "if", "obj", ".", "getAttachmentOption", "(", ")", "==", "\"n\"", ":", "after_icons", "+=", "get_image", "(", "\"attach_no.png\"", ",", "title", "=", "_", "(", "\"Attachment not permitted\"", ")", ")", "if", "after_icons", ":", "item", "[", "\"after\"", "]", "[", "\"Title\"", "]", "=", "after_icons", "return", "item" ]
38.205882
15.985294
def log_difference(lx, ly):
    """Returns log(exp(lx) - exp(ly)) without leaving log space."""
    # Negative log of double-precision infinity
    li = -709.78271289338397
    diff = ly - lx
    # Make sure log-difference can succeed
    if np.any(diff >= 0):
        raise ValueError(
            'Cannot compute log(x-y), because y>=x for some elements.')
    # Otherwise evaluate log-difference
    return lx + np.log(1. - np.exp(diff))
[ "def", "log_difference", "(", "lx", ",", "ly", ")", ":", "# Negative log of double-precision infinity", "li", "=", "-", "709.78271289338397", "diff", "=", "ly", "-", "lx", "# Make sure log-difference can succeed", "if", "np", ".", "any", "(", "diff", ">=", "0", ")", ":", "raise", "ValueError", "(", "'Cannot compute log(x-y), because y>=x for some elements.'", ")", "# Otherwise evaluate log-difference", "return", "lx", "+", "np", ".", "log", "(", "1.", "-", "np", ".", "exp", "(", "diff", ")", ")" ]
35.833333
13.25
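The identity behind the record above is log(x - y) = lx + log(1 - exp(ly - lx)) for lx = log(x), ly = log(y); a quick numeric check:

import numpy as np

lx, ly = np.log(5.0), np.log(3.0)
print(np.log(5.0 - 3.0))                  # 0.6931471805599453
print(lx + np.log(1. - np.exp(ly - lx)))  # same value, computed without ever
                                          # forming exp(lx) or exp(ly), so it
                                          # stays stable for large lx, ly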
def get_activity_lookup_session_for_objective_bank(self, objective_bank_id, proxy):
    """Gets the ``OsidSession`` associated with the activity lookup service for the given objective bank.

    arg:    objective_bank_id (osid.id.Id): the ``Id`` of the
            objective bank
    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.learning.ActivityLookupSession) - an
            ``ActivityLookupSession``
    raise:  NotFound - ``objective_bank_id`` not found
    raise:  NullArgument - ``objective_bank_id`` or ``proxy`` is ``null``
    raise:  OperationFailed - ``unable to complete request``
    raise:  Unimplemented - ``supports_activity_lookup()`` or
            ``supports_visible_federation()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_activity_lookup()`` and
    ``supports_visible_federation()`` are ``true``.*
    """
    if not self.supports_activity_lookup():
        raise errors.Unimplemented()
    ##
    # Also include check to see if the catalog Id is found otherwise raise errors.NotFound
    ##
    # pylint: disable=no-member
    return sessions.ActivityLookupSession(objective_bank_id, proxy, self._runtime)
[ "def", "get_activity_lookup_session_for_objective_bank", "(", "self", ",", "objective_bank_id", ",", "proxy", ")", ":", "if", "not", "self", ".", "supports_activity_lookup", "(", ")", ":", "raise", "errors", ".", "Unimplemented", "(", ")", "##", "# Also include check to see if the catalog Id is found otherwise raise errors.NotFound", "##", "# pylint: disable=no-member", "return", "sessions", ".", "ActivityLookupSession", "(", "objective_bank_id", ",", "proxy", ",", "self", ".", "_runtime", ")" ]
49
20.615385
def unflatten(d, splitter='tuple', inverse=False):
    """Unflatten dict-like object.

    Parameters
    ----------
    d: dict-like object
        The dict that will be unflattened.
    splitter: {'tuple', 'path', function} (default: 'tuple')
        The key splitting method. If a function is given, the function will
        be used to split.
        'tuple': Use each element in the tuple key as the key of the
            unflattened dict.
        'path': Use ``pathlib.Path.parts`` to split keys.
    inverse: bool (default: False)
        Whether you want to invert the key and value before flattening.

    Returns
    -------
    unflattened_dict: dict
    """
    if isinstance(splitter, str):
        splitter = SPLITTER_DICT[splitter]

    unflattened_dict = {}
    for flat_key, value in six.viewitems(d):
        if inverse:
            flat_key, value = value, flat_key
        key_tuple = splitter(flat_key)
        nested_set_dict(unflattened_dict, key_tuple, value)

    return unflattened_dict
[ "def", "unflatten", "(", "d", ",", "splitter", "=", "'tuple'", ",", "inverse", "=", "False", ")", ":", "if", "isinstance", "(", "splitter", ",", "str", ")", ":", "splitter", "=", "SPLITTER_DICT", "[", "splitter", "]", "unflattened_dict", "=", "{", "}", "for", "flat_key", ",", "value", "in", "six", ".", "viewitems", "(", "d", ")", ":", "if", "inverse", ":", "flat_key", ",", "value", "=", "value", ",", "flat_key", "key_tuple", "=", "splitter", "(", "flat_key", ")", "nested_set_dict", "(", "unflattened_dict", ",", "key_tuple", ",", "value", ")", "return", "unflattened_dict" ]
32.4
19.233333
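This record appears to be the unflatten from the flatten-dict package; if so, a usage sketch would look like this (treat the import path as an assumption):

from flatten_dict import unflatten

unflatten({('a', 'b'): 1, ('a', 'c'): 2})
# -> {'a': {'b': 1, 'c': 2}}

# a custom splitter turns flat string keys into key tuples
unflatten({'a/b': 1}, splitter=lambda k: tuple(k.split('/')))
# -> {'a': {'b': 1}}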
def _import_images(controller, path):
    """
    Copy images to the images directory or delete them if they already exist.
    """
    image_dir = controller.images_path()

    root = os.path.join(path, "images")
    for (dirpath, dirnames, filenames) in os.walk(root):
        for filename in filenames:
            path = os.path.join(dirpath, filename)
            dst = os.path.join(image_dir, os.path.relpath(path, root))
            os.makedirs(os.path.dirname(dst), exist_ok=True)
            shutil.move(path, dst)
[ "def", "_import_images", "(", "controller", ",", "path", ")", ":", "image_dir", "=", "controller", ".", "images_path", "(", ")", "root", "=", "os", ".", "path", ".", "join", "(", "path", ",", "\"images\"", ")", "for", "(", "dirpath", ",", "dirnames", ",", "filenames", ")", "in", "os", ".", "walk", "(", "root", ")", ":", "for", "filename", "in", "filenames", ":", "path", "=", "os", ".", "path", ".", "join", "(", "dirpath", ",", "filename", ")", "dst", "=", "os", ".", "path", ".", "join", "(", "image_dir", ",", "os", ".", "path", ".", "relpath", "(", "path", ",", "root", ")", ")", "os", ".", "makedirs", "(", "os", ".", "path", ".", "dirname", "(", "dst", ")", ",", "exist_ok", "=", "True", ")", "shutil", ".", "move", "(", "path", ",", "dst", ")" ]
36.785714
12.5
def shifted_mean_gauss(image, offset=None, sigma=5, voxelspacing=None, mask=slice(None)):
    r"""
    The approximate mean over a small region at an offset from each voxel.

    Functions like `local_mean_gauss`, but instead of computing the average over
    a small patch around the current voxel, the region is centered at an offset
    away. Can be used to use a distant region's average as a feature for a voxel.

    Parameters
    ----------
    image : array_like or list/tuple of array_like
        A single image or a list/tuple of images (for multi-spectral case).
    offset : sequence of ints
        At this offset in voxels of the current position the region is placed.
    sigma : number or sequence of numbers
        Standard deviation for Gaussian kernel. The standard deviations of the
        Gaussian filter are given for each axis as a sequence, or as a single
        number, in which case it is equal for all axes. Note that the voxel
        spacing of the image is taken into account, the given values are
        treated as mm.
    voxelspacing : sequence of floats
        The side-length of each voxel.
    mask : array_like
        A binary mask for the image.

    Returns
    -------
    shifted_mean_gauss : ndarray
        The weighted mean intensities over a region at offset away from each voxel.

    See also
    --------
    local_mean_gauss
    """
    return _extract_feature(_extract_shifted_mean_gauss, image, mask,
                            offset=offset, sigma=sigma, voxelspacing=voxelspacing)
[ "def", "shifted_mean_gauss", "(", "image", ",", "offset", "=", "None", ",", "sigma", "=", "5", ",", "voxelspacing", "=", "None", ",", "mask", "=", "slice", "(", "None", ")", ")", ":", "return", "_extract_feature", "(", "_extract_shifted_mean_gauss", ",", "image", ",", "mask", ",", "offset", "=", "offset", ",", "sigma", "=", "sigma", ",", "voxelspacing", "=", "voxelspacing", ")" ]
43.028571
28.428571
def _requested_name(self, name, action=None, func=None):
    """Create a unique name for an operator or a stream.
    """
    if name is not None:
        if name in self._used_names:
            # start at 2 for the "second" one of this name
            n = 2
            while True:
                pn = name + '_' + str(n)
                if pn not in self._used_names:
                    self._used_names.add(pn)
                    return pn
                n += 1
        else:
            self._used_names.add(name)
            return name

    if func is not None:
        if hasattr(func, '__name__'):
            name = func.__name__
            if name == '<lambda>':
                # Avoid use of <> characters in name
                # as they are converted to unicode
                # escapes in SPL identifier
                name = action + '_lambda'
        elif hasattr(func, '__class__'):
            name = func.__class__.__name__

    if name is None:
        if action is not None:
            name = action
        else:
            name = self.name

    # Recurse once to get unique version of name
    return self._requested_name(name)
[ "def", "_requested_name", "(", "self", ",", "name", ",", "action", "=", "None", ",", "func", "=", "None", ")", ":", "if", "name", "is", "not", "None", ":", "if", "name", "in", "self", ".", "_used_names", ":", "# start at 2 for the \"second\" one of this name", "n", "=", "2", "while", "True", ":", "pn", "=", "name", "+", "'_'", "+", "str", "(", "n", ")", "if", "pn", "not", "in", "self", ".", "_used_names", ":", "self", ".", "_used_names", ".", "add", "(", "pn", ")", "return", "pn", "n", "+=", "1", "else", ":", "self", ".", "_used_names", ".", "add", "(", "name", ")", "return", "name", "if", "func", "is", "not", "None", ":", "if", "hasattr", "(", "func", ",", "'__name__'", ")", ":", "name", "=", "func", ".", "__name__", "if", "name", "==", "'<lambda>'", ":", "# Avoid use of <> characters in name", "# as they are converted to unicode", "# escapes in SPL identifier", "name", "=", "action", "+", "'_lambda'", "elif", "hasattr", "(", "func", ",", "'__class__'", ")", ":", "name", "=", "func", ".", "__class__", ".", "__name__", "if", "name", "is", "None", ":", "if", "action", "is", "not", "None", ":", "name", "=", "action", "else", ":", "name", "=", "self", ".", "name", "# Recurse once to get unique version of name", "return", "self", ".", "_requested_name", "(", "name", ")" ]
34.888889
11.972222
def expand_output_files(value, *args, **kwargs):
    '''Process output files (perhaps a pattern) to determine input files.
    '''
    if any(isinstance(x, dynamic) for x in args) or any(
            isinstance(y, dynamic) for y in kwargs.values()):
        return sos_targets(_undetermined=value)
    else:
        return sos_targets(
            *args,
            **kwargs,
            _undetermined=False,
            _source=env.sos_dict['step_name'])
[ "def", "expand_output_files", "(", "value", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "any", "(", "isinstance", "(", "x", ",", "dynamic", ")", "for", "x", "in", "args", ")", "or", "any", "(", "isinstance", "(", "y", ",", "dynamic", ")", "for", "y", "in", "kwargs", ".", "values", "(", ")", ")", ":", "return", "sos_targets", "(", "_undetermined", "=", "value", ")", "else", ":", "return", "sos_targets", "(", "*", "args", ",", "*", "*", "kwargs", ",", "_undetermined", "=", "False", ",", "_source", "=", "env", ".", "sos_dict", "[", "'step_name'", "]", ")" ]
37.083333
18.083333
def parse_radic_file(filename, settings, selection_mode="after", reciprocal=None):
    """Import one result file as produced by the SIP256c SIP measuring device
    (Radic Research)

    Full settings dictionary: ::

        settings = {
            'filter_skip': (integer) skip dipoles we are interested in
            'quadrupole_mode': ['after'|'between'|'before'|'all']
                which dipoles to use from the file
        }

    Parameters
    ----------
    filename: string
        input filename, usually with the ending ".RES"
    settings: dict
        Settings for the data import, see code snippet above
    selection_mode: string
        which voltage dipoles should be returned. Possible choices:
        "all"|"before"|"after"
    reciprocal: int|None
        If this is an integer, then assume this was a reciprocal measurement
        and the number denotes the largest RU number, N. Electrode numbers
        (a,b,m,n) will then be transformed to (N1 - a, N1 - b, N1 - m,
        N1 - n), with N1 = N + 1

    Returns
    -------
    sip_data: :py:pandas:`pandas.DataFrame`
        The data contained in a data frame
    electrodes : None
        No electrode positions are imported
    topography : None
        No topography is imported
    """
    try:
        with open(filename, 'r', encoding='latin-1') as fid:
            lines = fid.readlines()
    except:
        print('file not found', filename)
        import pdb
        pdb.set_trace()

    groups = itertools.groupby(
        lines,
        lambda line: line.startswith('Reading:')
    )

    # parse header
    group = next(groups)
    header_data = _parse_radic_header(group, dipole_mode='between')

    # parse readings
    reading_blocks = {}
    for key, group in groups:
        # determine reading number
        line = next(group)
        reading_nr = int(line[8: line.find('/')].strip())
        # print('reading nr', reading_nr)
        reading_blocks[reading_nr] = [x for x in next(groups)[1]]
        # print reading_blocks[reading_nr]

    # print(sorted(reading_blocks.keys()))
    # now parse the readings
    print('number of readings', len(reading_blocks))
    print('keys', sorted(reading_blocks.keys()))
    readings = {}
    for key in sorted(reading_blocks):
        # print('KEY/Reading', key)
        reading = reading_blocks[key]
        tmp = parse_reading(reading)
        # except Exception as e:
        #     print('Parsing of reading failed')
        #     print(''.join(reading))
        #     print('error message')
        #     print(e)
        #     exit()
        readings[key] = tmp
    # print('reading keys', sorted(readings.keys()))

    logging.debug('removing calibration reading')
    # remove calibration reading
    if 0 in readings:
        del(readings[0])
    # print('readings', readings)
    sip_data_raw = compute_quadrupoles(header_data, readings, settings)
    sip_data = pd.concat(sip_data_raw)

    if reciprocal is not None and isinstance(reciprocal, int):
        sip_data['a'] = (reciprocal + 1) - sip_data['a']
        sip_data['b'] = (reciprocal + 1) - sip_data['b']
        sip_data['m'] = (reciprocal + 1) - sip_data['m']
        sip_data['n'] = (reciprocal + 1) - sip_data['n']

    return sip_data, None, None
[ "def", "parse_radic_file", "(", "filename", ",", "settings", ",", "selection_mode", "=", "\"after\"", ",", "reciprocal", "=", "None", ")", ":", "try", ":", "with", "open", "(", "filename", ",", "'r'", ",", "encoding", "=", "'latin-1'", ")", "as", "fid", ":", "lines", "=", "fid", ".", "readlines", "(", ")", "except", ":", "print", "(", "'file not found'", ",", "filename", ")", "import", "pdb", "pdb", ".", "set_trace", "(", ")", "groups", "=", "itertools", ".", "groupby", "(", "lines", ",", "lambda", "line", ":", "line", ".", "startswith", "(", "'Reading:'", ")", ")", "# parse header", "group", "=", "next", "(", "groups", ")", "header_data", "=", "_parse_radic_header", "(", "group", ",", "dipole_mode", "=", "'between'", ")", "# parse readings", "reading_blocks", "=", "{", "}", "for", "key", ",", "group", "in", "groups", ":", "# determine reading number", "line", "=", "next", "(", "group", ")", "reading_nr", "=", "int", "(", "line", "[", "8", ":", "line", ".", "find", "(", "'/'", ")", "]", ".", "strip", "(", ")", ")", "# print('reading nr', reading_nr)", "reading_blocks", "[", "reading_nr", "]", "=", "[", "x", "for", "x", "in", "next", "(", "groups", ")", "[", "1", "]", "]", "# print reading_blocks[reading_nr]", "# print(sorted(reading_blocks.keys()))", "# now parse the readings", "print", "(", "'number of readings'", ",", "len", "(", "reading_blocks", ")", ")", "print", "(", "'keys'", ",", "sorted", "(", "reading_blocks", ".", "keys", "(", ")", ")", ")", "readings", "=", "{", "}", "for", "key", "in", "sorted", "(", "reading_blocks", ")", ":", "# print('KEY/Reading', key)", "reading", "=", "reading_blocks", "[", "key", "]", "tmp", "=", "parse_reading", "(", "reading", ")", "# except Exception as e:", "# print('Parsing of reading failed')", "# print(''.join(reading))", "# print('error message')", "# print(e)", "# exit()", "readings", "[", "key", "]", "=", "tmp", "# print('reading keys', sorted(readings.keys()))", "logging", ".", "debug", "(", "'removing calibration reading'", ")", "# remove calibration reading", "if", "0", "in", "readings", ":", "del", "(", "readings", "[", "0", "]", ")", "# print('readings', readings)", "sip_data_raw", "=", "compute_quadrupoles", "(", "header_data", ",", "readings", ",", "settings", ")", "sip_data", "=", "pd", ".", "concat", "(", "sip_data_raw", ")", "if", "reciprocal", "is", "not", "None", "and", "isinstance", "(", "reciprocal", ",", "int", ")", ":", "sip_data", "[", "'a'", "]", "=", "(", "reciprocal", "+", "1", ")", "-", "sip_data", "[", "'a'", "]", "sip_data", "[", "'b'", "]", "=", "(", "reciprocal", "+", "1", ")", "-", "sip_data", "[", "'b'", "]", "sip_data", "[", "'m'", "]", "=", "(", "reciprocal", "+", "1", ")", "-", "sip_data", "[", "'m'", "]", "sip_data", "[", "'n'", "]", "=", "(", "reciprocal", "+", "1", ")", "-", "sip_data", "[", "'n'", "]", "return", "sip_data", ",", "None", ",", "None" ]
31.70297
19.267327
def __update_rating(uid, rating):
    '''
    Update rating.
    '''
    entry = TabRating.update(
        rating=rating
    ).where(TabRating.uid == uid)
    entry.execute()
[ "def", "__update_rating", "(", "uid", ",", "rating", ")", ":", "entry", "=", "TabRating", ".", "update", "(", "rating", "=", "rating", ")", ".", "where", "(", "TabRating", ".", "uid", "==", "uid", ")", "entry", ".", "execute", "(", ")" ]
24.375
15.625
def build_payload(self, payload):
    """ Build payload of message. """
    for segment in self.segments:
        segment.pack(payload, commit=self.autocommit)
[ "def", "build_payload", "(", "self", ",", "payload", ")", ":", "for", "segment", "in", "self", ".", "segments", ":", "segment", ".", "pack", "(", "payload", ",", "commit", "=", "self", ".", "autocommit", ")" ]
42
6.75
def get_jump_target_maps(code, opc):
    """Returns a dictionary where the key is an offset and the values are
    a list of instruction offsets which can get run before that
    instruction. This includes jump instructions as well as non-jump
    instructions. Therefore, the keys of the dictionary are reachable
    instructions. The values of the dictionary may be useful in
    control-flow analysis.
    """
    offset2prev = {}
    prev_offset = -1
    for offset, op, arg in unpack_opargs_wordcode(code, opc):
        if prev_offset >= 0:
            prev_list = offset2prev.get(offset, [])
            prev_list.append(prev_offset)
            offset2prev[offset] = prev_list
        prev_offset = offset
        if op in opc.NOFOLLOW:
            prev_offset = -1
        if arg is not None:
            jump_offset = -1
            if op in opc.JREL_OPS:
                jump_offset = offset + 2 + arg
            elif op in opc.JABS_OPS:
                jump_offset = arg
            if jump_offset >= 0:
                prev_list = offset2prev.get(jump_offset, [])
                prev_list.append(offset)
                offset2prev[jump_offset] = prev_list
    return offset2prev
[ "def", "get_jump_target_maps", "(", "code", ",", "opc", ")", ":", "offset2prev", "=", "{", "}", "prev_offset", "=", "-", "1", "for", "offset", ",", "op", ",", "arg", "in", "unpack_opargs_wordcode", "(", "code", ",", "opc", ")", ":", "if", "prev_offset", ">=", "0", ":", "prev_list", "=", "offset2prev", ".", "get", "(", "offset", ",", "[", "]", ")", "prev_list", ".", "append", "(", "prev_offset", ")", "offset2prev", "[", "offset", "]", "=", "prev_list", "prev_offset", "=", "offset", "if", "op", "in", "opc", ".", "NOFOLLOW", ":", "prev_offset", "=", "-", "1", "if", "arg", "is", "not", "None", ":", "jump_offset", "=", "-", "1", "if", "op", "in", "opc", ".", "JREL_OPS", ":", "jump_offset", "=", "offset", "+", "2", "+", "arg", "elif", "op", "in", "opc", ".", "JABS_OPS", ":", "jump_offset", "=", "arg", "if", "jump_offset", ">=", "0", ":", "prev_list", "=", "offset2prev", ".", "get", "(", "jump_offset", ",", "[", "]", ")", "prev_list", ".", "append", "(", "offset", ")", "offset2prev", "[", "jump_offset", "]", "=", "prev_list", "return", "offset2prev" ]
40.172414
12.931034
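The record relies on external helpers (unpack_opargs_wordcode and the opc opcode tables). A stdlib-only sketch of the same predecessor map using dis; opcode names differ between Python versions, so the NOFOLLOW set below is an approximation:

import dis

def predecessors(code):
    # map each offset to the offsets that can run immediately before it
    # (fall-through edges plus jump edges)
    offset2prev = {}
    prev = -1
    for ins in dis.get_instructions(code):
        if prev >= 0:
            offset2prev.setdefault(ins.offset, []).append(prev)
        prev = ins.offset
        if ins.opname in ('RETURN_VALUE', 'RAISE_VARARGS',
                          'JUMP_FORWARD', 'JUMP_ABSOLUTE'):
            prev = -1  # no fall-through after these
        if ins.opcode in dis.hasjrel or ins.opcode in dis.hasjabs:
            # argval is the resolved jump target offset
            offset2prev.setdefault(ins.argval, []).append(ins.offset)
    return offset2prev

print(predecessors(predecessors.__code__))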
def new_partition(self, table, **kwargs):
    """
    Creates new partition and returns it.

    Args:
        table (orm.Table):

    Returns:
        orm.Partition
    """
    from . import Partition

    # Create the basic partition record, with a sequence ID.
    if isinstance(table, string_types):
        table = self.table(table)

    if 'sequence_id' in kwargs:
        sequence_id = kwargs['sequence_id']
        del kwargs['sequence_id']
    else:
        sequence_id = self._database.next_sequence_id(Dataset, self.vid, Partition)

    p = Partition(
        t_vid=table.vid,
        table_name=table.name,
        sequence_id=sequence_id,
        dataset=self,
        d_vid=self.vid,
        **kwargs
    )
    p.update_id()
    return p
[ "def", "new_partition", "(", "self", ",", "table", ",", "*", "*", "kwargs", ")", ":", "from", ".", "import", "Partition", "# Create the basic partition record, with a sequence ID.", "if", "isinstance", "(", "table", ",", "string_types", ")", ":", "table", "=", "self", ".", "table", "(", "table", ")", "if", "'sequence_id'", "in", "kwargs", ":", "sequence_id", "=", "kwargs", "[", "'sequence_id'", "]", "del", "kwargs", "[", "'sequence_id'", "]", "else", ":", "sequence_id", "=", "self", ".", "_database", ".", "next_sequence_id", "(", "Dataset", ",", "self", ".", "vid", ",", "Partition", ")", "p", "=", "Partition", "(", "t_vid", "=", "table", ".", "vid", ",", "table_name", "=", "table", ".", "name", ",", "sequence_id", "=", "sequence_id", ",", "dataset", "=", "self", ",", "d_vid", "=", "self", ".", "vid", ",", "*", "*", "kwargs", ")", "p", ".", "update_id", "(", ")", "return", "p" ]
22.694444
21.305556
def append_message(self, message):
    """
    Appends the given message at the end of the message list and deletes the
    oldest one (top-most).

    :param message: The formatted text to log.
    """
    # Delete the first message of the list
    if len(self.messages) > 0:
        del self.messages[0]

    # Append the new message at the end
    self.messages.append(message)
    self.changes_made = True

    # Redraw
    self.redraw()
[ "def", "append_message", "(", "self", ",", "message", ")", ":", "# Delete the first message of the list", "if", "len", "(", "self", ".", "messages", ")", ">", "0", ":", "del", "self", ".", "messages", "[", "0", "]", "# Append the new message at the end", "self", ".", "messages", ".", "append", "(", "message", ")", "self", ".", "changes_made", "=", "True", "# Redraw", "self", ".", "redraw", "(", ")" ]
30.6875
15.8125
def safeRmTree(rootPath):
    """
    Deletes a tree and returns true if it was correctly deleted
    """
    shutil.rmtree(rootPath, True)
    return not os.path.exists(rootPath)
[ "def", "safeRmTree", "(", "rootPath", ")", ":", "shutil", ".", "rmtree", "(", "rootPath", ",", "True", ")", "return", "not", "os", ".", "path", ".", "exists", "(", "rootPath", ")" ]
27.714286
12.571429
def ext_pillar(minion_id,
               pillar,
               table='pillar',
               id_field=None,
               field=None,
               pillar_key=None):
    '''
    Collect minion external pillars from a RethinkDB database

    Arguments:
    * `table`: The RethinkDB table containing external pillar information.
      Defaults to ``'pillar'``
    * `id_field`: Field in document containing the minion id.
      If blank then we assume the table index matches minion ids
    * `field`: Specific field in the document used for pillar data, if blank
      then the entire document will be used
    * `pillar_key`: The salt-master will nest found external pillars under
      this key before merging into the minion pillars. If blank, external
      pillars will be merged at top level
    '''
    host = __opts__['rethinkdb.host']
    port = __opts__['rethinkdb.port']
    database = __opts__['rethinkdb.database']
    username = __opts__['rethinkdb.username']
    password = __opts__['rethinkdb.password']

    log.debug('Connecting to %s:%s as user \'%s\' for RethinkDB ext_pillar',
              host, port, username)

    # Connect to the database
    conn = rethinkdb.connect(host=host,
                             port=port,
                             db=database,
                             user=username,
                             password=password)

    data = None

    try:
        if id_field:
            log.debug('ext_pillar.rethinkdb: looking up pillar. '
                      'table: %s, field: %s, minion: %s',
                      table, id_field, minion_id)

            if field:
                data = rethinkdb.table(table).filter(
                    {id_field: minion_id}).pluck(field).run(conn)
            else:
                data = rethinkdb.table(table).filter(
                    {id_field: minion_id}).run(conn)
        else:
            log.debug('ext_pillar.rethinkdb: looking up pillar. '
                      'table: %s, field: id, minion: %s',
                      table, minion_id)

            if field:
                data = rethinkdb.table(table).get(minion_id).pluck(field).run(
                    conn)
            else:
                data = rethinkdb.table(table).get(minion_id).run(conn)
    finally:
        if conn.is_open():
            conn.close()

    if data.items:
        # Return nothing if multiple documents are found for a minion
        if len(data.items) > 1:
            log.error('ext_pillar.rethinkdb: ambiguous documents found for '
                      'minion %s', minion_id)
            return {}
        else:
            result = data.items.pop()

        if pillar_key:
            return {pillar_key: result}
        return result
    else:
        # No document found in the database
        log.debug('ext_pillar.rethinkdb: no document found')
        return {}
[ "def", "ext_pillar", "(", "minion_id", ",", "pillar", ",", "table", "=", "'pillar'", ",", "id_field", "=", "None", ",", "field", "=", "None", ",", "pillar_key", "=", "None", ")", ":", "host", "=", "__opts__", "[", "'rethinkdb.host'", "]", "port", "=", "__opts__", "[", "'rethinkdb.port'", "]", "database", "=", "__opts__", "[", "'rethinkdb.database'", "]", "username", "=", "__opts__", "[", "'rethinkdb.username'", "]", "password", "=", "__opts__", "[", "'rethinkdb.password'", "]", "log", ".", "debug", "(", "'Connecting to %s:%s as user \\'%s\\' for RethinkDB ext_pillar'", ",", "host", ",", "port", ",", "username", ")", "# Connect to the database", "conn", "=", "rethinkdb", ".", "connect", "(", "host", "=", "host", ",", "port", "=", "port", ",", "db", "=", "database", ",", "user", "=", "username", ",", "password", "=", "password", ")", "data", "=", "None", "try", ":", "if", "id_field", ":", "log", ".", "debug", "(", "'ext_pillar.rethinkdb: looking up pillar. '", "'table: %s, field: %s, minion: %s'", ",", "table", ",", "id_field", ",", "minion_id", ")", "if", "field", ":", "data", "=", "rethinkdb", ".", "table", "(", "table", ")", ".", "filter", "(", "{", "id_field", ":", "minion_id", "}", ")", ".", "pluck", "(", "field", ")", ".", "run", "(", "conn", ")", "else", ":", "data", "=", "rethinkdb", ".", "table", "(", "table", ")", ".", "filter", "(", "{", "id_field", ":", "minion_id", "}", ")", ".", "run", "(", "conn", ")", "else", ":", "log", ".", "debug", "(", "'ext_pillar.rethinkdb: looking up pillar. '", "'table: %s, field: id, minion: %s'", ",", "table", ",", "minion_id", ")", "if", "field", ":", "data", "=", "rethinkdb", ".", "table", "(", "table", ")", ".", "get", "(", "minion_id", ")", ".", "pluck", "(", "field", ")", ".", "run", "(", "conn", ")", "else", ":", "data", "=", "rethinkdb", ".", "table", "(", "table", ")", ".", "get", "(", "minion_id", ")", ".", "run", "(", "conn", ")", "finally", ":", "if", "conn", ".", "is_open", "(", ")", ":", "conn", ".", "close", "(", ")", "if", "data", ".", "items", ":", "# Return nothing if multiple documents are found for a minion", "if", "len", "(", "data", ".", "items", ")", ">", "1", ":", "log", ".", "error", "(", "'ext_pillar.rethinkdb: ambiguous documents found for '", "'minion %s'", ",", "minion_id", ")", "return", "{", "}", "else", ":", "result", "=", "data", ".", "items", ".", "pop", "(", ")", "if", "pillar_key", ":", "return", "{", "pillar_key", ":", "result", "}", "return", "result", "else", ":", "# No document found in the database", "log", ".", "debug", "(", "'ext_pillar.rethinkdb: no document found'", ")", "return", "{", "}" ]
32.44186
21.186047
def read_header(self):
    """ Read header and return a Python dictionary of key:value pairs
    """
    self.header = {}

    for key, val in self.h5['data'].attrs.items():
        if six.PY3:
            key = bytes(key, 'ascii')
        if key == b'src_raj':
            self.header[key] = Angle(val, unit='hr')
        elif key == b'src_dej':
            self.header[key] = Angle(val, unit='deg')
        else:
            self.header[key] = val

    return self.header
[ "def", "read_header", "(", "self", ")", ":", "self", ".", "header", "=", "{", "}", "for", "key", ",", "val", "in", "self", ".", "h5", "[", "'data'", "]", ".", "attrs", ".", "items", "(", ")", ":", "if", "six", ".", "PY3", ":", "key", "=", "bytes", "(", "key", ",", "'ascii'", ")", "if", "key", "==", "b'src_raj'", ":", "self", ".", "header", "[", "key", "]", "=", "Angle", "(", "val", ",", "unit", "=", "'hr'", ")", "elif", "key", "==", "b'src_dej'", ":", "self", ".", "header", "[", "key", "]", "=", "Angle", "(", "val", ",", "unit", "=", "'deg'", ")", "else", ":", "self", ".", "header", "[", "key", "]", "=", "val", "return", "self", ".", "header" ]
30
15.882353
def json_paginate(self, base_url, page_number):
    """
    Return a dict for a JSON paginate
    """
    data = self.page(page_number)
    first_id = None
    last_id = None
    if data:
        first_id = data[0].id
        last_id = data[-1].id
    return {
        'meta': {
            'total_pages': self.max_pages,
            'first_id': first_id,
            'last_id': last_id,
            'current_page': page_number
        },
        'data': self.page(page_number),
        'links': self.links(base_url, page_number)
    }
[ "def", "json_paginate", "(", "self", ",", "base_url", ",", "page_number", ")", ":", "data", "=", "self", ".", "page", "(", "page_number", ")", "first_id", "=", "None", "last_id", "=", "None", "if", "data", ":", "first_id", "=", "data", "[", "0", "]", ".", "id", "last_id", "=", "data", "[", "-", "1", "]", ".", "id", "return", "{", "'meta'", ":", "{", "'total_pages'", ":", "self", ".", "max_pages", ",", "'first_id'", ":", "first_id", ",", "'last_id'", ":", "last_id", ",", "'current_page'", ":", "page_number", "}", ",", "'data'", ":", "self", ".", "page", "(", "page_number", ")", ",", "'links'", ":", "self", ".", "links", "(", "base_url", ",", "page_number", ")", "}" ]
33.055556
11.722222
def decstr2int(dec_str, decimals):
    '''
    Returns an integer that has the value of the decimal string:
    dec_str*10^decimals

    Arguments:
    dec_str (string) that represents a decimal number
    decimals (int): number of decimals for creating the integer output

    Returns: (int)

    Raises:
    ValueError if dec_str is not a valid decimal string
    TypeError if decimals is not an integer

    Note: values may be truncated (not rounded).
    '''
    if not isinstance(decimals, int):
        raise TypeError('decimals must be an integer')
    try:
        dollars, cents = dec_str.split('.')
    except ValueError:
        if '.' not in dec_str:
            dollars = dec_str
            cents = '0'
        else:
            raise ValueError('Invalid decimal string')
    else:
        if len(cents) < decimals:
            cents = cents.ljust(decimals, '0')
        elif decimals < 1:
            cents = '0'
        elif len(cents) > decimals:
            cents = cents[:decimals]
    try:
        cents = int(cents)
    except:
        cents = 0
    try:
        return int(int(dollars) * (10 ** decimals)) + cents
    except:
        raise ValueError('Invalid decimal string')
[ "def", "decstr2int", "(", "dec_str", ",", "decimals", ")", ":", "if", "not", "isinstance", "(", "decimals", ",", "int", ")", ":", "raise", "TypeError", "(", "'decimals must be an integer'", ")", "try", ":", "dollars", ",", "cents", "=", "dec_str", ".", "split", "(", "'.'", ")", "except", "ValueError", ":", "if", "'.'", "not", "in", "dec_str", ":", "dollars", "=", "dec_str", "cents", "=", "'0'", "else", ":", "raise", "ValueError", "(", "'Invalid decimal string'", ")", "else", ":", "if", "len", "(", "cents", ")", "<", "decimals", ":", "cents", "=", "cents", ".", "ljust", "(", "decimals", ",", "'0'", ")", "elif", "decimals", "<", "1", ":", "cents", "=", "'0'", "elif", "len", "(", "cents", ")", ">", "decimals", ":", "cents", "=", "cents", "[", ":", "decimals", "]", "try", ":", "cents", "=", "int", "(", "cents", ")", "except", ":", "cents", "=", "0", "try", ":", "return", "int", "(", "int", "(", "dollars", ")", "*", "(", "10", "**", "decimals", ")", ")", "+", "cents", "except", ":", "raise", "ValueError", "(", "'Invalid decimal string'", ")" ]
29.7
19.2
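Quick worked examples for the conversion above (values are truncated, not rounded):

decstr2int('123.456', 2)   # -> 12345   ('456' truncated to '45')
decstr2int('123', 2)       # -> 12300   (missing fraction treated as 0)
decstr2int('123.4', 3)     # -> 123400  ('4' padded to '400')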
def init_widget(self):
    """ Bind the on property to the checked state """
    super(UiKitEditText, self).init_widget()

    #: Init font properties etc...
    self.init_text()

    d = self.declaration
    if d.placeholder:
        self.set_placeholder(d.placeholder)
    if d.input_type != 'text':
        self.set_input_type(d.input_type)
    if d.style:
        self.set_style(d.style)

    #: A really ugly way to add the target
    #: would be nice if we could just pass the block pointer here :)
    self.get_app().bridge.addTarget(
        self.widget,
        forControlEvents=UITextField.UIControlEventEditingChanged,
        andCallback=self.widget.getId(),
        usingMethod="onValueChanged",
        withValues=["text"]  #,"selected"]
    )
    self.widget.onValueChanged.connect(self.on_value_changed)
[ "def", "init_widget", "(", "self", ")", ":", "super", "(", "UiKitEditText", ",", "self", ")", ".", "init_widget", "(", ")", "#: Init font properties etc...", "self", ".", "init_text", "(", ")", "d", "=", "self", ".", "declaration", "if", "d", ".", "placeholder", ":", "self", ".", "set_placeholder", "(", "d", ".", "placeholder", ")", "if", "d", ".", "input_type", "!=", "'text'", ":", "self", ".", "set_input_type", "(", "d", ".", "input_type", ")", "if", "d", ".", "style", ":", "self", ".", "set_style", "(", "d", ".", "style", ")", "#: A really ugly way to add the target", "#: would be nice if we could just pass the block pointer here :)", "self", ".", "get_app", "(", ")", ".", "bridge", ".", "addTarget", "(", "self", ".", "widget", ",", "forControlEvents", "=", "UITextField", ".", "UIControlEventEditingChanged", ",", "andCallback", "=", "self", ".", "widget", ".", "getId", "(", ")", ",", "usingMethod", "=", "\"onValueChanged\"", ",", "withValues", "=", "[", "\"text\"", "]", "#,\"selected\"]", ")", "self", ".", "widget", ".", "onValueChanged", ".", "connect", "(", "self", ".", "on_value_changed", ")" ]
33.730769
16.307692
def process_default(self, event):
    """ Fallback.
    """
    if self.job.LOG.isEnabledFor(logging.DEBUG):
        # On debug level, we subscribe to ALL events, so they're expected in that case ;)
        if self.job.config.trace_inotify:
            self.job.LOG.debug("Ignored inotify event:\n    %r" % event)
    else:
        self.job.LOG.warning("Unexpected inotify event %r" % event)
[ "def", "process_default", "(", "self", ",", "event", ")", ":", "if", "self", ".", "job", ".", "LOG", ".", "isEnabledFor", "(", "logging", ".", "DEBUG", ")", ":", "# On debug level, we subscribe to ALL events, so they're expected in that case ;)", "if", "self", ".", "job", ".", "config", ".", "trace_inotify", ":", "self", ".", "job", ".", "LOG", ".", "debug", "(", "\"Ignored inotify event:\\n %r\"", "%", "event", ")", "else", ":", "self", ".", "job", ".", "LOG", ".", "warning", "(", "\"Unexpected inotify event %r\"", "%", "event", ")" ]
46.111111
19
def download_price(self, symbol: str, currency: str, agent: str) -> PriceModel:
    """ Download and save price online """
    price = self.__download_price(symbol, currency, agent)
    self.save()
    return price
[ "def", "download_price", "(", "self", ",", "symbol", ":", "str", ",", "currency", ":", "str", ",", "agent", ":", "str", ")", "->", "PriceModel", ":", "price", "=", "self", ".", "__download_price", "(", "symbol", ",", "currency", ",", "agent", ")", "self", ".", "save", "(", ")", "return", "price" ]
45.2
20.4
def _load(self, file_parser, section_name):
    """The current element is loaded from the configuration file,
    all constraints and requirements are checked.
    """
    # pylint: disable-msg=W0621
    log = logging.getLogger('argtoolbox')
    try:
        log.debug("looking for field (section=" + section_name
                  + ") : " + self._name)
        data = None
        try:
            if self.e_type == int:
                data = file_parser.getint(section_name, self._name)
            elif self.e_type == float:
                data = file_parser.getfloat(section_name, self._name)
            elif self.e_type == bool:
                data = file_parser.getboolean(section_name, self._name)
            elif self.e_type == list:
                data = file_parser.get(section_name, self._name)
                data = data.strip()
                data = data.decode(locale.getpreferredencoding())
                data = data.split()
                if not data:
                    msg = "The optional field '%(name)s' was present, \
type is list, but the current value is an empty \
list." % {"name": self._name}
                    log.error(msg)
                    raise ValueError(msg)
            elif self.e_type == str:
                data = file_parser.get(section_name, self._name)
                # happens only when the current field is present,
                # type is string, but value is ''
                if not data:
                    msg = "The optional field '%(name)s' was present, \
type is string, but the current value is an empty \
string." % {"name": self._name}
                    log.error(msg)
                    raise ValueError(msg)
                data = data.decode(locale.getpreferredencoding())
            else:
                msg = "Data type not supported : %(type)s " % {
                    "type": self.e_type}
                log.error(msg)
                raise TypeError(msg)
        except ValueError as ex:
            msg = "The current field '%(name)s' was present, but the \
required type is : %(e_type)s." % {
                "name": self._name,
                "e_type": self.e_type
            }
            log.error(msg)
            log.error(str(ex))
            raise ValueError(str(ex))

        log_data = {"name": self._name, "data": data,
                    "e_type": self.e_type}
        if self.hidden:
            log_data['data'] = "xxxxxxxx"
        log.debug("field found : '%(name)s', value : '%(data)s', \
type : '%(e_type)s'", log_data)
        self.value = data
    except ConfigParser.NoOptionError:
        if self.conf_required:
            msg = "The required field '%(name)s' was missing from the \
config file." % {"name": self._name}
            log.error(msg)
            raise ValueError(msg)

        if self.default is not None:
            self.value = self.default
            log_data = {"name": self._name, "data": self.default,
                        "e_type": self.e_type}
            if self.hidden:
                log_data['data'] = "xxxxxxxx"
            log.debug("Field not found : '%(name)s', default value : \
'%(data)s', type : '%(e_type)s'", log_data)
        else:
            log.debug("Field not found : '" + self._name + "'")
[ "def", "_load", "(", "self", ",", "file_parser", ",", "section_name", ")", ":", "# pylint: disable-msg=W0621", "log", "=", "logging", ".", "getLogger", "(", "'argtoolbox'", ")", "try", ":", "log", ".", "debug", "(", "\"looking for field (section=\"", "+", "section_name", "+", "\") : \"", "+", "self", ".", "_name", ")", "data", "=", "None", "try", ":", "if", "self", ".", "e_type", "==", "int", ":", "data", "=", "file_parser", ".", "getint", "(", "section_name", ",", "self", ".", "_name", ")", "elif", "self", ".", "e_type", "==", "float", ":", "data", "=", "file_parser", ".", "getfloat", "(", "section_name", ",", "self", ".", "_name", ")", "elif", "self", ".", "e_type", "==", "bool", ":", "data", "=", "file_parser", ".", "getboolean", "(", "section_name", ",", "self", ".", "_name", ")", "elif", "self", ".", "e_type", "==", "list", ":", "data", "=", "file_parser", ".", "get", "(", "section_name", ",", "self", ".", "_name", ")", "data", "=", "data", ".", "strip", "(", ")", "data", "=", "data", ".", "decode", "(", "locale", ".", "getpreferredencoding", "(", ")", ")", "data", "=", "data", ".", "split", "(", ")", "if", "not", "data", ":", "msg", "=", "\"The optional field '%(name)s' was present, \\\ntype is list, but the current value is an empty \\\nlist.\"", "%", "{", "\"name\"", ":", "self", ".", "_name", "}", "log", ".", "error", "(", "msg", ")", "raise", "ValueError", "(", "msg", ")", "elif", "self", ".", "e_type", "==", "str", ":", "data", "=", "file_parser", ".", "get", "(", "section_name", ",", "self", ".", "_name", ")", "# happens only when the current field is present,", "# type is string, but value is ''", "if", "not", "data", ":", "msg", "=", "\"The optional field '%(name)s' was present, \\\n type is string, but the current value is an empty \\\n string.\"", "%", "{", "\"name\"", ":", "self", ".", "_name", "}", "log", ".", "error", "(", "msg", ")", "raise", "ValueError", "(", "msg", ")", "data", "=", "data", ".", "decode", "(", "locale", ".", "getpreferredencoding", "(", ")", ")", "else", ":", "msg", "=", "\"Data type not supported : %(type)s \"", "%", "{", "\"type\"", ":", "self", ".", "e_type", "}", "log", ".", "error", "(", "msg", ")", "raise", "TypeError", "(", "msg", ")", "except", "ValueError", "as", "ex", ":", "msg", "=", "\"The current field '%(name)s' was present, but the \\\nrequired type is : %(e_type)s.\"", "%", "{", "\"name\"", ":", "self", ".", "_name", ",", "\"e_type\"", ":", "self", ".", "e_type", "}", "log", ".", "error", "(", "msg", ")", "log", ".", "error", "(", "str", "(", "ex", ")", ")", "raise", "ValueError", "(", "str", "(", "ex", ")", ")", "log_data", "=", "{", "\"name\"", ":", "self", ".", "_name", ",", "\"data\"", ":", "data", ",", "\"e_type\"", ":", "self", ".", "e_type", "}", "if", "self", ".", "hidden", ":", "log_data", "[", "'data'", "]", "=", "\"xxxxxxxx\"", "log", ".", "debug", "(", "\"field found : '%(name)s', value : '%(data)s', \\\ntype : '%(e_type)s'\"", ",", "log_data", ")", "self", ".", "value", "=", "data", "except", "ConfigParser", ".", "NoOptionError", ":", "if", "self", ".", "conf_required", ":", "msg", "=", "\"The required field '%(name)s' was missing from the \\\nconfig file.\"", "%", "{", "\"name\"", ":", "self", ".", "_name", "}", "log", ".", "error", "(", "msg", ")", "raise", "ValueError", "(", "msg", ")", "if", "self", ".", "default", "is", "not", "None", ":", "self", ".", "value", "=", "self", ".", "default", "log_data", "=", "{", "\"name\"", ":", "self", ".", "_name", ",", "\"data\"", ":", "self", ".", "default", ",", "\"e_type\"", 
":", "self", ".", "e_type", "}", "if", "self", ".", "hidden", ":", "log_data", "[", "'data'", "]", "=", "\"xxxxxxxx\"", "log", ".", "debug", "(", "\"Field not found : '%(name)s', default value : \\\n'%(data)s', type : '%(e_type)s'\"", ",", "log_data", ")", "else", ":", "log", ".", "debug", "(", "\"Field not found : '\"", "+", "self", ".", "_name", "+", "\"'\"", ")" ]
43.575
14.5
def set_file(name, source, template=None, context=None, defaults=None, **kwargs):
    '''
    Set debconf selections from a file or a template

    .. code-block:: yaml

        <state_id>:
          debconf.set_file:
            - source: salt://pathto/pkg.selections

        <state_id>:
          debconf.set_file:
            - source: salt://pathto/pkg.selections?saltenv=myenvironment

        <state_id>:
          debconf.set_file:
            - source: salt://pathto/pkg.selections.jinja2
            - template: jinja
            - context:
                some_value: "false"

    source:
        The location of the file containing the package selections

    template
        If this setting is applied then the named templating engine will be
        used to render the package selections file, currently jinja, mako,
        and wempy are supported

    context
        Overrides default context variables passed to the template.

    defaults
        Default context passed to the template.
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}

    if context is None:
        context = {}
    elif not isinstance(context, dict):
        ret['result'] = False
        ret['comment'] = 'Context must be formed as a dict'
        return ret

    if defaults is None:
        defaults = {}
    elif not isinstance(defaults, dict):
        ret['result'] = False
        ret['comment'] = 'Defaults must be formed as a dict'
        return ret

    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Debconf selections would have been set.'
        return ret

    if template:
        result = __salt__['debconf.set_template'](source, template, context,
                                                  defaults, **kwargs)
    else:
        result = __salt__['debconf.set_file'](source, **kwargs)

    if result:
        ret['comment'] = 'Debconf selections were set.'
    else:
        ret['result'] = False
        ret['comment'] = 'Unable to set debconf selections from file.'

    return ret
[ "def", "set_file", "(", "name", ",", "source", ",", "template", "=", "None", ",", "context", "=", "None", ",", "defaults", "=", "None", ",", "*", "*", "kwargs", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "True", ",", "'comment'", ":", "''", "}", "if", "context", "is", "None", ":", "context", "=", "{", "}", "elif", "not", "isinstance", "(", "context", ",", "dict", ")", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'Context must be formed as a dict'", "return", "ret", "if", "defaults", "is", "None", ":", "defaults", "=", "{", "}", "elif", "not", "isinstance", "(", "defaults", ",", "dict", ")", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'Defaults must be formed as a dict'", "return", "ret", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'result'", "]", "=", "None", "ret", "[", "'comment'", "]", "=", "'Debconf selections would have been set.'", "return", "ret", "if", "template", ":", "result", "=", "__salt__", "[", "'debconf.set_template'", "]", "(", "source", ",", "template", ",", "context", ",", "defaults", ",", "*", "*", "kwargs", ")", "else", ":", "result", "=", "__salt__", "[", "'debconf.set_file'", "]", "(", "source", ",", "*", "*", "kwargs", ")", "if", "result", ":", "ret", "[", "'comment'", "]", "=", "'Debconf selections were set.'", "else", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'Unable to set debconf selections from file.'", "return", "ret" ]
27.760563
24.464789
def _GetReportService(self):
    """Lazily initializes a report service client."""
    if not self._report_service:
        self._report_service = self._ad_manager_client.GetService(
            'ReportService', self._version, self._server)
    return self._report_service
[ "def", "_GetReportService", "(", "self", ")", ":", "if", "not", "self", ".", "_report_service", ":", "self", ".", "_report_service", "=", "self", ".", "_ad_manager_client", ".", "GetService", "(", "'ReportService'", ",", "self", ".", "_version", ",", "self", ".", "_server", ")", "return", "self", ".", "_report_service" ]
43.833333
11.333333
def list_bucket(self, bucket):
    """Create several files and paginate through them.

    Production apps should set page_size to a practical value.

    Args:
      bucket: bucket.
    """
    self.response.write('Listbucket result:\n')

    page_size = 1
    stats = gcs.listbucket(bucket + '/foo', max_keys=page_size)
    while True:
        count = 0
        for stat in stats:
            count += 1
            self.response.write(repr(stat))
            self.response.write('\n')

        if count != page_size or count == 0:
            break
        stats = gcs.listbucket(bucket + '/foo', max_keys=page_size,
                               marker=stat.filename)
[ "def", "list_bucket", "(", "self", ",", "bucket", ")", ":", "self", ".", "response", ".", "write", "(", "'Listbucket result:\\n'", ")", "page_size", "=", "1", "stats", "=", "gcs", ".", "listbucket", "(", "bucket", "+", "'/foo'", ",", "max_keys", "=", "page_size", ")", "while", "True", ":", "count", "=", "0", "for", "stat", "in", "stats", ":", "count", "+=", "1", "self", ".", "response", ".", "write", "(", "repr", "(", "stat", ")", ")", "self", ".", "response", ".", "write", "(", "'\\n'", ")", "if", "count", "!=", "page_size", "or", "count", "==", "0", ":", "break", "stats", "=", "gcs", ".", "listbucket", "(", "bucket", "+", "'/foo'", ",", "max_keys", "=", "page_size", ",", "marker", "=", "stat", ".", "filename", ")" ]
27.130435
19.782609
def _lookup_abs(self, p, klass, create=1):
    """
    Fast (?) lookup of a *normalized* absolute path.

    This method is intended for use by internal lookups with
    already-normalized path data.  For general-purpose lookups,
    use the FS.Entry(), FS.Dir() or FS.File() methods.

    The caller is responsible for making sure we're passed a
    normalized absolute path; we merely let Python's dictionary look
    up and return the One True Node.FS object for the path.

    If a Node for the specified "p" doesn't already exist, and
    "create" is specified, the Node may be created after recursive
    invocation to find or create the parent directory or directories.
    """
    k = _my_normcase(p)
    try:
        result = self._lookupDict[k]
    except KeyError:
        if not create:
            msg = "No such file or directory: '%s' in '%s' (and create is False)" % (p, str(self))
            raise SCons.Errors.UserError(msg)
        # There is no Node for this path name, and we're allowed
        # to create it.
        dir_name, file_name = p.rsplit('/', 1)
        dir_node = self._lookup_abs(dir_name, Dir)
        result = klass(file_name, dir_node, self.fs)

        # Double-check on disk (as configured) that the Node we
        # created matches whatever is out there in the real world.
        result.diskcheck_match()

        self._lookupDict[k] = result
        dir_node.entries[_my_normcase(file_name)] = result
        dir_node.implicit = None
    else:
        # There is already a Node for this path name.  Allow it to
        # complain if we were looking for an inappropriate type.
        result.must_be_same(klass)
    return result
[ "def", "_lookup_abs", "(", "self", ",", "p", ",", "klass", ",", "create", "=", "1", ")", ":", "k", "=", "_my_normcase", "(", "p", ")", "try", ":", "result", "=", "self", ".", "_lookupDict", "[", "k", "]", "except", "KeyError", ":", "if", "not", "create", ":", "msg", "=", "\"No such file or directory: '%s' in '%s' (and create is False)\"", "%", "(", "p", ",", "str", "(", "self", ")", ")", "raise", "SCons", ".", "Errors", ".", "UserError", "(", "msg", ")", "# There is no Node for this path name, and we're allowed", "# to create it.", "dir_name", ",", "file_name", "=", "p", ".", "rsplit", "(", "'/'", ",", "1", ")", "dir_node", "=", "self", ".", "_lookup_abs", "(", "dir_name", ",", "Dir", ")", "result", "=", "klass", "(", "file_name", ",", "dir_node", ",", "self", ".", "fs", ")", "# Double-check on disk (as configured) that the Node we", "# created matches whatever is out there in the real world.", "result", ".", "diskcheck_match", "(", ")", "self", ".", "_lookupDict", "[", "k", "]", "=", "result", "dir_node", ".", "entries", "[", "_my_normcase", "(", "file_name", ")", "]", "=", "result", "dir_node", ".", "implicit", "=", "None", "else", ":", "# There is already a Node for this path name. Allow it to", "# complain if we were looking for an inappropriate type.", "result", ".", "must_be_same", "(", "klass", ")", "return", "result" ]
43.219512
21.219512
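The core of _lookup_abs is a normalize-then-memoize cache keyed on the normalized path. A stripped-down sketch of that cache with hypothetical names (PathRegistry, factory), omitting the parent-directory recursion:

class PathRegistry:
    def __init__(self):
        self._lookup = {}

    def lookup_abs(self, path, factory, create=True):
        key = path.lower()  # stand-in for _my_normcase()
        try:
            return self._lookup[key]
        except KeyError:
            if not create:
                raise
            node = factory(path)       # built once per normalized path
            self._lookup[key] = node   # the One True object for this path
            return node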
def get_longest_orf(orfs): """Find longest ORF from the given list of ORFs.""" sorted_orf = sorted(orfs, key=lambda x: len(x['sequence']), reverse=True)[0] return sorted_orf
[ "def", "get_longest_orf", "(", "orfs", ")", ":", "sorted_orf", "=", "sorted", "(", "orfs", ",", "key", "=", "lambda", "x", ":", "len", "(", "x", "[", "'sequence'", "]", ")", ",", "reverse", "=", "True", ")", "[", "0", "]", "return", "sorted_orf" ]
45.5
18.25
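Usage sketch with toy data. Note that max() with the same key avoids the full sort; the sorted()[0] form above is O(n log n) where O(n) suffices.

orfs = [{'sequence': 'ATGCCC'}, {'sequence': 'ATGAAATGA'}]
longest = get_longest_orf(orfs)
print(longest)  # {'sequence': 'ATGAAATGA'}

# Equivalent single pass:
longest = max(orfs, key=lambda x: len(x['sequence']))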
def is_parameter(self): """Whether this is a function parameter.""" return (isinstance(self.scope, CodeFunction) and self in self.scope.parameters)
[ "def", "is_parameter", "(", "self", ")", ":", "return", "(", "isinstance", "(", "self", ".", "scope", ",", "CodeFunction", ")", "and", "self", "in", "self", ".", "scope", ".", "parameters", ")" ]
44
9.75
def getGroundResolution(self, latitude, level):
    '''
    returns the ground resolution based on latitude and zoom level.
    '''
    latitude = self.clipValue(latitude, self.min_lat, self.max_lat)

    mapSize = self.getMapDimensionsByZoomLevel(level)
    return math.cos(
        latitude * math.pi / 180) * 2 * math.pi * self.earth_radius / \
        mapSize
[ "def", "getGroundResolution", "(", "self", ",", "latitude", ",", "level", ")", ":", "latitude", "=", "self", ".", "clipValue", "(", "latitude", ",", "self", ".", "min_lat", ",", "self", ".", "max_lat", ")", "mapSize", "=", "self", ".", "getMapDimensionsByZoomLevel", "(", "level", ")", "return", "math", ".", "cos", "(", "latitude", "*", "math", ".", "pi", "/", "180", ")", "*", "2", "*", "math", ".", "pi", "*", "self", ".", "earth_radius", "/", "mapSize" ]
43.777778
24.222222
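A worked example of the formula, assuming a WGS84 equatorial radius of 6378137 m and 256-pixel base tiles (both are assumptions about this class's configuration):

import math

earth_radius = 6378137       # assumed, metres
map_size = 256 * 2 ** 0      # assumed 256 px tiles at zoom level 0

res = math.cos(0.0 * math.pi / 180) * 2 * math.pi * earth_radius / map_size
print(round(res, 2))  # 156543.03 metres per pixel at the equator, zoom 0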
def _put(self, url, data=None):
    """Wrapper around requests.put() to use the API prefix. Returns a JSON response."""
    if data is None:  # avoid a shared mutable default argument
        data = {}
    r = requests.put(self._api_prefix + url,
                     data=json.dumps(data),
                     headers=self.headers,
                     auth=self.auth,
                     allow_redirects=False,
                     )
    return self._action(r)
[ "def", "_put", "(", "self", ",", "url", ",", "data", "=", "{", "}", ")", ":", "r", "=", "requests", ".", "put", "(", "self", ".", "_api_prefix", "+", "url", ",", "data", "=", "json", ".", "dumps", "(", "data", ")", ",", "headers", "=", "self", ".", "headers", ",", "auth", "=", "self", ".", "auth", ",", "allow_redirects", "=", "False", ",", ")", "return", "self", ".", "_action", "(", "r", ")" ]
37.111111
10.222222
def time_correlation_by_diagonalization(P, pi, obs1, obs2=None, time=1, rdl=None):
    """ calculates time correlation. Raises P to power 'time' by diagonalization.
    If rdl tuple (R, D, L) is given, it will be used for further calculation.
    """
    if rdl is None:
        raise ValueError("no rdl decomposition")
    if obs2 is None:
        # default to the autocorrelation of obs1
        obs2 = obs1
    R, D, L = rdl

    d_times = np.diag(D) ** time
    diag_inds = np.diag_indices_from(D)
    D_time = np.zeros(D.shape, dtype=d_times.dtype)
    D_time[diag_inds] = d_times
    P_time = np.dot(np.dot(R, D_time), L)

    # multiply element-wise obs1 and pi. this is obs1' diag(pi)
    l = np.multiply(obs1, pi)
    m = np.dot(P_time, obs2)
    result = np.dot(l, m)
    return result
[ "def", "time_correlation_by_diagonalization", "(", "P", ",", "pi", ",", "obs1", ",", "obs2", "=", "None", ",", "time", "=", "1", ",", "rdl", "=", "None", ")", ":", "if", "rdl", "is", "None", ":", "raise", "ValueError", "(", "\"no rdl decomposition\"", ")", "R", ",", "D", ",", "L", "=", "rdl", "d_times", "=", "np", ".", "diag", "(", "D", ")", "**", "time", "diag_inds", "=", "np", ".", "diag_indices_from", "(", "D", ")", "D_time", "=", "np", ".", "zeros", "(", "D", ".", "shape", ",", "dtype", "=", "d_times", ".", "dtype", ")", "D_time", "[", "diag_inds", "]", "=", "d_times", "P_time", "=", "np", ".", "dot", "(", "np", ".", "dot", "(", "R", ",", "D_time", ")", ",", "L", ")", "# multiply element-wise obs1 and pi. this is obs1' diag(pi)", "l", "=", "np", ".", "multiply", "(", "obs1", ",", "pi", ")", "m", "=", "np", ".", "dot", "(", "P_time", ",", "obs2", ")", "result", "=", "np", ".", "dot", "(", "l", ",", "m", ")", "return", "result" ]
33.047619
17.047619
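A worked example for a toy 2-state chain; the eigendecomposition supplies the (R, D, L) tuple the function expects:

import numpy as np

P = np.array([[0.9, 0.1],
              [0.2, 0.8]])
pi = np.array([2.0 / 3.0, 1.0 / 3.0])  # stationary: pi @ P == pi

w, R = np.linalg.eig(P)                # columns of R are right eigenvectors
L = np.linalg.inv(R)                   # matching left eigenvectors
rdl = (R, np.diag(w), L)

obs = np.array([1.0, -1.0])
c = time_correlation_by_diagonalization(P, pi, obs, obs, time=5, rdl=rdl)
# c equals (obs * pi) . (P^5 @ obs), i.e. the lag-5 autocorrelation of obs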
def _get_unicode(data, force=False): """Try to return a text aka unicode object from the given data.""" if isinstance(data, binary_type): return data.decode('utf-8') elif data is None: return '' elif force: if PY2: return unicode(data) else: return str(data) else: return data
[ "def", "_get_unicode", "(", "data", ",", "force", "=", "False", ")", ":", "if", "isinstance", "(", "data", ",", "binary_type", ")", ":", "return", "data", ".", "decode", "(", "'utf-8'", ")", "elif", "data", "is", "None", ":", "return", "''", "elif", "force", ":", "if", "PY2", ":", "return", "unicode", "(", "data", ")", "else", ":", "return", "str", "(", "data", ")", "else", ":", "return", "data" ]
26.769231
15.538462
def _import_next_layer(self, proto, length):
    """Import next layer extractor.

    Positional arguments:
        * proto -- str, next layer protocol name
        * length -- int, valid (not padding) length

    Returns:
        * bool -- flag if extraction of next layer succeeded
        * Info -- info of next layer
        * ProtoChain -- protocol chain of next layer
        * str -- alias of next layer

    """
    if self._exproto == 'null' and self._exlayer == 'None':
        from pcapkit.protocols.raw import Raw as NextLayer
    else:
        from pcapkit.foundation.analysis import analyse as NextLayer
    if length == 0:
        next_ = NoPayload()
    elif self._onerror:
        next_ = beholder_ng(NextLayer)(self._file, length, _termination=self._sigterm)
    else:
        next_ = NextLayer(self._file, length, _termination=self._sigterm)
    return next_
[ "def", "_import_next_layer", "(", "self", ",", "proto", ",", "length", ")", ":", "if", "self", ".", "_exproto", "==", "'null'", "and", "self", ".", "_exlayer", "==", "'None'", ":", "from", "pcapkit", ".", "protocols", ".", "raw", "import", "Raw", "as", "NextLayer", "else", ":", "from", "pcapkit", ".", "foundation", ".", "analysis", "import", "analyse", "as", "NextLayer", "# from pcapkit.foundation.analysis import analyse as NextLayer", "if", "length", "==", "0", ":", "next_", "=", "NoPayload", "(", ")", "elif", "self", ".", "_onerror", ":", "next_", "=", "beholder_ng", "(", "NextLayer", ")", "(", "self", ".", "_file", ",", "length", ",", "_termination", "=", "self", ".", "_sigterm", ")", "else", ":", "next_", "=", "NextLayer", "(", "self", ".", "_file", ",", "length", ",", "_termination", "=", "self", ".", "_sigterm", ")", "return", "next_" ]
38.730769
20.5
def _findRedundantProteins(protToPeps, pepToProts, proteins=None):
    """Returns a set of proteins with redundant peptide evidence.

    After removing the redundant proteins from the "protToPeps" and
    "pepToProts" mapping, all remaining proteins have at least one unique
    peptide. The remaining proteins are a "minimal" set of proteins that are
    able to explain all peptides. However, this is not guaranteed to be the
    optimal solution with the least number of proteins. In addition it is
    possible that multiple solutions with the same number of "minimal"
    proteins exist.

    Procedure for finding the redundant proteins:
    1.  Generate a list of proteins that do not contain any unique peptides; a
        unique peptide has exactly one protein entry in "pepToProts".
    2.  Proteins are first sorted in ascending order of the number of
        peptides. Proteins with an equal number of peptides are sorted in
        descending order of their sorted peptide frequencies (= proteins per
        peptide). If two proteins are still equal, they are sorted
        alphanumerically in descending order according to their protein
        names. For example, in the case of a tie between proteins "A" and
        "B", protein "B" would be removed.
    3.  Parse this list of sorted non-unique proteins; if all of a protein's
        peptides have a frequency greater than 1, mark the protein as
        redundant, remove its peptides from the peptide frequency count, and
        continue with the next entry.
    4.  Return the set of proteins marked as redundant.

    :param protToPeps: dict, for each protein (=key) contains a set of
        associated peptides (=value). For example {protein: {peptide, ...}, ...}
    :param pepToProts: dict, for each peptide (=key) contains a set of parent
        proteins (=value). For example {peptide: {protein, ...}, ...}
    :param proteins: iterable, proteins that are tested for being redundant.
        If None, all proteins in "protToPeps" are parsed.

    :returns: a set of redundant proteins, i.e. proteins that are not
        necessary to explain all peptides
    """
    if proteins is None:
        proteins = viewkeys(protToPeps)

    pepFrequency = _getValueCounts(pepToProts)
    protPepCounts = _getValueCounts(protToPeps)

    getCount = operator.itemgetter(1)
    getProt = operator.itemgetter(0)

    #TODO: quick and dirty solution
    #NOTE: add a test for merged proteins
    proteinTuples = list()
    for protein in proteins:
        if isinstance(protein, tuple):
            proteinTuples.append(protein)
        else:
            proteinTuples.append(tuple([protein]))

    sort = list()
    for protein in sorted(proteinTuples, reverse=True):
        if len(protein) == 1:
            protein = protein[0]

        protPepFreq = [pepFrequency[pep] for pep in protToPeps[protein]]
        if min(protPepFreq) > 1:
            sortValue = (len(protPepFreq)*-1, sorted(protPepFreq, reverse=True))
            sort.append((protein, sortValue))
    sortedProteins = map(getProt, sorted(sort, key=getCount, reverse=True))

    redundantProteins = set()
    for protein in sortedProteins:
        for pep in protToPeps[protein]:
            if pepFrequency[pep] <= 1:
                break
        else:
            protPepFrequency = Counter(protToPeps[protein])
            pepFrequency.subtract(protPepFrequency)
            redundantProteins.add(protein)
    return redundantProteins
[ "def", "_findRedundantProteins", "(", "protToPeps", ",", "pepToProts", ",", "proteins", "=", "None", ")", ":", "if", "proteins", "is", "None", ":", "proteins", "=", "viewkeys", "(", "protToPeps", ")", "pepFrequency", "=", "_getValueCounts", "(", "pepToProts", ")", "protPepCounts", "=", "_getValueCounts", "(", "protToPeps", ")", "getCount", "=", "operator", ".", "itemgetter", "(", "1", ")", "getProt", "=", "operator", ".", "itemgetter", "(", "0", ")", "#TODO: quick and dirty solution", "#NOTE: add a test for merged proteins", "proteinTuples", "=", "list", "(", ")", "for", "protein", "in", "proteins", ":", "if", "isinstance", "(", "protein", ",", "tuple", ")", ":", "proteinTuples", ".", "append", "(", "protein", ")", "else", ":", "proteinTuples", ".", "append", "(", "tuple", "(", "[", "protein", "]", ")", ")", "sort", "=", "list", "(", ")", "for", "protein", "in", "sorted", "(", "proteinTuples", ",", "reverse", "=", "True", ")", ":", "if", "len", "(", "protein", ")", "==", "1", ":", "protein", "=", "protein", "[", "0", "]", "protPepFreq", "=", "[", "pepFrequency", "[", "pep", "]", "for", "pep", "in", "protToPeps", "[", "protein", "]", "]", "if", "min", "(", "protPepFreq", ")", ">", "1", ":", "sortValue", "=", "(", "len", "(", "protPepFreq", ")", "*", "-", "1", ",", "sorted", "(", "protPepFreq", ",", "reverse", "=", "True", ")", ")", "sort", ".", "append", "(", "(", "protein", ",", "sortValue", ")", ")", "sortedProteins", "=", "map", "(", "getProt", ",", "sorted", "(", "sort", ",", "key", "=", "getCount", ",", "reverse", "=", "True", ")", ")", "redundantProteins", "=", "set", "(", ")", "for", "protein", "in", "sortedProteins", ":", "for", "pep", "in", "protToPeps", "[", "protein", "]", ":", "if", "pepFrequency", "[", "pep", "]", "<=", "1", ":", "break", "else", ":", "protPepFrequency", "=", "Counter", "(", "protToPeps", "[", "protein", "]", ")", "pepFrequency", ".", "subtract", "(", "protPepFrequency", ")", "redundantProteins", ".", "add", "(", "protein", ")", "return", "redundantProteins" ]
45.783784
22.243243
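A toy example of the two mappings and the resulting redundant set: protein 'A' alone explains both peptides, so 'B' and 'C' carry no unique evidence and are reported as redundant.

protToPeps = {'A': {'p1', 'p2'}, 'B': {'p2'}, 'C': {'p1'}}
pepToProts = {'p1': {'A', 'C'}, 'p2': {'A', 'B'}}

redundant = _findRedundantProteins(protToPeps, pepToProts)
print(redundant)  # {'B', 'C'}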
def model_attr(attr_name):
    """
    Creates a setter that will set the specified model attribute to the
    current value.

    @param attr_name: the name of an attribute belonging to the model.
    @type attr_name: str
    """
    def model_attr(value, context, **_params):
        setattr(context["model"], attr_name, value)
    return model_attr
[ "def", "model_attr", "(", "attr_name", ")", ":", "def", "model_attr", "(", "value", ",", "context", ",", "*", "*", "_params", ")", ":", "setattr", "(", "context", "[", "\"model\"", "]", ",", "attr_name", ",", "value", ")", "return", "_attr", "(", ")", "return", "model_attr" ]
27.923077
17.923077
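Usage sketch of the setter factory; Article is a hypothetical stand-in model:

class Article:
    title = None

set_title = model_attr("title")

article = Article()
set_title("Hello world", {"model": article})
assert article.title == "Hello world"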
def melspectrogram(y=None, sr=22050, S=None, n_fft=2048, hop_length=512, win_length=None, window='hann', center=True, pad_mode='reflect', power=2.0, **kwargs): """Compute a mel-scaled spectrogram. If a spectrogram input `S` is provided, then it is mapped directly onto the mel basis `mel_f` by `mel_f.dot(S)`. If a time-series input `y, sr` is provided, then its magnitude spectrogram `S` is first computed, and then mapped onto the mel scale by `mel_f.dot(S**power)`. By default, `power=2` operates on a power spectrum. Parameters ---------- y : np.ndarray [shape=(n,)] or None audio time-series sr : number > 0 [scalar] sampling rate of `y` S : np.ndarray [shape=(d, t)] spectrogram n_fft : int > 0 [scalar] length of the FFT window hop_length : int > 0 [scalar] number of samples between successive frames. See `librosa.core.stft` win_length : int <= n_fft [scalar] Each frame of audio is windowed by `window()`. The window will be of length `win_length` and then padded with zeros to match `n_fft`. If unspecified, defaults to ``win_length = n_fft``. window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)] - a window specification (string, tuple, or number); see `scipy.signal.get_window` - a window function, such as `scipy.signal.hanning` - a vector or array of length `n_fft` .. see also:: `filters.get_window` center : boolean - If `True`, the signal `y` is padded so that frame `t` is centered at `y[t * hop_length]`. - If `False`, then frame `t` begins at `y[t * hop_length]` pad_mode : string If `center=True`, the padding mode to use at the edges of the signal. By default, STFT uses reflection padding. power : float > 0 [scalar] Exponent for the magnitude melspectrogram. e.g., 1 for energy, 2 for power, etc. kwargs : additional keyword arguments Mel filter bank parameters. See `librosa.filters.mel` for details. Returns ------- S : np.ndarray [shape=(n_mels, t)] Mel spectrogram See Also -------- librosa.filters.mel Mel filter bank construction librosa.core.stft Short-time Fourier Transform Examples -------- >>> y, sr = librosa.load(librosa.util.example_audio_file()) >>> librosa.feature.melspectrogram(y=y, sr=sr) array([[ 2.891e-07, 2.548e-03, ..., 8.116e-09, 5.633e-09], [ 1.986e-07, 1.162e-02, ..., 9.332e-08, 6.716e-09], ..., [ 3.668e-09, 2.029e-08, ..., 3.208e-09, 2.864e-09], [ 2.561e-10, 2.096e-09, ..., 7.543e-10, 6.101e-10]]) Using a pre-computed power spectrogram >>> D = np.abs(librosa.stft(y))**2 >>> S = librosa.feature.melspectrogram(S=D) >>> # Passing through arguments to the Mel filters >>> S = librosa.feature.melspectrogram(y=y, sr=sr, n_mels=128, ... fmax=8000) >>> import matplotlib.pyplot as plt >>> plt.figure(figsize=(10, 4)) >>> librosa.display.specshow(librosa.power_to_db(S, ... ref=np.max), ... y_axis='mel', fmax=8000, ... x_axis='time') >>> plt.colorbar(format='%+2.0f dB') >>> plt.title('Mel spectrogram') >>> plt.tight_layout() """ S, n_fft = _spectrogram(y=y, S=S, n_fft=n_fft, hop_length=hop_length, power=power, win_length=win_length, window=window, center=center, pad_mode=pad_mode) # Build a Mel filter mel_basis = filters.mel(sr, n_fft, **kwargs) return np.dot(mel_basis, S)
[ "def", "melspectrogram", "(", "y", "=", "None", ",", "sr", "=", "22050", ",", "S", "=", "None", ",", "n_fft", "=", "2048", ",", "hop_length", "=", "512", ",", "win_length", "=", "None", ",", "window", "=", "'hann'", ",", "center", "=", "True", ",", "pad_mode", "=", "'reflect'", ",", "power", "=", "2.0", ",", "*", "*", "kwargs", ")", ":", "S", ",", "n_fft", "=", "_spectrogram", "(", "y", "=", "y", ",", "S", "=", "S", ",", "n_fft", "=", "n_fft", ",", "hop_length", "=", "hop_length", ",", "power", "=", "power", ",", "win_length", "=", "win_length", ",", "window", "=", "window", ",", "center", "=", "center", ",", "pad_mode", "=", "pad_mode", ")", "# Build a Mel filter", "mel_basis", "=", "filters", ".", "mel", "(", "sr", ",", "n_fft", ",", "*", "*", "kwargs", ")", "return", "np", ".", "dot", "(", "mel_basis", ",", "S", ")" ]
33.184211
21.684211
def add_unit(unit, **kwargs):
    """
        Add the unit defined in the object "unit" to the DB.
        If unit["project_id"] is None it means that the unit is global,
        otherwise it is the property of a project.
        If the unit already exists, an exception is raised.

        A minimal example:

        .. code-block:: python

            new_unit = dict(
                name = 'Teaspoons per second',
                abbreviation = 'tsp s^-1',
                cf = 0,                  # Constant conversion factor
                lf = 1.47867648e-05,     # Linear conversion factor
                dimension_id = 2,
                description = 'A flow of one teaspoon per second.',
            )
            add_unit(new_unit)
    """
    new_unit = Unit()
    new_unit.dimension_id = unit["dimension_id"]
    new_unit.name = unit['name']

    # The unit dict uses the key 'abbreviation' (not the short 'abbr')
    new_unit.abbreviation = unit['abbreviation']

    # The unit dict uses the key 'description'
    new_unit.description = unit['description']

    new_unit.lf = unit['lf']
    new_unit.cf = unit['cf']
    if ('project_id' in unit) and (unit['project_id'] is not None):
        # Adding dimension to the "user" dimensions list
        new_unit.project_id = unit['project_id']

    # Save on DB
    db.DBSession.add(new_unit)
    db.DBSession.flush()

    return JSONObject(new_unit)
[ "def", "add_unit", "(", "unit", ",", "*", "*", "kwargs", ")", ":", "new_unit", "=", "Unit", "(", ")", "new_unit", ".", "dimension_id", "=", "unit", "[", "\"dimension_id\"", "]", "new_unit", ".", "name", "=", "unit", "[", "'name'", "]", "# Needed to uniform abbr to abbreviation", "new_unit", ".", "abbreviation", "=", "unit", "[", "'abbreviation'", "]", "# Needed to uniform into to description", "new_unit", ".", "description", "=", "unit", "[", "'description'", "]", "new_unit", ".", "lf", "=", "unit", "[", "'lf'", "]", "new_unit", ".", "cf", "=", "unit", "[", "'cf'", "]", "if", "(", "'project_id'", "in", "unit", ")", "and", "(", "unit", "[", "'project_id'", "]", "is", "not", "None", ")", ":", "# Adding dimension to the \"user\" dimensions list", "new_unit", ".", "project_id", "=", "unit", "[", "'project_id'", "]", "# Save on DB", "db", ".", "DBSession", ".", "add", "(", "new_unit", ")", "db", ".", "DBSession", ".", "flush", "(", ")", "return", "JSONObject", "(", "new_unit", ")" ]
29.042553
21.510638
async def fetch_state(self, request): """Fetches data from a specific address in the validator's state tree. Request: query: - head: The id of the block to use as the head of the chain - address: The 70 character address of the data to be fetched Response: data: The base64 encoded binary data stored at that address head: The head used for this query (most recent if unspecified) link: The link to this exact query, including head block """ error_traps = [ error_handlers.InvalidAddressTrap, error_handlers.StateNotFoundTrap] address = request.match_info.get('address', '') head = request.url.query.get('head', None) head, root = await self._head_to_root(head) response = await self._query_validator( Message.CLIENT_STATE_GET_REQUEST, client_state_pb2.ClientStateGetResponse, client_state_pb2.ClientStateGetRequest( state_root=root, address=address), error_traps) return self._wrap_response( request, data=response['value'], metadata=self._get_metadata(request, response, head=head))
[ "async", "def", "fetch_state", "(", "self", ",", "request", ")", ":", "error_traps", "=", "[", "error_handlers", ".", "InvalidAddressTrap", ",", "error_handlers", ".", "StateNotFoundTrap", "]", "address", "=", "request", ".", "match_info", ".", "get", "(", "'address'", ",", "''", ")", "head", "=", "request", ".", "url", ".", "query", ".", "get", "(", "'head'", ",", "None", ")", "head", ",", "root", "=", "await", "self", ".", "_head_to_root", "(", "head", ")", "response", "=", "await", "self", ".", "_query_validator", "(", "Message", ".", "CLIENT_STATE_GET_REQUEST", ",", "client_state_pb2", ".", "ClientStateGetResponse", ",", "client_state_pb2", ".", "ClientStateGetRequest", "(", "state_root", "=", "root", ",", "address", "=", "address", ")", ",", "error_traps", ")", "return", "self", ".", "_wrap_response", "(", "request", ",", "data", "=", "response", "[", "'value'", "]", ",", "metadata", "=", "self", ".", "_get_metadata", "(", "request", ",", "response", ",", "head", "=", "head", ")", ")" ]
38.8125
19.46875
def die(self, password=''): """ Tells the IRCd to die. Optional arguments: * password='' - Die command password. """ with self.lock: self.send('DIE :%s' % password, error_check=True)
[ "def", "die", "(", "self", ",", "password", "=", "''", ")", ":", "with", "self", ".", "lock", ":", "self", ".", "send", "(", "'DIE :%s'", "%", "password", ",", "error_check", "=", "True", ")" ]
29.375
9.875
def create_atomic_observe_operations(self, states, actions, internals, terminal, reward, index): """ Returns the tf op to fetch when unbuffered observations are passed in. Args: states (any): One state (usually a value tuple) or dict of states if multiple states are expected. actions (any): One action (usually a value tuple) or dict of states if multiple actions are expected. internals (any): Internal list. terminal (bool): boolean indicating if the episode terminated after the observation. reward (float): scalar reward that resulted from executing the action. Returns: Tf op to fetch when `observe()` is called. """ # Increment episode num_episodes = tf.count_nonzero(input_tensor=terminal, dtype=util.tf_dtype('int')) increment_episode = tf.assign_add(ref=self.episode, value=tf.to_int64(x=num_episodes)) increment_global_episode = tf.assign_add(ref=self.global_episode, value=tf.to_int64(x=num_episodes)) with tf.control_dependencies(control_inputs=(increment_episode, increment_global_episode)): # Stop gradients # Not using buffers here. states = util.map_tensors(fn=tf.stop_gradient, tensors=states) internals = util.map_tensors(fn=tf.stop_gradient, tensors=internals) actions = util.map_tensors(fn=tf.stop_gradient, tensors=actions) terminal = tf.stop_gradient(input=terminal) reward = tf.stop_gradient(input=reward) # Observation observation = self.fn_observe_timestep( states=states, internals=internals, actions=actions, terminal=terminal, reward=reward ) with tf.control_dependencies(control_inputs=(observation,)): # Trivial operation to enforce control dependency. self.unbuffered_episode_output = self.global_episode + 0
[ "def", "create_atomic_observe_operations", "(", "self", ",", "states", ",", "actions", ",", "internals", ",", "terminal", ",", "reward", ",", "index", ")", ":", "# Increment episode", "num_episodes", "=", "tf", ".", "count_nonzero", "(", "input_tensor", "=", "terminal", ",", "dtype", "=", "util", ".", "tf_dtype", "(", "'int'", ")", ")", "increment_episode", "=", "tf", ".", "assign_add", "(", "ref", "=", "self", ".", "episode", ",", "value", "=", "tf", ".", "to_int64", "(", "x", "=", "num_episodes", ")", ")", "increment_global_episode", "=", "tf", ".", "assign_add", "(", "ref", "=", "self", ".", "global_episode", ",", "value", "=", "tf", ".", "to_int64", "(", "x", "=", "num_episodes", ")", ")", "with", "tf", ".", "control_dependencies", "(", "control_inputs", "=", "(", "increment_episode", ",", "increment_global_episode", ")", ")", ":", "# Stop gradients", "# Not using buffers here.", "states", "=", "util", ".", "map_tensors", "(", "fn", "=", "tf", ".", "stop_gradient", ",", "tensors", "=", "states", ")", "internals", "=", "util", ".", "map_tensors", "(", "fn", "=", "tf", ".", "stop_gradient", ",", "tensors", "=", "internals", ")", "actions", "=", "util", ".", "map_tensors", "(", "fn", "=", "tf", ".", "stop_gradient", ",", "tensors", "=", "actions", ")", "terminal", "=", "tf", ".", "stop_gradient", "(", "input", "=", "terminal", ")", "reward", "=", "tf", ".", "stop_gradient", "(", "input", "=", "reward", ")", "# Observation", "observation", "=", "self", ".", "fn_observe_timestep", "(", "states", "=", "states", ",", "internals", "=", "internals", ",", "actions", "=", "actions", ",", "terminal", "=", "terminal", ",", "reward", "=", "reward", ")", "with", "tf", ".", "control_dependencies", "(", "control_inputs", "=", "(", "observation", ",", ")", ")", ":", "# Trivial operation to enforce control dependency.", "self", ".", "unbuffered_episode_output", "=", "self", ".", "global_episode", "+", "0" ]
50.74359
29.461538
def search(self, q, start=1, num=10, sortField="username", sortOrder="asc"):
    """
    The User Search operation searches for users in the portal. The
    search index is updated whenever users are created, updated, or
    deleted. There can be a lag between the time that the user is
    updated and the time when it's reflected in the search results.
    The results only contain users that the calling user has
    permissions to see. Users can control this visibility by changing
    the access property of their user.

    Inputs:
       q - The query string to search the users against.
       start - The number of the first entry in the result set
               response. The index number is 1-based. The default
               value of start is 1 (for example, the first search
               result). The start parameter, along with the num
               parameter, can be used to paginate the search results.
       num - The maximum number of results to be included in the
             result set response. The default value is 10, and the
             maximum allowed value is 100. The start parameter, along
             with the num parameter, can be used to paginate the
             search results. The actual number of returned results
             may be less than num. This happens when the number of
             results remaining after start is less than num.
       sortField - Field to sort by. The allowed field names are
                   username and created.
       sortOrder - Describes whether the returned results are in
                   ascending or descending order. Default is
                   ascending.
                   Values: asc | desc
    """
    params = {
        "f" : "json",
        "q" : q,
        "start" : start,
        "num" : num,
        "sortField" : sortField,
        "sortOrder" : sortOrder
    }
    url = self._url
    return self._get(url=url,
                     param_dict=params,
                     securityHandler=self._securityHandler,
                     proxy_url=self._proxy_url,
                     proxy_port=self._proxy_port)
[ "def", "search", "(", "self", ",", "q", ",", "start", "=", "1", ",", "num", "=", "10", ",", "sortField", "=", "\"username\"", ",", "sortOrder", "=", "\"asc\"", ")", ":", "params", "=", "{", "\"f\"", ":", "\"json\"", ",", "\"q\"", ":", "q", ",", "\"start\"", ":", "start", ",", "\"num\"", ":", "num", ",", "\"sortField\"", ":", "sortField", ",", "\"sortOrder\"", ":", "sortOrder", "}", "url", "=", "self", ".", "_url", "return", "self", ".", "_get", "(", "url", "=", "url", ",", "param_dict", "=", "params", ",", "securityHandler", "=", "self", ".", "_securityHandler", ",", "proxy_url", "=", "self", ".", "_proxy_url", ",", "proxy_port", "=", "self", ".", "_proxy_port", ")" ]
43.96
20
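A hedged pagination sketch, assuming a client object (here called users) exposing the method above, and the portal's usual response shape with a 'nextStart' field that becomes -1 on the last page; adjust if your response differs.

start = 1
while start != -1:
    page = users.search(q="smith", start=start, num=100)
    for user in page.get("results", []):
        print(user.get("username"))
    start = page.get("nextStart", -1)  # assumed field; -1 means no more pages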
def update_subtask(self, subtask_id, revision, title=None, completed=None): ''' Updates the subtask with the given ID See https://developer.wunderlist.com/documentation/endpoints/subtask for detailed parameter information Returns: Subtask with given ID with properties and revision updated ''' return subtasks_endpoint.update_subtask(self, subtask_id, revision, title=title, completed=completed)
[ "def", "update_subtask", "(", "self", ",", "subtask_id", ",", "revision", ",", "title", "=", "None", ",", "completed", "=", "None", ")", ":", "return", "subtasks_endpoint", ".", "update_subtask", "(", "self", ",", "subtask_id", ",", "revision", ",", "title", "=", "title", ",", "completed", "=", "completed", ")" ]
44.4
36.8
def machine_info(): """Retrieve core and memory information for the current machine. """ import psutil BYTES_IN_GIG = 1073741824.0 free_bytes = psutil.virtual_memory().total return [{"memory": float("%.1f" % (free_bytes / BYTES_IN_GIG)), "cores": multiprocessing.cpu_count(), "name": socket.gethostname()}]
[ "def", "machine_info", "(", ")", ":", "import", "psutil", "BYTES_IN_GIG", "=", "1073741824.0", "free_bytes", "=", "psutil", ".", "virtual_memory", "(", ")", ".", "total", "return", "[", "{", "\"memory\"", ":", "float", "(", "\"%.1f\"", "%", "(", "free_bytes", "/", "BYTES_IN_GIG", ")", ")", ",", "\"cores\"", ":", "multiprocessing", ".", "cpu_count", "(", ")", ",", "\"name\"", ":", "socket", ".", "gethostname", "(", ")", "}", "]" ]
42
15.875
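Usage sketch; the function returns a single-element list describing the current host:

info = machine_info()
# e.g. [{'memory': 15.6, 'cores': 8, 'name': 'worker-01'}]
total_cores = sum(m["cores"] for m in info)
total_mem_gb = sum(m["memory"] for m in info)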
def power_off(self, si, logger, session, vcenter_data_model, vm_uuid, resource_fullname):
    """
    Power off a vm
    :param si: Service Instance
    :param logger:
    :param session:
    :param vcenter_data_model: vcenter data model
    :param vm_uuid: the uuid of the vm
    :param resource_fullname: the full name of the deployed app resource
    :return:
    """
    logger.info('retrieving vm by uuid: {0}'.format(vm_uuid))
    vm = self.pv_service.find_by_uuid(si, vm_uuid)

    if vm.summary.runtime.powerState == 'poweredOff':
        logger.info('vm already powered off')
        task_result = 'Already powered off'
    else:
        logger.info('{0} powering off vm'.format(vcenter_data_model.shutdown_method))
        if vcenter_data_model.shutdown_method.lower() != 'soft':
            # hard power off
            task = vm.PowerOff()
            task_result = self.synchronous_task_waiter.wait_for_task(task=task,
                                                                     logger=logger,
                                                                     action_name='Power Off')
        else:
            if vm.guest.toolsStatus == 'toolsNotInstalled':
                logger.warning('VMWare Tools status on virtual machine \'{0}\' are not installed'.format(vm.name))
                raise ValueError('Cannot power off the vm softly because VMWare Tools are not installed')

            if vm.guest.toolsStatus == 'toolsNotRunning':
                logger.warning('VMWare Tools status on virtual machine \'{0}\' are not running'.format(vm.name))
                raise ValueError('Cannot power off the vm softly because VMWare Tools are not running')

            vm.ShutdownGuest()
            task_result = 'vm powered off'

    return task_result
[ "def", "power_off", "(", "self", ",", "si", ",", "logger", ",", "session", ",", "vcenter_data_model", ",", "vm_uuid", ",", "resource_fullname", ")", ":", "logger", ".", "info", "(", "'retrieving vm by uuid: {0}'", ".", "format", "(", "vm_uuid", ")", ")", "vm", "=", "self", ".", "pv_service", ".", "find_by_uuid", "(", "si", ",", "vm_uuid", ")", "if", "vm", ".", "summary", ".", "runtime", ".", "powerState", "==", "'poweredOff'", ":", "logger", ".", "info", "(", "'vm already powered off'", ")", "task_result", "=", "'Already powered off'", "else", ":", "# hard power off", "logger", ".", "info", "(", "'{0} powering of vm'", ".", "format", "(", "vcenter_data_model", ".", "shutdown_method", ")", ")", "if", "vcenter_data_model", ".", "shutdown_method", ".", "lower", "(", ")", "!=", "'soft'", ":", "task", "=", "vm", ".", "PowerOff", "(", ")", "task_result", "=", "self", ".", "synchronous_task_waiter", ".", "wait_for_task", "(", "task", "=", "task", ",", "logger", "=", "logger", ",", "action_name", "=", "'Power Off'", ")", "else", ":", "if", "vm", ".", "guest", ".", "toolsStatus", "==", "'toolsNotInstalled'", ":", "logger", ".", "warning", "(", "'VMWare Tools status on virtual machine \\'{0}\\' are not installed'", ".", "format", "(", "vm", ".", "name", ")", ")", "raise", "ValueError", "(", "'Cannot power off the vm softly because VMWare Tools are not installed'", ")", "if", "vm", ".", "guest", ".", "toolsStatus", "==", "'toolsNotRunning'", ":", "logger", ".", "warning", "(", "'VMWare Tools status on virtual machine \\'{0}\\' are not running'", ".", "format", "(", "vm", ".", "name", ")", ")", "raise", "ValueError", "(", "'Cannot power off the vm softly because VMWare Tools are not running'", ")", "vm", ".", "ShutdownGuest", "(", ")", "task_result", "=", "'vm powered off'", "return", "task_result" ]
48.2
27.5
def sortarai(self, datablock, s, Zdiff):
    """
    sorts data block into first_Z, first_I, etc.
    """
    # print "calling sortarai()"
    first_Z, first_I, zptrm_check, ptrm_check, ptrm_tail = [], [], [], [], []
    field, phi, theta = "", "", ""
    starthere = 0
    Treat_I, Treat_Z, Treat_PZ, Treat_PI, Treat_M, Treat_AC = [], [], [], [], [], []
    ISteps, ZSteps, PISteps, PZSteps, MSteps, ACSteps = [], [], [], [], [], []
    GammaChecks = []  # comparison of pTRM direction acquired and lab field
    Mkeys = ['measurement_magn_moment', 'measurement_magn_volume',
             'measurement_magn_mass', 'measurement_magnitude']
    rec = datablock[0]
    # finds which type of magnetic measurement is present in magic_measurements.txt,
    # then assigns momkey to that value
    for key in Mkeys:
        if key in list(rec.keys()) and rec[key] != "":
            momkey = key
            break
    # first find all the steps
    for k in range(len(datablock)):  # iterates through records.
        rec = datablock[k]
        if "treatment_temp" in list(rec.keys()):
            temp = float(rec["treatment_temp"])
        elif "treatment_mw_power" in list(rec.keys()):
            temp = float(rec["treatment_mw_power"])
        methcodes = []
        tmp = rec["magic_method_codes"].split(":")
        for meth in tmp:
            methcodes.append(meth.strip())
        # methcodes contains all codes for a particular record
        # for thellier-thellier
        if 'LT-T-I' in methcodes and 'LP-PI-TRM' in methcodes and 'LP-TRM' not in methcodes:
            # IF specimen cooling AND using a laboratory trm AND NOT trm acquisition
            Treat_I.append(temp)
            ISteps.append(k)
            if field == "":
                field = float(rec["treatment_dc_field"])
            if phi == "":
                phi = float(rec['treatment_dc_field_phi'])
                theta = float(rec['treatment_dc_field_theta'])
        # for Microwave
        if 'LT-M-I' in methcodes and 'LP-PI-M' in methcodes:
            # if using microwave radiation in lab field AND using microwave demagnetisation
            Treat_I.append(temp)
            ISteps.append(k)
            if field == "":
                field = float(rec["treatment_dc_field"])
            if phi == "":
                phi = float(rec['treatment_dc_field_phi'])
                theta = float(rec['treatment_dc_field_theta'])
        # stick first zero field stuff into first_Z
        if 'LT-NO' in methcodes:  # if no treatments applied before measurements
            Treat_Z.append(temp)
            ZSteps.append(k)
        if 'LT-T-Z' in methcodes or 'LT-M-Z' in methcodes:
            # if specimen cooling in zero field OR using microwave radiation in zero field
            Treat_Z.append(temp)
            ZSteps.append(k)
        if 'LT-PTRM-Z' in methcodes:
            # zero-field pTRM step
            Treat_PZ.append(temp)
            PZSteps.append(k)
        if 'LT-PTRM-I' in methcodes or 'LT-PMRM-I' in methcodes:
            # if pTRM check
            Treat_PI.append(temp)
            PISteps.append(k)
        if 'LT-PTRM-MD' in methcodes:
            # if pTRM tail check
            Treat_M.append(temp)
            MSteps.append(k)
        if 'LT-PTRM-AC' in methcodes or 'LT-PMRM-AC' in methcodes:
            Treat_AC.append(temp)
            ACSteps.append(k)
        if 'LT-NO' in methcodes:  # if no treatments applied before measurement
            dec = float(rec["measurement_dec"])
            inc = float(rec["measurement_inc"])
            str = float(rec[momkey])
            if 'LP-PI-M' not in methcodes:  # if not using microwave demagnetisation
                first_I.append([273, 0., 0., 0., 1])
                first_Z.append([273, dec, inc, str, 1])  # NRM step
            else:
                first_I.append([0, 0., 0., 0., 1])
                first_Z.append([0, dec, inc, str, 1])  # NRM step
    # the block above sorts out whether each record is Treat_Z (zero field),
    # Treat_I (infield), a ptrm check, or a ptrm tail check, and appends it
    # to whichever of those it belongs in.
    #---------------------
    # find IZ and ZI
    #---------------------
    for temp in Treat_I:  # look through infield steps and find matching Z step
        if temp in Treat_Z:  # found a match
            istep = ISteps[Treat_I.index(temp)]
            irec = datablock[istep]
            methcodes = []
            tmp = irec["magic_method_codes"].split(":")
            for meth in tmp:
                methcodes.append(meth.strip())
            brec = datablock[istep - 1]  # take last record as baseline to subtract
            zstep = ZSteps[Treat_Z.index(temp)]
            zrec = datablock[zstep]
            # sort out first_Z records
            if "LP-PI-TRM-IZ" in methcodes or "LP-PI-M-IZ" in methcodes:
                ZI = 0
            else:
                ZI = 1
            dec = float(zrec["measurement_dec"])
            inc = float(zrec["measurement_inc"])
            str = float(zrec[momkey])
            first_Z.append([temp, dec, inc, str, ZI])
            # sort out first_I records
            #print 'irec', irec # full data set for infield measurement
            #print 'zrec', zrec # corresponding zerofield measurement
            idec = float(irec["measurement_dec"])
            iinc = float(irec["measurement_inc"])
            istr = float(irec[momkey])
            X = self.dir2cart([idec, iinc, istr])
            BL = self.dir2cart([dec, inc, str])
            I = []
            for c in range(3):
                I.append((X[c] - BL[c]))
            iDir = self.cart2dir(I)
            first_I.append([temp, iDir[0], iDir[1], iDir[2], ZI])
            now_ignore = """
            #if I[2]!=0: # lj PUT THIS BACK
            if True:
                iDir=self.cart2dir(I)
                if Zdiff==0:
                    print "Zdiff == 0, appending to first_I" #lj
                    print [temp,iDir[0],iDir[1],iDir[2],ZI] #lj
                    first_I.append([temp,iDir[0],iDir[1],iDir[2],ZI])
                else:
                    print "Zdiff != 0, appending to first_I" #lj
                    print [temp,0.,0.,I[2],ZI] #lj
                    first_I.append([temp,0.,0.,I[2],ZI])
##                gamma=angle([iDir[0],iDir[1]],[phi,theta])
            else:
                print "0,0,0 appending to first_I"
                print [temp,0.,0.,0.,ZI]
                first_I.append([temp,0.,0.,0.,ZI])
##                gamma=0.0
##            # put in Gamma check (infield trm versus lab field)
##            if 180.-gamma<gamma:
##                gamma=180.-gamma
##            GammaChecks.append([temp-273.,gamma])
            """

    #---------------------
    # find Thellier Thellier protocol
    #---------------------
    if 'LP-PI-II' in methcodes or 'LP-PI-T-II' in methcodes or 'LP-PI-M-II' in methcodes:
        for i in range(1, len(Treat_I)):  # look through infield steps and find matching Z step
            if Treat_I[i] == Treat_I[i - 1]:
                # ignore, if there are more than
                temp = Treat_I[i]
                irec1 = datablock[ISteps[i - 1]]
                dec1 = float(irec1["measurement_dec"])
                inc1 = float(irec1["measurement_inc"])
                moment1 = float(irec1["measurement_magn_moment"])
                if len(first_I) < 2:
                    dec_initial = dec1
                    inc_initial = inc1
                cart1 = numpy.array(self.dir2cart([dec1, inc1, moment1]))
                irec2 = datablock[ISteps[i]]
                dec2 = float(irec2["measurement_dec"])
                inc2 = float(irec2["measurement_inc"])
                moment2 = float(irec2["measurement_magn_moment"])
                cart2 = numpy.array(self.dir2cart([dec2, inc2, moment2]))

                # check if its in the same treatment
                if Treat_I[i] == Treat_I[i - 2] and dec2 != dec_initial and inc2 != inc_initial:
                    continue
                if dec1 != dec2 and inc1 != inc2:
                    zerofield = old_div((cart2 + cart1), 2)
                    infield = old_div((cart2 - cart1), 2)
                    DIR_zerofield = self.cart2dir(zerofield)
                    DIR_infield = self.cart2dir(infield)
                    first_Z.append([temp, DIR_zerofield[0], DIR_zerofield[1], DIR_zerofield[2], 0])
                    first_I.append([temp, DIR_infield[0], DIR_infield[1], DIR_infield[2], 0])

    #---------------------
    # find pTRM checks
    #---------------------
    for temp in Treat_PI:  # look through infield steps and find matching Z step
        if 'LP-PI-II' not in methcodes:
            step = PISteps[Treat_PI.index(temp)]
            rec = datablock[step]
            dec = float(rec["measurement_dec"])
            inc = float(rec["measurement_inc"])
            str = float(rec[momkey])
            brec = datablock[step - 1]  # take last record as baseline to subtract
            pdec = float(brec["measurement_dec"])
pinc=float(brec["measurement_inc"]) pint=float(brec[momkey]) X=self.dir2cart([dec,inc,str]) prevX=self.dir2cart([pdec,pinc,pint]) I=[] for c in range(3): I.append(X[c]-prevX[c]) dir1=self.cart2dir(I) if Zdiff==0: ptrm_check.append([temp,dir1[0],dir1[1],dir1[2]]) else: ptrm_check.append([temp,0.,0.,I[2]]) else: step=PISteps[Treat_PI.index(temp)] rec=datablock[step] dec=float(rec["measurement_dec"]) inc=float(rec["measurement_inc"]) moment=float(rec["measurement_magn_moment"]) for zerofield in first_Z: if zerofield[0]==temp: M1=numpy.array(self.dir2cart([dec,inc,moment])) M2=numpy.array(self.dir2cart([zerofield[1],zerofield[2],zerofield[3]])) diff=M1-M2 diff_cart=self.cart2dir(diff) ptrm_check.append([temp,diff_cart[0],diff_cart[1],diff_cart[2]]) # in case there are zero-field pTRM checks (not the SIO way) for temp in Treat_PZ: step=PZSteps[Treat_PZ.index(temp)] rec=datablock[step] dec=float(rec["measurement_dec"]) inc=float(rec["measurement_inc"]) str=float(rec[momkey]) brec=datablock[step-1] pdec=float(brec["measurement_dec"]) pinc=float(brec["measurement_inc"]) pint=float(brec[momkey]) X=self.dir2cart([dec,inc,str]) prevX=self.dir2cart([pdec,pinc,pint]) I=[] for c in range(3): I.append(X[c]-prevX[c]) dir2=self.cart2dir(I) zptrm_check.append([temp,dir2[0],dir2[1],dir2[2]]) ## get pTRM tail checks together - for temp in Treat_M: step=MSteps[Treat_M.index(temp)] # tail check step - just do a difference in magnitude! rec=datablock[step] str=float(rec[momkey]) if temp in Treat_Z: step=ZSteps[Treat_Z.index(temp)] brec=datablock[step] pint=float(brec[momkey]) ptrm_tail.append([temp,0,0,str-pint]) # difference - if negative, negative tail! else: print(s, ' has a tail check with no first zero field step - check input file! for step',temp-273.) # # final check # if len(first_Z)!=len(first_I): print(len(first_Z),len(first_I)) print(" Something wrong with this specimen! Better fix it or delete it ") input(" press return to acknowledge message") #--------------------- # find Additivity (patch by rshaar) #--------------------- additivity_check=[] for i in range(len(Treat_AC)): step_0=ACSteps[i] temp=Treat_AC[i] dec0=float(datablock[step_0]["measurement_dec"]) inc0=float(datablock[step_0]["measurement_inc"]) moment0=float(datablock[step_0]['measurement_magn_moment']) V0=self.dir2cart([dec0,inc0,moment0]) # find the infield step that comes before the additivity check foundit=False for j in range(step_0,1,-1): if "LT-T-I" in datablock[j]['magic_method_codes']: foundit=True ; break if foundit: dec1=float(datablock[j]["measurement_dec"]) inc1=float(datablock[j]["measurement_inc"]) moment1=float(datablock[j]['measurement_magn_moment']) #lj start_temp=float(datablock[j]['treatment_temp']); #lj V1=self.dir2cart([dec1,inc1,moment1]) I=[] #print "temp (K)", temp - 273 #print "start_temp (K)", start_temp - 273 #print "dec0: {}, inc0: {}, moment0: {}".format(dec0, inc0, moment0) #print "V0: ", V0 #print "dec1: {}, inc1: {}, moment1: {}".format(dec1, inc1,moment1) #print "V1: ", V1 #print "---" for c in range(3): I.append(V1[c]-V0[c]) dir1=self.cart2dir(I) additivity_check.append([temp,dir1[0],dir1[1],dir1[2]]) araiblock=(first_Z,first_I,ptrm_check,ptrm_tail,zptrm_check,GammaChecks,additivity_check) # print "done with sortarai()" # print "araiblock[0] (first_Z) " # [[273, 277.5, 79.6, 1.66e-09, 1], .....] 
# print araiblock[0] # print "araiblock[0][0]:" # print araiblock[0][0] # print "araiblock[1] (first_I)" # print araiblock[1] # print "araiblock[2] (ptrm_check)" # print araiblock[2] # print "araiblock[3] (ptrm_tail)" # print araiblock[3] # print "araiblock[4] (zptrm_check)" # print araiblock[4] # print "araiblock[5] (GammaChecks) " # print araiblock[5] # print "field ", field return araiblock,field
[ "def", "sortarai", "(", "self", ",", "datablock", ",", "s", ",", "Zdiff", ")", ":", "# print \"calling sortarai()\"", "first_Z", ",", "first_I", ",", "zptrm_check", ",", "ptrm_check", ",", "ptrm_tail", "=", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", "field", ",", "phi", ",", "theta", "=", "\"\"", ",", "\"\"", ",", "\"\"", "starthere", "=", "0", "Treat_I", ",", "Treat_Z", ",", "Treat_PZ", ",", "Treat_PI", ",", "Treat_M", ",", "Treat_AC", "=", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", "ISteps", ",", "ZSteps", ",", "PISteps", ",", "PZSteps", ",", "MSteps", ",", "ACSteps", "=", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", "GammaChecks", "=", "[", "]", "# comparison of pTRM direction acquired and lab field", "Mkeys", "=", "[", "'measurement_magn_moment'", ",", "'measurement_magn_volume'", ",", "'measurement_magn_mass'", ",", "'measurement_magnitude'", "]", "rec", "=", "datablock", "[", "0", "]", "# finds which type of magnetic measurement is present in magic_measurements.txt, then assigns momkey to that value", "for", "key", "in", "Mkeys", ":", "if", "key", "in", "list", "(", "rec", ".", "keys", "(", ")", ")", "and", "rec", "[", "key", "]", "!=", "\"\"", ":", "momkey", "=", "key", "break", "# first find all the steps", "for", "k", "in", "range", "(", "len", "(", "datablock", ")", ")", ":", "# iterates through records.", "rec", "=", "datablock", "[", "k", "]", "if", "\"treatment_temp\"", "in", "list", "(", "rec", ".", "keys", "(", ")", ")", ":", "temp", "=", "float", "(", "rec", "[", "\"treatment_temp\"", "]", ")", "elif", "\"treatment_mw_power\"", "in", "list", "(", "rec", ".", "keys", "(", ")", ")", ":", "temp", "=", "float", "(", "rec", "[", "\"treatment_mw_power\"", "]", ")", "methcodes", "=", "[", "]", "tmp", "=", "rec", "[", "\"magic_method_codes\"", "]", ".", "split", "(", "\":\"", ")", "for", "meth", "in", "tmp", ":", "methcodes", ".", "append", "(", "meth", ".", "strip", "(", ")", ")", "# methchodes contains all codes for a particular record", "# for thellier-thellier", "if", "'LT-T-I'", "in", "methcodes", "and", "'LP-PI-TRM'", "in", "methcodes", "and", "'LP-TRM'", "not", "in", "methcodes", ":", "# IF specimen cooling AND using a laboratory trm AND NOT trm acquisition", "Treat_I", ".", "append", "(", "temp", ")", "ISteps", ".", "append", "(", "k", ")", "if", "field", "==", "\"\"", ":", "field", "=", "float", "(", "rec", "[", "\"treatment_dc_field\"", "]", ")", "if", "phi", "==", "\"\"", ":", "phi", "=", "float", "(", "rec", "[", "'treatment_dc_field_phi'", "]", ")", "theta", "=", "float", "(", "rec", "[", "'treatment_dc_field_theta'", "]", ")", "# for Microwave", "if", "'LT-M-I'", "in", "methcodes", "and", "'LP-PI-M'", "in", "methcodes", ":", "# if using microwave radiation in lab field AND using microwave demagnetisation", "Treat_I", ".", "append", "(", "temp", ")", "ISteps", ".", "append", "(", "k", ")", "if", "field", "==", "\"\"", ":", "field", "=", "float", "(", "rec", "[", "\"treatment_dc_field\"", "]", ")", "if", "phi", "==", "\"\"", ":", "phi", "=", "float", "(", "rec", "[", "'treatment_dc_field_phi'", "]", ")", "theta", "=", "float", "(", "rec", "[", "'treatment_dc_field_theta'", "]", ")", "# stick first zero field stuff into first_Z", "if", "'LT-NO'", "in", "methcodes", ":", "# if no treatments applied before measurements", "Treat_Z", ".", "append", "(", "temp", ")", "ZSteps", ".", "append", "(", "k", ")", "if", "'LT-T-Z'", "in", "methcodes", "or", "'LT-M-Z'", "in", 
"methcodes", ":", "# if specimen cooling in zero field OR using microwave radiation: In zero field", "Treat_Z", ".", "append", "(", "temp", ")", "ZSteps", ".", "append", "(", "k", ")", "if", "'LT-PTRM-Z'", ":", "# maybe this should be in methcodes ?? note I no longer understand", "# if pTRM tail check", "Treat_PZ", ".", "append", "(", "temp", ")", "PZSteps", ".", "append", "(", "k", ")", "if", "'LT-PTRM-I'", "in", "methcodes", "or", "'LT-PMRM-I'", "in", "methcodes", ":", "# if pTRM check", "Treat_PI", ".", "append", "(", "temp", ")", "PISteps", ".", "append", "(", "k", ")", "if", "'LT-PTRM-MD'", "in", "methcodes", ":", "# if pTRM tail check", "Treat_M", ".", "append", "(", "temp", ")", "MSteps", ".", "append", "(", "k", ")", "if", "'LT-PTRM-AC'", "in", "methcodes", "or", "'LT-PMRM-AC'", "in", "methcodes", ":", "Treat_AC", ".", "append", "(", "temp", ")", "ACSteps", ".", "append", "(", "k", ")", "if", "'LT-NO'", "in", "methcodes", ":", "# if no treatments applied before measurement", "dec", "=", "float", "(", "rec", "[", "\"measurement_dec\"", "]", ")", "inc", "=", "float", "(", "rec", "[", "\"measurement_inc\"", "]", ")", "str", "=", "float", "(", "rec", "[", "momkey", "]", ")", "if", "'LP-PI-M'", "not", "in", "methcodes", ":", "# if not using microwave demagnetisation", "first_I", ".", "append", "(", "[", "273", ",", "0.", ",", "0.", ",", "0.", ",", "1", "]", ")", "first_Z", ".", "append", "(", "[", "273", ",", "dec", ",", "inc", ",", "str", ",", "1", "]", ")", "# NRM step", "else", ":", "first_I", ".", "append", "(", "[", "0", ",", "0.", ",", "0.", ",", "0.", ",", "1", "]", ")", "first_Z", ".", "append", "(", "[", "0", ",", "dec", ",", "inc", ",", "str", ",", "1", "]", ")", "# NRM step", "# the block above seems to be sorting out into wheter it is Treat_Z (zero field), Treat_I (infield), a ptrm check, or a ptrm tail check. 
so, each record has been appended to whichever of those it belongs in.", "#---------------------", "# find IZ and ZI", "#---------------------", "for", "temp", "in", "Treat_I", ":", "# look through infield steps and find matching Z step", "if", "temp", "in", "Treat_Z", ":", "# found a match", "istep", "=", "ISteps", "[", "Treat_I", ".", "index", "(", "temp", ")", "]", "irec", "=", "datablock", "[", "istep", "]", "methcodes", "=", "[", "]", "tmp", "=", "irec", "[", "\"magic_method_codes\"", "]", ".", "split", "(", "\":\"", ")", "for", "meth", "in", "tmp", ":", "methcodes", ".", "append", "(", "meth", ".", "strip", "(", ")", ")", "brec", "=", "datablock", "[", "istep", "-", "1", "]", "# take last record as baseline to subtract", "zstep", "=", "ZSteps", "[", "Treat_Z", ".", "index", "(", "temp", ")", "]", "zrec", "=", "datablock", "[", "zstep", "]", "# sort out first_Z records", "if", "\"LP-PI-TRM-IZ\"", "in", "methcodes", "or", "\"LP-PI-M-IZ\"", "in", "methcodes", ":", "ZI", "=", "0", "else", ":", "ZI", "=", "1", "dec", "=", "float", "(", "zrec", "[", "\"measurement_dec\"", "]", ")", "inc", "=", "float", "(", "zrec", "[", "\"measurement_inc\"", "]", ")", "str", "=", "float", "(", "zrec", "[", "momkey", "]", ")", "first_Z", ".", "append", "(", "[", "temp", ",", "dec", ",", "inc", ",", "str", ",", "ZI", "]", ")", "# sort out first_I records", "#print 'irec', irec # full data set for infield measurement", "#print 'zrec', zrec # coresponding zerofield measurement", "idec", "=", "float", "(", "irec", "[", "\"measurement_dec\"", "]", ")", "iinc", "=", "float", "(", "irec", "[", "\"measurement_inc\"", "]", ")", "istr", "=", "float", "(", "irec", "[", "momkey", "]", ")", "X", "=", "self", ".", "dir2cart", "(", "[", "idec", ",", "iinc", ",", "istr", "]", ")", "BL", "=", "self", ".", "dir2cart", "(", "[", "dec", ",", "inc", ",", "str", "]", ")", "I", "=", "[", "]", "for", "c", "in", "range", "(", "3", ")", ":", "I", ".", "append", "(", "(", "X", "[", "c", "]", "-", "BL", "[", "c", "]", ")", ")", "iDir", "=", "self", ".", "cart2dir", "(", "I", ")", "first_I", ".", "append", "(", "[", "temp", ",", "iDir", "[", "0", "]", ",", "iDir", "[", "1", "]", ",", "iDir", "[", "2", "]", ",", "ZI", "]", ")", "now_ignore", "=", "\"\"\"\n #if I[2]!=0: # lj PUT THIS BACK\n if True:\n iDir=self.cart2dir(I)\n if Zdiff==0:\n print \"Zdiff == 0, appending to first_I\" #lj\n print [temp,iDir[0],iDir[1],iDir[2],ZI] #lj\n first_I.append([temp,iDir[0],iDir[1],iDir[2],ZI])\n else:\n print \"Zdiff != 0, appending to first_I\" #lj\n print [temp,0.,0.,I[2],ZI] #lj\n first_I.append([temp,0.,0.,I[2],ZI])\n## gamma=angle([iDir[0],iDir[1]],[phi,theta])\n else:\n print \"0,0,0 appending to first_I\"\n print [temp,0.,0.,0.,ZI]\n first_I.append([temp,0.,0.,0.,ZI])\n## gamma=0.0\n## # put in Gamma check (infield trm versus lab field)\n## if 180.-gamma<gamma:\n## gamma=180.-gamma\n## GammaChecks.append([temp-273.,gamma])\n \"\"\"", "#---------------------", "# find Thellier Thellier protocol", "#---------------------", "if", "'LP-PI-II'", "in", "methcodes", "or", "'LP-PI-T-II'", "in", "methcodes", "or", "'LP-PI-M-II'", "in", "methcodes", ":", "for", "i", "in", "range", "(", "1", ",", "len", "(", "Treat_I", ")", ")", ":", "# look through infield steps and find matching Z step", "if", "Treat_I", "[", "i", "]", "==", "Treat_I", "[", "i", "-", "1", "]", ":", "# ignore, if there are more than", "temp", "=", "Treat_I", "[", "i", "]", "irec1", "=", "datablock", "[", "ISteps", "[", "i", "-", "1", "]", "]", "dec1", "=", "float", "(", "irec1", "[", 
"\"measurement_dec\"", "]", ")", "inc1", "=", "float", "(", "irec1", "[", "\"measurement_inc\"", "]", ")", "moment1", "=", "float", "(", "irec1", "[", "\"measurement_magn_moment\"", "]", ")", "if", "len", "(", "first_I", ")", "<", "2", ":", "dec_initial", "=", "dec1", "inc_initial", "=", "inc1", "cart1", "=", "numpy", ".", "array", "(", "self", ".", "dir2cart", "(", "[", "dec1", ",", "inc1", ",", "moment1", "]", ")", ")", "irec2", "=", "datablock", "[", "ISteps", "[", "i", "]", "]", "dec2", "=", "float", "(", "irec2", "[", "\"measurement_dec\"", "]", ")", "inc2", "=", "float", "(", "irec2", "[", "\"measurement_inc\"", "]", ")", "moment2", "=", "float", "(", "irec2", "[", "\"measurement_magn_moment\"", "]", ")", "cart2", "=", "numpy", ".", "array", "(", "self", ".", "dir2cart", "(", "[", "dec2", ",", "inc2", ",", "moment2", "]", ")", ")", "# check if its in the same treatment", "if", "Treat_I", "[", "i", "]", "==", "Treat_I", "[", "i", "-", "2", "]", "and", "dec2", "!=", "dec_initial", "and", "inc2", "!=", "inc_initial", ":", "continue", "if", "dec1", "!=", "dec2", "and", "inc1", "!=", "inc2", ":", "zerofield", "=", "old_div", "(", "(", "cart2", "+", "cart1", ")", ",", "2", ")", "infield", "=", "old_div", "(", "(", "cart2", "-", "cart1", ")", ",", "2", ")", "DIR_zerofield", "=", "self", ".", "cart2dir", "(", "zerofield", ")", "DIR_infield", "=", "self", ".", "cart2dir", "(", "infield", ")", "first_Z", ".", "append", "(", "[", "temp", ",", "DIR_zerofield", "[", "0", "]", ",", "DIR_zerofield", "[", "1", "]", ",", "DIR_zerofield", "[", "2", "]", ",", "0", "]", ")", "print", "(", "\"appending to first_I\"", ")", "# LJ remove this", "print", "(", "[", "temp", ",", "DIR_infield", "[", "0", "]", ",", "DIR_infield", "[", "1", "]", ",", "DIR_infield", "[", "2", "]", ",", "0", "]", ")", "# LJ remove this", "first_I", ".", "append", "(", "[", "temp", ",", "DIR_infield", "[", "0", "]", ",", "DIR_infield", "[", "1", "]", ",", "DIR_infield", "[", "2", "]", ",", "0", "]", ")", "#---------------------", "# find pTRM checks", "#---------------------", "for", "temp", "in", "Treat_PI", ":", "# look through infield steps and find matching Z step", "if", "'LP-PI-II'", "not", "in", "methcodes", ":", "step", "=", "PISteps", "[", "Treat_PI", ".", "index", "(", "temp", ")", "]", "rec", "=", "datablock", "[", "step", "]", "dec", "=", "float", "(", "rec", "[", "\"measurement_dec\"", "]", ")", "inc", "=", "float", "(", "rec", "[", "\"measurement_inc\"", "]", ")", "str", "=", "float", "(", "rec", "[", "momkey", "]", ")", "brec", "=", "datablock", "[", "step", "-", "1", "]", "# take last record as baseline to subtract", "pdec", "=", "float", "(", "brec", "[", "\"measurement_dec\"", "]", ")", "pinc", "=", "float", "(", "brec", "[", "\"measurement_inc\"", "]", ")", "pint", "=", "float", "(", "brec", "[", "momkey", "]", ")", "X", "=", "self", ".", "dir2cart", "(", "[", "dec", ",", "inc", ",", "str", "]", ")", "prevX", "=", "self", ".", "dir2cart", "(", "[", "pdec", ",", "pinc", ",", "pint", "]", ")", "I", "=", "[", "]", "for", "c", "in", "range", "(", "3", ")", ":", "I", ".", "append", "(", "X", "[", "c", "]", "-", "prevX", "[", "c", "]", ")", "dir1", "=", "self", ".", "cart2dir", "(", "I", ")", "if", "Zdiff", "==", "0", ":", "ptrm_check", ".", "append", "(", "[", "temp", ",", "dir1", "[", "0", "]", ",", "dir1", "[", "1", "]", ",", "dir1", "[", "2", "]", "]", ")", "else", ":", "ptrm_check", ".", "append", "(", "[", "temp", ",", "0.", ",", "0.", ",", "I", "[", "2", "]", "]", ")", "else", ":", "step", "=", "PISteps", "[", 
"Treat_PI", ".", "index", "(", "temp", ")", "]", "rec", "=", "datablock", "[", "step", "]", "dec", "=", "float", "(", "rec", "[", "\"measurement_dec\"", "]", ")", "inc", "=", "float", "(", "rec", "[", "\"measurement_inc\"", "]", ")", "moment", "=", "float", "(", "rec", "[", "\"measurement_magn_moment\"", "]", ")", "for", "zerofield", "in", "first_Z", ":", "if", "zerofield", "[", "0", "]", "==", "temp", ":", "M1", "=", "numpy", ".", "array", "(", "self", ".", "dir2cart", "(", "[", "dec", ",", "inc", ",", "moment", "]", ")", ")", "M2", "=", "numpy", ".", "array", "(", "self", ".", "dir2cart", "(", "[", "zerofield", "[", "1", "]", ",", "zerofield", "[", "2", "]", ",", "zerofield", "[", "3", "]", "]", ")", ")", "diff", "=", "M1", "-", "M2", "diff_cart", "=", "self", ".", "cart2dir", "(", "diff", ")", "ptrm_check", ".", "append", "(", "[", "temp", ",", "diff_cart", "[", "0", "]", ",", "diff_cart", "[", "1", "]", ",", "diff_cart", "[", "2", "]", "]", ")", "# in case there are zero-field pTRM checks (not the SIO way)", "for", "temp", "in", "Treat_PZ", ":", "step", "=", "PZSteps", "[", "Treat_PZ", ".", "index", "(", "temp", ")", "]", "rec", "=", "datablock", "[", "step", "]", "dec", "=", "float", "(", "rec", "[", "\"measurement_dec\"", "]", ")", "inc", "=", "float", "(", "rec", "[", "\"measurement_inc\"", "]", ")", "str", "=", "float", "(", "rec", "[", "momkey", "]", ")", "brec", "=", "datablock", "[", "step", "-", "1", "]", "pdec", "=", "float", "(", "brec", "[", "\"measurement_dec\"", "]", ")", "pinc", "=", "float", "(", "brec", "[", "\"measurement_inc\"", "]", ")", "pint", "=", "float", "(", "brec", "[", "momkey", "]", ")", "X", "=", "self", ".", "dir2cart", "(", "[", "dec", ",", "inc", ",", "str", "]", ")", "prevX", "=", "self", ".", "dir2cart", "(", "[", "pdec", ",", "pinc", ",", "pint", "]", ")", "I", "=", "[", "]", "for", "c", "in", "range", "(", "3", ")", ":", "I", ".", "append", "(", "X", "[", "c", "]", "-", "prevX", "[", "c", "]", ")", "dir2", "=", "self", ".", "cart2dir", "(", "I", ")", "zptrm_check", ".", "append", "(", "[", "temp", ",", "dir2", "[", "0", "]", ",", "dir2", "[", "1", "]", ",", "dir2", "[", "2", "]", "]", ")", "## get pTRM tail checks together -", "for", "temp", "in", "Treat_M", ":", "step", "=", "MSteps", "[", "Treat_M", ".", "index", "(", "temp", ")", "]", "# tail check step - just do a difference in magnitude!", "rec", "=", "datablock", "[", "step", "]", "str", "=", "float", "(", "rec", "[", "momkey", "]", ")", "if", "temp", "in", "Treat_Z", ":", "step", "=", "ZSteps", "[", "Treat_Z", ".", "index", "(", "temp", ")", "]", "brec", "=", "datablock", "[", "step", "]", "pint", "=", "float", "(", "brec", "[", "momkey", "]", ")", "ptrm_tail", ".", "append", "(", "[", "temp", ",", "0", ",", "0", ",", "str", "-", "pint", "]", ")", "# difference - if negative, negative tail!", "else", ":", "print", "(", "s", ",", "' has a tail check with no first zero field step - check input file! for step'", ",", "temp", "-", "273.", ")", "#", "# final check", "#", "if", "len", "(", "first_Z", ")", "!=", "len", "(", "first_I", ")", ":", "print", "(", "len", "(", "first_Z", ")", ",", "len", "(", "first_I", ")", ")", "print", "(", "\" Something wrong with this specimen! 
Better fix it or delete it \"", ")", "input", "(", "\" press return to acknowledge message\"", ")", "#---------------------", "# find Additivity (patch by rshaar)", "#---------------------", "additivity_check", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "Treat_AC", ")", ")", ":", "step_0", "=", "ACSteps", "[", "i", "]", "temp", "=", "Treat_AC", "[", "i", "]", "dec0", "=", "float", "(", "datablock", "[", "step_0", "]", "[", "\"measurement_dec\"", "]", ")", "inc0", "=", "float", "(", "datablock", "[", "step_0", "]", "[", "\"measurement_inc\"", "]", ")", "moment0", "=", "float", "(", "datablock", "[", "step_0", "]", "[", "'measurement_magn_moment'", "]", ")", "V0", "=", "self", ".", "dir2cart", "(", "[", "dec0", ",", "inc0", ",", "moment0", "]", ")", "# find the infield step that comes before the additivity check", "foundit", "=", "False", "for", "j", "in", "range", "(", "step_0", ",", "1", ",", "-", "1", ")", ":", "if", "\"LT-T-I\"", "in", "datablock", "[", "j", "]", "[", "'magic_method_codes'", "]", ":", "foundit", "=", "True", "break", "if", "foundit", ":", "dec1", "=", "float", "(", "datablock", "[", "j", "]", "[", "\"measurement_dec\"", "]", ")", "inc1", "=", "float", "(", "datablock", "[", "j", "]", "[", "\"measurement_inc\"", "]", ")", "moment1", "=", "float", "(", "datablock", "[", "j", "]", "[", "'measurement_magn_moment'", "]", ")", "#lj", "start_temp", "=", "float", "(", "datablock", "[", "j", "]", "[", "'treatment_temp'", "]", ")", "#lj", "V1", "=", "self", ".", "dir2cart", "(", "[", "dec1", ",", "inc1", ",", "moment1", "]", ")", "I", "=", "[", "]", "#print \"temp (K)\", temp - 273", "#print \"start_temp (K)\", start_temp - 273", "#print \"dec0: {}, inc0: {}, moment0: {}\".format(dec0, inc0, moment0)", "#print \"V0: \", V0", "#print \"dec1: {}, inc1: {}, moment1: {}\".format(dec1, inc1,moment1)", "#print \"V1: \", V1", "#print \"---\"", "for", "c", "in", "range", "(", "3", ")", ":", "I", ".", "append", "(", "V1", "[", "c", "]", "-", "V0", "[", "c", "]", ")", "dir1", "=", "self", ".", "cart2dir", "(", "I", ")", "additivity_check", ".", "append", "(", "[", "temp", ",", "dir1", "[", "0", "]", ",", "dir1", "[", "1", "]", ",", "dir1", "[", "2", "]", "]", ")", "araiblock", "=", "(", "first_Z", ",", "first_I", ",", "ptrm_check", ",", "ptrm_tail", ",", "zptrm_check", ",", "GammaChecks", ",", "additivity_check", ")", "# print \"done with sortarai()\"", "# print \"araiblock[0] (first_Z) \"", "# [[273, 277.5, 79.6, 1.66e-09, 1], .....]", "# print araiblock[0]", "# print \"araiblock[0][0]:\"", "# print araiblock[0][0]", "# print \"araiblock[1] (first_I)\"", "# print araiblock[1]", "# print \"araiblock[2] (ptrm_check)\"", "# print araiblock[2]", "# print \"araiblock[3] (ptrm_tail)\"", "# print araiblock[3]", "# print \"araiblock[4] (zptrm_check)\"", "# print araiblock[4]", "# print \"araiblock[5] (GammaChecks) \"", "# print araiblock[5]", "# print \"field \", field", "return", "araiblock", ",", "field" ]
45.406832
18.729814
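The half-sum/half-difference step above is the heart of this routine: for measurement pairs taken with the laboratory field in opposite polarities, the zero-field (remanent) part is the vector mean and the in-field (pTRM) part is half the vector difference. A minimal sketch of that decomposition, with standalone dir2cart/cart2dir helpers standing in for the class methods:

import numpy as np

def dir2cart(d):
    # declination, inclination (degrees), moment -> x, y, z
    dec, inc, mom = np.radians(d[0]), np.radians(d[1]), d[2]
    return np.array([mom * np.cos(dec) * np.cos(inc),
                     mom * np.sin(dec) * np.cos(inc),
                     mom * np.sin(inc)])

def cart2dir(c):
    # x, y, z -> declination, inclination (degrees), moment
    mom = np.linalg.norm(c)
    return (np.degrees(np.arctan2(c[1], c[0])) % 360,
            np.degrees(np.arcsin(c[2] / mom)), mom)

cart1 = dir2cart([10.0, 45.0, 2.0e-9])  # measurement with the field one way
cart2 = dir2cart([12.0, 47.0, 2.2e-9])  # paired measurement, field reversed
zerofield = (cart2 + cart1) / 2         # remanence survives the average
infield = (cart2 - cart1) / 2           # laboratory TRM survives the difference
print(cart2dir(zerofield), cart2dir(infield))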
def get_value_ddist(self, attr_name, attr_value): """ Returns the class value probability distribution of the given attribute value. """ assert not self.tree.data.is_continuous_class, \ "Discrete distributions are only maintained for " + \ "discrete class types." ddist = DDist() cls_counts = self._attr_class_value_counts[attr_name][attr_value] for cls_value, cls_count in iteritems(cls_counts): ddist.add(cls_value, count=cls_count) return ddist
[ "def", "get_value_ddist", "(", "self", ",", "attr_name", ",", "attr_value", ")", ":", "assert", "not", "self", ".", "tree", ".", "data", ".", "is_continuous_class", ",", "\"Discrete distributions are only maintained for \"", "+", "\"discrete class types.\"", "ddist", "=", "DDist", "(", ")", "cls_counts", "=", "self", ".", "_attr_class_value_counts", "[", "attr_name", "]", "[", "attr_value", "]", "for", "cls_value", ",", "cls_count", "in", "iteritems", "(", "cls_counts", ")", ":", "ddist", ".", "add", "(", "cls_value", ",", "count", "=", "cls_count", ")", "return", "ddist" ]
41.769231
15.153846
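The DDist object returned above is not shown in this snippet; a minimal stand-in (the real class in this package may differ) makes the count-to-probability step concrete:

from collections import defaultdict

class DDist:
    # minimal discrete distribution: counts in, probabilities out
    def __init__(self):
        self.counts = defaultdict(int)
        self.total = 0

    def add(self, value, count=1):
        self.counts[value] += count
        self.total += count

    def probs(self):
        return {v: c / self.total for v, c in self.counts.items()}

ddist = DDist()
for cls_value, cls_count in {"yes": 3, "no": 1}.items():
    ddist.add(cls_value, count=cls_count)
print(ddist.probs())  # {'yes': 0.75, 'no': 0.25}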
def _fit_stages(self, X, y, y_pred, sample_weight, random_state, begin_at_stage=0, monitor=None, X_idx_sorted=None): """Iteratively fits the stages. For each stage it computes the progress (OOB, train score) and delegates to ``_fit_stage``. Returns the number of stages fit; might differ from ``n_estimators`` due to early stopping. """ n_samples = X.shape[0] do_oob = self.subsample < 1.0 sample_mask = numpy.ones((n_samples, ), dtype=numpy.bool) n_inbag = max(1, int(self.subsample * n_samples)) loss_ = self.loss_ if self.verbose: verbose_reporter = VerboseReporter(self.verbose) verbose_reporter.init(self, begin_at_stage) X_csc = csc_matrix(X) if issparse(X) else None X_csr = csr_matrix(X) if issparse(X) else None if self.dropout_rate > 0.: scale = numpy.ones(self.n_estimators, dtype=float) else: scale = None # perform boosting iterations i = begin_at_stage for i in range(begin_at_stage, self.n_estimators): # subsampling if do_oob: sample_mask = _random_sample_mask(n_samples, n_inbag, random_state) # OOB score before adding this stage y_oob_sample = y[~sample_mask] old_oob_score = loss_(y_oob_sample, y_pred[~sample_mask], sample_weight[~sample_mask]) # fit next stage of trees y_pred = self._fit_stage(i, X, y, y_pred, sample_weight, sample_mask, random_state, scale, X_idx_sorted, X_csc, X_csr) # track deviance (= loss) if do_oob: self.train_score_[i] = loss_(y[sample_mask], y_pred[sample_mask], sample_weight[sample_mask]) self.oob_improvement_[i] = (old_oob_score - loss_(y_oob_sample, y_pred[~sample_mask], sample_weight[~sample_mask])) else: # no need to fancy index w/ no subsampling self.train_score_[i] = loss_(y, y_pred, sample_weight) if self.verbose > 0: verbose_reporter.update(i, self) if monitor is not None: early_stopping = monitor(i, self, locals()) if early_stopping: break if self.dropout_rate > 0.: self.scale_ = scale return i + 1
[ "def", "_fit_stages", "(", "self", ",", "X", ",", "y", ",", "y_pred", ",", "sample_weight", ",", "random_state", ",", "begin_at_stage", "=", "0", ",", "monitor", "=", "None", ",", "X_idx_sorted", "=", "None", ")", ":", "n_samples", "=", "X", ".", "shape", "[", "0", "]", "do_oob", "=", "self", ".", "subsample", "<", "1.0", "sample_mask", "=", "numpy", ".", "ones", "(", "(", "n_samples", ",", ")", ",", "dtype", "=", "numpy", ".", "bool", ")", "n_inbag", "=", "max", "(", "1", ",", "int", "(", "self", ".", "subsample", "*", "n_samples", ")", ")", "loss_", "=", "self", ".", "loss_", "if", "self", ".", "verbose", ":", "verbose_reporter", "=", "VerboseReporter", "(", "self", ".", "verbose", ")", "verbose_reporter", ".", "init", "(", "self", ",", "begin_at_stage", ")", "X_csc", "=", "csc_matrix", "(", "X", ")", "if", "issparse", "(", "X", ")", "else", "None", "X_csr", "=", "csr_matrix", "(", "X", ")", "if", "issparse", "(", "X", ")", "else", "None", "if", "self", ".", "dropout_rate", ">", "0.", ":", "scale", "=", "numpy", ".", "ones", "(", "self", ".", "n_estimators", ",", "dtype", "=", "float", ")", "else", ":", "scale", "=", "None", "# perform boosting iterations", "i", "=", "begin_at_stage", "for", "i", "in", "range", "(", "begin_at_stage", ",", "self", ".", "n_estimators", ")", ":", "# subsampling", "if", "do_oob", ":", "sample_mask", "=", "_random_sample_mask", "(", "n_samples", ",", "n_inbag", ",", "random_state", ")", "# OOB score before adding this stage", "y_oob_sample", "=", "y", "[", "~", "sample_mask", "]", "old_oob_score", "=", "loss_", "(", "y_oob_sample", ",", "y_pred", "[", "~", "sample_mask", "]", ",", "sample_weight", "[", "~", "sample_mask", "]", ")", "# fit next stage of trees", "y_pred", "=", "self", ".", "_fit_stage", "(", "i", ",", "X", ",", "y", ",", "y_pred", ",", "sample_weight", ",", "sample_mask", ",", "random_state", ",", "scale", ",", "X_idx_sorted", ",", "X_csc", ",", "X_csr", ")", "# track deviance (= loss)", "if", "do_oob", ":", "self", ".", "train_score_", "[", "i", "]", "=", "loss_", "(", "y", "[", "sample_mask", "]", ",", "y_pred", "[", "sample_mask", "]", ",", "sample_weight", "[", "sample_mask", "]", ")", "self", ".", "oob_improvement_", "[", "i", "]", "=", "(", "old_oob_score", "-", "loss_", "(", "y_oob_sample", ",", "y_pred", "[", "~", "sample_mask", "]", ",", "sample_weight", "[", "~", "sample_mask", "]", ")", ")", "else", ":", "# no need to fancy index w/ no subsampling", "self", ".", "train_score_", "[", "i", "]", "=", "loss_", "(", "y", ",", "y_pred", ",", "sample_weight", ")", "if", "self", ".", "verbose", ">", "0", ":", "verbose_reporter", ".", "update", "(", "i", ",", "self", ")", "if", "monitor", "is", "not", "None", ":", "early_stopping", "=", "monitor", "(", "i", ",", "self", ",", "locals", "(", ")", ")", "if", "early_stopping", ":", "break", "if", "self", ".", "dropout_rate", ">", "0.", ":", "self", ".", "scale_", "=", "scale", "return", "i", "+", "1" ]
39.42029
21.536232
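The monitor hook is this loop's early-stopping seam: it is called as monitor(i, self, locals()) and any truthy return breaks out of the boosting loop. A sketch of one plausible monitor, assuming subsampling is enabled so oob_improvement_ is being filled in (the factory name and the fit call are illustrative, not part of this codebase):

def make_early_stopping_monitor(patience=5):
    # stop once OOB improvement has been non-positive for `patience` stages
    def monitor(i, est, locals_):
        if not hasattr(est, "oob_improvement_"):
            return False  # no subsampling, so no OOB signal to watch
        recent = est.oob_improvement_[max(0, i - patience + 1):i + 1]
        return len(recent) == patience and all(x <= 0 for x in recent)
    return monitor

# est.fit(X, y, monitor=make_early_stopping_monitor(10))  # hypothetical call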
def images(self): """Generate images for all the datasets from the scene.""" for ds_id, projectable in self.datasets.items(): if ds_id in self.wishlist: yield projectable.to_image()
[ "def", "images", "(", "self", ")", ":", "for", "ds_id", ",", "projectable", "in", "self", ".", "datasets", ".", "items", "(", ")", ":", "if", "ds_id", "in", "self", ".", "wishlist", ":", "yield", "projectable", ".", "to_image", "(", ")" ]
44.2
9
def bandit(self, choice_rewards): """ Multi-armed bandit method which chooses the arm for which the upper confidence bound (UCB) of expected reward is greatest. If there are multiple arms with the same UCB1 index, then one is chosen at random. An explanation is here: https://www.cs.bham.ac.uk/internal/courses/robotics/lectures/ucb1.pdf """ # count the larger of 1 and the total number of arm pulls total_pulls = max(1, sum(len(r) for r in choice_rewards.values())) def ucb1(choice): rewards = choice_rewards[choice] choice_pulls = max(len(rewards), 1) average_reward = np.nanmean(rewards) if len(rewards) else 0 error = np.sqrt(2.0 * np.log(total_pulls) / choice_pulls) return average_reward + error return max(shuffle(choice_rewards), key=ucb1)
[ "def", "bandit", "(", "self", ",", "choice_rewards", ")", ":", "# count the larger of 1 and the total number of arm pulls", "total_pulls", "=", "max", "(", "1", ",", "sum", "(", "len", "(", "r", ")", "for", "r", "in", "choice_rewards", ".", "values", "(", ")", ")", ")", "def", "ucb1", "(", "choice", ")", ":", "rewards", "=", "choice_rewards", "[", "choice", "]", "choice_pulls", "=", "max", "(", "len", "(", "rewards", ")", ",", "1", ")", "average_reward", "=", "np", ".", "nanmean", "(", "rewards", ")", "if", "len", "(", "rewards", ")", "else", "0", "error", "=", "np", ".", "sqrt", "(", "2.0", "*", "np", ".", "log", "(", "total_pulls", ")", "/", "choice_pulls", ")", "return", "average_reward", "+", "error", "return", "max", "(", "shuffle", "(", "choice_rewards", ")", ",", "key", "=", "ucb1", ")" ]
38.521739
23.043478
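The UCB1 index adds an exploration bonus sqrt(2 ln N / n_i) to each arm's mean reward. A worked example (with a trivial stand-in for the shuffle helper and made-up reward data) shows how a rarely pulled arm can outrank a better-scoring one:

import random
import numpy as np

def shuffle(items):  # stand-in for the helper this method imports
    items = list(items)
    random.shuffle(items)
    return items

choice_rewards = {"a": [0.9, 0.8], "b": [0.5], "c": []}
total_pulls = max(1, sum(len(r) for r in choice_rewards.values()))  # 3

def ucb1(choice):
    rewards = choice_rewards[choice]
    choice_pulls = max(len(rewards), 1)
    average_reward = np.nanmean(rewards) if len(rewards) else 0
    return average_reward + np.sqrt(2.0 * np.log(total_pulls) / choice_pulls)

for arm in ("a", "b", "c"):
    print(arm, round(ucb1(arm), 3))  # a 1.898, b 1.982, c 1.482
print(max(shuffle(choice_rewards), key=ucb1))  # "b": one pull, big exploration bonus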
def response_voice(self, media_id): """ Assemble the voice identified by media_id into response data that conforms to the WeChat server's requirements :param media_id: MediaID of the voice message :return: XML response data conforming to the WeChat server's requirements """ self._check_parse() response = VoiceReply(message=self.__message, media_id=media_id).render() return self._encrypt_response(response)
[ "def", "response_voice", "(", "self", ",", "media_id", ")", ":", "self", ".", "_check_parse", "(", ")", "response", "=", "VoiceReply", "(", "message", "=", "self", ".", "__message", ",", "media_id", "=", "media_id", ")", ".", "render", "(", ")", "return", "self", ".", "_encrypt_response", "(", "response", ")" ]
32.6
11.6
def get_all_function_definitions(base_most_function): """ Obtains all function definitions given a base-most function. This includes the provided function, plus any overrides of that function. Returns: (list): The provided function and any overriding functions defined for it. """ # We assume the provided function is the base-most function, so we check all derived contracts # for a redefinition return [base_most_function] + [function for derived_contract in base_most_function.contract.derived_contracts for function in derived_contract.functions if function.full_name == base_most_function.full_name]
[ "def", "get_all_function_definitions", "(", "base_most_function", ")", ":", "# We assume the provided function is the base-most function, so we check all derived contracts", "# for a redefinition", "return", "[", "base_most_function", "]", "+", "[", "function", "for", "derived_contract", "in", "base_most_function", ".", "contract", ".", "derived_contracts", "for", "function", "in", "derived_contract", ".", "functions", "if", "function", ".", "full_name", "==", "base_most_function", ".", "full_name", "]" ]
58.384615
35.307692
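A toy model of the lookup with plain classes (Slither's real Contract/Function objects carry much more) shows how an override in a derived contract is collected alongside the base definition:

class Function:
    def __init__(self, full_name, contract):
        self.full_name, self.contract = full_name, contract

class Contract:
    def __init__(self):
        self.functions, self.derived_contracts = [], []

base, child = Contract(), Contract()
base.derived_contracts = [child]
f = Function("transfer(address,uint256)", base)
child.functions = [Function("transfer(address,uint256)", child)]  # an override

defs = [f] + [fn for dc in f.contract.derived_contracts
              for fn in dc.functions if fn.full_name == f.full_name]
print(len(defs))  # 2: the base definition plus the override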
def save_load(jid, load, minions=None): ''' Save the load to the specified jid ''' log.debug('sdstack_etcd returner <save_load> called jid: %s', jid) write_profile = __opts__.get('etcd.returner_write_profile') client, path = _get_conn(__opts__, write_profile) if write_profile: ttl = __opts__.get(write_profile, {}).get('etcd.ttl') else: ttl = __opts__.get('etcd.ttl') client.set( '/'.join((path, 'jobs', jid, '.load.p')), salt.utils.json.dumps(load), ttl=ttl, )
[ "def", "save_load", "(", "jid", ",", "load", ",", "minions", "=", "None", ")", ":", "log", ".", "debug", "(", "'sdstack_etcd returner <save_load> called jid: %s'", ",", "jid", ")", "write_profile", "=", "__opts__", ".", "get", "(", "'etcd.returner_write_profile'", ")", "client", ",", "path", "=", "_get_conn", "(", "__opts__", ",", "write_profile", ")", "if", "write_profile", ":", "ttl", "=", "__opts__", ".", "get", "(", "write_profile", ",", "{", "}", ")", ".", "get", "(", "'etcd.ttl'", ")", "else", ":", "ttl", "=", "__opts__", ".", "get", "(", "'etcd.ttl'", ")", "client", ".", "set", "(", "'/'", ".", "join", "(", "(", "path", ",", "'jobs'", ",", "jid", ",", "'.load.p'", ")", ")", ",", "salt", ".", "utils", ".", "json", ".", "dumps", "(", "load", ")", ",", "ttl", "=", "ttl", ",", ")" ]
32.9375
19.0625
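The TTL resolution is the only branching here: a write-profile section, when configured, overrides the global etcd.ttl. A self-contained sketch of that lookup with illustrative option values:

__opts__ = {"etcd.returner_write_profile": "etcd_prod",  # hypothetical config
            "etcd_prod": {"etcd.ttl": 300},
            "etcd.ttl": 60}

write_profile = __opts__.get("etcd.returner_write_profile")
if write_profile:
    ttl = __opts__.get(write_profile, {}).get("etcd.ttl")
else:
    ttl = __opts__.get("etcd.ttl")
print(ttl)  # 300: the profile-level TTL wins over the global default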
def _prep_grabix_indexes(in_files, data): """Parallel preparation of grabix indexes for files. """ # if we have gzipped but not bgzipped, add a fake index for CWL support # Also skips bgzip indexing if we don't need alignment splitting if _ready_gzip_fastq(in_files, data) and (not _ready_gzip_fastq(in_files, data, require_bgzip=True) or dd.get_align_split_size(data) is False): for in_file in in_files: if not utils.file_exists(in_file + ".gbi"): with file_transaction(data, in_file + ".gbi") as tx_gbi_file: with open(tx_gbi_file, "w") as out_handle: out_handle.write("Not grabix indexed; index added for compatibility.\n") else: items = [[{"bgzip_file": x, "config": copy.deepcopy(data["config"])}] for x in in_files if x] run_multicore(_grabix_index, items, data["config"]) return data
[ "def", "_prep_grabix_indexes", "(", "in_files", ",", "data", ")", ":", "# if we have gzipped but not bgzipped, add a fake index for CWL support", "# Also skips bgzip indexing if we don't need alignment splitting", "if", "_ready_gzip_fastq", "(", "in_files", ",", "data", ")", "and", "(", "not", "_ready_gzip_fastq", "(", "in_files", ",", "data", ",", "require_bgzip", "=", "True", ")", "or", "dd", ".", "get_align_split_size", "(", "data", ")", "is", "False", ")", ":", "for", "in_file", "in", "in_files", ":", "if", "not", "utils", ".", "file_exists", "(", "in_file", "+", "\".gbi\"", ")", ":", "with", "file_transaction", "(", "data", ",", "in_file", "+", "\".gbi\"", ")", "as", "tx_gbi_file", ":", "with", "open", "(", "tx_gbi_file", ",", "\"w\"", ")", "as", "out_handle", ":", "out_handle", ".", "write", "(", "\"Not grabix indexed; index added for compatibility.\\n\"", ")", "else", ":", "items", "=", "[", "[", "{", "\"bgzip_file\"", ":", "x", ",", "\"config\"", ":", "copy", ".", "deepcopy", "(", "data", "[", "\"config\"", "]", ")", "}", "]", "for", "x", "in", "in_files", "if", "x", "]", "run_multicore", "(", "_grabix_index", ",", "items", ",", "data", "[", "\"config\"", "]", ")", "return", "data" ]
59.0625
28.125
def propogate_candidates(self, node_ip): """ Used to propagate new candidates to passive simultaneous nodes. """ if node_ip in self.factory.candidates: old_candidates = [] for candidate in self.factory.candidates[node_ip]: # Not connected. if not candidate["con"].connected: continue # Already sent -- updated when they accept this challenge. if candidate["propogated"]: continue # Notify node of challenge from client. msg = "CHALLENGE %s %s %s" % ( candidate["ip_addr"], " ".join(map(str, candidate["predictions"])), candidate["proto"]) self.factory.nodes["simultaneous"][node_ip]["con"].\ send_line(msg) old_candidates.append(candidate)
[ "def", "propogate_candidates", "(", "self", ",", "node_ip", ")", ":", "if", "node_ip", "in", "self", ".", "factory", ".", "candidates", ":", "old_candidates", "=", "[", "]", "for", "candidate", "in", "self", ".", "factory", ".", "candidates", "[", "node_ip", "]", ":", "# Not connected.\r", "if", "not", "candidate", "[", "\"con\"", "]", ".", "connected", ":", "continue", "# Already sent -- updated when they accept this challenge.\r", "if", "candidate", "[", "\"propogated\"", "]", ":", "continue", "# Notify node of challege from client.\r", "msg", "=", "\"CHALLENGE %s %s %s\"", "%", "(", "candidate", "[", "\"ip_addr\"", "]", ",", "\" \"", ".", "join", "(", "map", "(", "str", ",", "candidate", "[", "\"predictions\"", "]", ")", ")", ",", "candidate", "[", "\"proto\"", "]", ")", "self", ".", "factory", ".", "nodes", "[", "\"simultaneous\"", "]", "[", "node_ip", "]", "[", "\"con\"", "]", ".", "send_line", "(", "msg", ")", "old_candidates", ".", "append", "(", "candidate", ")" ]
36.615385
15.923077
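The wire format being produced is a single CHALLENGE line; with an illustrative candidate record (the addresses and ports below are hypothetical), the construction above yields:

candidate = {
    "ip_addr": "203.0.113.7",            # hypothetical values
    "predictions": [34201, 34202, 34203],
    "proto": "TCP",
}
# Same format as above: CHALLENGE <ip> <space-separated port predictions> <proto>
msg = "CHALLENGE %s %s %s" % (
    candidate["ip_addr"],
    " ".join(map(str, candidate["predictions"])),
    candidate["proto"])
print(msg)  # CHALLENGE 203.0.113.7 34201 34202 34203 TCP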
def bcdc_package_show(package): """Query DataBC Catalogue API about given package """ params = {"id": package} r = requests.get(bcdata.BCDC_API_URL + "package_show", params=params) if r.status_code != 200: raise ValueError("{d} is not present in DataBC API list".format(d=package)) return r.json()["result"]
[ "def", "bcdc_package_show", "(", "package", ")", ":", "params", "=", "{", "\"id\"", ":", "package", "}", "r", "=", "requests", ".", "get", "(", "bcdata", ".", "BCDC_API_URL", "+", "\"package_show\"", ",", "params", "=", "params", ")", "if", "r", ".", "status_code", "!=", "200", ":", "raise", "ValueError", "(", "\"{d} is not present in DataBC API list\"", ".", "format", "(", "d", "=", "package", ")", ")", "return", "r", ".", "json", "(", ")", "[", "\"result\"", "]" ]
41.5
15
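A sketch of calling the same CKAN endpoint directly; the base URL and dataset id below are assumptions for illustration, not values taken from the bcdata package:

import requests

BCDC_API_URL = "https://catalogue.data.gov.bc.ca/api/3/action/"  # assumed base URL

r = requests.get(BCDC_API_URL + "package_show", params={"id": "bc-airports"})
if r.status_code != 200:
    raise ValueError("bc-airports is not present in DataBC API list")
print(r.json()["result"]["title"])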
def motion_commanded(self): """ Whether motion is commanded or not. ``bool`` Can't be set. Notes ----- It is the value of the first bit of the 'TAS' command. """ rsp = self.driver.send_command('TAS', immediate=True) if self.driver.command_error(rsp) or len(rsp[4]) != 1 \ or rsp[4][0][0:4] != '*TAS': return False else: return (rsp[4][0][4] == '1')
[ "def", "motion_commanded", "(", "self", ")", ":", "rsp", "=", "self", ".", "driver", ".", "send_command", "(", "'TAS'", ",", "immediate", "=", "True", ")", "if", "self", ".", "driver", ".", "command_error", "(", "rsp", ")", "or", "len", "(", "rsp", "[", "4", "]", ")", "!=", "1", "or", "rsp", "[", "4", "]", "[", "0", "]", "[", "0", ":", "4", "]", "!=", "'*TAS'", ":", "return", "False", "else", ":", "return", "(", "rsp", "[", "4", "]", "[", "0", "]", "[", "4", "]", "==", "'1'", ")" ]
25.277778
21.277778
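The parse hinges on rsp[4] holding exactly one reply line of the form '*TAS' followed by status bits; a sketch against a hypothetical response tuple (the real driver's reply layout may differ):

rsp = (None, None, None, None, ['*TAS1000_0000_0000_0000'])  # hypothetical reply

def parse_motion_commanded(rsp, command_error=False):
    if command_error or len(rsp[4]) != 1 or rsp[4][0][0:4] != '*TAS':
        return False
    return rsp[4][0][4] == '1'  # first status bit after the '*TAS' header

print(parse_motion_commanded(rsp))  # True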
def matched(self, key, regex, ignore_case=False, multi_line=False): """ Add a query condition requiring the specified field of result objects to match the given regular expression. :param key: name of the field to query :param regex: the regular expression to match :param ignore_case: whether the match ignores case, defaults to False :param multi_line: whether the match spans multiple lines, defaults to False :rtype: Query """ if not isinstance(regex, six.string_types): raise TypeError('matched only accept str or unicode') self._add_condition(key, '$regex', regex) modifiers = '' if ignore_case: modifiers += 'i' if multi_line: modifiers += 'm' if modifiers: self._add_condition(key, '$options', modifiers) return self
[ "def", "matched", "(", "self", ",", "key", ",", "regex", ",", "ignore_case", "=", "False", ",", "multi_line", "=", "False", ")", ":", "if", "not", "isinstance", "(", "regex", ",", "six", ".", "string_types", ")", ":", "raise", "TypeError", "(", "'matched only accept str or unicode'", ")", "self", ".", "_add_condition", "(", "key", ",", "'$regex'", ",", "regex", ")", "modifiers", "=", "''", "if", "ignore_case", ":", "modifiers", "+=", "'i'", "if", "multi_line", ":", "modifiers", "+=", "'m'", "if", "modifiers", ":", "self", ".", "_add_condition", "(", "key", ",", "'$options'", ",", "modifiers", ")", "return", "self" ]
32.142857
14.142857
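A hedged usage sketch, assuming a Query class exposing this chainable interface; the class name, field name, and find() call are illustrative:

query = Query('Post')  # hypothetical Query over a 'Post' collection
query.matched('title', '^WTO', ignore_case=True, multi_line=True)
# builds the condition {'title': {'$regex': '^WTO', '$options': 'im'}}
results = query.find()  # hypothetical execution call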
def execute(self): """ Execute the task. The sync map produced will be stored inside the task object. :raises: :class:`~aeneas.executetask.ExecuteTaskInputError`: if there is a problem with the input parameters :raises: :class:`~aeneas.executetask.ExecuteTaskExecutionError`: if there is a problem during the task execution """ self.log(u"Executing task...") # check that we have the AudioFile object if self.task.audio_file is None: self.log_exc(u"The task does not seem to have its audio file set", None, True, ExecuteTaskInputError) if ( (self.task.audio_file.audio_length is None) or (self.task.audio_file.audio_length <= 0) ): self.log_exc(u"The task seems to have an invalid audio file", None, True, ExecuteTaskInputError) task_max_audio_length = self.rconf[RuntimeConfiguration.TASK_MAX_AUDIO_LENGTH] if ( (task_max_audio_length > 0) and (self.task.audio_file.audio_length > task_max_audio_length) ): self.log_exc(u"The audio file of the task has length %.3f, more than the maximum allowed (%.3f)." % (self.task.audio_file.audio_length, task_max_audio_length), None, True, ExecuteTaskInputError) # check that we have the TextFile object if self.task.text_file is None: self.log_exc(u"The task does not seem to have its text file set", None, True, ExecuteTaskInputError) if len(self.task.text_file) == 0: self.log_exc(u"The task text file seems to have no text fragments", None, True, ExecuteTaskInputError) task_max_text_length = self.rconf[RuntimeConfiguration.TASK_MAX_TEXT_LENGTH] if ( (task_max_text_length > 0) and (len(self.task.text_file) > task_max_text_length) ): self.log_exc(u"The text file of the task has %d fragments, more than the maximum allowed (%d)." % (len(self.task.text_file), task_max_text_length), None, True, ExecuteTaskInputError) if self.task.text_file.chars == 0: self.log_exc(u"The task text file seems to have empty text", None, True, ExecuteTaskInputError) self.log(u"Both audio and text input file are present") # execute self.step_index = 1 self.step_total = 0.000 if self.task.text_file.file_format in TextFileFormat.MULTILEVEL_VALUES: self._execute_multi_level_task() else: self._execute_single_level_task() self.log(u"Executing task... done")
[ "def", "execute", "(", "self", ")", ":", "self", ".", "log", "(", "u\"Executing task...\"", ")", "# check that we have the AudioFile object", "if", "self", ".", "task", ".", "audio_file", "is", "None", ":", "self", ".", "log_exc", "(", "u\"The task does not seem to have its audio file set\"", ",", "None", ",", "True", ",", "ExecuteTaskInputError", ")", "if", "(", "(", "self", ".", "task", ".", "audio_file", ".", "audio_length", "is", "None", ")", "or", "(", "self", ".", "task", ".", "audio_file", ".", "audio_length", "<=", "0", ")", ")", ":", "self", ".", "log_exc", "(", "u\"The task seems to have an invalid audio file\"", ",", "None", ",", "True", ",", "ExecuteTaskInputError", ")", "task_max_audio_length", "=", "self", ".", "rconf", "[", "RuntimeConfiguration", ".", "TASK_MAX_AUDIO_LENGTH", "]", "if", "(", "(", "task_max_audio_length", ">", "0", ")", "and", "(", "self", ".", "task", ".", "audio_file", ".", "audio_length", ">", "task_max_audio_length", ")", ")", ":", "self", ".", "log_exc", "(", "u\"The audio file of the task has length %.3f, more than the maximum allowed (%.3f).\"", "%", "(", "self", ".", "task", ".", "audio_file", ".", "audio_length", ",", "task_max_audio_length", ")", ",", "None", ",", "True", ",", "ExecuteTaskInputError", ")", "# check that we have the TextFile object", "if", "self", ".", "task", ".", "text_file", "is", "None", ":", "self", ".", "log_exc", "(", "u\"The task does not seem to have its text file set\"", ",", "None", ",", "True", ",", "ExecuteTaskInputError", ")", "if", "len", "(", "self", ".", "task", ".", "text_file", ")", "==", "0", ":", "self", ".", "log_exc", "(", "u\"The task text file seems to have no text fragments\"", ",", "None", ",", "True", ",", "ExecuteTaskInputError", ")", "task_max_text_length", "=", "self", ".", "rconf", "[", "RuntimeConfiguration", ".", "TASK_MAX_TEXT_LENGTH", "]", "if", "(", "(", "task_max_text_length", ">", "0", ")", "and", "(", "len", "(", "self", ".", "task", ".", "text_file", ")", ">", "task_max_text_length", ")", ")", ":", "self", ".", "log_exc", "(", "u\"The text file of the task has %d fragments, more than the maximum allowed (%d).\"", "%", "(", "len", "(", "self", ".", "task", ".", "text_file", ")", ",", "task_max_text_length", ")", ",", "None", ",", "True", ",", "ExecuteTaskInputError", ")", "if", "self", ".", "task", ".", "text_file", ".", "chars", "==", "0", ":", "self", ".", "log_exc", "(", "u\"The task text file seems to have empty text\"", ",", "None", ",", "True", ",", "ExecuteTaskInputError", ")", "self", ".", "log", "(", "u\"Both audio and text input file are present\"", ")", "# execute", "self", ".", "step_index", "=", "1", "self", ".", "step_total", "=", "0.000", "if", "self", ".", "task", ".", "text_file", ".", "file_format", "in", "TextFileFormat", ".", "MULTILEVEL_VALUES", ":", "self", ".", "_execute_multi_level_task", "(", ")", "else", ":", "self", ".", "_execute_single_level_task", "(", ")", "self", ".", "log", "(", "u\"Executing task... done\"", ")" ]
52.428571
33.44898
def _update_priority(self, task, prio, worker): """ Update priority of the given task. Priority can only be increased. If the task doesn't exist, a placeholder task is created to preserve priority when the task is later scheduled. """ task.priority = prio = max(prio, task.priority) for dep in task.deps or []: t = self._state.get_task(dep) if t is not None and prio > t.priority: self._update_priority(t, prio, worker)
[ "def", "_update_priority", "(", "self", ",", "task", ",", "prio", ",", "worker", ")", ":", "task", ".", "priority", "=", "prio", "=", "max", "(", "prio", ",", "task", ".", "priority", ")", "for", "dep", "in", "task", ".", "deps", "or", "[", "]", ":", "t", "=", "self", ".", "_state", ".", "get_task", "(", "dep", ")", "if", "t", "is", "not", "None", "and", "prio", ">", "t", ".", "priority", ":", "self", ".", "_update_priority", "(", "t", ",", "prio", ",", "worker", ")" ]
42.083333
14.583333
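Because priorities only ever increase and the update recurses through deps, a raise at the root lifts every lower-priority dependency with it. A self-contained model of that behavior:

class Task:
    # minimal stand-in for the scheduler's task objects
    def __init__(self, priority=0, deps=()):
        self.priority = priority
        self.deps = list(deps)

tasks = {"a": Task(0, deps=["b"]), "b": Task(5)}

def update_priority(name, prio):
    task = tasks[name]
    task.priority = prio = max(prio, task.priority)
    for dep in task.deps:
        if prio > tasks[dep].priority:
            update_priority(dep, prio)

update_priority("a", 3)
print(tasks["a"].priority, tasks["b"].priority)  # 3 5: "b" already higher
update_priority("a", 9)
print(tasks["a"].priority, tasks["b"].priority)  # 9 9: raised recursively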
def difference(self, other): """ Summarise the differences between this node and the other node. :param other: The other node :return: A tuple containing the diff, the counts of the diff, and whether this plate is a sub-plate of the other :type other: Node """ diff = (tuple(set(self.plates) - set(other.plates)), tuple(set(other.plates) - set(self.plates))) counts = list(map(len, diff)) # is_sub_plate = counts == [1, 1] and diff[1][0].is_sub_plate(diff[0][0]) is_sub_plate = counts == [1, 1] and diff[0][0].is_sub_plate(diff[1][0]) # MK fixed if len(other.plates) == 1 and counts == [1, 0] and diff[0][0].parent == other.plates[0].parent: is_sub_plate = True return diff, counts, is_sub_plate
[ "def", "difference", "(", "self", ",", "other", ")", ":", "diff", "=", "(", "tuple", "(", "set", "(", "self", ".", "plates", ")", "-", "set", "(", "other", ".", "plates", ")", ")", ",", "tuple", "(", "set", "(", "other", ".", "plates", ")", "-", "set", "(", "self", ".", "plates", ")", ")", ")", "counts", "=", "map", "(", "len", ",", "diff", ")", "# is_sub_plate = counts == [1, 1] and diff[1][0].is_sub_plate(diff[0][0])", "is_sub_plate", "=", "counts", "==", "[", "1", ",", "1", "]", "and", "diff", "[", "0", "]", "[", "0", "]", ".", "is_sub_plate", "(", "diff", "[", "1", "]", "[", "0", "]", ")", "# MK fixed", "if", "len", "(", "other", ".", "plates", ")", "==", "1", "and", "counts", "==", "[", "1", ",", "0", "]", "and", "diff", "[", "0", "]", "[", "0", "]", ".", "parent", "==", "other", ".", "plates", "[", "0", "]", ".", "parent", ":", "is_sub_plate", "=", "True", "return", "diff", ",", "counts", ",", "is_sub_plate" ]
52.333333
28.066667
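A worked example of the two-sided diff and counts (note the list() around map, needed so the equality tests behave the same on Python 3):

self_plates = {"P1", "P2"}   # illustrative plate ids
other_plates = {"P1", "P3"}
diff = (tuple(self_plates - other_plates),   # plates only on this node
        tuple(other_plates - self_plates))   # plates only on the other node
counts = list(map(len, diff))
print(diff, counts)  # (('P2',), ('P3',)) [1, 1]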
def delete(self, force=False): """Delete a record. If `force` is ``False``, the record is soft-deleted: record data will be deleted but the record identifier and the history of the record will be kept. This ensures that the same record identifier cannot be used twice, and that you can still retrieve its history. If `force` is ``True``, then the record is completely deleted from the database. #. Send a signal :data:`invenio_records.signals.before_record_delete` with the current record as parameter. #. Delete or soft-delete the current record. #. Send a signal :data:`invenio_records.signals.after_record_delete` with the current deleted record as parameter. :param force: if ``True``, completely deletes the current record from the database, otherwise soft-deletes it. :returns: The deleted :class:`Record` instance. """ if self.model is None: raise MissingModelError() with db.session.begin_nested(): before_record_delete.send( current_app._get_current_object(), record=self ) if force: db.session.delete(self.model) else: self.model.json = None db.session.merge(self.model) after_record_delete.send( current_app._get_current_object(), record=self ) return self
[ "def", "delete", "(", "self", ",", "force", "=", "False", ")", ":", "if", "self", ".", "model", "is", "None", ":", "raise", "MissingModelError", "(", ")", "with", "db", ".", "session", ".", "begin_nested", "(", ")", ":", "before_record_delete", ".", "send", "(", "current_app", ".", "_get_current_object", "(", ")", ",", "record", "=", "self", ")", "if", "force", ":", "db", ".", "session", ".", "delete", "(", "self", ".", "model", ")", "else", ":", "self", ".", "model", ".", "json", "=", "None", "db", ".", "session", ".", "merge", "(", "self", ".", "model", ")", "after_record_delete", ".", "send", "(", "current_app", ".", "_get_current_object", "(", ")", ",", "record", "=", "self", ")", "return", "self" ]
35.902439
21.609756
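A hedged usage sketch of the two deletion modes, assuming an invenio-records setup with an application context and a previously committed record (record_id is a hypothetical UUID):

from invenio_db import db
from invenio_records.api import Record

record = Record.get_record(record_id)  # record_id: hypothetical UUID
record.delete()                        # soft delete: JSON cleared, id and history kept
# ... or, irreversibly:
record = Record.get_record(record_id, with_deleted=True)
record.delete(force=True)              # hard delete: row removed from the database
db.session.commit()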
def try_lock(self, key, lease_time=-1, timeout=-1): """ Tries to acquire the lock for the specified key. When the lock is not available, * If timeout is not provided, the current thread doesn't wait and returns ``false`` immediately. * If a timeout is provided, the current thread becomes disabled for thread scheduling purposes and lies dormant until one of the following happens: * the lock is acquired by the current thread, or * the specified waiting time elapses. If lease_time is provided, the lock will be released after this time elapses. :param key: (object), key to lock in this map. :param lease_time: (int), time in seconds to wait before releasing the lock (optional). :param timeout: (int), maximum time in seconds to wait for the lock (optional). :return: (bool), ``true`` if the lock was acquired, ``false`` otherwise. """ check_not_none(key, "key can't be None") key_data = self._to_data(key) return self._encode_invoke_on_key(multi_map_try_lock_codec, key_data, key=key_data, thread_id=thread_id(), lease=to_millis(lease_time), timeout=to_millis(timeout), reference_id=self.reference_id_generator.get_and_increment())
[ "def", "try_lock", "(", "self", ",", "key", ",", "lease_time", "=", "-", "1", ",", "timeout", "=", "-", "1", ")", ":", "check_not_none", "(", "key", ",", "\"key can't be None\"", ")", "key_data", "=", "self", ".", "_to_data", "(", "key", ")", "return", "self", ".", "_encode_invoke_on_key", "(", "multi_map_try_lock_codec", ",", "key_data", ",", "key", "=", "key_data", ",", "thread_id", "=", "thread_id", "(", ")", ",", "lease", "=", "to_millis", "(", "lease_time", ")", ",", "timeout", "=", "to_millis", "(", "timeout", ")", ",", "reference_id", "=", "self", ".", "reference_id_generator", ".", "get_and_increment", "(", ")", ")" ]
60.73913
33.956522
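A hedged usage sketch with a MultiMap proxy from the Hazelcast Python client; proxy methods return futures, hence the .result() calls, and the map and key names are illustrative:

import hazelcast

client = hazelcast.HazelcastClient()  # assumes a reachable cluster
multi_map = client.get_multi_map("orders-by-customer")

if multi_map.try_lock("orders", lease_time=30, timeout=5).result():
    try:
        values = multi_map.get("orders").result()  # safe while we hold the lock
    finally:
        multi_map.unlock("orders").result()
else:
    print("could not acquire lock within 5 s")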
def glyph_path(self, glyphs): """Adds closed paths for the glyphs to the current path. The generated path, if filled, achieves an effect similar to that of :meth:`show_glyphs`. :param glyphs: The glyphs to show. See :meth:`show_text_glyphs` for the data structure. """ glyphs = ffi.new('cairo_glyph_t[]', glyphs) cairo.cairo_glyph_path(self._pointer, glyphs, len(glyphs)) self._check_status()
[ "def", "glyph_path", "(", "self", ",", "glyphs", ")", ":", "glyphs", "=", "ffi", ".", "new", "(", "'cairo_glyph_t[]'", ",", "glyphs", ")", "cairo", ".", "cairo_glyph_path", "(", "self", ".", "_pointer", ",", "glyphs", ",", "len", "(", "glyphs", ")", ")", "self", ".", "_check_status", "(", ")" ]
36.076923
16.923077
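A hedged cairocffi sketch: each glyph is an (index, x, y) tuple in user space, and filling the generated path reproduces what show_glyphs() would paint. The glyph indices below are arbitrary and font-dependent:

import cairocffi as cairo

surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 100, 40)
context = cairo.Context(surface)
context.set_font_size(20)
context.glyph_path([(36, 10.0, 25.0), (37, 24.0, 25.0)])  # arbitrary glyph indices
context.fill()  # same visual effect as show_glyphs() on these glyphs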
def waitforcard(self): """Wait for card insertion and return a card service.""" AbstractCardRequest.waitforcard(self) cardfound = False # for non infinite timeout, a timer will signal # the end of the time-out by setting the evt event evt = threading.Event() if INFINITE == self.timeout: timertimeout = 1 else: timertimeout = self.timeout timer = threading.Timer( timertimeout, signalEvent, [evt, INFINITE == self.timeout]) # create a dictionary entry for new readers readerstates = {} readernames = self.getReaderNames() for reader in readernames: if not reader in readerstates: readerstates[reader] = (reader, SCARD_STATE_UNAWARE) # remove dictionary entry for readers that disappeared for oldreader in list(readerstates.keys()): if oldreader not in readernames: del readerstates[oldreader] # call SCardGetStatusChange only if we have some readers if {} != readerstates: hresult, newstates = SCardGetStatusChange( self.hcontext, 0, list(readerstates.values())) else: hresult = 0 newstates = [] # we can expect normally time-outs or reader # disappearing just before the call # otherwise, raise exception on error if 0 != hresult and \ SCARD_E_TIMEOUT != hresult and \ SCARD_E_UNKNOWN_READER != hresult: raise CardRequestException( 'Failed to SCardGetStatusChange ' + \ SCardGetErrorMessage(hresult)) # in case of timeout or reader disappearing, # the content of the states is useless # in which case we clear the changed bit if SCARD_E_TIMEOUT == hresult or SCARD_E_UNKNOWN_READER == hresult: for state in newstates: state[1] = state[1] & (0xFFFFFFFF ^ SCARD_STATE_CHANGED) # update readerstate for state in newstates: readername, eventstate, atr = state readerstates[readername] = (readername, eventstate) # if a new card is not requested, just return the first available if not self.newcardonly: for state in newstates: readername, eventstate, atr = state if eventstate & SCARD_STATE_PRESENT: reader = PCSCReader(readername) if self.cardType.matches(atr, reader): if self.cardServiceClass.supports('dummy'): cardfound = True return self.cardServiceClass( reader.createConnection()) timerstarted = False while not evt.isSet() and not cardfound: if not timerstarted: timerstarted = True timer.start() time.sleep(self.pollinginterval) # create a dictionary entry for new readers readernames = self.getReaderNames() for reader in readernames: if not reader in readerstates: readerstates[reader] = (reader, SCARD_STATE_UNAWARE) # remove dictionary entry for readers that disappeared for oldreader in list(readerstates.keys()): if oldreader not in readernames: del readerstates[oldreader] # wait for card insertion if {} != readerstates: hresult, newstates = SCardGetStatusChange( self.hcontext, 0, list(readerstates.values())) else: hresult = SCARD_E_TIMEOUT newstates = [] # time-out if SCARD_E_TIMEOUT == hresult: if evt.isSet(): raise CardRequestTimeoutException() # reader vanished before or during the call elif SCARD_E_UNKNOWN_READER == hresult: pass # some error happened elif 0 != hresult: timer.cancel() raise CardRequestException( 'Failed to get status change ' + \ SCardGetErrorMessage(hresult)) # something changed! else: # check if we have to return a match, i.e. # if no new card is inserted and there is a card found # or if a new card is requested, and there is a change+present for state in newstates: readername, eventstate, atr = state r, oldstate = readerstates[readername] # the status can change on a card already inserted, e.g. # unpowered, in use, ...
# if a new card is requested, clear the state changed bit # if the card was already inserted and is still inserted if self.newcardonly: if oldstate & SCARD_STATE_PRESENT and \ eventstate & \ (SCARD_STATE_CHANGED | SCARD_STATE_PRESENT): eventstate = eventstate & \ (0xFFFFFFFF ^ SCARD_STATE_CHANGED) if (self.newcardonly and \ eventstate & SCARD_STATE_PRESENT and \ eventstate & SCARD_STATE_CHANGED) or \ (not self.newcardonly and \ eventstate & SCARD_STATE_PRESENT): reader = PCSCReader(readername) if self.cardType.matches(atr, reader): if self.cardServiceClass.supports('dummy'): cardfound = True timer.cancel() return self.cardServiceClass( reader.createConnection()) # update state dictionary readerstates[readername] = (readername, eventstate) if evt.isSet(): raise CardRequestTimeoutException()
[ "def", "waitforcard", "(", "self", ")", ":", "AbstractCardRequest", ".", "waitforcard", "(", "self", ")", "cardfound", "=", "False", "# for non infinite timeout, a timer will signal", "# the end of the time-out by setting the evt event", "evt", "=", "threading", ".", "Event", "(", ")", "if", "INFINITE", "==", "self", ".", "timeout", ":", "timertimeout", "=", "1", "else", ":", "timertimeout", "=", "self", ".", "timeout", "timer", "=", "threading", ".", "Timer", "(", "timertimeout", ",", "signalEvent", ",", "[", "evt", ",", "INFINITE", "==", "self", ".", "timeout", "]", ")", "# create a dictionary entry for new readers", "readerstates", "=", "{", "}", "readernames", "=", "self", ".", "getReaderNames", "(", ")", "for", "reader", "in", "readernames", ":", "if", "not", "reader", "in", "readerstates", ":", "readerstates", "[", "reader", "]", "=", "(", "reader", ",", "SCARD_STATE_UNAWARE", ")", "# remove dictionary entry for readers that disappeared", "for", "oldreader", "in", "list", "(", "readerstates", ".", "keys", "(", ")", ")", ":", "if", "oldreader", "not", "in", "readernames", ":", "del", "readerstates", "[", "oldreader", "]", "# call SCardGetStatusChange only if we have some readers", "if", "{", "}", "!=", "readerstates", ":", "hresult", ",", "newstates", "=", "SCardGetStatusChange", "(", "self", ".", "hcontext", ",", "0", ",", "list", "(", "readerstates", ".", "values", "(", ")", ")", ")", "else", ":", "hresult", "=", "0", "newstates", "=", "[", "]", "# we can expect normally time-outs or reader", "# disappearing just before the call", "# otherwise, raise execption on error", "if", "0", "!=", "hresult", "and", "SCARD_E_TIMEOUT", "!=", "hresult", "and", "SCARD_E_UNKNOWN_READER", "!=", "hresult", ":", "raise", "CardRequestException", "(", "'Failed to SCardGetStatusChange '", "+", "SCardGetErrorMessage", "(", "hresult", ")", ")", "# in case of timeout or reader disappearing,", "# the content of the states is useless", "# in which case we clear the changed bit", "if", "SCARD_E_TIMEOUT", "==", "hresult", "or", "SCARD_E_UNKNOWN_READER", "==", "hresult", ":", "for", "state", "in", "newstates", ":", "state", "[", "1", "]", "=", "state", "[", "1", "]", "&", "(", "0xFFFFFFFF", "^", "SCARD_STATE_CHANGED", ")", "# update readerstate", "for", "state", "in", "newstates", ":", "readername", ",", "eventstate", ",", "atr", "=", "state", "readerstates", "[", "readername", "]", "=", "(", "readername", ",", "eventstate", ")", "# if a new card is not requested, just return the first available", "if", "not", "self", ".", "newcardonly", ":", "for", "state", "in", "newstates", ":", "readername", ",", "eventstate", ",", "atr", "=", "state", "if", "eventstate", "&", "SCARD_STATE_PRESENT", ":", "reader", "=", "PCSCReader", "(", "readername", ")", "if", "self", ".", "cardType", ".", "matches", "(", "atr", ",", "reader", ")", ":", "if", "self", ".", "cardServiceClass", ".", "supports", "(", "'dummy'", ")", ":", "cardfound", "=", "True", "return", "self", ".", "cardServiceClass", "(", "reader", ".", "createConnection", "(", ")", ")", "timerstarted", "=", "False", "while", "not", "evt", ".", "isSet", "(", ")", "and", "not", "cardfound", ":", "if", "not", "timerstarted", ":", "timerstarted", "=", "True", "timer", ".", "start", "(", ")", "time", ".", "sleep", "(", "self", ".", "pollinginterval", ")", "# create a dictionary entry for new readers", "readernames", "=", "self", ".", "getReaderNames", "(", ")", "for", "reader", "in", "readernames", ":", "if", "not", "reader", "in", "readerstates", ":", "readerstates", "[", 
"reader", "]", "=", "(", "reader", ",", "SCARD_STATE_UNAWARE", ")", "# remove dictionary entry for readers that disappeared", "for", "oldreader", "in", "list", "(", "readerstates", ".", "keys", "(", ")", ")", ":", "if", "oldreader", "not", "in", "readernames", ":", "del", "readerstates", "[", "oldreader", "]", "# wait for card insertion", "if", "{", "}", "!=", "readerstates", ":", "hresult", ",", "newstates", "=", "SCardGetStatusChange", "(", "self", ".", "hcontext", ",", "0", ",", "list", "(", "readerstates", ".", "values", "(", ")", ")", ")", "else", ":", "hresult", "=", "SCARD_E_TIMEOUT", "newstates", "=", "[", "]", "# time-out", "if", "SCARD_E_TIMEOUT", "==", "hresult", ":", "if", "evt", ".", "isSet", "(", ")", ":", "raise", "CardRequestTimeoutException", "(", ")", "# reader vanished before or during the call", "elif", "SCARD_E_UNKNOWN_READER", "==", "hresult", ":", "pass", "# some error happened", "elif", "0", "!=", "hresult", ":", "timer", ".", "cancel", "(", ")", "raise", "CardRequestException", "(", "'Failed to get status change '", "+", "SCardGetErrorMessage", "(", "hresult", ")", ")", "# something changed!", "else", ":", "# check if we have to return a match, i.e.", "# if no new card in inserted and there is a card found", "# or if a new card is requested, and there is a change+present", "for", "state", "in", "newstates", ":", "readername", ",", "eventstate", ",", "atr", "=", "state", "r", ",", "oldstate", "=", "readerstates", "[", "readername", "]", "# the status can change on a card already inserted, e.g.", "# unpowered, in use, ...", "# if a new card is requested, clear the state changed bit", "# if the card was already inserted and is still inserted", "if", "self", ".", "newcardonly", ":", "if", "oldstate", "&", "SCARD_STATE_PRESENT", "and", "eventstate", "&", "(", "SCARD_STATE_CHANGED", "|", "SCARD_STATE_PRESENT", ")", ":", "eventstate", "=", "eventstate", "&", "(", "0xFFFFFFFF", "^", "SCARD_STATE_CHANGED", ")", "if", "(", "self", ".", "newcardonly", "and", "eventstate", "&", "SCARD_STATE_PRESENT", "and", "eventstate", "&", "SCARD_STATE_CHANGED", ")", "or", "(", "not", "self", ".", "newcardonly", "and", "eventstate", "&", "SCARD_STATE_PRESENT", ")", ":", "reader", "=", "PCSCReader", "(", "readername", ")", "if", "self", ".", "cardType", ".", "matches", "(", "atr", ",", "reader", ")", ":", "if", "self", ".", "cardServiceClass", ".", "supports", "(", "'dummy'", ")", ":", "cardfound", "=", "True", "timer", ".", "cancel", "(", ")", "return", "self", ".", "cardServiceClass", "(", "reader", ".", "createConnection", "(", ")", ")", "# update state dictionary", "readerstates", "[", "readername", "]", "=", "(", "readername", ",", "eventstate", ")", "if", "evt", ".", "isSet", "(", ")", ":", "raise", "CardRequestTimeoutException", "(", ")" ]
40.427632
18.052632
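For context, the public pyscard API that exercises this method; this blocks up to ten seconds for a newly inserted card of any type:

from smartcard.CardRequest import CardRequest
from smartcard.CardType import AnyCardType
from smartcard.Exceptions import CardRequestTimeoutException

cardrequest = CardRequest(timeout=10, cardType=AnyCardType(), newcardonly=True)
try:
    cardservice = cardrequest.waitforcard()
    cardservice.connection.connect()
    print("ATR:", cardservice.connection.getATR())
except CardRequestTimeoutException:
    print("no card inserted within 10 s")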
def get_op_version(name): ''' .. versionadded:: 2019.2.0 Returns the glusterfs volume op-version name Name of the glusterfs volume CLI Example: .. code-block:: bash salt '*' glusterfs.get_op_version <volume> ''' cmd = 'volume get {0} cluster.op-version'.format(name) root = _gluster_xml(cmd) if not _gluster_ok(root): return False, root.find('opErrstr').text result = {} for op_version in _iter(root, 'volGetopts'): for item in op_version: if item.tag == 'Value': result = item.text elif item.tag == 'Opt': for child in item: if child.tag == 'Value': result = child.text return result
[ "def", "get_op_version", "(", "name", ")", ":", "cmd", "=", "'volume get {0} cluster.op-version'", ".", "format", "(", "name", ")", "root", "=", "_gluster_xml", "(", "cmd", ")", "if", "not", "_gluster_ok", "(", "root", ")", ":", "return", "False", ",", "root", ".", "find", "(", "'opErrstr'", ")", ".", "text", "result", "=", "{", "}", "for", "op_version", "in", "_iter", "(", "root", ",", "'volGetopts'", ")", ":", "for", "item", "in", "op_version", ":", "if", "item", ".", "tag", "==", "'Value'", ":", "result", "=", "item", ".", "text", "elif", "item", ".", "tag", "==", "'Opt'", ":", "for", "child", "in", "item", ":", "if", "child", ".", "tag", "==", "'Value'", ":", "result", "=", "child", ".", "text", "return", "result" ]
22.575758
20.69697
def args(self, *args, **kwargs): """ Creates an ArgumentsExpectationRule and adds it to the expectation """ self._any_args = False self._arguments_rule.set_args(*args, **kwargs) return self
[ "def", "args", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_any_args", "=", "False", "self", ".", "_arguments_rule", ".", "set_args", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "self" ]
32.857143
12.285714
def X(self, i, j=slice(None, None, None)): ''' Computes the design matrix at the given *PLD* order and the given indices. The columns are the *PLD* vectors for the target at the corresponding order, computed as the product of the fractional pixel flux of all sets of :py:obj:`n` pixels, where :py:obj:`n` is the *PLD* order. ''' X1 = self.fpix[j] / self.norm[j].reshape(-1, 1) X = np.product(list(multichoose(X1.T, i + 1)), axis=1).T if self.X1N is not None: return np.hstack([X, self.X1N[j] ** (i + 1)]) else: return X
[ "def", "X", "(", "self", ",", "i", ",", "j", "=", "slice", "(", "None", ",", "None", ",", "None", ")", ")", ":", "X1", "=", "self", ".", "fpix", "[", "j", "]", "/", "self", ".", "norm", "[", "j", "]", ".", "reshape", "(", "-", "1", ",", "1", ")", "X", "=", "np", ".", "product", "(", "list", "(", "multichoose", "(", "X1", ".", "T", ",", "i", "+", "1", ")", ")", ",", "axis", "=", "1", ")", ".", "T", "if", "self", ".", "X1N", "is", "not", "None", ":", "return", "np", ".", "hstack", "(", "[", "X", ",", "self", ".", "X1N", "[", "j", "]", "**", "(", "i", "+", "1", ")", "]", ")", "else", ":", "return", "X" ]
38.625
26
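At order n with k pixel columns, multichoose enumerates all multisets of n pixels, so the design matrix has C(k+n-1, n) columns. A small check of that count using the itertools equivalent of multichoose (random data stands in for the normalized pixel fluxes):

from itertools import combinations_with_replacement
from math import comb

import numpy as np

k, order = 3, 2                       # 3 normalized pixel columns, PLD order 2
X1 = np.random.rand(10, k)            # stand-in for fpix / norm
X1 /= X1.sum(axis=1, keepdims=True)
# one column per multiset of `order` pixels: C(k + order - 1, order) = 6
cols = [np.prod(np.column_stack(c), axis=1)
        for c in combinations_with_replacement(X1.T, order)]
X = np.column_stack(cols)
print(X.shape, comb(k + order - 1, order))  # (10, 6) 6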
def get_group(self, name, user_name=None): """ Get information on the given group or whether or not a user is a member of the group. Args: name (string): Name of group to query. user_name (optional[string]): Supply None if not interested in determining if user is a member of the given group. Returns: (mixed): Dictionary if getting group information or bool if a user name is supplied. Raises: requests.HTTPError on failure. """ self.project_service.set_auth(self._token_project) return self.project_service.get_group(name, user_name)
[ "def", "get_group", "(", "self", ",", "name", ",", "user_name", "=", "None", ")", ":", "self", ".", "project_service", ".", "set_auth", "(", "self", ".", "_token_project", ")", "return", "self", ".", "project_service", ".", "get_group", "(", "name", ",", "user_name", ")" ]
35.157895
21.578947
def read_touchstone(fname): r""" Read a `Touchstone <https://ibis.org/connector/touchstone_spec11.pdf>`_ file. According to the specification, a data line can contain values for at most four complex parameters (plus potentially the frequency point), however this function is able to process malformed files as long as they have the correct number of data points (:code:`points` x :code:`nports` x :code:`nports` where :code:`points` represents the number of frequency points and :code:`nports` represents the number of ports in the file). Per the Touchstone specification, noise data is only supported for two-port files :param fname: Touchstone file name :type fname: `FileNameExists <https://pexdoc.readthedocs.io/en/stable/ ptypes.html#filenameexists>`_ :rtype: dictionary with the following structure: * **nports** (*integer*) -- number of ports * **opts** (:ref:`TouchstoneOptions`) -- File options * **data** (:ref:`TouchstoneData`) -- Parameter data * **noise** (:ref:`TouchstoneNoiseData`) -- Noise data, per the Touchstone specification only supported in 2-port files .. [[[cog cog.out(exobj.get_sphinx_autodoc()) ]]] .. Auto-generated exceptions documentation for .. peng.touchstone.read_touchstone :raises: * OSError (File *[fname]* could not be found) * RuntimeError (Argument \`fname\` is not valid) * RuntimeError (File *[fname]* does not have a valid extension) * RuntimeError (File *[fname]* has no data) * RuntimeError (First non-comment line is not the option line) * RuntimeError (Frequency must increase) * RuntimeError (Illegal data in line *[lineno]*) * RuntimeError (Illegal option line) * RuntimeError (Malformed data) * RuntimeError (Malformed noise data) * RuntimeError (Noise frequency must increase) .. [[[end]]] .. note:: The returned parameter(s) are complex numbers in real and imaginary format regardless of the format used in the Touchstone file.
Similarly, the returned frequency vector unit is Hertz regardless of the unit used in the Touchstone file """ # pylint: disable=R0912,R0915,W0702 # Exceptions definitions exnports = pexdoc.exh.addex( RuntimeError, "File *[fname]* does not have a valid extension" ) exnoopt = pexdoc.exh.addex( RuntimeError, "First non-comment line is not the option line" ) exopt = pexdoc.exh.addex(RuntimeError, "Illegal option line") exline = pexdoc.exh.addex(RuntimeError, "Illegal data in line *[lineno]*") exnodata = pexdoc.exh.addex(RuntimeError, "File *[fname]* has no data") exdata = pexdoc.exh.addex(RuntimeError, "Malformed data") exndata = pexdoc.exh.addex(RuntimeError, "Malformed noise data") exfreq = pexdoc.exh.addex(RuntimeError, "Frequency must increase") exnfreq = pexdoc.exh.addex(RuntimeError, "Noise frequency must increase") # Verify that file has correct extension format _, ext = os.path.splitext(fname) ext = ext.lower() nports_regexp = re.compile(r"\.s(\d+)p") match = nports_regexp.match(ext) exnports(not match, edata={"field": "fname", "value": fname}) nports = int(match.groups()[0]) opt_line = False units_dict = {"GHZ": "GHz", "MHZ": "MHz", "KHZ": "KHz", "HZ": "Hz"} scale_dict = {"GHZ": 1e9, "MHZ": 1e6, "KHZ": 1e3, "HZ": 1.0} units_opts = ["GHZ", "MHZ", "KHZ", "HZ"] type_opts = ["S", "Y", "Z", "H", "G"] format_opts = ["DB", "MA", "RI"] opts = dict(units=None, ptype=None, pformat=None, z0=None) data = [] with open(fname, "r") as fobj: for num, line in enumerate(fobj): line = line.strip().upper() # Comment line if line.startswith("!"): continue # Options line if (not opt_line) and (not line.startswith("#")): exnoopt(True) if not opt_line: # Each Touchstone data file must contain an option line # (additional option lines after the first one will be ignored) opt_line = True tokens = line[1:].split() # Remove initial hash if "R" in tokens: idx = tokens.index("R") add = 1 if len(tokens) > idx + 1: try: opts["z0"] = float(tokens[idx + 1]) add = 2 except: pass tokens = tokens[:idx] + tokens[idx + add :] matches = 0 for token in tokens: if (token in format_opts) and (not opts["pformat"]): matches += 1 opts["pformat"] = token elif (token in units_opts) and (not opts["units"]): matches += 1 opts["units"] = units_dict[token] elif (token in type_opts) and (not opts["ptype"]): matches += 1 opts["ptype"] = token exopt(matches != len(tokens)) if opt_line and line.startswith("#"): continue # Data lines try: if "!" 
in line: idx = line.index("!") line = line[:idx] tokens = [float(item) for item in line.split()] data.append(tokens) except: exline(True, edata={"field": "lineno", "value": num + 1}) data = np.concatenate(data) exnodata(not data.size, edata={"field": "fname", "value": fname}) # Set option defaults opts["units"] = opts["units"] or "GHz" opts["ptype"] = opts["ptype"] or "S" opts["pformat"] = opts["pformat"] or "MA" opts["z0"] = opts["z0"] or 50 # Format data data_dict = {} nums_per_freq = 1 + (2 * (nports ** 2)) fslice = slice(0, data.size, nums_per_freq) freq = data[fslice] ndiff = np.diff(freq) ndict = {} if (nports == 2) and ndiff.size and (min(ndiff) <= 0): # Extract noise data npoints = np.where(ndiff <= 0)[0][0] + 1 freq = freq[:npoints] ndata = data[9 * npoints :] nfpoints = int(ndata.size / 5.0) exndata(ndata.size % 5 != 0) data = data[: 9 * npoints] ndiff = 1 nfslice = slice(0, ndata.size, 5) nfreq = ndata[nfslice] ndiff = np.diff(nfreq) exnfreq(bool(ndiff.size and (min(ndiff) <= 0))) nfig_slice = slice(1, ndata.size, 5) rlmag_slice = slice(2, ndata.size, 5) rlphase_slice = slice(3, ndata.size, 5) res_slice = slice(4, ndata.size, 5) ndict["freq"] = scale_dict[opts["units"].upper()] * nfreq ndict["nf"] = ndata[nfig_slice] ndict["rc"] = ndata[rlmag_slice] * np.exp(1j * ndata[rlphase_slice]) ndict["res"] = ndata[res_slice] ndict["points"] = nfpoints exdata(data.size % nums_per_freq != 0) npoints = int(data.size / nums_per_freq) exfreq(bool(ndiff.size and (min(ndiff) <= 0))) data_dict["freq"] = scale_dict[opts["units"].upper()] * freq d1slice = slice(0, data.size, 2) d2slice = slice(1, data.size, 2) data = np.delete(data, fslice) # For format that has angle information, the angle is given in degrees if opts["pformat"] == "MA": data = data[d1slice] * np.exp(1j * np.deg2rad(data[d2slice])) elif opts["pformat"] == "RI": data = data[d1slice] + (1j * data[d2slice]) else: # if opts['pformat'] == 'DB': data = (10 ** (data[d1slice] / 20.0)) * np.exp(1j * np.deg2rad(data[d2slice])) if nports > 1: data_dict["pars"] = np.resize(data, (npoints, nports, nports)) else: data_dict["pars"] = copy.copy(data) del data data_dict["points"] = npoints if nports == 2: # The order of data for a two-port file is N11, N21, N12, N22 but for # m ports where m > 2, the order is N11, N12, N13, ..., N1m data_dict["pars"] = np.transpose(data_dict["pars"], (0, 2, 1)) return dict(nports=nports, opts=opts, data=data_dict, noise=ndict)
[ "def", "read_touchstone", "(", "fname", ")", ":", "# pylint: disable=R0912,R0915,W0702", "# Exceptions definitions", "exnports", "=", "pexdoc", ".", "exh", ".", "addex", "(", "RuntimeError", ",", "\"File *[fname]* does not have a valid extension\"", ")", "exnoopt", "=", "pexdoc", ".", "exh", ".", "addex", "(", "RuntimeError", ",", "\"First non-comment line is not the option line\"", ")", "exopt", "=", "pexdoc", ".", "exh", ".", "addex", "(", "RuntimeError", ",", "\"Illegal option line\"", ")", "exline", "=", "pexdoc", ".", "exh", ".", "addex", "(", "RuntimeError", ",", "\"Illegal data in line *[lineno]*\"", ")", "exnodata", "=", "pexdoc", ".", "exh", ".", "addex", "(", "RuntimeError", ",", "\"File *[fname]* has no data\"", ")", "exdata", "=", "pexdoc", ".", "exh", ".", "addex", "(", "RuntimeError", ",", "\"Malformed data\"", ")", "exndata", "=", "pexdoc", ".", "exh", ".", "addex", "(", "RuntimeError", ",", "\"Malformed noise data\"", ")", "exfreq", "=", "pexdoc", ".", "exh", ".", "addex", "(", "RuntimeError", ",", "\"Frequency must increase\"", ")", "exnfreq", "=", "pexdoc", ".", "exh", ".", "addex", "(", "RuntimeError", ",", "\"Noise frequency must increase\"", ")", "# Verify that file has correct extension format", "_", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "fname", ")", "ext", "=", "ext", ".", "lower", "(", ")", "nports_regexp", "=", "re", ".", "compile", "(", "r\"\\.s(\\d+)p\"", ")", "match", "=", "nports_regexp", ".", "match", "(", "ext", ")", "exnports", "(", "not", "match", ",", "edata", "=", "{", "\"field\"", ":", "\"fname\"", ",", "\"value\"", ":", "fname", "}", ")", "nports", "=", "int", "(", "match", ".", "groups", "(", ")", "[", "0", "]", ")", "opt_line", "=", "False", "units_dict", "=", "{", "\"GHZ\"", ":", "\"GHz\"", ",", "\"MHZ\"", ":", "\"MHz\"", ",", "\"KHZ\"", ":", "\"KHz\"", ",", "\"HZ\"", ":", "\"Hz\"", "}", "scale_dict", "=", "{", "\"GHZ\"", ":", "1e9", ",", "\"MHZ\"", ":", "1e6", ",", "\"KHZ\"", ":", "1e3", ",", "\"HZ\"", ":", "1.0", "}", "units_opts", "=", "[", "\"GHZ\"", ",", "\"MHZ\"", ",", "\"KHZ\"", ",", "\"HZ\"", "]", "type_opts", "=", "[", "\"S\"", ",", "\"Y\"", ",", "\"Z\"", ",", "\"H\"", ",", "\"G\"", "]", "format_opts", "=", "[", "\"DB\"", ",", "\"MA\"", ",", "\"RI\"", "]", "opts", "=", "dict", "(", "units", "=", "None", ",", "ptype", "=", "None", ",", "pformat", "=", "None", ",", "z0", "=", "None", ")", "data", "=", "[", "]", "with", "open", "(", "fname", ",", "\"r\"", ")", "as", "fobj", ":", "for", "num", ",", "line", "in", "enumerate", "(", "fobj", ")", ":", "line", "=", "line", ".", "strip", "(", ")", ".", "upper", "(", ")", "# Comment line", "if", "line", ".", "startswith", "(", "\"!\"", ")", ":", "continue", "# Options line", "if", "(", "not", "opt_line", ")", "and", "(", "not", "line", ".", "startswith", "(", "\"#\"", ")", ")", ":", "exnoopt", "(", "True", ")", "if", "not", "opt_line", ":", "# Each Touchstone data file must contain an option line", "# (additional option lines after the first one will be ignored)", "opt_line", "=", "True", "tokens", "=", "line", "[", "1", ":", "]", ".", "split", "(", ")", "# Remove initial hash", "if", "\"R\"", "in", "tokens", ":", "idx", "=", "tokens", ".", "index", "(", "\"R\"", ")", "add", "=", "1", "if", "len", "(", "tokens", ")", ">", "idx", "+", "1", ":", "try", ":", "opts", "[", "\"z0\"", "]", "=", "float", "(", "tokens", "[", "idx", "+", "1", "]", ")", "add", "=", "2", "except", ":", "pass", "tokens", "=", "tokens", "[", ":", "idx", "]", "+", "tokens", "[", "idx", "+", "add", ":", "]", 
"matches", "=", "0", "for", "token", "in", "tokens", ":", "if", "(", "token", "in", "format_opts", ")", "and", "(", "not", "opts", "[", "\"pformat\"", "]", ")", ":", "matches", "+=", "1", "opts", "[", "\"pformat\"", "]", "=", "token", "elif", "(", "token", "in", "units_opts", ")", "and", "(", "not", "opts", "[", "\"units\"", "]", ")", ":", "matches", "+=", "1", "opts", "[", "\"units\"", "]", "=", "units_dict", "[", "token", "]", "elif", "(", "token", "in", "type_opts", ")", "and", "(", "not", "opts", "[", "\"ptype\"", "]", ")", ":", "matches", "+=", "1", "opts", "[", "\"ptype\"", "]", "=", "token", "exopt", "(", "matches", "!=", "len", "(", "tokens", ")", ")", "if", "opt_line", "and", "line", ".", "startswith", "(", "\"#\"", ")", ":", "continue", "# Data lines", "try", ":", "if", "\"!\"", "in", "line", ":", "idx", "=", "line", ".", "index", "(", "\"!\"", ")", "line", "=", "line", "[", ":", "idx", "]", "tokens", "=", "[", "float", "(", "item", ")", "for", "item", "in", "line", ".", "split", "(", ")", "]", "data", ".", "append", "(", "tokens", ")", "except", ":", "exline", "(", "True", ",", "edata", "=", "{", "\"field\"", ":", "\"lineno\"", ",", "\"value\"", ":", "num", "+", "1", "}", ")", "data", "=", "np", ".", "concatenate", "(", "data", ")", "exnodata", "(", "not", "data", ".", "size", ",", "edata", "=", "{", "\"field\"", ":", "\"fname\"", ",", "\"value\"", ":", "fname", "}", ")", "# Set option defaults", "opts", "[", "\"units\"", "]", "=", "opts", "[", "\"units\"", "]", "or", "\"GHz\"", "opts", "[", "\"ptype\"", "]", "=", "opts", "[", "\"ptype\"", "]", "or", "\"S\"", "opts", "[", "\"pformat\"", "]", "=", "opts", "[", "\"pformat\"", "]", "or", "\"MA\"", "opts", "[", "\"z0\"", "]", "=", "opts", "[", "\"z0\"", "]", "or", "50", "# Format data", "data_dict", "=", "{", "}", "nums_per_freq", "=", "1", "+", "(", "2", "*", "(", "nports", "**", "2", ")", ")", "fslice", "=", "slice", "(", "0", ",", "data", ".", "size", ",", "nums_per_freq", ")", "freq", "=", "data", "[", "fslice", "]", "ndiff", "=", "np", ".", "diff", "(", "freq", ")", "ndict", "=", "{", "}", "if", "(", "nports", "==", "2", ")", "and", "ndiff", ".", "size", "and", "(", "min", "(", "ndiff", ")", "<=", "0", ")", ":", "# Extract noise data", "npoints", "=", "np", ".", "where", "(", "ndiff", "<=", "0", ")", "[", "0", "]", "[", "0", "]", "+", "1", "freq", "=", "freq", "[", ":", "npoints", "]", "ndata", "=", "data", "[", "9", "*", "npoints", ":", "]", "nfpoints", "=", "int", "(", "ndata", ".", "size", "/", "5.0", ")", "exndata", "(", "ndata", ".", "size", "%", "5", "!=", "0", ")", "data", "=", "data", "[", ":", "9", "*", "npoints", "]", "ndiff", "=", "1", "nfslice", "=", "slice", "(", "0", ",", "ndata", ".", "size", ",", "5", ")", "nfreq", "=", "ndata", "[", "nfslice", "]", "ndiff", "=", "np", ".", "diff", "(", "nfreq", ")", "exnfreq", "(", "bool", "(", "ndiff", ".", "size", "and", "(", "min", "(", "ndiff", ")", "<=", "0", ")", ")", ")", "nfig_slice", "=", "slice", "(", "1", ",", "ndata", ".", "size", ",", "5", ")", "rlmag_slice", "=", "slice", "(", "2", ",", "ndata", ".", "size", ",", "5", ")", "rlphase_slice", "=", "slice", "(", "3", ",", "ndata", ".", "size", ",", "5", ")", "res_slice", "=", "slice", "(", "4", ",", "ndata", ".", "size", ",", "5", ")", "ndict", "[", "\"freq\"", "]", "=", "scale_dict", "[", "opts", "[", "\"units\"", "]", ".", "upper", "(", ")", "]", "*", "nfreq", "ndict", "[", "\"nf\"", "]", "=", "ndata", "[", "nfig_slice", "]", "ndict", "[", "\"rc\"", "]", "=", "ndata", "[", "rlmag_slice", "]", "*", "np", ".", "exp", 
"(", "1j", "*", "ndata", "[", "rlphase_slice", "]", ")", "ndict", "[", "\"res\"", "]", "=", "ndata", "[", "res_slice", "]", "ndict", "[", "\"points\"", "]", "=", "nfpoints", "exdata", "(", "data", ".", "size", "%", "nums_per_freq", "!=", "0", ")", "npoints", "=", "int", "(", "data", ".", "size", "/", "nums_per_freq", ")", "exfreq", "(", "bool", "(", "ndiff", ".", "size", "and", "(", "min", "(", "ndiff", ")", "<=", "0", ")", ")", ")", "data_dict", "[", "\"freq\"", "]", "=", "scale_dict", "[", "opts", "[", "\"units\"", "]", ".", "upper", "(", ")", "]", "*", "freq", "d1slice", "=", "slice", "(", "0", ",", "data", ".", "size", ",", "2", ")", "d2slice", "=", "slice", "(", "1", ",", "data", ".", "size", ",", "2", ")", "data", "=", "np", ".", "delete", "(", "data", ",", "fslice", ")", "# For format that has angle information, the angle is given in degrees", "if", "opts", "[", "\"pformat\"", "]", "==", "\"MA\"", ":", "data", "=", "data", "[", "d1slice", "]", "*", "np", ".", "exp", "(", "1j", "*", "np", ".", "deg2rad", "(", "data", "[", "d2slice", "]", ")", ")", "elif", "opts", "[", "\"pformat\"", "]", "==", "\"RI\"", ":", "data", "=", "data", "[", "d1slice", "]", "+", "(", "1j", "*", "data", "[", "d2slice", "]", ")", "else", ":", "# if opts['pformat'] == 'DB':", "data", "=", "(", "10", "**", "(", "data", "[", "d1slice", "]", "/", "20.0", ")", ")", "*", "np", ".", "exp", "(", "1j", "*", "np", ".", "deg2rad", "(", "data", "[", "d2slice", "]", ")", ")", "if", "nports", ">", "1", ":", "data_dict", "[", "\"pars\"", "]", "=", "np", ".", "resize", "(", "data", ",", "(", "npoints", ",", "nports", ",", "nports", ")", ")", "else", ":", "data_dict", "[", "\"pars\"", "]", "=", "copy", ".", "copy", "(", "data", ")", "del", "data", "data_dict", "[", "\"points\"", "]", "=", "npoints", "if", "nports", "==", "2", ":", "# The order of data for a two-port file is N11, N21, N12, N22 but for", "# m ports where m > 2, the order is N11, N12, N13, ..., N1m", "data_dict", "[", "\"pars\"", "]", "=", "np", ".", "transpose", "(", "data_dict", "[", "\"pars\"", "]", ",", "(", "0", ",", "2", ",", "1", ")", ")", "return", "dict", "(", "nports", "=", "nports", ",", "opts", "=", "opts", ",", "data", "=", "data_dict", ",", "noise", "=", "ndict", ")" ]
40.505
18.36
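For readers unfamiliar with the shape of the value returned by read_touchstone above, here is a minimal usage sketch; the file name and the importing context are assumptions, and only the keys come from the return statement in the source.

# Hypothetical usage sketch for read_touchstone; the file name is an assumption.
result = read_touchstone("amplifier.s2p")
print(result["nports"])          # 2 for a .s2p file
print(result["opts"])            # e.g. {'units': 'GHz', 'ptype': 'S', 'pformat': 'MA', 'z0': 50}
freq = result["data"]["freq"]    # frequencies scaled to Hz via the option-line units
pars = result["data"]["pars"]    # complex parameters shaped (points, nports, nports)
noise = result["noise"]          # empty dict unless the two-port file carries noise data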
def undeploy(jboss_config, deployment):
    '''
    Undeploy the application from the JBoss instance

    jboss_config
           Configuration dictionary with properties specified above.
    deployment
           Deployment name to undeploy

    CLI Example:

    .. code-block:: bash

        salt '*' jboss7.undeploy '{"cli_path": "integration.modules.sysmod.SysModuleTest.test_valid_docs", "controller": "10.11.12.13:9999", "cli_user": "jbossadm", "cli_password": "jbossadm"}' my_deployment
    '''
    log.debug("======================== MODULE FUNCTION: jboss7.undeploy, deployment=%s", deployment)
    command = 'undeploy {deployment} '.format(deployment=deployment)
    return __salt__['jboss7_cli.run_command'](jboss_config, command)
[ "def", "undeploy", "(", "jboss_config", ",", "deployment", ")", ":", "log", ".", "debug", "(", "\"======================== MODULE FUNCTION: jboss7.undeploy, deployment=%s\"", ",", "deployment", ")", "command", "=", "'undeploy {deployment} '", ".", "format", "(", "deployment", "=", "deployment", ")", "return", "__salt__", "[", "'jboss7_cli.run_command'", "]", "(", "jboss_config", ",", "command", ")" ]
39.888889
35.333333
def _start_again_message(self, message=None): """Simple method to form a start again message and give the answer in readable form.""" logging.debug("Start again message delivered: {}".format(message)) the_answer = ', '.join( [str(d) for d in self.game.answer][:-1] ) + ', and ' + [str(d) for d in self.game.answer][-1] return "{0}{1} The correct answer was {2}. Please start a new game.".format( message, "." if message[-1] not in [".", ",", ";", ":", "!"] else "", the_answer )
[ "def", "_start_again_message", "(", "self", ",", "message", "=", "None", ")", ":", "logging", ".", "debug", "(", "\"Start again message delivered: {}\"", ".", "format", "(", "message", ")", ")", "the_answer", "=", "', '", ".", "join", "(", "[", "str", "(", "d", ")", "for", "d", "in", "self", ".", "game", ".", "answer", "]", "[", ":", "-", "1", "]", ")", "+", "', and '", "+", "[", "str", "(", "d", ")", "for", "d", "in", "self", ".", "game", ".", "answer", "]", "[", "-", "1", "]", "return", "\"{0}{1} The correct answer was {2}. Please start a new game.\"", ".", "format", "(", "message", ",", "\".\"", "if", "message", "[", "-", "1", "]", "not", "in", "[", "\".\"", ",", "\",\"", ",", "\";\"", ",", "\":\"", ",", "\"!\"", "]", "else", "\"\"", ",", "the_answer", ")" ]
47
22.083333
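The comma join in _start_again_message builds an Oxford-style list from the answer digits; a small self-contained check of that expression, with a made-up answer, is:

# Standalone check of the join logic above; the digits are illustrative.
# Note: like the method itself, this assumes the answer has at least two digits.
answer = [1, 2, 3]
the_answer = ', '.join([str(d) for d in answer][:-1]) + ', and ' + [str(d) for d in answer][-1]
assert the_answer == '1, 2, and 3'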
def caf_to_fastq(infile, outfile, min_length=0, trim=False): '''Convert a CAF file to fastq. Reads shorter than min_length are not output. If clipping information is in the CAF file (with a line Clipping QUAL ...) and trim=True, then trim the reads''' caf_reader = caf.file_reader(infile) fout = utils.open_file_write(outfile) for c in caf_reader: if trim: if c.clip_start is not None and c.clip_end is not None: c.seq.seq = c.seq.seq[c.clip_start:c.clip_end + 1] c.seq.qual = c.seq.qual[c.clip_start:c.clip_end + 1] else: print('Warning: no clipping info for sequence', c.id, file=sys.stderr) if len(c.seq) >= min_length: print(c.seq, file=fout) utils.close(fout)
[ "def", "caf_to_fastq", "(", "infile", ",", "outfile", ",", "min_length", "=", "0", ",", "trim", "=", "False", ")", ":", "caf_reader", "=", "caf", ".", "file_reader", "(", "infile", ")", "fout", "=", "utils", ".", "open_file_write", "(", "outfile", ")", "for", "c", "in", "caf_reader", ":", "if", "trim", ":", "if", "c", ".", "clip_start", "is", "not", "None", "and", "c", ".", "clip_end", "is", "not", "None", ":", "c", ".", "seq", ".", "seq", "=", "c", ".", "seq", ".", "seq", "[", "c", ".", "clip_start", ":", "c", ".", "clip_end", "+", "1", "]", "c", ".", "seq", ".", "qual", "=", "c", ".", "seq", ".", "qual", "[", "c", ".", "clip_start", ":", "c", ".", "clip_end", "+", "1", "]", "else", ":", "print", "(", "'Warning: no clipping info for sequence'", ",", "c", ".", "id", ",", "file", "=", "sys", ".", "stderr", ")", "if", "len", "(", "c", ".", "seq", ")", ">=", "min_length", ":", "print", "(", "c", ".", "seq", ",", "file", "=", "fout", ")", "utils", ".", "close", "(", "fout", ")" ]
42.833333
30.722222
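A minimal call sketch for caf_to_fastq follows; the file names are assumptions, and trim=True only has an effect when the CAF records carry clipping information, as the docstring notes.

# Illustrative file names; requires the caf/utils helpers this module imports.
caf_to_fastq('assembly.caf', 'assembly.fastq', min_length=50, trim=True)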
def fit_model(ts, sc=None):
    """
    Fits an AR(1) + GARCH(1, 1) model to the given time series.

    Parameters
    ----------
    ts:
        the time series to which we want to fit an AR+GARCH model as a Numpy array

    Returns an ARGARCH model
    """
    assert sc is not None, "Missing SparkContext"

    jvm = sc._jvm
    jmodel = jvm.com.cloudera.sparkts.models.ARGARCH.fitModel(_py2java(sc, Vectors.dense(ts)))
    return ARGARCHModel(jmodel=jmodel, sc=sc)
[ "def", "fit_model", "(", "ts", ",", "sc", "=", "None", ")", ":", "assert", "sc", "!=", "None", ",", "\"Missing SparkContext\"", "jvm", "=", "sc", ".", "_jvm", "jmodel", "=", "jvm", ".", "com", ".", "cloudera", ".", "sparkts", ".", "models", ".", "ARGARCH", ".", "fitModel", "(", "_py2java", "(", "sc", ",", "Vectors", ".", "dense", "(", "ts", ")", ")", ")", "return", "ARGARCHModel", "(", "jmodel", "=", "jmodel", ",", "sc", "=", "sc", ")" ]
29.0625
22.8125
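A hedged sketch of calling fit_model: it presumes an active SparkContext bound to sc and the spark-ts JVM classes on the classpath, neither of which is shown in this record.

import numpy as np

# `sc` is assumed to be a live SparkContext with the sparkts JVM package available.
ts = np.random.randn(200).cumsum()   # a synthetic series, purely for illustration
model = fit_model(ts, sc=sc)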
def get_fit(self, TrapFreq, WidthOfPeakToFit, A_Initial=0.1e10, Gamma_Initial=400, silent=False, MakeFig=True, show_fig=True):
        """
        Function that fits to a peak in the PSD to extract the
        frequency, A factor and Gamma (damping) factor.

        Parameters
        ----------
        TrapFreq : float
            The approximate trapping frequency to use initially
            as the centre of the peak
        WidthOfPeakToFit : float
            The width of the peak to be fitted to. This limits the
            region that the fitting function can see in order to
            stop it from fitting to the wrong peak
        A_Initial : float, optional
            The initial value of the A parameter to use in fitting
        Gamma_Initial : float, optional
            The initial value of the Gamma parameter to use in fitting
        silent : bool, optional
            Whether to print any output when running this function.
            Defaults to False
        MakeFig : bool, optional
            Whether to construct and return the figure object showing
            the fitting. Defaults to True
        show_fig : bool, optional
            Whether to show the figure object when it has been created.
            Defaults to True

        Returns
        -------
        A : uncertainties.ufloat
            Fitting constant A
            A = γ**2*2*Γ_0*(K_b*T_0)/(π*m)
            where:
                γ = conversionFactor
                Γ_0 = Damping factor due to environment
                π = pi
        OmegaTrap : uncertainties.ufloat
            The trapping frequency in the z axis (in angular frequency)
        Gamma : uncertainties.ufloat
            The damping factor Gamma = Γ = Γ_0 + δΓ
            where:
                Γ_0 = Damping factor due to environment
                δΓ = extra damping due to feedback or other effects
        fig : matplotlib.figure.Figure object
            figure object containing the plot
        ax : matplotlib.axes.Axes object
            axes with the data plotted of the:
                - initial data
                - smoothed data
                - initial fit
                - final fit
        """
        if MakeFig == True:
            Params, ParamsErr, fig, ax = fit_PSD(
                self, WidthOfPeakToFit, TrapFreq, A_Initial, Gamma_Initial, MakeFig=MakeFig, show_fig=show_fig)
        else:
            Params, ParamsErr, _ , _ = fit_PSD(
                self, WidthOfPeakToFit, TrapFreq, A_Initial, Gamma_Initial, MakeFig=MakeFig, show_fig=show_fig)

        if silent == False:
            print("\n")
            print("A: {} +- {}% ".format(Params[0],
                                         ParamsErr[0] / Params[0] * 100))
            print(
                "Trap Frequency: {} +- {}% ".format(Params[1], ParamsErr[1] / Params[1] * 100))
            print(
                "Big Gamma: {} +- {}% ".format(Params[2], ParamsErr[2] / Params[2] * 100))

        self.A = _uncertainties.ufloat(Params[0], ParamsErr[0])
        self.OmegaTrap = _uncertainties.ufloat(Params[1], ParamsErr[1])
        self.Gamma = _uncertainties.ufloat(Params[2], ParamsErr[2])

        if MakeFig == True:
            return self.A, self.OmegaTrap, self.Gamma, fig, ax
        else:
            return self.A, self.OmegaTrap, self.Gamma, None, None
[ "def", "get_fit", "(", "self", ",", "TrapFreq", ",", "WidthOfPeakToFit", ",", "A_Initial", "=", "0.1e10", ",", "Gamma_Initial", "=", "400", ",", "silent", "=", "False", ",", "MakeFig", "=", "True", ",", "show_fig", "=", "True", ")", ":", "if", "MakeFig", "==", "True", ":", "Params", ",", "ParamsErr", ",", "fig", ",", "ax", "=", "fit_PSD", "(", "self", ",", "WidthOfPeakToFit", ",", "TrapFreq", ",", "A_Initial", ",", "Gamma_Initial", ",", "MakeFig", "=", "MakeFig", ",", "show_fig", "=", "show_fig", ")", "else", ":", "Params", ",", "ParamsErr", ",", "_", ",", "_", "=", "fit_PSD", "(", "self", ",", "WidthOfPeakToFit", ",", "TrapFreq", ",", "A_Initial", ",", "Gamma_Initial", ",", "MakeFig", "=", "MakeFig", ",", "show_fig", "=", "show_fig", ")", "if", "silent", "==", "False", ":", "print", "(", "\"\\n\"", ")", "print", "(", "\"A: {} +- {}% \"", ".", "format", "(", "Params", "[", "0", "]", ",", "ParamsErr", "[", "0", "]", "/", "Params", "[", "0", "]", "*", "100", ")", ")", "print", "(", "\"Trap Frequency: {} +- {}% \"", ".", "format", "(", "Params", "[", "1", "]", ",", "ParamsErr", "[", "1", "]", "/", "Params", "[", "1", "]", "*", "100", ")", ")", "print", "(", "\"Big Gamma: {} +- {}% \"", ".", "format", "(", "Params", "[", "2", "]", ",", "ParamsErr", "[", "2", "]", "/", "Params", "[", "2", "]", "*", "100", ")", ")", "self", ".", "A", "=", "_uncertainties", ".", "ufloat", "(", "Params", "[", "0", "]", ",", "ParamsErr", "[", "0", "]", ")", "self", ".", "OmegaTrap", "=", "_uncertainties", ".", "ufloat", "(", "Params", "[", "1", "]", ",", "ParamsErr", "[", "1", "]", ")", "self", ".", "Gamma", "=", "_uncertainties", ".", "ufloat", "(", "Params", "[", "2", "]", ",", "ParamsErr", "[", "2", "]", ")", "if", "MakeFig", "==", "True", ":", "return", "self", ".", "A", ",", "self", ".", "OmegaTrap", ",", "self", ".", "Gamma", ",", "fig", ",", "ax", "else", ":", "return", "self", ".", "A", ",", "self", ".", "OmegaTrap", ",", "self", ".", "Gamma", ",", "None", ",", "None" ]
40.974684
20.822785
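Usage of get_fit might look like the sketch below; `data` is an assumed instance of the class defining the method, and the numeric guesses are placeholders.

# `data` is a hypothetical instance exposing get_fit(); the numbers are placeholders.
A, omega_trap, gamma, fig, ax = data.get_fit(TrapFreq=75e3, WidthOfPeakToFit=10e3)
print(A.nominal_value, A.std_dev)    # uncertainties.ufloat exposes value and error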
def alpha_senders(self): """ Access the alpha_senders :returns: twilio.rest.messaging.v1.service.alpha_sender.AlphaSenderList :rtype: twilio.rest.messaging.v1.service.alpha_sender.AlphaSenderList """ if self._alpha_senders is None: self._alpha_senders = AlphaSenderList(self._version, service_sid=self._solution['sid'], ) return self._alpha_senders
[ "def", "alpha_senders", "(", "self", ")", ":", "if", "self", ".", "_alpha_senders", "is", "None", ":", "self", ".", "_alpha_senders", "=", "AlphaSenderList", "(", "self", ".", "_version", ",", "service_sid", "=", "self", ".", "_solution", "[", "'sid'", "]", ",", ")", "return", "self", ".", "_alpha_senders" ]
40.8
20.8
def followers(self):
        """Get the users who follow this question.

        :return: the users who follow this question
        :rtype: Author.Iterable
        :note: if new users start following while this runs, some users may be returned more than once
        """
        self._make_soup()
        followers_url = self.url + 'followers'
        for x in common_follower(followers_url, self.xsrf, self._session):
            yield x
[ "def", "followers", "(", "self", ")", ":", "self", ".", "_make_soup", "(", ")", "followers_url", "=", "self", ".", "url", "+", "'followers'", "for", "x", "in", "common_follower", "(", "followers_url", ",", "self", ".", "xsrf", ",", "self", ".", "_session", ")", ":", "yield", "x" ]
28.545455
14.727273
def missing_or_other_newer(path, other_path, cwd=None):
    """
    Investigate if path is non-existent or older than the provided
    reference path.

    Parameters
    ==========
    path: string
        path to the file which might be missing or too old
    other_path: string
        reference path
    cwd: string
        working directory (root of relative paths)

    Returns
    =======
    True if path is older or missing.
    """
    cwd = cwd or '.'
    path = get_abspath(path, cwd=cwd)
    other_path = get_abspath(other_path, cwd=cwd)
    if not os.path.exists(path):
        return True
    if os.path.getmtime(other_path) - 1e-6 >= os.path.getmtime(path):
        # 1e-6 is needed because http://stackoverflow.com/questions/17086426/
        return True
    return False
[ "def", "missing_or_other_newer", "(", "path", ",", "other_path", ",", "cwd", "=", "None", ")", ":", "cwd", "=", "cwd", "or", "'.'", "path", "=", "get_abspath", "(", "path", ",", "cwd", "=", "cwd", ")", "other_path", "=", "get_abspath", "(", "other_path", ",", "cwd", "=", "cwd", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "return", "True", "if", "os", ".", "path", ".", "getmtime", "(", "other_path", ")", "-", "1e-6", ">=", "os", ".", "path", ".", "getmtime", "(", "path", ")", ":", "# 1e-6 is needed beacuse http://stackoverflow.com/questions/17086426/", "return", "True", "return", "False" ]
27.925926
20.444444
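missing_or_other_newer supports make-style rebuild checks: regenerate a target only when it is absent or older than its source. A sketch with assumed paths:

# Hypothetical paths and rebuild step; only the call signature comes from the source above.
if missing_or_other_newer('build/output.o', 'src/input.c'):
    rebuild_output()  # placeholder for whatever regenerates build/output.o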
def unregister(self, thread): """ Unregisters an existing thread, so that this thread is no longer available. This function is mainly used during plugin deactivation. :param thread: Name of the thread """ if thread not in self.threads.keys(): self.log.warning("Can not unregister thread %s" % thread) else: del (self.threads[thread]) self.__log.debug("Thread %s got unregistered" % thread)
[ "def", "unregister", "(", "self", ",", "thread", ")", ":", "if", "thread", "not", "in", "self", ".", "threads", ".", "keys", "(", ")", ":", "self", ".", "log", ".", "warning", "(", "\"Can not unregister thread %s\"", "%", "thread", ")", "else", ":", "del", "(", "self", ".", "threads", "[", "thread", "]", ")", "self", ".", "__log", ".", "debug", "(", "\"Thread %s got unregistered\"", "%", "thread", ")" ]
36.230769
19.153846
def search(self, start=0, amount=15, order='asc', concise=False, user=True, dependencies=True, comments=True, votes=True, filters=None): """ Search the LEX :param start: Start at this result number :param amount: Number of results to return :param order: Ordering of the results ('asc' or 'desc') :param concise: Return only concise results (name and id) :param user: Should user (authenticated) information be returned (e.g. last_downloaded) :param dependencies: Should a dependency list be returned :param comments: Should a list of comments be returned :param votes: Should a list of votes be returned :param filters: Extra filters to add to the search. At least one is required. See `the LEX API documentation <https://github.com/caspervg/SC4Devotion-LEX-API/blob/master/documentation/Search.md#filtering-parameters>` for more information about the possibilities. Use a dictionary with the name of the filter as key, and the filter value as value. :return: List of search results. Can be empty, if no results match the requested filters. :rtype: list """ if not filters or len(filters.keys()) < 1: raise Exception('Need at least one filter in the "filters" dict') main = { 'start': start, 'amount': amount, 'order': order, } if user: main['user'] = 'true' if dependencies: main['dependencies'] = 'true' if comments: main['comments'] = 'true' if votes: main['votes'] = 'true' if concise: main['concise'] = 'true' main.update(filters) return self._get_json('search', **main)
[ "def", "search", "(", "self", ",", "start", "=", "0", ",", "amount", "=", "15", ",", "order", "=", "'asc'", ",", "concise", "=", "False", ",", "user", "=", "True", ",", "dependencies", "=", "True", ",", "comments", "=", "True", ",", "votes", "=", "True", ",", "filters", "=", "None", ")", ":", "if", "not", "filters", "or", "len", "(", "filters", ".", "keys", "(", ")", ")", "<", "1", ":", "raise", "Exception", "(", "'Need at least one filter in the \"filters\" dict'", ")", "main", "=", "{", "'start'", ":", "start", ",", "'amount'", ":", "amount", ",", "'order'", ":", "order", ",", "}", "if", "user", ":", "main", "[", "'user'", "]", "=", "'true'", "if", "dependencies", ":", "main", "[", "'dependencies'", "]", "=", "'true'", "if", "comments", ":", "main", "[", "'comments'", "]", "=", "'true'", "if", "votes", ":", "main", "[", "'votes'", "]", "=", "'true'", "if", "concise", ":", "main", "[", "'concise'", "]", "=", "'true'", "main", ".", "update", "(", "filters", ")", "return", "self", ".", "_get_json", "(", "'search'", ",", "*", "*", "main", ")" ]
43.365854
24.439024
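Since search() raises unless at least one filter is supplied, a call sketch looks like the following; `client` is an assumed instance of the wrapping class and the 'creator' filter key is illustrative, not a documented LEX filter name.

# `client` and the 'creator' filter key are assumptions, for illustration only.
results = client.search(start=0, amount=10, order='desc',
                        concise=True, filters={'creator': 'some_user'})
for entry in results:
    print(entry)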
def process_step(self, form): """ Stores the validated data for `form` and cleans out validated forms for next steps, as those might be affected by the current step. Note that this behaviour is relied upon by the `LoginView` to prevent users from bypassing the `TokenForm` by going steps back and changing credentials. """ step = self.steps.current # If the form is not-idempotent (cannot be validated multiple times), # the cleaned data should be stored; marking the form as validated. self.storage.validated_step_data[step] = form.cleaned_data # It is assumed that earlier steps affect later steps; so even though # those forms might not be idempotent, we'll remove the validated data # to force re-entry. # form_list = self.get_form_list(idempotent=False) form_list = self.get_form_list() keys = list(form_list.keys()) key = keys.index(step) + 1 for next_step in keys[key:]: self.storage.validated_step_data.pop(next_step, None) return super(IdempotentSessionWizardView, self).process_step(form)
[ "def", "process_step", "(", "self", ",", "form", ")", ":", "step", "=", "self", ".", "steps", ".", "current", "# If the form is not-idempotent (cannot be validated multiple times),", "# the cleaned data should be stored; marking the form as validated.", "self", ".", "storage", ".", "validated_step_data", "[", "step", "]", "=", "form", ".", "cleaned_data", "# It is assumed that earlier steps affect later steps; so even though", "# those forms might not be idempotent, we'll remove the validated data", "# to force re-entry.", "# form_list = self.get_form_list(idempotent=False)", "form_list", "=", "self", ".", "get_form_list", "(", ")", "keys", "=", "list", "(", "form_list", ".", "keys", "(", ")", ")", "key", "=", "keys", ".", "index", "(", "step", ")", "+", "1", "for", "next_step", "in", "keys", "[", "key", ":", "]", ":", "self", ".", "storage", ".", "validated_step_data", ".", "pop", "(", "next_step", ",", "None", ")", "return", "super", "(", "IdempotentSessionWizardView", ",", "self", ")", ".", "process_step", "(", "form", ")" ]
45.96
22.92
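The step-invalidation idea in process_step can be illustrated without Django: validating step k discards the stored data of every later step. A standalone sketch with hypothetical step names:

# Standalone sketch of the invalidation logic above (no Django required).
from collections import OrderedDict

steps = OrderedDict([('auth', None), ('token', None), ('confirm', None)])
validated = {'auth': {'user': 'alice'}, 'token': {'otp': '123456'},
             'confirm': {'ok': True}}

def store_step(step, cleaned_data):
    validated[step] = cleaned_data
    keys = list(steps.keys())
    for later in keys[keys.index(step) + 1:]:
        validated.pop(later, None)

store_step('auth', {'user': 'bob'})   # re-entering 'auth' invalidates later steps
assert 'token' not in validated and 'confirm' not in validated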
def load_http_response(cls, http_response): """ This method should return an instantiated class and set its response to the requests.Response object. """ if not http_response.ok: raise APIResponseError(http_response.text) c = cls(http_response) c.response = http_response RateLimits.getRateLimits(cls.__name__).set(c.response.headers) return c
[ "def", "load_http_response", "(", "cls", ",", "http_response", ")", ":", "if", "not", "http_response", ".", "ok", ":", "raise", "APIResponseError", "(", "http_response", ".", "text", ")", "c", "=", "cls", "(", "http_response", ")", "c", ".", "response", "=", "http_response", "RateLimits", ".", "getRateLimits", "(", "cls", ".", "__name__", ")", ".", "set", "(", "c", ".", "response", ".", "headers", ")", "return", "c" ]
32.076923
16.230769
def _execute_module(self, module_name=None, module_args=None, tmp=None, task_vars=None, persist_files=False, delete_remote_tmp=True, wrap_async=False): """ Collect up a module's execution environment then use it to invoke target.run_module() or helpers.run_module_async() in the target context. """ if module_name is None: module_name = self._task.action if module_args is None: module_args = self._task.args if task_vars is None: task_vars = {} self._update_module_args(module_name, module_args, task_vars) env = {} self._compute_environment_string(env) self._temp_file_gibberish(module_args, wrap_async) self._connection._connect() result = ansible_mitogen.planner.invoke( ansible_mitogen.planner.Invocation( action=self, connection=self._connection, module_name=mitogen.core.to_text(module_name), module_args=mitogen.utils.cast(module_args), task_vars=task_vars, templar=self._templar, env=mitogen.utils.cast(env), wrap_async=wrap_async, timeout_secs=self.get_task_timeout_secs(), ) ) if ansible.__version__ < '2.5' and delete_remote_tmp and \ getattr(self._connection._shell, 'tmpdir', None) is not None: # Built-in actions expected tmpdir to be cleaned up automatically # on _execute_module(). self._remove_tmp_path(self._connection._shell.tmpdir) return result
[ "def", "_execute_module", "(", "self", ",", "module_name", "=", "None", ",", "module_args", "=", "None", ",", "tmp", "=", "None", ",", "task_vars", "=", "None", ",", "persist_files", "=", "False", ",", "delete_remote_tmp", "=", "True", ",", "wrap_async", "=", "False", ")", ":", "if", "module_name", "is", "None", ":", "module_name", "=", "self", ".", "_task", ".", "action", "if", "module_args", "is", "None", ":", "module_args", "=", "self", ".", "_task", ".", "args", "if", "task_vars", "is", "None", ":", "task_vars", "=", "{", "}", "self", ".", "_update_module_args", "(", "module_name", ",", "module_args", ",", "task_vars", ")", "env", "=", "{", "}", "self", ".", "_compute_environment_string", "(", "env", ")", "self", ".", "_temp_file_gibberish", "(", "module_args", ",", "wrap_async", ")", "self", ".", "_connection", ".", "_connect", "(", ")", "result", "=", "ansible_mitogen", ".", "planner", ".", "invoke", "(", "ansible_mitogen", ".", "planner", ".", "Invocation", "(", "action", "=", "self", ",", "connection", "=", "self", ".", "_connection", ",", "module_name", "=", "mitogen", ".", "core", ".", "to_text", "(", "module_name", ")", ",", "module_args", "=", "mitogen", ".", "utils", ".", "cast", "(", "module_args", ")", ",", "task_vars", "=", "task_vars", ",", "templar", "=", "self", ".", "_templar", ",", "env", "=", "mitogen", ".", "utils", ".", "cast", "(", "env", ")", ",", "wrap_async", "=", "wrap_async", ",", "timeout_secs", "=", "self", ".", "get_task_timeout_secs", "(", ")", ",", ")", ")", "if", "ansible", ".", "__version__", "<", "'2.5'", "and", "delete_remote_tmp", "and", "getattr", "(", "self", ".", "_connection", ".", "_shell", ",", "'tmpdir'", ",", "None", ")", "is", "not", "None", ":", "# Built-in actions expected tmpdir to be cleaned up automatically", "# on _execute_module().", "self", ".", "_remove_tmp_path", "(", "self", ".", "_connection", ".", "_shell", ".", "tmpdir", ")", "return", "result" ]
39.738095
18.166667
def match_lists(pos1, pos2, tolerance=MATCH_TOLERANCE, spherical=False):
    """
    Given two sets of x/y positions match the lists, uniquely.

    :rtype: numpy.ma, numpy.ma
    :param pos1: list of x/y positions.
    :param pos2: list of x/y positions.
    :param tolerance: float distance, in pixels, to consider a match

    Algorithm:
        - Find all the members of pos2 that are within tolerance of pos1[idx1].
          These pos2 members are match_group_1
        - Find all the members of pos1 that are within tolerance of match_group_1[idx2].
          These pos1 members are match_group_2
        - If pos1[idx] is in match_group_2 then pos1[idx] is a match of object at match_group_1[idx2]
    """
    assert isinstance(pos1, numpy.ndarray)
    assert isinstance(pos2, numpy.ndarray)

    # build some arrays to hold the index of things that matched between lists.
    npts2 = npts1 = 0
    if len(pos1) > 0:
        npts1 = len(pos1[:, 0])
        pos1_idx_array = numpy.arange(npts1, dtype=numpy.int16)
    if len(pos2) > 0:
        npts2 = len(pos2[:, 0])
        pos2_idx_array = numpy.arange(npts2, dtype=numpy.int16)

    # this is the array of final matched index, -1 indicates no match found.
    match1 = numpy.ma.zeros(npts1, dtype=numpy.int16)
    match1.mask = True

    # this is the array of matches in pos2, -1 indicates no match found.
    match2 = numpy.ma.zeros(npts2, dtype=numpy.int16)
    match2.mask = True

    # if one of the two input arrays are zero length then there is no matching to do.
    if npts1 * npts2 == 0:
        return match1, match2

    for idx1 in range(npts1):
        # compute the distance source idx1 to each member of pos2
        if not spherical:
            sep = numpy.sqrt((pos2[:, 0] - pos1[idx1, 0]) ** 2 + (pos2[:, 1] - pos1[idx1, 1]) ** 2)
        else:
            sep = numpy.sqrt((numpy.cos(numpy.radians(pos1[idx1, 1])) * (pos2[:, 0] - pos1[idx1, 0])) ** 2 +
                             (pos2[:, 1] - pos1[idx1, 1]) ** 2)

        # considered a match if sep is below tolerance and is the closest match available.
        match_condition = numpy.all((sep <= tolerance, sep == sep.min()), axis=0)

        # match_group_1 is list of the indexes of pos2 entries that qualified as possible matches to pos1[idx1]
        match_group_1 = pos2_idx_array[match_condition]

        # For each of those pos2 objects that could be a match to pos1[idx] find the best match in all of pos1
        for idx2 in match_group_1:
            # compute the distance from this pos2 object that is a possible match to pos1[idx1] to all members of pos1
            sep = numpy.sqrt((pos1[:, 0] - pos2[idx2, 0]) ** 2 + (pos1[:, 1] - pos2[idx2, 1]) ** 2)

            # considered a match if sep is below tolerance and is the closest match available.
            match_condition = numpy.all((sep <= tolerance, sep == sep.min()), axis=0)
            match_group_2 = pos1_idx_array[match_condition]

            # Are any of the pos1 members that were matches to the matched pos2 member the pos1[idx] entry?
            if idx1 in match_group_2:
                match1[idx1] = idx2
                match2[idx2] = idx1
                # this BREAK is in here since once we have a match we're done.
                break

    return match1, match2
[ "def", "match_lists", "(", "pos1", ",", "pos2", ",", "tolerance", "=", "MATCH_TOLERANCE", ",", "spherical", "=", "False", ")", ":", "assert", "isinstance", "(", "pos1", ",", "numpy", ".", "ndarray", ")", "assert", "isinstance", "(", "pos2", ",", "numpy", ".", "ndarray", ")", "# build some arrays to hold the index of things that matched between lists.", "npts2", "=", "npts1", "=", "0", "if", "len", "(", "pos1", ")", ">", "0", ":", "npts1", "=", "len", "(", "pos1", "[", ":", ",", "0", "]", ")", "pos1_idx_array", "=", "numpy", ".", "arange", "(", "npts1", ",", "dtype", "=", "numpy", ".", "int16", ")", "if", "len", "(", "pos2", ")", ">", "0", ":", "npts2", "=", "len", "(", "pos2", "[", ":", ",", "0", "]", ")", "pos2_idx_array", "=", "numpy", ".", "arange", "(", "npts2", ",", "dtype", "=", "numpy", ".", "int16", ")", "# this is the array of final matched index, -1 indicates no match found.", "match1", "=", "numpy", ".", "ma", ".", "zeros", "(", "npts1", ",", "dtype", "=", "numpy", ".", "int16", ")", "match1", ".", "mask", "=", "True", "# this is the array of matches in pos2, -1 indicates no match found.", "match2", "=", "numpy", ".", "ma", ".", "zeros", "(", "npts2", ",", "dtype", "=", "numpy", ".", "int16", ")", "match2", ".", "mask", "=", "True", "# if one of the two input arrays are zero length then there is no matching to do.", "if", "npts1", "*", "npts2", "==", "0", ":", "return", "match1", ",", "match2", "for", "idx1", "in", "range", "(", "npts1", ")", ":", "# compute the distance source idx1 to each member of pos2", "if", "not", "spherical", ":", "sep", "=", "numpy", ".", "sqrt", "(", "(", "pos2", "[", ":", ",", "0", "]", "-", "pos1", "[", "idx1", ",", "0", "]", ")", "**", "2", "+", "(", "pos2", "[", ":", ",", "1", "]", "-", "pos1", "[", "idx1", ",", "1", "]", ")", "**", "2", ")", "else", ":", "sep", "=", "numpy", ".", "sqrt", "(", "(", "numpy", ".", "cos", "(", "numpy", ".", "radians", "(", "pos1", "[", "idx1", ",", "1", "]", ")", ")", "*", "(", "pos2", "[", ":", ",", "0", "]", "-", "pos1", "[", "idx1", ",", "0", "]", ")", ")", "**", "2", "+", "(", "pos2", "[", ":", ",", "1", "]", "-", "pos1", "[", "idx1", ",", "1", "]", ")", "**", "2", ")", "# considered a match if sep is below tolerance and is the closest match available.", "match_condition", "=", "numpy", ".", "all", "(", "(", "sep", "<=", "tolerance", ",", "sep", "==", "sep", ".", "min", "(", ")", ")", ",", "axis", "=", "0", ")", "# match_group_1 is list of the indexes of pos2 entries that qualified as possible matches to pos1[idx1]", "match_group_1", "=", "pos2_idx_array", "[", "match_condition", "]", "# For each of those pos2 objects that could be a match to pos1[idx] find the best match in all of pos1", "for", "idx2", "in", "match_group_1", ":", "# compute the distance from this pos2 object that is a possible match to pos1[idx1] to all members of pos1", "sep", "=", "numpy", ".", "sqrt", "(", "(", "pos1", "[", ":", ",", "0", "]", "-", "pos2", "[", "idx2", ",", "0", "]", ")", "**", "2", "+", "(", "pos1", "[", ":", ",", "1", "]", "-", "pos2", "[", "idx2", ",", "1", "]", ")", "**", "2", ")", "# considered a match if sep is below tolerance and is the closest match available.", "match_condition", "=", "numpy", ".", "all", "(", "(", "sep", "<=", "tolerance", ",", "sep", "==", "sep", ".", "min", "(", ")", ")", ",", "axis", "=", "0", ")", "match_group_2", "=", "pos1_idx_array", "[", "match_condition", "]", "# Are any of the pos1 members that were matches to the matched pos2 member the pos1[idx] entry?", "if", "idx1", "in", 
"match_group_2", ":", "match1", "[", "idx1", "]", "=", "idx2", "match2", "[", "idx2", "]", "=", "idx1", "# this BREAK is in here since once we have a match we're done.", "break", "return", "match1", ",", "match2" ]
41.649351
30.298701
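A small self-contained example of match_lists on planar points (spherical=False); the coordinates and tolerance are made up to show one match and one non-match.

import numpy

pos1 = numpy.array([[10.0, 10.0], [50.0, 50.0]])
pos2 = numpy.array([[10.5, 10.2], [80.0, 80.0]])
match1, match2 = match_lists(pos1, pos2, tolerance=2.0)
# match1[0] == 0: pos1[0] and pos2[0] matched each other uniquely.
# match1[1] is masked: nothing in pos2 lies within 2.0 of pos1[1].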
def CopyFromStringTuple(self, time_elements_tuple):
    """Copies time elements from string-based time elements tuple.

    Args:
      time_elements_tuple (Optional[tuple[str, str, str, str, str, str]]):
          time elements, contains year, month, day of month, hours, minutes and
          seconds.

    Raises:
      ValueError: if the time elements tuple is invalid.
    """
    if len(time_elements_tuple) < 6:
      raise ValueError((
          'Invalid time elements tuple: at least 6 elements required, '
          'got: {0:d}').format(len(time_elements_tuple)))

    try:
      year = int(time_elements_tuple[0], 10)
    except (TypeError, ValueError):
      raise ValueError('Invalid year value: {0!s}'.format(
          time_elements_tuple[0]))

    try:
      month = int(time_elements_tuple[1], 10)
    except (TypeError, ValueError):
      raise ValueError('Invalid month value: {0!s}'.format(
          time_elements_tuple[1]))

    try:
      day_of_month = int(time_elements_tuple[2], 10)
    except (TypeError, ValueError):
      raise ValueError('Invalid day of month value: {0!s}'.format(
          time_elements_tuple[2]))

    try:
      hours = int(time_elements_tuple[3], 10)
    except (TypeError, ValueError):
      raise ValueError('Invalid hours value: {0!s}'.format(
          time_elements_tuple[3]))

    try:
      minutes = int(time_elements_tuple[4], 10)
    except (TypeError, ValueError):
      raise ValueError('Invalid minutes value: {0!s}'.format(
          time_elements_tuple[4]))

    try:
      seconds = int(time_elements_tuple[5], 10)
    except (TypeError, ValueError):
      raise ValueError('Invalid seconds value: {0!s}'.format(
          time_elements_tuple[5]))

    self._normalized_timestamp = None
    self._number_of_seconds = self._GetNumberOfSecondsFromElements(
        year, month, day_of_month, hours, minutes, seconds)

    self._time_elements_tuple = (
        year, month, day_of_month, hours, minutes, seconds)
[ "def", "CopyFromStringTuple", "(", "self", ",", "time_elements_tuple", ")", ":", "if", "len", "(", "time_elements_tuple", ")", "<", "6", ":", "raise", "ValueError", "(", "(", "'Invalid time elements tuple at least 6 elements required,'", "'got: {0:d}'", ")", ".", "format", "(", "len", "(", "time_elements_tuple", ")", ")", ")", "try", ":", "year", "=", "int", "(", "time_elements_tuple", "[", "0", "]", ",", "10", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "'Invalid year value: {0!s}'", ".", "format", "(", "time_elements_tuple", "[", "0", "]", ")", ")", "try", ":", "month", "=", "int", "(", "time_elements_tuple", "[", "1", "]", ",", "10", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "'Invalid month value: {0!s}'", ".", "format", "(", "time_elements_tuple", "[", "1", "]", ")", ")", "try", ":", "day_of_month", "=", "int", "(", "time_elements_tuple", "[", "2", "]", ",", "10", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "'Invalid day of month value: {0!s}'", ".", "format", "(", "time_elements_tuple", "[", "2", "]", ")", ")", "try", ":", "hours", "=", "int", "(", "time_elements_tuple", "[", "3", "]", ",", "10", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "'Invalid hours value: {0!s}'", ".", "format", "(", "time_elements_tuple", "[", "3", "]", ")", ")", "try", ":", "minutes", "=", "int", "(", "time_elements_tuple", "[", "4", "]", ",", "10", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "'Invalid minutes value: {0!s}'", ".", "format", "(", "time_elements_tuple", "[", "4", "]", ")", ")", "try", ":", "seconds", "=", "int", "(", "time_elements_tuple", "[", "5", "]", ",", "10", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "'Invalid seconds value: {0!s}'", ".", "format", "(", "time_elements_tuple", "[", "5", "]", ")", ")", "self", ".", "_normalized_timestamp", "=", "None", "self", ".", "_number_of_seconds", "=", "self", ".", "_GetNumberOfSecondsFromElements", "(", "year", ",", "month", ",", "day_of_month", ",", "hours", ",", "minutes", ",", "seconds", ")", "self", ".", "_time_elements_tuple", "=", "(", "year", ",", "month", ",", "day_of_month", ",", "hours", ",", "minutes", ",", "seconds", ")" ]
33.649123
19.385965
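The tuple entries are strings parsed base-10, so a call sketch looks like this; `elements` is an assumed instance of the defining class.

# `elements` is a hypothetical instance of the class defining CopyFromStringTuple.
elements.CopyFromStringTuple(('2024', '01', '15', '12', '30', '45'))
# A shorter tuple raises ValueError: at least 6 elements are required.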
async def send_debug(self): """ Sends the debug draw execution. Put this after your debug creation functions. """ await self._execute( debug=sc_pb.RequestDebug( debug=[ debug_pb.DebugCommand( draw=debug_pb.DebugDraw( text=self._debug_texts if self._debug_texts else None, lines=self._debug_lines if self._debug_lines else None, boxes=self._debug_boxes if self._debug_boxes else None, spheres=self._debug_spheres if self._debug_spheres else None, ) ) ] ) ) self._debug_texts.clear() self._debug_lines.clear() self._debug_boxes.clear() self._debug_spheres.clear()
[ "async", "def", "send_debug", "(", "self", ")", ":", "await", "self", ".", "_execute", "(", "debug", "=", "sc_pb", ".", "RequestDebug", "(", "debug", "=", "[", "debug_pb", ".", "DebugCommand", "(", "draw", "=", "debug_pb", ".", "DebugDraw", "(", "text", "=", "self", ".", "_debug_texts", "if", "self", ".", "_debug_texts", "else", "None", ",", "lines", "=", "self", ".", "_debug_lines", "if", "self", ".", "_debug_lines", "else", "None", ",", "boxes", "=", "self", ".", "_debug_boxes", "if", "self", ".", "_debug_boxes", "else", "None", ",", "spheres", "=", "self", ".", "_debug_spheres", "if", "self", ".", "_debug_spheres", "else", "None", ",", ")", ")", "]", ")", ")", "self", ".", "_debug_texts", ".", "clear", "(", ")", "self", ".", "_debug_lines", ".", "clear", "(", ")", "self", ".", "_debug_boxes", ".", "clear", "(", ")", "self", ".", "_debug_spheres", ".", "clear", "(", ")" ]
42.7
18.65
def _process_info(raw_info: VideoInfo) -> VideoInfo: """Process raw information about the video (parse date, etc.).""" raw_date = raw_info.date date = datetime.strptime(raw_date, '%Y-%m-%d %H:%M') # 2018-04-05 17:00 video_info = raw_info._replace(date=date) return video_info
[ "def", "_process_info", "(", "raw_info", ":", "VideoInfo", ")", "->", "VideoInfo", ":", "raw_date", "=", "raw_info", ".", "date", "date", "=", "datetime", ".", "strptime", "(", "raw_date", ",", "'%Y-%m-%d %H:%M'", ")", "# 2018-04-05 17:00", "video_info", "=", "raw_info", ".", "_replace", "(", "date", "=", "date", ")", "return", "video_info" ]
51.833333
14
def datasets_create_version(self, owner_slug, dataset_slug, dataset_new_version_request, **kwargs): # noqa: E501 """Create a new dataset version # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.datasets_create_version(owner_slug, dataset_slug, dataset_new_version_request, async_req=True) >>> result = thread.get() :param async_req bool :param str owner_slug: Dataset owner (required) :param str dataset_slug: Dataset name (required) :param DatasetNewVersionRequest dataset_new_version_request: Information for creating a new dataset version (required) :return: Result If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.datasets_create_version_with_http_info(owner_slug, dataset_slug, dataset_new_version_request, **kwargs) # noqa: E501 else: (data) = self.datasets_create_version_with_http_info(owner_slug, dataset_slug, dataset_new_version_request, **kwargs) # noqa: E501 return data
[ "def", "datasets_create_version", "(", "self", ",", "owner_slug", ",", "dataset_slug", ",", "dataset_new_version_request", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "datasets_create_version_with_http_info", "(", "owner_slug", ",", "dataset_slug", ",", "dataset_new_version_request", ",", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "datasets_create_version_with_http_info", "(", "owner_slug", ",", "dataset_slug", ",", "dataset_new_version_request", ",", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
57.090909
32.727273
def init_blueprint(self, blueprint, path='templates.yaml'):
        """Initialize a Flask Blueprint, similar to init_app, but without
        access to the application config.

        Keyword Arguments:
            blueprint {Flask Blueprint} -- Flask Blueprint instance to initialize (Default: {None})
            path {str} -- path to templates yaml file, relative to Blueprint (Default: {'templates.yaml'})
        """
        if self._route is not None:
            raise TypeError("route cannot be set when using blueprints!")

        # we need to tuck our reference to this Ask instance into the blueprint object and find it later!
        blueprint.ask = self

        # BlueprintSetupState.add_url_rule gets called underneath the covers and
        # concats the rule string, so we should set to an empty string to allow
        # Blueprint('blueprint_api', __name__, url_prefix="/ask") to result in
        # exposing the rule at "/ask" and not "/ask/".
        blueprint.add_url_rule("", view_func=self._flask_view_func, methods=['POST'])
        blueprint.jinja_loader = ChoiceLoader([YamlLoader(blueprint, path)])
[ "def", "init_blueprint", "(", "self", ",", "blueprint", ",", "path", "=", "'templates.yaml'", ")", ":", "if", "self", ".", "_route", "is", "not", "None", ":", "raise", "TypeError", "(", "\"route cannot be set when using blueprints!\"", ")", "# we need to tuck our reference to this Ask instance into the blueprint object and find it later!", "blueprint", ".", "ask", "=", "self", "# BlueprintSetupState.add_url_rule gets called underneath the covers and", "# concats the rule string, so we should set to an empty string to allow", "# Blueprint('blueprint_api', __name__, url_prefix=\"/ask\") to result in", "# exposing the rule at \"/ask\" and not \"/ask/\".", "blueprint", ".", "add_url_rule", "(", "\"\"", ",", "view_func", "=", "self", ".", "_flask_view_func", ",", "methods", "=", "[", "'POST'", "]", ")", "blueprint", ".", "jinja_loader", "=", "ChoiceLoader", "(", "[", "YamlLoader", "(", "blueprint", ",", "path", ")", "]", ")" ]
55.6
30.55
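Wiring this up might look like the sketch below; the Blueprint arguments are taken from the comment inside the method, while the `ask` instance and the app registration step are assumptions.

from flask import Blueprint

# `ask` is an assumed instance of the class defining init_blueprint().
blueprint = Blueprint('blueprint_api', __name__, url_prefix="/ask")
ask.init_blueprint(blueprint, path='templates.yaml')
# app.register_blueprint(blueprint) would then expose the handler at "/ask".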
def begin(self): """ This method will implement the handshake of the Bitcoin protocol. It will send the Version message, and block until it receives a VerAck. Once we receive the version, we'll send the verack, and begin downloading. """ log.debug("handshake (version %s)" % PROTOCOL_VERSION) version = Version() version.services = 0 # can't send blocks log.debug("send Version") self.send_message(version)
[ "def", "begin", "(", "self", ")", ":", "log", ".", "debug", "(", "\"handshake (version %s)\"", "%", "PROTOCOL_VERSION", ")", "version", "=", "Version", "(", ")", "version", ".", "services", "=", "0", "# can't send blocks", "log", ".", "debug", "(", "\"send Version\"", ")", "self", ".", "send_message", "(", "version", ")" ]
37.923077
11.615385
def solve(self,problem): """ Solves optimization problem. Parameters ---------- problem : Object """ # Local vars norm2 = self.norm2 norminf = self.norminf parameters = self.parameters # Parameters tol = parameters['tol'] maxiter = parameters['maxiter'] quiet = parameters['quiet'] sigma = parameters['sigma'] eps = parameters['eps'] eps_cold = parameters['eps_cold'] # Problem if not isinstance(problem,QuadProblem): problem = cast_problem(problem) quad_problem = QuadProblem(None,None,None,None,None,None,problem=problem) else: quad_problem = problem self.problem = problem self.quad_problem = quad_problem # Linsolver self.linsolver = new_linsolver(parameters['linsolver'],'symmetric') # Reset self.reset() # Checks if not np.all(problem.l <= problem.u): raise OptSolverError_NoInterior(self) # Data self.H = quad_problem.H self.g = quad_problem.g self.A = quad_problem.A self.AT = quad_problem.A.T self.b = quad_problem.b self.l = quad_problem.l-tol/10. self.u = quad_problem.u+tol/10. self.n = quad_problem.H.shape[0] self.m = quad_problem.A.shape[0] self.e = np.ones(self.n) self.I = eye(self.n,format='coo') self.Onm = coo_matrix((self.n,self.m)) self.Omm = coo_matrix((self.m,self.m)) # Initial primal if quad_problem.x is None: self.x = (self.u + self.l)/2. else: self.x = np.maximum(np.minimum(quad_problem.x,problem.u),problem.l) # Initial duals if quad_problem.lam is None: self.lam = np.zeros(self.m) else: self.lam = quad_problem.lam.copy() if quad_problem.mu is None: self.mu = np.ones(self.x.size)*eps_cold else: self.mu = np.maximum(quad_problem.mu,eps) if quad_problem.pi is None: self.pi = np.ones(self.x.size)*eps_cold else: self.pi = np.maximum(quad_problem.pi,eps) # Check interior try: assert(np.all(self.l < self.x)) assert(np.all(self.x < self.u)) assert(np.all(self.mu > 0)) assert(np.all(self.pi > 0)) except AssertionError: raise OptSolverError_Infeasibility(self) # Init vector self.y = np.hstack((self.x,self.lam,self.mu,self.pi)) # Complementarity measures self.eta_mu = np.dot(self.mu,self.u-self.x)/self.x.size self.eta_pi = np.dot(self.pi,self.x-self.l)/self.x.size # Objective scaling fdata = self.func(self.y) self.obj_sca = np.maximum(norminf(self.g+self.H*self.x)/10.,1.) self.H = self.H/self.obj_sca self.g = self.g/self.obj_sca fdata = self.func(self.y) # Header if not quiet: print('\nSolver: IQP') print('-----------') # Outer s = 0. 
self.k = 0 while True: # Complementarity measures self.eta_mu = np.dot(self.mu,self.u-self.x)/self.x.size self.eta_pi = np.dot(self.pi,self.x-self.l)/self.x.size # Init eval fdata = self.func(self.y) fmax = norminf(fdata.f) gmax = norminf(fdata.GradF) # Done if fmax < tol and sigma*np.maximum(self.eta_mu,self.eta_pi) < tol: self.set_status(self.STATUS_SOLVED) self.set_error_msg('') return # Target tau = sigma*norminf(fdata.GradF) # Header if not quiet: if self.k > 0: print('') print('{0:^3s}'.format('iter'), end=' ') print('{0:^9s}'.format('phi'), end=' ') print('{0:^9s}'.format('fmax'), end=' ') print('{0:^9s}'.format('gmax'), end=' ') print('{0:^8s}'.format('cu'), end=' ') print('{0:^8s}'.format('cl'), end=' ') print('{0:^8s}'.format('s')) # Inner while True: # Eval fdata = self.func(self.y) fmax = norminf(fdata.f) gmax = norminf(fdata.GradF) compu = norminf(self.mu*(self.u-self.x)) compl = norminf(self.pi*(self.x-self.l)) phi = (0.5*np.dot(self.x,self.H*self.x)+np.dot(self.g,self.x))*self.obj_sca # Show progress if not quiet: print('{0:^3d}'.format(self.k), end=' ') print('{0:^9.2e}'.format(phi), end=' ') print('{0:^9.2e}'.format(fmax), end=' ') print('{0:^9.2e}'.format(gmax), end=' ') print('{0:^8.1e}'.format(compu), end=' ') print('{0:^8.1e}'.format(compl), end=' ') print('{0:^8.1e}'.format(s)) # Done if gmax < tau: break # Done if fmax < tol and np.maximum(compu,compl) < tol: break # Maxiters if self.k >= maxiter: raise OptSolverError_MaxIters(self) # Search direction ux = self.u-self.x xl = self.x-self.l D1 = spdiags(self.mu/ux,0,self.n,self.n,format='coo') D2 = spdiags(self.pi/xl,0,self.n,self.n,format='coo') fbar = np.hstack((-fdata.rd+fdata.ru/ux-fdata.rl/xl,fdata.rp)) if self.A.shape[0] > 0: Jbar = bmat([[tril(self.H)+D1+D2,None], [-self.A,self.Omm]],format='coo') else: Jbar = bmat([[tril(self.H)+D1+D2]], format='coo') try: if not self.linsolver.is_analyzed(): self.linsolver.analyze(Jbar) pbar = self.linsolver.factorize_and_solve(Jbar,fbar) except RuntimeError: raise OptSolverError_BadLinSystem(self) px = pbar[:self.n] pmu = (-fdata.ru + self.mu*px)/ux ppi = (-fdata.rl - self.pi*px)/xl p = np.hstack((pbar,pmu,ppi)) # Steplength bounds indices = px > 0 s1 = np.min(np.hstack(((1.-eps)*(self.u-self.x)[indices]/px[indices],np.inf))) indices = px < 0 s2 = np.min(np.hstack(((eps-1.)*(self.x-self.l)[indices]/px[indices],np.inf))) indices = pmu < 0 s3 = np.min(np.hstack(((eps-1.)*self.mu[indices]/pmu[indices],np.inf))) indices = ppi < 0 s4 = np.min(np.hstack(((eps-1.)*self.pi[indices]/ppi[indices],np.inf))) smax = np.min([s1,s2,s3,s4]) # Line search s,fdata = self.line_search(self.y,p,fdata.F,fdata.GradF,self.func,smax) # Update x self.y += s*p self.k += 1 self.x,self.lam,self.mu,self.pi = self.extract_components(self.y) # Check try: assert(np.all(self.x < self.u)) assert(np.all(self.x > self.l)) assert(np.all(self.mu > 0)) assert(np.all(self.pi > 0)) except AssertionError: raise OptSolverError_Infeasibility(self)
[ "def", "solve", "(", "self", ",", "problem", ")", ":", "# Local vars", "norm2", "=", "self", ".", "norm2", "norminf", "=", "self", ".", "norminf", "parameters", "=", "self", ".", "parameters", "# Parameters", "tol", "=", "parameters", "[", "'tol'", "]", "maxiter", "=", "parameters", "[", "'maxiter'", "]", "quiet", "=", "parameters", "[", "'quiet'", "]", "sigma", "=", "parameters", "[", "'sigma'", "]", "eps", "=", "parameters", "[", "'eps'", "]", "eps_cold", "=", "parameters", "[", "'eps_cold'", "]", "# Problem", "if", "not", "isinstance", "(", "problem", ",", "QuadProblem", ")", ":", "problem", "=", "cast_problem", "(", "problem", ")", "quad_problem", "=", "QuadProblem", "(", "None", ",", "None", ",", "None", ",", "None", ",", "None", ",", "None", ",", "problem", "=", "problem", ")", "else", ":", "quad_problem", "=", "problem", "self", ".", "problem", "=", "problem", "self", ".", "quad_problem", "=", "quad_problem", "# Linsolver", "self", ".", "linsolver", "=", "new_linsolver", "(", "parameters", "[", "'linsolver'", "]", ",", "'symmetric'", ")", "# Reset", "self", ".", "reset", "(", ")", "# Checks", "if", "not", "np", ".", "all", "(", "problem", ".", "l", "<=", "problem", ".", "u", ")", ":", "raise", "OptSolverError_NoInterior", "(", "self", ")", "# Data", "self", ".", "H", "=", "quad_problem", ".", "H", "self", ".", "g", "=", "quad_problem", ".", "g", "self", ".", "A", "=", "quad_problem", ".", "A", "self", ".", "AT", "=", "quad_problem", ".", "A", ".", "T", "self", ".", "b", "=", "quad_problem", ".", "b", "self", ".", "l", "=", "quad_problem", ".", "l", "-", "tol", "/", "10.", "self", ".", "u", "=", "quad_problem", ".", "u", "+", "tol", "/", "10.", "self", ".", "n", "=", "quad_problem", ".", "H", ".", "shape", "[", "0", "]", "self", ".", "m", "=", "quad_problem", ".", "A", ".", "shape", "[", "0", "]", "self", ".", "e", "=", "np", ".", "ones", "(", "self", ".", "n", ")", "self", ".", "I", "=", "eye", "(", "self", ".", "n", ",", "format", "=", "'coo'", ")", "self", ".", "Onm", "=", "coo_matrix", "(", "(", "self", ".", "n", ",", "self", ".", "m", ")", ")", "self", ".", "Omm", "=", "coo_matrix", "(", "(", "self", ".", "m", ",", "self", ".", "m", ")", ")", "# Initial primal", "if", "quad_problem", ".", "x", "is", "None", ":", "self", ".", "x", "=", "(", "self", ".", "u", "+", "self", ".", "l", ")", "/", "2.", "else", ":", "self", ".", "x", "=", "np", ".", "maximum", "(", "np", ".", "minimum", "(", "quad_problem", ".", "x", ",", "problem", ".", "u", ")", ",", "problem", ".", "l", ")", "# Initial duals", "if", "quad_problem", ".", "lam", "is", "None", ":", "self", ".", "lam", "=", "np", ".", "zeros", "(", "self", ".", "m", ")", "else", ":", "self", ".", "lam", "=", "quad_problem", ".", "lam", ".", "copy", "(", ")", "if", "quad_problem", ".", "mu", "is", "None", ":", "self", ".", "mu", "=", "np", ".", "ones", "(", "self", ".", "x", ".", "size", ")", "*", "eps_cold", "else", ":", "self", ".", "mu", "=", "np", ".", "maximum", "(", "quad_problem", ".", "mu", ",", "eps", ")", "if", "quad_problem", ".", "pi", "is", "None", ":", "self", ".", "pi", "=", "np", ".", "ones", "(", "self", ".", "x", ".", "size", ")", "*", "eps_cold", "else", ":", "self", ".", "pi", "=", "np", ".", "maximum", "(", "quad_problem", ".", "pi", ",", "eps", ")", "# Check interior", "try", ":", "assert", "(", "np", ".", "all", "(", "self", ".", "l", "<", "self", ".", "x", ")", ")", "assert", "(", "np", ".", "all", "(", "self", ".", "x", "<", "self", ".", "u", ")", ")", "assert", "(", "np", ".", "all", "(", "self", ".", "mu", 
">", "0", ")", ")", "assert", "(", "np", ".", "all", "(", "self", ".", "pi", ">", "0", ")", ")", "except", "AssertionError", ":", "raise", "OptSolverError_Infeasibility", "(", "self", ")", "# Init vector", "self", ".", "y", "=", "np", ".", "hstack", "(", "(", "self", ".", "x", ",", "self", ".", "lam", ",", "self", ".", "mu", ",", "self", ".", "pi", ")", ")", "# Complementarity measures", "self", ".", "eta_mu", "=", "np", ".", "dot", "(", "self", ".", "mu", ",", "self", ".", "u", "-", "self", ".", "x", ")", "/", "self", ".", "x", ".", "size", "self", ".", "eta_pi", "=", "np", ".", "dot", "(", "self", ".", "pi", ",", "self", ".", "x", "-", "self", ".", "l", ")", "/", "self", ".", "x", ".", "size", "# Objective scaling", "fdata", "=", "self", ".", "func", "(", "self", ".", "y", ")", "self", ".", "obj_sca", "=", "np", ".", "maximum", "(", "norminf", "(", "self", ".", "g", "+", "self", ".", "H", "*", "self", ".", "x", ")", "/", "10.", ",", "1.", ")", "self", ".", "H", "=", "self", ".", "H", "/", "self", ".", "obj_sca", "self", ".", "g", "=", "self", ".", "g", "/", "self", ".", "obj_sca", "fdata", "=", "self", ".", "func", "(", "self", ".", "y", ")", "# Header", "if", "not", "quiet", ":", "print", "(", "'\\nSolver: IQP'", ")", "print", "(", "'-----------'", ")", "# Outer", "s", "=", "0.", "self", ".", "k", "=", "0", "while", "True", ":", "# Complementarity measures", "self", ".", "eta_mu", "=", "np", ".", "dot", "(", "self", ".", "mu", ",", "self", ".", "u", "-", "self", ".", "x", ")", "/", "self", ".", "x", ".", "size", "self", ".", "eta_pi", "=", "np", ".", "dot", "(", "self", ".", "pi", ",", "self", ".", "x", "-", "self", ".", "l", ")", "/", "self", ".", "x", ".", "size", "# Init eval", "fdata", "=", "self", ".", "func", "(", "self", ".", "y", ")", "fmax", "=", "norminf", "(", "fdata", ".", "f", ")", "gmax", "=", "norminf", "(", "fdata", ".", "GradF", ")", "# Done", "if", "fmax", "<", "tol", "and", "sigma", "*", "np", ".", "maximum", "(", "self", ".", "eta_mu", ",", "self", ".", "eta_pi", ")", "<", "tol", ":", "self", ".", "set_status", "(", "self", ".", "STATUS_SOLVED", ")", "self", ".", "set_error_msg", "(", "''", ")", "return", "# Target", "tau", "=", "sigma", "*", "norminf", "(", "fdata", ".", "GradF", ")", "# Header", "if", "not", "quiet", ":", "if", "self", ".", "k", ">", "0", ":", "print", "(", "''", ")", "print", "(", "'{0:^3s}'", ".", "format", "(", "'iter'", ")", ",", "end", "=", "' '", ")", "print", "(", "'{0:^9s}'", ".", "format", "(", "'phi'", ")", ",", "end", "=", "' '", ")", "print", "(", "'{0:^9s}'", ".", "format", "(", "'fmax'", ")", ",", "end", "=", "' '", ")", "print", "(", "'{0:^9s}'", ".", "format", "(", "'gmax'", ")", ",", "end", "=", "' '", ")", "print", "(", "'{0:^8s}'", ".", "format", "(", "'cu'", ")", ",", "end", "=", "' '", ")", "print", "(", "'{0:^8s}'", ".", "format", "(", "'cl'", ")", ",", "end", "=", "' '", ")", "print", "(", "'{0:^8s}'", ".", "format", "(", "'s'", ")", ")", "# Inner", "while", "True", ":", "# Eval", "fdata", "=", "self", ".", "func", "(", "self", ".", "y", ")", "fmax", "=", "norminf", "(", "fdata", ".", "f", ")", "gmax", "=", "norminf", "(", "fdata", ".", "GradF", ")", "compu", "=", "norminf", "(", "self", ".", "mu", "*", "(", "self", ".", "u", "-", "self", ".", "x", ")", ")", "compl", "=", "norminf", "(", "self", ".", "pi", "*", "(", "self", ".", "x", "-", "self", ".", "l", ")", ")", "phi", "=", "(", "0.5", "*", "np", ".", "dot", "(", "self", ".", "x", ",", "self", ".", "H", "*", "self", ".", "x", ")", "+", "np", ".", "dot", "(", "self", ".", 
"g", ",", "self", ".", "x", ")", ")", "*", "self", ".", "obj_sca", "# Show progress", "if", "not", "quiet", ":", "print", "(", "'{0:^3d}'", ".", "format", "(", "self", ".", "k", ")", ",", "end", "=", "' '", ")", "print", "(", "'{0:^9.2e}'", ".", "format", "(", "phi", ")", ",", "end", "=", "' '", ")", "print", "(", "'{0:^9.2e}'", ".", "format", "(", "fmax", ")", ",", "end", "=", "' '", ")", "print", "(", "'{0:^9.2e}'", ".", "format", "(", "gmax", ")", ",", "end", "=", "' '", ")", "print", "(", "'{0:^8.1e}'", ".", "format", "(", "compu", ")", ",", "end", "=", "' '", ")", "print", "(", "'{0:^8.1e}'", ".", "format", "(", "compl", ")", ",", "end", "=", "' '", ")", "print", "(", "'{0:^8.1e}'", ".", "format", "(", "s", ")", ")", "# Done", "if", "gmax", "<", "tau", ":", "break", "# Done", "if", "fmax", "<", "tol", "and", "np", ".", "maximum", "(", "compu", ",", "compl", ")", "<", "tol", ":", "break", "# Maxiters", "if", "self", ".", "k", ">=", "maxiter", ":", "raise", "OptSolverError_MaxIters", "(", "self", ")", "# Search direction", "ux", "=", "self", ".", "u", "-", "self", ".", "x", "xl", "=", "self", ".", "x", "-", "self", ".", "l", "D1", "=", "spdiags", "(", "self", ".", "mu", "/", "ux", ",", "0", ",", "self", ".", "n", ",", "self", ".", "n", ",", "format", "=", "'coo'", ")", "D2", "=", "spdiags", "(", "self", ".", "pi", "/", "xl", ",", "0", ",", "self", ".", "n", ",", "self", ".", "n", ",", "format", "=", "'coo'", ")", "fbar", "=", "np", ".", "hstack", "(", "(", "-", "fdata", ".", "rd", "+", "fdata", ".", "ru", "/", "ux", "-", "fdata", ".", "rl", "/", "xl", ",", "fdata", ".", "rp", ")", ")", "if", "self", ".", "A", ".", "shape", "[", "0", "]", ">", "0", ":", "Jbar", "=", "bmat", "(", "[", "[", "tril", "(", "self", ".", "H", ")", "+", "D1", "+", "D2", ",", "None", "]", ",", "[", "-", "self", ".", "A", ",", "self", ".", "Omm", "]", "]", ",", "format", "=", "'coo'", ")", "else", ":", "Jbar", "=", "bmat", "(", "[", "[", "tril", "(", "self", ".", "H", ")", "+", "D1", "+", "D2", "]", "]", ",", "format", "=", "'coo'", ")", "try", ":", "if", "not", "self", ".", "linsolver", ".", "is_analyzed", "(", ")", ":", "self", ".", "linsolver", ".", "analyze", "(", "Jbar", ")", "pbar", "=", "self", ".", "linsolver", ".", "factorize_and_solve", "(", "Jbar", ",", "fbar", ")", "except", "RuntimeError", ":", "raise", "OptSolverError_BadLinSystem", "(", "self", ")", "px", "=", "pbar", "[", ":", "self", ".", "n", "]", "pmu", "=", "(", "-", "fdata", ".", "ru", "+", "self", ".", "mu", "*", "px", ")", "/", "ux", "ppi", "=", "(", "-", "fdata", ".", "rl", "-", "self", ".", "pi", "*", "px", ")", "/", "xl", "p", "=", "np", ".", "hstack", "(", "(", "pbar", ",", "pmu", ",", "ppi", ")", ")", "# Steplength bounds", "indices", "=", "px", ">", "0", "s1", "=", "np", ".", "min", "(", "np", ".", "hstack", "(", "(", "(", "1.", "-", "eps", ")", "*", "(", "self", ".", "u", "-", "self", ".", "x", ")", "[", "indices", "]", "/", "px", "[", "indices", "]", ",", "np", ".", "inf", ")", ")", ")", "indices", "=", "px", "<", "0", "s2", "=", "np", ".", "min", "(", "np", ".", "hstack", "(", "(", "(", "eps", "-", "1.", ")", "*", "(", "self", ".", "x", "-", "self", ".", "l", ")", "[", "indices", "]", "/", "px", "[", "indices", "]", ",", "np", ".", "inf", ")", ")", ")", "indices", "=", "pmu", "<", "0", "s3", "=", "np", ".", "min", "(", "np", ".", "hstack", "(", "(", "(", "eps", "-", "1.", ")", "*", "self", ".", "mu", "[", "indices", "]", "/", "pmu", "[", "indices", "]", ",", "np", ".", "inf", ")", ")", ")", "indices", "=", "ppi", "<", "0", "s4", 
"=", "np", ".", "min", "(", "np", ".", "hstack", "(", "(", "(", "eps", "-", "1.", ")", "*", "self", ".", "pi", "[", "indices", "]", "/", "ppi", "[", "indices", "]", ",", "np", ".", "inf", ")", ")", ")", "smax", "=", "np", ".", "min", "(", "[", "s1", ",", "s2", ",", "s3", ",", "s4", "]", ")", "# Line search", "s", ",", "fdata", "=", "self", ".", "line_search", "(", "self", ".", "y", ",", "p", ",", "fdata", ".", "F", ",", "fdata", ".", "GradF", ",", "self", ".", "func", ",", "smax", ")", "# Update x", "self", ".", "y", "+=", "s", "*", "p", "self", ".", "k", "+=", "1", "self", ".", "x", ",", "self", ".", "lam", ",", "self", ".", "mu", ",", "self", ".", "pi", "=", "self", ".", "extract_components", "(", "self", ".", "y", ")", "# Check", "try", ":", "assert", "(", "np", ".", "all", "(", "self", ".", "x", "<", "self", ".", "u", ")", ")", "assert", "(", "np", ".", "all", "(", "self", ".", "x", ">", "self", ".", "l", ")", ")", "assert", "(", "np", ".", "all", "(", "self", ".", "mu", ">", "0", ")", ")", "assert", "(", "np", ".", "all", "(", "self", ".", "pi", ">", "0", ")", ")", "except", "AssertionError", ":", "raise", "OptSolverError_Infeasibility", "(", "self", ")" ]
34.981982
17.369369
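The interior-point QP solver tokenized above bounds each Newton step with a fraction-to-boundary rule: the four minima s1..s4 keep the primal iterate strictly inside [l, u] and the multipliers mu, pi strictly positive. Below is a minimal NumPy sketch of that rule; the helper name and the assumption that px, pmu, ppi are the direction components are inferred from the token stream, not part of the original solver class.

import numpy as np

def max_step_to_boundary(x, l, u, mu, pi, px, pmu, ppi, eps=1e-3):
    """Largest step s so x + s*px stays strictly inside [l, u] and
    mu + s*pmu, pi + s*ppi stay strictly positive."""
    def bound(num, den, mask):
        # Minimum ratio over the components moving toward a boundary,
        # or +inf when no component is active.
        return np.min(np.hstack((num[mask] / den[mask], np.inf)))
    s1 = bound((1. - eps) * (u - x), px, px > 0)  # x heading toward u
    s2 = bound((eps - 1.) * (x - l), px, px < 0)  # x heading toward l
    s3 = bound((eps - 1.) * mu, pmu, pmu < 0)     # mu heading toward 0
    s4 = bound((eps - 1.) * pi, ppi, ppi < 0)     # pi heading toward 0
    return min(s1, s2, s3, s4)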
def set_setting(self, setting, value): """ Set a specific setting value. This will overwrite the current setting value for the specified setting. Args: setting (string): The name of the setting to set (e.g., 'certificate_path', 'hostname'). Required. value (misc): The value of the setting to set. Type varies based on setting. Required. Raises: ConfigurationError: Raised if the setting is not supported or if the setting value is invalid. """ if setting not in self._expected_settings + self._optional_settings: raise exceptions.ConfigurationError( "Setting '{0}' is not supported.".format(setting) ) if setting == 'hostname': self._set_hostname(value) elif setting == 'port': self._set_port(value) elif setting == 'certificate_path': self._set_certificate_path(value) elif setting == 'key_path': self._set_key_path(value) elif setting == 'ca_path': self._set_ca_path(value) elif setting == 'auth_suite': self._set_auth_suite(value) elif setting == 'policy_path': self._set_policy_path(value) elif setting == 'enable_tls_client_auth': self._set_enable_tls_client_auth(value) elif setting == 'tls_cipher_suites': self._set_tls_cipher_suites(value) elif setting == 'logging_level': self._set_logging_level(value) else: self._set_database_path(value)
[ "def", "set_setting", "(", "self", ",", "setting", ",", "value", ")", ":", "if", "setting", "not", "in", "self", ".", "_expected_settings", "+", "self", ".", "_optional_settings", ":", "raise", "exceptions", ".", "ConfigurationError", "(", "\"Setting '{0}' is not supported.\"", ".", "format", "(", "setting", ")", ")", "if", "setting", "==", "'hostname'", ":", "self", ".", "_set_hostname", "(", "value", ")", "elif", "setting", "==", "'port'", ":", "self", ".", "_set_port", "(", "value", ")", "elif", "setting", "==", "'certificate_path'", ":", "self", ".", "_set_certificate_path", "(", "value", ")", "elif", "setting", "==", "'key_path'", ":", "self", ".", "_set_key_path", "(", "value", ")", "elif", "setting", "==", "'ca_path'", ":", "self", ".", "_set_ca_path", "(", "value", ")", "elif", "setting", "==", "'auth_suite'", ":", "self", ".", "_set_auth_suite", "(", "value", ")", "elif", "setting", "==", "'policy_path'", ":", "self", ".", "_set_policy_path", "(", "value", ")", "elif", "setting", "==", "'enable_tls_client_auth'", ":", "self", ".", "_set_enable_tls_client_auth", "(", "value", ")", "elif", "setting", "==", "'tls_cipher_suites'", ":", "self", ".", "_set_tls_cipher_suites", "(", "value", ")", "elif", "setting", "==", "'logging_level'", ":", "self", ".", "_set_logging_level", "(", "value", ")", "else", ":", "self", ".", "_set_database_path", "(", "value", ")" ]
37.627907
13.302326
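The set_setting record above routes each setting through a long if/elif chain. A dict of bound setters expresses the same routing more compactly; the sketch below is a hypothetical shell around two of the setters, shown as an alternative pattern rather than a drop-in replacement for the original configuration class.

class ConfigSketch:
    def __init__(self):
        # One entry per supported setting; extend as in the record above.
        self._setters = {
            'hostname': self._set_hostname,
            'port': self._set_port,
        }

    def set_setting(self, setting, value):
        try:
            setter = self._setters[setting]
        except KeyError:
            raise ValueError("Setting '{0}' is not supported.".format(setting))
        setter(value)

    def _set_hostname(self, value):
        self.hostname = value

    def _set_port(self, value):
        self.port = int(value)

Usage is unchanged from the original shape: ConfigSketch().set_setting('port', 5696).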
def get_usage(self):
        """
        Parses /proc/stat and calculates total and busy time
        (values are in USER_HZ units; see man 5 proc for further information).
        """
        usage = {}

        for cpu, timings in self.get_cpu_timings().items():
            cpu_total = sum(timings)
            del timings[3:5]
            cpu_busy = sum(timings)
            cpu_usage = self.calculate_usage(cpu, cpu_total, cpu_busy)

            usage['usage_' + cpu] = cpu_usage

        # for backward compatibility
        usage['usage'] = usage['usage_cpu']

        return usage
[ "def", "get_usage", "(", "self", ")", ":", "usage", "=", "{", "}", "for", "cpu", ",", "timings", "in", "self", ".", "get_cpu_timings", "(", ")", ".", "items", "(", ")", ":", "cpu_total", "=", "sum", "(", "timings", ")", "del", "timings", "[", "3", ":", "5", "]", "cpu_busy", "=", "sum", "(", "timings", ")", "cpu_usage", "=", "self", ".", "calculate_usage", "(", "cpu", ",", "cpu_total", ",", "cpu_busy", ")", "usage", "[", "'usage_'", "+", "cpu", "]", "=", "cpu_usage", "# for backward compatibility", "usage", "[", "'usage'", "]", "=", "usage", "[", "'usage_cpu'", "]", "return", "usage" ]
29.684211
18.736842
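get_usage above leans on two helpers the record does not show: get_cpu_timings, which reads /proc/stat, and calculate_usage, which presumably diffs successive snapshots (a single /proc/stat snapshot is cumulative since boot). A standalone sketch of the parsing side, assuming a Linux host and the field layout from man 5 proc (user nice system idle iowait irq softirq ..., all in USER_HZ ticks):

def read_cpu_timings(path='/proc/stat'):
    """Return {'cpu': [ticks, ...], 'cpu0': [...], ...} from /proc/stat."""
    timings = {}
    with open(path) as stat:
        for line in stat:
            if line.startswith('cpu'):
                name, *fields = line.split()
                timings[name] = [int(f) for f in fields]
    return timings

def busy_and_total(ticks):
    # Busy time drops fields 3 and 4 (idle, iowait), mirroring the
    # `del timings[3:5]` step in get_usage above.
    total = sum(ticks)
    return total - sum(ticks[3:5]), total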
def remove_none_dict_values(obj):
    """
    Recursively remove None-valued keys from dicts,
    descending into lists, tuples, and sets.
    """
    if isinstance(obj, (list, tuple, set)):
        return type(obj)(remove_none_dict_values(x) for x in obj)
    elif isinstance(obj, dict):
        return type(obj)((k, remove_none_dict_values(v)) for k, v in obj.items() if v is not None)
    else:
        return obj
[ "def", "remove_none_dict_values", "(", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "(", "list", ",", "tuple", ",", "set", ")", ")", ":", "return", "type", "(", "obj", ")", "(", "remove_none_dict_values", "(", "x", ")", "for", "x", "in", "obj", ")", "elif", "isinstance", "(", "obj", ",", "dict", ")", ":", "return", "type", "(", "obj", ")", "(", "(", "k", ",", "remove_none_dict_values", "(", "v", ")", ")", "for", "k", ",", "v", "in", "obj", ".", "items", "(", ")", "if", "v", "is", "not", "None", ")", "else", ":", "return", "obj" ]
32.666667
10.833333
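A quick usage example for remove_none_dict_values; the expected result in the comment follows directly from the recursion above. Note that only dict values are filtered: a bare None inside a list or tuple is kept as-is.

record = {
    'id': 7,
    'name': None,
    'tags': [{'k': 'env', 'v': None}, {'k': 'tier', 'v': 'web'}],
}
print(remove_none_dict_values(record))
# {'id': 7, 'tags': [{'k': 'env'}, {'k': 'tier', 'v': 'web'}]}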