Columns (each row below lists text, code_tokens, avg_line_len, score in that order):

    text           stringlengths   min 89, max 104k
    code_tokens    list
    avg_line_len   float64         min 7.91, max 980
    score          float64         min 0, max 630
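The two float columns are derived from the text column. Below is a minimal sketch of how avg_line_len could be recomputed, under the assumption that it is the mean character count per line of the raw function source; the score formula is not documented in this dump, so it is not reproduced.

def avg_line_len(text: str) -> float:
    # Mean number of characters per line of a function body (assumed
    # definition; not confirmed by this dump).
    lines = text.splitlines()
    if not lines:
        return 0.0
    return sum(len(line) for line in lines) / len(lines)

# Usage: a two-line body with lines of length 9 and 16 averages to 12.5.
print(avg_line_len("def f(x):\n    return x + 1\n"))  # -> 12.5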
def pylog(self, *args, **kwargs):
    """Display all available logging information."""
    printerr(self.name, args, kwargs, traceback.format_exc())
[ "def", "pylog", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "printerr", "(", "self", ".", "name", ",", "args", ",", "kwargs", ",", "traceback", ".", "format_exc", "(", ")", ")" ]
avg_line_len: 51.333333
score: 10.666667
def file_to_list(file_name, file_location):
    """
    Import a text file into a list.

    Args:
        file_name: The name of the file to import
        file_location: The location of the file, derived from the os module

    Returns:
        returns a list
    """
    file = __os.path.join(file_location, file_name)
    read_file = open(file, "r")
    temp_list = read_file.read().splitlines()
    read_file.close()
    return temp_list
[ "def", "file_to_list", "(", "file_name", ",", "file_location", ")", ":", "file", "=", "__os", ".", "path", ".", "join", "(", "file_location", ",", "file_name", ")", "read_file", "=", "open", "(", "file", ",", "\"r\"", ")", "temp_list", "=", "read_file", ".", "read", "(", ")", ".", "splitlines", "(", ")", "read_file", ".", "close", "(", ")", "return", "temp_list" ]
avg_line_len: 28.466667
score: 15.8
def addPoint(self, x, y):
    """
    Adds a new chart point to this item.

    :param x | <variant>
           y | <variant>
    """
    self._points.append((x, y))
    self._dirty = True
[ "def", "addPoint", "(", "self", ",", "x", ",", "y", ")", ":", "self", ".", "_points", ".", "append", "(", "(", "x", ",", "y", ")", ")", "self", ".", "_dirty", "=", "True" ]
avg_line_len: 26.222222
score: 8.666667
def get_user_and_user_email_by_id(self, user_or_user_email_id):
    """Retrieve the User and UserEmail object by ID."""
    if self.UserEmailClass:
        user_email = self.db_adapter.get_object(self.UserEmailClass, user_or_user_email_id)
        user = user_email.user if user_email else None
    else:
        user = self.db_adapter.get_object(self.UserClass, user_or_user_email_id)
        user_email = user
    return (user, user_email)
[ "def", "get_user_and_user_email_by_id", "(", "self", ",", "user_or_user_email_id", ")", ":", "if", "self", ".", "UserEmailClass", ":", "user_email", "=", "self", ".", "db_adapter", ".", "get_object", "(", "self", ".", "UserEmailClass", ",", "user_or_user_email_id", ")", "user", "=", "user_email", ".", "user", "if", "user_email", "else", "None", "else", ":", "user", "=", "self", ".", "db_adapter", ".", "get_object", "(", "self", ".", "UserClass", ",", "user_or_user_email_id", ")", "user_email", "=", "user", "return", "(", "user", ",", "user_email", ")" ]
avg_line_len: 51.666667
score: 21.555556
def _find_usage_subnets(self):
    """find usage for Subnets; return dict of SubnetId to AZ"""
    # subnets per VPC
    subnet_to_az = {}
    subnets = defaultdict(int)
    for subnet in self.conn.describe_subnets()['Subnets']:
        subnets[subnet['VpcId']] += 1
        subnet_to_az[subnet['SubnetId']] = subnet['AvailabilityZone']
    for vpc_id in subnets:
        self.limits['Subnets per VPC']._add_current_usage(
            subnets[vpc_id],
            aws_type='AWS::EC2::VPC',
            resource_id=vpc_id
        )
    return subnet_to_az
[ "def", "_find_usage_subnets", "(", "self", ")", ":", "# subnets per VPC", "subnet_to_az", "=", "{", "}", "subnets", "=", "defaultdict", "(", "int", ")", "for", "subnet", "in", "self", ".", "conn", ".", "describe_subnets", "(", ")", "[", "'Subnets'", "]", ":", "subnets", "[", "subnet", "[", "'VpcId'", "]", "]", "+=", "1", "subnet_to_az", "[", "subnet", "[", "'SubnetId'", "]", "]", "=", "subnet", "[", "'AvailabilityZone'", "]", "for", "vpc_id", "in", "subnets", ":", "self", ".", "limits", "[", "'Subnets per VPC'", "]", ".", "_add_current_usage", "(", "subnets", "[", "vpc_id", "]", ",", "aws_type", "=", "'AWS::EC2::VPC'", ",", "resource_id", "=", "vpc_id", ")", "return", "subnet_to_az" ]
avg_line_len: 39.733333
score: 12.6
def _multitaper_spectrum(self, clm, k, convention='power', unit='per_l',
                         clat=None, clon=None, coord_degrees=True, lmax=None,
                         taper_wt=None):
    """
    Return the multitaper spectrum estimate and standard error for an
    input SHCoeffs class instance.
    """
    if lmax is None:
        lmax = clm.lmax

    if (clat is not None and clon is not None and clat == self.clat and
            clon == self.clon and coord_degrees is self.coord_degrees and
            k <= self.nwinrot):
        # use the already stored coeffs
        pass
    elif (clat is None and clon is None) and \
            (self.clat is not None and self.clon is not None and
             k <= self.nwinrot):
        # use the already stored coeffs
        pass
    else:
        if clat is None:
            clat = self.clat
        if clon is None:
            clon = self.clon
        if (clat is None and clon is not None) or \
                (clat is not None and clon is None):
            raise ValueError('clat and clon must both be input. ' +
                             'clat = {:s}, clon = {:s}'
                             .format(repr(clat), repr(clon)))
        if clat is None and clon is None:
            self.rotate(clat=90., clon=0., coord_degrees=True, nwinrot=k)
        else:
            self.rotate(clat=clat, clon=clon, coord_degrees=coord_degrees,
                        nwinrot=k)

    sh = clm.to_array(normalization='4pi', csphase=1, lmax=lmax)

    if taper_wt is None:
        mtse, sd = _shtools.SHMultiTaperMaskSE(sh, self.coeffs, lmax=lmax,
                                               k=k)
    else:
        mtse, sd = _shtools.SHMultiTaperMaskSE(sh, self.coeffs, lmax=lmax,
                                               k=k, taper_wt=taper_wt)

    if (unit == 'per_l'):
        pass
    elif (unit == 'per_lm'):
        degree_l = _np.arange(len(mtse))
        mtse /= (2.0 * degree_l + 1.0)
        sd /= (2.0 * degree_l + 1.0)
    else:
        raise ValueError(
            "unit must be 'per_l' or 'per_lm'." +
            "Input value was {:s}".format(repr(unit)))

    if (convention == 'power'):
        return mtse, sd
    elif (convention == 'energy'):
        return mtse * 4.0 * _np.pi, sd * 4.0 * _np.pi
    else:
        raise ValueError(
            "convention must be 'power' or 'energy'." +
            "Input value was {:s}".format(repr(convention)))
[ "def", "_multitaper_spectrum", "(", "self", ",", "clm", ",", "k", ",", "convention", "=", "'power'", ",", "unit", "=", "'per_l'", ",", "clat", "=", "None", ",", "clon", "=", "None", ",", "coord_degrees", "=", "True", ",", "lmax", "=", "None", ",", "taper_wt", "=", "None", ")", ":", "if", "lmax", "is", "None", ":", "lmax", "=", "clm", ".", "lmax", "if", "(", "clat", "is", "not", "None", "and", "clon", "is", "not", "None", "and", "clat", "==", "self", ".", "clat", "and", "clon", "==", "self", ".", "clon", "and", "coord_degrees", "is", "self", ".", "coord_degrees", "and", "k", "<=", "self", ".", "nwinrot", ")", ":", "# use the already stored coeffs", "pass", "elif", "(", "clat", "is", "None", "and", "clon", "is", "None", ")", "and", "(", "self", ".", "clat", "is", "not", "None", "and", "self", ".", "clon", "is", "not", "None", "and", "k", "<=", "self", ".", "nwinrot", ")", ":", "# use the already stored coeffs", "pass", "else", ":", "if", "clat", "is", "None", ":", "clat", "=", "self", ".", "clat", "if", "clon", "is", "None", ":", "clon", "=", "self", ".", "clon", "if", "(", "clat", "is", "None", "and", "clon", "is", "not", "None", ")", "or", "(", "clat", "is", "not", "None", "and", "clon", "is", "None", ")", ":", "raise", "ValueError", "(", "'clat and clon must both be input. '", "+", "'clat = {:s}, clon = {:s}'", ".", "format", "(", "repr", "(", "clat", ")", ",", "repr", "(", "clon", ")", ")", ")", "if", "clat", "is", "None", "and", "clon", "is", "None", ":", "self", ".", "rotate", "(", "clat", "=", "90.", ",", "clon", "=", "0.", ",", "coord_degrees", "=", "True", ",", "nwinrot", "=", "k", ")", "else", ":", "self", ".", "rotate", "(", "clat", "=", "clat", ",", "clon", "=", "clon", ",", "coord_degrees", "=", "coord_degrees", ",", "nwinrot", "=", "k", ")", "sh", "=", "clm", ".", "to_array", "(", "normalization", "=", "'4pi'", ",", "csphase", "=", "1", ",", "lmax", "=", "lmax", ")", "if", "taper_wt", "is", "None", ":", "mtse", ",", "sd", "=", "_shtools", ".", "SHMultiTaperMaskSE", "(", "sh", ",", "self", ".", "coeffs", ",", "lmax", "=", "lmax", ",", "k", "=", "k", ")", "else", ":", "mtse", ",", "sd", "=", "_shtools", ".", "SHMultiTaperMaskSE", "(", "sh", ",", "self", ".", "coeffs", ",", "lmax", "=", "lmax", ",", "k", "=", "k", ",", "taper_wt", "=", "taper_wt", ")", "if", "(", "unit", "==", "'per_l'", ")", ":", "pass", "elif", "(", "unit", "==", "'per_lm'", ")", ":", "degree_l", "=", "_np", ".", "arange", "(", "len", "(", "mtse", ")", ")", "mtse", "/=", "(", "2.0", "*", "degree_l", "+", "1.0", ")", "sd", "/=", "(", "2.0", "*", "degree_l", "+", "1.0", ")", "else", ":", "raise", "ValueError", "(", "\"unit must be 'per_l' or 'per_lm'.\"", "+", "\"Input value was {:s}\"", ".", "format", "(", "repr", "(", "unit", ")", ")", ")", "if", "(", "convention", "==", "'power'", ")", ":", "return", "mtse", ",", "sd", "elif", "(", "convention", "==", "'energy'", ")", ":", "return", "mtse", "*", "4.0", "*", "_np", ".", "pi", ",", "sd", "*", "4.0", "*", "_np", ".", "pi", "else", ":", "raise", "ValueError", "(", "\"convention must be 'power' or 'energy'.\"", "+", "\"Input value was {:s}\"", ".", "format", "(", "repr", "(", "convention", ")", ")", ")" ]
avg_line_len: 40.71875
score: 19.0625
def get_ancestors(self):
    """ Get all unique instance ancestors """
    ancestors = list(self.get_parents())
    ancestor_unique_attributes = set([(a.__class__, a.id) for a in ancestors])
    ancestors_with_parents = [
        a for a in ancestors if isinstance(a, DescendantMixin)
    ]
    for ancestor in ancestors_with_parents:
        for parent in ancestor.get_ancestors():
            if (parent.__class__, parent.id) not in ancestor_unique_attributes:
                ancestors.append(parent)
    return ancestors
[ "def", "get_ancestors", "(", "self", ")", ":", "ancestors", "=", "list", "(", "self", ".", "get_parents", "(", ")", ")", "ancestor_unique_attributes", "=", "set", "(", "[", "(", "a", ".", "__class__", ",", "a", ".", "id", ")", "for", "a", "in", "ancestors", "]", ")", "ancestors_with_parents", "=", "[", "a", "for", "a", "in", "ancestors", "if", "isinstance", "(", "a", ",", "DescendantMixin", ")", "]", "for", "ancestor", "in", "ancestors_with_parents", ":", "for", "parent", "in", "ancestor", ".", "get_ancestors", "(", ")", ":", "if", "(", "parent", ".", "__class__", ",", "parent", ".", "id", ")", "not", "in", "ancestor_unique_attributes", ":", "ancestors", ".", "append", "(", "parent", ")", "return", "ancestors" ]
avg_line_len: 53.7
score: 19.2
def list_operations(self, name, filter_, page_size=0, options=None):
    """
    Lists operations that match the specified filter in the request. If
    the server doesn't support this method, it returns ``UNIMPLEMENTED``.

    NOTE: the ``name`` binding below allows API services to override the
    binding to use different resource name schemes, such as
    ``users/*/operations``.

    Example:
        >>> from google.gapic.longrunning import operations_client
        >>> from google.gax import CallOptions, INITIAL_PAGE
        >>> api = operations_client.OperationsClient()
        >>> name = ''
        >>> filter_ = ''
        >>>
        >>> # Iterate over all results
        >>> for element in api.list_operations(name, filter_):
        >>>     # process element
        >>>     pass
        >>>
        >>> # Or iterate over results one page at a time
        >>> for page in api.list_operations(name, filter_, options=CallOptions(page_token=INITIAL_PAGE)):
        >>>     for element in page:
        >>>         # process element
        >>>         pass

    Args:
        name (string): The name of the operation collection.
        filter_ (string): The standard list filter.
        page_size (int): The maximum number of resources contained in the
            underlying API response. If page streaming is performed per-
            resource, this parameter does not affect the return value. If
            page streaming is performed per-page, this determines the
            maximum number of resources in a page.
        options (:class:`google.gax.CallOptions`): Overrides the default
            settings for this call, e.g, timeout, retries etc.

    Returns:
        A :class:`google.gax.PageIterator` instance. By default, this
        is an iterable of :class:`google.longrunning.operations_pb2.Operation`
        instances. This object can also be configured to iterate over the
        pages of the response through the `CallOptions` parameter.

    Raises:
        :exc:`google.gax.errors.GaxError` if the RPC is aborted.
        :exc:`ValueError` if the parameters are invalid.
    """
    # Create the request object.
    request = operations_pb2.ListOperationsRequest(
        name=name, filter=filter_, page_size=page_size)
    return self._list_operations(request, options)
[ "def", "list_operations", "(", "self", ",", "name", ",", "filter_", ",", "page_size", "=", "0", ",", "options", "=", "None", ")", ":", "# Create the request object.", "request", "=", "operations_pb2", ".", "ListOperationsRequest", "(", "name", "=", "name", ",", "filter", "=", "filter_", ",", "page_size", "=", "page_size", ")", "return", "self", ".", "_list_operations", "(", "request", ",", "options", ")" ]
avg_line_len: 47.06
score: 24.54
def length_estimate(self):
    '''
    Calculates an estimated word count based on the number of characters,
    locations, and arcs. For reference see:
    http://www.writingexcuses.com/2017/07/02/12-27-choosing-a-length/
    '''
    characters = self.characterinstance_set.filter(
        Q(main_character=True) |
        Q(pov_character=True) |
        Q(protagonist=True) |
        Q(antagonist=True) |
        Q(villain=True)).count()
    locations = self.locationinstance_set.count()
    arcs = self.arc_set.count()
    return ((characters + locations) * 750) * (1.5 * arcs)
[ "def", "length_estimate", "(", "self", ")", ":", "characters", "=", "self", ".", "characterinstance_set", ".", "filter", "(", "Q", "(", "main_character", "=", "True", ")", "|", "Q", "(", "pov_character", "=", "True", ")", "|", "Q", "(", "protagonist", "=", "True", ")", "|", "Q", "(", "antagonist", "=", "True", ")", "|", "Q", "(", "villain", "=", "True", ")", ")", ".", "count", "(", ")", "locations", "=", "self", ".", "locationinstance_set", ".", "count", "(", ")", "arcs", "=", "self", ".", "arc_set", ".", "count", "(", ")", "return", "(", "(", "characters", "+", "locations", ")", "*", "750", ")", "*", "(", "1.5", "*", "arcs", ")" ]
avg_line_len: 41.266667
score: 15.8
def save(f, arr, vocab):
    """
    Save word embedding file.
    Check :func:`word_embedding_loader.saver.glove.save` for the API.
    """
    f.write(('%d %d' % (arr.shape[0], arr.shape[1])).encode('utf-8'))
    for word, idx in vocab:
        _write_line(f, arr[idx], word)
[ "def", "save", "(", "f", ",", "arr", ",", "vocab", ")", ":", "f", ".", "write", "(", "(", "'%d %d'", "%", "(", "arr", ".", "shape", "[", "0", "]", ",", "arr", ".", "shape", "[", "1", "]", ")", ")", ".", "encode", "(", "'utf-8'", ")", ")", "for", "word", ",", "idx", "in", "vocab", ":", "_write_line", "(", "f", ",", "arr", "[", "idx", "]", ",", "word", ")" ]
avg_line_len: 33.75
score: 12.5
def size_r_img_inches(width, height):
    """Compute the width and height for an R image for display in IPython

    Neither width nor height can be null; both should be integer pixel
    values > 0.

    Returns a tuple of (width, height) that should be used by ggsave in R to
    produce an appropriately sized jpeg/png/pdf image with the right aspect
    ratio. The returned values are in inches.
    """
    # both width and height are given
    aspect_ratio = height / (1.0 * width)
    return R_IMAGE_SIZE, round(aspect_ratio * R_IMAGE_SIZE, 2)
[ "def", "size_r_img_inches", "(", "width", ",", "height", ")", ":", "# both width and height are given", "aspect_ratio", "=", "height", "/", "(", "1.0", "*", "width", ")", "return", "R_IMAGE_SIZE", ",", "round", "(", "aspect_ratio", "*", "R_IMAGE_SIZE", ",", "2", ")" ]
avg_line_len: 41
score: 20.384615
def collect_aliases(self):
    """Collect the type aliases in the source.

    :sig: () -> None
    """
    self.aliases = get_aliases(self._code_lines)
    for alias, signature in self.aliases.items():
        _, _, requires = parse_signature(signature)
        self.required_types |= requires
        self.defined_types |= {alias}
[ "def", "collect_aliases", "(", "self", ")", ":", "self", ".", "aliases", "=", "get_aliases", "(", "self", ".", "_code_lines", ")", "for", "alias", ",", "signature", "in", "self", ".", "aliases", ".", "items", "(", ")", ":", "_", ",", "_", ",", "requires", "=", "parse_signature", "(", "signature", ")", "self", ".", "required_types", "|=", "requires", "self", ".", "defined_types", "|=", "{", "alias", "}" ]
avg_line_len: 35.5
score: 11.4
def cut(self):
    """ Copy the currently selected text to the clipboard and delete it
        if it's inside the input buffer.
    """
    self.copy()
    if self.can_cut():
        self._control.textCursor().removeSelectedText()
[ "def", "cut", "(", "self", ")", ":", "self", ".", "copy", "(", ")", "if", "self", ".", "can_cut", "(", ")", ":", "self", ".", "_control", ".", "textCursor", "(", ")", ".", "removeSelectedText", "(", ")" ]
avg_line_len: 35.428571
score: 12
def read_file(rel_path, paths=None, raw=False, as_list=False,
              as_iter=False, *args, **kwargs):
    '''
    find a file that lives somewhere within a set of paths and return
    its contents. Default paths include 'static_dir'
    '''
    if not rel_path:
        raise ValueError("rel_path can not be null!")
    paths = str2list(paths)
    # try looking the file up in a directory called static relative
    # to SRC_DIR, eg assuming metrique git repo is in ~/metrique
    # we'd look in ~/metrique/static
    paths.extend([STATIC_DIR, os.path.join(SRC_DIR, 'static')])
    paths = [os.path.expanduser(p) for p in set(paths)]
    for path in paths:
        path = os.path.join(path, rel_path)
        logger.debug("trying to read: %s " % path)
        if os.path.exists(path):
            break
    else:
        raise IOError("path %s does not exist!" % rel_path)
    args = args if args else ['rU']
    fd = open(path, *args, **kwargs)
    if raw:
        return fd
    if as_iter:
        return read_in_chunks(fd)
    else:
        fd_lines = fd.readlines()
        if as_list:
            return fd_lines
        else:
            return ''.join(fd_lines)
[ "def", "read_file", "(", "rel_path", ",", "paths", "=", "None", ",", "raw", "=", "False", ",", "as_list", "=", "False", ",", "as_iter", "=", "False", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "rel_path", ":", "raise", "ValueError", "(", "\"rel_path can not be null!\"", ")", "paths", "=", "str2list", "(", "paths", ")", "# try looking the file up in a directory called static relative", "# to SRC_DIR, eg assuming metrique git repo is in ~/metrique", "# we'd look in ~/metrique/static", "paths", ".", "extend", "(", "[", "STATIC_DIR", ",", "os", ".", "path", ".", "join", "(", "SRC_DIR", ",", "'static'", ")", "]", ")", "paths", "=", "[", "os", ".", "path", ".", "expanduser", "(", "p", ")", "for", "p", "in", "set", "(", "paths", ")", "]", "for", "path", "in", "paths", ":", "path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "rel_path", ")", "logger", ".", "debug", "(", "\"trying to read: %s \"", "%", "path", ")", "if", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "break", "else", ":", "raise", "IOError", "(", "\"path %s does not exist!\"", "%", "rel_path", ")", "args", "=", "args", "if", "args", "else", "[", "'rU'", "]", "fd", "=", "open", "(", "path", ",", "*", "args", ",", "*", "*", "kwargs", ")", "if", "raw", ":", "return", "fd", "if", "as_iter", ":", "return", "read_in_chunks", "(", "fd", ")", "else", ":", "fd_lines", "=", "fd", ".", "readlines", "(", ")", "if", "as_list", ":", "return", "fd_lines", "else", ":", "return", "''", ".", "join", "(", "fd_lines", ")" ]
avg_line_len: 32.428571
score: 20.085714
def scale_axes_from_data(self):
    """Restrict data limits for Y-axis based on what you can see
    """
    # get tight limits for X-axis
    if self.args.xmin is None:
        self.args.xmin = min(ts.xspan[0] for ts in self.timeseries)
    if self.args.xmax is None:
        self.args.xmax = max(ts.xspan[1] for ts in self.timeseries)
    # autoscale view for Y-axis
    cropped = [ts.crop(self.args.xmin, self.args.xmax)
               for ts in self.timeseries]
    ymin = min(ts.value.min() for ts in cropped)
    ymax = max(ts.value.max() for ts in cropped)
    self.plot.gca().yaxis.set_data_interval(ymin, ymax, ignore=True)
    self.plot.gca().autoscale_view(scalex=False)
[ "def", "scale_axes_from_data", "(", "self", ")", ":", "# get tight limits for X-axis", "if", "self", ".", "args", ".", "xmin", "is", "None", ":", "self", ".", "args", ".", "xmin", "=", "min", "(", "ts", ".", "xspan", "[", "0", "]", "for", "ts", "in", "self", ".", "timeseries", ")", "if", "self", ".", "args", ".", "xmax", "is", "None", ":", "self", ".", "args", ".", "xmax", "=", "max", "(", "ts", ".", "xspan", "[", "1", "]", "for", "ts", "in", "self", ".", "timeseries", ")", "# autoscale view for Y-axis", "cropped", "=", "[", "ts", ".", "crop", "(", "self", ".", "args", ".", "xmin", ",", "self", ".", "args", ".", "xmax", ")", "for", "ts", "in", "self", ".", "timeseries", "]", "ymin", "=", "min", "(", "ts", ".", "value", ".", "min", "(", ")", "for", "ts", "in", "cropped", ")", "ymax", "=", "max", "(", "ts", ".", "value", ".", "max", "(", ")", "for", "ts", "in", "cropped", ")", "self", ".", "plot", ".", "gca", "(", ")", ".", "yaxis", ".", "set_data_interval", "(", "ymin", ",", "ymax", ",", "ignore", "=", "True", ")", "self", ".", "plot", ".", "gca", "(", ")", ".", "autoscale_view", "(", "scalex", "=", "False", ")" ]
avg_line_len: 45.1875
score: 13.875
def add_errback(future, callback, loop=None):
    '''Add a ``callback`` to a ``future`` executed only if an exception
    or cancellation has occurred.'''
    def _error_back(fut):
        if fut._exception:
            callback(fut.exception())
        elif fut.cancelled():
            callback(CancelledError())

    future = ensure_future(future, loop=None)
    future.add_done_callback(_error_back)
    return future
[ "def", "add_errback", "(", "future", ",", "callback", ",", "loop", "=", "None", ")", ":", "def", "_error_back", "(", "fut", ")", ":", "if", "fut", ".", "_exception", ":", "callback", "(", "fut", ".", "exception", "(", ")", ")", "elif", "fut", ".", "cancelled", "(", ")", ":", "callback", "(", "CancelledError", "(", ")", ")", "future", "=", "ensure_future", "(", "future", ",", "loop", "=", "None", ")", "future", ".", "add_done_callback", "(", "_error_back", ")", "return", "future" ]
avg_line_len: 34.166667
score: 12.833333
def dump(archive, calc_id=0, user=None):
    """
    Dump the openquake database and all the complete calculations into a zip
    file. In a multiuser installation must be run as administrator.
    """
    t0 = time.time()
    assert archive.endswith('.zip'), archive
    getfnames = 'select ds_calc_dir || ".hdf5" from job where ?A'
    param = dict(status='complete')
    if calc_id:
        param['id'] = calc_id
    if user:
        param['user_name'] = user
    fnames = [f for f, in db(getfnames, param) if os.path.exists(f)]
    zipfiles(fnames, archive, 'w', safeprint)
    pending_jobs = db('select id, status, description from job '
                      'where status="executing"')
    if pending_jobs:
        safeprint('WARNING: there were calculations executing during the dump,'
                  ' they have been not copied')
        for job_id, status, descr in pending_jobs:
            safeprint('%d %s %s' % (job_id, status, descr))
    # this also checks that the copied db is not corrupted
    smart_save(db.path, archive, calc_id)
    dt = time.time() - t0
    safeprint('Archived %d calculations into %s in %d seconds'
              % (len(fnames), archive, dt))
[ "def", "dump", "(", "archive", ",", "calc_id", "=", "0", ",", "user", "=", "None", ")", ":", "t0", "=", "time", ".", "time", "(", ")", "assert", "archive", ".", "endswith", "(", "'.zip'", ")", ",", "archive", "getfnames", "=", "'select ds_calc_dir || \".hdf5\" from job where ?A'", "param", "=", "dict", "(", "status", "=", "'complete'", ")", "if", "calc_id", ":", "param", "[", "'id'", "]", "=", "calc_id", "if", "user", ":", "param", "[", "'user_name'", "]", "=", "user", "fnames", "=", "[", "f", "for", "f", ",", "in", "db", "(", "getfnames", ",", "param", ")", "if", "os", ".", "path", ".", "exists", "(", "f", ")", "]", "zipfiles", "(", "fnames", ",", "archive", ",", "'w'", ",", "safeprint", ")", "pending_jobs", "=", "db", "(", "'select id, status, description from job '", "'where status=\"executing\"'", ")", "if", "pending_jobs", ":", "safeprint", "(", "'WARNING: there were calculations executing during the dump,'", "' they have been not copied'", ")", "for", "job_id", ",", "status", ",", "descr", "in", "pending_jobs", ":", "safeprint", "(", "'%d %s %s'", "%", "(", "job_id", ",", "status", ",", "descr", ")", ")", "# this also checks that the copied db is not corrupted", "smart_save", "(", "db", ".", "path", ",", "archive", ",", "calc_id", ")", "dt", "=", "time", ".", "time", "(", ")", "-", "t0", "safeprint", "(", "'Archived %d calculations into %s in %d seconds'", "%", "(", "len", "(", "fnames", ")", ",", "archive", ",", "dt", ")", ")" ]
avg_line_len: 40
score: 16.827586
def set_coords(self, x=0, y=0, z=0, t=0):
    """ set coords of agent in an arbitrary world """
    self.coords = {}
    self.coords['x'] = x
    self.coords['y'] = y
    self.coords['z'] = z
    self.coords['t'] = t
[ "def", "set_coords", "(", "self", ",", "x", "=", "0", ",", "y", "=", "0", ",", "z", "=", "0", ",", "t", "=", "0", ")", ":", "self", ".", "coords", "=", "{", "}", "self", ".", "coords", "[", "'x'", "]", "=", "x", "self", ".", "coords", "[", "'y'", "]", "=", "y", "self", ".", "coords", "[", "'z'", "]", "=", "z", "self", ".", "coords", "[", "'t'", "]", "=", "t" ]
avg_line_len: 27.555556
score: 8.222222
def _load_connection_error(hostname, error):
    '''
    Format and Return a connection error
    '''
    ret = {'code': None,
           'content': 'Error: Unable to connect to the bigip device: {host}\n{error}'.format(host=hostname, error=error)}
    return ret
[ "def", "_load_connection_error", "(", "hostname", ",", "error", ")", ":", "ret", "=", "{", "'code'", ":", "None", ",", "'content'", ":", "'Error: Unable to connect to the bigip device: {host}\\n{error}'", ".", "format", "(", "host", "=", "hostname", ",", "error", "=", "error", ")", "}", "return", "ret" ]
avg_line_len: 30.875
score: 33.875
def merkleroot(merkletree: 'MerkleTreeState') -> Locksroot:
    """ Return the root element of the merkle tree. """
    assert merkletree.layers, 'the merkle tree layers are empty'
    assert merkletree.layers[MERKLEROOT], 'the root layer is empty'
    return Locksroot(merkletree.layers[MERKLEROOT][0])
[ "def", "merkleroot", "(", "merkletree", ":", "'MerkleTreeState'", ")", "->", "Locksroot", ":", "assert", "merkletree", ".", "layers", ",", "'the merkle tree layers are empty'", "assert", "merkletree", ".", "layers", "[", "MERKLEROOT", "]", ",", "'the root layer is empty'", "return", "Locksroot", "(", "merkletree", ".", "layers", "[", "MERKLEROOT", "]", "[", "0", "]", ")" ]
avg_line_len: 49.833333
score: 20.666667
def set_logfile(path, instance):
    """Specify logfile path"""
    global logfile
    logfile = os.path.normpath(path) + '/hfos.' + instance + '.log'
[ "def", "set_logfile", "(", "path", ",", "instance", ")", ":", "global", "logfile", "logfile", "=", "os", ".", "path", ".", "normpath", "(", "path", ")", "+", "'/hfos.'", "+", "instance", "+", "'.log'" ]
avg_line_len: 29.4
score: 19.4
def _create_syns(b, needed_syns):
    """
    Create empty synthetics

    :parameter b: the :class:`phoebe.frontend.bundle.Bundle`
    :parameter list needed_syns: list of dictionaries containing kwargs to access
        the dataset (dataset, component, kind)
    :return: :class:`phoebe.parameters.parameters.ParameterSet` of all new parameters
    """
    # needs_mesh = {info['dataset']: info['kind'] for info in needed_syns if info['needs_mesh']}

    params = []
    for needed_syn in needed_syns:
        # print "*** _create_syns needed_syn", needed_syn
        # used to be {}_syn
        syn_kind = '{}'.format(needed_syn['kind'])
        # if needed_syn['kind']=='mesh':
            # parameters.dataset.mesh will handle creating the necessary columns
            # needed_syn['dataset_fields'] = needs_mesh
            # needed_syn['columns'] = b.get_value(qualifier='columns', dataset=needed_syn['dataset'], context='dataset')
            # datasets = b.get_value(qualifier='datasets', dataset=needed_syn['dataset'], context='dataset')
            # needed_syn['datasets'] = {ds: b.filter(datset=ds, context='dataset').exclude(kind='*_dep').kind for ds in datasets}

        # phoebe will compute everything sorted - even if the input times array
        # is out of order, so let's make sure the exposed times array is in
        # the correct (sorted) order
        if 'times' in needed_syn.keys():
            needed_syn['times'].sort()
            needed_syn['empty_arrays_len'] = len(needed_syn['times'])

        these_params, these_constraints = getattr(_dataset, "{}_syn".format(syn_kind.lower()))(**needed_syn)
        # TODO: do we need to handle constraints?
        these_params = these_params.to_list()
        for param in these_params:
            if param._dataset is None:
                # dataset may be set for mesh columns
                param._dataset = needed_syn['dataset']
            param._kind = syn_kind
            param._component = needed_syn['component']
            # reset copy_for... model Parameters should never copy
            param._copy_for = {}
            # context, model, etc will be handle by the bundle once these are returned
        params += these_params

    return ParameterSet(params)
[ "def", "_create_syns", "(", "b", ",", "needed_syns", ")", ":", "# needs_mesh = {info['dataset']: info['kind'] for info in needed_syns if info['needs_mesh']}", "params", "=", "[", "]", "for", "needed_syn", "in", "needed_syns", ":", "# print \"*** _create_syns needed_syn\", needed_syn", "# used to be {}_syn", "syn_kind", "=", "'{}'", ".", "format", "(", "needed_syn", "[", "'kind'", "]", ")", "# if needed_syn['kind']=='mesh':", "# parameters.dataset.mesh will handle creating the necessary columns", "# needed_syn['dataset_fields'] = needs_mesh", "# needed_syn['columns'] = b.get_value(qualifier='columns', dataset=needed_syn['dataset'], context='dataset')", "# datasets = b.get_value(qualifier='datasets', dataset=needed_syn['dataset'], context='dataset')", "# needed_syn['datasets'] = {ds: b.filter(datset=ds, context='dataset').exclude(kind='*_dep').kind for ds in datasets}", "# phoebe will compute everything sorted - even if the input times array", "# is out of order, so let's make sure the exposed times array is in", "# the correct (sorted) order", "if", "'times'", "in", "needed_syn", ".", "keys", "(", ")", ":", "needed_syn", "[", "'times'", "]", ".", "sort", "(", ")", "needed_syn", "[", "'empty_arrays_len'", "]", "=", "len", "(", "needed_syn", "[", "'times'", "]", ")", "these_params", ",", "these_constraints", "=", "getattr", "(", "_dataset", ",", "\"{}_syn\"", ".", "format", "(", "syn_kind", ".", "lower", "(", ")", ")", ")", "(", "*", "*", "needed_syn", ")", "# TODO: do we need to handle constraints?", "these_params", "=", "these_params", ".", "to_list", "(", ")", "for", "param", "in", "these_params", ":", "if", "param", ".", "_dataset", "is", "None", ":", "# dataset may be set for mesh columns", "param", ".", "_dataset", "=", "needed_syn", "[", "'dataset'", "]", "param", ".", "_kind", "=", "syn_kind", "param", ".", "_component", "=", "needed_syn", "[", "'component'", "]", "# reset copy_for... model Parameters should never copy", "param", ".", "_copy_for", "=", "{", "}", "# context, model, etc will be handle by the bundle once these are returned", "params", "+=", "these_params", "return", "ParameterSet", "(", "params", ")" ]
avg_line_len: 43.294118
score: 26.196078
def alias_repository(self, repository_id, alias_id):
    """Adds an ``Id`` to a ``Repository`` for the purpose of creating compatibility.

    The primary ``Id`` of the ``Repository`` is determined by the
    provider. The new ``Id`` is an alias to the primary ``Id``. If
    the alias is a pointer to another repository, it is reassigned to
    the given repository ``Id``.

    arg:    repository_id (osid.id.Id): the ``Id`` of a ``Repository``
    arg:    alias_id (osid.id.Id): the alias ``Id``
    raise:  AlreadyExists - ``alias_id`` is in use as a primary ``Id``
    raise:  NotFound - ``repository_id`` not found
    raise:  NullArgument - ``repository_id`` or ``alias_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for
    # osid.resource.BinLookupSession.alias_bin_template
    if self._catalog_session is not None:
        return self._catalog_session.alias_catalog(catalog_id=repository_id, alias_id=alias_id)
    self._alias_id(primary_id=repository_id, equivalent_id=alias_id)
[ "def", "alias_repository", "(", "self", ",", "repository_id", ",", "alias_id", ")", ":", "# Implemented from template for", "# osid.resource.BinLookupSession.alias_bin_template", "if", "self", ".", "_catalog_session", "is", "not", "None", ":", "return", "self", ".", "_catalog_session", ".", "alias_catalog", "(", "catalog_id", "=", "repository_id", ",", "alias_id", "=", "alias_id", ")", "self", ".", "_alias_id", "(", "primary_id", "=", "repository_id", ",", "equivalent_id", "=", "alias_id", ")" ]
avg_line_len: 49
score: 21.038462
def compile_model(self, input_model_config, output_model_config, role,
                  job_name, stop_condition, tags):
    """Create an Amazon SageMaker Neo compilation job.

    Args:
        input_model_config (dict): the trained model and the Amazon S3
            location where it is stored.
        output_model_config (dict): Identifies the Amazon S3 location where
            you want Amazon SageMaker Neo to save the results of compilation
            job
        role (str): An AWS IAM role (either name or full ARN). The Amazon
            SageMaker Neo compilation jobs use this role to access model
            artifacts. You must grant sufficient permissions to this role.
        job_name (str): Name of the compilation job being created.
        stop_condition (dict): Defines when compilation job shall finish.
            Contains entries that can be understood by the service like
            ``MaxRuntimeInSeconds``.
        tags (list[dict]): List of tags for labeling a compile model job.
            For more, see
            https://docs.aws.amazon.com/sagemaker/latest/dg/API_Tag.html.

    Returns:
        str: ARN of the compile model job, if it is created.
    """
    compilation_job_request = {
        'InputConfig': input_model_config,
        'OutputConfig': output_model_config,
        'RoleArn': role,
        'StoppingCondition': stop_condition,
        'CompilationJobName': job_name
    }

    if tags is not None:
        compilation_job_request['Tags'] = tags

    LOGGER.info('Creating compilation-job with name: {}'.format(job_name))
    self.sagemaker_client.create_compilation_job(**compilation_job_request)
[ "def", "compile_model", "(", "self", ",", "input_model_config", ",", "output_model_config", ",", "role", ",", "job_name", ",", "stop_condition", ",", "tags", ")", ":", "compilation_job_request", "=", "{", "'InputConfig'", ":", "input_model_config", ",", "'OutputConfig'", ":", "output_model_config", ",", "'RoleArn'", ":", "role", ",", "'StoppingCondition'", ":", "stop_condition", ",", "'CompilationJobName'", ":", "job_name", "}", "if", "tags", "is", "not", "None", ":", "compilation_job_request", "[", "'Tags'", "]", "=", "tags", "LOGGER", ".", "info", "(", "'Creating compilation-job with name: {}'", ".", "format", "(", "job_name", ")", ")", "self", ".", "sagemaker_client", ".", "create_compilation_job", "(", "*", "*", "compilation_job_request", ")" ]
avg_line_len: 50.727273
score: 29.909091
def _set_prompt(self):
    """Set prompt so it displays the current working directory."""
    self.cwd = os.getcwd()
    self.prompt = Fore.CYAN + '{!r} $ '.format(self.cwd) + Fore.RESET
[ "def", "_set_prompt", "(", "self", ")", ":", "self", ".", "cwd", "=", "os", ".", "getcwd", "(", ")", "self", ".", "prompt", "=", "Fore", ".", "CYAN", "+", "'{!r} $ '", ".", "format", "(", "self", ".", "cwd", ")", "+", "Fore", ".", "RESET" ]
avg_line_len: 48.75
score: 15.25
def create_tcp_monitor(self, topics, batch_size=1, batch_duration=0,
                       compression='gzip', format_type='json'):
    """Creates a TCP Monitor instance in Device Cloud for a given list of topics

    :param topics: a string list of topics (e.g. ['DeviceCore[U]',
        'FileDataCore']).
    :param batch_size: How many Msgs received before sending data.
    :param batch_duration: How long to wait before sending batch if it
        does not exceed batch_size.
    :param compression: Compression value (i.e. 'gzip').
    :param format_type: What format server should send data in (i.e. 'xml' or 'json').

    Returns an object of the created Monitor
    """
    monitor_xml = """\
    <Monitor>
        <monTopic>{topics}</monTopic>
        <monBatchSize>{batch_size}</monBatchSize>
        <monFormatType>{format_type}</monFormatType>
        <monTransportType>tcp</monTransportType>
        <monCompression>{compression}</monCompression>
    </Monitor>
    """.format(
        topics=','.join(topics),
        batch_size=batch_size,
        batch_duration=batch_duration,
        format_type=format_type,
        compression=compression,
    )
    monitor_xml = textwrap.dedent(monitor_xml)
    response = self._conn.post("/ws/Monitor", monitor_xml)
    location = ET.fromstring(response.text).find('.//location').text
    monitor_id = int(location.split('/')[-1])
    return TCPDeviceCloudMonitor(self._conn, monitor_id, self._tcp_client_manager)
[ "def", "create_tcp_monitor", "(", "self", ",", "topics", ",", "batch_size", "=", "1", ",", "batch_duration", "=", "0", ",", "compression", "=", "'gzip'", ",", "format_type", "=", "'json'", ")", ":", "monitor_xml", "=", "\"\"\"\\\n <Monitor>\n <monTopic>{topics}</monTopic>\n <monBatchSize>{batch_size}</monBatchSize>\n <monFormatType>{format_type}</monFormatType>\n <monTransportType>tcp</monTransportType>\n <monCompression>{compression}</monCompression>\n </Monitor>\n \"\"\"", ".", "format", "(", "topics", "=", "','", ".", "join", "(", "topics", ")", ",", "batch_size", "=", "batch_size", ",", "batch_duration", "=", "batch_duration", ",", "format_type", "=", "format_type", ",", "compression", "=", "compression", ",", ")", "monitor_xml", "=", "textwrap", ".", "dedent", "(", "monitor_xml", ")", "response", "=", "self", ".", "_conn", ".", "post", "(", "\"/ws/Monitor\"", ",", "monitor_xml", ")", "location", "=", "ET", ".", "fromstring", "(", "response", ".", "text", ")", ".", "find", "(", "'.//location'", ")", ".", "text", "monitor_id", "=", "int", "(", "location", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", ")", "return", "TCPDeviceCloudMonitor", "(", "self", ".", "_conn", ",", "monitor_id", ",", "self", ".", "_tcp_client_manager", ")" ]
avg_line_len: 43.555556
score: 18.555556
def submit_sample(self, filepath, filename, tags=['TheHive']):
    """
    Uploads a new sample to VMRay api. Filename gets sent base64 encoded.

    :param filepath: path to sample
    :type filepath: str
    :param filename: filename of the original file
    :type filename: str
    :param tags: List of tags to apply to the sample
    :type tags: list(str)

    :returns: Dictionary of results
    :rtype: dict
    """
    apiurl = '/rest/sample/submit?sample_file'
    params = {'sample_filename_b64enc': base64.b64encode(filename.encode('utf-8')),
              'reanalyze': self.reanalyze}
    if tags:
        params['tags'] = ','.join(tags)

    if os.path.isfile(filepath):
        res = self.session.post(url=self.url + apiurl,
                                files=[('sample_file', open(filepath, mode='rb'))],
                                params=params)
        if res.status_code == 200:
            return json.loads(res.text)
        else:
            raise BadResponseError('Response from VMRay was not HTTP 200.'
                                   ' Responsecode: {}; Text: {}'.format(res.status_code, res.text))
    else:
        raise SampleFileNotFoundError('Given sample file was not found.')
[ "def", "submit_sample", "(", "self", ",", "filepath", ",", "filename", ",", "tags", "=", "[", "'TheHive'", "]", ")", ":", "apiurl", "=", "'/rest/sample/submit?sample_file'", "params", "=", "{", "'sample_filename_b64enc'", ":", "base64", ".", "b64encode", "(", "filename", ".", "encode", "(", "'utf-8'", ")", ")", ",", "'reanalyze'", ":", "self", ".", "reanalyze", "}", "if", "tags", ":", "params", "[", "'tags'", "]", "=", "','", ".", "join", "(", "tags", ")", "if", "os", ".", "path", ".", "isfile", "(", "filepath", ")", ":", "res", "=", "self", ".", "session", ".", "post", "(", "url", "=", "self", ".", "url", "+", "apiurl", ",", "files", "=", "[", "(", "'sample_file'", ",", "open", "(", "filepath", ",", "mode", "=", "'rb'", ")", ")", "]", ",", "params", "=", "params", ")", "if", "res", ".", "status_code", "==", "200", ":", "return", "json", ".", "loads", "(", "res", ".", "text", ")", "else", ":", "raise", "BadResponseError", "(", "'Response from VMRay was not HTTP 200.'", "' Responsecode: {}; Text: {}'", ".", "format", "(", "res", ".", "status_code", ",", "res", ".", "text", ")", ")", "else", ":", "raise", "SampleFileNotFoundError", "(", "'Given sample file was not found.'", ")" ]
avg_line_len: 43.133333
score: 19.666667
def assign_edge_colors_and_widths(self):
    """
    Resolve conflict of 'node_color' and 'node_style['fill'] args
    which are redundant. Default is node_style.fill unless user
    entered node_color. To enter multiple colors user must use
    node_color not style fill. Either way, we build a list of
    colors to pass to Drawing.node_colors which is then written
    to the marker as a fill CSS attribute.
    """
    # node_color overrides fill. Tricky to catch cuz it can be many types.

    # SET edge_widths and POP edge_style.stroke-width
    if self.style.edge_widths is None:
        if not self.style.edge_style["stroke-width"]:
            self.style.edge_style.pop("stroke-width")
            self.style.edge_style.pop("stroke")
            self.edge_widths = [None] * self.nedges
        else:
            if isinstance(self.style.edge_style["stroke-width"], (list, tuple)):
                raise ToytreeError(
                    "Use edge_widths not edge_style for multiple edge widths")
            # check the color
            width = self.style.edge_style["stroke-width"]
            self.style.edge_style.pop("stroke-width")
            self.edge_widths = [width] * self.nedges
    else:
        self.style.edge_style.pop("stroke-width")
        if isinstance(self.style.edge_widths, (str, int)):
            self.edge_widths = [int(self.style.edge_widths)] * self.nedges
        elif isinstance(self.style.edge_widths, (list, tuple)):
            if len(self.style.edge_widths) != self.nedges:
                raise ToytreeError("edge_widths arg is the wrong length")
            for cidx in range(self.nedges):
                self.edge_widths[cidx] = self.style.edge_widths[cidx]

    # SET edge_colors and POP edge_style.stroke
    if self.style.edge_colors is None:
        if self.style.edge_style["stroke"] is None:
            self.style.edge_style.pop("stroke")
            self.edge_colors = [None] * self.nedges
        else:
            if isinstance(self.style.edge_style["stroke"], (list, tuple)):
                raise ToytreeError(
                    "Use edge_colors not edge_style for multiple edge colors")
            # check the color
            color = self.style.edge_style["stroke"]
            if isinstance(color, (np.ndarray, np.void, list, tuple)):
                color = toyplot.color.to_css(color)
            self.style.edge_style.pop("stroke")
            self.edge_colors = [color] * self.nedges

    # otherwise parse node_color
    else:
        self.style.edge_style.pop("stroke")
        if isinstance(self.style.edge_colors, (str, int)):
            # check the color
            color = self.style.edge_colors
            if isinstance(color, (np.ndarray, np.void, list, tuple)):
                color = toyplot.color.to_css(color)
            self.edge_colors = [color] * self.nedges
        elif isinstance(self.style.edge_colors, (list, tuple)):
            if len(self.style.edge_colors) != self.nedges:
                raise ToytreeError("edge_colors arg is the wrong length")
            for cidx in range(self.nedges):
                self.edge_colors[cidx] = self.style.edge_colors[cidx]

    # do not allow empty edge_colors or widths
    self.edge_colors = [i if i else "#262626" for i in self.edge_colors]
    self.edge_widths = [i if i else 2 for i in self.edge_widths]
[ "def", "assign_edge_colors_and_widths", "(", "self", ")", ":", "# node_color overrides fill. Tricky to catch cuz it can be many types.", "# SET edge_widths and POP edge_style.stroke-width", "if", "self", ".", "style", ".", "edge_widths", "is", "None", ":", "if", "not", "self", ".", "style", ".", "edge_style", "[", "\"stroke-width\"", "]", ":", "self", ".", "style", ".", "edge_style", ".", "pop", "(", "\"stroke-width\"", ")", "self", ".", "style", ".", "edge_style", ".", "pop", "(", "\"stroke\"", ")", "self", ".", "edge_widths", "=", "[", "None", "]", "*", "self", ".", "nedges", "else", ":", "if", "isinstance", "(", "self", ".", "style", ".", "edge_style", "[", "\"stroke-width\"", "]", ",", "(", "list", ",", "tuple", ")", ")", ":", "raise", "ToytreeError", "(", "\"Use edge_widths not edge_style for multiple edge widths\"", ")", "# check the color", "width", "=", "self", ".", "style", ".", "edge_style", "[", "\"stroke-width\"", "]", "self", ".", "style", ".", "edge_style", ".", "pop", "(", "\"stroke-width\"", ")", "self", ".", "edge_widths", "=", "[", "width", "]", "*", "self", ".", "nedges", "else", ":", "self", ".", "style", ".", "edge_style", ".", "pop", "(", "\"stroke-width\"", ")", "if", "isinstance", "(", "self", ".", "style", ".", "edge_widths", ",", "(", "str", ",", "int", ")", ")", ":", "self", ".", "edge_widths", "=", "[", "int", "(", "self", ".", "style", ".", "edge_widths", ")", "]", "*", "self", ".", "nedges", "elif", "isinstance", "(", "self", ".", "style", ".", "edge_widths", ",", "(", "list", ",", "tuple", ")", ")", ":", "if", "len", "(", "self", ".", "style", ".", "edge_widths", ")", "!=", "self", ".", "nedges", ":", "raise", "ToytreeError", "(", "\"edge_widths arg is the wrong length\"", ")", "for", "cidx", "in", "range", "(", "self", ".", "nedges", ")", ":", "self", ".", "edge_widths", "[", "cidx", "]", "=", "self", ".", "style", ".", "edge_widths", "[", "cidx", "]", "# SET edge_colors and POP edge_style.stroke", "if", "self", ".", "style", ".", "edge_colors", "is", "None", ":", "if", "self", ".", "style", ".", "edge_style", "[", "\"stroke\"", "]", "is", "None", ":", "self", ".", "style", ".", "edge_style", ".", "pop", "(", "\"stroke\"", ")", "self", ".", "edge_colors", "=", "[", "None", "]", "*", "self", ".", "nedges", "else", ":", "if", "isinstance", "(", "self", ".", "style", ".", "edge_style", "[", "\"stroke\"", "]", ",", "(", "list", ",", "tuple", ")", ")", ":", "raise", "ToytreeError", "(", "\"Use edge_colors not edge_style for multiple edge colors\"", ")", "# check the color", "color", "=", "self", ".", "style", ".", "edge_style", "[", "\"stroke\"", "]", "if", "isinstance", "(", "color", ",", "(", "np", ".", "ndarray", ",", "np", ".", "void", ",", "list", ",", "tuple", ")", ")", ":", "color", "=", "toyplot", ".", "color", ".", "to_css", "(", "color", ")", "self", ".", "style", ".", "edge_style", ".", "pop", "(", "\"stroke\"", ")", "self", ".", "edge_colors", "=", "[", "color", "]", "*", "self", ".", "nedges", "# otherwise parse node_color", "else", ":", "self", ".", "style", ".", "edge_style", ".", "pop", "(", "\"stroke\"", ")", "if", "isinstance", "(", "self", ".", "style", ".", "edge_colors", ",", "(", "str", ",", "int", ")", ")", ":", "# check the color", "color", "=", "self", ".", "style", ".", "edge_colors", "if", "isinstance", "(", "color", ",", "(", "np", ".", "ndarray", ",", "np", ".", "void", ",", "list", ",", "tuple", ")", ")", ":", "color", "=", "toyplot", ".", "color", ".", "to_css", "(", "color", ")", "self", ".", "edge_colors", "=", "[", "color", 
"]", "*", "self", ".", "nedges", "elif", "isinstance", "(", "self", ".", "style", ".", "edge_colors", ",", "(", "list", ",", "tuple", ")", ")", ":", "if", "len", "(", "self", ".", "style", ".", "edge_colors", ")", "!=", "self", ".", "nedges", ":", "raise", "ToytreeError", "(", "\"edge_colors arg is the wrong length\"", ")", "for", "cidx", "in", "range", "(", "self", ".", "nedges", ")", ":", "self", ".", "edge_colors", "[", "cidx", "]", "=", "self", ".", "style", ".", "edge_colors", "[", "cidx", "]", "# do not allow empty edge_colors or widths", "self", ".", "edge_colors", "=", "[", "i", "if", "i", "else", "\"#262626\"", "for", "i", "in", "self", ".", "edge_colors", "]", "self", ".", "edge_widths", "=", "[", "i", "if", "i", "else", "2", "for", "i", "in", "self", ".", "edge_widths", "]" ]
avg_line_len: 51.328571
score: 22.642857
def list_subscriptions(self, target_id=None, ids=None, query_flags=None):
    """ListSubscriptions.

    [Preview API]
    :param str target_id:
    :param [str] ids:
    :param str query_flags:
    :rtype: [NotificationSubscription]
    """
    query_parameters = {}
    if target_id is not None:
        query_parameters['targetId'] = self._serialize.query('target_id', target_id, 'str')
    if ids is not None:
        ids = ",".join(ids)
        query_parameters['ids'] = self._serialize.query('ids', ids, 'str')
    if query_flags is not None:
        query_parameters['queryFlags'] = self._serialize.query('query_flags', query_flags, 'str')
    response = self._send(http_method='GET',
                          location_id='70f911d6-abac-488c-85b3-a206bf57e165',
                          version='5.0-preview.1',
                          query_parameters=query_parameters)
    return self._deserialize('[NotificationSubscription]', self._unwrap_collection(response))
[ "def", "list_subscriptions", "(", "self", ",", "target_id", "=", "None", ",", "ids", "=", "None", ",", "query_flags", "=", "None", ")", ":", "query_parameters", "=", "{", "}", "if", "target_id", "is", "not", "None", ":", "query_parameters", "[", "'targetId'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'target_id'", ",", "target_id", ",", "'str'", ")", "if", "ids", "is", "not", "None", ":", "ids", "=", "\",\"", ".", "join", "(", "ids", ")", "query_parameters", "[", "'ids'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'ids'", ",", "ids", ",", "'str'", ")", "if", "query_flags", "is", "not", "None", ":", "query_parameters", "[", "'queryFlags'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'query_flags'", ",", "query_flags", ",", "'str'", ")", "response", "=", "self", ".", "_send", "(", "http_method", "=", "'GET'", ",", "location_id", "=", "'70f911d6-abac-488c-85b3-a206bf57e165'", ",", "version", "=", "'5.0-preview.1'", ",", "query_parameters", "=", "query_parameters", ")", "return", "self", ".", "_deserialize", "(", "'[NotificationSubscription]'", ",", "self", ".", "_unwrap_collection", "(", "response", ")", ")" ]
avg_line_len: 49.238095
score: 20.571429
def euclidean(h1, h2):  # 9 us @array, 33 us @list \w 100 bins
    r"""
    Equal to Minowski distance with :math:`p=2`.

    See also
    --------
    minowski
    """
    h1, h2 = __prepare_histogram(h1, h2)
    return math.sqrt(scipy.sum(scipy.square(scipy.absolute(h1 - h2))))
[ "def", "euclidean", "(", "h1", ",", "h2", ")", ":", "# 9 us @array, 33 us @list \\w 100 bins", "h1", ",", "h2", "=", "__prepare_histogram", "(", "h1", ",", "h2", ")", "return", "math", ".", "sqrt", "(", "scipy", ".", "sum", "(", "scipy", ".", "square", "(", "scipy", ".", "absolute", "(", "h1", "-", "h2", ")", ")", ")", ")" ]
avg_line_len: 27.4
score: 17.9
def get_vid_from_url(self, url):
    """Extracts video ID from live.qq.com.
    """
    hit = re.search(r'live.qq.com/(\d+)', url)
    if hit is not None:
        return hit.group(1)
    hit = re.search(r'live.qq.com/directory/match/(\d+)', url)
    if hit is not None:
        return self.get_room_id_from_url(hit.group(1))
    html = get_content(url)
    room_id = match1(html, r'room_id\":(\d+)')
    if room_id is None:
        log.wtf('Unknown page {}'.format(url))
    return room_id
[ "def", "get_vid_from_url", "(", "self", ",", "url", ")", ":", "hit", "=", "re", ".", "search", "(", "r'live.qq.com/(\\d+)'", ",", "url", ")", "if", "hit", "is", "not", "None", ":", "return", "hit", ".", "group", "(", "1", ")", "hit", "=", "re", ".", "search", "(", "r'live.qq.com/directory/match/(\\d+)'", ",", "url", ")", "if", "hit", "is", "not", "None", ":", "return", "self", ".", "get_room_id_from_url", "(", "hit", ".", "group", "(", "1", ")", ")", "html", "=", "get_content", "(", "url", ")", "room_id", "=", "match1", "(", "html", ",", "r'room_id\\\":(\\d+)'", ")", "if", "room_id", "is", "None", ":", "log", ".", "wtf", "(", "'Unknown page {}'", ".", "format", "(", "url", ")", ")", "return", "room_id" ]
avg_line_len: 37.714286
score: 11.214286
def time_str(time_t, slug=False):
    '''Converts floating point number a'la time.time() using
    DEFAULT_TIMEFORMAT
    '''
    return datetime.fromtimestamp(int(time_t)).strftime(
        DEFAULT_SLUGFORMAT if slug else DEFAULT_TIMEFORMAT)
[ "def", "time_str", "(", "time_t", ",", "slug", "=", "False", ")", ":", "return", "datetime", ".", "fromtimestamp", "(", "int", "(", "time_t", ")", ")", ".", "strftime", "(", "DEFAULT_SLUGFORMAT", "if", "slug", "else", "DEFAULT_TIMEFORMAT", ")" ]
avg_line_len: 38.5
score: 16.166667
def parse(self, fd):
    """very simple parser - but why would we want it to be complex?"""

    def resolve_args(args):
        # FIXME break this out, it's in common with the templating stuff elsewhere
        root = self.sections[0]
        val_dict = dict(('<' + t + '>', u) for (t, u) in root.get_variables().items())
        resolved_args = []
        for arg in args:
            for subst, value in val_dict.items():
                arg = arg.replace(subst, value)
            resolved_args.append(arg)
        return resolved_args

    def handle_section_defn(keyword, parts):
        if keyword == '@HostAttrs':
            if len(parts) != 1:
                raise ParserException('usage: @HostAttrs <hostname>')
            if self.sections[0].has_pending_with():
                raise ParserException('@with not supported with @HostAttrs')
            self.sections.append(HostAttrs(parts[0]))
            return True
        if keyword == 'Host':
            if len(parts) != 1:
                raise ParserException('usage: Host <hostname>')
            self.sections.append(Host(parts[0], self.sections[0].pop_pending_with()))
            return True

    def handle_vardef(root, keyword, parts):
        if keyword == '@with':
            root.add_pending_with(parts)
            return True

    def handle_set_args(_, parts):
        if len(parts) == 0:
            raise ParserException('usage: @args arg-name ...')
        if not self.is_include():
            return
        if self._args is None or len(self._args) != len(parts):
            raise ParserException('required arguments not passed to include {url} ({parts})'.format(
                url=self._url, parts=', '.join(parts))
            )
        root = self.sections[0]
        for key, value in zip(parts, self._args):
            root.set_value(key, value)

    def handle_set_value(_, parts):
        if len(parts) != 2:
            raise ParserException('usage: @set <key> <value>')
        root = self.sections[0]
        root.set_value(*resolve_args(parts))

    def handle_add_type(section, parts):
        if len(parts) != 1:
            raise ParserException('usage: @is <HostAttrName>')
        section.add_type(parts[0])

    def handle_via(section, parts):
        if len(parts) != 1:
            raise ParserException('usage: @via <Hostname>')
        section.add_line(
            'ProxyCommand',
            ('ssh {args} nc %h %p 2> /dev/null'.format(args=pipes.quote(resolve_args(parts)[0])), )
        )

    def handle_identity(section, parts):
        if len(parts) != 1:
            raise ParserException('usage: @identity <name>')
        section.add_identity(resolve_args(parts)[0])

    def handle_include(_, parts):
        if len(parts) == 0:
            raise ParserException('usage: @include <https://...|/path/to/file.sedge> [arg ...]')
        url = parts[0]
        parsed_url = urllib.parse.urlparse(url)
        if parsed_url.scheme == 'https':
            req = requests.get(url, verify=self._verify_ssl)
            text = req.text
        elif parsed_url.scheme == 'file':
            with open(parsed_url.path) as fd:
                text = fd.read()
        elif parsed_url.scheme == '':
            path = os.path.expanduser(url)
            with open(path) as fd:
                text = fd.read()
        else:
            raise SecurityException('error: @includes may only use paths or https:// or file:// URLs')
        subconfig = SedgeEngine(
            self._key_library,
            StringIO(text),
            self._verify_ssl,
            url=url,
            args=resolve_args(parts[1:]),
            parent_keydefs=self.keydefs,
            via_include=True)
        self.includes.append((url, subconfig))

    def handle_keydef(_, parts):
        if len(parts) < 2:
            raise ParserException('usage: @key <name> [fingerprint]...')
        name = parts[0]
        fingerprints = parts[1:]
        self.keydefs[name] = fingerprints

    def handle_keyword(section, keyword, parts):
        handlers = {
            '@set': handle_set_value,
            '@args': handle_set_args,
            '@is': handle_add_type,
            '@via': handle_via,
            '@include': handle_include,
            '@key': handle_keydef,
            '@identity': handle_identity
        }
        if keyword in handlers:
            handlers[keyword](section, parts)
            return True

    for line in (t.strip() for t in fd):
        if line.startswith('#') or line == '':
            continue
        keyword, parts = SedgeEngine.parse_config_line(line)
        if handle_section_defn(keyword, parts):
            continue
        if handle_vardef(self.sections[0], keyword, parts):
            continue
        current_section = self.sections[-1]
        if handle_keyword(current_section, keyword, parts):
            continue
        if keyword.startswith('@'):
            raise ParserException("unknown expansion keyword {}".format(keyword))
        # use other rather than parts to avoid messing up user
        # whitespace; we don't handle quotes in here as we don't
        # need to
        current_section.add_line(keyword, parts)
[ "def", "parse", "(", "self", ",", "fd", ")", ":", "def", "resolve_args", "(", "args", ")", ":", "# FIXME break this out, it's in common with the templating stuff elsewhere", "root", "=", "self", ".", "sections", "[", "0", "]", "val_dict", "=", "dict", "(", "(", "'<'", "+", "t", "+", "'>'", ",", "u", ")", "for", "(", "t", ",", "u", ")", "in", "root", ".", "get_variables", "(", ")", ".", "items", "(", ")", ")", "resolved_args", "=", "[", "]", "for", "arg", "in", "args", ":", "for", "subst", ",", "value", "in", "val_dict", ".", "items", "(", ")", ":", "arg", "=", "arg", ".", "replace", "(", "subst", ",", "value", ")", "resolved_args", ".", "append", "(", "arg", ")", "return", "resolved_args", "def", "handle_section_defn", "(", "keyword", ",", "parts", ")", ":", "if", "keyword", "==", "'@HostAttrs'", ":", "if", "len", "(", "parts", ")", "!=", "1", ":", "raise", "ParserException", "(", "'usage: @HostAttrs <hostname>'", ")", "if", "self", ".", "sections", "[", "0", "]", ".", "has_pending_with", "(", ")", ":", "raise", "ParserException", "(", "'@with not supported with @HostAttrs'", ")", "self", ".", "sections", ".", "append", "(", "HostAttrs", "(", "parts", "[", "0", "]", ")", ")", "return", "True", "if", "keyword", "==", "'Host'", ":", "if", "len", "(", "parts", ")", "!=", "1", ":", "raise", "ParserException", "(", "'usage: Host <hostname>'", ")", "self", ".", "sections", ".", "append", "(", "Host", "(", "parts", "[", "0", "]", ",", "self", ".", "sections", "[", "0", "]", ".", "pop_pending_with", "(", ")", ")", ")", "return", "True", "def", "handle_vardef", "(", "root", ",", "keyword", ",", "parts", ")", ":", "if", "keyword", "==", "'@with'", ":", "root", ".", "add_pending_with", "(", "parts", ")", "return", "True", "def", "handle_set_args", "(", "_", ",", "parts", ")", ":", "if", "len", "(", "parts", ")", "==", "0", ":", "raise", "ParserException", "(", "'usage: @args arg-name ...'", ")", "if", "not", "self", ".", "is_include", "(", ")", ":", "return", "if", "self", ".", "_args", "is", "None", "or", "len", "(", "self", ".", "_args", ")", "!=", "len", "(", "parts", ")", ":", "raise", "ParserException", "(", "'required arguments not passed to include {url} ({parts})'", ".", "format", "(", "url", "=", "self", ".", "_url", ",", "parts", "=", "', '", ".", "join", "(", "parts", ")", ")", ")", "root", "=", "self", ".", "sections", "[", "0", "]", "for", "key", ",", "value", "in", "zip", "(", "parts", ",", "self", ".", "_args", ")", ":", "root", ".", "set_value", "(", "key", ",", "value", ")", "def", "handle_set_value", "(", "_", ",", "parts", ")", ":", "if", "len", "(", "parts", ")", "!=", "2", ":", "raise", "ParserException", "(", "'usage: @set <key> <value>'", ")", "root", "=", "self", ".", "sections", "[", "0", "]", "root", ".", "set_value", "(", "*", "resolve_args", "(", "parts", ")", ")", "def", "handle_add_type", "(", "section", ",", "parts", ")", ":", "if", "len", "(", "parts", ")", "!=", "1", ":", "raise", "ParserException", "(", "'usage: @is <HostAttrName>'", ")", "section", ".", "add_type", "(", "parts", "[", "0", "]", ")", "def", "handle_via", "(", "section", ",", "parts", ")", ":", "if", "len", "(", "parts", ")", "!=", "1", ":", "raise", "ParserException", "(", "'usage: @via <Hostname>'", ")", "section", ".", "add_line", "(", "'ProxyCommand'", ",", "(", "'ssh {args} nc %h %p 2> /dev/null'", ".", "format", "(", "args", "=", "pipes", ".", "quote", "(", "resolve_args", "(", "parts", ")", "[", "0", "]", ")", ")", ",", ")", ")", "def", "handle_identity", "(", "section", ",", "parts", ")", 
":", "if", "len", "(", "parts", ")", "!=", "1", ":", "raise", "ParserException", "(", "'usage: @identity <name>'", ")", "section", ".", "add_identity", "(", "resolve_args", "(", "parts", ")", "[", "0", "]", ")", "def", "handle_include", "(", "_", ",", "parts", ")", ":", "if", "len", "(", "parts", ")", "==", "0", ":", "raise", "ParserException", "(", "'usage: @include <https://...|/path/to/file.sedge> [arg ...]'", ")", "url", "=", "parts", "[", "0", "]", "parsed_url", "=", "urllib", ".", "parse", ".", "urlparse", "(", "url", ")", "if", "parsed_url", ".", "scheme", "==", "'https'", ":", "req", "=", "requests", ".", "get", "(", "url", ",", "verify", "=", "self", ".", "_verify_ssl", ")", "text", "=", "req", ".", "text", "elif", "parsed_url", ".", "scheme", "==", "'file'", ":", "with", "open", "(", "parsed_url", ".", "path", ")", "as", "fd", ":", "text", "=", "fd", ".", "read", "(", ")", "elif", "parsed_url", ".", "scheme", "==", "''", ":", "path", "=", "os", ".", "path", ".", "expanduser", "(", "url", ")", "with", "open", "(", "path", ")", "as", "fd", ":", "text", "=", "fd", ".", "read", "(", ")", "else", ":", "raise", "SecurityException", "(", "'error: @includes may only use paths or https:// or file:// URLs'", ")", "subconfig", "=", "SedgeEngine", "(", "self", ".", "_key_library", ",", "StringIO", "(", "text", ")", ",", "self", ".", "_verify_ssl", ",", "url", "=", "url", ",", "args", "=", "resolve_args", "(", "parts", "[", "1", ":", "]", ")", ",", "parent_keydefs", "=", "self", ".", "keydefs", ",", "via_include", "=", "True", ")", "self", ".", "includes", ".", "append", "(", "(", "url", ",", "subconfig", ")", ")", "def", "handle_keydef", "(", "_", ",", "parts", ")", ":", "if", "len", "(", "parts", ")", "<", "2", ":", "raise", "ParserException", "(", "'usage: @key <name> [fingerprint]...'", ")", "name", "=", "parts", "[", "0", "]", "fingerprints", "=", "parts", "[", "1", ":", "]", "self", ".", "keydefs", "[", "name", "]", "=", "fingerprints", "def", "handle_keyword", "(", "section", ",", "keyword", ",", "parts", ")", ":", "handlers", "=", "{", "'@set'", ":", "handle_set_value", ",", "'@args'", ":", "handle_set_args", ",", "'@is'", ":", "handle_add_type", ",", "'@via'", ":", "handle_via", ",", "'@include'", ":", "handle_include", ",", "'@key'", ":", "handle_keydef", ",", "'@identity'", ":", "handle_identity", "}", "if", "keyword", "in", "handlers", ":", "handlers", "[", "keyword", "]", "(", "section", ",", "parts", ")", "return", "True", "for", "line", "in", "(", "t", ".", "strip", "(", ")", "for", "t", "in", "fd", ")", ":", "if", "line", ".", "startswith", "(", "'#'", ")", "or", "line", "==", "''", ":", "continue", "keyword", ",", "parts", "=", "SedgeEngine", ".", "parse_config_line", "(", "line", ")", "if", "handle_section_defn", "(", "keyword", ",", "parts", ")", ":", "continue", "if", "handle_vardef", "(", "self", ".", "sections", "[", "0", "]", ",", "keyword", ",", "parts", ")", ":", "continue", "current_section", "=", "self", ".", "sections", "[", "-", "1", "]", "if", "handle_keyword", "(", "current_section", ",", "keyword", ",", "parts", ")", ":", "continue", "if", "keyword", ".", "startswith", "(", "'@'", ")", ":", "raise", "ParserException", "(", "\"unknown expansion keyword {}\"", ".", "format", "(", "keyword", ")", ")", "# use other rather than parts to avoid messing up user", "# whitespace; we don't handle quotes in here as we don't", "# need to", "current_section", ".", "add_line", "(", "keyword", ",", "parts", ")" ]
40.49635
16.394161
def _wait_for_any_event(events, timeout_s):
  """Wait for any of a list of threading.Event objects to be set.

  Args:
    events: List of threading.Event objects.
    timeout_s: Max duration in seconds to wait before returning.

  Returns:
    True if at least one event was set before the timeout expired, else False.
  """
  def any_event_set():
    return any(event.is_set() for event in events)

  result = timeouts.loop_until_timeout_or_true(
      timeout_s, any_event_set, sleep_s=_WAIT_FOR_ANY_EVENT_POLL_S)
  return result or any_event_set()
[ "def", "_wait_for_any_event", "(", "events", ",", "timeout_s", ")", ":", "def", "any_event_set", "(", ")", ":", "return", "any", "(", "event", ".", "is_set", "(", ")", "for", "event", "in", "events", ")", "result", "=", "timeouts", ".", "loop_until_timeout_or_true", "(", "timeout_s", ",", "any_event_set", ",", "sleep_s", "=", "_WAIT_FOR_ANY_EVENT_POLL_S", ")", "return", "result", "or", "any_event_set", "(", ")" ]
30.941176
21.176471
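The helper above polls via an openhtf-internal timeouts module; a minimal standard-library sketch of the same wait-for-any pattern (the poll interval and deadline handling here are stand-ins, not openhtf code):

import threading
import time

def wait_for_any_event(events, timeout_s, poll_s=0.1):
    """Poll until any event in the list is set or the timeout expires."""
    deadline = time.monotonic() + timeout_s
    while time.monotonic() < deadline:
        if any(event.is_set() for event in events):
            return True
        time.sleep(poll_s)
    # Final check, mirroring the `result or any_event_set()` tail above.
    return any(event.is_set() for event in events)

a, b = threading.Event(), threading.Event()
threading.Timer(0.3, b.set).start()   # set one event shortly after starting
print(wait_for_any_event([a, b], timeout_s=2.0))  # True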
def FUNCTIONNOPROTO(self, _cursor_type):
    """Handles function with no prototype."""
    # id, returns, attributes
    returns = _cursor_type.get_result()
    # if self.is_fundamental_type(returns):
    returns = self.parse_cursor_type(returns)
    attributes = []
    obj = typedesc.FunctionType(returns, attributes)
    # argument_types can't be asked. no arguments.
    self.set_location(obj, None)
    return obj
[ "def", "FUNCTIONNOPROTO", "(", "self", ",", "_cursor_type", ")", ":", "# id, returns, attributes", "returns", "=", "_cursor_type", ".", "get_result", "(", ")", "# if self.is_fundamental_type(returns):", "returns", "=", "self", ".", "parse_cursor_type", "(", "returns", ")", "attributes", "=", "[", "]", "obj", "=", "typedesc", ".", "FunctionType", "(", "returns", ",", "attributes", ")", "# argument_types can't be asked. no arguments.", "self", ".", "set_location", "(", "obj", ",", "None", ")", "return", "obj" ]
40.636364
8.909091
def replace_name_with_id(cls, name):
    """
    Replaces a foreign key reference given as a name with the corresponding ID. Works by
    searching for the record in Pulsar and expects to find exactly one hit. First checks
    whether the foreign key reference is an integer value; if so, returns it unchanged, as
    it is presumed to already be the foreign key.

    Raises:
        `pulsarpy.elasticsearch_utils.MultipleHitsException`: Multiple hits were returned from the name search.
        `pulsarpy.models.RecordNotFound`: No results were produced from the name search.
    """
    try:
        int(name)
        return name #Already a presumed ID.
    except ValueError:
        pass
    #Not an int, so maybe a combination of MODEL_ABBR and Primary Key, i.e. B-8.
    if name.split("-")[0] in Meta._MODEL_ABBREVS:
        return int(name.split("-", 1)[1])
    try:
        result = cls.ES.get_record_by_name(cls.ES_INDEX_NAME, name)
        if result:
            return result["id"]
    except pulsarpy.elasticsearch_utils.MultipleHitsException as e:
        raise
    raise RecordNotFound("Name '{}' for model '{}' not found.".format(name, cls.__name__))
[ "def", "replace_name_with_id", "(", "cls", ",", "name", ")", ":", "try", ":", "int", "(", "name", ")", "return", "name", "#Already a presumed ID.", "except", "ValueError", ":", "pass", "#Not an int, so maybe a combination of MODEL_ABBR and Primary Key, i.e. B-8.", "if", "name", ".", "split", "(", "\"-\"", ")", "[", "0", "]", "in", "Meta", ".", "_MODEL_ABBREVS", ":", "return", "int", "(", "name", ".", "split", "(", "\"-\"", ",", "1", ")", "[", "1", "]", ")", "try", ":", "result", "=", "cls", ".", "ES", ".", "get_record_by_name", "(", "cls", ".", "ES_INDEX_NAME", ",", "name", ")", "if", "result", ":", "return", "result", "[", "\"id\"", "]", "except", "pulsarpy", ".", "elasticsearch_utils", ".", "MultipleHitsException", "as", "e", ":", "raise", "raise", "RecordNotFound", "(", "\"Name '{}' for model '{}' not found.\"", ".", "format", "(", "name", ",", "cls", ".", "__name__", ")", ")" ]
48.04
28.6
def append(self, vals: list, index=None):
    """
    Append a row to the main dataframe

    :param vals: list of the row values to add
    :type vals: list
    :param index: index key, defaults to None
    :type index: any, optional

    :example: ``ds.append([0, 2, 2, 3, 4])``
    """
    try:
        self.df.loc[len(self.df.index)] = vals
    except Exception as e:
        self.err(e, self.append, "Can not append row")
        return
    self.ok("Row added to dataframe")
[ "def", "append", "(", "self", ",", "vals", ":", "list", ",", "index", "=", "None", ")", ":", "try", ":", "self", ".", "df", ".", "loc", "[", "len", "(", "self", ".", "df", ".", "index", ")", "]", "=", "vals", "except", "Exception", "as", "e", ":", "self", ".", "err", "(", "e", ",", "self", ".", "append", ",", "\"Can not append row\"", ")", "return", "self", ".", "ok", "(", "\"Row added to dataframe\"", ")" ]
31.176471
13.529412
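A quick pandas check of the corrected row-append used above; the data here is made up for illustration:

import pandas as pd

df = pd.DataFrame(columns=["a", "b", "c", "d", "e"])
df.loc[len(df.index)] = [0, 2, 2, 3, 4]  # append one row in place
print(df)
#    a  b  c  d  e
# 0  0  2  2  3  4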
def liveReceivers(receivers):
    """Filter sequence of receivers to get resolved, live receivers

    This is a generator which will iterate over the passed sequence,
    checking for weak references and resolving them, then returning
    all live receivers.
    """
    for receiver in receivers:
        if isinstance(receiver, WEAKREF_TYPES):
            # Dereference the weak reference.
            receiver = receiver()
            if receiver is not None:
                yield receiver
        else:
            yield receiver
[ "def", "liveReceivers", "(", "receivers", ")", ":", "for", "receiver", "in", "receivers", ":", "if", "isinstance", "(", "receiver", ",", "WEAKREF_TYPES", ")", ":", "# Dereference the weak reference.", "receiver", "=", "receiver", "(", ")", "if", "receiver", "is", "not", "None", ":", "yield", "receiver", "else", ":", "yield", "receiver" ]
32.8125
11.8125
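The dereferencing step above is plain weakref behavior: calling the reference yields the object while it is alive and None once it has been collected. A standalone check (CPython's refcounting collects the receiver immediately on del):

import weakref

class Receiver:
    def __call__(self, sender):
        print("got signal from", sender)

receiver = Receiver()
ref = weakref.ref(receiver)
print(ref() is receiver)  # True: resolves to the live object
del receiver
print(ref())              # None: the receiver was collected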
def record_set_absent(name, zone_name, resource_group, connection_auth=None): ''' .. versionadded:: Fluorine Ensure a record set does not exist in the DNS zone. :param name: Name of the record set. :param zone_name: Name of the DNS zone. :param resource_group: The resource group assigned to the DNS zone. :param connection_auth: A dict with subscription and authentication parameters to be used in connecting to the Azure Resource Manager API. ''' ret = { 'name': name, 'result': False, 'comment': '', 'changes': {} } if not isinstance(connection_auth, dict): ret['comment'] = 'Connection information must be specified via connection_auth dictionary!' return ret rec_set = __salt__['azurearm_dns.record_set_get']( name, zone_name, resource_group, azurearm_log_level='info', **connection_auth ) if 'error' in rec_set: ret['result'] = True ret['comment'] = 'Record set {0} was not found in zone {1}.'.format(name, zone_name) return ret elif __opts__['test']: ret['comment'] = 'Record set {0} would be deleted.'.format(name) ret['result'] = None ret['changes'] = { 'old': rec_set, 'new': {}, } return ret deleted = __salt__['azurearm_dns.record_set_delete'](name, zone_name, resource_group, **connection_auth) if deleted: ret['result'] = True ret['comment'] = 'Record set {0} has been deleted.'.format(name) ret['changes'] = { 'old': rec_set, 'new': {} } return ret ret['comment'] = 'Failed to delete record set {0}!'.format(name) return ret
[ "def", "record_set_absent", "(", "name", ",", "zone_name", ",", "resource_group", ",", "connection_auth", "=", "None", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "False", ",", "'comment'", ":", "''", ",", "'changes'", ":", "{", "}", "}", "if", "not", "isinstance", "(", "connection_auth", ",", "dict", ")", ":", "ret", "[", "'comment'", "]", "=", "'Connection information must be specified via connection_auth dictionary!'", "return", "ret", "rec_set", "=", "__salt__", "[", "'azurearm_dns.record_set_get'", "]", "(", "name", ",", "zone_name", ",", "resource_group", ",", "azurearm_log_level", "=", "'info'", ",", "*", "*", "connection_auth", ")", "if", "'error'", "in", "rec_set", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "'Record set {0} was not found in zone {1}.'", ".", "format", "(", "name", ",", "zone_name", ")", "return", "ret", "elif", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'comment'", "]", "=", "'Record set {0} would be deleted.'", ".", "format", "(", "name", ")", "ret", "[", "'result'", "]", "=", "None", "ret", "[", "'changes'", "]", "=", "{", "'old'", ":", "rec_set", ",", "'new'", ":", "{", "}", ",", "}", "return", "ret", "deleted", "=", "__salt__", "[", "'azurearm_dns.record_set_delete'", "]", "(", "name", ",", "zone_name", ",", "resource_group", ",", "*", "*", "connection_auth", ")", "if", "deleted", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "'Record set {0} has been deleted.'", ".", "format", "(", "name", ")", "ret", "[", "'changes'", "]", "=", "{", "'old'", ":", "rec_set", ",", "'new'", ":", "{", "}", "}", "return", "ret", "ret", "[", "'comment'", "]", "=", "'Failed to delete record set {0}!'", ".", "format", "(", "name", ")", "return", "ret" ]
26.815385
25.738462
def del_doc(self, doc):
    """
    Delete a document
    """
    logger.info("Removing doc from the index: %s" % doc)
    doc = doc.clone()  # make sure it can be serialized safely
    self.docsearch.index.del_doc(doc)
[ "def", "del_doc", "(", "self", ",", "doc", ")", ":", "logger", ".", "info", "(", "\"Removing doc from the index: %s\"", "%", "doc", ")", "doc", "=", "doc", ".", "clone", "(", ")", "# make sure it can be serialized safely", "self", ".", "docsearch", ".", "index", ".", "del_doc", "(", "doc", ")" ]
33.857143
11.285714
def set_beam_prop(self, prop, values, repeat="up"):
    """
    Specify the properties of the beams

    :param prop: name of the section property to set
    :param values: a 1D array of values (one per bay), or a 2D array (one row per storey)
    :param repeat: if 'up' then duplicate up the structure
    :return:
    """
    values = np.array(values)
    if repeat == "up":
        assert len(values.shape) == 1
        values = [values for ss in range(self.n_storeys)]
    else:
        assert len(values.shape) == 2
        if len(values[0]) != self.n_bays:
            raise ModelError("beam depths does not match number of bays (%i)." % self.n_bays)

    for ss in range(self.n_storeys):
        for i in range(self.n_bays):
            self._beams[ss][i].set_section_prop(prop, values[ss][i])
[ "def", "set_beam_prop", "(", "self", ",", "prop", ",", "values", ",", "repeat", "=", "\"up\"", ")", ":", "values", "=", "np", ".", "array", "(", "values", ")", "if", "repeat", "==", "\"up\"", ":", "assert", "len", "(", "values", ".", "shape", ")", "==", "1", "values", "=", "[", "values", "for", "ss", "in", "range", "(", "self", ".", "n_storeys", ")", "]", "else", ":", "assert", "len", "(", "values", ".", "shape", ")", "==", "2", "if", "len", "(", "values", "[", "0", "]", ")", "!=", "self", ".", "n_bays", ":", "raise", "ModelError", "(", "\"beam depths does not match number of bays (%i).\"", "%", "self", ".", "n_bays", ")", "for", "ss", "in", "range", "(", "self", ".", "n_storeys", ")", ":", "for", "i", "in", "range", "(", "self", ".", "n_bays", ")", ":", "self", ".", "_beams", "[", "ss", "]", "[", "i", "]", ".", "set_section_prop", "(", "prop", ",", "values", "[", "ss", "]", "[", "i", "]", ")" ]
37.684211
14.421053
def assert_text_visible(self, text, selector="html", by=By.CSS_SELECTOR,
                        timeout=settings.SMALL_TIMEOUT):
    """ Same as assert_text() """
    if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
        timeout = self.__get_new_timeout(timeout)
    return self.assert_text(text, selector, by=by, timeout=timeout)
[ "def", "assert_text_visible", "(", "self", ",", "text", ",", "selector", "=", "\"html\"", ",", "by", "=", "By", ".", "CSS_SELECTOR", ",", "timeout", "=", "settings", ".", "SMALL_TIMEOUT", ")", ":", "if", "self", ".", "timeout_multiplier", "and", "timeout", "==", "settings", ".", "SMALL_TIMEOUT", ":", "timeout", "=", "self", ".", "__get_new_timeout", "(", "timeout", ")", "return", "self", ".", "assert_text", "(", "text", ",", "selector", ",", "by", "=", "by", ",", "timeout", "=", "timeout", ")" ]
61
21.5
def cleanup_all(data_home=None):
    """
    Cleans up all the example datasets in the data directory specified by
    ``get_data_home`` either to clear up disk space or to start fresh.
    """
    removed = 0
    for name, meta in DATASETS.items():
        _, ext = os.path.splitext(meta['url'])
        removed += cleanup_dataset(name, data_home=data_home, ext=ext)

    print(
        "Removed {} fixture objects from {}".format(removed, get_data_home(data_home))
    )
[ "def", "cleanup_all", "(", "data_home", "=", "None", ")", ":", "removed", "=", "0", "for", "name", ",", "meta", "in", "DATASETS", ".", "items", "(", ")", ":", "_", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "meta", "[", "'url'", "]", ")", "removed", "+=", "cleanup_dataset", "(", "name", ",", "data_home", "=", "data_home", ",", "ext", "=", "ext", ")", "print", "(", "\"Removed {} fixture objects from {}\"", ".", "format", "(", "removed", ",", "get_data_home", "(", "data_home", ")", ")", ")" ]
35.538462
22
def connected_components(graph):
    """
    Connected components.

    @type  graph: graph, hypergraph
    @param graph: Graph.

    @rtype:  dictionary
    @return: Pairing that associates each node to its connected component.
    """
    recursionlimit = getrecursionlimit()
    setrecursionlimit(max(len(graph.nodes())*2,recursionlimit))

    visited = {}
    count = 1

    # For 'each' node not found to belong to a connected component, find its connected
    # component.
    for each in graph:
        if (each not in visited):
            _dfs(graph, visited, count, each)
            count = count + 1

    setrecursionlimit(recursionlimit)
    return visited
[ "def", "connected_components", "(", "graph", ")", ":", "recursionlimit", "=", "getrecursionlimit", "(", ")", "setrecursionlimit", "(", "max", "(", "len", "(", "graph", ".", "nodes", "(", ")", ")", "*", "2", ",", "recursionlimit", ")", ")", "visited", "=", "{", "}", "count", "=", "1", "# For 'each' node not found to belong to a connected component, find its connected", "# component.", "for", "each", "in", "graph", ":", "if", "(", "each", "not", "in", "visited", ")", ":", "_dfs", "(", "graph", ",", "visited", ",", "count", ",", "each", ")", "count", "=", "count", "+", "1", "setrecursionlimit", "(", "recursionlimit", ")", "return", "visited" ]
26.12
19.88
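The same node-to-component labelling can be sketched without the recursion-limit bookkeeping by replacing the library's _dfs helper with an explicit stack over a plain adjacency dict (hypothetical mini-graph):

def connected_components_iterative(adjacency):
    """Map each node to a 1-based component number via iterative DFS."""
    visited = {}
    count = 1
    for start in adjacency:
        if start in visited:
            continue
        stack = [start]
        while stack:
            node = stack.pop()
            if node in visited:
                continue
            visited[node] = count
            stack.extend(adjacency[node])
        count += 1
    return visited

print(connected_components_iterative({"a": ["b"], "b": ["a"], "c": []}))
# {'a': 1, 'b': 1, 'c': 2}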
def get_file(self):
    """ Load data into a file and return file path.

    :return: path to file as string
    """
    content = self._load()
    if not content:
        return None
    filename = "temporary_file.bin"
    with open(filename, "wb") as file_name:
        file_name.write(content)
    return filename
[ "def", "get_file", "(", "self", ")", ":", "content", "=", "self", ".", "_load", "(", ")", "if", "not", "content", ":", "return", "None", "filename", "=", "\"temporary_file.bin\"", "with", "open", "(", "filename", ",", "\"wb\"", ")", "as", "file_name", ":", "file_name", ".", "write", "(", "content", ")", "return", "filename" ]
27.076923
11.230769
def past_date(start='-30d'):
    """
    Returns a ``date`` object in the past between 1 day ago and the
    specified ``start``. ``start`` can be a string, another date, or a
    timedelta. If it's a string, it must start with `-`, followed by an
    integer and a unit, e.g. ``'-30d'``. Defaults to `'-30d'`

    Valid units are:

    * ``'years'``, ``'y'``
    * ``'weeks'``, ``'w'``
    * ``'days'``, ``'d'``
    * ``'hours'``, ``'h'``
    * ``'minutes'``, ``'m'``
    * ``'seconds'``, ``'s'``
    """
    return lambda n, f: f.past_date(
        start_date=start,
        tzinfo=get_timezone(),
    )
[ "def", "past_date", "(", "start", "=", "'-30d'", ")", ":", "return", "lambda", "n", ",", "f", ":", "f", ".", "past_date", "(", "start_date", "=", "start", ",", "tzinfo", "=", "get_timezone", "(", ")", ",", ")" ]
32.166667
17.166667
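The returned lambda defers to a Faker-style past_date provider; assuming the faker package, the underlying call looks like this (the fixed UTC tzinfo stands in for the project's get_timezone()):

import datetime
from faker import Faker

fake = Faker()
d = fake.past_date(start_date="-30d", tzinfo=datetime.timezone.utc)
print(d)  # a date between 30 days ago and yesterday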
def delete_pool(hostname, username, password, name): ''' Delete an existing pool. hostname The host/address of the bigip device username The iControl REST username password The iControl REST password name The name of the pool which will be deleted ''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} if __opts__['test']: return _test_output(ret, 'delete', params={ 'hostname': hostname, 'username': username, 'password': password, 'name': name, } ) #is this pool currently configured? existing = __salt__['bigip.list_pool'](hostname, username, password, name) # if it exists by name if existing['code'] == 200: deleted = __salt__['bigip.delete_pool'](hostname, username, password, name) # did we get rid of it? if deleted['code'] == 200: ret['result'] = True ret['comment'] = 'Pool was successfully deleted.' ret['changes']['old'] = existing['content'] ret['changes']['new'] = {} # something bad happened else: ret = _load_result(deleted, ret) # not found elif existing['code'] == 404: ret['result'] = True ret['comment'] = 'This pool already does not exist. No changes made.' ret['changes']['old'] = {} ret['changes']['new'] = {} else: ret = _load_result(existing, ret) return ret
[ "def", "delete_pool", "(", "hostname", ",", "username", ",", "password", ",", "name", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "False", ",", "'comment'", ":", "''", "}", "if", "__opts__", "[", "'test'", "]", ":", "return", "_test_output", "(", "ret", ",", "'delete'", ",", "params", "=", "{", "'hostname'", ":", "hostname", ",", "'username'", ":", "username", ",", "'password'", ":", "password", ",", "'name'", ":", "name", ",", "}", ")", "#is this pool currently configured?", "existing", "=", "__salt__", "[", "'bigip.list_pool'", "]", "(", "hostname", ",", "username", ",", "password", ",", "name", ")", "# if it exists by name", "if", "existing", "[", "'code'", "]", "==", "200", ":", "deleted", "=", "__salt__", "[", "'bigip.delete_pool'", "]", "(", "hostname", ",", "username", ",", "password", ",", "name", ")", "# did we get rid of it?", "if", "deleted", "[", "'code'", "]", "==", "200", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "'Pool was successfully deleted.'", "ret", "[", "'changes'", "]", "[", "'old'", "]", "=", "existing", "[", "'content'", "]", "ret", "[", "'changes'", "]", "[", "'new'", "]", "=", "{", "}", "# something bad happened", "else", ":", "ret", "=", "_load_result", "(", "deleted", ",", "ret", ")", "# not found", "elif", "existing", "[", "'code'", "]", "==", "404", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "'This pool already does not exist. No changes made.'", "ret", "[", "'changes'", "]", "[", "'old'", "]", "=", "{", "}", "ret", "[", "'changes'", "]", "[", "'new'", "]", "=", "{", "}", "else", ":", "ret", "=", "_load_result", "(", "existing", ",", "ret", ")", "return", "ret" ]
27.735849
20.830189
def p_block(self, p):
    'block : BEGIN block_statements END'
    p[0] = Block(p[2], lineno=p.lineno(1))
    p.set_lineno(0, p.lineno(1))
[ "def", "p_block", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "Block", "(", "p", "[", "2", "]", ",", "lineno", "=", "p", ".", "lineno", "(", "1", ")", ")", "p", ".", "set_lineno", "(", "0", ",", "p", ".", "lineno", "(", "1", ")", ")" ]
36.75
8.25
def _cleanupConnections(senderkey, signal):
    """Delete any empty signals for senderkey. Delete senderkey if empty."""
    try:
        receivers = connections[senderkey][signal]
    except:
        pass
    else:
        if not receivers:
            # No more connected receivers. Therefore, remove the signal.
            try:
                signals = connections[senderkey]
            except KeyError:
                pass
            else:
                del signals[signal]
                if not signals:
                    # No more signal connections. Therefore, remove the sender.
                    _removeSender(senderkey)
[ "def", "_cleanupConnections", "(", "senderkey", ",", "signal", ")", ":", "try", ":", "receivers", "=", "connections", "[", "senderkey", "]", "[", "signal", "]", "except", ":", "pass", "else", ":", "if", "not", "receivers", ":", "# No more connected receivers. Therefore, remove the signal.", "try", ":", "signals", "=", "connections", "[", "senderkey", "]", "except", "KeyError", ":", "pass", "else", ":", "del", "signals", "[", "signal", "]", "if", "not", "signals", ":", "# No more signal connections. Therefore, remove the sender.", "_removeSender", "(", "senderkey", ")" ]
34.666667
18
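Stripped of the dispatcher globals, the prune-empty-containers pattern above looks like this on a plain nested dict:

connections = {"sender1": {"sig": []}}

def cleanup(senderkey, signal):
    signals = connections.get(senderkey)
    if signals is None:
        return
    if not signals.get(signal):
        signals.pop(signal, None)       # remove the empty signal
        if not signals:
            del connections[senderkey]  # remove the empty sender

cleanup("sender1", "sig")
print(connections)  # {}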
def add_hierarchy(self, parent, edge, child): # XXX DEPRECATED """ Helper function to simplify the addition of part_of style objectProperties to graphs. FIXME make a method of makeGraph? """ if type(parent) != rdflib.URIRef: parent = self.check_thing(parent) if type(edge) != rdflib.URIRef: edge = self.check_thing(edge) if type(child) != infixowl.Class: if type(child) != rdflib.URIRef: child = self.check_thing(child) child = infixowl.Class(child, graph=self.g) restriction = infixowl.Restriction(edge, graph=self.g, someValuesFrom=parent) child.subClassOf = [restriction] + [c for c in child.subClassOf]
[ "def", "add_hierarchy", "(", "self", ",", "parent", ",", "edge", ",", "child", ")", ":", "# XXX DEPRECATED", "if", "type", "(", "parent", ")", "!=", "rdflib", ".", "URIRef", ":", "parent", "=", "self", ".", "check_thing", "(", "parent", ")", "if", "type", "(", "edge", ")", "!=", "rdflib", ".", "URIRef", ":", "edge", "=", "self", ".", "check_thing", "(", "edge", ")", "if", "type", "(", "child", ")", "!=", "infixowl", ".", "Class", ":", "if", "type", "(", "child", ")", "!=", "rdflib", ".", "URIRef", ":", "child", "=", "self", ".", "check_thing", "(", "child", ")", "child", "=", "infixowl", ".", "Class", "(", "child", ",", "graph", "=", "self", ".", "g", ")", "restriction", "=", "infixowl", ".", "Restriction", "(", "edge", ",", "graph", "=", "self", ".", "g", ",", "someValuesFrom", "=", "parent", ")", "child", ".", "subClassOf", "=", "[", "restriction", "]", "+", "[", "c", "for", "c", "in", "child", ".", "subClassOf", "]" ]
42.705882
16.941176
def connect(jclassname, url, driver_args=None, jars=None, libs=None): """Open a connection to a database using a JDBC driver and return a Connection instance. jclassname: Full qualified Java class name of the JDBC driver. url: Database url as required by the JDBC driver. driver_args: Dictionary or sequence of arguments to be passed to the Java DriverManager.getConnection method. Usually sequence of username and password for the db. Alternatively a dictionary of connection arguments (where `user` and `password` would probably be included). See http://docs.oracle.com/javase/7/docs/api/java/sql/DriverManager.html for more details jars: Jar filename or sequence of filenames for the JDBC driver libs: Dll/so filenames or sequence of dlls/sos used as shared library by the JDBC driver """ if isinstance(driver_args, string_type): driver_args = [ driver_args ] if not driver_args: driver_args = [] if jars: if isinstance(jars, string_type): jars = [ jars ] else: jars = [] if libs: if isinstance(libs, string_type): libs = [ libs ] else: libs = [] jconn = _jdbc_connect(jclassname, url, driver_args, jars, libs) return Connection(jconn, _converters)
[ "def", "connect", "(", "jclassname", ",", "url", ",", "driver_args", "=", "None", ",", "jars", "=", "None", ",", "libs", "=", "None", ")", ":", "if", "isinstance", "(", "driver_args", ",", "string_type", ")", ":", "driver_args", "=", "[", "driver_args", "]", "if", "not", "driver_args", ":", "driver_args", "=", "[", "]", "if", "jars", ":", "if", "isinstance", "(", "jars", ",", "string_type", ")", ":", "jars", "=", "[", "jars", "]", "else", ":", "jars", "=", "[", "]", "if", "libs", ":", "if", "isinstance", "(", "libs", ",", "string_type", ")", ":", "libs", "=", "[", "libs", "]", "else", ":", "libs", "=", "[", "]", "jconn", "=", "_jdbc_connect", "(", "jclassname", ",", "url", ",", "driver_args", ",", "jars", ",", "libs", ")", "return", "Connection", "(", "jconn", ",", "_converters", ")" ]
40.333333
18.515152
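A hedged usage sketch for the signature above, assuming a SQLite JDBC driver jar has been downloaded to ./sqlite-jdbc.jar (the driver class name and jar path are illustrative, and a JVM must be available):

import jaydebeapi  # the module defining connect() above

conn = jaydebeapi.connect(
    "org.sqlite.JDBC",           # jclassname
    "jdbc:sqlite:/tmp/demo.db",  # url
    [],                          # driver_args: SQLite needs no credentials
    jars="./sqlite-jdbc.jar",    # a single filename is normalized to a list
)
curs = conn.cursor()
curs.execute("select 1")
print(curs.fetchall())  # [(1,)]
conn.close()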
def verify_id(ctx, param, app):
    """Verify the experiment id."""
    if app is None:
        raise TypeError("Select an experiment using the --app parameter.")
    elif app[0:5] == "dlgr-":
        raise ValueError(
            "The --app parameter requires the full "
            "UUID beginning with {}-...".format(app[5:13])
        )
    return app
[ "def", "verify_id", "(", "ctx", ",", "param", ",", "app", ")", ":", "if", "app", "is", "None", ":", "raise", "TypeError", "(", "\"Select an experiment using the --app parameter.\"", ")", "elif", "app", "[", "0", ":", "5", "]", "==", "\"dlgr-\"", ":", "raise", "ValueError", "(", "\"The --app parameter requires the full \"", "\"UUID beginning with {}-...\"", ".", "format", "(", "app", "[", "5", ":", "13", "]", ")", ")", "return", "app" ]
34.6
17.7
def profile_package(self):
    """Returns memory stats for a package."""
    target_modules = base_profiler.get_pkg_module_names(self._run_object)
    try:
        with _CodeEventsTracker(target_modules) as prof:
            prof.compute_mem_overhead()
            runpy.run_path(self._run_object, run_name='__main__')
    except SystemExit:
        pass
    return prof, None
[ "def", "profile_package", "(", "self", ")", ":", "target_modules", "=", "base_profiler", ".", "get_pkg_module_names", "(", "self", ".", "_run_object", ")", "try", ":", "with", "_CodeEventsTracker", "(", "target_modules", ")", "as", "prof", ":", "prof", ".", "compute_mem_overhead", "(", ")", "runpy", ".", "run_path", "(", "self", ".", "_run_object", ",", "run_name", "=", "'__main__'", ")", "except", "SystemExit", ":", "pass", "return", "prof", ",", "None" ]
40.3
18.4
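The profiler drives the target through runpy.run_path, which executes a file as if it were the main script; a minimal standalone illustration of that building block:

import runpy
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as f:
    f.write("print('running as', __name__)")
    path = f.name

runpy.run_path(path, run_name="__main__")  # prints: running as __main__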
def authed_post(self, url, data, response_code=200, follow=False, headers={}):
    """Does a django test client ``post`` against the given url after
    logging in the admin first.

    :param url: URL to fetch
    :param data: Dictionary to form contents to post
    :param response_code: Expected response code from the URL fetch. This
        value is asserted. Defaults to 200
    :param follow: Whether to follow redirects after the post. Defaults to
        False
    :param headers: Optional dictionary of headers to send in with the
        request
    :returns: Django testing ``Response`` object
    """
    if not self.authed:
        self.authorize()

    response = self.client.post(url, data, follow=follow, **headers)
    self.assertEqual(response_code, response.status_code)
    return response
[ "def", "authed_post", "(", "self", ",", "url", ",", "data", ",", "response_code", "=", "200", ",", "follow", "=", "False", ",", "headers", "=", "{", "}", ")", ":", "if", "not", "self", ".", "authed", ":", "self", ".", "authorize", "(", ")", "response", "=", "self", ".", "client", ".", "post", "(", "url", ",", "data", ",", "follow", "=", "follow", ",", "*", "*", "headers", ")", "self", ".", "assertEqual", "(", "response_code", ",", "response", ".", "status_code", ")", "return", "response" ]
35.695652
17.521739
def cronitor(self): """Wrap run with requests to cronitor.""" url = f'https://cronitor.link/{self.opts.cronitor}/{{}}' try: run_url = url.format('run') self.logger.debug(f'Pinging {run_url}') requests.get(run_url, timeout=self.opts.timeout) except requests.exceptions.RequestException as e: self.logger.exception(e) # Cronitor may be having an outage, but we still want to run our stuff output, exit_status = self.run() endpoint = 'complete' if exit_status == 0 else 'fail' try: ping_url = url.format(endpoint) self.logger.debug('Pinging {}'.format(ping_url)) requests.get(ping_url, timeout=self.opts.timeout) except requests.exceptions.RequestException as e: self.logger.exception(e) return output, exit_status
[ "def", "cronitor", "(", "self", ")", ":", "url", "=", "f'https://cronitor.link/{self.opts.cronitor}/{{}}'", "try", ":", "run_url", "=", "url", ".", "format", "(", "'run'", ")", "self", ".", "logger", ".", "debug", "(", "f'Pinging {run_url}'", ")", "requests", ".", "get", "(", "run_url", ",", "timeout", "=", "self", ".", "opts", ".", "timeout", ")", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "e", ":", "self", ".", "logger", ".", "exception", "(", "e", ")", "# Cronitor may be having an outage, but we still want to run our stuff", "output", ",", "exit_status", "=", "self", ".", "run", "(", ")", "endpoint", "=", "'complete'", "if", "exit_status", "==", "0", "else", "'fail'", "try", ":", "ping_url", "=", "url", ".", "format", "(", "endpoint", ")", "self", ".", "logger", ".", "debug", "(", "'Pinging {}'", ".", "format", "(", "ping_url", ")", ")", "requests", ".", "get", "(", "ping_url", ",", "timeout", "=", "self", ".", "opts", ".", "timeout", ")", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "e", ":", "self", ".", "logger", ".", "exception", "(", "e", ")", "return", "output", ",", "exit_status" ]
34.76
20.96
def register_with_password(self, username, password): """ Register for a new account on this HS. Args: username (str): Account username password (str): Account password Returns: str: Access Token Raises: MatrixRequestError """ response = self.api.register( auth_body={"type": "m.login.dummy"}, kind='user', username=username, password=password, ) return self._post_registration(response)
[ "def", "register_with_password", "(", "self", ",", "username", ",", "password", ")", ":", "response", "=", "self", ".", "api", ".", "register", "(", "auth_body", "=", "{", "\"type\"", ":", "\"m.login.dummy\"", "}", ",", "kind", "=", "'user'", ",", "username", "=", "username", ",", "password", "=", "password", ",", ")", "return", "self", ".", "_post_registration", "(", "response", ")" ]
27.35
15.8
def prev(self):
    """Get the previous segment."""
    seg = Segment(segment_t=idaapi.get_prev_seg(self.ea))
    if seg.ea >= self.ea:
        raise exceptions.NoMoreSegments("This is the first segment. no segments exist before it.")
    return seg
[ "def", "prev", "(", "self", ")", ":", "seg", "=", "Segment", "(", "segment_t", "=", "idaapi", ".", "get_prev_seg", "(", "self", ".", "ea", ")", ")", "if", "seg", ".", "ea", ">=", "self", ".", "ea", ":", "raise", "exceptions", ".", "NoMoreSegments", "(", "\"This is the first segment. no segments exist before it.\"", ")", "return", "seg" ]
33
27.625
def _assign_value(self, pbuf_dp, value):
    """Assigns a value to the protobuf object."""
    self._assign_value_by_type(pbuf_dp, value, _bool=False,
                               error_prefix='Invalid value')
[ "def", "_assign_value", "(", "self", ",", "pbuf_dp", ",", "value", ")", ":", "self", ".", "_assign_value_by_type", "(", "pbuf_dp", ",", "value", ",", "_bool", "=", "False", ",", "error_prefix", "=", "'Invalid value'", ")" ]
54
11.75
def _sign_simple_signature_fulfillment(cls, input_, message, key_pairs): """Signs a Ed25519Fulfillment. Args: input_ (:class:`~bigchaindb.common.transaction. Input`) The input to be signed. message (str): The message to be signed key_pairs (dict): The keys to sign the Transaction with. """ # NOTE: To eliminate the dangers of accidentally signing a condition by # reference, we remove the reference of input_ here # intentionally. If the user of this class knows how to use it, # this should never happen, but then again, never say never. input_ = deepcopy(input_) public_key = input_.owners_before[0] message = sha3_256(message.encode()) if input_.fulfills: message.update('{}{}'.format( input_.fulfills.txid, input_.fulfills.output).encode()) try: # cryptoconditions makes no assumptions of the encoding of the # message to sign or verify. It only accepts bytestrings input_.fulfillment.sign( message.digest(), base58.b58decode(key_pairs[public_key].encode())) except KeyError: raise KeypairMismatchException('Public key {} is not a pair to ' 'any of the private keys' .format(public_key)) return input_
[ "def", "_sign_simple_signature_fulfillment", "(", "cls", ",", "input_", ",", "message", ",", "key_pairs", ")", ":", "# NOTE: To eliminate the dangers of accidentally signing a condition by", "# reference, we remove the reference of input_ here", "# intentionally. If the user of this class knows how to use it,", "# this should never happen, but then again, never say never.", "input_", "=", "deepcopy", "(", "input_", ")", "public_key", "=", "input_", ".", "owners_before", "[", "0", "]", "message", "=", "sha3_256", "(", "message", ".", "encode", "(", ")", ")", "if", "input_", ".", "fulfills", ":", "message", ".", "update", "(", "'{}{}'", ".", "format", "(", "input_", ".", "fulfills", ".", "txid", ",", "input_", ".", "fulfills", ".", "output", ")", ".", "encode", "(", ")", ")", "try", ":", "# cryptoconditions makes no assumptions of the encoding of the", "# message to sign or verify. It only accepts bytestrings", "input_", ".", "fulfillment", ".", "sign", "(", "message", ".", "digest", "(", ")", ",", "base58", ".", "b58decode", "(", "key_pairs", "[", "public_key", "]", ".", "encode", "(", ")", ")", ")", "except", "KeyError", ":", "raise", "KeypairMismatchException", "(", "'Public key {} is not a pair to '", "'any of the private keys'", ".", "format", "(", "public_key", ")", ")", "return", "input_" ]
48.633333
22.333333
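The incremental hashing used above (hash the message, then fold in the spent output's txid and index with update()) is standard hashlib behavior; a sketch with stand-in values:

from hashlib import sha3_256

message = sha3_256(b"transaction-serialization")
# Mixing in extra context updates the running digest, as in the code above.
message.update("{}{}".format("some-txid", 0).encode())
print(message.hexdigest())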
def taper(self, side='leftright'): """Taper the ends of this `TimeSeries` smoothly to zero. Parameters ---------- side : `str`, optional the side of the `TimeSeries` to taper, must be one of `'left'`, `'right'`, or `'leftright'` Returns ------- out : `TimeSeries` a copy of `self` tapered at one or both ends Raises ------ ValueError if `side` is not one of `('left', 'right', 'leftright')` Examples -------- To see the effect of the Planck-taper window, we can taper a sinusoidal `TimeSeries` at both ends: >>> import numpy >>> from gwpy.timeseries import TimeSeries >>> t = numpy.linspace(0, 1, 2048) >>> series = TimeSeries(numpy.cos(10.5*numpy.pi*t), times=t) >>> tapered = series.taper() We can plot it to see how the ends now vary smoothly from 0 to 1: >>> from gwpy.plot import Plot >>> plot = Plot(series, tapered, separate=True, sharex=True) >>> plot.show() Notes ----- The :meth:`TimeSeries.taper` automatically tapers from the second stationary point (local maximum or minimum) on the specified side of the input. However, the method will never taper more than half the full width of the `TimeSeries`, and will fail if there are no stationary points. See :func:`~gwpy.signal.window.planck` for the generic Planck taper window, and see :func:`scipy.signal.get_window` for other common window formats. """ # check window properties if side not in ('left', 'right', 'leftright'): raise ValueError("side must be one of 'left', 'right', " "or 'leftright'") out = self.copy() # identify the second stationary point away from each boundary, # else default to half the TimeSeries width nleft, nright = 0, 0 mini, = signal.argrelmin(out.value) maxi, = signal.argrelmax(out.value) if 'left' in side: nleft = max(mini[0], maxi[0]) nleft = min(nleft, self.size/2) if 'right' in side: nright = out.size - min(mini[-1], maxi[-1]) nright = min(nright, self.size/2) out *= planck(out.size, nleft=nleft, nright=nright) return out
[ "def", "taper", "(", "self", ",", "side", "=", "'leftright'", ")", ":", "# check window properties", "if", "side", "not", "in", "(", "'left'", ",", "'right'", ",", "'leftright'", ")", ":", "raise", "ValueError", "(", "\"side must be one of 'left', 'right', \"", "\"or 'leftright'\"", ")", "out", "=", "self", ".", "copy", "(", ")", "# identify the second stationary point away from each boundary,", "# else default to half the TimeSeries width", "nleft", ",", "nright", "=", "0", ",", "0", "mini", ",", "=", "signal", ".", "argrelmin", "(", "out", ".", "value", ")", "maxi", ",", "=", "signal", ".", "argrelmax", "(", "out", ".", "value", ")", "if", "'left'", "in", "side", ":", "nleft", "=", "max", "(", "mini", "[", "0", "]", ",", "maxi", "[", "0", "]", ")", "nleft", "=", "min", "(", "nleft", ",", "self", ".", "size", "/", "2", ")", "if", "'right'", "in", "side", ":", "nright", "=", "out", ".", "size", "-", "min", "(", "mini", "[", "-", "1", "]", ",", "maxi", "[", "-", "1", "]", ")", "nright", "=", "min", "(", "nright", ",", "self", ".", "size", "/", "2", ")", "out", "*=", "planck", "(", "out", ".", "size", ",", "nleft", "=", "nleft", ",", "nright", "=", "nright", ")", "return", "out" ]
35.939394
20.681818
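The stationary-point search above leans on scipy.signal.argrelmin/argrelmax; checking that building block on the docstring's sinusoid, without gwpy:

import numpy as np
from scipy import signal

t = np.linspace(0, 1, 2048)
y = np.cos(10.5 * np.pi * t)
mini, = signal.argrelmin(y)
maxi, = signal.argrelmax(y)
print(mini[0], maxi[0])  # indices of the first local minimum and maximum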
def get_all_templates(self, params=None):
    """
    Get all templates

    This will iterate over all pages until it gets all elements.
    So if the rate limit is exceeded, it will throw an exception and you will get nothing.

    :param params: search params
    :return: list
    """
    if not params:
        params = {}

    return self._iterate_through_pages(self.get_templates_per_page, resource=TEMPLATES, **{'params': params})
[ "def", "get_all_templates", "(", "self", ",", "params", "=", "None", ")", ":", "if", "not", "params", ":", "params", "=", "{", "}", "return", "self", ".", "_iterate_through_pages", "(", "self", ".", "get_templates_per_page", ",", "resource", "=", "TEMPLATES", ",", "*", "*", "{", "'params'", ":", "params", "}", ")" ]
38.333333
22
def init(paths, output, **kwargs):
    """Init data package from list of files.

    It will also infer tabular data's schemas from their contents.
    """
    dp = goodtables.init_datapackage(paths)

    click.secho(
        json_module.dumps(dp.descriptor, indent=4),
        file=output
    )

    exit(dp.valid)
[ "def", "init", "(", "paths", ",", "output", ",", "*", "*", "kwargs", ")", ":", "dp", "=", "goodtables", ".", "init_datapackage", "(", "paths", ")", "click", ".", "secho", "(", "json_module", ".", "dumps", "(", "dp", ".", "descriptor", ",", "indent", "=", "4", ")", ",", "file", "=", "output", ")", "exit", "(", "dp", ".", "valid", ")" ]
23.307692
20.615385
def _generate_filenames(sources): """Generate filenames. :param tuple sources: Sequence of strings representing path to file(s). :return: Path to file(s). :rtype: :py:class:`str` """ for source in sources: if os.path.isdir(source): for path, dirlist, filelist in os.walk(source): for fname in filelist: if nmrstarlib.VERBOSE: print("Processing file: {}".format(os.path.abspath(fname))) if GenericFilePath.is_compressed(fname): if nmrstarlib.VERBOSE: print("Skipping compressed file: {}".format(os.path.abspath(fname))) continue else: yield os.path.join(path, fname) elif os.path.isfile(source): yield source elif GenericFilePath.is_url(source): yield source elif source.isdigit(): try: urlopen(nmrstarlib.BMRB_REST + source) yield nmrstarlib.BMRB_REST + source except HTTPError: urlopen(nmrstarlib.PDB_REST + source + ".cif") yield nmrstarlib.PDB_REST + source + ".cif" elif re.match("[\w\d]{4}", source): yield nmrstarlib.PDB_REST + source + ".cif" else: raise TypeError("Unknown file source.")
[ "def", "_generate_filenames", "(", "sources", ")", ":", "for", "source", "in", "sources", ":", "if", "os", ".", "path", ".", "isdir", "(", "source", ")", ":", "for", "path", ",", "dirlist", ",", "filelist", "in", "os", ".", "walk", "(", "source", ")", ":", "for", "fname", "in", "filelist", ":", "if", "nmrstarlib", ".", "VERBOSE", ":", "print", "(", "\"Processing file: {}\"", ".", "format", "(", "os", ".", "path", ".", "abspath", "(", "fname", ")", ")", ")", "if", "GenericFilePath", ".", "is_compressed", "(", "fname", ")", ":", "if", "nmrstarlib", ".", "VERBOSE", ":", "print", "(", "\"Skipping compressed file: {}\"", ".", "format", "(", "os", ".", "path", ".", "abspath", "(", "fname", ")", ")", ")", "continue", "else", ":", "yield", "os", ".", "path", ".", "join", "(", "path", ",", "fname", ")", "elif", "os", ".", "path", ".", "isfile", "(", "source", ")", ":", "yield", "source", "elif", "GenericFilePath", ".", "is_url", "(", "source", ")", ":", "yield", "source", "elif", "source", ".", "isdigit", "(", ")", ":", "try", ":", "urlopen", "(", "nmrstarlib", ".", "BMRB_REST", "+", "source", ")", "yield", "nmrstarlib", ".", "BMRB_REST", "+", "source", "except", "HTTPError", ":", "urlopen", "(", "nmrstarlib", ".", "PDB_REST", "+", "source", "+", "\".cif\"", ")", "yield", "nmrstarlib", ".", "PDB_REST", "+", "source", "+", "\".cif\"", "elif", "re", ".", "match", "(", "\"[\\w\\d]{4}\"", ",", "source", ")", ":", "yield", "nmrstarlib", ".", "PDB_REST", "+", "source", "+", "\".cif\"", "else", ":", "raise", "TypeError", "(", "\"Unknown file source.\"", ")" ]
33.731707
19.512195
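The directory branch above is a walk-and-yield generator; the core pattern, with the compression/URL/BMRB handling stripped out:

import os

def iter_files(source):
    """Yield file paths under a directory, or the path itself for a file."""
    if os.path.isdir(source):
        for path, _dirs, filelist in os.walk(source):
            for fname in filelist:
                yield os.path.join(path, fname)
    elif os.path.isfile(source):
        yield source

for p in iter_files("."):
    print(p)   # first file found in the current directory
    break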
def remove_major_minor_suffix(scripts):
    """Filters out executables that already contain a "-MAJOR.MINOR" suffix.
    """
    minor_major_regex = re.compile(r"-\d\.?\d?$")
    return [x for x in scripts if not minor_major_regex.search(x)]
[ "def", "remove_major_minor_suffix", "(", "scripts", ")", ":", "minor_major_regex", "=", "re", ".", "compile", "(", "r\"-\\d\\.?\\d?$\"", ")", "return", "[", "x", "for", "x", "in", "scripts", "if", "not", "minor_major_regex", ".", "search", "(", "x", ")", "]" ]
56.25
8.5
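With the dot escaped as above, the pattern matches exactly a trailing -MAJOR or -MAJOR.MINOR; a quick check:

import re

minor_major_regex = re.compile(r"-\d\.?\d?$")
scripts = ["pip", "pip-3", "pip-3.7", "pip-latest"]
print([x for x in scripts if not minor_major_regex.search(x)])
# ['pip', 'pip-latest']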
def initialize(cls) -> None: """Initializes the ``SIGCHLD`` handler. The signal handler is run on an `.IOLoop` to avoid locking issues. Note that the `.IOLoop` used for signal handling need not be the same one used by individual Subprocess objects (as long as the ``IOLoops`` are each running in separate threads). .. versionchanged:: 5.0 The ``io_loop`` argument (deprecated since version 4.1) has been removed. Availability: Unix """ if cls._initialized: return io_loop = ioloop.IOLoop.current() cls._old_sigchld = signal.signal( signal.SIGCHLD, lambda sig, frame: io_loop.add_callback_from_signal(cls._cleanup), ) cls._initialized = True
[ "def", "initialize", "(", "cls", ")", "->", "None", ":", "if", "cls", ".", "_initialized", ":", "return", "io_loop", "=", "ioloop", ".", "IOLoop", ".", "current", "(", ")", "cls", ".", "_old_sigchld", "=", "signal", ".", "signal", "(", "signal", ".", "SIGCHLD", ",", "lambda", "sig", ",", "frame", ":", "io_loop", ".", "add_callback_from_signal", "(", "cls", ".", "_cleanup", ")", ",", ")", "cls", ".", "_initialized", "=", "True" ]
35.636364
20.545455
def get_Mapping_key_value(mp):
    """Retrieves the key and value types from a PEP 484 mapping or
    subclass of such.
    mp must be a (subclass of) typing.Mapping.
    """
    try:
        res = _select_Generic_superclass_parameters(mp, typing.Mapping)
    except TypeError:
        res = None
    if res is None:
        raise TypeError("Has no key/value types: "+type_str(mp))
    else:
        return tuple(res)
[ "def", "get_Mapping_key_value", "(", "mp", ")", ":", "try", ":", "res", "=", "_select_Generic_superclass_parameters", "(", "mp", ",", "typing", ".", "Mapping", ")", "except", "TypeError", ":", "res", "=", "None", "if", "res", "is", "None", ":", "raise", "TypeError", "(", "\"Has no key/value types: \"", "+", "type_str", "(", "mp", ")", ")", "else", ":", "return", "tuple", "(", "res", ")" ]
33.5
17.583333
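On Python 3.8+, typing.get_args offers the same key/value extraction directly and can serve as a cross-check for the mapping-introspection helper above:

import typing

key_type, value_type = typing.get_args(typing.Mapping[str, int])
print(key_type, value_type)  # <class 'str'> <class 'int'>

class StrIntMap(typing.Mapping[str, int]):  # a subclass carrying the parameters
    ...

print(typing.get_args(StrIntMap.__orig_bases__[0]))  # (<class 'str'>, <class 'int'>)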
def FetchRequestsAndResponses(self, session_id, timestamp=None):
  """Fetches all outstanding requests and responses for this flow.

  We first cache all requests and responses for this flow in memory to
  prevent round trips.

  Args:
    session_id: The session_id to get the requests/responses for.
    timestamp: Tuple (start, end) with a time range. Fetched requests and
      responses will have timestamps in this range.

  Yields:
    a tuple (request protobuf, list of response messages) in ascending
    order of request ids.

  Raises:
    MoreDataException: When there is more data available than read by the
      limited query.
  """
  if timestamp is None:
    timestamp = (0, self.frozen_timestamp or rdfvalue.RDFDatetime.Now())

  num_requests = 0
  for request, responses in self.data_store.ReadRequestsAndResponses(
      session_id,
      timestamp=timestamp,
      request_limit=self.request_limit,
      response_limit=self.response_limit):
    yield (request, responses)
    num_requests += 1

  if num_requests >= self.request_limit:
    raise MoreDataException()
[ "def", "FetchRequestsAndResponses", "(", "self", ",", "session_id", ",", "timestamp", "=", "None", ")", ":", "if", "timestamp", "is", "None", ":", "timestamp", "=", "(", "0", ",", "self", ".", "frozen_timestamp", "or", "rdfvalue", ".", "RDFDatetime", ".", "Now", "(", ")", ")", "num_requests", "=", "0", "for", "request", ",", "responses", "in", "self", ".", "data_store", ".", "ReadRequestsAndResponses", "(", "session_id", ",", "timestamp", "=", "timestamp", ",", "request_limit", "=", "self", ".", "request_limit", ",", "response_limit", "=", "self", ".", "response_limit", ")", ":", "yield", "(", "request", ",", "responses", ")", "num_requests", "+=", "1", "if", "num_requests", ">=", "self", ".", "request_limit", ":", "raise", "MoreDataException", "(", ")" ]
34.121212
22.212121
def auto_init_default(column): """ Set the default value for a column when it's first accessed rather than first committed to the database. """ if isinstance(column, ColumnProperty): default = column.columns[0].default else: default = column.default @event.listens_for(column, 'init_scalar', retval=True, propagate=True) def init_scalar(target, value, dict_): # A subclass may override the column and not provide a default. Watch out for that. if default: if default.is_callable: value = default.arg(None) elif default.is_scalar: value = default.arg else: raise NotImplementedError("Can't invoke pre-default for a SQL-level column default") dict_[column.key] = value return value
[ "def", "auto_init_default", "(", "column", ")", ":", "if", "isinstance", "(", "column", ",", "ColumnProperty", ")", ":", "default", "=", "column", ".", "columns", "[", "0", "]", ".", "default", "else", ":", "default", "=", "column", ".", "default", "@", "event", ".", "listens_for", "(", "column", ",", "'init_scalar'", ",", "retval", "=", "True", ",", "propagate", "=", "True", ")", "def", "init_scalar", "(", "target", ",", "value", ",", "dict_", ")", ":", "# A subclass may override the column and not provide a default. Watch out for that.", "if", "default", ":", "if", "default", ".", "is_callable", ":", "value", "=", "default", ".", "arg", "(", "None", ")", "elif", "default", ".", "is_scalar", ":", "value", "=", "default", ".", "arg", "else", ":", "raise", "NotImplementedError", "(", "\"Can't invoke pre-default for a SQL-level column default\"", ")", "dict_", "[", "column", ".", "key", "]", "=", "value", "return", "value" ]
37.772727
16.318182
def retrieve_object_from_file(file_name, save_key, file_location):
    """
    Function to retrieve objects from a shelve
    Args:
        file_name: Shelve storage file name
        save_key: The name of the key the item is stored in
        file_location: The location of the file, derived from the os module

    Returns: The stored object

    """
    shelve_store = None
    file = __os.path.join(file_location, file_name)
    try:
        shelve_store = __shelve.open(file)
    except Exception as e:
        LOGGER.critical('Function retrieve_object_from_file Error {error} ignoring any errors'.format(error=e))
        __sys.exit('Storage dB is not readable, closing App!!')
    stored_object = shelve_store.get(save_key)
    shelve_store.close()
    return stored_object
[ "def", "retrieve_object_from_file", "(", "file_name", ",", "save_key", ",", "file_location", ")", ":", "shelve_store", "=", "None", "file", "=", "__os", ".", "path", ".", "join", "(", "file_location", ",", "file_name", ")", "try", ":", "shelve_store", "=", "__shelve", ".", "open", "(", "file", ")", "except", "Exception", "as", "e", ":", "LOGGER", ".", "critical", "(", "'Function retrieve_object_from_file Error {error} ignoring any errors'", ".", "format", "(", "error", "=", "e", ")", ")", "__sys", ".", "exit", "(", "'Storage dB is not readable, closing App!!'", ")", "stored_object", "=", "shelve_store", ".", "get", "(", "save_key", ")", "shelve_store", ".", "close", "(", ")", "return", "stored_object" ]
36.52381
19.47619
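A round-trip sketch of the shelve pattern above (open, store, reopen, get by key), using a temp directory in place of the module's file_location plumbing:

import os
import shelve
import tempfile

store_path = os.path.join(tempfile.mkdtemp(), "storage")
with shelve.open(store_path) as db:
    db["save_key"] = {"answer": 42}

with shelve.open(store_path) as db:
    print(db.get("save_key"))  # {'answer': 42}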
def stretch(arr, fields=None, return_indices=False): """Stretch an array. Stretch an array by ``hstack()``-ing multiple array fields while preserving column names and record array structure. If a scalar field is specified, it will be stretched along with array fields. Parameters ---------- arr : NumPy structured or record array The array to be stretched. fields : list of strings or string, optional (default=None) A list of column names or a single column name to stretch. If ``fields`` is a string, then the output array is a one-dimensional unstructured array containing only the stretched elements of that field. If None, then stretch all fields. return_indices : bool, optional (default=False) If True, the array index of each stretched array entry will be returned in addition to the stretched array. This changes the return type of this function to a tuple consisting of a structured array and a numpy int64 array. Returns ------- ret : A NumPy structured array The stretched array. Examples -------- >>> import numpy as np >>> from root_numpy import stretch >>> arr = np.empty(2, dtype=[('scalar', np.int), ('array', 'O')]) >>> arr[0] = (0, np.array([1, 2, 3], dtype=np.float)) >>> arr[1] = (1, np.array([4, 5, 6], dtype=np.float)) >>> stretch(arr, ['scalar', 'array']) array([(0, 1.0), (0, 2.0), (0, 3.0), (1, 4.0), (1, 5.0), (1, 6.0)], dtype=[('scalar', '<i8'), ('array', '<f8')]) """ dtype = [] len_array = None flatten = False if fields is None: fields = arr.dtype.names elif isinstance(fields, string_types): fields = [fields] flatten = True # Construct dtype and check consistency for field in fields: dt = arr.dtype[field] if dt == 'O' or len(dt.shape): if dt == 'O': # Variable-length array field lengths = VLEN(arr[field]) else: # Fixed-length array field lengths = np.repeat(dt.shape[0], arr.shape[0]) if len_array is None: len_array = lengths elif not np.array_equal(lengths, len_array): raise ValueError( "inconsistent lengths of array columns in input") if dt == 'O': dtype.append((field, arr[field][0].dtype)) else: dtype.append((field, arr[field].dtype, dt.shape[1:])) else: # Scalar field dtype.append((field, dt)) if len_array is None: raise RuntimeError("no array column in input") # Build stretched output ret = np.empty(np.sum(len_array), dtype=dtype) for field in fields: dt = arr.dtype[field] if dt == 'O' or len(dt.shape) == 1: # Variable-length or 1D fixed-length array field ret[field] = np.hstack(arr[field]) elif len(dt.shape): # Multidimensional fixed-length array field ret[field] = np.vstack(arr[field]) else: # Scalar field ret[field] = np.repeat(arr[field], len_array) if flatten: ret = ret[fields[0]] if return_indices: idx = np.concatenate(list(map(np.arange, len_array))) return ret, idx return ret
[ "def", "stretch", "(", "arr", ",", "fields", "=", "None", ",", "return_indices", "=", "False", ")", ":", "dtype", "=", "[", "]", "len_array", "=", "None", "flatten", "=", "False", "if", "fields", "is", "None", ":", "fields", "=", "arr", ".", "dtype", ".", "names", "elif", "isinstance", "(", "fields", ",", "string_types", ")", ":", "fields", "=", "[", "fields", "]", "flatten", "=", "True", "# Construct dtype and check consistency", "for", "field", "in", "fields", ":", "dt", "=", "arr", ".", "dtype", "[", "field", "]", "if", "dt", "==", "'O'", "or", "len", "(", "dt", ".", "shape", ")", ":", "if", "dt", "==", "'O'", ":", "# Variable-length array field", "lengths", "=", "VLEN", "(", "arr", "[", "field", "]", ")", "else", ":", "# Fixed-length array field", "lengths", "=", "np", ".", "repeat", "(", "dt", ".", "shape", "[", "0", "]", ",", "arr", ".", "shape", "[", "0", "]", ")", "if", "len_array", "is", "None", ":", "len_array", "=", "lengths", "elif", "not", "np", ".", "array_equal", "(", "lengths", ",", "len_array", ")", ":", "raise", "ValueError", "(", "\"inconsistent lengths of array columns in input\"", ")", "if", "dt", "==", "'O'", ":", "dtype", ".", "append", "(", "(", "field", ",", "arr", "[", "field", "]", "[", "0", "]", ".", "dtype", ")", ")", "else", ":", "dtype", ".", "append", "(", "(", "field", ",", "arr", "[", "field", "]", ".", "dtype", ",", "dt", ".", "shape", "[", "1", ":", "]", ")", ")", "else", ":", "# Scalar field", "dtype", ".", "append", "(", "(", "field", ",", "dt", ")", ")", "if", "len_array", "is", "None", ":", "raise", "RuntimeError", "(", "\"no array column in input\"", ")", "# Build stretched output", "ret", "=", "np", ".", "empty", "(", "np", ".", "sum", "(", "len_array", ")", ",", "dtype", "=", "dtype", ")", "for", "field", "in", "fields", ":", "dt", "=", "arr", ".", "dtype", "[", "field", "]", "if", "dt", "==", "'O'", "or", "len", "(", "dt", ".", "shape", ")", "==", "1", ":", "# Variable-length or 1D fixed-length array field", "ret", "[", "field", "]", "=", "np", ".", "hstack", "(", "arr", "[", "field", "]", ")", "elif", "len", "(", "dt", ".", "shape", ")", ":", "# Multidimensional fixed-length array field", "ret", "[", "field", "]", "=", "np", ".", "vstack", "(", "arr", "[", "field", "]", ")", "else", ":", "# Scalar field", "ret", "[", "field", "]", "=", "np", ".", "repeat", "(", "arr", "[", "field", "]", ",", "len_array", ")", "if", "flatten", ":", "ret", "=", "ret", "[", "fields", "[", "0", "]", "]", "if", "return_indices", ":", "idx", "=", "np", ".", "concatenate", "(", "list", "(", "map", "(", "np", ".", "arange", ",", "len_array", ")", ")", ")", "return", "ret", ",", "idx", "return", "ret" ]
34.226804
19.185567
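The heart of stretch() is pairing hstack on array columns with repeat on scalar columns so row alignment is preserved; the two calls in isolation:

import numpy as np

scalar = np.array([0, 1])
arrays = [np.array([1.0, 2.0, 3.0]), np.array([4.0, 5.0, 6.0])]
lengths = [len(a) for a in arrays]

print(np.repeat(scalar, lengths))  # [0 0 0 1 1 1]
print(np.hstack(arrays))           # [1. 2. 3. 4. 5. 6.]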
def load_extra_data(cls, data):
  """Loads extra JSON configuration parameters from a data buffer.

  The data buffer must represent a JSON object.

  Args:
    data: str, the buffer to load the JSON data from.
  """
  try:
    cls._extra_config.update(json.loads(data))
  except ValueError as exception:
    sys.stderr.write('Could not convert to JSON. {0!s}'.format(exception))
    exit(-1)
[ "def", "load_extra_data", "(", "cls", ",", "data", ")", ":", "try", ":", "cls", ".", "_extra_config", ".", "update", "(", "json", ".", "loads", "(", "data", ")", ")", "except", "ValueError", "as", "exception", ":", "sys", ".", "stderr", ".", "write", "(", "'Could not convert to JSON. {0!s}'", ".", "format", "(", "exception", ")", ")", "exit", "(", "-", "1", ")" ]
30.461538
19
def nin(self, qfield, *values):
    ''' Works the same as the query expression method ``nin_``
    '''
    self.__query_obj.nin(qfield, *values)
    return self
[ "def", "nin", "(", "self", ",", "qfield", ",", "*", "values", ")", ":", "self", ".", "__query_obj", ".", "nin", "(", "qfield", ",", "*", "values", ")", "return", "self" ]
34.4
18
def select(self, name_or_index):
    """Locate a dataset.

    Args::

        name_or_index  dataset name or index number

    Returns::

        SDS instance for the dataset

    C library equivalent : SDselect
    """
    if isinstance(name_or_index, type(1)):
        idx = name_or_index
    else:
        try:
            idx = self.nametoindex(name_or_index)
        except HDF4Error:
            raise HDF4Error("select: non-existent dataset")
    id = _C.SDselect(self._id, idx)
    _checkErr('select', id, "cannot execute")
    return SDS(self, id)
[ "def", "select", "(", "self", ",", "name_or_index", ")", ":", "if", "isinstance", "(", "name_or_index", ",", "type", "(", "1", ")", ")", ":", "idx", "=", "name_or_index", "else", ":", "try", ":", "idx", "=", "self", ".", "nametoindex", "(", "name_or_index", ")", "except", "HDF4Error", ":", "raise", "HDF4Error", "(", "\"select: non-existent dataset\"", ")", "id", "=", "_C", ".", "SDselect", "(", "self", ".", "_id", ",", "idx", ")", "_checkErr", "(", "'select'", ",", "id", ",", "\"cannot execute\"", ")", "return", "SDS", "(", "self", ",", "id", ")" ]
27.458333
18.666667
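This follows the pyhdf-style SD interface; a hedged usage sketch (file and dataset names are placeholders):

from pyhdf.SD import SD, SDC

sd = SD('example.hdf', SDC.READ)    # placeholder file name
sds = sd.select('temperature')      # look up a dataset by name...
first = sd.select(0)                # ...or by index
data = sds.get()                    # read the full array
sd.end()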
def drill_filter(esfilter, data): """ PARTIAL EVALUATE THE FILTER BASED ON DATA GIVEN TODO: FIX THIS MONUMENTALLY BAD IDEA """ esfilter = unwrap(esfilter) primary_nested = [] # track if nested, changes if not primary_column = [] # only one path allowed primary_branch = ( [] ) # CONTAINS LISTS OF RECORDS TO ITERATE: constantly changing as we dfs the tree def parse_field(fieldname, data, depth): """ RETURN (first, rest) OF fieldname """ col = split_field(fieldname) d = data for i, c in enumerate(col): try: d = d[c] except Exception as e: Log.error("{{name}} does not exist", name=fieldname) if is_list(d) and len(col) > 1: if len(primary_column) <= depth + i: primary_nested.append(True) primary_column.append(c) primary_branch.append(d) elif primary_nested[depth] and primary_column[depth + i] != c: Log.error("only one branch of tree allowed") else: primary_nested[depth + i] = True primary_column[depth + i] = c primary_branch[depth + i] = d return c, join_field(col[i + 1 :]) else: if len(primary_column) <= depth + i: primary_nested.append(False) primary_column.append(c) primary_branch.append([d]) return fieldname, None def pe_filter(filter, data, depth): """ PARTIAL EVALUATE THE filter BASED ON data GIVEN """ if filter is TRUE: return True if filter is FALSE: return False filter = wrap(filter) if filter["and"]: result = True output = FlatList() for a in filter["and"]: f = pe_filter(a, data, depth) if f is False: result = False elif f is not True: output.append(f) if result and output: return {"and": output} else: return result elif filter["or"]: output = FlatList() for o in filter["or"]: f = pe_filter(o, data, depth) if f is True: return True elif f is not False: output.append(f) if output: return {"or": output} else: return False elif filter["not"]: f = pe_filter(filter["not"], data, depth) if f is True: return False elif f is False: return True else: return {"not": f} elif filter.term or filter.eq: eq = coalesce(filter.term, filter.eq) result = True output = {} for col, val in eq.items(): first, rest = parse_field(col, data, depth) d = data[first] if not rest: if d != val: result = False else: output[rest] = val if result and output: return {"term": output} else: return result elif filter.equal: a, b = filter["equal"] first_a, rest_a = parse_field(a, data, depth) first_b, rest_b = parse_field(b, data, depth) val_a = data[first_a] val_b = data[first_b] if not rest_a: if not rest_b: if val_a != val_b: return False else: return True else: return {"term": {rest_b: val_a}} else: if not rest_b: return {"term": {rest_a: val_b}} else: return {"equal": [rest_a, rest_b]} elif filter.terms: result = True output = {} for col, vals in filter["terms"].items(): first, rest = parse_field(col, data, depth) d = data[first] if not rest: if d not in vals: result = False else: output[rest] = vals if result and output: return {"terms": output} else: return result elif filter.range: result = True output = {} for col, ranges in filter["range"].items(): first, rest = parse_field(col, data, depth) d = data[first] if not rest: for sign, val in ranges.items(): if sign in ("gt", ">") and d <= val: result = False if sign == "gte" and d < val: result = False if sign == "lte" and d > val: result = False if sign == "lt" and d >= val: result = False else: output[rest] = ranges if result and output: return {"range": output} else: return result elif filter.missing: if is_text(filter.missing): field = filter["missing"] else: field = filter["missing"]["field"] first, rest = parse_field(field, data, depth) d = data[first] if not 
rest: if d == None: return True return False else: return {"missing": rest} elif filter.prefix: result = True output = {} for col, val in filter["prefix"].items(): first, rest = parse_field(col, data, depth) d = data[first] if not rest: if d == None or not d.startswith(val): result = False else: output[rest] = val if result and output: return {"prefix": output} else: return result elif filter.exists: if is_text(filter["exists"]): field = filter["exists"] else: field = filter["exists"]["field"] first, rest = parse_field(field, data, depth) d = data[first] if not rest: if d != None: return True return False else: return {"exists": rest} else: Log.error("Can not interpret esfilter: {{esfilter}}", {"esfilter": filter}) output = [] # A LIST OF OBJECTS MAKING THROUGH THE FILTER def main(sequence, esfilter, row, depth): """ RETURN A SEQUENCE OF REFERENCES OF OBJECTS DOWN THE TREE SHORT SEQUENCES MEANS ALL NESTED OBJECTS ARE INCLUDED """ new_filter = pe_filter(esfilter, row, depth) if new_filter is True: seq = list(sequence) seq.append(row) output.append(seq) return elif new_filter is False: return seq = list(sequence) seq.append(row) for d in primary_branch[depth]: main(seq, new_filter, d, depth + 1) # OUTPUT for i, d in enumerate(data): if is_data(d): main([], esfilter, wrap(d), 0) else: Log.error("filter is expecting a dict, not {{type}}", type=d.__class__) # AT THIS POINT THE primary_column[] IS DETERMINED # USE IT TO EXPAND output TO ALL NESTED OBJECTS max = 0 # EVEN THOUGH A ROW CAN HAVE MANY VALUES, WE ONLY NEED UP TO max for i, n in enumerate(primary_nested): if n: max = i + 1 # OUTPUT IS A LIST OF ROWS, # WHERE EACH ROW IS A LIST OF VALUES SEEN DURING A WALK DOWN A PATH IN THE HIERARCHY uniform_output = FlatList() def recurse(row, depth): if depth == max: uniform_output.append(row) else: nested = row[-1][primary_column[depth]] if not nested: # PASSED FILTER, BUT NO CHILDREN, SO ADD NULL CHILDREN for i in range(depth, max): row.append(None) uniform_output.append(row) else: for d in nested: r = list(row) r.append(d) recurse(r, depth + 1) for o in output: recurse(o, 0) if not max: # SIMPLE LIST AS RESULT return wrap([unwrap(u[0]) for u in uniform_output]) return PartFlatList(primary_column[0:max], uniform_output)
[ "def", "drill_filter", "(", "esfilter", ",", "data", ")", ":", "esfilter", "=", "unwrap", "(", "esfilter", ")", "primary_nested", "=", "[", "]", "# track if nested, changes if not", "primary_column", "=", "[", "]", "# only one path allowed", "primary_branch", "=", "(", "[", "]", ")", "# CONTAINS LISTS OF RECORDS TO ITERATE: constantly changing as we dfs the tree", "def", "parse_field", "(", "fieldname", ",", "data", ",", "depth", ")", ":", "\"\"\"\n RETURN (first, rest) OF fieldname\n \"\"\"", "col", "=", "split_field", "(", "fieldname", ")", "d", "=", "data", "for", "i", ",", "c", "in", "enumerate", "(", "col", ")", ":", "try", ":", "d", "=", "d", "[", "c", "]", "except", "Exception", "as", "e", ":", "Log", ".", "error", "(", "\"{{name}} does not exist\"", ",", "name", "=", "fieldname", ")", "if", "is_list", "(", "d", ")", "and", "len", "(", "col", ")", ">", "1", ":", "if", "len", "(", "primary_column", ")", "<=", "depth", "+", "i", ":", "primary_nested", ".", "append", "(", "True", ")", "primary_column", ".", "append", "(", "c", ")", "primary_branch", ".", "append", "(", "d", ")", "elif", "primary_nested", "[", "depth", "]", "and", "primary_column", "[", "depth", "+", "i", "]", "!=", "c", ":", "Log", ".", "error", "(", "\"only one branch of tree allowed\"", ")", "else", ":", "primary_nested", "[", "depth", "+", "i", "]", "=", "True", "primary_column", "[", "depth", "+", "i", "]", "=", "c", "primary_branch", "[", "depth", "+", "i", "]", "=", "d", "return", "c", ",", "join_field", "(", "col", "[", "i", "+", "1", ":", "]", ")", "else", ":", "if", "len", "(", "primary_column", ")", "<=", "depth", "+", "i", ":", "primary_nested", ".", "append", "(", "False", ")", "primary_column", ".", "append", "(", "c", ")", "primary_branch", ".", "append", "(", "[", "d", "]", ")", "return", "fieldname", ",", "None", "def", "pe_filter", "(", "filter", ",", "data", ",", "depth", ")", ":", "\"\"\"\n PARTIAL EVALUATE THE filter BASED ON data GIVEN\n \"\"\"", "if", "filter", "is", "TRUE", ":", "return", "True", "if", "filter", "is", "FALSE", ":", "return", "False", "filter", "=", "wrap", "(", "filter", ")", "if", "filter", "[", "\"and\"", "]", ":", "result", "=", "True", "output", "=", "FlatList", "(", ")", "for", "a", "in", "filter", "[", "\"and\"", "]", ":", "f", "=", "pe_filter", "(", "a", ",", "data", ",", "depth", ")", "if", "f", "is", "False", ":", "result", "=", "False", "elif", "f", "is", "not", "True", ":", "output", ".", "append", "(", "f", ")", "if", "result", "and", "output", ":", "return", "{", "\"and\"", ":", "output", "}", "else", ":", "return", "result", "elif", "filter", "[", "\"or\"", "]", ":", "output", "=", "FlatList", "(", ")", "for", "o", "in", "filter", "[", "\"or\"", "]", ":", "f", "=", "pe_filter", "(", "o", ",", "data", ",", "depth", ")", "if", "f", "is", "True", ":", "return", "True", "elif", "f", "is", "not", "False", ":", "output", ".", "append", "(", "f", ")", "if", "output", ":", "return", "{", "\"or\"", ":", "output", "}", "else", ":", "return", "False", "elif", "filter", "[", "\"not\"", "]", ":", "f", "=", "pe_filter", "(", "filter", "[", "\"not\"", "]", ",", "data", ",", "depth", ")", "if", "f", "is", "True", ":", "return", "False", "elif", "f", "is", "False", ":", "return", "True", "else", ":", "return", "{", "\"not\"", ":", "f", "}", "elif", "filter", ".", "term", "or", "filter", ".", "eq", ":", "eq", "=", "coalesce", "(", "filter", ".", "term", ",", "filter", ".", "eq", ")", "result", "=", "True", "output", "=", "{", "}", "for", "col", ",", "val", "in", "eq", ".", "items", 
"(", ")", ":", "first", ",", "rest", "=", "parse_field", "(", "col", ",", "data", ",", "depth", ")", "d", "=", "data", "[", "first", "]", "if", "not", "rest", ":", "if", "d", "!=", "val", ":", "result", "=", "False", "else", ":", "output", "[", "rest", "]", "=", "val", "if", "result", "and", "output", ":", "return", "{", "\"term\"", ":", "output", "}", "else", ":", "return", "result", "elif", "filter", ".", "equal", ":", "a", ",", "b", "=", "filter", "[", "\"equal\"", "]", "first_a", ",", "rest_a", "=", "parse_field", "(", "a", ",", "data", ",", "depth", ")", "first_b", ",", "rest_b", "=", "parse_field", "(", "b", ",", "data", ",", "depth", ")", "val_a", "=", "data", "[", "first_a", "]", "val_b", "=", "data", "[", "first_b", "]", "if", "not", "rest_a", ":", "if", "not", "rest_b", ":", "if", "val_a", "!=", "val_b", ":", "return", "False", "else", ":", "return", "True", "else", ":", "return", "{", "\"term\"", ":", "{", "rest_b", ":", "val_a", "}", "}", "else", ":", "if", "not", "rest_b", ":", "return", "{", "\"term\"", ":", "{", "rest_a", ":", "val_b", "}", "}", "else", ":", "return", "{", "\"equal\"", ":", "[", "rest_a", ",", "rest_b", "]", "}", "elif", "filter", ".", "terms", ":", "result", "=", "True", "output", "=", "{", "}", "for", "col", ",", "vals", "in", "filter", "[", "\"terms\"", "]", ".", "items", "(", ")", ":", "first", ",", "rest", "=", "parse_field", "(", "col", ",", "data", ",", "depth", ")", "d", "=", "data", "[", "first", "]", "if", "not", "rest", ":", "if", "d", "not", "in", "vals", ":", "result", "=", "False", "else", ":", "output", "[", "rest", "]", "=", "vals", "if", "result", "and", "output", ":", "return", "{", "\"terms\"", ":", "output", "}", "else", ":", "return", "result", "elif", "filter", ".", "range", ":", "result", "=", "True", "output", "=", "{", "}", "for", "col", ",", "ranges", "in", "filter", "[", "\"range\"", "]", ".", "items", "(", ")", ":", "first", ",", "rest", "=", "parse_field", "(", "col", ",", "data", ",", "depth", ")", "d", "=", "data", "[", "first", "]", "if", "not", "rest", ":", "for", "sign", ",", "val", "in", "ranges", ".", "items", "(", ")", ":", "if", "sign", "in", "(", "\"gt\"", ",", "\">\"", ")", "and", "d", "<=", "val", ":", "result", "=", "False", "if", "sign", "==", "\"gte\"", "and", "d", "<", "val", ":", "result", "=", "False", "if", "sign", "==", "\"lte\"", "and", "d", ">", "val", ":", "result", "=", "False", "if", "sign", "==", "\"lt\"", "and", "d", ">=", "val", ":", "result", "=", "False", "else", ":", "output", "[", "rest", "]", "=", "ranges", "if", "result", "and", "output", ":", "return", "{", "\"range\"", ":", "output", "}", "else", ":", "return", "result", "elif", "filter", ".", "missing", ":", "if", "is_text", "(", "filter", ".", "missing", ")", ":", "field", "=", "filter", "[", "\"missing\"", "]", "else", ":", "field", "=", "filter", "[", "\"missing\"", "]", "[", "\"field\"", "]", "first", ",", "rest", "=", "parse_field", "(", "field", ",", "data", ",", "depth", ")", "d", "=", "data", "[", "first", "]", "if", "not", "rest", ":", "if", "d", "==", "None", ":", "return", "True", "return", "False", "else", ":", "return", "{", "\"missing\"", ":", "rest", "}", "elif", "filter", ".", "prefix", ":", "result", "=", "True", "output", "=", "{", "}", "for", "col", ",", "val", "in", "filter", "[", "\"prefix\"", "]", ".", "items", "(", ")", ":", "first", ",", "rest", "=", "parse_field", "(", "col", ",", "data", ",", "depth", ")", "d", "=", "data", "[", "first", "]", "if", "not", "rest", ":", "if", "d", "==", "None", "or", "not", "d", ".", 
"startswith", "(", "val", ")", ":", "result", "=", "False", "else", ":", "output", "[", "rest", "]", "=", "val", "if", "result", "and", "output", ":", "return", "{", "\"prefix\"", ":", "output", "}", "else", ":", "return", "result", "elif", "filter", ".", "exists", ":", "if", "is_text", "(", "filter", "[", "\"exists\"", "]", ")", ":", "field", "=", "filter", "[", "\"exists\"", "]", "else", ":", "field", "=", "filter", "[", "\"exists\"", "]", "[", "\"field\"", "]", "first", ",", "rest", "=", "parse_field", "(", "field", ",", "data", ",", "depth", ")", "d", "=", "data", "[", "first", "]", "if", "not", "rest", ":", "if", "d", "!=", "None", ":", "return", "True", "return", "False", "else", ":", "return", "{", "\"exists\"", ":", "rest", "}", "else", ":", "Log", ".", "error", "(", "\"Can not interpret esfilter: {{esfilter}}\"", ",", "{", "\"esfilter\"", ":", "filter", "}", ")", "output", "=", "[", "]", "# A LIST OF OBJECTS MAKING THROUGH THE FILTER", "def", "main", "(", "sequence", ",", "esfilter", ",", "row", ",", "depth", ")", ":", "\"\"\"\n RETURN A SEQUENCE OF REFERENCES OF OBJECTS DOWN THE TREE\n SHORT SEQUENCES MEANS ALL NESTED OBJECTS ARE INCLUDED\n \"\"\"", "new_filter", "=", "pe_filter", "(", "esfilter", ",", "row", ",", "depth", ")", "if", "new_filter", "is", "True", ":", "seq", "=", "list", "(", "sequence", ")", "seq", ".", "append", "(", "row", ")", "output", ".", "append", "(", "seq", ")", "return", "elif", "new_filter", "is", "False", ":", "return", "seq", "=", "list", "(", "sequence", ")", "seq", ".", "append", "(", "row", ")", "for", "d", "in", "primary_branch", "[", "depth", "]", ":", "main", "(", "seq", ",", "new_filter", ",", "d", ",", "depth", "+", "1", ")", "# OUTPUT", "for", "i", ",", "d", "in", "enumerate", "(", "data", ")", ":", "if", "is_data", "(", "d", ")", ":", "main", "(", "[", "]", ",", "esfilter", ",", "wrap", "(", "d", ")", ",", "0", ")", "else", ":", "Log", ".", "error", "(", "\"filter is expecting a dict, not {{type}}\"", ",", "type", "=", "d", ".", "__class__", ")", "# AT THIS POINT THE primary_column[] IS DETERMINED", "# USE IT TO EXPAND output TO ALL NESTED OBJECTS", "max", "=", "0", "# EVEN THOUGH A ROW CAN HAVE MANY VALUES, WE ONLY NEED UP TO max", "for", "i", ",", "n", "in", "enumerate", "(", "primary_nested", ")", ":", "if", "n", ":", "max", "=", "i", "+", "1", "# OUTPUT IS A LIST OF ROWS,", "# WHERE EACH ROW IS A LIST OF VALUES SEEN DURING A WALK DOWN A PATH IN THE HIERARCHY", "uniform_output", "=", "FlatList", "(", ")", "def", "recurse", "(", "row", ",", "depth", ")", ":", "if", "depth", "==", "max", ":", "uniform_output", ".", "append", "(", "row", ")", "else", ":", "nested", "=", "row", "[", "-", "1", "]", "[", "primary_column", "[", "depth", "]", "]", "if", "not", "nested", ":", "# PASSED FILTER, BUT NO CHILDREN, SO ADD NULL CHILDREN", "for", "i", "in", "range", "(", "depth", ",", "max", ")", ":", "row", ".", "append", "(", "None", ")", "uniform_output", ".", "append", "(", "row", ")", "else", ":", "for", "d", "in", "nested", ":", "r", "=", "list", "(", "row", ")", "r", ".", "append", "(", "d", ")", "recurse", "(", "r", ",", "depth", "+", "1", ")", "for", "o", "in", "output", ":", "recurse", "(", "o", ",", "0", ")", "if", "not", "max", ":", "# SIMPLE LIST AS RESULT", "return", "wrap", "(", "[", "unwrap", "(", "u", "[", "0", "]", ")", "for", "u", "in", "uniform_output", "]", ")", "return", "PartFlatList", "(", "primary_column", "[", "0", ":", "max", "]", ",", "uniform_output", ")" ]
32.457875
14.355311
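A hedged end-to-end sketch of ``drill_filter``: it is not standalone (it needs the module's ``wrap``/``FlatList``/``split_field`` helpers in scope), and the records and filter are invented for illustration:

data = [
    {"name": "a", "hits": [{"score": 5}, {"score": 15}]},
    {"name": "b", "hits": [{"score": 25}]},
]

# Keep only (record, hit) paths whose hits.score is at least 10.
rows = drill_filter({"range": {"hits.score": {"gte": 10}}}, data)
# The surviving walks are ("a", score 15) and ("b", score 25); the
# score-5 hit of "a" fails the partially evaluated filter at depth 1.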
def search(**criteria): """ Search registered *component* classes matching the given criteria. :param criteria: search criteria of the form: ``a='1', b='x'`` :return: parts registered with the given criteria :rtype: :class:`set` Will return an empty :class:`set` if nothing is found. :: from cqparts.search import search import cqparts_motors # example of a 3rd party lib # Get all DC motor classes dc_motors = search(type='motor', current_class='dc') # For more complex queries: air_cooled = search(cooling='air') non_aircooled_dcmotors = dc_motors - air_cooled # will be all DC motors that aren't air-cooled """ # Find all parts that match the given criteria results = copy(class_list) # start with full list for (category, value) in criteria.items(): results &= index[category][value] return results
[ "def", "search", "(", "*", "*", "criteria", ")", ":", "# Find all parts that match the given criteria", "results", "=", "copy", "(", "class_list", ")", "# start with full list", "for", "(", "category", ",", "value", ")", "in", "criteria", ".", "items", "(", ")", ":", "results", "&=", "index", "[", "category", "]", "[", "value", "]", "return", "results" ]
31.137931
19.62069
def add_grid(self): """Add axis and ticks to figure. Notes ----- I know that visvis and pyqtgraphs can do this in a much simpler way, but those packages create too large a padding around the figure and this is pretty fast. """ value = self.config.value # Y-AXIS # left axis self.scene.addLine(value['x_min'], value['y_min'], value['x_min'], value['y_max'], QPen(QColor(LINE_COLOR), LINE_WIDTH)) # at y = 0, dashed self.scene.addLine(value['x_min'], 0, value['x_max'], 0, QPen(QColor(LINE_COLOR), LINE_WIDTH, Qt.DashLine)) # ticks on y-axis y_high = int(floor(value['y_max'])) y_low = int(ceil(value['y_min'])) x_length = (value['x_max'] - value['x_min']) / value['x_tick'] for y in range(y_low, y_high): self.scene.addLine(value['x_min'], y, value['x_min'] + x_length, y, QPen(QColor(LINE_COLOR), LINE_WIDTH)) # X-AXIS # x-bottom self.scene.addLine(value['x_min'], value['y_min'], value['x_max'], value['y_min'], QPen(QColor(LINE_COLOR), LINE_WIDTH)) # larger ticks on x-axis every 10 Hz x_high = int(floor(value['x_max'])) x_low = int(ceil(value['x_min'])) y_length = (value['y_max'] - value['y_min']) / value['y_tick'] for x in range(x_low, x_high, 10): self.scene.addLine(x, value['y_min'], x, value['y_min'] + y_length, QPen(QColor(LINE_COLOR), LINE_WIDTH)) # smaller ticks on x-axis every 5 Hz y_length = (value['y_max'] - value['y_min']) / value['y_tick'] / 2 for x in range(x_low, x_high, 5): self.scene.addLine(x, value['y_min'], x, value['y_min'] + y_length, QPen(QColor(LINE_COLOR), LINE_WIDTH))
[ "def", "add_grid", "(", "self", ")", ":", "value", "=", "self", ".", "config", ".", "value", "# X-AXIS", "# x-bottom", "self", ".", "scene", ".", "addLine", "(", "value", "[", "'x_min'", "]", ",", "value", "[", "'y_min'", "]", ",", "value", "[", "'x_min'", "]", ",", "value", "[", "'y_max'", "]", ",", "QPen", "(", "QColor", "(", "LINE_COLOR", ")", ",", "LINE_WIDTH", ")", ")", "# at y = 0, dashed", "self", ".", "scene", ".", "addLine", "(", "value", "[", "'x_min'", "]", ",", "0", ",", "value", "[", "'x_max'", "]", ",", "0", ",", "QPen", "(", "QColor", "(", "LINE_COLOR", ")", ",", "LINE_WIDTH", ",", "Qt", ".", "DashLine", ")", ")", "# ticks on y-axis", "y_high", "=", "int", "(", "floor", "(", "value", "[", "'y_max'", "]", ")", ")", "y_low", "=", "int", "(", "ceil", "(", "value", "[", "'y_min'", "]", ")", ")", "x_length", "=", "(", "value", "[", "'x_max'", "]", "-", "value", "[", "'x_min'", "]", ")", "/", "value", "[", "'x_tick'", "]", "for", "y", "in", "range", "(", "y_low", ",", "y_high", ")", ":", "self", ".", "scene", ".", "addLine", "(", "value", "[", "'x_min'", "]", ",", "y", ",", "value", "[", "'x_min'", "]", "+", "x_length", ",", "y", ",", "QPen", "(", "QColor", "(", "LINE_COLOR", ")", ",", "LINE_WIDTH", ")", ")", "# Y-AXIS", "# left axis", "self", ".", "scene", ".", "addLine", "(", "value", "[", "'x_min'", "]", ",", "value", "[", "'y_min'", "]", ",", "value", "[", "'x_max'", "]", ",", "value", "[", "'y_min'", "]", ",", "QPen", "(", "QColor", "(", "LINE_COLOR", ")", ",", "LINE_WIDTH", ")", ")", "# larger ticks on x-axis every 10 Hz", "x_high", "=", "int", "(", "floor", "(", "value", "[", "'x_max'", "]", ")", ")", "x_low", "=", "int", "(", "ceil", "(", "value", "[", "'x_min'", "]", ")", ")", "y_length", "=", "(", "value", "[", "'y_max'", "]", "-", "value", "[", "'y_min'", "]", ")", "/", "value", "[", "'y_tick'", "]", "for", "x", "in", "range", "(", "x_low", ",", "x_high", ",", "10", ")", ":", "self", ".", "scene", ".", "addLine", "(", "x", ",", "value", "[", "'y_min'", "]", ",", "x", ",", "value", "[", "'y_min'", "]", "+", "y_length", ",", "QPen", "(", "QColor", "(", "LINE_COLOR", ")", ",", "LINE_WIDTH", ")", ")", "# smaller ticks on x-axis every 10 Hz", "y_length", "=", "(", "value", "[", "'y_max'", "]", "-", "value", "[", "'y_min'", "]", ")", "/", "value", "[", "'y_tick'", "]", "/", "2", "for", "x", "in", "range", "(", "x_low", ",", "x_high", ",", "5", ")", ":", "self", ".", "scene", ".", "addLine", "(", "x", ",", "value", "[", "'y_min'", "]", ",", "x", ",", "value", "[", "'y_min'", "]", "+", "y_length", ",", "QPen", "(", "QColor", "(", "LINE_COLOR", ")", ",", "LINE_WIDTH", ")", ")" ]
43.25
18.1875
def initHldyDates(self): """ Initialize holidays :class:`~ekmmeters.SerialBlock` """ self.m_hldy["reserved_20"] = [6, FieldType.Hex, ScaleType.No, "", 0, False, False] self.m_hldy["Holiday_1_Mon"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True] self.m_hldy["Holiday_1_Day"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True] self.m_hldy["Holiday_2_Mon"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True] self.m_hldy["Holiday_2_Day"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True] self.m_hldy["Holiday_3_Mon"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True] self.m_hldy["Holiday_3_Day"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True] self.m_hldy["Holiday_4_Mon"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True] self.m_hldy["Holiday_4_Day"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True] self.m_hldy["Holiday_5_Mon"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True] self.m_hldy["Holiday_5_Day"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True] self.m_hldy["Holiday_6_Mon"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True] self.m_hldy["Holiday_6_Day"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True] self.m_hldy["Holiday_7_Mon"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True] self.m_hldy["Holiday_7_Day"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True] self.m_hldy["Holiday_8_Mon"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True] self.m_hldy["Holiday_8_Day"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True] self.m_hldy["Holiday_9_Mon"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True] self.m_hldy["Holiday_9_Day"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True] self.m_hldy["Holiday_10_Mon"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True] self.m_hldy["Holiday_10_Day"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True] self.m_hldy["Holiday_11_Mon"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True] self.m_hldy["Holiday_11_Day"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True] self.m_hldy["Holiday_12_Mon"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True] self.m_hldy["Holiday_12_Day"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True] self.m_hldy["Holiday_13_Mon"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True] self.m_hldy["Holiday_13_Day"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True] self.m_hldy["Holiday_14_Mon"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True] self.m_hldy["Holiday_14_Day"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True] self.m_hldy["Holiday_15_Mon"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True] self.m_hldy["Holiday_15_Day"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True] self.m_hldy["Holiday_16_Mon"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True] self.m_hldy["Holiday_16_Day"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True] self.m_hldy["Holiday_17_Mon"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True] self.m_hldy["Holiday_17_Day"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True] self.m_hldy["Holiday_18_Mon"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True] self.m_hldy["Holiday_18_Day"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True] self.m_hldy["Holiday_19_Mon"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True] self.m_hldy["Holiday_19_Day"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True] self.m_hldy["Holiday_20_Mon"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True] self.m_hldy["Holiday_20_Day"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True] 
self.m_hldy["Weekend_Schd"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True] self.m_hldy["Holiday_Schd"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True] self.m_hldy["reserved_21"] = [163, FieldType.Hex, ScaleType.No, "", 0, False, False] self.m_hldy["crc16"] = [2, FieldType.Hex, ScaleType.No, "", 0, False, False] pass
[ "def", "initHldyDates", "(", "self", ")", ":", "self", ".", "m_hldy", "[", "\"reserved_20\"", "]", "=", "[", "6", ",", "FieldType", ".", "Hex", ",", "ScaleType", ".", "No", ",", "\"\"", ",", "0", ",", "False", ",", "False", "]", "self", ".", "m_hldy", "[", "\"Holiday_1_Mon\"", "]", "=", "[", "2", ",", "FieldType", ".", "Int", ",", "ScaleType", ".", "No", ",", "\"\"", ",", "0", ",", "False", ",", "True", "]", "self", ".", "m_hldy", "[", "\"Holiday_1_Day\"", "]", "=", "[", "2", ",", "FieldType", ".", "Int", ",", "ScaleType", ".", "No", ",", "\"\"", ",", "0", ",", "False", ",", "True", "]", "self", ".", "m_hldy", "[", "\"Holiday_2_Mon\"", "]", "=", "[", "2", ",", "FieldType", ".", "Int", ",", "ScaleType", ".", "No", ",", "\"\"", ",", "0", ",", "False", ",", "True", "]", "self", ".", "m_hldy", "[", "\"Holiday_2_Day\"", "]", "=", "[", "2", ",", "FieldType", ".", "Int", ",", "ScaleType", ".", "No", ",", "\"\"", ",", "0", ",", "False", ",", "True", "]", "self", ".", "m_hldy", "[", "\"Holiday_3_Mon\"", "]", "=", "[", "2", ",", "FieldType", ".", "Int", ",", "ScaleType", ".", "No", ",", "\"\"", ",", "0", ",", "False", ",", "True", "]", "self", ".", "m_hldy", "[", "\"Holiday_3_Day\"", "]", "=", "[", "2", ",", "FieldType", ".", "Int", ",", "ScaleType", ".", "No", ",", "\"\"", ",", "0", ",", "False", ",", "True", "]", "self", ".", "m_hldy", "[", "\"Holiday_4_Mon\"", "]", "=", "[", "2", ",", "FieldType", ".", "Int", ",", "ScaleType", ".", "No", ",", "\"\"", ",", "0", ",", "False", ",", "True", "]", "self", ".", "m_hldy", "[", "\"Holiday_4_Day\"", "]", "=", "[", "2", ",", "FieldType", ".", "Int", ",", "ScaleType", ".", "No", ",", "\"\"", ",", "0", ",", "False", ",", "True", "]", "self", ".", "m_hldy", "[", "\"Holiday_5_Mon\"", "]", "=", "[", "2", ",", "FieldType", ".", "Int", ",", "ScaleType", ".", "No", ",", "\"\"", ",", "0", ",", "False", ",", "True", "]", "self", ".", "m_hldy", "[", "\"Holiday_5_Day\"", "]", "=", "[", "2", ",", "FieldType", ".", "Int", ",", "ScaleType", ".", "No", ",", "\"\"", ",", "0", ",", "False", ",", "True", "]", "self", ".", "m_hldy", "[", "\"Holiday_6_Mon\"", "]", "=", "[", "2", ",", "FieldType", ".", "Int", ",", "ScaleType", ".", "No", ",", "\"\"", ",", "0", ",", "False", ",", "True", "]", "self", ".", "m_hldy", "[", "\"Holiday_6_Day\"", "]", "=", "[", "2", ",", "FieldType", ".", "Int", ",", "ScaleType", ".", "No", ",", "\"\"", ",", "0", ",", "False", ",", "True", "]", "self", ".", "m_hldy", "[", "\"Holiday_7_Mon\"", "]", "=", "[", "2", ",", "FieldType", ".", "Int", ",", "ScaleType", ".", "No", ",", "\"\"", ",", "0", ",", "False", ",", "True", "]", "self", ".", "m_hldy", "[", "\"Holiday_7_Day\"", "]", "=", "[", "2", ",", "FieldType", ".", "Int", ",", "ScaleType", ".", "No", ",", "\"\"", ",", "0", ",", "False", ",", "True", "]", "self", ".", "m_hldy", "[", "\"Holiday_8_Mon\"", "]", "=", "[", "2", ",", "FieldType", ".", "Int", ",", "ScaleType", ".", "No", ",", "\"\"", ",", "0", ",", "False", ",", "True", "]", "self", ".", "m_hldy", "[", "\"Holiday_8_Day\"", "]", "=", "[", "2", ",", "FieldType", ".", "Int", ",", "ScaleType", ".", "No", ",", "\"\"", ",", "0", ",", "False", ",", "True", "]", "self", ".", "m_hldy", "[", "\"Holiday_9_Mon\"", "]", "=", "[", "2", ",", "FieldType", ".", "Int", ",", "ScaleType", ".", "No", ",", "\"\"", ",", "0", ",", "False", ",", "True", "]", "self", ".", "m_hldy", "[", "\"Holiday_9_Day\"", "]", "=", "[", "2", ",", "FieldType", ".", "Int", ",", "ScaleType", ".", "No", ",", "\"\"", ",", "0", ",", "False", ",", "True", "]", "self", ".", "m_hldy", "[", 
"\"Holiday_10_Mon\"", "]", "=", "[", "2", ",", "FieldType", ".", "Int", ",", "ScaleType", ".", "No", ",", "\"\"", ",", "0", ",", "False", ",", "True", "]", "self", ".", "m_hldy", "[", "\"Holiday_10_Day\"", "]", "=", "[", "2", ",", "FieldType", ".", "Int", ",", "ScaleType", ".", "No", ",", "\"\"", ",", "0", ",", "False", ",", "True", "]", "self", ".", "m_hldy", "[", "\"Holiday_11_Mon\"", "]", "=", "[", "2", ",", "FieldType", ".", "Int", ",", "ScaleType", ".", "No", ",", "\"\"", ",", "0", ",", "False", ",", "True", "]", "self", ".", "m_hldy", "[", "\"Holiday_11_Day\"", "]", "=", "[", "2", ",", "FieldType", ".", "Int", ",", "ScaleType", ".", "No", ",", "\"\"", ",", "0", ",", "False", ",", "True", "]", "self", ".", "m_hldy", "[", "\"Holiday_12_Mon\"", "]", "=", "[", "2", ",", "FieldType", ".", "Int", ",", "ScaleType", ".", "No", ",", "\"\"", ",", "0", ",", "False", ",", "True", "]", "self", ".", "m_hldy", "[", "\"Holiday_12_Day\"", "]", "=", "[", "2", ",", "FieldType", ".", "Int", ",", "ScaleType", ".", "No", ",", "\"\"", ",", "0", ",", "False", ",", "True", "]", "self", ".", "m_hldy", "[", "\"Holiday_13_Mon\"", "]", "=", "[", "2", ",", "FieldType", ".", "Int", ",", "ScaleType", ".", "No", ",", "\"\"", ",", "0", ",", "False", ",", "True", "]", "self", ".", "m_hldy", "[", "\"Holiday_13_Day\"", "]", "=", "[", "2", ",", "FieldType", ".", "Int", ",", "ScaleType", ".", "No", ",", "\"\"", ",", "0", ",", "False", ",", "True", "]", "self", ".", "m_hldy", "[", "\"Holiday_14_Mon\"", "]", "=", "[", "2", ",", "FieldType", ".", "Int", ",", "ScaleType", ".", "No", ",", "\"\"", ",", "0", ",", "False", ",", "True", "]", "self", ".", "m_hldy", "[", "\"Holiday_14_Day\"", "]", "=", "[", "2", ",", "FieldType", ".", "Int", ",", "ScaleType", ".", "No", ",", "\"\"", ",", "0", ",", "False", ",", "True", "]", "self", ".", "m_hldy", "[", "\"Holiday_15_Mon\"", "]", "=", "[", "2", ",", "FieldType", ".", "Int", ",", "ScaleType", ".", "No", ",", "\"\"", ",", "0", ",", "False", ",", "True", "]", "self", ".", "m_hldy", "[", "\"Holiday_15_Day\"", "]", "=", "[", "2", ",", "FieldType", ".", "Int", ",", "ScaleType", ".", "No", ",", "\"\"", ",", "0", ",", "False", ",", "True", "]", "self", ".", "m_hldy", "[", "\"Holiday_16_Mon\"", "]", "=", "[", "2", ",", "FieldType", ".", "Int", ",", "ScaleType", ".", "No", ",", "\"\"", ",", "0", ",", "False", ",", "True", "]", "self", ".", "m_hldy", "[", "\"Holiday_16_Day\"", "]", "=", "[", "2", ",", "FieldType", ".", "Int", ",", "ScaleType", ".", "No", ",", "\"\"", ",", "0", ",", "False", ",", "True", "]", "self", ".", "m_hldy", "[", "\"Holiday_17_Mon\"", "]", "=", "[", "2", ",", "FieldType", ".", "Int", ",", "ScaleType", ".", "No", ",", "\"\"", ",", "0", ",", "False", ",", "True", "]", "self", ".", "m_hldy", "[", "\"Holiday_17_Day\"", "]", "=", "[", "2", ",", "FieldType", ".", "Int", ",", "ScaleType", ".", "No", ",", "\"\"", ",", "0", ",", "False", ",", "True", "]", "self", ".", "m_hldy", "[", "\"Holiday_18_Mon\"", "]", "=", "[", "2", ",", "FieldType", ".", "Int", ",", "ScaleType", ".", "No", ",", "\"\"", ",", "0", ",", "False", ",", "True", "]", "self", ".", "m_hldy", "[", "\"Holiday_18_Day\"", "]", "=", "[", "2", ",", "FieldType", ".", "Int", ",", "ScaleType", ".", "No", ",", "\"\"", ",", "0", ",", "False", ",", "True", "]", "self", ".", "m_hldy", "[", "\"Holiday_19_Mon\"", "]", "=", "[", "2", ",", "FieldType", ".", "Int", ",", "ScaleType", ".", "No", ",", "\"\"", ",", "0", ",", "False", ",", "True", "]", "self", ".", "m_hldy", "[", "\"Holiday_19_Day\"", "]", "=", "[", "2", ",", 
"FieldType", ".", "Int", ",", "ScaleType", ".", "No", ",", "\"\"", ",", "0", ",", "False", ",", "True", "]", "self", ".", "m_hldy", "[", "\"Holiday_20_Mon\"", "]", "=", "[", "2", ",", "FieldType", ".", "Int", ",", "ScaleType", ".", "No", ",", "\"\"", ",", "0", ",", "False", ",", "True", "]", "self", ".", "m_hldy", "[", "\"Holiday_20_Day\"", "]", "=", "[", "2", ",", "FieldType", ".", "Int", ",", "ScaleType", ".", "No", ",", "\"\"", ",", "0", ",", "False", ",", "True", "]", "self", ".", "m_hldy", "[", "\"Weekend_Schd\"", "]", "=", "[", "2", ",", "FieldType", ".", "Int", ",", "ScaleType", ".", "No", ",", "\"\"", ",", "0", ",", "False", ",", "True", "]", "self", ".", "m_hldy", "[", "\"Holiday_Schd\"", "]", "=", "[", "2", ",", "FieldType", ".", "Int", ",", "ScaleType", ".", "No", ",", "\"\"", ",", "0", ",", "False", ",", "True", "]", "self", ".", "m_hldy", "[", "\"reserved_21\"", "]", "=", "[", "163", ",", "FieldType", ".", "Hex", ",", "ScaleType", ".", "No", ",", "\"\"", ",", "0", ",", "False", ",", "False", "]", "self", ".", "m_hldy", "[", "\"crc16\"", "]", "=", "[", "2", ",", "FieldType", ".", "Hex", ",", "ScaleType", ".", "No", ",", "\"\"", ",", "0", ",", "False", ",", "False", "]", "pass" ]
87.729167
49
def nutation(date, eop_correction=True, terms=106): # pragma: no cover """Nutation as a rotation matrix """ epsilon_bar, delta_psi, delta_eps = np.deg2rad(_nutation(date, eop_correction, terms)) epsilon = epsilon_bar + delta_eps return rot1(-epsilon_bar) @ rot3(delta_psi) @ rot1(epsilon)
[ "def", "nutation", "(", "date", ",", "eop_correction", "=", "True", ",", "terms", "=", "106", ")", ":", "# pragma: no cover", "epsilon_bar", ",", "delta_psi", ",", "delta_eps", "=", "np", ".", "deg2rad", "(", "_nutation", "(", "date", ",", "eop_correction", ",", "terms", ")", ")", "epsilon", "=", "epsilon_bar", "+", "delta_eps", "return", "rot1", "(", "-", "epsilon_bar", ")", "@", "rot3", "(", "delta_psi", ")", "@", "rot1", "(", "epsilon", ")" ]
43.428571
21
def process_whitelists(): """Download approved top 1M lists.""" import csv import grequests import os import StringIO import zipfile mapping = { 'http://s3.amazonaws.com/alexa-static/top-1m.csv.zip': { 'name': 'alexa.txt' }, 'http://s3-us-west-1.amazonaws.com/umbrella-static/top-1m.csv.zip': { 'name': 'cisco.txt' } } rs = (grequests.get(u) for u in mapping.keys()) responses = grequests.map(rs) for r in responses: data = zipfile.ZipFile(StringIO.StringIO(r.content)).read('top-1m.csv') stream = StringIO.StringIO(data) reader = csv.reader(stream, delimiter=',', quoting=csv.QUOTE_MINIMAL) items = [row[1].strip() for row in reader] stream.close() config_path = os.path.expanduser('~/.config/blockade') file_path = os.path.join(config_path, mapping[r.url]['name']) handle = open(file_path, 'w') for item in items: if item.count('.') == 0: continue handle.write(item + "\n") handle.close() return True
[ "def", "process_whitelists", "(", ")", ":", "import", "csv", "import", "grequests", "import", "os", "import", "StringIO", "import", "zipfile", "mapping", "=", "{", "'http://s3.amazonaws.com/alexa-static/top-1m.csv.zip'", ":", "{", "'name'", ":", "'alexa.txt'", "}", ",", "'http://s3-us-west-1.amazonaws.com/umbrella-static/top-1m.csv.zip'", ":", "{", "'name'", ":", "'cisco.txt'", "}", "}", "rs", "=", "(", "grequests", ".", "get", "(", "u", ")", "for", "u", "in", "mapping", ".", "keys", "(", ")", ")", "responses", "=", "grequests", ".", "map", "(", "rs", ")", "for", "r", "in", "responses", ":", "data", "=", "zipfile", ".", "ZipFile", "(", "StringIO", ".", "StringIO", "(", "r", ".", "content", ")", ")", ".", "read", "(", "'top-1m.csv'", ")", "stream", "=", "StringIO", ".", "StringIO", "(", "data", ")", "reader", "=", "csv", ".", "reader", "(", "stream", ",", "delimiter", "=", "','", ",", "quoting", "=", "csv", ".", "QUOTE_MINIMAL", ")", "items", "=", "[", "row", "[", "1", "]", ".", "strip", "(", ")", "for", "row", "in", "reader", "]", "stream", ".", "close", "(", ")", "config_path", "=", "os", ".", "path", ".", "expanduser", "(", "'~/.config/blockade'", ")", "file_path", "=", "os", ".", "path", ".", "join", "(", "config_path", ",", "mapping", "[", "r", ".", "url", "]", "[", "'name'", "]", ")", "handle", "=", "open", "(", "file_path", ",", "'w'", ")", "for", "item", "in", "items", ":", "if", "item", ".", "count", "(", "'.'", ")", "==", "0", ":", "continue", "handle", ".", "write", "(", "item", "+", "\"\\n\"", ")", "handle", ".", "close", "(", ")", "return", "True" ]
34
19.28125
def point_on_line(point, line_start, line_end, accuracy=50.): """Checks whether a point lies on a line The function checks whether the point "point" (P) lies on the line defined by its starting point line_start (A) and its end point line_end (B). This is done by comparing the distance of [AB] with the sum of the distances [AP] and [PB]. If the difference is smaller than [AB] / accuracy, the point P is assumed to be on the line. By increasing the value of accuracy (the default is 50), the tolerance is decreased. :param point: Point to be checked (tuple with x and y coordinate) :param line_start: Starting point of the line (tuple with x and y coordinate) :param line_end: End point of the line (tuple with x and y coordinate) :param accuracy: The higher this value, the less distance is tolerated :return: True if the point is on the line, False if not """ length = dist(line_start, line_end) ds = length / float(accuracy) if -ds < (dist(line_start, point) + dist(point, line_end) - length) < ds: return True return False
[ "def", "point_on_line", "(", "point", ",", "line_start", ",", "line_end", ",", "accuracy", "=", "50.", ")", ":", "length", "=", "dist", "(", "line_start", ",", "line_end", ")", "ds", "=", "length", "/", "float", "(", "accuracy", ")", "if", "-", "ds", "<", "(", "dist", "(", "line_start", ",", "point", ")", "+", "dist", "(", "point", ",", "line_end", ")", "-", "length", ")", "<", "ds", ":", "return", "True", "return", "False" ]
54.15
29.8
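A quick check of the tolerance behaviour, assuming ``dist`` is this module's Euclidean distance and the function above is in scope:

start, end = (0.0, 0.0), (10.0, 0.0)

print(point_on_line((5.0, 0.0), start, end))               # True: exactly on the segment
print(point_on_line((5.0, 2.0), start, end))               # False: detour ~0.77 > 10 / 50
print(point_on_line((5.0, 2.0), start, end, accuracy=5.))  # True: tolerance widened to 2.0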
def _downgrade_v3(op): """ Downgrade assets db by adding a not null constraint on ``equities.first_traded`` """ op.create_table( '_new_equities', sa.Column( 'sid', sa.Integer, unique=True, nullable=False, primary_key=True, ), sa.Column('symbol', sa.Text), sa.Column('company_symbol', sa.Text), sa.Column('share_class_symbol', sa.Text), sa.Column('fuzzy_symbol', sa.Text), sa.Column('asset_name', sa.Text), sa.Column('start_date', sa.Integer, default=0, nullable=False), sa.Column('end_date', sa.Integer, nullable=False), sa.Column('first_traded', sa.Integer, nullable=False), sa.Column('auto_close_date', sa.Integer), sa.Column('exchange', sa.Text), ) op.execute( """ insert into _new_equities select * from equities where equities.first_traded is not null """, ) op.drop_table('equities') op.rename_table('_new_equities', 'equities') # we need to make sure the indices have the proper names after the rename op.create_index( 'ix_equities_company_symbol', 'equities', ['company_symbol'], ) op.create_index( 'ix_equities_fuzzy_symbol', 'equities', ['fuzzy_symbol'], )
[ "def", "_downgrade_v3", "(", "op", ")", ":", "op", ".", "create_table", "(", "'_new_equities'", ",", "sa", ".", "Column", "(", "'sid'", ",", "sa", ".", "Integer", ",", "unique", "=", "True", ",", "nullable", "=", "False", ",", "primary_key", "=", "True", ",", ")", ",", "sa", ".", "Column", "(", "'symbol'", ",", "sa", ".", "Text", ")", ",", "sa", ".", "Column", "(", "'company_symbol'", ",", "sa", ".", "Text", ")", ",", "sa", ".", "Column", "(", "'share_class_symbol'", ",", "sa", ".", "Text", ")", ",", "sa", ".", "Column", "(", "'fuzzy_symbol'", ",", "sa", ".", "Text", ")", ",", "sa", ".", "Column", "(", "'asset_name'", ",", "sa", ".", "Text", ")", ",", "sa", ".", "Column", "(", "'start_date'", ",", "sa", ".", "Integer", ",", "default", "=", "0", ",", "nullable", "=", "False", ")", ",", "sa", ".", "Column", "(", "'end_date'", ",", "sa", ".", "Integer", ",", "nullable", "=", "False", ")", ",", "sa", ".", "Column", "(", "'first_traded'", ",", "sa", ".", "Integer", ",", "nullable", "=", "False", ")", ",", "sa", ".", "Column", "(", "'auto_close_date'", ",", "sa", ".", "Integer", ")", ",", "sa", ".", "Column", "(", "'exchange'", ",", "sa", ".", "Text", ")", ",", ")", "op", ".", "execute", "(", "\"\"\"\n insert into _new_equities\n select * from equities\n where equities.first_traded is not null\n \"\"\"", ",", ")", "op", ".", "drop_table", "(", "'equities'", ")", "op", ".", "rename_table", "(", "'_new_equities'", ",", "'equities'", ")", "# we need to make sure the indices have the proper names after the rename", "op", ".", "create_index", "(", "'ix_equities_company_symbol'", ",", "'equities'", ",", "[", "'company_symbol'", "]", ",", ")", "op", ".", "create_index", "(", "'ix_equities_fuzzy_symbol'", ",", "'equities'", ",", "[", "'fuzzy_symbol'", "]", ",", ")" ]
29.644444
15.088889
def node_from_ini(ini_file, nodefactory=Node, root_name='ini'): """ Convert a .ini file into a Node object. :param ini_file: a filename or a file like object in read mode """ fileobj = open(ini_file) if isinstance(ini_file, str) else ini_file cfp = configparser.RawConfigParser() cfp.read_file(fileobj) root = nodefactory(root_name) sections = cfp.sections() for section in sections: params = dict(cfp.items(section)) root.append(Node(section, params)) return root
[ "def", "node_from_ini", "(", "ini_file", ",", "nodefactory", "=", "Node", ",", "root_name", "=", "'ini'", ")", ":", "fileobj", "=", "open", "(", "ini_file", ")", "if", "isinstance", "(", "ini_file", ",", "str", ")", "else", "ini_file", "cfp", "=", "configparser", ".", "RawConfigParser", "(", ")", "cfp", ".", "read_file", "(", "fileobj", ")", "root", "=", "nodefactory", "(", "root_name", ")", "sections", "=", "cfp", ".", "sections", "(", ")", "for", "section", "in", "sections", ":", "params", "=", "dict", "(", "cfp", ".", "items", "(", "section", ")", ")", "root", ".", "append", "(", "Node", "(", "section", ",", "params", ")", ")", "return", "root" ]
34.066667
13
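A small sketch, assuming ``Node`` here mimics the ElementTree-style tag/attrib interface its constructor calls suggest; ``read_file`` also accepts an in-memory stream:

import io

ini = io.StringIO(u"[general]\ndescription = demo\n\n[site]\nlon = 9.1\nlat = 45.5\n")
root = node_from_ini(ini, root_name='job')
for section in root:
    print(section.tag, section.attrib)
# -> general {'description': 'demo'}
#    site {'lon': '9.1', 'lat': '45.5'}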
def as_list(self): """ returns a list version of the object, based on its attributes """ if hasattr(self, 'cust_list'): return self.cust_list if hasattr(self, 'attr_check'): self.attr_check() cls_bltns = set(dir(self.__class__)) ret = [a for a in dir(self) if a not in cls_bltns and getattr(self, a)] return ret
[ "def", "as_list", "(", "self", ")", ":", "if", "hasattr", "(", "self", ",", "'cust_list'", ")", ":", "return", "self", ".", "cust_list", "if", "hasattr", "(", "self", ",", "'attr_check'", ")", ":", "self", ".", "attr_check", "(", ")", "cls_bltns", "=", "set", "(", "dir", "(", "self", ".", "__class__", ")", ")", "ret", "=", "[", "a", "for", "a", "in", "dir", "(", "self", ")", "if", "a", "not", "in", "cls_bltns", "and", "getattr", "(", "self", ",", "a", ")", "]", "return", "ret" ]
36.363636
12.363636
def get_group(self, group_descriptor): """GetGroup. [Preview API] Get a group by its descriptor. :param str group_descriptor: The descriptor of the desired graph group. :rtype: :class:`<GraphGroup> <azure.devops.v5_0.graph.models.GraphGroup>` """ route_values = {} if group_descriptor is not None: route_values['groupDescriptor'] = self._serialize.url('group_descriptor', group_descriptor, 'str') response = self._send(http_method='GET', location_id='ebbe6af8-0b91-4c13-8cf1-777c14858188', version='5.0-preview.1', route_values=route_values) return self._deserialize('GraphGroup', response)
[ "def", "get_group", "(", "self", ",", "group_descriptor", ")", ":", "route_values", "=", "{", "}", "if", "group_descriptor", "is", "not", "None", ":", "route_values", "[", "'groupDescriptor'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'group_descriptor'", ",", "group_descriptor", ",", "'str'", ")", "response", "=", "self", ".", "_send", "(", "http_method", "=", "'GET'", ",", "location_id", "=", "'ebbe6af8-0b91-4c13-8cf1-777c14858188'", ",", "version", "=", "'5.0-preview.1'", ",", "route_values", "=", "route_values", ")", "return", "self", ".", "_deserialize", "(", "'GraphGroup'", ",", "response", ")" ]
53.642857
19.571429
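A hedged call sketch; the client construction follows the azure-devops client-factory pattern, and the organization URL, token, and group descriptor are all placeholders:

from azure.devops.connection import Connection
from msrest.authentication import BasicAuthentication

credentials = BasicAuthentication('', 'PERSONAL_ACCESS_TOKEN')   # placeholder token
connection = Connection(base_url='https://dev.azure.com/your-org', creds=credentials)
graph_client = connection.clients.get_graph_client()

group = graph_client.get_group('vssgp.Uy0xLTktMT...')            # placeholder descriptor
print(group.display_name)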
def search_results_last_url(html, xpath, label): """ Get the URL of the 'last' button in a search results listing. """ for container in html.findall(xpath): if container.text_content().strip() == label: return container.find('.//a').get('href')
[ "def", "search_results_last_url", "(", "html", ",", "xpath", ",", "label", ")", ":", "for", "container", "in", "html", ".", "findall", "(", "xpath", ")", ":", "if", "container", ".", "text_content", "(", ")", ".", "strip", "(", ")", "==", "label", ":", "return", "container", ".", "find", "(", "'.//a'", ")", ".", "get", "(", "'href'", ")" ]
53.6
7
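A self-contained check with ``lxml`` standing in for the parsed page, assuming the function above is in scope (the markup is invented for illustration):

import lxml.html

page = '<ul><li class="pager"><a href="/results?page=9">Last</a></li></ul>'
html = lxml.html.fromstring(page)

print(search_results_last_url(html, './/li[@class="pager"]', 'Last'))
# -> /results?page=9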
def query_lookupd(self): """ Trigger a query of the configured ``nsq_lookupd_http_addresses``. """ endpoint = self.lookupd_http_addresses[self.lookupd_query_index] self.lookupd_query_index = (self.lookupd_query_index + 1) % len(self.lookupd_http_addresses) # urlsplit() is faulty if scheme not present if '://' not in endpoint: endpoint = 'http://' + endpoint scheme, netloc, path, query, fragment = urlparse.urlsplit(endpoint) if not path or path == "/": path = "/lookup" params = parse_qs(query) params['topic'] = self.topic query = urlencode(_utf8_params(params), doseq=1) lookupd_url = urlparse.urlunsplit((scheme, netloc, path, query, fragment)) req = tornado.httpclient.HTTPRequest( lookupd_url, method='GET', headers={'Accept': 'application/vnd.nsq; version=1.0'}, connect_timeout=self.lookupd_connect_timeout, request_timeout=self.lookupd_request_timeout) callback = functools.partial(self._finish_query_lookupd, lookupd_url=lookupd_url) self.http_client.fetch(req, callback=callback)
[ "def", "query_lookupd", "(", "self", ")", ":", "endpoint", "=", "self", ".", "lookupd_http_addresses", "[", "self", ".", "lookupd_query_index", "]", "self", ".", "lookupd_query_index", "=", "(", "self", ".", "lookupd_query_index", "+", "1", ")", "%", "len", "(", "self", ".", "lookupd_http_addresses", ")", "# urlsplit() is faulty if scheme not present", "if", "'://'", "not", "in", "endpoint", ":", "endpoint", "=", "'http://'", "+", "endpoint", "scheme", ",", "netloc", ",", "path", ",", "query", ",", "fragment", "=", "urlparse", ".", "urlsplit", "(", "endpoint", ")", "if", "not", "path", "or", "path", "==", "\"/\"", ":", "path", "=", "\"/lookup\"", "params", "=", "parse_qs", "(", "query", ")", "params", "[", "'topic'", "]", "=", "self", ".", "topic", "query", "=", "urlencode", "(", "_utf8_params", "(", "params", ")", ",", "doseq", "=", "1", ")", "lookupd_url", "=", "urlparse", ".", "urlunsplit", "(", "(", "scheme", ",", "netloc", ",", "path", ",", "query", ",", "fragment", ")", ")", "req", "=", "tornado", ".", "httpclient", ".", "HTTPRequest", "(", "lookupd_url", ",", "method", "=", "'GET'", ",", "headers", "=", "{", "'Accept'", ":", "'application/vnd.nsq; version=1.0'", "}", ",", "connect_timeout", "=", "self", ".", "lookupd_connect_timeout", ",", "request_timeout", "=", "self", ".", "lookupd_request_timeout", ")", "callback", "=", "functools", ".", "partial", "(", "self", ".", "_finish_query_lookupd", ",", "lookupd_url", "=", "lookupd_url", ")", "self", ".", "http_client", ".", "fetch", "(", "req", ",", "callback", "=", "callback", ")" ]
41.785714
22
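The URL handling above can be exercised on its own; a sketch of just that part, using the Python 3 names for the ``urlparse`` pieces:

from urllib.parse import parse_qs, urlencode, urlsplit, urlunsplit

endpoint = '127.0.0.1:4161'         # hypothetical lookupd address
if '://' not in endpoint:           # urlsplit() misparses scheme-less addresses
    endpoint = 'http://' + endpoint
scheme, netloc, path, query, fragment = urlsplit(endpoint)
if not path or path == '/':
    path = '/lookup'
params = parse_qs(query)
params['topic'] = 'events'
print(urlunsplit((scheme, netloc, path, urlencode(params, doseq=True), fragment)))
# -> http://127.0.0.1:4161/lookup?topic=events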
def rnni(self, times=1, **kwargs): """ Applies an NNI operation on a randomly chosen edge. keyword args: use_weighted_choice (True/False) weight the random edge selection by edge length transform (callable) transforms the edges using this function, prior to weighted selection """ nni = NNI(self.copy()) for _ in range(times): nni.rnni(**kwargs) # nni.reroot_tree() return nni.tree
[ "def", "rnni", "(", "self", ",", "times", "=", "1", ",", "*", "*", "kwargs", ")", ":", "nni", "=", "NNI", "(", "self", ".", "copy", "(", ")", ")", "for", "_", "in", "range", "(", "times", ")", ":", "nni", ".", "rnni", "(", "*", "*", "kwargs", ")", "# nni.reroot_tree()", "return", "nni", ".", "tree" ]
41.909091
21.818182
def recv(self): """ Receive a message from the other end. Returns a tuple of the command (a string) and payload (a list). """ # See if we have a message to process... if self._recvbuf: return self._recvbuf_pop() # If it's closed, don't try to read more data if not self._sock: raise ConnectionClosed("Connection closed") # OK, get some data from the socket while True: try: data = self._sock.recv(4096) except socket.error: # We'll need to re-raise e_type, e_value, e_tb = sys.exc_info() # Make sure the socket is closed self.close() # Re-raise raise e_type, e_value, e_tb # Did the connection get closed? if not data: # There can never be anything in the buffer here self.close() raise ConnectionClosed("Connection closed") # Begin parsing the read-in data partial = self._recvbuf_partial + data self._recvbuf_partial = '' while partial: msg, sep, partial = partial.partition('\n') # If we have no sep, then it's not a complete message, # and the remainder is in msg if not sep: self._recvbuf_partial = msg break # Parse the message try: self._recvbuf.append(json.loads(msg)) except ValueError as exc: # Error parsing the message; save the exception, # which we will re-raise self._recvbuf.append(exc) # Make sure we have a message to return if self._recvbuf: return self._recvbuf_pop()
[ "def", "recv", "(", "self", ")", ":", "# See if we have a message to process...", "if", "self", ".", "_recvbuf", ":", "return", "self", ".", "_recvbuf_pop", "(", ")", "# If it's closed, don't try to read more data", "if", "not", "self", ".", "_sock", ":", "raise", "ConnectionClosed", "(", "\"Connection closed\"", ")", "# OK, get some data from the socket", "while", "True", ":", "try", ":", "data", "=", "self", ".", "_sock", ".", "recv", "(", "4096", ")", "except", "socket", ".", "error", ":", "# We'll need to re-raise", "e_type", ",", "e_value", ",", "e_tb", "=", "sys", ".", "exc_info", "(", ")", "# Make sure the socket is closed", "self", ".", "close", "(", ")", "# Re-raise", "raise", "e_type", ",", "e_value", ",", "e_tb", "# Did the connection get closed?", "if", "not", "data", ":", "# There can never be anything in the buffer here", "self", ".", "close", "(", ")", "raise", "ConnectionClosed", "(", "\"Connection closed\"", ")", "# Begin parsing the read-in data", "partial", "=", "self", ".", "_recvbuf_partial", "+", "data", "self", ".", "_recvbuf_partial", "=", "''", "while", "partial", ":", "msg", ",", "sep", ",", "partial", "=", "partial", ".", "partition", "(", "'\\n'", ")", "# If we have no sep, then it's not a complete message,", "# and the remainder is in msg", "if", "not", "sep", ":", "self", ".", "_recvbuf_partial", "=", "msg", "break", "# Parse the message", "try", ":", "self", ".", "_recvbuf", ".", "append", "(", "json", ".", "loads", "(", "msg", ")", ")", "except", "ValueError", "as", "exc", ":", "# Error parsing the message; save the exception,", "# which we will re-raise", "self", ".", "_recvbuf", ".", "append", "(", "exc", ")", "# Make sure we have a message to return", "if", "self", ".", "_recvbuf", ":", "return", "self", ".", "_recvbuf_pop", "(", ")" ]
32.877193
16.45614
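The framing logic above (newline-delimited JSON with a partial-message carry-over between reads) can be tried in isolation; a runnable sketch:

import json

partial = ''
messages = []
for chunk in ('{"cmd": "pi', 'ng"}\n{"cmd":', ' "pong"}\n'):  # simulated recv() reads
    buf = partial + chunk
    partial = ''
    while buf:
        msg, sep, buf = buf.partition('\n')
        if not sep:              # incomplete tail: save it for the next read
            partial = msg
            break
        messages.append(json.loads(msg))

print(messages)   # [{'cmd': 'ping'}, {'cmd': 'pong'}]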
def safe_sum(x, alt_value=-np.inf, name=None): """Elementwise adds list members, replacing non-finite results with alt_value. Typically the `alt_value` is chosen so the `MetropolisHastings` `TransitionKernel` always rejects the proposal. Args: x: Python `list` of `Tensors` to elementwise add. alt_value: Python scalar used to replace any elementwise sums which would otherwise be non-finite. name: Python `str` name prefixed to Ops created by this function. Default value: `None` (i.e., "safe_sum"). Returns: safe_sum: `Tensor` representing the elementwise sum of list of `Tensor`s `x` or `alt_value` where sums are non-finite. Raises: TypeError: if `x` is not list-like. ValueError: if `x` is empty. """ with tf.compat.v1.name_scope(name, 'safe_sum', [x, alt_value]): if not is_list_like(x): raise TypeError('Expected list input.') if not x: raise ValueError('Input should not be empty.') in_shape = x[0].shape x = tf.stack(x, axis=-1) x = tf.reduce_sum(input_tensor=x, axis=-1) alt_value = np.array(alt_value, x.dtype.as_numpy_dtype) alt_fill = tf.fill(tf.shape(input=x), value=alt_value) x = tf.where(tf.math.is_finite(x), x, alt_fill) x.set_shape(x.shape.merge_with(in_shape)) return x
[ "def", "safe_sum", "(", "x", ",", "alt_value", "=", "-", "np", ".", "inf", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", ",", "'safe_sum'", ",", "[", "x", ",", "alt_value", "]", ")", ":", "if", "not", "is_list_like", "(", "x", ")", ":", "raise", "TypeError", "(", "'Expected list input.'", ")", "if", "not", "x", ":", "raise", "ValueError", "(", "'Input should not be empty.'", ")", "in_shape", "=", "x", "[", "0", "]", ".", "shape", "x", "=", "tf", ".", "stack", "(", "x", ",", "axis", "=", "-", "1", ")", "x", "=", "tf", ".", "reduce_sum", "(", "input_tensor", "=", "x", ",", "axis", "=", "-", "1", ")", "alt_value", "=", "np", ".", "array", "(", "alt_value", ",", "x", ".", "dtype", ".", "as_numpy_dtype", ")", "alt_fill", "=", "tf", ".", "fill", "(", "tf", ".", "shape", "(", "input", "=", "x", ")", ",", "value", "=", "alt_value", ")", "x", "=", "tf", ".", "where", "(", "tf", ".", "math", ".", "is_finite", "(", "x", ")", ",", "x", ",", "alt_fill", ")", "x", ".", "set_shape", "(", "x", ".", "shape", ".", "merge_with", "(", "in_shape", ")", ")", "return", "x" ]
37.382353
18.882353
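A small demonstration, assuming TensorFlow 2.x (where the ``tf.compat.v1`` name scope used above still works) and ``safe_sum`` in scope:

import numpy as np
import tensorflow as tf

x = [tf.constant([1., np.inf]), tf.constant([2., 3.])]
print(safe_sum(x))
# -> [  3. -inf]: the finite pair sums normally; the non-finite sum
#    is replaced by the default alt_value of -inf.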
def _get_element(name, element_type, server=None, with_properties=True): ''' Get an element with or without properties ''' element = {} name = quote(name, safe='') data = _api_get('{0}/{1}'.format(element_type, name), server) # Format data, get properties if asked, and return the whole thing if any(data['extraProperties']['entity']): for key, value in data['extraProperties']['entity'].items(): element[key] = value if with_properties: element['properties'] = _get_element_properties(name, element_type) return element return None
[ "def", "_get_element", "(", "name", ",", "element_type", ",", "server", "=", "None", ",", "with_properties", "=", "True", ")", ":", "element", "=", "{", "}", "name", "=", "quote", "(", "name", ",", "safe", "=", "''", ")", "data", "=", "_api_get", "(", "'{0}/{1}'", ".", "format", "(", "element_type", ",", "name", ")", ",", "server", ")", "# Format data, get properties if asked, and return the whole thing", "if", "any", "(", "data", "[", "'extraProperties'", "]", "[", "'entity'", "]", ")", ":", "for", "key", ",", "value", "in", "data", "[", "'extraProperties'", "]", "[", "'entity'", "]", ".", "items", "(", ")", ":", "element", "[", "key", "]", "=", "value", "if", "with_properties", ":", "element", "[", "'properties'", "]", "=", "_get_element_properties", "(", "name", ",", "element_type", ")", "return", "element", "return", "None" ]
37.625
23
def check_validity(self): """ Raise an error if any invalid attribute found. Raises ------ TypeError If an attribute has an invalid type. ValueError If an attribute has an invalid value (of the correct type). """ # tracks for track in self.tracks: if not isinstance(track, Track): raise TypeError("`tracks` must be a list of " "`pypianoroll.Track` instances.") track.check_validity() # tempo if not isinstance(self.tempo, np.ndarray): raise TypeError("`tempo` must be int or a numpy array.") elif not np.issubdtype(self.tempo.dtype, np.number): raise TypeError("Data type of `tempo` must be a subdtype of " "np.number.") elif self.tempo.ndim != 1: raise ValueError("`tempo` must be a 1D numpy array.") if np.any(self.tempo <= 0.0): raise ValueError("`tempo` should contain only positive numbers.") # downbeat if self.downbeat is not None: if not isinstance(self.downbeat, np.ndarray): raise TypeError("`downbeat` must be a numpy array.") if not np.issubdtype(self.downbeat.dtype, np.bool_): raise TypeError("Data type of `downbeat` must be bool.") if self.downbeat.ndim != 1: raise ValueError("`downbeat` must be a 1D numpy array.") # beat_resolution if not isinstance(self.beat_resolution, int): raise TypeError("`beat_resolution` must be int.") if self.beat_resolution < 1: raise ValueError("`beat_resolution` must be a positive integer.") # name if not isinstance(self.name, string_types): raise TypeError("`name` must be a string.")
[ "def", "check_validity", "(", "self", ")", ":", "# tracks", "for", "track", "in", "self", ".", "tracks", ":", "if", "not", "isinstance", "(", "track", ",", "Track", ")", ":", "raise", "TypeError", "(", "\"`tracks` must be a list of \"", "\"`pypianoroll.Track` instances.\"", ")", "track", ".", "check_validity", "(", ")", "# tempo", "if", "not", "isinstance", "(", "self", ".", "tempo", ",", "np", ".", "ndarray", ")", ":", "raise", "TypeError", "(", "\"`tempo` must be int or a numpy array.\"", ")", "elif", "not", "np", ".", "issubdtype", "(", "self", ".", "tempo", ".", "dtype", ",", "np", ".", "number", ")", ":", "raise", "TypeError", "(", "\"Data type of `tempo` must be a subdtype of \"", "\"np.number.\"", ")", "elif", "self", ".", "tempo", ".", "ndim", "!=", "1", ":", "raise", "ValueError", "(", "\"`tempo` must be a 1D numpy array.\"", ")", "if", "np", ".", "any", "(", "self", ".", "tempo", "<=", "0.0", ")", ":", "raise", "ValueError", "(", "\"`tempo` should contain only positive numbers.\"", ")", "# downbeat", "if", "self", ".", "downbeat", "is", "not", "None", ":", "if", "not", "isinstance", "(", "self", ".", "downbeat", ",", "np", ".", "ndarray", ")", ":", "raise", "TypeError", "(", "\"`downbeat` must be a numpy array.\"", ")", "if", "not", "np", ".", "issubdtype", "(", "self", ".", "downbeat", ".", "dtype", ",", "np", ".", "bool_", ")", ":", "raise", "TypeError", "(", "\"Data type of `downbeat` must be bool.\"", ")", "if", "self", ".", "downbeat", ".", "ndim", "!=", "1", ":", "raise", "ValueError", "(", "\"`downbeat` must be a 1D numpy array.\"", ")", "# beat_resolution", "if", "not", "isinstance", "(", "self", ".", "beat_resolution", ",", "int", ")", ":", "raise", "TypeError", "(", "\"`beat_resolution` must be int.\"", ")", "if", "self", ".", "beat_resolution", "<", "1", ":", "raise", "ValueError", "(", "\"`beat_resolution` must be a positive integer.\"", ")", "# name", "if", "not", "isinstance", "(", "self", ".", "name", ",", "string_types", ")", ":", "raise", "TypeError", "(", "\"`name` must be a string.\"", ")" ]
42.159091
18.659091
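A sketch of when `check_validity` raises, assuming the pypianoroll `Multitrack` class that the error messages reference and its default constructor:

import numpy as np
import pypianoroll

m = pypianoroll.Multitrack()        # assumed default constructor
m.tempo = np.array([120.0, 0.0])    # 0.0 is not a positive tempo
try:
    m.check_validity()
except ValueError as err:
    print(err)                      # `tempo` should contain only positive numbers.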
def checkValue(self,value,strict=0):
        """Check and convert a parameter value.

        Raises an exception if the value is not permitted for this
        parameter.  Otherwise returns the value (converted to the right
        type).
        """
        v = self._coerceValue(value,strict)
        return self.checkOneValue(v,strict)
[ "def", "checkValue", "(", "self", ",", "value", ",", "strict", "=", "0", ")", ":", "v", "=", "self", ".", "_coerceValue", "(", "value", ",", "strict", ")", "return", "self", ".", "checkOneValue", "(", "v", ",", "strict", ")" ]
36.777778
13.444444
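A self-contained sketch of the template-method contract in `checkValue`: `_coerceValue` converts the raw input, then `checkOneValue` validates the coerced value. The base and subclass names here are stand-ins, not the real class hierarchy:

class ParBase:
    # Minimal stand-in base carrying the checkValue template method.
    def checkValue(self, value, strict=0):
        v = self._coerceValue(value, strict)
        return self.checkOneValue(v, strict)

class IntPar(ParBase):
    def _coerceValue(self, value, strict=0):
        return int(value)
    def checkOneValue(self, v, strict=0):
        if not 0 <= v <= 100:
            raise ValueError('value out of range: %r' % v)
        return v

print(IntPar().checkValue('42'))    # -> 42, coerced then range-checked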
def visit_object(self, node): """Fallback rendering for objects. If the current application is in debug-mode (``flask.current_app.debug`` is ``True``), an ``<!-- HTML comment -->`` will be rendered, indicating which class is missing a visitation function. Outside of debug-mode, returns an empty string. """ if current_app.debug: return tags.comment('no implementation in {} to render {}'.format( self.__class__.__name__, node.__class__.__name__, )) return ''
[ "def", "visit_object", "(", "self", ",", "node", ")", ":", "if", "current_app", ".", "debug", ":", "return", "tags", ".", "comment", "(", "'no implementation in {} to render {}'", ".", "format", "(", "self", ".", "__class__", ".", "__name__", ",", "node", ".", "__class__", ".", "__name__", ",", ")", ")", "return", "''" ]
37.533333
19.066667
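A hedged sketch of the fallback in action; `renderer` stands in for an instance of a visitor class that inherits this `visit_object`, and the `tags` helper is assumed to be in scope in that class's module:

from flask import Flask

app = Flask(__name__)
app.debug = True                    # enables the diagnostic-comment path

class Unrenderable:
    pass

with app.app_context():
    # renderer is hypothetical; in debug mode this prints an HTML comment
    # naming both the visitor class and Unrenderable, otherwise ''.
    print(renderer.visit_object(Unrenderable()))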
def zeldovich(dim=2, N=256, n=-2.5, t=None, scale=1, seed=None):
    """Creates a Zeldovich DataFrame.
    """
    import vaex.file
    return vaex.file.other.Zeldovich(dim=dim, N=N, n=n, t=t, scale=scale)
[ "def", "zeldovich", "(", "dim", "=", "2", ",", "N", "=", "256", ",", "n", "=", "-", "2.5", ",", "t", "=", "None", ",", "scale", "=", "1", ",", "seed", "=", "None", ")", ":", "import", "vaex", ".", "file", "return", "vaex", ".", "file", ".", "other", ".", "Zeldovich", "(", "dim", "=", "dim", ",", "N", "=", "N", ",", "n", "=", "n", ",", "t", "=", "t", ",", "scale", "=", "scale", ")" ]
40.2
15.4
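A usage sketch, assuming the function is re-exported as `vaex.zeldovich`. Note that `seed` is accepted by the signature but never forwarded to the constructor in the snippet above:

import vaex

df = vaex.zeldovich(dim=2, N=128)   # a 2D Zeldovich sample (N**2 points, assumed)
print(len(df), df.column_names)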
def freeze(self, dest_dir):
        """Freezes every resource within a context."""
        for resource in self.resources():
            if resource.present:
                resource.freeze(dest_dir)
[ "def", "freeze", "(", "self", ",", "dest_dir", ")", ":", "for", "resource", "in", "self", ".", "resources", "(", ")", ":", "if", "resource", ".", "present", ":", "resource", ".", "freeze", "(", "dest_dir", ")" ]
38.8
4.6
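A small stand-in showing the protocol `freeze` relies on: each object yielded by `resources()` must expose a boolean `present` and a `freeze(dest_dir)` method. Both classes here are illustration-only:

class DummyResource:
    present = True
    def freeze(self, dest_dir):
        print('would write into', dest_dir)

class DummyContext:
    def resources(self):
        return [DummyResource()]
    def freeze(self, dest_dir):     # same body as above, for a runnable demo
        for resource in self.resources():
            if resource.present:
                resource.freeze(dest_dir)

DummyContext().freeze('/tmp/frozen')  # -> would write into /tmp/frozen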
def get_subdomain(url): """Get the subdomain of the given URL. Args: url (str): The URL to get the subdomain from. Returns: str: The subdomain(s) """ if url not in URLHelper.__cache: URLHelper.__cache[url] = urlparse(url) return ".".join(URLHelper.__cache[url].netloc.split(".")[:-2])
[ "def", "get_subdomain", "(", "url", ")", ":", "if", "url", "not", "in", "URLHelper", ".", "__cache", ":", "URLHelper", ".", "__cache", "[", "url", "]", "=", "urlparse", "(", "url", ")", "return", "\".\"", ".", "join", "(", "URLHelper", ".", "__cache", "[", "url", "]", ".", "netloc", ".", "split", "(", "\".\"", ")", "[", ":", "-", "2", "]", ")" ]
23.933333
22.133333
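Two quick checks of the behaviour, assuming `get_subdomain` is exposed as a static method on `URLHelper`; the `[:-2]` slice drops the registered domain and TLD, so a bare domain yields an empty string:

print(URLHelper.get_subdomain('https://api.v2.example.com/path'))  # 'api.v2'
print(URLHelper.get_subdomain('https://example.com/'))             # ''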
def _connect(self): "Wrap the socket with SSL support" sock = super(SSLConnection, self)._connect() if hasattr(ssl, "create_default_context"): context = ssl.create_default_context() context.check_hostname = False context.verify_mode = self.cert_reqs if self.certfile and self.keyfile: context.load_cert_chain(certfile=self.certfile, keyfile=self.keyfile) if self.ca_certs: context.load_verify_locations(self.ca_certs) sock = context.wrap_socket(sock, server_hostname=self.host) else: # In case this code runs in a version which is older than 2.7.9, # we want to fall back to old code sock = ssl.wrap_socket(sock, cert_reqs=self.cert_reqs, keyfile=self.keyfile, certfile=self.certfile, ca_certs=self.ca_certs) return sock
[ "def", "_connect", "(", "self", ")", ":", "sock", "=", "super", "(", "SSLConnection", ",", "self", ")", ".", "_connect", "(", ")", "if", "hasattr", "(", "ssl", ",", "\"create_default_context\"", ")", ":", "context", "=", "ssl", ".", "create_default_context", "(", ")", "context", ".", "check_hostname", "=", "False", "context", ".", "verify_mode", "=", "self", ".", "cert_reqs", "if", "self", ".", "certfile", "and", "self", ".", "keyfile", ":", "context", ".", "load_cert_chain", "(", "certfile", "=", "self", ".", "certfile", ",", "keyfile", "=", "self", ".", "keyfile", ")", "if", "self", ".", "ca_certs", ":", "context", ".", "load_verify_locations", "(", "self", ".", "ca_certs", ")", "sock", "=", "context", ".", "wrap_socket", "(", "sock", ",", "server_hostname", "=", "self", ".", "host", ")", "else", ":", "# In case this code runs in a version which is older than 2.7.9,", "# we want to fall back to old code", "sock", "=", "ssl", ".", "wrap_socket", "(", "sock", ",", "cert_reqs", "=", "self", ".", "cert_reqs", ",", "keyfile", "=", "self", ".", "keyfile", ",", "certfile", "=", "self", ".", "certfile", ",", "ca_certs", "=", "self", ".", "ca_certs", ")", "return", "sock" ]
48.136364
15.409091
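A hedged construction sketch; the keyword names below simply mirror the attributes `_connect` reads (host, cert_reqs, certfile, keyfile, ca_certs) and are not necessarily the client library's real constructor signature:

import ssl

conn = SSLConnection(host='redis.example.com', port=6380,
                     cert_reqs=ssl.CERT_REQUIRED,
                     ca_certs='/etc/ssl/certs/ca-bundle.crt')
sock = conn._connect()   # TLS-wrapped socket; hostname checking is disabled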
def __set_token_expired(self, value):
        """Internal helper for OAuth code; converts a lifetime in seconds into an absolute expiry timestamp."""
        self._token_expired = datetime.datetime.now() + datetime.timedelta(seconds=value)
        return
[ "def", "__set_token_expired", "(", "self", ",", "value", ")", ":", "self", ".", "_token_expired", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "+", "datetime", ".", "timedelta", "(", "seconds", "=", "value", ")", "return" ]
46
19.5
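A standalone equivalent of the arithmetic: an "expires_in" lifetime in seconds (3600 is illustrative) becomes an absolute deadline that later code can compare against the current time:

import datetime

expires_at = datetime.datetime.now() + datetime.timedelta(seconds=3600)
print(datetime.datetime.now() < expires_at)   # True until the hour elapses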
def dump2sqlite(records, output_file):
    """Dumps test results to an SQLite database."""
    results_keys = list(records.results[0].keys())
    pad_data = []
    for key in REQUIRED_KEYS:
        if key not in results_keys:
            results_keys.append(key)
            pad_data.append("")

    conn = sqlite3.connect(os.path.expanduser(output_file), detect_types=sqlite3.PARSE_DECLTYPES)

    # in each row there needs to be data for every column
    # last column is current time
    pad_data.append(datetime.datetime.utcnow())

    to_db = [list(row.values()) + pad_data for row in records.results]

    cur = conn.cursor()
    cur.execute(
        "CREATE TABLE testcases ({},sqltime TIMESTAMP)".format(
            ",".join("{} TEXT".format(key) for key in results_keys)
        )
    )
    cur.executemany(
        "INSERT INTO testcases VALUES ({},?)".format(",".join(["?"] * len(results_keys))), to_db
    )

    if records.testrun:
        cur.execute("CREATE TABLE testrun (testrun TEXT)")
        cur.execute("INSERT INTO testrun VALUES (?)", (records.testrun,))

    conn.commit()
    conn.close()

    logger.info("Data written to '%s'", output_file)
[ "def", "dump2sqlite", "(", "records", ",", "output_file", ")", ":", "results_keys", "=", "list", "(", "records", ".", "results", "[", "0", "]", ".", "keys", "(", ")", ")", "pad_data", "=", "[", "]", "for", "key", "in", "REQUIRED_KEYS", ":", "if", "key", "not", "in", "results_keys", ":", "results_keys", ".", "append", "(", "key", ")", "pad_data", ".", "append", "(", "\"\"", ")", "conn", "=", "sqlite3", ".", "connect", "(", "os", ".", "path", ".", "expanduser", "(", "output_file", ")", ",", "detect_types", "=", "sqlite3", ".", "PARSE_DECLTYPES", ")", "# in each row there needs to be data for every column", "# last column is current time", "pad_data", ".", "append", "(", "datetime", ".", "datetime", ".", "utcnow", "(", ")", ")", "to_db", "=", "[", "list", "(", "row", ".", "values", "(", ")", ")", "+", "pad_data", "for", "row", "in", "records", ".", "results", "]", "cur", "=", "conn", ".", "cursor", "(", ")", "cur", ".", "execute", "(", "\"CREATE TABLE testcases ({},sqltime TIMESTAMP)\"", ".", "format", "(", "\",\"", ".", "join", "(", "\"{} TEXT\"", ".", "format", "(", "key", ")", "for", "key", "in", "results_keys", ")", ")", ")", "cur", ".", "executemany", "(", "\"INSERT INTO testcases VALUES ({},?)\"", ".", "format", "(", "\",\"", ".", "join", "(", "[", "\"?\"", "]", "*", "len", "(", "results_keys", ")", ")", ")", ",", "to_db", ")", "if", "records", ".", "testrun", ":", "cur", ".", "execute", "(", "\"CREATE TABLE testrun (testrun TEXT)\"", ")", "cur", ".", "execute", "(", "\"INSERT INTO testrun VALUES (?)\"", ",", "(", "records", ".", "testrun", ",", ")", ")", "conn", ".", "commit", "(", ")", "conn", ".", "close", "(", ")", "logger", ".", "info", "(", "\"Data written to '%s'\"", ",", "output_file", ")" ]
31.25
24.916667
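A hedged round-trip sketch; `records` is a stand-in with the two attributes the function reads (`results` and `testrun`), and `REQUIRED_KEYS`, `sqlite3`, `os`, `datetime`, and `logger` are assumed to be set up by the surrounding module:

from types import SimpleNamespace
import sqlite3

records = SimpleNamespace(
    results=[{'id': 'TC-1', 'verdict': 'passed'}],   # keys become column names
    testrun='run-42',
)
dump2sqlite(records, '/tmp/results.sqlite')

conn = sqlite3.connect('/tmp/results.sqlite')
print(conn.execute('SELECT id, verdict, sqltime FROM testcases').fetchall())
conn.close()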
def find_transactions(
      self,
      bundles=None, # type: Optional[Iterable[BundleHash]]
      addresses=None, # type: Optional[Iterable[Address]]
      tags=None, # type: Optional[Iterable[Tag]]
      approvees=None, # type: Optional[Iterable[TransactionHash]]
  ):
    # type: (...) -> dict
    """
    Find the transactions which match the specified input and return them.

    All input values are lists, and a list of matching transaction
    hashes, in the same order, is returned for all individual elements.

    Using more than one of these input fields returns the intersection
    of the values.

    :param bundles: List of bundle IDs.
    :param addresses: List of addresses.
    :param tags: List of tags.
    :param approvees: List of approvee transaction IDs.

    References:

    - https://iota.readme.io/docs/findtransactions
    """
    return core.FindTransactionsCommand(self.adapter)(
      bundles=bundles,
      addresses=addresses,
      tags=tags,
      approvees=approvees,
    )
[ "def", "find_transactions", "(", "self", ",", "bundles", "=", "None", ",", "# type: Optional[Iterable[BundleHash]]", "addresses", "=", "None", ",", "# type: Optional[Iterable[Address]]", "tags", "=", "None", ",", "# type: Optional[Iterable[Tag]]", "approvees", "=", "None", ",", "# type: Optional[Iterable[TransactionHash]]", ")", ":", "# type: (...) -> dict", "return", "core", ".", "FindTransactionsCommand", "(", "self", ".", "adapter", ")", "(", "bundles", "=", "bundles", ",", "addresses", "=", "addresses", ",", "tags", "=", "tags", ",", "approvees", "=", "approvees", ",", ")" ]
28.390244
22.243902
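A usage sketch against the PyOTA-style API the references point to; the node URL, address trytes, and tag are placeholders:

from iota import Iota, Address, Tag

api = Iota('https://nodes.example.org:14265')        # placeholder node
response = api.find_transactions(
    addresses=[Address(b'SOME9ADDRESS9TRYTES')],     # placeholder trytes
    tags=[Tag(b'MYTAG')],
)
print(response['hashes'])   # intersection of matches for address AND tag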