Columns: code (string, lengths 59 to 3.37k); docstring (string, lengths 8 to 15.5k)
def get_request(self, model_run, run_url):
    return ModelRunRequest(
        model_run.identifier,
        model_run.experiment_id,
        run_url
    )
Create request object to run model. Requests are handled by SCO worker implementations. Parameters ---------- model_run : ModelRunHandle Handle to model run run_url : string URL for model run information Returns ------- ModelRunRequest Object representing model run request
def create_stream(self, uidList=[]):
    req_hook = 'pod/v1/im/create'
    req_args = json.dumps(uidList)
    status_code, response = self.__rest__.POST_query(req_hook, req_args)
    self.logger.debug('%s: %s' % (status_code, response))
    return status_code, response
Create an IM stream for the given list of user IDs and return the status code and response.
def iter(self): upperCase = self._upperCase for _file in self._files: with asHandle(_file) as fp: records = SeqIO.parse(fp, 'fasta') while True: try: record = next(records) except StopIteration: break try: structureRecord = next(records) except StopIteration: raise ValueError('Structure file %r has an odd number ' 'of records.' % _file) if len(structureRecord) != len(record): raise ValueError( 'Sequence %r length (%d) is not equal to ' 'structure %r length (%d) in input file %r.' % ( record.description, len(record), structureRecord.description, len(structureRecord), _file)) if upperCase: read = self._readClass( record.description, str(record.seq.upper()), str(structureRecord.seq.upper())) else: read = self._readClass(record.description, str(record.seq), str(structureRecord.seq)) yield read
Iterate over the sequences in self.file_, yielding each as an instance of the desired read class. @raise ValueError: If the input file has an odd number of records or if any sequence has a different length than its predicted secondary structure.
def dloglikarray(self):
    assert self.dparamscurrent, "dloglikarray requires paramscurrent == True"
    nparams = len(self._index_to_param)
    dloglikarray = scipy.ndarray(shape=(nparams,), dtype='float')
    for (i, param) in self._index_to_param.items():
        if isinstance(param, str):
            dloglikarray[i] = self.dloglik[param]
        elif isinstance(param, tuple):
            dloglikarray[i] = self.dloglik[param[0]][param[1]]
    return dloglikarray
Derivative of `loglik` with respect to `paramsarray`.
def create(self, ticket, payload=None, expires=None):
    if not payload:
        payload = True
    self._client.set(str(ticket), payload, expires)
Create a session identifier in memcache associated with ``ticket``.
def find_page_of_state_m(self, state_m):
    for state_identifier, page_info in list(self.tabs.items()):
        if page_info['state_m'] is state_m:
            return page_info['page'], state_identifier
    return None, None
Return the identifier and page of a given state model :param state_m: The state model to be searched :return: page containing the state and the state_identifier
def send_exception(self, code, exc_info=None, headers=None):
    "send an error response including a backtrace to the client"
    if headers is None:
        headers = {}
    if not exc_info:
        exc_info = sys.exc_info()
    self.send_error_msg(code, traceback.format_exception(*exc_info), headers)
send an error response including a backtrace to the client
def load_transform(fname):
    if fname is None:
        return np.eye(4)
    if fname.endswith('.mat'):
        return np.loadtxt(fname)
    elif fname.endswith('.lta'):
        with open(fname, 'rb') as fobj:
            for line in fobj:
                if line.startswith(b'1 4 4'):
                    break
            lines = fobj.readlines()[:4]
        return np.genfromtxt(lines)
    raise ValueError("Unknown transform type; pass FSL (.mat) or LTA (.lta)")
Load affine transform from file Parameters ---------- fname : str or None Filename of an LTA or FSL-style MAT transform file. If ``None``, return an identity transform Returns ------- affine : (4, 4) numpy.ndarray
def parse_grid_facets(facets): valid_seqs = ["('var1', '.')", "('var1', 'var2')", "('.', 'var1')", "((var1, var2), (var3, var4))"] error_msg_s = ("Valid sequences for specifying 'facets' look like" " {}".format(valid_seqs)) valid_forms = ['var1 ~ .', 'var1 ~ var2', '. ~ var1', 'var1 + var2 ~ var3 + var4', '. ~ func(var1) + func(var2)', '. ~ func(var1+var3) + func(var2)' ] + valid_seqs error_msg_f = ("Valid formula for 'facet_grid' look like" " {}".format(valid_forms)) if isinstance(facets, (tuple, list)): if len(facets) != 2: raise PlotnineError(error_msg_s) rows, cols = facets if isinstance(rows, str): rows = [] if rows == '.' else [rows] if isinstance(cols, str): cols = [] if cols == '.' else [cols] return rows, cols if not isinstance(facets, str): raise PlotnineError(error_msg_f) try: lhs, rhs = facets.split('~') except ValueError: raise PlotnineError(error_msg_s) else: lhs = lhs.strip() rhs = rhs.strip() lhs = ensure_var_or_dot(lhs) rhs = ensure_var_or_dot(rhs) lsplitter = ' + ' if ' + ' in lhs else '+' rsplitter = ' + ' if ' + ' in rhs else '+' if lhs == '.': rows = [] else: rows = [var.strip() for var in lhs.split(lsplitter)] if rhs == '.': cols = [] else: cols = [var.strip() for var in rhs.split(rsplitter)] return rows, cols
Return two lists of facetting variables, for the rows & columns
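For illustration, a minimal standalone sketch (not plotnine's actual implementation) of how a formula string such as 'var1 + var2 ~ var3' splits into row and column variable lists:

# Simplified formula parsing: '.' means "no variables", '+' separates variables.
def parse_facets_sketch(facets):
    lhs, rhs = (part.strip() for part in facets.split('~'))
    rows = [] if lhs == '.' else [v.strip() for v in lhs.split('+')]
    cols = [] if rhs == '.' else [v.strip() for v in rhs.split('+')]
    return rows, cols

print(parse_facets_sketch('var1 + var2 ~ var3'))  # (['var1', 'var2'], ['var3'])
print(parse_facets_sketch('. ~ var1'))            # ([], ['var1'])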
def _load_lua_scripts(self): lua_script_path = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'scripts/lua' ) with open(os.path.join( lua_script_path, 'enqueue.lua'), 'r') as enqueue_file: self._lua_enqueue_script = enqueue_file.read() self._lua_enqueue = self._r.register_script( self._lua_enqueue_script) with open(os.path.join( lua_script_path, 'dequeue.lua'), 'r') as dequeue_file: self._lua_dequeue_script = dequeue_file.read() self._lua_dequeue = self._r.register_script( self._lua_dequeue_script) with open(os.path.join( lua_script_path, 'finish.lua'), 'r') as finish_file: self._lua_finish_script = finish_file.read() self._lua_finish = self._r.register_script(self._lua_finish_script) with open(os.path.join( lua_script_path, 'interval.lua'), 'r') as interval_file: self._lua_interval_script = interval_file.read() self._lua_interval = self._r.register_script( self._lua_interval_script) with open(os.path.join( lua_script_path, 'requeue.lua'), 'r') as requeue_file: self._lua_requeue_script = requeue_file.read() self._lua_requeue = self._r.register_script( self._lua_requeue_script) with open(os.path.join( lua_script_path, 'metrics.lua'), 'r') as metrics_file: self._lua_metrics_script = metrics_file.read() self._lua_metrics = self._r.register_script( self._lua_metrics_script)
Loads all lua scripts required by SharQ.
def remove_module_load(state_dict):
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        new_state_dict[k[7:]] = v
    return new_state_dict
Create a new OrderedDict whose keys no longer carry the `module.` prefix.
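A small self-contained demonstration of the prefix stripping; the 7 characters removed correspond to the 'module.' prefix that torch.nn.DataParallel prepends to parameter names:

from collections import OrderedDict

# Toy state dict as produced by a DataParallel-wrapped model.
state_dict = OrderedDict([
    ('module.conv1.weight', 'w1'),
    ('module.fc.bias', 'b1'),
])

stripped = OrderedDict((k[7:], v) for k, v in state_dict.items())
print(list(stripped))  # ['conv1.weight', 'fc.bias']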
def pw_score_cosine(self, s1: ClassId, s2: ClassId) -> SimScore:
    df = self.assoc_df
    slice1 = df.loc[s1].values
    slice2 = df.loc[s2].values
    return 1 - cosine(slice1, slice2)
Cosine similarity of two subjects Arguments --------- s1 : str class id s2 : str class id Returns ------- number A number between 0 and 1
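As a sketch of the scoring itself, cosine similarity of two association vectors can be computed directly with numpy (the real method pulls the vectors out of self.assoc_df by class id and uses scipy's cosine distance):

import numpy as np

def cosine_similarity(u, v):
    # dot(u, v) / (|u| * |v|), i.e. 1 - cosine distance
    return np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))

a = np.array([1.0, 0.0, 1.0])
b = np.array([1.0, 1.0, 1.0])
print(round(cosine_similarity(a, a), 3))  # 1.0 (identical vectors)
print(round(cosine_similarity(a, b), 3))  # ~0.816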
def create_atom_data(self):
    current_atom_site = self.current_atom_site
    if current_atom_site.IsHETATM:
        return None, None, None, None
    elif current_atom_site.IsATOM:
        return current_atom_site.convert_to_residue(self.modified_residues)
    else:
        raise Exception('current_atom_site')
The atom site work is split into two parts. This function type-converts the tags.
def _build_instance_group_list_args(self, instance_groups): if type(instance_groups) != types.ListType: instance_groups = [instance_groups] params = {} for i, instance_group in enumerate(instance_groups): ig_dict = self._build_instance_group_args(instance_group) for key, value in ig_dict.iteritems(): params['InstanceGroups.member.%d.%s' % (i+1, key)] = value return params
Takes a list of InstanceGroups, or a single InstanceGroup. Returns a comparable dict for use in making a RunJobFlow or AddInstanceGroups request.
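A standalone sketch of the flattening pattern; the per-group dicts and field names here are illustrative stand-ins for whatever _build_instance_group_args actually produces:

# Flatten a list of per-group dicts into the indexed parameter style
# used by the EMR API ('InstanceGroups.member.N.Key').
def flatten_groups(groups):
    params = {}
    for i, group in enumerate(groups):
        for key, value in group.items():
            params['InstanceGroups.member.%d.%s' % (i + 1, key)] = value
    return params

groups = [{'InstanceRole': 'MASTER', 'InstanceCount': 1},
          {'InstanceRole': 'CORE', 'InstanceCount': 4}]
print(flatten_groups(groups))
# {'InstanceGroups.member.1.InstanceRole': 'MASTER', ...}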
def get_all_ids(idVendor = GrizzlyUSB.ID_VENDOR, idProduct=GrizzlyUSB.ID_PRODUCT): all_dev = GrizzlyUSB.get_all_usb_devices(idVendor, idProduct) if len(all_dev) <= 0: raise usb.USBError("Could not find any GrizzlyBear device (idVendor=%d, idProduct=%d)" % (idVendor, idProduct)) else: all_addresses = [] bound_devices = [] for device in all_dev: internal_addr = GrizzlyUSB.get_device_address(device) if internal_addr == GrizzlyUSB.USB_DEVICE_ERROR: bound_devices.append(device) else: all_addresses.append(internal_addr) for device in all_dev: if device not in bound_devices: usb.util.dispose_resources(device) return map(addr_to_id, all_addresses)
Scans for grizzlies that have not been bound (constructed) and returns a list of their ids (motor numbers).
def reraise(exc_type, message=None, *args, **kwargs):
    last_lineno = inspect.currentframe().f_back.f_lineno
    line_msg = 'line %s: ' % last_lineno
    if message:
        line_msg += str(message)
    raise exc_type(line_msg, *args, **kwargs).raise_with_traceback(sys.exc_info()[2])
reraises an exception for exception translation. This is primarily used for when you immediately reraise an exception that is thrown in a library, so that your client will not have to depend on various exceptions defined in the library implementation that is being abstracted. The advantage of this helper function is somewhat preserve traceback information although it is polluted by the reraise frame. Example Code: def A(): raise Exception('Whoops') def main(): try: A() except Exception as e: exceptions.reraise(ValueError) main() Traceback (most recent call last): File "exception.py", line 53, in <module> main() File "exception.py", line 49, in main reraise(ValueError) File "exception.py", line 47, in main A() File "exception.py", line 42, in A raise Exception('Whoops') ValueError: line 49 When this code is run, the additional stack frames for calling A() and raising within A() are printed out in exception, whereas a bare exception translation would lose this information. As long as you ignore the reraise stack frame, the stack trace is okay looking. Generally this can be fixed by hacking on CPython to allow modification of traceback objects ala https://github.com/mitsuhiko/jinja2/blob/master/jinja2/debug.py, but this is fixed in Python 3 anyways and that method is the definition of hackery. Args: exc_type: (Exception) Exception class to create. message: (str) Optional message to place in exception instance. Usually not needed as the original exception probably has a message that will be printed out in the modified stacktrace. *args: Args to pass to exception constructor. **kwargs: Kwargs to pass to exception constructor.
def link_asset_content_key(access_token, asset_id, encryptionkey_id, ams_redirected_rest_endpoint): path = '/Assets' full_path = ''.join([path, "('", asset_id, "')", "/$links/ContentKeys"]) full_path_encoded = urllib.parse.quote(full_path, safe='') endpoint = ''.join([ams_rest_endpoint, full_path_encoded]) uri = ''.join([ams_redirected_rest_endpoint, 'ContentKeys', "('", encryptionkey_id, "')"]) body = '{"uri": "' + uri + '"}' return do_ams_post(endpoint, full_path_encoded, body, access_token)
Link Media Service Asset and Content Key. Args: access_token (str): A valid Azure authentication token. asset_id (str): A Media Service Asset ID. encryptionkey_id (str): A Media Service Encryption Key ID. ams_redirected_rest_endpoint (str): A Media Service Redirected Endpoint. Returns: HTTP response. JSON body.
def _load_bond_length_data():
    with open(os.path.join(os.path.dirname(__file__), "bond_lengths.json")) as f:
        data = collections.defaultdict(dict)
        for row in json.load(f):
            els = sorted(row['elements'])
            data[tuple(els)][row['bond_order']] = row['length']
        return data
Loads bond length data from json file
def ProcessMessages(self, msgs=None, token=None): if not data_store.AFF4Enabled(): return filestore_fd = aff4.FACTORY.Create( legacy_filestore.FileStore.PATH, legacy_filestore.FileStore, mode="w", token=token) for vfs_urn in msgs: with aff4.FACTORY.Open(vfs_urn, mode="rw", token=token) as vfs_fd: try: filestore_fd.AddFile(vfs_fd) except Exception as e: logging.exception("Exception while adding file to filestore: %s", e)
Process the new file and add to the file store.
def interactive_tenant_update_vars(self): api_logger.info('interactive_tenant_update_vars function:') tenant_resp = self._parent_class.get.tenants(self._parent_class.tenant_id) status = tenant_resp.cgx_status tenant_dict = tenant_resp.cgx_content if status: api_logger.debug("new tenant_dict: %s", tenant_dict) self._parent_class.tenant_name = tenant_dict.get('name', self._parent_class.tenant_id) self._parent_class.is_esp = tenant_dict.get('is_esp') address_lookup = tenant_dict.get('address', None) if address_lookup: tenant_address = address_lookup.get('street', "") + ", " tenant_address += (str(address_lookup.get('street2', "")) + ", ") tenant_address += (str(address_lookup.get('city', "")) + ", ") tenant_address += (str(address_lookup.get('state', "")) + ", ") tenant_address += (str(address_lookup.get('post_code', "")) + ", ") tenant_address += (str(address_lookup.get('country', "")) + ", ") else: tenant_address = "Unknown" self._parent_class.address = tenant_address return True else: return False
Function to update the `cloudgenix.API` object with tenant login info. Run after login or client login. **Returns:** Boolean on success/failure.
def parseWigString(line, scoreType=int):
    parts = line.split("\t")
    if (len(parts) < 4):
        raise GenomicIntervalError("failed to parse " + line +
                                   " as wig format, too few fields")
    return GenomicInterval(parts[0].strip(), int(parts[1]), int(parts[2]),
                           None, scoreType(parts[3]))
Parse a string in simple Wig format and return a GenomicInterval. :param line: the string to be parsed :param scoreType: treat the score field as having this type. :return: GenomicInterval object representing this wig line; the name of the interval will be set to 'X', and its strand to the default.
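For example, a tab-separated wig line splits into chromosome, start, end and score fields like this (plain-Python sketch, without the GenomicInterval class):

line = "chr1\t100\t150\t0.75"
chrom, start, end, score = line.split("\t")
print(chrom, int(start), int(end), float(score))  # chr1 100 150 0.75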
def delete_extra_files(self, relpaths, cloud_objs): for cloud_obj in cloud_objs: if cloud_obj not in relpaths: if not self.test_run: self.delete_cloud_obj(cloud_obj) self.delete_count += 1 if not self.quiet or self.verbosity > 1: print("Deleted: {0}".format(cloud_obj))
Deletes any objects from the container that do not exist locally.
def _invite(self, name, method, email, uuid, event, password=""): props = { 'uuid': std_uuid(), 'status': 'Open', 'name': name, 'method': method, 'email': email, 'password': password, 'timestamp': std_now() } enrollment = objectmodels['enrollment'](props) enrollment.save() self.log('Enrollment stored', lvl=debug) self._send_invitation(enrollment, event) packet = { 'component': 'hfos.enrol.enrolmanager', 'action': 'invite', 'data': [True, email] } self.fireEvent(send(uuid, packet))
Actually invite a given user
def find_one(self, tname, where=None, where_not=None, columns=None, astype=None):
    records = self.find(tname, where=where, where_not=where_not,
                        columns=columns, astype='dataframe')
    return self._output(records, single=True, astype=astype)
Find a single record in the provided table from the database. If multiple match, return the first one based on the internal order of the records. If no records are found, return empty dictionary, string or series depending on the value of `astype`. Parameters ---------- tname : str Table to search records from. where : dict or None (default `None`) Dictionary of <column, value> where value can be of str type for exact match or a compiled regex expression for more advanced matching. where_not : dict or None (default `None`) Identical to `where` but for negative-matching. columns: list of str, str or None (default `None`) Column(s) to return for the found records, if any. astype: str, type or None (default `None`) Type to cast the output to. Possible values are: `nonetype`, `series`, `str`, `dict`, `json`. If this is `None`, falls back to the type provided to the constructor. If a type was provided to the constructor but the user wants to avoid any casting, "nonetype" should be passed as the value. Returns ------- records : str, dict or series Output type depends on `astype` parameter. Examples -------- >>> db = PandasDatabase("test") >>> db.insert("test", record={"Name": "John"}) Name John __id__ dc876999-1f5b-4262-b6bf-c23b875f3a54 dtype: object >>> db.find_one("test", astype="dict") {'Name': 'John', '__id__': 'dc876999-1f5b-4262-b6bf-c23b875f3a54'} >>> db.find_one("test", astype="series") __id__ dc876999-1f5b-4262-b6bf-c23b875f3a54 Name John Name: 0, dtype: object >>> db.find_one("test", astype=None) __id__ dc876999-1f5b-4262-b6bf-c23b875f3a54 Name John Name: 0, dtype: object >>> db.find_one("test", where={"Name": "John"}, astype="dict") {'Name': 'John', '__id__': 'dc876999-1f5b-4262-b6bf-c23b875f3a54'} >>> db.find_one("test", where_not={"Name": "John"}, astype="dict") {}
def _get_distinct_hostnames(self):
    hostnames = []
    for section in self.sections:
        hostnames.extend(self._group_get_hostnames(section['name']))
    return set(hostnames)
Return a set of distinct hostnames found in the entire inventory.
def _fetch_items_helper_no_retries(self, fetch_function):
    fetched_items = []
    while self._continuation or not self._has_started:
        if not self._has_started:
            self._has_started = True
        self._options['continuation'] = self._continuation
        (fetched_items, response_headers) = fetch_function(self._options)
        continuation_key = http_constants.HttpHeaders.Continuation
        if self._is_change_feed:
            continuation_key = http_constants.HttpHeaders.ETag
        if not self._is_change_feed or len(fetched_items) > 0:
            self._continuation = response_headers.get(continuation_key)
        else:
            self._continuation = None
        if fetched_items:
            break
    return fetched_items
Fetches more items and doesn't retry on failure :return: List of fetched items. :rtype: list
def _format_multicolumn(self, row, ilevels):
    row2 = list(row[:ilevels])
    ncol = 1
    coltext = ''

    def append_col():
        if ncol > 1:
            row2.append('\\multicolumn{{{ncol:d}}}{{{fmt:s}}}{{{txt:s}}}'
                        .format(ncol=ncol, fmt=self.multicolumn_format,
                                txt=coltext.strip()))
        else:
            row2.append(coltext)

    for c in row[ilevels:]:
        if c.strip():
            if coltext:
                append_col()
            coltext = c
            ncol = 1
        else:
            ncol += 1
    if coltext:
        append_col()
    return row2
r""" Combine columns belonging to a group to a single multicolumn entry according to self.multicolumn_format e.g.: a & & & b & c & will become \multicolumn{3}{l}{a} & b & \multicolumn{2}{l}{c}
def next_state(self, index, event_time, population_view): return _next_state(index, event_time, self.transition_set, population_view)
Moves a population between different states using information this state's `transition_set`. Parameters ---------- index : iterable of ints An iterable of integer labels for the simulants. event_time : pandas.Timestamp When this transition is occurring. population_view : vivarium.framework.population.PopulationView A view of the internal state of the simulation.
def suggest(self, name, text, **kwargs):
    s = self._clone()
    s._suggest[name] = {'text': text}
    s._suggest[name].update(kwargs)
    return s
Add a suggestions request to the search. :arg name: name of the suggestion :arg text: text to suggest on All keyword arguments will be added to the suggestions body. For example:: s = Search() s = s.suggest('suggestion-1', 'Elasticsearch', term={'field': 'body'})
def filter(self, node, condition):
    if not isinstance(node, Node):
        raise TypeError('not a node')
    for child in node:
        if condition(child):
            yield child
        for subchild in self.filter(child, condition):
            yield subchild
This method accepts a node and the condition function; a generator will be returned to yield the nodes that got matched by the condition.
def reducer_metro(self, metro, values): lookup = CachedLookup(precision=POI_GEOHASH_PRECISION) for i, value in enumerate(values): type_tag, lonlat, data = value if type_tag == 1: lookup.insert(i, dict( geometry=dict(type='Point', coordinates=project(lonlat)), properties=dict(tags=data) )) else: if not lookup.data_store: return poi_names = [] kwargs = dict(buffer_size=POI_DISTANCE, multiple=True) for poi in lookup.get(lonlat, **kwargs): has_tag = [ tag in poi['tags'] for tag in POI_TAGS ] if any(has_tag) and 'name' in poi['tags']: poi_names.append(poi['tags']['name']) for poi in set(poi_names): yield (metro, poi), 1
Output tags of POI locations nearby tweet locations Values will be sorted coming into reducer. First element in each value tuple will be either 1 (osm POI) or 2 (geotweet). Build a spatial index with POI records. For each tweet lookup nearby POI, and emit tag values for predefined tags.
def pump_reader(self):
    origin, message = self.transport.read_packet()
    if isinstance(origin, MessageTargetWatch):
        self._handle_watch_message(message)
    else:
        self._broadcast_transport_message(origin, message)
Synchronously reads one message from the watch, blocking until a message is available. All events caused by the message read will be processed before this method returns. .. note:: You usually don't need to invoke this method manually; instead, see :meth:`run_sync` and :meth:`run_async`.
def second_textx_model(self, model_parser):
    if self.grammar_parser.debug:
        self.grammar_parser.dprint("RESOLVING MODEL PARSER: second_pass")
    self._resolve_rule_refs(self.grammar_parser, model_parser)
    self._determine_rule_types(model_parser.metamodel)
    self._resolve_cls_refs(self.grammar_parser, model_parser)
    return model_parser
Cross reference resolving for parser model.
def saveform(cls, form):
    columns = dict()
    for name, field in cls.form_fields.iteritems():
        columns[name] = getattr(form, field).data
    instance = cls(**columns)
    return instance.save()
Create and save form model data to database
def multiprocess_with_queues(nproc, func, inputs, verbose=False, **progress_kw): if nproc != 1 and os.name == 'nt': warnings.warn( "multiprocessing is currently not supported on Windows, see " "https://github.com/gwpy/gwpy/issues/880, will continue with " "serial procesing (nproc=1)") nproc = 1 if progress_kw.pop('raise_exceptions', None) is not None: warnings.warn("the `raise_exceptions` keyword to " "multiprocess_with_queues is deprecated, and will be " "removed in a future release, all exceptions will be " "raised if they occur", DeprecationWarning) if bool(verbose): if not isinstance(verbose, bool): progress_kw['desc'] = str(verbose) if isinstance(inputs, (list, tuple)): progress_kw.setdefault('total', len(inputs)) pbar = progress_bar(**progress_kw) else: pbar = None def _inner(x): try: return func(x) except Exception as exc: if nproc == 1: raise return exc finally: if pbar and nproc == 1: pbar.update() if nproc == 1: return list(map(_inner, inputs)) q_in = Queue() q_out = Queue() proclist = [Process(target=process_in_out_queues, args=(_inner, q_in, q_out)) for _ in range(nproc)] for proc in proclist: proc.daemon = True proc.start() sent = [q_in.put(x, block=False) for x in enumerate(inputs)] for _ in range(nproc): q_in.put((None, None)) res = [] for _ in range(len(sent)): x = q_out.get() if pbar: pbar.update() res.append(x) for proc in proclist: proc.join() if pbar: pbar.close() results = [out for _, out in sorted(res, key=itemgetter(0))] for res in results: if isinstance(res, Exception): raise res return results
Map a function over a list of inputs using multiprocess This essentially duplicates `multiprocess.map` but allows for arbitrary functions (that aren't necessarily importable) Parameters ---------- nproc : `int` number of processes to use, if ``1`` is given, the current process is used, and no child processes are forked func : `callable` the function to call in each iteration, should take a single argument that is the next element from ``inputs`` inputs : `iterable` iterable (e.g. `list`) of inputs, each element of which is passed to ``func`` in one of the child processes verbose : `bool`, `str`, optional if `True`, print progress to the console as a bar, pass a `str` to customise the heading for the progress bar, default: `False`, (default heading ``'Processing:'`` if ``verbose=True`) Returns ------- outputs : `list` the `list` of results from calling ``func(x)`` for each element of ``inputs``
def blocking_start(self, waiting_func=None): self.logger.debug('threadless start') try: for job_params in self._get_iterator(): self.config.logger.debug('received %r', job_params) self.quit_check() if job_params is None: if self.config.quit_on_empty_queue: raise KeyboardInterrupt self.logger.info("there is nothing to do. Sleeping " "for %d seconds" % self.config.idle_delay) self._responsive_sleep(self.config.idle_delay) continue self.quit_check() try: args, kwargs = job_params except ValueError: args = job_params kwargs = {} try: self.task_func(*args, **kwargs) except Exception: self.config.logger.error("Error in processing a job", exc_info=True) except KeyboardInterrupt: self.logger.debug('queuingThread gets quit request') finally: self.quit = True self.logger.debug("ThreadlessTaskManager dies quietly")
This function starts the task manager running its tasks. The waiting_func is normally used to do something while other threads are running, but here there are no other threads, so the waiting func will never get called. I can see wanting this function to be called at least once after the end of the task loop.
def encode_list(dynamizer, value):
    encoded_list = []
    for v in value:
        encoded_type, encoded_value = dynamizer.raw_encode(v)
        encoded_list.append({
            encoded_type: encoded_value,
        })
    return 'L', encoded_list
Encode a list for the DynamoDB format
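A rough illustration of the resulting wire shape, with a toy raw_encode standing in for the real dynamizer (DynamoDB's attribute-value format tags strings as 'S' and numbers as 'N', with lists under 'L'):

def toy_raw_encode(v):
    # Simplified: numbers -> 'N', everything else -> 'S'
    return ('N', str(v)) if isinstance(v, (int, float)) else ('S', str(v))

def encode_list_sketch(value):
    encoded = []
    for v in value:
        t, ev = toy_raw_encode(v)
        encoded.append({t: ev})
    return 'L', encoded

print(encode_list_sketch(['abc', 42]))
# ('L', [{'S': 'abc'}, {'N': '42'}])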
def _get_user_groups(self, user): return [g.gr_name for g in grp.getgrall() if user in g.gr_mem] + [grp.getgrgid(pwd.getpwnam(user).pw_gid).gr_name]
Get user groups. :param user: :return:
def find_by_or(cls, payload): if not isinstance(payload, dict): raise ValueError("The 'payload' parameter must be provided a dictionary object.") url = os.path.join(cls.URL, "find_by_or") payload = {"find_by_or": payload} cls.debug_logger.debug("Searching Pulsar {} for {}".format(cls.__name__, json.dumps(payload, indent=4))) res = requests.post(url=url, json=payload, headers=HEADERS, verify=False) cls.write_response_html_to_file(res,"bob.html") if res: try: res = res[cls.MODEL_NAME] except KeyError: pass return res
Searches the model in question by OR joining the query parameters. Implements a Railsy way of looking for a record using a method by the same name and passing in the query as a string (for the OR operator joining to be specified). Only the first hit is returned, and there is no particular ordering specified in the server-side API method. Args: payload: `dict`. The attributes of a record to search for by using OR operator joining for each query parameter. Returns: `dict`: The JSON serialization of the record, if any, found by the API call. `None`: If the API call didn't return any results.
def ensure_unicode_args(function): @wraps(function) def wrapped(*args, **kwargs): if six.PY2: return function( *salt.utils.data.decode_list(args), **salt.utils.data.decode_dict(kwargs) ) else: return function(*args, **kwargs) return wrapped
Decodes all arguments passed to the wrapped function
def fetch_mid(self):
    current_mid = self._current_mid
    self._current_mid += 1
    self._current_mid %= 65535
    return current_mid
Gets the next valid MID. :return: the mid to use
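A quick sketch of the wrap-around behaviour of the message-ID counter: with `% 65535`, the counter returns to 0 after 65534.

class MidCounter:
    def __init__(self, start=0):
        self._current_mid = start

    def fetch_mid(self):
        current_mid = self._current_mid
        self._current_mid = (self._current_mid + 1) % 65535
        return current_mid

c = MidCounter(start=65534)
print(c.fetch_mid())  # 65534
print(c.fetch_mid())  # 0  (wrapped around)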
def enum_check(*args, func=None):
    func = func or inspect.stack()[2][3]
    for var in args:
        if not isinstance(var, (enum.EnumMeta, aenum.EnumMeta)):
            name = type(var).__name__
            raise EnumError(
                f'Function {func} expected enumeration, {name} got instead.')
Check if arguments are enumerations.
def strip_command(self, command_string, output):
    output_list = output.split(command_string)
    return self.RESPONSE_RETURN.join(output_list)
Strip command_string from output string.
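The effect, in plain Python, is to drop every occurrence of the echoed command from the device output and rejoin on the response separator (newline here as a stand-in for RESPONSE_RETURN):

command = "show version"
output = "show version\nCisco IOS XE, Version 16.9\n"
stripped = "\n".join(output.split(command))
print(repr(stripped))  # '\n\nCisco IOS XE, Version 16.9\n'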
async def get_inputs(self) -> List[Input]:
    res = await self.services["avContent"]["getCurrentExternalTerminalsStatus"]()
    return [Input.make(services=self.services, **x)
            for x in res if 'meta:zone:output' not in x['meta']]
Return list of available inputs.
def all_files(self):
    if self.files_command and not self.files:
        cmd = self.files_command
        files = shell_out(cmd, timeout=0, chroot=self.chroot)
        self.files = files.splitlines()
    return self.files
Returns a list of files known by the package manager
def transform(images, out_dir, fname): from PIL import Image as PILImage base = os.path.join(out_dir, "media", "images") width, height = images[0].image.size num_images_to_log = len(images) if num_images_to_log > Image.MAX_IMAGES: logging.warn( "The maximum number of images to store per step is %i." % Image.MAX_IMAGES) num_images_to_log = Image.MAX_IMAGES if width * num_images_to_log > Image.MAX_DIMENSION: max_images_by_dimension = Image.MAX_DIMENSION // width logging.warn("The maximum total width for all images in a collection is 65500, or {} images, each with a width of {} pixels. Only logging the first {} images.".format(max_images_by_dimension, width, max_images_by_dimension)) num_images_to_log = max_images_by_dimension total_width = width * num_images_to_log sprite = PILImage.new( mode='RGB', size=(total_width, height), color=(0, 0, 0)) for i, image in enumerate(images[:num_images_to_log]): location = width * i sprite.paste(image.image, (location, 0)) util.mkdir_exists_ok(base) sprite.save(os.path.join(base, fname), transparency=0) meta = {"width": width, "height": height, "count": num_images_to_log, "_type": "images"} grouping = images[0].grouping if grouping: meta["grouping"] = grouping captions = Image.captions(images[:num_images_to_log]) if captions: meta["captions"] = captions return meta
Combines a list of images into a single sprite returning meta information
def reg_copy(reg, keys=None): if keys is None: keys = reg.keys() reg_cls = type(reg) new_reg = reg_cls() mk = {} for m in reg_cls.meta_names: mstar = getattr(reg, m, None) if not mstar: mk[m] = None continue mk[m] = {} for k in keys: kstar = mstar.get(k) if kstar is not None: mk[m][k] = kstar new_reg.register({k: reg[k] for k in keys}, **mk) return new_reg
Make a copy of a subset of a registry. :param reg: source registry :param keys: keys of registry items to copy :return: copied registry subset
def terminate(self):
    self._backend_client.clear()
    self._needs_save = False
    self._started = False
    self._expire_cookie = True
    self._send_cookie = True
Terminates an active session
def main(): parser = argparse.ArgumentParser(description='.caffemodel to MXNet .params converter.') parser.add_argument('caffemodel', help='Path to the .caffemodel file to convert.') parser.add_argument('output_file_name', help='Name of the output .params file.') args = parser.parse_args() converter = CaffeModelConverter() converter.convert(args.caffemodel, args.output_file_name)
Read .caffemodel path and .params path as input from command line and use CaffeModelConverter to do the conversion
def read_authentication_config (self): section = "authentication" password_fields = [] if self.has_option(section, "entry"): for val in read_multiline(self.get(section, "entry")): auth = val.split() if len(auth) == 3: self.config.add_auth(pattern=auth[0], user=auth[1], password=auth[2]) password_fields.append("entry/%s/%s" % (auth[0], auth[1])) elif len(auth) == 2: self.config.add_auth(pattern=auth[0], user=auth[1]) else: raise LinkCheckerError( _("missing auth part in entry %(val)r") % {"val": val}) if self.has_option(section, "loginurl"): val = self.get(section, "loginurl").strip() if not (val.lower().startswith("http:") or val.lower().startswith("https:")): raise LinkCheckerError(_("invalid login URL `%s'. Only " \ "HTTP and HTTPS URLs are supported.") % val) self.config["loginurl"] = val self.read_string_option(section, "loginuserfield") self.read_string_option(section, "loginpasswordfield") if self.has_option(section, "loginextrafields"): for val in read_multiline(self.get(section, "loginextrafields")): name, value = val.split(":", 1) self.config["loginextrafields"][name] = value self.check_password_readable(section, password_fields)
Read configuration options in section "authentication".
def create_logout_response(self, request, bindings=None, status=None, sign=False, issuer=None, sign_alg=None, digest_alg=None): rinfo = self.response_args(request, bindings) if not issuer: issuer = self._issuer() response = self._status_response(samlp.LogoutResponse, issuer, status, sign, sign_alg=sign_alg, digest_alg=digest_alg, **rinfo) logger.info("Response: %s", response) return response
Create a LogoutResponse. :param request: The request this is a response to :param bindings: Which bindings that can be used for the response If None the preferred bindings are gathered from the configuration :param status: The return status of the response operation If None the operation is regarded as a Success. :param issuer: The issuer of the message :return: HTTP args
def mchirp_sampler_lnm(**kwargs):
    m1, m2 = draw_lnm_samples(**kwargs)
    mchirp_astro = mchirp_from_mass1_mass2(m1, m2)
    return mchirp_astro
Draw chirp mass samples for uniform-in-log model Parameters ---------- **kwargs: string Keyword arguments as model parameters and number of samples Returns ------- mchirp-astro: array The chirp mass samples for the population
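The conversion used here is the standard chirp-mass relation M_chirp = (m1*m2)**(3/5) / (m1+m2)**(1/5); a standalone numpy version of that step (the sampling of m1, m2 is not shown):

import numpy as np

def chirp_mass(m1, m2):
    # Standard chirp-mass relation for a compact binary.
    return (m1 * m2) ** (3.0 / 5.0) / (m1 + m2) ** (1.0 / 5.0)

m1 = np.array([10.0, 30.0])
m2 = np.array([10.0, 30.0])
print(chirp_mass(m1, m2))  # equal-mass case: ~[8.71, 26.12]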
def fetch(self): params = values.of({}) payload = self._version.fetch( 'GET', self._uri, params=params, ) return SyncMapPermissionInstance( self._version, payload, service_sid=self._solution['service_sid'], map_sid=self._solution['map_sid'], identity=self._solution['identity'], )
Fetch a SyncMapPermissionInstance :returns: Fetched SyncMapPermissionInstance :rtype: twilio.rest.sync.v1.service.sync_map.sync_map_permission.SyncMapPermissionInstance
def saveVarsInMat(filename, varNamesStr, outOf=None, **opts): from mlabwrap import mlab filename, varnames, outOf = __saveVarsHelper( filename, varNamesStr, outOf, '.mat', **opts) try: for varname in varnames: mlab._set(varname, outOf[varname]) mlab._do("save('%s','%s')" % (filename, "', '".join(varnames)), nout=0) finally: assert varnames mlab._do("clear('%s')" % "', '".join(varnames), nout=0)
Hacky convenience function to dump a couple of python variables in a .mat file. See `awmstools.saveVars`.
def enable_firewall_ruleset(host, username, password, ruleset_enable, ruleset_name, protocol=None, port=None, esxi_hosts=None, credstore=None): cmd = 'network firewall ruleset set --enabled {0} --ruleset-id={1}'.format( ruleset_enable, ruleset_name ) ret = {} if esxi_hosts: if not isinstance(esxi_hosts, list): raise CommandExecutionError('\'esxi_hosts\' must be a list.') for esxi_host in esxi_hosts: response = salt.utils.vmware.esxcli(host, username, password, cmd, protocol=protocol, port=port, esxi_host=esxi_host, credstore=credstore) ret.update({esxi_host: response}) else: response = salt.utils.vmware.esxcli(host, username, password, cmd, protocol=protocol, port=port, credstore=credstore) ret.update({host: response}) return ret
Enable or disable an ESXi firewall rule set. host The location of the host. username The username used to login to the host, such as ``root``. password The password used to login to the host. ruleset_enable True to enable the ruleset, false to disable. ruleset_name Name of ruleset to target. protocol Optionally set to alternate protocol if the host is not using the default protocol. Default protocol is ``https``. port Optionally set to alternate port if the host is not using the default port. Default port is ``443``. esxi_hosts If ``host`` is a vCenter host, then use esxi_hosts to execute this function on a list of one or more ESXi machines. credstore Optionally set to path to the credential store file. :return: A standard cmd.run_all dictionary, per host. CLI Example: .. code-block:: bash # Used for ESXi host connection information salt '*' vsphere.enable_firewall_ruleset my.esxi.host root bad-password True 'syslog' # Used for connecting to a vCenter Server salt '*' vsphere.enable_firewall_ruleset my.vcenter.location root bad-password True 'syslog' \ esxi_hosts='[esxi-1.host.com, esxi-2.host.com]'
def up_capture(returns, factor_returns, **kwargs): return up(returns, factor_returns, function=capture, **kwargs)
Compute the capture ratio for periods when the benchmark return is positive Parameters ---------- returns : pd.Series or np.ndarray Returns of the strategy, noncumulative. - See full explanation in :func:`~empyrical.stats.cum_returns`. factor_returns : pd.Series or np.ndarray Noncumulative returns of the factor to which beta is computed. Usually a benchmark such as the market. - This is in the same style as returns. period : str, optional Defines the periodicity of the 'returns' data for purposes of annualizing. Value ignored if `annualization` parameter is specified. Defaults are:: 'monthly':12 'weekly': 52 'daily': 252 Returns ------- up_capture : float Note ---- See http://www.investopedia.com/terms/u/up-market-capture-ratio.asp for more information.
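A rough standalone sketch of the idea (not empyrical's exact implementation, which works through its capture and cumulative-return helpers): restrict both series to periods where the benchmark is positive and compare cumulative growth over those periods.

import numpy as np

def up_capture_sketch(returns, factor_returns):
    # Keep only periods where the benchmark return is positive.
    mask = factor_returns > 0
    strat = np.prod(1 + returns[mask]) - 1
    bench = np.prod(1 + factor_returns[mask]) - 1
    return strat / bench

r = np.array([0.02, -0.01, 0.03, 0.01])
b = np.array([0.01, -0.02, 0.02, 0.02])
print(round(up_capture_sketch(r, b), 3))  # ~1.203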
def _serialized(self): return {'title': self.title, 'summary': self.summary, 'areadesc': self.areadesc, 'event': self.event, 'samecodes': self.samecodes, 'zonecodes': self.zonecodes, 'expiration': self.expiration, 'updated': self.updated, 'effective': self.effective, 'published': self.published, 'severity': self.severity, 'category': self.category, 'urgency': self.urgency, 'msgtype': self.msgtype, 'link': self.link, }
Provides a sanitized & serializable dict of the alert, mainly for forward & backwards compatibility
def setExpertLevel(self): g = get_root(self).globals level = g.cpars['expert_level'] if level == 0: if self.val.get() == 'CCD TECs': self.val.set('Observe') self._changed() self.tecs.grid_forget() else: self.tecs.grid(row=0, column=3, sticky=tk.W)
Modifies widget according to expertise level, which in this case is just matter of hiding or revealing the button to set CCD temps
def list_locales(self) -> List[Optional[Text]]:
    locales = list(self.dict.keys())
    if not locales:
        locales.append(None)
    return locales
Returns the list of available locales. The first locale is the default locale to be used. If no locales are known, then `None` will be the first item.
def escape(self, varname, value):
    f = self.escape_funcs.get(varname)
    return f(value) if f else value
Escape `value` to fit in the place of this variable into the grammar.
def RQ_sigma(self, sigma):
    if self._sa_pair:
        sigma = np.asarray(sigma)
        sigma_indices = np.empty(self.num_states, dtype=int)
        _find_indices(self.a_indices, self.a_indptr, sigma,
                      out=sigma_indices)
        R_sigma, Q_sigma = self.R[sigma_indices], self.Q[sigma_indices]
    else:
        R_sigma = self.R[np.arange(self.num_states), sigma]
        Q_sigma = self.Q[np.arange(self.num_states), sigma]
    return R_sigma, Q_sigma
Given a policy `sigma`, return the reward vector `R_sigma` and the transition probability matrix `Q_sigma`. Parameters ---------- sigma : array_like(int, ndim=1) Policy vector, of length n. Returns ------- R_sigma : ndarray(float, ndim=1) Reward vector for `sigma`, of length n. Q_sigma : ndarray(float, ndim=2) Transition probability matrix for `sigma`, of shape (n, n).
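For the dense (non sa-pair) branch, the fancy indexing simply picks, for each state, the reward entry and transition row of the action chosen by the policy; a toy example:

import numpy as np

n, m = 3, 2                       # 3 states, 2 actions
R = np.array([[1.0, 2.0],
              [0.5, 0.0],
              [3.0, 1.0]])         # R[s, a]
Q = np.random.dirichlet(np.ones(n), size=(n, m))  # Q[s, a, s']
sigma = np.array([1, 0, 1])        # action chosen in each state

R_sigma = Q_sigma = None
R_sigma = R[np.arange(n), sigma]   # shape (n,)
Q_sigma = Q[np.arange(n), sigma]   # shape (n, n)
print(R_sigma)                     # [2.  0.5 1. ]
print(Q_sigma.shape)               # (3, 3)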
def transaction_status(transaction):
    if not transaction or not transaction.get('transactionStatus'):
        return blank()
    return FormattedItem(
        transaction['transactionStatus'].get('name'),
        transaction['transactionStatus'].get('friendlyName'))
Returns a FormattedItem describing the given transaction. :param transaction: An object capable of having an active transaction
def notify_done(self, error=False, run_done_callbacks=True): if error: for _session in self._sessions.values(): _session.set_done() self._session_count = 0 else: self._update_session_count(-1) for _session in self._sessions.values(): if not _session.is_done(): return if run_done_callbacks: self._run_done_callbacks() self._done_event.set()
If error, clear all sessions; otherwise check whether all other sessions are complete, then run the done callbacks.
def upload_member_from_dir(member_data, target_member_dir, metadata, access_token, mode='default', max_size=MAX_SIZE_DEFAULT): if not validate_metadata(target_member_dir, metadata): raise ValueError('Metadata should match directory contents!') project_data = {f['basename']: f for f in member_data['data'] if f['source'] not in member_data['sources_shared']} for filename in metadata: if filename in project_data and mode == 'safe': logging.info('Skipping {}, remote exists with matching' ' name'.format(filename)) continue filepath = os.path.join(target_member_dir, filename) remote_file_info = (project_data[filename] if filename in project_data else None) upload_aws(target_filepath=filepath, metadata=metadata[filename], access_token=access_token, project_member_id=member_data['project_member_id'], remote_file_info=remote_file_info) if mode == 'sync': for filename in project_data: if filename not in metadata: logging.debug("Deleting {}".format(filename)) delete_file( file_basename=filename, access_token=access_token, project_member_id=member_data['project_member_id'])
Upload files in target directory to an Open Humans member's account. The default behavior is to overwrite files with matching filenames on Open Humans, but not otherwise delete files. If the 'mode' parameter is 'safe': matching filenames will not be overwritten. If the 'mode' parameter is 'sync': files on Open Humans that are not in the local directory will be deleted. :param member_data: This field is data related to the member in a project. :param target_member_dir: This field is the target directory from which data will be uploaded. :param metadata: This field is metadata for files to be uploaded. :param access_token: This field is a user-specific access token. :param mode: This field takes three values: default, sync, safe. Its default value is 'default'. :param max_size: This field is the maximum file size. Its default value is 128m.
def get_logger_data(self): return { address : stream_capturer[0].dump_handler_config_data() for address, stream_capturer in self._stream_capturers.iteritems() }
Return data on managed loggers. Returns a dictionary of managed logger configuration data. The format is primarily controlled by the :func:`SocketStreamCapturer.dump_handler_config_data` function:: { <capture address>: <list of handler config for data capturers> }
def do_continue(self, args):
    self._do_print_from_last_cmd = True
    self._interp.cont()
    return True
Continue the interpreter
def equal(list1, list2): return [item1 == item2 for item1, item2 in broadcast_zip(list1, list2)]
Element-wise equality of two broadcast-zipped lists; returns a list of booleans.
def _add_warc_snapshot(self, filename, url): _logger.debug('Adding snapshot record.') extension = os.path.splitext(filename)[1] content_type = { '.pdf': 'application/pdf', '.html': 'text/html', '.png': 'image/png', '.gif': 'image/gif' }[extension] record = WARCRecord() record.set_common_fields('resource', content_type) record.fields['WARC-Target-URI'] = 'urn:X-wpull:snapshot?url={0}' \ .format(wpull.url.percent_encode_query_value(url)) if self._action_warc_record: record.fields['WARC-Concurrent-To'] = \ self._action_warc_record.fields[WARCRecord.WARC_RECORD_ID] with open(filename, 'rb') as in_file: record.block_file = in_file self._warc_recorder.set_length_and_maybe_checksums(record) self._warc_recorder.write_record(record)
Add the snapshot to the WARC file.
def register(self, resource=None, **meta): if resource is None: def wrapper(resource): return self.register(resource, **meta) return wrapper if not issubclass(resource, ResourceView): raise AssertionError("%s not subclass of ResourceView" % resource) if resource._meta.abstract: raise AssertionError("Attempt register of abstract resource: %s." % resource) meta = dict(self.meta, **meta) meta['name'] = meta.get('name', resource._meta.name) options = type('Meta', tuple(), meta) params = dict(api=self, Meta=options, **meta) params['__module__'] = '%s.%s' % ( self.prefix, self.str_version.replace('.', '_')) params['__doc__'] = resource.__doc__ new_resource = type( '%s%s' % (resource.__name__, len(self.resources)), (resource,), params) if self.resources.get(new_resource._meta.url_name): logger.warning( "A resource '%r' is replacing the existing record for '%s'", new_resource, self.resources.get(new_resource._meta.url_name)) self.resources[new_resource._meta.url_name] = new_resource return resource
Add resource to the API. :param resource: Resource class for registration :param **meta: Redefine Meta options for the resource :return adrest.views.Resource: Generated resource.
def start(self): if threading.current_thread().name == 'MainThread': signal.signal(signal.SIGINT, self.stop) logging.info('Started on {}'.format(self.address)) while True: self.process()
Start and listen for calls
def merge(self, merge_area, tab): top, left, bottom, right = merge_area cursor = self.grid.actions.cursor top_left_code = self.code_array((top, left, cursor[2])) selection = Selection([(top, left)], [(bottom, right)], [], [], []) error_msg = _("Overlapping merge area at {} prevents merge.") for row in xrange(top, bottom + 1): for col in xrange(left, right + 1): key = row, col, tab if self.code_array.cell_attributes[key]["merge_area"]: post_command_event(self.main_window, self.StatusBarMsg, text=error_msg.format(str(key))) return self.delete_selection(selection) self.set_code((top, left, cursor[2]), top_left_code) attr = {"merge_area": merge_area, "locked": True} self._set_cell_attr(selection, tab, attr) tl_selection = Selection([], [], [], [], [(top, left)]) attr = {"locked": False} self._set_cell_attr(tl_selection, tab, attr)
Merges top left cell with all cells until bottom_right
def mdf_path(self):
    try:
        return self._mdf_path
    except AttributeError:
        path = self.outdir.has_abiext("MDF.nc")
        if path:
            self._mdf_path = path
        return path
Absolute path of the MDF file. Empty string if file is not present.
def lookup_ip_host(self, mac):
    res = self.lookup_by_host(mac=mac)
    try:
        return res["ip-address"]
    except KeyError:
        raise OmapiErrorAttributeNotFound()
Lookup a host object with the given mac address. @type mac: str @raises ValueError: @raises OmapiError: @raises OmapiErrorNotFound: if no lease object with the given mac could be found @raises OmapiErrorAttributeNotFound: if the lease could be found, but the object lacks an IP address @raises socket.error:
def incomplete_relation_data(configs, required_interfaces):
    complete_ctxts = configs.complete_contexts()
    incomplete_relations = [
        svc_type
        for svc_type, interfaces in required_interfaces.items()
        if not set(interfaces).intersection(complete_ctxts)]
    return {
        i: configs.get_incomplete_context_data(required_interfaces[i])
        for i in incomplete_relations}
Check complete contexts against required_interfaces Return dictionary of incomplete relation data. configs is an OSConfigRenderer object with configs registered required_interfaces is a dictionary of required general interfaces with dictionary values of possible specific interfaces. Example: required_interfaces = {'database': ['shared-db', 'pgsql-db']} The interface is said to be satisfied if anyone of the interfaces in the list has a complete context. Return dictionary of incomplete or missing required contexts with relation status of interfaces and any missing data points. Example: {'message': {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True}, 'zeromq-configuration': {'related': False}}, 'identity': {'identity-service': {'related': False}}, 'database': {'pgsql-db': {'related': False}, 'shared-db': {'related': True}}}
def build(self):
    if not self._output_tubes:
        self._output_tubes.append(self._worker_class.getTubeClass()())
    self._worker_class.assemble(
        self._worker_args,
        self._input_tube,
        self._output_tubes,
        self._size,
        self._disable_result,
        self._do_stop_task,
    )
    for stage in self._next_stages:
        stage.build()
Create and start up the internal workers.
def _set_input_buffer(self, text):
    if self.current_prompt_pos is not None:
        self.replace_text(self.current_prompt_pos, 'eol', text)
    else:
        self.insert(text)
    self.set_cursor_position('eof')
Set input buffer
def do_direct_payment(self, paymentaction="Sale", **kwargs):
    kwargs.update(self._sanitize_locals(locals()))
    return self._call('DoDirectPayment', **kwargs)
Shortcut for the DoDirectPayment method. ``paymentaction`` could be 'Authorization' or 'Sale' To issue a Sale immediately:: charge = { 'amt': '10.00', 'creditcardtype': 'Visa', 'acct': '4812177017895760', 'expdate': '012010', 'cvv2': '962', 'firstname': 'John', 'lastname': 'Doe', 'street': '1 Main St', 'city': 'San Jose', 'state': 'CA', 'zip': '95131', 'countrycode': 'US', 'currencycode': 'USD', } direct_payment("Sale", **charge) Or, since "Sale" is the default: direct_payment(**charge) To issue an Authorization, simply pass "Authorization" instead of "Sale". You may also explicitly set ``paymentaction`` as a keyword argument: ... direct_payment(paymentaction="Sale", **charge)
def execute_download_request(request): if request.save_response and request.data_folder is None: raise ValueError('Data folder is not specified. ' 'Please give a data folder name in the initialization of your request.') if not request.will_download: return None try_num = SHConfig().max_download_attempts response = None while try_num > 0: try: if request.is_aws_s3(): response = _do_aws_request(request) response_content = response['Body'].read() else: response = _do_request(request) response.raise_for_status() response_content = response.content LOGGER.debug('Successful download from %s', request.url) break except requests.RequestException as exception: try_num -= 1 if try_num > 0 and (_is_temporal_problem(exception) or (isinstance(exception, requests.HTTPError) and exception.response.status_code >= requests.status_codes.codes.INTERNAL_SERVER_ERROR) or _request_limit_reached(exception)): LOGGER.debug('Download attempt failed: %s\n%d attempts left, will retry in %ds', exception, try_num, SHConfig().download_sleep_time) sleep_time = SHConfig().download_sleep_time if _request_limit_reached(exception): sleep_time = max(sleep_time, 60) time.sleep(sleep_time) else: if request.url.startswith(SHConfig().aws_metadata_url) and \ isinstance(exception, requests.HTTPError) and \ exception.response.status_code == requests.status_codes.codes.NOT_FOUND: raise AwsDownloadFailedException('File in location %s is missing' % request.url) raise DownloadFailedException(_create_download_failed_message(exception, request.url)) _save_if_needed(request, response_content) if request.return_data: return decode_data(response_content, request.data_type, entire_response=response) return None
Executes download request. :param request: DownloadRequest to be executed :type request: DownloadRequest :return: downloaded data or None :rtype: numpy array, other possible data type or None :raises: DownloadFailedException
def install_toolset(self, toolset): info = toolset_info[toolset] if sys.platform.startswith('linux'): os.chdir(self.work_dir) if 'ppa' in info: for ppa in info['ppa']: utils.check_call( 'sudo','add-apt-repository','--yes',ppa) if 'deb' in info: utils.make_file('sources.list', "deb %s"%(' '.join(info['deb'])), "deb-src %s"%(' '.join(info['deb']))) utils.check_call('sudo','bash','-c','cat sources.list >> /etc/apt/sources.list') if 'apt-key' in info: for key in info['apt-key']: utils.check_call('wget',key,'-O','apt.key') utils.check_call('sudo','apt-key','add','apt.key') utils.check_call( 'sudo','apt-get','update','-qq') utils.check_call( 'sudo','apt-get','install','-qq',info['package']) if 'debugpackage' in info and info['debugpackage']: utils.check_call( 'sudo','apt-get','install','-qq',info['debugpackage'])
Installs specific toolset on CI system.
def getMetadataId(self, metadata): return str(datamodel.VariantSetMetadataCompoundId( self.getCompoundId(), 'metadata:' + metadata.key))
Returns the id of a metadata
def GetOutputPluginStates(output_plugins, source=None, token=None): output_plugins_states = [] for plugin_descriptor in output_plugins: plugin_class = plugin_descriptor.GetPluginClass() try: _, plugin_state = plugin_class.CreatePluginAndDefaultState( source_urn=source, args=plugin_descriptor.plugin_args, token=token) except Exception as e: raise ValueError("Plugin %s failed to initialize (%s)" % (plugin_class, e)) plugin_state["logs"] = [] plugin_state["errors"] = [] output_plugins_states.append( rdf_flow_runner.OutputPluginState( plugin_state=plugin_state, plugin_descriptor=plugin_descriptor)) return output_plugins_states
Initializes state for a list of output plugins.
def get_grades_by_regid_and_term(regid, term):
    url = "{}/{},{},{}.json".format(enrollment_res_url_prefix,
                                    term.year,
                                    term.quarter,
                                    regid)
    return _json_to_grades(get_resource(url), regid, term)
Returns a StudentGrades model for the regid and term.
def module_can_run_parallel(test_module: unittest.TestSuite) -> bool: for test_class in test_module: if hasattr(unittest.loader, '_FailedTest'): if isinstance(test_class, unittest.loader._FailedTest): continue if not isinstance(test_class, collections.Iterable): raise TestClassNotIterable() for test_case in test_class: return not getattr(sys.modules[test_case.__module__], "__no_parallel__", False)
Checks if a given module of tests can be run in parallel or not :param test_module: the module to run :return: True if the module can be run on parallel, False otherwise
def nodes_from_line(self, line): return dict([(v, k) for k, v in nx.get_edge_attributes(self, 'line').items()])[line]
Get nodes adjacent to line Here, line refers to the object behind the key 'line' of the attribute dict attached to each edge. Parameters ---------- line: edisgo.grid.components.Line A eDisGo line object Returns ------- tuple Nodes adjacent to this edge
def reset(self):
    self._positions = []
    self._line = 1
    self._curr = None
    self._scope = 0
    self.language = None
Reset metric counter.
def cleanup_custom_options(id, weakref=None): try: if Store._options_context: return weakrefs = Store._weakrefs.get(id, []) if weakref in weakrefs: weakrefs.remove(weakref) refs = [] for wr in list(weakrefs): r = wr() if r is None or r.id != id: weakrefs.remove(wr) else: refs.append(r) if not refs: for bk in Store.loaded_backends(): if id in Store._custom_options[bk]: Store._custom_options[bk].pop(id) if not weakrefs: Store._weakrefs.pop(id, None) except Exception as e: raise Exception('Cleanup of custom options tree with id %s ' 'failed with the following exception: %s, ' 'an unreferenced orphan tree may persist in ' 'memory' % (e, id))
Cleans up unused custom trees if all objects referencing the custom id have been garbage collected or tree is otherwise unreferenced.
def _base64_encode(self, string_to_encode):
    try:
        return base64.b64encode(string_to_encode)
    except TypeError:
        encoding = sys.getdefaultencoding()
        base64_bytes = base64.b64encode(bytes(string_to_encode, encoding))
        return base64_bytes.decode(encoding)
Base64 encodes a string, with either Python 2 or 3. :param string_to_encode: the string to encode
def put_object_acl(self, bucket, object_name, access_control_policy): data = access_control_policy.to_xml() details = self._details( method=b"PUT", url_context=self._url_context( bucket=bucket, object_name='%s?acl' % (object_name,), ), body=data, ) query = self._query_factory(details) d = self._submit(query) d.addCallback(self._parse_acl) return d
Set access control policy on an object.
def start_workers(self, number: int=DEFAULT_WORKER_NUMBER, queue=DEFAULT_QUEUE, block=True, stop_when_queue_empty=False): if self._arbiter or self._workers: raise RuntimeError('Workers are already running') self._working_queue = queue tasks_names = '\n'.join( [' - ' + task.name for task in self._tasks.tasks.values() if task.queue == self._working_queue] ) logger.info('Starting %d workers on queue "%s" with tasks:\n%s', number, self._working_queue, tasks_names) self._broker.start() self._workers = Workers( num_workers=number, namespace=self.namespace, ) self._result_notifier = threading.Thread( target=run_forever, args=(self._result_notifier_func, self._must_stop, logger), name='{}-result-notifier'.format(self.namespace) ) self._result_notifier.start() self._arbiter = threading.Thread( target=run_forever, args=(self._arbiter_func, self._must_stop, logger, stop_when_queue_empty), name='{}-arbiter'.format(self.namespace) ) self._arbiter.start() if block: with handle_sigterm(): try: self._arbiter.join() except KeyboardInterrupt: self.stop_workers() except AttributeError: pass
Start the worker threads. :arg number: number of worker threads to launch :arg queue: name of the queue to consume, see :doc:`queues` :arg block: whether to block the calling thread until a signal arrives and workers get terminated :arg stop_when_queue_empty: automatically stop the workers when the queue is empty. Useful mostly for one-off scripts and testing.
def register_dependency(self, data_src, data_sink):
    pdebug("registering dependency %s -> %s" % (data_src, data_sink))
    if (data_src not in self._gettask(data_sink).depends_on):
        self._gettask(data_sink).depends_on.append(data_src)
    if (data_sink not in self._gettask(data_src).provides_for):
        self._gettask(data_src).provides_for.append(data_sink)
registers a dependency of data_src -> data_sink by placing appropriate entries in provides_for and depends_on
def get_kmgraph_meta(mapper_summary):
    d = mapper_summary["custom_meta"]
    meta = (
        "<b>N_cubes:</b> " + str(d["n_cubes"])
        + " <b>Perc_overlap:</b> " + str(d["perc_overlap"])
    )
    meta += (
        "<br><b>Nodes:</b> " + str(mapper_summary["n_nodes"])
        + " <b>Edges:</b> " + str(mapper_summary["n_edges"])
        + " <b>Total samples:</b> " + str(mapper_summary["n_total"])
        + " <b>Unique_samples:</b> " + str(mapper_summary["n_unique"])
    )
    return meta
Extract info from mapper summary to be displayed below the graph plot
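A small example of the expected `mapper_summary` shape, with made-up numbers:
```python
mapper_summary = {
    "custom_meta": {"n_cubes": 10, "perc_overlap": 0.2},
    "n_nodes": 42,
    "n_edges": 87,
    "n_total": 1000,
    "n_unique": 640,
}
meta_html = get_kmgraph_meta(mapper_summary)
# '<b>N_cubes:</b> 10 <b>Perc_overlap:</b> 0.2<br><b>Nodes:</b> 42 <b>Edges:</b> 87 ...'
```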
def render(template: typing.Union[str, Template], **kwargs): if not hasattr(template, 'render'): template = get_environment().from_string(textwrap.dedent(template)) return template.render( cauldron_template_uid=make_template_uid(), **kwargs )
Renders a template string using Jinja2 and the Cauldron templating environment. :param template: The string containing the template to be rendered :param kwargs: Any named arguments to pass to Jinja2 for use in rendering :return: The rendered template string
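A minimal usage sketch of the function above; the template text is made up:
```python
greeting = render(
    """
    Hello {{ name }}, you have {{ count }} unread notifications.
    """,
    name="Ada",
    count=3,
)
print(greeting.strip())  # Hello Ada, you have 3 unread notifications.
```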
def portfolio_prices(
        symbols=("AAPL", "GLD", "GOOG", "$SPX", "XOM", "msft"),
        start=datetime.datetime(2005, 1, 1),
        end=datetime.datetime(2011, 12, 31),
        normalize=True,
        allocation=None,
        price_type='actual_close',
):
    symbols = normalize_symbols(symbols)
    start = util.normalize_date(start)
    end = util.normalize_date(end)
    if allocation is None:
        allocation = [1. / len(symbols)] * len(symbols)
    if len(allocation) < len(symbols):
        allocation = list(allocation) + [1. / len(symbols)] * (len(symbols) - len(allocation))
    # Normalize the allocation weights so they sum to 1.
    total = np.sum(allocation)
    allocation = np.array([(float(a) / total) for a in allocation])
    timestamps = du.getNYSEdays(start, end, datetime.timedelta(hours=16))
    ls_keys = [price_type]
    ldf_data = da.get_data(timestamps, symbols, ls_keys)
    d_data = dict(zip(ls_keys, ldf_data))
    na_price = d_data[price_type].values
    if normalize:
        na_price /= na_price[0, :]
    na_price *= allocation
    return np.sum(na_price, axis=1)
Calculate the time series of total portfolio value for a weighted basket of equities.

Arguments:
  symbols (list of str): Ticker symbols like "GOOG", "AAPL", etc.
  start (datetime): The date at the start of the period being analyzed.
  end (datetime): The date at the end of the period being analyzed.
  normalize (bool): Whether to normalize prices to 1 at the start of the time series.
  allocation (list of float): The portion of the portfolio allocated to each equity. Weights are normalized to sum to 1; missing entries default to equal weights.
  price_type (str): Which price series to use, e.g. 'actual_close'.

Returns:
  numpy.ndarray: One value per NYSE trading day, the weighted sum of the (optionally normalized) prices.
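A hedged usage sketch, assuming the QSTK-style data access (`da.get_data`) used above is available in the environment:
```python
import datetime

values = portfolio_prices(
    symbols=("AAPL", "GOOG"),
    start=datetime.datetime(2010, 1, 1),
    end=datetime.datetime(2010, 12, 31),
    allocation=[0.6, 0.4],
)
# `values` is a 1-D numpy array: the daily value of a portfolio that starts at 1.0
# and is split 60/40 between the two symbols.
```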
def set_environment_variables(json_file_path): if json_file_path: with open(json_file_path) as json_file: env_vars = json.loads(json_file.read()) export_variables(env_vars)
Read and set environment variables from a flat json file.

Bear in mind that values set this way and later read with `os.getenv` will always come back as strings, since environment variables are just that - plain strings.

Json file example:
```
{
    "FOO": "bar",
    "BAZ": true
}
```
:param json_file_path: path to flat json file
:type json_file_path: str
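A hedged usage sketch; the file path and contents are illustrative:
```python
import os

# env.json contains: {"FOO": "bar", "BAZ": true}
set_environment_variables("env.json")
assert os.getenv("FOO") == "bar"
# Non-string JSON values are also exposed as plain strings
# (their exact form depends on how export_variables stringifies them).
```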
def fit(self, counts_df, val_set=None): if self.stop_crit == 'val-llk': if val_set is None: raise ValueError("If 'stop_crit' is set to 'val-llk', must provide a validation set.") if self.verbose: self._print_st_msg() self._process_data(counts_df) if self.verbose: self._print_data_info() if (val_set is not None) and (self.stop_crit!='diff-norm') and (self.stop_crit!='train-llk'): self._process_valset(val_set) else: self.val_set = None self._cast_before_fit() self._fit() if self.keep_data: if self.users_per_batch == 0: self._store_metadata() else: self._st_ix_user = self._st_ix_user[:-1] if self.produce_dicts and self.reindex: self.user_dict_ = {self.user_mapping_[i]:i for i in range(self.user_mapping_.shape[0])} self.item_dict_ = {self.item_mapping_[i]:i for i in range(self.item_mapping_.shape[0])} self.is_fitted = True del self.input_df del self.val_set return self
Fit Hierarchical Poisson Model to sparse count data

Fits a hierarchical Poisson model to count data using mean-field approximation with either full-batch coordinate-ascent or mini-batch stochastic coordinate-ascent.

Note
----
DataFrames and arrays passed to '.fit' might be modified in place - if this is a problem you'll need to pass a copy of them, e.g. 'counts_df=counts_df.copy()'.

Note
----
Forcibly terminating the procedure should still keep the last calculated shape and rate parameter values, but is not recommended. If you need to make predictions on a forcibly-terminated object, set the attribute 'is_fitted' to 'True'.

Note
----
Fitting in mini-batches is more prone to numerical instability than full-batch variational inference, and it is more likely that all your parameters will turn to NaNs (which means the optimization procedure failed).

Parameters
----------
counts_df : pandas data frame (nobs, 3) or coo_matrix
    Input data with one row per non-zero observation, consisting of triplets ('UserId', 'ItemId', 'Count'). Must contain the columns 'UserId', 'ItemId', and 'Count'. Combinations of users and items not present are implicitly assumed to be zero by the model. Can also pass a sparse coo_matrix, in which case 'reindex' will be forced to 'False'.
val_set : pandas data frame (nobs, 3)
    Validation set on which to monitor log-likelihood. Same format as counts_df.

Returns
-------
self : obj
    This same object (the model is fitted in place and returns itself).
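A hedged usage sketch; the `HPF` constructor name and its defaults are assumptions based on the attributes referenced above, not confirmed API:
```python
import pandas as pd

counts_df = pd.DataFrame({
    "UserId": [1, 1, 2, 3],
    "ItemId": [10, 12, 10, 11],
    "Count":  [3, 1, 2, 5],
})

model = HPF()          # hypothetical constructor; configure verbosity, stop_crit, etc. as needed
model.fit(counts_df)   # counts_df may be modified in place - pass a copy if that matters
```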
def social_links(context, object, user=None, authed=False, downable=False, vote_down_msg=None): voting = False if hasattr(object, 'votes'): voting = True return { 'object': object, 'url': object.get_absolute_url(), 'site': get_current_site(context.request), 'ctype': ContentType.objects.get_for_model(object), 'user': user, 'voting': voting, 'vote_down': downable, 'vote_down_msg': vote_down_msg, 'authenticated_request': authed, }
Outputs social links. At minimum, this will be Facebook and Twitter. But if possible, it will also output voting and watchlist links. Usage: {% social_links object %} {% social_links object user %} {% social_links object user authenticated_request %}
def post(self, path, data, **kwargs): url = self._make_url(path) return self._make_request("POST", url, data=data, **kwargs)
Perform an HTTP POST request of the specified path in Device Cloud

Make an HTTP POST request against Device Cloud with this account's credentials and base url. This method uses the `requests <http://docs.python-requests.org/en/latest/>`_ library `request method <http://docs.python-requests.org/en/latest/api/#requests.request>`_ and all keyword arguments will be passed on to that method.

:param str path: Device Cloud path to POST
:param int retries: The number of times the request should be retried if an unsuccessful response is received. Most likely, you should leave this at 0.
:param data: The data to be posted in the body of the POST request (see docs for ``requests.post``)
:raises DeviceCloudHttpException: if a non-success response to the request is received from Device Cloud
:returns: A requests ``Response`` object
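A hedged usage sketch; `dc` stands for an authenticated connection object exposing this method, and the path and body are illustrative:
```python
body = '<DataPoint><data>42</data><streamId>temperature</streamId></DataPoint>'
response = dc.post("/ws/DataPoint/temperature", data=body)
print(response.status_code, response.text)
```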
def polygonize(layer): output_layer_name = polygonize_steps['output_layer_name'] output_layer_name = output_layer_name % layer.keywords['layer_purpose'] gdal_layer_name = polygonize_steps['gdal_layer_name'] if layer.keywords.get('layer_purpose') == 'exposure': output_field = exposure_type_field else: output_field = hazard_value_field input_raster = gdal.Open(layer.source(), gdal.GA_ReadOnly) srs = osr.SpatialReference() srs.ImportFromWkt(input_raster.GetProjectionRef()) temporary_dir = temp_dir(sub_dir='pre-process') out_shapefile = unique_filename( suffix='-%s.shp' % output_layer_name, dir=temporary_dir) driver = ogr.GetDriverByName("ESRI Shapefile") destination = driver.CreateDataSource(out_shapefile) output_layer = destination.CreateLayer(gdal_layer_name, srs) field_name = output_field['field_name'][0:10] fd = ogr.FieldDefn(field_name, ogr.OFTInteger) output_layer.CreateField(fd) active_band = layer.keywords.get('active_band', 1) input_band = input_raster.GetRasterBand(active_band) gdal.Polygonize(input_band, None, output_layer, 0, [], callback=None) destination.Destroy() vector_layer = QgsVectorLayer(out_shapefile, output_layer_name, 'ogr') request = QgsFeatureRequest() expression = '"%s" = %s' % (field_name, no_data_value) request.setFilterExpression(expression) vector_layer.startEditing() for feature in vector_layer.getFeatures(request): vector_layer.deleteFeature(feature.id()) vector_layer.commitChanges() vector_layer.keywords = layer.keywords.copy() vector_layer.keywords[ layer_geometry['key']] = layer_geometry_polygon['key'] vector_layer.keywords['title'] = output_layer_name vector_layer.keywords['inasafe_fields'] = { output_field['key']: field_name } check_layer(vector_layer) return vector_layer
Polygonize a raster layer into a vector layer using GDAL.

Issue https://github.com/inasafe/inasafe/issues/3183

:param layer: The raster layer to polygonize.
:type layer: QgsRasterLayer

:return: Polygonized vector layer stored in a temporary shapefile.
:rtype: QgsVectorLayer

.. versionadded:: 4.0
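A hedged usage sketch; `raster` stands for a QgsRasterLayer whose InaSAFE keywords (`layer_purpose`, `active_band`, ...) have already been set:
```python
vector = polygonize(raster)
print(vector.keywords['title'])            # name of the generated polygon layer
print(vector.keywords['inasafe_fields'])   # maps the output field key to the shapefile field name
```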
def run(self, graminit_h, graminit_c): self.parse_graminit_h(graminit_h) self.parse_graminit_c(graminit_c) self.finish_off()
Load the grammar tables from the text files written by pgen.
def delete(name, skip_final_snapshot=None, final_db_snapshot_identifier=None, region=None, key=None, keyid=None, profile=None, tags=None, wait_for_deletion=True, timeout=180): if timeout == 180 and not skip_final_snapshot: timeout = 420 if not skip_final_snapshot and not final_db_snapshot_identifier: raise SaltInvocationError('At least one of the following must' ' be specified: skip_final_snapshot' ' final_db_snapshot_identifier') try: conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return {'deleted': bool(conn)} kwargs = {} if locals()['skip_final_snapshot'] is not None: kwargs['SkipFinalSnapshot'] = bool(locals()['skip_final_snapshot']) if locals()['final_db_snapshot_identifier'] is not None: kwargs['FinalDBSnapshotIdentifier'] = str(locals()['final_db_snapshot_identifier']) res = conn.delete_db_instance(DBInstanceIdentifier=name, **kwargs) if not wait_for_deletion: return {'deleted': bool(res), 'message': 'Deleted RDS instance {0}.'.format(name)} start_time = time.time() while True: res = __salt__['boto_rds.exists'](name=name, tags=tags, region=region, key=key, keyid=keyid, profile=profile) if not res.get('exists'): return {'deleted': bool(res), 'message': 'Deleted RDS instance {0} completely.'.format(name)} if time.time() - start_time > timeout: raise SaltInvocationError('RDS instance {0} has not been ' 'deleted completely after {1} ' 'seconds'.format(name, timeout)) log.info('Waiting up to %s seconds for RDS instance %s to be ' 'deleted.', timeout, name) time.sleep(10) except ClientError as e: return {'error': __utils__['boto3.get_error'](e)}
Delete an RDS instance. CLI example:: salt myminion boto_rds.delete myrds skip_final_snapshot=True \ region=us-east-1
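A hedged sketch of the equivalent Python-side call with illustrative values (normally this is driven through the Salt CLI as shown above):
```python
result = delete(
    name="myrds",
    skip_final_snapshot=False,
    final_db_snapshot_identifier="myrds-final-snapshot",
    region="us-east-1",
    wait_for_deletion=True,
    timeout=600,
)
if "error" in result:
    print("Deletion failed:", result["error"])
else:
    print(result["message"])
```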