Dataset columns: "Unnamed: 0" (int64 row index, values 0 to 389k); "code" (string, lengths 26 to 79.6k); "docstring" (string, lengths 1 to 46.9k).
15,100
def associn(m, path, value):
    def assoc_recursively(m, path, value):
        if not path:
            return value
        p = path[0]
        return assoc(m, p, assoc_recursively(m.get(p, {}), path[1:], value))
    return assoc_recursively(m, path, value)
Copy-on-write associates a value in a nested dict
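A minimal usage sketch for the record above. The snippet relies on an `assoc` helper that is not shown in the code; the version below and the `settings` data are hypothetical stand-ins, with `assoc` assumed to return a shallow copy of a dict with one key replaced.

def assoc(m, k, v):
    # hypothetical helper: shallow copy-on-write single-key update
    out = dict(m)
    out[k] = v
    return out

settings = {"db": {"host": "localhost"}}
updated = associn(settings, ["db", "port"], 5432)
# settings is left unchanged; updated == {"db": {"host": "localhost", "port": 5432}}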
15,101
def _start_new_cdx_file(self): self._cdx_filename = .format(self._prefix_filename) if not self._params.appending: wpull.util.truncate_file(self._cdx_filename) self._write_cdx_header() elif not os.path.exists(self._cdx_filename): self._write_cdx_header()
Create and set current CDX file.
15,102
def findAll(self, strSeq):
    arr = self.encode(strSeq)
    lst = []
    lst = self._kmp_find(arr[0], self, lst)
    return lst
Same as find but returns a list of all occurrences
15,103
def do_step(self, values, xy_values, coeff, width):
    forces = {k: [] for k, i in enumerate(xy_values)}
    for (index1, value1), (index2, value2) in combinations(enumerate(xy_values), 2):
        f = self.calc_2d_forces(value1[0], value1[1], value2[0], value2[1], width)
        if coeff[index1] < coeff[index2]:
            if self.b_lenght - coeff[index2] < self.b_lenght / 10:
                forces[index1].append(f[1])
                forces[index2].append(f[0])
            else:
                forces[index1].append(f[0])
                forces[index2].append(f[1])
        else:
            if self.b_lenght - coeff[index1] < self.b_lenght / 10:
                forces[index1].append(f[0])
                forces[index2].append(f[1])
            else:
                forces[index1].append(f[1])
                forces[index2].append(f[0])
    forces = {k: sum(v) for k, v in forces.items()}
    energy = sum([abs(x) for x in forces.values()])
    return [(forces[k] / 10 + v) for k, v in enumerate(values)], energy
Calculates forces between two diagrams and pushes them apart by a tenth of the width
15,104
def get_sort_field(attr, model):
    try:
        if model._meta.get_field(attr):
            return attr
    except FieldDoesNotExist:
        if isinstance(attr, basestring):
            val = getattr(model, attr, None)
            if val and hasattr(val, 'sort_field'):
                return getattr(model, attr).sort_field
    return None
Gets the field to sort on for the given attr. Currently returns attr if it is a field on the given model. If the model has an attribute matching that name and that value has a 'sort_field' attribute, then that value is used. TODO: Provide a way to sort based on a non-field attribute.
15,105
def commit(self, offset=None, limit=None, dryrun=False):
    self.stream.command = "rsync -avRK --files-from={path} {source} {destination}"
    self.stream.append_tasks_to_streamlets(offset=offset, limit=limit)
    self.stream.commit_streamlets()
    self.stream.run_streamlets()
    self.stream.reset_streamlet()
Start the rsync download
15,106
def _compare_columns(self, new_columns, old_columns):
    add_columns = {}
    remove_columns = {}
    rename_columns = {}
    retype_columns = {}
    resize_columns = {}
    for key, value in new_columns.items():
        if key not in old_columns.keys():
            add_columns[key] = True
            if value[2]:
                if value[2] in old_columns.keys():
                    rename_columns[key] = value[2]
                    del add_columns[key]
        else:
            if value[1] != old_columns[key][1]:
                retype_columns[key] = value[1]
            if value[3] != old_columns[key][3]:
                resize_columns[key] = value[3]
    remove_keys = set(old_columns.keys()) - set(new_columns.keys())
    if remove_keys:
        for key in list(remove_keys):
            remove_columns[key] = True
    return add_columns, remove_columns, rename_columns, retype_columns, resize_columns
a helper method for generating differences between column properties
15,107
def get_container_info(self, obj): info = self.get_base_info(obj) info.update({}) return info
Returns the info for a Container
15,108
def get_minimum_score_metadata(self): metadata = dict(self._mdata[]) metadata.update({: self._my_map[]}) return Metadata(**metadata)
Gets the metadata for the minimum score. return: (osid.Metadata) - metadata for the minimum score *compliance: mandatory -- This method must be implemented.*
15,109
def to_dict(self):
    with self._lock:
        result = {}
        if self._gpayload:
            result.update(self._gpayload)
        if self._tpayload:
            result.update(getattr(self._tpayload, "context", {}))
        return result
Returns: dict: Combined global and thread-specific logging context
15,110
def face_angles_sparse(mesh):
    matrix = coo_matrix((mesh.face_angles.flatten(),
                         (mesh.faces_sparse.row, mesh.faces_sparse.col)),
                        mesh.faces_sparse.shape)
    return matrix
A sparse matrix representation of the face angles. Returns ---------- sparse: scipy.sparse.coo_matrix with: dtype: float shape: (len(mesh.vertices), len(mesh.faces))
15,111
def unique_rows(arr, return_index=False, return_inverse=False):
    b = scipy.ascontiguousarray(arr).view(
        scipy.dtype((scipy.void, arr.dtype.itemsize * arr.shape[1]))
    )
    try:
        out = scipy.unique(b, return_index=True, return_inverse=return_inverse)
        dum = out[0]
        idx = out[1]
        if return_inverse:
            inv = out[2]
    except TypeError:
        if return_inverse:
            raise RuntimeError(
                "Error in scipy.unique on older versions of numpy prevents "
                "return_inverse from working!"
            )
        rows = [_Row(row) for row in b]
        srt_idx = sorted(range(len(rows)), key=rows.__getitem__)
        rows = scipy.asarray(rows)[srt_idx]
        row_cmp = [-1]
        for k in xrange(1, len(srt_idx)):
            row_cmp.append(rows[k-1].__cmp__(rows[k]))
        row_cmp = scipy.asarray(row_cmp)
        transition_idxs = scipy.where(row_cmp != 0)[0]
        idx = scipy.asarray(srt_idx)[transition_idxs]
    out = arr[idx]
    # check the combined case first so it is actually reachable
    if return_index and return_inverse:
        out = (out, idx, inv)
    elif return_index:
        out = (out, idx)
    elif return_inverse:
        out = (out, inv)
    return out
Returns a copy of arr with duplicate rows removed. From Stackoverflow "Find unique rows in numpy.array." Parameters ---------- arr : :py:class:`Array`, (`m`, `n`) The array to find the unique rows of. return_index : bool, optional If True, the indices of the unique rows in the array will also be returned. I.e., unique = arr[idx]. Default is False (don't return indices). return_inverse: bool, optional If True, the indices in the unique array to reconstruct the original array will also be returned. I.e., arr = unique[inv]. Default is False (don't return inverse). Returns ------- unique : :py:class:`Array`, (`p`, `n`) where `p` <= `m` The array `arr` with duplicate rows removed.
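A brief usage sketch for the record above, with hypothetical data; it assumes an environment where scipy still re-exports the numpy helpers the snippet relies on.

import numpy as np

arr = np.array([[1, 2], [3, 4], [1, 2]])
uniq, idx = unique_rows(arr, return_index=True)
# uniq holds the distinct rows of arr, and arr[idx] reproduces uniq row for row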
15,112
def serialize_operator_greater_than(self, op):
    elem = etree.Element('greater-than')
    return self.serialize_value_list(elem, op.args)
Serializer for :meth:`SpiffWorkflow.operators.GreaterThan`. Example:: <greater-than> <value>text</value> <value><attribute>foobar</attribute></value> </greater-than>
15,113
def parse(self, template): self._compile_delimiters() start_index = 0 content_end_index, parsed_section, section_key = None, None, None parsed_template = ParsedTemplate() states = [] while True: match = self._template_re.search(template, start_index) if match is None: break match_index = match.start() end_index = match.end() matches = match.groupdict() if matches[] is not None: matches.update(tag=, tag_key=matches[]) elif matches[] is not None: matches.update(tag=, tag_key=matches[]) tag_type = matches[] tag_key = matches[] leading_whitespace = matches[] did_tag_begin_line = match_index == 0 or template[match_index - 1] in END_OF_LINE_CHARACTERS did_tag_end_line = end_index == len(template) or template[end_index] in END_OF_LINE_CHARACTERS is_tag_interpolating = tag_type in [, ] if did_tag_begin_line and did_tag_end_line and not is_tag_interpolating: if end_index < len(template): end_index += template[end_index] == and 1 or 0 if end_index < len(template): end_index += template[end_index] == and 1 or 0 elif leading_whitespace: match_index += len(leading_whitespace) leading_whitespace = if start_index != match_index: parsed_template.add(template[start_index:match_index]) start_index = end_index if tag_type in (, ): state = (tag_type, end_index, section_key, parsed_template) states.append(state) section_key, parsed_template = tag_key, ParsedTemplate() continue if tag_type == : if tag_key != section_key: raise ParsingError("Section end tag mismatch: %s != %s" % (tag_key, section_key)) parsed_section = parsed_template (tag_type, section_start_index, section_key, parsed_template) = states.pop() node = self._make_section_node(template, tag_type, tag_key, parsed_section, section_start_index, match_index) else: node = self._make_interpolation_node(tag_type, tag_key, leading_whitespace) parsed_template.add(node) if start_index != len(template): parsed_template.add(template[start_index:]) return parsed_template
Parse a template string starting at some index. This method uses the current tag delimiter. Arguments: template: a unicode string that is the template to parse. index: the index at which to start parsing. Returns: a ParsedTemplate instance.
15,114
def __definitions_descriptor(self): self._add_def_paths(prop_value) return result
Describes the definitions section of the OpenAPI spec. Returns: Dictionary describing the definitions of the spec.
15,115
def run(self): if self._args.list: self._print_installed_apps(self._args.controller) sys.exit(0) if not self._args.application: sys.stderr.write() self._arg_parser.print_help() sys.exit(-1) app_module, str(error))) sys.exit(-1)
Evaluate the command line arguments, performing the appropriate actions so the application can be started.
15,116
def service(self):
    if self._service is not None:
        return self._service
    metadata = self._metadata
    if metadata is None:
        return None
    try:
        searchinfo = self._metadata.searchinfo
    except AttributeError:
        return None
    splunkd_uri = searchinfo.splunkd_uri
    if splunkd_uri is None:
        return None
    uri = urlsplit(splunkd_uri, allow_fragments=False)
    self._service = Service(
        scheme=uri.scheme, host=uri.hostname, port=uri.port,
        app=searchinfo.app, token=searchinfo.session_key)
    return self._service
Returns a Splunk service object for this command invocation or None. The service object is created from the Splunkd URI and authentication token passed to the command invocation in the search results info file. This data is not passed to a command invocation by default. You must request it by specifying this pair of configuration settings in commands.conf: .. code-block:: python enableheader = true requires_srinfo = true The :code:`enableheader` setting is :code:`true` by default. Hence, you need not set it. The :code:`requires_srinfo` setting is false by default. Hence, you must set it. :return: :class:`splunklib.client.Service`, if :code:`enableheader` and :code:`requires_srinfo` are both :code:`true`. Otherwise, if either :code:`enableheader` or :code:`requires_srinfo` are :code:`false`, a value of :code:`None` is returned.
15,117
async def get_parameters(self, parameters=None):
    if parameters is None:
        parameters = ["all"]
    else:
        for parameter in parameters:
            if not parameter in [
                "all", "general", "3d", "6d", "analog", "force",
                "gazevector", "image", "skeleton", "skeleton:global",
            ]:
                raise QRTCommandException("%s is not a valid parameter" % parameter)
    cmd = "getparameters %s" % " ".join(parameters)
    return await asyncio.wait_for(
        self._protocol.send_command(cmd), timeout=self._timeout
    )
Get the settings for the requested component(s) of QTM in XML format. :param parameters: A list of parameters to request. Could be 'all' or any combination of 'general', '3d', '6d', 'analog', 'force', 'gazevector', 'image'. :rtype: An XML string containing the requested settings. See QTM RT Documentation for details.
15,118
def login_required(func=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url=None):
    def decorator(view_func):
        @functools.wraps(view_func, assigned=available_attrs(view_func))
        def _wrapped_view(request, *args, **kwargs):
            if is_authenticated(request.user):
                return view_func(request, *args, **kwargs)
            return handle_redirect_to_login(
                request,
                redirect_field_name=redirect_field_name,
                login_url=login_url
            )
        return _wrapped_view
    if func:
        return decorator(func)
    return decorator
Decorator for views that checks that the user is logged in, redirecting to the log in page if necessary.
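A short usage sketch for the record above; the view functions are hypothetical, and the decorator is applied the same way as Django's own login_required, either bare or with arguments.

@login_required
def dashboard(request):
    ...  # only runs for authenticated users

@login_required(login_url="/accounts/login/")
def account_settings(request):
    ...  # unauthenticated users are redirected to the given URL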
15,119
def health(): up_time = time.time() - START_TIME response = dict(service=__service_id__, uptime=.format(up_time)) return response, HTTPStatus.OK
Check the health of this service.
15,120
def _load_config():
    fname = _get_config_fname()
    if fname is None or not op.isfile(fname):
        return dict()
    with open(fname, 'r') as fid:
        config = json.load(fid)
    return config
Helper to load prefs from ~/.vispy/vispy.json
15,121
def pseudo_partial_waves(self):
    pseudo_partial_waves = OrderedDict()
    for (mesh, values, attrib) in self._parse_all_radfuncs("pseudo_partial_wave"):
        state = attrib["state"]
        pseudo_partial_waves[state] = RadialFunction(mesh, values)
    return pseudo_partial_waves
Dictionary with the pseudo partial waves indexed by state.
15,122
def get_gradient_x(shape, px):
    import scipy.sparse
    height, width = shape
    size = height * width
    c = -np.ones((width,))
    c[px] = 0
    r = np.zeros(c.shape, dtype=c.dtype)
    r[:px] = 1
    l = np.zeros(c.shape, dtype=c.dtype)
    l[px:] = 1
    block = scipy.sparse.diags([l, c, r], [-1, 0, 1], shape=(width, width))
    op = scipy.sparse.block_diag([block for n in range(height)])
    return op
Calculate the gradient in the x direction to the line at px. The x gradient operator is a block diagonal matrix, where each block is the size of the image width. The matrix itself is made up of (img_height x img_height) blocks, most of which are all zeros.
15,123
def tag_list(self, tags):
    return [
        (tag.name, "selected taggit-tag" if tag.name in tags else "taggit-tag")
        for tag in self.model.objects.all()
    ]
Generates a list of tags identifying those previously selected. Returns a list of tuples of the form (<tag name>, <CSS class name>). Uses the string names rather than the tags themselves in order to work with tag lists built from forms not fully submitted.
15,124
def create_default_item_node(field, state):
    default_item = nodes.definition_list_item()
    default_item.append(nodes.term(text="Default"))
    default_item_content = nodes.definition()
    default_item_content.append(
        nodes.literal(text=repr(field.default))
    )
    default_item.append(default_item_content)
    return default_item
Create a definition list item node that describes the default value of a Field config. Parameters ---------- field : ``lsst.pex.config.Field`` A configuration field. state : ``docutils.statemachine.State`` Usually the directive's ``state`` attribute. Returns ------- ``docutils.nodes.definition_list_item`` Definition list item that describes the default target of a ConfigurableField config.
15,125
def get_store_local_final_result(self): fw_dict = self.get_fw_dict() fw_data, fw_data_dict = self.get_fw(fw_dict.get()) res = fw_data.result self.store_local_final_result(res)
Store/Retrieve the final result. Retrieve the final result for FW create/delete from DB and store it locally.
15,126
def get_value(self):
    if (self._value is None) and (self.expr is not None):
        self._value = self.expr.get_value()
    return self._value
Evaluate self.expr to get the parameter's value
15,127
def sky2pix_ellipse(self, pos, a, b, pa):
    ra, dec = pos
    x, y = self.sky2pix(pos)
    x_off, y_off = self.sky2pix(translate(ra, dec, a, pa))
    sx = np.hypot((x - x_off), (y - y_off))
    theta = np.arctan2((y_off - y), (x_off - x))
    x_off, y_off = self.sky2pix(translate(ra, dec, b, pa - 90))
    sy = np.hypot((x - x_off), (y - y_off))
    theta2 = np.arctan2((y_off - y), (x_off - x)) - np.pi / 2
    defect = theta - theta2
    sy *= abs(np.cos(defect))
    return x, y, sx, sy, np.degrees(theta)
Convert an ellipse from sky to pixel coordinates. Parameters ---------- pos : (float, float) The (ra, dec) of the ellipse center (degrees). a, b, pa: float The semi-major axis, semi-minor axis and position angle of the ellipse (degrees). Returns ------- x,y : float The (x, y) pixel coordinates of the ellipse center. sx, sy : float The major and minor axes (FWHM) in pixels. theta : float The rotation angle of the ellipse (degrees). theta = 0 corresponds to the ellipse being aligned with the x-axis.
15,128
def sample(self, n): row_total_count = 0 row_counts = [] for file in self.files: with _util.open_local_or_gcs(file, ) as f: num_lines = sum(1 for line in f) row_total_count += num_lines row_counts.append(num_lines) names = None dtype = None if self._schema: _MAPPINGS = { : np.float64, : np.int64, : np.datetime64, : np.bool, } names = [x[] for x in self._schema] dtype = {x[]: _MAPPINGS.get(x[], object) for x in self._schema} skip_count = row_total_count - n skip_all = sorted(random.sample(range(0, row_total_count), skip_count)) dfs = [] for file, row_count in zip(self.files, row_counts): skip = [x for x in skip_all if x < row_count] skip_all = [x - row_count for x in skip_all if x >= row_count] with _util.open_local_or_gcs(file, ) as f: dfs.append(pd.read_csv(f, skiprows=skip, names=names, dtype=dtype, header=None)) return pd.concat(dfs, axis=0, ignore_index=True)
Samples data into a Pandas DataFrame. Args: n: number of sampled counts. Returns: A dataframe containing sampled data. Raises: Exception if n is larger than number of rows.
15,129
def get_token_network_events( chain: BlockChainService, token_network_address: Address, contract_manager: ContractManager, events: Optional[List[str]] = ALL_EVENTS, from_block: BlockSpecification = GENESIS_BLOCK_NUMBER, to_block: BlockSpecification = , ) -> List[Dict]: return get_contract_events( chain, contract_manager.get_contract_abi(CONTRACT_TOKEN_NETWORK), token_network_address, events, from_block, to_block, )
Helper to get all events of the ChannelManagerContract at `token_address`.
15,130
def align(aligner, reads): i = 0 for record in SeqIO.parse(reads, "fastq"): try: next(aligner.map(str(record.seq))) i += 1 except StopIteration: print(record.format("fastq"), end=) sys.stderr.write("NanoLyse: removed {} reads.\n".format(i))
Test whether reads align to the lambda genome; if not, write them to stdout
15,131
def percentage_progress(self):
    if self.total_progress != 0:
        return float(self.progress) / self.total_progress
    else:
        return self.progress
Returns a float between 0 and 1, representing the current job's progress in its task. If total_progress is not given or 0, just return self.progress. :return: float corresponding to the total percentage progress of the job.
15,132
def handle_delivered(
        chain_state: ChainState,
        state_change: ReceiveDelivered,
) -> TransitionResult[ChainState]:
    queueid = QueueIdentifier(state_change.sender, CHANNEL_IDENTIFIER_GLOBAL_QUEUE)
    inplace_delete_message_queue(chain_state, state_change, queueid)
    return TransitionResult(chain_state, [])
Check if the "Delivered" message exists in the global queue and delete if found.
15,133
def _fill_sample_count(self, node): node[] += sum( self._fill_sample_count(child) for child in node[]) return node[]
Counts and fills sample counts inside call tree.
15,134
def any_channel_validate_token_create(self, data, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/core/channel_framework#validate-token"
    api_path = "/api/v2/any_channel/validate_token"
    return self.call(api_path, method="POST", data=data, **kwargs)
https://developer.zendesk.com/rest_api/docs/core/channel_framework#validate-token
15,135
def to_string(s, encoding=): if six.PY2: return s.encode(encoding) if isinstance(s, bytes): return s.decode(encoding) return s
Accept unicode(py2) or bytes(py3) Returns: py2 type: str py3 type: str
15,136
def parse(soup): if is_direct_match(soup): return {: parse_satisfaction(soup), : parse_ceo(soup), : parse_meta(soup), : parse_salary(soup) } suggestions = parse_suggestions(soup) exact_match = next((s for s in suggestions if s[]), None) if exact_match: return get(company_uri=exact_match[]) return suggestions
Parses the results for a company search and return the results if is_direct_match. If no company is found, a list of suggestions are returned as dict. If one such recommendation is found to be an exact match, re-perform request for this exact match
15,137
def rules(self):
    rule = lib.EnvGetNextDefrule(self._env, ffi.NULL)
    while rule != ffi.NULL:
        yield Rule(self._env, rule)
        rule = lib.EnvGetNextDefrule(self._env, rule)
Iterate over the defined Rules.
15,138
def parse_field_path(api_repr):
    field_names = []
    for field_name in split_field_path(api_repr):
        if field_name[0] == "`" and field_name[-1] == "`":
            field_name = field_name[1:-1]
            field_name = field_name.replace(_ESCAPED_BACKTICK, _BACKTICK)
            field_name = field_name.replace(_ESCAPED_BACKSLASH, _BACKSLASH)
        field_names.append(field_name)
    return field_names
Parse a **field path** into a list of nested field names. See :func:`field_path` for more on **field paths**. Args: api_repr (str): The unique Firestore api representation which consists of either simple or UTF-8 field names. It cannot exceed 1500 bytes, and cannot be empty. Simple field names match ``'^[_a-zA-Z][_a-zA-Z0-9]*$'``. All other field names are escaped by surrounding them with backticks. Returns: List[str, ...]: The list of field names in the field path.
15,139
def stopped(self): if self.tune and self.tune.get(): return True if self.tune.get() == else False else: raise PyMediaroomError("No information in <node> about @stopped")
Return whether the stream is stopped.
15,140
def unicode_compatible(cls):
    if not is_py3:
        cls.__unicode__ = cls.__str__
        cls.__str__ = lambda self: self.__unicode__().encode()
    return cls
A decorator that defines ``__str__`` and ``__unicode__`` methods under Python 2.
15,141
def default(self, obj, **kwargs):
    if isinstance(obj, datetime.datetime):
        return time.mktime(obj.timetuple())
    if isinstance(obj, Timestamp):
        return obj.time
    if isinstance(obj, ObjectId):
        return obj.__str__()
    return JSONEncoder.default(self, obj)
Handles the adapting of special types from mongo
15,142
def _sample(self, position, stepsize): momentum = np.random.normal(0, 1, len(position)) depth = 0 position_backward, position_forward = position, position momentum_backward, momentum_forward = momentum, momentum candidate_set_size = accept_set_bool = 1 _, log_pdf = self.grad_log_pdf(position, self.model).get_gradient_log_pdf() slice_var = np.random.uniform(0, np.exp(log_pdf - 0.5 * np.dot(momentum, momentum))) while accept_set_bool == 1: direction = np.random.choice([-1, 1], p=[0.5, 0.5]) if direction == -1: (position_backward, momentum_backward, _, _, position_bar, candidate_set_size2, accept_set_bool2) = self._build_tree(position_backward, momentum_backward, slice_var, direction, depth, stepsize) else: (_, _, position_forward, momentum_forward, position_bar, candidate_set_size2, accept_set_bool2) = self._build_tree(position_forward, momentum_forward, slice_var, direction, depth, stepsize) if accept_set_bool2 == 1: if np.random.rand() < candidate_set_size2 / candidate_set_size: position = position_bar.copy() accept_set_bool, candidate_set_size = self._update_acceptance_criteria(position_forward, position_backward, momentum_forward, momentum_backward, accept_set_bool2, candidate_set_size, candidate_set_size2) depth += 1 return position
Returns a sample using a single iteration of NUTS
15,143
def _get_name_and_version(name, version, for_filename=False): if for_filename: name = _FILESAFE.sub(, name) version = _FILESAFE.sub(, version.replace(, )) return % (name, version)
Return the distribution name with version. If for_filename is true, return a filename-escaped form.
15,144
def meta(*bases, **kwargs):
    metaclass = kwargs.get("metaclass", type)
    if not bases:
        bases = (object,)
    class NewMeta(type):
        def __new__(mcls, name, mbases, namespace):
            if name:
                return metaclass.__new__(metaclass, name, bases, namespace)
            return super(NewMeta, mcls).__new__(mcls, "", mbases, {})
    return NewMeta("", tuple(), {})
Allows unique syntax similar to Python 3 for working with metaclasses in both Python 2 and Python 3. Examples -------- >>> class BadMeta(type): # An usual metaclass definition ... def __new__(mcls, name, bases, namespace): ... if "bad" not in namespace: # A bad constraint ... raise Exception("Oops, not bad enough") ... value = len(name) # To ensure this metaclass is called again ... def really_bad(self): ... return self.bad() * value ... namespace["really_bad"] = really_bad ... return super(BadMeta, mcls).__new__(mcls, name, bases, namespace) ... >>> class Bady(meta(object, metaclass=BadMeta)): ... def bad(self): ... return "HUA " ... >>> class BadGuy(Bady): ... def bad(self): ... return "R" ... >>> issubclass(BadGuy, Bady) True >>> Bady().really_bad() # Here value = 4 'HUA HUA HUA HUA ' >>> BadGuy().really_bad() # Called metaclass ``__new__`` again, so value = 6 'RRRRRR'
15,145
def NameGroups(data_arr, id_key):
    new_data_arr = []
    for data in data_arr:
        try:
            data[id_key] = clc._GROUP_MAPPING[data[id_key]]
        except:
            pass
        new_data_arr.append(data)
    if clc.args:
        clc.v1.output.Status("ERROR", 2, "Group name conversion not yet implemented")
    return(new_data_arr)
Get group name associated with ID. TODO - not yet implemented
15,146
def _if_statement(test, if_function, else_function) -> None:
    if isinstance(test, Addr):
        token = _program_context.set(Program())
        if_function()
        if_program = _program_context.get()
        _program_context.reset(token)
        if else_function:
            token = _program_context.set(Program())
            else_function()
            else_program = _program_context.get()
            _program_context.reset(token)
        else:
            else_program = None
        program = _program_context.get()
        program.if_then(test, if_program, else_program)
    else:
        if test:
            if_function()
        elif else_function:
            else_function()
Evaluate an if statement within a @magicquil block. If the test value is a Quil Addr then unwind it into quil code equivalent to an if then statement using jumps. Both sides of the if statement need to be evaluated and placed into separate Programs, which is why we create new program contexts for their evaluation. If the test value is not a Quil Addr then fall back to what Python would normally do with an if statement. Params are: if <test>: <if_function> else: <else_function> NB: This function must be named exactly _if_statement and be in scope for the ast transformer
15,147
def checkSimbad(g, target, maxobj=5, timeout=5): url = q = + str(maxobj) + \ \ + target query = urllib.parse.urlencode({: , : q}) resp = urllib.request.urlopen(url, query.encode(), timeout) data = False error = False results = [] for line in resp: line = line.decode() if line.startswith(): data = True if line.startswith(): error = True if data and line.startswith(): name, coords = line[7:].split() results.append( {: name.strip(), : coords.strip(), : }) resp.close() if error and len(results): g.clog.warn( + ) return results
Sends off a request to Simbad to check whether a target is recognised. Returns with a list of results, or raises an exception if it times out
15,148
def update(self, table_name, primary_key, instance): assert isinstance(primary_key, dict) assert isinstance(instance, BaseDocument) collection = self._db[table_name] document = instance.document if in document: document[] = ObjectId(document[]) update_result = collection.replace_one(filter=primary_key, replacement=document, upsert=True) if update_result.upserted_id: instance[] = update_result.upserted_id return update_result.upserted_id
Replaces the document identified by the primary_key, or creates one if a matching document does not exist
15,149
def bond_microcanonical_statistics( perc_graph, num_nodes, num_edges, seed, spanning_cluster=True, auxiliary_node_attributes=None, auxiliary_edge_attributes=None, spanning_sides=None, **kwargs ): sample_states = bond_sample_states( perc_graph=perc_graph, num_nodes=num_nodes, num_edges=num_edges, seed=seed, spanning_cluster=spanning_cluster, auxiliary_node_attributes=auxiliary_node_attributes, auxiliary_edge_attributes=auxiliary_edge_attributes, spanning_sides=spanning_sides, ) return np.fromiter( sample_states, dtype=microcanonical_statistics_dtype(spanning_cluster), count=num_edges + 1 )
Evolve a single run over all microstates (bond occupation numbers) Return the cluster statistics for each microstate Parameters ---------- perc_graph : networkx.Graph The substrate graph on which percolation is to take place num_nodes : int Number ``N`` of sites in the graph num_edges : int Number ``M`` of bonds in the graph seed : {None, int, array_like} Random seed initializing the pseudo-random number generator. Piped through to `numpy.random.RandomState` constructor. spanning_cluster : bool, optional Whether to detect a spanning cluster or not. Defaults to ``True``. auxiliary_node_attributes : optional Value of ``networkx.get_node_attributes(graph, 'span')`` auxiliary_edge_attributes : optional Value of ``networkx.get_edge_attributes(graph, 'span')`` spanning_sides : list, optional List of keys (attribute values) of the two sides of the auxiliary nodes. Return value of ``list(set(auxiliary_node_attributes.values()))`` Returns ------- ret : ndarray of size ``num_edges + 1`` Structured array with dtype ``dtype=[('has_spanning_cluster', 'bool'), ('max_cluster_size', 'uint32'), ('moments', 'uint64', 5)]`` ret['n'] : ndarray of int The number of bonds added at the particular iteration ret['edge'] : ndarray of int The index of the edge added at the particular iteration. Note that ``ret['edge'][0]`` is undefined! ret['has_spanning_cluster'] : ndarray of bool ``True`` if there is a spanning cluster, ``False`` otherwise. Only exists if `spanning_cluster` argument is set to ``True``. ret['max_cluster_size'] : int Size of the largest cluster (absolute number of sites) ret['moments'] : 2-D :py:class:`numpy.ndarray` of int Array of shape ``(num_edges + 1, 5)``. The ``k``-th entry is the ``k``-th raw moment of the (absolute) cluster size distribution, with ``k`` ranging from ``0`` to ``4``. See also -------- bond_sample_states microcanonical_statistics_dtype numpy.random.RandomState
15,150
def _ReadFlowResponseCounts(self, request_keys, cursor=None): query = condition_template = conditions = [condition_template] * len(request_keys) args = [] for client_id, flow_id, request_id in request_keys: args.append(db_utils.ClientIDToInt(client_id)) args.append(db_utils.FlowIDToInt(flow_id)) args.append(request_id) query = query.format(conditions=" OR ".join(conditions)) cursor.execute(query, args) response_counts = {} for (client_id_int, flow_id_int, request_id, count) in cursor.fetchall(): request_key = (db_utils.IntToClientID(client_id_int), db_utils.IntToFlowID(flow_id_int), request_id) response_counts[request_key] = count return response_counts
Reads counts of responses for the given requests.
15,151
def get_xritdecompress_cmd():
    cmd = os.environ.get('XRIT_DECOMPRESS_PATH', None)
    if not cmd:
        raise IOError("XRIT_DECOMPRESS_PATH is not defined (complete path to xRITDecompress)")
    question = ("Did you set the environment variable XRIT_DECOMPRESS_PATH correctly?")
    if not os.path.exists(cmd):
        raise IOError(str(cmd) + " does not exist!\n" + question)
    elif os.path.isdir(cmd):
        raise IOError(str(cmd) + " is a directory!\n" + question)
    return cmd
Find a valid binary for the xRITDecompress command.
15,152
async def serialize_rctsig_prunable(self, ar, type, inputs, outputs, mixin): if type == RctType.Null: return True if type != RctType.Full and type != RctType.Bulletproof and \ type != RctType.Simple and type != RctType.Bulletproof2: raise ValueError() if is_rct_bp(type): await ar.tag() await ar.begin_array() bps = [0] if ar.writing: bps[0] = len(self.bulletproofs) if type == RctType.Bulletproof2: await ar.field(elem=eref(bps, 0), elem_type=x.UVarintType) else: await ar.field(elem=eref(bps, 0), elem_type=x.UInt32) await ar.prepare_container(bps[0], eref(self, ), elem_type=Bulletproof) for i in range(bps[0]): await ar.field(elem=eref(self.bulletproofs, i), elem_type=Bulletproof) await ar.end_array() else: await ar.tag() await ar.begin_array() await ar.prepare_container(outputs, eref(self, ), elem_type=RangeSig) if len(self.rangeSigs) != outputs: raise ValueError() for i in range(len(self.rangeSigs)): await ar.field(elem=eref(self.rangeSigs, i), elem_type=RangeSig) await ar.end_array() await ar.tag() await ar.begin_array() is_full = type == RctType.Full mg_elements = inputs if not is_full else 1 await ar.prepare_container(mg_elements, eref(self, ), elem_type=MgSig) if len(self.MGs) != mg_elements: raise ValueError() for i in range(mg_elements): await ar.begin_object() await ar.tag() await ar.begin_array() await ar.prepare_container(mixin + 1, eref(self.MGs[i], ), elem_type=KeyM) if ar.writing and len(self.MGs[i].ss) != mixin + 1: raise ValueError() for j in range(mixin + 1): await ar.begin_array() mg_ss2_elements = 1 + (1 if not is_full else inputs) await ar.prepare_container(mg_ss2_elements, eref(self.MGs[i].ss, j), elem_type=KeyM.ELEM_TYPE) if ar.writing and len(self.MGs[i].ss[j]) != mg_ss2_elements: raise ValueError() for k in range(mg_ss2_elements): await ar.field(eref(self.MGs[i].ss[j], k), elem_type=KeyV.ELEM_TYPE) await ar.end_array() await ar.tag() await ar.field(eref(self.MGs[i], ), elem_type=ECKey) await ar.end_object() await ar.end_array() if type in (RctType.Bulletproof, RctType.Bulletproof2): await ar.begin_array() await ar.prepare_container(inputs, eref(self, ), elem_type=KeyV) if ar.writing and len(self.pseudoOuts) != inputs: raise ValueError() for i in range(inputs): await ar.field(eref(self.pseudoOuts, i), elem_type=KeyV.ELEM_TYPE) await ar.end_array()
Serialize rct sig :param ar: :type ar: x.Archive :param type: :param inputs: :param outputs: :param mixin: :return:
15,153
def _prepPointsForSegments(points):
    while 1:
        point = points[-1]
        if point.segmentType:
            break
        else:
            point = points.pop()
            points.insert(0, point)
            continue
        break
Move any off curves at the end of the contour to the beginning of the contour. This makes segmentation easier.
15,154
def set_language(self, request, org):
    if org:
        lang = org.language or settings.DEFAULT_LANGUAGE
        translation.activate(lang)
Set the current language from the org configuration.
15,155
def get_default_fields(self):
    field_names = self._meta.get_all_field_names()
    if 'id' in field_names:
        field_names.remove('id')
    return field_names
Get all fields of the model, except id
15,156
def get_data_port_m(self, data_port_id):
    for scoped_var_m in self.scoped_variables:
        if scoped_var_m.scoped_variable.data_port_id == data_port_id:
            return scoped_var_m
    return StateModel.get_data_port_m(self, data_port_id)
Searches and returns the model of a data port of a given state The method searches a port with the given id in the data ports of the given state model. If the state model is a container state, not only the input and output data ports are looked at, but also the scoped variables. :param data_port_id: The data port id to be searched :return: The model of the data port or None if it is not found
15,157
def multiclass_logloss(actual, predicted, eps=1e-15):
    if len(actual.shape) == 1:
        actual2 = np.zeros((actual.shape[0], predicted.shape[1]))
        for i, val in enumerate(actual):
            actual2[i, val] = 1
        actual = actual2
    clip = np.clip(predicted, eps, 1 - eps)
    rows = actual.shape[0]
    vsota = np.sum(actual * np.log(clip))
    return -1.0 / rows * vsota
Multi class version of Logarithmic Loss metric. :param actual: Array containing the actual target classes :param predicted: Matrix with class predictions, one probability per class
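A small usage sketch for the record above with hypothetical arrays; it assumes numpy is imported as np, as in the snippet.

import numpy as np

actual = np.array([0, 2, 1])                    # integer class labels
predicted = np.array([[0.8, 0.1, 0.1],          # one probability row per sample
                      [0.2, 0.2, 0.6],
                      [0.1, 0.7, 0.2]])
loss = multiclass_logloss(actual, predicted)    # lower is better; 0 would be perfect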
15,158
def list_issues( self, status=None, tags=None, assignee=None, author=None, milestones=None, priority=None, no_stones=None, since=None, order=None ): request_url = "{}issues".format(self.create_basic_url()) payload = {} if status is not None: payload[] = status if tags is not None: payload[] = tags if assignee is not None: payload[] = assignee if author is not None: payload[] = author if milestones is not None: payload[] = milestones if priority is not None: payload[] = priority if no_stones is not None: payload[] = no_stones if since is not None: payload[] = since if order is not None: payload[] = order return_value = self._call_api(request_url, params=payload) return return_value[]
List all issues of a project. :param status: filters the status of the issues :param tags: filers the tags of the issues :param assignee: filters the assignee of the issues :param author: filters the author of the issues :param milestones: filters the milestones of the issues (list of strings) :param priority: filters the priority of the issues :param no_stones: If True returns only the issues having no milestone, if False returns only the issues having a milestone :param since: Filters the issues updated after this date. The date can either be provided as an unix date or in the format Y-M-D :param order: Set the ordering of the issues. This can be asc or desc. Default: desc :return:
15,159
def reverse_whois(self, query, exclude=[], scope=, mode=None, **kwargs): return self._results(, , terms=delimited(query), exclude=delimited(exclude), scope=scope, mode=mode, **kwargs)
List of one or more terms to search for in the Whois record, as a Python list or separated with the pipe character ( | ).
15,160
def filters(self, *filters):
    def filter_constructor(f, shelf=None):
        if isinstance(f, BinaryExpression):
            return Filter(f)
        else:
            return f
    for f in filters:
        self._cauldron.use(
            self._shelf.find(
                f, (Filter, Having), constructor=filter_constructor
            )
        )
        self.dirty = True
    return self
Add a list of Filter ingredients to the query. These can either be Filter objects or strings representing filters on the service's shelf. ``.filters()`` are additive, calling .filters() more than once will add to the list of filters being used by the recipe. The Filter expression will be added to the query's where clause :param filters: Filters to add to the recipe. Filters can either be keys on the ``shelf`` or Filter objects :type filters: list
15,161
def cfms(self, cfms):
    # NOTE: the body of this record was garbled in extraction; only the tail of a
    # call survives, which applies the 'CFM' values across self.ADOF + self.LDOF
    # degrees of freedom:  ...'CFM', cfms, self.ADOF + self.LDOF)
Set the CFM values for this object's degrees of freedom. Parameters ---------- cfms : float or sequence of float A CFM value to set on all degrees of freedom, or a list containing one such value for each degree of freedom.
15,162
def compute_rollover(self, current_time: int) -> int: result = current_time + self.interval if ( self.when == RolloverInterval.MIDNIGHT or self.when in RolloverInterval.WEEK_DAYS ): if self.utc: t = time.gmtime(current_time) else: t = time.localtime(current_time) current_hour = t[3] current_minute = t[4] current_second = t[5] current_day = t[6] if self.at_time is None: rotate_ts = ONE_DAY_IN_SECONDS else: rotate_ts = ( self.at_time.hour * 60 + self.at_time.minute ) * 60 + self.at_time.second r = rotate_ts - ( (current_hour * 60 + current_minute) * 60 + current_second ) if r < 0: r += ONE_DAY_IN_SECONDS current_day = (current_day + 1) % 7 result = current_time + r if self.when in RolloverInterval.WEEK_DAYS: day = current_day if day != self.day_of_week: if day < self.day_of_week: days_to_wait = self.day_of_week - day else: days_to_wait = 6 - day + self.day_of_week + 1 new_rollover_at = result + (days_to_wait * (60 * 60 * 24)) if not self.utc: dst_now = t[-1] dst_at_rollover = time.localtime(new_rollover_at)[-1] if dst_now != dst_at_rollover: if not dst_now: addend = -ONE_HOUR_IN_SECONDS else: addend = ONE_HOUR_IN_SECONDS new_rollover_at += addend result = new_rollover_at return result
Work out the rollover time based on the specified time. If we are rolling over at midnight or weekly, then the interval is already known. What we need to figure out is WHEN the next interval is. In other words, if you are rolling over at midnight, then your base interval is 1 day, but you want to start that one day clock at midnight, not now. So, we have to fudge the `rollover_at` value in order to trigger the first rollover at the right time. After that, the regular interval will take care of the rest. Note that this code doesn't care about leap seconds. :)
15,163
def OIDC_UNAUTHENTICATED_SESSION_MANAGEMENT_KEY(self):
    if not self._unauthenticated_session_management_key:
        self._unauthenticated_session_management_key = ''.join(
            random.choice(string.ascii_uppercase + string.digits) for _ in range(100))
    return self._unauthenticated_session_management_key
OPTIONAL. Supply a fixed string to use as browser-state key for unauthenticated clients.
15,164
def plot_returns(perf_attrib_data, cost=None, ax=None): if ax is None: ax = plt.gca() returns = perf_attrib_data[] total_returns_label = cumulative_returns_less_costs = _cumulative_returns_less_costs( returns, cost ) if cost is not None: total_returns_label += specific_returns = perf_attrib_data[] common_returns = perf_attrib_data[] ax.plot(cumulative_returns_less_costs, color=, label=total_returns_label) ax.plot(ep.cum_returns(specific_returns), color=, label=) ax.plot(ep.cum_returns(common_returns), color=, label=) if cost is not None: ax.plot(-ep.cum_returns(cost), color=, label=) ax.set_title() ax.set_ylabel() configure_legend(ax) return ax
Plot total, specific, and common returns. Parameters ---------- perf_attrib_data : pd.DataFrame df with factors, common returns, and specific returns as columns, and datetimes as index. Assumes the `total_returns` column is NOT cost adjusted. - Example: momentum reversal common_returns specific_returns dt 2017-01-01 0.249087 0.935925 1.185012 1.185012 2017-01-02 -0.003194 -0.400786 -0.403980 -0.403980 cost : pd.Series, optional if present, gets subtracted from `perf_attrib_data['total_returns']`, and gets plotted separately ax : matplotlib.axes.Axes axes on which plots are made. if None, current axes will be used Returns ------- ax : matplotlib.axes.Axes
15,165
def _setup(app, *, schema, title=None, app_key=APP_KEY, db=None): admin = web.Application(loop=app.loop) app[app_key] = admin loader = jinja2.FileSystemLoader([TEMPLATES_ROOT, ]) aiohttp_jinja2.setup(admin, loader=loader, app_key=TEMPLATE_APP_KEY) if title: schema.title = title resources = [ init(db, info[], url=info[]) for init, info in schema.resources ] admin_handler = AdminOnRestHandler( admin, resources=resources, loop=app.loop, schema=schema, ) admin[] = admin_handler setup_admin_on_rest_handlers(admin, admin_handler) return admin
Initialize the admin-on-rest admin
15,166
def process_event(self, event_name: str, data: dict) -> None: if event_name == "after_validation": if data[] > self._learning_rate_last_impatience: self._learning_rate_cur_impatience += 1 else: self._learning_rate_cur_impatience = 0 self._learning_rate_last_impatience = data[] if (self._learning_rate_drop_patience is not None) and\ (self._learning_rate_cur_impatience >= self._learning_rate_drop_patience): self._learning_rate_cur_impatience = 0 self._learning_rate_cur_div *= self._learning_rate_drop_div self._lr /= self._learning_rate_drop_div self._update_graph_variables(learning_rate=self._lr) log.info(f"New learning rate dividor = {self._learning_rate_cur_div}") if event_name == : if (self._lr is not None) and self._lr_update_on_batch: self._lr = self._lr_schedule.next_val() / self._learning_rate_cur_div self._update_graph_variables(learning_rate=self._lr) if (self._mom is not None) and self._mom_update_on_batch: self._mom = min(1., max(0., self._mom_schedule.next_val())) self._update_graph_variables(momentum=self._mom) if event_name == : if (self._lr is not None) and not self._lr_update_on_batch: self._lr = self._lr_schedule.next_val() / self._learning_rate_cur_div self._update_graph_variables(learning_rate=self._lr) if (self._mom is not None) and not self._mom_update_on_batch: self._mom = min(1., max(0., self._mom_schedule.next_val())) self._update_graph_variables(momentum=self._mom) if event_name == : if (self._lr is not None) and ( not in data): data[] = self._lr if (self._mom is not None) and ( not in data): data[] = self._mom
Update learning rate and momentum variables after event (given by `event_name`) Args: event_name: name of event after which the method was called. Set of values: `"after_validation"`, `"after_batch"`, `"after_epoch"`, `"after_train_log"` data: dictionary with parameters values Returns: None
15,167
def start_http_server(self, port, host=, endpoint=): if is_running_from_reloader(): return app = Flask( % port) self.register_endpoint(endpoint, app) def run_app(): app.run(host=host, port=port) thread = threading.Thread(target=run_app) thread.setDaemon(True) thread.start()
Start an HTTP server for exposing the metrics. This will be an individual Flask application, not the one registered with this class. :param port: the HTTP port to expose the metrics endpoint on :param host: the HTTP host to listen on (default: `0.0.0.0`) :param endpoint: the URL path to expose the endpoint on (default: `/metrics`)
15,168
def save_as(self, fname, obj=None):
    writer = gdcm.Writer()
    writer.SetFileName(fname)
    if obj is None:
        obj = self._anon_obj
    if obj is None:
        raise ValueError("Need DICOM object, e.g. obj=gdcm.Anonymizer()")
    writer.SetFile(obj.GetFile())
    if not writer.Write():
        raise IOError("Could not save DICOM file")
    return True
Save DICOM file given a GDCM DICOM object. Examples of a GDCM DICOM object: * gdcm.Writer() * gdcm.Reader() * gdcm.Anonymizer() :param fname: DICOM file name to be saved :param obj: DICOM object to be saved, if None, Anonymizer() is used
15,169
def _multi_permission_mask(mode):
    def compose(f, g):
        return lambda *args, **kwargs: g(f(*args, **kwargs))
    return functools.reduce(compose, map(_permission_mask, mode.split(',')))
Support multiple, comma-separated Unix chmod symbolic modes. >>> _multi_permission_mask('a=r,u+w')(0) == 0o644 True
15,170
def do_file(self, line):
    opts = self.FILE_OPTS
    if not self.all_ontologies:
        self._help_nofiles()
        return
    line = line.split()
    if not line or line[0] not in opts:
        self.help_file()
        return
    if line[0] == "rename":
        self._rename_file()
    elif line[0] == "delete":
        self._delete_file()
    else:
        return
Perform some file operation
15,171
def blackbody_spectral_radiance(T, wavelength):
    try:
        return 2.*h*c**2/wavelength**5/(exp(h*c/(wavelength*T*k)) - 1.)
    except OverflowError:
        return 0.0
r'''Returns the spectral radiance, in units of W/m^2/sr/µm. .. math:: I_{\lambda,blackbody,e}(\lambda,T)=\frac{2hc_o^2} {\lambda^5[\exp(hc_o/\lambda k T)-1]} Parameters ---------- T : float Temperature of the surface, [K] wavelength : float Length of the wave to be considered, [m] Returns ------- I : float Spectral radiance [W/(m^2*sr*m)] Notes ----- Can be used to derive the Stefan-Boltzman law, or determine the maximum radiant frequency for a given temperature. Examples -------- Checked with Spectral-calc.com, at [2]_. >>> blackbody_spectral_radiance(800., 4E-6) 1311692056.2430143 Calculation of power from the sun (earth occupies 6.8E-5 steradian of the sun): >>> from scipy.integrate import quad >>> rad = lambda l: blackbody_spectral_radiance(5778., l)*6.8E-5 >>> quad(rad, 1E-10, 1E-4)[0] 1367.9808043781559 References ---------- .. [1] Bergman, Theodore L., Adrienne S. Lavine, Frank P. Incropera, and David P. DeWitt. Introduction to Heat Transfer. 6E. Hoboken, NJ: Wiley, 2011. .. [2] Spectral-calc.com. Blackbody Calculator, 2015. http://www.spectralcalc.com/blackbody_calculator/blackbody.php
15,172
def get_metric_group_definitions(self):
    group_names = self.properties.get('metric-groups', None)
    if not group_names:
        group_names = self.manager.get_metric_group_definition_names()
    mg_defs = []
    for group_name in group_names:
        try:
            mg_def = self.manager.get_metric_group_definition(group_name)
            mg_defs.append(mg_def)
        except ValueError:
            pass
    return mg_defs
Get the faked metric group definitions for this context object that are to be returned from its create operation. If a 'metric-groups' property had been specified for this context, only those faked metric group definitions of its manager object that are in that list, are included in the result. Otherwise, all metric group definitions of its manager are included in the result. Returns: iterable of :class:~zhmcclient.FakedMetricGroupDefinition`: The faked metric group definitions, in the order they had been added.
15,173
def buildFromJsbString(self, jsb, nocompressjs=False): tempconffile = cmd = [, , , tempconffile, , self.outdir] if nocompressjs: cmd.append() open(tempconffile, ).write(jsb) log.info(, .join(cmd)) try: call(cmd) finally: remove(tempconffile)
Build from the given config file using ``sencha build``. :param jsb: The JSB config as a string. :param nocompressjs: Compress the javascript? If ``True``, run ``sencha build --nocompress``.
15,174
def interleave_keys(a, b): def interleave(args): return .join([x for t in zip(*args) for x in t]) return int(.join(interleave(format(x, ) for x in (a, b))), base=2)
Interleave bits from two sort keys to form a joint sort key. Examples that are similar in both of the provided keys will have similar values for the key defined by this function. Useful for tasks with two text fields like machine translation or natural language inference.
15,175
def remove_children(self, reset_parent=True):
    if reset_parent:
        for child in self.children:
            child.parent = None
    self.__children = []
Remove all the children of this node. :param bool reset_parent: if ``True``, set to ``None`` the parent attribute of the children
15,176
def read_into(self, buf: bytearray, partial: bool = False) -> Awaitable[int]: future = self._start_read() available_bytes = self._read_buffer_size n = len(buf) if available_bytes >= n: end = self._read_buffer_pos + n buf[:] = memoryview(self._read_buffer)[self._read_buffer_pos : end] del self._read_buffer[:end] self._after_user_read_buffer = self._read_buffer elif available_bytes > 0: buf[:available_bytes] = memoryview(self._read_buffer)[ self._read_buffer_pos : ] self._user_read_buffer = True self._read_buffer = buf self._read_buffer_pos = 0 self._read_buffer_size = available_bytes self._read_bytes = n self._read_partial = partial try: self._try_inline_read() except: future.add_done_callback(lambda f: f.exception()) raise return future
Asynchronously read a number of bytes. ``buf`` must be a writable buffer into which data will be read. If ``partial`` is true, the callback is run as soon as any bytes have been read. Otherwise, it is run when the ``buf`` has been entirely filled with read data. .. versionadded:: 5.0 .. versionchanged:: 6.0 The ``callback`` argument was removed. Use the returned `.Future` instead.
15,177
def _graph_connected_component(graph, node_id):
    connected_components = np.zeros(shape=(graph.shape[0]), dtype=np.bool)
    connected_components[node_id] = True
    n_node = graph.shape[0]
    for i in range(n_node):
        last_num_component = connected_components.sum()
        _, node_to_add = np.where(graph[connected_components] != 0)
        connected_components[node_to_add] = True
        if last_num_component >= connected_components.sum():
            break
    return connected_components
Find the largest connected component of the graph that contains the given node Parameters ---------- graph : array-like, shape: (n_samples, n_samples) adjacency matrix of the graph, non-zero weight means an edge between the nodes node_id : int The index of the query node of the graph Returns ------- connected_components : array-like, shape: (n_samples,) An array of bool values indicating which nodes belong to the largest connected component containing the given query node
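A small usage sketch for the record above, with a hypothetical adjacency matrix; it assumes an older numpy where np.bool is still available, as the snippet does.

import numpy as np

# two components: nodes {0, 1} and nodes {2, 3}
graph = np.array([[0, 1, 0, 0],
                  [1, 0, 0, 0],
                  [0, 0, 0, 1],
                  [0, 0, 1, 0]])
mask = _graph_connected_component(graph, node_id=0)
# mask -> array([ True,  True, False, False])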
15,178
def qual_name(self) -> QualName:
    p, s, loc = self._key.partition(":")
    return (loc, p) if s else (p, self.namespace)
Return the receiver's qualified name.
15,179
def get_quizzes(self, course_id):
    url = QUIZZES_API.format(course_id)
    data = self._get_resource(url)
    quizzes = []
    for datum in data:
        quizzes.append(Quiz(data=datum))
    return quizzes
List quizzes for a given course https://canvas.instructure.com/doc/api/quizzes.html#method.quizzes_api.index
15,180
def read_event(suppress=False):
    queue = _queue.Queue(maxsize=1)
    hooked = hook(queue.put, suppress=suppress)
    while True:
        event = queue.get()
        unhook(hooked)
        return event
Blocks until a keyboard event happens, then returns that event.
15,181
def standardize(self): self.reset_query_marks() seen = set() total = 0 for n, atom in self.atoms(): if n in seen: continue for k, center in central.items(): if center != atom: continue shell = tuple((bond, self._node[m]) for m, bond in self._adj[n].items()) for shell_query, shell_patch, atom_patch in query_patch[k]: if shell_query != shell: continue total += 1 for attr_name, attr_value in atom_patch.items(): setattr(atom, attr_name, attr_value) for (bond_patch, atom_patch), (bond, atom) in zip(shell_patch, shell): bond.update(bond_patch) for attr_name, attr_value in atom_patch.items(): setattr(atom, attr_name, attr_value) seen.add(n) seen.update(self._adj[n]) break else: continue break if total: self.flush_cache() return total
standardize functional groups :return: number of found groups
15,182
def _checkConsistency(richInputs, fsm, inputContext): for richInput in richInputs: for state in fsm: for input in fsm[state]: if richInput.symbol() == input: outputs = fsm[state][input].output for output in outputs: try: required = inputContext[output] except KeyError: continue if required.implementedBy(richInput): continue raise DoesNotImplement( "%r not implemented by %r, " "required by %r in state %r" % ( required, richInput, input, state))
Verify that the outputs that can be generated by fsm have their requirements satisfied by the given rich inputs. @param richInputs: A L{list} of all of the types which will serve as rich inputs to an L{IFiniteStateMachine}. @type richInputs: L{list} of L{IRichInput} providers @param fsm: The L{IFiniteStateMachine} to which these rich inputs are to be delivered. @param inputContext: A L{dict} mapping output symbols to L{Interface} subclasses. Rich inputs which result in these outputs being produced by C{fsm} must provide the corresponding interface. @raise DoesNotImplement: If any of the rich input types fails to implement the interfaces required by the outputs C{fsm} can produce when they are received.
15,183
def find_package_data(): l = list() for start in (, ): for root, dirs, files in os.walk(start): for f in files: if f.endswith(): continue path = os.path.join(root, f).replace(, ) l.append(path) return {: l}
Returns package_data, because setuptools is too stupid to handle nested directories. Returns: dict: key is "ambry", value is list of paths.
15,184
def _damerau_levenshtein(a, b):
    memo = {}
    def distance(x, y):
        if (x, y) in memo:
            return memo[x, y]
        if not x:
            d = len(y)
        elif not y:
            d = len(x)
        else:
            d = min(
                distance(x[1:], y) + 1,
                distance(x, y[1:]) + 1,
                distance(x[1:], y[1:]) + (x[0] != y[0]))
            if len(x) >= 2 and len(y) >= 2 and x[0] == y[1] and x[1] == y[0]:
                t = distance(x[2:], y[2:]) + 1
                if d > t:
                    d = t
        memo[x, y] = d
        return d
    return distance(a, b)
Returns Damerau-Levenshtein edit distance from a to b.
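A few illustrative calls for the record above; the expected values follow from unit-cost insertions, deletions, substitutions, and adjacent transpositions.

_damerau_levenshtein("banana", "bananna")    # 1 -- one insertion
_damerau_levenshtein("abcd", "acbd")         # 1 -- one adjacent transposition
_damerau_levenshtein("kitten", "sitting")    # 3 -- the classic Levenshtein example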
15,185
def train_net(net, train_path, num_classes, batch_size, data_shape, mean_pixels, resume, finetune, pretrained, epoch, prefix, ctx, begin_epoch, end_epoch, frequent, learning_rate, momentum, weight_decay, lr_refactor_step, lr_refactor_ratio, freeze_layer_pattern=, num_example=10000, label_pad_width=350, nms_thresh=0.45, force_nms=False, ovp_thresh=0.5, use_difficult=False, class_names=None, voc07_metric=False, nms_topk=400, force_suppress=False, train_list="", val_path="", val_list="", iter_monitor=0, monitor_pattern=".*", log_file=None, kv_store=None): logging.basicConfig() logger = logging.getLogger() logger.setLevel(logging.INFO) if log_file: fh = logging.FileHandler(log_file) logger.addHandler(fh) if isinstance(data_shape, int): data_shape = (3, data_shape, data_shape) assert len(data_shape) == 3 and data_shape[0] == 3 prefix += + net + + str(data_shape[1]) if isinstance(mean_pixels, (int, float)): mean_pixels = [mean_pixels, mean_pixels, mean_pixels] assert len(mean_pixels) == 3, "must provide all RGB mean values" train_iter = DetRecordIter(train_path, batch_size, data_shape, mean_pixels=mean_pixels, label_pad_width=label_pad_width, path_imglist=train_list, **cfg.train) if val_path: val_iter = DetRecordIter(val_path, batch_size, data_shape, mean_pixels=mean_pixels, label_pad_width=label_pad_width, path_imglist=val_list, **cfg.valid) else: val_iter = None net = get_symbol_train(net, data_shape[1], num_classes=num_classes, nms_thresh=nms_thresh, force_suppress=force_suppress, nms_topk=nms_topk) if freeze_layer_pattern.strip(): re_prog = re.compile(freeze_layer_pattern) fixed_param_names = [name for name in net.list_arguments() if re_prog.match(name)] else: fixed_param_names = None ctx_str = + .join([str(c) for c in ctx]) + if resume > 0: logger.info("Resume training with {} from epoch {}" .format(ctx_str, resume)) _, args, auxs = mx.model.load_checkpoint(prefix, resume) begin_epoch = resume elif finetune > 0: logger.info("Start finetuning with {} from epoch {}" .format(ctx_str, finetune)) _, args, auxs = mx.model.load_checkpoint(prefix, finetune) begin_epoch = finetune optimizer_params=optimizer_params, begin_epoch=begin_epoch, num_epoch=end_epoch, initializer=mx.init.Xavier(), arg_params=args, aux_params=auxs, allow_missing=True, monitor=monitor, kvstore=kv)
Wrapper for training phase. Parameters: ---------- net : str symbol name for the network structure train_path : str record file path for training num_classes : int number of object classes, not including background batch_size : int training batch-size data_shape : int or tuple width/height as integer or (3, height, width) tuple mean_pixels : tuple of floats mean pixel values for red, green and blue resume : int resume from previous checkpoint if > 0 finetune : int fine-tune from previous checkpoint if > 0 pretrained : str prefix of pretrained model, including path epoch : int load epoch of either resume/finetune/pretrained model prefix : str prefix for saving checkpoints ctx : [mx.cpu()] or [mx.gpu(x)] list of mxnet contexts begin_epoch : int starting epoch for training, should be 0 if not otherwise specified end_epoch : int end epoch of training frequent : int frequency to print out training status learning_rate : float training learning rate momentum : float trainig momentum weight_decay : float training weight decay param lr_refactor_ratio : float multiplier for reducing learning rate lr_refactor_step : comma separated integers at which epoch to rescale learning rate, e.g. '30, 60, 90' freeze_layer_pattern : str regex pattern for layers need to be fixed num_example : int number of training images label_pad_width : int force padding training and validation labels to sync their label widths nms_thresh : float non-maximum suppression threshold for validation force_nms : boolean suppress overlaped objects from different classes train_list : str list file path for training, this will replace the embeded labels in record val_path : str record file path for validation val_list : str list file path for validation, this will replace the embeded labels in record iter_monitor : int monitor internal stats in networks if > 0, specified by monitor_pattern monitor_pattern : str regex pattern for monitoring network stats log_file : str log to file if enabled
15,186
def get_modis_tile_list(ds):
    from demcoreg import modis_grid
    modis_dict = {}
    for key in modis_grid.modis_dict:
        modis_dict[key] = ogr.CreateGeometryFromWkt(modis_grid.modis_dict[key])
    geom = geolib.ds_geom(ds)
    geom_dup = geolib.geom_dup(geom)
    ct = osr.CoordinateTransformation(geom_dup.GetSpatialReference(), geolib.wgs_srs)
    geom_dup.Transform(ct)
    tile_list = []
    for key, val in list(modis_dict.items()):
        if geom_dup.Intersects(val):
            tile_list.append(key)
    return tile_list
Helper function to identify MODIS tiles that intersect the input geometry.

modis_grid.py contains a dictionary of tile boundaries (tile name and WKT polygon ring from bbox).

See: https://modis-land.gsfc.nasa.gov/MODLAND_grid.html
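A usage sketch, assuming GDAL is installed and the raster carries valid projection metadata; the DEM file name is a placeholder.

from osgeo import gdal

# Placeholder path; any georeferenced raster readable by GDAL works here.
ds = gdal.Open('dem_32m.tif')
tiles = get_modis_tile_list(ds)
print(tiles)  # e.g. ['h09v04', 'h10v04'] for a raster spanning two tiles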
15,187
def wait_for_browser_close(b):
    if b:
        if not __ACTIVE:
            wait_failover(wait_for_browser_close)
            return
        wait_for_frame(b.GetBrowserImp().GetMainFrame())
Can be used to wait until a TBrowser is closed
15,188
def delete_user_pin(self, user_token, pin_id):
    # 'DELETE' is inferred from the behaviour; the exact path prefix passed to
    # url_v1() is missing from this snippet, so 'pins/' below is a placeholder.
    response = _request('DELETE',
                        url=self.url_v1('pins/' + pin_id),
                        user_agent=self.user_agent,
                        user_token=user_token,
                        )
    _raise_for_status(response)
Delete a user pin. :param str user_token: The token of the user. :param str pin_id: The id of the pin to delete. :raises `requests.exceptions.HTTPError`: If an HTTP error occurred.
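A hedged usage sketch; `client`, the token and the pin id are placeholders standing in for an instance of the API class this method belongs to.

import requests

try:
    # 'client' is an instance of the surrounding API class (not shown here).
    client.delete_user_pin(user_token='abc123-token', pin_id='42')
except requests.exceptions.HTTPError as err:
    print('Pin deletion failed:', err.response.status_code)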
15,189
def put_attributes(self, item_name, attributes, replace=True, expected_value=None):
    return self.connection.put_attributes(self, item_name, attributes,
                                          replace, expected_value)
Store attributes for a given item.

:type item_name: string
:param item_name: The name of the item whose attributes are being stored.

:type attributes: dict or dict-like object
:param attributes: The name/value pairs to store as attributes

:type expected_value: list
:param expected_value: If supplied, this is a list or tuple consisting
    of a single attribute name and expected value. The list can be of
    the form:

    * ['name', 'value']

    In which case the call will first verify that the attribute "name"
    of this item has a value of "value". If it does, the put will
    proceed, otherwise a ConditionalCheckFailed error will be returned.
    The list can also be of the form:

    * ['name', True|False]

    which will simply check for the existence (True) or
    non-existence (False) of the attribute.

:type replace: bool
:param replace: Whether the attribute values passed in will replace
    existing values or will be added as additional values. Defaults to True.

:rtype: bool
:return: True if successful
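The `expected_value` argument enables a conditional put. A sketch using the classic boto SimpleDB API, assuming credentials are configured and the domain already exists; the domain, item and attribute names are placeholders.

import boto

conn = boto.connect_sdb()
domain = conn.get_domain('jobs')          # assumed existing domain name

# Unconditional put: adds 'state' alongside any existing values.
domain.put_attributes('job-1', {'state': 'queued'}, replace=False)

# Conditional put: only succeeds if 'state' is currently 'queued';
# otherwise SimpleDB raises a ConditionalCheckFailed error.
domain.put_attributes('job-1', {'state': 'running'},
                      expected_value=['state', 'queued'])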
15,190
def volume_delete(name, profile=None, **kwargs):
    conn = _auth(profile, **kwargs)
    return conn.volume_delete(name)
Destroy the volume

name
    Name of the volume

profile
    Profile to build on

CLI Example:

.. code-block:: bash

    salt '*' nova.volume_delete myblock profile=openstack
15,191
def concat(self, other):
    if len(other) == 0:
        return self
    elif len(self) == 0:
        return other
    else:
        return LogicalNetworkList(self.hg,
                                  np.append(self.__matrix, other.__matrix, axis=0),
                                  np.concatenate([self.__networks, other.__networks]))
Returns the concatenation with another :class:`caspo.core.logicalnetwork.LogicalNetworkList` object instance. It is assumed (not checked) that both have the same underlying hypergraph. Parameters ---------- other : :class:`caspo.core.logicalnetwork.LogicalNetworkList` The list to concatenate Returns ------- caspo.core.logicalnetwork.LogicalNetworkList If other is empty returns self, if self is empty returns other, otherwise a new :class:`caspo.core.LogicalNetworkList` is created by concatenating self and other.
15,192
def rand_bivar(X, rho):
    import numpy as np
    Y = np.empty(X.shape)
    Y[:, 0] = X[:, 0]
    Y[:, 1] = rho * X[:, 0] + np.sqrt(1.0 - rho**2) * X[:, 1]
    return Y
Transform two unrelated random variables into correlated bivariate data.

X : ndarray
    two univariate random variables with N observations, as an <N x 2> matrix
rho : float
    The Pearson correlation coefficient, a number in [-1, +1]
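A quick check of the transform: feed in independent standard normals and verify that the sample correlation lands near the requested rho (a sketch; the exact value varies with the random draw).

import numpy as np

rng = np.random.default_rng(0)
X = rng.standard_normal((100000, 2))   # two independent N(0, 1) columns
Y = rand_bivar(X, rho=0.7)

# Sample Pearson correlation should be close to 0.7 for this many observations.
print(np.corrcoef(Y[:, 0], Y[:, 1])[0, 1])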
15,193
def execute_on_all_members(self, task):
    return self.execute_on_members(self._client.cluster.get_member_list(), task)
Executes a task on all of the known cluster members. :param task: (Task), the task executed on the all of the members. :return: (Map), :class:`~hazelcast.future.Future` tuples representing pending completion of the task on each member.
15,194
def examples(self):
    examples = []
    for example in [variant.examples for variant in self._raw_synset.variants if len(variant.examples)]:
        examples.extend(example)
    return examples
Returns the examples of the synset. Returns ------- list of str List of its variants' examples.
15,195
def run_later(self, callable_, timeout, *args, **kwargs):
    self.lock.acquire()
    try:
        if self.die:
            # original error message literal is missing from the snippet
            raise RuntimeError('timer thread has been shut down')
        job = TimerTask(callable_, *args, **kwargs)
        self._jobs.append((job, time.time() + timeout))
        self._jobs.sort(key=lambda j: j[1])
        self.lock.notify()
        return job
    finally:
        self.lock.release()
Schedules the specified callable for delayed execution. Returns a TimerTask instance that can be used to cancel pending execution.
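A hedged usage sketch, assuming the surrounding scheduler class has been instantiated as `scheduler` and that the returned TimerTask exposes a cancel() method, as the docstring implies.

def on_timeout(name):
    print('timer fired for', name)

# Run on_timeout('job-a') roughly 5 seconds from now.
job = scheduler.run_later(on_timeout, 5.0, 'job-a')

# If the work becomes unnecessary before it fires, cancel the pending task.
job.cancel()   # assumed TimerTask API, per the docstring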
15,196
def hide_arp_holder_arp_entry_interfacetype_HundredGigabitEthernet_HundredGigabitEthernet(self, **kwargs):
    config = ET.Element("config")
    hide_arp_holder = ET.SubElement(config, "hide-arp-holder", xmlns="urn:brocade.com:mgmt:brocade-arp")
    arp_entry = ET.SubElement(hide_arp_holder, "arp-entry")
    arp_ip_address_key = ET.SubElement(arp_entry, "arp-ip-address")
    arp_ip_address_key.text = kwargs.pop('arp_ip_address')
    interfacetype = ET.SubElement(arp_entry, "interfacetype")
    HundredGigabitEthernet = ET.SubElement(interfacetype, "HundredGigabitEthernet")
    HundredGigabitEthernet = ET.SubElement(HundredGigabitEthernet, "HundredGigabitEthernet")
    HundredGigabitEthernet.text = kwargs.pop('HundredGigabitEthernet')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
15,197
def get_image(row, output_dir):
    if not download_image(image_id=row[0],
                          url=row[1],
                          x1=float(row[2]),
                          y1=float(row[3]),
                          x2=float(row[4]),
                          y2=float(row[5]),
                          output_dir=output_dir):
        print("Download failed: " + str(row[0]))
Downloads the image that corresponds to the given row. Prints a notification if the download fails.
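A sketch of driving get_image from a CSV manifest whose columns follow the row layout assumed above (image id, URL, then the x1, y1, x2, y2 crop box); the manifest file name is a placeholder.

import csv
import os

output_dir = 'images'
os.makedirs(output_dir, exist_ok=True)

# Each row: [image_id, url, x1, y1, x2, y2] — the layout get_image expects.
with open('boxes.csv', newline='') as fh:
    for row in csv.reader(fh):
        get_image(row, output_dir)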
15,198
def set_epoch(self, year):
    fa.loadapxsh(self.datafile, np.float(year))
    self.year = year
Updates the epoch for all subsequent conversions. Parameters ========== year : float Decimal year
15,199
def discrete(self):
    discrete = np.array([self.discretize_path(i) for i in self.paths])
    return discrete
A sequence of connected vertices in space, corresponding to self.paths. Returns --------- discrete : (len(self.paths),) A sequence of (m*, dimension) float