Dataset schema:
    Unnamed: 0 : int64, range 0 to 389k
    code : string, lengths 26 to 79.6k
    docstring : string, lengths 1 to 46.9k
3,300
def walk_egg(egg_dir):
    # 'EGG-INFO' literals restored (lost in extraction); py2 walker.next()
    # replaced with next(walker).
    walker = os.walk(egg_dir)
    base, dirs, files = next(walker)
    if 'EGG-INFO' in dirs:
        dirs.remove('EGG-INFO')
    yield base, dirs, files
    for bdf in walker:
        yield bdf
Walk an unpacked egg's contents, skipping the metadata directory
3,301
def log(self, uuid=None, organization=None, from_date=None, to_date=None):
    # Template name 'log.tmpl' assumed; the literal was lost in extraction.
    try:
        enrollments = api.enrollments(self.db, uuid, organization,
                                      from_date, to_date)
        self.display('log.tmpl', enrollments=enrollments)
    except (NotFoundError, InvalidValueError) as e:
        self.error(str(e))
        return e.code
    return CMD_SUCCESS
List enrollment information available in the registry. Method that returns a list of enrollments. If <uuid> parameter is set, it will return the enrollments related to that unique identity; if <organization> parameter is given, it will return the enrollments related to that organization; if both parameters are set, the function will return the list of enrollments of <uuid> on the <organization>. Enrollments between a period can also be listed using <from_date> and <to_date> parameters. When these are set, the method will return all those enrollments where Enrollment.start >= from_date AND Enrollment.end <= to_date. Defaults values for these dates are 1900-01-01 and 2100-01-01. :param db: database manager :param uuid: unique identifier :param organization: name of the organization :param from_date: date when the enrollment starts :param to_date: date when the enrollment ends
3,302
def if_has_delegate(delegate):
    if isinstance(delegate, list):
        delegate = tuple(delegate)
    if not isinstance(delegate, tuple):
        delegate = (delegate,)
    return lambda fn: _IffHasDelegate(fn, delegate)
Wrap a delegated instance attribute function. Creates a decorator for methods that are delegated in the presence of a results wrapper. This enables duck-typing by ``hasattr`` returning True according to the sub-estimator. This function was adapted from scikit-learn, which defines ``if_delegate_has_method``, but operates differently by injecting methods not based on method presence, but by delegate presence. Examples -------- >>> from pmdarima.utils.metaestimators import if_has_delegate >>> >>> class A(object): ... @if_has_delegate('d') ... def func(self): ... return True >>> >>> a = A() >>> # the delegate does not exist yet >>> assert not hasattr(a, 'func') >>> # inject the attribute >>> a.d = None >>> assert hasattr(a, 'func') and a.func() Parameters ---------- delegate : string, list of strings or tuple of strings Name of the sub-estimator that can be accessed as an attribute of the base object. If a list or a tuple of names are provided, the first sub-estimator that is an attribute of the base object will be used.
3,303
def ping(self):
    if self.finished is not None:
        raise AlreadyFinished()
    with self._db_conn() as conn:
        # The UPDATE statement literal was lost in extraction; it refreshes
        # the task's last-contact timestamp in the queue table.
        success = conn.query(
            '...' % self._queue.table_name,
            now=datetime.utcnow(),
            task_id=self.task_id,
            execution_id=self.execution_id,
            ttl=self._queue.execution_ttl)
        if success != 1:
            raise TaskDoesNotExist()
Notify the queue that this task is still active.
3,304
def isclose(a, b, *, rel_tol=1e-09, abs_tol=0.0):
    try:
        return math.isclose(a, b, rel_tol=rel_tol, abs_tol=abs_tol)
    except AttributeError:
        if (rel_tol < 0.0) or (abs_tol < 0.0):
            raise ValueError("Tolerances must be non-negative, but are "
                             "rel_tol: {} and abs_tol: {}".format(rel_tol, abs_tol))
        if math.isnan(a) or math.isnan(b):
            return False
        if a == b:
            return True
        if math.isinf(a) or math.isinf(b):
            return False
        diff = abs(a - b)
        return ((diff <= rel_tol * abs(b)) or
                (diff <= rel_tol * abs(a)) or
                (diff <= abs_tol))
Python 3.4 does not have math.isclose, so we need to steal it and add it here.
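For illustration, the backport above matches math.isclose semantics; a quick sanity check, assuming the function is in scope:
    assert isclose(1.0, 1.0 + 1e-10)          # within the default rel_tol=1e-09
    assert not isclose(1.0, 1.1)              # relative difference is ~10%
    assert isclose(0.0, 1e-10, abs_tol=1e-9)  # abs_tol is needed near zero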
3,305
def run_foreach_or_conditional(self, context):
    logger.debug("starting")
    if self.foreach_items:
        self.foreach_loop(context)
    else:
        self.run_conditional_decorators(context)
    logger.debug("done")
Run the foreach sequence or the conditional evaluation. Args: context: (pypyr.context.Context) The pypyr context. This arg will mutate.
3,306
def toggle_item(self, item, test_func, field_name=None):
    if test_func(item):
        self.add_item(item, field_name)
        return True
    else:
        self.remove_item(item, field_name)
        return False
Toggles the section based on test_func. test_func takes an item and returns a boolean. If it returns True, the item will be added to the given section. It will be removed from the section otherwise. Intended for use with items of settings.ARMSTRONG_SECTION_ITEM_MODEL. Behavior on other items is undefined.
3,307
def get_beam(header):
    if "BPA" not in header:
        log.warning("BPA not present in fits header, using 0")
        bpa = 0
    else:
        bpa = header["BPA"]
    if "BMAJ" not in header:
        log.warning("BMAJ not present in fits header.")
        bmaj = None
    else:
        bmaj = header["BMAJ"]
    if "BMIN" not in header:
        log.warning("BMIN not present in fits header.")
        bmin = None
    else:
        bmin = header["BMIN"]
    if None in [bmaj, bmin, bpa]:
        return None
    beam = Beam(bmaj, bmin, bpa)
    return beam
Create a :class:`AegeanTools.fits_image.Beam` object from a fits header. BPA may be missing but will be assumed to be zero. if BMAJ or BMIN are missing then return None instead of a beam object. Parameters ---------- header : HDUHeader The fits header. Returns ------- beam : :class:`AegeanTools.fits_image.Beam` Beam object, with a, b, and pa in degrees.
3,308
def _elect_source_replication_group(self, over_replicated_rgs, partition):
    return max(
        over_replicated_rgs,
        key=lambda rg: rg.count_replica(partition),
    )
Decide source replication-group based as group with highest replica count.
3,309
def _remove_qs(self, url):
    scheme, netloc, path, query, fragment = urlsplit(url)
    # The query component is replaced with an empty string before re-joining.
    return urlunsplit((scheme, netloc, path, '', fragment))
Removes a query string from a URL before signing. :param url: The URL to strip. :type url: str
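A standalone sketch of the same idea, using only the standard library:
    from urllib.parse import urlsplit, urlunsplit

    scheme, netloc, path, query, fragment = urlsplit("https://example.com/a?x=1#top")
    print(urlunsplit((scheme, netloc, path, "", fragment)))  # https://example.com/a#top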
3,310
def config(self, config):
    for section, data in config.items():
        for variable, value in data.items():
            self.set_value(section, variable, value)
Set config values from config dictionary.
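For illustration, the nested-dict shape this setter expects (section and key names here are hypothetical):
    cfg = {
        "server": {"host": "localhost", "port": "8080"},
        "logging": {"level": "debug"},
    }
    # obj.config(cfg) would call set_value("server", "host", "localhost"), etc.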
3,311
def _Load(self, location):
    # HTTP verb and 'id' key restored; the URL template is assumed from the
    # linked API documentation (literals lost in extraction).
    for network in clc.v2.API.Call(
            'GET',
            'datacenters/%s/%s/networks' % (self.alias, location),
            {}, session=self.session):
        self.networks.append(Network(id=network['id'], alias=self.alias,
                                     network_obj=network, session=self.session))
Load all networks associated with the given location. https://www.centurylinkcloud.com/api-docs/v2/#get-network-list#request
3,312
def update_store_credit_by_id(cls, store_credit_id, store_credit, **kwargs):
    # '_return_http_data_only' and 'async' keys assumed from the generated
    # Swagger-client pattern ('async' is confirmed by the docstring).
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._update_store_credit_by_id_with_http_info(
            store_credit_id, store_credit, **kwargs)
    else:
        (data) = cls._update_store_credit_by_id_with_http_info(
            store_credit_id, store_credit, **kwargs)
        return data
Update StoreCredit Update attributes of StoreCredit This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_store_credit_by_id(store_credit_id, store_credit, async=True) >>> result = thread.get() :param async bool :param str store_credit_id: ID of storeCredit to update. (required) :param StoreCredit store_credit: Attributes of storeCredit to update. (required) :return: StoreCredit If the method is called asynchronously, returns the request thread.
3,313
def compile_foreign_key(line, context, attributes, primary_key, attr_sql,
                        foreign_key_sql, index_sql):
    # String literals shown as '...' were lost in extraction and are not
    # recoverable. 'NULLABLE'/'UNIQUE' are restored from the surrounding
    # logic; the FOREIGN KEY / NOT NULL SQL fragments are assumed from the
    # DataJoint conventions.
    from .table import Table
    from .expression import Projection
    new_style = True
    try:
        result = foreign_key_parser.parseString(line)
    except pp.ParseException:
        try:
            result = foreign_key_parser_old.parseString(line)
        except pp.ParseBaseException as err:
            raise DataJointError('...' % (line, err)) from None
        else:
            new_style = False
    try:
        ref = eval(result.ref_table, context)
    except Exception if new_style else NameError:
        raise DataJointError('...' % result.ref_table)
    options = [opt.upper() for opt in result.options]
    for opt in options:
        if opt not in {'NULLABLE', 'UNIQUE'}:
            raise DataJointError('...'.format(opt=opt))
    is_nullable = 'NULLABLE' in options
    is_unique = 'UNIQUE' in options
    if is_nullable and primary_key is not None:
        raise DataJointError('...'.format(line=line))
    if not new_style:
        if not isinstance(ref, type) or not issubclass(ref, Table):
            raise DataJointError('...' % result.ref_table)
    if isinstance(ref, type) and issubclass(ref, Table):
        ref = ref()
    if (not isinstance(ref, (Table, Projection)) or len(ref.restriction) or
            (isinstance(ref, Projection) and
             (not isinstance(ref._arg, Table) or len(ref._arg.restriction)))):
        raise DataJointError('...' % result.ref_table)
    if not new_style:
        if not isinstance(ref, Table):
            DataJointError('...' % result.ref_table)
        if not all(r in ref.primary_key for r in result.ref_attrs):
            raise DataJointError('...' % line)
    try:
        raise DataJointError('...'.format(
            attr=next(attr for attr in result.new_attrs if attr in attributes),
            line=line))
    except StopIteration:
        pass
    new_attrs = list(result.new_attrs)
    ref_attrs = list(result.ref_attrs)
    if new_attrs and not ref_attrs:
        if len(new_attrs) != 1:
            raise DataJointError('...' % line)
        if len(ref.primary_key) == 1:
            ref_attrs = ref.primary_key
        else:
            ref_attrs = [attr for attr in ref.primary_key
                         if attr not in attributes]
            if len(ref_attrs) != 1:
                raise DataJointError('...' % line)
    if len(new_attrs) != len(ref_attrs):
        raise DataJointError('...' % line)
    if ref_attrs:
        ref = ref.proj(**dict(zip(new_attrs, ref_attrs)))
    base = ref._arg if isinstance(ref, Projection) else ref
    for attr, ref_attr in zip(ref.primary_key, base.primary_key):
        if attr not in attributes:
            attributes.append(attr)
            if primary_key is not None:
                primary_key.append(attr)
            attr_sql.append(
                base.heading[ref_attr].sql
                .replace(ref_attr, attr, 1)
                .replace('NOT NULL', '', int(is_nullable)))
    foreign_key_sql.append(
        'FOREIGN KEY (`{fk}`) REFERENCES {ref} (`{pk}`) '
        'ON UPDATE CASCADE ON DELETE RESTRICT'.format(
            fk='`,`'.join(ref.primary_key),
            pk='`,`'.join(base.primary_key),
            ref=base.full_table_name))
    if is_unique:
        index_sql.append('UNIQUE INDEX (`{attrs}`)'.format(
            attrs='`,`'.join(ref.primary_key)))
:param line: a line from a table definition :param context: namespace containing referenced objects :param attributes: list of attribute names already in the declaration -- to be updated by this function :param primary_key: None if the current foreign key is made from the dependent section. Otherwise it is the list of primary key attributes thus far -- to be updated by the function :param attr_sql: list of sql statements defining attributes -- to be updated by this function. :param foreign_key_sql: list of sql statements specifying foreign key constraints -- to be updated by this function. :param index_sql: list of INDEX declaration statements, duplicate or redundant indexes are ok.
3,314
def gen_table(self, inner_widths, inner_heights, outer_widths):
    # Border/style names ('top', 'heading', 'footing', 'row', 'bottom') are
    # restored from the terminaltables conventions (literals lost in extraction).
    if self.outer_border:
        yield self.horizontal_border('top', outer_widths)
    row_count = len(self.table_data)
    last_row_index, before_last_row_index = row_count - 1, row_count - 2
    for i, row in enumerate(self.table_data):
        if self.inner_heading_row_border and i == 0:
            style = 'heading'
        elif self.inner_footing_row_border and i == last_row_index:
            style = 'footing'
        else:
            style = 'row'
        for line in self.gen_row_lines(row, style, inner_widths, inner_heights[i]):
            yield line
        if i == last_row_index:
            break
        if self.inner_heading_row_border and i == 0:
            yield self.horizontal_border('heading', outer_widths)
        elif self.inner_footing_row_border and i == before_last_row_index:
            yield self.horizontal_border('footing', outer_widths)
        elif self.inner_row_border:
            yield self.horizontal_border('row', outer_widths)
    if self.outer_border:
        yield self.horizontal_border('bottom', outer_widths)
Combine everything and yield every line of the entire table with borders. :param iter inner_widths: List of widths (no padding) for each column. :param iter inner_heights: List of heights (no padding) for each row. :param iter outer_widths: List of widths (with padding) for each column. :return:
3,315
def traceroute(host):
    # Command strings, dict keys and regexes below were lost in extraction
    # and are restored best-effort from the upstream Salt module.
    ret = []
    if not salt.utils.path.which('traceroute'):
        log.info('This minion does not have traceroute installed')
        return ret
    cmd = 'traceroute {0}'.format(salt.utils.network.sanitize_host(host))
    out = __salt__['cmd.run'](cmd)
    if salt.utils.platform.is_sunos() or salt.utils.platform.is_aix():
        traceroute_version = [0, 0, 0]
    else:
        cmd2 = 'traceroute --version'
        out2 = __salt__['cmd.run'](cmd2)
        try:
            traceroute_version_raw = re.findall(
                r'.*[Vv]ersion (\d+)\.([\w\+]+)\.*(\w*)', out2)[0]
            log.debug('traceroute_version_raw: %s', traceroute_version_raw)
            traceroute_version = []
            for t in traceroute_version_raw:
                try:
                    traceroute_version.append(int(t))
                except ValueError:
                    traceroute_version.append(t)
            if len(traceroute_version) < 3:
                traceroute_version.append(0)
            log.debug('traceroute_version: %s', traceroute_version)
        except IndexError:
            traceroute_version = [0, 0, 0]
    for line in out.splitlines():
        if ' ' not in line:
            continue
        if line.startswith('traceroute'):
            continue
        if salt.utils.platform.is_aix():
            if line.startswith('trying to get source for'):
                continue
            if line.startswith('source should be'):
                continue
            if line.startswith('outgoing MTU'):
                continue
            if line.startswith('fragmentation required'):
                continue
        if 'Darwin' in six.text_type(traceroute_version[1]) or \
                'FreeBSD' in six.text_type(traceroute_version[1]) or \
                __grains__['kernel'] in ('SunOS', 'AIX'):
            try:
                traceline = re.findall(r'\s*(\d+)\s+(.*)\s+\((.*)\)\s+(.*)$', line)[0]
            except IndexError:
                traceline = re.findall(r'\s*(\d+)\s+(\*\s+\*\s+\*)', line)[0]
            log.debug('traceline: %s', traceline)
            delays = re.findall(r'(\d+\.\d+)\s*ms', six.text_type(traceline))
            try:
                if traceline[1] == '*':
                    result = {'count': traceline[0], 'hostname': '*'}
                else:
                    result = {
                        'count': traceline[0],
                        'hostname': traceline[1],
                        'ip': traceline[2],
                    }
                for idx in range(0, len(delays)):
                    result['ms{0}'.format(idx + 1)] = delays[idx]
            except IndexError:
                result = {}
        elif (traceroute_version[0] >= 2 and traceroute_version[2] >= 14
                or traceroute_version[0] >= 2 and traceroute_version[1] > 0):
            comps = line.split('  ')
            if comps[1] == '* * *':
                result = {'count': int(comps[0]), 'hostname': '*'}
            else:
                result = {
                    'count': int(comps[0]),
                    'hostname': comps[1].split()[0],
                    'ip': comps[1].split()[1].strip('()'),
                    'ms1': float(comps[2].split()[0]),
                    'ms2': float(comps[3].split()[0]),
                    'ms3': float(comps[4].split()[0])}
        else:
            comps = line.split()
            result = {
                'count': comps[0],
                'hostname': comps[1],
                'ip': comps[2],
                'ms1': comps[4],
                'ms2': comps[6],
                'ms3': comps[8],
                'ping1': comps[3],
                'ping2': comps[5],
                'ping3': comps[7]}
        ret.append(result)
    return ret
Performs a traceroute to a 3rd party host .. versionchanged:: 2015.8.0 Added support for SunOS .. versionchanged:: 2016.11.4 Added support for AIX CLI Example: .. code-block:: bash salt '*' network.traceroute archlinux.org
3,316
def create(self, create_missing=None):
    return type(self)(
        self._server_config,
        id=self.create_json(create_missing)['id'],
    ).read()
Do extra work to fetch a complete set of attributes for this entity. For more information, see `Bugzilla #1381129 <https://bugzilla.redhat.com/show_bug.cgi?id=1381129>`_.
3,317
def compile(self, session=None):
    if not self.num_data == self.X.shape[0]:
        self.num_data = self.X.shape[0]
        self.q_alpha = Parameter(np.zeros((self.num_data, self.num_latent)))
        self.q_lambda = Parameter(np.ones((self.num_data, self.num_latent)),
                                  transforms.positive)
    return super(VGP_opper_archambeau, self).compile(session=session)
Before calling the standard compile function, check to see if the size of the data has changed and add variational parameters appropriately. This is necessary because the shape of the parameters depends on the shape of the data.
3,318
def config_xml_to_dict(contents, result, parse_job=True):
    from lxml import etree
    try:
        root = etree.fromstring(contents)
        pairs = []
        if parse_job:
            for elem in root:
                if (elem.tag != gc.CONFIG_XML_TASKS_TAG) and (elem.text is not None):
                    pairs.append(u"%s%s%s" % (
                        safe_unicode(elem.tag),
                        gc.CONFIG_STRING_ASSIGNMENT_SYMBOL,
                        safe_unicode(elem.text.strip())
                    ))
            return pairs_to_dict(pairs)
        else:
            output_list = []
            for task in root.find(gc.CONFIG_XML_TASKS_TAG):
                if task.tag == gc.CONFIG_XML_TASK_TAG:
                    pairs = []
                    for elem in task:
                        if elem.text is not None:
                            pairs.append(u"%s%s%s" % (
                                safe_unicode(elem.tag),
                                gc.CONFIG_STRING_ASSIGNMENT_SYMBOL,
                                safe_unicode(elem.text.strip())
                            ))
                    output_list.append(pairs_to_dict(pairs))
            return output_list
    except:
        if result is not None:
            result.passed = False
            result.add_error("An error occurred while parsing XML file")
        if parse_job:
            return {}
        else:
            return []
Convert the contents of a XML config file into the corresponding dictionary :: dictionary[key_1] = value_1 dictionary[key_2] = value_2 ... dictionary[key_n] = value_n :param bytes contents: the XML configuration contents :param bool parse_job: if ``True``, parse the job properties; if ``False``, parse the tasks properties :rtype: dict (``parse_job=True``) or list of dict (``parse_job=False``)
3,319
def authorized_purchase_object(self, oid, price, huid):
    # HTTP verb, path template and JSON keys restored from the documented
    # resource ``objects/<oid>/purchases``; the 'autocommit' key is assumed.
    return self.request(
        'post',
        safeformat('objects/{:int}/purchases', oid),
        json.dumps({
            'price': price,
            'huid': huid,
            'autocommit': True
        }))
Does delegated (pre-authorized) purchase of `oid` in the name of `huid`, at price `price` (vingd transferred from `huid` to consumer's acc). :raises GeneralException: :resource: ``objects/<oid>/purchases`` :access: authorized users with ACL flag ``purchase.object.authorize`` + delegate permission required for the requester to charge the user: ``purchase.object``
3,320
def move_editorstack_data(self, start, end):
    if start < 0 or end < 0:
        return
    else:
        steps = abs(end - start)
        direction = (end - start) // steps
        data = self.data
        self.blockSignals(True)
        for i in range(start, end, direction):
            data[i], data[i + direction] = data[i + direction], data[i]
        self.blockSignals(False)
        self.refresh()
Reorder editorstack.data so it is synchronized with the tab bar when tabs are moved.
3,321
def groups_setPurpose(self, *, channel: str, purpose: str, **kwargs) -> SlackResponse:
    kwargs.update({"channel": channel, "purpose": purpose})
    return self.api_call("groups.setPurpose", json=kwargs)
Sets the purpose for a private channel. Args: channel (str): The channel id. e.g. 'G1234567890' purpose (str): The new purpose for the channel. e.g. 'My Purpose'
3,322
def optimize(function, x0, cons=[], ftol=0.2, disp=0, plot=False):
    # The progress-message literals were lost in extraction; generic text is
    # used in the print calls below (py2 print statements converted to py3).
    if disp > 0:
        print('optimize: function %s, start %s' % (function, x0))
    points = []
    values = []

    def recordfunction(x):
        v = function(x)
        points.append(x)
        values.append(v)
        return v

    (a, b, c), (va, vb, vc) = seek_minimum_bracket(
        recordfunction, x0, cons=cons, ftol=ftol, disp=disp, plot=plot)
    if disp > 0:
        print('bracket found after %d evaluations' % len(points),
              (a, b, c), (va, vb, vc))
    if disp > 2:
        if plot:
            plot_values(values, points, lastpoint=-1, ftol=ftol)
            pause()
    result = brent(recordfunction, a, b, c, va, vb, vc,
                   cons=cons, ftol=ftol, disp=disp, plot=plot)
    if disp > 0:
        print('minimum found after %d evaluations' % len(points), result)
    if disp > 1 or len(points) > 20:
        if plot:
            plot_values(values, points, lastpoint=-1, ftol=ftol)
        if disp > 2:
            pause()
    global neval
    neval += len(points)
    return result
**Optimization method based on Brent's method** First, a bracket (a b c) is sought that contains the minimum (b value is smaller than both a or c). The bracket is then recursively halfed. Here we apply some modifications to ensure our suggested point is not too close to either a or c, because that could be problematic with the local approximation. Also, if the bracket does not seem to include the minimum, it is expanded generously in the right direction until it covers it. Thus, this function is fail safe, and will always find a local minimum.
3,323
def __process_equalities(self, equalities, momentequalities):
    monomial_sets = []
    n_rows = 0
    le = 0
    if equalities is not None:
        for equality in equalities:
            le += 1
            if equality.is_Relational:
                equality = convert_relational(equality)
            eq_order = ncdegree(equality)
            if eq_order > 2 * self.level:
                raise Exception("An equality constraint has degree %d. "
                                "Choose a higher level of relaxation."
                                % eq_order)
            localization_order = (2 * self.level - eq_order) // 2
            index = find_variable_set(self.variables, equality)
            localizing_monomials = \
                pick_monomials_up_to_degree(self.monomial_sets[index],
                                            localization_order)
            if len(localizing_monomials) == 0:
                localizing_monomials = [S.One]
            localizing_monomials = unique(localizing_monomials)
            monomial_sets.append(localizing_monomials)
            n_rows += len(localizing_monomials) * \
                (len(localizing_monomials) + 1) // 2
    if momentequalities is not None:
        for _ in momentequalities:
            le += 1
            monomial_sets.append([S.One])
            n_rows += 1
    A = np.zeros((n_rows, self.n_vars + 1), dtype=self.F.dtype)
    n_rows = 0
    if self._parallel:
        pool = Pool()
    for i, equality in enumerate(flatten([equalities, momentequalities])):
        func = partial(moment_of_entry, monomials=monomial_sets[i],
                       ineq=equality, substitutions=self.substitutions)
        lm = len(monomial_sets[i])
        if self._parallel and lm > 1:
            chunksize = max(int(np.sqrt(lm*lm/2) / cpu_count()), 1)
            iter_ = pool.map(func, ([row, column] for row in range(lm)
                                    for column in range(row, lm)), chunksize)
        else:
            iter_ = imap(func, ([row, column] for row in range(lm)
                                for column in range(row, lm)))
        for row, column, polynomial in iter_:
            if isinstance(polynomial, str):
                self.__parse_expression(equality, -1, A[n_rows])
            else:
                A[n_rows] = self._get_facvar(polynomial)
            n_rows += 1
        if self.verbose > 0:
            sys.stdout.write("\r\x1b[KProcessing %d/%d equalities..."
                             % (i+1, le))
            sys.stdout.flush()
    if self._parallel:
        pool.close()
        pool.join()
    if self.verbose > 0:
        sys.stdout.write("\n")
    return A
Generate localizing matrices Arguments: equalities -- list of equality constraints momentequalities -- list of moment equality constraints
3,324
def create_topology(self, topologyName, topology):
    if not topology or not topology.IsInitialized():
        raise_(StateException("Topology protobuf not init properly",
                              StateException.EX_TYPE_PROTOBUF_ERROR),
               sys.exc_info()[2])
    path = self.get_topology_path(topologyName)
    LOG.info("Adding topology: {0} to path: {1}".format(topologyName, path))
    topologyString = topology.SerializeToString()
    try:
        self.client.create(path, value=topologyString, makepath=True)
        return True
    except NoNodeError:
        raise_(StateException("NoNodeError while creating topology",
                              StateException.EX_TYPE_NO_NODE_ERROR),
               sys.exc_info()[2])
    except NodeExistsError:
        raise_(StateException("NodeExistsError while creating topology",
                              StateException.EX_TYPE_NODE_EXISTS_ERROR),
               sys.exc_info()[2])
    except ZookeeperError:
        raise_(StateException("Zookeeper while creating topology",
                              StateException.EX_TYPE_ZOOKEEPER_ERROR),
               sys.exc_info()[2])
    except Exception:
        raise
Create topology.
3,325
def _collect_block_lines(self, msgs_store, node, msg_state):
    for child in node.get_children():
        self._collect_block_lines(msgs_store, child, msg_state)
    first = node.fromlineno
    last = node.tolineno
    if (isinstance(node, (nodes.Module, nodes.ClassDef, nodes.FunctionDef))
            and node.body):
        firstchildlineno = node.body[0].fromlineno
    else:
        firstchildlineno = last
    for msgid, lines in msg_state.items():
        for lineno, state in list(lines.items()):
            original_lineno = lineno
            if first > lineno or last < lineno:
                continue
            message_definitions = msgs_store.get_message_definitions(msgid)
            for message_definition in message_definitions:
                if message_definition.scope == WarningScope.NODE:
                    if lineno > firstchildlineno:
                        state = True
                    first_, last_ = node.block_range(lineno)
                else:
                    first_ = lineno
                    last_ = last
            for line in range(first_, last_ + 1):
                if line in self._module_msgs_state.get(msgid, ()):
                    continue
                if line in lines:
                    state = lines[line]
                    original_lineno = line
                if not state:
                    self._suppression_mapping[(msgid, line)] = original_lineno
                try:
                    self._module_msgs_state[msgid][line] = state
                except KeyError:
                    self._module_msgs_state[msgid] = {line: state}
            del lines[lineno]
Recursively walk (depth first) AST to collect block level options line numbers.
3,326
def api_version(self, v):
    # The version-threshold and quality literals were lost in extraction;
    # '...' marks the gaps.
    self._api_version = v
    if self._api_version >= '...':
        self.default_quality = '...'
        self.allowed_qualities = ['...', '...', '...', '...']
    else:
        self.default_quality = '...'
        self.allowed_qualities = ['...', '...', '...', '...']
Set the api_version and associated configurations.
3,327
def unscale_and_snap_to_nearest(x, tune_params, eps):
    x_u = [i for i in x]
    for i, v in enumerate(tune_params.values()):
        # compute the midpoint of each discretized interval of width eps
        pad = 0.5 * eps
        linspace = numpy.linspace(pad, (eps * len(v)) - pad, len(v))
        # index of the nearest midpoint, clamped to a valid range
        idx = numpy.abs(linspace - x[i]).argmin()
        idx = min(max(idx, 0), len(v) - 1)
        x_u[i] = v[idx]
    return x_u
Helper function that snaps a scaled variable to the nearest configuration in the tuning parameter space.
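A small worked example, assuming numpy and the function above are in scope (the parameter space is hypothetical):
    import numpy

    tune_params = {"block_size": [16, 32, 64, 128]}
    # With eps=1 the interval midpoints are [0.5, 1.5, 2.5, 3.5]; 2.7 is
    # nearest to 2.5, so the value snaps to the third config entry.
    print(unscale_and_snap_to_nearest([2.7], tune_params, eps=1.0))  # [64]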
3,328
def add_item(self, item, replace=False):
    if item.jid in self._jids:
        if replace:
            self.remove_item(item.jid)
        else:
            raise ValueError("JID already in the roster")
    index = len(self._items)
    self._items.append(item)
    self._jids[item.jid] = index
Add an item to the roster. This will not automatically update the roster on the server. :Parameters: - `item`: the item to add - `replace`: if `True` then existing item will be replaced, otherwise a `ValueError` will be raised on conflict :Types: - `item`: `RosterItem` - `replace`: `bool`
3,329
def get_colors(n, cmap='viridis', start=0., stop=1., alpha=1., return_hex=False):
    # Default colormap name 'viridis' assumed; the literal was lost in extraction.
    colors = [cm.get_cmap(cmap)(x) for x in np.linspace(start, stop, n)]
    colors = [(r, g, b, alpha) for r, g, b, _ in colors]
    if return_hex:
        colors = rgb_color_list_to_hex(colors)
    return colors
Return n-length list of RGBa colors from the passed colormap name and alpha. Parameters ---------- n : int number of colors cmap : string name of a colormap start : float where to start in the colorspace stop : float where to end in the colorspace alpha : float opacity, the alpha channel for the RGBa colors return_hex : bool if True, convert RGBa colors to a hexadecimal string Returns ------- colors : list
3,330
def check_known_host(user=None, hostname=None, key=None, fingerprint=None,
                     config=None, port=None, fingerprint_hash_type=None):
    # Body reconstructed from the string literals that survived extraction
    # ('status', 'error', '/etc/ssh/ssh_known_hosts', '.ssh/known_hosts',
    # 'add'/'exists'/'update'); the helper call is assumed.
    if not hostname:
        return {'status': 'error',
                'error': 'hostname argument required'}
    if not user:
        config = config or '/etc/ssh/ssh_known_hosts'
    else:
        config = config or '.ssh/known_hosts'
    known_host_entries = get_known_host_entries(
        user=user, hostname=hostname, config=config, port=port,
        fingerprint_hash_type=fingerprint_hash_type)
    known_keys = [h['key'] for h in known_host_entries] if known_host_entries else []
    known_fingerprints = [h['fingerprint'] for h in known_host_entries] \
        if known_host_entries else []
    if not known_host_entries:
        return 'add'
    if key:
        return 'exists' if key in known_keys else 'update'
    elif fingerprint:
        return 'exists' if fingerprint in known_fingerprints else 'update'
    else:
        return 'exists'
Check the record in known_hosts file, either by its value or by fingerprint (it's enough to set up either key or fingerprint, you don't need to set up both). If provided key or fingerprint doesn't match with stored value, return "update", if no value is found for a given host, return "add", otherwise return "exists". If neither key, nor fingerprint is defined, then additional validation is not performed. CLI Example: .. code-block:: bash salt '*' ssh.check_known_host <user> <hostname> key='AAAA...FAaQ=='
3,331
def send(*args, **kwargs):
    queue_flag = kwargs.pop("queue", False)
    now_flag = kwargs.pop("now", False)
    assert not (queue_flag and now_flag), \
        "'queue' and 'now' cannot both be True."
    if queue_flag:
        return queue(*args, **kwargs)
    elif now_flag:
        return send_now(*args, **kwargs)
    else:
        if QUEUE_ALL:
            return queue(*args, **kwargs)
        else:
            return send_now(*args, **kwargs)
A basic interface around both queue and send_now. This honors a global flag NOTIFICATION_QUEUE_ALL that helps determine whether all calls should be queued or not. A per call ``queue`` or ``now`` keyword argument can be used to always override the default global behavior.
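A hedged usage sketch; the positional arguments are assumed from the typical notification API, only the queue/now keywords are documented above:
    send(users, "friend_request", {"from_user": sender}, queue=True)  # always queue
    send(users, "friend_request", {"from_user": sender}, now=True)    # send immediately
    send(users, "friend_request", {"from_user": sender})              # obey QUEUE_ALL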
3,332
def get_es_label(obj, def_obj):
    # The 'label' key is inferred from the function's purpose and the
    # surviving "no_label" default; the es_defs lookup keys were lost in
    # extraction and are marked '...'.
    label_flds = LABEL_FIELDS
    if def_obj.es_defs.get('...'):
        label_flds = def_obj.es_defs['...'] + LABEL_FIELDS
    try:
        for label in label_flds:
            if def_obj.cls_defs.get(label):
                obj['label'] = def_obj.cls_defs[label][0]
                break
        if not obj.get('label'):
            obj['label'] = def_obj.__class__.__name__.split("_")[-1]
    except AttributeError:
        if def_obj.get('...'):
            obj['label'] = def_obj['...'][-1].value[-1]
        else:
            obj['label'] = "no_label"
    return obj
Returns object with label for an object that goes into the elacticsearch 'label' field args: obj: data object to update def_obj: the class instance that has defintion values
3,333
def p_param_def_type(p):
    if p[2] is not None:
        api.check.check_type_is_explicit(p.lineno(1), p[1], p[2])
    p[0] = make_param_decl(p[1], p.lineno(1), p[2])
param_def : ID typedef
3,334
def save_stream(self, key, binary=False):
    s = io.BytesIO() if binary else io.StringIO()
    yield s
    self.save_value(key, s.getvalue())
Return a managed file-like object into which the calling code can write arbitrary data. :param key: :return: A managed stream-like object
3,335
def get_placeholders(self, format_string):
    placeholders = set()
    for token in self.tokens(format_string):
        if token.group("placeholder"):
            placeholders.add(token.group("key"))
        elif token.group("command"):
            commands = dict(parse_qsl(token.group("command")))
            if_ = commands.get("if")
            if if_:
                placeholders.add(Condition(if_).variable)
    return placeholders
Parses the format_string and returns a set of placeholders.
3,336
def reset(self, value=None):
    if value is None:
        value = time.clock()
    self.start = value
    if self.value_on_reset:
        self.value = self.value_on_reset
Resets the start time of the interval to now or the specified value.
3,337
def featurewise_norm(x, mean=None, std=None, epsilon=1e-7):
    if mean:
        x = x - mean
    if std:
        x = x / (std + epsilon)
    return x
Normalize every pixels by the same given mean and std, which are usually compute from all examples. Parameters ----------- x : numpy.array An image with dimension of [row, col, channel] (default). mean : float Value for subtraction. std : float Value for division. epsilon : float A small position value for dividing standard deviation. Returns ------- numpy.array A processed image.
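For instance, normalizing an array with a precomputed dataset mean/std (values here are illustrative):
    import numpy as np

    x = np.arange(6, dtype=np.float32).reshape(2, 3)
    x_norm = featurewise_norm(x, mean=x.mean(), std=x.std())
    print(x_norm.mean(), x_norm.std())  # ~0.0 and ~1.0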
3,338
def _getStrippedValue(value, strip):
    if strip is None:
        value = value.strip()
    elif isinstance(strip, str):
        value = value.strip(strip)
    elif strip is False:
        pass
    return value
Like the strip() string method, except the strip argument describes different behavior: If strip is None, whitespace is stripped. If strip is a string, the characters in the string are stripped. If strip is False, nothing is stripped.
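The three strip modes, assuming the helper above is importable:
    print(_getStrippedValue("  hi  ", None))   # 'hi'     (whitespace stripped)
    print(_getStrippedValue("xxhixx", "x"))    # 'hi'     (given characters stripped)
    print(_getStrippedValue("  hi  ", False))  # '  hi  ' (nothing stripped)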
3,339
def clean_helper(B, obj, clean_func):
    try:
        clean_func(obj)
    except B.validation_error() as e:
        fields = B.detect_uniqueness_error(e)
        missing = B.detect_missing_relations(obj, e)
        return fields, missing
    return (None, None)
Clean object, intercepting and collecting any missing-relation or unique-constraint errors and returning the relevant resource ids/fields. Returns: - tuple: (<dict of non-unique fields>, <dict of missing refs>)
3,340
def _generate_event_listener_caller(executables: List[str]) -> LockEventListener:
    def event_listener_caller(key: str):
        for executable in executables:
            try:
                process = subprocess.Popen([executable, key],
                                           stderr=subprocess.PIPE,
                                           stdout=subprocess.PIPE)
                output, stderr = process.communicate()
                if len(stderr) > 0:
                    logger.info(f"stderr from executing \"{executable}\": "
                                f"{stderr.decode().strip()}")
                if process.returncode != 0:
                    logger.error(f"Error when executing \"{executable}\": "
                                 f"return code was {process.returncode}")
            except OSError as e:
                common_error_string = f"Could not execute \"{executable}\":"
                if e.errno == errno.ENOEXEC:
                    logger.warning(f"{common_error_string} {e} "
                                   f"(perhaps the executable needs a shebang?)")
                else:
                    logger.warning(f"{common_error_string} {e}")
    return event_listener_caller
TODO :param executables: :return:
3,341
def skip(self, content):
    if self.optional(content):
        v = content.value
        if v is None:
            return True
        if isinstance(v, (list, tuple)) and not v:
            return True
    return False
Get whether to skip this I{content}. Should be skipped when the content is optional and value is either None or an empty list. @param content: Content to skip. @type content: L{Object} @return: True if content is to be skipped. @rtype: bool
3,342
def log_combinations(n, counts, name="log_combinations"):
    with tf.name_scope(name):
        n = tf.convert_to_tensor(value=n, name="n")
        counts = tf.convert_to_tensor(value=counts, name="counts")
        total_permutations = tf.math.lgamma(n + 1)
        counts_factorial = tf.math.lgamma(counts + 1)
        redundant_permutations = tf.reduce_sum(input_tensor=counts_factorial,
                                               axis=[-1])
        return total_permutations - redundant_permutations
Multinomial coefficient. Given `n` and `counts`, where `counts` has last dimension `k`, we compute the multinomial coefficient as: ```n! / sum_i n_i!``` where `i` runs over all `k` classes. Args: n: Floating-point `Tensor` broadcastable with `counts`. This represents `n` outcomes. counts: Floating-point `Tensor` broadcastable with `n`. This represents counts in `k` classes, where `k` is the last dimension of the tensor. name: A name for this operation (optional). Returns: `Tensor` representing the multinomial coefficient between `n` and `counts`.
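A NumPy/SciPy analogue of the computation above, checked on a small case:
    import numpy as np
    from scipy.special import gammaln

    def log_comb_np(n, counts):
        # lgamma(n+1) - sum_i lgamma(counts_i + 1), same formula as the TF version
        return gammaln(n + 1) - np.sum(gammaln(np.asarray(counts) + 1), axis=-1)

    # 5! / (2! * 3!) = 10, so exp of the result should be ~10
    print(np.exp(log_comb_np(5.0, [2.0, 3.0])))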
3,343
def report_error(title=None, data={}, caught=None, is_fatal=False):
    # Several literals (dict keys, default status/code, title prefixes) were
    # lost in extraction; '...' marks the gaps and the prefix wording below
    # is assumed.
    status, code = '...', '...'
    if '...' in data:
        status = data['...'].get('...', status)
        code = data['...'].get('...', code)
        title_details = "%s %s %s" % (ApiPool().current_server_name, status, code)
    else:
        title_details = "%s %s()" % (ApiPool().current_server_name, fname)
    if is_fatal:
        title_details = 'FATAL CRASH: %s' % title_details
    else:
        title_details = 'NON-FATAL ERROR: %s' % title_details
    if title:
        title = "%s: %s" % (title_details, title)
    else:
        title = title_details
    global error_reporter
    log.info("Reporting crash...")
    try:
        error_reporter(title, json.dumps(data, sort_keys=True, indent=4))
    except Exception as e:
        log.error("Failed to send email report: %s" % str(e))
Format a crash report and send it somewhere relevant. There are two types of crashes: fatal crashes (backend errors) or non-fatal ones (just reporting a glitch, but the api call did not fail)
3,344
def get_response_handler(self):
    # Assertion-message wording assumed; the literal was lost in extraction.
    assert self.response_handler is not None, (
        '%s must define a `response_handler` attribute'
        % self.__class__.__name__)
    return self.response_handler(self, **self.get_response_handler_params())
Return the Endpoints defined :attr:`Endpoint.response_handler`. :returns: A instance of the Endpoint specified :class:`ResonseHandler`. :rtype: :class:`ResponseHandler`
3,345
def load_HEP_data(
    ROOT_filename="output.root",
    tree_name="nominal",
    maximum_number_of_events=None
):
    # py2 trailing-comma print converted to py3 print(..., end="").
    ROOT_file = open_ROOT_file(ROOT_filename)
    tree = ROOT_file.Get(tree_name)
    number_of_events = tree.GetEntries()
    data = datavision.Dataset()
    progress = shijian.Progress()
    progress.engage_quick_calculation_mode()
    number_of_events_loaded = 0
    log.info("")
    index = 0
    for event in tree:
        if maximum_number_of_events is not None and \
                number_of_events_loaded >= int(maximum_number_of_events):
            log.info(
                "loaded maximum requested number of events "
                "({maximum_number_of_events})\r".format(
                    maximum_number_of_events=maximum_number_of_events
                )
            )
            break
        print(progress.add_datum(fraction=(index + 2) / number_of_events),
              end="")
        if select_event(event):
            index += 1
            data.variable(index=index, name="el_1_pt", value=event.el_pt[0])
            number_of_events_loaded += 1
    log.info("")
    return data
Load HEP data and return dataset.
3,346
def refresh_ip(self, si, logger, session, vcenter_data_model, resource_model,
               cancellation_context, app_request_json):
    self._do_not_run_on_static_vm(app_request_json=app_request_json)
    default_network = VMLocation.combine(
        [vcenter_data_model.default_datacenter,
         vcenter_data_model.holding_network])
    match_function = self.ip_manager.get_ip_match_function(
        self._get_ip_refresh_ip_regex(resource_model.vm_custom_params))
    timeout = self._get_ip_refresh_timeout(resource_model.vm_custom_params)
    vm = self.pyvmomi_service.find_by_uuid(si, resource_model.vm_uuid)
    ip_res = self.ip_manager.get_ip(vm, default_network, match_function,
                                    cancellation_context, timeout, logger)
    if ip_res.reason == IpReason.Timeout:
        # Error-message wording assumed; the literal was lost in extraction.
        raise ValueError('IP address of VM {0} could not be obtained within '
                         '{1} seconds'.format(resource_model.fullname, timeout))
    if ip_res.reason == IpReason.Success:
        self._update_resource_address_with_retry(
            session=session,
            resource_name=resource_model.fullname,
            ip_address=ip_res.ip_address)
        return ip_res.ip_address
Refreshes IP address of virtual machine and updates Address property on the resource :param vim.ServiceInstance si: py_vmomi service instance :param logger: :param vCenterShell.driver.SecureCloudShellApiSession session: cloudshell session :param GenericDeployedAppResourceModel resource_model: UUID of Virtual Machine :param VMwarevCenterResourceModel vcenter_data_model: the vcenter data model attributes :param cancellation_context:
3,347
def _npy_num2fits(d, table_type='binary', write_bitcols=False):
    # Literals restored from the fitsio conventions: 'S'/'U' dtype check
    # (confirmed by the surviving error message), 'binary'/'ascii' table
    # types, 'b1' -> 'X' bit columns, '%d%s' count prefix, '(d1,d2,...)' dims.
    dim = None
    name = d[0]
    npy_dtype = d[1][1:]
    if npy_dtype[0] == 'S' or npy_dtype[0] == 'U':
        raise ValueError("got S or U type: use _npy_string2fits")
    if npy_dtype not in _table_npy2fits_form:
        raise ValueError("unsupported type '%s'" % npy_dtype)
    if table_type == 'binary':
        form = _table_npy2fits_form[npy_dtype]
    else:
        form = _table_npy2fits_form_ascii[npy_dtype]
    if len(d) > 2:
        if table_type == 'ascii':
            raise ValueError(
                "Ascii table columns must be scalar, got %s" % str(d))
        if write_bitcols and npy_dtype == 'b1':
            form = 'X'
        if isinstance(d[2], tuple):
            count = reduce(lambda x, y: x*y, d[2])
            form = '%d%s' % (count, form)
            if len(d[2]) > 1:
                dim = list(reversed(d[2]))
                dim = [str(e) for e in dim]
                dim = '(' + ','.join(dim) + ')'
        else:
            count = d[2]
            form = '%d%s' % (count, form)
    return name, form, dim
d is the full element from the descr For vector,array columns the form is the total counts followed by the code. For array columns with dimension greater than 1, the dim is set to (dim1, dim2, ...) So it is treated like an extra dimension
3,348
def hwstatus_send(self, Vcc, I2Cerr, force_mavlink1=False):
    return self.send(self.hwstatus_encode(Vcc, I2Cerr),
                     force_mavlink1=force_mavlink1)
Status of key hardware Vcc : board voltage (mV) (uint16_t) I2Cerr : I2C error count (uint8_t)
3,349
def delta(x_i, j, s, N):
    flag = j == EMMMixPLAggregator.c(x_i, s)
    if flag and s < len(x_i):
        return 1
    elif s == N:
        found_equal = False
        for l in range(len(x_i)):
            if j == EMMMixPLAggregator.c(x_i, l):
                found_equal = True
                break
        if not found_equal:
            return 1
    return 0
delta_i_j_s
3,350
def setAnimation(self, obj, animation, transition=None, force=False):
    self.ensureModelData(obj)
    data = obj._modeldata
    if animation not in self.modeldata["animations"]:
        raise ValueError("There is no animation of name %s for model %s"
                         % (animation, self.modelname))
    if data.get("_anidata", {}).get("anitype", None) == animation and not force:
        return
    anim = self.modeldata["animations"][animation]
    if transition is None:
        transition = anim.default_jt
    anim.startAnimation(data, transition)
    if "_anidata" not in data:
        data["_anidata"] = {}
    adata = data["_anidata"]
    adata["anitype"] = animation
    if "_schedfunc" in adata:
        pyglet.clock.unschedule(adata["_schedfunc"])

    def schedfunc(*args):
        anim.tickEntity(data)

    pyglet.clock.schedule_interval(
        schedfunc, 1. / (anim.kps if anim.atype == "keyframes" else 60))
    adata["_schedfunc"] = schedfunc
Sets the animation to be used by the object. See :py:meth:`Actor.setAnimation()` for more information.
3,351
def get_detail_view(self, request, object, opts=None):
    view = self.get_view(request, self.view_class, opts)
    view.object = object
    return view
Instantiates and returns the view class that will generate the actual context for this plugin.
3,352
def coef_(self):
    # Literals restored from the xgboost sklearn wrapper: 'gblinear' booster,
    # 'json' dump format, 'weight' key and 'n_classes_' attribute.
    if getattr(self, 'booster', None) is not None and self.booster != 'gblinear':
        raise AttributeError(
            'Coefficients are not defined for Booster type {}'
            .format(self.booster))
    b = self.get_booster()
    coef = np.array(json.loads(b.get_dump(dump_format='json')[0])['weight'])
    n_classes = getattr(self, 'n_classes_', None)
    if n_classes is not None:
        if n_classes > 2:
            assert len(coef.shape) == 1
            assert coef.shape[0] % n_classes == 0
            coef = coef.reshape((n_classes, -1))
    return coef
Coefficients property .. note:: Coefficients are defined only for linear learners Coefficients are only defined when the linear model is chosen as base learner (`booster=gblinear`). It is not defined for other base learner types, such as tree learners (`booster=gbtree`). Returns ------- coef_ : array of shape ``[n_features]`` or ``[n_classes, n_features]``
3,353
def url_for(self, endpoint, explicit=False, **items):
    # Dotted namespace separator assumed; the format literal was lost in
    # extraction.
    if not explicit and not endpoint.startswith(self._namespace):
        endpoint = '%s.%s' % (self._namespace, endpoint)
    return self._plugin.url_for(endpoint, **items)
Returns a valid XBMC plugin URL for the given endpoint name. endpoint can be the literal name of a function, or it can correspond to the name keyword arguments passed to the route decorator. Currently, view names must be unique across all plugins and modules. There are not namespace prefixes for modules.
3,354
def matchToString(dnaMatch, read1, read2, matchAmbiguous=True, indent='',
                  offsets=None):
    # Dict keys are restored from the compareDNAReads result structure; the
    # human-readable label literals were lost in extraction and are
    # approximated here.
    match = dnaMatch['match']
    identicalMatchCount = match['identicalMatchCount']
    ambiguousMatchCount = match['ambiguousMatchCount']
    gapMismatchCount = match['gapMismatchCount']
    gapGapMismatchCount = match['gapGapMismatchCount']
    nonGapMismatchCount = match['nonGapMismatchCount']
    if offsets:
        len1 = len2 = len(offsets)
    else:
        len1, len2 = map(len, (read1, read2))
    result = []
    append = result.append
    append(countPrint('%sExact matches' % indent, identicalMatchCount,
                      len1, len2))
    append(countPrint('%sAmbiguous matches' % indent, ambiguousMatchCount,
                      len1, len2))
    if ambiguousMatchCount and identicalMatchCount:
        anyMatchCount = identicalMatchCount + ambiguousMatchCount
        append(countPrint('%sExact or ambiguous matches' % indent,
                          anyMatchCount, len1, len2))
    mismatchCount = (gapMismatchCount + gapGapMismatchCount +
                     nonGapMismatchCount)
    append(countPrint('%sMismatches' % indent, mismatchCount, len1, len2))
    conflicts = 'conflicts' if matchAmbiguous else 'conflicts or ambiguities'
    append(countPrint('%s  Not involving gaps (i.e., %s)' % (indent, conflicts),
                      nonGapMismatchCount, len1, len2))
    append(countPrint('%s  Involving a gap in one sequence' % indent,
                      gapMismatchCount, len1, len2))
    append(countPrint('%s  Involving a gap in both sequences' % indent,
                      gapGapMismatchCount, len1, len2))
    for read, key in zip((read1, read2), ('read1', 'read2')):
        append('%s  Id: %s' % (indent, read.id))
        length = len(read)
        append('%s    Length: %d' % (indent, length))
        gapCount = len(dnaMatch[key]['gapOffsets'])
        append(countPrint('%s    Gaps' % indent, gapCount, length))
        if gapCount:
            append('%s    Gap locations (1-based): %s' % (
                indent,
                ', '.join(map(lambda offset: str(offset + 1),
                              sorted(dnaMatch[key]['gapOffsets'])))))
        ambiguousCount = len(dnaMatch[key]['ambiguousOffsets'])
        append(countPrint('%s    Ambiguous' % indent, ambiguousCount, length))
        extraCount = dnaMatch[key]['extraCount']
        if extraCount:
            append(countPrint('%s    Extra nucleotides at end' % indent,
                              extraCount, length))
    return '\n'.join(result)
Format a DNA match as a string. @param dnaMatch: A C{dict} returned by C{compareDNAReads}. @param read1: A C{Read} instance or an instance of one of its subclasses. @param read2: A C{Read} instance or an instance of one of its subclasses. @param matchAmbiguous: If C{True}, ambiguous nucleotides that are possibly correct were counted as actually being correct. Otherwise, the match was done strictly, insisting that only non-ambiguous nucleotides could contribute to the matching nucleotide count. @param indent: A C{str} to indent all returned lines with. @param offsets: If not C{None}, a C{set} of offsets of interest that were only considered when making C{match}. @return: A C{str} describing the match.
3,355
def configure(self, component, all_dependencies):
    # Only the first statements of this function survived extraction; the
    # rest of the body is not recoverable.
    r = {}
    builddir = self.buildroot
    ...
Ensure all config-time files have been generated. Return a dictionary of generated items.
3,356
def sort_by_list_order(sortlist, reflist, reverse=False, fltr=False, slemap=None):
    def keyfunc(entry):
        if slemap is not None:
            rle = slemap(entry)
        else:
            # fix: fall back to the entry itself when no slemap is given
            rle = entry
        if rle in reflist:
            return reflist.index(rle)
        else:
            # entries absent from reflist sort after all reference entries
            return sortlist.index(entry) + len(reflist)

    if fltr:
        if slemap:
            sortlist = filter(lambda x: slemap(x) in reflist, sortlist)
        else:
            sortlist = filter(lambda x: x in reflist, sortlist)
    return sorted(sortlist, key=keyfunc, reverse=reverse)
Sort a list according to the order of entries in a reference list. Parameters ---------- sortlist : list List to be sorted reflist : list Reference list defining sorting order reverse : bool, optional (default False) Flag indicating whether to sort in reverse order fltr : bool, optional (default False) Flag indicating whether to filter `sortlist` to remove any entries that are not in `reflist` slemap : function or None, optional (default None) Function mapping a sortlist entry to the form of an entry in `reflist` Returns ------- sortedlist : list Sorted (and possibly filtered) version of sortlist
3,357
def load_modules(self, data=None, proxy=None):
    self.functions = self.wrapper
    self.utils = salt.loader.utils(self.opts)
    self.serializers = salt.loader.serializers(self.opts)
    locals_ = salt.loader.minion_mods(self.opts, utils=self.utils)
    self.states = salt.loader.states(self.opts, locals_, self.utils,
                                     self.serializers)
    self.rend = salt.loader.render(self.opts, self.functions)
Load up the modules for remote compilation via ssh
3,358
def remove_instance(self):
    # Attribute name '_related_remover' assumed; the literal was lost in
    # extraction.
    with fields.FieldLock(self.related_field):
        related_pks = self()
        for pk in related_pks:
            related_instance = self.related_field._model(pk)
            related_field = getattr(related_instance, self.related_field.name)
            remover = getattr(related_field, '_related_remover', None)
            if remover is not None:
                getattr(related_field, remover)(self.instance._pk)
            else:
                related_field.delete()
Remove the instance from the related fields (delete the field if it's a simple one, or remove the instance from the field if it's a set/list/ sorted_set)
3,359
def artifacts(self):
    if self._artifact_manager is None:
        self._artifact_manager = ArtifactManager(session=self._session)
    return self._artifact_manager
Property for accessing :class:`ArtifactManager` instance, which is used to manage artifacts. :rtype: yagocd.resources.artifact.ArtifactManager
3,360
def update_points(self):
    n = max(8, min(72, int(2 * sqrt(self.r_x + self.r_y))))
    d = pi * 2 / n
    x, y, r_x, r_y = self.x, self.y, self.r_x, self.r_y
    ps = []
    for i in range(n):
        ps += [(x + r_x * sin(d * i)), (y + r_y * cos(d * i))]
    self.points = tuple(ps)
Polygon approximation of the ellipse: up to a 72-gon.
3,361
def _raise_if_null(self, other):
    if self.is_null():
        raise ValueError("Cannot compare null Intervals!")
    if hasattr(other, 'is_null') and other.is_null():
        raise ValueError("Cannot compare null Intervals!")
:raises ValueError: if either self or other is a null Interval
3,362
def populate_initial_services():
    # The concrete service entries, Esri endpoint URL and log-format literals
    # were lost in extraction; placeholders mark their positions.
    services_list = (
        ('<title>', '<abstract>', '<type>', '<url>'),
        # ... five more entries elided ...
    )
    esri_endpoint = '<esri endpoint url>'
    LOGGER.debug('Creating services from endpoint %s' % esri_endpoint)
    create_services_from_endpoint(esri_endpoint)
    for service in services_list:
        LOGGER.debug('Creating service %s' % service[0])
        service = Service(
            title=service[0],
            abstract=service[1],
            type=service[2],
            url=service[3]
        )
        service.save()
Populate a fresh installed Hypermap instances with basic services.
3,363
def _request(self, req_type, url, **kwargs):
    logger.debug('%s %s' % (req_type, url))  # log format assumed
    result = self.session.request(req_type, url, **kwargs)
    try:
        result.raise_for_status()
    except requests.HTTPError:
        error = result.text
        try:
            error = json.loads(error)
        except ValueError:
            pass
        if result.status_code in (401, 403):
            error_class = LuminosoAuthError
        elif result.status_code in (400, 404, 405):
            error_class = LuminosoClientError
        elif result.status_code >= 500:
            error_class = LuminosoServerError
        else:
            error_class = LuminosoError
        raise error_class(error)
    return result
Make a request via the `requests` module. If the result has an HTTP error status, convert that to a Python exception.
3,364
def auto_no_thousands(self):
    # Unit-name literals assumed from the attribute names (lost in extraction).
    if self._value >= 1000000000000:
        return self.TiB, 'TiB'
    if self._value >= 1000000000:
        return self.GiB, 'GiB'
    if self._value >= 1000000:
        return self.MiB, 'MiB'
    if self._value >= 1000:
        return self.KiB, 'KiB'
    else:
        return self.B, 'B'
Like self.auto but calculates the next unit if >999.99.
3,365
def activate(self, tourfile=None, minsize=10000, backuptour=True):
    # array.array typecode 'i' restored; everything else as in the original.
    if tourfile and (not op.exists(tourfile)):
        logging.debug("Tourfile `{}` not found".format(tourfile))
        tourfile = None
    if tourfile:
        logging.debug("Importing tourfile `{}`".format(tourfile))
        tour, tour_o = iter_last_tour(tourfile, self)
        self.active = set(tour)
        tig_to_idx = self.tig_to_idx
        tour = [tig_to_idx[x] for x in tour]
        signs = sorted([(x, FF[o]) for (x, o) in zip(tour, tour_o)])
        _, signs = zip(*signs)
        self.signs = np.array(signs, dtype=int)
        if backuptour:
            backup(tourfile)
        tour = array.array('i', tour)
    else:
        self.report_active()
        while True:
            logdensities = self.calculate_densities()
            lb, ub = outlier_cutoff(logdensities.values())
            logging.debug("Log10(link_densities) ~ [{}, {}]".format(lb, ub))
            remove = set(x for x, d in logdensities.items()
                         if (d < lb and self.tig_to_size[x] < minsize * 10))
            if remove:
                self.active -= remove
                self.report_active()
            else:
                break
        logging.debug("Remove contigs with size < {}".format(minsize))
        self.active = set(x for x in self.active
                          if self.tig_to_size[x] >= minsize)
        tour = range(self.N)
        tour = array.array('i', tour)
        self.flip_all(tour)
    self.report_active()
    self.tour = tour
    return tour
Select contigs in the current partition. This is the setup phase of the algorithm, and supports two modes: - "de novo": This is useful at the start of a new run where no tours available. We select the strong contigs that have significant number of links to other contigs in the partition. We build a histogram of link density (# links per bp) and remove the contigs that appear as outliers. The orientations are derived from the matrix decomposition of the pairwise strandedness matrix O. - "hotstart": This is useful when there was a past run, with a given tourfile. In this case, the active contig list and orientations are derived from the last tour in the file.
3,366
def transformer_clean():
    hparams = transformer_base_v2()
    hparams.label_smoothing = 0.0
    hparams.layer_prepostprocess_dropout = 0.0
    hparams.attention_dropout = 0.0
    hparams.relu_dropout = 0.0
    hparams.max_length = 0
    return hparams
No dropout, label smoothing, max_length.
3,367
def image_bytes(b, filename=None, inline=1, width='auto', height='auto',
                preserve_aspect_ratio=None):
    # The 'auto' defaults and "Unnamed file" fallback follow the docstring;
    # the dict key names are assumed from the iTerm2 escape-sequence
    # protocol. The original's final statement, which builds the escape
    # string from `data`, was truncated in extraction.
    if preserve_aspect_ratio is None:
        if width != 'auto' and height != 'auto':
            preserve_aspect_ratio = False
        else:
            preserve_aspect_ratio = True
    data = {
        'name': base64.b64encode((filename or 'Unnamed file').encode()).decode(),
        'inline': inline,
        'size': len(b),
        'base64': base64.b64encode(b).decode(),
        'width': width,
        'height': height,
        'preserveAspectRatio': int(preserve_aspect_ratio),
    }
Return a bytes string that displays image given by bytes b in the terminal If filename=None, the filename defaults to "Unnamed file" width and height are strings, following the format N: N character cells. Npx: N pixels. N%: N percent of the session's width or height. 'auto': The image's inherent size will be used to determine an appropriate dimension. preserve_aspect_ratio sets whether the aspect ratio of the image is preserved. The default (None) is True unless both width and height are set. See https://www.iterm2.com/documentation-images.html
3,368
def decrease_user_property(self, user_id, property_name, value=0,
                           headers=None, endpoint_url=None):
    endpoint_url = endpoint_url or self._endpoint_url
    url = (endpoint_url + "/users/" + user_id + "/properties/" +
           property_name + "/decrease/" + value.__str__())
    headers = headers or self._default_headers(content_type="")
    response = requests.post(url, headers=headers)
    return response
Decrease a user's property by a value. :param str user_id: identified user's ID :param str property_name: user property name to increase :param number value: amount by which to decrease the property :param dict headers: custom request headers (if isn't set default values are used) :param str endpoint_url: where to send the request (if isn't set default value is used) :return: Response
3,369
def to_dict(self, properties=None):
    # The skip set {'aids', 'sids', 'synonyms'} is restored from the
    # docstring; the {'atoms', 'bonds'} set is assumed from the PubChemPy
    # Compound structure.
    if not properties:
        skip = {'aids', 'sids', 'synonyms'}
        properties = [p for p in dir(Compound)
                      if isinstance(getattr(Compound, p), property)
                      and p not in skip]
    return {p: [i.to_dict() for i in getattr(self, p)]
            if p in {'atoms', 'bonds'} else getattr(self, p)
            for p in properties}
Return a dictionary containing Compound data. Optionally specify a list of the desired properties. synonyms, aids and sids are not included unless explicitly specified using the properties parameter. This is because they each require an extra request.
3,370
def to_n_ref(self, fill=0, dtype='i1'):
    # Default dtype 'i1' (int8) restored from the docstring.
    out = np.empty(self.shape[:-1], dtype=dtype)
    np.sum(self.values == 0, axis=-1, out=out)
    if fill != 0:
        m = self.is_missing()
        out[m] = fill
    if self.mask is not None:
        out[self.mask] = fill
    return out
Transform each genotype call into the number of reference alleles. Parameters ---------- fill : int, optional Use this value to represent missing calls. dtype : dtype, optional Output dtype. Returns ------- out : ndarray, int8, shape (n_variants, n_samples) Array of ref alleles per genotype call. Notes ----- By default this function returns 0 for missing genotype calls **and** for homozygous non-reference genotype calls. Use the `fill` argument to change how missing calls are represented. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[2, 2], [-1, -1]]]) >>> g.to_n_ref() array([[2, 1], [1, 0], [0, 0]], dtype=int8) >>> g.to_n_ref(fill=-1) array([[ 2, 1], [ 1, 0], [ 0, -1]], dtype=int8) >>> v = g[:, 0] >>> v <GenotypeVector shape=(3, 2) dtype=int64> 0/0 0/2 2/2 >>> v.to_n_ref() array([2, 1, 0], dtype=int8)
3,371
def remove_targets(self, type, kept=None):
    if kept is None:
        kept = [i for i, x in enumerate(self._targets)
                if not isinstance(x, type)]
    if len(kept) == len(self._targets):
        return self
    self._targets = [self._targets[x] for x in kept]
    self._labels = [self._labels[x] for x in kept]
    if not self._groups:
        return
    index_map = {
        o_idx: n_idx
        for n_idx, o_idx in zip(range(len(self._targets)), kept)
    }
    kept = set(kept)
    for idx, grp in enumerate(self._groups):
        self._groups[idx] = _sos_group(
            [index_map[x] for x in grp._indexes if x in kept],
            [y for x, y in zip(grp._indexes, grp._labels) if x in kept]
        ).set(**grp._dict)
    return self
Remove targets of certain type
3,372
def tile_to_path(self, tile):
    return os.path.join(self.cache_path, self.service, tile.path())
Return the full path to a tile.
3,373
def create_poll(title, options, multi=True, permissive=True, captcha=False,
                dupcheck='normal'):
    # JSON keys mirror the parameter names; the API URL and the 'normal'
    # dupcheck default are assumed from the Strawpoll v2 API.
    query = {
        'title': title,
        'options': options,
        'multi': multi,
        'permissive': permissive,
        'captcha': captcha,
        'dupcheck': dupcheck
    }
    return StrawPoll(requests.post('https://strawpoll.me/api/v2/polls',
                                   data=json.dumps(query)))
Create a strawpoll. Example: new_poll = strawpy.create_poll('Is Python the best?', ['Yes', 'No']) :param title: :param options: :param multi: :param permissive: :param captcha: :param dupcheck: :return: strawpy.Strawpoll object
3,374
def generate_namelist_file(self, rapid_namelist_file):
    # The 'w' open mode, '&NL_namelist' header, '_'/'BS_' attribute prefixes
    # and the quoted-string write format are restored from the RAPIDpy
    # conventions (literals lost in extraction).
    log("Generating RAPID namelist file ...", "INFO")
    try:
        os.remove(rapid_namelist_file)
    except OSError:
        pass
    with open(rapid_namelist_file, 'w') as new_file:
        new_file.write('&NL_namelist\n')
        for attr, value in sorted(list(self.__dict__.items())):
            if not attr.startswith('_'):
                if attr.startswith('BS_'):
                    new_file.write("{0} = .{1}.\n"
                                   .format(attr, str(value).lower()))
                elif isinstance(value, int):
                    new_file.write("%s = %s\n" % (attr, value))
                else:
                    if value:
                        if os.name == "nt":
                            value = self._get_cygwin_path(value)
                        new_file.write("%s = '%s'\n" % (attr, value))
        new_file.write("/\n")
Generate rapid_namelist file. Parameters ---------- rapid_namelist_file: str Path of namelist file to generate from parameters added to the RAPID manager.
3,375
def get_extract_method(path):
    info_path = _get_info_path(path)
    info = _read_info(info_path)
    fname = info.get('original_fname', path) if info else path  # key assumed
    return _guess_extract_method(fname)
Returns `ExtractMethod` to use on resource at path. Cannot be None.
3,376
def instantiate(config):
    for handle, cfg in list(config["apps"].items()):
        if not cfg.get("enabled", True):
            continue
        app = get_application(handle)
        instances[app.handle] = app(cfg)
instantiate all registered vodka applications Args: config (dict or MungeConfig): configuration object
3,377
def _get_programs_dict():
    global __programs_dict
    if __programs_dict is not None:
        return __programs_dict
    d = __programs_dict = OrderedDict()
    for pkgname in COLLABORATORS_S:
        try:
            package = importlib.import_module(pkgname)
        except ImportError:
            continue
        path_ = os.path.join(os.path.split(package.__file__)[0], "scripts")
        bulk = a99.get_exe_info(path_, flag_protected=True)
        d[pkgname] = {"description": a99.get_obj_doc0(package), "exeinfo": bulk}
    return __programs_dict
Builds and returns programs dictionary This will have to import the packages in COLLABORATORS_S in order to get their absolute path. Returns: dictionary: {"packagename": [ExeInfo0, ...], ...} "packagename" examples: "f311.explorer", "numpy"
3,378
def scalarVectorDecorator(func):
    @wraps(func)
    def scalar_wrapper(*args, **kwargs):
        if numpy.array(args[1]).shape == () \
                and numpy.array(args[2]).shape == ():
            scalarOut = True
            args = (args[0], numpy.array([args[1]]), numpy.array([args[2]]))
        elif numpy.array(args[1]).shape == () \
                and not numpy.array(args[2]).shape == ():
            scalarOut = False
            args = (args[0], args[1] * numpy.ones_like(args[2]), args[2])
        elif not numpy.array(args[1]).shape == () \
                and numpy.array(args[2]).shape == ():
            scalarOut = False
            args = (args[0], args[1], args[2] * numpy.ones_like(args[1]))
        else:
            scalarOut = False
        result = func(*args, **kwargs)
        if scalarOut:
            return result[0]
        else:
            return result
    return scalar_wrapper
Decorator to return scalar outputs as a set
3,379
def get_en_words() -> Set[str]:
    pull_en_words()
    with open(config.EN_WORDS_PATH) as words_f:
        raw_words = words_f.readlines()
    en_words = set([word.strip().lower() for word in raw_words])
    NA_WORDS_IN_EN_DICT = set([
        "kore", "nani", "karri", "imi", "o", "yaw", "i", "bi", "aye", "imi",
        "ane", "kubba", "kab", "a-", "ad", "a", "mak", "selim", "ngai", "en",
        "yo", "wud", "mani", "yak", "manu", "ka-", "mong", "manga", "ka-",
        "mane", "kala", "name", "kayo", "kare", "laik", "bale", "ni", "rey",
        "bu", "re", "iman", "bom", "wam", "alu", "nan", "kure", "kuri", "wam",
        "ka", "ng", "yi", "na", "m", "arri", "e", "kele", "arri", "nga",
        "kakan", "ai", "ning", "mala", "ti", "wolk", "bo", "andi", "ken",
        "ba", "aa", "kun", "bini", "wo", "bim", "man", "bord", "al", "mah",
        "won", "ku", "ay", "belen", "wen", "yah", "muni", "bah", "di", "mm",
        "anu", "nane", "ma", "kum", "birri", "ray", "h", "kane", "mumu",
        "bi", "ah", "i-", "n", "mi", "bedman", "rud", "le", "babu", "da",
        "kakkak", "yun", "ande", "naw", "kam", "bolk", "woy", "u", "bi-",
    ])
    EN_WORDS_NOT_IN_EN_DICT = set(["screenprinting"])
    en_words = en_words.difference(NA_WORDS_IN_EN_DICT)
    en_words = en_words | EN_WORDS_NOT_IN_EN_DICT
    return en_words
Returns a list of English words which can be used to filter out code-switched sentences.
3,380
def load_stubs(self, log_mem=False):
    # The body between the memory-limit setup and the error branch was lost
    # in extraction; only the surviving statements are shown, and the error
    # message wording is assumed.
    if log_mem:
        import psutil
        process = psutil.Process(os.getpid())
        rss = process.memory_info().rss
    LOG_MEMORY_INT = 1000
    MEMORY_LIMIT = 1000.0

    def _add_stub_manually(_fname):
        # ... (lost) ...
        err = ("Memory usage {}, limit {}; entry {}, file {}"
               .format(rss, MEMORY_LIMIT, ii, _fname))
        self.log.error(err)
        raise RuntimeError(err)

    # ... (lost) ...
    return self.entries
Load all events in their `stub` (name, alias, etc only) form. Used in `update` mode.
3,381
def get_gammadot(F, mc, q, e):
    # convert chirp mass from solar masses to seconds
    mc *= SOLAR2S

    # total mass from chirp mass and mass ratio
    m = (((1+q)**2)/q)**(3/5) * mc

    dgdt = 6*np.pi*F * (2*np.pi*F*m)**(2/3) / (1-e**2) * \
        (1 + 0.25*(2*np.pi*F*m)**(2/3)/(1-e**2)*(26-15*e**2))

    return dgdt
Compute gamma dot from Barack and Cutler (2004).

:param F: Orbital frequency [Hz]
:param mc: Chirp mass of binary [Solar Mass]
:param q: Mass ratio of binary
:param e: Eccentricity of binary

:returns: dgamma/dt
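A usage sketch; it assumes the module defines SOLAR2S (the solar mass expressed in seconds, G*M_sun/c**3), whose value below is supplied only for illustration:

import numpy as np

SOLAR2S = 4.92549095e-06  # s; assumed module-level constant used by get_gammadot

# periastron-advance rate for a 1e8 M_sun chirp-mass binary at F = 10 nHz
dgdt = get_gammadot(F=1e-8, mc=1e8, q=1.0, e=0.1)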
3,382
def is_std_string(type_):
    if utils.is_str(type_):
        return type_ in string_equivalences
    # strip typedefs, references and cv-qualifiers before comparing
    type_ = remove_alias(type_)
    type_ = remove_reference(type_)
    type_ = remove_cv(type_)
    return type_.decl_string in string_equivalences
Returns True, if type represents C++ `std::string`, False otherwise.
3,383
def Write(self, packet):
    # prepend the HID report ID (0) before writing to the device
    out = bytearray([0] + packet)
    os.write(self.dev, out)
See base class.
3,384
def add_batch(self, nlive=500, wt_function=None, wt_kwargs=None,
              maxiter=None, maxcall=None, save_bounds=True,
              print_progress=True, print_func=None, stop_val=None):
    if maxcall is None:
        maxcall = sys.maxsize
    if maxiter is None:
        maxiter = sys.maxsize
    if wt_function is None:
        wt_function = weight_function
    if wt_kwargs is None:
        wt_kwargs = dict()
    if print_func is None:
        print_func = print_fn

    ncall, niter, n = self.ncall, self.it - 1, self.batch

    if maxcall > 0 and maxiter > 0:
        # derive the log-likelihood bounds for the new batch from the
        # combined results of all previous runs
        res = self.results
        lnz, lnzerr = res.logz[-1], res.logzerr[-1]
        logl_bounds = wt_function(res, wt_kwargs)
        for results in self.sample_batch(nlive_new=nlive,
                                         logl_bounds=logl_bounds,
                                         maxiter=maxiter,
                                         maxcall=maxcall,
                                         save_bounds=save_bounds):
            (worst, ustar, vstar, loglstar, nc,
             worst_it, boundidx, bounditer, eff) = results
            if worst >= 0:
                ncall += nc
                niter += 1
            results = (worst, ustar, vstar, loglstar, np.nan, np.nan,
                       lnz, lnzerr**2, np.nan, nc, worst_it,
                       boundidx, bounditer, eff, np.nan)
            if print_progress:
                print_func(results, niter, ncall, nbatch=n+1,
                           stop_val=stop_val,
                           logl_min=logl_bounds[0],
                           logl_max=logl_bounds[1])
        self.combine_runs()
Allocate an additional batch of (nested) samples based on the combined set of previous samples using the specified weight function. Parameters ---------- nlive : int, optional The number of live points used when adding additional samples in the batch. Default is `500`. wt_function : func, optional A cost function that takes a `Results` instance and returns a log-likelihood range over which a new batch of samples should be generated. The default function simply computes a weighted average of the posterior and evidence information content as:: weight = pfrac * pweight + (1. - pfrac) * zweight wt_kwargs : dict, optional Extra arguments to be passed to the weight function. maxiter : int, optional Maximum number of iterations allowed. Default is `sys.maxsize` (no limit). maxcall : int, optional Maximum number of likelihood evaluations allowed. Default is `sys.maxsize` (no limit). save_bounds : bool, optional Whether or not to save distributions used to bound the live points internally during dynamic live point allocations. Default is `True`. print_progress : bool, optional Whether to output a simple summary of the current run that updates each iteration. Default is `True`. print_func : function, optional A function that prints out the current state of the sampler. If not provided, the default :meth:`results.print_fn` is used. stop_val : float, optional The value of the stopping criteria to be passed to :meth:`print_func`. Used internally within :meth:`run_nested` to keep track of progress.
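A hedged usage sketch in the dynesty style (this method reads like dynesty's DynamicNestedSampler.add_batch); the Gaussian likelihood and prior transform below are stand-ins:

import numpy as np
from dynesty import DynamicNestedSampler

ndim = 3

def loglike(x):
    return -0.5 * np.sum(x**2)

def ptform(u):
    return 10.0 * (u - 0.5)  # map the unit cube to [-5, 5]

dsampler = DynamicNestedSampler(loglike, ptform, ndim)
dsampler.run_nested(maxbatch=0)   # baseline run only
dsampler.add_batch(nlive=250)     # then one extra batch of 250 live points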
3,385
def row(self):
    row = OrderedDict()
    # Keys reconstructed from the docstring; the original string literals
    # were stripped from this dump.
    row['retro_game_id'] = self.retro_game_id
    row['game_type'] = self.game_type
    row['game_type_des'] = self.game_type_des
    row['st_fl'] = self.st_fl
    row['regseason_fl'] = self.regseason_fl
    row['playoff_fl'] = self.playoff_fl
    row['local_game_time'] = self.local_game_time
    row['game_id'] = self.game_id
    row['home_team_id'] = self.home_team_id
    row['home_team_lg'] = self.home_team_lg
    row['away_team_id'] = self.away_team_id
    row['away_team_lg'] = self.away_team_lg
    row['home_team_name'] = self.home_team_name
    row['away_team_name'] = self.away_team_name
    row['home_team_name_full'] = self.home_team_name_full
    row['away_team_name_full'] = self.away_team_name_full
    row['interleague_fl'] = self.interleague_fl
    row['park_id'] = self.park_id
    row['park_name'] = self.park_name
    row['park_loc'] = self.park_loc
    return row
Game Dataset(Row) :return: { 'retro_game_id': Retrosheet Game id 'game_type': Game Type(S/R/F/D/L/W) 'game_type_des': Game Type Description (Spring Training or Regular Season or Wild-card Game or Divisional Series or LCS or World Series) 'st_fl': Spring Training FLAG(T or F) 'regseason_fl': Regular Season FLAG(T or F) 'playoff_fl': Play Off Flag(T or F) 'local_game_time': Game Time(UTC -5) 'game_id': Game Id 'home_team_id': Home Team Id 'home_team_lg': Home Team league(AL or NL) 'away_team_id': Away Team Id 'away_team_lg': Away Team league(AL or NL) 'home_team_name': Home Team Name 'away_team_name': Away Team Name 'home_team_name_full': Home Team Name(Full Name) 'away_team_name_full': Away Team Name(Full Name) 'interleague_fl': Inter League Flag(T or F) 'park_id': Park Id 'park_name': Park Name 'park_loc': Park Location }
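A sketch of how such rows might be written out; `games` is a hypothetical iterable of objects exposing .row():

import csv

games = [...]  # hypothetical: game objects exposing .row()
rows = [g.row() for g in games]
with open("games.tsv", "w", newline="") as f:
    writer = csv.DictWriter(f, fieldnames=list(rows[0].keys()), delimiter="\t")
    writer.writeheader()
    writer.writerows(rows)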
3,386
def check_coin_a_phrase_from(text):
    err = "misc.illogic.coin"
    # Message reconstructed; the middle of the original literal was lost
    # in extraction.
    msg = "You can't coin an existing phrase. Did you mean 'borrow'?"
    regex = "to coin a phrase from"
    return existence_check(text, [regex], err, msg, offset=1)
Check the text.
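For illustration, a minimal stand-in for `existence_check` (proselint's real helper may differ in return shape) that makes the check runnable on its own:

import re

def existence_check(text, regexes, err, msg, offset=0):
    # Report a (start, end, error-code, message, replacement) tuple per hit.
    errors = []
    for r in regexes:
        for m in re.finditer(r, text, flags=re.IGNORECASE):
            errors.append((m.start() + offset, m.end(), err, msg, None))
    return errors

check_coin_a_phrase_from("He tried to coin a phrase from Shakespeare.")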
3,387
def size(self):
    t = self._type
    # Type-name literals reconstructed from Solidity's elementary types;
    # the originals were stripped from this dump.
    if t.startswith('uint'):
        return int(t[len('uint'):])
    if t.startswith('int'):
        return int(t[len('int'):])
    if t == 'bool':
        return int(8)
    if t == 'address':
        return int(160)
    if t.startswith('bytes'):
        return int(t[len('bytes'):])
    return None
Return the size in bits.

Returns None if the size is not known.

Returns:
    int
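A hedged usage sketch against slither (the class and import path are assumed from that project); `size` is a property, so no call parentheses:

from slither.core.solidity_types.elementary_type import ElementaryType

ElementaryType("uint256").size  # 256
ElementaryType("address").size  # 160
ElementaryType("bool").size     # 8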
3,388
def _add_blockhash_to_state_changes(storage: SQLiteStorage, cache: BlockHashCache) -> None:
    batch_size = 50
    # Filter literals reconstructed; the originals were stripped from this
    # dump. They select ContractReceive* and ActionInitChain state changes
    # by their serialized `_type`.
    batch_query = storage.batch_query_state_changes(
        batch_size=batch_size,
        filters=[
            ('_type', 'raiden.transfer.state_change.ContractReceive%'),
            ('_type', 'raiden.transfer.state_change.ActionInitChain'),
        ],
        logical_and=False,
    )
    for state_changes_batch in batch_query:
        # gather one query record per state change in the batch
        query_records = []
        for state_change in state_changes_batch:
            data = json.loads(state_change.data)
            assert 'block_hash' not in data, 'v18 state changes cannot contain block_hash'
            record = BlockQueryAndUpdateRecord(
                block_number=int(data['block_number']),
                data=data,
                state_change_identifier=state_change.state_change_identifier,
                cache=cache,
            )
            query_records.append(record)

        # resolve block hashes in parallel, then persist the updates
        updated_state_changes = []
        pool_generator = Pool(batch_size).imap(
            _query_blocknumber_and_update_statechange_data,
            query_records,
        )
        for entry in pool_generator:
            updated_state_changes.append(entry)

        storage.update_state_changes(updated_state_changes)
Adds blockhash to ContractReceiveXXX and ActionInitChain state changes
3,389
def ray_shooting(self, x, y, kwargs, k=None):
    return self.lens_model.ray_shooting(x, y, kwargs, k=k)
Maps image to source position (inverse deflection).

:param x: x-position (preferentially arcsec)
:type x: numpy array
:param y: y-position (preferentially arcsec)
:type y: numpy array
:param kwargs: list of keyword arguments of lens model parameters matching the lens model classes
:param k: only evaluate the k-th lens model
:return: source plane positions corresponding to (x, y) in the image plane
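A hedged sketch in the lenstronomy style; the model list and its parameters are examples, not taken from the source:

import numpy as np
from lenstronomy.LensModel.lens_model import LensModel

lens_model = LensModel(lens_model_list=["SIS"])
kwargs = [{"theta_E": 1.0, "center_x": 0.0, "center_y": 0.0}]
x_img = np.array([1.1, -0.4])
y_img = np.array([0.2, 0.9])
x_src, y_src = lens_model.ray_shooting(x_img, y_img, kwargs)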
3,390
def write_matrix_to_tsv(net, filename=None, df=None):
    import pandas as pd
    if df is None:
        df = net.dat_to_df()
    # Literals reconstructed: the matrix dataframe is stored under the
    # 'mat' key and written tab-separated.
    return df['mat'].to_csv(filename, sep='\t')
This will export the matrix in net.dat or a dataframe (optional df in arguments) as a tsv file. Row/column categories will be saved as tuples in tsv, which can be read back into the network object.
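A hedged clustergrammer-style usage sketch; the Network class and load_df call are assumed from that library:

import pandas as pd
from clustergrammer import Network

net = Network()
net.load_df(pd.DataFrame([[1, 2], [3, 4]],
                         index=["gene-a", "gene-b"],
                         columns=["cell-1", "cell-2"]))
net.write_matrix_to_tsv("matrix.tsv")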
3,391
def generate_variables(name, n_vars=1, hermitian=None, commutative=True):
    variables = []
    for i in range(n_vars):
        # Format strings reconstructed; the originals were stripped from
        # this dump.
        if n_vars > 1:
            var_name = '%s%s' % (name, i)
        else:
            var_name = '%s' % name
        if commutative:
            if hermitian is None or hermitian:
                variables.append(Symbol(var_name, real=True))
            else:
                variables.append(Symbol(var_name, complex=True))
        elif hermitian is not None and hermitian:
            variables.append(HermitianOperator(var_name))
        else:
            variables.append(Operator(var_name))
    return variables
Generates a number of commutative or noncommutative variables.

:param name: The prefix in the symbolic representation of the noncommuting
             variables. This will be suffixed by a number from 0 to
             n_vars-1 if n_vars > 1.
:type name: str.
:param n_vars: The number of variables.
:type n_vars: int.
:param hermitian: Optional parameter to request Hermitian variables.
:type hermitian: bool.
:param commutative: Optional parameter to request commutative variables.
                    Commutative variables are Hermitian by default.
:type commutative: bool.

:returns: list of :class:`sympy.physics.quantum.operator.Operator` or
          :class:`sympy.physics.quantum.operator.HermitianOperator`
          variables or `sympy.Symbol`

:Example:

>>> generate_variables('y', 2, commutative=True)
[y0, y1]
3,392
def analog_write(self, pin, value):
    if self._command_handler.ANALOG_MESSAGE + pin < 0xf0:
        # pack the 14-bit value into two 7-bit Firmata data bytes
        command = [self._command_handler.ANALOG_MESSAGE + pin,
                   value & 0x7f, (value >> 7) & 0x7f]
        self._command_handler.send_command(command)
    else:
        # pin number too large for a standard analog message
        self.extended_analog(pin, value)
Set the specified pin to the specified value.

:param pin: Pin number
:param value: Pin value

:return: No return value
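A hedged PyMata-style usage sketch; the serial port and pin number are examples:

from PyMata.pymata import PyMata

board = PyMata("/dev/ttyACM0")
PWM_PIN = 9
board.set_pin_mode(PWM_PIN, board.PWM, board.DIGITAL)
board.analog_write(PWM_PIN, 128)  # roughly half duty cycle
board.close()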
3,393
def shutdown(self):
    self.stop_balance.set()
    self.motor_left.stop()
    self.motor_right.stop()
    self.gyro_file.close()
    self.touch_file.close()
    self.encoder_left_file.close()
    self.encoder_right_file.close()
    self.dc_left_file.close()
    self.dc_right_file.close()
Close all file handles and stop all motors.
3,394
async def _unwatch(self, conn):
    "Unwatches all previously specified keys"
    # Command literal reconstructed; the original was stripped from this dump.
    await conn.send_command('UNWATCH')
    res = await conn.read_response()
    return self.watching and res or True
Unwatches all previously specified keys
3,395
def get_mean_threshold_from_calibration(gdac, mean_threshold_calibration):
    # Column-name literals reconstructed; the originals were stripped from
    # this dump.
    interpolation = interp1d(mean_threshold_calibration['parameter_value'],
                             mean_threshold_calibration['mean_threshold'],
                             kind='slinear', bounds_error=True)
    return interpolation(gdac)
Calculates the mean threshold from the threshold calibration at the given
GDAC settings. If the given GDAC value was not used during calibration,
the value is determined by interpolation.

Parameters
----------
gdac : array-like
    The GDAC settings where the threshold should be determined from the
    calibration.
mean_threshold_calibration : pytables table
    The table created during the calibration scan.

Returns
-------
numpy.array, shape=(len(gdac), )
    The mean threshold values at each value in gdac.
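A self-contained sketch with a fake calibration table; the column names follow the reconstruction above:

import numpy as np

mean_threshold_calibration = {
    'parameter_value': np.array([50, 100, 150, 200]),
    'mean_threshold': np.array([30.0, 60.0, 95.0, 140.0]),
}
get_mean_threshold_from_calibration(120, mean_threshold_calibration)  # ~74.0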
3,396
def plotAccuracyDuringSequenceInference(dirName, title="", yaxis=""):
    # Read in the results file
    with open(os.path.join(dirName,
                           "sequence_batch_high_dec_normal_features.pkl"),
              "rb") as f:
        results = cPickle.load(f)

    # Collect the ranges of parameters that were swept
    locationRange = []
    featureRange = []
    for r in results:
        if r["numLocations"] not in locationRange:
            locationRange.append(r["numLocations"])
        if r["numFeatures"] not in featureRange:
            featureRange.append(r["numFeatures"])
    locationRange.sort()
    featureRange.sort()
    if 10 in featureRange:
        featureRange.remove(10)
    print "locationRange=", locationRange
    print "featureRange=", featureRange

    # Accumulate accuracies per parameter combination
    L2Accuracies = defaultdict(list)
    TMAccuracies = defaultdict(list)
    for r in results:
        if r["numFeatures"] in featureRange:
            L2Accuracies[(r["numLocations"], r["numFeatures"])].append(
                r["sequenceAccuracyL2"])
            TMAccuracies[r["numLocations"]].append(
                r["sequenceCorrectSparsityTM"])

    # Mean and stdev of accuracies, expressed as percentages
    meanL2Accuracy = numpy.zeros((max(locationRange)+1, max(featureRange)+1))
    stdevL2 = numpy.zeros((max(locationRange)+1, max(featureRange)+1))
    meanTMAccuracy = numpy.zeros(max(locationRange)+1)
    stdevTM = numpy.zeros(max(locationRange)+1)
    for o in locationRange:
        for f in featureRange:
            a = numpy.array(L2Accuracies[(o, f)])
            meanL2Accuracy[o, f] = 100.0*a.mean()
            stdevL2[o, f] = 100.0*a.std()
        a = numpy.array(TMAccuracies[o])
        meanTMAccuracy[o] = 100.0*a.mean()
        stdevTM[o] = 100.0*a.std()

    plt.figure()
    plotPath = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                            "plots", "accuracy_during_sequence_inference.pdf")

    # NOTE: the legend-label template and color names below are
    # reconstructions; the original string literals were stripped from
    # this dump.
    legendList = []
    colorList = ['r', 'b', 'g', 'm', 'c', 'k', 'y']
    for i in range(len(featureRange)):
        f = featureRange[i]
        legendList.append("Unique features: {}".format(f))
        plt.errorbar(locationRange, meanL2Accuracy[locationRange, f],
                     yerr=stdevL2[locationRange, f],
                     color=colorList[i])

    plt.errorbar(locationRange, meanTMAccuracy[locationRange],
                 yerr=stdevTM[locationRange],
                 color=colorList[len(featureRange)])
    legendList.append("TM")

    plt.legend(legendList, bbox_to_anchor=(0., 0.65, 1., .102), loc="right",
               prop={'size': 10})
    plt.xlabel("Size of location pool")
    plt.ylim(-10.0, 110.0)
    plt.ylabel(yaxis)
    plt.title(title)

    plt.savefig(plotPath)
    plt.close()
Plot accuracy vs number of locations
3,397
def get_sample_size(self, key=None):
    if key is None:
        return len(self.Y)
    else:
        return len(self.get_partitions(self.persistence)[key])
Returns the number of samples in the input data.

@ In, key, an optional 2-tuple specifying a min-max id pair used for
  determining which partition size should be returned. If not specified,
  then the size of the entire data set will be returned.
@ Out, an integer specifying the number of samples.
3,398
def _request_auth(self, registry):
    if registry:
        if registry.auth:
            registry.auth.load_dockercfg()
            try:
                # NOTE: the auth-type literal compared below was stripped
                # from this dump; 'token' is a hypothetical placeholder.
                self._client_session.login(
                    username=registry.auth.user,
                    password=registry.auth.passwd,
                    dockercfg_path=registry.auth.config_path,
                    reauth=True if registry.auth.auth_type == 'token' else False,
                    registry=registry.auth.registry)
            except Exception:
                raise
    else:
        raise Exception("a registry is required when requesting auth.")
Wrapper around the docker client's login(self, username, password=None,
email=None, registry=None, reauth=False, insecure_registry=False,
dockercfg_path=None).
3,399
def main(args):
    # Sort the reference and measurement genepreds into the temp directory.
    # NOTE: the command strings and temp file names below are
    # reconstructions; the original literals were stripped from this dump
    # (and the original wrote the raw cmd list, which would fail).
    cmd = ['sort_gpd.py', args.reference, '--tempdir', args.tempdir,
           '--threads', str(args.threads),
           '-o', args.tempdir + '/ref.sorted.gpd']
    sys.stderr.write(" ".join(cmd) + "\n")
    gpd_sort(cmd)
    cmd = ['sort_gpd.py', args.gpd, '--tempdir', args.tempdir,
           '--threads', str(args.threads),
           '-o', args.tempdir + '/my.sorted.gpd']
    sys.stderr.write(" ".join(cmd) + "\n")
    gpd_sort(cmd)

    rstream = GPDStream(open(args.tempdir + '/ref.sorted.gpd'))
    mstream = GPDStream(open(args.tempdir + '/my.sorted.gpd'))
    stream = MultiLocusStream([rstream, mstream])

    of = sys.stdout
    # output-sentinel literal reconstructed ('-' assumed to mean stdout)
    if args.output != '-':
        if args.output[-3:] == '.gz':
            of = gzip.open(args.output, 'w')
        else:
            of = open(args.output, 'w')

    for locus_rng in stream:
        (rgpds, mgpds) = locus_rng.get_payload()
        if len(mgpds) == 0:
            continue
        sys.stderr.write(locus_rng.get_range_string() + " " +
                         str(len(rgpds)) + " " + str(len(mgpds)) + "   \r")

        # Multi-exon entries: match by exact junction chain
        ref_juncs = {}
        for ref in rgpds:
            ref_juncs[ref.get_junction_string()] = ref
        annotated = [ref_juncs[x.get_junction_string()] for x in mgpds
                     if x.get_exon_count() > 1
                     and x.get_junction_string() in ref_juncs]
        unannotated = [x for x in mgpds if x.get_exon_count() > 1
                       and x.get_junction_string() not in ref_juncs]

        # Single-exon entries: match by best mutual overlap
        my_unannotated = [x for x in mgpds if x.get_exon_count() == 1]
        single_reference = [x for x in rgpds if x.get_exon_count() == 1]
        single_annotated = []
        single_unannotated = []
        for gpd in my_unannotated:
            overs = sorted([x for x in single_reference
                            if x.overlap_size(gpd) > 0],
                           key=lambda y: y.avg_mutual_coverage(gpd),
                           reverse=True)
            if len(overs) > 0:
                single_annotated.append(overs[0])
            else:
                single_unannotated.append(gpd)
        unannotated += single_unannotated

        # For the rest, try to at least assign a gene name by overlap
        gene_annotated = []
        no_annotation = []
        for m in unannotated:
            overs = sorted([x for x in rgpds if x.overlap_size(m) > 0],
                           key=lambda y: y.avg_mutual_coverage(m),
                           reverse=True)
            if len(overs) > 0:
                # field-name literal reconstructed
                gname = overs[0].value('gene_name')
                f = overs[0].get_gpd_line().rstrip().split("\t")
                f[0] = gname
                f[1] = str(uuid.uuid4())
                g = GPD("\t".join(f))
                gene_annotated.append(g)
            else:
                no_annotation.append(m)

        # Group the remaining entries into novel overlapping loci
        finished = []
        while len(no_annotation) > 0:
            m = no_annotation.pop(0)
            matched = False
            for i in range(0, len(finished)):
                if len([x for x in finished[i] if x.overlap_size(m) > 0]) > 0:
                    finished[i].append(m)
                    matched = True
                    break
            if not matched:
                finished.append([m])
        original = []
        for group in finished:
            gname = str(uuid.uuid4())
            for member in group:
                tname = str(uuid.uuid4())
                f = member.get_gpd_line().rstrip().split("\t")
                f[0] = gname
                f[1] = tname
                g = GPD("\t".join(f))
                original.append(g)

        for gpd in original + annotated + single_annotated + gene_annotated:
            of.write(gpd.get_gpd_line() + "\n")

    of.close()
    sys.stderr.write("\n")
    if not args.specific_tempdir:
        rmtree(args.tempdir)
First we need sorted genepreds.