Dataset columns:
    Unnamed: 0 : int64, row index (0 to 389k)
    code : string, lengths 26 to 79.6k
    docstring : string, lengths 1 to 46.9k
18,200
def on_state_changed(self, state):
    if state:
        self.editor.painted.connect(self._paint_margin)
        self.editor.repaint()
    else:
        self.editor.painted.disconnect(self._paint_margin)
        self.editor.repaint()
Connects or disconnects the painted event of the editor. :param state: Enable state
18,201
def write_index_and_rst_files(self, overwrite: bool = False, mock: bool = False) -> None:
    for f in self.files_to_index:
        if isinstance(f, FileToAutodocument):
            f.write_rst(
                prefix=self.rst_prefix,
                suffix=self.rst_suffix,
                heading_underline_char=self.source_rst_heading_underline_char,
                overwrite=overwrite,
                mock=mock,
            )
        elif isinstance(f, AutodocIndex):
            f.write_index_and_rst_files(overwrite=overwrite, mock=mock)
        else:
            fail("Unknown thing in files_to_index: {!r}".format(f))
    self.write_index(overwrite=overwrite, mock=mock)
Writes both the individual RST files and the index.

Args:
    overwrite: allow existing files to be overwritten?
    mock: pretend to write, but don't
18,202
def murmur_hash3_x86_32(data, offset, size, seed=0x01000193):
    key = bytearray(data[offset: offset + size])
    length = len(key)
    nblocks = int(length / 4)
    h1 = seed
    c1 = 0xcc9e2d51
    c2 = 0x1b873593
    # body
    for block_start in range(0, nblocks * 4, 4):
        k1 = key[block_start + 3] << 24 | \
             key[block_start + 2] << 16 | \
             key[block_start + 1] << 8 | \
             key[block_start + 0]
        k1 = c1 * k1 & 0xFFFFFFFF
        k1 = (k1 << 15 | k1 >> 17) & 0xFFFFFFFF
        k1 = (c2 * k1) & 0xFFFFFFFF
        h1 ^= k1
        h1 = (h1 << 13 | h1 >> 19) & 0xFFFFFFFF
        h1 = (h1 * 5 + 0xe6546b64) & 0xFFFFFFFF
    # tail
    tail_index = nblocks * 4
    k1 = 0
    tail_size = length & 3
    if tail_size >= 3:
        k1 ^= key[tail_index + 2] << 16
    if tail_size >= 2:
        k1 ^= key[tail_index + 1] << 8
    if tail_size >= 1:
        k1 ^= key[tail_index + 0]
    if tail_size != 0:
        k1 = (k1 * c1) & 0xFFFFFFFF
        k1 = (k1 << 15 | k1 >> 17) & 0xFFFFFFFF
        k1 = (k1 * c2) & 0xFFFFFFFF
        h1 ^= k1
    result = _fmix(h1 ^ length)
    return -(result & 0x80000000) | (result & 0x7FFFFFFF)
murmur3 hash function to determine partition
:param data: (byte array), input byte array
:param offset: (long), offset.
:param size: (long), byte length.
:param seed: murmur hash seed, hazelcast uses 0x01000193
:return: (int32), calculated hash value.
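The function above calls a helper `_fmix` that is not included in this record. A minimal sketch of what it presumably is, the canonical MurmurHash3 fmix32 finalization step (the name `_fmix` comes from the record; the body is an assumption):

def _fmix(h):
    # Standard MurmurHash3 32-bit avalanche mix (assumed implementation).
    h ^= h >> 16
    h = (h * 0x85ebca6b) & 0xFFFFFFFF
    h ^= h >> 13
    h = (h * 0xc2b2ae35) & 0xFFFFFFFF
    h ^= h >> 16
    return h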
18,203
def subdir_findall(dir, subdir):
    # The path-separator literals were stripped from this record; '/' is
    # assumed, matching the setuptools.findall recipe this is based on.
    strip_n = len(dir.split('/'))
    path = '/'.join((dir, subdir))
    return ['/'.join(s.split('/')[strip_n:]) for s in setuptools.findall(path)]
Find all files in a subdirectory and return paths relative to dir. This is similar to (and uses) setuptools.findall. However, the paths returned are in the form needed for package_data.
18,204
def update_event(self, event, uid):
    ev_for_change = self.calendar.get(uid)
    ev_for_change.content = event
    ev_for_change.save()
Edit event

Parameters
----------
event : iCalendar file as a string (calendar containing one event to be updated)
uid : uid of event to be updated
18,205
def get_meta_fields(self, fields, kwargs={}):
    fields = to_list(fields)
    meta = self.get_meta()
    return {field: meta.get(field) for field in fields}
Return a dictionary of metadata fields
18,206
def brightness_multi(x, gamma=1, gain=1, is_random=False):
    if is_random:
        gamma = np.random.uniform(1 - gamma, 1 + gamma)
    results = []
    for data in x:
        results.append(exposure.adjust_gamma(data, gamma, gain))
    return np.asarray(results)
Change the brightness of multiple images, randomly or non-randomly.
Usually used for image segmentation where x = [X, Y]; X and Y should be matched.

Parameters
-----------
x : list of numpy arrays
    List of images with dimension of [n_images, row, col, channel] (default).
others : args
    See ``tl.prepro.brightness``.

Returns
-------
numpy.array
    A list of processed images.
18,207
def _apply_sources(self):
    # NOTE: the string keys indexing ``self.settings`` and ``phase`` were
    # elided from this record; empty brackets and bare comparisons mark them.
    if self.settings[] == :
        f1 = 0.5
    else:
        f1 = 1
    phase = self.project.phases()[self.settings[]]
    relax = self.settings[]
    for item in self.settings[]:
        Ps = self.pores(item)
        datadiag = self._A.diagonal().copy()
        S1_old = phase[item++][Ps].copy()
        S2_old = phase[item++][Ps].copy()
        self._update_physics()
        S1 = phase[item++][Ps]
        S2 = phase[item++][Ps]
        S1 = relax*S1 + (1-relax)*S1_old
        S2 = relax*S2 + (1-relax)*S2_old
        phase[item++][Ps] = S1
        phase[item++][Ps] = S2
        datadiag[Ps] = datadiag[Ps] - f1*S1
        self._A.setdiag(datadiag)
        self._b[Ps] = self._b[Ps] + f1*S2
Update 'A' and 'b', applying source terms to specified pores.

Notes
-----
Applying source terms to 'A' and 'b' is performed after (optionally)
under-relaxing the source term to improve numerical stability. Physics
are also updated before applying source terms to ensure that source
term values are associated with the current value of 'quantity'. In the
case of a transient simulation, the updates in 'A' and 'b' also depend
on the time scheme.
18,208
def _reset(self):
    self.records = list()
    self.featsbyid = dict()
    self.featsbyparent = dict()
    self.countsbytype = dict()
Clear internal data structure.
18,209
def _payload(fields, values):
    # The '_id'/'_ids' suffix literals were stripped from this record; they
    # are restored here to match the 'organization_id' example in the
    # docstring below.
    for field_name, field in fields.items():
        if field_name in values:
            if isinstance(field, OneToOneField):
                values[field_name + '_id'] = (
                    getattr(values.pop(field_name), 'id', None)
                )
            elif isinstance(field, OneToManyField):
                values[field_name + '_ids'] = [
                    entity.id for entity in values.pop(field_name)
                ]
            elif isinstance(field, ListField):
                def parse(obj):
                    if isinstance(obj, Entity):
                        return _payload(obj.get_fields(), obj.get_values())
                    return obj
                values[field_name] = [parse(obj) for obj in values[field_name]]
    return values
Implement the ``*_payload`` methods.

It's frequently useful to create a dict of values that can be encoded to
JSON and sent to the server. Unfortunately, there are mismatches between
the field names used by NailGun and the field names the server expects.
This method provides a default translation that works in many cases. For
example:

>>> from nailgun.entities import Product
>>> product = Product(name='foo', organization=1)
>>> set(product.get_fields())
{'description', 'gpg_key', 'id', 'label', 'name', 'organization', 'sync_plan'}
>>> set(product.get_values())
{'name', 'organization'}
>>> product.create_payload()
{'organization_id': 1, 'name': 'foo'}

:param fields: A value like what is returned by
    :meth:`nailgun.entity_mixins.Entity.get_fields`.
:param values: A value like what is returned by
    :meth:`nailgun.entity_mixins.Entity.get_values`.
:returns: A dict mapping field names to field values.
18,210
def namedtuple_with_defaults(typename, field_names, default_values=[]):
    the_tuple = collections.namedtuple(typename, field_names)
    the_tuple.__new__.__defaults__ = (None,) * len(the_tuple._fields)
    if isinstance(default_values, collections.Mapping):
        prototype = the_tuple(**default_values)
    else:
        prototype = the_tuple(*default_values)
    the_tuple.__new__.__defaults__ = tuple(prototype)
    return the_tuple
Create a namedtuple with default values

>>> Node = namedtuple_with_defaults('Node', 'val left right')
>>> Node()
Node(val=None, left=None, right=None)
>>> Node = namedtuple_with_defaults('Node', 'val left right', [1, 2, 3])
>>> Node()
Node(val=1, left=2, right=3)
>>> Node = namedtuple_with_defaults('Node', 'val left right', {'right': 7})
>>> Node()
Node(val=None, left=None, right=7)
>>> Node(4)
Node(val=4, left=None, right=7)
18,211
def connect(self, source, target, witnesses):
    if self.graph.has_edge(source, target):
        self.graph[source][target]["label"] += ", " + str(witnesses)
    else:
        self.graph.add_edge(source, target, label=witnesses)
:type source: integer
:type target: integer
18,212
def has_layer(self, class_: Type[L], became: bool = True) -> bool:
    return (class_ in self._index
            or (became and class_ in self._transformed))
Test the presence of a given layer type. :param class_: Layer class you're interested in. :param became: Allow transformed layers in results
18,213
def interval_intersection_width(a, b, c, d):
    return max(0, min(b, d) - max(a, c))
Returns the width of the intersection of intervals [a, b] and [c, d] (thinking of these as intervals on the real number line).
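A quick worked check of the formula with made-up intervals: [0, 5] and [3, 10] overlap on [3, 5], so the width is min(5, 10) - max(0, 3) = 2, while disjoint intervals clamp to 0:

print(interval_intersection_width(0, 5, 3, 10))  # 2
print(interval_intersection_width(0, 1, 2, 3))   # 0: the negative gap clamps to zero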
18,214
def append_column(self, header, column):
    self.insert_column(self._column_count, header, column)
Append a column to end of the table.

Parameters
----------
header : str
    Title of the column
column : iterable
    Any iterable of appropriate length.
18,215
def extraction_to_conll(ex: Extraction) -> List[str]:
    # The elided literals are assumed to be the usual CoNLL-style span
    # markers: a "*" placeholder per token and a ")" span terminator.
    ex = split_predicate(ex)
    toks = ex.sent.split()
    ret = ["*"] * len(toks)
    args = [ex.arg1] + ex.args2
    rels_and_args = [("ARG{}".format(arg_ind), arg)
                     for arg_ind, arg in enumerate(args)] + \
                    [(rel_part.elem_type, rel_part) for rel_part in ex.rel]
    for rel, arg in rels_and_args:
        cur_start_ind = char_to_word_index(arg.span[0], ex.sent)
        cur_end_ind = char_to_word_index(arg.span[1], ex.sent)
        ret[cur_start_ind] = "({}{}".format(rel, ret[cur_start_ind])
        ret[cur_end_ind] += ")"
    return ret
Return a CoNLL representation of a given input Extraction.
18,216
def parse_list_objects_v2(data, bucket_name):
    # Element and key names were stripped from this record; the standard
    # S3 ListObjectsV2 response field names are assumed.
    root = S3Element.fromstring('ListObjectsV2Result', data)
    is_truncated = root.get_child_text('IsTruncated').lower() == 'true'
    continuation_token = root.get_child_text('NextContinuationToken',
                                             strict=False)
    objects, object_dirs = _parse_objects_from_xml_elts(
        bucket_name,
        root.findall('Contents'),
        root.findall('CommonPrefixes')
    )
    return objects + object_dirs, is_truncated, continuation_token
Parser for list objects version 2 response.

:param data: Response data for list objects.
:param bucket_name: Name of the bucket the response is for.
:return: Returns three distinct components:
    - List of :class:`Object <Object>`
    - True if list is truncated, False otherwise.
    - Continuation token for the next request.
18,217
def op(cls, text, *args, **kwargs):
    return cls.fn(text, *args, **kwargs)
This method must be overridden in derived classes
18,218
def run_simulations(self, parameter_list, data_folder):
    # NOTE: several dict-key and string literals were elided from this
    # record; empty brackets and bare slots mark them. The '--%s=%s'
    # argument format and the 'w'/'r' file modes are restored as the only
    # readings consistent with the surrounding code.
    for idx, parameter in enumerate(parameter_list):
        current_result = {: {}, : {}}
        current_result[].update(parameter)
        command = [self.script_executable] + [
            '--%s=%s' % (param, value) for param, value in parameter.items()]
        current_result[][] = str(uuid.uuid4())
        temp_dir = os.path.join(data_folder, current_result[][])
        os.makedirs(temp_dir)
        start = time.time()
        stdout_file_path = os.path.join(temp_dir, )
        stderr_file_path = os.path.join(temp_dir, )
        with open(stdout_file_path, 'w') as stdout_file, open(
                stderr_file_path, 'w') as stderr_file:
            return_code = subprocess.call(command, cwd=temp_dir,
                                          env=self.environment,
                                          stdout=stdout_file,
                                          stderr=stderr_file)
        end = time.time()
        if return_code > 0:
            complete_command = [self.script]
            complete_command.extend(command[1:])
            complete_command = "python waf --run \"%s\"" % (
                ' '.join(complete_command))
            with open(stdout_file_path, 'r') as stdout_file, open(
                    stderr_file_path, 'r') as stderr_file:
                raise Exception(( % (parameter, stderr_file.read(),
                                     stdout_file.read(), complete_command)))
        current_result[][] = end - start
        yield current_result
Run several simulations using a certain combination of parameters.
Yields results as simulations are completed.

Args:
    parameter_list (list): list of parameter combinations to simulate.
    data_folder (str): folder in which to save subfolders containing
        simulation output.
18,219
def transform(self, transformer):
    self.transformers.append(transformer)
    from languageflow.transformer.tagged import TaggedTransformer
    if isinstance(transformer, TaggedTransformer):
        self.X, self.y = transformer.transform(self.sentences)
    if isinstance(transformer, TfidfVectorizer):
        self.X = transformer.fit_transform(self.X)
    if isinstance(transformer, CountVectorizer):
        self.X = transformer.fit_transform(self.X)
    if isinstance(transformer, NumberRemover):
        self.X = transformer.transform(self.X)
    if isinstance(transformer, MultiLabelBinarizer):
        self.y = transformer.fit_transform(self.y)
Add transformer to flow and apply transformer to data in flow Parameters ---------- transformer : Transformer a transformer to transform data
18,220
def addDrizKeywords(self, hdr, versions):
    # NOTE: nearly all string keys indexing ``drizdict`` and ``pl`` were
    # elided from this record; empty brackets and bare assignments mark
    # them. Only the structure is restored.
    _geom = 
    _imgnum = 0
    for pl in self.parlist:
        _imgnum += 1
        drizdict = DRIZ_KEYWORDS.copy()
        drizdict[][] = pl[][:44]
        drizdict[][] = pl[][:64]
        drizdict[][] = pl[]
        drizdict[][] = pl[][:64]
        drizdict[][] = pl[][:64]
        if pl[] is None:
            outcontext = ""
        else:
            outcontext = pl[][:64]
        drizdict[][] = outcontext
        if self.single:
            drizdict[][] = pl[][:64]
        else:
            drizdict[][] = pl[][:64]
        if  in pl:
            _wtscl = pl[]
        else:
            if pl[] == :
                _wtscl = pl[]
            elif pl[] == :
                _wtscl = pl[] * pl[]
            else:
                _wtscl = pl[]
        drizdict[][] = _wtscl
        drizdict[][] = pl[]
        drizdict[][] = pl[]
        drizdict[][] = self.units
        if pl[] is None:
            _fillval = 
        else:
            _fillval = pl[]
        drizdict[][] = _fillval
        drizdict[][] = pl[]
        drizdict[] = {: pl[], : }
        drizdict[] = {: pl[], : }
        writeDrizKeywords(hdr, _imgnum, drizdict)
        del drizdict
    if versions is not None:
        ver_str = "AstroDrizzle processing performed using: "
        hdr.add_history(ver_str)
        for k in versions.keys():
            ver_str =  + str(k) +  + str(versions[k])
            hdr.add_history(ver_str)
Add drizzle parameter keywords to header.
18,221
def applyHotspot(self, lon, lat):
    # NOTE: the column and config keys were elided from this record; empty
    # brackets mark them.
    self.loadRealResults()
    cut_detect_real = (self.data_real[]
                       >= self.config[self.algorithm][])
    lon_real = self.data_real[][cut_detect_real]
    lat_real = self.data_real[][cut_detect_real]
    cut_hotspot = np.tile(True, len(lon))
    for ii in range(0, len(lon)):
        cut_hotspot[ii] = ~np.any(
            angsep(lon[ii], lat[ii], lon_real, lat_real)
            < self.config[self.algorithm][])
    return cut_hotspot
Exclude objects that are too close to a hotspot. Returns True for objects that pass the hotspot cut.
18,222
def notify(self, level, value, target=None, ntype=None, rule=None):
    # NOTE: a level literal and an options key were elided from this record.
    if target in self.state and level == self.state[target]:
        return False
    if target not in self.state and level ==  \
            and not self.reactor.options[]:
        return False
    self.state[target] = level
    return self.reactor.notify(level, self, value, target=target,
                               ntype=ntype, rule=rule)
Notify main reactor about event.
18,223
def variables(template):
    # The separator/modifier literals were stripped from this record; the
    # RFC 6570 values (',' between varspecs, ':' for a prefix modifier,
    # '*' for explode) are assumed.
    vars = set()
    for varlist in TEMPLATE.findall(template):
        if varlist[0] in OPERATOR:
            varlist = varlist[1:]
        varspecs = varlist.split(',')
        for var in varspecs:
            var = var.split(':')[0]
            if var.endswith('*'):
                var = var[:-1]
            vars.add(var)
    return vars
Returns the set of keywords in a URI template
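`TEMPLATE` and `OPERATOR` are module-level names this record does not include. Hypothetical definitions consistent with RFC 6570 and the code above (both names are taken from the record; the values are assumptions):

import re

# One '{...}' expression per match; the group captures the expression body.
TEMPLATE = re.compile(r"{([^}]+)}")
# RFC 6570 expression operators that may prefix a variable list.
OPERATOR = "+#./;?&|!@"

print(variables("http://api{/version}{?q,page*,fields:3}"))
# -> {'version', 'q', 'page', 'fields'}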
18,224
def leaveEvent(self, event):
    if self.__cursor_changed:
        QApplication.restoreOverrideCursor()
        self.__cursor_changed = False
    self.QT_CLASS.leaveEvent(self, event)
If cursor has not been restored yet, do it now
18,225
def from_dict(data, ctx):
    # The four field names were elided from this record; the keys of the
    # OANDA v20 UnitsAvailable type ('default', 'reduceFirst', 'reduceOnly',
    # 'openOnly') are assumed, and the four identical branches are folded
    # into one loop.
    data = data.copy()
    for key in ('default', 'reduceFirst', 'reduceOnly', 'openOnly'):
        if data.get(key) is not None:
            data[key] = ctx.order.UnitsAvailableDetails.from_dict(
                data[key], ctx
            )
    return UnitsAvailable(**data)
Instantiate a new UnitsAvailable from a dict (generally from loading a JSON response). The data used to instantiate the UnitsAvailable is a shallow copy of the dict passed in, with any complex child types instantiated appropriately.
18,226
def standard_to_absl(level):
    if not isinstance(level, int):
        # The message literal was elided; this wording is an assumption.
        raise TypeError('Expect an int level, found {}'.format(type(level)))
    if level < 0:
        level = 0
    if level < STANDARD_DEBUG:
        # Maps to finer-grained debug (vlog-style) levels.
        return STANDARD_DEBUG - level + 1
    elif level < STANDARD_INFO:
        return ABSL_DEBUG
    elif level < STANDARD_WARNING:
        return ABSL_INFO
    elif level < STANDARD_ERROR:
        return ABSL_WARNING
    elif level < STANDARD_CRITICAL:
        return ABSL_ERROR
    else:
        return ABSL_FATAL
Converts an integer level from the standard value to the absl value.

Args:
    level: int, a Python standard logging level.

Raises:
    TypeError: Raised when level is not an integer.

Returns:
    The corresponding integer level for use in absl logging.
18,227
def get_docs(r_session, url, encoder=None, headers=None, **params):
    # The 'keys' literals are restored; they follow from the CouchDB
    # bulk-fetch API this helper wraps.
    keys_list = params.pop('keys', None)
    keys = None
    if keys_list is not None:
        keys = json.dumps({'keys': keys_list}, cls=encoder)
    f_params = python_to_couch(params)
    resp = None
    if keys is not None:
        resp = r_session.post(url, headers=headers, params=f_params, data=keys)
    else:
        resp = r_session.get(url, headers=headers, params=f_params)
    resp.raise_for_status()
    return resp
Provides a helper for functions that require GET or POST requests with a JSON, text, or raw response containing documents. :param r_session: Authentication session from the client :param str url: URL containing the endpoint :param JSONEncoder encoder: Custom encoder from the client :param dict headers: Optional HTTP Headers to send with the request :returns: Raw response content from the specified endpoint
18,228
def validateAQLQuery(self, query, bindVars=None, options=None):
    "Returns the server answer if the query is valid. Raises an AQLQueryError if not."
    # Payload keys restored to the ArangoDB cursor API field names.
    if bindVars is None:
        bindVars = {}
    if options is None:
        options = {}
    payload = {'query': query, 'bindVars': bindVars, 'options': options}
    r = self.connection.session.post(self.cursorsURL,
                                     data=json.dumps(payload, default=str))
    data = r.json()
    if r.status_code == 201 and not data["error"]:
        return data
    else:
        raise AQLQueryError(data["errorMessage"], query, data)
Returns the server answer if the query is valid. Raises an AQLQueryError if not.
18,229
def compute_log_degrees(brands, exemplars):
    counts = Counter()
    for followers in brands.values():
        counts.update(followers)
    # Add 1 to every follower's count so log() is never taken of 1.
    counts.update(counts.keys())
    for k in counts:
        counts[k] = 1. / math.log(counts[k])
    return counts
For each follower, let Z be the total number of brands they follow. Return a dictionary of 1. / log(Z), for each follower.
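A tiny worked example with made-up data: because of the extra `counts.update(counts.keys())` pass, a follower of Z brands ends up with weight 1 / log(Z + 1):

import math
from collections import Counter

brands = {"brandA": ["u1", "u2"], "brandB": ["u1"]}
w = compute_log_degrees(brands, exemplars=None)
print(round(w["u1"], 4))  # follows 2 brands -> 1/log(3) ~= 0.9102
print(round(w["u2"], 4))  # follows 1 brand  -> 1/log(2) ~= 1.4427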
18,230
def _load_history_from_file(path, size=-1):
    if size == 0:
        return []
    if os.path.exists(path):
        # Mode/encoding literals were elided; 'r' and 'utf-8' are assumed.
        with codecs.open(path, 'r', encoding='utf-8') as histfile:
            lines = [line.rstrip() for line in histfile]
        if size > 0:
            lines = lines[-size:]
        return lines
    else:
        return []
Load a history list from a file and split it into lines. :param path: the path to the file that should be loaded :type path: str :param size: the number of lines to load (0 means no lines, < 0 means all lines) :type size: int :returns: a list of history items (the lines of the file) :rtype: list(str)
18,231
def cli(env, identifier):
    # NOTE: the lookup keys for ``result`` and ``guest`` were elided from
    # this record; empty brackets and bare .get() calls mark them.
    manager = PlacementManager(env.client)
    group_id = helpers.resolve_id(manager.resolve_ids, identifier, )
    result = manager.get_object(group_id)
    table = formatting.Table(["Id", "Name", "Backend Router", "Rule", "Created"])
    table.add_row([
        result[], result[], result[][], result[][], result[]
    ])
    guest_table = formatting.Table([
        "Id", "FQDN", "Primary IP", "Backend IP", "CPU", "Memory",
        "Provisioned", "Transaction"
    ])
    for guest in result[]:
        guest_table.add_row([
            guest.get(), guest.get(), guest.get(), guest.get(),
            guest.get(), guest.get(), guest.get(),
            formatting.active_txn(guest)
        ])
    env.fout(table)
    env.fout(guest_table)
View details of a placement group. IDENTIFIER can be either the Name or Id of the placement group you want to view
18,232
def build(self, builder):
    # NOTE: several attribute-name literals were elided from this record;
    # the surviving "EditPoint" suggests they followed the same CamelCase
    # ODM convention.
    params = {}
    if self.edit_point is not None:
        params["EditPoint"] = self.edit_point
    if self.used_imputation_method is not None:
        params[] = bool_to_yes_no(self.used_imputation_method)
    if self.audit_id is not None:
        params[] = str(self.audit_id)
    if self.include_file_oid is not None:
        params[] = bool_to_yes_no(self.include_file_oid)
    builder.start("AuditRecord", params)
    if self.user_ref is None:
        raise ValueError("User Reference not set.")
    self.user_ref.build(builder)
    if self.location_ref is None:
        raise ValueError("Location Reference not set.")
    self.location_ref.build(builder)
    if self.date_time_stamp is None:
        raise ValueError("DateTime not set.")
    self.date_time_stamp.build(builder)
    if self.source_id:
        self.source_id.build(builder)
    if self.reason_for_change is not None:
        self.reason_for_change.build(builder)
    builder.end("AuditRecord")
Build XML by appending to builder
18,233
def rotate(name, **kwargs):
    # NOTE: this record is the most heavily damaged in the batch; all dict
    # keys and comparison literals were elided. The nesting below is a
    # best-effort reconstruction of the surviving control flow.
    ret = {: name, : {}, : None, : }
    kwargs = salt.utils.args.clean_kwargs(**kwargs)
    if  not in kwargs:
        kwargs[] = name
    if  not in kwargs or not kwargs[]:
        if  in kwargs and kwargs[]:
            if kwargs[].startswith():
                kwargs[] = kwargs[]
    if  not in kwargs or not kwargs[]:
        ret[] = False
        ret[] = 
    else:
        old_config = __salt__[]()
        if kwargs[] in old_config:
            res = __salt__[](kwargs[] if  in kwargs else kwargs[])
            ret[] =  not in res
            if not ret[]:
                ret[] = res[]
                ret[] = {}
        res = __salt__[](name, **kwargs)
        ret[] =  not in res
        if ret[]:
            new_config = __salt__[]()
            ret[] = .format( if kwargs[] in old_config else )
            if kwargs[] in old_config:
                for key, val in salt.utils.data.compare_dicts(
                        old_config[kwargs[]],
                        new_config[kwargs[]]).items():
                    ret[][key] = val[]
            else:
                ret[] = new_config[kwargs[]]
            log.debug(ret[])
        else:
            ret[] = res[]
            if kwargs[] in old_config:
                ret[] = {kwargs[]: None}
            else:
                ret[] = {}
    return ret
Add a log to the logadm configuration

name : string
    alias for entryname
kwargs : boolean|string|int
    optional additional flags and parameters
18,234
def _get_or_insert(*args, **kwds):
    cls, args = args[0], args[1:]
    return cls._get_or_insert_async(*args, **kwds).get_result()
Transactionally retrieves an existing entity or creates a new one.

Positional Args:
    name: Key name to retrieve or create.

Keyword Args:
    namespace: Optional namespace.
    app: Optional app ID.
    parent: Parent entity key, if any.
    context_options: ContextOptions object (not keyword args!) or None.
    **kwds: Keyword arguments to pass to the constructor of the model
        class if an instance for the specified key name does not already
        exist. If an instance with the supplied key_name and parent
        already exists, these arguments will be discarded.

Returns:
    Existing instance of Model class with the specified key name and
    parent or a new one that has just been created.
18,235
def initLogger():
    # The docstring fused into the code during extraction; the surviving
    # "root')" fragment implies logging.getLogger('root').
    logger = logging.getLogger('root')
    logger.setLevel(logging.DEBUG)
    ch = logging.StreamHandler(sys.stdout)
    ch.setLevel(logging.INFO)
    formatter = logging.Formatter("[%(asctime)s] %(levelname)s: %(message)s",
                                  "%Y-%m-%d %H:%M:%S")
    ch.setFormatter(formatter)
    logger.addHandler(ch)
This code is taken from Matt's Suspenders, for initializing a logger
18,236
def add_job(cls, identifier, queue_name=None, priority=0, queue_model=None,
            prepend=False, delayed_for=None, delayed_until=None,
            **fields_if_new):
    # NOTE: two dict-key literals were elided from this record (the
    # ``job_kwargs`` keys); empty brackets mark them.
    delayed_until = compute_delayed_until(delayed_for, delayed_until)
    job_kwargs = {: identifier, : }
    retries = 0
    while retries < 10:
        retries += 1
        try:
            job, created = cls.get_or_connect(**job_kwargs)
        except IndexError:
            continue
        except ValueError:
            try:
                job = cls.collection(**job_kwargs).instances()[0]
            except IndexError:
                continue
            else:
                created = False
        break
    try:
        queue_name = cls._get_queue_name(queue_name)
        if not created:
            current_priority = int(job.priority.hget() or 0)
            if not prepend and current_priority >= priority:
                return job
            if queue_model is None:
                queue_model = cls.queue_model
            current_queue = queue_model.get_queue(queue_name, current_priority)
            current_queue.waiting.lrem(0, job.ident)
        else:
            job.set_fields(added=str(datetime.utcnow()),
                           **(fields_if_new or {}))
        job.enqueue_or_delay(queue_name, priority, delayed_until, prepend,
                             queue_model)
        return job
    except Exception:
        job.queued.delete()
        raise
Add a job to a queue. If this job already exists, check its current priority. If it's higher than the new one, don't touch it; otherwise move the job to the wanted queue. Before setting/moving the job to the queue, check for a `delayed_for` (int/float/timedelta) or `delayed_until` (datetime) argument to see if it must be delayed instead of queued. If the job is created, fields in fields_if_new will be set for the new job. Finally return the job.
18,237
def resolve_return_value_options(self, options):
    for key, value in options.items():
        if isinstance(value, str) and value.startswith(RETURN_VALUE_OPTION_PREFIX):
            path, name = value[len(RETURN_VALUE_OPTION_PREFIX):].rsplit(".", 1)
            result = self._find_result_by_path(path)
            options[key] = result.return_values.get(name)
Handle dynamic option value lookups in the format ^^task_name.attr
18,238
def gps_date_time_send(self, year, month, day, hour, min, sec, clockStat,
                       visSat, useSat, GppGl, sigUsedMask, percentUsed,
                       force_mavlink1=False):
    return self.send(
        self.gps_date_time_encode(year, month, day, hour, min, sec,
                                  clockStat, visSat, useSat, GppGl,
                                  sigUsedMask, percentUsed),
        force_mavlink1=force_mavlink1)
Pilot console PWM messages.

year        : Year reported by Gps (uint8_t)
month       : Month reported by Gps (uint8_t)
day         : Day reported by Gps (uint8_t)
hour        : Hour reported by Gps (uint8_t)
min         : Min reported by Gps (uint8_t)
sec         : Sec reported by Gps (uint8_t)
clockStat   : Clock Status. See table 47 page 211 OEMStar Manual (uint8_t)
visSat      : Visible satellites reported by Gps (uint8_t)
useSat      : Used satellites in Solution (uint8_t)
GppGl       : GPS+GLONASS satellites in Solution (uint8_t)
sigUsedMask : GPS and GLONASS usage mask (bit 0 GPS_used? bit_4 GLONASS_used?) (uint8_t)
percentUsed : Percent used GPS (uint8_t)
18,239
def metadata_path(self, m_path):
    if not m_path:
        self.metadata_dir = None
        self.metadata_file = None
    else:
        if not op.exists(m_path):
            raise OSError(.format(m_path))  # message literal elided in source
        if not op.dirname(m_path):
            self.metadata_dir = '.'  # assumed: current directory for bare filenames
        else:
            self.metadata_dir = op.dirname(m_path)
        self.metadata_file = op.basename(m_path)
Provide pointers to the paths of the metadata file Args: m_path: Path to metadata file
18,240
def sys_info():
    p = os.path
    path = p.dirname(p.abspath(p.join(__file__, '..')))  # '..' assumed for the elided literal
    return pprint.pformat(pkg_info(path))
Return useful information about IPython and the system, as a string.

Example
-------
In [2]: print sys_info()
{'commit_hash': '144fdae',      # random
 'commit_source': 'repository',
 'ipython_path': '/home/fperez/usr/lib/python2.6/site-packages/IPython',
 'ipython_version': '0.11.dev',
 'os_name': 'posix',
 'platform': 'Linux-2.6.35-22-generic-i686-with-Ubuntu-10.10-maverick',
 'sys_executable': '/usr/bin/python',
 'sys_platform': 'linux2',
 'sys_version': '2.6.6 (r266:84292, Sep 15 2010, 15:52:39) \\n[GCC 4.4.5]'}
18,241
def derivatives(self, x, y, grid_interp_x=None, grid_interp_y=None, f_=None,
                f_x=None, f_y=None, f_xx=None, f_yy=None, f_xy=None):
    n = len(np.atleast_1d(x))
    if n <= 1 and np.shape(x) == ():
        f_x_out = self.f_x_interp(x, y, grid_interp_x, grid_interp_y, f_x)
        f_y_out = self.f_y_interp(x, y, grid_interp_x, grid_interp_y, f_y)
        return f_x_out[0][0], f_y_out[0][0]
    else:
        if self._grid and n >= self._min_grid_number:
            x_, y_ = util.get_axes(x, y)
            f_x_out = self.f_x_interp(x_, y_, grid_interp_x, grid_interp_y, f_x)
            f_y_out = self.f_y_interp(x_, y_, grid_interp_x, grid_interp_y, f_y)
            f_x_out = util.image2array(f_x_out)
            f_y_out = util.image2array(f_y_out)
        else:
            f_x_out, f_y_out = np.zeros(n), np.zeros(n)
            for i in range(n):
                f_x_out[i] = self.f_x_interp(x[i], y[i], grid_interp_x, grid_interp_y, f_x)
                f_y_out[i] = self.f_y_interp(x[i], y[i], grid_interp_x, grid_interp_y, f_y)
        return f_x_out, f_y_out
returns df/dx and df/dy of the function
18,242
def vms(message, level=1):
    if verbose is not None and verbose != False:
        if isinstance(verbose, bool) or (isinstance(verbose, int) and level <= verbose):
            std(message)
Writes the specified message *only* if verbose output is enabled.
18,243
def get_alerts_unarchived(self):
    # NOTE: the JSON keys, query-parameter name and endpoint path were
    # elided from this record; empty slots mark them.
    js = json.dumps({: , : False})
    params = urllib.urlencode({: js})
    return self._read(self.api_url + , params)
Return a list of unarchived alerts.
18,244
def _implicit_solver(self):
    newstate = {}
    for varname, value in self.state.items():
        if self.use_banded_solver:
            newvar = _solve_implicit_banded(value, self._diffTriDiag)
        else:
            newvar = np.linalg.solve(self._diffTriDiag, value)
        newstate[varname] = newvar
    return newstate
Inverts and solves the matrix problem for diffusion matrix and temperature T.

The method is called by the
:func:`~climlab.process.implicit.ImplicitProcess._compute()` function of the
:class:`~climlab.process.implicit.ImplicitProcess` class and solves the
matrix problem

.. math::

    A \\cdot T_{\\textrm{new}} = T_{\\textrm{old}}

for diffusion matrix A and corresponding temperatures.
:math:`T_{\\textrm{old}}` is in this case the current state variable which
already has been adjusted by the explicit processes.
:math:`T_{\\textrm{new}}` is the new state of the variable. To derive the
temperature tendency of the diffusion process the adjustment has to be
calculated and multiplied with the timestep, which is done by the
:func:`~climlab.process.implicit.ImplicitProcess._compute()` function of the
:class:`~climlab.process.implicit.ImplicitProcess` class.

This method calculates the matrix inversion for every state variable,
calling either :func:`_solve_implicit_banded()` or
:py:func:`numpy.linalg.solve()` depending on the flag
``self.use_banded_solver``.

:ivar dict state: method uses current state variables but does not modify them
:ivar bool use_banded_solver: input flag whether to use
    :func:`_solve_implicit_banded()` or :py:func:`numpy.linalg.solve()` to
    do the matrix inversion
:ivar array _diffTriDiag: the diffusion matrix which is given with the
    current state variable to the method solving the matrix problem
18,245
def interpret(self, msg):
    # NOTE: the two message keys were elided from this record.
    for gallery in msg.get(, []):
        self.add_folder(gallery)
    image_file = msg.get()
    if not image_file:
        return
    return self.find_image(image_file)
Try and find the image file. Some magic here would be good. FIXME: move elsewhere and make it so everyone can use. An interpreter that finds things?
18,246
def gatk_variant_filtration(job, vcf_id, filter_name, filter_expression,
                            ref_fasta, ref_fai, ref_dict):
    # NOTE: the file names, GATK command-line literals, docker image name
    # and open() modes were elided from this record; empty slots mark them.
    inputs = {: ref_fasta, : ref_fai, : ref_dict, : vcf_id}
    work_dir = job.fileStore.getLocalTempDir()
    for name, file_store_id in inputs.iteritems():
        job.fileStore.readGlobalFile(file_store_id,
                                     os.path.join(work_dir, name))
    command = [, , , , , , , filter_name, , filter_expression, , ]
    job.fileStore.logToMaster(
        .format(name=filter_name, expression=filter_expression))
    docker_parameters = [, , , , .format(job.memory)]
    dockerCall(job=job, workDir=work_dir, parameters=command, tool=,
               dockerParameters=docker_parameters)
    malformed_header = os.path.join(work_dir, )
    fixed_header = os.path.join(work_dir, )
    filter_regex = re.escape( % filter_expression)
    with open(malformed_header, ) as f, open(fixed_header, ) as g:
        for line in f:
            g.write(re.sub(filter_regex, filter_expression, line))
    return job.fileStore.writeGlobalFile(fixed_header)
Filters VCF file using GATK VariantFiltration. Fixes extra pair of quotation marks in VCF header that may interfere with other VCF tools. :param JobFunctionWrappingJob job: passed automatically by Toil :param str vcf_id: FileStoreID for input VCF file :param str filter_name: Name of filter for VCF header :param str filter_expression: JEXL filter expression :param str ref_fasta: FileStoreID for reference genome fasta :param str ref_fai: FileStoreID for reference genome index file :param str ref_dict: FileStoreID for reference genome sequence dictionary file :return: FileStoreID for filtered VCF file :rtype: str
18,247
def download_cf_standard_name_table(version, location=None):
    if location is None:
        # resource_filename() arguments were elided from this record.
        location = resource_filename(, )
    url = ("http://cfconventions.org/Data/cf-standard-names/"
           "{0}/src/cf-standard-name-table.xml".format(version))
    r = requests.get(url, allow_redirects=True)
    if r.status_code == 200:
        print("Downloading cf-standard-names table version {0} from: {1}".format(
            version, url), file=sys.stderr)
        with open(location, 'wb') as f:  # 'wb' assumed: r.content is bytes
            f.write(r.content)
    else:
        r.raise_for_status()
    return
Downloads the specified CF standard name table version and saves it to file :param str version: CF standard name table version number (e.g. 34) :param str location: Path/filename to write downloaded xml file to
18,248
def get_val(self, x):
    try:
        if self.subtype == :  # subtype literal elided in source
            return int(round(x[self.col_name]))
        else:
            if np.isnan(x[self.col_name]):
                return self.default_val
            return x[self.col_name]
    except (ValueError, TypeError):
        return self.default_val
Converts to int.
18,249
def parse_charset(header_string):
    # The regex literal was damaged in this record; the pattern below,
    # matching ``charset=`` with an optional quote, is a reconstruction
    # consistent with the surviving tail "]?([a-z0-9_-]+)".
    match = re.search(
        r'charset=["\']?([a-z0-9_-]+)',
        header_string, re.IGNORECASE
    )
    if match:
        return match.group(1)
Parse a "Content-Type" string for the document encoding. Returns: str, None
18,250
def get_ids_in_region(self, resource, resolution, x_range, y_range, z_range,
                      time_range=[0, 1]):
    return self.service.get_ids_in_region(
        resource, resolution, x_range, y_range, z_range, time_range,
        self.url_prefix, self.auth, self.session, self.session_send_opts)
Get all ids in the region defined by x_range, y_range, z_range.

Args:
    resource (intern.resource.Resource): An annotation channel.
    resolution (int): 0 indicates native resolution.
    x_range (list[int]): x range such as [10, 20] which means x>=10 and x<20.
    y_range (list[int]): y range such as [10, 20] which means y>=10 and y<20.
    z_range (list[int]): z range such as [10, 20] which means z>=10 and z<20.
    time_range (optional [list[int]]): time range such as [30, 40] which
        means t>=30 and t<40. Defaults to [0, 1].

Returns:
    (list[int]): Example: [1, 2, 25].

Raises:
    requests.HTTPError
    TypeError: if resource is not an annotation channel.
18,251
def markets(self):
    with self.client.connect(*self.bestip):
        data = self.client.get_markets()
        return self.client.to_df(data)
    return None
Get the real-time market list. :return: pd.DataFrame or None
18,252
def danke(client, event, channel, nick, rest):
    if rest:
        rest = rest.strip()
        Karma.store.change(rest, 1)
        rcpt = rest
    else:
        rcpt = channel
    return f  # the f-string body was elided from this record
Danke schön!
18,253
def childRecords(self):
    # NOTE: the fallback value assigned to ``column`` was elided from this
    # record.
    if self._childRecords is not None:
        return self._childRecords
    tree = self.treeWidget()
    try:
        table, column = tree.hierarchyLookup(self.record())
    except AttributeError:
        table = None
        column = 
    if table and column:
        return table.select(where=Q(column) == self.record())
    return RecordSet()
Returns a record set of children for this item based on the record. If no record set is manually set for this instance, then it will use the hierarchyColumn value from the tree widget with this record. If no hierarchyColumn is specified, then a blank record set is returned. :return <orb.RecordSet>
18,254
def flatten_dict(d, prefix=, sep=):
    # NOTE: the default prefix/sep literals and every ``str.format``
    # template string were elided from this record; empty slots mark them.
    def apply_and_resolve_conflicts(dest, item, prefix):
        for k, v in flatten_dict(item, prefix=prefix, sep=sep).items():
            new_key = k
            i = 2
            while new_key in d:
                new_key = .format(key=k, sep=sep, index=i)
                i += 1
            dest[new_key] = v

    for key in list(d.keys()):
        if any(unicode(prefix)):
            new_key = u.format(p=prefix, key=key, sep=sep)
        else:
            new_key = key
        if isinstance(d[key], (dict, collections.Mapping)):
            apply_and_resolve_conflicts(d, d.pop(key), new_key)
        elif isinstance(d[key], six.string_types):
            d[new_key] = d.pop(key)
        elif isinstance(d[key], (list, collections.Mapping)):
            array = d.pop(key)
            for i in range(len(array)):
                index_key = .format(key=key, sep=sep, i=i)
                while index_key in d:
                    i += 1
                apply_and_resolve_conflicts(d, array[i], index_key)
        else:
            d[new_key] = d.pop(key)
    return d
In place dict flattening.
18,255
def determine_hbonds_for_drawing(self, analysis_cutoff):
    self.frequency = defaultdict(int)
    for traj in self.hbonds_by_type:
        for bond in self.hbonds_by_type[traj]:
            if bond["donor_resnm"] != "LIG":
                self.frequency[(bond["donor_idx"], bond["acceptor_atom"],
                                bond["donor_atom"], bond["acceptor_idx"])] += bond["frequency"]
            else:
                self.frequency[(bond["acceptor_idx"], bond["donor_atom"],
                                bond["acceptor_atom"], bond["donor_idx"])] += bond["frequency"]
    self.frequency = {i: self.frequency[i] for i in self.frequency
                      if self.frequency[i] > (int(len(self.trajectory)) * analysis_cutoff)}
    self.hbonds_for_drawing = {}
    for bond in self.frequency:
        atomname = bond[1]
        if atomname.startswith("O", 0) or atomname.startswith("N", 0):
            lig_atom = atomname
        else:
            atomindex = [index for index, atom in enumerate(self.topology_data.universe.ligand.atoms)
                         if atom.name == atomname][0]
            rdkit_atom = self.topology_data.mol.GetAtomWithIdx(atomindex)
            for neigh in rdkit_atom.GetNeighbors():
                neigh_atom_id = neigh.GetIdx()
                lig_atom = [atom.name for index, atom in enumerate(self.topology_data.universe.ligand.atoms)
                            if index == neigh_atom_id][0]
        self.hbonds_for_drawing[(bond[0], lig_atom, bond[2], bond[3])] = self.frequency[bond]
Since plotting all hydrogen bonds could lead to a messy plot, a cutoff has to be implemented. In this function the frequency of each hydrogen bond is summed and the total compared against the analysis cutoff - a fraction multiplied by trajectory count. Those hydrogen bonds that are present for longer than the analysis cutoff will be plotted in the final plot. Takes: * analysis_cutoff * - (user-defined) fraction of time a hydrogen bond has to be present for to be plotted (default - 0.3). It is multiplied by number of trajectories Output: * frequency * - dictionary of hydrogen bond donor-acceptor indices and frequencies These hydrogen bonds will be plotted in the final image.
18,256
def _http_req_user_agent(self):
    # The settings keys and format template were elided from this record;
    # 'service'/'version' and '{}/{}' are assumed from the docstring below,
    # and hasattr(self, '_process') from the attribute used underneath.
    try:
        return '{}/{}'.format(
            self.settings['service'], self.settings['version'])
    except (AttributeError, KeyError):
        pass
    if hasattr(self, '_process'):
        try:
            return '{}/{}'.format(
                self._process.consumer_name,
                self._process.consumer_version)
        except AttributeError:
            pass
    return DEFAULT_USER_AGENT
Return the User-Agent value to specify in HTTP requests, defaulting to ``service/version`` if configured in the application settings, or if used in a consumer, it will attempt to obtain a user-agent from the consumer's process. If it can not auto-set the User-Agent, it defaults to ``sprockets.mixins.http/[VERSION]``. :rtype: str
18,257
def update_path(self):
    if WINDOWS:
        return self.add_to_windows_path()
    export_string = self.get_export_string()
    addition = "\n{}\n".format(export_string)
    updated = []
    profiles = self.get_unix_profiles()
    for profile in profiles:
        if not os.path.exists(profile):
            continue
        with open(profile, "r") as f:
            content = f.read()
        if addition not in content:
            with open(profile, "a") as f:
                f.write(addition)
            updated.append(os.path.relpath(profile, HOME))
Tries to update the $PATH automatically.
18,258
def delete_agent(self, agent_id):
    self._check_agent_id(agent_id)
    req_url = "{}/agents/{}".format(self._base_url, agent_id)
    resp = self._requests_session.delete(req_url)
    decoded_resp = self._decode_response(resp)
    return decoded_resp
Delete an agent. :param str agent_id: The id of the agent to delete. It must be an str containing only characters in "a-zA-Z0-9_-" and must be between 1 and 36 characters. :return: agent deleted. :rtype: dict.
18,259
def append(self, other, inplace=False, **kwargs):
    if not isinstance(other, MAGICCData):
        other = MAGICCData(other, **kwargs)
    if inplace:
        super().append(other, inplace=inplace)
        self.metadata.update(other.metadata)
    else:
        res = super().append(other, inplace=inplace)
        res.metadata = deepcopy(self.metadata)
        res.metadata.update(other.metadata)
        return res
Append any input which can be converted to MAGICCData to self.

Parameters
----------
other : MAGICCData, pd.DataFrame, pd.Series, str
    Source of data to append.
inplace : bool
    If True, append ``other`` inplace, otherwise return a new
    ``MAGICCData`` instance.
**kwargs
    Passed to ``MAGICCData`` constructor (only used if ``other`` is not a
    ``MAGICCData`` instance).
18,260
def injector_ui_tree_menu_entity_2_json(self, ignore_genealogy=False):
    # NOTE: every JSON field name was elided from this record; empty
    # brackets mark them. The two original branches differed only in the
    # parent/children fields added when genealogy is kept, so they are
    # folded together here.
    LOGGER.debug("InjectorUITreeEntity.injector_ui_tree_menu_entity_2_json")
    json_obj = {
        : self.id,
        : self.value,
        : self.type,
        : self.description if self.description is not None else "",
        : self.context_address if self.context_address is not None else "",
        : self.icon if self.icon is not None else ""
    }
    if not ignore_genealogy:
        json_obj[] = self.parent_id
        if self.child_ids is not None:
            json_obj[] = self.child_ids
    if self.display_permissions is not None:
        json_obj[] = self.display_permissions
    if self.display_roles is not None:
        json_obj[] = self.display_roles
    if self.other_actions_perms is not None:
        json_obj[] = self.other_actions_perms
    if self.other_actions_roles is not None:
        json_obj[] = self.other_actions_roles
    if self.remote_injector_tree_entity_gears_cache_id is not None and \
            self.remote_injector_tree_entity_gears_cache_id:
        json_obj[] = self.remote_injector_tree_entity_gears_cache_id
    if self.remote_injector_tree_entity_components_cache_id is not None and \
            self.remote_injector_tree_entity_components_cache_id:
        json_obj[] = self.remote_injector_tree_entity_components_cache_id
    return json_obj
Transform this local object to JSON :param ignore_genealogy: ignore the genealogy of this object if true (expected format for the Ariane server) :return: the resulting JSON of the transformation
18,261
def _handle_argument(self, token):
    name = None
    self._push()
    while self._tokens:
        token = self._tokens.pop()
        if isinstance(token, tokens.ArgumentSeparator):
            name = self._pop()
            self._push()
        elif isinstance(token, tokens.ArgumentClose):
            if name is not None:
                return Argument(name, self._pop())
            return Argument(self._pop())
        else:
            self._write(self._handle_token(token))
    raise ParserError("_handle_argument() missed a close token")
Handle a case where an argument is at the head of the tokens.
18,262
def GetNodes(r, bulk=False):
    if bulk:
        return r.request("get", "/2/nodes", query={"bulk": 1})
    else:
        nodes = r.request("get", "/2/nodes")
        return r.applier(itemgetters("id"), nodes)
Gets all nodes in the cluster. @type bulk: bool @param bulk: whether to return all information about all instances @rtype: list of dict or str @return: if bulk is true, info about nodes in the cluster, else list of nodes in the cluster
18,263
def _apply_Create(self, change):
    ar = _AzureRecord(self._resource_group, change.new)
    create = self._dns_client.record_sets.create_or_update
    create(resource_group_name=ar.resource_group,
           zone_name=ar.zone_name,
           relative_record_set_name=ar.relative_record_set_name,
           record_type=ar.record_type,
           parameters=ar.params)
    self.log.debug(.format(ar))  # log-message template elided in source
A record from change must be created. :param change: a change object :type change: octodns.record.Change :type return: void
18,264
def _make_summary_tables(self):
    # NOTE: this record is badly garbled: the exception message collapsed
    # into "hasnnum_componentsplain')" and the string literals were
    # stripped; only the surviving structure is restored (Python 2 prints
    # kept as in the source).
    try:
        self._Bhat
    except:
        raise Exception()  # message elided; presumably "regression hasn't been fit"
    headers = self._results[0]
    table = self._results[1:]
    print tabulate(table, headers, tablefmt="rst")
    print "Formula Used: %s" % self._designmatrix_object._formula
    print "Degrees of Freedom (n - p - k): %s" % str(total_dof)
    print "Condition Number of X^T*X: %.2f" % np.linalg.cond(np.dot(self._X.T, self._X))
Prints the summary of the regression. It shows the waveform metadata, diagnostics of the fit, and results of the hypothesis tests for each comparison encoded in the design matrix
18,265
def filtered_rows_from_args(self, args):
    if len(self.manifests) == 0:
        print("fw: No manifests downloaded. Try ")  # hint text truncated in source
        return None
    (filters, remainder) = self.filters_from_args(args)
    all = self.all_firmwares()
    rows = self.rows_for_firmwares(all)
    filtered = self.filter_rows(filters, rows)
    return (filtered, remainder)
extracts filters from args, rows from manifests, returns filtered rows
18,266
def version(self):
    if not self.__version:
        command = (self.name, '--version')  # flag literal assumed
        logger.debug(command)
        stdout, _ = subprocess.Popen(
            command, stdout=subprocess.PIPE).communicate()
        version_output = str(stdout)
        match = re.search(self.version_patt, version_output)
        if match is None:
            raise ServersError(  # message template elided in source
                % (self.name, version_output))
        version_string = match.group()
        self.__version = tuple(map(int, version_string.split('.')))
    return self.__version
Get the version of MongoDB that this Server runs as a tuple.
18,267
def set_state_process(self, context, process):
    LOGGER.info( % (context, process))  # log template elided in source
    self.state[context]["process"].append(process)
Method to append process for a context in the IF state. :param context: It can be a layer purpose or a section (impact function, post processor). :type context: str, unicode :param process: A text explain the process. :type process: str, unicode
18,268
def decimal_day_to_day_hour_min_sec(self, daysFloat):
    self.log.info()  # log message elided in source
    daysInt = int(daysFloat)
    hoursFloat = (daysFloat - daysInt) * 24.
    hoursInt = int(hoursFloat)
    minsFloat = (hoursFloat - hoursInt) * 60.
    minsInt = int(minsFloat)
    secFloat = (minsFloat - minsInt) * 60.
    # Respect the precision of the input when deciding which components
    # to report.
    strday = repr(daysFloat)
    if "." not in strday:
        precisionUnit = "day"
        precision = 0
        hoursInt = None
        minsInt = None
        secFloat = None
    else:
        lenDec = len(strday.split(".")[-1])
        if lenDec < 2:
            precisionUnit = "day"
            precision = 0
            hoursInt = None
            minsInt = None
            secFloat = None
        elif lenDec < 3:
            precisionUnit = "hour"
            precision = 0
            minsInt = None
            secFloat = None
        elif lenDec < 5:
            precisionUnit = "minute"
            precision = 0
            secFloat = None
        else:
            precisionUnit = "second"
            precision = lenDec - 5
            if precision > 3:
                precision = 3
            secFloat = "%02.*f" % (precision, secFloat)
    self.log.info()  # log message elided in source
    return daysInt, hoursInt, minsInt, secFloat
*Convert a day from decimal format to hours mins and sec*

Precision should be respected.

**Key Arguments:**
    - ``daysFloat`` -- the day as a decimal.

**Return:**
    - ``daysInt`` -- day as an integer
    - ``hoursInt`` -- hour as an integer (None if input precision too low)
    - ``minsInt`` -- mins as an integer (None if input precision too low)
    - ``secFloat`` -- secs as a float (None if input precision too low)

**Usage:**

.. todo::

    - replace `decimal_day_to_day_hour_min_sec` in all other code

.. code-block:: python

    from astrocalc.times import conversions
    converter = conversions(log=log)
    daysInt, hoursInt, minsInt, secFloat = converter.decimal_day_to_day_hour_min_sec(
        daysFloat=24.2453)
    print daysInt, hoursInt, minsInt, secFloat
    # OUTPUT: 24, 5, 53, None

    daysInt, hoursInt, minsInt, secFloat = converter.decimal_day_to_day_hour_min_sec(
        daysFloat=24.1232435454)
    print "%(daysInt)s days, %(hoursInt)s hours, %(minsInt)s mins, %(secFloat)s sec" % locals()
    # OUTPUT: 24 days, 2 hours, 57 mins, 28.242 sec
18,269
def fast_boolean(operandA, operandB, operation, precision=0.001,
                 max_points=199, layer=0, datatype=0):
    polyA = []
    polyB = []
    for poly, obj in zip((polyA, polyB), (operandA, operandB)):
        if isinstance(obj, PolygonSet):
            poly.extend(obj.polygons)
        elif isinstance(obj, CellReference) or isinstance(obj, CellArray):
            poly.extend(obj.get_polygons())
        elif obj is not None:
            for inobj in obj:
                if isinstance(inobj, PolygonSet):
                    poly.extend(inobj.polygons)
                elif isinstance(inobj, CellReference) or isinstance(inobj, CellArray):
                    poly.extend(inobj.get_polygons())
                else:
                    poly.append(inobj)
    if len(polyB) == 0:
        polyB.append(polyA.pop())
    result = clipper.clip(polyA, polyB, operation, 1 / precision)
    return None if len(result) == 0 else PolygonSet(
        result, layer, datatype, verbose=False).fracture(max_points, precision)
Execute any boolean operation between 2 polygons or polygon sets.

Parameters
----------
operandA : polygon or array-like
    First operand. Must be a ``PolygonSet``, ``CellReference``,
    ``CellArray``, or an array. The array may contain any of the previous
    objects or an array-like[N][2] of vertices of a polygon.
operandB : polygon, array-like or ``None``
    Second operand. Must be ``None``, a ``PolygonSet``, ``CellReference``,
    ``CellArray``, or an array. The array may contain any of the previous
    objects or an array-like[N][2] of vertices of a polygon.
operation : {'or', 'and', 'xor', 'not'}
    Boolean operation to be executed. The 'not' operation returns the
    difference ``operandA - operandB``.
precision : float
    Desired precision for rounding vertex coordinates.
max_points : integer
    If greater than 4, fracture the resulting polygons to ensure they
    have at most ``max_points`` vertices. This is not a tessellating
    function, so this number should be as high as possible. For example,
    it should be set to 199 for polygons being drawn in GDSII files.
layer : integer
    The GDSII layer number for the resulting element.
datatype : integer
    The GDSII datatype for the resulting element (between 0 and 255).

Returns
-------
out : PolygonSet or ``None``
    Result of the boolean operation.
18,270
def _set_id_from_xml_frameid(self, xml, xmlpath, var):
    e = xml.find(xmlpath)
    if e is not None:
        setattr(self, var, e.attrib[])  # attribute name elided in source
Set a single variable with the frameids of matching entity
18,271
def sparse_surface(self):
    # The method-name and data-key literals were elided from this record;
    # 'ray'/'subdivide' follow from the two voxelizer functions, and the
    # 'mesh'/'pitch'/'max_iter'/'origin' keys are assumptions.
    if self._method == 'ray':
        func = voxelize_ray
    elif self._method == 'subdivide':
        func = voxelize_subdivide
    else:
        raise ValueError()  # error message elided in source
    voxels, origin = func(
        mesh=self._data['mesh'],
        pitch=self._data['pitch'],
        max_iter=self._data['max_iter'][0])
    self._cache['origin'] = origin
    return voxels
Filled cells on the surface of the mesh.

Returns
----------------
voxels: (n, 3) int, filled cells on mesh surface
18,272
def begin(self):
    if self.start:
        self.at_beginning = True
        self.pos = 0
    else:
        self.at_beginning = False
        self._new_song()
    return self._get_song()
Start over and get a track.
18,273
def call(self, callname, data=None, **args):
    url = f"{self.url_base}/{callname}"
    payload = self.payload.copy()
    payload.update(**args)
    if data is not None:
        payload.update(data)
    res = self.session.post(url, data=payload)
    if res.status_code > 299:
        self.log.error(f"URL: {url}")
        self.log.error(f"Payload: {payload}")
        self.log.error(f"STATUS: {res.status_code}")
        self.log.error(f"RESPONSE: {res.text}")
        return
    elif  in res.json():  # error key elided in source
        self.log.error(res.json()[])
        return
    return res.json()
Generic interface to the REST api. :param callname: query name :param data: dictionary of inputs :param args: keyword arguments added to the payload :return:
18,274
def _ann_store_annotations(self, item_with_annotations, node, overwrite=False):
    # NOTE: one overwrite-mode literal was elided from this record.
    if overwrite is True or overwrite == :
        annotated = self._all_get_from_attrs(node, HDF5StorageService.ANNOTATED)
        if annotated:
            current_attrs = node._v_attrs
            for attr_name in current_attrs._v_attrnames:
                if attr_name.startswith(HDF5StorageService.ANNOTATION_PREFIX):
                    delattr(current_attrs, attr_name)
            delattr(current_attrs, HDF5StorageService.ANNOTATED)
            self._hdf5file.flush()
    if not item_with_annotations.v_annotations.f_is_empty():
        anno_dict = item_with_annotations.v_annotations._dict
        current_attrs = node._v_attrs
        changed = False
        for field_name in anno_dict:
            val = anno_dict[field_name]
            field_name_with_prefix = HDF5StorageService.ANNOTATION_PREFIX + field_name
            if field_name_with_prefix not in current_attrs:
                setattr(current_attrs, field_name_with_prefix, val)
                changed = True
        if changed:
            setattr(current_attrs, HDF5StorageService.ANNOTATED, True)
            self._hdf5file.flush()
Stores annotations into an hdf5 file.
18,275
def update_url_params(url, replace_all=False, **url_params):
    # NOTE: one kwargs key was elided from this record (the branch that
    # stashes a non-bool ``replace_all`` back into ``url_params``).
    if not (replace_all is True or replace_all is False):
        url_params[] = replace_all
    if not url or not url_params:
        return url or None
    scheme, netloc, url_path, url_query, fragment = _urlsplit(url)
    if replace_all is True:
        url_query = url_params
    else:
        url_query = _parse_qs(url_query)
        url_query.update(url_params)
    return _urlunsplit((scheme, netloc, url_path,
                        _unquote(_urlencode(url_query, doseq=True)),
                        fragment))
:return: url with its query updated from url_query (non-matching params are retained)
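An illustrative call for the common case (assuming `_urlsplit`, `_parse_qs`, `_urlencode`, `_unquote` and `_urlunsplit` are the stdlib urllib helpers their names suggest):

print(update_url_params("http://example.com/api?a=1", b=2))
# -> http://example.com/api?a=1&b=2 (parameter order may vary)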
18,276
def list(self, request, *args, **kwargs):
    return super(MergedPriceListItemViewSet, self).list(request, *args, **kwargs)
To get a list of price list items, run **GET** against */api/merged-price-list-items/* as an authenticated user.

If service is not specified, default price list items are displayed. Otherwise, service-specific price list items are displayed. In this case the rendered object contains {"is_manually_input": true}

In order to specify the service, pass query parameters:
    - service_type (Azure, OpenStack etc.)
    - service_uuid

Example URL: http://example.com/api/merged-price-list-items/?service_type=Azure&service_uuid=cb658b491f3644a092dd223e894319be
18,277
def process_text(text, output_fmt='json', outbuf=None, cleanup=True, key='',
                 **kwargs):
    # Default values restored from the docstring below: output_fmt defaults
    # to 'json' and key defaults to the empty string.
    nxml_str = make_nxml_from_text(text)
    return process_nxml_str(nxml_str, output_fmt, outbuf, cleanup, key,
                            **kwargs)
Return processor with Statements extracted by reading text with Sparser.

Parameters
----------
text : str
    The text to be processed
output_fmt : Optional[str]
    The output format to obtain from Sparser, with the two options being
    'json' and 'xml'. Default: 'json'
outbuf : Optional[file]
    A file like object that the Sparser output is written to.
cleanup : Optional[bool]
    If True, the temporary file created, which is used as an input file
    for Sparser, as well as the output file created by Sparser are
    removed. Default: True
key : Optional[str]
    A key which is embedded into the name of the temporary file passed to
    Sparser for reading. Default is empty string.

Returns
-------
SparserXMLProcessor or SparserJSONProcessor depending on what output
format was chosen.
18,278
def has_tensor(obj) -> bool:
    if isinstance(obj, torch.Tensor):
        return True
    elif isinstance(obj, dict):
        return any(has_tensor(value) for value in obj.values())
    elif isinstance(obj, (list, tuple)):
        return any(has_tensor(item) for item in obj)
    else:
        return False
Given a possibly complex data structure, check if it has any torch.Tensors in it.
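A quick illustrative check of the recursion on nested containers:

import torch

print(has_tensor({"batch": [0, {"mask": torch.zeros(2)}]}))  # True: tensor nested in dict/list
print(has_tensor([1, ("a", 2.0)]))                           # False: no tensors anywhere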
18,279
def await_message(self, *args, **kwargs):
    # The return-type annotation was elided from this record; the function
    # returns an asyncio.Future that resolves to the matching message.
    fut = asyncio.Future()

    @self.on_message(*args, **kwargs)
    async def handler(message):
        fut.set_result(message)

    fut.add_done_callback(lambda _: self.remove_message_handler(handler))
    return fut
Block until a message matches. See `on_message`
18,280
def getFields(self):
    d = {}
    for i in self._attrsList:
        key = i
        value = getattr(self, i)
        d[key] = value
    return d
Returns all the class attributes. @rtype: dict @return: A dictionary containing all the class attributes.
18,281
def get_definition(self, name):
    # The SERVICES keys, message literals and XAddr template were elided
    # from this record; 'wsdl'/'ns'/'devicemgmt' and the device-service
    # address are assumptions based on the python-onvif layout.
    if name not in SERVICES:
        raise ONVIFError( % name)  # message elided
    wsdl_file = SERVICES[name]['wsdl']
    ns = SERVICES[name]['ns']
    wsdlpath = os.path.join(self.wsdl_dir, wsdl_file)
    if not os.path.isfile(wsdlpath):
        raise ONVIFError( % wsdlpath)  # message elided
    if name == 'devicemgmt':
        xaddr = 'http://%s:%s/onvif/device_service' % (self.host, self.port)
        return xaddr, wsdlpath
    xaddr = self.xaddrs.get(ns)
    if not xaddr:
        raise ONVIFError( % name)  # message elided
    return xaddr, wsdlpath
Returns xaddr and wsdl of specified service
18,282
def put(self):
    return self.manager.put(
        id=self.id,
        name=self.name,
        description=self.description,
        command_to_run=self.command_to_run,
        environment_variables=self.environment_variables,
        required_arguments=self.required_arguments,
        required_arguments_default_values=(
            self.required_arguments_default_values
        ),
        json_file_option=self.json_file_option,
    )
Updates this task type on the saltant server. Returns: :class:`saltant.models.container_task_type.ExecutableTaskType`: An executable task type model instance representing the task type just updated.
18,283
def errorprint():
    # Presumably used as a @contextmanager (the decorator is not part of
    # this record). The message template and color literals were elided.
    try:
        yield
    except ConfigurationError as e:
        click.secho( % e, err=True, fg=)
        sys.exit(1)
Print out descriptions from ConfigurationError.
18,284
def find_group(self, star, starlist):
    # Column-name literals restored from the docstring below ('x_0'/'y_0');
    # the returned column name 'id' is an assumption.
    star_distance = np.hypot(star['x_0'] - starlist['x_0'],
                             star['y_0'] - starlist['y_0'])
    distance_criteria = star_distance < self.crit_separation
    return np.asarray(starlist[distance_criteria]['id'])
Find the ids of those stars in ``starlist`` which are at a distance less
than ``crit_separation`` from ``star``.

Parameters
----------
star : `~astropy.table.Row`
    Star which will be either the head of a cluster or an isolated one.
starlist : `~astropy.table.Table`
    List of star positions. Columns named as ``x_0`` and ``y_0``, which
    corresponds to the centroid coordinates of the sources, must be
    provided.

Returns
-------
Array containing the ids of those stars which are at a distance less
than ``crit_separation`` from ``star``.
18,285
def load(cls, fpath):
    module_name = os.path.splitext(os.path.basename(fpath))[0]
    sys.path.insert(0, os.path.dirname(fpath))
    try:
        module = import_module(module_name)
    finally:
        sys.path = sys.path[1:]
    return module
Loads a module and returns its object. :param str|unicode fpath: :rtype: module
18,286
def recipe(package, repository=None, depends_on=None, release=False,
           output_path=None, auto=False, overwrite=False, name=None):
    # NOTE: the fallback literals after ``name or`` were elided from this
    # record.
    upgrader = InvenioUpgrader()
    logger = upgrader.get_logger()
    try:
        path, found_repository = _upgrade_recipe_find_path(package)
        if output_path:
            path = output_path
        if not repository:
            repository = found_repository
        if not os.path.exists(path):
            raise RuntimeError("Path does not exists: %s" % path)
        if not os.path.isdir(path):
            raise RuntimeError("Path is not a directory: %s" % path)
        if release:
            filename = "%s_release_x_y_z.py" % repository
        else:
            filename = "%s_%s_%s.py" % (repository,
                                        date.today().strftime("%Y_%m_%d"),
                                        name or )
        test_repository = upgrader._parse_plugin_id(filename[:-3])
        if repository != test_repository:
            raise RuntimeError(
                "Generated repository name cannot be parsed. "
                "Please override it with --repository option."
            )
        upgrade_file = os.path.join(path, filename)
        if os.path.exists(upgrade_file) and not overwrite:
            raise RuntimeError(
                "Could not generate upgrade - %s already exists."
                % upgrade_file
            )
        if depends_on is None:
            depends_on = ["CHANGE_ME"]
            u = upgrader.latest_applied_upgrade(repository=repository)
            if u:
                depends_on = [u]
        _write_template(upgrade_file, name or , depends_on, repository,
                        auto=auto)
        logger.info("Created new upgrade %s" % upgrade_file)
    except RuntimeError as e:
        for msg in e.args:
            logger.error(unicode(msg))
        raise
Create a new upgrade recipe, for developers.
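An illustrative direct call (the function is normally driven by a CLI wrapper; the package, repository, and paths are made up):

recipe('invenio_upgrader_demo',
       repository='demo',
       output_path='/tmp/upgrades',
       name='add_index')
# -> writes /tmp/upgrades/demo_<YYYY_MM_DD>_add_index.py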
18,287
def parse_string(progression):
    acc = 0
    roman_numeral = ''
    suffix = ''
    i = 0
    for c in progression:
        if c == '#':
            acc += 1
        elif c == 'b':
            acc -= 1
        elif c.upper() == 'I' or c.upper() == 'V':
            roman_numeral += c.upper()
        else:
            break
        i += 1
    suffix = progression[i:]
    return (roman_numeral, acc, suffix)
Return a tuple (roman numeral, accidentals, chord suffix).

Examples:
>>> parse_string('I')
('I', 0, '')
>>> parse_string('bIM7')
('I', -1, 'M7')
18,288
def get_queryset(self):
    try:
        qs = super(UserManager, self).get_queryset()
    except AttributeError:
        # Django < 1.6
        qs = super(UserManager, self).get_query_set()
    return qs
Fixes get_query_set vs get_queryset for Django <1.6
18,289
def parse_unit(name, parse_strict='warn', format='gwpy'):
    if name is None or isinstance(name, units.UnitBase):
        return name

    # have we failed to parse this one before?
    try:
        return UNRECOGNIZED_UNITS[name]
    except KeyError:
        try:  # otherwise, try the standard (strict) parser first
            return units.Unit(name, parse_strict='raise')
        except ValueError as exc:
            if (parse_strict == 'raise' or
                    'did not parse as unit' not in str(exc)):
                raise
            # try again using the lenient custom formatter
            GWpyFormat.warn = parse_strict != 'silent'
            return units.Unit(name, parse_strict='silent', format=format)
        finally:
            GWpyFormat.warn = True
Attempt to intelligently parse a `str` as a `~astropy.units.Unit`

Parameters
----------
name : `str`
    unit name to parse

parse_strict : `str`
    one of 'silent', 'warn', or 'raise' depending on how pedantic
    you want the parser to be

format : `~astropy.units.format.Base`
    the formatter class to use when parsing the unit string

Returns
-------
unit : `~astropy.units.UnitBase`
    the unit parsed by `~astropy.units.Unit`

Raises
------
ValueError
    if the unit cannot be parsed and `parse_strict='raise'`
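A few hedged example calls; 'cnts' stands in for any string astropy's strict parser rejects, and its lenient fallback depends on parse_strict as described above:

parse_unit('m')       # -> Unit("m")
parse_unit(None)      # -> None (passed straight through)
parse_unit('cnts', parse_strict='warn')  # unrecognised: warns, then parses leniently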
18,290
def editAccountInfo(self, short_name=None, author_name=None, author_url=None):
    return self.make_method("editAccountInfo", {
        "access_token": self.access_token,
        "short_name": short_name,
        "author_name": author_name,
        "author_url": author_url
    })
Use this method to update information about a Telegraph account.

:param short_name: Optional. New account name.
:type short_name: str
:param author_name: Optional. New default author name used when
    creating new articles.
:type author_name: str
:param author_url: Optional. New default profile link, opened when
    users click on the author's name below the title. Can be any
    link, not necessarily to a Telegram profile or channel.
:type author_url: str

:returns: Account object with the default fields.
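A usage sketch against a client object that already carries the access_token; the argument values are made up:

account = client.editAccountInfo(
    short_name='sandbox',
    author_name='Anonymous',
    author_url='https://telegra.ph/'
)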
18,291
def openFile(self, openDQ=False):
    if self._im.closed:
        # release the DQ image before re-opening the science image
        if not self._dq.closed:
            self._dq.release()
        assert(self._dq.closed)
        fi = FileExtMaskInfo(clobber=False,
                             doNotOpenDQ=not openDQ,
                             im_fmode=self.open_mode)
        fi.image = self.name
        self._im = fi.image
        fi.append_ext(spu.get_ext_list(self._im, extname='SCI'))
        fi.finalize()
        self._im = fi.image
        self._dq = fi.DQimage
        self._imext = fi.fext
        self._dqext = fi.dqext
Open file and set up filehandle for image file
18,292
def cache_as_field(cache_name):
    def cache_wrapper(func):
        @functools.wraps(func)
        def inner_wrapper(self, *args, **kwargs):
            value = getattr(self, cache_name, UndefToken)
            if value != UndefToken:
                return value

            ret = func(self, *args, **kwargs)
            setattr(self, cache_name, ret)
            return ret
        return inner_wrapper
    return cache_wrapper
Cache a function's return value as the field 'cache_name'.
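A short sketch of the decorator in use: the decorated method computes once, then returns the stored field on later calls. The class, field name, and loader are illustrative:

class Settings(object):

    @cache_as_field('_db_cache')
    def database(self):
        return connect_db()  # hypothetical expensive call; runs only once

s = Settings()
s.database()  # computes and stores the result in s._db_cache
s.database()  # returns s._db_cache without calling connect_db() again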
18,293
def bhattacharyya(Ks, dim, required, clamp=True, to_self=False):
    est = required
    if clamp:
        est = np.minimum(est, 1)  # enforce BC <= 1
    return est
Estimate the Bhattacharyya coefficient between distributions, based
on kNN distances: \int \sqrt{p q}

If clamp (the default), enforces 0 <= BC <= 1.

Returns an array of shape (num_Ks,).
18,294
def pull(self, path, use_sudo=False, user=None, force=False):
    if path is None:
        raise ValueError("Path to the working copy is needed to pull from a remote repository.")

    options = []
    if force:
        options.append('--force')
    options = ' '.join(options)

    cmd = 'git pull %s' % options

    with cd(path):
        if use_sudo and user is None:
            run_as_root(cmd)
        elif use_sudo:
            sudo(cmd, user=user)
        else:
            run(cmd)
Fetch changes from the default remote repository and merge them.

:param path: Path of the working copy directory. This directory must
    exist and be a Git working copy with a default remote to pull from.
:type path: str

:param use_sudo: If ``True`` execute ``git`` with
    :func:`fabric.operations.sudo`, else with
    :func:`fabric.operations.run`.
:type use_sudo: bool

:param user: If ``use_sudo is True``, run
    :func:`fabric.operations.sudo` with the given user.
    If ``use_sudo is False`` this parameter has no effect.
:type user: str

:param force: If ``True``, append the ``--force`` option to the command.
:type force: bool
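Since the method takes self, it lives on some Git helper class; a hedged usage sketch with a made-up instance and path:

git = GitHelper()  # hypothetical instance of the hosting class
git.pull('/var/www/myapp', use_sudo=True, user='www-data')
# runs `git pull` inside /var/www/myapp as user www-data via sudo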
18,295
def sequence_set(self) -> SequenceSet:
    try:
        seqset_crit = next(crit for crit in self.all_criteria
                           if isinstance(crit, SequenceSetSearchCriteria))
    except StopIteration:
        return SequenceSet.all()
    else:
        return seqset_crit.seq_set
The sequence set to use when finding the messages to match against. This will default to all messages unless the search criteria set contains a sequence set.
18,296
def parse_refresh_header(self, refresh):
    ii = refresh.find(";")
    if ii != -1:
        pause, newurl_spec = float(refresh[:ii]), refresh[ii + 1:]
        jj = newurl_spec.find("=")
        key = None
        if jj != -1:
            key, newurl = newurl_spec[:jj], newurl_spec[jj + 1:]
            newurl = self.clean_refresh_url(newurl)
        if key is None or key.strip().lower() != "url":
            raise ValueError()
    else:
        pause, newurl = float(refresh), None
    return pause, newurl
>>> parse_refresh_header("1; url=http://example.com/")
(1.0, 'http://example.com/')
>>> parse_refresh_header("1; url='http://example.com/'")
(1.0, 'http://example.com/')
>>> parse_refresh_header("1")
(1.0, None)
>>> parse_refresh_header("blah")  # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ValueError: invalid literal for float(): blah
18,297
def StartingKey(self, evt):
    key = evt.GetKeyCode()
    ch = None
    if key in [wx.WXK_NUMPAD0, wx.WXK_NUMPAD1, wx.WXK_NUMPAD2,
               wx.WXK_NUMPAD3, wx.WXK_NUMPAD4, wx.WXK_NUMPAD5,
               wx.WXK_NUMPAD6, wx.WXK_NUMPAD7, wx.WXK_NUMPAD8,
               wx.WXK_NUMPAD9]:
        # map numpad keys onto their digit characters
        ch = chr(ord('0') + key - wx.WXK_NUMPAD0)
    elif key < 256 and key >= 0 and chr(key) in string.printable:
        ch = chr(key)

    if ch is not None and self._tc.IsEnabled():
        # replace the contents and move the insertion point to the end
        self._tc.SetValue(ch)
        self._tc.SetInsertionPointEnd()
    else:
        evt.Skip()
If the editor is enabled by pressing keys on the grid, this will be called to let the editor do something about that first key if desired.
18,298
def temperature(self, what):
    self._temperature = units.validate_quantity(what, u.K)
Set temperature.
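Assuming this is wired up as a property setter on some model object (the object name here is hypothetical), usage looks like:

from astropy import units as u

star.temperature = 5800 * u.K  # validated and stored in Kelvin by validate_quantity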
18,299
def estimate(self, significance_level=0.01):
    skel, separating_sets = self.estimate_skeleton(significance_level)
    pdag = self.skeleton_to_pdag(skel, separating_sets)
    model = self.pdag_to_dag(pdag)
    return model
Estimates a DAG for the data set, using the PC constraint-based structure learning algorithm. Independencies are identified from the data set using a chi-squared statistic with the acceptance threshold of `significance_level`. PC identifies a partially directed acyclic graph (PDAG), given that the tested independencies admit a faithful Bayesian network representation. This method returns a DAG that is a completion of this PDAG. Parameters ---------- significance_level: float, default: 0.01 The significance level to use for conditional independence tests in the data set. `significance_level` is the desired Type 1 error probability of falsely rejecting the null hypothesis that variables are independent, given that they are. The lower `significance_level`, the less likely we are to accept dependencies, resulting in a sparser graph. Returns ------- model: DAG()-instance An estimate for the DAG for the data set (not yet parametrized). Reference --------- Neapolitan, Learning Bayesian Networks, Section 10.1.2, Algorithm 10.2 (page 550) http://www.cs.technion.ac.il/~dang/books/Learning%20Bayesian%20Networks(Neapolitan,%20Richard).pdf Examples -------- >>> import pandas as pd >>> import numpy as np >>> from pgmpy.estimators import ConstraintBasedEstimator >>> data = pd.DataFrame(np.random.randint(0, 5, size=(2500, 3)), columns=list('XYZ')) >>> data['sum'] = data.sum(axis=1) >>> print(data) X Y Z sum 0 3 0 1 4 1 1 4 3 8 2 0 0 3 3 3 0 2 3 5 4 2 1 1 4 ... .. .. .. ... 2495 2 3 0 5 2496 1 1 2 4 2497 0 4 2 6 2498 0 0 0 0 2499 2 4 0 6 [2500 rows x 4 columns] >>> c = ConstraintBasedEstimator(data) >>> model = c.estimate() >>> print(model.edges()) [('Z', 'sum'), ('X', 'sum'), ('Y', 'sum')]