Unnamed: 0 (int64): values 0 to 389k
code (string): lengths 26 to 79.6k characters
docstring (string): lengths 1 to 46.9k characters
16,600
def read_socket_input(connection, socket_obj): count = connection.needs_input if count <= 0: return count while True: try: sock_data = socket_obj.recv(count) break except socket.timeout as e: LOG.debug("Socket timeout exception %s", str(e)) raise except socket.error as e: err = e.errno if err in [errno.EAGAIN, errno.EWOULDBLOCK, errno.EINTR]: return 0 LOG.debug("Socket error exception %s", str(e)) raise except Exception as e: LOG.debug("unknown socket exception %s", str(e)) raise if len(sock_data) > 0: count = connection.process_input(sock_data) else: LOG.debug("Socket closed") count = Connection.EOS connection.close_input() connection.close_output() return count
Read from the network layer and processes all data read. Can support both blocking and non-blocking sockets. Returns the number of input bytes processed, or EOS if input processing is done. Any exceptions raised by the socket are re-raised.
16,601
def variantAnnotationsGenerator(self, request): compoundId = datamodel.VariantAnnotationSetCompoundId.parse( request.variant_annotation_set_id) dataset = self.getDataRepository().getDataset(compoundId.dataset_id) variantSet = dataset.getVariantSet(compoundId.variant_set_id) variantAnnotationSet = variantSet.getVariantAnnotationSet( request.variant_annotation_set_id) iterator = paging.VariantAnnotationsIntervalIterator( request, variantAnnotationSet) return iterator
Returns a generator over the (variantAnnotations, nextPageToken) pairs defined by the specified request.
16,602
def deactivate_in_ec(self, ec_index): with self._mutex: if ec_index >= len(self.owned_ecs): ec_index -= len(self.owned_ecs) if ec_index >= len(self.participating_ecs): raise exceptions.BadECIndexError(ec_index) ec = self.participating_ecs[ec_index] else: ec = self.owned_ecs[ec_index] ec.deactivate_component(self._obj)
Deactivate this component in an execution context. @param ec_index The index of the execution context to deactivate in. This index is into the total array of contexts, that is both owned and participating contexts. If the value of ec_index is greater than the length of @ref owned_ecs, that length is subtracted from ec_index and the result used as an index into @ref participating_ecs.
16,603
def get_tracerinfo(tracerinfo_file): widths = [rec.width for rec in tracer_recs] col_names = [rec.name for rec in tracer_recs] dtypes = [rec.type for rec in tracer_recs] usecols = [name for name in col_names if not name.startswith()] tracer_df = pd.read_fwf(tracerinfo_file, widths=widths, names=col_names, dtypes=dtypes, comment=" usecols=usecols) " recommend you manually check that file to see that all" " tracers are properly recorded." .format(tracerinfo_file)) tracer_desc = {tracer.name: tracer.desc for tracer in tracer_recs if not tracer.name.startswith()} def _assign_hydrocarbon(row): if row[] != 1: row[] = True row[] = C_MOLECULAR_WEIGHT else: row[] = False return row tracer_df = ( tracer_df .apply(_assign_hydrocarbon, axis=1) .assign(chemical=lambda x: x[].astype(bool)) ) return tracer_df, tracer_desc
Read an output's tracerinfo.dat file and parse into a DataFrame for use in selecting and parsing categories. Parameters ---------- tracerinfo_file : str Path to tracerinfo.dat Returns ------- DataFrame containing the tracer information.
16,604
def filter_by(self, string):
    self._reatach()
    if string == '':  # empty filter string (literal lost in extraction; assumed '')
        self.filter_remove()
        return
    self._expand_all()
    self.treeview.selection_set()
    children = self.treeview.get_children()
    for item in children:
        _, detached = self._detach(item)
        if detached:
            self._detached.extend(detached)
    for i, p, idx in self._detached:
        self.treeview.detach(i)
    self.filter_on = True
Filters treeview
16,605
def CRRAutility(c, gam):
    if gam == 1:
        return np.log(c)
    else:
        return c**(1.0 - gam) / (1.0 - gam)
Evaluates constant relative risk aversion (CRRA) utility of consumption c given risk aversion parameter gam. Parameters ---------- c : float Consumption value gam : float Risk aversion Returns ------- (unnamed) : float Utility Tests ----- Test a value which should pass: >>> c, gamma = 1.0, 2.0 # Set two values at once with Python syntax >>> utility(c=c, gam=gamma) -1.0
16,606
def add_file_recursive(self, filename, trim=False): assert not self.final, self.add_source_file(filename) queue = collections.deque([filename]) seen = set() while queue: filename = queue.popleft() self.graph.add_node(filename) try: deps, broken = self.get_file_deps(filename) except parsepy.ParseError: self.unreadable_files.add(filename) else: self.graph.remove_node(filename) continue for f in broken: self.broken_deps[filename].add(f) for f in deps: if self.follow_file(f, seen, trim): queue.append(f) seen.add(f) self.graph.add_node(f) self.graph.add_edge(filename, f)
Add a file and all its recursive dependencies to the graph. Args: filename: The name of the file. trim: Whether to trim the dependencies of builtin and system files.
16,607
def unwrap_state_dict(self, obj: Dict[str, Any]) -> Union[Tuple[str, Any], Tuple[None, None]]: if len(obj) == 2: typename = obj.get(self.type_key) state = obj.get(self.state_key) if typename is not None: return typename, state return None, None
Unwraps a marshalled state previously wrapped using :meth:`wrap_state_dict`.
16,608
def evaluate(self, verbose=False, decode=True, passes=None, num_threads=1, apply_experimental=True): evaluated_data = [v.evaluate(verbose, decode, passes, num_threads, apply_experimental) for v in self.values] return MultiIndex(evaluated_data, self.names)
Evaluates by creating a MultiIndex containing evaluated data and index. See `LazyResult` Returns ------- MultiIndex MultiIndex with evaluated data.
16,609
def create_parser(self, prog_name, subcommand): parser = optparse.OptionParser( prog=prog_name, usage=self.usage(subcommand), version=self.get_version(), option_list=self.get_option_list()) for name, description, option_list in self.get_option_groups(): group = optparse.OptionGroup(parser, name, description); list(map(group.add_option, option_list)) parser.add_option_group(group) return parser
Customize the parser to include option groups.
16,610
def has_active_condition(self, condition, instances): return_value = None for instance in instances + [None]: if not self.can_execute(instance): continue result = self.is_active(instance, condition) if result is False: return False elif result is True: return_value = True return return_value
Given a list of instances, and the condition active for this switch, returns a boolean representing if the conditional is met, including a non-instance default.
16,611
def xpathNextAncestor(self, ctxt): if ctxt is None: ctxt__o = None else: ctxt__o = ctxt._o ret = libxml2mod.xmlXPathNextAncestor(ctxt__o, self._o) if ret is None:raise xpathError() __tmp = xmlNode(_obj=ret) return __tmp
Traversal function for the "ancestor" direction the ancestor axis contains the ancestors of the context node; the ancestors of the context node consist of the parent of context node and the parent's parent and so on; the nodes are ordered in reverse document order; thus the parent is the first node on the axis, and the parent's parent is the second node on the axis
16,612
def set(self, instance, value, **kw): ref = [] if api.is_uid(value): ref.append(api.get_object_by_uid(value)) if api.is_at_content(value): ref.append(value) if u.is_dict(value): results = api.search(portal_type=self.allowed_types, **value) ref = map(api.get_object, results) if u.is_list(value): for item in value: if api.is_uid(item): ref.append(api.get_object_by_uid(item)) continue if api.is_at_content(item): ref.append(api.get_object(item)) continue if api.is_path(item): ref.append(api.get_object_by_path(item)) continue if u.is_dict(item): uid = item.get(, None) if uid: obj = api.get_object_by_uid(uid) ref.append(obj) else: results = api.search(portal_type=self.allowed_types, **item) objs = map(api.get_object, results) ref.extend(objs) continue if isinstance(item, basestring): results = api.search(portal_type=self.allowed_types, title=item) objs = map(api.get_object, results) ref.extend(objs) continue if api.is_path(value): ref.append(api.get_object_by_path(value)) if not self.multi_valued: if len(ref) > 1: raise ValueError("Multiple values given for single valued " "field {}".format(repr(self.field))) else: ref = ref[0] return self._set(instance, ref, **kw)
Set the value of the reference field
16,613
def _make_examples(bam_file, data, ref_file, region_bed, out_file, work_dir): log_dir = utils.safe_makedir(os.path.join(work_dir, "log")) example_dir = utils.safe_makedir(os.path.join(work_dir, "examples")) if len(glob.glob(os.path.join(example_dir, "%s.tfrecord*.gz" % dd.get_sample_name(data)))) == 0: with tx_tmpdir(data) as tx_example_dir: cmd = ["dv_make_examples.py", "--cores", dd.get_num_cores(data), "--ref", ref_file, "--reads", bam_file, "--regions", region_bed, "--logdir", log_dir, "--examples", tx_example_dir, "--sample", dd.get_sample_name(data)] do.run(cmd, "DeepVariant make_examples %s" % dd.get_sample_name(data)) for fname in glob.glob(os.path.join(tx_example_dir, "%s.tfrecord*.gz" % dd.get_sample_name(data))): utils.copy_plus(fname, os.path.join(example_dir, os.path.basename(fname))) return example_dir
Create example pileup images to feed into variant calling.
16,614
def rewrite_references_json(json_content, rewrite_json): for ref in json_content: if ref.get("id") and ref.get("id") in rewrite_json: for key, value in iteritems(rewrite_json.get(ref.get("id"))): ref[key] = value return json_content
general purpose references json rewriting by matching the id value
16,615
def prepare_check(data): if not data: return None, {} if isinstance(data, str): return data, {} result = {} if "ID" in data: result["CheckID"] = data["ID"] for k in ("Node", "CheckID", "Name", "Notes", "Status", "ServiceID"): if k in data: result[k] = data[k] if list(result) == ["CheckID"]: return result["CheckID"], {} return result.get("CheckID"), result
Prepare check for catalog endpoint Parameters: data (Object or ObjectID): Check ID or check definition Returns: Tuple[str, dict]: where first is ID and second is check definition
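A brief illustration of the normalization performed above (the inputs are made up):

# a bare ID passes through
prepare_check("web-check")
# -> ("web-check", {})

# a definition dict: "ID" is renamed to "CheckID" and only the known keys are kept
prepare_check({"ID": "web-check", "Name": "HTTP check", "Status": "passing"})
# -> ("web-check", {"CheckID": "web-check", "Name": "HTTP check", "Status": "passing"})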
16,616
def _copyAllocatedStates(self): if self.verbosity > 1 or self.retrieveLearningStates: (activeT, activeT1, predT, predT1) = self.cells4.getLearnStates() self.lrnActiveState[] = activeT1.reshape((self.numberOfCols, self.cellsPerColumn)) self.lrnActiveState[] = activeT.reshape((self.numberOfCols, self.cellsPerColumn)) self.lrnPredictedState[] = predT1.reshape((self.numberOfCols, self.cellsPerColumn)) self.lrnPredictedState[] = predT.reshape((self.numberOfCols, self.cellsPerColumn)) if self.allocateStatesInCPP: assert False (activeT, activeT1, predT, predT1, colConfidenceT, colConfidenceT1, confidenceT, confidenceT1) = self.cells4.getStates() self.cellConfidence[] = confidenceT.reshape((self.numberOfCols, self.cellsPerColumn)) self.cellConfidence[] = confidenceT1.reshape((self.numberOfCols, self.cellsPerColumn)) self.colConfidence[] = colConfidenceT.reshape(self.numberOfCols) self.colConfidence[] = colConfidenceT1.reshape(self.numberOfCols) self.infActiveState[] = activeT1.reshape((self.numberOfCols, self.cellsPerColumn)) self.infActiveState[] = activeT.reshape((self.numberOfCols, self.cellsPerColumn)) self.infPredictedState[] = predT1.reshape((self.numberOfCols, self.cellsPerColumn)) self.infPredictedState[] = predT.reshape((self.numberOfCols, self.cellsPerColumn))
If state is allocated in CPP, copy over the data into our numpy arrays.
16,617
def merge_dicts(base, updates): if not base: base = dict() if not updates: updates = dict() z = base.copy() z.update(updates) return z
Given two dicts, merge them into a new dict as a shallow copy. Parameters ---------- base: dict The base dictionary. updates: dict Secondary dictionary whose values override the base.
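For example (values from the second dict win on key collisions, and None inputs are treated as empty):

merge_dicts({"a": 1, "b": 2}, {"b": 20, "c": 3})   # -> {"a": 1, "b": 20, "c": 3}
merge_dicts(None, {"x": 1})                        # -> {"x": 1}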
16,618
def _get_new_column_header(self, vcf_reader): mutect_dict = self._build_mutect_dict(vcf_reader.metaheaders) new_header_list = [] required_keys = set([self._NORMAL_SAMPLE_KEY, self._TUMOR_SAMPLE_KEY]) mutect_keys = set(mutect_dict.keys()) if not required_keys.issubset(mutect_keys): raise utils.JQException("Unable to determine normal " "and tumor sample ordering " "based on MuTect metaheader.") for field_name in vcf_reader.column_header.split("\t"): if field_name == mutect_dict[self._NORMAL_SAMPLE_KEY]: field_name = "NORMAL" elif field_name == mutect_dict[self._TUMOR_SAMPLE_KEY]: field_name = "TUMOR" new_header_list.append(field_name) return "\t".join(new_header_list)
Returns a standardized column header. MuTect sample headers include the name of the input alignment, which is nice, but doesn't match up with the sample names reported in Strelka or VarScan. To fix this, we use the sample names from the MuTect metaheader command line to identify the normal and tumor columns and replace them with NORMAL and TUMOR.
16,619
def submit_vasp_directory(self, rootdir, authors, projects=None, references=, remarks=None, master_data=None, master_history=None, created_at=None, ncpus=None): from pymatgen.apps.borg.hive import VaspToComputedEntryDrone from pymatgen.apps.borg.queen import BorgQueen drone = VaspToComputedEntryDrone(inc_structure=True, data=["filename", "initial_structure"]) queen = BorgQueen(drone, number_of_drones=ncpus) queen.parallel_assimilate(rootdir) structures = [] metadata = [] histories = [] for e in queen.get_data(): structures.append(e.structure) m = { "_vasp": { "parameters": e.parameters, "final_energy": e.energy, "final_energy_per_atom": e.energy_per_atom, "initial_structure": e.data["initial_structure"].as_dict() } } if "history" in e.parameters: histories.append(e.parameters["history"]) if master_data is not None: m.update(master_data) metadata.append(m) if master_history is not None: histories = master_history * len(structures) return self.submit_structures( structures, authors, projects=projects, references=references, remarks=remarks, data=metadata, histories=histories, created_at=created_at)
Assimilates all vasp run directories beneath a particular directory using BorgQueen to obtain structures, and then submits them to the Materials Project as SNL files. VASP-related metadata like the initial structure and final energies is automatically incorporated. .. note:: As of now, this MP REST feature is open only to a select group of users. Opening up submissions to all users is being planned for the future. Args: rootdir (str): Rootdir to start assimilating VASP runs from. authors: *List* of {"name":'', "email":''} dicts, *list* of Strings as 'John Doe <[email protected]>', or a single String with commas separating authors. The same list of authors should apply to all runs. projects ([str]): List of Strings ['Project A', 'Project B']. This applies to all structures. references (str): A String in BibTeX format. Again, this applies to all structures. remarks ([str]): List of Strings ['Remark A', 'Remark B'] master_data (dict): A free form dict. Namespaced at the root level with an underscore, e.g. {"_materialsproject":<custom data>}. This data is added to all structures detected in the directory, in addition to other vasp data on a per structure basis. master_history: A master history to be added to all entries. created_at (datetime): A datetime object ncpus (int): Number of cpus to use when using BorgQueen to assimilate. Defaults to None, which means serial.
16,620
def table_to_csv(table, engine, filepath, chunksize=1000, overwrite=False): sql = select([table]) sql_to_csv(sql, engine, filepath, chunksize)
Export entire table to a csv file. :param table: :class:`sqlalchemy.Table` instance. :param engine: :class:`sqlalchemy.engine.base.Engine`. :param filepath: file path. :param chunksize: number of rows written to csv each time. :param overwrite: bool, if True, avoid overwriting an existing file. **Chinese documentation** Write all of the data in the table to a csv file.
16,621
def fetch_uri(self, uri, start=None, end=None): namespace, alias = uri_re.match(uri).groups() return self.fetch(alias=alias, namespace=namespace, start=start, end=end)
fetch sequence for URI/CURIE of the form namespace:alias, such as NCBI:NM_000059.3.
16,622
async def login(self, email: str, password: str) -> bool: login_resp = await self._request( , API_URL_USER, json={ : , : , : { : email, : password, : }, : 0 }) _LOGGER.debug(, login_resp) if login_resp.get() != 0: return False self.account_id = login_resp[][] return True
Login to the profile.
16,623
def respects_language(fun):
    @wraps(fun)
    def _inner(*args, **kwargs):
        # keyword name inferred from the docstring below (literal lost in extraction)
        with respect_language(kwargs.pop('language', None)):
            return fun(*args, **kwargs)
    return _inner
Decorator for tasks with respect to site's current language. You can use this decorator on your tasks together with default @task decorator (remember that the task decorator must be applied last). See also the with-statement alternative :func:`respect_language`. **Example**: .. code-block:: python @task @respects_language def my_task() # localize something. The task will then accept a ``language`` argument that will be used to set the language in the task, and the task can thus be called like: .. code-block:: python from django.utils import translation from myapp.tasks import my_task # Pass the current language on to the task my_task.delay(language=translation.get_language()) # or set the language explicitly my_task.delay(language='no.no')
16,624
def connection_key(self): return "{host}:{namespace}:{username}".format(host=self.host, namespace=self.namespace, username=self.username)
Return an index key used to cache the sampler connection.
16,625
def _layout(dict_vars, dict_vars_extra): desc = [(v, m.description) for v, m in dict_vars.items()] desc.extend((v, baredoc(m.description)) for v, m in dict_vars_extra.items()) _pretty_print(desc, min_col_width=26)
Print nicely [(var, description)] from phyvars
16,626
def http_purge_url(url):
    # string literals below were lost in extraction and are reconstructed from context
    url = urlparse(url)
    connection = HTTPConnection(url.hostname, url.port or 80)
    path = url.path or '/'
    connection.request('PURGE',
                       '%s?%s' % (path, url.query) if url.query else path,
                       '',
                       {'Host': '%s:%s' % (url.hostname, url.port) if url.port else url.hostname})
    response = connection.getresponse()
    if response.status != 200:
        # log message text is a placeholder; the original wording was lost
        logging.error('Purge failed with status: %s' % response.status)
    return response
Do an HTTP PURGE of the given asset. The URL is run through urlparse and must point to the varnish instance not the varnishadm
16,627
def load_external_components(typesys):
    from iotile.core.dev.registry import ComponentRegistry

    reg = ComponentRegistry()
    modules = reg.list_components()
    typelibs = reduce(lambda x, y: x + y,
                      [reg.find_component(x).find_products() for x in modules], [])
    for lib in typelibs:
        if lib.endswith('.py'):  # extension inferred from the 3-character strip below
            lib = lib[:-3]
        typesys.load_external_types(lib)
Load all external types defined by iotile plugins. This allows plugins to register their own types for type annotations and allows all registered iotile components that have associated type libraries to add themselves to the global type system.
16,628
def xml_compare(expected, found): if expected == found: return True if set(expected.items()) != set(found.items()): return False expected_children = list(expected) found_children = list(found) if len(expected_children) != len(found_children): return False if not all([xml_compare(a, b) for a, b in zip(expected_children, found_children)]): return False if (expected.text is None or expected.text.strip() == "") \ and (found.text is None or found.text.strip() == ""): return True else: return expected.tag == found.tag and expected.text == found.text \ and expected.attrib == found.attrib
Checks equality of two ``ElementTree`` objects. :param expected: An ``ElementTree`` object. :param found: An ``ElementTree`` object. :return: ``Boolean``, whether the two objects are equal.
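An illustrative check using the standard library (the sample XML is made up):

import xml.etree.ElementTree as ET

a = ET.fromstring("<root id='1'><child>text</child></root>")
b = ET.fromstring("<root id='1'><child>text</child></root>")
c = ET.fromstring("<root id='2'><child>text</child></root>")

xml_compare(a, b)  # True: same attributes, children and text
xml_compare(a, c)  # False: the id attribute differs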
16,629
def get_output_structure(self): bohr_to_angstrom = 0.529177249 natoms = int(float(self._get_line(, self.outputf).split()[-1])) alat = float(self._get_line(, self.outputf).split()[-1].split()[0]) unit_cell = [] with open(self.outputf, ) as fp: for line in fp: if "crystal axes:" in line: for i in range(3): unit_cell.append([float(j)*alat*bohr_to_angstrom for j in next(fp).split()[-1].split()[0].split()]) break if len(unit_cell) == 0: raise Exception() coords = [] ; atom_symbols = [] with open(self.outputf, ) as fp: for line in fp: if "site n." in line and "atom" in line and "positions" in line and "alat units" in line: for i in range(natoms): coordline = next(fp) atom_symbols.append(.join([i for i in coordline.split()[1] if not i.isdigit()])) coord_conv_factor = alat*bohr_to_angstrom coords.append([float(j)*coord_conv_factor for j in coordline.rstrip().split()[-1].split()[-1].split()[0].split()]) break if len(coords) == 0: raise Exception() if type(self.is_relaxed()) == type(None): structure = Atoms(symbols=atom_symbols, cell=unit_cell, pbc=True) structure.set_positions(coords) return structure else: with open(self.outputf) as fp: for line in fp: if "Begin final coordinates" in line: if in next(fp): next(fp) unit_cell = [] cellheader = next(fp) if in cellheader.lower(): cell_conv_factor = bohr_to_angstrom elif in cellheader.lower(): cell_conv_factor = 1.0 else: alat = float(cellheader.split()[-1].replace(, )) cell_conv_factor = alat*bohr_to_angstrom for i in range(3): unit_cell.append([float(j)*cell_conv_factor for j in next(fp).split()]) next(fp) coordtype = next(fp).split()[-1].replace(, ).replace(, ) if coordtype == : coord_conv_factor = bohr_to_angstrom elif coordtype == or coordtype == : coord_conv_factor = 1.0 else: coord_conv_factor = alat*bohr_to_angstrom coords = [] for i in range(natoms): coordline = next(fp).split() coords.append([float(j)*coord_conv_factor for j in coordline[1:4]]) structure = Atoms(symbols=atom_symbols, cell=unit_cell, pbc=True) if coordtype == : structure.set_scaled_positions(coords) else: structure.set_positions(coords) return structure raise Exception()
Determine the structure from the output
16,630
def format_message(self, msg): return {: int(msg.created * 1000), : self.format(msg), : self.log_stream or msg.name, : self.log_group}
format message.
16,631
def get_tournament_prize_pool(self, leagueid=None, **kwargs): if not in kwargs: kwargs[] = leagueid url = self.__build_url(urls.GET_TOURNAMENT_PRIZE_POOL, **kwargs) req = self.executor(url) if self.logger: self.logger.info(.format(url)) if not self.__check_http_err(req.status_code): return response.build(req, url, self.raw_mode)
Returns a dictionary that includes community funded tournament prize pools :param leagueid: (int, optional) :return: dictionary of prize pools, see :doc:`responses </responses>`
16,632
def todegdec(origin): try: return float(origin) except ValueError: pass m = dms_re.search(origin) if m: degrees = int(m.group()) minutes = float(m.group()) seconds = float(m.group()) return degrees + minutes / 60 + seconds / 3600 m = mindec_re.search(origin) if m: degrees = int(m.group()) minutes = float(m.group()) return degrees + minutes / 60
Convert from [+/-]DDD°MMM'SSS.SSSS" or [+/-]DDD°MMM.MMMM' to [+/-]DDD.DDDDD
16,633
def to_dict(self): fields = dict(vars(self).items()) if self.populated: fields[] = [] fields[] = [] for ip in self.ip_addresses: fields[].append({ : ip.address, : ip.access, : ip.family }) for storage in self.storage_devices: fields[].append({ : storage.address, : storage.uuid, : storage.size, : storage.title, : storage.type, }) del fields[] del fields[] return fields
Prepare a JSON serializable dict for read-only purposes. Includes storages and IP-addresses. Use prepare_post_body for POST and .save() for PUT.
16,634
def import_data(self, data): _completed_num = 0 for trial_info in data: logger.info("Importing data, current processing progress %s / %s" %(_completed_num, len(data))) _completed_num += 1 if self.algorithm_name == : return assert "parameter" in trial_info _params = trial_info["parameter"] assert "value" in trial_info _value = trial_info[] if not _value: logger.info("Useless trial data, value is %s, skip this trial data." %_value) continue self.supplement_data_num += 1 _parameter_id = .join(["ImportData", str(self.supplement_data_num)]) self.total_data[_parameter_id] = _add_index(in_x=self.json, parameter=_params) self.receive_trial_result(parameter_id=_parameter_id, parameters=_params, value=_value) logger.info("Successfully import data to TPE/Anneal tuner.")
Import additional data for tuning Parameters ---------- data: a list of dictionaries, each of which has at least two keys, 'parameter' and 'value'
16,635
def scan_forever(queue, *args, **kwargs): process_once_now = kwargs.get(, True) if process_once_now: for work in scan(queue, *args, **kwargs): yield work while True: with open(fsq_path.trigger(queue), ) as t: t.read(1) for work in scan(queue, *args, **kwargs): yield work
Return an infinite iterator over an fsq queue that blocks waiting for the queue trigger. Work is yielded as FSQWorkItem objects when available, assuming the default generator (FSQScanGenerator) is in use. Essentially, this function wraps fsq.scan() and blocks for more work. It takes all the same parameters as scan(), plus process_once_now, which is a boolean to determine if an initial .scan() is run before listening to the trigger. This argument defaults to True.
16,636
def _ProcessAudio(self, tag, wall_time, step, audio): event = AudioEvent(wall_time=wall_time, step=step, encoded_audio_string=audio.encoded_audio_string, content_type=audio.content_type, sample_rate=audio.sample_rate, length_frames=audio.length_frames) self.audios.AddItem(tag, event)
Processes an audio event by adding it to accumulated state.
16,637
def run(self, plugins, context, callback=None, callback_args=[]): self.data["state"]["is_running"] = True util.timer("publishing") stats = {"requestCount": self.host.stats()["totalRequestCount"]} def on_next(result): if isinstance(result, StopIteration): return on_finished(str(result)) self.data["models"]["item"].update_with_result(result) self.data["models"]["result"].update_with_result(result) util.defer(self.host.context, callback=update_context) def update_context(ctx): item_model = self.data["models"]["item"] instance_items = {item.id: item for item in item_model.instances} for instance in ctx: id = instance.id item = instance_items.get(id) if item is not None: proxy = next((i for i in context if i.id == id), None) update_instance(item, proxy, instance.data) continue context.append(instance) item_model.add_instance(instance.to_json()) if len(ctx) < item_model.instance_count(): remove_instance(ctx, instance_items) util.defer(lambda: next(iterator), callback=on_next) def update_instance(item, proxy, data): item.isToggled = data.get("publish", True) item.optional = data.get("optional", True) item.category = data.get("category", data["family"]) families = [data["family"]] families.extend(data.get("families", [])) item.familiesConcatenated = ", ".join(families) if proxy is None: return proxy.data["publish"] = data.get("publish", True) proxy.data["family"] = data["family"] proxy.data["families"] = data.get("families", []) def remove_instance(ctx, items): instances = {i.id: i for i in context} instance_ids = set(i.id for i in ctx) instance_ids.add(ctx.id) for id, item in items.items(): if id not in instance_ids: self.data["models"]["item"].remove_instance(item) context.remove(instances[id]) def on_finished(message=None): self.data["state"]["is_running"] = False self.finished.emit() if message: self.info.emit(message) stats["requestCount"] -= self.host.stats()["totalRequestCount"] util.timer_end("publishing", "Spent %.2f ms resetting") util.echo("Made %i requests during publish." % abs(stats["requestCount"])) if callback: callback(*callback_args) iterator = self.iterator(plugins, context) util.defer(lambda: next(iterator), callback=on_next)
Commence asynchronous tasks This method runs through the provided `plugins` in an asynchronous manner, interrupted by either completion or failure of a plug-in. Inbetween processes, the GUI is fed information from the task and redraws itself. Arguments: plugins (list): Plug-ins to process context (list): Instances to process callback (func, optional): Called on finish callback_args (list, optional): Arguments passed to callback
16,638
def polylog2(x):
    # Most of this function's body (the coefficient tables p and q, the range
    # splits at 0.7 and 0.99, the offset selection, and the out-of-range check
    # whose message reads "Approximation is valid between 0 and 1 only.") was
    # lost in extraction; only the final evaluation survives.
    x = x - offset
    return horner(p, x)/horner(q, x)
r'''Simple function to calculate PolyLog(2, x) from ranges 0 <= x <= 1, with relative error guaranteed to be < 1E-7 from 0 to 0.99999. This is a Pade approximation, with three coefficient sets with splits at 0.7 and 0.99. An exception is raised if x is under 0 or above 1. Parameters ---------- x : float Value to evaluate PolyLog(2, x) T Returns ------- y : float Evaluated result Notes ----- Efficient (2-4 microseconds). No implementation of this function exists in SciPy. Derived with mpmath's pade approximation. Required for the entropy integral of :obj:`thermo.heat_capacity.Zabransky_quasi_polynomial`. Examples -------- >>> polylog2(0.5) 0.5822405264516294
16,639
def decrypt_file(file, key):
    # '.enc' and the binary file modes were lost in extraction; restored from
    # the error message and the Fernet bytes interface
    if not file.endswith('.enc'):
        raise ValueError("%s does not end with .enc" % file)
    fer = Fernet(key)
    with open(file, 'rb') as f:
        decrypted_file = fer.decrypt(f.read())
    with open(file[:-4], 'wb') as f:
        f.write(decrypted_file)
    os.chmod(file[:-4], 0o600)
Decrypts the file ``file``. The encrypted file is assumed to end with the ``.enc`` extension. The decrypted file is saved to the same location without the ``.enc`` extension. The permissions on the decrypted file are automatically set to 0o600. See also :func:`doctr.local.encrypt_file`.
16,640
def get(method, hmc, uri, uri_parms, logon_required): cpc_oid = uri_parms[0] query_str = uri_parms[1] try: cpc = hmc.cpcs.lookup_by_oid(cpc_oid) except KeyError: raise InvalidResourceError(method, uri) result_lpars = [] if not cpc.dpm_enabled: filter_args = parse_query_parms(method, uri, query_str) for lpar in cpc.lpars.list(filter_args): result_lpar = {} for prop in lpar.properties: if prop in (, , ): result_lpar[prop] = lpar.properties[prop] result_lpars.append(result_lpar) return {: result_lpars}
Operation: List Logical Partitions of CPC (empty result in DPM mode).
16,641
def koji_instance(config, message, instance=None, *args, **kw): instance = kw.get(, instance) if not instance: return False instances = [item.strip() for item in instance.split()] return message[].get() in instances
Particular koji instances You may not have even known it, but we have multiple instances of the koji build system. There is the **primary** buildsystem at `koji.fedoraproject.org <http://koji.fedoraproject.org>`_ and also secondary instances for `ppc <http://ppc.koji.fedoraproject.org>`_, `arm <http://arm.koji.fedoraproject.org>`_, and `s390 <http://s390.koji.fedoraproject.org>`_. With this rule, you can limit messages to only those from particular koji instances (like the **primary** one if you want to ignore the secondary ones). You should use this rule **in combination** with other koji rules so you get only a *certain subset* of messages from one instance. You almost certainly do not want **all** messages from a given instance. You can specify several instances by separating them with a comma ',', i.e.: ``primary,ppc``.
16,642
def get_bss_load(_, data): answers = { : (data[1] << 8) | data[0], : data[2] / 255.0, : (data[4] << 8) | data[3], } return answers
http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n935. Positional arguments: data -- bytearray data to read. Returns: Dict.
16,643
def add_file(self, file_obj): BalancedDiscStorage._check_interface(file_obj) file_hash = self._get_hash(file_obj) dir_path = self._create_dir_path(file_hash) final_path = os.path.join(dir_path, file_hash) def copy_to_file(from_file, to_path): with open(to_path, "wb") as out_file: for part in self._get_file_iterator(from_file): out_file.write(part) try: copy_to_file(from_file=file_obj, to_path=final_path) except Exception: os.unlink(final_path) raise return PathAndHash(path=final_path, hash=file_hash)
Add new file into the storage. Args: file_obj (file): Opened file-like object. Returns: obj: Path where the file-like object is stored contained with hash\ in :class:`.PathAndHash` object. Raises: AssertionError: If the `file_obj` is not file-like object. IOError: If the file couldn't be added to storage.
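A short sketch of adding a file; the constructor argument and the PathAndHash attribute names are assumptions, and the paths are illustrative:

storage = BalancedDiscStorage("/var/lib/storage")   # root path argument assumed
with open("report.pdf", "rb") as f:
    stored = storage.add_file(f)
print(stored.path, stored.hash)   # attribute names assumed from PathAndHash(path=..., hash=...)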
16,644
def _Close(self): super(EWFFile, self)._Close() for file_object in self._file_objects: file_object.close() self._file_objects = []
Closes the file-like object.
16,645
def bucket_policy_to_dict(policy): import json if not isinstance(policy, dict): policy = json.loads(policy) statements = {s[]: s for s in policy[]} d = {} for rw in (, ): for prefix in TOP_LEVEL_DIRS: sid = rw.title() + prefix.title() if sid in statements: if isinstance(statements[sid][][], list): for principal in statements[sid][][]: user_name = principal.split().pop() d[(user_name, prefix)] = rw[0] else: user_name = statements[sid][][].split().pop() d[(user_name, prefix)] = rw[0] return d
Produce a dictionary of read, write permissions for an existing bucket policy document
16,646
def centroid(self):
    if self.v is None:
        raise ValueError()
    return np.mean(self.v, axis=0)
Return the geometric center.
16,647
def save_aggregate_report_to_elasticsearch(aggregate_report, index_suffix=None, monthly_indexes=False): logger.debug("Saving aggregate report to Elasticsearch") aggregate_report = aggregate_report.copy() metadata = aggregate_report["report_metadata"] org_name = metadata["org_name"] report_id = metadata["report_id"] domain = aggregate_report["policy_published"]["domain"] begin_date = human_timestamp_to_datetime(metadata["begin_date"]) end_date = human_timestamp_to_datetime(metadata["end_date"]) begin_date_human = begin_date.strftime("%Y-%m-%d %H:%M:%S") end_date_human = end_date.strftime("%Y-%m-%d %H:%M:%S") if monthly_indexes: index_date = begin_date.strftime("%Y-%m") else: index_date = begin_date.strftime("%Y-%m-%d") aggregate_report["begin_date"] = begin_date aggregate_report["end_date"] = end_date date_range = [aggregate_report["begin_date"], aggregate_report["end_date"]] org_name_query = Q(dict(match=dict(org_name=org_name))) report_id_query = Q(dict(match=dict(report_id=report_id))) domain_query = Q(dict(match={"published_policy.domain": domain})) begin_date_query = Q(dict(match=dict(date_range=begin_date))) end_date_query = Q(dict(match=dict(date_range=end_date))) search = Search(index="dmarc_aggregate*") query = org_name_query & report_id_query & domain_query query = query & begin_date_query & end_date_query search.query = query existing = search.execute() if len(existing) > 0: raise AlreadySaved("An aggregate report ID {0} from {1} about {2} " "with a date range of {3} UTC to {4} UTC already " "exists in " "Elasticsearch".format(report_id, org_name, domain, begin_date_human, end_date_human)) published_policy = _PublishedPolicy( domain=aggregate_report["policy_published"]["domain"], adkim=aggregate_report["policy_published"]["adkim"], aspf=aggregate_report["policy_published"]["aspf"], p=aggregate_report["policy_published"]["p"], sp=aggregate_report["policy_published"]["sp"], pct=aggregate_report["policy_published"]["pct"], fo=aggregate_report["policy_published"]["fo"] ) for record in aggregate_report["records"]: agg_doc = _AggregateReportDoc( xml_schemea=aggregate_report["xml_schema"], org_name=metadata["org_name"], org_email=metadata["org_email"], org_extra_contact_info=metadata["org_extra_contact_info"], report_id=metadata["report_id"], date_range=date_range, errors=metadata["errors"], published_policy=published_policy, source_ip_address=record["source"]["ip_address"], source_country=record["source"]["country"], source_reverse_dns=record["source"]["reverse_dns"], source_base_domain=record["source"]["base_domain"], message_count=record["count"], disposition=record["policy_evaluated"]["disposition"], dkim_aligned=record["policy_evaluated"]["dkim"] == "pass", spf_aligned=record["policy_evaluated"]["spf"] == "pass", header_from=record["identifiers"]["header_from"], envelope_from=record["identifiers"]["envelope_from"], envelope_to=record["identifiers"]["envelope_to"] ) for override in record["policy_evaluated"]["policy_override_reasons"]: agg_doc.add_policy_override(type_=override["type"], comment=override["comment"]) for dkim_result in record["auth_results"]["dkim"]: agg_doc.add_dkim_result(domain=dkim_result["domain"], selector=dkim_result["selector"], result=dkim_result["result"]) for spf_result in record["auth_results"]["spf"]: agg_doc.add_spf_result(domain=spf_result["domain"], scope=spf_result["scope"], result=spf_result["result"]) index = "dmarc_aggregate" if index_suffix: index = "{0}_{1}".format(index, index_suffix) index = "{0}-{1}".format(index, index_date) 
create_indexes([index]) agg_doc.meta.index = index try: agg_doc.save() except Exception as e: raise ElasticsearchError( "Elasticsearch error: {0}".format(e.__str__()))
Saves a parsed DMARC aggregate report to Elasticsearch Args: aggregate_report (OrderedDict): A parsed aggregate report index_suffix (str): The suffix of the name of the index to save to monthly_indexes (bool): Use monthly indexes instead of daily indexes Raises: AlreadySaved
16,648
def _save_if_needed(request, response_content):
    if request.save_response:
        file_path = request.get_file_path()
        create_parent_folder(file_path)
        # binary mode restored (response_content is bytes); the debug message
        # text was lost in extraction, so the format string below is a placeholder
        with open(file_path, 'wb') as file:
            file.write(response_content)
        LOGGER.debug('Data from %s was saved to %s', request.url, file_path)
Save data to disk, if requested by the user :param request: Download request :type request: DownloadRequest :param response_content: content of the download response :type response_content: bytes
16,649
def get_representation(self, prefix="", suffix="\n"): res = prefix + "Section " + self.get_section_name().upper() + suffix return res
return the string representation of the current object.
16,650
def get_decode_value(self): if self._store_type == PUBLIC_KEY_STORE_TYPE_HEX: value = bytes.fromhex(self._value) elif self._store_type == PUBLIC_KEY_STORE_TYPE_BASE64: value = b64decode(self._value) elif self._store_type == PUBLIC_KEY_STORE_TYPE_BASE85: value = b85decode(self._value) elif self._store_type == PUBLIC_KEY_STORE_TYPE_JWK: raise NotImplementedError else: value = self._value return value
Return the key value based on its storage type.
16,651
async def connect(self, hostname=None, port=None, tls=False, **kwargs): if not port: if tls: port = DEFAULT_TLS_PORT else: port = rfc1459.protocol.DEFAULT_PORT return await super().connect(hostname, port, tls=tls, **kwargs)
Connect to a server, optionally over TLS. See pydle.features.RFC1459Support.connect for misc parameters.
16,652
def require_condition(cls, expr, message, *format_args, **format_kwds): if not expr: raise cls(message, *format_args, **format_kwds)
used to assert a certain state. If the expression renders a false value, an exception will be raised with the supplied message :param: message: The failure message to attach to the raised Buzz :param: expr: A boolean value indicating an evaluated expression :param: format_args: Format arguments. Follows str.format convention :param: format_kwds: Format keyword args. Follows str.format convetion
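An illustrative call, assuming a Buzz exception class that exposes this classmethod (response and url are made-up names):

Buzz.require_condition(
    response.status_code == 200,
    "request to {} failed with status {}",
    url, response.status_code,
)  # raises Buzz with the formatted message when the expression is falsy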
16,653
def credentials(self): if self._credentials is None: self._credentials, _ = google.auth.default() return self._credentials
google.auth.credentials.Credentials: Credentials to use for queries performed through IPython magics Note: These credentials do not need to be explicitly defined if you are using Application Default Credentials. If you are not using Application Default Credentials, manually construct a :class:`google.auth.credentials.Credentials` object and set it as the context credentials as demonstrated in the example below. See `auth docs`_ for more information on obtaining credentials. Example: Manually setting the context credentials: >>> from google.cloud.bigquery import magics >>> from google.oauth2 import service_account >>> credentials = (service_account ... .Credentials.from_service_account_file( ... '/path/to/key.json')) >>> magics.context.credentials = credentials .. _auth docs: http://google-auth.readthedocs.io /en/latest/user-guide.html#obtaining-credentials
16,654
def search_channels(self, query, limit=25, offset=0): r = self.kraken_request(, , params={: query, : limit, : offset}) return models.Channel.wrap_search(r)
Search for channels and return them :param query: the query string :type query: :class:`str` :param limit: maximum number of results :type limit: :class:`int` :param offset: offset for pagination :type offset: :class:`int` :returns: A list of channels :rtype: :class:`list` of :class:`models.Channel` instances :raises: None
16,655
def _patch_expand_paths(self, settings, name, value): return [self._patch_expand_path(settings, name, item) for item in value]
Apply ``SettingsPostProcessor._patch_expand_path`` to each element in list. Args: settings (dict): Current settings. name (str): Setting name. value (list): List of paths to patch. Returns: list: Patched path list to an absolute path.
16,656
def assertSameType(a, b): if not isinstance(b, type(a)): raise NotImplementedError("This operation is only supported for " \ "elements of the same type. Instead found {} and {}". format(type(a), type(b)))
Raises an exception if @b is not an instance of type(@a)
16,657
def _path_polygon(self, points): "Low-level polygon-drawing routine." (xmin, ymin, xmax, ymax) = _compute_bounding_box(points) if invisible_p(xmax, ymax): return self.setbb(xmin, ymin) self.setbb(xmax, ymax) self.newpath() self.moveto(xscale(points[0][0]), yscale(points[0][1])) for point in points[1:]: self.lineto(xscale(point[0]), yscale(point[1])) self.closepath()
Low-level polygon-drawing routine.
16,658
def _revert_categories(self): for column, dtype in self._categories.items(): if column in self.columns: self[column] = self[column].astype(dtype)
Inplace conversion to categories.
16,659
def vx(self,*args,**kwargs): thiso= self(*args,**kwargs) if not len(thiso.shape) == 2: thiso= thiso.reshape((thiso.shape[0],1)) if len(thiso[:,0]) == 2: return thiso[1,:] if len(thiso[:,0]) != 4 and len(thiso[:,0]) != 6: raise AttributeError("orbit must track azimuth to use vx()") elif len(thiso[:,0]) == 4: theta= thiso[3,:] else: theta= thiso[5,:] return thiso[1,:]*nu.cos(theta)-thiso[2,:]*nu.sin(theta)
NAME: vx PURPOSE: return x velocity at time t INPUT: t - (optional) time at which to get the velocity vo= (Object-wide default) physical scale for velocities to use to convert use_physical= use to override Object-wide default for using a physical scale for output OUTPUT: vx(t) HISTORY: 2010-11-30 - Written - Bovy (NYU)
16,660
def export_node(bpmn_graph, export_elements, node, nodes_classification, order=0, prefix="", condition="", who="", add_join=False): node_type = node[1][consts.Consts.type] if node_type == consts.Consts.start_event: return BpmnDiagramGraphCsvExport.export_start_event(bpmn_graph, export_elements, node, nodes_classification, order=order, prefix=prefix, condition=condition, who=who) elif node_type == consts.Consts.end_event: return BpmnDiagramGraphCsvExport.export_end_event(export_elements, node, order=order, prefix=prefix, condition=condition, who=who) else: return BpmnDiagramGraphCsvExport.export_element(bpmn_graph, export_elements, node, nodes_classification, order=order, prefix=prefix, condition=condition, who=who, add_join=add_join)
General method for node exporting :param bpmn_graph: an instance of BpmnDiagramGraph class, :param export_elements: a dictionary object. The key is a node ID, value is a dictionary of parameters that will be used in exported CSV document, :param node: networkx.Node object, :param nodes_classification: dictionary of classification labels. Key - node id. Value - a list of labels, :param order: the order param of exported node, :param prefix: the prefix of exported node - if the task appears after some gateway, the prefix will identify the branch :param condition: the condition param of exported node, :param who: the who param of the exported node, :param add_join: boolean flag. Used to indicate if "Join" element should be added to CSV. :return: None or the next node object if the exported node was a gateway join.
16,661
def _add_none_handler(validation_callable, none_policy ): if none_policy is NonePolicy.SKIP: return _none_accepter(validation_callable) elif none_policy is NonePolicy.FAIL: return _none_rejecter(validation_callable) elif none_policy is NonePolicy.VALIDATE: return validation_callable else: raise ValueError( + str(none_policy))
Adds a wrapper or nothing around the provided validation_callable, depending on the selected policy :param validation_callable: :param none_policy: an int representing the None policy, see NonePolicy :return:
16,662
def symmetric_difference_update(self, that): _set = self._set _list = self._list _set.symmetric_difference_update(that) _list.clear() _list.update(_set) return self
Update the set, keeping only elements found in either *self* or *that*, but not in both.
16,663
def get_first_n_queues(self, n): try: while len(self.queues) < n: self.__fetch__() except StopIteration: pass values = list(self.queues.values()) missing = n - len(values) values.extend(iter([]) for n in range(missing)) return values
Run through the sequence until n queues are created and return them. If fewer are created, return those plus empty iterables to compensate.
16,664
def _shuffled_order(w, h): rowsize = 4 * w for row in range(0, rowsize * h, rowsize): for offset in range(row, row + w): for x in range(offset, offset + rowsize, w): yield x
Generator for the order of 4-byte values. 32bit channels are also encoded using delta encoding, but it make no sense to apply delta compression to bytes. It is possible to apply delta compression to 2-byte or 4-byte words, but it seems it is not the best way either. In PSD, each 4-byte item is split into 4 bytes and these bytes are packed together: "123412341234" becomes "111222333444"; delta compression is applied to the packed data. So we have to (a) decompress data from the delta compression and (b) recombine data back to 4-byte values.
16,665
def add(self, doc, attributes=None): doc_ref = str(doc[self._ref]) self._documents[doc_ref] = attributes or {} self.document_count += 1 for field_name, field in self._fields.items(): extractor = field.extractor field_value = doc[field_name] if extractor is None else extractor(doc) tokens = Tokenizer(field_value) terms = self.pipeline.run(tokens) field_ref = FieldRef(doc_ref, field_name) field_terms = defaultdict(int) self.field_term_frequencies[str(field_ref)] = field_terms self.field_lengths[str(field_ref)] = len(terms) for term in terms: term_key = str(term) field_terms[term_key] += 1 if term_key not in self.inverted_index: posting = {_field_name: {} for _field_name in self._fields} posting["_index"] = self.term_index self.term_index += 1 self.inverted_index[term_key] = posting if doc_ref not in self.inverted_index[term_key][field_name]: self.inverted_index[term_key][field_name][doc_ref] = defaultdict( list ) for metadata_key in self.metadata_whitelist: metadata = term.metadata[metadata_key] self.inverted_index[term_key][field_name][doc_ref][ metadata_key ].append(metadata)
Adds a document to the index. Before adding documents to the index it should have been fully setup, with the document ref and all fields to index already having been specified. The document must have a field name as specified by the ref (by default this is 'id') and it should have all fields defined for indexing, though None values will not cause errors. Args: - doc (dict): The document to be added to the index. - attributes (dict, optional): A set of attributes corresponding to the document, currently a single `boost` -> int will be taken into account.
16,666
def _read_header(stream, decoder, strict=False): name_len = stream.read_ushort() name = stream.read_utf8_string(name_len) required = bool(stream.read_uchar()) data_len = stream.read_ulong() pos = stream.tell() data = decoder.readElement() if strict and pos + data_len != stream.tell(): raise pyamf.DecodeError( "Data read from stream does not match header length") return (name, required, data)
Read AMF L{Message} header from the stream. @type stream: L{BufferedByteStream<pyamf.util.BufferedByteStream>} @param decoder: An AMF0 decoder. @param strict: Use strict decoding policy. Default is C{False}. Will raise a L{pyamf.DecodeError} if the data that was read from the stream does not match the header length. @return: A C{tuple} containing the name of the header, a C{bool} determining if understanding this header is required and the decoded data. @note: Quite what understanding required headers actually means is unknown.
16,667
def choose_one(things): choice = SystemRandom().randint(0, len(things) - 1) return things[choice].strip()
Returns a random entry from a list of things
16,668
def account_settings_update(self, data, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/core/account_settings#update-account-settings"
    api_path = "/api/v2/account/settings.json"
    return self.call(api_path, method="PUT", data=data, **kwargs)
https://developer.zendesk.com/rest_api/docs/core/account_settings#update-account-settings
16,669
def parse_datetime(value: Union[datetime, StrIntFloat]) -> datetime:
    # dictionary keys and short string literals below were lost in extraction
    # and are restored from context ('microsecond', 'tzinfo', 'Z', '0', '-')
    if isinstance(value, datetime):
        return value

    number = get_numeric(value)
    if number is not None:
        return from_unix_seconds(number)

    match = datetime_re.match(cast(str, value))
    if not match:
        raise errors.DateTimeError()

    kw = match.groupdict()
    if kw['microsecond']:
        kw['microsecond'] = kw['microsecond'].ljust(6, '0')

    tzinfo_str = kw.pop('tzinfo')
    if tzinfo_str == 'Z':
        tzinfo = timezone.utc
    elif tzinfo_str is not None:
        offset_mins = int(tzinfo_str[-2:]) if len(tzinfo_str) > 3 else 0
        offset = 60 * int(tzinfo_str[1:3]) + offset_mins
        if tzinfo_str[0] == '-':
            offset = -offset
        tzinfo = timezone(timedelta(minutes=offset))
    else:
        tzinfo = None

    kw_: Dict[str, Union[int, timezone]] = {k: int(v) for k, v in kw.items() if v is not None}
    kw_['tzinfo'] = tzinfo

    with change_exception(errors.DateTimeError, ValueError):
        return datetime(**kw_)
Parse a datetime/int/float/string and return a datetime.datetime. This function supports time zone offsets. When the input contains one, the output uses a timezone with a fixed offset from UTC. Raise ValueError if the input is well formatted but not a valid datetime. Raise ValueError if the input isn't well formatted.
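Two illustrative inputs based on the behavior described above (an ISO-8601 string with an offset, and a numeric Unix timestamp):

parse_datetime("2032-04-23T10:20:30.400+02:30")
# -> datetime(2032, 4, 23, 10, 20, 30, 400000, tzinfo=timezone(timedelta(minutes=150)))

parse_datetime(1493979010)
# -> 2017-05-05 10:10:10 in UTC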
16,670
def save(self, filesto, upload_to=None, name=None, secret=None, prefix=None, allowed=None, denied=None, max_size=None, **kwargs): if not filesto: return None upload_to = upload_to or self.upload_to secret = secret or self.secret prefix = prefix or self.prefix original_filename = filesto.filename allowed = allowed or self.allowed denied = denied or self.denied self.validate(filesto, allowed, denied, max_size) if callable(upload_to): filepath = upload_to(original_filename) else: filepath = upload_to oname, ext = os.path.splitext(original_filename) if name: new_name = name(original_filename) if callable(name) else name else: new_name = get_random_filename() if secret else prefix + oname filename = get_unique_filename(self.base_path, filepath, new_name, ext=ext) fullpath = os.path.join( make_dirs(self.base_path, filepath), filename ) filesto.save(fullpath) filesize = os.path.getsize(fullpath) if max_size and filesize > max_size: self.delete_file(fullpath) raise RequestEntityTooLarge return os.path.join(filepath, filename)
Except for `filesto`, all of these parameters are optional, so only bother setting the ones relevant to *this upload*. filesto : A `werkzeug.FileUploader`. upload_to : Relative path to where to upload secret : If True, instead of the original filename, a random one'll be used. prefix : To avoid race-conditions between users uploading files with the same name at the same time. If `secret` is True, this will be ignored. name : If set, it'll be used as the name of the uploaded file. Instead of a string, this can also be a callable. allowed : List of allowed file extensions. `None` to allow all of them. If the uploaded file doesn't have one of these extensions, an `UnsupportedMediaType` exception will be raised. denied : List of forbidden extensions. Set to `None` to disable. If the uploaded file *does* have one of these extensions, a `UnsupportedMediaType` exception will be raised. max_size : Maximum file size, in bytes, that file can have. Note: The attribute `max_content_length` defined in the `request` object has higher priority.
16,671
def _create_response_future(self, query, parameters, trace, custom_payload, timeout, execution_profile=EXEC_PROFILE_DEFAULT, paging_state=None, host=None): prepared_statement = None if isinstance(query, six.string_types): query = SimpleStatement(query) elif isinstance(query, PreparedStatement): query = query.bind(parameters) if self.cluster._config_mode == _ConfigMode.LEGACY: if execution_profile is not EXEC_PROFILE_DEFAULT: raise ValueError("Cannot specify execution_profile while using legacy parameters.") if timeout is _NOT_SET: timeout = self.default_timeout cl = query.consistency_level if query.consistency_level is not None else self.default_consistency_level serial_cl = query.serial_consistency_level if query.serial_consistency_level is not None else self.default_serial_consistency_level retry_policy = query.retry_policy or self.cluster.default_retry_policy row_factory = self.row_factory load_balancing_policy = self.cluster.load_balancing_policy spec_exec_policy = None else: execution_profile = self._maybe_get_execution_profile(execution_profile) if timeout is _NOT_SET: timeout = execution_profile.request_timeout cl = query.consistency_level if query.consistency_level is not None else execution_profile.consistency_level serial_cl = query.serial_consistency_level if query.serial_consistency_level is not None else execution_profile.serial_consistency_level retry_policy = query.retry_policy or execution_profile.retry_policy row_factory = execution_profile.row_factory load_balancing_policy = execution_profile.load_balancing_policy spec_exec_policy = execution_profile.speculative_execution_policy fetch_size = query.fetch_size if fetch_size is FETCH_SIZE_UNSET and self._protocol_version >= 2: fetch_size = self.default_fetch_size elif self._protocol_version == 1: fetch_size = None start_time = time.time() if self._protocol_version >= 3 and self.use_client_timestamp: timestamp = self.cluster.timestamp_generator() else: timestamp = None if isinstance(query, SimpleStatement): query_string = query.query_string statement_keyspace = query.keyspace if ProtocolVersion.uses_keyspace_flag(self._protocol_version) else None if parameters: query_string = bind_params(query_string, parameters, self.encoder) message = QueryMessage( query_string, cl, serial_cl, fetch_size, timestamp=timestamp, keyspace=statement_keyspace) elif isinstance(query, BoundStatement): prepared_statement = query.prepared_statement message = ExecuteMessage( prepared_statement.query_id, query.values, cl, serial_cl, fetch_size, timestamp=timestamp, skip_meta=bool(prepared_statement.result_metadata), result_metadata_id=prepared_statement.result_metadata_id) elif isinstance(query, BatchStatement): if self._protocol_version < 2: raise UnsupportedOperation( "BatchStatement execution is only supported with protocol version " "2 or higher (supported in Cassandra 2.0 and higher). 
Consider " "setting Cluster.protocol_version to 2 to support this operation.") statement_keyspace = query.keyspace if ProtocolVersion.uses_keyspace_flag(self._protocol_version) else None message = BatchMessage( query.batch_type, query._statements_and_parameters, cl, serial_cl, timestamp, statement_keyspace) message.tracing = trace message.update_custom_payload(query.custom_payload) message.update_custom_payload(custom_payload) message.allow_beta_protocol_version = self.cluster.allow_beta_protocol_version message.paging_state = paging_state spec_exec_plan = spec_exec_policy.new_plan(query.keyspace or self.keyspace, query) if query.is_idempotent and spec_exec_policy else None return ResponseFuture( self, message, query, timeout, metrics=self._metrics, prepared_statement=prepared_statement, retry_policy=retry_policy, row_factory=row_factory, load_balancer=load_balancing_policy, start_time=start_time, speculative_execution_plan=spec_exec_plan, host=host)
Returns the ResponseFuture before calling send_request() on it
16,672
def call_sockeye_train(model: str, bpe_dir: str, model_dir: str, log_fname: str, num_gpus: int, test_mode: bool = False): fnames = ["--source={}".format(os.path.join(bpe_dir, PREFIX_TRAIN + SUFFIX_SRC_GZ)), "--target={}".format(os.path.join(bpe_dir, PREFIX_TRAIN + SUFFIX_TRG_GZ)), "--validation-source={}".format(os.path.join(bpe_dir, PREFIX_DEV + SUFFIX_SRC_GZ)), "--validation-target={}".format(os.path.join(bpe_dir, PREFIX_DEV + SUFFIX_TRG_GZ)), "--output={}".format(model_dir)] command = [sys.executable, "-m", "sockeye.train"] + fnames + MODELS[model] if num_gpus > 0: command.append("--device-ids=-{}".format(num_gpus)) else: command.append("--use-cpu") if test_mode: command += MODEL_TEST_ARGS[model] command_fname = os.path.join(model_dir, FILE_COMMAND.format("sockeye.train")) if not os.path.exists(command_fname): with open(log_fname, "wb") as log: logging.info("sockeye.train: %s", model_dir) logging.info("Log: %s", log_fname) logging.info("(This step can take several days. See log file or TensorBoard for progress)") subprocess.check_call(command, stderr=log) logging.info("Command: %s", command_fname) print_command(command, command_fname)
Call sockeye.train with specified arguments on prepared inputs. Will resume partial training or skip training if model is already finished. Record command for future use. :param model: Type of translation model to train. :param bpe_dir: Directory of BPE-encoded input data. :param model_dir: Model output directory. :param log_fname: Location to write log file. :param num_gpus: Number of GPUs to use for training (0 for CPU). :param test_mode: Run in test mode, stopping after a small number of updates.
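A minimal invocation sketch for the entry above. The workspace layout, the "transformer" key into MODELS, and the GPU count are illustrative assumptions, not values from the source.

import os

workspace = os.path.join(os.getcwd(), "wmt_workspace")    # assumed layout
call_sockeye_train(model="transformer",                   # assumed key into MODELS
                   bpe_dir=os.path.join(workspace, "data.bpe"),
                   model_dir=os.path.join(workspace, "model.transformer"),
                   log_fname=os.path.join(workspace, "sockeye.train.log"),
                   num_gpus=1,
                   test_mode=True)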
16,673
def update_status(self, helper, status):
    if status:
        self.status(status[0])
        if status[0] == 0:
            self.add_long_output(status[1])
        else:
            self.add_summary(status[1])
Update the helper with the given (exit code, message) status tuple.
16,674
def get_available_user_FIELD_transitions(instance, user, field):
    for transition in get_available_FIELD_transitions(instance, field):
        if transition.has_perm(instance, user):
            yield transition
List of transitions available in current model state with all conditions met and user have rights on it
16,675
def acorr(blk, max_lag=None):
    if max_lag is None:
        max_lag = len(blk) - 1
    return [sum(blk[n] * blk[n + tau] for n in xrange(len(blk) - tau))
            for tau in xrange(max_lag + 1)]
Calculate the autocorrelation of a given 1-D block sequence. Parameters ---------- blk : An iterable with well-defined length. Don't use this function with Stream objects! max_lag : The size of the result, the lags you'd need. Defaults to ``len(blk) - 1``, since any lag beyond would result in zero. Returns ------- A list with lags from 0 up to max_lag, where its ``i``-th element has the autocorrelation for a lag equals to ``i``. Be careful with negative lags! You should use abs(lag) indexes when working with them. Examples -------- >>> seq = [1, 2, 3, 4, 3, 4, 2] >>> acorr(seq) # Default max_lag is len(seq) - 1 [59, 52, 42, 30, 17, 8, 2] >>> acorr(seq, 9) # Zeros at the end [59, 52, 42, 30, 17, 8, 2, 0, 0, 0] >>> len(acorr(seq, 3)) # Resulting length is max_lag + 1 4 >>> acorr(seq, 3) [59, 52, 42, 30]
16,676
def as_dict(self):
    d = {"comment": self.comment,
         "nkpoints": self.num_kpts,
         "generation_style": self.style.name,
         "kpoints": self.kpts,
         "usershift": self.kpts_shift,
         "kpts_weights": self.kpts_weights,
         "coord_type": self.coord_type,
         "labels": self.labels,
         "tet_number": self.tet_number,
         "tet_weight": self.tet_weight,
         "tet_connections": self.tet_connections}
    optional_paras = ["genvec1", "genvec2", "genvec3", "shift"]
    for para in optional_paras:
        if para in self.__dict__:
            d[para] = self.__dict__[para]
    d["@module"] = self.__class__.__module__
    d["@class"] = self.__class__.__name__
    return d
json friendly dict representation of Kpoints
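A hedged round-trip sketch, assuming the class above is pymatgen.io.vasp.inputs.Kpoints (which also provides the gamma_automatic constructor and the MSONable from_dict):

from pymatgen.io.vasp.inputs import Kpoints

kpts = Kpoints.gamma_automatic(kpts=(4, 4, 4))
d = kpts.as_dict()                          # json-friendly dict shown above
print(d["generation_style"], d["kpoints"])  # e.g. Gamma [[4, 4, 4]]
restored = Kpoints.from_dict(d)             # round trip back to a Kpoints object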
16,677
def coroutine(func):
    @wraps(func)
    def initialization(*args, **kwargs):
        start = func(*args, **kwargs)
        next(start)
        return start
    return initialization
Initializes coroutine essentially priming it to the yield statement. Used as a decorator over functions that generate coroutines. .. code-block:: python # Basic coroutine producer/consumer pattern from translate import coroutine @coroutine def coroutine_foo(bar): try: while True: baz = (yield) bar.send(baz) except GeneratorExit: bar.close() :param func: Unprimed Generator :type func: Function :return: Initialized Coroutine :rtype: Function
16,678
def get_properties(self):
    variables = self.model.nodes()
    property_tag = {}
    for variable in sorted(variables):
        properties = self.model.node[variable]
        properties = collections.OrderedDict(sorted(properties.items()))
        property_tag[variable] = []
        for prop, val in properties.items():
            property_tag[variable].append(str(prop) + " = " + str(val))
    return property_tag
Add property to variables in BIF Returns ------- dict: dict of type {variable: list of properties } Example ------- >>> from pgmpy.readwrite import BIFReader, BIFWriter >>> model = BIFReader('dog-problem.bif').get_model() >>> writer = BIFWriter(model) >>> writer.get_properties() {'bowel-problem': ['position = (335, 99)'], 'dog-out': ['position = (300, 195)'], 'family-out': ['position = (257, 99)'], 'hear-bark': ['position = (296, 268)'], 'light-on': ['position = (218, 195)']}
16,679
def get_results(self, hql, schema='default', fetch_size=None, hive_conf=None):
    results_iter = self._get_results(hql, schema,
                                     fetch_size=fetch_size, hive_conf=hive_conf)
    header = next(results_iter)
    results = {
        'data': list(results_iter),
        'header': header
    }
    return results
Get results of the provided hql in target schema. :param hql: hql to be executed. :type hql: str or list :param schema: target schema, default to 'default'. :type schema: str :param fetch_size: max size of result to fetch. :type fetch_size: int :param hive_conf: hive_conf to execute alone with the hql. :type hive_conf: dict :return: results of hql execution, dict with data (list of results) and header :rtype: dict
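A hedged usage sketch, assuming this method lives on Airflow's HiveServer2Hook and that a hiveserver2_default connection is configured in the environment:

from airflow.hooks.hive_hooks import HiveServer2Hook

hook = HiveServer2Hook(hiveserver2_conn_id="hiveserver2_default")
results = hook.get_results("SELECT 1 AS one", schema="default")
print(results["header"])   # column metadata returned by the cursor
print(results["data"])     # list of result rows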
16,680
def create_cursor(self, name=None):
    return Cursor(self.client_connection, self.connection, self.djongo_connection)
Returns an active connection cursor to the database.
16,681
def placeholder(type_):
    typetuple = type_ if isinstance(type_, tuple) else (type_,)
    if any in typetuple:
        typetuple = any
    if typetuple not in EMPTY_VALS:
        EMPTY_VALS[typetuple] = EmptyVal(typetuple)
    return EMPTY_VALS[typetuple]
Returns the EmptyVal instance for the given type
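A short illustration of the memoisation behaviour, assuming EmptyVal and the EMPTY_VALS cache are defined in the same module as implied above:

a = placeholder(int)           # normalised to the key (int,)
b = placeholder((int,))
assert a is b                  # the same EmptyVal instance is reused
c = placeholder((int, str))    # distinct key, distinct instance
d = placeholder((any, int))    # presence of `any` collapses the key to `any`
assert d is placeholder(any)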
16,682
def get_resource_by_urn(self, urn):
    # NOTE: much of this method was lost in extraction (the SPARQL query literal
    # and the code that resolves `resource_uri` and selects the return type);
    # only the surviving skeleton is reproduced below.
    search_query = """...""" % urn   # original SPARQL query literal missing
    try:
        assert isinstance(urn, CTS_URN)
    except Exception as e:
        pass  # original except-body lost in extraction
    return self._session.get_resource(resource_uri, Person)
Fetch the resource corresponding to the input CTS URN. Currently supports only HucitAuthor and HucitWork. :param urn: the CTS URN of the resource to fetch :return: either an instance of `HucitAuthor` or of `HucitWork`
16,683
def proto_avgRange(theABF, m1=None, m2=None):
    abf = ABF(theABF)
    abf.log.info("analyzing as a fast IV")
    if m1 is None:
        m1 = abf.sweepLength
    if m2 is None:
        m2 = abf.sweepLength
    I1 = int(abf.pointsPerSec * m1)
    I2 = int(abf.pointsPerSec * m2)
    Ts = np.arange(abf.sweeps) * abf.sweepInterval
    Yav = np.empty(abf.sweeps) * np.nan
    Ysd = np.empty(abf.sweeps) * np.nan
    for sweep in abf.setsweeps():
        Yav[sweep] = np.average(abf.sweepY[I1:I2])
        Ysd[sweep] = np.std(abf.sweepY[I1:I2])
    plot = ABFplot(abf)

    plt.figure(figsize=(SQUARESIZE * 2, SQUARESIZE / 2))

    plt.subplot(131)
    plot.title = "first sweep"
    plot.figure_sweep(0)
    plt.title("First Sweep\n(shaded measurement range)")
    # NOTE: the single-quoted style literals (colors, markers, line styles) were
    # stripped in extraction; the values below are plausible placeholders.
    plt.axvspan(m1, m2, color='r', ec=None, alpha=.1)

    plt.subplot(132)
    plt.grid(alpha=.5)
    for i, t in enumerate(abf.comment_times):
        plt.axvline(t / 60, color='r', alpha=.5, lw=2, ls='--')
    plt.plot(Ts / 60, Yav, '.', alpha=.75)
    plt.title("Range Average\nTAGS: %s" % (", ".join(abf.comment_tags)))
    plt.ylabel(abf.units2)
    plt.xlabel("minutes")
    plt.margins(0, .1)

    plt.subplot(133)
    plt.grid(alpha=.5)
    for i, t in enumerate(abf.comment_times):
        plt.axvline(t / 60, color='r', alpha=.5, lw=2, ls='--')
    plt.plot(Ts / 60, Ysd, '.', alpha=.5, color='g', ms=15, mew=0)
    plt.title("Range Standard Deviation\nTAGS: %s" % (", ".join(abf.comment_tags)))
    plt.ylabel(abf.units2)
    plt.xlabel("minutes")
    plt.margins(0, .1)
    plt.axis([None, None, 0, np.percentile(Ysd, 99) * 1.25])

    plt.tight_layout()
    frameAndSave(abf, "sweep vs average", "experiment")
    plt.close()
experiment: generic VC time course experiment.
16,684
def get_recipe_env(self, arch, with_flags_in_cc=True):
    env = super(ScryptRecipe, self).get_recipe_env(arch, with_flags_in_cc)
    # NOTE: the single-quoted literals (recipe name, env keys, -L flag template)
    # were stripped in extraction and are reconstructed here from the docstring
    # and the surrounding calls.
    openssl_recipe = self.get_recipe('openssl', self.ctx)
    env['CFLAGS'] += openssl_recipe.include_flags(arch)
    env['LDFLAGS'] += ' -L{}'.format(self.ctx.get_libs_dir(arch.arch))
    env['LDFLAGS'] += ' -L{}'.format(self.ctx.libs_dir)
    env['LDFLAGS'] += openssl_recipe.link_dirs_flags(arch)
    env['LIBS'] = env.get('LIBS', '') + openssl_recipe.link_libs_flags()
    return env
Adds openssl recipe to include and library path.
16,685
def hash_file(filepath: str) -> str:
    md5 = hashlib.md5()
    acc_hash(filepath, md5)
    return md5.hexdigest()
Return the hexdigest MD5 hash of content of file at `filepath`.
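The acc_hash helper used above is not shown in this entry; a minimal sketch of what it presumably does (feeding the file to the hash object in chunks) might look like this. Name and signature are inferred from hash_file, not taken from the source.

import hashlib

def acc_hash(filepath: str, hasher, chunk_size: int = 65536) -> None:
    """Hypothetical chunked reader that updates the given hash object in place."""
    with open(filepath, "rb") as fh:
        for chunk in iter(lambda: fh.read(chunk_size), b""):
            hasher.update(chunk)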
16,686
def exception_handle(method):
    def wrapper(*args, **kwargs):
        try:
            result = method(*args, **kwargs)
            return result
        # NOTE: the original log-message literals were stripped in extraction;
        # generic placeholders are used below.
        except ProxyError:
            LOG.exception('Proxy error with input: %s', args)
            raise ProxyError()
        except ConnectionException:
            LOG.exception('Connection error with input: %s', args)
            raise ConnectionException()
        except Timeout:
            LOG.exception('Timeout with input: %s', args)
            raise Timeout()
        except RequestException:
            LOG.exception('Request exception with input: %s', args)
            raise RequestException()
    return wrapper
Handle exception raised by requests library.
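A hedged usage sketch: the decorator wraps a function that performs a requests call, so network failures are logged and re-raised as the module's own exception types. The wrapped function and URL are illustrative, not from the source.

import requests

@exception_handle
def fetch_status(url):
    return requests.get(url, timeout=5).status_code

# fetch_status("https://example.com")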
16,687
def revcomp(sequence):
    "returns reverse complement of a string"
    sequence = sequence[::-1].strip()\
        .replace("A", "t")\
        .replace("T", "a")\
        .replace("C", "g")\
        .replace("G", "c").upper()
    return sequence
returns reverse complement of a string
16,688
def handle_message(self, msg):
    if msg.msg_id not in self.msg_types:
        self.report_message_type(msg)
        self.msg_types.add(msg.msg_id)
    # 'inspection' follows the docstring; the .replace() literals were stripped in
    # extraction and are assumed to normalise Windows path separators.
    self.tc.message('inspection',
                    typeId=msg.msg_id,
                    message=msg.msg,
                    file=os.path.relpath(msg.abspath).replace('\\', '/'),
                    line=str(msg.line),
                    SEVERITY=TC_SEVERITY.get(msg.category))
Issues an `inspection` service message based on a PyLint message. Registers each message type upon first encounter. :param utils.Message msg: a PyLint message
16,689
def __search(self, obj, item, parent="root", parents_ids=frozenset({})):
    if self.__skip_this(item, parent):
        return
    elif isinstance(obj, strings) and isinstance(item, strings):
        self.__search_str(obj, item, parent)
    elif isinstance(obj, strings) and isinstance(item, numbers):
        return
    elif isinstance(obj, numbers):
        self.__search_numbers(obj, item, parent)
    elif isinstance(obj, MutableMapping):
        self.__search_dict(obj, item, parent, parents_ids)
    elif isinstance(obj, tuple):
        self.__search_tuple(obj, item, parent, parents_ids)
    elif isinstance(obj, (set, frozenset)):
        if self.warning_num < 10:
            # the stripped single-quoted fragment is assumed to be 'set'
            logger.warning(
                "Set item detected in the path."
                " 'set' objects do NOT support indexing. But DeepSearch will still report a path."
            )
            self.warning_num += 1
        self.__search_iterable(obj, item, parent, parents_ids)
    elif isinstance(obj, Iterable):
        self.__search_iterable(obj, item, parent, parents_ids)
    else:
        self.__search_obj(obj, item, parent, parents_ids)
The main search method
16,690
def all(self, data={}, **kwargs):
    return super(Refund, self).all(data, **kwargs)
Fetch All Refund Returns: Refund dict
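A hedged usage sketch with the razorpay Python client; the key pair below is a placeholder:

import razorpay

client = razorpay.Client(auth=("rzp_test_key", "rzp_test_secret"))
refunds = client.refund.all({"count": 10})   # delegates to the method above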
16,691
def _parse_sections(self):
    def _list_to_dict(_dict, path, sec):
        tmp = _dict
        for elm in path[:-1]:
            tmp = tmp[elm]
        tmp[sec] = OrderedDict()

    self._sections = list()
    section_regexp = r"\n==* .* ==*\n"
    found_obj = re.findall(section_regexp, self.content)

    res = OrderedDict()
    path = list()
    last_depth = 0
    for obj in found_obj:
        depth = obj.count("=") / 2
        depth -= 2
        sec = obj.lstrip("\n= ").rstrip(" =\n")
        if depth == 0:
            last_depth = 0
            path = [sec]
            res[sec] = OrderedDict()
        elif depth > last_depth:
            last_depth = depth
            path.append(sec)
            _list_to_dict(res, path, sec)
        elif depth < last_depth:
            while last_depth > depth:
                path.pop()
                last_depth -= 1
            path.pop()
            path.append(sec)
            _list_to_dict(res, path, sec)
            last_depth = depth
        else:
            path.pop()
            path.append(sec)
            _list_to_dict(res, path, sec)
            last_depth = depth
        self._sections.append(sec)
    self._table_of_contents = res
parse sections and TOC
16,692
def on_any_event(self, event):
    if os.path.isfile(event.src_path):
        self.callback(event.src_path, **self.kwargs)
Handle any filesystem event: if the event's source path is an existing file (created or modified), invoke the callback with it.
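A hedged wiring sketch with the watchdog package. CallbackHandler below is a hypothetical stand-in for the handler class this method belongs to (assumed to store callback and kwargs in __init__):

import os
import time
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer

class CallbackHandler(FileSystemEventHandler):
    def __init__(self, callback, **kwargs):
        self.callback = callback
        self.kwargs = kwargs

    def on_any_event(self, event):
        if os.path.isfile(event.src_path):
            self.callback(event.src_path, **self.kwargs)

observer = Observer()
observer.schedule(CallbackHandler(print), path=".", recursive=True)
observer.start()
time.sleep(2)      # watch the current directory briefly
observer.stop()
observer.join()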
16,693
def startProcesses(self):
    self.process_map = {}
    for mvision_class in self.mvision_classes:
        name = mvision_class.name
        tag = mvision_class.tag
        num = mvision_class.max_instances
        if (tag not in self.process_map):
            self.process_map[tag] = []
            for n in range(0, num):
                p = mvision_class()
                p.start()
                self.process_map[tag].append(p)
Create and start python multiprocesses. Starting a multiprocess creates a process fork. In theory, there should be no problem in first starting the multithreading environment and only then performing forks (only the thread requesting the fork is copied), but in practice all kinds of weird behaviour arise. Read all about it here: http://www.linuxprogrammingblog.com/threads-and-fork-think-twice-before-using-them
16,694
def init_debug(self):
    import signal

    def debug_trace(sig, frame):
        # NOTE: the original log-message and join-separator literals were
        # stripped in extraction; placeholders are used below.
        self.log('Received SIGUSR2, dumping stack trace:')
        self.log(''.join(traceback.format_stack(frame)))

    signal.signal(signal.SIGUSR2, debug_trace)
Initialize debugging features, such as a handler for USR2 to print a trace
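Once the handler is installed, the trace can be exercised from within the same process on a POSIX system, for example:

import os
import signal

os.kill(os.getpid(), signal.SIGUSR2)   # triggers debug_trace and logs the stack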
16,695
def push_resource_cache(resourceid, info):
    if not resourceid:
        raise ResourceInitError("Resource id missing")
    if not DutInformationList._cache.get(resourceid):
        DutInformationList._cache[resourceid] = dict()
    DutInformationList._cache[resourceid] = merge(DutInformationList._cache[resourceid], info)
Cache resource specific information :param resourceid: Resource id as string :param info: Dict to push :return: Nothing
16,696
def _get_dirs(user_dir, startup_dir):
    try:
        users = os.listdir(user_dir)
    except WindowsError:
        users = []

    full_dirs = []
    for user in users:
        full_dir = os.path.join(user_dir, user, startup_dir)
        if os.path.exists(full_dir):
            full_dirs.append(full_dir)
    return full_dirs
Return a list of startup dirs
16,697
def get_asset_form_for_create(self, asset_record_types):
    for arg in asset_record_types:
        if not isinstance(arg, ABCType):
            raise errors.InvalidArgument()
    if asset_record_types == []:
        obj_form = objects.AssetForm(
            repository_id=self._catalog_id,
            runtime=self._runtime,
            effective_agent_id=self.get_effective_agent_id(),
            proxy=self._proxy)
    else:
        obj_form = objects.AssetForm(
            repository_id=self._catalog_id,
            record_types=asset_record_types,
            runtime=self._runtime,
            effective_agent_id=self.get_effective_agent_id(),
            proxy=self._proxy)
    self._forms[obj_form.get_id().get_identifier()] = not CREATED
    return obj_form
Gets the asset form for creating new assets. A new form should be requested for each create transaction. arg: asset_record_types (osid.type.Type[]): array of asset record types return: (osid.repository.AssetForm) - the asset form raise: NullArgument - ``asset_record_types`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure raise: Unsupported - unable to get form for requested record types *compliance: mandatory -- This method must be implemented.*
16,698
def kernels_initialize(self, folder):
    if not os.path.isdir(folder):
        raise ValueError('Invalid folder: ' + folder)

    resources = []
    # NOTE: the single-quoted string literals in this template were stripped in
    # extraction; the placeholder values below follow the kaggle-api kernel
    # metadata template and may differ slightly from the original source.
    resource = {'path': 'INSERT_SCRIPT_PATH_HERE'}
    resources.append(resource)

    username = self.get_config_value(self.CONFIG_NAME_USER)
    meta_data = {
        'id': username + '/INSERT_KERNEL_SLUG_HERE',
        'title': 'INSERT_TITLE_HERE',
        'code_file': 'INSERT_CODE_FILE_PATH_HERE',
        'language': 'INSERT_LANGUAGE_HERE',
        'kernel_type': 'INSERT_KERNEL_TYPE_HERE',
        'is_private': 'true',
        'enable_gpu': 'false',
        'enable_internet': 'false',
        'dataset_sources': [],
        'competition_sources': [],
        'kernel_sources': [],
    }
    meta_file = os.path.join(folder, self.KERNEL_METADATA_FILE)
    with open(meta_file, 'w') as f:
        json.dump(meta_data, f, indent=2)

    return meta_file
create a new kernel in a specified folder from template, including json metadata that grabs values from the configuration. Parameters ========== folder: the path of the folder
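A hedged usage sketch with the official kaggle package, assuming credentials in ~/.kaggle/kaggle.json and an existing target folder:

from kaggle.api.kaggle_api_extended import KaggleApi

api = KaggleApi()
api.authenticate()
meta_file = api.kernels_initialize("my-kernel-folder")   # folder must already exist
print("wrote", meta_file)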
16,699
def mask(self):
    if not hasattr(self, "_mask"):
        self._mask = Mask(self) if self.has_mask() else None
    return self._mask
Returns mask associated with this layer. :return: :py:class:`~psd_tools.api.mask.Mask` or `None`
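A hedged usage sketch with psd-tools; the file path is a placeholder:

from psd_tools import PSDImage

psd = PSDImage.open("example.psd")
for layer in psd:
    if layer.mask is not None:
        print(layer.name, layer.mask.bbox)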