Columns: Unnamed: 0 (int64, 0 – 389k), code (string, lengths 26 – 79.6k), docstring (string, lengths 1 – 46.9k)
384,900
def _cache_key(self, attr_name):
    dn = self._ldap_user.dn
    return valid_cache_key(
        "auth_ldap.{}.{}.{}".format(self.__class__.__name__, attr_name, dn)
    )
Memcache keys can't have spaces in them, so we'll remove them from the DN for maximum compatibility.
384,901
def _update_record(self, record_id, name, address, ttl):
    # NOTE: the original string literals were lost in extraction; the dict keys,
    # URL fragment and error messages below are reconstructed placeholders.
    data = json.dumps({"record": {"name": name, "address": address, "ttl": ttl}})
    headers = {"Content-Type": "application/json"}
    request = self._session.put(self._baseurl + "/%s" % record_id,
                                data=data, headers=headers)
    if not request.ok:
        raise RuntimeError("Failed to update record %s: %s"
                           % (self._format_hostname(name), request.json()))
    record = request.json()
    if "record" not in record or "id" not in record["record"]:
        raise RuntimeError("Unexpected response while updating %s: %s"
                           % (self._format_hostname(name), request.json()))
    return record["record"]
Updates an existing record.
384,902
def _get_bandgap_from_bands(energies, nelec):
    nelec = int(nelec)
    valence = [x[nelec - 1] for x in energies]
    conduction = [x[nelec] for x in energies]
    return max(min(conduction) - max(valence), 0.0)
Compute difference in conduction band min and valence band max
384,903
def file_download_using_requests(self, url):
    # String literals were stripped in extraction; the messages, header key and
    # 'wb' mode below are reconstructed from the leftover fragments.
    file_name = url.split('/')[-1]
    if os.path.exists(os.path.join(os.getcwd(), file_name)):
        print("File already exists")
        return
    try:
        r = requests.get(url, stream=True, timeout=200)
    except requests.exceptions.SSLError:
        try:
            r = requests.get(url, stream=True, verify=False, timeout=200)
        except requests.exceptions.RequestException as e:
            print(e)
            quit()
    except requests.exceptions.RequestException as e:
        print(e)
        quit()
    chunk_size = 1024
    total_size = int(r.headers['Content-Length'])
    print("Total size of file to be downloaded %.2f MB" % (total_size / 1024.0 / 1024.0))
    total_chunks = total_size / chunk_size
    file_iterable = r.iter_content(chunk_size=chunk_size)
    tqdm_iter = tqdm(iterable=file_iterable, total=total_chunks,
                     unit='KB', leave=False)
    with open(file_name, 'wb') as f:
        for data in tqdm_iter:
            f.write(data)
    print("%s Downloaded" % file_name)
It will download file specified by url using requests module
384,904
def _copy_dist_from_dir(link_path, location):
    # String literals were stripped in extraction; the argument strings and log
    # messages below are reconstructed from pip's sdist build convention.
    if os.path.isdir(location):
        rmtree(location)
    setup_py = 'setup.py'
    sdist_args = [sys.executable]
    sdist_args.append('-c')
    sdist_args.append(SETUPTOOLS_SHIM % setup_py)
    sdist_args.append('sdist')
    sdist_args += ['--dist-dir', location]
    logger.info('Running setup.py sdist for %s', link_path)
    with indent_log():
        call_subprocess(sdist_args, cwd=link_path, show_stdout=False)
    sdist = os.path.join(location, os.listdir(location)[0])
    logger.info('Unpacking sdist %s into %s', sdist, location)
    unpack_file(sdist, location, content_type=None, link=None)
Copy distribution files in `link_path` to `location`. Invoked when user requests to install a local directory. E.g.: pip install . pip install ~/dev/git-repos/python-prompt-toolkit
384,905
async def eventuallyAll(*coroFuncs: FlexFunc, totalTimeout: float, retryWait: float=0.1, acceptableExceptions=None, acceptableFails: int=0, override_timeout_limit=False): start = time.perf_counter() def remaining(): return totalTimeout + start - time.perf_counter() funcNames = [] others = 0 fails = 0 rem = None for cf in coroFuncs: if len(funcNames) < 2: funcNames.append(get_func_name(cf)) else: others += 1 try: rem = remaining() if rem <= 0: break await eventually(cf, retryWait=retryWait, timeout=rem, acceptableExceptions=acceptableExceptions, verbose=True, override_timeout_limit=override_timeout_limit) except Exception as ex: if acceptableExceptions and type(ex) not in acceptableExceptions: raise fails += 1 logger.debug("a coro {} with args {} timed out without succeeding; fail count: " "{}, acceptable: {}". format(get_func_name(cf), get_func_args(cf), fails, acceptableFails)) if fails > acceptableFails: raise if rem is not None and rem <= 0: fails += 1 if fails > acceptableFails: err = \ .format(-1 * rem if rem < 0 else 0) raise EventuallyTimeoutException(err) if others: funcNames.append("and {} others".format(others)) desc = ", ".join(funcNames) logger.debug("{} succeeded with {:.2f} seconds to spare". format(desc, remaining()))
:param coroFuncs: iterable of no-arg functions :param totalTimeout: :param retryWait: :param acceptableExceptions: :param acceptableFails: how many of the passed in coroutines can ultimately fail and still be ok :return:
384,906
def getmembers(object, predicate=None): if inspect.isclass(object): mro = (object,) + inspect.getmro(object) else: mro = () results = [] processed = set() names = dir(object) try: for base in object.__bases__: for k, v in base.__dict__.items(): if isinstance(v, types.DynamicClassAttribute): names.append(k) except AttributeError: pass for key in names: try: value = getattr(object, key) if key in processed: raise AttributeError except AttributeError: for base in mro: if key in base.__dict__: value = base.__dict__[key] break else: continue except Exception as e: value = (RAISES_EXCEPTION, e) if not predicate or predicate(value): results.append((key, value)) processed.add(key) results.sort(key=lambda pair: pair[0]) return results
Return all members of an object as (name, value) pairs sorted by name. Optionally, only return members that satisfy a given predicate.
384,907
def sr(x, promisc=None, filter=None, iface=None, nofilter=0, *args, **kargs):
    s = conf.L3socket(promisc=promisc, filter=filter, iface=iface, nofilter=nofilter)
    result = sndrcv(s, x, *args, **kargs)
    s.close()
    return result
Send and receive packets at layer 3
384,908
def _ensure_api_keys(task_desc, failure_ret=None): def check_func_wrapper(func): @wraps(func) def check_api_keys(*args, **kwargs): global ELSEVIER_KEYS if ELSEVIER_KEYS is None: ELSEVIER_KEYS = {} if not has_config(INST_KEY_ENV_NAME): logger.warning( % (INST_KEY_ENV_NAME, task_desc)) ELSEVIER_KEYS[] = get_config(INST_KEY_ENV_NAME) if not has_config(API_KEY_ENV_NAME): logger.error( % (API_KEY_ENV_NAME, task_desc)) return failure_ret ELSEVIER_KEYS[] = get_config(API_KEY_ENV_NAME) elif not in ELSEVIER_KEYS.keys(): logger.error( % (API_KEY_ENV_NAME, task_desc)) return failure_ret return func(*args, **kwargs) return check_api_keys return check_func_wrapper
Wrap Elsevier methods which directly use the API keys. Ensure that the keys are retrieved from the environment or config file when first called, and store global scope. Subsequently use globally stashed results and check for required ids.
384,909
def multi_reciprocal_extra(xs, ys, noise=False): ns = np.linspace(0.5, 6.0, num=56) best = [, np.inf] fit_results = {} weights = get_weights(xs, ys) for n in ns: popt = extrapolate_reciprocal(xs, ys, n, noise) m = measure(reciprocal, xs, ys, popt, weights) pcov = [] fit_results.update({n: {: m, : popt, : pcov}}) for n in fit_results: if fit_results[n][] <= best[1]: best = reciprocal, fit_results[n][], n return fit_results[best[2]][], fit_results[best[2]][], best
Calculates for a series of powers ns the parameters for which the last two points are at the curve. With these parameters measure how well the other data points fit. return the best fit.
384,910
def p_param_args_noname(self, p):
    p[0] = p[1] + (p[3],)
    p.set_lineno(0, p.lineno(1))
param_args_noname : param_args_noname COMMA param_arg_noname
384,911
def set_pos(self, pos):
    self.pos = pos
    if pos is not None and self.typ is not None:
        self.typ._v_pos = pos
    return self
set the position of this column in the Table
384,912
def construct_infrastructure_factory(self, *args, **kwargs):
    factory_class = self.infrastructure_factory_class
    assert issubclass(factory_class, InfrastructureFactory)
    return factory_class(
        record_manager_class=self.record_manager_class,
        integer_sequenced_record_class=self.stored_event_record_class,
        sequenced_item_class=self.sequenced_item_class,
        contiguous_record_ids=self.contiguous_record_ids,
        application_name=self.name,
        pipeline_id=self.pipeline_id,
        snapshot_record_class=self.snapshot_record_class,
        *args, **kwargs
    )
:rtype: InfrastructureFactory
384,913
def to_html(self): id, classes, kvs = self.id, self.classes, self.kvs id_str = .format(id) if id else class_str = .format(.join(classes)) if classes else key_str = .join(.format(k, v) for k, v in kvs.items()) return .join((id_str, class_str, key_str)).strip()
Returns attributes formatted as html.
384,914
def update_location(self, text=''):
    self.text_project_name.setEnabled(self.radio_new_dir.isChecked())
    name = self.text_project_name.text().strip()
    if name and self.radio_new_dir.isChecked():
        path = osp.join(self.location, name)
        self.button_create.setDisabled(os.path.isdir(path))
    elif self.radio_from_dir.isChecked():
        self.button_create.setEnabled(True)
        path = self.location
    else:
        self.button_create.setEnabled(False)
        path = self.location
    self.text_location.setText(path)
Update text of location.
384,915
def go_to_place(self, place, weight=): if hasattr(place, ): placen = place.name else: placen = place curloc = self["location"] orm = self.character.engine turns = self.engine._portal_objs[ (self.character.name, curloc, place)].get(weight, 1) with self.engine.plan(): orm.turn += turns self[] = placen return turns
Assuming I'm in a :class:`Place` that has a :class:`Portal` direct to the given :class:`Place`, schedule myself to travel to the given :class:`Place`, taking an amount of time indicated by the ``weight`` stat on the :class:`Portal`, if given; else 1 turn. Return the number of turns the travel will take.
384,916
def _twofilter_smoothing_ON(self, t, ti, info, phi, lwinfo, return_ess, modif_forward, modif_info): if modif_info is not None: lwinfo += modif_info Winfo = rs.exp_and_normalise(lwinfo) I = rs.multinomial(Winfo) if modif_forward is not None: lw = self.wgt[t].lw + modif_forward W = rs.exp_and_normalise(lw) else: W = self.wgt[t].W J = rs.multinomial(W) log_omega = self.model.logpt(t + 1, self.X[t][J], info.hist.X[ti][I]) if modif_forward is not None: log_omega -= modif_forward[J] if modif_info is not None: log_omega -= modif_info[I] Om = rs.exp_and_normalise(log_omega) est = np.average(phi(self.X[t][J], info.hist.X[ti][I]), axis=0, weights=Om) if return_ess: return (est, 1. / np.sum(Om**2)) else: return est
O(N) version of two-filter smoothing. This method should not be called directly, see twofilter_smoothing.
384,917
def download_object(self, container, obj, directory, structure=True):
    return container.download(obj, directory, structure=structure)
Fetches the object from storage, and writes it to the specified directory. The directory must exist before calling this method. If the object name represents a nested folder structure, such as "foo/bar/baz.txt", that folder structure will be created in the target directory by default. If you do not want the nested folders to be created, pass `structure=False` in the parameters.
384,918
def iftrain(self, then_branch, else_branch):
    return ifelse(self._training_flag, then_branch, else_branch, name="iftrain")
Execute `then_branch` when training.
384,919
def console(loop, log): parser = argparse.ArgumentParser(description=console.__doc__) parser.add_argument(, default=, help=) parser.add_argument(, default=, help=) parser.add_argument(, , action=) args = parser.parse_args() if args.verbose: level = logging.DEBUG else: level = logging.INFO logging.basicConfig(level=level) def log_callback(message): log.info( % message) host = args.host port = int(args.port) log.info( % (host, port)) conn = yield from anthemav.Connection.create( host=host, port=port, loop=loop, update_callback=log_callback) log.info(+str(conn.protocol.power)) conn.protocol.power = True log.info(+str(conn.protocol.power)) yield from asyncio.sleep(10, loop=loop) log.info(+str(conn.protocol.panel_brightness)) log.info(+str(conn.protocol.panel_brightness_text))
Connect to receiver and show events as they occur. Pulls the following arguments from the command line (not method arguments): :param host: Hostname or IP Address of the device. :param port: TCP port number of the device. :param verbose: Show debug logging.
384,920
def sample_stats_to_xarray(self): rename_key = {"model_logp": "lp"} data = {} for stat in self.trace.stat_names: name = rename_key.get(stat, stat) data[name] = np.array(self.trace.get_sampler_stats(stat, combine=False)) log_likelihood, dims = self._extract_log_likelihood() if log_likelihood is not None: data["log_likelihood"] = log_likelihood dims = {"log_likelihood": dims} else: dims = None return dict_to_dataset(data, library=self.pymc3, dims=dims, coords=self.coords)
Extract sample_stats from PyMC3 trace.
384,921
def container_running(self, container_name):
    filters = {
        "name": container_name,
        "status": "running",
    }
    for container in self.client.containers.list(filters=filters):
        if container_name == container.name:
            return container
    return None
Finds out if a container with name ``container_name`` is running. :return: :class:`Container <docker.models.containers.Container>` if it's running, ``None`` otherwise. :rtype: Optional[docker.models.container.Container]
384,922
def list(self, request, *args, **kwargs):
    return super(ServiceSettingsViewSet, self).list(request, *args, **kwargs)
To get a list of service settings, run **GET** against */api/service-settings/* as an authenticated user. Only settings owned by this user or shared settings will be listed. Supported filters are: - ?name=<text> - partial matching used for searching - ?type=<type> - choices: OpenStack, DigitalOcean, Amazon, JIRA, GitLab, Oracle - ?state=<state> - choices: New, Creation Scheduled, Creating, Sync Scheduled, Syncing, In Sync, Erred - ?shared=<bool> - allows to filter shared service settings
384,923
def add_ephemeral_listener(self, callback, event_type=None):
    # Dict keys were stripped in extraction; 'uid', 'callback' and 'event_type'
    # are reconstructed from the docstring and surrounding code.
    listener_id = uuid4()
    self.ephemeral_listeners.append(
        {
            'uid': listener_id,
            'callback': callback,
            'event_type': event_type
        }
    )
    return listener_id
Add a callback handler for ephemeral events going to this room. Args: callback (func(room, event)): Callback called when an ephemeral event arrives. event_type (str): The event_type to filter for. Returns: uuid.UUID: Unique id of the listener, can be used to identify the listener.
384,924
def top_segment_proportions(mtx, ns):
    if not (max(ns) <= mtx.shape[1] and min(ns) > 0):
        raise IndexError("Positions outside range of features.")
    if issparse(mtx):
        if not isspmatrix_csr(mtx):
            mtx = csr_matrix(mtx)
        return top_segment_proportions_sparse_csr(mtx.data, mtx.indptr,
                                                  np.array(ns, dtype=np.int))
    else:
        return top_segment_proportions_dense(mtx, ns)
Calculates total percentage of counts in top ns genes. Parameters ---------- mtx : `Union[np.array, sparse.spmatrix]` Matrix, where each row is a sample, each column a feature. ns : `Container[Int]` Positions to calculate cumulative proportion at. Values are considered 1-indexed, e.g. `ns=[50]` will calculate cumulative proportion up to the 50th most expressed gene.
384,925
def next_history(self, e):
    self._history.next_history(self.l_buffer)
    self.finalize()
Move forward through the history list, fetching the next command.
384,926
def getTextBlocks(page, images=False): CheckParent(page) dl = page.getDisplayList() flags = TEXT_PRESERVE_LIGATURES | TEXT_PRESERVE_WHITESPACE if images: flags |= TEXT_PRESERVE_IMAGES tp = dl.getTextPage(flags) l = tp._extractTextBlocks_AsList() del tp del dl return l
Return the text blocks on a page. Notes: Lines in a block are concatenated with line breaks. Args: images: (bool) also return meta data of any images. Image data are never returned with this method. Returns: A list of the blocks. Each item contains the containing rectangle coordinates, text lines, block type and running block number.
384,927
def crc16(data):
    m_crc = 0xffff
    for this in data:
        m_crc ^= ord_byte(this)
        for _ in range(8):
            j = m_crc & 1
            m_crc >>= 1
            if j:
                m_crc ^= 0x8408
    return m_crc
Calculate an ISO13239 CRC checksum of the input buffer (bytestring).
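As a quick sanity check of the routine above, here is a minimal usage sketch; the ord_byte helper is not shown in this row, so a hypothetical definition is included (it simply normalizes a byte to its integer value):

def ord_byte(b):
    # hypothetical helper assumed by crc16: byte -> int (py2 str or py3 bytes)
    return b if isinstance(b, int) else ord(b)

checksum = crc16(b"123456789")
print(hex(checksum))  # 16-bit value; polynomial 0x8408, init 0xFFFF, no final XOR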
384,928
def get_list_information(self, query_params=None):
    return self.fetch_json(
        uri_path=self.base_uri,
        query_params=query_params or {}
    )
Get information for this list. Returns a dictionary of values.
384,929
def previous_sibling(self):
    stmts = self.parent.child_sequence(self)
    index = stmts.index(self)
    if index >= 1:
        return stmts[index - 1]
    return None
The previous sibling statement. :returns: The previous sibling statement node. :rtype: NodeNG or None
384,930
def write_molden(*args, **kwargs):
    # The deprecation message literal was stripped in extraction; the text below
    # is a placeholder based on the docstring.
    message = 'write_molden is deprecated, use to_molden instead.'
    with warnings.catch_warnings():
        warnings.simplefilter("always")
        warnings.warn(message, DeprecationWarning)
    return to_molden(*args, **kwargs)
Deprecated, use :func:`~chemcoord.xyz_functions.to_molden`
384,931
def observer(self, component_type=ComponentType):
    def inner(func):
        self.add_observer(func, component_type)
        return func
    return inner
You can use ``@broker.observer()`` as a decorator to your callback instead of :func:`Broker.add_observer`.
384,932
def dictmerge(x, y):
    z = x.copy()
    z.update(y)
    return z
merge two dictionaries
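A tiny usage example of the helper above; keys from the second argument win on conflict and neither input is mutated:

x = {"a": 1, "b": 2}
y = {"b": 3, "c": 4}
print(dictmerge(x, y))  # {'a': 1, 'b': 3, 'c': 4}
print(x)                # {'a': 1, 'b': 2} -- the originals are left untouched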
384,933
def interact(self, banner=None): sys.ps1 = getattr(sys, , ) sys.ps2 = getattr(sys, , ) if banner is None: print ( % (sys.version_info.major, sys.version_info.minor, sys.version_info.micro, sys.platform, _WELCOME_MSG)) else: print banner more = False while True: try: if more: prompt = sys.ps2 else: prompt = self.StatusLine() + + sys.ps1 try: line = self.raw_input(prompt) except EOFError: print break else: more = self.push(line) except KeyboardInterrupt: print self.resetbuffer() more = False
Closely emulate the interactive Python console. This method overwrites its superclass' method to specify a different help text and to enable proper handling of the debugger status line. Args: banner: Text to be displayed on interpreter startup.
384,934
def _prob_match(self, features):
    probs = self.kernel.predict_proba(features)
    classes = list(self.kernel.classes_)
    match_class_position = classes.index(1)
    return probs[:, match_class_position]
Compute match probabilities. Parameters ---------- features : numpy.ndarray The data to train the model on. Returns ------- numpy.ndarray The match probabilties.
384,935
def kernel_restarted_message(self, msg): if not self.is_error_shown: else: self.shellwidget._append_html("<br>%s<hr><br>" % msg, before_prompt=False)
Show kernel restarted/died messages.
384,936
def digest(self, elimseq=False, notrunc=False):
    if self._state == ffi.NULL:
        raise InternalError("State object is NULL")
    flags = (binding.lib.FUZZY_FLAG_ELIMSEQ if elimseq else 0) | \
            (binding.lib.FUZZY_FLAG_NOTRUNC if notrunc else 0)
    result = ffi.new("char[]", binding.lib.FUZZY_MAX_RESULT)
    if binding.lib.fuzzy_digest(self._state, result, flags) != 0:
        raise InternalError("Function returned an unexpected error code")
    return ffi.string(result).decode("ascii")
Obtain the fuzzy hash. This operation does not change the state at all. It reports the hash for the concatenation of the data previously fed using update(). :return: The fuzzy hash :rtype: String :raises InternalError: If lib returns an internal error
384,937
def voip_play2(s1, **kargs): dsp, rd = os.popen2(sox_base % "-c 2") global x1, x2 x1 = "" x2 = "" def play(pkt): global x1, x2 if not pkt: return if not pkt.haslayer(UDP) or not pkt.haslayer(IP): return ip = pkt.getlayer(IP) if s1 in [ip.src, ip.dst]: if ip.dst == s1: x1 += pkt.getlayer(conf.raw_layer).load[12:] else: x2 += pkt.getlayer(conf.raw_layer).load[12:] x1, x2, r = _merge_sound_bytes(x1, x2) dsp.write(r) sniff(store=0, prn=play, **kargs)
Same than voip_play, but will play both incoming and outcoming packets. The sound will surely suffer distortion. Only supports sniffing. .. seealso:: voip_play to play only incoming packets.
384,938
def directions(self, origin, destination, mode=None, alternatives=None, waypoints=None, optimize_waypoints=False, avoid=None, language=None, units=None, region=None, departure_time=None, arrival_time=None, sensor=None): if optimize_waypoints: waypoints.insert(0, "optimize:true") parameters = dict( origin=self.assume_latlon_or_address(origin), destination=self.assume_latlon_or_address(destination), mode=mode, alternatives=alternatives, waypoints=waypoints or [], avoid=avoid, language=language, units=units, region=region, departure_time=departure_time, arrival_time=arrival_time, sensor=sensor, ) return self._make_request(self.DIRECTIONS_URL, parameters, "routes")
Get directions between locations :param origin: Origin location - string address; (latitude, longitude) two-tuple, dict with ("lat", "lon") keys or object with (lat, lon) attributes :param destination: Destination location - type same as origin :param mode: Travel mode as string, defaults to "driving". See `google docs details <https://developers.google.com/maps/documentation/directions/#TravelModes>`_ :param alternatives: True if provide it has to return more then one route alternative :param waypoints: Iterable with set of intermediate stops, like ("Munich", "Dallas") See `google docs details <https://developers.google.com/maps/documentation/javascript/reference#DirectionsRequest>`_ :param optimize_waypoints: if true will attempt to re-order supplied waypoints to minimize overall cost of the route. If waypoints are optimized, the route returned will show the optimized order under "waypoint_order". See `google docs details <https://developers.google.com/maps/documentation/javascript/reference#DirectionsRequest>`_ :param avoid: Iterable with set of restrictions, like ("tolls", "highways"). For full list refer to `google docs details <https://developers.google.com/maps/documentation/directions/#Restrictions>`_ :param language: The language in which to return results. See `list of supported languages <https://developers.google.com/maps/faq#languagesupport>`_ :param units: Unit system for result. Defaults to unit system of origin's country. See `google docs details <https://developers.google.com/maps/documentation/directions/#UnitSystems>`_ :param region: The region code. Affects geocoding of origin and destination (see `gmaps.Geocoding.geocode` region parameter) :param departure_time: Desired time of departure as seconds since midnight, January 1, 1970 UTC :param arrival_time: Desired time of arrival for transit directions as seconds since midnight, January 1, 1970 UTC.
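A hypothetical call pattern for the method above (the client object name is an assumption; only parameters documented in the docstring are used):

routes = gmaps_client.directions(
    "Munich", "Berlin",
    mode="driving",
    alternatives=True,
    waypoints=["Nuremberg", "Leipzig"],
    optimize_waypoints=True,   # prepends "optimize:true" to the waypoint list
    avoid=["tolls"],
    language="en",
)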
384,939
def shift(self, modelResult): inferencesToWrite = {} if self._inferenceBuffer is None: maxDelay = InferenceElement.getMaxDelay(modelResult.inferences) self._inferenceBuffer = collections.deque(maxlen=maxDelay + 1) self._inferenceBuffer.appendleft(copy.deepcopy(modelResult.inferences)) for inferenceElement, inference in modelResult.inferences.iteritems(): if isinstance(inference, dict): inferencesToWrite[inferenceElement] = {} for key, _ in inference.iteritems(): delay = InferenceElement.getTemporalDelay(inferenceElement, key) if len(self._inferenceBuffer) > delay: prevInference = self._inferenceBuffer[delay][inferenceElement][key] inferencesToWrite[inferenceElement][key] = prevInference else: inferencesToWrite[inferenceElement][key] = None else: delay = InferenceElement.getTemporalDelay(inferenceElement) if len(self._inferenceBuffer) > delay: inferencesToWrite[inferenceElement] = ( self._inferenceBuffer[delay][inferenceElement]) else: if type(inference) in (list, tuple): inferencesToWrite[inferenceElement] = [None] * len(inference) else: inferencesToWrite[inferenceElement] = None shiftedResult = ModelResult(rawInput=modelResult.rawInput, sensorInput=modelResult.sensorInput, inferences=inferencesToWrite, metrics=modelResult.metrics, predictedFieldIdx=modelResult.predictedFieldIdx, predictedFieldName=modelResult.predictedFieldName) return shiftedResult
Shift the model result and return the new instance. Queues up the T(i+1) prediction value and emits a T(i) input/prediction pair, if possible. E.g., if the previous T(i-1) iteration was learn-only, then we would not have a T(i) prediction in our FIFO and would not be able to emit a meaningful input/prediction pair. :param modelResult: A :class:`~.nupic.frameworks.opf.opf_utils.ModelResult` instance to shift. :return: A :class:`~.nupic.frameworks.opf.opf_utils.ModelResult` instance that has been shifted
384,940
def create(self, validated_data): email_query = models.EmailAddress.objects.filter( email=self.validated_data["email"] ) if email_query.exists(): email = email_query.get() email.send_duplicate_notification() else: email = super(EmailSerializer, self).create(validated_data) email.send_confirmation() user = validated_data.get("user") query = models.EmailAddress.objects.filter( is_primary=True, user=user ) if not query.exists(): email.set_primary() return email
Create a new email and send a confirmation to it. Returns: The newly creating ``EmailAddress`` instance.
384,941
def htmlFormat(output, pathParts=(), statDict=None, query=None):
    statDict = statDict or scales.getStats()
    if query:
        statDict = runQuery(statDict, query)
    _htmlRenderDict(pathParts, statDict, output)
Formats as HTML, writing to the given object.
384,942
def get_variable_scope_name(value):
    value = getattr(value, "variable_scope", value)
    if isinstance(value, tf.VariableScope):
        return value.name
    elif isinstance(value, six.string_types):
        return value
    else:
        raise ValueError("Not a variable scope: {}".format(value))
Returns the name of the variable scope indicated by the given value. Args: value: String, variable scope, or object with `variable_scope` attribute (e.g., Sonnet module). Returns: The name (a string) of the corresponding variable scope. Raises: ValueError: If `value` does not identify a variable scope.
384,943
def formfield_for_dbfield(self, db_field, **kwargs):
    # Subscript keys were stripped in extraction; 'request' and 'queryset' are
    # reconstructed from the standard Django admin hook.
    if db_field.name == "gradings":
        request = kwargs['request']
        try:
            obj = resolve(request.path).args[0]
            filterexpr = Q(schemes=obj) | Q(schemes=None)
            kwargs['queryset'] = Grading.objects.filter(filterexpr).distinct()
        except:
            pass
    return super(GradingSchemeAdmin, self).formfield_for_dbfield(db_field, **kwargs)
Offer only gradings that are not used by other schemes, which means they are used by this scheme or not at all.
384,944
def add_filehandler(level, fmt, filename, mode, backup_count, limit, when): kwargs = {} if filename is None: filename = getattr(sys.modules[], , ) filename = os.path.basename(filename.replace(, )) filename = os.path.join(, filename) if not os.path.exists(os.path.dirname(filename)): os.mkdir(os.path.dirname(filename)) kwargs[] = filename if backup_count == 0: cls = logging.FileHandler kwargs[] = mode elif when is None: cls = logging.handlers.RotatingFileHandler kwargs[] = limit kwargs[] = backup_count kwargs[] = mode else: cls = logging.handlers.TimedRotatingFileHandler kwargs[] = when kwargs[] = limit kwargs[] = backup_count return add_handler(cls, level, fmt, False, **kwargs)
Add a file handler to the global logger.
384,945
def svd_convolution(inp, outmaps, kernel, r, pad=None, stride=None, dilation=None, uv_init=None, b_init=None, base_axis=1, fix_parameters=False, rng=None, with_bias=True): assert r > 0, "svd_convolution: The rank must larger than zero" if uv_init is None: uv_init = UniformInitializer( calc_uniform_lim_glorot(inp.shape[base_axis], outmaps, tuple(kernel)), rng=rng) if type(uv_init) is np.ndarray: uv = uv_init else: uv = uv_init((outmaps, inp.shape[base_axis]) + tuple(kernel)) uv = uv.reshape((outmaps, inp.shape[base_axis], np.prod(kernel))) u = get_parameter() v = get_parameter() if (u is None) or (v is None): inmaps = inp.shape[base_axis] u_low_rank = np.zeros((inmaps, np.prod(kernel), r)) v_low_rank = np.zeros((inmaps, r, outmaps)) for i in range(inmaps): K = np.transpose(uv[:, i, :]) u_, s_, v_ = np.linalg.svd(K, full_matrices=False) u_low_rank[i, :, :] = np.dot(u_[:, :r], np.diag(s_[:r])) v_low_rank[i, :, :] = v_[:r, :] u = nn.Variable((inmaps * r,) + tuple(kernel), need_grad=True) u.d = (np.transpose(u_low_rank, axes=(0, 2, 1)) .reshape((inmaps * r,) + tuple(kernel))) nn.parameter.set_parameter("U", u) kernel_one = (1,) * len(kernel) v = nn.Variable((outmaps, inmaps * r) + kernel_one, need_grad=True) v.d = (np.transpose(v_low_rank, axes=(2, 0, 1)) .reshape((outmaps, inmaps * r) + kernel_one)) nn.parameter.set_parameter("V", v) if fix_parameters == u.need_grad: u = u.get_unlinked_variable(need_grad=not fix_parameters) if fix_parameters == v.need_grad: v = v.get_unlinked_variable(need_grad=not fix_parameters) if with_bias and b_init is None: b_init = ConstantInitializer() b = None if with_bias: b = get_parameter_or_create( "b", (outmaps,), b_init, True, not fix_parameters) y = F.depthwise_convolution(inp, u, bias=None, base_axis=base_axis, pad=pad, stride=stride, dilation=dilation, multiplier=r) y = F.convolution(y, v, bias=b, base_axis=base_axis, pad=None, stride=None, dilation=None, group=1) return y
SVD convolution is a low rank approximation of the convolution layer. It can be seen as a depth wise convolution followed by a 1x1 convolution. The flattened kernels for the i-th input map are expressed by their low rank approximation. The kernels for the i-th input :math:`{\\mathbf W_i}` are approximated with the singular value decomposition (SVD) and by selecting the :math:`{R}` dominant singular values and the corresponding singular vectors. .. math:: {\\mathbf W_{:,i,:}} ~ {\\mathbf U_i} {\\mathbf V_i}. :math:`{\\mathbf U}` contains the weights of the depthwise convolution with multiplier :math:`{R}` and :math:`{\\mathbf V}` contains the weights of the 1x1 convolution. If `uv_init` is a numpy array, :math:`{\\mathbf U}` and :math:`{\\mathbf V}` are computed such that `uv_init` is approximated by :math:`{\\mathbf{UV}}`. If `uv_init` is `None` or an initializer, the product of :math:`{\\mathbf U}` and :math:`{\\mathbf V}` approximates the random initialization. If :math:`{\\mathbf U}` and :math:`{\\mathbf V}` exist in the context, they take precedence over `uv_init`. Suppose the kernel tensor of the convolution is of :math:`{O \\times I \\times K \\times K}` and the compression rate you want to specify is :math:`{CR}`, then you set :math:`{R}` as .. math:: R = \\left\\lfloor \\frac{(1 - CR)OIK^2}{I(O + K^2)} \\right\\rfloor. Args: inp (~nnabla.Variable): N-D array. outmaps (int): Number of convolution kernels (which is equal to the number of output channels). For example, to apply convolution on an input with 16 types of filters, specify 16. kernel (tuple): Convolution kernel size. For example, to apply convolution on an image with a 3 (height) by 5 (width) two-dimensional kernel, specify (3, 5). r (int): Rank of the factorized layer. pad (tuple): Padding sizes (`int`) for dimensions. stride (tuple): Stride sizes (`int`) for dimensions. dilation (tuple): Dilation sizes (`int`) for dimensions. uv_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`. b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`. base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions. fix_parameters (bool): When set to `True`, the weights and biases will not be updated. rng (numpy.random.RandomState): Random generator for Initializer. with_bias (bool): Specify whether to include the bias term. Returns: :class:`~nnabla.Variable`: :math:`(B + 1)`-D array. (:math:`M_0 \\times \ldots \\times M_{B-1} \\times L`)
384,946
def _merge_statement_lists(stmsA: List["HdlStatement"], stmsB: List["HdlStatement"])\ -> List["HdlStatement"]: if stmsA is None and stmsB is None: return None tmp = [] a_it = iter(stmsA) b_it = iter(stmsB) a = None b = None a_empty = False b_empty = False while not a_empty and not b_empty: while not a_empty: a = next(a_it, None) if a is None: a_empty = True break elif a.rank == 0: tmp.append(a) a = None else: break while not b_empty: b = next(b_it, None) if b is None: b_empty = True break elif b.rank == 0: tmp.append(b) b = None else: break if a is not None or b is not None: a._merge_with_other_stm(b) tmp.append(a) a = None b = None return tmp
Merge two lists of statements into one :return: list of merged statements
384,947
def get_new_call(group_name, app_name, search_path, filename, require_load, version, secure): new_call_kwargs = { : group_name, : filename } new_call_lookup_options = {} new_call_lookup_options[] = secure if search_path: new_call_lookup_options[] = search_path if require_load: new_call_lookup_options[] = require_load if version: new_call_lookup_options[] = version if new_call_lookup_options: new_call_kwargs[] = new_call_lookup_options output = build_call_str(, (app_name,), new_call_kwargs) return output
Build a call to use the new ``get_config`` function from args passed to ``Config.__init__``.
384,948
def from_rgb(cls, r: int, g: int, b: int) -> 'ColorCode':
    # return annotation reconstructed from the docstring
    c = cls()
    c._init_rgb(r, g, b)
    return c
Return a ColorCode from a RGB tuple.
384,949
def verify(self, type_):
    raw_missing, mistyped, mismatched = self._diff_signatures(type_)
    missing = []
    defaults_to_use = {}
    for name in raw_missing:
        try:
            defaults_to_use[name] = self._defaults[name].implementation
        except KeyError:
            missing.append(name)
    if not any((missing, mistyped, mismatched)):
        return defaults_to_use
    raise self._invalid_implementation(type_, missing, mistyped, mismatched)
Check whether a type implements ``self``. Parameters ---------- type_ : type The type to check. Raises ------ TypeError If ``type_`` doesn't conform to our interface. Returns ------- None
384,950
def pix2canvas(self, pt):
    x, y = pt[:2]
    mm = gl.glGetDoublev(gl.GL_MODELVIEW_MATRIX)
    pm = gl.glGetDoublev(gl.GL_PROJECTION_MATRIX)
    vp = gl.glGetIntegerv(gl.GL_VIEWPORT)
    win_x, win_y = float(x), float(vp[3] - y)
    win_z = gl.glReadPixels(int(win_x), int(win_y), 1, 1,
                            gl.GL_DEPTH_COMPONENT, gl.GL_FLOAT)
    pos = glu.gluUnProject(win_x, win_y, win_z, mm, pm, vp)
    return pos
Takes a 2-tuple of (x, y) in window coordinates and gives the (cx, cy, cz) coordinates on the canvas.
384,951
def get_parser():
    from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
    parser = ArgumentParser(description=__doc__,
                            formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument("-m", "--model",
                        dest="model",
                        help="where is the model folder (with a info.yml)?",
                        metavar="FOLDER",
                        type=lambda x: utils.is_valid_folder(parser, x),
                        default=utils.default_model())
    return parser
Return the parser object for this script.
384,952
def real_time_statistics(self):
    # Subscript keys were stripped in extraction; 'workspace_sid' and
    # 'task_queue_sid' are reconstructed from the constructor arguments.
    if self._real_time_statistics is None:
        self._real_time_statistics = TaskQueueRealTimeStatisticsList(
            self._version,
            workspace_sid=self._solution['workspace_sid'],
            task_queue_sid=self._solution['task_queue_sid'],
        )
    return self._real_time_statistics
Access the real_time_statistics :returns: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_real_time_statistics.TaskQueueRealTimeStatisticsList :rtype: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_real_time_statistics.TaskQueueRealTimeStatisticsList
384,953
def from_cmdstan( posterior=None, *, posterior_predictive=None, prior=None, prior_predictive=None, observed_data=None, observed_data_var=None, log_likelihood=None, coords=None, dims=None ): return CmdStanConverter( posterior=posterior, posterior_predictive=posterior_predictive, prior=prior, prior_predictive=prior_predictive, observed_data=observed_data, observed_data_var=observed_data_var, log_likelihood=log_likelihood, coords=coords, dims=dims, ).to_inference_data()
Convert CmdStan data into an InferenceData object. Parameters ---------- posterior : List[str] List of paths to output.csv files. CSV file can be stacked csv containing all the chains cat output*.csv > combined_output.csv posterior_predictive : str, List[Str] Posterior predictive samples for the fit. If endswith ".csv" assumes file. prior : List[str] List of paths to output.csv files CSV file can be stacked csv containing all the chains. cat output*.csv > combined_output.csv prior_predictive : str, List[Str] Prior predictive samples for the fit. If endswith ".csv" assumes file. observed_data : str Observed data used in the sampling. Path to data file in Rdump format. observed_data_var : str, List[str] Variable(s) used for slicing observed_data. If not defined, all data variables are imported. log_likelihood : str Pointwise log_likelihood for the data. coords : dict[str, iterable] A dictionary containing the values that are used as index. The key is the name of the dimension, the values are the index values. dims : dict[str, List(str)] A mapping from variables to a list of coordinate names for the variable. Returns ------- InferenceData object
384,954
def get_path(self, path, query=None):
    return self.get(self.url_path(path), query)
Make a GET request, optionally including a query, to a relative path. The path of the request includes a path on top of the base URL assigned to the endpoint. Parameters ---------- path : str The path to request, relative to the endpoint query : DataQuery, optional The query to pass when making the request Returns ------- resp : requests.Response The server's response to the request See Also -------- get_query, get, url_path
384,955
def mapfi(ol,map_func_args,**kwargs): diff_funcs_arr = kwargs[] lngth = ol.__len__() rslt = [] for i in range(0,lngth): index = i value = ol[i] func = diff_funcs_arr[i] args = map_func_args ele = func(index,*args) rslt.append(ele) return(rslt)
#mapfi 共享相同的o,v不作为map_func参数 # share common other_args,NOT take value as a param for map_func #map_func diff_func(index,*common_args)
384,956
def update(): branch = git.current_branch(refresh=True) develop = conf.get(, ) common.assert_branch_type() common.git_checkout(develop) common.git_pull(develop) common.git_checkout(branch.name) common.git_merge(branch.name, develop)
Update the feature with updates committed to develop. This will merge current develop into the current branch.
384,957
def copyText(self):
    view = self.currentWebView()
    QApplication.clipboard().setText(view.page().selectedText())
Copies the selected text to the clipboard.
384,958
def setupTable_head(self): if "head" not in self.tables: return self.otf["head"] = head = newTable("head") font = self.ufo head.checkSumAdjustment = 0 head.tableVersion = 1.0 head.magicNumber = 0x5F0F3CF5 versionMajor = getAttrWithFallback(font.info, "versionMajor") versionMinor = getAttrWithFallback(font.info, "versionMinor") fullFontRevision = float("%d.%03d" % (versionMajor, versionMinor)) head.fontRevision = round(fullFontRevision, 3) if head.fontRevision != fullFontRevision: logger.warning( "Minor version in %s has too many digits and wons fontRevision field; rounded to %s.", fullFontRevision, head.fontRevision) head.unitsPerEm = otRound(getAttrWithFallback(font.info, "unitsPerEm")) head.created = dateStringToTimeValue(getAttrWithFallback(font.info, "openTypeHeadCreated")) - mac_epoch_diff head.modified = dateStringToTimeValue(dateStringForNow()) - mac_epoch_diff xMin, yMin, xMax, yMax = self.fontBoundingBox head.xMin = otRound(xMin) head.yMin = otRound(yMin) head.xMax = otRound(xMax) head.yMax = otRound(yMax) styleMapStyleName = getAttrWithFallback(font.info, "styleMapStyleName") macStyle = [] if styleMapStyleName == "bold": macStyle = [0] elif styleMapStyleName == "bold italic": macStyle = [0, 1] elif styleMapStyleName == "italic": macStyle = [1] head.macStyle = intListToNum(macStyle, 0, 16) head.flags = intListToNum(getAttrWithFallback(font.info, "openTypeHeadFlags"), 0, 16) head.lowestRecPPEM = otRound(getAttrWithFallback(font.info, "openTypeHeadLowestRecPPEM")) head.fontDirectionHint = 2 head.indexToLocFormat = 0 head.glyphDataFormat = 0
Make the head table. **This should not be called externally.** Subclasses may override or supplement this method to handle the table creation in a different way if desired.
384,959
def reserve(self, doc): try: self.pid.reserve() self.api.metadata_post(doc) except (DataCiteError, HttpError): logger.exception("Failed to reserve in DataCite", extra=dict(pid=self.pid)) raise logger.info("Successfully reserved in DataCite", extra=dict(pid=self.pid)) return True
Reserve a DOI (amounts to upload metadata, but not to mint). :param doc: Set metadata for DOI. :returns: `True` if is reserved successfully.
384,960
def get_current_structure(self):
    struct = self.__class__.get_structure()
    struct.update(self.__field_types__)
    return struct
Returns a dictionary with model field objects. :return: dict
384,961
def ticket_tags(self, id, **kwargs):
    api_path = "/api/v2/tickets/{id}/tags.json"
    api_path = api_path.format(id=id)
    return self.call(api_path, **kwargs)
https://developer.zendesk.com/rest_api/docs/core/tags#show-tags
384,962
def _estimate_centers_widths( self, unique_R, inds, X, W, init_centers, init_widths, template_centers, template_widths, template_centers_mean_cov, template_widths_mean_var_reci): init_estimate = np.hstack( (init_centers.ravel(), init_widths.ravel())) data_sigma = 1.0 / math.sqrt(2.0) * np.std(X) final_estimate = least_squares( self._residual_multivariate, init_estimate, args=( unique_R, inds, X, W, template_centers, template_widths, template_centers_mean_cov, template_widths_mean_var_reci, data_sigma), method=self.nlss_method, loss=self.nlss_loss, bounds=self.bounds, verbose=0, x_scale=self.x_scale, tr_solver=self.tr_solver) return final_estimate.x, final_estimate.cost
Estimate centers and widths Parameters ---------- unique_R : a list of array, Each element contains unique value in one dimension of coordinate matrix R. inds : a list of array, Each element contains the indices to reconstruct one dimension of original cooridnate matrix from the unique array. X : 2D array, with shape [n_voxel, n_tr] fMRI data from one subject. W : 2D array, with shape [K, n_tr] The weight matrix. init_centers : 2D array, with shape [K, n_dim] The initial values of centers. init_widths : 1D array The initial values of widths. template_centers: 1D array The template prior on centers template_widths: 1D array The template prior on widths template_centers_mean_cov: 2D array, with shape [K, cov_size] The template prior on centers' mean template_widths_mean_var_reci: 1D array The reciprocal of template prior on variance of widths' mean Returns ------- final_estimate.x: 1D array The newly estimated centers and widths. final_estimate.cost: float The cost value.
384,963
def run(self):
    try:
        self.listen()
    except Exception as e:
        logger.critical("JobListener instance crashed. Error: %s", str(e))
        logger.critical(traceback.format_exc())
Run thread to listen for jobs and reschedule successful ones.
384,964
def merge_tops(self, tops): top = collections.defaultdict(OrderedDict) orders = collections.defaultdict(OrderedDict) for ctops in six.itervalues(tops): for ctop in ctops: for saltenv, targets in six.iteritems(ctop): if saltenv == : continue for tgt in targets: matches = [] states = OrderedDict() orders[saltenv][tgt] = 0 ignore_missing = False if isinstance(ctop[saltenv][tgt], six.string_types): ctop[saltenv][tgt] = [ctop[saltenv][tgt]] for comp in ctop[saltenv][tgt]: if isinstance(comp, dict): if in comp: matches.append(comp) if in comp: order = comp[] if not isinstance(order, int): try: order = int(order) except ValueError: order = 0 orders[saltenv][tgt] = order if comp.get(, False): ignore_missing = True if isinstance(comp, six.string_types): states[comp] = True if ignore_missing: if saltenv not in self.ignored_pillars: self.ignored_pillars[saltenv] = [] self.ignored_pillars[saltenv].extend(states.keys()) top[saltenv][tgt] = matches top[saltenv][tgt].extend(states) return self.sort_top_targets(top, orders)
Cleanly merge the top files
384,965
def _compile_models(models): tasklist = [] for m in models: if isinstance(m, MultiFitterModel): tasklist += [(, [m])] tasklist += [(, None)] elif hasattr(m, ): tasklist += [(, m)] elif isinstance(m, tuple): tasklist += [(, list(m))] tasklist += [(, None)] elif isinstance(m, list): for sm in m: if isinstance(sm, MultiFitterModel): tasklist += [(, [sm])] elif isinstance(sm, tuple): tasklist += [(, list(sm))] else: raise ValueError( .format( str(type(sm)) ) ) tasklist += [(, len(m))] tasklist += [(, None)] else: raise RuntimeError() return tasklist
Convert ``models`` into a list of tasks. Each task is tuple ``(name, data)`` where ``name`` indicates the task task and ``data`` is the relevant data for that task. Supported tasks and data: - ``'fit'`` and list of models - ``'update-kargs'`` and ``None`` - ``'update-prior'`` and ``None`` - ``'wavg'`` and number of (previous) fits to average
384,966
def polygon(self):
    points = []
    for fp in self.points[1:]:
        points.append((fp.lat, fp.lng))
    return points
return a polygon for the fence
384,967
def backoff( max_tries=constants.BACKOFF_DEFAULT_MAXTRIES, delay=constants.BACKOFF_DEFAULT_DELAY, factor=constants.BACKOFF_DEFAULT_FACTOR, exceptions=None): if max_tries <= 0: raise ValueError(.format(max_tries)) if delay <= 0: raise ValueError(.format(delay)) if factor <= 1: raise ValueError(.format(factor)) def outter(f): def inner(*args, **kwargs): m_max_tries, m_delay = max_tries, delay while m_max_tries > 0: try: retval = f(*args, **kwargs) except exceptions: logger.exception( , f, max_tries, delay, factor, exceptions) m_max_tries -= 1 if m_max_tries <= 0: raise time.sleep(m_delay) m_delay *= factor else: return retval return inner return outter
Implements an exponential backoff decorator which will retry decorated function upon given exceptions. This implementation is based on `Retry <https://wiki.python.org/moin/PythonDecoratorLibrary#Retry>`_ from the *Python Decorator Library*. :param int max_tries: Number of tries before give up. Defaults to :const:`~escpos.constants.BACKOFF_DEFAULT_MAXTRIES`. :param int delay: Delay between retries (in seconds). Defaults to :const:`~escpos.constants.BACKOFF_DEFAULT_DELAY`. :param int factor: Multiply factor in which delay will be increased for the next retry. Defaults to :const:`~escpos.constants.BACKOFF_DEFAULT_FACTOR`. :param exceptions: Tuple of exception types to catch that triggers retry. Any exception not listed will break the decorator and retry routines will not run. :type exceptions: tuple[Exception]
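A hedged usage sketch of the decorator above; the fetch function and URL handling are made up for illustration, and only the exception types listed in exceptions trigger a retry:

import urllib.error
import urllib.request

@backoff(max_tries=3, delay=1, factor=2, exceptions=(urllib.error.URLError,))
def flaky_fetch(url):
    # retried after 1 s and then 2 s pauses; the third failure propagates
    return urllib.request.urlopen(url, timeout=5).read()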
384,968
def _proxy(self):
    # Subscript keys were stripped in extraction; 'account_sid' and 'call_sid'
    # are reconstructed from the constructor arguments.
    if self._context is None:
        self._context = FeedbackContext(
            self._version,
            account_sid=self._solution['account_sid'],
            call_sid=self._solution['call_sid'],
        )
    return self._context
Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: FeedbackContext for this FeedbackInstance :rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackContext
384,969
def get_or_create_candidate(self, row, party, race): person = self.get_or_create_person(row) id_components = row["id"].split("-") candidate_id = "{0}-{1}".format(id_components[1], id_components[2]) defaults = {"party": party, "incumbent": row.get("incumbent")} if person.last_name == "None of these candidates": candidate_id = "{0}-{1}".format(id_components[0], candidate_id) candidate, created = election.Candidate.objects.update_or_create( person=person, race=race, ap_candidate_id=candidate_id, defaults=defaults, ) return candidate
Gets or creates the Candidate object for the given row of AP data. In order to tie with live data, this will synthesize the proper AP candidate id. This function also calls `get_or_create_person` to get a Person object to pass to Django.
384,970
def find_related(self, fullname):
    stack = [fullname]
    found = set()
    while stack:
        name = stack.pop(0)
        names = self.find_related_imports(name)
        stack.extend(set(names).difference(set(found).union(stack)))
        found.update(names)
    found.discard(fullname)
    return sorted(found)
Return a list of non-stdlib modules that are imported directly or indirectly by `fullname`, plus their parents. This method is like :py:meth:`find_related_imports`, but also recursively searches any modules which are imported by `fullname`. :param fullname: Fully qualified name of an _already imported_ module for which source code can be retrieved :type fullname: str
384,971
def set_release_description(self, description, **kwargs): id = self.get_id().replace(, ) path = % (self.manager.path, id) data = {: description} if self.release is None: try: server_data = self.manager.gitlab.http_post(path, post_data=data, **kwargs) except exc.GitlabHttpError as e: raise exc.GitlabCreateError(e.response_code, e.error_message) else: try: server_data = self.manager.gitlab.http_put(path, post_data=data, **kwargs) except exc.GitlabHttpError as e: raise exc.GitlabUpdateError(e.response_code, e.error_message) self.release = server_data
Set the release notes on the tag. If the release doesn't exist yet, it will be created. If it already exists, its description will be updated. Args: description (str): Description of the release. **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabCreateError: If the server fails to create the release GitlabUpdateError: If the server fails to update the release
384,972
def with_metaclass(meta, *bases):
    class metaclass(meta):
        __call__ = type.__call__
        __init__ = type.__init__

        def __new__(cls, name, this_bases, d):
            if this_bases is None:
                return type.__new__(cls, name, (), d)
            return meta(name, bases, d)
    return metaclass("NewBase", None, {})
Create a base class with a metaclass. For example, if you have the metaclass >>> class Meta(type): ... pass Use this as the metaclass by doing >>> from symengine.compatibility import with_metaclass >>> class MyClass(with_metaclass(Meta, object)): ... pass This is equivalent to the Python 2:: class MyClass(object): __metaclass__ = Meta or Python 3:: class MyClass(object, metaclass=Meta): pass That is, the first argument is the metaclass, and the remaining arguments are the base classes. Note that if the base class is just ``object``, you may omit it. >>> MyClass.__mro__ (<class 'MyClass'>, <... 'object'>) >>> type(MyClass) <class 'Meta'>
384,973
def _find_v1_settings(self, settings): if in settings: modname = settings[] if not in settings or len(settings[]) == 0: raise DataError("No modules defined in module_settings.json file") elif len(settings[]) > 1: raise DataError("Multiple modules defined in module_settings.json file", modules=[x for x in settings[]]) else: modname = list(settings[])[0] if modname not in settings[]: raise DataError("Module name does not correspond with an entry in the modules directory", name=modname, modules=[x for x in settings[]]) release_info = self._load_release_info(settings) modsettings = settings[][modname] architectures = settings.get(, {}) target_defs = settings.get(, {}) targets = target_defs.get(modname, []) return TileInfo(modname, modsettings, architectures, targets, release_info)
Parse a v1 module_settings.json file. V1 is the older file format that requires a modules dictionary with a module_name and modules key that could in theory hold information on multiple modules in a single directory.
384,974
def _create(self):
    from samplerate.lowlevel import ffi, src_callback_new, src_delete
    from samplerate.exceptions import ResamplingError

    state, handle, error = src_callback_new(
        self._callback, self._converter_type.value, self._channels)
    if error != 0:
        raise ResamplingError(error)
    self._state = ffi.gc(state, src_delete)
    self._handle = handle
Create new callback resampler.
384,975
def setcontents(source, identifier, pointer):
    record = Record.get_record(identifier)
    Document(record, pointer).setcontents(source)
Patch existing bibliographic record.
384,976
def get_nn_info(self, structure, n): vire = ValenceIonicRadiusEvaluator(structure) site = vire.structure[n] neighs_dists = vire.structure.get_neighbors(site, self.cutoff) rn = vire.radii[vire.structure[n].species_string] reldists_neighs = [] for neigh, dist in neighs_dists: reldists_neighs.append([dist / ( vire.radii[neigh.species_string] + rn), neigh]) siw = [] min_reldist = min([reldist for reldist, neigh in reldists_neighs]) for reldist, s in reldists_neighs: if reldist < (1.0 + self.tol) * min_reldist: w = min_reldist / reldist siw.append({: s, : self._get_image(vire.structure, s), : w, : self._get_original_site( vire.structure, s)}) return siw
Get all near-neighbor sites as well as the associated image locations and weights of the site with index n using the closest relative neighbor distance-based method with VIRE atomic/ionic radii. Args: structure (Structure): input structure. n (integer): index of site for which to determine near neighbors. Returns: siw (list of tuples (Site, array, float)): tuples, each one of which represents a neighbor site, its image location, and its weight.
384,977
def convert_table(self, block): lines_orig = block.split() lines_orig.pop() widest_cell = [] widest_word = [] widths = [] rows = [] lines = [] has_border = False width_unit = 0.0 if not self.test(None, block): return lines_orig if lines_orig[0].startswith(): has_border = True for i in range(0, len(self._split_row(lines_orig[0], has_border))): widest_cell.append(0) widest_word.append(0) widths.append(0) for line in lines_orig: row = self._split_row(line, has_border) for i in range(0, len(row) - len(widest_cell)): widest_cell.append(0) widest_word.append(0) widths.append(0) for i in range(0, len(row)): if len(row[i]) > widest_cell[i]: widest_cell[i] = len(row[i]) words = row[i].split() for word in words: match = re.match(r, word) if match: word = match.group(1) if len(word) > widest_word[i]: widest_word[i] = len(word) rows.append(row) rows.pop(1) for width in widest_cell: width_unit += float(width) width_unit = self.width / width_unit for i in range(0, len(widest_cell)): widths[i] = int(widest_cell[i] * width_unit) if sum(widths) < self.width: widths[widths.index(min(widths))] += self.width - sum(widths) return lines
Converts a table to grid table format
384,978
def dev_null_wrapper(func, *a, **kwargs):
    os.dup2(dev_null, sys.stdout.fileno())
    return_object = func(*a, **kwargs)
    sys.stdout.flush()
    os.dup2(tmp_stdout, sys.stdout.fileno())
    return return_object
Temporarily swap stdout with /dev/null, and execute given function while stdout goes to /dev/null. This is useful because netsnmp writes to stdout and disturbes Icinga result in some cases.
384,979
def prepare(self): if settings[]: log_request(self) for i in self.PREPARES: getattr(self, + i)() if self._finished: return
Behaves like a middleware between raw request and handling process, If `PREPARES` is defined on handler class, which should be a list, for example, ['auth', 'context'], method whose name is constitute by prefix '_prepare_' and string in this list will be executed by sequence. In this example, those methods are `_prepare_auth` and `_prepare_context`
384,980
def randstr(self):
    return gen_rand_str(
        4, 10, use=self.random, keyspace=list(string.ascii_letters))
-> #str result of :func:gen_rand_str
384,981
def ceilpow2(n):
    signif, exponent = frexp(n)
    if signif < 0:
        return 1
    if signif == 0.5:
        exponent -= 1
    return 1 << exponent
convenience function to determine a power-of-2 upper frequency limit
384,982
def lint(filename, options=()): full_path = osp.abspath(filename) parent_path = osp.dirname(full_path) child_path = osp.basename(full_path) while parent_path != "/" and osp.exists(osp.join(parent_path, "__init__.py")): child_path = osp.join(osp.basename(parent_path), child_path) parent_path = osp.dirname(parent_path) run_cmd = "import sys; from pylint.lint import Run; Run(sys.argv[1:])" cmd = ( [sys.executable, "-c", run_cmd] + [ "--msg-template", "{path}:{line}: {category} ({msg_id}, {symbol}, {obj}) {msg}", "-r", "n", child_path, ] + list(options) ) process = Popen( cmd, stdout=PIPE, cwd=parent_path, env=_get_env(), universal_newlines=True ) for line in process.stdout: if line.startswith("No config file found"): continue parts = line.split(":") if parts and parts[0] == child_path: line = ":".join([filename] + parts[1:]) print(line, end=" ") process.wait() return process.returncode
Pylint the given file. When run from emacs we will be in the directory of a file, and passed its filename. If this file is part of a package and is trying to import other modules from within its own package or another package rooted in a directory below it, pylint will classify it as a failed import. To get around this, we traverse down the directory tree to find the root of the package this module is in. We then invoke pylint from this directory. Finally, we must correct the filenames in the output generated by pylint so Emacs doesn't become confused (it will expect just the original filename, while pylint may extend it with extra directories if we've traversed down the tree)
384,983
def _start_element (self, tag, attrs, end): tag = tag.encode(self.encoding, "ignore") self.fd.write("<%s" % tag.replace("/", "")) for key, val in attrs.items(): key = key.encode(self.encoding, "ignore") if val is None: self.fd.write(" %s" % key) else: val = val.encode(self.encoding, "ignore") self.fd.write( % (key, quote_attrval(val))) self.fd.write(end)
Print HTML element with end string. @param tag: tag name @type tag: string @param attrs: tag attributes @type attrs: dict @param end: either > or /> @type end: string @return: None
384,984
def cee_map_priority_table_map_cos1_pgid(self, **kwargs): config = ET.Element("config") cee_map = ET.SubElement(config, "cee-map", xmlns="urn:brocade.com:mgmt:brocade-cee-map") name_key = ET.SubElement(cee_map, "name") name_key.text = kwargs.pop() priority_table = ET.SubElement(cee_map, "priority-table") map_cos1_pgid = ET.SubElement(priority_table, "map-cos1-pgid") map_cos1_pgid.text = kwargs.pop() callback = kwargs.pop(, self._callback) return callback(config)
Auto Generated Code
384,985
def members(self):
    if self._members is None:
        self._members = MemberList(
            self._version,
            service_sid=self._solution['service_sid'],
            channel_sid=self._solution['channel_sid'],
        )
    return self._members
Access the members :returns: twilio.rest.chat.v1.service.channel.member.MemberList :rtype: twilio.rest.chat.v1.service.channel.member.MemberList
384,986
def get_folders(self):
    path = 
    response = self.request(path)
    items = response[]
    folders = []
    for item in items:
        if item.get() == :
            raise Exception(item.get())
        elif item.get() == :
            folders.append(Folder(self, **item))
    return folders
Return list of user's folders. :rtype: list
384,987
def _set_offset_base1(self, v, load=False):
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(
            v,
            base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u: {: 1}, u: {: 0}, u: {: 4}, u: {: 2}, u: {: 3}},),
            is_leaf=True,
            yang_name="offset-base1",
            rest_name="offset-base1",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions={u: {u: None, u: None, u: None}},
            namespace=,
            defining_module=,
            yang_type=,
            is_config=True,
        )
    except (TypeError, ValueError):
        raise ValueError({
            : ,
            : "brocade-uda-access-list:uda-offset-base-type",
            : ,
        })
    self.__offset_base1 = t
    if hasattr(self, '_set'):
        self._set()
Setter method for offset_base1, mapped from YANG variable /uda_key/profile/uda_profile_offsets/offset_base1 (uda-offset-base-type) If this variable is read-only (config: false) in the source YANG file, then _set_offset_base1 is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_offset_base1() directly.
384,988
def setBackground(self, bg):
    self.bg = bg
    if isinstance(bg, list) or isinstance(bg, tuple):
        if len(bg) == 3 and isinstance(bg, list):
            bg.append(255)
        self.bg_vlist.colors = bg * 4
    elif bg in ["flat", "gradient", "oldshadow", "material"]:
        self.bg = ContainerButtonBackground(self, borderstyle=bg, batch=self.batch2d)
    self.redraw()
Sets the background of the Container. Similar to :py:meth:`peng3d.gui.SubMenu.setBackground()`\ , but only affects the region covered by the Container.
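Illustrative calls, assuming `container` is an existing Container instance:

container.setBackground([64, 64, 64])   # flat RGB fill; an alpha of 255 is appended automatically
container.setBackground("gradient")     # or one of "flat", "oldshadow", "material"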
384,989
def check_style(value):
    if sys.version_info[:2] >= (3, 2):
        if value not in FORMAT_STYLE_PATTERNS:
            msg = "Unsupported logging format style! (%r)"
            raise ValueError(format(msg, value))
    elif value != DEFAULT_FORMAT_STYLE:
        msg = "Format string styles other than %r require Python 3.2+!"
        raise ValueError(format(msg, DEFAULT_FORMAT_STYLE))
    return value
Validate a logging format style. :param value: The logging format style to validate (any value). :returns: The logging format character (a string of one character). :raises: :exc:`~exceptions.ValueError` when the given style isn't supported. On Python 3.2+ this function accepts the logging format styles ``%``, ``{`` and ``$`` while on older versions only ``%`` is accepted (because older Python versions don't support alternative logging format styles).
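A short behaviour sketch based on the rules described above:

check_style('%')    # returns '%' on any supported Python version
check_style('{')    # returns '{' on Python 3.2+, raises ValueError on older versions
try:
    check_style('!')
except ValueError as error:
    print(error)    # unsupported style characters are always rejected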
384,990
def detect_complexity(bam_in, genome, out):
    if not genome:
        logger.info("No genome given. skipping.")
        return None
    out_file = op.join(out, op.basename(bam_in) + "_cov.tsv")
    if file_exists(out_file):
        return None
    fai = genome + ".fai"
    cov = pybedtools.BedTool(bam_in).genome_coverage(g=fai, max=1)
    cov.saveas(out_file)
    total = 0
    for region in cov:
        if region[0] == "genome" and int(region[1]) != 0:
            total += float(region[4])
    logger.info("Total genome with sequences: %s " % total)
genome coverage of small RNA
384,991
def sign_decorated(self, data):
    signature = self.sign(data)
    hint = self.signature_hint()
    return Xdr.types.DecoratedSignature(hint, signature)
Sign a bytes-like object and return the decorated signature. Sign a bytes-like object by signing the data using the signing (private) key, and return a decorated signature, which includes the last four bytes of the public key as a signature hint to go along with the signature as an XDR DecoratedSignature object. :param bytes data: A sequence of bytes to sign, typically a transaction.
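To make the hint concrete, a standalone illustration of the idea (this is not the stellar-base API, just the concept described above):

public_key = bytes(range(32))   # stand-in for a 32-byte ed25519 public key
hint = public_key[-4:]          # the last four bytes act as the signature hint
print(hint.hex())               # '1c1d1e1f'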
384,992
def lookup_ids(self, keys):
    keys_len = len(keys)
    ids = {namespace_key: None for namespace_key in keys}
    start = 0
    bulk_insert = self.bulk_insert
    query = 
    check = 
    cursor = self.cursor
    execute = cursor.execute
    while start < keys_len:
        rows = keys[start:start + bulk_insert]
        params = [param for params in rows for param in params]
        id_query = query + (check + ) * (len(rows) - 1) + check
        execute(id_query, params)
        for namespace, key, id_ in cursor:
            ids[(namespace, key)] = id_
        start += bulk_insert
    return ids
Lookup the integer ID associated with each (namespace, key) in the keys list
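Because the query strings above were lost, here is a minimal, self-contained sketch of the batched lookup pattern the method performs; the table name, column names, and the " OR " separator are assumptions, not values from the source.

def build_id_query(rows):
    # One OR-joined SELECT per chunk of (namespace, key) pairs.
    query = "SELECT namespace, key, id FROM ids WHERE "
    check = "(namespace = %s AND key = %s)"
    sql = query + (check + " OR ") * (len(rows) - 1) + check
    params = [param for pair in rows for param in pair]
    return sql, params

sql, params = build_id_query([("ns1", "a"), ("ns1", "b")])
# params == ['ns1', 'a', 'ns1', 'b']; sql contains two OR-joined placeholder groups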
384,993
def column_coordinates(self, X):
    utils.validation.check_is_fitted(self, )
    _, _, _, col_names = util.make_labels_and_names(X)
    if isinstance(X, pd.SparseDataFrame):
        X = X.to_coo()
    elif isinstance(X, pd.DataFrame):
        X = X.to_numpy()
    if self.copy:
        X = X.copy()
    if isinstance(X, np.ndarray):
        X = X.T / X.T.sum(axis=1)[:, None]
    else:
        X = X.T / X.T.sum(axis=1)
    return pd.DataFrame(
        data=X @ sparse.diags(self.row_masses_.to_numpy() ** -0.5) @ self.U_,
        index=col_names
    )
The column principal coordinates.
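A hedged usage sketch with the prince library's CA estimator; the constructor arguments and example data are assumptions:

import pandas as pd
import prince

X = pd.DataFrame([[10, 5, 3], [2, 8, 6], [4, 4, 4]], columns=["a", "b", "c"])
ca = prince.CA(n_components=2).fit(X)
print(ca.column_coordinates(X))   # principal coordinates for columns a, b and c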
384,994
def from_frame(klass, frame, connection):
    event = frame.headers[]
    data = json.loads(frame.body)
    info = data[]
    build = Build.fromDict(info)
    build.connection = connection
    return klass(build, event)
Create a new BuildStateChange event from a Stompest Frame.
384,995
def _buildvgrid(self, R, phi, nsigma, t, sigmaR1, sigmaT1, meanvR, meanvT,
                gridpoints, print_progress, integrate_method, deriv):
    out = evolveddiskdfGrid()
    out.sigmaR1 = sigmaR1
    out.sigmaT1 = sigmaT1
    out.meanvR = meanvR
    out.meanvT = meanvT
    out.vRgrid = nu.linspace(meanvR - nsigma * sigmaR1, meanvR + nsigma * sigmaR1,
                             gridpoints)
    out.vTgrid = nu.linspace(meanvT - nsigma * sigmaT1, meanvT + nsigma * sigmaT1,
                             gridpoints)
    if isinstance(t, (list, nu.ndarray)):
        nt = len(t)
        out.df = nu.zeros((gridpoints, gridpoints, nt))
        for ii in range(gridpoints):
            for jj in range(gridpoints - 1, -1, -1):
                if print_progress:
                    sys.stdout.write('\r' + "Velocity gridpoint %i out of %i" %
                                     (jj + ii * gridpoints + 1, gridpoints * gridpoints))
                    sys.stdout.flush()
                thiso = Orbit([R, out.vRgrid[ii], out.vTgrid[jj], phi])
                out.df[ii, jj, :] = self(thiso, nu.array(t).flatten(),
                                         integrate_method=integrate_method,
                                         deriv=deriv, use_physical=False)
                out.df[ii, jj, nu.isnan(out.df[ii, jj, :])] = 0.
        if print_progress:
            sys.stdout.write('\n')
    else:
        out.df = nu.zeros((gridpoints, gridpoints))
        for ii in range(gridpoints):
            for jj in range(gridpoints):
                if print_progress:
                    sys.stdout.write('\r' + "Velocity gridpoint %i out of %i" %
                                     (jj + ii * gridpoints + 1, gridpoints * gridpoints))
                    sys.stdout.flush()
                thiso = Orbit([R, out.vRgrid[ii], out.vTgrid[jj], phi])
                out.df[ii, jj] = self(thiso, t,
                                      integrate_method=integrate_method,
                                      deriv=deriv, use_physical=False)
                if nu.isnan(out.df[ii, jj]):
                    out.df[ii, jj] = 0.
        if print_progress:
            sys.stdout.write('\n')
    return out
Internal function to grid the vDF at a given location
384,996
def routes(family=None):
    if family != 'inet' and family != 'inet6' and family is not None:
        raise CommandExecutionError(.format(family))
    if __grains__[] == :
        if not salt.utils.path.which('netstat'):
            routes_ = _ip_route_linux()
        else:
            routes_ = _netstat_route_linux()
    elif __grains__[] == :
        routes_ = _netstat_route_sunos()
    elif __grains__[] in [, , ]:
        routes_ = _netstat_route_freebsd()
    elif __grains__[] in []:
        routes_ = _netstat_route_netbsd()
    elif __grains__[] in []:
        routes_ = _netstat_route_openbsd()
    elif __grains__[] in []:
        routes_ = _netstat_route_aix()
    else:
        raise CommandExecutionError()
    if not family:
        return routes_
    else:
        ret = [route for route in routes_ if route[] == family]
        return ret
Return currently configured routes from routing table .. versionchanged:: 2015.8.0 Added support for SunOS (Solaris 10, Illumos, SmartOS) .. versionchanged:: 2016.11.4 Added support for AIX CLI Example: .. code-block:: bash salt '*' network.routes
384,997
def report_parsing_problems(parsing_out):
    _, empty, faulty = parsing_out
    if CONFIG_FILE in empty or CONFIG_FILE in faulty:
        print(, CONFIG_FILE, file=sys.stderr)
        print(, sep=, end=, file=sys.stderr)
    if CONFIG_LOCAL in faulty:
        print(, CONFIG_LOCAL, file=sys.stderr)
        print(, sep=, end=, file=sys.stderr)
Output message about potential parsing problems.
384,998
def set_locs(self, locs):
    _check_implicitly_registered()
    self.locs = locs
    (vmin, vmax) = vi = tuple(self.axis.get_view_interval())
    if vi != self.plot_obj.view_interval:
        self.plot_obj.date_axis_info = None
    self.plot_obj.view_interval = vi
    if vmax < vmin:
        (vmin, vmax) = (vmax, vmin)
    self._set_default_format(vmin, vmax)
Sets the locations of the ticks
384,999
def get(self, key, value):
    if key == 'id':
        response = self._swimlane.request('get', "app/{0}/record/{1}".format(self._app.id, value))
        return Record(self._app, response.json())
    if key == 'tracking_id':
        response = self._swimlane.request('get', "app/{0}/record/tracking/{1}".format(self._app.id, value))
        return Record(self._app, response.json())
Get a single record by id Supports resource cache .. versionchanged:: 2.17.0 Added option to retrieve record by tracking_id Keyword Args: id (str): Full record ID tracking_id (str): Record Tracking ID Returns: Record: Matching Record instance returned from API Raises: TypeError: No id argument provided
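Illustrative usage, assuming `app` is an already-retrieved Swimlane App resource and the placeholder identifiers are hypothetical:

record = app.records.get(id='58f0c2a7...')          # full record ID
same_record = app.records.get(tracking_id='APP-1')  # tracking ID form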