Dataset schema: Unnamed: 0 (int64, row index 0–389k) · code (string, lengths 26–79.6k) · docstring (string, lengths 1–46.9k)
381,000
def to_string(x):
    if isinstance(x, bytes):
        return x.decode()
    if isinstance(x, basestring):
        return x
UTF-8 conversion :param x: value to convert :return: the decoded string
381,001
def cache_from_source(path, debug_override=None):
    # String literals reconstructed from the CPython importlib implementation
    # this function mirrors.
    debug = not sys.flags.optimize if debug_override is None else debug_override
    if debug:
        suffixes = DEBUG_BYTECODE_SUFFIXES
    else:
        suffixes = OPTIMIZED_BYTECODE_SUFFIXES
    head, tail = os.path.split(path)
    base_filename, sep, _ = tail.partition('.')
    if not hasattr(sys, 'implementation'):
        raise NotImplementedError('sys.implementation is not set')
    tag = sys.implementation.cache_tag
    if tag is None:
        raise NotImplementedError('sys.implementation.cache_tag is None')
    filename = ''.join([base_filename, sep, tag, suffixes[0]])
    return os.path.join(head, _PYCACHE, filename)
Given the path to a .py file, return the path to its .pyc/.pyo file. The .py file does not need to exist; this simply returns the path to the .pyc/.pyo file calculated as if the .py file were imported. The extension will be .pyc unless sys.flags.optimize is non-zero, then it will be .pyo. If debug_override is not None, then it must be a boolean and is used in place of sys.flags.optimize. If sys.implementation.cache_tag is None then NotImplementedError is raised.
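A minimal usage sketch of the equivalent stdlib helper, importlib.util.cache_from_source (paths are illustrative; the exact cache tag depends on the running interpreter):

    import importlib.util
    # e.g. 'pkg/mod.py' -> 'pkg/__pycache__/mod.cpython-311.pyc' on CPython 3.11
    print(importlib.util.cache_from_source('pkg/mod.py'))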
381,002
def Parse(self, url, song_name, flag):
    # The tag/class names, quality markers, and print messages were lost in
    # extraction; the string literals below are hypothetical reconstructions.
    file_download = FileDownload()
    html = file_download.get_html_response(url)
    if flag == False:
        soup = BeautifulSoup(html)
        a_list = soup.findAll('a', 'touch')
        text = [str(x) for x in a_list]
        text = ''.join(text)
        text = text.lower()
        string1 = '320 kbps'
        string2 = '190 kbps'
        string3 = '128 kbps'
        href = ''
        if string3 in text:
            href = a_list[2].get('href')
        elif string2 in text:
            href = a_list[1].get('href')
        elif string1 in text:
            href = a_list[0].get('href')
        else:
            self.missing_schema(html, song_name)
            quit()
        return href
    else:
        x, href = self.check_if_song_name(html)
        links = []
        if x == True:
            links = self.list_of_all_href(html)
        else:
            file_download = FileDownload()
            file_download.file_download_cross_platform(href)
            quit()
        return links
It will return the resource URL if the song is found; otherwise it will return the list of songs that can be downloaded.
381,003
def get_file_link(self, file_key):
    self._raise_unimplemented_error()
    # the joiner and HTTP-verb literals were lost in extraction and are
    # reconstructed from the usual REST-client convention
    uri = '/'.join([
        self.api_uri,
        self.files_suffix,
        file_key,
        self.file_link_suffix,
    ])
    return self._req('get', uri)
Gets link to file. Args: file_key: key for the file. Returns: (status code, ?)
381,004
def check_exc_info(self, node):
    # level and keyword literals reconstructed from the docstring
    if self.current_logging_level not in ('error', 'exception'):
        return
    for kw in node.keywords:
        if kw.arg == 'exc_info':
            if self.current_logging_level == 'error':
                violation = ERROR_EXC_INFO_VIOLATION
            else:
                violation = REDUNDANT_EXC_INFO_VIOLATION
            self.violations.append((node, violation))
Reports a violation if exc_info keyword is used with logging.error or logging.exception.
381,005
def rm_parameter(self, name):
    if name not in self._parameters:
        # the original format string lacked a %s placeholder
        raise ValueError("no parameter '%s' found" % (name))
    del self._parameters[name]
    del self.__dict__[name]
Removes a parameter from the existing Datamat. Fails if the parameter doesn't exist.
381,006
def scale_subplots(subplots=None, xlim='auto', ylim='auto'):
    # 'auto' defaults and the 'x'/'y' axis flags reconstructed from the docstring
    auto_axis = ''
    if xlim == 'auto':
        auto_axis += 'x'
    if ylim == 'auto':
        auto_axis += 'y'
    autoscale_subplots(subplots, auto_axis)
    for loc, ax in numpy.ndenumerate(subplots):
        if 'x' not in auto_axis:
            ax.set_xlim(xlim)
        if 'y' not in auto_axis:
            ax.set_ylim(ylim)
Set the x and y axis limits for a collection of subplots. Parameters ----------- subplots : ndarray or list of matplotlib.axes.Axes xlim : None | 'auto' | (xmin, xmax) 'auto' : sets the limits according to the most extreme values of data encountered. ylim : None | 'auto' | (ymin, ymax)
381,007
def build_modules(is_training, vocab_size):
    if is_training:
        estimator_mode = tf.constant(bbb.EstimatorModes.sample)
    else:
        estimator_mode = tf.constant(bbb.EstimatorModes.mean)
    lstm_bbb_custom_getter = bbb.bayes_by_backprop_getter(
        posterior_builder=lstm_posterior_builder,
        prior_builder=custom_scale_mixture_prior_builder,
        kl_builder=bbb.stochastic_kl_builder,
        sampling_mode_tensor=estimator_mode)
    non_lstm_bbb_custom_getter = bbb.bayes_by_backprop_getter(
        posterior_builder=non_lstm_posterior_builder,
        prior_builder=custom_scale_mixture_prior_builder,
        kl_builder=bbb.stochastic_kl_builder,
        sampling_mode_tensor=estimator_mode)
    embed_layer = snt.Embed(
        vocab_size=vocab_size,
        embed_dim=FLAGS.embedding_size,
        custom_getter=non_lstm_bbb_custom_getter,
        name="input_embedding")
    cores = []
    for i in range(FLAGS.n_layers):
        cores.append(
            snt.LSTM(FLAGS.hidden_size,
                     custom_getter=lstm_bbb_custom_getter,
                     forget_bias=0.0,
                     name="lstm_layer_{}".format(i)))
    rnn_core = snt.DeepRNN(
        cores,
        skip_connections=False,
        name="deep_lstm_core")
    output_linear = snt.Linear(
        vocab_size, custom_getter={"w": non_lstm_bbb_custom_getter})
    return embed_layer, rnn_core, output_linear
Construct the modules used in the graph.
381,008
def visitArrayExpr(self, ctx: jsgParser.ArrayExprContext):
    from pyjsg.parser_impl.jsg_ebnf_parser import JSGEbnf
    from pyjsg.parser_impl.jsg_valuetype_parser import JSGValueType

    self._types = [JSGValueType(self._context, vt) for vt in ctx.valueType()]
    if ctx.ebnfSuffix():
        self._ebnf = JSGEbnf(self._context, ctx.ebnfSuffix())
arrayExpr: OBRACKET valueType (BAR valueType)* ebnfSuffix? CBRACKET;
381,009
def send_s3_xsd(self, url_xsd):
    # The regex and message literals were corrupted in extraction; they are
    # reconstructed approximately (the regex pulls schemaLocation URLs out of
    # the XSD content).
    if self.check_s3(self.domain, urlparse(url_xsd).path[1:]):
        return url_xsd
    response = urllib2.urlopen(url_xsd)
    content = response.read()
    in_urls = re.findall(r'schemaLocation="([^"]+)"', content)
    for orig_url in in_urls:
        content = content.replace(orig_url, self.s3_url(orig_url))
    cached = NamedTemporaryFile(delete=False)
    with cached as cache:
        cache.write(content)
    named = cached.name
    new_url = self.cache_s3(url_xsd, named)
    print('Cached XSD available at %s' % new_url)
    return new_url
This method will not always be re-run; only locally and when the XSDs are regenerated. See test_008_force_s3_creation in the test folder.
381,010
def _add_references(self, rec):
    # The subfield codes and tag literals were lost in extraction; the codes
    # below follow the standard Invenio 999C5 reference convention and are
    # reconstructed on that assumption.
    for ref in self.document.getElementsByTagName('ref'):
        for (ref_type, doi, authors, collaboration, journal, volume, page,
             year, label, arxiv, publisher, institution, unstructured_text,
             external_link, report_no, editors) in self._get_reference(ref):
            subfields = []
            if doi:
                subfields.append(('a', doi))
            for author in authors:
                subfields.append(('h', author))
            for editor in editors:
                subfields.append(('e', editor))
            if year:
                subfields.append(('y', year))
            if unstructured_text:
                if page:
                    subfields.append(('m', unstructured_text + ', ' + page))
                else:
                    subfields.append(('m', unstructured_text))
            if collaboration:
                subfields.append(('c', collaboration))
            if institution:
                subfields.append(('m', institution))
            if publisher:
                subfields.append(('p', publisher))
            if arxiv:
                subfields.append(('r', arxiv))
            if report_no:
                subfields.append(('r', report_no))
            if external_link:
                subfields.append(('u', external_link))
            if label:
                subfields.append(('o', label))
            if ref_type == 'book':
                if journal:
                    subfields.append(('t', journal))
                if volume:
                    subfields.append(('m', volume))
                elif page and not unstructured_text:
                    subfields.append(('m', page))
            else:
                if volume and page:
                    subfields.append(('s', journal + "," + volume + "," + page))
                elif journal:
                    subfields.append(('t', journal))
            if ref_type:
                subfields.append(('d', ref_type))
            if not subfields:
                try:
                    r = ref.getElementsByTagName('mixed-citation')[0]
                    text = xml_to_text(r)
                    label = text.split()[0]
                    text = " ".join(text.split()[1:])
                    subfields.append(('m', text))
                    record_add_field(rec, '999', ind1='C', ind2='5',
                                     subfields=subfields)
                except IndexError:
                    try:
                        r = ref.getElementsByTagName('note')[0]
                        subfields.append(('m', xml_to_text(r)))
                        record_add_field(rec, '999', ind1='C', ind2='5',
                                         subfields=subfields)
                    except IndexError:
                        subfields.append(('m', xml_to_text(ref)))
                        record_add_field(rec, '999', ind1='C', ind2='5',
                                         subfields=subfields)
            else:
                record_add_field(rec, '999', ind1='C', ind2='5',
                                 subfields=subfields)
Adds the reference to the record
381,011
def log_loss(oracle, test_seq, ab=[], m_order=None, verbose=False):
    if not ab:
        ab = oracle.get_alphabet()
    if verbose:
        print('Evaluating the log-loss of the sequence...')  # message lost in extraction
    logP = 0.0
    context = []
    increment = np.floor((len(test_seq) - 1) / 100)
    bar_count = -1
    maxContextLength = 0
    avgContext = 0
    for i, t in enumerate(test_seq):
        p, c = predict(oracle, context, ab, verbose=False)
        if len(c) < len(context):
            context = context[-len(c):]
        logP -= np.log2(p[ab[t]])
        context.append(t)
        if m_order is not None:
            if len(context) > m_order:
                context = context[-m_order:]
        avgContext += float(len(context)) / len(test_seq)
        if verbose:
            percentage = np.mod(i, increment)
            if percentage == 0:
                bar_count += 1
            if len(context) > maxContextLength:
                maxContextLength = len(context)
            sys.stdout.write('\r')
            sys.stdout.write("\r[" + "=" * bar_count +
                             " " * (100 - bar_count) + "] " +
                             str(bar_count) + "% " + str(i) + "/" +
                             str(len(test_seq) - 1) +
                             " Current max length: " + str(maxContextLength))
            sys.stdout.flush()
    return logP / len(test_seq), avgContext
Evaluate the average log-loss of a sequence given an oracle
381,012
def _sort_resources_per_hosting_device(resources):
    # key literals reconstructed from the docstring ('hosting_device', 'id')
    hosting_devices = {}
    for key in resources.keys():
        for r in resources.get(key) or []:
            if r.get('hosting_device') is None:
                continue
            hd_id = r['hosting_device']['id']
            hosting_devices.setdefault(hd_id, {})
            hosting_devices[hd_id].setdefault(key, []).append(r)
    return hosting_devices
This function will sort the resources on hosting device. The sorting on hosting device is done by looking up the `hosting_device` attribute of the resource, and its `id`. :param resources: a dict with key of resource name :return dict sorted on the hosting device of input resource. Format: hosting_devices = { 'hd_id1' : {'routers':[routers], 'removed_routers':[routers], .... } 'hd_id2' : {'routers':[routers], .. } ....... }
381,013
def stop(self):
    # unit literals ('day'/'month'/'year') reconstructed from the docstring;
    # the assert message is an approximation
    unit, start_instant, size = self
    year, month, day = start_instant
    if unit == ETERNITY:
        return Instant((float("inf"), float("inf"), float("inf")))
    if unit == 'day':
        if size > 1:
            day += size - 1
            month_last_day = calendar.monthrange(year, month)[1]
            while day > month_last_day:
                month += 1
                if month == 13:
                    year += 1
                    month = 1
                day -= month_last_day
                month_last_day = calendar.monthrange(year, month)[1]
    else:
        if unit == 'month':
            month += size
            while month > 12:
                year += 1
                month -= 12
        else:
            assert unit == 'year', 'Invalid unit: {} of type {}'.format(unit, type(unit))
            year += size
        day -= 1
        if day < 1:
            month -= 1
            if month == 0:
                year -= 1
                month = 12
            day += calendar.monthrange(year, month)[1]
        else:
            month_last_day = calendar.monthrange(year, month)[1]
            if day > month_last_day:
                month += 1
                if month == 13:
                    year += 1
                    month = 1
                day -= month_last_day
    return Instant((year, month, day))
Return the last day of the period as an Instant instance. >>> period('year', 2014).stop Instant((2014, 12, 31)) >>> period('month', 2014).stop Instant((2014, 12, 31)) >>> period('day', 2014).stop Instant((2014, 12, 31)) >>> period('year', '2012-2-29').stop Instant((2013, 2, 28)) >>> period('month', '2012-2-29').stop Instant((2012, 3, 28)) >>> period('day', '2012-2-29').stop Instant((2012, 2, 29)) >>> period('year', '2012-2-29', 2).stop Instant((2014, 2, 28)) >>> period('month', '2012-2-29', 2).stop Instant((2012, 4, 28)) >>> period('day', '2012-2-29', 2).stop Instant((2012, 3, 1))
381,014
def brightness(im):
    im_hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(im_hsv)
    height, weight = v.shape[:2]  # 'weight' here is the image width
    total_bright = 0
    for i in v:
        total_bright = total_bright + sum(i)
    return float(total_bright) / (height * weight)
Return the brightness of an image Args: im(numpy): image Returns: float, average brightness of an image
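A quick sanity check of the helper above, assuming OpenCV and NumPy are installed (values are illustrative):

    import cv2
    import numpy as np

    im = np.full((4, 4, 3), 128, dtype=np.uint8)  # uniform mid-grey BGR image
    print(brightness(im))  # the V channel of pure grey is 128, so this prints 128.0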
381,015
def complete_extra(self, args):
    "Completions for the 'extra' command."
    if len(args) == 0:
        return self._listdir()
    return self._complete_path(args[-1])
Completions for the 'extra' command.
381,016
def setEditorData(self, editor, value):
    if isinstance(editor, XMultiTagEdit):
        if not isinstance(value, list):
            value = [nativestring(value)]
        else:
            value = map(nativestring, value)
        editor.setTags(value)
        editor.setCurrentItem(editor.createItem())
    elif isinstance(editor, QComboBox):
        i = editor.findText(nativestring(value))
        editor.setCurrentIndex(i)
        editor.lineEdit().selectAll()
    elif isinstance(editor, QLineEdit):
        editor.setText(nativestring(value))
        editor.selectAll()
Sets the value for the given editor to the inputed value. :param editor | <QWidget> value | <variant>
381,017
def strfdelta(tdelta: Union[datetime.timedelta, int, float, str],
              fmt='{D:02}d {H:02}h {M:02}m {S:02}s',
              inputtype='timedelta'):
    # defaults and inputtype/field literals reconstructed from the docstring
    if inputtype == 'timedelta':
        remainder = int(tdelta.total_seconds())
    elif inputtype in ['s', 'seconds']:
        remainder = int(tdelta)
    elif inputtype in ['m', 'minutes']:
        remainder = int(tdelta) * 60
    elif inputtype in ['h', 'hours']:
        remainder = int(tdelta) * 3600
    elif inputtype in ['d', 'days']:
        remainder = int(tdelta) * 86400
    elif inputtype in ['w', 'weeks']:
        remainder = int(tdelta) * 604800
    else:
        raise ValueError("Bad inputtype: {}".format(inputtype))
    f = Formatter()
    desired_fields = [field_tuple[1] for field_tuple in f.parse(fmt)]
    possible_fields = ('W', 'D', 'H', 'M', 'S')
    constants = {'W': 604800, 'D': 86400, 'H': 3600, 'M': 60, 'S': 1}
    values = {}
    for field in possible_fields:
        if field in desired_fields and field in constants:
            values[field], remainder = divmod(remainder, constants[field])
    return f.format(fmt, **values)
Convert a ``datetime.timedelta`` object or a regular number to a custom- formatted string, just like the ``strftime()`` method does for ``datetime.datetime`` objects. The ``fmt`` argument allows custom formatting to be specified. Fields can include ``seconds``, ``minutes``, ``hours``, ``days``, and ``weeks``. Each field is optional. Some examples: .. code-block:: none '{D:02}d {H:02}h {M:02}m {S:02}s' --> '05d 08h 04m 02s' (default) '{W}w {D}d {H}:{M:02}:{S:02}' --> '4w 5d 8:04:02' '{D:2}d {H:2}:{M:02}:{S:02}' --> ' 5d 8:04:02' '{H}h {S}s' --> '72h 800s' The ``inputtype`` argument allows ``tdelta`` to be a regular number, instead of the default behaviour of treating it as a ``datetime.timedelta`` object. Valid ``inputtype`` strings: .. code-block:: none 'timedelta', # treats input as a datetime.timedelta 's', 'seconds', 'm', 'minutes', 'h', 'hours', 'd', 'days', 'w', 'weeks' Modified from https://stackoverflow.com/questions/538666/python-format-timedelta-to-string
381,018
def components(self):
    from pandas import DataFrame

    # column names reconstructed from the docstring; dtype from pandas convention
    columns = ['days', 'hours', 'minutes', 'seconds',
               'milliseconds', 'microseconds', 'nanoseconds']
    hasnans = self._hasnans
    if hasnans:
        def f(x):
            if isna(x):
                return [np.nan] * len(columns)
            return x.components
    else:
        def f(x):
            return x.components

    result = DataFrame([f(x) for x in self], columns=columns)
    if not hasnans:
        result = result.astype('int64')
    return result
Return a dataframe of the components (days, hours, minutes, seconds, milliseconds, microseconds, nanoseconds) of the Timedeltas. Returns ------- a DataFrame
381,019
def _structure_frozenset(self, obj, cl):
    if is_bare(cl) or cl.__args__[0] is Any:
        return frozenset(obj)
    else:
        elem_type = cl.__args__[0]
        dispatch = self._structure_func.dispatch
        return frozenset(dispatch(elem_type)(e, elem_type) for e in obj)
Convert an iterable into a potentially generic frozenset.
381,020
def _handle_msg(self, msg):
    # log message literals were lost in extraction and are approximate
    LOG.debug('Received msg from %s << %s', self._remotename, msg)
    if msg.type == BGP_MSG_OPEN:
        if self.state == BGP_FSM_OPEN_SENT:
            self._validate_open_msg(msg)
            self.recv_open_msg = msg
            self.state = BGP_FSM_OPEN_CONFIRM
            self._peer.state.bgp_state = self.state
            self._is_bound = self._peer.bind_protocol(self)
            if not self._is_bound:
                raise bgp.CollisionResolution()
            if msg.hold_time == 0:
                LOG.info('Negotiated hold time is zero; not starting timers.')
            else:
                self._start_timers(msg.hold_time)
            self._send_keepalive()
            return
        else:
            LOG.error('Open message received in unexpected state.')
            raise bgp.FiniteStateMachineError()
    elif msg.type == BGP_MSG_NOTIFICATION:
        if self._peer:
            self._signal_bus.bgp_notification_received(self._peer, msg)
        LOG.error('Received notification message: %s', msg)
        self._socket.close()
        return
    if msg.type == BGP_MSG_KEEPALIVE or msg.type == BGP_MSG_UPDATE:
        if self._expiry:
            self._expiry.reset()
    if msg.type in (BGP_MSG_UPDATE, BGP_MSG_KEEPALIVE, BGP_MSG_ROUTE_REFRESH):
        self._peer.handle_msg(msg)
    self.pause(0)
When a BGP message is received, send it to peer. Open messages are validated here. Peer handler is called to handle each message except for *Open* and *Notification* message. On receiving *Notification* message we close connection with peer.
381,021
def add_fs(self, name, fs, write=False, priority=0):
    if isinstance(fs, text_type):
        fs = open_fs(fs)
    if not isinstance(fs, FS):
        raise TypeError("fs argument should be an FS object or FS URL")
    self._filesystems[name] = _PrioritizedFS(
        priority=(priority, self._sort_index), fs=fs
    )
    self._sort_index += 1
    self._resort()
    if write:
        self.write_fs = fs
        self._write_fs_name = name
Add a filesystem to the MultiFS. Arguments: name (str): A unique name to refer to the filesystem being added. fs (FS or str): The filesystem (instance or URL) to add. write (bool): If this value is True, then the ``fs`` will be used as the writeable FS (defaults to False). priority (int): An integer that denotes the priority of the filesystem being added. Filesystems will be searched in descending priority order and then by the reverse order they were added. So by default, the most recently added filesystem will be looked at first.
381,022
def remove_ip(enode, portlbl, addr, shell=None):
    assert portlbl
    assert ip_interface(addr)
    port = enode.ports[portlbl]
    # command literal reconstructed; the natural iproute2 form is assumed
    cmd = 'ip addr del {addr} dev {port}'.format(addr=addr, port=port)
    response = enode(cmd, shell=shell)
    assert not response
Remove an IP address from an interface. All parameters left as ``None`` are ignored and thus no configuration action is taken for that parameter (left "as-is"). :param enode: Engine node to communicate with. :type enode: topology.platforms.base.BaseNode :param str portlbl: Port label to configure. Port label will be mapped to real port automatically. :param str addr: IPv4 or IPv6 address to remove from the interface: - IPv4 address to remove from the interface in the form ``'192.168.20.20'`` or ``'192.168.20.20/24'``. - IPv6 address to remove from the interface in the form ``'2001::1'`` or ``'2001::1/120'``. :param str shell: Shell name to execute commands. If ``None``, use the Engine Node default shell.
381,023
def send(self, target, nick, msg, msgtype, ignore_length=False, filters=None):
    if not isinstance(msg, str):
        raise Exception("Trying to send a %s to irc, only strings allowed."
                        % type(msg).__name__)
    if filters is None:
        filters = self.outputfilter[target]
    for i in filters:
        # config section/option names were lost in extraction; the control
        # channel key below is a hypothetical reconstruction
        if target != self.config['core']['ctrlchan']:
            msg = i(msg)
    if not ignore_length:
        msg = misc.truncate_msg(msg, 800)
Send a message. Records the message in the log.
381,024
def policy_present(name, rules):
    # utility/key literals reconstructed from the Salt state convention
    url = "v1/sys/policy/{0}".format(name)
    response = __utils__['vault.make_request']('GET', url)

    try:
        if response.status_code == 200:
            return _handle_existing_policy(name, rules, response.json()['rules'])
        elif response.status_code == 404:
            return _create_new_policy(name, rules)
        else:
            response.raise_for_status()
    except Exception as e:
        return {
            'name': name,
            'changes': {},
            'result': False,
            'comment': 'Failed to get policy: {0}'.format(e)
        }
Ensure a Vault policy with the given name and rules is present. name The name of the policy rules Rules formatted as in-line HCL .. code-block:: yaml demo-policy: vault.policy_present: - name: foo/bar - rules: | path "secret/top-secret/*" { policy = "deny" } path "secret/not-very-secret/*" { policy = "write" }
381,025
def plot_di(fignum, DIblock):
    global globals
    X_down, X_up, Y_down, Y_up = [], [], [], []
    plt.figure(num=fignum)
    for rec in DIblock:
        Up, Down = 0, 0
        XY = pmag.dimap(rec[0], rec[1])
        if rec[1] >= 0:
            X_down.append(XY[0])
            Y_down.append(XY[1])
        else:
            X_up.append(XY[0])
            Y_up.append(XY[1])
    if len(X_down) > 0:
        # marker/colour literals were lost in extraction; filled circles for
        # down, open circles for up is the usual equal-area convention
        plt.scatter(X_down, Y_down, marker='o', c='blue')
        if globals != 0:
            globals.DIlist = X_down
            globals.DIlisty = Y_down
    if len(X_up) > 0:
        plt.scatter(X_up, Y_up, marker='o', facecolor='white', edgecolor='blue')
        if globals != 0:
            globals.DIlist = X_up
            globals.DIlisty = Y_up
plots directions on equal area net Parameters _________ fignum : matplotlib figure number DIblock : nested list of dec, inc pairs
381,026
def get_current_span():
    context = RequestContextManager.current_context()
    if context is not None:
        return context.span
    active = opentracing.tracer.scope_manager.active
    return active.span if active else None
Access current request context and extract current Span from it. :return: Return current span associated with the current request context. If no request context is present in thread local, or the context has no span, return None.
381,027
def emotes(self, emotes):
    if emotes is None:
        self._emotes = []
        return
    es = []
    # the separator literal was lost in extraction; emote entries in the
    # IRC tag are '/'-separated
    for estr in emotes.split('/'):
        es.append(Emote.from_str(estr))
    self._emotes = es
Set the emotes :param emotes: the key of the emotes tag :type emotes: :class:`str` :returns: None :rtype: None :raises: None
381,028
def name(self) -> str:
    return OPENSSL_TO_RFC_NAMES_MAPPING[self.ssl_version].get(self.openssl_name, self.openssl_name)
OpenSSL uses a different naming convention than the corresponding RFCs.
381,029
def show_user(self, user):
    # endpoint literal reconstructed from the legacy Twitter REST convention
    url = '/users/show/%s.xml' % (user)
    d = defer.Deferred()
    self.__downloadPage(url, txml.Users(lambda u: d.callback(u))) \
        .addErrback(lambda e: d.errback(e))
    return d
Get the info for a specific user. Returns a delegate that will receive the user in a callback.
381,030
def get_i_name(self, num, is_oai=None):
    if num not in (1, 2):
        raise ValueError("`num` parameter has to be 1 or 2!")
    if is_oai is None:
        is_oai = self.oai_marc
    i_name = "ind" if not is_oai else "i"
    return i_name + str(num)
This method is used mainly internally, but it can be handy if you work with with raw MARC XML object and not using getters. Args: num (int): Which indicator you need (1/2). is_oai (bool/None): If None, :attr:`.oai_marc` is used. Returns: str: current name of ``i1``/``ind1`` parameter based on \ :attr:`oai_marc` property.
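A small illustration of the naming switch (the record object is hypothetical):

    record.oai_marc = False
    print(record.get_i_name(1))  # -> 'ind1'
    record.oai_marc = True
    print(record.get_i_name(2))  # -> 'i2'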
381,031
def scan(repos, options):
    ignore_set = set()
    repos = repos[::-1]
    while repos:
        directory, dotdir = repos.pop()
        ignore_this = any(pat in directory for pat in options.ignore_patterns)
        if ignore_this:
            if options.verbose:
                # byte-string messages reconstructed; originals lost in extraction
                output(b'Ignoring repo: %s' % directory)
                output(b'')
            continue
        vcsname, get_status = SYSTEMS[dotdir]
        lines, subrepos = get_status(directory, ignore_set, options)
        subrepos = [(os.path.join(directory, r), dotdir) for r in subrepos]
        repos.extend(reversed(subrepos))
        if lines is None:
            continue
        if lines or options.verbose:
            output(b'%s - %s' % (directory, vcsname))
            for line in lines:
                output(line)
            output(b'')
Given a repository list [(path, vcsname), ...], scan each of them.
381,032
def train_with_graph(p_graph, qp_pairs, dev_qp_pairs):
    # Log messages, file-name patterns, and open() modes were lost in
    # extraction and are reconstructed approximately.
    global sess
    with tf.Graph().as_default():
        train_model = GAG(cfg, embed, p_graph)
        train_model.build_net(is_training=True)
        tf.get_variable_scope().reuse_variables()
        dev_model = GAG(cfg, embed, p_graph)
        dev_model.build_net(is_training=False)
        with tf.Session() as sess:
            if restore_path is not None:
                restore_mapping = dict(zip(restore_shared, restore_shared))
                logger.debug('init shared variables from {}, restore_scopes: {}'
                             .format(restore_path, restore_shared))
                init_from_checkpoint(restore_path, restore_mapping)
                logger.debug('init variables')
                logger.debug(sess.run(tf.report_uninitialized_variables()))
            init = tf.global_variables_initializer()
            sess.run(init)
            logger.debug('variables initialized')
            saver = tf.train.Saver()
            train_loss = None
            bestacc = 0
            patience = 5
            patience_increase = 2
            improvement_threshold = 0.995
            for epoch in range(max_epoch):
                logger.debug('begin to train')
                train_batches = data.get_batches(qp_pairs, cfg.batch_size)
                train_loss = run_epoch(train_batches, train_model, True)
                logger.debug('epoch ' + str(epoch) + ' loss: ' + str(train_loss))
                dev_batches = list(data.get_batches(dev_qp_pairs, cfg.batch_size))
                _, position1, position2, ids, contexts = run_epoch(
                    dev_batches, dev_model, False)
                answers = generate_predict_json(
                    position1, position2, ids, contexts)
                if save_path is not None:
                    logger.info('save prediction file to {}'.format(save_path))
                    with open(os.path.join(save_path,
                                           'epoch%d.prediction' % epoch), 'w') as file:
                        json.dump(answers, file)
                else:
                    answers = json.dumps(answers)
                    answers = json.loads(answers)
                iter = epoch + 1
                acc = evaluate.evaluate_with_predictions(args.dev_file, answers)
                logger.debug('Send intermediate result: %s', str(acc))
                nni.report_intermediate_result(acc)
                logger.debug('Send intermediate result done.')
                if acc > bestacc:
                    if acc * improvement_threshold > bestacc:
                        patience = max(patience, iter * patience_increase)
                    bestacc = acc
                    if save_path is not None:
                        logger.info('save model to {}'.format(save_path))
                        saver.save(sess, os.path.join(save_path, 'epoch%d.model' % epoch))
                        with open(os.path.join(save_path,
                                               'epoch%d.score' % epoch), 'wb') as file:
                            pickle.dump(
                                (position1, position2, ids, contexts), file)
                logger.debug('epoch %d acc %g bestacc %g' % (epoch, acc, bestacc))
                if patience <= iter:
                    break
            logger.debug('training done')
    return train_loss, bestacc
Train a network from a specific graph.
381,033
def construct_routes(self):
    modules = self.evernode_app.get_modules()
    for module_name in modules:
        with self.app.app_context():
            # module path and config key literals were lost in extraction;
            # the forms below are hypothetical reconstructions
            module = importlib.import_module('modules.%s.routes' % (module_name))
            for route in module.routes:
                self.routes.append(self.make_route(route))
    if self.app.config['DEBUG']:
        print('--- Routes ---')
        print("Loaded Modules: " + str(modules))
Gets modules routes.py and converts to module imports
381,034
def object_clean(self):
    # The seven attribute names were lost in extraction; those below are
    # hypothetical placeholders for the large attributes being removed.
    for sample in self.metadata:
        try:
            delattr(sample[self.analysistype], 'targetnames')
            delattr(sample[self.analysistype], 'targets')
            delattr(sample[self.analysistype], 'dnaseq')
            delattr(sample[self.analysistype], 'protseq')
            delattr(sample[self.analysistype], 'ntindex')
            delattr(sample[self.analysistype], 'aaindex')
            delattr(sample[self.analysistype], 'ntalign')
        except AttributeError:
            pass
Remove large attributes from the metadata objects
381,035
def get_key(dotenv_path, key_to_get, verbose=False):
    key_to_get = str(key_to_get)
    if not os.path.exists(dotenv_path):
        if verbose:
            warnings.warn(f"{dotenv_path} doesn't exist.")
        return None
    dotenv_as_dict = dotenv_values(dotenv_path)
    if key_to_get in dotenv_as_dict:
        return dotenv_as_dict[key_to_get]
    else:
        if verbose:
            warnings.warn(f"key {key_to_get} not found in {dotenv_path}.")
        return None
Gets the value of a given key from the given .env If the .env path given doesn't exist, fails :param dotenv_path: path :param key_to_get: key :param verbose: verbosity flag, raise warning if path does not exist :return: value of variable from environment file or None
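A usage sketch mirroring python-dotenv's public API (file contents are illustrative):

    # .env contains the line: API_KEY=abc123
    from dotenv import get_key
    print(get_key('.env', 'API_KEY'))   # -> 'abc123'
    print(get_key('.env', 'MISSING'))   # -> None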
381,036
def create_from_xml(resultFile, resultElem, columns=None,
                    all_columns=False, columns_relevant_for_diff=set()):
    attributes = RunSetResult._extract_attributes_from_result(resultFile, resultElem)
    if not columns:
        columns = RunSetResult._extract_existing_columns_from_result(
            resultFile, resultElem, all_columns)
    summary = RunSetResult._extract_summary_from_result(resultElem, columns)
    return RunSetResult(
        [(result, resultFile) for result in _get_run_tags_from_xml(resultElem)],
        attributes, columns, summary, columns_relevant_for_diff)
This function extracts everything necessary for creating a RunSetResult object from the "result" XML tag of a benchmark result file. It returns a RunSetResult object, which is not yet fully initialized. To finish initializing the object, call collect_data() before using it for anything else (this is to separate the possibly costly collect_data() call from object instantiation).
381,037
def plot2dhist(xdata, ydata, cmap='binary', interpolation='nearest',
               fig=None, logscale=True, xbins=None, ybins=None,
               nbins=50, pts_only=False, **kwargs):
    # Default cmap/interpolation strings and the warning messages were lost
    # in extraction; they are reconstructed approximately.
    setfig(fig)
    if pts_only:
        plt.plot(xdata, ydata, **kwargs)
        return

    ok = (~np.isnan(xdata) & ~np.isnan(ydata) &
          ~np.isinf(xdata) & ~np.isinf(ydata))
    if (~ok).sum() > 0:  # the original read `~ok.sum() > 0`, inverting the count
        logging.warning('{} NaNs in xdata, {} NaNs in ydata'
                        .format(np.isnan(xdata).sum(), np.isnan(ydata).sum()))
        logging.warning('{} infs in xdata, {} infs in ydata'
                        .format(np.isinf(xdata).sum(), np.isinf(ydata).sum()))

    if xbins is not None and ybins is not None:
        H, xs, ys = np.histogram2d(xdata[ok], ydata[ok], bins=(xbins, ybins))
    else:
        H, xs, ys = np.histogram2d(xdata[ok], ydata[ok], bins=nbins)
    H = H.T

    if logscale:
        H = np.log(H)

    extent = [xs[0], xs[-1], ys[0], ys[-1]]
    plt.imshow(H, extent=extent, interpolation=interpolation,
               aspect='auto', cmap=cmap, origin='lower', **kwargs)
Plots a 2d density histogram of provided data :param xdata,ydata: (array-like) Data to plot. :param cmap: (optional) Colormap to use for density plot. :param interpolation: (optional) Interpolation scheme for display (passed to ``plt.imshow``). :param fig: (optional) Argument passed to :func:`setfig`. :param logscale: (optional) If ``True`` then the colormap will be based on a logarithmic scale, rather than linear. :param xbins,ybins: (optional) Bin edges to use (if ``None``, then use ``np.histogram2d`` to find bins automatically). :param nbins: (optional) Number of bins to use (if ``None``, then use ``np.histogram2d`` to find bins automatically). :param pts_only: (optional) If ``True``, then just a scatter plot of the points is made, rather than the density plot. :param **kwargs: Keyword arguments passed either to ``plt.plot`` or ``plt.imshow`` depending upon whether ``pts_only`` is set to ``True`` or not.
381,038
def update_dns_server(self, service_name, deployment_name, dns_server_name, address):
    _validate_not_none('service_name', service_name)
    _validate_not_none('deployment_name', deployment_name)
    _validate_not_none('dns_server_name', dns_server_name)
    _validate_not_none('address', address)
    return self._perform_put(
        self._get_dns_server_path(service_name, deployment_name, dns_server_name),
        _XmlSerializer.dns_server_to_xml(dns_server_name, address),
        as_async=True)
Updates the ip address of a DNS server. service_name: The name of the service. deployment_name: The name of the deployment. dns_server_name: Specifies the name of the DNS server. address: Specifies the IP address of the DNS server.
381,039
def _get_curvature(nodes, tangent_vec, s):
    _, num_nodes = np.shape(nodes)
    if num_nodes == 2:
        return 0.0
    first_deriv = nodes[:, 1:] - nodes[:, :-1]
    second_deriv = first_deriv[:, 1:] - first_deriv[:, :-1]
    concavity = (
        (num_nodes - 1)
        * (num_nodes - 2)
        * evaluate_multi(second_deriv, np.asfortranarray([s]))
    )
    curvature = _helpers.cross_product(
        tangent_vec.ravel(order="F"), concavity.ravel(order="F")
    )
    curvature /= np.linalg.norm(tangent_vec[:, 0], ord=2) ** 3
    return curvature
r"""Compute the signed curvature of a curve at :math:`s`. Computed via .. math:: \frac{B'(s) \times B''(s)}{\left\lVert B'(s) \right\rVert_2^3} .. image:: ../images/get_curvature.png :align: center .. testsetup:: get-curvature import numpy as np import bezier from bezier._curve_helpers import evaluate_hodograph from bezier._curve_helpers import get_curvature .. doctest:: get-curvature :options: +NORMALIZE_WHITESPACE >>> nodes = np.asfortranarray([ ... [1.0, 0.75, 0.5, 0.25, 0.0], ... [0.0, 2.0 , -2.0, 2.0 , 0.0], ... ]) >>> s = 0.5 >>> tangent_vec = evaluate_hodograph(s, nodes) >>> tangent_vec array([[-1.], [ 0.]]) >>> curvature = get_curvature(nodes, tangent_vec, s) >>> curvature -12.0 .. testcleanup:: get-curvature import make_images make_images.get_curvature(nodes, s, tangent_vec, curvature) .. note:: There is also a Fortran implementation of this function, which will be used if it can be built. Args: nodes (numpy.ndarray): The nodes of a curve. tangent_vec (numpy.ndarray): The already computed value of :math:`B'(s)` s (float): The parameter value along the curve. Returns: float: The signed curvature.
381,040
def _getTransformation(self):
    CheckParent(self)
    val = _fitz.Page__getTransformation(self)
    val = Matrix(val)
    return val
_getTransformation(self) -> PyObject *
381,041
def from_mask(cls, dh_mask, lwin, nwin=None, weights=None):
    # error-message literals were lost in extraction; reconstructed approximately
    if nwin is None:
        nwin = (lwin + 1)**2
    else:
        if nwin > (lwin + 1)**2:
            raise ValueError('nwin must be less than or equal to (lwin+1)**2. '
                             'lwin = {:d}, nwin = {:d}'.format(lwin, nwin))
    if dh_mask.shape[0] % 2 != 0:
        raise ValueError('dh_mask must have an even number of latitude bands. '
                         'nlat = {:d}'.format(dh_mask.shape[0]))
    if dh_mask.shape[1] == dh_mask.shape[0]:
        _sampling = 1
    elif dh_mask.shape[1] == 2 * dh_mask.shape[0]:
        _sampling = 2
    else:
        raise ValueError('dh_mask must be dimensioned as (nlat, nlon) with '
                         'nlon = nlat or nlon = 2 * nlat. Input shape is '
                         '({:d}, {:d})'.format(dh_mask.shape[0], dh_mask.shape[1]))
    mask_lm = _shtools.SHExpandDH(dh_mask, sampling=_sampling, lmax_calc=0)
    area = mask_lm[0, 0, 0] * 4 * _np.pi
    tapers, eigenvalues = _shtools.SHReturnTapersMap(dh_mask, lwin, ntapers=nwin)
    return SHWindowMask(tapers, eigenvalues, weights, area, copy=False)
Construct localization windows that are optimally concentrated within the region specified by a mask. Usage ----- x = SHWindow.from_mask(dh_mask, lwin, [nwin, weights]) Returns ------- x : SHWindow class instance Parameters ---------- dh_mask :ndarray, shape (nlat, nlon) A Driscoll and Healy (1994) sampled grid describing the concentration region R. All elements should either be 1 (for inside the concentration region) or 0 (for outside the concentration region). The grid must have dimensions nlon=nlat or nlon=2*nlat, where nlat is even. lwin : int The spherical harmonic bandwidth of the localization windows. nwin : int, optional, default = (lwin+1)**2 The number of best concentrated eigenvalues and eigenfunctions to return. weights ndarray, optional, default = None Taper weights used with the multitaper spectral analyses.
381,042
def server_sends_binary(self, message, name=None, connection=None, label=None):
    server, name = self._servers.get_with_name(name)
    server.send(message, alias=connection)
    self._register_send(server, label, name, connection=connection)
Send raw binary `message`. If server `name` is not given, uses the latest server. Optional message `label` is shown on logs. Examples: | Server sends binary | Hello! | | Server sends binary | ${some binary} | Server1 | label=DebugMessage | | Server sends binary | ${some binary} | connection=my_connection |
381,043
async def sinter(self, keys, *args):
    "Return the intersection of sets specified by ``keys``"
    args = list_or_args(keys, args)
    return await self.execute_command('SINTER', *args)
Return the intersection of sets specified by ``keys``
381,044
def leaders_in(self, leaderboard_name, current_page, **options):
    if current_page < 1:
        current_page = 1
    page_size = options.get('page_size', self.page_size)
    index_for_redis = current_page - 1
    starting_offset = (index_for_redis * page_size)
    if starting_offset < 0:
        starting_offset = 0
    ending_offset = (starting_offset + page_size) - 1
    raw_leader_data = self._range_method(
        self.redis_connection,
        leaderboard_name,
        int(starting_offset),
        int(ending_offset),
        withscores=False)
    return self._parse_raw_members(
        leaderboard_name, raw_leader_data, **options)
Retrieve a page of leaders from the named leaderboard. @param leaderboard_name [String] Name of the leaderboard. @param current_page [int] Page to retrieve from the named leaderboard. @param options [Hash] Options to be used when retrieving the page from the named leaderboard. @return a page of leaders from the named leaderboard.
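A hypothetical usage sketch (class and constructor names assumed; the leaderboard is populated elsewhere):

    lb = Leaderboard('highscores', page_size=25)
    page1 = lb.leaders_in('highscores', 1)                 # first 25 members
    page2 = lb.leaders_in('highscores', 2, page_size=10)   # override page size per call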
381,045
def load_json(file):
    here = os.path.dirname(os.path.abspath(__file__))
    with open(os.path.join(here, file)) as jfile:
        data = json.load(jfile)
    return data
Load JSON file at app start
381,046
def _clean_record(self, record):
    for k, v in dict(record).items():
        if isinstance(v, dict):
            v = self._clean_record(v)
        if v is None:
            record.pop(k)
    return record
Remove all fields with `None` values
381,047
def getAllAnnotationSets(self):
    for variantSet in self.getAllVariantSets():
        iterator = self._client.search_variant_annotation_sets(
            variant_set_id=variantSet.id)
        for variantAnnotationSet in iterator:
            yield variantAnnotationSet
Returns all variant annotation sets on the server.
381,048
def config(self, averaging=1, datarate=15, mode=MODE_NORMAL):
    averaging_conf = {1: 0, 2: 1, 4: 2, 8: 3}
    if averaging not in averaging_conf.keys():
        # exception messages were lost in extraction; reconstructed approximately
        raise Exception('Averaging must be one of: 1, 2, 4, 8')
    datarates = {0.75: 0, 1.5: 1, 3: 2, 7.5: 4, 15: 5, 30: 6, 75: 7}
    if datarate not in datarates.keys():
        raise Exception('Datarate {} is not available. Use one of: {}'
                        .format(datarate,
                                ', '.join(str(k) for k in datarates.keys())))
    # Build the register value by OR-ing the bit fields together (the
    # original used in-place AND, which always yields zero).
    config_a = 0
    config_a |= averaging_conf[averaging] << 5
    config_a |= datarates[datarate] << 2
    config_a |= mode
    self.i2c_write_register(0x00, config_a)
Set the base config for sensor :param averaging: Sets the numer of samples that are internally averaged :param datarate: Datarate in hertz :param mode: one of the MODE_* constants
381,049
def show_top_losses(self, k:int, max_len:int=70)->None:
    # f-string bodies and column names were lost in extraction; they are
    # reconstructed from the documented behaviour (prediction, actual,
    # loss, probability).
    from IPython.display import display, HTML
    items = []
    tl_val, tl_idx = self.top_losses()
    for i, idx in enumerate(tl_idx):
        if k <= 0: break
        k -= 1
        tx, cl = self.data.dl(self.ds_type).dataset[idx]
        cl = cl.data
        classes = self.data.classes
        txt = ' '.join(tx.text.split()[:max_len]) if max_len is not None else tx.text
        tmp = [txt, f'{classes[self.pred_class[idx]]}', f'{classes[cl]}',
               f'{self.losses[idx]:.2f}', f'{self.probs[idx][cl]:.2f}']
        items.append(tmp)
    items = np.array(items)
    names = ['Text', 'Prediction', 'Actual', 'Loss', 'Probability']
    df = pd.DataFrame({n: items[:, i] for i, n in enumerate(names)}, columns=names)
    with pd.option_context('display.max_colwidth', -1):
        display(HTML(df.to_html(index=False)))
Create a tabulation showing the first `k` texts in top_losses along with their prediction, actual,loss, and probability of actual class. `max_len` is the maximum number of tokens displayed.
381,050
def _edges_replaced(self, object, name, old, new):
    self._delete_edges(old)
    self._add_edges(new)
Handles a list of edges being set.
381,051
def add_data(self, id, key, value):
    # the stripped key literal is assumed to be 'data' from the docstring
    self[str(id)]['data'].setdefault(key, [])
    self[str(id)]['data'][key].append(value)
Add new data item. :param str id: Entry id within ``SDfile``. :param str key: Data item key. :param str value: Data item value. :return: None. :rtype: :py:obj:`None`.
381,052
def most_even_chunk(string, group):
    counts = [0] + most_even(len(string), group)
    indices = accumulate(counts)
    slices = window(indices, 2)
    return [string[slice(*one)] for one in slices]
Divide a string into a list of strings as even as possible.
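A worked example of the even chunking (assuming most_even splits a length of 10 into [4, 3, 3]):

    print(most_even_chunk('abcdefghij', 3))  # -> ['abcd', 'efg', 'hij']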
381,053
def in_domain(self, points):
    return all([
        domain.in_domain(array)
        for domain, array in zip(self._domains,
                                 separate_struct_array(points, self._dtypes))
    ])
Returns ``True`` if all of the given points are in the domain, ``False`` otherwise. :param np.ndarray points: An `np.ndarray` of type `self.dtype`. :rtype: `bool`
381,054
def vertical_gradient(self, x0, y0, x1, y1, start, end):
    x0, y0, x1, y1 = self.rect_helper(x0, y0, x1, y1)
    grad = gradient_list(start, end, y1 - y0)
    for x in range(x0, x1 + 1):
        for y in range(y0, y1 + 1):
            self.point(x, y, grad[y - y0])
Draw a vertical gradient
381,055
def shuffle_step(entries, step):
    answer = []
    for i in range(0, len(entries), step):
        sub = entries[i:i + step]
        shuffle(sub)
        answer += sub
    return answer
Shuffle `entries` in consecutive blocks of `step` elements; elements never leave their block.
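A usage sketch; only the order inside each 3-element block changes:

    from random import shuffle, seed
    seed(0)  # for reproducibility
    print(shuffle_step([1, 2, 3, 4, 5, 6, 7], 3))
    # e.g. [3, 1, 2, 6, 4, 5, 7] - elements never cross block boundaries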
381,056
def paste_buffer(pymux, variables):
    pane = pymux.arrangement.get_active_pane()
    pane.process.write_input(get_app().clipboard.get_data().text, paste=True)
Paste clipboard content into buffer.
381,057
def _default_return_columns(self):
    return_columns = []
    parsed_expr = []
    for key, value in self.components._namespace.items():
        if hasattr(self.components, value):
            sig = signature(getattr(self.components, value))
            # The set literal subtracted here was lost in extraction; 'self'
            # is the natural candidate for excluding the bound-method argument.
            if len(set(sig.parameters) - {'self'}) == 0:
                expr = self.components._namespace[key]
                if expr not in parsed_expr:
                    return_columns.append(key)
                    parsed_expr.append(expr)
    return return_columns
Return a list of the model elements that does not include lookup functions or other functions that take parameters.
381,058
def update_record(self, name, new_data, condition, update_only=False, debug=False):
    # the bookkeeping column name ('num') and print messages were lost in
    # extraction; reconstructed approximately
    self.df['num'] = list(range(len(self.df)))
    df_data = self.df
    condition2 = (df_data.index == name)
    if len(df_data[condition & condition2]) > 0:
        inds = df_data[condition & condition2]['num']
        existing_data = dict(df_data.iloc[inds.iloc[0]])
        existing_data.update(new_data)
        self.update_row(inds.iloc[0], existing_data)
        if len(inds) > 1:
            for ind in inds[1:]:
                print("deleting redundant records for:", name)
                df_data = self.delete_row(ind)
    else:
        if update_only:
            print("no record found for that condition, not updating ", name)
        else:
            print('no record found - creating new one for ', name)
            df_data = self.add_row(name, new_data)
    df_data.sort_index(inplace=True)
    df_data['num'] = list(range(len(df_data)))
    self.df = df_data
    return df_data
Find the first row in self.df with index == name and condition == True. Update that record with new_data, then delete any additional records where index == name and condition == True. Change is inplace
381,059
def pipe():
    r, w = os.pipe()
    return File.fromfd(r, 'r'), File.fromfd(w, 'w')
create an inter-process communication pipe :returns: a pair of :class:`File` objects ``(read, write)`` for the two ends of the pipe
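A hedged sketch of the intended IPC pattern (File is assumed to be the library's file wrapper type):

    read_end, write_end = pipe()
    write_end.write(b'ping')
    # in the parent process (or another green thread), the read end sees the bytes;
    # depending on the wrapper, a flush may be needed before reading
    assert read_end.read(4) == b'ping'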
381,060
def start_session(self):
    if self.has_active_session():
        raise Exception("Session already in progress.")
    response = requests.post(self._get_login_url(),
                             headers=self._get_login_headers(),
                             data=self._get_login_xml())
    response.raise_for_status()
    root = ET.fromstring(response.text)
    for e in root.iter("%ssessionId" % self.SOAP_NS):
        if self.session_id:
            raise Exception("Invalid login attempt. Multiple session ids found.")
        self.session_id = e.text
    for e in root.iter("%sserverUrl" % self.SOAP_NS):
        if self.server_url:
            raise Exception("Invalid login attempt. Multiple server urls found.")
        self.server_url = e.text
    if not self.has_active_session():
        raise Exception("Invalid login attempt resulted in null sessionId [%s] "
                        "and/or serverUrl [%s]." % (self.session_id, self.server_url))
    self.hostname = urlsplit(self.server_url).hostname
Starts a Salesforce session and determines which SF instance to use for future requests.
381,061
def get_sql_type(self, instance, counter_name):
    with self.get_managed_cursor(instance, self.DEFAULT_DB_KEY) as cursor:
        cursor.execute(COUNTER_TYPE_QUERY, (counter_name,))
        (sql_type,) = cursor.fetchone()
        if sql_type == PERF_LARGE_RAW_BASE:
            self.log.warning("Metric {} is of type Base and shouldn't be "
                             "reported this way".format(counter_name))
        base_name = None
        if sql_type in [PERF_AVERAGE_BULK, PERF_RAW_LARGE_FRACTION]:
            candidates = (
                counter_name + " base",
                counter_name.replace("(ms)", "base"),
                counter_name.replace("Avg ", "") + " base",
            )
            try:
                cursor.execute(BASE_NAME_QUERY, candidates)
                base_name = cursor.fetchone().counter_name.strip()
                self.log.debug("Got base metric: {} for metric: {}"
                               .format(base_name, counter_name))
            except Exception as e:
                self.log.warning("Could not get counter_name of base "
                                 "for metric: {}".format(e))
    return sql_type, base_name
Return the type of the performance counter so that we can report it to Datadog correctly If the sql_type is one that needs a base (PERF_RAW_LARGE_FRACTION and PERF_AVERAGE_BULK), the name of the base counter will also be returned
381,062
def route(**kwargs):
    def routed(request, *args2, **kwargs2):
        method = request.method
        if method in kwargs:
            req_method = kwargs[method]
            return req_method(request, *args2, **kwargs2)
        elif 'ELSE' in kwargs:
            return kwargs['ELSE'](request, *args2, **kwargs2)
        else:
            raise Http404()
    return routed
Route a request to different views based on http verb. Kwargs should be 'GET', 'POST', 'PUT', 'DELETE' or 'ELSE', where the first four map to a view to route to for that type of request method/verb, and 'ELSE' maps to a view to pass the request to if the given request method/verb was not specified.
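A hypothetical urls.py wiring for the verb router above (view names are illustrative):

    urlpatterns = [
        path('articles/', route(GET=article_list,
                                POST=article_create,
                                ELSE=method_not_allowed)),
    ]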
381,063
def sample_initial(self, nlive=500, update_interval=None, first_update=None,
                   maxiter=None, maxcall=None, logl_max=np.inf, dlogz=0.01,
                   live_points=None):
    # dtype strings and the 'none' bounding option were lost in extraction
    # and are reconstructed from the dynesty conventions; the line break that
    # split `self.eff = 100. * self.it / self.ncall` is repaired.
    if maxcall is None:
        maxcall = sys.maxsize
    if maxiter is None:
        maxiter = sys.maxsize
    if nlive <= 2 * self.npdim:
        warnings.warn("Beware: `nlive_init <= 2 * ndim`!")

    self.reset()

    if live_points is None:
        self.nlive_init = nlive
        self.live_u = self.rstate.rand(self.nlive_init, self.npdim)
        if self.use_pool_ptform:
            self.live_v = np.array(list(self.M(self.prior_transform,
                                               np.array(self.live_u))))
        else:
            self.live_v = np.array(list(map(self.prior_transform,
                                            np.array(self.live_u))))
        if self.use_pool_logl:
            self.live_logl = np.array(list(self.M(self.loglikelihood,
                                                  np.array(self.live_v))))
        else:
            self.live_logl = np.array(list(map(self.loglikelihood,
                                               np.array(self.live_v))))
    else:
        self.live_u, self.live_v, self.live_logl = live_points
        self.nlive_init = len(self.live_u)

    for i, logl in enumerate(self.live_logl):
        if not np.isfinite(logl):
            if np.sign(logl) < 0:
                self.live_logl[i] = -1e300
            else:
                raise ValueError("The log-likelihood ({0}) of live "
                                 "point {1} located at u={2} v={3} "
                                 "is invalid."
                                 .format(logl, i, self.live_u[i],
                                         self.live_v[i]))

    live_points = [self.live_u, self.live_v, self.live_logl]
    self.live_init = [np.array(l) for l in live_points]
    self.ncall += self.nlive_init
    self.live_bound = np.zeros(self.nlive_init, dtype='int')
    self.live_it = np.zeros(self.nlive_init, dtype='int')

    if update_interval is None:
        update_interval = self.update_interval
    if isinstance(update_interval, float):
        update_interval = int(round(self.update_interval * nlive))
    bounding = self.bounding
    if bounding == 'none':
        update_interval = np.inf
    if first_update is None:
        first_update = self.first_update
    self.sampler = _SAMPLERS[bounding](self.loglikelihood,
                                       self.prior_transform,
                                       self.npdim, self.live_init,
                                       self.method, update_interval,
                                       first_update, self.rstate,
                                       self.queue_size, self.pool,
                                       self.use_pool, self.kwargs)
    self.bound = self.sampler.bound

    for i in range(1):
        # Main nested sampling loop.
        for it, results in enumerate(self.sampler.sample(maxiter=maxiter,
                                                         save_samples=False,
                                                         maxcall=maxcall,
                                                         dlogz=dlogz)):
            (worst, ustar, vstar, loglstar, logvol, logwt, logz, logzvar,
             h, nc, worst_it, boundidx, bounditer, eff, delta_logz) = results

            # Save our base run (which we will use later).
            self.base_id.append(worst)
            self.base_u.append(ustar)
            self.base_v.append(vstar)
            self.base_logl.append(loglstar)
            self.base_logvol.append(logvol)
            self.base_logwt.append(logwt)
            self.base_logz.append(logz)
            self.base_logzvar.append(logzvar)
            self.base_h.append(h)
            self.base_nc.append(nc)
            self.base_it.append(worst_it)
            self.base_n.append(self.nlive_init)
            self.base_boundidx.append(boundidx)
            self.base_bounditer.append(bounditer)
            self.base_scale.append(self.sampler.scale)

            # Save a copy of the results.
            self.saved_id.append(worst)
            self.saved_u.append(ustar)
            self.saved_v.append(vstar)
            self.saved_logl.append(loglstar)
            self.saved_logvol.append(logvol)
            self.saved_logwt.append(logwt)
            self.saved_logz.append(logz)
            self.saved_logzvar.append(logzvar)
            self.saved_h.append(h)
            self.saved_nc.append(nc)
            self.saved_it.append(worst_it)
            self.saved_n.append(self.nlive_init)
            self.saved_boundidx.append(boundidx)
            self.saved_bounditer.append(bounditer)
            self.saved_scale.append(self.sampler.scale)

            self.ncall += nc
            self.eff = 100. * self.it / self.ncall
            self.it += 1
            yield (worst, ustar, vstar, loglstar, logvol, logwt, logz,
                   logzvar, h, nc, worst_it, boundidx, bounditer,
                   self.eff, delta_logz)

        # Add the remaining live points to the set of samples.
        for it, results in enumerate(self.sampler.add_live_points()):
            (worst, ustar, vstar, loglstar, logvol, logwt, logz, logzvar,
             h, nc, worst_it, boundidx, bounditer, eff, delta_logz) = results

            self.base_id.append(worst)
            self.base_u.append(ustar)
            self.base_v.append(vstar)
            self.base_logl.append(loglstar)
            self.base_logvol.append(logvol)
            self.base_logwt.append(logwt)
            self.base_logz.append(logz)
            self.base_logzvar.append(logzvar)
            self.base_h.append(h)
            self.base_nc.append(nc)
            self.base_it.append(worst_it)
            self.base_n.append(self.nlive_init - it)
            self.base_boundidx.append(boundidx)
            self.base_bounditer.append(bounditer)
            self.base_scale.append(self.sampler.scale)

            self.saved_id.append(worst)
            self.saved_u.append(ustar)
            self.saved_v.append(vstar)
            self.saved_logl.append(loglstar)
            self.saved_logvol.append(logvol)
            self.saved_logwt.append(logwt)
            self.saved_logz.append(logz)
            self.saved_logzvar.append(logzvar)
            self.saved_h.append(h)
            self.saved_nc.append(nc)
            self.saved_it.append(worst_it)
            self.saved_n.append(self.nlive_init - it)
            self.saved_boundidx.append(boundidx)
            self.saved_bounditer.append(bounditer)
            self.saved_scale.append(self.sampler.scale)

            self.eff = 100. * self.it / self.ncall
            self.it += 1
            yield (worst, ustar, vstar, loglstar, logvol, logwt, logz,
                   logzvar, h, nc, worst_it, boundidx, bounditer,
                   self.eff, delta_logz)

    self.base = True

    self.saved_batch = np.zeros(len(self.saved_id), dtype='int')
    self.saved_batch_nlive.append(self.nlive_init)
    self.saved_batch_bounds.append((-np.inf, np.inf))
Generate a series of initial samples from a nested sampling run using a fixed number of live points using an internal sampler from :mod:`~dynesty.nestedsamplers`. Instantiates a generator that will be called by the user. Parameters ---------- nlive : int, optional The number of live points to use for the baseline nested sampling run. Default is `500`. update_interval : int or float, optional If an integer is passed, only update the bounding distribution every `update_interval`-th likelihood call. If a float is passed, update the bound after every `round(update_interval * nlive)`-th likelihood call. Larger update intervals can be more efficient when the likelihood function is quick to evaluate. If no value is provided, defaults to the value passed during initialization. first_update : dict, optional A dictionary containing parameters governing when the sampler will first update the bounding distribution from the unit cube (`'none'`) to the one specified by `sample`. maxiter : int, optional Maximum number of iterations. Iteration may stop earlier if the termination condition is reached. Default is `sys.maxsize` (no limit). maxcall : int, optional Maximum number of likelihood evaluations. Iteration may stop earlier if termination condition is reached. Default is `sys.maxsize` (no limit). dlogz : float, optional Iteration will stop when the estimated contribution of the remaining prior volume to the total evidence falls below this threshold. Explicitly, the stopping criterion is `ln(z + z_est) - ln(z) < dlogz`, where `z` is the current evidence from all saved samples and `z_est` is the estimated contribution from the remaining volume. The default is `0.01`. logl_max : float, optional Iteration will stop when the sampled ln(likelihood) exceeds the threshold set by `logl_max`. Default is no bound (`np.inf`). live_points : list of 3 `~numpy.ndarray` each with shape (nlive, ndim) A set of live points used to initialize the nested sampling run. Contains `live_u`, the coordinates on the unit cube, `live_v`, the transformed variables, and `live_logl`, the associated loglikelihoods. By default, if these are not provided the initial set of live points will be drawn from the unit `npdim`-cube. **WARNING: It is crucial that the initial set of live points have been sampled from the prior. Failure to provide a set of valid live points will lead to incorrect results.** Returns ------- worst : int Index of the live point with the worst likelihood. This is our new dead point sample. ustar : `~numpy.ndarray` with shape (npdim,) Position of the sample. vstar : `~numpy.ndarray` with shape (ndim,) Transformed position of the sample. loglstar : float Ln(likelihood) of the sample. logvol : float Ln(prior volume) within the sample. logwt : float Ln(weight) of the sample. logz : float Cumulative ln(evidence) up to the sample (inclusive). logzvar : float Estimated cumulative variance on `logz` (inclusive). h : float Cumulative information up to the sample (inclusive). nc : int Number of likelihood calls performed before the new live point was accepted. worst_it : int Iteration when the live (now dead) point was originally proposed. boundidx : int Index of the bound the dead point was originally drawn from. bounditer : int Index of the bound being used at the current iteration. eff : float The cumulative sampling efficiency (in percent). delta_logz : float The estimated remaining evidence expressed as the ln(ratio) of the current evidence.
381,064
def fromid(self, item_id):
    # Tag names, URL patterns, and regexes were lost in extraction; the
    # literals below are approximate reconstructions for the HN page layout.
    if not item_id:
        raise Exception('Need an item_id for a story')
    soup = get_item_soup(item_id)
    story_id = item_id
    rank = -1
    info_table = soup.findChildren('table')[2]
    info_rows = info_table.findChildren('tr')
    title_row = info_rows[0].findChildren('td')[1]
    title = title_row.find('a').text
    try:
        domain = title_row.find('span').string[2:-2]
        is_self = False
        link = title_row.find('a').get('href')
    except AttributeError:
        domain = BASE_URL
        is_self = True
        link = '%s/item?id=%s' % (BASE_URL, item_id)
    meta_row = info_rows[1].findChildren('td')[1].contents
    points = int(re.match(r'^(\d+)\spoint', meta_row[0].text).groups()[0])
    submitter = meta_row[2].text
    submitter_profile = '%s/%s' % (BASE_URL, meta_row[2].get('href'))
    published_time = ' '.join(meta_row[3].strip().split()[:3])
    comments_link = '%s/item?id=%s' % (BASE_URL, item_id)
    try:
        num_comments = int(re.match(r'(\d+)\s', meta_row[4].text).groups()[0])
    except AttributeError:
        num_comments = 0
    story = Story(rank, story_id, title, link, domain, points, submitter,
                  published_time, submitter_profile, num_comments,
                  comments_link, is_self)
    return story
Initializes an instance of Story for given item_id. It is assumed that the story referenced by item_id is valid and does not raise any HTTP errors. item_id is an int.
381,065
def main(arguments=None):
    # Shell-command and log-message literals were lost in extraction and are
    # reconstructed approximately.
    su = tools(
        arguments=arguments,
        docString=__doc__,
        logLevel="DEBUG",
        options_first=False,
        projectName="tastic"
    )
    arguments, settings, log, dbConn = su.setup()

    # unpack remaining cl arguments into variables
    for arg, val in arguments.iteritems():
        if arg[0] == "-":
            varname = arg.replace("-", "") + "Flag"
        else:
            varname = arg.replace("<", "").replace(">", "")
        if varname == "import":
            varname = "iimport"
        if isinstance(val, str) or isinstance(val, unicode):
            exec(varname + " = '%s'" % (val,))
        else:
            exec(varname + " = %s" % (val,))
        if arg == "--dbConn":
            dbConn = val
        log.debug('%s = %s' % (varname, val,))

    startTime = times.get_now_sql_datetime()
    log.info('-- STARTING TO RUN THE cl_utils.py AT %s' % (startTime,))

    if init:
        from os.path import expanduser
        home = expanduser("~")
        filepath = home + "/.config/tastic/tastic.yaml"
        try:
            cmd = """open %(filepath)s""" % locals()
            p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
        except:
            pass
        try:
            cmd = """start %(filepath)s""" % locals()
            p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
        except:
            pass
    if sort or archive:
        ws = workspace(
            log=log,
            settings=settings,
            fileOrWorkspacePath=pathToFileOrWorkspace
        )
        if sort:
            ws.sort()
        if archive:
            ws.archive_done()
    if sync:
        tp = syncc(
            log=log,
            settings=settings,
            workspaceRoot=pathToWorkspace,
            workspaceName=workspaceName,
            syncFolder=pathToSyncFolder,
            editorialRootPath=editorialRootPath,
            includeFileTags=fileTagsFlag
        )
        tp.sync()
    if reminders:
        r = reminderss(
            log=log,
            settings=settings
        )
        r.import_list(
            listName=listName,
            pathToTaskpaperDoc=pathToTaskpaperDoc
        )

    if "dbConn" in locals() and dbConn:
        dbConn.commit()
        dbConn.close()

    endTime = times.get_now_sql_datetime()
    runningTime = times.calculate_time_difference(startTime, endTime)
    log.info('-- FINISHED ATTEMPT TO RUN THE cl_utils.py AT %s (RUNTIME: %s)'
             % (endTime, runningTime,))
    return
*The main function used when ``cl_utils.py`` is run as a single script from the cl, or when installed as a cl command*
381,066
def intervalAdd(self, a, b, val):
    self.add(a, +val)
    self.add(b + 1, -val)
Variant: adds val to t[a], t[a + 1], ... and t[b] :param int a, b: bounds with 1 <= a <= b
381,067
def check_can_approve(self, request, application, roles):
    try:
        authorised_persons = self.get_authorised_persons(application)
        authorised_persons.get(pk=request.user.pk)
        return True
    except Person.DoesNotExist:
        return False
Check the person's authorization.
381,068
def get_bundle(self, bundle_id=None):
    if bundle_id is None:
        return self.__bundle
    elif isinstance(bundle_id, Bundle):
        bundle_id = bundle_id.get_bundle_id()
    return self.__framework.get_bundle_by_id(bundle_id)
Retrieves the :class:`~pelix.framework.Bundle` object for the bundle matching the given ID (int). If no ID is given (None), the bundle associated to this context is returned. :param bundle_id: A bundle ID (optional) :return: The requested :class:`~pelix.framework.Bundle` object :raise BundleException: The given ID doesn't exist or is invalid
381,069
def disable(self, name=None):
    if name is None:
        for name in self._actions_dict:
            self.disable(name)
        return
    self._actions_dict[name].qaction.setEnabled(False)
Disable one or all actions.
381,070
def _get_results(self, page):
    soup = _get_soup(page)
    details = soup.find_all("tr", class_="odd")
    even = soup.find_all("tr", class_="even")
    for i in range(len(even)):
        details.insert((i * 2) + 1, even[i])
    return self._parse_details(details)
Find every div tag containing torrent details on given page, then parse the results into a list of Torrents and return them
381,071
def rowsBeforeRow(self, rowObject, count):
    webID = rowObject['__id__']
    return self.rowsBeforeItem(
        self.webTranslator.fromWebID(webID), count)
Wrapper around L{rowsBeforeItem} which accepts the web ID for a item instead of the item itself. @param rowObject: a dictionary mapping strings to column values, sent from the client. One of those column values must be C{__id__} to uniquely identify a row. @param count: an integer, the number of rows to return.
381,072
def request(schema):
    def wrapper(func):
        setattr(func, REQUEST, schema)
        return func
    return wrapper
Decorate a function with a request schema.
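A minimal usage sketch (MySchema and the handler are hypothetical; REQUEST is the module's attribute-name constant):

    @request(MySchema)
    def create_user(payload):
        ...

    assert getattr(create_user, REQUEST) is MySchema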
381,073
def _discarded_reads2_out_file_name(self):
    # parameter key reconstructed from the docstring (flag -4)
    if self.Parameters['-4'].isOn():
        discarded_reads2 = self._absolute(str(self.Parameters['-4'].Value))
    else:
        raise ValueError(
            "No discarded-reads2 (flag -4) output path specified")
    return discarded_reads2
Checks if file name is set for discarded reads2 output. Returns absolute path.
381,074
def paste(location):
    copyData = settings.getDataFile()
    if not location:
        location = "."
    try:
        data = pickle.load(open(copyData, "rb"))
        speech.speak("Pasting " + data["copyLocation"] + " to current directory.")
    except:
        # failure messages were garbled in extraction; reconstructed approximately
        speech.fail("It doesn't look like you've copied anything yet.")
        speech.fail("Type 'copy <file>' to copy a file or folder.")
        return
    process, error = subprocess.Popen(["cp", "-r", data["copyLocation"], location],
                                      stderr=subprocess.STDOUT,
                                      stdout=subprocess.PIPE).communicate()
    if "denied" in process:
        speech.fail("Unable to paste your file successfully. This is most likely "
                    "due to a permission issue. You can try to run me as sudo!")
paste a file or directory that has been previously copied
381,075
def match(self, other_version):
    major, minor, patch = _str_to_version(other_version, allow_wildcard=True)
    return (major in [self.major, "*"] and
            minor in [self.minor, "*"] and
            patch in [self.patch, "*"])
Returns True if other_version matches. Args: other_version: string, of the form "x[.y[.x]]" where {x,y,z} can be a number or a wildcard.
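An illustration of the wildcard matching (assuming a Version class wrapping major/minor/patch):

    v = Version('1.2.3')
    assert v.match('1.2.3')
    assert v.match('1.*.*')
    assert not v.match('2.*.*')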
381,076
def _compute_video_hash(videofile):
    seek_positions = [None] * 4
    hash_result = []
    with open(videofile, 'rb') as fp:
        total_size = os.fstat(fp.fileno()).st_size
        if total_size < 8192 + 4096:
            # error message reconstructed; the original literal was lost
            raise exceptions.InvalidFileError(
                'the video file "{}" is too small'.format(
                    os.path.basename(videofile)))
        seek_positions[0] = 4096
        seek_positions[1] = total_size // 3 * 2
        seek_positions[2] = total_size // 3
        seek_positions[3] = total_size - 8192
        for pos in seek_positions:
            fp.seek(pos, 0)
            data = fp.read(4096)
            m = hashlib.md5(data)
            hash_result.append(m.hexdigest())
    return ''.join(hash_result)
Compute the video file's hash. Reference: https://docs.google.com/document/d/1w5MCBO61rKQ6hI5m9laJLWse__yTYdRugpVyz4RzrmM/preview
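Usage is a single call; the path is illustrative and the file must be at least 12 KiB, since the function samples 4 KiB at four offsets:

    try:
        fingerprint = _compute_video_hash('/tmp/example.mkv')  # hypothetical path
        print(fingerprint)  # four MD5 hex digests
    except exceptions.InvalidFileError as err:
        print('file too small:', err)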
381,077
def list_theme():
    from engineer.themes import ThemeManager
    themes = ThemeManager.themes()
    col1, col2 = map(max, zip(*[(len(t.id) + 2, len(t.root_path) + 2)
                                for t in themes.itervalues()]))
    themes = ThemeManager.themes_by_finder()
    for finder in sorted(themes.iterkeys()):
        if len(themes[finder]) > 0:
            puts("%s: " % finder)
            for theme in sorted(themes[finder], key=lambda _: _.id):
                with indent(4):
                    puts(columns(
                        [colored.cyan("%s:" % theme.id), col1],
                        [colored.white(theme.root_path, bold=True), col2]))
List all available Engineer themes.
381,078
def random_density(qubits: Union[int, Qubits]) -> Density:
    N, qubits = qubits_count_tuple(qubits)
    size = (2**N, 2**N)
    # Sample a complex Ginibre matrix, then form G @ G^dagger and normalize
    ginibre_ensemble = (np.random.normal(size=size)
                        + 1j * np.random.normal(size=size)) / np.sqrt(2.0)
    matrix = ginibre_ensemble @ np.transpose(np.conjugate(ginibre_ensemble))
    matrix /= np.trace(matrix)
    return Density(matrix, qubits=qubits)
Returns: A randomly sampled Density from the Hilbert–Schmidt ensemble of quantum states Ref: "Induced measures in the space of mixed quantum states" Karol Zyczkowski, Hans-Juergen Sommers, J. Phys. A34, 7111-7125 (2001) https://arxiv.org/abs/quant-ph/0012101
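A quick sanity check of the density's defining properties (unit trace, Hermitian); the `asoperator` accessor is an assumption about the Density class:

    rho = random_density(2)                 # two-qubit mixed state
    mat = rho.asoperator()                  # hypothetical accessor for the matrix
    assert np.isclose(np.trace(mat), 1.0)   # unit trace by construction
    assert np.allclose(mat, mat.conj().T)   # Hermitian by construction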
381,079
def handle_device_json(self, data):
    # Keep the newest entry first and drop the oldest to cap the list length
    self._device_json.insert(0, data)
    self._device_json.pop()
Manage the device json list.
381,080
def extract_token_and_qualifier(text, line=0, column=0):
    # Most of this body was lost in extraction; the logic below is a minimal
    # rebuild around the surviving final branch (requires: import re).
    lines = text.splitlines()
    line_text = lines[line][:column] if line < len(lines) else ''
    match = re.search(r'[\w.]*$', line_text)
    temp_token = match.group() if match else ''
    if '.' in temp_token:
        temp_token = temp_token.rsplit('.', 1)
        token = temp_token[0]
        qualifier = temp_token[-1]
    else:
        token = ''
        qualifier = temp_token
    return TokenAndQualifier(token, qualifier)
Extracts the token and qualifier from the text given the line/column (see test_extract_token_and_qualifier for examples). :param unicode text: :param int line: 0-based :param int column: 0-based
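Expected behavior on two inputs, mirroring what test_extract_token_and_qualifier would assert (results follow the rebuild above):

    # cursor at the end of 'obj.attr' -> token 'obj', qualifier 'attr'
    extract_token_and_qualifier('obj.attr', line=0, column=8)
    # cursor after a bare name -> empty token, qualifier 'name'
    extract_token_and_qualifier('name', line=0, column=4)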
381,081
def plot(self):
    figure()
    plot_envelope(self.M, self.C, self.xplot)
    for i in range(3):
        f = Realization(self.M, self.C)
        plot(self.xplot, f(self.xplot))
    # The marker style, axis labels and axis mode below were stripped during
    # extraction; these literals are placeholder guesses.
    plot(self.abundance, self.frye, 'k.', markersize=4)
    xlabel('Female abundance')
    ylabel('Frye density')
    title(self.name)
    axis('tight')
Plot posterior from simple nonparametric regression.
381,082
def prepend_to_file(path, data, bufsize=1 << 15):
    # The backup suffix and the two file handles were lost in extraction;
    # 'bak' and the open() calls below are reconstructions.
    backupname = path + os.extsep + 'bak'
    try:
        os.unlink(backupname)  # remove a stale backup, if any
    except OSError:
        pass
    os.rename(path, backupname)
    with open(backupname, 'rb') as inputfile, open(path, 'wb') as outputfile:
        outputfile.write(data)
        buf = inputfile.read(bufsize)
        while buf:
            outputfile.write(buf)
            buf = inputfile.read(bufsize)
    os.remove(backupname)
TODO: * Add a random string to the backup file. * Restore permissions after copy.
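A minimal usage sketch; note data must be bytes, since the rebuild above opens the files in binary mode:

    with open('/tmp/demo.txt', 'wb') as fh:   # hypothetical file
        fh.write(b'world\n')
    prepend_to_file('/tmp/demo.txt', b'hello\n')
    # /tmp/demo.txt now starts with 'hello' followed by the original contents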
381,083
def ai(board, who='x'):  # default player reconstructed; the original literal was stripped
    return sorted(board.possible(), key=lambda b: value(b, who))[-1]
Returns best next board >>> b = Board(); b._rows = [['x', 'o', ' '], ['x', 'o', ' '], [' ', ' ', ' ']] >>> ai(b) <Board |xo.xo.x..|>
381,084
def read_namespaced_network_policy(self, name, namespace, **kwargs):
    # Kwarg keys follow the standard generated-client pattern documented below
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.read_namespaced_network_policy_with_http_info(name, namespace, **kwargs)
    else:
        (data) = self.read_namespaced_network_policy_with_http_info(name, namespace, **kwargs)
        return data
read the specified NetworkPolicy This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_network_policy(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the NetworkPolicy (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18. :param bool export: Should this value be exported. Export strips fields that a user can not specify. Deprecated. Planned for removal in 1.18. :return: V1beta1NetworkPolicy If the method is called asynchronously, returns the request thread.
381,085
def get_preview_url(self, data_type='L1C'):
    # The default value and format string were stripped during extraction;
    # 'L1C' and '{}_PVI.jp2' are best guesses from the docstring.
    if self.data_source is DataSource.SENTINEL2_L1C or self.safe_type is EsaSafeType.OLD_TYPE:
        return self.get_url(AwsConstants.PREVIEW_JP2)
    return self.get_qi_url('{}_PVI.jp2'.format(data_type))
Returns the URL of the full-resolution L1C preview image :return: preview image URL
381,086
def load_vcf(
        path,
        genome=None,
        reference_vcf_key="reference",
        only_passing=True,
        allow_extended_nucleotides=False,
        include_info=True,
        chunk_size=10 ** 5,
        max_variants=None,
        sort_key=variant_ascending_position_sort_key,
        distinct=True):
    require_string(path, "Path or URL to VCF")
    parsed_path = parse_url_or_path(path)
    if parsed_path.scheme and parsed_path.scheme.lower() != "file":
        # Truncated in the source; per the docstring, HTTP URLs fall back to
        # the slower pyvcf-based loader at this point.
        ...
Load reference name and Variant objects from the given VCF filename. Currently only local files are supported by this function (no HTTP); calling it with an HTTP URL falls back to the slower pyvcf-based loader.

Parameters
----------
path : str
    Path to VCF (*.vcf) or compressed VCF (*.vcf.gz).
genome : {pyensembl.Genome, reference name, Ensembl version int}, optional
    Optionally pass in a PyEnsembl Genome object, name of reference, or PyEnsembl release version to specify the reference associated with a VCF (otherwise infer reference from VCF using reference_vcf_key)
reference_vcf_key : str, optional
    Name of metadata field which contains path to reference FASTA file (default = 'reference')
only_passing : boolean, optional
    If true, any entries whose FILTER field is not one of "." or "PASS" are dropped.
allow_extended_nucleotides : boolean, default False
    Allow characters other than A,C,T,G in the ref and alt strings.
include_info : boolean, default True
    Whether to parse the INFO and per-sample columns. If you don't need these, set to False for faster parsing.
chunk_size : int, optional
    Number of records to load in memory at once.
max_variants : int, optional
    If specified, return only the first max_variants variants.
sort_key : fn
    Function which maps each element to a sorting criterion. Set to None to leave the variants unsorted.
distinct : boolean, default True
    Don't keep repeated variants
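A typical call, with illustrative arguments (the path and genome name are assumptions):

    variants = load_vcf(
        'sample.vcf.gz',      # hypothetical local file
        genome='GRCh38',      # skip reference inference from the VCF header
        only_passing=True,
        include_info=False)   # faster when INFO columns are not needed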
381,087
def mine_block(self, *args: Any, **kwargs: Any) -> BaseBlock:
    packed_block = self.pack_block(self.block, *args, **kwargs)
    final_block = self.finalize_block(packed_block)
    self.validate_block(final_block)
    return final_block
Mine the current block. Proxies to self.pack_block method.
381,088
def fetch(self, resource_class):
    if issubclass(resource_class, Entry):
        params = None
        # The attribute name and params key are reconstructed from the
        # __content_type__ attribute used just below
        content_type = getattr(resource_class, '__content_type__', None)
        if content_type is not None:
            params = {'content_type': resource_class.__content_type__}
        return RequestArray(self.dispatcher, utils.path_for_class(resource_class),
                            self.config.resolve_links, params=params)
    else:
        remote_path = utils.path_for_class(resource_class)
        if remote_path is None:
            # Error message reconstructed; the original literal was stripped
            raise Exception('Invalid resource class: {0}'.format(resource_class))
        return RequestArray(self.dispatcher, remote_path, self.config.resolve_links)
Construct a :class:`.Request` for the given resource type. Provided an :class:`.Entry` subclass, the Content Type ID will be inferred and requested explicitly. Examples:: client.fetch(Asset) client.fetch(Entry) client.fetch(ContentType) client.fetch(CustomEntryClass) :param resource_class: The type of resource to be fetched. :return: :class:`.Request` instance.
381,089
def real_time_scheduling(self, availability, oauth, event, target_calendars=()):
    # Dict keys and the endpoint name were stripped during extraction; they
    # are reconstructed from the docstring and parameter names.
    args = {
        'oauth': oauth,
        'event': event,
        'target_calendars': target_calendars,
    }
    if availability:
        options = {}
        options['participants'] = self.map_availability_participants(
            availability.get('participants', None))
        options['required_duration'] = self.map_availability_required_duration(
            availability.get('required_duration', None))
        options['start_interval'] = self.map_availability_required_duration(
            availability.get('start_interval', None))
        options['buffer'] = self.map_availability_buffer(
            availability.get('buffer', None))
        self.translate_available_periods(availability['available_periods'])
        options['available_periods'] = availability['available_periods']
        args['availability'] = options
    return self.request_handler.post(
        endpoint='real_time_scheduling', data=args, use_api_key=True).json()
Generates a real-time scheduling link to start the OAuth process with an event to be automatically upserted :param dict availability: - A dict describing the availability details for the event: :participants - A dict stating who is required for the availability call :required_duration - A dict stating the length of time the event will last for :available_periods - A dict stating the available periods for the event :start_interval - An Integer representing the start_interval of the event :buffer - A dict representing the buffer for the event :param dict oauth: - A dict describing the OAuth flow required: :scope - A String representing the scopes to ask for within the OAuth flow :redirect_uri - A String containing a url to redirect the user to after completing the OAuth flow. :state - A String representing additional state to be passed within the OAuth flow. :param dict event: - A dict describing the event :param list target_calendars: - A list of dicts stating into which calendars to insert the created event See http://www.cronofy.com/developers/api#upsert-event for reference.
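An illustrative payload shape, derived from the parameter documentation above; every value is a placeholder and the field names inside the dicts are assumptions:

    availability = {
        'participants': [{'members': [{'sub': 'acc_123'}]}],
        'required_duration': {'minutes': 30},
        'available_periods': [{'start': '2020-01-01T09:00:00Z',
                               'end': '2020-01-01T17:00:00Z'}],
        'start_interval': 15,
        'buffer': {'before': {'minutes': 5}},
    }
    oauth = {'redirect_uri': 'https://example.com/callback', 'scope': 'create_event'}
    event = {'event_id': 'qa-demo', 'summary': 'Demo call'}
    client.real_time_scheduling(availability, oauth, event)  # hypothetical client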
381,090
def end_span(self, *args, **kwargs):
    cur_span = self.current_span()
    if cur_span is None and self._spans_list:
        cur_span = self._spans_list[-1]
    if cur_span is None:
        # Warning message reconstructed; the original literal was stripped
        logging.warning('No active span, cannot do end_span.')
        return
    cur_span.finish()
    self.span_context.span_id = cur_span.parent_span.span_id if \
        cur_span.parent_span else None
    if isinstance(cur_span.parent_span, trace_span.Span):
        execution_context.set_current_span(cur_span.parent_span)
    else:
        execution_context.set_current_span(None)
    with self._spans_list_condition:
        if cur_span in self._spans_list:
            span_datas = self.get_span_datas(cur_span)
            self.exporter.export(span_datas)
            self._spans_list.remove(cur_span)
    return cur_span
End a span. Update the span_id in SpanContext to the current span's parent span id; Update the current span.
381,091
def handle(self, *args, **kwargs):
    # Kwarg keys and the 'all' sentinel were stripped during extraction;
    # they are reconstructed from the command's semantics.
    frequency = kwargs['frequency']
    frequencies = (settings.STATISTIC_FREQUENCY_ALL if frequency == 'all'
                   else (frequency.split(',') if ',' in frequency else [frequency]))
    if kwargs['list']:
        maintenance.list_statistics()
Command handler for the "metrics" command.
381,092
def isclose(a, b, rtol=1e-5, atol=1e-8):
    return abs(a - b) < (atol + rtol * abs(b))
This is essentially np.isclose, but slightly faster.
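A quick comparison against numpy's version; note this scalar variant uses a strict '<' where np.isclose uses '<=':

    import numpy as np
    a, b = 1.0, 1.0 + 5e-6
    print(isclose(a, b))      # True: |a - b| is within atol + rtol * |b|
    print(np.isclose(a, b))   # True as well, but accepts arrays and handles NaN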
381,093
def GetArchiveInfo(self):
    # The stripped single-quoted literals ('/#SYSTEM', the '/' path prefixes,
    # the '.hhc'/'.hhk' extensions, the array typecode and the error messages)
    # are reconstructed from the docstring and the CHM format.
    self.searchable = extra.is_searchable(self.file)
    self.lcid = None
    result, ui = chmlib.chm_resolve_object(self.file, '/#SYSTEM')
    if result != chmlib.CHM_RESOLVE_SUCCESS:
        sys.stderr.write('GetArchiveInfo: #SYSTEM does not exist\n')
        return 0
    size, text = chmlib.chm_retrieve_object(self.file, ui, 4, ui.length)
    if size == 0:
        sys.stderr.write('GetArchiveInfo: file size is zero\n')
        return 0
    buff = array.array('B', text)
    index = 0
    while index < size:
        cursor = buff[index] + (buff[index + 1] * 256)
        if cursor == 0:
            index += 2
            cursor = buff[index] + (buff[index + 1] * 256)
            index += 2
            self.topics = '/' + text[index:index + cursor - 1]
        elif cursor == 1:
            index += 2
            cursor = buff[index] + (buff[index + 1] * 256)
            index += 2
            self.index = '/' + text[index:index + cursor - 1]
        elif cursor == 2:
            index += 2
            cursor = buff[index] + (buff[index + 1] * 256)
            index += 2
            self.home = '/' + text[index:index + cursor - 1]
        elif cursor == 3:
            index += 2
            cursor = buff[index] + (buff[index + 1] * 256)
            index += 2
            self.title = text[index:index + cursor - 1]
        elif cursor == 4:
            index += 2
            cursor = buff[index] + (buff[index + 1] * 256)
            index += 2
            self.lcid = buff[index] + (buff[index + 1] * 256)
        elif cursor == 6:
            index += 2
            cursor = buff[index] + (buff[index + 1] * 256)
            index += 2
            tmp = text[index:index + cursor - 1]
            if not self.topics:
                tmp1 = '/' + tmp + '.hhc'
                tmp2 = '/' + tmp + '.hhk'
                res1, ui1 = chmlib.chm_resolve_object(self.file, tmp1)
                res2, ui2 = chmlib.chm_resolve_object(self.file, tmp2)
                if not self.topics and res1 == chmlib.CHM_RESOLVE_SUCCESS:
                    self.topics = '/' + tmp + '.hhc'
                if not self.index and res2 == chmlib.CHM_RESOLVE_SUCCESS:
                    self.index = '/' + tmp + '.hhk'
        elif cursor == 16:
            index += 2
            cursor = buff[index] + (buff[index + 1] * 256)
            index += 2
            self.encoding = text[index:index + cursor - 1]
        else:
            index += 2
            cursor = buff[index] + (buff[index + 1] * 256)
            index += 2
        index += cursor
    self.GetWindowsInfo()
    if not self.lcid:
        self.lcid = extra.get_lcid(self.file)
    return 1
Obtains information on CHM archive. This function checks the /#SYSTEM file inside the CHM archive to obtain the index, home page, topics, encoding and title. It is called from LoadCHM.
381,094
def get_all_fields(obj):
    fields = []
    for f in obj._meta.fields:
        fname = f.name
        # Prefer the human-readable value for choice fields
        get_choice = "get_" + fname + "_display"
        if hasattr(obj, get_choice):
            value = getattr(obj, get_choice)()
        else:
            try:
                value = getattr(obj, fname)
            except Exception:
                value = None
        if isinstance(value, list):
            value = ",".join(str(v) for v in value)
        if f.editable and value and f.name:
            fields.append(
                {"label": f.verbose_name, "name": f.name, "value": value}
            )
    return fields
Returns a list of all field names on the instance.
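Hypothetical usage with a Django model instance; the Author model is an assumption for illustration:

    # class Author(models.Model):
    #     name = models.CharField(max_length=50, verbose_name='Full name')
    author = Author(name='Ada Lovelace')
    for field in get_all_fields(author):
        print(field['label'], '=', field['value'])   # e.g. "Full name = Ada Lovelace"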
381,095
def copy(self):
    if self._page_control is not None and self._page_control.hasFocus():
        self._page_control.copy()
    elif self._control.hasFocus():
        text = self._control.textCursor().selection().toPlainText()
        if text:
            # Strip prompts line by line, then rejoin with newlines
            lines = map(self._transform_prompt, text.splitlines())
            text = '\n'.join(lines)
            QtGui.QApplication.clipboard().setText(text)
    else:
        self.log.debug("frontend widget : unknown copy target")
Copy the currently selected text to the clipboard, removing prompts.
381,096
def _to_DOM(self):
    root_node = ET.Element("no2index")
    reference_time_node = ET.SubElement(root_node, "reference_time")
    reference_time_node.text = str(self._reference_time)
    reception_time_node = ET.SubElement(root_node, "reception_time")
    reception_time_node.text = str(self._reception_time)
    interval_node = ET.SubElement(root_node, "interval")
    interval_node.text = str(self._interval)
    no2_samples_node = ET.SubElement(root_node, "no2_samples")
    for smpl in self._no2_samples:
        s = smpl.copy()
        # The sample dict keys and the float format string were stripped during
        # extraction; 'label', 'value', 'precision' and '{:.12e}' are guesses.
        s['label'] = s['label']
        s['value'] = '{:.12e}'.format(s['value'])
        s['precision'] = '{:.12e}'.format(s['precision'])
        xmlutils.create_DOM_node_from_dict(s, "no2_sample", no2_samples_node)
    root_node.append(self._location._to_DOM())
    return root_node
Dumps object data to a fully traversable DOM representation of the object. :returns: a ``xml.etree.Element`` object
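Serializing the returned DOM to text is a standard ElementTree call; the instance name is hypothetical:

    dom = no2_index._to_DOM()   # hypothetical NO2Index instance
    print(ET.tostring(dom, encoding='unicode'))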
381,097
def WaitProcessing(obj, eng, callbacks, exc_info):
    e = exc_info[1]
    obj.set_action(e.action, e.message)
    obj.save(status=eng.object_status.WAITING,
             callback_pos=eng.state.callback_pos,
             id_workflow=eng.uuid)
    eng.save(WorkflowStatus.HALTED)
    # The original format string lost a placeholder; one '%s' is restored so
    # the three arguments line up.
    eng.log.warning("Workflow %s waiting at task %s with message: %s",
                    eng.name, eng.current_taskname or "Unknown", e.message)
    db.session.commit()
    TransitionActions.HaltProcessing(obj, eng, callbacks, exc_info)
Take actions when WaitProcessing is raised. .. note:: We're essentially doing HaltProcessing, plus `obj.set_action` and object status `WAITING` instead of `HALTED`. This is not present in TransitionActions so that's why it is not calling super in this case.
381,098
def check_spot_requests(self, requests, tags=None):
    instances = [None] * len(requests)
    ec2_requests = self.retry_on_ec2_error(
        self.ec2.get_all_spot_instance_requests, request_ids=requests)
    for req in ec2_requests:
        if req.instance_id:
            instance = self.retry_on_ec2_error(
                self.ec2.get_only_instances, req.instance_id)[0]
            if not instance:
                # Message reconstructed; the original literal was stripped
                raise EC2ManagerException(
                    'Failed to get instance %s (status %s) for request %s'
                    % (req.instance_id, req.status.code, req.id))
            instances[requests.index(req.id)] = instance
            self.retry_on_ec2_error(self.ec2.create_tags, [instance.id], tags or {})
            # Log format strings reconstructed as well
            logger.info('Request %s fulfilled (status %s, state %s)',
                        req.id, req.status.code, req.state)
            logger.info('Instance %s is %s at %s (%s)',
                        instance.id, instance.state,
                        instance.public_dns_name, instance.ip_address)
        elif req.state != "open":
            instances[requests.index(req.id)] = req
    return instances
Check status of one or more EC2 spot instance requests. :param requests: List of EC2 spot instance request IDs. :type requests: list :param tags: :type tags: dict :return: List of boto.ec2.instance.Instance's created, order corresponding to requests param (None if request still open, boto.ec2.instance.Reservation if request is no longer open) :rtype: list
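A polling sketch with hypothetical request IDs; per the return contract above, still-open requests come back as None:

    pending = ['sir-abc123', 'sir-def456']   # placeholder request IDs
    results = manager.check_spot_requests(pending, tags={'project': 'fuzzing'})
    for req_id, result in zip(pending, results):
        if result is None:
            print(req_id, 'still open')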
381,099
def mkdir(name, path):
    with Session() as session:
        try:
            session.VFolder(name).mkdir(path)
            # Completion message reconstructed; the original literal was stripped
            print_done('Done.')
        except Exception as e:
            print_error(e)
            sys.exit(1)
Create an empty directory in the virtual folder. \b NAME: Name of a virtual folder. PATH: The name or path of directory. Parent directories are created automatically if they do not exist.