Columns: "Unnamed: 0" (int64, values 0 to 389k), "code" (string, lengths 26 to 79.6k), "docstring" (string, lengths 1 to 46.9k)
10,400
def parse_grain(grain): if not grain: return InstantTime.day if grain.lower() == 'week': return InstantTime.week return InstantTime.day
Parse a string to a granularity, e.g. "Day" to InstantTime.day. :param grain: a string representing a granularity.
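A minimal usage sketch (hypothetical; it assumes InstantTime is an enum exposing day and week members):
>>> parse_grain("Week") is InstantTime.week
True
>>> parse_grain("Day") is InstantTime.day
True
>>> parse_grain(None) is InstantTime.day
True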
10,401
def get_prev_status_from_history(instance, status=None): target = status or api.get_workflow_status_of(instance) history = getReviewHistory(instance, reverse=True) history = map(lambda event: event["review_state"], history) if target not in history or history.index(target) == len(history)-1: return None return history[history.index(target)+1]
Returns the previous status of the object. If status is set, returns the previous status before the object reached the status passed in. If instance has reached the status passed in more than once, only the last one is considered.
10,402
def _X_selected(X, selected): n_features = X.shape[1] ind = np.arange(n_features) sel = np.zeros(n_features, dtype=bool) sel[np.asarray(selected)] = True non_sel = np.logical_not(sel) n_selected = np.sum(sel) X_sel = X[:, ind[sel]] X_not_sel = X[:, ind[non_sel]] return X_sel, X_not_sel, n_selected, n_features
Split X into selected features and other features
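A short sketch of how the split behaves (only numpy is assumed):
>>> import numpy as np
>>> X = np.arange(12).reshape(3, 4)
>>> X_sel, X_not_sel, n_selected, n_features = _X_selected(X, [0, 2])
>>> X_sel.shape, X_not_sel.shape, int(n_selected), n_features
((3, 2), (3, 2), 2, 4)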
10,403
def debug(ftn, txt): if debug_p: sys.stdout.write("{0}.{1}:{2}\n".format(modname, ftn, txt)) sys.stdout.flush()
Used for debugging.
10,404
def pull_full_properties(self): full_properties = self.manager.session.get(self._uri) self._properties = dict(full_properties) self._properties_timestamp = int(time.time()) self._full_properties = True
Retrieve the full set of resource properties and cache them in this object. Authorization requirements: * Object-access permission to this resource. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError`
10,405
def run_via_binary(self, command=None, foreground=False, volumes=None, additional_opts=None, default_options=None, name=None, *args, **kwargs): command = deepcopy(command) or [] volumes = deepcopy(volumes) or [] additional_opts = deepcopy(additional_opts) or [] internalkw = deepcopy(kwargs) or {} inernalargs = deepcopy(args) or [] if default_options is None: default_options = ["-b"] logger.info("run container via binary in background") machine_name = constants.CONU_ARTIFACT_TAG if name: machine_name += name else: machine_name += random_str() if not foreground: internalkw["stdout"] = subprocess.PIPE internalkw["stderr"] = subprocess.PIPE additional_opts += default_options if volumes: additional_opts += self.get_volume_options(volumes=volumes) logger.debug("starting NSPAWN") systemd_command = [ "systemd-nspawn", "--machine", machine_name, "-i", self.local_location] + additional_opts + command logger.debug("Start command: %s" % " ".join(systemd_command)) callback_method = (subprocess.Popen, systemd_command, inernalargs, internalkw) self.container_process = NspawnContainer.internal_run_container( name=machine_name, callback_method=callback_method, foreground=foreground ) if foreground: return self.container_process else: return NspawnContainer(self, None, name=machine_name, start_process=self.container_process, start_action=callback_method)
Create a new NspawnContainer instance when not running in the foreground; when running in the foreground, return the process object :param command: list - command to run :param foreground: bool - run process in the foreground :param volumes: list - additional bind mounts :param additional_opts: list - additional boot options for the systemd-nspawn command :param default_options: default boot option (-b) :param name: str - name of the running instance :param args: pass-through params to subprocess.Popen :param kwargs: pass-through params to subprocess.Popen :return: process or NspawnContainer instance
10,406
def transformer_image_decoder(targets, encoder_output, ed_attention_bias, hparams, name=None): with tf.variable_scope(name, default_name="transformer_dec"): batch_size = common_layers.shape_list(targets)[0] targets = tf.reshape(targets, [batch_size, hparams.img_len, hparams.img_len, hparams.num_channels * hparams.hidden_size]) decoder_input, _, _ = cia.prepare_decoder(targets, hparams) decoder_output = cia.transformer_decoder_layers( decoder_input, encoder_output, hparams.num_decoder_layers or hparams.num_hidden_layers, hparams, attention_type=hparams.dec_attention_type, encoder_decoder_attention_bias=ed_attention_bias, name="decoder") decoder_output = tf.reshape(decoder_output, [batch_size, hparams.img_len, hparams.img_len * hparams.num_channels, hparams.hidden_size]) return decoder_output
Transformer image decoder over targets with local attention. Args: targets: Tensor of shape [batch, ...], and whose size is batch * height * width * hparams.num_channels * hparams.hidden_size. encoder_output: Tensor of shape [batch, length_kv, hparams.hidden_size]. ed_attention_bias: Tensor which broadcasts with shape [batch, hparams.num_heads, length_q, length_kv]. Encoder-decoder attention bias. hparams: HParams. name: string, variable scope. Returns: Tensor of shape [batch, height, width * hparams.num_channels, hparams.hidden_size].
10,407
def gep(self, ptr, indices, inbounds=False, name=''): instr = instructions.GEPInstr(self.block, ptr, indices, inbounds=inbounds, name=name) self._insert(instr) return instr
Compute effective address (getelementptr): name = getelementptr ptr, <indices...>
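A hedged usage sketch with llvmlite's IR layer (the `builder` and `arr` values are hypothetical; `arr` is assumed to point at an array):
>>> from llvmlite import ir
>>> i32 = ir.IntType(32)
>>> elem = builder.gep(arr, [ir.Constant(i32, 0), ir.Constant(i32, 2)], inbounds=True, name='elem')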
10,408
def get(self, *args, **kwargs): try: req_func = self.session.get if self.session else requests.get req = req_func(*args, **kwargs) req.raise_for_status() self.failed_last = False return req except requests.exceptions.RequestException as e: self.log_error(e) for i in range(1, self.num_retries): sleep_time = self.retry_rate * i self.log_function("Retrying in %s seconds" % sleep_time) self._sleep(sleep_time) try: req = requests.get(*args, **kwargs) req.raise_for_status() self.log_function("New request successful") return req except requests.exceptions.RequestException: self.log_function("New request failed") if not self.failed_last: self.failed_last = True raise ApiError(e) else: raise FatalApiError(e)
An interface for get requests that handles errors more gracefully to prevent data loss
10,409
def pipe_util(func): @wraps(func) def pipe_util_wrapper(function, *args, **kwargs): if isinstance(function, XObject): function = ~function original_function = function if args or kwargs: function = xpartial(function, *args, **kwargs) name = lambda: '%s(%s)' % (get_name(func), ', '.join( filter(None, (get_name(original_function), repr_args(*args, **kwargs))))) f = func(function) result = pipe | set_name(name, f) attrs = getattr(f, 'attrs', {}) for k, v in dict_items(attrs): setattr(result, k, v) return result return pipe_util_wrapper
Decorator that handles X objects and partial application for pipe-utils.
10,410
def advanced_search(pattern): query_parsed = QUERY.parseString(pattern) return Entry.published.filter(query_parsed[0]).distinct()
Parse the grammar of a pattern and build a queryset with it.
10,411
def fisher_by_pol(data): FisherByPoles = {} DIblock, nameblock, locblock = [], [], [] for rec in data: if 'dec' in list(rec.keys()) and 'inc' in list(rec.keys()): DIblock.append([float(rec["dec"]), float(rec["inc"])]) else: continue if 'name' in list(rec.keys()): nameblock.append(rec['name']) else: nameblock.append("") if 'loc' in list(rec.keys()): locblock.append(rec['loc']) else: locblock.append("") ppars = doprinc(np.array(DIblock)) reference_DI = [ppars['dec'], ppars['inc']] if reference_DI[0] > 90 and reference_DI[0] < 270: reference_DI[0] = (reference_DI[0] + 180.) % 360 reference_DI[1] = reference_DI[1] * -1. nDIs, rDIs, all_DI, npars, rpars = [], [], [], [], [] nlist, rlist, alllist = "", "", "" nloclist, rloclist, allloclist = "", "", "" for k in range(len(DIblock)): if angle([DIblock[k][0], DIblock[k][1]], reference_DI) > 90.: rDIs.append(DIblock[k]) rlist = rlist + ":" + nameblock[k] if locblock[k] not in rloclist: rloclist = rloclist + ":" + locblock[k] all_DI.append([(DIblock[k][0] + 180.) % 360., -1. * DIblock[k][1]]) alllist = alllist + ":" + nameblock[k] if locblock[k] not in allloclist: allloclist = allloclist + ":" + locblock[k] else: nDIs.append(DIblock[k]) nlist = nlist + ":" + nameblock[k] if locblock[k] not in nloclist: nloclist = nloclist + ":" + locblock[k] all_DI.append(DIblock[k]) alllist = alllist + ":" + nameblock[k] if locblock[k] not in allloclist: allloclist = allloclist + ":" + locblock[k] for mode in ['A', 'B', 'ALL']: if mode == 'A' and len(nDIs) > 2: fpars = fisher_mean(nDIs) fpars['sites'] = nlist.strip(":") fpars['locs'] = nloclist.strip(":") FisherByPoles[mode] = fpars elif mode == 'B' and len(rDIs) > 2: fpars = fisher_mean(rDIs) fpars['sites'] = rlist.strip(":") fpars['locs'] = rloclist.strip(":") FisherByPoles[mode] = fpars elif mode == 'ALL' and len(all_DI) > 2: fpars = fisher_mean(all_DI) fpars['sites'] = alllist.strip(":") fpars['locs'] = allloclist.strip(":") FisherByPoles[mode] = fpars return FisherByPoles
input: as in dolnp (list of dictionaries with 'dec' and 'inc') description: do fisher mean after splitting data into two polarity domains. output: three dictionaries: 'A' = polarity 'A', 'B' = polarity 'B', 'ALL' = switching polarity of 'B' directions, and calculating the fisher mean of all data code modified from eqarea_ell.py by rshaar 1/23/2014
10,412
def indices_for_body(self, name, step=3): for j, body in enumerate(self.bodies): if body.name == name: return list(range(j * step, (j + 1) * step)) return []
Get a list of the indices for a specific body. Parameters ---------- name : str The name of the body to look up. step : int, optional The number of numbers for each body. Defaults to 3, should be set to 4 for body rotation (since quaternions have 4 values). Returns ------- list of int : A list of the index values for quantities related to the named body.
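A small sketch of the returned index block (hypothetical: 'arm' is the second entry in self.bodies of a `world` instance):
>>> world.indices_for_body('arm', step=3)
[3, 4, 5]
>>> world.indices_for_body('arm', step=4)  # quaternion-sized quantities
[4, 5, 6, 7]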
10,413
def mount_disks(self): result = True for disk in self.disks: result = disk.mount() and result return result
Mounts all disks in the parser, i.e. calling :func:`Disk.mount` on all underlying disks. You probably want to use :func:`init` instead. :return: whether all mounts have succeeded :rtype: bool
10,414
def ipv6_acl_ipv6_access_list_standard_name(self, **kwargs): config = ET.Element("config") ipv6_acl = ET.SubElement(config, "ipv6-acl", xmlns="urn:brocade.com:mgmt:brocade-ipv6-access-list") ipv6 = ET.SubElement(ipv6_acl, "ipv6") access_list = ET.SubElement(ipv6, "access-list") standard = ET.SubElement(access_list, "standard") name = ET.SubElement(standard, "name") name.text = kwargs.pop('name') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
10,415
def get_app(system_version_file: str = None, config_file_override: str = None, name_override: str = None, loop: asyncio.AbstractEventLoop = None) -> web.Application: if not system_version_file: system_version_file = BR_BUILTIN_VERSION_FILE version = get_version(system_version_file) name = name_override or name_management.get_name() config_obj = config.load(config_file_override) LOG.info("Setup: " + .join([ f, f f, f f, f f, f f, f f, f f, f ])) if not loop: loop = asyncio.get_event_loop() app = web.Application(loop=loop, middlewares=[log_error_middleware]) app[config.CONFIG_VARNAME] = config_obj app[constants.RESTART_LOCK_NAME] = asyncio.Lock() app[constants.DEVICE_NAME_VARNAME] = name app.router.add_routes([ web.get(, control.build_health_endpoint(version)), web.post(, update.begin), web.post(, update.cancel), web.get(, update.status), web.post(, update.file_upload), web.post(, update.commit), web.post(, control.restart), web.get(, ssh_key_management.list_keys), web.post(, ssh_key_management.add), web.delete(, ssh_key_management.remove), web.post(, name_management.set_name_endpoint), web.get(, name_management.get_name_endpoint), ]) return app
Build and return the aiohttp.web.Application that runs the server The params can be overloaded for testing.
10,416
def _encrypt_password(self, password): if self.encryption_mode.lower() == 'crypt': return self._crypt_password(password) elif self.encryption_mode.lower() == 'md5': return self._md5_password(password) elif self.encryption_mode.lower() == 'md5-base': return self._md5_base_password(password) else: raise UnknownEncryptionMode(self.encryption_mode)
Encrypt the password for the given encryption mode.
10,417
def patch(func=None, obj=None, name=None, avoid_doublewrap=True): if obj is None: if isinstance(func, (type, ModuleType)): obj = func func = None elif isinstance(func, (list, tuple)) and all([isinstance(i, (ModuleType, type)) for i in func]): obj = func func = None if func is None: return functools.partial(patch, obj=obj, name=name, avoid_doublewrap=avoid_doublewrap) if name is None: name = func.__name__ if isinstance(obj, (list, tuple)) and all([isinstance(i, (ModuleType, type)) for i in obj]): return [patch(func=func, obj=o, name=name, avoid_doublewrap=avoid_doublewrap) for o in obj] if not isinstance(obj, (ModuleType, type)): raise ValueError( "Argument passed to @patch decorator must be a " "class or module, or a list of classes and modules") try: call = getattr(obj, name) except AttributeError: raise TypeError("%(func_repr)s does not exist" % { : .join( filter(None, [ getattr(obj, , None), obj.__name__, func.__name__], )), }) if avoid_doublewrap and getattr(call, , None) is func: return if call.im_self: wrapper = classmethod(wrapper) else: wrapper = staticmethod(wrapper) setattr(obj, name, wrapper) return getattr(obj, name)
Decorator for monkeypatching functions on modules and classes. Example:: # This replaces FooClass.bar with our method @monkeybiz.patch(FooClass) def bar(original_bar, *args, **kwargs): print "Patched!" return original_bar(*args, **kwargs) # This replaces FooClass.bar and foomodule.bar with our method @monkeybiz.patch([FooClass, foomodule]) def bar(original_bar, *args, **kwargs): #... The first argument to ``monkeybiz.patch`` can be either a module, a class, or a list of modules and/or classes. The decorator also takes optional ``name`` and ``avoid_doublewrap`` keyword arguments. If ``name`` is omitted, the name of the function being patched will be the name of the function being decorated. If ``avoid_doublewrap`` is True (the default), then functions and methods can only be patched once using this function. Use ``monkeybiz.unpatch()`` to revert a monkey-patched function to its original.
10,418
def create_region_from_border_clip(self, onerror = None): rid = self.display.allocate_resource_id() CreateRegionFromBorderClip( display = self.display, onerror = onerror, opcode = self.display.get_extension_major(extname), region = rid, window = self, ) return rid
Create a region of the border clip of the window, i.e. the area that is not clipped by the parent and any sibling windows.
10,419
def prepare_io_example_1() -> Tuple[devicetools.Nodes, devicetools.Elements]: from hydpy import TestIO TestIO.clear() from hydpy.core.filetools import SequenceManager hydpy.pub.sequencemanager = SequenceManager() with TestIO(): hydpy.pub.sequencemanager.inputdirpath = 'inputpath' hydpy.pub.sequencemanager.fluxdirpath = 'outputpath' hydpy.pub.sequencemanager.statedirpath = 'outputpath' hydpy.pub.sequencemanager.nodedirpath = 'nodepath' hydpy.pub.timegrids = '2000-01-01', '2000-01-05', '1d' from hydpy import Node, Nodes, Element, Elements, prepare_model node1 = Node('node1') node2 = Node('node2', variable='T') nodes = Nodes(node1, node2) element1 = Element('element1', outlets=node1) element2 = Element('element2', outlets=node1) element3 = Element('element3', outlets=node1) elements = Elements(element1, element2, element3) from hydpy.models import lland_v1, lland_v2 element1.model = prepare_model(lland_v1) element2.model = prepare_model(lland_v1) element3.model = prepare_model(lland_v2) from hydpy.models.lland import ACKER for idx, element in enumerate(elements): parameters = element.model.parameters parameters.control.nhru(idx+1) parameters.control.lnk(ACKER) parameters.derived.absfhru(10.0) with hydpy.pub.options.printprogress(False): nodes.prepare_simseries() elements.prepare_inputseries() elements.prepare_fluxseries() elements.prepare_stateseries() def init_values(seq, value1_): value2_ = value1_ + len(seq.series.flatten()) values_ = numpy.arange(value1_, value2_, dtype=float) seq.testarray = values_.reshape(seq.seriesshape) seq.series = seq.testarray.copy() return value2_ import numpy value1 = 0 for subname, seqname in zip(['inputs', 'fluxes', 'states'], ['nied', 'nkor', 'bowa']): for element in elements: subseqs = getattr(element.model.sequences, subname) value1 = init_values(getattr(subseqs, seqname), value1) for node in nodes: value1 = init_values(node.sequences.sim, value1) return nodes, elements
Prepare an IO example configuration. >>> from hydpy.core.examples import prepare_io_example_1 >>> nodes, elements = prepare_io_example_1() (1) Prepares a short initialisation period of five days: >>> from hydpy import pub >>> pub.timegrids Timegrids(Timegrid('2000-01-01 00:00:00', '2000-01-05 00:00:00', '1d')) (2) Prepares a plain IO testing directory structure: >>> pub.sequencemanager.inputdirpath 'inputpath' >>> pub.sequencemanager.fluxdirpath 'outputpath' >>> pub.sequencemanager.statedirpath 'outputpath' >>> pub.sequencemanager.nodedirpath 'nodepath' >>> import os >>> from hydpy import TestIO >>> with TestIO(): ... print(sorted(filename for filename in os.listdir('.') ... if not filename.startswith('_'))) ['inputpath', 'nodepath', 'outputpath'] (3) Returns three |Element| objects handling either application model |lland_v1| or |lland_v2|, and two |Node| objects handling variables `Q` and `T`: >>> for element in elements: ... print(element.name, element.model) element1 lland_v1 element2 lland_v1 element3 lland_v2 >>> for node in nodes: ... print(node.name, node.variable) node1 Q node2 T (4) Prepares the time series data of the input sequence |lland_inputs.Nied|, flux sequence |lland_fluxes.NKor|, and state sequence |lland_states.BoWa| for each model instance, and |Sim| for each node instance (all values are different), e.g.: >>> nied1 = elements.element1.model.sequences.inputs.nied >>> nied1.series InfoArray([ 0., 1., 2., 3.]) >>> nkor1 = elements.element1.model.sequences.fluxes.nkor >>> nkor1.series InfoArray([[ 12.], [ 13.], [ 14.], [ 15.]]) >>> bowa3 = elements.element3.model.sequences.states.bowa >>> bowa3.series InfoArray([[ 48., 49., 50.], [ 51., 52., 53.], [ 54., 55., 56.], [ 57., 58., 59.]]) >>> sim2 = nodes.node2.sequences.sim >>> sim2.series InfoArray([ 64., 65., 66., 67.]) (5) All sequences carry |numpy.ndarray| objects with (deep) copies of the time series data for testing: >>> import numpy >>> (numpy.all(nied1.series == nied1.testarray) and ... numpy.all(nkor1.series == nkor1.testarray) and ... numpy.all(bowa3.series == bowa3.testarray) and ... numpy.all(sim2.series == sim2.testarray)) InfoArray(True, dtype=bool) >>> bowa3.series[1, 2] = -999.0 >>> numpy.all(bowa3.series == bowa3.testarray) InfoArray(False, dtype=bool)
10,420
def nb_to_html(nb_path): exporter = html.HTMLExporter(template_file=) output, resources = exporter.from_filename(nb_path) header = output.split(, 1)[1].split(,1)[0] body = output.split(, 1)[1].split(,1)[0] header = header.replace(, ) header = header.replace(, ) filter_strings = [ , , , , , ] filter_strings.extend([ % (i+1) for i in range(6)]) header_lines = filter( lambda x: not any([s in x for s in filter_strings]), header.split()) header = .join(header_lines) lines = [] lines.append(header) lines.append(body) lines.append() return .join(lines)
Convert a notebook to HTML.
10,421
def manage_job_with_blocking_dependencies(self, job_record): composite_state = self.timetable.dependent_on_composite_state(job_record) assert isinstance(composite_state, NodesCompositeState) if composite_state.all_processed: self.manage_job(job_record) elif composite_state.skipped_present: job_record.state = job.STATE_SKIPPED self.job_dao.update(job_record) self.mq_transmitter.publish_job_status(job_record) msg = \ .format(job_record.process_name, job_record.timeperiod) self._log_message(WARNING, job_record.process_name, job_record.timeperiod, msg) else: msg = \ .format(job_record.process_name, job_record.timeperiod) self._log_message(INFO, job_record.process_name, job_record.timeperiod, msg)
method will trigger job processing only if: - all dependencies are in [STATE_PROCESSED, STATE_NOOP] NOTICE: method will transfer current job into STATE_SKIPPED if any dependency is in STATE_SKIPPED
10,422
def count(self, query, _or=False): if isinstance(query, str): return self.fm.count(query, MapIntInt({})) else: search_results = [] for q in query: dids = MapIntInt({}) self.fm.search(q, dids) search_results.append(dids.asdict()) merged_dids = self._merge_search_result(search_results, _or) counts = 0 for did in merged_dids: if _or: counts += reduce(add, [int(x.pop(did, 0)) for x in search_results]) else: counts += min([int(x.pop(did, 0)) for x in search_results]) return counts
Count word from FM-index Params: <str> | <Sequential> query <bool> _or <list <str> > ignores Return: <int> counts
10,423
def accept_line(self, logevent): if ("is now in state" in logevent.line_str and logevent.split_tokens[-1] in self.states): return True if ("replSet" in logevent.line_str and logevent.thread == "rsMgr" and logevent.split_tokens[-1] in self.states): return True return False
Return True on match. Only match log lines containing 'is now in state' (reflects other node's state changes) or of type "[rsMgr] replSet PRIMARY" (reflects own state changes).
10,424
def report_fit(self): if not self.fitted: print() return print(.format( self.log_likelihoods[])) print(.format( self.log_likelihoods[])) print(.format( self.log_likelihoods[])) tbl = PrettyTable( [, ]) tbl = PrettyTable() tbl.add_column(, self.fit_parameters.index.values) for col in (, , ): tbl.add_column(col, self.fit_parameters[col].values) tbl.align[] = tbl.float_format = print(tbl)
Print a report of the fit results.
10,425
def calc_q0_perc_uz_v1(self): con = self.parameters.control.fastaccess der = self.parameters.derived.fastaccess flu = self.sequences.fluxes.fastaccess sta = self.sequences.states.fastaccess flu.perc = 0. flu.q0 = 0. for dummy in range(con.recstep): sta.uz += der.dt*flu.inuz d_perc = min(der.dt*con.percmax*flu.contriarea, sta.uz) sta.uz -= d_perc flu.perc += d_perc if sta.uz > 0.: if flu.contriarea > 0.: d_q0 = (der.dt*con.k * (sta.uz/flu.contriarea)**(1.+con.alpha)) d_q0 = min(d_q0, sta.uz) else: d_q0 = sta.uz sta.uz -= d_q0 flu.q0 += d_q0 else: d_q0 = 0.
Perform the upper zone layer routine which determines percolation to the lower zone layer and the fast response of the hland model. Note that the system behaviour of this method depends strongly on the specifications of the options |RespArea| and |RecStep|. Required control parameters: |RecStep| |PercMax| |K| |Alpha| Required derived parameters: |DT| Required fluxes sequence: |InUZ| Calculated fluxes sequences: |Perc| |Q0| Updated state sequence: |UZ| Basic equations: :math:`\\frac{dUZ}{dt} = InUZ - Perc - Q0` \n :math:`Perc = PercMax \\cdot ContriArea` \n :math:`Q0 = K * \\cdot \\left( \\frac{UZ}{ContriArea} \\right)^{1+Alpha}` Examples: The upper zone layer routine is an exception compared to the other routines of the HydPy-H-Land model, regarding its consideration of numerical accuracy. To increase the accuracy of the numerical integration of the underlying ordinary differential equation, each simulation step can be divided into substeps, which are all solved with first order accuracy. In the first example, this option is omitted through setting the RecStep parameter to one: >>> from hydpy.models.hland import * >>> parameterstep('1d') >>> simulationstep('12h') >>> recstep(2) >>> derived.dt = 1/recstep >>> percmax(2.0) >>> alpha(1.0) >>> k(2.0) >>> fluxes.contriarea = 1.0 >>> fluxes.inuz = 0.0 >>> states.uz = 1.0 >>> model.calc_q0_perc_uz_v1() >>> fluxes.perc perc(1.0) >>> fluxes.q0 q0(0.0) >>> states.uz uz(0.0) Due to the sequential calculation of the upper zone routine, the upper zone storage is drained completely through percolation and no water is left for fast discharge response. By dividing the simulation step in 100 substeps, the results are quite different: >>> recstep(200) >>> derived.dt = 1.0/recstep >>> states.uz = 1.0 >>> model.calc_q0_perc_uz_v1() >>> fluxes.perc perc(0.786934) >>> fluxes.q0 q0(0.213066) >>> states.uz uz(0.0) Note that the assumed length of the simulation step is only a half day. Hence the effective values of the maximum percolation rate and the storage coefficient is not 2 but 1: >>> percmax percmax(2.0) >>> k k(2.0) >>> percmax.value 1.0 >>> k.value 1.0 By decreasing the contributing area one decreases percolation but increases fast discharge response: >>> fluxes.contriarea = 0.5 >>> states.uz = 1.0 >>> model.calc_q0_perc_uz_v1() >>> fluxes.perc perc(0.434108) >>> fluxes.q0 q0(0.565892) >>> states.uz uz(0.0) Resetting RecStep leads to more transparent results. 
Note that, due to the large value of the storage coefficient and the low accuracy of the numerical approximation, direct discharge drains the rest of the upper zone storage: >>> recstep(2) >>> derived.dt = 1.0/recstep >>> states.uz = 1.0 >>> model.calc_q0_perc_uz_v1() >>> fluxes.perc perc(0.5) >>> fluxes.q0 q0(0.5) >>> states.uz uz(0.0) Applying a more reasonable storage coefficient results in: >>> k(0.5) >>> states.uz = 1.0 >>> model.calc_q0_perc_uz_v1() >>> fluxes.perc perc(0.5) >>> fluxes.q0 q0(0.25) >>> states.uz uz(0.25) Adding an input of 0.3 mm results in the same percolation value (which, in the given example, is determined by the maximum percolation rate only), but in an increased value of the direct response (which always depends on the actual upper zone storage directly): >>> fluxes.inuz = 0.3 >>> states.uz = 1.0 >>> model.calc_q0_perc_uz_v1() >>> fluxes.perc perc(0.5) >>> fluxes.q0 q0(0.64) >>> states.uz uz(0.16) Due to the same reasons, another increase in numerical accuracy has no impact on percolation but decreases the direct response in the given example: >>> recstep(200) >>> derived.dt = 1.0/recstep >>> states.uz = 1.0 >>> model.calc_q0_perc_uz_v1() >>> fluxes.perc perc(0.5) >>> fluxes.q0 q0(0.421708) >>> states.uz uz(0.378292)
10,426
def _extract_input_processes(self): for proc in self.processes: ends = proc.split('/') pid, name = self._get_tuple(ends) self.input_processes.append((pid, name))
Given user input of interested processes, it will extract the info and output a list of tuples. - input can be multiple values, separated by space; - either pid or process_name is optional - e.g., "10001/python 10002/java cpp" :return: None
10,427
def _get_text(node, tag, default=None): try: return node.find(tag).text except AttributeError: return default
Get the text for the provided tag from the provided node
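A quick sketch with the standard-library ElementTree (any node with a compatible find() works):
>>> import xml.etree.ElementTree as ET
>>> node = ET.fromstring('<item><title>hello</title></item>')
>>> _get_text(node, 'title')
'hello'
>>> _get_text(node, 'missing', default='n/a')
'n/a'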
10,428
def _set_valid_props(artist, kwargs): artist.set(**{k: kwargs[k] for k in kwargs if hasattr(artist, "set_" + k)}) return artist
Set valid properties for the artist, dropping the others.
10,429
def getSpecs(self): content = {} if len(self.roles) != 0: content["roles"] = self.roles if self.password: content["password"] = self.password return content
Get specs Returns: dict: Representation of the object
10,430
def merge_blocks(a_blocks, b_blocks): assert a_blocks[-1][2] == b_blocks[-1][2] == 0 assert a_blocks[-1] == b_blocks[-1] combined_blocks = sorted(list(set(a_blocks + b_blocks))) i = j = 0 for a, b, size in combined_blocks: assert i <= a assert j <= b i = a + size j = b + size return combined_blocks
Given two lists of blocks, combine them, in the proper order. Ensure that there are no overlaps, and that they are for sequences of the same length.
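A short sketch with difflib-style matching blocks, where both lists share the final (len_a, len_b, 0) sentinel:
>>> a_blocks = [(0, 0, 2), (5, 5, 0)]
>>> b_blocks = [(3, 3, 2), (5, 5, 0)]
>>> merge_blocks(a_blocks, b_blocks)
[(0, 0, 2), (3, 3, 2), (5, 5, 0)]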
10,431
def _filtered_data_zeroed(self): filt_data = self._filtered_data[self._slice] filt_data = np.where(self._total_mask, 0., filt_data) filt_data[filt_data < 0] = 0. return filt_data.astype(np.float64)
A 2D `~numpy.ndarray` cutout from the input ``filtered_data`` (or ``data`` if ``filtered_data`` is `None`) where any masked pixels (_segment_mask, _input_mask, or _data_mask) are set to zero. Invalid values (e.g. NaNs or infs) are set to zero. Units are dropped on the input ``filtered_data`` (or ``data``). Negative data values are also set to zero because negative pixels (especially at large radii) can result in image moments that result in negative variances.
10,432
def add_integer_proxy_for(self, label: str, shape: Collection[int] = None) -> Vertex: if shape is None: return Vertex._from_java_vertex(self.unwrap().addIntegerProxyFor(_VertexLabel(label).unwrap())) else: return Vertex._from_java_vertex(self.unwrap().addIntegerProxyFor(_VertexLabel(label).unwrap(), shape))
Creates a proxy vertex for the given label and adds to the sequence item
10,433
def get_comparable_values(self): return (int(self.major), int(self.minor), str(self.label), str(self.name))
Return a tuple of values representing the uniqueness of the object
10,434
def encode_events(self, duration, events, values, dtype=np.bool): frames = time_to_frames(events, sr=self.sr, hop_length=self.hop_length) n_total = int(time_to_frames(duration, sr=self.sr, hop_length=self.hop_length)) n_alloc = n_total if np.any(frames): n_alloc = max(n_total, 1 + int(frames.max())) target = np.empty((n_alloc, values.shape[1]), dtype=dtype) target.fill(fill_value(dtype)) values = values.astype(dtype) for column, event in zip(values, frames): target[event] += column return target[:n_total]
Encode labeled events as a time-series matrix. Parameters ---------- duration : number The duration of the track events : ndarray, shape=(n,) Time index of the events values : ndarray, shape=(n, m) Values array. Must have the same first index as `events`. dtype : numpy data type Returns ------- target : ndarray, shape=(n_frames, n_values)
10,435
def replace(self, text=None): if text is None or isinstance(text, bool): text = self.lineEditReplace.text() current_occurences = self._current_occurrence() occurrences = self.get_occurences() if current_occurences == -1: self.select_next() current_occurences = self._current_occurrence() try: try: self.editor.textChanged.disconnect(self.request_search) except (RuntimeError, TypeError): pass occ = occurrences[current_occurences] cursor = self.editor.textCursor() cursor.setPosition(occ[0]) cursor.setPosition(occ[1], cursor.KeepAnchor) len_to_replace = len(cursor.selectedText()) len_replacement = len(text) offset = len_replacement - len_to_replace cursor.insertText(text) self.editor.setTextCursor(cursor) self._remove_occurrence(current_occurences, offset) current_occurences -= 1 self._set_current_occurrence(current_occurences) self.select_next() self.cpt_occurences = len(self.get_occurences()) self._update_label_matches() self._update_buttons() return True except IndexError: return False finally: self.editor.textChanged.connect(self.request_search)
Replaces the selected occurrence. :param text: The replacement text. If it is None, the lineEditReplace's text is used instead. :return True if the text could be replaced properly, False if there are no more occurrences to replace.
10,436
def view_on_site(self, request, content_type_id, object_id): : content_type_id, : object_id, }) try: get_absolute_url = obj.get_absolute_url except AttributeError: raise Http404(_("%(ct_name)s objects donct_name': content_type.name, }) absurl = get_absolute_url() return HttpResponseRedirect(absurl)
Redirect to an object's page based on a content-type ID and an object ID.
10,437
def fiemap(fd): count = 72 fiemap_cbuf = ffi.new( 'char[]', ffi.sizeof('struct fiemap') + count * ffi.sizeof('struct fiemap_extent')) fiemap_pybuf = ffi.buffer(fiemap_cbuf) fiemap_ptr = ffi.cast('struct fiemap*', fiemap_cbuf) assert ffi.sizeof(fiemap_cbuf) <= 4096 while True: fiemap_ptr.fm_length = lib.FIEMAP_MAX_OFFSET fiemap_ptr.fm_extent_count = count fcntl.ioctl(fd, lib.FS_IOC_FIEMAP, fiemap_pybuf) if fiemap_ptr.fm_mapped_extents == 0: break for i in range(fiemap_ptr.fm_mapped_extents): extent = fiemap_ptr.fm_extents[i] yield FiemapExtent( extent.fe_logical, extent.fe_physical, extent.fe_length, extent.fe_flags) fiemap_ptr.fm_start = extent.fe_logical + extent.fe_length
Gets a map of file extents.
10,438
def tree(path, depth=2, topdown=True, followlinks=False, showhidden=False): rt = [] for root, dirs, files in os.walk(path, topdown=topdown, followlinks=followlinks): if not showhidden and File.is_hidden(root): continue current_depth = len(os.path.relpath(root, path).split(os.sep)) if current_depth > depth: continue if showhidden: _tuple = ( root, [File(os.path.join(root, _dir)) for _dir in dirs], [File(os.path.join(root, _file)) for _file in files] ) else: _tuple = ( root, [File(os.path.join(root, _dir)) for _dir in dirs if _dir[0] != '.'], [File(os.path.join(root, _file)) for _file in files if _file[0] != '.'] ) rt.append(_tuple) return rt
Walk `path` down to `depth` levels and return a list of tuples with three elements (root, dirs, files).
10,439
def crypto_sign(msg, sk): if len(sk) != SECRETKEYBYTES: raise ValueError("Bad signing key length %d" % len(sk)) vkbytes = sk[PUBLICKEYBYTES:] skbytes = sk[:PUBLICKEYBYTES] sig = djbec.signature(msg, skbytes, vkbytes) return sig + msg
Return signature+message given message and secret key. The signature is the first SIGNATUREBYTES bytes of the return value. A copy of msg is in the remainder.
10,440
def usages(self): api_version = self._get_api_version() if api_version == '2018-03-01-preview': from .v2018_03_01_preview.operations import UsagesOperations as OperationClass elif api_version == '2018-07-01': from .v2018_07_01.operations import UsagesOperations as OperationClass else: raise NotImplementedError("APIVersion {} is not available".format(api_version)) return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
Instance depends on the API version: * 2018-03-01-preview: :class:`UsagesOperations<azure.mgmt.storage.v2018_03_01_preview.operations.UsagesOperations>` * 2018-07-01: :class:`UsagesOperations<azure.mgmt.storage.v2018_07_01.operations.UsagesOperations>`
10,441
def SetUseSSL(self, use_ssl): self._use_ssl = use_ssl logger.debug(.format(use_ssl))
Sets the use of ssl. Args: use_ssl (bool): enforces use of ssl.
10,442
def cycle_gan_internal(inputs, targets, _, hparams): with tf.variable_scope("cycle_gan"): inputs_orig, targets_orig = tf.to_int32(inputs), tf.to_int32(targets) inputs = common_layers.embedding( inputs_orig, hparams.vocab_size, hparams.hidden_size, "embed") targets = common_layers.embedding( targets_orig, hparams.vocab_size, hparams.hidden_size, "embed", reuse=True) x, _ = split_on_batch(inputs) _, y = split_on_batch(targets) y_fake = generator(y, hparams, "Fy", reuse=False) y_to_x_loss = lossfn(y, y_fake, True, hparams, True, "YtoX") x_fake = generator(x, hparams, "Gx", reuse=False) x_to_y_loss = lossfn(y, x_fake, True, hparams, True, "XtoY") y_fake_ = generator(y_fake, hparams, "Gx", reuse=True) x_fake_ = generator(x_fake, hparams, "Fy", reuse=True) x_to_x_loss = hparams.cycle_loss_multiplier1 * tf.reduce_mean( tf.abs(x_fake_ - x)) y_to_y_loss = hparams.cycle_loss_multiplier2 * tf.reduce_mean( tf.abs(y_fake_ - y)) cycloss = x_to_x_loss + y_to_y_loss sample_generated = generator(inputs, hparams, "Gx", reuse=True) sample_generated = tf.layers.dense( sample_generated, hparams.vocab_size, name="softmax", reuse=None) sample_generated = tf.stop_gradient( tf.expand_dims(sample_generated, axis=2)) losses = {"cycloss": cycloss, "y_to_x_loss": y_to_x_loss, "x_to_y_loss": x_to_y_loss} return sample_generated, losses
Cycle GAN, main step used for training.
10,443
def sigma_sq(self, sample): ret = 0 for i in range(1, self.point_num + 1): temp = self.trial_history[i - 1] - self.f_comb(i, sample) ret += temp * temp return 1.0 * ret / self.point_num
returns the value of sigma square, given the weight's sample Parameters ---------- sample: list sample is a (1 * NUM_OF_FUNCTIONS) matrix, representing{w1, w2, ... wk} Returns ------- float the value of sigma square, given the weight's sample
10,444
def _extendrange(self, start, end): range_positions = [] for i in range(start, end): if i != 0: range_positions.append(str(i)) if i < end: range_positions.append(self.separator) return range_positions
Creates list of values in a range with output delimiters. Arguments: start - range start end - range end
10,445
def db_for_read(self, model, **hints): if model._meta.app_label in self._apps: return getattr(model, , model._meta.app_label) return None
If the app has its own database, use it for reads
10,446
def parse_bismark_report(self, report, regexes): parsed_data = {} for k, r in regexes.items(): r_search = re.search(r, report, re.MULTILINE) if r_search: try: parsed_data[k] = float(r_search.group(1)) except ValueError: parsed_data[k] = r_search.group(1) if len(parsed_data) == 0: return None return parsed_data
Search a bismark report with a set of regexes
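A hedged sketch of the regex-driven parsing (the key name, report line, and `parser` instance are made up for illustration):
>>> regexes = {'total_reads': r'Sequences analysed in total:\s+(\d+)'}
>>> report = 'Sequences analysed in total:\t1000'
>>> parser.parse_bismark_report(report, regexes)
{'total_reads': 1000.0}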
10,447
def syslog_generate(str_processName, str_pid): localtime = time.asctime( time.localtime(time.time()) ) hostname = os.uname()[1] syslog = '%s %s %s[%s]' % (localtime, hostname, str_processName, str_pid) return syslog
Returns a string similar to: Tue Oct 9 10:49:53 2012 pretoria message.py[26873]: where 'pretoria' is the hostname, 'message.py' is the current process name and 26873 is the current process id.
10,448
def slug_field_data(field, **kwargs): min_length = kwargs.get('min_length', 1) max_length = kwargs.get('max_length', field.max_length or 20) from string import ascii_letters, digits letters = ascii_letters + digits + '-_' return xunit.any_string(letters = letters, min_length = min_length, max_length = max_length)
Return random value for SlugField >>> result = any_form_field(forms.SlugField()) >>> type(result) <type 'str'> >>> from django.core.validators import slug_re >>> import re >>> re.match(slug_re, result) is not None True
10,449
def get_connection(cls): if cls.__connection_obj is None: if cls.__connection_fn is None: _, cls.__connection_fn = cls.from_settings() cls.__connection_obj = cls.__connection_fn() return cls.__connection_obj
Return connection object. :rtype: :class:`cloud_browser.cloud.base.CloudConnection`
10,450
def make_sh_output(value, output_script, witness=False): return _make_output( value=utils.i2le_padded(value, 8), output_script=make_sh_output_script(output_script, witness))
int, str -> TxOut
10,451
def create_kubernetes_role(self, name, bound_service_account_names, bound_service_account_namespaces, ttl="", max_ttl="", period="", policies=None, mount_point='kubernetes'): if bound_service_account_names == '*' and bound_service_account_namespaces == '*': error_message = 'bound_service_account_names and bound_service_account_namespaces can not both be set to "*"' raise exceptions.ParamValidationError(error_message) params = { 'bound_service_account_names': bound_service_account_names, 'bound_service_account_namespaces': bound_service_account_namespaces, 'ttl': ttl, 'max_ttl': max_ttl, 'period': period, 'policies': policies, } url = 'v1/auth/{0}/role/{1}'.format(mount_point, name) return self._adapter.post(url, json=params)
POST /auth/<mount_point>/role/:name :param name: Name of the role. :type name: str. :param bound_service_account_names: List of service account names able to access this role. If set to "*" all names are allowed, both this and bound_service_account_namespaces can not be "*". :type bound_service_account_names: list. :param bound_service_account_namespaces: List of namespaces allowed to access this role. If set to "*" all namespaces are allowed, both this and bound_service_account_names can not be set to "*". :type bound_service_account_namespaces: list. :param ttl: The TTL period of tokens issued using this role in seconds. :type ttl: str. :param max_ttl: The maximum allowed lifetime of tokens issued in seconds using this role. :type max_ttl: str. :param period: If set, indicates that the token generated using this role should never expire. The token should be renewed within the duration specified by this value. At each renewal, the token's TTL will be set to the value of this parameter. :type period: str. :param policies: Policies to be set on tokens issued using this role :type policies: list. :param mount_point: The "path" the k8s auth backend was mounted on. Vault currently defaults to "kubernetes". :type mount_point: str. :return: Will be an empty body with a 204 status code upon success :rtype: requests.Response.
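A hedged usage sketch (the `client` object and the role/service-account values are hypothetical):
>>> client.create_kubernetes_role(
...     name='demo-role',
...     bound_service_account_names=['vault-auth'],
...     bound_service_account_namespaces=['default'],
...     ttl='1h',
...     policies=['default'])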
10,452
def loop_template_list(loop_positions, instance, instance_type, default_template, registry): templates = [] local_loop_position = loop_positions[1] global_loop_position = loop_positions[0] instance_string = slugify(str(instance)) for key in [ '%s-%s' % (instance_type, instance_string), instance_string, instance_type, ]: try: templates.append(registry[key][global_loop_position]) except KeyError: pass templates.append( append_position(default_template, global_loop_position, '-')) templates.append( append_position(default_template, local_loop_position, '_')) templates.append(default_template) return templates
Build a list of templates from a position within a loop and a registry of templates.
10,453
def needs_quotes( s ): if s in dot_keywords: return False chars = [ord(c) for c in s if ord(c)>0x7f or ord(c)==0] if chars and not id_re_dbl_quoted.match(s) and not id_re_html.match(s): return True for test_re in [id_re_alpha_nums, id_re_num, id_re_dbl_quoted, id_re_html, id_re_alpha_nums_with_ports]: if test_re.match(s): return False m = id_re_with_port.match(s) if m: return needs_quotes(m.group(1)) or needs_quotes(m.group(2)) return True
Checks whether a string is a dot language ID. It will check whether the string is solely composed of the characters allowed in an ID or not. If the string is one of the reserved keywords it will need quotes too, but the user will need to add them manually.
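A short sketch of the intended behaviour (assuming the module's DOT ID regexes and dot_keywords list are in scope):
>>> needs_quotes('node_1')   # plain alphanumeric ID
False
>>> needs_quotes('my node')  # contains a space, so it must be quoted
True
>>> needs_quotes('graph')    # reserved DOT keyword; quoting is left to the caller
False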
10,454
def astimezone(self, tz): if self.tzinfo is tz: return self if self.tzinfo: utc = self - self.utcoffset() else: utc = self return tz.fromutc(utc.replace(tzinfo=tz))
Return a :py:class:`khayyam.JalaliDatetime` object with new :py:meth:`khayyam.JalaliDatetime.tzinfo` attribute tz, adjusting the date and time data so the result is the same UTC time as self, but in *tz*‘s local time. *tz* must be an instance of a :py:class:`datetime.tzinfo` subclass, and its :py:meth:`datetime.tzinfo.utcoffset()` and :py:meth:`datetime.tzinfo.dst()` methods must not return :py:obj:`None`. *self* must be aware (`self.tzinfo` must not be `None`, and `self.utcoffset()` must not return `None`). If `self.tzinfo` is `tz`, `self.astimezone(tz)` is equal to `self`: no adjustment of date or time data is performed. Else the result is local time in time zone `tz`, representing the same UTC time as `self`: after `astz = dt.astimezone(tz), astz - astz.utcoffset()` will usually have the same date and time data as `dt - dt.utcoffset()`. The discussion of class :py:class:`datetime.tzinfo` explains the cases at Daylight Saving Time transition boundaries where this cannot be achieved (an issue only if `tz` models both standard and daylight time). If you merely want to attach a time zone object `tz` to a datetime dt without adjustment of date and time data, use `dt.replace(tzinfo=tz)`. If you merely want to remove the time zone object from an aware datetime dt without conversion of date and time data, use `dt.replace(tzinfo=None)`. Note that the default :py:meth:`datetime.tzinfo.fromutc()` method can be overridden in a :py:class:`datetime.tzinfo` subclass to affect the result returned by :py:meth:`khayyam.JalaliDatetime.astimezone()`. Ignoring error cases, :py:meth:`khayyam.JalaliDatetime.astimezone()` acts like: .. code-block:: python :emphasize-lines: 3,5 def astimezone(self, tz): # doctest: +SKIP if self.tzinfo is tz: return self if self.tzinfo: utc = self - self.utcoffset() else: utc = self return tz.fromutc(utc.replace(tzinfo=tz)) :param tz: :py:class:`datetime.tzinfo` :rtype: :py:class:`khayyam.JalaliDatetime`
10,455
def p_duration_number_duration_unit(self, p): logger.debug(, p[1], p[2]) p[0] = Duration.from_quantity_unit(p[1], p[2])
duration : NUMBER DURATION_UNIT
10,456
def entitlement(self, token): headers = {"Authorization": "Bearer %s" % token} url = self._realm.client.get_full_url( PATH_ENTITLEMENT.format(self._realm.realm_name, self._client_id) ) return self._realm.client.get(url, headers=headers)
Client applications can use a specific endpoint to obtain a special security token called a requesting party token (RPT). This token consists of all the entitlements (or permissions) for a user as a result of the evaluation of the permissions and authorization policies associated with the resources being requested. With an RPT, client applications can gain access to protected resources at the resource server. http://www.keycloak.org/docs/latest/authorization_services/index.html#_service_entitlement_api :rtype: dict
10,457
def ensure_unique_obs_ids_in_wide_data(obs_id_col, wide_data): if len(wide_data[obs_id_col].unique()) != wide_data.shape[0]: msg = "The values in wide_data[obs_id_col] are not unique, " msg_2 = "but they need to be." raise ValueError(msg + msg_2) return None
Ensures that there is one observation per row in wide_data. Raises a helpful ValueError if otherwise. Parameters ---------- obs_id_col : str. Denotes the column in `wide_data` that contains the observation ID values for each row. wide_data : pandas dataframe. Contains one row for each observation. Should contain the specified `obs_id_col` column. Returns ------- None.
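A small sketch of the check (only pandas is assumed):
>>> import pandas as pd
>>> ensure_unique_obs_ids_in_wide_data('obs_id', pd.DataFrame({'obs_id': [1, 2, 3]}))
>>> ensure_unique_obs_ids_in_wide_data('obs_id', pd.DataFrame({'obs_id': [1, 1, 2]}))
Traceback (most recent call last):
    ...
ValueError: The values in wide_data[obs_id_col] are not unique, but they need to be.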
10,458
def hacking_has_license(physical_line, filename, lines, line_number): license_found = False license_found = True if not license_found: return (0, "H102: Apache 2.0 license header not found")
Check for Apache 2.0 license. H102 license header not found
10,459
def protein_sequences_generator_to_dataframe(variant_and_protein_sequences_generator): return dataframe_from_generator( element_class=ProteinSequence, variant_and_elements_generator=variant_and_protein_sequences_generator, converters=dict( gene=lambda x: ";".join(x)))
Given a generator which yields (Variant, [ProteinSequence]) elements, returns a pandas.DataFrame
10,460
def _read_http_window_update(self, size, kind, flag): if size != 4: raise ProtocolError(f, quiet=True) if any((int(bit, base=2) for bit in flag)): raise ProtocolError(f, quiet=True) _size = self._read_binary(4) if int(_size[0], base=2): raise ProtocolError(f, quiet=True) data = dict( flags=None, window=int(_size[1:], base=2), ) return data
Read HTTP/2 WINDOW_UPDATE frames. Structure of HTTP/2 WINDOW_UPDATE frame [RFC 7540]: +-----------------------------------------------+ | Length (24) | +---------------+---------------+---------------+ | Type (8) | Flags (8) | +-+-------------+---------------+-------------------------------+ |R| Stream Identifier (31) | +-+-------------+---------------+-------------------------------+ |R| Window Size Increment (31) | +-+-------------------------------------------------------------+ Octets Bits Name Description 0 0 http.length Length 3 24 http.type Type (2) 4 32 http.flags Flags 5 40 - Reserved 5 41 http.sid Stream Identifier 9 72 - Reserved 9 73 http.window Window Size Increment
10,461
def open(self): sess_id = self._get_sess_id() if sess_id: self.application.pc.websockets[self._get_sess_id()] = self self.write_message(json.dumps({"cmd": "status", "status": "open"})) else: self.write_message(json.dumps({"cmd": "error", "error": "Please login", "code": 401}))
Called on new websocket connection.
10,462
def _classify_section(cls, section): name = section.lower() if name in frozenset([, , "params", "parameters"]): return cls.ARGS_SECTION if name in frozenset([, ]): return cls.RETURN_SECTION if name in frozenset([]): return cls.MAIN_SECTION return None
Attempt to find the canonical name of this section.
10,463
def show(self): for rule in self.rules_list: result = ", ".join([str(check) for check, deny in rule]) print(result)
Show the structure of self.rules_list, only for debug.
10,464
def json(self): if six.PY3: return json.loads(self.body.decode(self.charset)) else: return json.loads(self.body)
Return response body deserialized into JSON object.
10,465
def fetch_from_sdr(folder=data_folder, data=): url = "https://stacks.stanford.edu/file/druid:fn662rv4961/" if data == : md5_dict = {: , : , : , : } elif data == : md5_dict = {: , : } if not os.path.exists(folder): print( % folder) os.makedirs(folder) for k, v in md5_dict.items(): fname = pjoin(folder, k) if not os.path.exists(fname): print(%k) _get_file_data(fname, url + k) check_md5(fname, v) else: print( % (fname, folder)) print() print( % folder)
Download MRS data from SDR Parameters ---------- folder : str Full path to a location in which to place the data. Per default this will be a directory under the user's home `.mrs_data`. data : str Which data to download. Either 'test', which is data required for testing, or 'example', which is data needed for the example notebooks.
10,466
def _fetchone(self, query, vars): cursor = self.get_db().cursor() self._log(cursor, query, vars) cursor.execute(query, vars) return cursor.fetchone()
Return none or one row.
10,467
def load_translations(directory: str, encoding: str = None) -> None: global _translations global _supported_locales _translations = {} for path in os.listdir(directory): if not path.endswith(".csv"): continue locale, extension = path.split(".") if not re.match("[a-z]+(_[A-Z]+)?$", locale): gen_log.error( "Unrecognized locale %r (path: %s)", locale, os.path.join(directory, path), ) continue full_path = os.path.join(directory, path) if encoding is None: with open(full_path, "rb") as bf: data = bf.read(len(codecs.BOM_UTF16_LE)) if data in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE): encoding = "utf-16" else: encoding = "utf-8-sig" with open(full_path, encoding=encoding) as f: _translations[locale] = {} for i, row in enumerate(csv.reader(f)): if not row or len(row) < 2: continue row = [escape.to_unicode(c).strip() for c in row] english, translation = row[:2] if len(row) > 2: plural = row[2] or "unknown" else: plural = "unknown" if plural not in ("plural", "singular", "unknown"): gen_log.error( "Unrecognized plural indicator %r in %s line %d", plural, path, i + 1, ) continue _translations[locale].setdefault(plural, {})[english] = translation _supported_locales = frozenset(list(_translations.keys()) + [_default_locale]) gen_log.debug("Supported locales: %s", sorted(_supported_locales))
Loads translations from CSV files in a directory. Translations are strings with optional Python-style named placeholders (e.g., ``My name is %(name)s``) and their associated translations. The directory should have translation files of the form ``LOCALE.csv``, e.g. ``es_GT.csv``. The CSV files should have two or three columns: string, translation, and an optional plural indicator. Plural indicators should be one of "plural" or "singular". A given string can have both singular and plural forms. For example ``%(name)s liked this`` may have a different verb conjugation depending on whether %(name)s is one name or a list of names. There should be two rows in the CSV file for that string, one with plural indicator "singular", and one "plural". For strings with no verbs that would change on translation, simply use "unknown" or the empty string (or don't include the column at all). The file is read using the `csv` module in the default "excel" dialect. In this format there should not be spaces after the commas. If no ``encoding`` parameter is given, the encoding will be detected automatically (among UTF-8 and UTF-16) if the file contains a byte-order marker (BOM), defaulting to UTF-8 if no BOM is present. Example translation ``es_LA.csv``:: "I love you","Te amo" "%(name)s liked this","A %(name)s les gustó esto","plural" "%(name)s liked this","A %(name)s le gustó esto","singular" .. versionchanged:: 4.3 Added ``encoding`` parameter. Added support for BOM-based encoding detection, UTF-16, and UTF-8-with-BOM.
10,468
def EventsNotificationsPost(self, parameters): if self.__SenseApiCall__(, , parameters = parameters): return True else: self.__error__ = "api call unsuccessful" return False
Create an event-notification in CommonSense. If EventsNotificationsPost was successful the result, including the event_notification_id, can be obtained from getResponse(), and should be a json string. @param parameters (dictionary) - Parameters according to which to create the event notification. @return (bool) - Boolean indicating whether EventsNotificationsPost was successful.
10,469
def asscalar(a): try: return np.asscalar(a) except AttributeError as e: return np.asscalar(np.asarray(a))
https://github.com/numpy/numpy/issues/4701
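A quick sketch (NumPy only; note that np.asscalar was deprecated in later NumPy releases):
>>> import numpy as np
>>> asscalar(np.array([3.0]))
3.0
>>> asscalar(2.5)  # plain Python scalars are routed through np.asarray
2.5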
10,470
def _level_coords(self): level_coords = OrderedDict() for cname, var in self._coords.items(): if var.ndim == 1 and isinstance(var, IndexVariable): level_names = var.level_names if level_names is not None: dim, = var.dims level_coords.update({lname: dim for lname in level_names}) return level_coords
Return a mapping of all MultiIndex levels and their corresponding coordinate name.
10,471
def emitRemoved( self ): if ( self.signalsBlocked() ): return False self.dispatch.removed.emit() return True
Emits the removed signal, provided the dispatcher's signals \ are not currently blocked. :return <bool> emitted
10,472
def getResetsIndices(networkDataFile): try: with open(networkDataFile) as f: reader = csv.reader(f) next(reader, None) next(reader, None) resetIdx = next(reader).index("R") resets = [] for i, line in enumerate(reader): if int(line[resetIdx]) == 1: resets.append(i) return resets except IOError as e: print "Could not open the file {}.".format(networkDataFile) raise e
Returns the indices at which the data sequences reset.
10,473
def _set_set_overload_bit(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=set_overload_bit.set_overload_bit, is_container=, presence=True, yang_name="set-overload-bit", rest_name="set-overload-bit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u}}, namespace=, defining_module=, yang_type=, is_config=True) except (TypeError, ValueError): raise ValueError({ : , : "container", : , }) self.__set_overload_bit = t if hasattr(self, ): self._set()
Setter method for set_overload_bit, mapped from YANG variable /routing_system/router/isis/router_isis_cmds_holder/router_isis_attributes/set_overload_bit (container) If this variable is read-only (config: false) in the source YANG file, then _set_set_overload_bit is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_set_overload_bit() directly.
10,474
def post_registration_redirect(self, request, user): next_url = "/registration/register/complete/" if "next" in request.GET or "next" in request.POST: next_url = request.GET.get("next", None) or request.POST.get("next", None) or "/" return (next_url, (), {})
After registration, redirect to the home page or supplied "next" query string or hidden field value.
10,475
def background(self): if self._background is None: self._background = GSBackgroundLayer() self._background._foreground = self return self._background
Only a getter on purpose. See the tests.
10,476
def _get_hashing_context(self, app: FlaskUnchained) -> CryptContext: return CryptContext(schemes=app.config.SECURITY_HASHING_SCHEMES, deprecated=app.config.SECURITY_DEPRECATED_HASHING_SCHEMES)
Get the token hashing (and verifying) context.
10,477
def evalMetric(self, x, method=None): return super(DensityMatching, self).evalMetric(x, method)
Evaluates the density matching metric at a given design point. :param iterable x: values of the design variables, this is passed as the first argument to the function fqoi :return: metric_value - value of the metric evaluated at the design point given by x :rtype: float *Example Usage*:: >>> def myFunc(x, u): return x[0]*x[1] + u >>> u1 = UniformParameter() >>> theDM = DensityMatching(myFunc, u) >>> x0 = [1, 2] >>> theDM.evalMetric(x0)
10,478
def Bernoulli(p, tag=None): assert ( 0 < p < 1 ), 'Bernoulli probability "p" must be between zero and one, non-inclusive' return uv(ss.bernoulli(p), tag=tag)
A Bernoulli random variate Parameters ---------- p : scalar The probability of success
10,479
def convert_to_codec_key(value): if not value: value = converted = value.replace(, ).lower() all_aliases = { : [ , , , , , , , , ], : [ , , , , , ], : [ , , , , ], : [ , , , ], : [] } for key, aliases in all_aliases.items(): if converted in aliases: return key return converted
Normalize a codec key value (encoding codec names must be lower case and must not contain any dashes). :param value: value to convert.
10,480
def log_prob(self, response, predicted_linear_response, name=None): with self._name_scope( name, 'log_prob', [response, predicted_linear_response]): dtype = dtype_util.common_dtype([response, predicted_linear_response]) response = tf.convert_to_tensor( value=response, dtype=dtype, name='response') predicted_linear_response = tf.convert_to_tensor( value=predicted_linear_response, name='predicted_linear_response') return self._log_prob(response, predicted_linear_response)
Computes `D(param=mean(r)).log_prob(response)` for linear response, `r`. Args: response: `float`-like `Tensor` representing observed ("actual") responses. predicted_linear_response: `float`-like `Tensor` corresponding to `tf.matmul(model_matrix, weights)`. name: Python `str` used as TF namescope for ops created by member functions. Default value: `None` (i.e., 'log_prob'). Returns: log_prob: `Tensor` with shape and dtype of `predicted_linear_response` representing the distribution prescribed log-probability of the observed `response`s.
10,481
def getDescendant(Ancestor, RouteParts):
    if not RouteParts:
        return Ancestor
    Resolved = Ancestor.Members.get(RouteParts.pop(0))
    if isinstance(Resolved, Group):
        return getDescendant(Resolved, RouteParts)
    else:
        return Resolved
r"""Resolves a descendant, of the given Ancestor, as pointed by the RouteParts.
10,482
def gen_compliance_xdr(self): sequence = self.sequence self.sequence = -1 tx_xdr = self.gen_tx().xdr() self.sequence = sequence return tx_xdr
Create an XDR object representing this builder's transaction to be sent over via the Compliance protocol (notably, with a sequence number of 0). Intentionally, the XDR object is returned without any signatures on the transaction. See `Stellar's documentation on its Compliance Protocol <https://www.stellar.org/developers/guides/compliance-protocol.html>`_ for more information.
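A short sketch of producing the compliance XDR, assuming the older py-stellar-base Builder API this method appears to belong to; the seed, destination and payment parameters are placeholders:

# Hypothetical account seed and destination; Builder and append_payment_op are
# assumed from the older py-stellar-base API.
builder = Builder(secret='SB...SEED', network='TESTNET')
builder.append_payment_op(destination='GB...DEST', amount='100', asset_code='XLM')
tx_xdr = builder.gen_compliance_xdr()  # unsigned XDR, sequence forced for the Compliance protocol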
10,483
def slurp(path, encoding='utf-8'):
    with io.open(path, 'r', encoding=encoding) as f:
        return f.read()
Reads file `path` and returns the entire contents as a unicode string By default assumes the file is encoded as UTF-8 Parameters ---------- path : str File path to file on disk encoding : str, default `UTF-8`, optional Encoding of the file Returns ------- The txt read from the file as a unicode string
10,484
def indices(self, names, axis=None): return Matrix.find_rowcol_indices(names,self.row_names,self.col_names,axis=axis)
get the row and col indices of names. If axis is None, two ndarrays are returned, corresponding the indices of names for each axis Parameters ---------- names : iterable column and/or row names axis : (int) (optional) the axis to search. Returns ------- numpy.ndarray : numpy.ndarray indices of names.
10,485
def generate_rrab_lightcurve(
        times,
        mags=None,
        errs=None,
        paramdists={'period': sps.uniform(loc=0.45, scale=0.35),
                    'fourierorder': [8, 11],
                    'amplitude': sps.uniform(loc=0.4, scale=0.5),
                    'phioffset': np.pi},
        magsarefluxes=False
):
    # Generate the base Fourier-series (sinusoidal) light curve with the RRab-like
    # parameter distributions, then relabel the variability type.
    # Note: the 'phioffset' key name is inferred; it is not named in the docstring.
    modeldict = generate_sinusoidal_lightcurve(times,
                                               mags=mags,
                                               errs=errs,
                                               paramdists=paramdists,
                                               magsarefluxes=magsarefluxes)
    modeldict['vartype'] = 'RRab'
    return modeldict
This generates fake RRab light curves. Parameters ---------- times : np.array This is an array of time values that will be used as the time base. mags,errs : np.array These arrays will have the model added to them. If either is None, `np.full_like(times, 0.0)` will used as a substitute and the model light curve will be centered around 0.0. paramdists : dict This is a dict containing parameter distributions to use for the model params, containing the following keys :: {'period', 'fourierorder', 'amplitude'} The values of these keys should all be 'frozen' scipy.stats distribution objects, e.g.: https://docs.scipy.org/doc/scipy/reference/stats.html#continuous-distributions The variability epoch will be automatically chosen from a uniform distribution between `times.min()` and `times.max()`. The `amplitude` will be flipped automatically as appropriate if `magsarefluxes=True`. magsarefluxes : bool If the generated time series is meant to be a flux time-series, set this to True to get the correct sign of variability amplitude. Returns ------- dict A dict of the form below is returned:: {'vartype': 'RRab', 'params': {'period': generated value of period, 'epoch': generated value of epoch, 'amplitude': generated value of amplitude, 'fourierorder': generated value of fourier order, 'fourieramps': generated values of fourier amplitudes, 'fourierphases': generated values of fourier phases}, 'times': the model times, 'mags': the model mags, 'errs': the model errs, 'varperiod': the generated period of variability == 'period' 'varamplitude': the generated amplitude of variability == 'amplitude'}
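A small usage sketch with a random time base; the 1000-point, 100-day baseline is arbitrary:

import numpy as np

times = np.sort(np.random.uniform(0.0, 100.0, size=1000))
fake = generate_rrab_lightcurve(times)
# fake['mags'] holds the model magnitudes; fake['varperiod'] is the drawn period.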
10,486
def GetService(self, service_name, version=sorted(_SERVICE_MAP.keys())[-1],
               server=None):
    if not server:
      server = DEFAULT_ENDPOINT

    # Strip any trailing slash from the server address.
    server = server[:-1] if server[-1] == '/' else server

    try:
      service = googleads.common.GetServiceClassForLibrary(self.soap_impl)(
          self._SOAP_SERVICE_FORMAT % (server, version, service_name),
          self._header_handler,
          _AdManagerPacker,
          self.proxy_config,
          self.timeout,
          version,
          cache=self.cache)
      return service
    except googleads.errors.GoogleAdsSoapTransportError:
      if version in _SERVICE_MAP:
        if service_name in _SERVICE_MAP[version]:
          raise
        else:
          raise googleads.errors.GoogleAdsValueError(
              'Unrecognized service for the Ad Manager API. Service given: %s '
              'Supported services: %s' % (service_name, _SERVICE_MAP[version]))
      else:
        raise googleads.errors.GoogleAdsValueError(
            'Unrecognized version of the Ad Manager API. Version given: %s '
            'Supported versions: %s' % (version, _SERVICE_MAP.keys()))
Creates a service client for the given service. Args: service_name: A string identifying which Ad Manager service to create a service client for. [optional] version: A string identifying the Ad Manager version to connect to. This defaults to what is currently the latest version. This will be updated in future releases to point to what is then the latest version. server: A string identifying the webserver hosting the Ad Manager API. Returns: A googleads.common.GoogleSoapService instance which has the headers and proxy configured for use. Raises: A GoogleAdsValueError if the service or version provided do not exist.
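Typical client-side usage looks roughly like this; loading credentials from a googleads.yaml file is the documented googleads pattern, and NetworkService is just an example service name:

import googleads

ad_manager_client = googleads.ad_manager.AdManagerClient.LoadFromStorage()
network_service = ad_manager_client.GetService('NetworkService')
current_network = network_service.getCurrentNetwork()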
10,487
def _events_process(event_types=None, eager=False):
    event_types = event_types or list(current_stats.enabled_events)
    if eager:
        process_events.apply((event_types,), throw=True)
        # The message and color literals below are assumptions; the originals were lost.
        click.secho('Events processed successfully.', fg='green')
    else:
        process_events.delay(event_types)
        click.secho('Events processing task sent...', fg='green')
Process stats events.
10,488
def url_signature(url: str) -> Optional[Tuple]:
    request = urllib.request.Request(url)
    # Issue a HEAD request so only the headers are fetched.
    request.get_method = lambda: 'HEAD'
    response = None
    try:
        response = urllib.request.urlopen(request)
    except urllib.error.HTTPError:
        return None
    return response.info()['Last-Modified'], response.info()['Content-Length'], response.info().get('ETag')
Return an identifying signature for url :param url: item to get signature for :return: tuple containing last modified, length and, if present, etag
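A sketch of using the signature to detect whether a remote resource changed between runs; the URL is illustrative only:

old_sig = url_signature('https://example.org/data.ttl')
# ... later ...
new_sig = url_signature('https://example.org/data.ttl')
if old_sig is None or old_sig != new_sig:
    print('Resource is missing or has changed; re-download it.')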
10,489
def list_motors(name_pattern=Motor.SYSTEM_DEVICE_NAME_CONVENTION, **kwargs):
    class_path = abspath(Device.DEVICE_ROOT_PATH + '/' + Motor.SYSTEM_CLASS_NAME)
    return (Motor(name_pattern=name, name_exact=True)
            for name in list_device_names(class_path, name_pattern, **kwargs))
This is a generator function that enumerates all tacho motors that match the provided arguments. Parameters: name_pattern: pattern that device name should match. For example, 'motor*'. Default value: '*'. keyword arguments: used for matching the corresponding device attributes. For example, driver_name='lego-ev3-l-motor', or address=['outB', 'outC']. When argument value is a list, then a match against any entry of the list is enough.
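For example, on an ev3dev-style setup this stops every large motor attached to port outB; the driver name and port are taken from the docstring's own examples:

for motor in list_motors(driver_name='lego-ev3-l-motor', address='outB'):
    motor.stop()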
10,490
def _flatten_projection(cls, projection):
    if not projection:
        return {f: True for f in cls._fields}, {}, {}

    flat_projection = {}
    references = {}
    subs = {}
    inclusive = True
    for key, value in deepcopy(projection).items():
        if isinstance(value, dict):
            # '$ref', '$sub' and '$sub.' are the reference / sub-frame projection
            # directives; any other '$'-prefixed keys are passed through to Mongo.
            project_value = {
                k: v for k, v in value.items()
                if k.startswith('$') and k not in ['$ref', '$sub', '$sub.']
            }
            if len(project_value) == 0:
                project_value = True
            else:
                inclusive = False

            if '$ref' in value:
                # Store a reference projection for later dereferencing
                references[key] = value
            elif '$sub' in value or '$sub.' in value:
                # Store a sub-frame projection
                subs[key] = value
                if '$sub' in value:
                    sub_frame = value['$sub']
                if '$sub.' in value:
                    sub_frame = value['$sub.']
                project_value = sub_frame._projection_to_paths(key, value)

            if isinstance(project_value, dict):
                flat_projection.update(project_value)
            else:
                flat_projection[key] = project_value

        elif key == '$ref':
            # Strip any top-level reference directive
            continue
        elif key == '$sub' or key == '$sub.':
            # Strip any top-level sub-frame directive
            continue
        else:
            flat_projection[key] = value
            inclusive = False

    if inclusive:
        # An entirely inclusive projection expands to a full projection
        flat_projection = {f: True for f in cls._fields}

    return flat_projection, references, subs
Flatten a structured projection (structured projections add support for projecting fields that are (to be) dereferenced).
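To make the behaviour concrete, here is a hedged illustration; the Order and Customer frame classes are hypothetical, and the directive names follow the reconstruction above:

# Hypothetical frames: Order has a 'customer' field referencing a Customer frame.
projection = {
    'status': True,
    'customer': {'$ref': Customer, 'name': True}   # dereference the linked Customer
}
flat, references, subs = Order._flatten_projection(projection)
# 'customer' ends up in `references`, the plain fields stay in `flat`, and `subs` is empty here.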
10,491
def ExtractEvents(self, parser_mediator, registry_key, **kwargs):
    mru_values_dict = {}
    for subkey in registry_key.GetSubkeys():
      # 'UsernameHint' is the value name used by the Terminal Server Client key;
      # the fallback string is assumed, as the original literal was lost.
      username_value = subkey.GetValueByName('UsernameHint')
      if (username_value and username_value.data and
          username_value.DataIsString()):
        username = username_value.GetDataAsObject()
      else:
        username = 'N/A'

      mru_values_dict[subkey.name] = username

      event_data = windows_events.WindowsRegistryEventData()
      event_data.key_path = subkey.path
      event_data.offset = subkey.offset
      event_data.regvalue = {'Username hint': username}
      event_data.source_append = self._SOURCE_APPEND

      event = time_events.DateTimeValuesEvent(
          subkey.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
      parser_mediator.ProduceEventWithEventData(event, event_data)

    event_data = windows_events.WindowsRegistryEventData()
    event_data.key_path = registry_key.path
    event_data.offset = registry_key.offset
    event_data.regvalue = mru_values_dict
    event_data.source_append = self._SOURCE_APPEND

    event = time_events.DateTimeValuesEvent(
        registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
    parser_mediator.ProduceEventWithEventData(event, event_data)
Extracts events from a Terminal Server Client Windows Registry key. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
10,492
def signature(self, node, frame, extra_kwargs=None):
    # If any of the given keyword arguments is a python keyword we have to
    # route all keyword arguments through a dict to avoid generating an
    # invalid call.
    kwarg_workaround = False
    for kwarg in chain((x.key for x in node.kwargs), extra_kwargs or ()):
        if is_python_keyword(kwarg):
            kwarg_workaround = True
            break

    for arg in node.args:
        self.write(', ')
        self.visit(arg, frame)

    if not kwarg_workaround:
        for kwarg in node.kwargs:
            self.write(', ')
            self.visit(kwarg, frame)
        if extra_kwargs is not None:
            for key, value in iteritems(extra_kwargs):
                self.write(', %s=%s' % (key, value))
    if node.dyn_args:
        self.write(', *')
        self.visit(node.dyn_args, frame)

    if kwarg_workaround:
        if node.dyn_kwargs is not None:
            self.write(', **dict({')
        else:
            self.write(', **{')
        for kwarg in node.kwargs:
            self.write('%r: ' % kwarg.key)
            self.visit(kwarg.value, frame)
            self.write(', ')
        if extra_kwargs is not None:
            for key, value in iteritems(extra_kwargs):
                self.write('%r: %s, ' % (key, value))
        if node.dyn_kwargs is not None:
            self.write('}, **')
            self.visit(node.dyn_kwargs, frame)
            self.write(')')
        else:
            self.write('}')
    elif node.dyn_kwargs is not None:
        self.write(', **')
        self.visit(node.dyn_kwargs, frame)
Writes a function call to the stream for the current node. A leading comma is added automatically. The extra keyword arguments may not include python keywords otherwise a syntax error could occur. The extra keyword arguments should be given as python dict.
10,493
def cleanupContainers(self): for i in range(self.count() - 1, self.currentIndex(), -1): widget = self.widget(i) widget.close() widget.setParent(None) widget.deleteLater()
Cleans up all containers to the right of the current one.
10,494
def make_multi_entry(plist, pkg_pyvers, ver_dict): for pyver in pkg_pyvers: pver = pyver[2] + "." + pyver[3:] plist.append("Python {0}: {1}".format(pver, ops_to_words(ver_dict[pyver])))
Generate Python interpreter version entries.
10,495
def tag(self, version='', message=''):
    # The original default literals were lost; empty strings are assumed here.
    self.clone_from_github()
    self.github_repo.tag(version, message=message)
tag and commit
10,496
def _update_with_like_args(ctx, _, value):
    if value is None:
        return

    env = ctx.ensure_object(environment.Environment)
    vsi = SoftLayer.VSManager(env.client)
    vs_id = helpers.resolve_id(vsi.resolve_ids, value, 'VS')
    like_details = vsi.get_instance(vs_id)
    # Key and attribute names below follow the SoftLayer CLI conventions; the
    # original string literals were lost from this snippet.
    like_args = {
        'hostname': like_details['hostname'],
        'domain': like_details['domain'],
        'hourly': like_details['hourlyBillingFlag'],
        'datacenter': like_details['datacenter']['name'],
        'network': like_details['networkComponents'][0]['maxSpeed'],
        'user-data': like_details['userData'] or None,
        'postinstall': like_details.get('postInstallScriptUri'),
        'dedicated': like_details['dedicatedAccountHostOnlyFlag'],
        'private': like_details['privateNetworkOnlyFlag'],
        'placement_id': like_details.get('placementGroupId', None),
    }

    like_args['flavor'] = utils.lookup(like_details,
                                       'billingItem', 'orderItem', 'preset', 'keyName')
    if not like_args['flavor']:
        like_args['cpu'] = like_details['maxCpu']
        like_args['memory'] = '%smb' % like_details['maxMemory']

    tag_refs = like_details.get('tagReferences', None)
    if tag_refs is not None and len(tag_refs) > 0:
        like_args['tag'] = [t['tag']['name'] for t in tag_refs]

    # Handle mutually exclusive image/OS options
    like_image = utils.lookup(like_details,
                              'blockDeviceTemplateGroup', 'globalIdentifier')
    like_os = utils.lookup(like_details,
                           'operatingSystem', 'softwareLicense',
                           'softwareDescription', 'referenceCode')
    if like_image:
        like_args['image'] = like_image
    elif like_os:
        like_args['os'] = like_os

    if ctx.default_map is None:
        ctx.default_map = {}
    ctx.default_map.update(like_args)
Update arguments with options taken from a currently running VS.
10,497
def transpose(self, *axes): if self.ndim <= 1: return self ar = np.asarray(self).transpose(*axes) if axes[0] != 0: newlabels = [self.labels[ax] for ax in axes] return Timeseries(ar, self.tspan, newlabels) else: return ar
Permute the dimensions of a Timeseries.
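A minimal sketch, assuming ts is an existing Timeseries whose axes are (time, trials, channels):

ts2 = ts.transpose(0, 2, 1)   # time axis stays first: result is still a Timeseries
arr = ts.transpose(1, 0, 2)   # time axis moved: falls back to a plain ndarray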
10,498
def findNestedEnums(self, lst): if self.kind == "enum": lst.append(self) for c in self.children: c.findNestedEnums(lst)
Recursive helper function for finding nested enums. If this node is a class or struct it may have had an enum added to its child list. When this occurred, the enum was removed from ``self.enums`` in the :class:`~exhale.graph.ExhaleRoot` class and needs to be rediscovered by calling this method on all of its children. If this node is an enum, it is because a parent class or struct called this method, in which case it is added to ``lst``. **Note**: this is used slightly differently than nested directories, namespaces, and classes will be. Refer to :func:`~exhale.graph.ExhaleRoot.generateNodeDocuments`. :Parameters: ``lst`` (list) The list each enum is to be appended to.
10,499
def _line_parse(line):
    if line[-2:] in ['\r\n', b'\r\n']:
        return line[:-2], True
    elif line[-1:] in ['\r', '\n', b'\r', b'\n']:
        return line[:-1], True
    return line, False
Removes line ending characters and returns a tuple (`stripped_line`, `is_terminated`).
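Assuming the line-ending literals reconstructed above, the behaviour is:

assert _line_parse('abc\r\n') == ('abc', True)
assert _line_parse(b'abc\n') == (b'abc', True)
assert _line_parse('abc') == ('abc', False)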