Dataset columns: Unnamed: 0 (int64, 0 to 389k), code (string, lengths 26 to 79.6k), docstring (string, lengths 1 to 46.9k).
9,400
def register_view(self, view):
    # The signal name and view key were lost in extraction; wiring the main
    # window's 'destroy' signal to gtk.main_quit follows the docstring and is
    # an assumption.
    self.view['main_window'].connect('destroy', gtk.main_quit)
    self.view.set_text("%d" % self.model.counter)
    return
This method is called by the view when it is ready to register itself. Here we connect the 'pressed' signal of the button to a controller's method; the 'destroy' signal of the main window is handled as well.
9,401
def openStream(self, source):
    # The attribute-name literal was lost in extraction; 'read' is the
    # standard file-object check.
    if hasattr(source, 'read'):
        stream = source
    else:
        stream = BytesIO(source)
    try:
        stream.seek(stream.tell())
    except Exception:
        stream = BufferedStream(stream)
    return stream
Produces a file object from source. source can be either a file object, local filename or a string.
9,402
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
    hslab = 50  # depth of the slab, in km
    rjb, rrup = utils.get_equivalent_distance_inslab(rup.mag, dists.repi,
                                                     hslab)
    dists.rjb = rjb
    dists.rrup = rrup
    mean, stddevs = super().get_mean_and_stddevs(sites, rup, dists, imt,
                                                 stddev_types)
    cff = self.SITE_COEFFS[imt]
    # The coefficient key was lost in extraction; 'mf' is an assumption.
    mean_adj = np.log(np.exp(mean) * 10**cff['mf'])
    stddevs = [np.ones(len(dists.rrup)) * get_sigma(imt)]
    return mean_adj, stddevs
See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values.
9,403
def sequence_matcher_similarity(state_a, state_b):
    addrs_a = tuple(state_a.history.bbl_addrs)
    addrs_b = tuple(state_b.history.bbl_addrs)
    return SequenceMatcher(a=addrs_a, b=addrs_b).ratio()
The `difflib.SequenceMatcher` ratio between the state addresses in the history of the path. :param state_a: The first state to compare :param state_b: The second state to compare
9,404
def _get(self, target, alias):
    if target not in self._aliases:
        return
    return self._aliases[target].get(alias)
Internal method to get a specific alias.
9,405
def find_block(context, *names):
    block_set = context.render_context[BLOCK_CONTEXT_KEY]
    for name in names:
        block = block_set.get_block(name)
        if block is not None:
            return block
    # The message template was lost in extraction; wording is an assumption.
    raise template.TemplateSyntaxError('No block found with name(s) %s' % (names,))
Find the first matching block in the current block_context
9,406
def request(self, method, url, headers=None, raise_exception=True, **kwargs):
    # Most of this body was lost in extraction; the reconstruction below is a
    # sketch based on the docstring: join the partial url onto base_uri, issue
    # the request, and raise via utils.raise_for_error() on error statuses.
    url = self.urljoin(self.base_uri, url)
    response = self.session.request(method, url, headers=headers, **kwargs)
    if raise_exception and 400 <= response.status_code < 600:
        text = errors = None
        if response.headers.get('Content-Type') == 'application/json':
            errors = response.json().get('errors')
        if errors is None:
            text = response.text
        utils.raise_for_error(response.status_code, text, errors=errors)
    return response
Main method for routing HTTP requests to the configured Vault base_uri. :param method: HTTP method to use with the request. E.g., GET, POST, etc. :type method: str :param url: Partial URL path to send the request to. This will be joined to the end of the instance's base_uri attribute. :type url: str | unicode :param headers: Additional headers to include with the request. :type headers: dict :param raise_exception: If True, raise an exception via utils.raise_for_error(). Set this parameter to False to bypass this functionality. :type raise_exception: bool :param kwargs: Additional keyword arguments to include in the requests call. :type kwargs: dict :return: The response of the request. :rtype: requests.Response
9,407
def make_openid_request(arq, keys, issuer, request_object_signing_alg, recv):
    _jwt = JWT(key_jar=keys, iss=issuer, sign_alg=request_object_signing_alg)
    return _jwt.pack(arq.to_dict(), owner=issuer, recv=recv)
Construct the JWT to be passed by value (the request parameter) or by reference (request_uri). The request will be signed :param arq: The Authorization request :param keys: Keys to use for signing/encrypting. A KeyJar instance :param issuer: Who is signing this JSON Web Token :param request_object_signing_alg: Which signing algorithm to use :param recv: The intended receiver of the request :return: JWT encoded OpenID request
9,408
def remove_peer(self, peer):
    if type(peer) == list:
        for x in peer:
            check_url(x)
            for i in self.PEERS:
                if x in i:
                    self.PEERS.remove(i)
    elif type(peer) == str:
        check_url(peer)
        for i in self.PEERS:
            if peer == i:
                self.PEERS.remove(i)
    else:
        raise ValueError()
Remove one or multiple peers from the PEERS variable. :param peer: the peer(s) to remove (list or string)
9,409
def dynamize_range_key_condition(self, range_key_condition):
    # Several literals were lost in extraction; 'BETWEEN', the error messages
    # and the Layer1 dict keys follow boto's structure and are assumptions.
    d = None
    if range_key_condition:
        d = {}
        for range_value in range_key_condition:
            range_condition = range_key_condition[range_value]
            if range_condition == 'BETWEEN':
                if isinstance(range_value, tuple):
                    avl = [self.dynamize_value(v) for v in range_value]
                else:
                    msg = 'BETWEEN condition requires a tuple value'
                    raise TypeError(msg)
            elif isinstance(range_value, tuple):
                msg = 'Tuple can only be supplied with BETWEEN condition'
                raise TypeError(msg)
            else:
                avl = [self.dynamize_value(range_value)]
        d = {'AttributeValueList': avl, 'ComparisonOperator': range_condition}
    return d
Convert a layer2 range_key_condition parameter into the structure required by Layer1.
9,410
def _log_default(self):
    log = logging.getLogger(self.__class__.__name__)
    log.setLevel(self.log_level)
    # Literals lost in extraction; 'pythonw.exe' and 'w' are assumptions
    # (pythonw has no usable stdout, so log to devnull there).
    if sys.executable.endswith('pythonw.exe'):
        _log_handler = logging.StreamHandler(open(os.devnull, 'w'))
    else:
        _log_handler = logging.StreamHandler()
    _log_formatter = logging.Formatter(self.log_format)
    _log_handler.setFormatter(_log_formatter)
    log.addHandler(_log_handler)
    return log
Start logging for this application. The default is to log to stdout using a StreamHandler. The log level starts at logging.WARN, but this can be adjusted by setting the ``log_level`` attribute.
9,411
def end_of_history(self, current):
    self.history_cursor = len(self.history)
    current.set_line(self.history[-1].get_line_text())
Move to the end of the input history, i.e., the line currently being entered.
9,412
def modifiedLaplacian(img):
    M = np.array([-1, 2, -1])
    G = cv2.getGaussianKernel(ksize=3, sigma=-1)
    Lx = cv2.sepFilter2D(src=img, ddepth=cv2.CV_64F, kernelX=M, kernelY=G)
    Ly = cv2.sepFilter2D(src=img, ddepth=cv2.CV_64F, kernelX=G, kernelY=M)
    FM = np.abs(Lx) + np.abs(Ly)
    return cv2.mean(FM)[0]
LAPM' algorithm (Nayar89)
9,413
def _doActualSave(self, fname, comment, set_ro=False, overwriteRO=False):
    # Several string literals in this method were lost in extraction; the
    # debug and warning messages below are assumptions that preserve the logic.
    self.debug('_doActualSave: fname=' + str(fname) + ', set_ro=' +
               str(set_ro) + ', overwriteRO=' + str(overwriteRO))
    cantWrite = False
    inInstArea = False
    if fname in (None, ''):
        fname = self._taskParsObj.getFilename()
    try:
        if _isInstalled(fname):
            inInstArea = cantWrite = True
        else:
            if overwriteRO and os.path.exists(fname):
                setWritePrivs(fname, True, True)
            rv = self._taskParsObj.saveParList(filename=fname, comment=comment)
    except IOError:
        cantWrite = True
    if cantWrite:
        fname = self._taskParsObj.getDefaultSaveFilename()
        msg = ''
        if inInstArea:
            msg = 'Cannot save to the install area. '
        msg += (self._taskParsObj.getName() +
                ' parameters will instead be saved to: ' + fname)
        showwarning(message=msg, title="Will not overwrite!")
        rv = self._taskParsObj.saveParList(filename=fname, comment=comment)
    self._saveAsPostSave_Hook(fname)
    if set_ro and os.path.dirname(os.path.abspath(fname)) != \
            os.path.abspath(self._rcDir):
        cfgpars.checkSetReadOnly(fname)
    self._lastSavedState = self._taskParsObj.dict()
    return rv
Override this so we can handle case of file not writable, as well as to make our _lastSavedState copy.
9,414
def search_filter(entities, filters):
    if len(entities) == 0:
        return entities
    fields = entities[0].get_fields()
    if not set(filters).issubset(fields):
        # Message templates were lost in extraction; wording is an assumption.
        raise NoSuchFieldError(
            'Valid filter fields are {0}, but received filters on {1}.'
            .format(fields.keys(), filters.keys())
        )
    for field_name in filters:
        if isinstance(fields[field_name], (OneToOneField, OneToManyField)):
            raise NotImplementedError(
                'Filtering on field "{0}" of type {1} is not supported.'
                .format(field_name, type(fields[field_name]).__name__)
            )
    filtered = [entity.read() for entity in entities]
    for field_name, field_value in filters.items():
        filtered = [
            entity for entity in filtered
            if getattr(entity, field_name) == field_value
        ]
    return filtered
Read all ``entities`` and locally filter them. This method can be used like so:: entities = EntitySearchMixin(entities, {'name': 'foo'}) In this example, only entities where ``entity.name == 'foo'`` holds true are returned. An arbitrary number of field names and values may be provided as filters. .. NOTE:: This method calls :meth:`EntityReadMixin.read`. As a result, this method only works when called on a class that also inherits from :class:`EntityReadMixin`. :param entities: A list of :class:`Entity` objects. All list items should be of the same type. :param filters: A dict in the form ``{field_name: field_value, …}``. :raises nailgun.entity_mixins.NoSuchFieldError: If any of the fields named in ``filters`` do not exist on the entities being filtered. :raises: ``NotImplementedError`` If any of the fields named in ``filters`` are a :class:`nailgun.entity_fields.OneToOneField` or :class:`nailgun.entity_fields.OneToManyField`.
9,415
def shareproject(self, project_id, group_id, group_access):
    # The dict keys and URL template were lost in extraction; the GitLab
    # share-project API fields below are assumptions.
    data = {'id': project_id, 'group_id': group_id,
            'group_access': group_access}
    request = requests.post(
        '{0}/{1}/share'.format(self.projects_url, project_id),
        headers=self.headers, data=data, verify=self.verify_ssl)
    return request.status_code == 201
Allow to share a project with a group. :param project_id: The ID of a project :param group_id: The ID of a group :param group_access: Level of permissions for sharing :return: True if successful
9,416
def isrchi(value, ndim, array):
    value = ctypes.c_int(value)
    ndim = ctypes.c_int(ndim)
    array = stypes.toIntVector(array)
    return libspice.isrchi_c(value, ndim, array)
Search for a given value within an integer array. Return the index of the first matching array entry, or -1 if the key value was not found. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/isrchi_c.html :param value: Key value to be found in array. :type value: int :param ndim: Dimension of array. :type ndim: int :param array: Integer array to search. :type array: Array of ints :return: The index of the first matching array element or -1 if the value is not found. :rtype: int
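A minimal usage sketch, assuming the spiceypy package that provides this wrapper:
import spiceypy
index = spiceypy.isrchi(4, 5, [1, 0, 4, 2, 4])  # first 4 is at index 2
print(index)  # -> 2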
9,417
def start_after(self, document_fields):
    return self._cursor_helper(document_fields, before=False, start=True)
Start query results after a particular document value. The result set will **exclude** the document specified by ``document_fields``. If the current query already has specified a start cursor -- either via this method or :meth:`~.firestore_v1beta1.query.Query.start_at` -- this will overwrite it. When the query is sent to the server, the ``document_fields`` will be used in the order given by fields set by :meth:`~.firestore_v1beta1.query.Query.order_by`. Args: document_fields (Union[~.firestore_v1beta1.\ document.DocumentSnapshot, dict, list, tuple]): a document snapshot or a dictionary/list/tuple of fields representing a query results cursor. A cursor is a collection of values that represent a position in a query result set. Returns: ~.firestore_v1beta1.query.Query: A query with cursor. Acts as a copy of the current query, modified with the newly added "start after" cursor.
9,418
def addTrack(self, track):
    # Codec and container-format names were lost in extraction; the values
    # below follow aiortc's MediaRecorder and are assumptions.
    if track.kind == 'audio':
        if self.__container.format.name == 'wav':
            codec_name = 'pcm_s16le'
        elif self.__container.format.name == 'mp3':
            codec_name = 'mp3'
        else:
            codec_name = 'aac'
        stream = self.__container.add_stream(codec_name)
    else:
        if self.__container.format.name == 'image2':
            stream = self.__container.add_stream('png', rate=30)
            stream.pix_fmt = 'rgb24'
        else:
            stream = self.__container.add_stream('libx264', rate=30)
            stream.pix_fmt = 'yuv420p'
    self.__tracks[track] = MediaRecorderContext(stream)
Add a track to be recorded. :param track: An :class:`aiortc.AudioStreamTrack` or :class:`aiortc.VideoStreamTrack`.
9,419
def post(self, request, *args, **kwargs):
    if "data" in request.data:
        if "metadata" not in request.data["data"]:
            request.data["data"]["metadata"] = {}
        if "initial_sequence_number" not in request.data["data"]:
            request.data["data"]["initial_sequence_number"] = request.data[
                "data"
            ].get("next_sequence_number")
        subscription = SubscriptionSerializer(data=request.data["data"])
        if subscription.is_valid():
            subscription.save()
            status = 201
            accepted = {"accepted": True}
            return Response(accepted, status=status)
        else:
            status = 400
            return Response(subscription.errors, status=status)
    else:
        status = 400
        message = {"data": ["This field is required."]}
        return Response(message, status=status)
Validates subscription data before creating Subscription message
9,420
def get_slab(self, shift=0, tol=0.1, energy=None):
    h = self._proj_height
    p = h / self.parent.lattice.d_hkl(self.miller_index)
    if self.in_unit_planes:
        nlayers_slab = int(math.ceil(self.min_slab_size / p))
        nlayers_vac = int(math.ceil(self.min_vac_size / p))
    else:
        nlayers_slab = int(math.ceil(self.min_slab_size / h))
        nlayers_vac = int(math.ceil(self.min_vac_size / h))
    nlayers = nlayers_slab + nlayers_vac
    species = self.oriented_unit_cell.species_and_occu
    props = self.oriented_unit_cell.site_properties
    props = {k: v * nlayers_slab for k, v in props.items()}
    frac_coords = self.oriented_unit_cell.frac_coords
    frac_coords = np.array(frac_coords) + np.array([0, 0, -shift])[None, :]
    frac_coords -= np.floor(frac_coords)
    a, b, c = self.oriented_unit_cell.lattice.matrix
    new_lattice = [a, b, nlayers * c]
    frac_coords[:, 2] = frac_coords[:, 2] / nlayers
    all_coords = []
    for i in range(nlayers_slab):
        fcoords = frac_coords.copy()
        fcoords[:, 2] += i / nlayers
        all_coords.extend(fcoords)
    slab = Structure(new_lattice, species * nlayers_slab, all_coords,
                     site_properties=props)
    scale_factor = self.slab_scale_factor
    if self.lll_reduce:
        lll_slab = slab.copy(sanitize=True)
        mapping = lll_slab.lattice.find_mapping(slab.lattice)
        scale_factor = np.dot(mapping[2], scale_factor)
        slab = lll_slab
    if self.center_slab:
        avg_c = np.average([c[2] for c in slab.frac_coords])
        slab.translate_sites(list(range(len(slab))), [0, 0, 0.5 - avg_c])
    if self.primitive:
        prim = slab.get_primitive_structure(tolerance=tol)
        if energy is not None:
            energy = prim.volume / slab.volume * energy
        slab = prim
    ouc = self.oriented_unit_cell.copy()
    if self.primitive:
        slab_l = slab.lattice
        ouc = ouc.get_primitive_structure(constrain_latt={
            "a": slab_l.a, "b": slab_l.b, "alpha": slab_l.alpha,
            "beta": slab_l.beta, "gamma": slab_l.gamma})
    return Slab(slab.lattice, slab.species_and_occu, slab.frac_coords,
                self.miller_index, ouc, shift, scale_factor, energy=energy,
                site_properties=slab.site_properties,
                reorient_lattice=self.reorient_lattice)
This method takes in a shift value for the c lattice direction and generates a slab based on the given shift. You should rarely use this method directly; it is used by other generation algorithms to obtain all slabs. Args: shift (float): A shift value in Angstrom that determines how much a slab should be shifted. tol (float): Tolerance to determine primitive cell. energy (float): An energy to assign to the slab. Returns: (Slab) A Slab object with a particular shifted oriented unit cell.
9,421
def find_standard_sakefile(settings):
    error = settings["error"]
    if settings["customsake"]:
        custom = settings["customsake"]
        if not os.path.isfile(custom):
            error("Specified sakefile doesn't exist", custom)
            sys.exit(1)
        return custom
    for name in ["Sakefile", "Sakefile.yaml", "Sakefile.yml"]:
        if os.path.isfile(name):
            return name
    error("Error: there is no Sakefile to read")
    sys.exit(1)
Returns the filename of the appropriate sakefile
9,422
def paintEvent(self, event):
    if self.isVisible() and self.position != self.Position.FLOATING:
        self._background_brush = QBrush(QColor(
            self.editor.sideareas_color))
        self._foreground_pen = QPen(QColor(
            self.palette().windowText().color()))
        painter = QPainter(self)
        painter.fillRect(event.rect(), self._background_brush)
Fills the panel background using QPalette.
9,423
def get_urls(self, **kwargs):
    kwargs["site"] = Site.objects.get(id=current_site_id())
    return super(DisplayableSitemap, self).get_urls(**kwargs)
Ensure the correct host by injecting the current site.
9,424
def mag_field_motors(RAW_IMU, SENSOR_OFFSETS, ofs, SERVO_OUTPUT_RAW, motor_ofs):
    mag_x = RAW_IMU.xmag
    mag_y = RAW_IMU.ymag
    mag_z = RAW_IMU.zmag
    ofs = get_motor_offsets(SERVO_OUTPUT_RAW, ofs, motor_ofs)
    if SENSOR_OFFSETS is not None and ofs is not None:
        mag_x += ofs[0] - SENSOR_OFFSETS.mag_ofs_x
        mag_y += ofs[1] - SENSOR_OFFSETS.mag_ofs_y
        mag_z += ofs[2] - SENSOR_OFFSETS.mag_ofs_z
    return sqrt(mag_x**2 + mag_y**2 + mag_z**2)
calculate magnetic field strength from raw magnetometer
9,425
def bulk_get_or_create(self, data_list):
    items_to_create = dict()
    for record_key, record_config in data_list.items():
        if record_key not in items_to_create:
            record = self.get_instance(record_key)
            if not record:
                items_to_create[record_key] = self.model_cls(**record_config)
    if items_to_create:
        self.model_cls.objects.bulk_create(items_to_create.values())
        self.set_record_lookup(True)
    return self.record_lookup
Get or create records in bulk. The lookup query is generated once and the record keys come directly from the passed-in data, so they do not need to be regenerated per item, which saves a lot of time; values are used instead of whole objects, which is much faster. Args: data_list: mapping of record key to record config for the records to get or create. Returns: the record lookup dictionary.
9,426
def parse_column_filter(definition):
    # The regex replacement literal was lost in extraction; stripping quotes
    # with '' is an assumption.
    parts = list(generate_tokens(StringIO(definition.strip()).readline))
    while parts[-1][0] in (token.ENDMARKER, token.NEWLINE):
        parts = parts[:-1]
    if len(parts) == 3:
        a, b, c = parts
        if a[0] in [token.NAME, token.STRING]:
            name = QUOTE_REGEX.sub('', a[1])
            oprtr = OPERATORS[b[1]]
            value = _float_or_str(c[1])
            return [(name, oprtr, value)]
        elif b[0] in [token.NAME, token.STRING]:
            name = QUOTE_REGEX.sub('', b[1])
            oprtr = OPERATORS_INV[b[1]]
            value = _float_or_str(a[1])
            return [(name, oprtr, value)]
    elif len(parts) == 5:
        a, b, c, d, e = list(zip(*parts))[1]
        name = QUOTE_REGEX.sub('', c)
        return [(name, OPERATORS_INV[b], _float_or_str(a)),
                (name, OPERATORS[d], _float_or_str(e))]
    raise ValueError("Cannot parse filter definition from %r" % definition)
Parse a `str` of the form 'column>50' Parameters ---------- definition : `str` a column filter definition of the form ``<name><operator><threshold>`` or ``<threshold><operator><name><operator><threshold>``, e.g. ``frequency >= 10``, or ``50 < snr < 100`` Returns ------- filters : `list` of `tuple` a `list` of filter 3-`tuple`s, where each `tuple` contains the following elements: - ``column`` (`str`) - the name of the column on which to operate - ``operator`` (`callable`) - the operator to call when evaluating the filter - ``operand`` (`anything`) - the argument to the operator function Raises ------ ValueError if the filter definition cannot be parsed KeyError if any parsed operator string cannot be mapped to a function from the `operator` module Notes ----- Strings that contain non-alphanumeric characters (e.g. hyphen `-`) should be quoted inside the filter definition, to prevent such characters being interpreted as operators, e.g. ``channel = X1:TEST`` should always be passed as ``channel = "X1:TEST"``. Examples -------- >>> parse_column_filter("frequency>10") [('frequency', <function operator.gt>, 10.)] >>> parse_column_filter("50 < snr < 100") [('snr', <function operator.gt>, 50.), ('snr', <function operator.lt>, 100.)] >>> parse_column_filter('channel = "H1:TEST"') [('channel', <function operator.eq>, 'H1:TEST')]
9,427
def set_states(self, left_state, right_state):
    # The stash-name literals were lost in extraction; 'right', 'left' and
    # 'stashed' are assumptions consistent with the parameter names.
    simgr = self.project.factory.simulation_manager(right_state)
    simgr.stash(to_stash='right')
    simgr.active.append(left_state)
    simgr.stash(to_stash='left')
    simgr.stash(to_stash='stashed')
    return self.set_simgr(simgr)
Checks that the specified paths stay the same over the next `depth` states.
9,428
def set_token(self, token):
    if not token:
        self.token = None
        return
    # The exact key list was lost in extraction; these six OAuth token
    # fields are an assumption.
    expected_keys = ['access_token', 'refresh_token', 'token_type',
                     'scope', 'expires_in', 'expires_at']
    if not isinstance(token, dict) or not set(token) >= set(expected_keys):
        raise InvalidUsage("Expected a token dictionary containing the following keys: {0}"
                           .format(expected_keys))
    self.token = dict((k, v) for k, v in token.items() if k in expected_keys)
Validate and set token :param token: the token (dict) to set
9,429
def mentions_links(uri, s):
    # The format strings were lost in extraction; the anchor markup below is
    # an assumption matching the docstring's example.
    for username, after in mentions_re.findall(s):
        _uri = '/' + (uri or "").lstrip("/") + quote(username)
        link = '<a href="{0}">@{1}</a>{2}'.format(_uri.lower(), username, after)
        s = s.replace('@' + username, link)
    return s
Turns mention-like strings into HTML links. :param uri: the /uri/ root for the mention links :param s: the string to look for |@|mentions in :returns: the string with each @mention replaced by an HTML link, e.g. |<a href="/uri/mention">@mention</a>|
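An illustrative call, hedged because the anchor markup above is itself a reconstruction:
html = mentions_links("/u/", "hello @alice")
# -> roughly 'hello <a href="/u/alice">@alice</a>'; exact markup depends on the lost format string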
9,430
def fix_e271(self, result):
    # The dict keys and replacement literal were lost in extraction; 'line',
    # 'column' and a single space follow pycodestyle's result format.
    line_index = result['line'] - 1
    target = self.source[line_index]
    offset = result['column'] - 1
    fixed = fix_whitespace(target, offset=offset, replacement=' ')
    if fixed == target:
        return []
    else:
        self.source[line_index] = fixed
Fix extraneous whitespace around keywords.
9,431
def build_url(base, seg, query=None):
    # Several literals were lost in extraction; 'utf-8', '' and '/' below
    # are assumptions.
    def clean_segment(segment):
        segment = segment.strip()
        if isinstance(segment, basestring):
            segment = segment.encode('utf-8')
        return segment

    seg = (quote(clean_segment(s)) for s in seg)
    if query is None or len(query) == 0:
        query_string = ''
    else:
        query_string = "?" + urlencode(query)
    path = '/'.join(seg) + query_string
    adjusted_base = base.rstrip('/') + '/'
    return urljoin(str(adjusted_base), str(path))
Create a URL from a list of path segments and an optional dict of query parameters.
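A usage sketch showing the intended behaviour, under the reconstruction above:
url = build_url("https://api.example.com/", ["v1", "users"], {"page": 2})
# -> 'https://api.example.com/v1/users?page=2'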
9,432
def file_list(self, tgt_env):
    files = set()
    symlinks = {}
    tree = self.get_tree(tgt_env)
    if not tree:
        return files, symlinks
    if self.root(tgt_env):
        try:
            tree = tree / self.root(tgt_env)
        except KeyError:
            return files, symlinks
        relpath = lambda path: os.path.relpath(path, self.root(tgt_env))
    else:
        relpath = lambda path: path
    add_mountpoint = lambda path: salt.utils.path.join(
        self.mountpoint(tgt_env), path, use_posixpath=True)
    for file_blob in tree.traverse():
        if not isinstance(file_blob, git.Blob):
            continue
        file_path = add_mountpoint(relpath(file_blob.path))
        files.add(file_path)
        if stat.S_ISLNK(file_blob.mode):
            stream = six.StringIO()
            file_blob.stream_data(stream)
            stream.seek(0)
            link_tgt = stream.read()
            stream.close()
            symlinks[file_path] = link_tgt
    return files, symlinks
Get file list for the target environment using GitPython
9,433
def upload_file(token, channel_name, file_name):
    slack = Slacker(token)
    slack.files.upload(file_name, channels=channel_name)
upload file to a channel
9,434
def consume(self, key, amount=1, rate=None, capacity=None, **kwargs):
    bucket = self.get_bucket(key, rate, capacity, **kwargs)
    return bucket.consume(amount)
Consume an amount for a given key. Non-default rate/capacity can be given to override Throttler defaults. Returns: bool: whether the units could be consumed
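A usage sketch; the Throttler constructor arguments here are hypothetical:
throttler = Throttler(rate=5, capacity=10)  # hypothetical construction
if throttler.consume("user:42"):
    pass  # allowed: one unit consumed from this key's bucket
else:
    pass  # bucket exhausted: the request should be throttled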
9,435
def unlock(self, password):
    self.password = password
    if self.config_key in self.config and self.config[self.config_key]:
        self._decrypt_masterpassword()
    else:
        self._new_masterpassword(password)
        self._save_encrypted_masterpassword()
The password is used to encrypt this masterpassword. To decrypt the keys stored in the keys database, one must use BIP38, decrypt the masterpassword from the configuration store with the user password, and use the decrypted masterpassword to decrypt the BIP38 encrypted private keys from the keys storage! :param str password: Password to use for en-/de-cryption
9,436
def ProcessFileData(filename, file_extension, lines, error,
                    extra_check_functions=None):
    # The sentinel strings padded around the file were lost in extraction;
    # cpplint uses begin/end marker comments here, reconstructed below.
    lines = (['// marker so line numbers and indices both start at 1'] +
             lines +
             ['// marker so line numbers end in a known way'])
    include_state = _IncludeState()
    function_state = _FunctionState()
    nesting_state = NestingState()
    ResetNolintSuppressions()
    CheckForCopyright(filename, lines, error)
    ProcessGlobalSuppresions(lines)
    RemoveMultiLineComments(filename, lines, error)
    clean_lines = CleansedLines(lines)
    if file_extension in GetHeaderExtensions():
        CheckForHeaderGuard(filename, clean_lines, error)
    for line in range(clean_lines.NumLines()):
        ProcessLine(filename, file_extension, clean_lines, line,
                    include_state, function_state, nesting_state, error,
                    extra_check_functions)
        FlagCxx11Features(filename, clean_lines, line, error)
    nesting_state.CheckCompletedBlocks(filename, error)
    CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)
    if _IsSourceExtension(file_extension):
        CheckHeaderFileIncluded(filename, include_state, error)
    CheckForBadCharacters(filename, lines, error)
    CheckForNewlineAtEOF(filename, lines, error)
Performs lint checks and reports any errors to the given error function. Args: filename: Filename of the file that is being processed. file_extension: The extension (dot not included) of the file. lines: An array of strings, each representing a line of the file, with the last element being empty if the file is terminated with a newline. error: A callable to which errors are reported, which takes 4 arguments: filename, line number, error level, and message extra_check_functions: An array of additional check functions that will be run on each source line. Each function takes 4 arguments: filename, clean_lines, line, error
9,437
def has_implementation(self, number, arch, abi_list=()):
    name, _, _ = self._canonicalize(number, arch, abi_list)
    return super(SimSyscallLibrary, self).has_implementation(name)
Pretty much the intersection of SimLibrary.has_implementation() and SimSyscallLibrary.get(). :param number: The syscall number :param arch: The architecture being worked with, as either a string name or an archinfo.Arch :param abi_list: A list of ABI names that could be used :return: A bool of whether or not an implementation of the syscall is available
9,438
def _Backward3_sat_v_P(P, T, x):
    if x == 0:
        if P < 19.00881189:
            region = "c"
        elif P < 21.0434:
            region = "s"
        elif P < 21.9316:
            region = "u"
        else:
            region = "y"
    else:
        if P < 20.5:
            region = "t"
        elif P < 21.0434:
            region = "r"
        elif P < 21.9009:
            region = "x"
        else:
            region = "z"
    return _Backward3x_v_PT(T, P, region)
Backward equation for region 3 for saturated state, v = f(P,x) Parameters ---------- T : float Temperature, [K] P : float Pressure, [MPa] x : integer Vapor quality, [-] Returns ------- v : float Specific volume, [m³/kg] Notes ----- The vapor quality (x) can be 0 (saturated liquid) or 1 (saturated vapour)
9,439
def batch_query_event_records(
        self,
        batch_size: int,
        filters: List[Tuple[str, Any]] = None,
        logical_and: bool = True,
) -> Iterator[List[EventRecord]]:
    limit = batch_size
    offset = 0
    result_length = 1
    while result_length != 0:
        result = self._get_event_records(
            limit=limit,
            offset=offset,
            filters=filters,
            logical_and=logical_and,
        )
        result_length = len(result)
        offset += result_length
        yield result
Batch query event records with a given batch size and an optional filter This is a generator function returning each batch to the caller to work with.
9,440
def convert_elementwise_add(net, node, module, builder):
    # The lost literals are assumed to be 'name' (node attribute key) and
    # 'ADD' (the coreml elementwise mode).
    input_names, output_name = _get_input_output_name(net, node, [0, 1])
    name = node['name']
    builder.add_elementwise(name, input_names, output_name, 'ADD')
Convert an elementwise add layer from mxnet to coreml. Parameters ---------- network: net A mxnet network object. layer: node Node to convert. module: module A module for MXNet builder: NeuralNetworkBuilder A neural network builder object.
9,441
def tickets_create_many(self, data, **kwargs):
    api_path = "/api/v2/tickets/create_many.json"
    return self.call(api_path, method="POST", data=data, **kwargs)
https://developer.zendesk.com/rest_api/docs/core/tickets#create-many-tickets
9,442
def exec_function(ast, globals_map):
    locals_map = globals_map
    exec(ast, globals_map, locals_map)
    return locals_map
Execute a python code object in the given environment. Args: ast: The compiled code object to execute. globals_map: Dictionary to use as the globals context. Returns: locals_map: Dictionary of locals from the environment after execution.
9,443
def get(self, block=True, timeout=None, method='pop'):
    # The method-name literals were lost in extraction; 'pop'/'popleft' are
    # taken from the docstring.
    if method not in ('pop', 'popleft'):
        raise ValueError('method must be "pop" or "popleft", got {!r}'.format(method))
    t_start = time.clock()
    while not self:
        if not block:
            raise self.Empty
        if timeout is None:
            wait(self)
        else:
            t_delta = time.clock() - t_start
            if t_delta > timeout:
                raise Timeout
            wait(self, timeout - t_delta)
    return getattr(self, method)()
If *block* is True, this method blocks until an element can be removed from the deque with the specified *method*. If *block* is False, the function will raise #Empty if no elements are available. # Arguments block (bool): #True to block and wait until an element becomes available, #False otherwise. timeout (number, None): The timeout in seconds to use when waiting for an element (only with `block=True`). method (str): The name of the method to use to remove an element from the queue. Must be either `'pop'` or `'popleft'`. # Raises ValueError: If *method* has an invalid value. Timeout: If the *timeout* is exceeded.
9,444
def run_pylint():
    from pylint.lint import Run
    try:
        Run(sys.argv[1:])
    except KeyboardInterrupt:
        sys.exit(1)
run pylint
9,445
def __neighbor_indexes_distance_matrix(self, index_point):
    distances = self.__pointer_data[index_point]
    return [index_neighbor for index_neighbor in range(len(distances))
            if ((distances[index_neighbor] <= self.__eps) and
                (index_neighbor != index_point))]
! @brief Return neighbors of the specified object in case of distance matrix. @param[in] index_point (uint): Index of the point whose neighbors should be found. @return (list) List of indexes of neighbors within the connectivity radius.
9,446
def _included_frames(frame_list, frame_format):
    return INCLUDED_FRAMES.format(Nframes=len(frame_list),
                                  frame_dir=os.path.dirname(frame_list[0]),
                                  frame_format=frame_format)
frame_list should be a list of filenames
9,447
def _update_process_resources(self, process, vals):
    resources = ["cpus"]
    for r in resources:
        if not self.processes[process][r]:
            try:
                self.processes[process][r] = vals[0]["cpus"]
            except KeyError:
                pass
Updates the resources info in :attr:`processes` dictionary.
9,448
def publishCommand(self, typeId, deviceId, commandId, msgFormat, data=None,
                   qos=0, on_publish=None):
    if self._config.isQuickstart():
        self.logger.warning("QuickStart applications do not support sending commands")
        return False
    if not self.connectEvent.wait(timeout=10):
        return False
    else:
        topic = "iot-2/type/%s/id/%s/cmd/%s/fmt/%s" % (typeId, deviceId,
                                                       commandId, msgFormat)
        if self.getMessageCodec(msgFormat) is None:
            raise MissingMessageEncoderException(msgFormat)
        payload = self.getMessageCodec(msgFormat).encode(data, datetime.now())
        result = self.client.publish(topic, payload=payload, qos=qos,
                                     retain=False)
        if result[0] == paho.MQTT_ERR_SUCCESS:
            with self._messagesLock:
                if result[1] in self._onPublishCallbacks:
                    # paho already confirmed receipt for this message id
                    del self._onPublishCallbacks[result[1]]
                    if on_publish is not None:
                        on_publish()
                else:
                    self._onPublishCallbacks[result[1]] = on_publish
            return True
        else:
            return False
Publish a command to a device # Parameters typeId (string) : The type of the device this command is to be published to deviceId (string): The id of the device this command is to be published to command (string) : The name of the command msgFormat (string) : The format of the command payload data (dict) : The command data qos (int) : The equivalent MQTT semantics of quality of service using the same constants (optional, defaults to `0`) on_publish (function) : A function that will be called when receipt of the publication is confirmed. This has different implications depending on the qos: - qos 0 : the client has asynchronously begun to send the event - qos 1 and 2 : the client has confirmation of delivery from WIoTP
9,449
def ensure_path_exists(dir_path):
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
        return True
    return False
Make sure that a path exists
9,450
def _peek_job(self, pos):
    if pos < len(self._job_info_queue):
        return self._job_info_queue[pos].job
    raise IndexError()
Return the job currently at position `pos`, but still keep it in the job queue. An IndexError will be raised if that position does not currently exist in the job list. :param int pos: Position of the job to get. :return: The job
9,451
def translate_key(jsonkey):
    "helper for translate_*"
    nombre, pkey, field = ujson.loads(jsonkey)
    return FieldKey(nombre, tuple(pkey), field)
helper for translate_*
9,452
def crc16_nojit(s, crc=0):
    for ch in bytearray(s):
        crc = ((crc << 8) & 0xFFFF) ^ _crc16_tab[((crc >> 8) & 0xFF) ^ (ch & 0xFF)]
        crc &= 0xFFFF
    return crc
CRC16 implementation according to CCITT standards.
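A usage sketch; the resulting value depends on the polynomial baked into _crc16_tab:
checksum = crc16_nojit(b"123456789")
print(hex(checksum))  # CRC16-CCITT of the classic test vector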
9,453
def get_item_metadata(self, handle):
    # Several literals were lost in extraction; the prefix template, the
    # 'metadata' include flag and the '.' split are assumptions consistent
    # with the key layout described in the docstring.
    metadata = {}
    identifier = generate_identifier(handle)
    prefix = self.fragments_key_prefix + '{}'.format(identifier)
    blob_generator = self._blobservice.list_blobs(
        self.uuid, include='metadata', prefix=prefix
    )
    for blob in blob_generator:
        metadata_key = blob.name.split('.')[-2]
        value_as_string = self.get_text(blob.name)
        value = json.loads(value_as_string)
        metadata[metadata_key] = value
    return metadata
Return dictionary containing all metadata associated with handle. In other words all the metadata added using the ``add_item_metadata`` method. :param handle: handle for accessing an item before the dataset is frozen :returns: dictionary containing item metadata
9,454
def plotMDS(data, theOrders, theLabels, theColors, theAlphas, theSizes,
            theMarkers, options):
    import matplotlib as mpl
    if options.format != "X11" and mpl.get_backend() != "agg":
        mpl.use("Agg")
    import matplotlib.pyplot as plt
    if options.format != "X11":
        plt.ioff()
    fig = plt.figure()
    try:
        fig.subplots_adjust(right=options.adjust_right,
                            left=options.adjust_left,
                            bottom=options.adjust_bottom,
                            top=options.adjust_top)
    except ValueError as e:
        raise ProgramError(e)
    ax = fig.add_subplot(111)
    ax.xaxis.set_ticks_position("bottom")
    ax.yaxis.set_ticks_position("left")
    ax.spines["top"].set_visible(False)
    ax.spines["right"].set_visible(False)
    ax.spines["bottom"].set_position(("outward", 9))
    ax.spines["left"].set_position(("outward", 9))
    plotObject = []
    labels = []
    for i, index in enumerate(theOrders):
        try:
            tmp, = ax.plot(data[0][i], data[1][i], theMarkers[i],
                           color=theColors[i], mec=theColors[i],
                           markersize=theSizes[i], alpha=theAlphas[i])
        except ValueError as e:
            msg = "Problem with markers: %(e)s" % locals()
            raise ProgramError(msg)
        plotObject.append(tmp)
        labels.append(index)
    prop = mpl.font_manager.FontProperties(size=options.legend_size)
    leg = ax.legend(plotObject, labels, loc=options.legend_position,
                    numpoints=1, fancybox=True, prop=prop,
                    ncol=options.legend_ncol)
    leg.get_frame().set_alpha(0.5)
    ax.set_title(options.title, fontsize=options.title_fontsize, weight="bold")
    ax.set_xlabel(options.xlabel, fontsize=options.label_fontsize)
    ax.set_ylabel(options.ylabel, fontsize=options.label_fontsize)
    for tick in ax.yaxis.get_major_ticks() + ax.xaxis.get_major_ticks():
        tick.label.set_fontsize(options.axis_fontsize)
    if options.format == "X11":
        plt.show()
    else:
        fileName = options.out + "." + options.format
        try:
            plt.savefig(fileName, dpi=300)
        except IOError:
            msg = "%(fileName)s: can't write file" % locals()
            raise ProgramError(msg)
        except ValueError as e:
            colorError = False
            for errorMsg in str(e).split("\n"):
                if errorMsg.startswith("to_rgb"):
                    colorError = True
            if colorError:
                msg = "problem with the population colors"
                raise ProgramError(msg)
            else:
                print str(e)  # Python 2 print statement, kept as in the source
Plot the MDS data. :param data: the data to plot (MDS values). :param theOrders: the order of the populations to plot. :param theLabels: the names of the populations to plot. :param theColors: the colors of the populations to plot. :param theAlphas: the alpha value for the populations to plot. :param theSizes: the sizes of the markers for each population to plot. :param theMarkers: the type of marker for each population to plot. :param options: the options. :type data: list of numpy.array :type theOrders: list :type theLabels: list :type theColors: list :type theAlphas: list :type theSizes: list :type theMarkers: list :type options: argparse.Namespace
9,455
def start(self):
    from .nurest_session import NURESTSession
    session = NURESTSession.get_current_session()
    # Note: this is Python-2-era code; 'async' became a keyword in 3.7.
    if self.async:
        # The kwargs key literal was lost in extraction; 'session' matches
        # the parameter name used in the synchronous call below.
        thread = threading.Thread(target=self._make_request,
                                  kwargs={'session': session})
        thread.is_daemon = False
        thread.start()
        return self.transaction_id
    return self._make_request(session=session)
Make an HTTP request with a specific method
9,456
def get_range(self):
    (min, max) = (100000, -1)
    for cont in self.bar:
        for note in cont[2]:
            if int(note) < int(min):
                min = note
            elif int(note) > int(max):
                max = note
    return (min, max)
Return the highest and the lowest note in a tuple.
9,457
def _query(action=None, command=None, args=None, method='GET',
           header_dict=None, data=None):
    # Most string literals here were lost in extraction; the config keys,
    # base URL and signature args below follow GoGrid API conventions and
    # are assumptions.
    vm_ = get_configured_provider()
    apikey = config.get_cloud_config_value(
        'apikey', vm_, __opts__, search_global=False
    )
    sharedsecret = config.get_cloud_config_value(
        'sharedsecret', vm_, __opts__, search_global=False
    )
    path = 'https://api.gogrid.com/api/'
    if action:
        path += action
    if command:
        path += '/{0}'.format(command)
    log.debug('GoGrid URL: %s', path)
    if not isinstance(args, dict):
        args = {}
    epoch = six.text_type(int(time.time()))
    hashtext = ''.join((apikey, sharedsecret, epoch))
    args['sig'] = salt.utils.hashutils.md5_digest(hashtext)
    args['format'] = 'json'
    args['v'] = '1.0'
    args['api_key'] = apikey
    if header_dict is None:
        header_dict = {}
    if method != 'POST':
        header_dict['Accept'] = 'application/json'
    decode = True
    if method == 'DELETE':
        decode = False
    return_content = None
    result = salt.utils.http.query(
        path,
        method,
        params=args,
        data=data,
        header_dict=header_dict,
        decode=decode,
        decode_type='json',
        text=True,
        status=True,
        opts=__opts__,
    )
    log.debug('GoGrid response status: %s', result['status'])
    return result['dict']
Make a web call to GoGrid .. versionadded:: 2015.8.0
9,458
def __query_cmd(self, command, device=None):
    # The format strings were lost in extraction; the switchcmd/ain query
    # parameters follow the AVM home-automation HTTP interface and are
    # assumptions.
    base_url = u'%s&switchcmd=%s' % (self.__homeauto_url_with_sid(), command)
    if device is None:
        url = base_url
    else:
        url = '%s&ain=%s' % (base_url, device)
    if self.__debug:
        print(u'fetching url: ' + url)
    return self.__query(url)
Calls a command
9,459
def get_container(self, path):
    if not settings.container_permitted(path):
        raise errors.NotPermittedException(
            "Access to container \"%s\" is not permitted." % path)
    return self._get_container(path)
Return single container.
9,460
def connect(self):
    # The connection-type literals were lost in extraction; 'ssl' and 'lmtp'
    # are assumptions matching the classes used in each branch.
    if self.connection_type.lower() == 'ssl':
        self.server = smtplib.SMTP_SSL(host=self.host, port=self.port,
                                       local_hostname=self.local_hostname,
                                       timeout=self.timeout,
                                       source_address=self.source_address)
    elif self.connection_type.lower() == 'lmtp':
        self.server = smtplib.LMTP(host=self.host, port=self.port,
                                   local_hostname=self.local_hostname,
                                   source_address=self.source_address)
    else:
        self.server = smtplib.SMTP(host=self.host, port=self.port,
                                   local_hostname=self.local_hostname,
                                   timeout=self.timeout,
                                   source_address=self.source_address)
    self.server.login(self.username, self.password)
Connect to server Returns: None
9,461
def get_assessment_admin_session(self):
    if not self.supports_assessment_admin():
        raise errors.Unimplemented()
    return sessions.AssessmentAdminSession(runtime=self._runtime)
Gets the ``OsidSession`` associated with the assessment administration service. return: (osid.assessment.AssessmentAdminSession) - an ``AssessmentAdminSession`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_assessment_admin()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_assessment_admin()`` is ``true``.*
9,462
def aliases(self):
    # The format string was lost in extraction; 'ubi{0}:{1}' follows the
    # docstring's 'ubi${INDEX}:${LABEL}' description.
    return ['ubi{0}:{1}'.format(self.device.parent.sys_number, self.label),
            self.node]
Aliases for UBI volume. This property evaluates to the device node itself plus the ``'ubi${INDEX}:${LABEL}'`` string. The latter is used to identify the device in the /proc/mounts table, and is not really an alias.
9,463
def create(source_name, size, metadata_backend=None, storage_backend=None):
    if storage_backend is None:
        storage_backend = backends.storage.get_backend()
    if metadata_backend is None:
        metadata_backend = backends.metadata.get_backend()
    thumbnail_file = processors.process(storage_backend.open(source_name), size)
    thumbnail_file = post_processors.process(thumbnail_file, size)
    name = get_thumbnail_name(source_name, size)
    name = storage_backend.save(name, thumbnail_file)
    metadata = metadata_backend.add_thumbnail(source_name, size, name)
    return Thumbnail(metadata=metadata, storage=storage_backend)
Creates a thumbnail file and its relevant metadata. Returns a Thumbnail instance.
9,464
def validate_arguments(args):
    if args.diff:
        if not args.output_dir:
            # The message literal was lost in extraction; wording is an assumption.
            logger.error('Diff analysis requires an output directory.')
            print_usage()
            sys.exit(0)
    elif not args.output_dir:
        print_usage()
        sys.exit(0)
Validate that the necessary arguments for normal or diff analysis are specified. :param args: Command line arguments namespace
9,465
def write_bytes(fp, data):
    pos = fp.tell()
    fp.write(data)
    written = fp.tell() - pos
    # The assert message template was lost in extraction; wording is an assumption.
    assert written == len(data), 'written %d bytes, expected %d' % (
        written, len(data)
    )
    return written
Write bytes to the file object and returns bytes written. :return: written byte size
9,466
def Serialize(self, writer):
    super(SpentCoinState, self).Serialize(writer)
    writer.WriteUInt256(self.TransactionHash)
    writer.WriteUInt32(self.TransactionHeight)
    writer.WriteVarInt(len(self.Items))
    for item in self.Items:
        writer.WriteUInt16(item.index)
        writer.WriteUInt32(item.height)
Serialize full object. Args: writer (neo.IO.BinaryWriter):
9,467
def defBoroCnst(self, BoroCnstArt):
    ShkCount = self.TranShkValsNext.size
    pLvlCount = self.pLvlGrid.size
    PermShkVals_temp = np.tile(np.reshape(self.PermShkValsNext, (1, ShkCount)),
                               (pLvlCount, 1))
    TranShkVals_temp = np.tile(np.reshape(self.TranShkValsNext, (1, ShkCount)),
                               (pLvlCount, 1))
    pLvlNext_temp = np.tile(
        np.reshape(self.pLvlNextFunc(self.pLvlGrid), (pLvlCount, 1)),
        (1, ShkCount)) * PermShkVals_temp
    aLvlMin_candidates = (self.mLvlMinNext(pLvlNext_temp) -
                          TranShkVals_temp * pLvlNext_temp) / self.Rfree
    aLvlMinNow = np.max(aLvlMin_candidates, axis=1)
    self.BoroCnstNat = LinearInterp(np.insert(self.pLvlGrid, 0, 0.0),
                                    np.insert(aLvlMinNow, 0, 0.0))
    if self.BoroCnstArt is not None:
        self.BoroCnstArt = LinearInterp(np.array([0.0, 1.0]),
                                        np.array([0.0, self.BoroCnstArt]))
        self.mLvlMinNow = UpperEnvelope(self.BoroCnstArt, self.BoroCnstNat)
    else:
        self.mLvlMinNow = self.BoroCnstNat
    cFuncNowCnstBase = BilinearInterp(np.array([[0., 0.], [1., 1.]]),
                                      np.array([0.0, 1.0]),
                                      np.array([0.0, 1.0]))
    self.cFuncNowCnst = VariableLowerBoundFunc2D(cFuncNowCnstBase,
                                                 self.mLvlMinNow)
Defines the constrained portion of the consumption function as cFuncNowCnst, an attribute of self. Parameters ---------- BoroCnstArt : float or None Borrowing constraint for the minimum allowable assets to end the period with. If it is less than the natural borrowing constraint, then it is irrelevant; BoroCnstArt=None indicates no artificial borrowing constraint. Returns ------- None
9,468
def _validate_response(self, response):
    try:
        response.raise_for_status()
    except HTTPError as http_error:
        raise BadRequestException(str(http_error))
    response_data = response.json()
    if response_data["status_code"] != self.RESPONSE_STATUS_OK:
        raise InvalidResponseException(
            "Response code {0}: {1}".format(
                response_data["status_code"], response_data["error"]
            )
        )
:param response: requests.models.Response :raises: pybomb.exceptions.InvalidResponseException :raises: pybomb.exceptions.BadRequestException
9,469
def from_tibiadata(cls, content, vocation=None):
    json_content = parse_json(content)
    try:
        highscores_json = json_content["highscores"]
        if "error" in highscores_json["data"]:
            return None
        world = highscores_json["world"]
        category = highscores_json["type"]
        highscores = cls(world, category)
        for entry in highscores_json["data"]:
            value_key = "level"
            if highscores.category in [Category.ACHIEVEMENTS,
                                       Category.LOYALTY_POINTS,
                                       Category.EXPERIENCE]:
                value_key = "points"
            if highscores.category == Category.EXPERIENCE:
                highscores.entries.append(ExpHighscoresEntry(
                    entry["name"], entry["rank"], entry["voc"],
                    entry[value_key], entry["level"]))
            elif highscores.category == Category.LOYALTY_POINTS:
                highscores.entries.append(LoyaltyHighscoresEntry(
                    entry["name"], entry["rank"], entry["voc"],
                    entry[value_key], entry["title"]))
            else:
                highscores.entries.append(HighscoresEntry(
                    entry["name"], entry["rank"], entry["voc"],
                    entry[value_key]))
        highscores.results_count = len(highscores.entries)
    except KeyError:
        raise InvalidContent("content is not a TibiaData highscores response.")
    if isinstance(vocation, VocationFilter):
        highscores.vocation = vocation
    return highscores
Builds a highscores object from a TibiaData highscores response. Notes ----- Since TibiaData.com's response doesn't contain any indication of the vocation filter applied, :py:attr:`vocation` can't be determined from the response, so the attribute must be assigned manually. If the attribute is known, it can be passed for it to be assigned in this method. Parameters ---------- content: :class:`str` The JSON content of the response. vocation: :class:`VocationFilter`, optional The vocation filter to assign to the results. Note that this won't affect the parsing. Returns ------- :class:`Highscores` The highscores contained in the page, or None if the content is for the highscores of a nonexistent world. Raises ------ InvalidContent If content is not a JSON string of the highscores response.
9,470
def list_my(self):
    # The banner strings printed below were lost in extraction; their wording
    # is an assumption.
    org_list = self.call_contract_command("Registry", "listOrganizations", [])
    rez_owner = []
    rez_member = []
    for idx, org_id in enumerate(org_list):
        (found, org_id, org_name, owner, members, serviceNames,
         repositoryNames) = self.call_contract_command(
            "Registry", "getOrganizationById", [org_id])
        if (not found):
            raise Exception("Organization was removed during this call. Please retry.")
        if self.ident.address == owner:
            rez_owner.append((org_name, bytes32_to_str(org_id)))
        if self.ident.address in members:
            rez_member.append((org_name, bytes32_to_str(org_id)))
    if (rez_owner):
        self._printout("# Organizations you are the owner of")
        self._printout("# OrgName OrgId")
        for n, i in rez_owner:
            self._printout("%s %s" % (n, i))
    if (rez_member):
        self._printout("# Organizations you are a member of")
        self._printout("# OrgName OrgId")
        for n, i in rez_member:
            self._printout("%s %s" % (n, i))
Find organization that has the current identity as the owner or as the member
9,471
def xmlns(source):
    namespaces = {}
    events = ("end", "start-ns", "end-ns")
    for (event, elem) in iterparse(source, events):
        if event == "start-ns":
            prefix, ns = elem
            namespaces[prefix] = ns
        elif event == "end":
            break
    if hasattr(source, "seek"):
        source.seek(0)
    return namespaces
Returns a map of prefix to namespace for the given XML file.
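A usage sketch using an in-memory document (assumes iterparse from xml.etree.ElementTree, as the function does):
from io import StringIO
doc = StringIO('<root xmlns="urn:default" xmlns:x="urn:x"/>')
print(xmlns(doc))  # -> {'': 'urn:default', 'x': 'urn:x'}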
9,472
def target_Orange_table(self):
    table, cls_att = self.db.target_table, self.db.target_att
    if not self.db.orng_tables:
        return self.convert_table(table, cls_att=cls_att)
    else:
        return self.db.orng_tables[table]
Returns the target table as an Orange example table. :rtype: orange.ExampleTable
9,473
def get_attachment(self, project, build_id, timeline_id, record_id, type,
                   name, **kwargs):
    # Route keys, type names, and the location GUID / API version literals
    # were lost in extraction; the values below follow the azure-devops
    # client pattern and the GUID/version are placeholders.
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    if build_id is not None:
        route_values['buildId'] = self._serialize.url('build_id', build_id, 'int')
    if timeline_id is not None:
        route_values['timelineId'] = self._serialize.url('timeline_id', timeline_id, 'str')
    if record_id is not None:
        route_values['recordId'] = self._serialize.url('record_id', record_id, 'str')
    if type is not None:
        route_values['type'] = self._serialize.url('type', type, 'str')
    if name is not None:
        route_values['name'] = self._serialize.url('name', name, 'str')
    response = self._send(http_method='GET',
                          location_id='<location-guid>',  # placeholder: lost in extraction
                          version='5.1-preview',          # placeholder: lost in extraction
                          route_values=route_values,
                          accept_media_type='application/octet-stream')
    if "callback" in kwargs:
        callback = kwargs["callback"]
    else:
        callback = None
    return self._client.stream_download(response, callback=callback)
GetAttachment. [Preview API] Gets a specific attachment. :param str project: Project ID or project name :param int build_id: The ID of the build. :param str timeline_id: The ID of the timeline. :param str record_id: The ID of the timeline record. :param str type: The type of the attachment. :param str name: The name of the attachment. :rtype: object
9,474
def rewind(self, count):
    if count > self._index:
        raise ValueError("Can't rewind past beginning!")
    self._index -= count
Rewind index.
9,475
def get_language_from_abbr(self, abbr):
    # The dict keys were lost in extraction; 'language' and 'language_string'
    # are assumptions matching the Duolingo API payload.
    for language in self.user_data.languages:
        if language['language'] == abbr:
            return language['language_string']
    return None
Get language full name from abbreviation.
9,476
def collapse_if_tuple(abi):
    typ = abi["type"]
    if not typ.startswith("tuple"):
        return typ
    delimited = ",".join(collapse_if_tuple(c) for c in abi["components"])
    array_dim = typ[5:]
    collapsed = "({}){}".format(delimited, array_dim)
    return collapsed
Converts a tuple from a dict to a parenthesized list of its types. >>> from eth_utils.abi import collapse_if_tuple >>> collapse_if_tuple( ... { ... 'components': [ ... {'name': 'anAddress', 'type': 'address'}, ... {'name': 'anInt', 'type': 'uint256'}, ... {'name': 'someBytes', 'type': 'bytes'}, ... ], ... 'type': 'tuple', ... } ... ) '(address,uint256,bytes)'
9,477
def fw_romaji_lt(full, regular):
    lt = {}
    for n in range(len(full)):
        fw = full[n]
        reg = regular[n]
        lt[fw] = reg
    return lt
Generates a lookup table with the fullwidth rōmaji characters on the left side, and the regular rōmaji characters as the values.
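A usage sketch with two short hypothetical parallel lists:
full = ['ａ', 'ｂ', 'ｃ']       # fullwidth characters
regular = ['a', 'b', 'c']      # their regular equivalents
print(fw_romaji_lt(full, regular))  # -> {'ａ': 'a', 'ｂ': 'b', 'ｃ': 'c'}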
9,478
def _facet_counts(items):
    facets = {}
    for name, data in items:
        facets[name] = FacetResult(name, data)
    return facets
Returns facet counts as dict. Given the `items()` on the raw dictionary from Elasticsearch this processes it and returns the counts keyed on the facet name provided in the original query.
9,479
def license(self, value=None):
    # The metadata key literal was lost in extraction; 'license' is the
    # natural assumption here.
    if not (value is None):
        if (self.metadatatype == "native"):
            self.metadata['license'] = value
        else:
            self._license = value
    if (self.metadatatype == "native"):
        if 'license' in self.metadata:
            return self.metadata['license']
        else:
            return None
    else:
        return self._license
No arguments: Get the document's license from metadata Argument: Set the document's license in metadata
9,480
def next(self):
    code, message = self.command("NEXT")
    if code != 223:
        raise NNTPReplyError(code, message)
    parts = message.split(None, 3)
    try:
        article = int(parts[0])
        ident = parts[1]
    except (IndexError, ValueError):
        raise NNTPDataError("Invalid NEXT status")
    return article, ident
NEXT command.
9,481
def scroll(self):
    def __scroll(vertical, forward, steps=100):
        method = self.jsonrpc.scrollForward if forward else self.jsonrpc.scrollBackward
        return method(self.selector, vertical, steps)

    def __scroll_to_beginning(vertical, steps=100, max_swipes=1000):
        return self.jsonrpc.scrollToBeginning(self.selector, vertical,
                                              max_swipes, steps)

    def __scroll_to_end(vertical, steps=100, max_swipes=1000):
        return self.jsonrpc.scrollToEnd(self.selector, vertical,
                                        max_swipes, steps)

    def __scroll_to(vertical, **kwargs):
        return self.jsonrpc.scrollTo(self.selector, Selector(**kwargs),
                                     vertical)

    @param_to_property(
        dimention=["vert", "vertically", "vertical",
                   "horiz", "horizental", "horizentally"],
        action=["forward", "backward", "toBeginning", "toEnd", "to"])
    def _scroll(dimention="vert", action="forward", **kwargs):
        vertical = dimention in ["vert", "vertically", "vertical"]
        if action in ["forward", "backward"]:
            return __scroll(vertical, action == "forward", **kwargs)
        elif action == "toBeginning":
            return __scroll_to_beginning(vertical, **kwargs)
        elif action == "toEnd":
            return __scroll_to_end(vertical, **kwargs)
        elif action == "to":
            return __scroll_to(vertical, **kwargs)

    return _scroll
Perform scroll action. Usage: d().scroll(steps=50) # default vertically and forward d().scroll.horiz.forward(steps=100) d().scroll.vert.backward(steps=100) d().scroll.horiz.toBeginning(steps=100, max_swipes=100) d().scroll.vert.toEnd(steps=100) d().scroll.horiz.to(text="Clock")
9,482
def _get_linked_entities(self) -> Dict[str, Dict[str, Tuple[str, str, List[int]]]]:
    # Many set and string literals in this method were lost in extraction;
    # the default-value sets, entity-type names and the 'EA' string entries
    # below are assumptions reconstructed from the surviving helper names.
    current_tokenized_utterance = [] if not self.tokenized_utterances \
        else self.tokenized_utterances[-1]
    entity_linking_scores: Dict[str, Dict[str, Tuple[str, str, List[int]]]] = {}
    number_linking_scores: Dict[str, Tuple[str, str, List[int]]] = {}
    string_linking_scores: Dict[str, Tuple[str, str, List[int]]] = {}
    self.add_to_number_linking_scores({'0'},
                                      number_linking_scores,
                                      get_time_range_start_from_utterance,
                                      current_tokenized_utterance,
                                      'time_range_start')
    self.add_to_number_linking_scores({'1200'},
                                      number_linking_scores,
                                      get_time_range_end_from_utterance,
                                      current_tokenized_utterance,
                                      'time_range_end')
    self.add_to_number_linking_scores({'0', '1', '60', '41'},
                                      number_linking_scores,
                                      get_numbers_from_utterance,
                                      current_tokenized_utterance,
                                      'number')
    self.add_to_number_linking_scores({'0'},
                                      number_linking_scores,
                                      get_costs_from_utterance,
                                      current_tokenized_utterance,
                                      'fare_round_trip_cost')
    self.add_to_number_linking_scores({'0'},
                                      number_linking_scores,
                                      get_costs_from_utterance,
                                      current_tokenized_utterance,
                                      'fare_one_direction_cost')
    self.add_to_number_linking_scores({'0'},
                                      number_linking_scores,
                                      get_flight_numbers_from_utterance,
                                      current_tokenized_utterance,
                                      'flight_number')
    self.add_dates_to_number_linking_scores(number_linking_scores,
                                            current_tokenized_utterance)
    string_linking_dict: Dict[str, List[int]] = {}
    for tokenized_utterance in self.tokenized_utterances:
        string_linking_dict = get_strings_from_utterance(tokenized_utterance)
    strings_list = AtisWorld.sql_table_context.strings_list
    strings_list.append(("flight_airline_code_string -> [\"'EA'\"]", "'EA'"))
    strings_list.append(("airline_airline_code_string -> [\"'EA'\"]", "'EA'"))
    for string in strings_list:
        entity_linking = [0 for token in current_tokenized_utterance]
        for token_index in string_linking_dict.get(string[1], []):
            entity_linking[token_index] = 1
        action = string[0]
        string_linking_scores[action] = (action.split(' -> ')[0], string[1],
                                         entity_linking)
    entity_linking_scores['number'] = number_linking_scores
    entity_linking_scores['string'] = string_linking_scores
    return entity_linking_scores
This method gets entities from the current utterance and finds which tokens they are linked to. The entities are divided into two main groups, ``numbers`` and ``strings``. We rely on these entities later for updating the valid actions and the grammar.
9,483
def log(self, *args, **kwargs):
    # The kwargs key was lost in extraction; 'exc' is an assumption
    # consistent with the exc_info() handling below.
    func = inspect.currentframe().f_back.f_code
    if 'exc' in kwargs and kwargs['exc'] is True:
        exc_type, exc_obj, exc_tb = exc_info()
        line_no = exc_tb.tb_lineno
        args += traceback.extract_tb(exc_tb),
    else:
        line_no = func.co_firstlineno
    sourceloc = "[%.10s@%s:%i]" % (
        func.co_name,
        func.co_filename,
        line_no
    )
    hfoslog(sourceloc=sourceloc, emitter=self.uniquename, *args, **kwargs)
Log a statement from this component
9,484
def _fetchSequence(ac, startIndex=None, endIndex=None):
    urlFmt = (
        "http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?"
        "db=nucleotide&id={ac}&rettype=fasta&retmode=text")
    if startIndex is None or endIndex is None:
        url = urlFmt.format(ac=ac)
    else:
        urlFmt += "&seq_start={start}&seq_stop={stop}"
        url = urlFmt.format(ac=ac, start=startIndex + 1, stop=endIndex)
    resp = requests.get(url)
    resp.raise_for_status()
    seqlines = resp.content.splitlines()[1:]
    print("{ac}[{s},{e}) => {n} lines ({u})".format(
        ac=ac, s=startIndex, e=endIndex, n=len(seqlines), u=url))
    return seqlines
Fetch sequences from NCBI using the efetch interface. An interbase interval may be optionally provided with startIndex and endIndex. NCBI efetch will return just the requested subsequence, which might greatly reduce payload sizes (especially with chromosome-scale sequences). When wrapped is True, return list of sequence lines rather than concatenated sequence. >>> len(_fetchSequence('NP_056374.2')) 1596 Pass the desired interval rather than using Python's [] slice operator. >>> _fetchSequence('NP_056374.2',0,10) 'MESRETLSSS' >>> _fetchSequence('NP_056374.2')[0:10] 'MESRETLSSS'
9,485
def prepend_scheme_if_needed(url, new_scheme):
    scheme, netloc, path, params, query, fragment = urlparse(url, new_scheme)
    if not netloc:
        netloc, path = path, netloc
    return urlunparse((scheme, netloc, path, params, query, fragment))
Given a URL that may or may not have a scheme, prepend the given scheme. Does not replace a present scheme with the one provided as an argument. :rtype: str
9,486
def _combine_variant_collections(cls, combine_fn, variant_collections, kwargs):
    kwargs["variants"] = combine_fn(*[set(vc) for vc in variant_collections])
    kwargs["source_to_metadata_dict"] = cls._merge_metadata_dictionaries(
        [vc.source_to_metadata_dict for vc in variant_collections])
    kwargs["sources"] = set.union(*([vc.sources for vc in variant_collections]))
    for key, value in variant_collections[0].to_dict().items():
        if key not in kwargs:
            kwargs[key] = value
    return cls(**kwargs)
Create a single VariantCollection from multiple different collections. Parameters ---------- cls : class Should be VariantCollection combine_fn : function Function which takes any number of sets of variants and returns some combination of them (typically union or intersection). variant_collections : tuple of VariantCollection kwargs : dict Optional dictionary of keyword arguments to pass to the initializer for VariantCollection.
9,487
def eth_call(self, from_, to=None, gas=None, gas_price=None, value=None,
             data=None, block=BLOCK_TAG_LATEST):
    # The dict keys and RPC method name were lost in extraction; the names
    # below follow the Ethereum JSON-RPC spec.
    block = validate_block(block)
    obj = {}
    obj['from'] = from_
    if to is not None:
        obj['to'] = to
    if gas is not None:
        obj['gas'] = hex(gas)
    if gas_price is not None:
        obj['gasPrice'] = hex(gas_price)
    if value is not None:
        obj['value'] = hex(ether_to_wei(value))
    if data is not None:
        obj['data'] = data
    return (yield from self.rpc_call('eth_call', [obj, block]))
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_call :param from_: From account address :type from_: str :param to: To account address (optional) :type to: str :param gas: Gas amount for current transaction (optional) :type gas: int :param gas_price: Gas price for current transaction (optional) :type gas_price: int :param value: Amount of ether to send (optional) :type value: int :param data: Additional data for transaction (optional) :type data: hex :param block: Block tag or number (optional) :type block: int or BLOCK_TAGS :rtype: str
9,488
def fit_spectrum(self, specFunc, initPars, freePars=None):
    if not isinstance(specFunc, SEDFunctor):
        specFunc = self.create_functor(specFunc, initPars,
                                       scale=specFunc.scale)
    if freePars is None:
        freePars = np.empty(len(initPars), dtype=bool)
        freePars.fill(True)
    initPars = np.array(initPars)
    freePars = np.array(freePars)

    def fToMin(x):
        xp = np.array(specFunc.params)
        xp[freePars] = x
        return self.__call__(specFunc(xp))

    result = fmin(fToMin, initPars[freePars], disp=False, xtol=1e-6)
    out_pars = specFunc.params
    out_pars[freePars] = np.array(result)
    spec_vals = specFunc(out_pars)
    spec_npred = np.zeros(len(spec_vals))
    if isinstance(specFunc, spectrum.SEDFluxFunctor):
        spec_npred = spec_vals * self.refSpec.ref_npred / self.refSpec.ref_flux
    elif isinstance(specFunc, spectrum.SEDEFluxFunctor):
        spec_npred = spec_vals * self.refSpec.ref_npred / self.refSpec.ref_eflux
    ts_spec = self.TS_spectrum(spec_vals)
    chi2_vals = self.chi2_vals(spec_vals)
    chi2_spec = np.sum(chi2_vals)
    pval_spec = stats.distributions.chi2.sf(chi2_spec, len(spec_vals))
    return dict(params=out_pars, spec_vals=spec_vals,
                spec_npred=spec_npred, ts_spec=ts_spec,
                chi2_spec=chi2_spec, chi2_vals=chi2_vals,
                pval_spec=pval_spec)
Fit for the free parameters of a spectral function Parameters ---------- specFunc : `~fermipy.spectrum.SpectralFunction` The Spectral Function initPars : `~numpy.ndarray` The initial values of the parameters freePars : `~numpy.ndarray` Boolean array indicating which parameters should be free in the fit. Returns ------- params : `~numpy.ndarray` Best-fit parameters. spec_vals : `~numpy.ndarray` The values of the best-fit spectral model in each energy bin. ts_spec : float The TS of the best-fit spectrum chi2_vals : `~numpy.ndarray` Array of chi-squared values for each energy bin. chi2_spec : float Global chi-squared value for the sum of all energy bins. pval_spec : float p-value of chi-squared for the best-fit spectrum.
9,489
def _initialize(self):
    self.y_transform = self.y - numpy.mean(self.y)
    self.y_transform /= numpy.std(self.y_transform)
    self.x_transforms = [numpy.zeros(len(self.y)) for _xi in self.x]
    self._compute_sorted_indices()
Set up and normalize initial data once input data is specified.
9,490
def expr_code(self, expr):
    if "|" in expr:
        pipes = expr.split("|")
        code = self.expr_code(pipes[0])
        for func in pipes[1:]:
            self.all_vars.add(func)
            code = "c_%s(%s)" % (func, code)
    elif "." in expr:
        dots = expr.split(".")
        code = self.expr_code(dots[0])
        args = [repr(d) for d in dots[1:]]
        code = "dot(%s, %s)" % (code, ", ".join(args))
    else:
        self.all_vars.add(expr)
        code = "c_%s" % expr
    return code
Generate a Python expression for `expr`.
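Illustrative translations this compiler produces (the engine instance here is hypothetical):
# engine.expr_code("user.name")        -> "dot(c_user, 'name')"
# engine.expr_code("name|upper|strip") -> "c_strip(c_upper(c_name))"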
9,491
def delete(self, doc_id: str) -> bool:
    try:
        self.instance.delete(self.index, self.doc_type, doc_id)
    except RequestError as ex:
        logging.error(ex)
        return False
    else:
        return True
Delete a document with id.
9,492
def _add_childTnLst(self):
    self.remove(self.get_or_add_timing())
    timing = parse_xml(self._childTnLst_timing_xml())
    self._insert_timing(timing)
    # The XPath literal was lost in extraction; restored from the docstring's path.
    return timing.xpath('./p:tnLst/p:par/p:cTn/p:childTnLst')[0]
Add `./p:timing/p:tnLst/p:par/p:cTn/p:childTnLst` descendant. Any existing `p:timing` child element is ruthlessly removed and replaced.
9,493
def ipa_substrings(unicode_string, single_char_parsing=False):
    return split_using_dictionary(
        string=unicode_string,
        dictionary=UNICODE_TO_IPA,
        max_key_length=UNICODE_TO_IPA_MAX_KEY_LENGTH,
        single_char_parsing=single_char_parsing
    )
Return a list of (non-empty) substrings of the given string, where each substring is either: 1. the longest Unicode string starting at the current index representing a (known) valid IPA character, or 2. a single Unicode character (which is not IPA valid). If ``single_char_parsing`` is ``False``, parse the string one Unicode character at a time, that is, do not perform the greedy parsing. For example, if ``s = u"\u006e\u0361\u006d"``, with ``single_char_parsing=True`` the result will be a list with a single element: ``[u"\u006e\u0361\u006d"]``, while ``single_char_parsing=False`` will yield a list with three elements: ``[u"\u006e", u"\u0361", u"\u006d"]``. Return ``None`` if ``unicode_string`` is ``None``. :param str unicode_string: the Unicode string to be parsed :param bool single_char_parsing: if ``True``, parse one Unicode character at a time :rtype: list of str
9,494
def cosinebell(n, fraction):
    mask = np.ones(n)
    nmasked = int(fraction * n)
    for i in range(nmasked):
        yval = 0.5 * (1 - np.cos(np.pi * float(i) / float(nmasked)))
        mask[i] = yval
        mask[n - i - 1] = yval
    return mask
Return a cosine bell spanning n pixels, masking a fraction of pixels Parameters ---------- n : int Number of pixels. fraction : float Length fraction over which the data will be masked.
9,495
def polygons_obb(polygons):
    rectangles = [None] * len(polygons)
    transforms = [None] * len(polygons)
    for i, p in enumerate(polygons):
        transforms[i], rectangles[i] = polygon_obb(p)
    return np.array(transforms), np.array(rectangles)
Find the OBBs for a list of shapely.geometry.Polygons
9,496
def sed_or_dryrun(*args, **kwargs):
    # Many literals in this wrapper were lost in extraction; the kwarg names
    # and the sed command template below are assumptions based on Fabric's
    # contrib.files.sed signature.
    dryrun = get_dryrun(kwargs.get('dryrun'))
    if 'dryrun' in kwargs:
        del kwargs['dryrun']
    use_sudo = kwargs.get('use_sudo', False)
    if dryrun:
        context = dict(
            filename=args[0] if len(args) >= 1 else kwargs['filename'],
            before=args[1] if len(args) >= 2 else kwargs['before'],
            after=args[2] if len(args) >= 3 else kwargs['after'],
            backup=args[3] if len(args) >= 4 else kwargs.get('backup', '.bak'),
            limit=kwargs.get('limit', ''),
        )
        cmd = ("sed -i{backup} -r -e '/{limit}/ "
               "s/{before}/{after}/g' {filename}").format(**context)
        cmd_run = 'sudo' if use_sudo else 'run'
        if BURLAP_COMMAND_PREFIX:
            print('%s %s: %s' % (render_command_prefix(), cmd_run, cmd))
        else:
            print(cmd)
    else:
        from fabric.contrib.files import sed
        sed(*args, **kwargs)
Wrapper around Fabric's contrib.files.sed() to give it a dryrun option. http://docs.fabfile.org/en/0.9.1/api/contrib/files.html#fabric.contrib.files.sed
9,497
def combined_analysis(using):
    python_analysis = collect_analysis(using)
    es_analysis = existing_analysis(using)
    if es_analysis == DOES_NOT_EXIST:
        return python_analysis
    for section in python_analysis:
        if section not in es_analysis:
            es_analysis[section] = python_analysis[section]
        subdict_python = python_analysis[section]
        subdict_es = es_analysis[section]
        for name in subdict_python:
            subdict_es[name] = subdict_python[name]
    return es_analysis
Combine the analysis in ES with the analysis defined in Python. The one in Python takes precedence
9,498
def type_map(gtype, fn):
    # The callback type string was lost in extraction; 'VipsTypeMap2Fn'
    # matches the vips_type_map signature and is an assumption.
    cb = ffi.callback('VipsTypeMap2Fn', fn)
    return vips_lib.vips_type_map(gtype, cb, ffi.NULL, ffi.NULL)
Map fn over all child types of gtype.
9,499
def clean_restricted_chars(path, restricted_chars=restricted_chars):
    # The replacement literal was lost in extraction; removing the character
    # with '' is the natural assumption.
    for character in restricted_chars:
        path = path.replace(character, '')
    return path
Get path without restricted characters. :param path: path :return: path without restricted characters :rtype: str or unicode (depending on given path)
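A usage sketch with a hypothetical restricted set:
print(clean_restricted_chars('a<b>.txt', restricted_chars='<>'))  # -> 'ab.txt'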