Dataset preview: 389k rows. Columns: an unnamed int64 row index (0 to 389k), code (string, lengths 26 to 79.6k), and docstring (string, lengths 1 to 46.9k).
381,500
def slideshow(self, **kwargs): for label, cycle in self.items(): cycle.plot(title=label, tight_layout=True)
Produce a slide show of the different cycles, one plot per cycle.
381,501
def get_year(self): year = super(BuildableDayArchiveView, self).get_year() fmt = self.get_year_format() dt = date(int(year), 1, 1) return dt.strftime(fmt)
Return the year from the database in the format expected by the URL.
381,502
def fetch_aliases(self, seq_id, current_only=True, translate_ncbi_namespace=None): return [dict(r) for r in self.find_aliases(seq_id=seq_id, current_only=current_only, translate_ncbi_namespace=translate_ncbi_namespace)]
Return a list of alias annotation records (dicts) for a given seq_id.
381,503
async def observer_orm_notify(self, message): @database_sync_to_async def get_observers(table): return list( Observer.objects.filter( dependencies__table=table, subscribers__isnull=False ) .distinct() .values_list('pk', flat=True) ) observers_ids = await get_observers(message['table']) for observer_id in observers_ids: await self.channel_layer.send( CHANNEL_WORKER, {'type': TYPE_EVALUATE, 'observer': observer_id} )
Process notification from ORM.
381,504
def _from_lattice_vectors(self): degreeConversion = 180.0 / np.pi vector_magnitudes = np.linalg.norm(self.lattice_vectors, axis=1) a_dot_b = np.dot(self.lattice_vectors[0], self.lattice_vectors[1]) b_dot_c = np.dot(self.lattice_vectors[1], self.lattice_vectors[2]) a_dot_c = np.dot(self.lattice_vectors[0], self.lattice_vectors[2]) alpha_raw = b_dot_c / (vector_magnitudes[1] * vector_magnitudes[2]) beta_raw = a_dot_c / (vector_magnitudes[0] * vector_magnitudes[2]) gamma_raw = a_dot_b / (vector_magnitudes[0] * vector_magnitudes[1]) alpha = np.arccos(np.clip(alpha_raw, -1.0, 1.0)) * degreeConversion beta = np.arccos(np.clip(beta_raw, -1.0, 1.0)) * degreeConversion gamma = np.arccos(np.clip(gamma_raw, -1.0, 1.0)) * degreeConversion return np.asarray([alpha, beta, gamma], dtype=np.float64)
Calculate the angles between the vectors that define the lattice. _from_lattice_vectors will calculate the angles alpha, beta, and gamma from the Lattice object attribute lattice_vectors.
381,505
def _logger_levels(self): return { 'debug': logging.DEBUG, 'info': logging.INFO, 'warning': logging.WARNING, 'error': logging.ERROR, 'critical': logging.CRITICAL, }
Return log levels.
381,506
def get_conn(self): service = self.get_service() project = self._get_field('project') return BigQueryConnection( service=service, project_id=project, use_legacy_sql=self.use_legacy_sql, location=self.location, num_retries=self.num_retries )
Returns a BigQuery PEP 249 connection object.
381,507
def _prepare_app(self, app): obj = app[key] for name, pattern in obj.items(): obj[name] = self._prepare_pattern(obj[name])
Normalize app data, preparing it for the detection phase.
381,508
def run_mapper(self, stdin=sys.stdin, stdout=sys.stdout): self.init_hadoop() self.init_mapper() outputs = self._map_input((line[:-1] for line in stdin)) if self.reducer == NotImplemented: self.writer(outputs, stdout) else: self.internal_writer(outputs, stdout)
Run the mapper on the hadoop node.
381,509
def from_onnx(self, graph): self.model_metadata = self.get_graph_metadata(graph) for init_tensor in graph.initializer: if not init_tensor.name.strip(): raise ValueError("Tensor's name is required.") self._params[init_tensor.name] = self._parse_array(init_tensor) for i in graph.input: if i.name in self._params: self._nodes[i.name] = symbol.Variable(name=i.name, shape=self._params[i.name].shape) else: self._nodes[i.name] = symbol.Variable(name=i.name) for node in graph.node: op_name = node.op_type node_name = node.name.strip() node_name = node_name if node_name else None onnx_attr = self._parse_attr(node.attribute) inputs = [self._nodes[i] for i in node.input] mxnet_sym = self._convert_operator(node_name, op_name, onnx_attr, inputs) for k, i in zip(list(node.output), range(len(mxnet_sym.list_outputs()))): self._nodes[k] = mxnet_sym[i] for args in mxnet_sym.list_arguments(): if args in self._params: self.arg_dict.update({args: nd.array(self._params[args])}) for aux in mxnet_sym.list_auxiliary_states(): if aux in self._params: self.aux_dict.update({aux: nd.array(self._params[aux])}) out = [self._nodes[i.name] for i in graph.output] if len(out) > 1: out = symbol.Group(out) else: out = out[0] return out, self.arg_dict, self.aux_dict
Construct symbol from onnx graph. Parameters ---------- graph : onnx protobuf object The loaded onnx graph Returns ------- sym : symbol.Symbol The returned mxnet symbol arg_params : dict A dict of name: nd.array pairs, used as pretrained argument weights aux_params : dict A dict of name: nd.array pairs for the auxiliary states
381,510
def multisorted(items, *keys): if len(keys) == 0: keys = [asc()] for key in reversed(keys): items = sorted(items, key=key.func, reverse=key.reverse) return items
Sort by multiple attributes. Args: items: An iterable series to be sorted. *keys: Key objects which extract key values from the items. The first key will be the most significant, and the last key the least significant. If no key functions are provided, the items will be sorted in ascending natural order. Returns: A list of items sorted according to keys.
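A brief usage sketch. The asc/desc helpers below are hypothetical stand-ins, modeled on the asc() referenced in the code above: each pairs a key extractor with a reverse flag.

from collections import namedtuple
from operator import attrgetter

# Stand-ins for the key objects multisorted expects: each exposes
# .func (the key extractor) and .reverse (the sort direction).
SortKey = namedtuple('SortKey', ['func', 'reverse'])
def asc(func=lambda x: x): return SortKey(func, False)
def desc(func=lambda x: x): return SortKey(func, True)

Person = namedtuple('Person', ['name', 'age'])
people = [Person('bo', 30), Person('al', 30), Person('cy', 25)]
# Age ascending, then name descending within equal ages (relies on sort stability).
multisorted(people, asc(attrgetter('age')), desc(attrgetter('name')))
# -> [Person(name='cy', age=25), Person(name='bo', age=30), Person(name='al', age=30)]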
381,511
def weight_from_comm(self, v, comm): return _c_louvain._MutableVertexPartition_weight_from_comm(self._partition, v, comm)
The total number of edges (or sum of weights) to node ``v`` from community ``comm``. See Also -------- :func:`~VertexPartition.MutableVertexPartition.weight_to_comm`
381,512
def merge_dicts(*dicts, **kwargs): cls = kwargs.get("cls", None) if cls is None: for d in dicts: if isinstance(d, dict): cls = d.__class__ break else: raise TypeError("cannot infer cls as none of the passed objects is of type dict") merged_dict = cls() for d in dicts: if isinstance(d, dict): merged_dict.update(d) return merged_dict
merge_dicts(*dicts, cls=None) Takes multiple *dicts* and returns a single merged dict. The merging takes place in order of the passed dicts and therefore, values of rear objects have precedence in case of field collisions. The class of the returned merged dict is configurable via *cls*. If it is *None*, the class is inferred from the first dict object in *dicts*.
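A quick illustration of the precedence and cls behavior described above:

from collections import OrderedDict

a = {'x': 1, 'y': 2}
b = OrderedDict(y=20, z=30)
merge_dicts(a, b)                   # {'x': 1, 'y': 20, 'z': 30}; later dicts win, cls inferred from a
merge_dicts(a, b, cls=OrderedDict)  # same content, but returned as an OrderedDict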
381,513
def _to_dict(self): _dict = {} if hasattr(self, 'sentence') and self.sentence is not None: _dict['sentence'] = self.sentence if hasattr(self, 'subject') and self.subject is not None: _dict['subject'] = self.subject._to_dict() if hasattr(self, 'action') and self.action is not None: _dict['action'] = self.action._to_dict() if hasattr(self, 'object') and self.object is not None: _dict['object'] = self.object._to_dict() return _dict
Return a json dictionary representing this model.
381,514
def _parse_v_parameters(val_type, val, filename, param_name): if val_type == "logical": val = [i == "T" for i in val.split()] elif val_type == "int": try: val = [int(i) for i in val.split()] except ValueError: val = _parse_from_incar(filename, param_name) if val is None: raise IOError("Error in parsing vasprun.xml") elif val_type == "string": val = val.split() else: try: val = [float(i) for i in val.split()] except ValueError: val = _parse_from_incar(filename, param_name) if val is None: raise IOError("Error in parsing vasprun.xml") return val
Helper function to convert a Vasprun array-type parameter into the proper type. Boolean, int and float types are converted. Args: val_type: Value type parsed from vasprun.xml. val: Actual string value parsed from vasprun.xml. filename: Full path of vasprun.xml. Used for robust error handling. E.g., if vasprun.xml contains \\*\\*\\* for some Incar parameters, the code will try to read from an INCAR file present in the same directory. param_name: Name of parameter. Returns: Parsed value.
381,515
def get_nested_attribute(obj, attribute): parent, attr = resolve_nested_attribute(obj, attribute) if parent is not None: attr_value = getattr(parent, attr) else: attr_value = None return attr_value
Returns the value of the given (possibly dotted) attribute for the given object. If any of the parents on the nested attribute's name path are `None`, the value of the nested attribute is also assumed as `None`. :raises AttributeError: If any attribute access along the attribute path fails with an `AttributeError`.
381,516
def check_length_of_shape_or_intercept_names(name_list, num_alts, constrained_param, list_title): if len(name_list) != (num_alts - constrained_param): msg_1 = "{} is of the wrong length:".format(list_title) msg_2 = "len({}) == {}".format(list_title, len(name_list)) correct_length = num_alts - constrained_param msg_3 = "The correct length is: {}".format(correct_length) total_msg = "\n".join([msg_1, msg_2, msg_3]) raise ValueError(total_msg) return None
Ensures that the length of the parameter names matches the number of parameters that will be estimated. Will raise a ValueError otherwise. Parameters ---------- name_list : list of strings. Each element should be the name of a parameter that is to be estimated. num_alts : int. Should be the total number of alternatives in the universal choice set for this dataset. constrained_param : {0, 1, True, False} Indicates whether (1 or True) or not (0 or False) one of the type of parameters being estimated will be constrained. For instance, constraining one of the intercepts. list_title : str. Should specify the type of parameters whose names are being checked. Examples include 'intercept_params' or 'shape_params'. Returns ------- None.
381,517
def window_open_config(self, temperature, duration): _LOGGER.debug("Window open config, temperature: %s duration: %s", temperature, duration) self._verify_temperature(temperature) if duration.seconds < 0 or duration.seconds > 3600: raise ValueError value = struct.pack('BBB', PROP_WINDOW_OPEN_CONFIG, int(temperature * 2), int(duration.seconds / 300)) self._conn.make_request(PROP_WRITE_HANDLE, value)
Configures the window open behavior. The duration is specified in 5 minute increments.
381,518
def QA_SU_save_index_min(engine, client=DATABASE): engine = select_save_engine(engine) engine.QA_SU_save_index_min(client=client)
Save index minute data. Arguments: engine -- name of the storage engine, resolved via select_save_engine. Keyword Arguments: client -- database client (default: DATABASE)
381,519
def write_json(obj, path): obj_str = text_type(json.dumps(obj, indent=4, separators=(",", ": "), ensure_ascii=False)) helpers.ensure_dir_exists(os.path.dirname(path)) with io.open(path, "w", encoding="utf-8") as target: target.write(obj_str)
Write an object to a JSON file with UTF-8 encoding.
381,520
def random_choice(sequence): return random.choice(tuple(sequence) if isinstance(sequence, set) else sequence)
Same as :meth:`random.choice`, but also supports :class:`set` type to be passed as sequence.
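Usage mirrors random.choice, except that sets are accepted as well:

import random
random.seed(0)
random_choice([10, 20, 30])     # behaves exactly like random.choice on a sequence
random_choice({'a', 'b', 'c'})  # a set is converted to a tuple before choosing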
381,521
def create_parser(subparsers): parser = subparsers.add_parser( , help=, usage="%(prog)s", add_help=False) args.add_titles(parser) parser.set_defaults(subcommand=) return parser
Create the argument parser for this subcommand.
381,522
def quit(self): if self.is_running == True: warning_planarrad_running = QtGui.QMessageBox.warning(self.ui.quit, , "PlanarRad is running. Stop it before quit !", QtGui.QMessageBox.Ok) else: quit = QtGui.QMessageBox.question(self.ui.quit, , "Are you sure to quit ?", QtGui.QMessageBox.Yes, QtGui.QMessageBox.No) if quit == QtGui.QMessageBox.Yes: QtGui.qApp.quit()
Quit PlanarRad, checking first whether PlanarRad is running.
381,523
def setRandomParams(self): params = sp.randn(self.getNumberParams()) self.setParams(params)
Set random hyperparameters.
381,524
def _submit_request(self, url, params=None, data=None, headers=None, method="GET"): if headers is None: headers = {} if self._auth_header is not None: headers['Authorization'] = self._auth_header try: if method == 'POST': result = requests.post( url, params=params, data=data, headers=headers) elif method == 'GET': result = requests.get( url, params=params, data=data, headers=headers) result.raise_for_status() return (result.status_code, result.json()) except requests.exceptions.HTTPError as e: return (e.response.status_code, e.response.reason) except RemoteDisconnected as e: raise CliException(e) except (requests.exceptions.MissingSchema, requests.exceptions.InvalidURL) as e: raise CliException(e) except requests.exceptions.ConnectionError as e: raise CliException( 'Unable to connect to "{}": make sure the URL is correct'.format(self._base_url))
Submits the given request, and handles the errors appropriately. Args: url (str): the request to send. params (dict): params to be passed along to get/post data (bytes): the data to include in the request. headers (dict): the headers to include in the request. method (str): the method to use for the request, "POST" or "GET". Returns: tuple of (int, str): The response status code and the json parsed body, or the error message. Raises: `CliException`: If any issues occur with the URL.
381,525
def get_suggested_field_names(type_: GraphQLOutputType, field_name: str) -> List[str]: if is_object_type(type_) or is_interface_type(type_): possible_field_names = list(type_.fields) return suggestion_list(field_name, possible_field_names) return []
Get a list of suggested field names. For the field name provided, determine if there are any similar field names that may be the result of a typo.
381,526
def get_tokens_list(self, registry_address: PaymentNetworkID): tokens_list = views.get_token_identifiers( chain_state=views.state_from_raiden(self.raiden), payment_network_id=registry_address, ) return tokens_list
Returns a list of tokens the node knows about
381,527
def load_readers(filenames=None, reader=None, reader_kwargs=None, ppp_config_dir=None): reader_instances = {} reader_kwargs = reader_kwargs or {} reader_kwargs_without_filter = reader_kwargs.copy() reader_kwargs_without_filter.pop('filter_parameters', None) if ppp_config_dir is None: ppp_config_dir = get_environ_config_dir() if not filenames and not reader: return {} elif reader and filenames is not None and not filenames: raise ValueError("'filenames' was provided but is empty.") elif not filenames: LOG.warning("'filenames' required to create readers and load data") return {} elif reader is None and isinstance(filenames, dict): reader = list(filenames.keys()) remaining_filenames = set(f for fl in filenames.values() for f in fl) elif reader and isinstance(filenames, dict): filenames = filenames[reader] remaining_filenames = set(filenames or []) else: remaining_filenames = set(filenames or []) for idx, reader_configs in enumerate(configs_for_reader(reader, ppp_config_dir)): if isinstance(filenames, dict): readers_files = set(filenames[reader[idx]]) else: readers_files = remaining_filenames try: reader_instance = load_reader(reader_configs, **reader_kwargs) except (KeyError, IOError, yaml.YAMLError) as err: LOG.info('Cannot use %s', str(reader_configs)) LOG.debug(str(err)) continue if readers_files: loadables = reader_instance.select_files_from_pathnames(readers_files) if loadables: reader_instance.create_filehandlers(loadables, fh_kwargs=reader_kwargs_without_filter) reader_instances[reader_instance.name] = reader_instance remaining_filenames -= set(loadables) if not remaining_filenames: break if remaining_filenames: LOG.warning("Don't know how to open the following files: {}".format(str(remaining_filenames))) if not reader_instances: raise ValueError("No supported files found") elif not any(list(r.available_dataset_ids) for r in reader_instances.values()): raise ValueError("No dataset could be loaded. Either missing " "requirements (such as Epilog, Prolog) or none of the " "provided files match the filter parameters.") return reader_instances
Create specified readers and assign files to them. Args: filenames (iterable or dict): A sequence of files that will be used to load data from. A ``dict`` object should map reader names to a list of filenames for that reader. reader (str or list): The name of the reader to use for loading the data or a list of names. reader_kwargs (dict): Keyword arguments to pass to specific reader instances. ppp_config_dir (str): The directory containing the configuration files for satpy. Returns: Dictionary mapping reader name to reader instance
381,528
def disable_scanners_by_ids(self, scanner_ids): scanner_ids = ','.join(scanner_ids) self.logger.debug('Disabling scanners with IDs {0}'.format(scanner_ids)) return self.zap.ascan.disable_scanners(scanner_ids)
Disable a list of scanner IDs.
381,529
def add_audio(self, customization_id, audio_name, audio_resource, contained_content_type=None, allow_overwrite=None, content_type=None, **kwargs): if customization_id is None: raise ValueError('customization_id must be provided') if audio_name is None: raise ValueError('audio_name must be provided') if audio_resource is None: raise ValueError('audio_resource must be provided') headers = { 'Contained-Content-Type': contained_content_type, 'Content-Type': content_type } if 'headers' in kwargs: headers.update(kwargs.get('headers')) sdk_headers = get_sdk_headers('speech_to_text', 'V1', 'add_audio') headers.update(sdk_headers) params = {'allow_overwrite': allow_overwrite} data = audio_resource url = '/v1/acoustic_customizations/{0}/audio/{1}'.format( *self._encode_path_vars(customization_id, audio_name)) response = self.request( method='POST', url=url, headers=headers, params=params, data=data, accept_json=True) return response
Add an audio resource. Adds an audio resource to a custom acoustic model. Add audio content that reflects the acoustic characteristics of the audio that you plan to transcribe. You must use credentials for the instance of the service that owns a model to add an audio resource to it. Adding audio data does not affect the custom acoustic model until you train the model for the new data by using the **Train a custom acoustic model** method. You can add individual audio files or an archive file that contains multiple audio files. Adding multiple audio files via a single archive file is significantly more efficient than adding each file individually. You can add audio resources in any format that the service supports for speech recognition. You can use this method to add any number of audio resources to a custom model by calling the method once for each audio or archive file. But the addition of one audio resource must be fully complete before you can add another. You must add a minimum of 10 minutes and a maximum of 100 hours of audio that includes speech, not just silence, to a custom acoustic model before you can train it. No audio resource, audio- or archive-type, can be larger than 100 MB. To add an audio resource that has the same name as an existing audio resource, set the `allow_overwrite` parameter to `true`; otherwise, the request fails. The method is asynchronous. It can take several seconds to complete depending on the duration of the audio and, in the case of an archive file, the total number of audio files being processed. The service returns a 201 response code if the audio is valid. It then asynchronously analyzes the contents of the audio file or files and automatically extracts information about the audio such as its length, sampling rate, and encoding. You cannot submit requests to add additional audio resources to a custom acoustic model, or to train the model, until the service's analysis of all audio files for the current request completes. To determine the status of the service's analysis of the audio, use the **Get an audio resource** method to poll the status of the audio. The method accepts the customization ID of the custom model and the name of the audio resource, and it returns the status of the resource. Use a loop to check the status of the audio every few seconds until it becomes `ok`. **See also:** [Add audio to the custom acoustic model](https://cloud.ibm.com/docs/services/speech-to-text/acoustic-create.html#addAudio). ### Content types for audio-type resources You can add an individual audio file in any format that the service supports for speech recognition. For an audio-type resource, use the `Content-Type` parameter to specify the audio format (MIME type) of the audio file, including specifying the sampling rate, channels, and endianness where indicated. * `audio/alaw` (Specify the sampling rate (`rate`) of the audio.) * `audio/basic` (Use only with narrowband models.) * `audio/flac` * `audio/g729` (Use only with narrowband models.) * `audio/l16` (Specify the sampling rate (`rate`) and optionally the number of channels (`channels`) and endianness (`endianness`) of the audio.) * `audio/mp3` * `audio/mpeg` * `audio/mulaw` (Specify the sampling rate (`rate`) of the audio.) * `audio/ogg` (The service automatically detects the codec of the input audio.) * `audio/ogg;codecs=opus` * `audio/ogg;codecs=vorbis` * `audio/wav` (Provide audio with a maximum of nine channels.) * `audio/webm` (The service automatically detects the codec of the input audio.) 
* `audio/webm;codecs=opus` * `audio/webm;codecs=vorbis` The sampling rate of an audio file must match the sampling rate of the base model for the custom model: for broadband models, at least 16 kHz; for narrowband models, at least 8 kHz. If the sampling rate of the audio is higher than the minimum required rate, the service down-samples the audio to the appropriate rate. If the sampling rate of the audio is lower than the minimum required rate, the service labels the audio file as `invalid`. **See also:** [Audio formats](https://cloud.ibm.com/docs/services/speech-to-text/audio-formats.html). ### Content types for archive-type resources You can add an archive file (**.zip** or **.tar.gz** file) that contains audio files in any format that the service supports for speech recognition. For an archive-type resource, use the `Content-Type` parameter to specify the media type of the archive file: * `application/zip` for a **.zip** file * `application/gzip` for a **.tar.gz** file. When you add an archive-type resource, the `Contained-Content-Type` header is optional depending on the format of the files that you are adding: * For audio files of type `audio/alaw`, `audio/basic`, `audio/l16`, or `audio/mulaw`, you must use the `Contained-Content-Type` header to specify the format of the contained audio files. Include the `rate`, `channels`, and `endianness` parameters where necessary. In this case, all audio files contained in the archive file must have the same audio format. * For audio files of all other types, you can omit the `Contained-Content-Type` header. In this case, the audio files contained in the archive file can have any of the formats not listed in the previous bullet. The audio files do not need to have the same format. Do not use the `Contained-Content-Type` header when adding an audio-type resource. ### Naming restrictions for embedded audio files The name of an audio file that is embedded within an archive-type resource must meet the following restrictions: * Include a maximum of 128 characters in the file name; this includes the file extension. * Do not include spaces, slashes, or backslashes in the file name. * Do not use the name of an audio file that has already been added to the custom model as part of an archive-type resource. :param str customization_id: The customization ID (GUID) of the custom acoustic model that is to be used for the request. You must make the request with credentials for the instance of the service that owns the custom model. :param str audio_name: The name of the new audio resource for the custom acoustic model. Use a localized name that matches the language of the custom model and reflects the contents of the resource. * Include a maximum of 128 characters in the name. * Do not include spaces, slashes, or backslashes in the name. * Do not use the name of an audio resource that has already been added to the custom model. :param file audio_resource: The audio resource that is to be added to the custom acoustic model, an individual audio file or an archive file. :param str contained_content_type: **For an archive-type resource,** specify the format of the audio files that are contained in the archive file if they are of type `audio/alaw`, `audio/basic`, `audio/l16`, or `audio/mulaw`. Include the `rate`, `channels`, and `endianness` parameters where necessary. In this case, all audio files that are contained in the archive file must be of the indicated type. For all other audio formats, you can omit the header. 
In this case, the audio files can be of multiple types as long as they are not of the types listed in the previous paragraph. The parameter accepts all of the audio formats that are supported for use with speech recognition. For more information, see **Content types for audio-type resources** in the method description. **For an audio-type resource,** omit the header. :param bool allow_overwrite: If `true`, the specified audio resource overwrites an existing audio resource with the same name. If `false`, the request fails if an audio resource with the same name already exists. The parameter has no effect if an audio resource with the same name does not already exist. :param str content_type: For an audio-type resource, the format (MIME type) of the audio. For more information, see **Content types for audio-type resources** in the method description. For an archive-type resource, the media type of the archive file. For more information, see **Content types for archive-type resources** in the method description. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse
381,530
def write(self, data, mode='w'): with open(self.path, mode) as f: f.write(data)
Write data to the file. `data` is the data to write `mode` is the mode argument to pass to `open()`
381,531
def coneSearch(self, center, radius=3*u.arcmin, magnitudelimit=25): self.magnitudelimit = magnitudelimit self.speak(.format(center, radius, magnitudelimit)) coordinatetosearch = .format(center) table = astroquery.mast.Catalogs.query_region(coordinates=center, radius=radius, catalog=) epoch = 2005 self.coordinates = coord.SkyCoord( ra=table[].data*u.deg, dec=table[].data*u.deg, obstime=Time(epoch, format=)) self.magnitudes = dict(NUV=table[].data, FUV=table[].data) self.magnitude = self.magnitudes[]
Run a cone search of the GALEX archive
381,532
def pretty(price, currency, *, abbrev=True, trim=True): currency = validate_currency(currency) price = validate_price(price) space = if nospace(currency) else fmtstr = if trim: fmtstr = .format(price, x=decimals(currency)).rstrip().rstrip() else: fmtstr = .format(price).rstrip().rstrip() if abbrev: if issuffix(currency): return fmtstr + space + symbol(currency) return symbol(currency, native=False) + space + fmtstr return fmtstr + + code(currency)
Return the price formatted with a currency symbol. Example: pretty(100, 'USD') returns '$100'. Signature: pretty(price, currency, abbrev=True, trim=True). abbrev: True: print value + symbol; the symbol may be placed before or after the value. False: print value + currency code; the code is placed after the value. trim: True: trim the float value to the maximum number of decimal digits of that currency. False: keep the number of decimals from the initial argument.
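Based on the docstring, calls would look like the following. The outputs are illustrative, since helper functions such as symbol() and code() are not shown in full here:

pretty(100, 'USD')                # -> '$100'
pretty(100, 'USD', trim=False)    # keep the currency's full decimal places, e.g. '$100.00'
pretty(100, 'USD', abbrev=False)  # -> '100 USD' (currency code placed after the value)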
381,533
def refresh_all_states(self): header = BASE_HEADERS.copy() header['Cookie'] = self.__cookie request = requests.get( BASE_URL + "refreshAllStates", headers=header, timeout=10) if request.status_code != 200: self.__logged_in = False self.login() self.refresh_all_states() return
Update all states.
381,534
def text(self, text): if text: if not isinstance(text, str): text = _pformat(text) text += self.m( , more=dict(len=len(text)) ) self.__message.attach(_MIMEText(text, , ))
.. seealso:: :attr:`text`
381,535
def add_alias(self, alias): aliases = self.list_aliases() if alias in aliases: logger.debug("Alias %s already exists on %s.", alias, self.anonymize_url(self.index_url)) return alias_data = """{"actions": [{"add": {"index": "%s", "alias": "%s"}}]}""" % (self.index, alias) r = self.requests.post(self.url + "/_aliases", headers=HEADER_JSON, verify=False, data=alias_data) try: r.raise_for_status() except requests.exceptions.HTTPError as ex: logger.warning("Something went wrong when adding an alias on %s. Alias not set.", self.anonymize_url(self.index_url)) logger.warning(ex) return logger.info("Alias %s created on %s.", alias, self.anonymize_url(self.index_url))
Add an alias to the index set in the elastic obj :param alias: alias to add :returns: None
381,536
def _get_cl_dependency_code(self): code = '' for d in self._dependencies: code += d.get_cl_code() + "\n" return code
Get the CL code for all the dependencies. Returns: str: The CL code with the actual code.
381,537
def get_checkcode(cls, id_number_str): if len(id_number_str) != 17: return False, -1 id_regex = r'^\d{17}$' if not re.match(id_regex, id_number_str): return False, -1 items = [int(item) for item in id_number_str] factors = (7, 9, 10, 5, 8, 4, 2, 1, 6, 3, 7, 9, 10, 5, 8, 4, 2) copulas = sum([a * b for a, b in zip(factors, items)]) check_codes = ('1', '0', 'x', '9', '8', '7', '6', '5', '4', '3', '2') checkcode = check_codes[copulas % 11].upper() return True, checkcode
计算身份证号码的校验位; :param: * id_number_str: (string) 身份证号的前17位,比如 3201241987010100 :returns: * 返回类型 (tuple) * flag: (bool) 如果身份证号格式正确,返回 True;格式错误,返回 False * checkcode: 计算身份证前17位的校验码 举例如下:: from fishbase.fish_data import * print('--- fish_data get_checkcode demo ---') # id number id1 = '32012419870101001' print(id1, IdCard.get_checkcode(id1)[1]) # id number id2 = '13052219840731647' print(id2, IdCard.get_checkcode(id2)[1]) print('---') 输出结果:: --- fish_data get_checkcode demo --- 32012419870101001 5 13052219840731647 1 ---
381,538
def namedlist(objname, fieldnames): class NamedListTemplate(list): __name__ = objname _fields = fieldnames def __init__(self, L=None, **kwargs): if L is None: L = [None]*len(fieldnames) super().__init__(L) for k, v in kwargs.items(): setattr(self, k, v) @classmethod def length(cls): return len(cls._fields) for i, attrname in enumerate(fieldnames): setattr(NamedListTemplate, attrname, property(operator.itemgetter(i), itemsetter(i))) return NamedListTemplate
like namedtuple but editable
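A short sketch of how the factory is used. It assumes the itemsetter helper referenced in the code, which should return a setter function suitable for property():

Point = namedlist('Point', ['x', 'y'])
p = Point([1, 2])
p.x, p[1]        # -> (1, 2): attribute and index access are interchangeable
p.x = 10         # unlike a namedtuple, fields are writable
q = Point(y=5)   # keyword-only construction fills missing fields with None
list(q)          # -> [None, 5]
Point.length()   # -> 2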
381,539
def InitializeNoPrompt(config=None, external_hostname = None, admin_password = None, mysql_hostname = None, mysql_port = None, mysql_username = None, mysql_password = None, mysql_db = None, mysql_client_key_path = None, mysql_client_cert_path = None, mysql_ca_cert_path = None, redownload_templates = False, repack_templates = True, token = None): if config["Server.initialized"]: raise ValueError("Config has already been initialized.") if not external_hostname: raise ValueError( "--noprompt set, but --external_hostname was not provided.") if not admin_password: raise ValueError("--noprompt set, but --admin_password was not provided.") if mysql_password is None: raise ValueError("--noprompt set, but --mysql_password was not provided.") print("Checking write access on config %s" % config.parser) if not os.access(config.parser.filename, os.W_OK): raise IOError("Config not writeable (need sudo?)") config_dict = {} config_dict["Datastore.implementation"] = "MySQLAdvancedDataStore" config_dict["Mysql.host"] = mysql_hostname or config["Mysql.host"] config_dict["Mysql.port"] = mysql_port or config["Mysql.port"] config_dict["Mysql.database_name"] = mysql_db or config["Mysql.database_name"] config_dict["Mysql.database_username"] = ( mysql_username or config["Mysql.database_username"]) config_dict["Client.server_urls"] = [ "http://%s:%s/" % (external_hostname, config["Frontend.bind_port"]) ] config_dict["AdminUI.url"] = "http://%s:%s" % (external_hostname, config["AdminUI.port"]) config_dict["Logging.domain"] = external_hostname config_dict["Monitoring.alert_email"] = ("grr-monitoring@%s" % external_hostname) config_dict["Monitoring.emergency_access_email"] = ("grr-emergency@%s" % external_hostname) print("Setting configuration as:\n\n%s" % config_dict) config_dict["Mysql.database_password"] = mysql_password if mysql_client_key_path is not None: config_dict["Mysql.client_key_path"] = mysql_client_key_path config_dict["Mysql.client_cert_path"] = mysql_client_cert_path config_dict["Mysql.ca_cert_path"] = mysql_ca_cert_path if CheckMySQLConnection(config_dict): print("Successfully connected to MySQL with the given configuration.") else: print("Error: Could not connect to MySQL with the given configuration.") raise ConfigInitError() for key, value in iteritems(config_dict): config.Set(key, value) config_updater_keys_util.GenerateKeys(config) FinalizeConfigInit( config, token, admin_password=admin_password, redownload_templates=redownload_templates, repack_templates=repack_templates, prompt=False)
Initialize GRR with no prompts. Args: config: config object external_hostname: A hostname. admin_password: A password used for the admin user. mysql_hostname: A hostname used for establishing connection to MySQL. mysql_port: A port used for establishing connection to MySQL. mysql_username: A username used for establishing connection to MySQL. mysql_password: A password used for establishing connection to MySQL. mysql_db: Name of the MySQL database to use. mysql_client_key_path: The path name of the client private key file. mysql_client_cert_path: The path name of the client public key certificate. mysql_ca_cert_path: The path name of the CA certificate file. redownload_templates: Indicates whether templates should be re-downloaded. repack_templates: Indicates whether templates should be re-packed. token: auth token Raises: ValueError: if required flags are not provided, or if the config has already been initialized. IOError: if config is not writeable ConfigInitError: if GRR is unable to connect to a running MySQL instance. This method does the minimum work necessary to configure GRR without any user prompting, relying heavily on config default values. User must supply the external hostname, admin password, and MySQL password; everything else is set automatically.
381,540
def connection_made(self, address): self._proxy = PickleProxy(self.loop, self) for d in self._proxy_deferreds: d.callback(self._proxy)
When a connection is made the proxy is available.
381,541
def restore_point(cls, cluster_id_label, s3_location, backup_id, table_names, overwrite=True, automatic=True): conn = Qubole.agent(version=Cluster.api_version) parameters = {} parameters['s3_location'] = s3_location parameters['backup_id'] = backup_id parameters['table_names'] = table_names parameters['overwrite'] = overwrite parameters['automatic'] = automatic return conn.post(cls.element_path(cluster_id_label) + "/restore_point", data=parameters)
Restore a cluster from a given HBase snapshot id.
381,542
def get_mathjax_header(https=False): if cfg[].lower() == : if https: mathjax_path = "https://d3eoax9i5htok0.cloudfront.net/mathjax/2.1-latest" else: mathjax_path = "http://cdn.mathjax.org/mathjax/2.1-latest" else: mathjax_path = "/vendors/MathJax" if cfg[]: mathjax_config = "TeX-AMS-MML_HTMLorMML" else: mathjax_config = "TeX-AMS_HTML" return % { : mathjax_path, : mathjax_config, }
Return the snippet of HTML code to put in HTML HEAD tag, in order to enable MathJax support. @param https: when using the CDN, whether to use the HTTPS URL rather than the HTTP one. @type https: bool @note: with new releases of MathJax, update this function together with $MJV variable in the root Makefile.am
381,543
def arcs(self): all_arcs = [] for l1, l2 in self.byte_parser._all_arcs(): fl1 = self.first_line(l1) fl2 = self.first_line(l2) if fl1 != fl2: all_arcs.append((fl1, fl2)) return sorted(all_arcs)
Get information about the arcs available in the code. Returns a sorted list of line number pairs. Line numbers have been normalized to the first line of multiline statements.
381,544
def parse_dom(dom): root = dom.getElementsByTagName("graphml")[0] graph = root.getElementsByTagName("graph")[0] name = graph.getAttribute('id') g = Graph(name) for node in graph.getElementsByTagName("node"): n = g.add_node(id=node.getAttribute('id')) for attr in node.getElementsByTagName("data"): if attr.firstChild: n[attr.getAttribute("key")] = attr.firstChild.data else: n[attr.getAttribute("key")] = "" for edge in graph.getElementsByTagName("edge"): source = edge.getAttribute('source') dest = edge.getAttribute('target') e = g.add_edge_by_id(source, dest) for attr in edge.getElementsByTagName("data"): if attr.firstChild: e[attr.getAttribute("key")] = attr.firstChild.data else: e[attr.getAttribute("key")] = "" return g
Parse dom into a Graph. :param dom: dom as returned by minidom.parse or minidom.parseString :return: A Graph representation
381,545
def display_png(*objs, **kwargs): raw = kwargs.pop('raw', False) if raw: for obj in objs: publish_png(obj) else: display(*objs, include=['text/plain', 'image/png'])
Display the PNG representation of an object. Parameters ---------- objs : tuple of objects The Python objects to display, or if raw=True raw png data to display. raw : bool Are the data objects raw data or Python objects that need to be formatted before display? [default: False]
381,546
def get_instance(self, payload): return AuthRegistrationsCredentialListMappingInstance( self._version, payload, account_sid=self._solution['account_sid'], domain_sid=self._solution['domain_sid'], )
Build an instance of AuthRegistrationsCredentialListMappingInstance :param dict payload: Payload response from the API :returns: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_registrations_mapping.auth_registrations_credential_list_mapping.AuthRegistrationsCredentialListMappingInstance :rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_registrations_mapping.auth_registrations_credential_list_mapping.AuthRegistrationsCredentialListMappingInstance
381,547
def deleted(self): return self.timestamps.deleted is not None and self.timestamps.deleted > NodeTimestamps.int_to_dt(0)
Get the deleted state. Returns: bool: Whether this item is deleted.
381,548
def input(self, data): self._lexer.lineno = 1 return self._lexer.input(data)
Reset the lexer and feed in new input. :param data: String of input data.
381,549
def on_excepthandler(self, node): return (self.run(node.type), node.name, node.body)
Handle an exception-handler (except) node: return its type, name, and body.
381,550
def _get_on_crash(dom): node = ElementTree.fromstring(get_xml(dom)).find('on_crash') return node.text if node is not None else ''
Return `on_crash` setting from the named vm CLI Example: .. code-block:: bash salt '*' virt.get_on_crash <domain>
381,551
def get_issue_remotelinks(self, issue_key, global_id=None, internal_id=None): return self.get_issue_remote_links(issue_key, global_id, internal_id)
Compatibility naming method with get_issue_remote_links()
381,552
def resolve_url(url, directory=None, permissions=None): u = urlparse(url) if directory is None: directory = os.getcwd() filename = os.path.join(directory,os.path.basename(u.path)) if u.scheme == or u.scheme == : if os.path.isfile(u.path): if os.path.isfile(filename): src_inode = os.stat(u.path)[stat.ST_INO] dst_inode = os.stat(filename)[stat.ST_INO] if src_inode != dst_inode: shutil.copy(u.path, filename) else: shutil.copy(u.path, filename) else: errmsg = "Cannot open file %s from URL %s" % (u.path, url) raise ValueError(errmsg) elif u.scheme == or u.scheme == : s = requests.Session() s.mount(str(u.scheme)+, requests.adapters.HTTPAdapter(max_retries=5)) cookie_dict = {} ecp_file = % os.getuid() if os.path.isfile(ecp_file): cj = cookielib.MozillaCookieJar() cj.load(ecp_file, ignore_discard=True, ignore_expires=True) else: cj = [] for c in cj: if c.domain == u.netloc: cookie_dict[c.name] = c.value elif u.netloc == "code.pycbc.phy.syr.edu" and \ c.domain == "git.ligo.org": cookie_dict[c.name] = c.value r = s.get(url, cookies=cookie_dict, allow_redirects=True) if r.status_code != 200: errmsg = "Unable to download %s\nError code = %d" % (url, r.status_code) raise ValueError(errmsg) if u.netloc == or u.netloc == : if istext(r.content): soup = BeautifulSoup(r.content, ) desc = soup.findAll(attrs={"property":"og:url"}) if len(desc) and \ desc[0][] == : raise ValueError(ecp_cookie_error.format(url)) output_fp = open(filename, ) output_fp.write(r.content) output_fp.close() else: errmsg = "Unknown URL scheme: %s\n" % (u.scheme) errmsg += "Currently supported are: file, http, and https." raise ValueError(errmsg) if not os.path.isfile(filename): errmsg = "Error trying to create file %s from %s" % (filename,url) raise ValueError(errmsg) if permissions: if os.access(filename, os.W_OK): os.chmod(filename, permissions) else: s = os.stat(filename)[stat.ST_MODE] if (s & permissions) != permissions: errmsg = "Could not change permissions on %s (read-only)" % url raise ValueError(errmsg) return filename
Resolves a URL to a local file, and returns the path to that file.
381,553
def handle_twitter_http_error(e, error_count, call_counter, time_window_start, wait_period): if e.error_code == 401: raise e elif e.error_code == 404: raise e elif e.error_code == 429: error_count += 0.5 call_counter = 0 wait_period = 2 time.sleep(60*15 + 5) time_window_start = time.perf_counter() return error_count, call_counter, time_window_start, wait_period elif e.error_code in (500, 502, 503, 504): error_count += 1 time.sleep(wait_period) wait_period *= 1.5 return error_count, call_counter, time_window_start, wait_period else: raise e
This function handles the twitter request in case of an HTTP error. Inputs: - e: A twython.TwythonError instance to be handled. - error_count: Number of failed retries of the call until now. - call_counter: A counter that keeps track of the number of function calls in the current 15-minute window. - time_window_start: The timestamp of the current 15-minute window. - wait_period: For certain Twitter errors (i.e. server overload), we wait and call again. Outputs: - call_counter: A counter that keeps track of the number of function calls in the current 15-minute window. - time_window_start: The timestamp of the current 15-minute window. - wait_period: For certain Twitter errors (i.e. server overload), we wait and call again. Raises: - twython.TwythonError
381,554
def getTextualNode(self, textId, subreference=None, prevnext=False, metadata=False): text = CtsText( urn=textId, retriever=self.endpoint ) if metadata or prevnext: return text.getPassagePlus(reference=subreference) else: return text.getTextualNode(subreference=subreference)
Retrieve a text node from the API :param textId: CtsTextMetadata Identifier :type textId: str :param subreference: CapitainsCtsPassage Reference :type subreference: str :param prevnext: Retrieve graph representing previous and next passage :type prevnext: boolean :param metadata: Retrieve metadata about the passage and the text :type metadata: boolean :return: CapitainsCtsPassage :rtype: CapitainsCtsPassage
381,555
def output_colored(code, text, is_bold=False): if is_bold: code = '1;%s' % code return '\033[%sm%s\033[0m' % (code, text)
Wrap text in an ANSI color escape sequence.
381,556
def fromhdf5(source, where=None, name=None, condition=None, condvars=None, start=None, stop=None, step=None): return HDF5View(source, where=where, name=name, condition=condition, condvars=condvars, start=start, stop=stop, step=step)
Provides access to an HDF5 table. E.g.:: >>> import petl as etl >>> import tables >>> # set up a new hdf5 table to demonstrate with ... h5file = tables.open_file('example.h5', mode='w', ... title='Example file') >>> h5file.create_group('/', 'testgroup', 'Test Group') /testgroup (Group) 'Test Group' children := [] >>> class FooBar(tables.IsDescription): ... foo = tables.Int32Col(pos=0) ... bar = tables.StringCol(6, pos=2) ... >>> h5table = h5file.create_table('/testgroup', 'testtable', FooBar, ... 'Test Table') >>> # load some data into the table ... table1 = (('foo', 'bar'), ... (1, b'asdfgh'), ... (2, b'qwerty'), ... (3, b'zxcvbn')) >>> for row in table1[1:]: ... for i, f in enumerate(table1[0]): ... h5table.row[f] = row[i] ... h5table.row.append() ... >>> h5file.flush() >>> h5file.close() >>> # ... # now demonstrate use of fromhdf5 ... table1 = etl.fromhdf5('example.h5', '/testgroup', 'testtable') >>> table1 +-----+-----------+ | foo | bar | +=====+===========+ | 1 | b'asdfgh' | +-----+-----------+ | 2 | b'qwerty' | +-----+-----------+ | 3 | b'zxcvbn' | +-----+-----------+ >>> # alternatively just specify path to table node ... table1 = etl.fromhdf5('example.h5', '/testgroup/testtable') >>> # ...or use an existing tables.File object ... h5file = tables.open_file('example.h5') >>> table1 = etl.fromhdf5(h5file, '/testgroup/testtable') >>> # ...or use an existing tables.Table object ... h5tbl = h5file.get_node('/testgroup/testtable') >>> table1 = etl.fromhdf5(h5tbl) >>> # use a condition to filter data ... table2 = etl.fromhdf5(h5tbl, condition='foo < 3') >>> table2 +-----+-----------+ | foo | bar | +=====+===========+ | 1 | b'asdfgh' | +-----+-----------+ | 2 | b'qwerty' | +-----+-----------+ >>> h5file.close()
381,557
def load_array_elements(self, array, start_idx, no_of_elements): concrete_start_idxes = self.concretize_load_idx(start_idx) if len(concrete_start_idxes) == 1: concrete_start_idx = concrete_start_idxes[0] load_values = [self._load_array_element_from_heap(array, idx) for idx in range(concrete_start_idx, concrete_start_idx+no_of_elements)] self.state.solver.add(start_idx == concrete_start_idx) else: concrete_start_idx = concrete_start_idxes[0] load_values = [self._load_array_element_from_heap(array, idx) for idx in range(concrete_start_idx, concrete_start_idx+no_of_elements)] start_idx_options = [concrete_start_idx == start_idx] for concrete_start_idx in concrete_start_idxes[1:]: values = [self._load_array_element_from_heap(array, idx) for idx in range(concrete_start_idx, concrete_start_idx+no_of_elements)] for i, value in enumerate(values): load_values[i] = self.state.solver.If( concrete_start_idx == start_idx, value, load_values[i] ) start_idx_options.append(start_idx == concrete_start_idx) constraint_on_start_idx = self.state.solver.Or(*start_idx_options) self.state.add_constraints(constraint_on_start_idx) return load_values
Loads either a single element or a range of elements from the array. :param array: Reference to the array. :param start_idx: Starting index for the load. :param no_of_elements: Number of elements to load.
381,558
def _get(self, scheme, host, port, path, assert_key=None): url = '%s://%s:%s%s' % (scheme, host, port, path) try: request = urllib2.Request(url) if self.config['user'] and self.config['password']: base64string = base64.standard_b64encode( '%s:%s' % (self.config['user'], self.config['password'])) request.add_header("Authorization", "Basic %s" % base64string) response = urllib2.urlopen(request) except Exception as err: self.log.error("%s: %s" % (url, err)) return False try: doc = json.load(response) except (TypeError, ValueError): self.log.error("Unable to parse response from elasticsearch as a" + " json object") return False if assert_key and assert_key not in doc: self.log.error("Bad response from elasticsearch, expected key %s was missing for %s" % (assert_key, url)) return False return doc
Execute an ES API call. Convert the response into JSON and optionally assert its structure.
381,559
def export(self, class_name, method_name, export_data=False, export_dir=, export_filename=, export_append_checksum=False, **kwargs): self.class_name = class_name self.method_name = method_name est = self.estimator self.n_classes = len(est.classes_) self.n_features = len(est.feature_log_prob_[0]) temp_type = self.temp() temp_arr = self.temp() temp_arr_ = self.temp() temp_arr__ = self.temp() priors = [self.temp().format(self.repr(p)) for p in est.class_log_prior_] priors = .join(priors) self.priors = temp_arr_.format(type=, name=, values=priors) neg_prob = log(1 - exp(est.feature_log_prob_)) probs = [] for prob in neg_prob: tmp = [temp_type.format(self.repr(p)) for p in prob] tmp = temp_arr.format(.join(tmp)) probs.append(tmp) probs = .join(probs) self.neg_probs = temp_arr__.format(type=, name=, values=probs) delta_probs = (est.feature_log_prob_ - neg_prob).T probs = [] for prob in delta_probs: tmp = [temp_type.format(self.repr(p)) for p in prob] tmp = temp_arr.format(.join(tmp)) probs.append(tmp) probs = .join(probs) self.del_probs = temp_arr__.format(type=, name=, values=probs) if self.target_method == : if export_data and os.path.isdir(export_dir): self.export_data(export_dir, export_filename, export_append_checksum) return self.predict() return self.predict()
Port a trained estimator to the syntax of a chosen programming language. Parameters ---------- :param class_name : string The name of the class in the returned result. :param method_name : string The name of the method in the returned result. :param export_data : bool, default: False Whether the model data should be saved or not. :param export_dir : string, default: '.' (current directory) The directory where the model data should be saved. :param export_filename : string, default: 'data.json' The filename of the exported model data. :param export_append_checksum : bool, default: False Whether to append the checksum to the filename or not. Returns ------- :return : string The transpiled algorithm with the defined placeholders.
381,560
def convert_path(path): if os.path.isabs(path): raise Exception("Cannot include file with absolute path {}. Please use relative path instead".format((path))) path = os.path.normpath(path) return path
Convert path to a normalized format
381,561
def _threaded_start(self): self.active = True self.thread = Thread(target=self._main_loop) self.thread.setDaemon(True) self.thread.start()
Spawns a worker thread to do the expiration checks
381,562
def _is_under_root(self, full_path): return (path.abspath(full_path) + path.sep).startswith(path.abspath(self.root) + path.sep)
Guard against arbitrary file retrieval.
381,563
def value(self, x): return x if isinstance(x, FiniteField.Value) and x.field == self else FiniteField.Value(self, x)
Converts an integer or FiniteField.Value to a value of this FiniteField.
381,564
def mk_pools(things, keyfnc=lambda x: x): "Indexes a thing by the keyfnc to construct pools of things." pools = {} sthings = sorted(things, key=keyfnc) for key, thingz in groupby(sthings, key=keyfnc): pools.setdefault(key, []).extend(list(thingz)) return pools
Indexes a thing by the keyfnc to construct pools of things.
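For example, pooling hostnames by a derived key:

hosts = ['db1', 'web1', 'db2', 'web2']
# Pool hosts by their role prefix (name with trailing digits stripped).
mk_pools(hosts, keyfnc=lambda h: h.rstrip('0123456789'))
# -> {'db': ['db1', 'db2'], 'web': ['web1', 'web2']}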
381,565
def _write_ini(source_dict, namespace_name=None, level=0, indent_size=4, output_stream=sys.stdout): options = [ value for value in source_dict.values() if isinstance(value, Option) ] options.sort(key=lambda x: x.name) indent_spacer = " " * (level * indent_size) for an_option in options: print("%s file=output_stream) option_value = to_str(an_option) if an_option.reference_value_from: print( % ( indent_spacer, an_option.reference_value_from, an_option.name ), file=output_stream ) if an_option.likely_to_be_changed or an_option.has_changed: option_format = else: option_format = if isinstance(option_value, six.string_types) and \ in option_value: option_value = % option_value print(option_format % (indent_spacer, an_option.name, option_value), file=output_stream) next_level = level + 1 namespaces = [ (key, value) for key, value in source_dict.items() if isinstance(value, Namespace) ] namespaces.sort(key=ValueSource._namespace_reference_value_from_sort) for key, namespace in namespaces: next_level_spacer = " " * next_level * indent_size print("%s%s%s%s\n" % (indent_spacer, "[" * next_level, key, "]" * next_level), file=output_stream) if namespace._doc: print("%s%s" % (next_level_spacer, namespace._doc), file=output_stream) if namespace._reference_value_from: print("%s % (next_level_spacer, key), file=output_stream) if namespace_name: ValueSource._write_ini( source_dict=namespace, namespace_name="%s.%s" % (namespace_name, key), level=level+1, indent_size=indent_size, output_stream=output_stream ) else: ValueSource._write_ini( source_dict=namespace, namespace_name=key, level=level+1, indent_size=indent_size, output_stream=output_stream )
This function prints the components of a configobj ini file. It is recursive for outputting the nested sections of the ini file.
381,566
def release(ctx, yes, latest): m = RepoManager(ctx.obj[]) api = m.github_repo() if latest: latest = api.releases.latest() if latest: click.echo(latest[]) elif m.can_release(): branch = m.info[] version = m.validate_version() name = % version body = [ % name] data = dict( tag_name=name, target_commitish=branch, name=name, body=.join(body), draft=False, prerelease=False ) if yes: data = api.releases.create(data=data) m.message() click.echo(niceJson(data)) else: click.echo()
Create a new release on GitHub.
381,567
def status(queue, munin, munin_config): if munin_config: return status_print_config(queue) queues = get_queues(queue) for queue in queues: status_print_queue(queue, munin=munin) if not munin: print('-' * 40)
List queued tasks aggregated by name
381,568
def connect(self): self.logger.debug( "Connecting... (address = %s, port = %s, clientId = %s, username = %s)" % (self.address, self.port, self.clientId, self.username) ) try: self.connectEvent.clear() self.client.connect(self.address, port=self.port, keepalive=self.keepAlive) self.client.loop_start() if not self.connectEvent.wait(timeout=30): self.client.loop_stop() self._logAndRaiseException( ConnectionException( "Operation timed out connecting to IBM Watson IoT Platform: %s" % (self.address) ) ) except socket.error as serr: self.client.loop_stop() self._logAndRaiseException( ConnectionException("Failed to connect to IBM Watson IoT Platform: %s - %s" % (self.address, str(serr))) )
Connect the client to IBM Watson IoT Platform using the underlying Paho MQTT client # Raises ConnectionException: If there is a problem establishing the connection.
381,569
def split_on_condition(seq, condition): l1, l2 = tee((condition(item), item) for item in seq) return (i for p, i in l1 if p), (i for p, i in l2 if not p)
Split a sequence into two iterables without looping twice
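Both results are lazy generators over a shared itertools.tee, so the predicate runs once per item; consume them promptly to avoid tee buffering:

evens, odds = split_on_condition(range(10), lambda n: n % 2 == 0)
list(evens)  # [0, 2, 4, 6, 8]
list(odds)   # [1, 3, 5, 7, 9]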
381,570
def html5_serialize_simple_color(simple_color): red, green, blue = simple_color result = u'#' format_string = u'{:02x}' result += format_string.format(red) result += format_string.format(green) result += format_string.format(blue) return result
Apply the serialization algorithm for a simple color from section 2.4.6 of HTML5.
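With the lowercase-hex serialization above, a simple-color triple maps to the familiar #rrggbb form:

html5_serialize_simple_color((255, 0, 128))  # -> '#ff0080'
html5_serialize_simple_color((0, 0, 0))      # -> '#000000'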
381,571
def walk(self, top, topdown=True, ignore_file_handler=None): tree = self.git_object_by_path(top) if tree is None: raise IOError(errno.ENOENT, "No such file") for x in self._walk(tree, topdown): yield x
Directory tree generator. See `os.walk` for the docs. Differences: - no support for symlinks - it could raise exceptions, there is no onerror argument
381,572
def _restore_base_estimators(self, kernel_cache, out, X, cv): train_folds = {fold: train_index for fold, (train_index, _) in enumerate(cv)} for idx, fold, _, est in out: if idx in kernel_cache: if not hasattr(est, ): raise ValueError( % self.base_estimators[idx][0]) est.set_params(kernel=self.base_estimators[idx][1].kernel) est.fit_X_ = X[train_folds[fold]] return out
Restore custom kernel functions of estimators for predictions
381,573
def revise_helper(query): match = re.search(extract_sql_regex, query, re.DOTALL | re.I) return match.group(1), match.group(2)
given sql containing a "CREATE TABLE {table_name} AS ({query})" returns table_name, query
381,574
async def analog_write(self, pin, value): if PrivateConstants.ANALOG_MESSAGE + pin < 0xf0: command = [PrivateConstants.ANALOG_MESSAGE + pin, value & 0x7f, (value >> 7) & 0x7f] await self._send_command(command) else: await self.extended_analog(pin, value)
Set the selected pin to the specified value. :param pin: PWM pin number :param value: Pin value (0 - 0x4000) :returns: No return value
381,575
def convert_timezone(date_str, tz_from, tz_to="UTC", fmt=None): tz_offset = datetime_to_timezone( datetime.datetime.now(), tz=tz_from).strftime('%z') tz_offset = tz_offset[:3] + ':' + tz_offset[3:] date = parse_date(str(date_str) + tz_offset) if tz_from != tz_to: date = datetime_to_timezone(date, tz_to) if isinstance(fmt, str): return date.strftime(fmt) return date
Convert a date string from timezone tz_from to tz_to, optionally formatting the result with fmt.
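A usage sketch. The parse_date and datetime_to_timezone helpers are external to this snippet, so the output is illustrative:

# Interpret a naive timestamp as US/Eastern and express it in UTC.
convert_timezone('2021-06-01 12:00:00', 'US/Eastern', 'UTC', fmt='%Y-%m-%d %H:%M:%S')
# -> '2021-06-01 16:00:00' (EDT is UTC-4 in June)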
381,576
def dump_code(disassembly, pc = None, bLowercase = True, bits = None): if not disassembly: return table = Table(sep = ) for (addr, size, code, dump) in disassembly: if bLowercase: code = code.lower() if addr == pc: addr = % HexDump.address(addr, bits) else: addr = % HexDump.address(addr, bits) table.addRow(addr, dump, code) table.justify(1, 1) return table.getOutput()
Dump a disassembly. Optionally mark where the program counter is. @type disassembly: list of tuple( int, int, str, str ) @param disassembly: Disassembly dump as returned by L{Process.disassemble} or L{Thread.disassemble_around_pc}. @type pc: int @param pc: (Optional) Program counter. @type bLowercase: bool @param bLowercase: (Optional) If C{True} convert the code to lowercase. @type bits: int @param bits: (Optional) Number of bits of the target architecture. The default is platform dependent. See: L{HexDump.address_size} @rtype: str @return: Text suitable for logging.
381,577
def get_check(self, check): chk = self._check_manager.get(check) chk.set_entity(self) return chk
Returns an instance of the specified check.
381,578
def log_cdf_laplace(x, name="log_cdf_laplace"): with tf.name_scope(name): x = tf.convert_to_tensor(value=x, name="x") lower_solution = -np.log(2.) + x safe_exp_neg_x = tf.exp(-tf.abs(x)) upper_solution = tf.math.log1p(-0.5 * safe_exp_neg_x) return tf.where(x < 0., lower_solution, upper_solution)
Log Laplace distribution function. This function calculates `Log[L(x)]`, where `L(x)` is the cumulative distribution function of the Laplace distribution, i.e. ```L(x) := 0.5 * int_{-infty}^x e^{-|t|} dt``` For numerical accuracy, `L(x)` is computed in different ways depending on `x`, ``` x <= 0: Log[L(x)] = Log[0.5] + x, which is exact 0 < x: Log[L(x)] = Log[1 - 0.5 * e^{-x}], which is exact ``` Args: x: `Tensor` of type `float32`, `float64`. name: Python string. A name for the operation (default="log_ndtr"). Returns: `Tensor` with `dtype=x.dtype`. Raises: TypeError: if `x.dtype` is not handled.
381,579
def fopen(name, mode='r', buffering=-1):  # default mode assumed 'r', as in built-in open()
    f = _fopen(name, mode, buffering)
    return _FileObjectThreadWithContext(f, mode, buffering)
Similar to Python's built-in `open()` function.
381,580
def remove_input_link(self, process_code, input_code):
    # NOTE: the dictionary keys below were stripped in extraction and are
    # inferred from Brightway-style conventions ('items', 'exchanges',
    # 'input', 'type', 'production', 'from', 'to', 'function').
    process = self.database['items'][process_code]
    exchanges = process['exchanges']
    initial_count = len(exchanges)
    new_exchanges = [e for e in exchanges if e['input'] != input_code]
    product_code = [e['input'] for e in exchanges
                    if e['type'] == 'production'][0]
    param_id = [k for k, v in self.params.items()
                if (v['from'] == input_code[1] and v['to'] == product_code[1])][0]
    problem_functions = self.check_param_function_use(param_id)
    if len(problem_functions) != 0:
        for p in problem_functions:
            self.params[p[0]]['function'] = None
    process['exchanges'] = new_exchanges
    del self.params[param_id]
    self.parameter_scan()
    return initial_count - len(new_exchanges)
Remove an input (technosphere or biosphere exchange) from a process, resolving all parameter issues
381,581
def run(self, start_command_srv):
    if start_command_srv:
        self._command_server.start()
    self._drop_privs()
    self._task_runner.start()
    self._reg_sighandlers()
    while self.running:
        time.sleep(self._sleep_period)
    self.shutdown()
Setup daemon process, start child forks, and sleep until events are signalled. `start_command_srv` Set to ``True`` if command server should be started.
381,582
def fetchallfirstvalues(self, sql: str, *args) -> List[Any]:
    rows = self.fetchall(sql, *args)
    return [row[0] for row in rows]
Executes SQL; returns list of first values of each row.
381,583
def _handle_template_param_value(self):
    self._emit_all(self._pop())
    self._context ^= contexts.TEMPLATE_PARAM_KEY
    self._context |= contexts.TEMPLATE_PARAM_VALUE
    self._emit(tokens.TemplateParamEquals())
Handle a template parameter's value at the head of the string.
381,584
def exception_log_and_respond(exception, logger, message, status_code):
    logger.error(message, exc_info=True)
    return make_response(
        message,
        status_code,
        dict(exception_type=type(exception).__name__,
             exception_message=str(exception)),
    )
Log an error and send a jsonified response.
381,585
def cover(session):
    # The string literals below were lost in extraction; the values follow
    # the common nox coverage-session pattern and are assumptions.
    session.interpreter = 'python3.6'
    session.install('coverage', 'pytest-cov')
    session.run('coverage', 'report', '--show-missing', '--fail-under=100')
    session.run('coverage', 'erase')
Run the final coverage report. This outputs the coverage report aggregating coverage from the unit test runs (not system test runs), and then erases coverage data.
381,586
def run_coroutine_threadsafe(coro, loop):
    if not asyncio.iscoroutine(coro):
        raise TypeError('A coroutine object is required')  # message as in CPython's asyncio
    future = concurrent.futures.Future()

    def callback():
        try:
            _chain_future(asyncio.ensure_future(coro, loop=loop), future)
        except Exception as exc:
            if future.set_running_or_notify_cancel():
                future.set_exception(exc)
            raise

    loop.call_soon_threadsafe(callback)
    return future
Submit a coroutine object to a given event loop. Return a concurrent.futures.Future to access the result.
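Typical usage, with the event loop running in a separate thread (this mirrors how the standard library's `asyncio.run_coroutine_threadsafe` is used):

import asyncio
import threading

async def work():
    await asyncio.sleep(0.1)
    return 42

loop = asyncio.new_event_loop()
threading.Thread(target=loop.run_forever, daemon=True).start()

future = run_coroutine_threadsafe(work(), loop)
print(future.result(timeout=5))  # 42, computed on the loop's thread
loop.call_soon_threadsafe(loop.stop)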
381,587
def add_bindings(self, g: Graph) -> "PrefixLibrary":
    for prefix, namespace in self:
        g.bind(prefix.lower(), namespace)
    return self
Add bindings in the library to the graph :param g: graph to add prefixes to :return: PrefixLibrary object
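For context, a minimal rdflib sketch of what each `g.bind` call does (the prefix and namespace here are illustrative):

from rdflib import Graph, Namespace

g = Graph()
g.bind('foaf', Namespace('http://xmlns.com/foaf/0.1/'))
# Serializations now use the short prefix:
#   @prefix foaf: <http://xmlns.com/foaf/0.1/> .
print(g.serialize(format='turtle'))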
381,588
def _debug_check(self):
    old_end = 0
    old_sort = ""
    for segment in self._list:
        if segment.start <= old_end and segment.sort == old_sort:
            raise AngrCFGError("Error in SegmentList: blocks are not merged")
        old_end = segment.end
        old_sort = segment.sort
Iterates over the list, checking that segments with the same sort do not overlap :raise: Exception: if segments with the same sort overlap
381,589
@classmethod  # restored; the cls parameter implies a classmethod
def Normal(cls,
           mean: 'TensorFluent',
           variance: 'TensorFluent',
           batch_size: Optional[int] = None) -> Tuple[Distribution, 'TensorFluent']:
    # 'TensorFluent' annotations restored from the docstring; the error
    # message below is an inference (original literal stripped).
    if mean.scope != variance.scope:
        raise ValueError('Normal distribution: mean and variance must have the same scope')
    loc = mean.tensor
    scale = tf.sqrt(variance.tensor)
    dist = tf.distributions.Normal(loc, scale)
    batch = mean.batch or variance.batch
    if not batch and batch_size is not None:
        t = dist.sample(batch_size)
        batch = True
    else:
        t = dist.sample()
    scope = mean.scope.as_list()
    return (dist, TensorFluent(t, scope, batch=batch))
Returns a TensorFluent for the Normal sampling op with given mean and variance. Args: mean: The mean parameter of the Normal distribution. variance: The variance parameter of the Normal distribution. batch_size: The size of the batch (optional). Returns: The Normal distribution and a TensorFluent sample drawn from the distribution. Raises: ValueError: If parameters do not have the same scope.
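Stripped of the TensorFluent wrapper, the op is just a Normal parameterized by the mean and the square root of the variance; a minimal sketch using TensorFlow Probability (an assumption, since the code above uses the older `tf.distributions` API):

import tensorflow as tf
import tensorflow_probability as tfp

mean = tf.constant(0.0)
variance = tf.constant(4.0)
dist = tfp.distributions.Normal(loc=mean, scale=tf.sqrt(variance))
samples = dist.sample(32)  # analogous to the batch_size sampling path above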
381,590
def _get_primary_index_in_altered_table(self, diff):
    primary_index = {}
    for index in self._get_indexes_in_altered_table(diff).values():
        if index.is_primary():
            primary_index = {index.get_name(): index}
    return primary_index
:param diff: The table diff :type diff: orator.dbal.table_diff.TableDiff :rtype: dict
381,591
def contains(self, column, value):
    df = self.df[self.df[column].str.contains(value) == True]
    if df is None:
        self.err("Can not select contained data")
        return
    self.df = df
Set the main dataframe instance to rows that contain a string value in a column
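A standalone pandas sketch of the same selection (the data here is illustrative):

import pandas as pd

df = pd.DataFrame({'city': ['New York', 'Newark', 'Boston']})
subset = df[df['city'].str.contains('New') == True]
print(list(subset['city']))  # ['New York', 'Newark']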
381,592
def setup(app):
    # Role and config names were stripped in extraction; the values below
    # follow the well-known GitHub-links Sphinx extension and are inferred.
    app.info('Initializing GitHub plugin')
    app.add_role('ghissue', ghissue_role)
    app.add_role('ghpull', ghissue_role)
    app.add_role('ghuser', ghuser_role)
    app.add_role('ghcommit', ghcommit_role)
    app.add_config_value('github_project_url', None, 'env')
    return
Install the plugin. :param app: Sphinx application context.
381,593
@classmethod  # restored; the cls parameter implies a classmethod
def cyan(cls, string, auto=False):
    return cls.colorize('cyan', string, auto=auto)  # color name inferred from the method name
Color-code entire string. :param str string: String to colorize. :param bool auto: Enable auto-color (dark/light terminal). :return: Class instance for colorized string. :rtype: Color
381,594
def require_dataset(self, name, shape, dtype=None, exact=False, **kwargs):
    return self._write_op(self._require_dataset_nosync, name, shape=shape,
                          dtype=dtype, exact=exact, **kwargs)
Obtain an array, creating if it doesn't exist. Other `kwargs` are as per :func:`zarr.hierarchy.Group.create_dataset`. Parameters ---------- name : string Array name. shape : int or tuple of ints Array shape. dtype : string or dtype, optional NumPy dtype. exact : bool, optional If True, require `dtype` to match exactly. If false, require `dtype` can be cast from array dtype.
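Usage per the zarr API: calling `require_dataset` twice with compatible arguments returns the same array.

import zarr

group = zarr.group()
d1 = group.require_dataset('data', shape=(1000,), dtype='i4')
d2 = group.require_dataset('data', shape=(1000,), dtype='i4')
assert d1 == d2  # second call returns the existing array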
381,595
def submit_order(id_or_ins, amount, side, price=None, position_effect=None):
    order_book_id = assure_order_book_id(id_or_ins)
    env = Environment.get_instance()
    if (env.config.base.run_type != RUN_TYPE.BACKTEST
            and env.get_instrument(order_book_id).type == "Future"):
        if "88" in order_book_id:
            raise RQInvalidArgument(
                _(u"Main Future contracts[88] are not supported in paper trading."))
        if "99" in order_book_id:
            raise RQInvalidArgument(
                _(u"Index Future contracts[99] are not supported in paper trading."))
    style = cal_style(price, None)
    market_price = env.get_last_price(order_book_id)
    if not is_valid_price(market_price):
        user_system_log.warn(
            _(u"Order Creation Failed: [{order_book_id}] No market data").format(
                order_book_id=order_book_id))
        return
    amount = int(amount)
    order = Order.__from_create__(
        order_book_id=order_book_id,
        quantity=amount,
        side=side,
        style=style,
        position_effect=position_effect,
    )
    if order.type == ORDER_TYPE.MARKET:
        order.set_frozen_price(market_price)
    if env.can_submit_order(order):
        env.broker.submit_order(order)
        return order
General-purpose order submission function; a strategy can freely choose its order parameters through it.

:param id_or_ins: the instrument to trade
:type id_or_ins: :class:`~Instrument` object | `str`

:param float amount: order quantity; must be positive

:param side: direction, long (SIDE.BUY) or short (SIDE.SELL)
:type side: :class:`~SIDE` enum

:param float price: order price; defaults to None, meaning a market order

:param position_effect: open/close effect: open (POSITION_EFFECT.OPEN), close (POSITION_EFFECT.CLOSE), or close today (POSITION_EFFECT.CLOSE_TODAY); not required when trading stocks
:type position_effect: :class:`~POSITION_EFFECT` enum

:return: :class:`~Order` object | None

:example:

.. code-block:: python

    # Buy 2000 shares of Ping An Bank (000001.XSHE), sent as a market order:
    submit_order('000001.XSHE', 2000, SIDE.BUY)
    # Close today's long position of 10 lots of RB1812, sent as a limit order at 4000:
    submit_order('RB1812', 10, SIDE.SELL, price=4000, position_effect=POSITION_EFFECT.CLOSE_TODAY)
381,596
def called_alts_from_genotype(self):
    # 'GT' restored from the docstring; the '/' separator is an assumption
    # (the original split argument was stripped).
    if 'GT' not in self.FORMAT:
        return None
    genotype_indexes = set([int(x) for x in self.FORMAT['GT'].split('/')])
    alts = set()
    for i in genotype_indexes:
        if i == 0:
            alts.add(self.REF)
        else:
            alts.add(self.ALT[i - 1])
    return alts
Returns a set of the (maybe REF and) ALT strings that were called, using GT in FORMAT. Returns None if GT not in the record
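A worked example of the GT-to-allele mapping: with REF=A, ALT=[C, T] and GT `0/2`, the called set is {A, T}. A standalone sketch:

# Standalone illustration of the mapping performed above.
REF, ALT = 'A', ['C', 'T']
gt = '0/2'
indexes = set(int(x) for x in gt.split('/'))
called = {REF if i == 0 else ALT[i - 1] for i in indexes}
assert called == {'A', 'T'}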
381,597
def get_2d_markers_linearized(self, component_info=None, data=None,
                              component_position=None, index=None):
    return self._get_2d_markers(
        data, component_info, component_position, index=index)
Get 2D linearized markers. :param index: Specify which camera to get 2D from, will be returned as first entry in the returned array.
381,598
def _create_table_and_update_context(node, context):
    schema_type_name = sql_context_helpers.get_schema_type_name(node, context)
    table = context.compiler_metadata.get_table(schema_type_name).alias()
    context.query_path_to_selectable[node.query_path] = table
    return table
Create an aliased table for a SqlNode. Updates the relevant Selectable global context. Args: node: SqlNode, the current node. context: CompilationContext, global compilation state and metadata. Returns: Table, the newly aliased SQLAlchemy table.
381,599
def vx(self, *args, **kwargs):
    out = self._orb.vx(*args, **kwargs)
    if len(out) == 1:
        return out[0]
    else:
        return out
NAME: vx PURPOSE: return x velocity at time t INPUT: t - (optional) time at which to get the velocity (can be Quantity) vo= (Object-wide default) physical scale for velocities to use to convert (can be Quantity) use_physical= use to override Object-wide default for using a physical scale for output OUTPUT: vx(t) HISTORY: 2010-11-30 - Written - Bovy (NYU)
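A usage sketch assuming galpy's standard Orbit interface (the initial conditions here are illustrative):

from galpy.orbit import Orbit

# [R, vR, vT, z, vz, phi] in galpy's natural units
o = Orbit([1., 0.1, 1.1, 0., 0.1, 0.])
print(o.vx())  # x velocity at t=0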