Dataset columns: "Unnamed: 0" (int64 row index, 0 to 389k), "code" (string, 26 to 79.6k characters), "docstring" (string, 1 to 46.9k characters).
def gen_primes():
    D = {}
    q = 2
    while True:
        if q not in D:
            yield q
            D[q * q] = [q]
        else:
            for p in D[q]:
                D.setdefault(p + q, []).append(p)
            del D[q]
        q += 1
Generate an infinite sequence of prime numbers.
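A minimal usage sketch for the generator above; itertools.islice is used only to take a finite prefix of the infinite sequence:

from itertools import islice
print(list(islice(gen_primes(), 10)))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]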
21,801
def get_signed_area(self):
    accum = 0.0
    for i in range(len(self.verts)):
        j = (i + 1) % len(self.verts)
        accum += (self.verts[j][0] * self.verts[i][1]
                  - self.verts[i][0] * self.verts[j][1])
    return accum / 2
Return area of a simple (i.e. non-self-intersecting) polygon. If verts wind anti-clockwise, this returns a negative number. Assume y-axis points up.
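A quick numeric check of the sign convention, using a hypothetical Polygon holder class (only a verts attribute is needed):

class Polygon:
    def __init__(self, verts):
        self.verts = verts

Polygon.get_signed_area = get_signed_area  # attach the function above as a method

square_acw = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])   # anti-clockwise winding
print(square_acw.get_signed_area())                       # -1.0
print(Polygon(square_acw.verts[::-1]).get_signed_area())  # 1.0 (clockwise)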
21,802
def find_dependencies_with_parent(self, dependent, parent):
    self.logger.info(" Finding dependencies of %s via parent %s"
                     % (dependent.hex[:8], parent.hex[:8]))
    diff = self.repo.diff(parent, dependent,
                          context_lines=self.options.context_lines)
    for patch in diff:
        path = patch.delta.old_file.path
        self.logger.info(" Examining hunks in %s" % path)
        for hunk in patch.hunks:
            self.blame_diff_hunk(dependent, parent, path, hunk)
Find all dependencies of the given revision caused by the given parent commit. This will be called multiple times for merge commits which have multiple parents.
21,803
def newEntity(self, name, type, ExternalID, SystemID, content): ret = libxml2mod.xmlNewEntity(self._o, name, type, ExternalID, SystemID, content) if ret is None:raise treeError() __tmp = xmlEntity(_obj=ret) return __tmp
Create a new entity, this differs from xmlAddDocEntity() in that if the document is None or has no internal subset defined, then an unlinked entity structure will be returned, it is then the responsibility of the caller to link it to the document later or free it when not needed anymore.
21,804
def _get_manifest_string(self): config_str = "" config_str += .format(self.pipeline_name) config_str += .format(self.nf_file) return config_str
Returns the nextflow manifest config string to include in the config file from the information on the pipeline. Returns ------- str Nextflow manifest configuration string
21,805
def get_next_base26(prev=None):
    # The string literals in this function were stripped during extraction;
    # 'a' and 'z' below are inferred from the docstring's a..z, aa, ab sequence.
    if not prev:
        return 'a'
    r = re.compile("^[a-z]*$")
    if not r.match(prev):
        raise ValueError("Invalid base26")
    if not prev.endswith('z'):
        return prev[:-1] + chr(ord(prev[-1]) + 1)
    return get_next_base26(prev[:-1]) + 'a'
Increment letter-based IDs. Generates IDs like ['a', 'b', ..., 'z', 'aa', 'ab', ..., 'az', 'ba', ...] Returns: str: Next base-26 ID.
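Illustrative calls matching the sequence in the docstring (assuming the stripped literals in the code above were 'a' and 'z'):

print(get_next_base26())      # 'a'
print(get_next_base26('a'))   # 'b'
print(get_next_base26('az'))  # 'ba'
print(get_next_base26('zz'))  # 'aaa'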
21,806
def sync(self, videoQuality, limit=None, unwatched=False, **kwargs): from plexapi.sync import Policy, MediaSettings kwargs[] = MediaSettings.createVideo(videoQuality) kwargs[] = Policy.create(limit, unwatched) return super(MovieSection, self).sync(**kwargs)
Add current Movie library section as sync item for specified device. See description of :func:`plexapi.library.LibrarySection.search()` for details about filtering / sorting and :func:`plexapi.library.LibrarySection.sync()` for details on syncing libraries and possible exceptions. Parameters: videoQuality (int): idx of quality of the video, one of VIDEO_QUALITY_* values defined in :mod:`plexapi.sync` module. limit (int): maximum count of movies to sync, unlimited if `None`. unwatched (bool): if `True` watched videos wouldn't be synced. Returns: :class:`plexapi.sync.SyncItem`: an instance of created syncItem. Example: .. code-block:: python from plexapi import myplex from plexapi.sync import VIDEO_QUALITY_3_MBPS_720p c = myplex.MyPlexAccount() target = c.device('Plex Client') sync_items_wd = c.syncItems(target.clientIdentifier) srv = c.resource('Server Name').connect() section = srv.library.section('Movies') section.sync(VIDEO_QUALITY_3_MBPS_720p, client=target, limit=1, unwatched=True, title='Next best movie', sort='rating:desc')
21,807
def _on_timeout(self, info: str = None) -> None:
    self._timeout = None
    error_message = "Timeout {0}".format(info) if info else "Timeout"
    if self.final_callback is not None:
        self._handle_exception(
            HTTPTimeoutError, HTTPTimeoutError(error_message), None
        )
Timeout callback of _HTTPConnection instance. Raise a `HTTPTimeoutError` when a timeout occurs. :info string key: More detailed timeout information.
21,808
def get(search="unsigned"): plugins = [] for i in os.walk(): for f in i[2]: plugins.append(f) return plugins
List all available plugins
21,809
def filter_results(self, boxlist, num_classes):
    boxes = boxlist.bbox.reshape(-1, num_classes * 4)
    scores = boxlist.get_field("scores").reshape(-1, num_classes)
    device = scores.device
    result = []
    inds_all = scores > self.score_thresh
    for j in range(1, num_classes):
        inds = inds_all[:, j].nonzero().squeeze(1)
        scores_j = scores[inds, j]
        boxes_j = boxes[inds, j * 4 : (j + 1) * 4]
        boxlist_for_class = BoxList(boxes_j, boxlist.size, mode="xyxy")
        boxlist_for_class.add_field("scores", scores_j)
        boxlist_for_class = boxlist_nms(boxlist_for_class, self.nms)
        num_labels = len(boxlist_for_class)
        boxlist_for_class.add_field(
            "labels",
            torch.full((num_labels,), j, dtype=torch.int64, device=device))
        result.append(boxlist_for_class)
    result = cat_boxlist(result)
    number_of_detections = len(result)
    if number_of_detections > self.detections_per_img > 0:
        cls_scores = result.get_field("scores")
        image_thresh, _ = torch.kthvalue(
            cls_scores.cpu(),
            number_of_detections - self.detections_per_img + 1)
        keep = cls_scores >= image_thresh.item()
        keep = torch.nonzero(keep).squeeze(1)
        result = result[keep]
    return result
Returns bounding-box detection results by thresholding on scores and applying non-maximum suppression (NMS).
21,810
def privmsg(self, target, message): with self.lock: self.send( + target + + message) if self.readable(): msg = self._recv(expected_replies=(,)) if msg[0] == : return , msg[2].split(None, 1)[1].replace(, , 1)
Sends a PRIVMSG to someone. Required arguments: * target - Who to send the message to. * message - Message to send.
21,811
def _LowerBoundSearch(partitions, hash_value):
    for i in xrange(0, len(partitions) - 1):
        if (partitions[i].CompareTo(hash_value) <= 0
                and partitions[i + 1].CompareTo(hash_value) > 0):
            return i
    return len(partitions) - 1
Searches the partition in the partition array using hashValue.
21,812
def _flatten(lst):
    if not isinstance(lst, (list, tuple)):
        return [lst]
    result = []
    for item in lst:
        result.extend(_flatten(item))
    return result
Flatten a nested list.
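A usage sketch of the flattener above:

print(_flatten([1, [2, (3, [4]), 5], 6]))  # [1, 2, 3, 4, 5, 6]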
21,813
def get_synset_1000(self): fname = os.path.join(self.dir, ) assert os.path.isfile(fname) lines = [x.strip() for x in open(fname).readlines()] return dict(enumerate(lines))
Returns: dict: {cls_number: synset_id}
21,814
def format_currency_field(__, prec, number, locale): locale = Locale.parse(locale) currency = get_territory_currencies(locale.territory)[0] if prec is None: pattern, currency_digits = None, True else: prec = int(prec) pattern = locale.currency_formats[] pattern = modify_number_pattern(pattern, frac_prec=(prec, prec)) currency_digits = False return format_currency(number, currency, pattern, locale=locale, currency_digits=currency_digits)
Formats a currency field.
21,815
def tagAttributes(fdef_master_list,node,depth=0): if type(node)==list: for i in node: depth+=1 tagAttributes(fdef_master_list,i,depth) if type(node)==dict: for x in fdef_master_list: if jsName(x.path,x.name)==node[]: node[]=x.path node[]=depth if "children" not in node: node["size"]=x.weight for i in node.values(): depth+=1 tagAttributes(fdef_master_list,i,depth) return node
recursively tag objects with sizes, depths and path names
21,816
def data(link):
    link = _remove_api_url_from_link(link)
    req = _get_from_dapi_or_mirror(link)
    return _process_req(req)
Returns a dictionary from requested link
21,817
def iterate_similarity_datasets(args):
    for dataset_name in args.similarity_datasets:
        parameters = nlp.data.list_datasets(dataset_name)
        for key_values in itertools.product(*parameters.values()):
            kwargs = dict(zip(parameters.keys(), key_values))
            yield dataset_name, kwargs, nlp.data.create(dataset_name, **kwargs)
Generator over all similarity evaluation datasets. Iterates over dataset names, keyword arguments for their creation and the created dataset.
21,818
def get_encoder_from_vocab(vocab_filepath):
    if not tf.gfile.Exists(vocab_filepath):
        raise ValueError("Vocab file does not exist: {}.".format(vocab_filepath))
    tf.logging.info("Found vocab file: %s", vocab_filepath)
    encoder = text_encoder.SubwordTextEncoder(vocab_filepath)
    return encoder
Get encoder from vocab file. If vocab is not found in output dir, it will be copied there by copy_vocab_to_output_dir to clarify the vocab used to generate the data. Args: vocab_filepath: path to vocab, either local or cns Returns: A SubwordTextEncoder vocabulary object. None if the output_parallel_text is set.
21,819
def ap_state(value, failure_string=None):
    try:
        return statestyle.get(value).ap
    except:
        if failure_string:
            return failure_string
        else:
            return value
Converts a state's name, postal abbreviation or FIPS to A.P. style. Example usage: >> ap_state("California") 'Calif.'
21,820
def modify_ip_prefixes( config, config_file, variable_name, dummy_ip_prefix, reconfigure_cmd, keep_changes, changes_counter, ip_version): log = logging.getLogger(PROGRAM_NAME) services = config.sections() services.remove() update_bird_conf = False try: ip_prefixes_in_bird = get_ip_prefixes_from_bird(config_file) except OSError as error: log.error("failed to open Bird configuration %s, this is a FATAL " "error, thus exiting main program", error) sys.exit(1) _name = get_variable_name_from_bird(config_file) if _name is None: log.warning("failed to find variable name in %s, going to add it", config_file) update_bird_conf = True elif _name != variable_name: log.warning("found incorrect variable name in %s, going to add the " "correct one %s", _name, variable_name) update_bird_conf = True if dummy_ip_prefix not in ip_prefixes_in_bird: log.warning("dummy IP prefix %s is missing from bird configuration " "%s, adding it", dummy_ip_prefix, config_file) ip_prefixes_in_bird.insert(0, dummy_ip_prefix) update_bird_conf = True ip_prefixes_with_check = get_ip_prefixes_from_config( config, services, ip_version) "have a service check configured", .join(ip_prefixes_without_check), config_file) ip_prefixes_in_bird[:] = (ip for ip in ip_prefixes_in_bird if ip not in ip_prefixes_without_check) update_bird_conf = True else: log.warning("found IP prefixes %s in %s without a service " "check configured", .join(ip_prefixes_without_check), config_file) if update_bird_conf: if keep_changes: archive_bird_conf(config_file, changes_counter) tempname = write_temp_bird_conf( dummy_ip_prefix, config_file, variable_name, ip_prefixes_in_bird ) try: os.rename(tempname, config_file) except OSError as error: msg = ("CRITICAL: failed to create Bird configuration {e}, " "this is FATAL error, thus exiting main program" .format(e=error)) sys.exit("{m}".format(m=msg)) else: log.info("Bird configuration for IPv%s is updated", ip_version) reconfigure_bird(reconfigure_cmd)
Modify IP prefixes in Bird configuration. Depending on the configuration either removes or reports IP prefixes found in Bird configuration for which we don't have a service check associated with them. Moreover, it adds the dummy IP prefix if it isn't present and ensures that the correct variable name is set. Arguments: config (obj): A configparser object which holds our configuration. config_file (str): The file name of bird configuration variable_name (str): The name of the variable set in bird configuration dummy_ip_prefix (str): The dummy IP prefix, which must always be present reconfigure_cmd (str): The command to run to trigger a reconfiguration on Bird daemon upon successful configuration update keep_changes (boolean): To enable keeping a history of changes applied to bird configuration changes_counter (int): The number of configuration changes to keep ip_version (int): IP protocol version of Bird configuration
21,821
def available_edbg_ports(self):
    ports_available = sorted(list(list_ports.comports()))
    edbg_ports = []
    for iport in ports_available:
        port = iport[0]
        desc = iport[1]
        hwid = iport[2]
        if str(desc).startswith("EDBG Virtual COM Port") or \
           "VID:PID=03EB:2111" in str(hwid).upper():
            try:
                edbg_ports.index(port, 0)
                print("There is multiple %s ports with same number!" % port)
            except ValueError:
                edbg_ports.append(port)
    return edbg_ports
Finds available EDBG COM ports. :return: list of available ports
21,822
def _load_types(root): def text(t): if t.tag == : return elif t.tag == : return out = [] if t.text: out.append(_escape_tpl_str(t.text)) for x in t: out.append(text(x)) if x.tail: out.append(_escape_tpl_str(x.tail)) return .join(out) out_dict = collections.OrderedDict() for elem in root.findall(): name = elem.get() or elem.find().text template = text(elem) api = elem.get() if in elem.attrib: required_types = set((elem.attrib[],)) else: required_types = set() comment = elem.get() if api: k = (name, api) else: k = (name, None) out_dict[k] = Type(name, template, required_types, api, comment) return out_dict
Returns {name: Type}
21,823
def remove_group_from_favorites(self, id):
    path = {}
    data = {}
    params = {}
    path["id"] = id
    self.logger.debug("DELETE /api/v1/users/self/favorites/groups/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("DELETE", "/api/v1/users/self/favorites/groups/{id}".format(**path), data=data, params=params, single_item=True)
Remove group from favorites. Remove a group from the current user's favorites.
21,824
def array_dualmap(ol, value_map_func, **kwargs):
    def get_self(obj):
        return(obj)
    # The kwargs key literals below were stripped during extraction; they are
    # inferred from the variable names they are assigned to.
    if('index_map_func_args' in kwargs):
        index_map_func_args = kwargs['index_map_func_args']
    else:
        index_map_func_args = []
    if('value_map_func_args' in kwargs):
        value_map_func_args = kwargs['value_map_func_args']
    else:
        value_map_func_args = []
    if('index_map_func' in kwargs):
        index_map_func = kwargs['index_map_func']
    else:
        index_map_func = get_self
    length = ol.__len__()
    il = list(range(0, length))
    nil = list(map(lambda ele: index_map_func(ele, *index_map_func_args), il))
    nvl = []
    for i in range(0, length):
        ele = ol[i]
        v = value_map_func(nil[i], ele, *value_map_func_args)
        nvl.append(v)
    return(nvl)
from elist.elist import * ol = ['a','b','c','d'] def index_map_func(index,prefix,suffix): s = prefix +str(index+97)+ suffix return(s) def value_map_func(mapped_index,ele,prefix,suffix): s = prefix+mapped_index+': ' + str(ele) + suffix return(s) #### rslt = array_dualmap2(ol,index_map_func=index_map_func,index_map_func_args=[': ',' is '],value_map_func=value_map_func,value_map_func_args=['ord',' yes?']) pobj(rslt)
21,825
def to_safe(self, word):
    regex = "[^A-Za-z0-9\_"
    if not self.replace_dash_in_groups:
        regex += "\-"
    return re.sub(regex + "]", "_", word)
Converts 'bad' characters in a string to underscores so they can be used as Ansible groups
21,826
def make_tx(version, tx_ins, tx_outs, lock_time, expiry=None, value_balance=0, tx_shielded_spends=None, tx_shielded_outputs=None, tx_witnesses=None, tx_joinsplits=None, joinsplit_pubkey=None, joinsplit_sig=None, binding_sig=None): n = riemann.get_current_network_name() if in n: return tx.DecredTx( version=utils.i2le_padded(version, 4), tx_ins=tx_ins, tx_outs=tx_outs, lock_time=utils.i2le_padded(lock_time, 4), expiry=utils.i2le_padded(expiry, 4), tx_witnesses=[tx_witnesses]) if in n and tx_joinsplits is not None: return tx.SproutTx( version=version, tx_ins=tx_ins, tx_outs=tx_outs, lock_time=utils.i2le_padded(lock_time, 4), tx_joinsplits=tx_joinsplits if tx_joinsplits is not None else [], joinsplit_pubkey=joinsplit_pubkey, joinsplit_sig=joinsplit_sig) if in n: return tx.OverwinterTx( tx_ins=tx_ins, tx_outs=tx_outs, lock_time=utils.i2le_padded(lock_time, 4), expiry_height=utils.i2le_padded(expiry, 4), tx_joinsplits=tx_joinsplits if tx_joinsplits is not None else [], joinsplit_pubkey=joinsplit_pubkey, joinsplit_sig=joinsplit_sig) if in n: return tx.SaplingTx( tx_ins=tx_ins, tx_outs=tx_outs, lock_time=utils.i2le_padded(lock_time, 4), expiry_height=utils.i2le_padded(expiry, 4), value_balance=utils.i2le_padded(value_balance, 8), tx_shielded_spends=(tx_shielded_spends if tx_shielded_spends is not None else []), tx_shielded_outputs=(tx_shielded_outputs if tx_shielded_outputs is not None else []), tx_joinsplits=tx_joinsplits if tx_joinsplits is not None else [], joinsplit_pubkey=joinsplit_pubkey, joinsplit_sig=joinsplit_sig, binding_sig=binding_sig) flag = riemann.network.SEGWIT_TX_FLAG \ if tx_witnesses is not None else None return tx.Tx(version=utils.i2le_padded(version, 4), flag=flag, tx_ins=tx_ins, tx_outs=tx_outs, tx_witnesses=tx_witnesses, lock_time=utils.i2le_padded(lock_time, 4))
int, list(TxIn), list(TxOut), int, list(InputWitness) -> Tx
21,827
def download(self, packageName, versionCode=None, offerType=1, expansion_files=False): if self.authSubToken is None: raise LoginError("You need to login before executing any request") if versionCode is None: appDetails = self.details(packageName).get().get() versionCode = appDetails.get() headers = self.getHeaders() params = {: str(offerType), : packageName, : str(versionCode)} self.log(packageName) response = requests.post(PURCHASE_URL, headers=headers, params=params, verify=ssl_verify, timeout=60, proxies=self.proxies_config) response = googleplay_pb2.ResponseWrapper.FromString(response.content) if response.commands.displayErrorMessage != "": raise RequestError(response.commands.displayErrorMessage) else: dlToken = response.payload.buyResponse.downloadToken return self.delivery(packageName, versionCode, offerType, dlToken, expansion_files=expansion_files)
Download an app and return its raw data (APK file). Free apps need to be "purchased" first, in order to retrieve the download cookie. If you want to download an already purchased app, use *delivery* method. Args: packageName (str): app unique ID (usually starting with 'com.') versionCode (int): version to download offerType (int): different type of downloads (mostly unused for apks) downloadToken (str): download token returned by 'purchase' API progress_bar (bool): whether or not to print a progress bar to stdout Returns Dictionary containing apk data and optional expansion files (see *delivery*)
21,828
def edit_standard_fwl_rules(self, firewall_id, rules): rule_svc = self.client[] template = {: firewall_id, : rules} return rule_svc.createObject(template)
Edit the rules for standard firewall. :param integer firewall_id: the instance ID of the standard firewall :param dict rules: the rules to be pushed on the firewall
21,829
def get_mode(self, gpio):
    res = yield from self._pigpio_aio_command(_PI_CMD_MODEG, gpio, 0)
    return _u2i(res)
Returns the gpio mode. gpio:= 0-53. Returns a value as follows . . 0 = INPUT 1 = OUTPUT 2 = ALT5 3 = ALT4 4 = ALT0 5 = ALT1 6 = ALT2 7 = ALT3 . . ... print(pi.get_mode(0)) 4 ...
21,830
def open_connection(self): self.connection = None base_directory = os.path.dirname(self.metadata_db_path) if not os.path.exists(base_directory): try: os.mkdir(base_directory) except IOError: LOGGER.exception( ) raise try: self.connection = sqlite.connect(self.metadata_db_path) except (OperationalError, sqlite.Error): LOGGER.exception() raise
Open an sqlite connection to the metadata database. By default the metadata database will be used in the plugin dir, unless an explicit path has been set using setmetadataDbPath, or overridden in QSettings. If the db does not exist it will be created. :raises: An sqlite.Error is raised if anything goes wrong
21,831
def _format_args(): try: pretty_format = \ current_app.config[] and \ not request.is_xhr except RuntimeError: pretty_format = False if pretty_format: return dict( indent=2, separators=(, ), ) else: return dict( indent=None, separators=(, ), )
Get JSON dump indentation and separates.
21,832
def get_exif_tags(data, datetime_format=): logger = logging.getLogger(__name__) simple = {} for tag in (, , ): if tag in data: if isinstance(data[tag], tuple): simple[tag] = data[tag][0].strip() else: simple[tag] = data[tag].strip() if in data: fnumber = data[] try: simple[] = float(fnumber[0]) / fnumber[1] except Exception: logger.debug(, fnumber, exc_info=True) if in data: focal = data[] try: simple[] = round(float(focal[0]) / focal[1]) except Exception: logger.debug(, focal, exc_info=True) if in data: exptime = data[] if isinstance(exptime, tuple): try: simple[] = str(fractions.Fraction(exptime[0], exptime[1])) except ZeroDivisionError: logger.info(, exptime) elif isinstance(exptime, int): simple[] = str(exptime) else: logger.info(, exptime) if data.get(): simple[] = data[] if in data: date = data[].rsplit()[0] try: simple[] = datetime.strptime(date, ) simple[] = simple[].strftime(datetime_format) except (ValueError, TypeError) as e: logger.info(, e) if in data: info = data[] lat_info = info.get() lon_info = info.get() lat_ref_info = info.get() lon_ref_info = info.get() if lat_info and lon_info and lat_ref_info and lon_ref_info: try: lat = dms_to_degrees(lat_info) lon = dms_to_degrees(lon_info) except (ZeroDivisionError, ValueError, TypeError): logger.info() else: simple[] = { : - lat if lat_ref_info != else lat, : - lon if lon_ref_info != else lon, } return simple
Make a simplified version with common tags from raw EXIF data.
21,833
def read(filename, file_format=None): assert isinstance(filename, str) if not file_format: file_format = _filetype_from_filename(filename) format_to_reader = { "ansys": ansys_io, "ansys-ascii": ansys_io, "ansys-binary": ansys_io, "gmsh": msh_io, "gmsh-ascii": msh_io, "gmsh-binary": msh_io, "gmsh2": msh_io, "gmsh2-ascii": msh_io, "gmsh2-binary": msh_io, "gmsh4": msh_io, "gmsh4-ascii": msh_io, "gmsh4-binary": msh_io, "med": med_io, "medit": medit_io, "dolfin-xml": dolfin_io, "permas": permas_io, "moab": h5m_io, "off": off_io, "stl": stl_io, "stl-ascii": stl_io, "stl-binary": stl_io, "vtu-ascii": vtu_io, "vtu-binary": vtu_io, "vtk-ascii": vtk_io, "vtk-binary": vtk_io, "xdmf": xdmf_io, "exodus": exodus_io, "abaqus": abaqus_io, "mdpa": mdpa_io, } assert file_format in format_to_reader, "Unknown file format of .".format( file_format, filename ) return format_to_reader[file_format].read(filename)
Reads an unstructured mesh with added data. :param filenames: The files to read from. :type filenames: str :returns mesh{2,3}d: The mesh data.
21,834
def poe_map(self, src, s_sites, imtls, trunclevel, rup_indep=True):
    pmap = ProbabilityMap.build(
        len(imtls.array), len(self.gsims), s_sites.sids,
        initvalue=rup_indep)
    eff_ruptures = 0
    for rup, sctx, dctx in self.gen_rup_contexts(src, s_sites):
        eff_ruptures += 1
        with self.poe_mon:
            pnes = self._make_pnes(rup, sctx, dctx, imtls, trunclevel)
            for sid, pne in zip(sctx.sids, pnes):
                if rup_indep:
                    pmap[sid].array *= pne
                else:
                    pmap[sid].array += (1. - pne) * rup.weight
    if rup_indep:
        pmap = ~pmap
    pmap.eff_ruptures = eff_ruptures
    return pmap
:param src: a source object :param s_sites: a filtered SiteCollection of sites around the source :param imtls: intensity measure and levels :param trunclevel: truncation level :param rup_indep: True if the ruptures are independent :returns: a ProbabilityMap instance
21,835
def encode(arg, delimiter=None, encodeseq=None, encoded=tuple()): arg = coerce_unicode(arg, _c.FSQ_CHARSET) new_arg = sep = u delimiter, encodeseq = delimiter_encodeseq( _c.FSQ_DELIMITER if delimiter is None else delimiter, _c.FSQ_ENCODE if encodeseq is None else encodeseq, _c.FSQ_CHARSET) for enc in encoded: enc = coerce_unicode(enc, _c.FSQ_CHARSET) try: enc = enc.encode() except UnicodeEncodeError: raise FSQEncodeError(errno.EINVAL, u\ u.format(enc)) for seq in arg: if seq == delimiter or seq == encodeseq or seq in _ENCODED + encoded: h_val = hex(ord(seq)) if 3 == len(h_val): h_val = sep.join([h_val[:2], u, h_val[2:]]) if 4 != len(h_val): raise FSQEncodeError(errno.EINVAL, u\ .format(h_val, seq)) seq = sep.join([encodeseq, h_val[2:]]) new_arg = sep.join([new_arg, seq]) return new_arg
Encode a single argument for the file-system
21,836
def bind(context, block=False): if block: def decorate(func): name = func.__name__.replace(, ) if name not in context: context[name] = func return context[name] return decorate def decorate(func): name = func.__name__ if name not in context: context[name] = func return context[name] return decorate
Given the context, returns a decorator wrapper; the binder replaces the wrapped func with the value from the context OR puts this function in the context with the name.
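An illustrative use of the decorator factory above in its non-block form; the context dict and the decorated function name are hypothetical:

context = {}

@bind(context)
def greet():
    return "hello"

print(context["greet"] is greet)  # True; later bindings reuse the stored value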
21,837
def get_item_bank_id_metadata(self): metadata = dict(self._item_bank_id_metadata) metadata.update({: self.my_osid_object_form._my_map[]}) return Metadata(**metadata)
get the metadata for item bank
21,838
def _finish(self):
    if self._process.returncode is None:
        self._process.stdin.flush()
        self._process.stdin.close()
        self._process.wait()
        self.closed = True
Closes and waits for subprocess to exit.
21,839
def run(self):
    salt.utils.process.appendproctitle(self.__class__.__name__)
    halite.start(self.hopts)
Fire up halite!
21,840
def add_error_class(klass, code): if not isinstance(code, python.str_types): code = code.decode() if not isinstance(klass, python.class_types): raise TypeError("klass must be a class type") mro = inspect.getmro(klass) if not Exception in mro: raise TypeError( ) if code in ERROR_CLASS_MAP: raise ValueError( % (code,)) ERROR_CLASS_MAP[code] = klass
Maps an exception class to a string code. Used to map remoting C{onStatus} objects to an exception class so that an exception can be built to represent that error. An example:: >>> class AuthenticationError(Exception): ... pass ... >>> pyamf.add_error_class(AuthenticationError, 'Auth.Failed') >>> print pyamf.ERROR_CLASS_MAP {'TypeError': <type 'exceptions.TypeError'>, 'IndexError': <type 'exceptions.IndexError'>, 'Auth.Failed': <class '__main__.AuthenticationError'>, 'KeyError': <type 'exceptions.KeyError'>, 'NameError': <type 'exceptions.NameError'>, 'LookupError': <type 'exceptions.LookupError'>} @param klass: Exception class @param code: Exception code @type code: C{str} @see: L{remove_error_class}
21,841
def clean(self): super().clean() if ( (self.user is None and not self.anonymous_user) or (self.user and self.anonymous_user) ): raise ValidationError( _(), )
Validates the current instance.
21,842
def _base_get_list(self, url, limit=None, *, query=None, order_by=None, batch=None): if limit is None or limit > self.protocol.max_top_value: batch = self.protocol.max_top_value params = {: batch if batch else limit} if order_by: params[] = order_by if query: if query.has_filters: warnings.warn( ) query.clear_filters() if isinstance(query, str): params[] = query else: params.update(query.as_params()) response = self.con.get(url, params=params) if not response: return iter(()) data = response.json() items = ( self._classifier(item)(parent=self, **{self._cloud_data_key: item}) for item in data.get(, [])) next_link = data.get(NEXT_LINK_KEYWORD, None) if batch and next_link: return Pagination(parent=self, data=items, constructor=self._classifier, next_link=next_link, limit=limit) else: return items
Returns a collection of drive items
21,843
def config_(dev=None, **kwargs): **** if dev is None: spath = _fspath() else: spath = _bcpath(dev) updates = dict([(key, val) for key, val in kwargs.items() if not key.startswith()]) if updates: endres = 0 for key, val in updates.items(): endres += _sysfs_attr([spath, key], val, , .format(os.path.join(spath, key), val)) return endres > 0 else: result = {} data = _sysfs_parse(spath, config=True, internals=True, options=True) for key in (, ): if key in data: del data[key] for key in data: result.update(data[key]) return result
Show or update config of a bcache device. If no device is given, operate on the cache set itself. CLI example: .. code-block:: bash salt '*' bcache.config salt '*' bcache.config bcache1 salt '*' bcache.config errors=panic journal_delay_ms=150 salt '*' bcache.config bcache1 cache_mode=writeback writeback_percent=15 :return: config or True/False
21,844
def serialize_rdf(update_graph, signing): s pretty-xml serialization of update_graph into the "indentical" representation as defined in http://mzl.la/x4XF6o pretty-xmlxmlns:rdfxmlns:RDFrdf:RDF:RDF:aboutabout\n\n\n\n\n'.join(sorted_s) return sorted_s
Tweak rdflib's pretty-xml serialization of update_graph into the "identical" representation as defined in http://mzl.la/x4XF6o
21,845
def execute(self): repo = repository.PickleRepository(self.params.storage_path) clusters = [i[:-7] for i in os.listdir(self.params.storage_path) if i.endswith()] if self.params.cluster: clusters = [x for x in clusters if x in self.params.cluster] if not clusters: print("No clusters") sys.exit(0) patch_cluster() for cluster in clusters: print("Cluster `%s`" % cluster) print("path: %s" % repo.storage_path + % cluster) cl = repo.get(cluster) if cl._patches: print("Attributes changed: ") for attr, val in cl._patches.items(): print(" %s: %s -> %s" % (attr, val[0], val[1])) else: print("No upgrade needed") print("") if not self.params.dry_run: if cl._patches: del cl._patches print("Changes saved to disk") cl.repository.save_or_update(cl)
migrate storage
21,846
def to_text_string(obj, encoding=None):
    if PY2:
        if encoding is None:
            return unicode(obj)
        else:
            return unicode(obj, encoding)
    else:
        if encoding is None:
            return str(obj)
        elif isinstance(obj, str):
            return obj
        else:
            return str(obj, encoding)
Convert `obj` to (unicode) text string
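A usage sketch of the Python 3 branch:

print(to_text_string(42))                       # '42'
print(to_text_string(b'caf\xc3\xa9', 'utf-8'))  # 'café'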
21,847
def format_duration(seconds):
    units, divider = get_time_units_and_multiplier(seconds)
    seconds *= divider
    return "%.3f %s" % (seconds, units)
Formats a number of seconds using the best units.
21,848
def get_bucket(): args = parser.parse_args() bucket = s3_bucket(args.aws_access_key_id, args.aws_secret_access_key, args.bucket_name) for b in bucket.list(): print(.join([i if ord(i) < 128 else for i in b.name]))
Get listing of S3 Bucket
21,849
def generate(input_path, output_path=None, in_memory=False, safe_mode=False,
             error_context=None):
    project = StatikProject(input_path, safe_mode=safe_mode,
                            error_context=error_context)
    return project.generate(output_path=output_path, in_memory=in_memory)
Executes the Statik site generator using the given parameters.
21,850
def getCatalogPixels(self): filenames = self.config.getFilenames() nside_catalog = self.config.params[][] nside_pixel = self.config.params[][] superpix = ugali.utils.skymap.superpixel(self.pixels,nside_pixel,nside_catalog) superpix = np.unique(superpix) pixels = np.intersect1d(superpix, filenames[].compressed()) return pixels
Return the catalog pixels spanned by this ROI.
21,851
def lstm_seq2seq_internal_attention(inputs, targets, hparams, train,
                                    inputs_length, targets_length):
    with tf.variable_scope("lstm_seq2seq_attention"):
        inputs = common_layers.flatten4d3d(inputs)
        inputs = tf.reverse_sequence(inputs, inputs_length, seq_axis=1)
        encoder_outputs, final_encoder_state = lstm(
            inputs, inputs_length, hparams, train, "encoder")
        shifted_targets = common_layers.shift_right(targets)
        targets_length = targets_length + 1
        decoder_outputs = lstm_attention_decoder(
            common_layers.flatten4d3d(shifted_targets), hparams, train,
            "decoder", final_encoder_state, encoder_outputs,
            inputs_length, targets_length)
        return tf.expand_dims(decoder_outputs, axis=2)
LSTM seq2seq model with attention, main step used for training.
21,852
def _is_valid(self, log: Optional[Logger] = None) -> bool: return self._validate(self, log)[0]
Determine whether the current contents are valid
21,853
def get_exif_data(filename): logger = logging.getLogger(__name__) img = _read_image(filename) try: exif = img._getexif() or {} except ZeroDivisionError: logger.warning() return None data = {TAGS.get(tag, tag): value for tag, value in exif.items()} if in data: try: data[] = {GPSTAGS.get(tag, tag): value for tag, value in data[].items()} except AttributeError: logger = logging.getLogger(__name__) logger.info() del data[] return data
Return a dict with the raw EXIF data.
21,854
def _update_self_link(self, link, headers): self.self.props.update(link) self.self.props[] = headers.get( , self.DEFAULT_CONTENT_TYPE) self.self.props
Update the self link of this navigator
21,855
def _init_filename(self, filename=None, ext=None):
    extension = ext or self.default_extension
    filename = self.filename(filename, ext=extension, use_my_ext=True,
                             set_default=True)
    self.real_filename = os.path.realpath(filename)
Initialize the current filename :attr:`FileUtils.real_filename` of the object. Bit of a hack. - The first invocation must have ``filename != None``; this will set a default filename with suffix :attr:`FileUtils.default_extension` unless another one was supplied. - Subsequent invocations either change the filename accordingly or ensure that the default filename is set with the proper suffix.
21,856
def ListingBox(listing, *args, **kwargs):
    "Delegate the boxing to the target's Box class."
    obj = listing.publishable
    return obj.box_class(obj, *args, **kwargs)
Delegate the boxing to the target's Box class.
21,857
def channel_info(self, channel):
    resource = self.RCHANNEL_INFO
    params = {
        self.PCHANNEL: channel,
    }
    response = self._fetch(resource, params)
    return response
Fetch information about a channel.
21,858
def gradient(self, q, t=0.): q = self._remove_units_prepare_shape(q) orig_shape, q = self._get_c_valid_arr(q) t = self._validate_prepare_time(t, q) ret_unit = self.units[] / self.units[]**2 return (self._gradient(q, t=t).T.reshape(orig_shape) * ret_unit).to(self.units[])
Compute the gradient of the potential at the given position(s). Parameters ---------- q : `~gala.dynamics.PhaseSpacePosition`, `~astropy.units.Quantity`, array_like The position to compute the value of the potential. If the input position object has no units (i.e. is an `~numpy.ndarray`), it is assumed to be in the same unit system as the potential. Returns ------- grad : `~astropy.units.Quantity` The gradient of the potential. Will have the same shape as the input position.
21,859
def create_ticket_from_albaran(pk, list_lines): MODEL_SOURCE = SalesAlbaran MODEL_FINAL = SalesTicket url_reverse = msg_error_relation = _("Hay lineas asignadas a ticket") msg_error_not_found = _() msg_error_line_not_found = _() return SalesLines.create_document_from_another(pk, list_lines, MODEL_SOURCE, MODEL_FINAL, url_reverse, msg_error_relation, msg_error_not_found, msg_error_line_not_found, False)
context = {} if list_lines: new_list_lines = SalesLines.objects.filter( pk__in=[int(x) for x in list_lines] ).exclude( invoice__isnull=True ).values_list('pk') if new_list_lines: new_pk = SalesLines.objects.values_list('order__pk').filter(pk__in=new_list_lines).first() if new_pk: context = SalesLines.create_ticket_from_order(new_pk, new_list_lines) return context else: error = _('Pedido no encontrado') else: error = _('Lineas no relacionadas con pedido') else: error = _('Lineas no seleccionadas') context['error'] = error return context
21,860
def connect(self):
    if self.connected:
        return
    pk = self._pk
    if self.exists(pk=pk):
        self._connected = True
    else:
        self._pk = None
        self._connected = False
        raise DoesNotExist("No %s found with pk %s"
                           % (self.__class__.__name__, pk))
Connect the instance to redis by checking the existence of its primary key. Do nothing if already connected.
21,861
def write_string(value, buff, byteorder=): data = value.encode() write_numeric(USHORT, len(data), buff, byteorder) buff.write(data)
Write a string to a file-like object.
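A self-contained sketch of the same length-prefixed layout using only the standard library; struct stands in for the library's write_numeric/USHORT helpers, and UTF-8 plus big-endian byte order are assumptions here:

import io
import struct

def write_string_sketch(value, buff):
    data = value.encode('utf-8')
    buff.write(struct.pack('>H', len(data)))  # unsigned-short length prefix
    buff.write(data)

buff = io.BytesIO()
write_string_sketch('hello', buff)
print(buff.getvalue())  # b'\x00\x05hello'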
21,862
def run(self): import salt.runner self.parse_args() self.setup_logfile_logger() verify_log(self.config) profiling_enabled = self.options.profiling_enabled runner = salt.runner.Runner(self.config) if self.options.doc: runner.print_docs() self.exit(salt.defaults.exitcodes.EX_OK) finally: salt.utils.profile.output_profile( pr, stats_path=self.options.profiling_path, stop=True) except SaltClientError as exc: raise SystemExit(six.text_type(exc))
Execute salt-run
21,863
def extract_status(self, status_headers): self[] = status_headers.get_statuscode() if not self[]: self[] = elif self[] == and in status_headers.statusline: self[] =
Extract status code only from status line
21,864
def NumExpr(ex, signature=(), **kwargs): program, constants, input_names)
Compile an expression built using E.<variable> variables to a function. ex can also be specified as a string "2*a+3*b". The order of the input variables and their types can be specified using the signature parameter, which is a list of (name, type) pairs. Returns a `NumExpr` object containing the compiled function.
21,865
def pwarning(*args, **kwargs):
    if should_msg(kwargs.get("groups", ["warning"])):
        global colorama_init
        if not colorama_init:
            colorama_init = True
            colorama.init()
        args = indent_text(*args, **kwargs)
        sys.stderr.write(colorama.Fore.YELLOW)
        sys.stderr.write("".join(args))
        sys.stderr.write(colorama.Fore.RESET)
        sys.stderr.write("\n")
print formatted output to stderr with indentation control
21,866
def get_holdings(self, account: SEPAAccount): with self._get_dialog() as dialog: hkwpd = self._find_highest_supported_command(HKWPD5, HKWPD6) responses = self._fetch_with_touchdowns( dialog, lambda touchdown: hkwpd( account=hkwpd._fields[].type.from_sepa_account(account), touchdown_point=touchdown, ), ) holdings = [] for resp in responses: if type(resp.holdings) == bytes: holding_str = resp.holdings.decode() else: holding_str = resp.holdings mt535_lines = str.splitlines(holding_str) del mt535_lines[0] mt535 = MT535_Miniparser() holdings.extend(mt535.parse(mt535_lines)) if not holdings: logger.debug() return holdings
Retrieve holdings of an account. :param account: SEPAAccount to retrieve holdings for. :return: List of Holding objects
21,867
def parse(json, query_path, expected_vars=NO_VARS): if hasattr(json, "read"): temp = json def get_more(): return temp.read(MIN_READ_SIZE) json = List_usingStream(get_more) elif hasattr(json, "__call__"): json = List_usingStream(json) elif isinstance(json, GeneratorType): json = List_usingStream(json.next) else: Log.error("Expecting json to be a stream, or a function that will return more bytes") def _iterate_list(index, c, parent_path, path, expected_vars): c, index = skip_whitespace(index) if c == b: yield index return while True: if not path: index = _assign_token(index, c, expected_vars) c, index = skip_whitespace(index) if c == b: yield index _done(parent_path) return elif c == b: yield index c, index = skip_whitespace(index) else: for index in _decode_token(index, c, parent_path, path, expected_vars): c, index = skip_whitespace(index) if c == b: yield index _done(parent_path) return elif c == b: yield index c, index = skip_whitespace(index) def _done(parent_path): if len(parent_path) < len(done[0]): done[0] = parent_path def _decode_object(index, c, parent_path, query_path, expected_vars): if "." in expected_vars: if len(done[0]) <= len(parent_path) and all(d == p for d, p in zip(done[0], parent_path)): Log.error("Can not pick up more variables, iterator is done") if query_path: Log.error("Can not extract objects that contain the iteration", var=join_field(query_path)) index = _assign_token(index, c, expected_vars) yield index return did_yield = False while True: c, index = skip_whitespace(index) if c == b: continue elif c == b: name, index = simple_token(index, c) c, index = skip_whitespace(index) if c != b: Log.error("Expecting colon") c, index = skip_whitespace(index) child_expected = needed(name, expected_vars) child_path = parent_path + [name] if any(child_expected): if not query_path: index = _assign_token(index, c, child_expected) elif query_path[0] == name: for index in _decode_token(index, c, child_path, query_path[1:], child_expected): did_yield = True yield index else: if len(done[0]) <= len(child_path): Log.error("Can not pick up more variables, iterator over {{path}} is done", path=join_field(done[0])) index = _assign_token(index, c, child_expected) elif query_path and query_path[0] == name: for index in _decode_token(index, c, child_path, query_path[1:], child_expected): yield index else: index = jump_to_end(index, c) elif c == b"}": if not did_yield: yield index break def set_destination(expected_vars, value): for i, e in enumerate(expected_vars): if e is None: pass elif e == ".": destination[i] = value elif is_data(value): destination[i] = value[e] else: destination[i] = Null def _decode_object_items(index, c, parent_path, query_path, expected_vars): c, index = skip_whitespace(index) num_items = 0 while True: if c == b: c, index = skip_whitespace(index) elif c == b: name, index = simple_token(index, c) if "name" in expected_vars: for i, e in enumerate(expected_vars): if e == "name": destination[i] = name c, index = skip_whitespace(index) if c != b: Log.error("Expecting colon") c, index = skip_whitespace(index) child_expected = needed("value", expected_vars) index = _assign_token(index, c, child_expected) c, index = skip_whitespace(index) DEBUG and not num_items % 1000 and Log.note("{{num}} items iterated", num=num_items) yield index num_items += 1 elif c == b"}": break def _decode_token(index, c, parent_path, query_path, expected_vars): if c == b: if query_path and query_path[0] == "$items": if any(expected_vars): for index in _decode_object_items(index, c, 
parent_path, query_path[1:], expected_vars): yield index else: index = jump_to_end(index, c) yield index elif not any(expected_vars): index = jump_to_end(index, c) yield index else: for index in _decode_object(index, c, parent_path, query_path, expected_vars): yield index elif c == b: for index in _iterate_list(index, c, parent_path, query_path, expected_vars): yield index else: index = _assign_token(index, c, expected_vars) yield index def _assign_token(index, c, expected_vars): if not any(expected_vars): return jump_to_end(index, c) value, index = simple_token(index, c) set_destination(expected_vars, value) return index def jump_to_end(index, c): if c == b: while True: c = json[index] index += 1 if c == b: index += 1 elif c == b: break return index elif c not in b"[{": while True: c = json[index] index += 1 if c in b: break return index - 1 stack = [None] * 1024 stack[0] = CLOSE[c] i = 0 while True: c = json[index] index += 1 if c == b: while True: c = json[index] index += 1 if c == b: index += 1 elif c == b: break elif c in b: i += 1 stack[i] = CLOSE[c] elif c == stack[i]: i -= 1 if i == -1: return index elif c in b: Log.error("expecting {{symbol}}", symbol=stack[i]) def simple_token(index, c): if c == b: json.mark(index - 1) while True: c = json[index] index += 1 if c == b"\\": index += 1 elif c == b: break return json_decoder(json.release(index).decode("utf8")), index elif c in b"{[": json.mark(index-1) index = jump_to_end(index, c) value = wrap(json_decoder(json.release(index).decode("utf8"))) return value, index elif c == b"t" and json.slice(index, index + 3) == b"rue": return True, index + 3 elif c == b"n" and json.slice(index, index + 3) == b"ull": return None, index + 3 elif c == b"f" and json.slice(index, index + 4) == b"alse": return False, index + 4 else: json.mark(index-1) while True: c = json[index] if c in b: break index += 1 text = json.release(index) try: return float(text), index except Exception: Log.error("Not a known JSON primitive: {{text|quote}}", text=text) def skip_whitespace(index): c = json[index] while c in WHITESPACE: index += 1 c = json[index] return c, index + 1 if is_data(query_path) and query_path.get("items"): path_list = split_field(query_path.get("items")) + ["$items"] else: path_list = split_field(query_path) destination = [None] * len(expected_vars) c, index = skip_whitespace(0) done = [path_list + [None]] for _ in _decode_token(index, c, [], path_list, expected_vars): output = Data() for i, e in enumerate(expected_vars): output[e] = destination[i] yield output
INTENDED TO TREAT JSON AS A STREAM; USING MINIMAL MEMORY WHILE IT ITERATES THROUGH THE STRUCTURE. ASSUMING THE JSON IS LARGE, AND HAS A HIGH LEVEL ARRAY STRUCTURE, IT WILL yield EACH OBJECT IN THAT ARRAY. NESTED ARRAYS ARE HANDLED BY REPEATING THE PARENT PROPERTIES FOR EACH MEMBER OF THE NESTED ARRAY. DEEPER NESTED PROPERTIES ARE TREATED AS PRIMITIVE VALUES; THE STANDARD JSON DECODER IS USED. LARGE MANY-PROPERTY OBJECTS CAN BE HANDLED BY `items()` :param json: SOME STRING-LIKE STRUCTURE THAT CAN ASSUME WE LOOK AT ONE CHARACTER AT A TIME, IN ORDER :param query_path: A DOT-SEPARATED STRING INDICATING THE PATH TO THE NESTED ARRAY OPTIONALLY, {"items":query_path} TO FURTHER ITERATE OVER PROPERTIES OF OBJECTS FOUND AT query_path :param expected_vars: REQUIRED PROPERTY NAMES, USED TO DETERMINE IF MORE-THAN-ONE PASS IS REQUIRED :return: RETURNS AN ITERATOR OVER ALL OBJECTS FROM ARRAY LOCATED AT query_path
21,868
def start(check_time: int = 500) -> None:
    io_loop = ioloop.IOLoop.current()
    if io_loop in _io_loops:
        return
    _io_loops[io_loop] = True
    if len(_io_loops) > 1:
        gen_log.warning("tornado.autoreload started more than once in the same process")
    modify_times = {}
    callback = functools.partial(_reload_on_update, modify_times)
    scheduler = ioloop.PeriodicCallback(callback, check_time)
    scheduler.start()
Begins watching source files for changes. .. versionchanged:: 5.0 The ``io_loop`` argument (deprecated since version 4.1) has been removed.
21,869
async def run_task(self) -> None: while self.running: try: item = self.QUEUE.get_nowait() Log.debug(, self.name) await self.run(item) Log.debug(, self.name) self.QUEUE.task_done() except asyncio.QueueEmpty: if self.OPEN: await self.sleep(0.05) else: Log.debug(, self.name) return except CancelledError: Log.debug() self.QUEUE.task_done() raise except Exception: Log.exception(, self.name) self.QUEUE.task_done()
Initialize the queue and spawn extra worker tasks if this if the first task. Then wait for work items to enter the task queue, and execute the `run()` method with the current work item.
21,870
def profile_write(self, profile, outfile=None): if outfile is None: outfile = .format(profile.get().replace(, ).lower()) fqpn = os.path.join(self.profile_dir, outfile) if os.path.isfile(fqpn): print(.format(c.Style.BRIGHT, c.Fore.CYAN, fqpn)) with open(fqpn, ) as fh: try: data = json.load(fh, object_pairs_hook=OrderedDict) except ValueError as e: self.handle_error(.format(e)) data.append(profile) fh.seek(0) fh.write(json.dumps(data, indent=2, sort_keys=True)) fh.truncate() else: print(.format(c.Style.BRIGHT, c.Fore.CYAN, fqpn)) with open(fqpn, ) as fh: data = [profile] fh.write(json.dumps(data, indent=2, sort_keys=True))
Write the profile to the output directory. Args: profile (dict): The dictionary containting the profile settings. outfile (str, optional): Defaults to None. The filename for the profile.
21,871
def cleanup(self):
    "Purpose: Frees the GL resources for a render model"
    if self.m_glVertBuffer != 0:
        glDeleteBuffers(1, (self.m_glIndexBuffer,))
        glDeleteVertexArrays(1, (self.m_glVertArray,))
        glDeleteBuffers(1, (self.m_glVertBuffer,))
        self.m_glIndexBuffer = 0
        self.m_glVertArray = 0
        self.m_glVertBuffer = 0
Purpose: Frees the GL resources for a render model
21,872
def _read_configfile(self): rc = self.config_filename if not os.path.isabs(rc): rc = os.path.join(os.path.expanduser(), self.config_filename) files = [f for f in [rc, ] if os.path.exists(f)] if not files: self.config = None return self.config = ConfigParser() self.config.read(files) if (not self.is_old_pypi_config() and not self.is_new_pypi_config()): self.config = None
Read the config file and store it (when valid)
21,873
def render_context_with_title(self, context):
    if "page_title" not in context:
        con = template.Context(context)
        temp = template.Template(encoding.force_text(self.page_title))
        context["page_title"] = temp.render(con)
    return context
Render a page title and insert it into the context. This function takes in a context dict and uses it to render the page_title variable. It then appends this title to the context using the 'page_title' key. If there is already a page_title key defined in context received then this function will do nothing.
21,874
def account_setup(remote, token=None, response=None, account_setup=None): gh = GitHubAPI(user_id=token.remote_account.user_id) with db.session.begin_nested(): gh.init_account() oauth_link_external_id( token.remote_account.user, dict(id=str(gh.account.extra_data[]), method="github") )
Setup user account.
21,875
def truepath_relative(path, otherpath=None):
    if otherpath is None:
        otherpath = os.getcwd()
    otherpath = truepath(otherpath)
    path_ = normpath(relpath(path, otherpath))
    return path_
Normalizes and returns absolute path with so specs Args: path (str): path to file or directory otherpath (None): (default = None) Returns: str: path_ CommandLine: python -m utool.util_path --exec-truepath_relative --show Example: >>> # ENABLE_DOCTEST >>> from utool.util_path import * # NOQA >>> import utool as ut >>> path = 'C:/foobar/foobiz' >>> otherpath = 'C:/foobar' >>> path_ = truepath_relative(path, otherpath) >>> result = ('path_ = %s' % (ut.repr2(path_),)) >>> print(result) path_ = 'foobiz'
21,876
def get_publication_date(self, **kwargs): date_string = kwargs.get(, ) date_match = CREATION_DATE_REGEX.match(date_string) month_match = CREATION_MONTH_REGEX.match(date_string) year_match = CREATION_YEAR_REGEX.match(date_string) if date_match: (year, month, day) = date_match.groups() try: creation_date = datetime.date(int(year), int(month), int(day)) except ValueError: return None else: return % ( format_date_string(creation_date.month), format_date_string(creation_date.day), creation_date.year, ) elif month_match: (year, month) = month_match.groups() try: creation_date = datetime.date(int(year), int(month), 1) except ValueError: return None else: return % ( format_date_string(creation_date.month), creation_date.year, ) elif year_match: year = year_match.groups()[0] return year else: return None
Determine the creation date for the publication date.
21,877
def post_values(self, values): return self.api.post(self.subpath(), data={ : values })
Method for `Post Data Stream Values <https://m2x.att.com/developer/documentation/v2/device#Post-Data-Stream-Values>`_ endpoint. :param values: Values to post, see M2X API docs for details :type values: dict :return: The API response, see M2X API docs for details :rtype: dict :raises: :class:`~requests.exceptions.HTTPError` if an error occurs when sending the HTTP request
21,878
def readline(self, timeout=1): data = None if self.read_thread: data = self.read_thread.readline() if data and self.__print_io: self.logger.info(data, extra={: }) return data
Readline implementation. :param timeout: Timeout, not used :return: Line read or None
21,879
def sample_less_than_condition(choices_in, condition):
    output = np.zeros(min(condition.shape[0], choices_in.shape[0]))
    choices = copy.deepcopy(choices_in)
    for i, _ in enumerate(output):
        avail_inds = np.where(choices < condition[i])[0]
        selected_ind = np.random.choice(avail_inds)
        output[i] = choices[selected_ind]
        choices = np.delete(choices, selected_ind)
    return output
Creates a random sample from choices without replacement, subject to the condition that each element of the output is less than the corresponding element of the condition array. condition should be in ascending order.
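A small usage sketch (NumPy assumed; the condition array is ascending and every entry admits at least one remaining choice below it, otherwise np.random.choice raises):

import numpy as np
choices = np.array([0.1, 0.4, 0.7, 0.9])
condition = np.array([0.5, 0.8, 1.0])
print(sample_less_than_condition(choices, condition))
# e.g. [0.4 0.7 0.9] -- each entry is below the matching condition entry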
21,880
def write(self, output=None): if not output: outfile = self[]+ output = self[] else: outfile = output if os.path.exists(outfile): warningmsg = "\n warningmsg += " warningmsg += " warningmsg += " warningmsg += " " + str(outfile) + warningmsg += " warningmsg += " warningmsg += " fasn = fits.HDUList() _maxlen = 0 for _fname in self[]: if len(_fname) > _maxlen: _maxlen = len(_fname) if _maxlen < 24: _maxlen = 24 namelen_str = str(_maxlen+2)+ self.buildPrimary(fasn, output=output) mname = self[][:] mname.append(output) mtype = [ for l in self[]] mtype.append() mprsn = [True for l in self[]] mprsn.append(False) xoff = [self[][l][] for l in self[]] xoff.append(0.0) yoff = [self[][l][] for l in self[]] yoff.append(0.0) xsh = [self[][l][] for l in self[]] xsh.append(0.0) ysh = [self[][l][] for l in self[]] ysh.append(0.0) rot = [self[][l][] for l in self[]] rot.append(0.0) scl = [self[][l][] for l in self[]] scl.append(1.0) memname = fits.Column(name=,format=namelen_str,array=N.char.array(mname)) memtype = fits.Column(name=,format=,array=N.char.array(mtype)) memprsn = fits.Column(name=, format=, array=N.array(mprsn).astype(N.uint8)) xoffset = fits.Column(name=, format=, array=N.array(xoff)) yoffset = fits.Column(name=, format=, array=N.array(yoff)) xdelta = fits.Column(name=, format=, array=N.array(xsh)) ydelta = fits.Column(name=, format=, array=N.array(ysh)) rotation = fits.Column(name=, format=, array=N.array(rot)) scale = fits.Column(name=, format=, array=N.array(scl)) cols = fits.ColDefs([memname,memtype,memprsn,xoffset,yoffset,xdelta,ydelta,rotation,scale]) hdu = fits.BinTableHDU.from_columns(cols) fasn.append(hdu) if ASTROPY_VER_GE13: fasn.writeto(outfile, overwrite=True) else: fasn.writeto(outfile, clobber=True) fasn.close() mem0 = self[][0] refimg = self[][mem0][] if refimg is not None: whdu = wcsutil.WCSObject(refimg) whdu.createReferenceWCS(outfile,overwrite=False) ftab = fits.open(outfile) ftab[].header[] = outfile+"[wcs]" ftab.close() del whdu
Write association table to a file.
21,881
def input(self, data): self.buf += data while len(self.buf) > HEADER_SIZE: data_len = struct.unpack(, self.buf[0:HEADER_SIZE])[0] if len(self.buf) >= data_len + HEADER_SIZE: content = self.buf[HEADER_SIZE:data_len + HEADER_SIZE] self.buf = self.buf[data_len + HEADER_SIZE:] yield content else: break
Stitch small data fragments together into complete packets; whenever enough content has been buffered, yield the packet.
21,882
def guess_external_url(local_host, port): if local_host in [, ]:
Return a URL that is most likely to route to `local_host` from outside. The point is that we may be running on a remote host from the user's point of view, so they can't access `local_host` from a Web browser just by typing ``http://localhost:12345/``.
21,883
def join(self, word_blocks, float_part): word_list = [] length = len(word_blocks) - 1 first_block = word_blocks[0], start = 0 if length == 1 and first_block[0][0] == : word_list += [] start = 1 for i in range(start, length + 1, 1): word_list += word_blocks[i][1] if not word_blocks[i][1]: continue if i == length: break word_list += [self.TENS_TO[(length - i) * 3]] return .join(word_list) + float_part
join the words by first join lists in the tuple :param word_blocks: tuple :rtype: str
21,884
def apply_augments(self, auglist, p_elem, pset):
    for a in auglist:
        par = a.parent
        if a.search_one("when") is None:
            wel = p_elem
        else:
            if p_elem.interleave:
                kw = "interleave"
            else:
                kw = "group"
            wel = SchemaNode(kw, p_elem, interleave=p_elem.interleave)
            wel.occur = p_elem.occur
        if par.keyword == "uses":
            self.handle_substmts(a, wel, pset)
            continue
        if par.keyword == "submodule":
            mnam = par.i_including_modulename
        else:
            mnam = par.arg
        if self.prefix_stack[-1] == self.module_prefixes[mnam]:
            self.handle_substmts(a, wel, pset)
        else:
            self.prefix_stack.append(self.module_prefixes[mnam])
            self.handle_substmts(a, wel, pset)
            self.prefix_stack.pop()
Handle substatements of augments from `auglist`. The augments are applied in the context of `p_elem`. `pset` is a patch set containing patches that may be applicable to descendants.
21,885
def logged_api_call(func): module = inspect.getmodule(func) if not inspect.isfunction(func) or not hasattr(module, ): raise TypeError("The @logged_api_call decorator must be used on a " "function or method (and not on top of the @property " "decorator)") try: this_frame = inspect.currentframe() apifunc_frame = this_frame.f_back apifunc_owner = inspect.getframeinfo(apifunc_frame)[2] finally: del this_frame del apifunc_frame if apifunc_owner == : apifunc_str = .format(func=func.__name__) else: apifunc_str = .format(owner=apifunc_owner, func=func.__name__) logger = get_logger(API_LOGGER_NAME) def is_external_call(): try: log_it_frame = inspect.currentframe() log_api_call_frame = log_it_frame.f_back apifunc_frame = log_api_call_frame.f_back apicaller_frame = apifunc_frame.f_back apicaller_module = inspect.getmodule(apicaller_frame) if apicaller_module is None: apicaller_module_name = "<unknown>" else: apicaller_module_name = apicaller_module.__name__ finally: del log_it_frame del log_api_call_frame del apifunc_frame del apicaller_frame del apicaller_module return apicaller_module_name.split()[0] != def log_api_call(func, *args, **kwargs): _log_it = is_external_call() and logger.isEnabledFor(logging.DEBUG) if _log_it: logger.debug("Called: {}, args: {:.500}, kwargs: {:.500}". format(apifunc_str, log_escaped(repr(args)), log_escaped(repr(kwargs)))) result = func(*args, **kwargs) if _log_it: logger.debug("Return: {}, result: {:.1000}". format(apifunc_str, log_escaped(repr(result)))) return result if in globals(): return decorate(func, log_api_call) else: return decorator(log_api_call, func)
Function decorator that causes the decorated API function or method to log calls to itself to a logger. The logger's name is the dotted module name of the module defining the decorated function (e.g. 'zhmcclient._cpc'). Parameters: func (function object): The original function being decorated. Returns: function object: The function wrappering the original function being decorated. Raises: TypeError: The @logged_api_call decorator must be used on a function or method (and not on top of the @property decorator).
21,886
def attempt_file_write(
        path: str,
        contents: typing.Union[str, bytes],
        mode: str = 'w',  # default literal stripped in the source; 'w' assumed
        offset: int = 0
) -> typing.Union[None, Exception]:
    try:
        data = contents.encode()
    except Exception:
        data = contents
    if offset > 0:
        with open(path, 'rb') as f:  # read mode stripped in the source; binary read assumed
            existing = f.read(offset)
    else:
        existing = None
    append = 'a' in mode  # membership literal stripped in the source; 'a' assumed
    write_mode = 'wb' if offset > 0 or not append else 'ab'  # literals stripped; binary modes assumed
    try:
        with open(path, write_mode) as f:
            if existing is not None:
                f.write(existing)
            f.write(data)
        return None
    except Exception as error:
        return error
Attempts to write the specified contents to a file and returns None if successful, or the raised exception if writing failed.

:param path: The path to the file that will be written
:param contents: The contents of the file to write
:param mode: The mode in which the file will be opened when written
:param offset: The byte offset in the file where the contents should be written. If the value is zero, the offset information will be ignored and the operation will write entirely based on mode. Note that if you indicate an append write mode and an offset, the mode will be forced to write instead of append.
:return: None if the write operation succeeded. Otherwise, the exception that was raised by the failed write action.
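A usage sketch of the helper above; the file path is illustrative.

error = attempt_file_write('/tmp/example.txt', 'hello world')
if error is not None:
    print('write failed:', error)

# Overwrite bytes starting at offset 5 while keeping the first 5 bytes:
# because an offset is given, the append mode is forced to a write.
error = attempt_file_write('/tmp/example.txt', b' there', mode='a', offset=5)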
21,887
def close(self):
    conn = self.connection
    if conn is None:
        return
    try:
        while self.nextset():
            pass
    finally:
        self.connection = None
Closing a cursor just exhausts all remaining data.
21,888
def _find_base_tds_url(catalog_url):
    url_components = urlparse(catalog_url)
    if url_components.path:
        return catalog_url.split(url_components.path)[0]
    else:
        return catalog_url
Identify the base URL of the THREDDS server from the catalog URL. Will retain URL scheme, host, port and username/password when present.
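An illustration of the behaviour described above, assuming the helper and its urlparse import are in scope; the catalog URL is just an example.

catalog = 'https://thredds.ucar.edu/thredds/catalog/grib/catalog.xml'
print(_find_base_tds_url(catalog))                      # https://thredds.ucar.edu
print(_find_base_tds_url('https://thredds.ucar.edu'))   # unchanged: no path component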
21,889
def __do_query_into_hash(conn, sql_str):
    mod = sys._getframe().f_code.co_name
    log.debug('%s<--(%s)', mod, sql_str)  # format string stripped in the source; placeholder assumed
    rtn_results = []
    try:
        cursor = conn.cursor()
    except MySQLdb.MySQLError:
        # The original log messages ("... get cursor for SQL->%s",
        # "try to execute : SQL->%s", "%s-->") are garbled in the source;
        # this call is a loose reconstruction.
        log.error("%s: can't get cursor for SQL->%s", mod, sql_str)
        return rtn_results
    # The remainder of the body (executing sql_str and collecting the rows
    # into dicts) appears to be truncated in the source.
    return rtn_results
Perform the query that is passed to it (sql_str). Returns: results in a dict.
21,890
def _CronJobFromRow(self, row):
    (job, create_time, enabled, forced_run_requested, last_run_status,
     last_run_time, current_run_id, state, leased_until, leased_by) = row
    job = rdf_cronjobs.CronJob.FromSerializedString(job)
    job.current_run_id = db_utils.IntToCronJobRunID(current_run_id)
    job.enabled = enabled
    job.forced_run_requested = forced_run_requested
    job.last_run_status = last_run_status
    job.last_run_time = mysql_utils.TimestampToRDFDatetime(last_run_time)
    if state:
        job.state = rdf_protodict.AttributedDict.FromSerializedString(state)
    job.created_at = mysql_utils.TimestampToRDFDatetime(create_time)
    job.leased_until = mysql_utils.TimestampToRDFDatetime(leased_until)
    job.leased_by = leased_by
    return job
Creates a cronjob object from a database result row.
21,891
def checks(similarities, verbose=False):
    if similarities.size == 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: the similarities "
                         "matrix provided as input happens to be empty.\n")
    elif np.where(np.isnan(similarities))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: input similarities "
                         "matrix contains at least one NaN entry.\n")  # 'NaN' restored from the isnan check
    elif np.where(np.isinf(similarities))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: at least one infinite entry "
                         "detected in input similarities matrix.\n")
    else:
        if np.where(np.logical_not(np.isreal(similarities)))[0].size != 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: complex entries found "
                      "in the similarities matrix.")
            similarities = similarities.real
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: "
                      "truncated to their real components.")
        if similarities.shape[0] != similarities.shape[1]:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: non-square matrix provided.")
            N_square = min(similarities.shape)
            similarities = similarities[:N_square, :N_square]
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: using largest square sub-matrix.")
        max_sim = np.amax(similarities)
        min_sim = np.amin(similarities)
        if max_sim > 1 or min_sim < 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: strictly negative "
                      "or bigger than unity entries spotted in input similarities matrix.")
            indices_too_big = np.where(similarities > 1)
            indices_negative = np.where(similarities < 0)
            similarities[indices_too_big] = 1.0
            similarities[indices_negative] = 0.0
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: done setting them to "
                      "the lower or upper accepted values.")
        if not np.allclose(similarities, np.transpose(similarities)):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: non-symmetric input "
                      "similarities matrix.")
            similarities = np.divide(similarities + np.transpose(similarities), 2.0)
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: now symmetrized.")
        if not np.allclose(np.diag(similarities), np.ones(similarities.shape[0])):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: the self-similarities "
                      "provided as input are not all of unit value.")
            similarities[np.diag_indices(similarities.shape[0])] = 1
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: issue corrected.")
Check that a matrix is a proper similarity matrix and apply appropriate corrections where applicable.

Parameters
----------
similarities : array of shape (n_samples, n_samples)
    A matrix of pairwise similarities between (sub)-samples of the data-set.

verbose : Boolean, optional (default = False)
    Alerts the user to any issue with the similarities matrix provided and to any step possibly taken to remediate such a problem.
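A usage sketch for the validation above; the matrix values are made up. Empty, NaN or infinite input raises ValueError, while other issues are reported (and corrected) when verbose is set.

import numpy as np

sims = np.array([[1.0, 0.8],
                 [0.7, 1.0]])       # slightly non-symmetric on purpose
checks(sims, verbose=True)          # reports the asymmetry and symmetrizes internally

bad = np.array([[1.0, np.nan],
                [np.nan, 1.0]])
# checks(bad)  # would raise ValueError because of the NaN entries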
21,892
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    log.trace('Retrieving new service instance')  # message stripped in the source; placeholder
    token = None
    if mechanism == 'userpass':  # literal stripped in the source; 'userpass' assumed
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'A username is required when the \'userpass\' mechanism is used')  # message garbled in the source; loose reconstruction
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'A password is required when the \'userpass\' mechanism is used')  # message garbled in the source; loose reconstruction
    elif mechanism == 'sspi':  # literal stripped in the source; 'sspi' assumed from the GSSAPI token branch
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = ('Principal and domain are required when the \'{0}\' '
                       'mechanism is used'.format(mechanism))  # message garbled in the source; loose reconstruction
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))  # message garbled in the source; loose reconstruction
    try:
        log.trace('Connecting with mechanism \'%s\' as user \'%s\'',
                  mechanism, username)  # message garbled in the source; loose reconstruction
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        if 'unexpected keyword argument' in exc.message:  # tested substring stripped in the source; assumed
            # The three error messages below were stripped in the source;
            # they are loose reconstructions pointing at an outdated pyVmomi.
            log.error('Initial connection failed: %s', exc.message)
            log.error('This may indicate an older pyVmomi version is installed.')
            log.error('Consider upgrading pyVmomi.')
        raise
    except Exception as exc:
        if 'certificate verify failed' in six.text_type(exc):  # tested substring stripped in the source; assumed
            context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
            context.verify_mode = ssl.CERT_NONE
            try:
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=context,
                    b64token=token,
                    mechanism=mechanism
                )
            except Exception as exc:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                raise salt.exceptions.VMwareConnectionError(
                    'Could not connect to host \'{0}\': {1}'
                    .format(host, err_msg))  # message garbled in the source; loose reconstruction
        else:
            # The original assigns default_msg from a message string that was
            # stripped in the source; a generic placeholder is used here.
            default_msg = 'Could not connect to host \'{0}\''.format(host)
            err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
            log.trace(exc)
            raise salt.exceptions.VMwareConnectionError(err_msg)
    atexit.register(Disconnect, service_instance)
    return service_instance
Internal method to authenticate with a vCenter server or ESX/ESXi host and return the service instance object.
21,893
def get_vardict_command(data):
    vcaller = dd.get_variantcaller(data)
    if isinstance(vcaller, list):
        vardict = [x for x in vcaller if "vardict" in x]
        if not vardict:
            return None
        vardict = vardict[0]
    elif not vcaller:
        return None
    else:
        vardict = vcaller
    vardict = "vardict-java" if not vardict.endswith("-perl") else "vardict"
    return vardict
Convert a variantcaller specification to the proper vardict command, handling string or list specifications.
21,894
def find_action(self, action_name):
    for service in self.services:
        action = service.find_action(action_name)
        if action is not None:
            return action
Find an action by name. Convenience method that searches through all the services offered by the Server for an action and returns an Action instance. If the action is not found, returns None. If multiple actions with the same name are found it returns the first one.
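A hypothetical usage sketch; the server object and the action name are illustrative only.

action = server.find_action('GetExternalIPAddress')
if action is None:
    print('no service on this server offers that action')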
21,895
def get_photo_url(photo_id):
    args = _get_request_args(
        'flickr.photos.getSizes',  # method name stripped in the source; assumed from the docstring
        photo_id=photo_id
    )
    resp = requests.post(API_URL, data=args)
    resp_json = json.loads(resp.text.encode())
    logger.debug(json.dumps(resp_json, indent=2))
    size_list = resp_json['sizes']['size']  # keys stripped in the source; Flickr getSizes layout assumed
    size_list_len = len(size_list)
    global image_size_mode
    image_size_mode = size_list_len if size_list_len < image_size_mode \
        else image_size_mode
    download_url = resp_json['sizes']['size'][-image_size_mode]['source']  # keys stripped in the source; assumed
    return download_url
Request the photo download url with the photo id

:param photo_id: The photo id of flickr
:type photo_id: str
:return: Photo download url
:rtype: str
21,896
def verify(backup_path, fast):
    from PyHardLinkBackup.phlb.verify import verify_backup
    verify_backup(backup_path, fast)
Verify an existing backup
21,897
def fit(self, X, y=None, **kwargs):
    self.base_n_encoder.fit(X, y, **kwargs)
    return self
Fit encoder according to X and y.

Parameters
----------
X : array-like, shape = [n_samples, n_features]
    Training vectors, where n_samples is the number of samples and n_features is the number of features.
y : array-like, shape = [n_samples]
    Target values.

Returns
-------
self : encoder
    Returns self.
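A minimal self-contained sketch of the fit/transform contract described above: returning self from fit() is what makes fit(...).transform(...) chaining work. The tiny encoder here is a stand-in, not the class from this library.

class TinyEncoder:
    def fit(self, X, y=None):
        self.categories_ = sorted(set(X))    # learn the category vocabulary
        return self                          # returning self enables chaining

    def transform(self, X):
        index = {c: i for i, c in enumerate(self.categories_)}
        return [index[x] for x in X]

print(TinyEncoder().fit(['b', 'a', 'b']).transform(['a', 'b']))  # [0, 1]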
21,898
def num2term(num, fs, conj=False):
    if not isinstance(num, int):
        fstr = "expected num to be an int, got {0.__name__}"
        raise TypeError(fstr.format(type(num)))
    n = len(fs)
    if not 0 <= num < 2**n:
        fstr = "expected num to be in range [0, {}), got {}"
        raise ValueError(fstr.format(2**n, num))
    if conj:
        return tuple(~f if bit_on(num, i) else f for i, f in enumerate(fs))
    else:
        return tuple(f if bit_on(num, i) else ~f for i, f in enumerate(fs))
Convert *num* into a min/max term in an N-dimensional Boolean space.

The *fs* argument is a sequence of :math:`N` Boolean functions. There are :math:`2^N` points in the corresponding Boolean space. The dimension number of each function is its index in the sequence.

The *num* argument is an int in range :math:`[0, 2^N)`.

If *conj* is ``False``, return a minterm. Otherwise, return a maxterm.

For example, consider the 3-dimensional space formed by functions :math:`f`, :math:`g`, :math:`h`. Each vertex of the cube spanned by :math:`f`, :math:`g`, :math:`h` corresponds to a min/max term as summarized by the table::

    ===== ======= ========== ==========
     num   f g h   minterm    maxterm
    ===== ======= ========== ==========
      0    0 0 0   f' g' h'   f  g  h
      1    1 0 0   f  g' h'   f' g  h
      2    0 1 0   f' g  h'   f  g' h
      3    1 1 0   f  g  h'   f' g' h
      4    0 0 1   f' g' h    f  g  h'
      5    1 0 1   f  g' h    f' g  h'
      6    0 1 1   f' g  h    f  g' h'
      7    1 1 1   f  g  h    f' g' h'
    ===== ======= ========== ==========

.. note:: The ``f g h`` column is the binary representation of *num* written in little-endian order.
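A worked illustration of the numbering above, using plain strings instead of Boolean function objects so it runs standalone; bit_on is re-implemented here just for the example.

def bit_on(num, bit):
    return (num >> bit) & 1

fs = ['f', 'g', 'h']
num = 5                              # binary 101, little-endian: f=1, g=0, h=1
minterm = tuple(f if bit_on(num, i) else f + "'" for i, f in enumerate(fs))
print(minterm)                       # ('f', "g'", 'h') -- matches row 5 of the table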
21,899
def predecessors(self, node):
    if isinstance(node, int):
        warnings.warn('Calling predecessors() with a node id is deprecated; '
                      'use a DAGNode instead.',
                      DeprecationWarning, 2)  # warning message stripped in the source; loose reconstruction
        node = self._id_to_node[node]
    return self._multi_graph.predecessors(node)
Returns list of the predecessors of a node as DAGNodes.