def create_tar_file(self, full_archive=False):
    tar_file_name = os.path.join(self.archive_tmp_dir, self.archive_name)
    ext = "" if self.compressor == "none" else ".%s" % self.compressor
    tar_file_name = tar_file_name + ".tar" + ext
    logger.debug("Tar File: " + tar_file_name)
    subprocess.call(shlex.split("tar c%sfS %s -C %s ." % (
        self.get_compression_flag(self.compressor),
        tar_file_name,
        self.tmp_dir if not full_archive else self.archive_dir)),
        stderr=subprocess.PIPE)
    self.delete_archive_dir()
    logger.debug("Tar File Size: %s", str(os.path.getsize(tar_file_name)))
    return tar_file_name
Create tar file to be compressed
def init(self):
    if not valid_ovsdb_addr(self.ovsdb_addr):
        raise ValueError('Invalid OVSDB address: %s' % self.ovsdb_addr)
    if self.br_name is None:
        self.br_name = self._get_bridge_name()
Validates the given ``ovsdb_addr`` and connects to the OVS instance. If it fails to connect to the OVS instance, or the given ``datapath_id`` does not match the Datapath ID of the connected OVS instance, raises a :py:mod:`ryu.lib.ovs.bridge.OVSBridgeNotFound` exception.
def run_foreach_or_conditional(self, context):
    logger.debug("starting")
    if self.foreach_items:
        self.foreach_loop(context)
    else:
        self.run_conditional_decorators(context)
    logger.debug("done")
Run the foreach sequence or the conditional evaluation. Args: context: (pypyr.context.Context) The pypyr context. This arg will mutate.
def delete(self, **kwargs):
    response = self._requester.request(
        'DELETE',
        'calendar_events/{}'.format(self.id),
        _kwargs=combine_kwargs(**kwargs)
    )
    return CalendarEvent(self._requester, response.json())
Delete this calendar event. :calls: `DELETE /api/v1/calendar_events/:id \ <https://canvas.instructure.com/doc/api/calendar_events.html#method.calendar_events_api.destroy>`_ :rtype: :class:`canvasapi.calendar_event.CalendarEvent`
def quantize(self, image):
    if get_cKDTree():
        return self.quantize_with_scipy(image)
    else:
        print('Scipy not available, falling back to slower version.')
        return self.quantize_without_scipy(image)
Use a kdtree to quickly find the closest palette colors for the pixels
def search_references(
        self, reference_set_id, accession=None, md5checksum=None):
    request = protocol.SearchReferencesRequest()
    request.reference_set_id = reference_set_id
    request.accession = pb.string(accession)
    request.md5checksum = pb.string(md5checksum)
    request.page_size = pb.int(self._page_size)
    return self._run_search_request(
        request, "references", protocol.SearchReferencesResponse)
Returns an iterator over the References fulfilling the specified conditions from the specified Dataset. :param str reference_set_id: The ReferenceSet to search. :param str accession: If not None, return the references for which the `accession` matches this string (case-sensitive, exact match). :param str md5checksum: If not None, return the references for which the `md5checksum` matches this string (case-sensitive, exact match). :return: An iterator over the :class:`ga4gh.protocol.Reference` objects defined by the query parameters.
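A minimal usage sketch, assuming a connected ga4gh client instance named ``client``; the reference set ID is a placeholder:

reference_set_id = "EXAMPLE-REFERENCE-SET-ID"  # hypothetical ID
for reference in client.search_references(reference_set_id=reference_set_id):
    # Each item is a protocol Reference message.
    print(reference.name, reference.length)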
def get(self, index):
    assert index <= self.count
    assert index < self.size
    offset = index * self.chunk_size
    return self.data[offset:offset + self.chunk_size]
Get a chunk by index
def _verify(function):
    def wrapped(pin, *args, **kwargs):
        pin = int(pin)
        if pin not in _open:
            ppath = gpiopath(pin)
            if not os.path.exists(ppath):
                log.debug("Creating Pin {0}".format(pin))
                with _export_lock:
                    with open(pjoin(gpio_root, 'export'), 'w') as f:
                        _write(f, pin)
            value = open(pjoin(ppath, 'value'), FMODE)
            direction = open(pjoin(ppath, 'direction'), FMODE)
            _open[pin] = PinState(value=value, direction=direction)
        return function(pin, *args, **kwargs)
    return wrapped
decorator to ensure pin is properly set up
def get_database_name(self):
    uri_dict = uri_parser.parse_uri(self.host)
    database = uri_dict.get('database', None)
    if not database:
        # Raising a bare string is invalid in Python 3; use a proper exception.
        raise ValueError("database name is missing")
    return database
extract database from connection string
def deviation(reference_intervals, estimated_intervals, trim=False):
    validate_boundary(reference_intervals, estimated_intervals, trim)
    reference_boundaries = util.intervals_to_boundaries(reference_intervals)
    estimated_boundaries = util.intervals_to_boundaries(estimated_intervals)
    if trim:
        reference_boundaries = reference_boundaries[1:-1]
        estimated_boundaries = estimated_boundaries[1:-1]
    if len(reference_boundaries) == 0 or len(estimated_boundaries) == 0:
        return np.nan, np.nan
    dist = np.abs(np.subtract.outer(reference_boundaries,
                                    estimated_boundaries))
    estimated_to_reference = np.median(dist.min(axis=0))
    reference_to_estimated = np.median(dist.min(axis=1))
    return reference_to_estimated, estimated_to_reference
Compute the median deviations between reference and estimated boundary times. Examples -------- >>> ref_intervals, _ = mir_eval.io.load_labeled_intervals('ref.lab') >>> est_intervals, _ = mir_eval.io.load_labeled_intervals('est.lab') >>> r_to_e, e_to_r = mir_eval.boundary.deviation(ref_intervals, ... est_intervals) Parameters ---------- reference_intervals : np.ndarray, shape=(n, 2) reference segment intervals, in the format returned by :func:`mir_eval.io.load_intervals` or :func:`mir_eval.io.load_labeled_intervals`. estimated_intervals : np.ndarray, shape=(m, 2) estimated segment intervals, in the format returned by :func:`mir_eval.io.load_intervals` or :func:`mir_eval.io.load_labeled_intervals`. trim : boolean if ``True``, the first and last intervals are ignored. Typically, these denote start (0.0) and end-of-track markers. (Default value = False) Returns ------- reference_to_estimated : float median time from each reference boundary to the closest estimated boundary estimated_to_reference : float median time from each estimated boundary to the closest reference boundary
def _rm_gos_edges_rel(self, rm_goids, edges_rel):
    edges_ret = {}
    for rname, edges_cur in edges_rel.items():
        edges_new = self._rm_gos_edges(rm_goids, edges_cur)
        if edges_new:
            edges_ret[rname] = edges_new
    return edges_ret
Remove any relationships that contain user-specified edges.
def _resolve_parameters(parameters, blueprint):
    params = {}
    param_defs = blueprint.get_parameter_definitions()
    for key, value in parameters.items():
        if key not in param_defs:
            logger.debug("Blueprint %s does not use parameter %s.",
                         blueprint.name, key)
            continue
        if value is None:
            logger.debug("Got None value for parameter %s, not submitting it "
                         "to cloudformation, default value should be used.",
                         key)
            continue
        if isinstance(value, bool):
            logger.debug("Converting parameter %s boolean \"%s\" to string.",
                         key, value)
            value = str(value).lower()
        params[key] = value
    return params
Resolves CloudFormation Parameters for a given blueprint. Given a list of parameters, handles: - discard any parameters that the blueprint does not use - discard any empty values - convert booleans to strings suitable for CloudFormation Args: parameters (dict): A dictionary of parameters provided by the stack definition blueprint (:class:`stacker.blueprint.base.Blueprint`): A Blueprint object that is having the parameters applied to it. Returns: dict: The resolved parameters.
def add(self, transport, address=None):
    if not address:
        address = str(uuid.uuid1())
    if address in self.recipients:
        self.recipients[address].add(transport)
    else:
        self.recipients[address] = RecipientManager(transport, address)
    return address
Add a new recipient to be addressable by this MessageDispatcher. Generates a new uuid address if one is not specified.
def buffer(self, frame):
    frame.buffer = self.temporary_identifier()
    self.writeline('%s = []' % frame.buffer)
Enable buffering for the frame from that point onwards.
def inputs(self, name):
    self._closed()
    step = self._get_step(name, make_copy=False)
    return step.list_inputs()
List input names and types of a step in the steps library. Args: name (str): name of a step in the steps library.
def preprocess_bel_stmt(stmt: str) -> str:
    stmt = stmt.strip()
    stmt = re.sub(r",+", ",", stmt)
    stmt = re.sub(r",", ", ", stmt)
    stmt = re.sub(r" +", " ", stmt)
    return stmt
Clean up basic formatting of BEL statement Args: stmt: BEL statement as single string Returns: cleaned BEL statement
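For illustration, a quick check of the cleanup behaviour, assuming the function above is in scope (the statement is made up):

>>> preprocess_bel_stmt("  p(HGNC:AKT1),,  act(p(HGNC:EGFR))  ")
'p(HGNC:AKT1), act(p(HGNC:EGFR))'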
def SaveState(self, config_parser):
    if not config_parser.has_section('window'):
        config_parser.add_section('window')
    if self.IsMaximized():
        config_parser.set('window', 'maximized', str(True))
    else:
        config_parser.set('window', 'maximized', str(False))
    size = self.GetSizeTuple()
    position = self.GetPositionTuple()
    config_parser.set('window', 'width', str(size[0]))
    config_parser.set('window', 'height', str(size[1]))
    config_parser.set('window', 'x', str(position[0]))
    config_parser.set('window', 'y', str(position[1]))
    for control in self.ProfileListControls:
        control.SaveState(config_parser)
    return config_parser
Retrieve window state to be restored on the next run...
def get_values(self, profile, bitarray, status):
    if not self.init_ok or profile is None:
        return [], {}
    output = OrderedDict({})
    for source in profile.contents:
        if not source.name:
            continue
        if source.name == 'value':
            output.update(self._get_value(source, bitarray))
        if source.name == 'enum':
            output.update(self._get_enum(source, bitarray))
        if source.name == 'status':
            output.update(self._get_boolean(source, status))
    return output.keys(), output
Get keys and values from bitarray
def _path_from_module(module):
    paths = list(getattr(module, '__path__', []))
    if len(paths) != 1:
        filename = getattr(module, '__file__', None)
        if filename is not None:
            paths = [os.path.dirname(filename)]
        else:
            paths = list(set(paths))
    if len(paths) > 1:
        raise ImproperlyConfigured(
            "The bot module %r has multiple filesystem locations (%r); "
            "you must configure this bot with an AppConfig subclass "
            "with a 'path' class attribute." % (module, paths))
    elif not paths:
        raise ImproperlyConfigured(
            "The bot module %r has no filesystem location, "
            "you must configure this bot with an AppConfig subclass "
            "with a 'path' class attribute." % (module,))
    return paths[0]
Attempt to determine bot's filesystem path from its module.
def create_seq(self):
    name = self.name_le.text()
    desc = self.desc_pte.toPlainText()
    try:
        seq = djadapter.models.Sequence(name=name, project=self._project,
                                        description=desc)
        seq.save()
        self.sequence = seq
        self.accept()
    except Exception:
        # Catch Exception rather than everything, so SystemExit and
        # KeyboardInterrupt still propagate.
        log.exception("Could not create new sequence")
Create a sequence and store it in the self.sequence :returns: None :rtype: None :raises: None
def prolong(self):
    D = self.__class__
    collection = self.get_collection()
    identity = self.Lock()
    query = D.id == self
    query &= D.lock.instance == identity.instance
    query &= D.lock.time >= (identity.time - identity.__period__)
    previous = collection.find_one_and_update(
        query, {'$set': {~D.lock.time: identity.time}}, {~D.lock: True})
    if previous is None:
        lock = getattr(self.find_one(self, projection={~D.lock: True}), 'lock', None)
        if lock and lock.expires <= identity.time:
            lock.expired(self)
        raise self.Locked("Unable to prolong lock.", lock)
    identity.prolonged(self)
    return identity
Prolong the working duration of an already held lock. Attempting to prolong a lock not already owned will result in a Locked exception.
def after_invoke(self, coro):
    if not asyncio.iscoroutinefunction(coro):
        raise TypeError('The post-invoke hook must be a coroutine.')
    self._after_invoke = coro
    return coro
A decorator that registers a coroutine as a post-invoke hook. A post-invoke hook is called directly after the command is called. This makes it a useful function to clean up database connections or do any other required cleanup. This post-invoke hook takes a sole parameter, a :class:`.Context`. See :meth:`.Bot.after_invoke` for more info. Parameters ----------- coro: :ref:`coroutine <coroutine>` The coroutine to register as the post-invoke hook. Raises ------- TypeError The coroutine passed is not actually a coroutine.
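A minimal usage sketch, assuming an older discord.py release where ``commands.Bot(command_prefix='!')`` suffices; the ``ctx.db`` helper is hypothetical:

from discord.ext import commands

bot = commands.Bot(command_prefix='!')

@bot.after_invoke
async def close_db_session(ctx):
    # Hypothetical per-context database handle to release after each command.
    await ctx.db.close()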
def visit_yieldfrom(self, node):
    yi_val = (" " + node.value.accept(self)) if node.value else ""
    expr = "yield from" + yi_val
    if node.parent.is_statement:
        return expr
    return "(%s)" % (expr,)
Return an astroid.YieldFrom node as string.
def bake(self):
    self._sh_command = getattr(sh, self.command)
    self._sh_command = self._sh_command.bake(
        self.options, 'overlay', _env=self.env, _out=LOG.out, _err=LOG.error)
Bake a ``gilt`` command so it is ready to execute. :return: None
def stop(self):
    Global.LOGGER.debug(f"action {self.name} stopped")
    self.is_running = False
    self.on_stop()
Stop the current action
def get_bookmark(self, bookmark_id):
    url = self._generate_url('bookmarks/{0}'.format(bookmark_id))
    return self.get(url)
Get a single bookmark represented by `bookmark_id`. The requested bookmark must belong to the current user. :param bookmark_id: ID of the bookmark to retrieve.
def read_csv_file(self, file_name):
    result = []
    with open(os.path.join(self.__path(), os.path.basename(file_name)), 'rt') as csvfile:
        headers_reader = csv.reader(csvfile, delimiter=',', quotechar='|')
        for type_row in headers_reader:
            for t in type_row:
                result.append(t)
    return result
Parses a CSV file into a list. :param file_name: name of the CSV file :return: a list with the file's contents
def request_session(token, url=None):
    if url is None:
        api = SlackApi()
    else:
        api = SlackApi(url)
    response = api.rtm.start(token=token)
    return SessionMetadata(response, api, token)
Requests a WebSocket session for the Real-Time Messaging API. Returns a SessionMetadata object containing the information retrieved from the API call.
def put(self, item):
    check_not_none(item, "Value can't be None")
    element_data = self._to_data(item)
    return self._encode_invoke(queue_put_codec, value=element_data)
Adds the specified element into this queue. If there is no space, it waits until necessary space becomes available. :param item: (object), the specified item.
def joint_sfs(dac1, dac2, n1=None, n2=None):
    dac1, n1 = _check_dac_n(dac1, n1)
    dac2, n2 = _check_dac_n(dac2, n2)
    x = n1 + 1
    y = n2 + 1
    tmp = (dac1 * y + dac2).astype(int, copy=False)
    s = np.bincount(tmp)
    s.resize(x, y)
    return s
Compute the joint site frequency spectrum between two populations. Parameters ---------- dac1 : array_like, int, shape (n_variants,) Derived allele counts for the first population. dac2 : array_like, int, shape (n_variants,) Derived allele counts for the second population. n1, n2 : int, optional The total number of chromosomes called in each population. Returns ------- joint_sfs : ndarray, int, shape (m_chromosomes, n_chromosomes) Array where the (i, j)th element is the number of variant sites with i derived alleles in the first population and j derived alleles in the second population.
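A small worked example of the bincount trick, assuming the function above is in scope as ``joint_sfs``; the expected array is computed by hand under those assumptions:

import numpy as np

dac1 = np.array([0, 1, 1])   # derived allele counts, population 1
dac2 = np.array([1, 1, 2])   # derived allele counts, population 2
s = joint_sfs(dac1, dac2, n1=2, n2=2)
# Expected: s[0, 1] == 1, s[1, 1] == 1, s[1, 2] == 1, all other cells zero,
# i.e. [[0, 1, 0], [0, 1, 1], [0, 0, 0]].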
def not_storable(_type):
    return Storable(_type, handlers=StorableHandler(poke=fake_poke,
                                                    peek=fail_peek(_type)))
Helper for tagging unserializable types. Arguments: _type (type): type to be ignored. Returns: Storable: storable instance that does not poke.
def after_insert(mapper, connection, target):
    record_after_update.send(CmtRECORDCOMMENT, recid=target.id_bibrec)
    from .api import get_reply_order_cache_data
    if target.in_reply_to_id_cmtRECORDCOMMENT > 0:
        parent = CmtRECORDCOMMENT.query.get(
            target.in_reply_to_id_cmtRECORDCOMMENT)
        if parent:
            trans = connection.begin()
            parent_reply_order = parent.reply_order_cached_data \
                if parent.reply_order_cached_data else ''
            parent_reply_order += get_reply_order_cache_data(target.id)
            connection.execute(
                db.update(CmtRECORDCOMMENT.__table__).
                where(CmtRECORDCOMMENT.id == parent.id).
                values(reply_order_cached_data=parent_reply_order))
            trans.commit()
Update reply order cache and send record-after-update signal.
def init(self, dict_or_str, val=None, warn=True):
    self.check(dict_or_str)
    dic = dict_or_str
    if val is not None:
        dic = {dict_or_str: val}
    for key, val in dic.items():
        key = self.corrected_key(key)
        if key not in CMAOptions.defaults():
            if warn:
                print('Warning in cma.CMAOptions.init(): key ' +
                      str(key) + ' ignored')
        else:
            self[key] = val
    return self
initialize one or several options. Arguments --------- `dict_or_str` a dictionary if ``val is None``, otherwise a key. If `val` is provided `dict_or_str` must be a valid key. `val` value for key Details ------- Only known keys are accepted. Known keys are in `CMAOptions.defaults()`
def enable_logging(log_level):
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.DEBUG)
    logfile_handler = logging.StreamHandler(_LOGFILE_STREAM)
    logfile_handler.setLevel(logging.DEBUG)
    logfile_handler.setFormatter(logging.Formatter(
        '%(levelname)s [%(asctime)s][%(name)s] %(message)s'))
    root_logger.addHandler(logfile_handler)
    if signal.getsignal(signal.SIGTERM) == signal.SIG_DFL:
        signal.signal(signal.SIGTERM, _logfile_sigterm_handler)
    if log_level:
        handler = logging.StreamHandler()
        handler.setFormatter(_LogColorFormatter())
        root_logger.setLevel(log_level)
        root_logger.addHandler(handler)
Configure the root logger and a logfile handler. Args: log_level: The logging level to set the logger handler.
def _strip_invisible(s):
    "Remove invisible ANSI color codes."
    if isinstance(s, _text_type):
        return re.sub(_invisible_codes, "", s)
    else:
        return re.sub(_invisible_codes_bytes, "", s)
Remove invisible ANSI color codes.
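The same idea in a self-contained form; the regex below is an assumption about what ``_invisible_codes`` matches (standard SGR color sequences):

import re

ANSI_SGR = re.compile(r"\x1b\[[0-9;]*m")

def strip_ansi(text: str) -> str:
    # Drop terminal color codes so widths are computed on visible text only.
    return ANSI_SGR.sub("", text)

assert strip_ansi("\x1b[31mred\x1b[0m") == "red"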
def uppercase(self, value):
    if not isinstance(value, bool):
        raise TypeError('uppercase attribute must be a logical type.')
    self._uppercase = value
Validate and set the uppercase flag.
def get_num_procs():
    logger = logging.getLogger(__name__)
    try:
        py3nvml.nvmlInit()
    except Exception:
        # Placeholder warning text; emitted when NVML cannot be initialised.
        str_ = 'Unable to initialise NVML; GPU process counts unavailable.'
        warnings.warn(str_, RuntimeWarning)
        logger.warn(str_)
        return []
    num_gpus = py3nvml.nvmlDeviceGetCount()
    gpu_procs = [-1] * num_gpus
    for i in range(num_gpus):
        try:
            h = py3nvml.nvmlDeviceGetHandleByIndex(i)
        except Exception:
            continue
        procs = try_get_info(py3nvml.nvmlDeviceGetComputeRunningProcesses, h,
                             ['something'])
        gpu_procs[i] = len(procs)
    py3nvml.nvmlShutdown()
    return gpu_procs
Gets the number of processes running on each gpu Returns ------- num_procs : list(int) Number of processes running on each gpu Note ---- If the function can't query the driver, it will return an empty list rather than raise an Exception. Note ---- If the function can't get the info from a gpu, it will return -1 in that gpu's place
def make_op_return_script(data, format='bin'):
    if format == 'hex':
        assert(is_hex(data))
        hex_data = data
    elif format == 'bin':
        hex_data = hexlify(data)
    else:
        raise Exception("Format must be either 'hex' or 'bin'")
    num_bytes = count_bytes(hex_data)
    if num_bytes > MAX_BYTES_AFTER_OP_RETURN:
        raise Exception('Data is %i bytes - must not exceed 40.' % num_bytes)
    script_string = 'OP_RETURN %s' % hex_data
    return script_to_hex(script_string)
Takes in raw ascii data to be embedded and returns a script.
def pitching_stats_bref(season=None):
    if season is None:
        season = datetime.datetime.today().strftime("%Y")
    season = str(season)
    start_dt = season + '-03-01'
    end_dt = season + '-11-01'
    return pitching_stats_range(start_dt, end_dt)
Get all pitching stats for a set season. If no argument is supplied, gives stats for current season to date.
def list_price(self):
    price = self._safe_get_element_text('ItemAttributes.ListPrice.Amount')
    currency = self._safe_get_element_text(
        'ItemAttributes.ListPrice.CurrencyCode')
    if price:
        dprice = Decimal(price) / 100 if 'JP' not in self.region else Decimal(price)
        return dprice, currency
    else:
        return None, None
List Price. :return: A tuple containing: 1. Decimal representation of price. 2. ISO Currency code (string).
def last_version():
    try:
        last_update, version, success = last_version._cache
    except AttributeError:
        last_update = 0
        version = None
        success = False
    cache_delta = 24 * 3600 if success else 600
    if (time.time() - last_update) < cache_delta:
        return version
    else:
        try:
            req = requests.get(settings.CAS_NEW_VERSION_JSON_URL)
            data = json.loads(req.text)
            version = data["info"]["version"]
            last_version._cache = (time.time(), version, True)
            return version
        except (
            KeyError,
            ValueError,
            requests.exceptions.RequestException
        ) as error:
            logger.error(
                "Unable to fetch %s: %s" % (settings.CAS_NEW_VERSION_JSON_URL, error)
            )
            last_version._cache = (time.time(), version, False)
            # Fall back to the previously cached value (may be None).
            return version
Fetch the last version from pypi and return it. On successful fetch from pypi, the response is cached 24h, on error, it is cached 10 min. :return: the last django-cas-server version :rtype: unicode
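The caching pattern used above (stashing a ``(timestamp, value, ok)`` tuple on the function object) can be sketched generically; the fetcher below is a stand-in, not the real pypi call:

import time

def cached_fetch(fetch, ttl_ok=24 * 3600, ttl_err=600):
    def wrapper():
        last, value, ok = getattr(wrapper, "_cache", (0, None, False))
        if time.time() - last < (ttl_ok if ok else ttl_err):
            return value
        try:
            value, ok = fetch(), True
        except Exception:
            ok = False
        wrapper._cache = (time.time(), value, ok)
        return value
    return wrapper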
def _get_bs4_string(soup):
    if len(soup.find_all("script")) == 0:
        soup_str = soup.prettify(formatter=None).strip()
    else:
        soup_str = str(soup.html)
        soup_str = re.sub("&amp;", "&", soup_str)
        soup_str = re.sub("&lt;", "<", soup_str)
        soup_str = re.sub("&gt;", ">", soup_str)
    return soup_str
Outputs a BeautifulSoup object as a string that should hopefully be minimally modified
def get_item(key):
    CACHED_KEY_FILE = os.path.join(CURRENT_DIR, key)
    try:
        return json.loads(open(CACHED_KEY_FILE, "rb").read().decode('UTF-8'))["_"]
    except (IOError, ValueError):
        return None
Return content in cached file in JSON format
def from_yaml(data):
    molecule_env_file = os.environ['MOLECULE_ENV_FILE']
    env = os.environ.copy()
    env = config.set_env_from_file(env, molecule_env_file)
    i = interpolation.Interpolator(interpolation.TemplateWithDefaults, env)
    interpolated_data = i.interpolate(data)
    return util.safe_load(interpolated_data)
Interpolate the provided data and return a dict. Currently, this is used to reinterpolate the `molecule.yml` inside an Ansible playbook. If there were any interpolation errors, they would have been found and raised earlier. :return: dict
def respond_fw_config(self, msg):
    (req_fw_type, req_fw_ver, req_blocks,
     req_crc, bloader_ver) = fw_hex_to_int(msg.payload, 5)
    _LOGGER.debug(
        'Received firmware config request with firmware type %s, '
        'firmware version %s, %s blocks, CRC %s, bootloader %s',
        req_fw_type, req_fw_ver, req_blocks, req_crc, bloader_ver)
    fw_type, fw_ver, fware = self._get_fw(
        msg, (self.requested, self.unstarted))
    if fware is None:
        return None
    if fw_type != req_fw_type:
        _LOGGER.warning(
            'Firmware type %s of update is not identical to existing '
            'firmware type %s for node %s',
            fw_type, req_fw_type, msg.node_id)
    _LOGGER.info(
        'Updating node %s to firmware type %s version %s from type %s '
        'version %s', msg.node_id, fw_type, fw_ver, req_fw_type, req_fw_ver)
    msg = msg.copy(sub_type=self._const.Stream.ST_FIRMWARE_CONFIG_RESPONSE)
    msg.payload = fw_int_to_hex(
        fw_type, fw_ver, fware['blocks'], fware['crc'])
    return msg
Respond to a firmware config request.
def get_vector(self, max_choice=3):
    vec = {}
    for dim in ['forbidden', 'required', 'permitted']:
        if self.meta[dim] is None:
            continue
        dim_vec = map(lambda x: (x, max_choice), self.meta[dim])
        vec[dim] = dict(dim_vec)
    return vec
Return pseudo-choice vectors.
def to_hdf(self, path, key, mode='a'):
    pd.DataFrame(self.serialize()).to_hdf(
        path, key, mode=mode, format='table', complib='zlib', complevel=9)
    f = h5py.File(path, 'r+')
    f[key].attrs["microns_per_pixel"] = (
        float(self.microns_per_pixel)
        if self.microns_per_pixel is not None else np.nan)
    f.close()
Save the CellDataFrame to an hdf5 file. Args: path (str): the path to save to key (str): the name of the location to save it to mode (str): write mode
def get_xml_root(xml_file):
    try:
        xml_root = etree.parse(os.path.expanduser(xml_file), NO_BLANKS_PARSER).getroot()
    except Exception as err:
        raise Dump2PolarionException("Failed to parse XML file '{}': {}".format(xml_file, err))
    return xml_root
Returns XML root.
def getNextJobID(self):
    with self.localBatch.jobIndexLock:
        jobID = self.localBatch.jobIndex
        self.localBatch.jobIndex += 1
    return jobID
Must be used to get job IDs so that the local and batch jobs do not conflict.
def params(self):
    par = {"radius": self.radius,
           "sphere_index": self.sphere_index,
           "pha_offset": self.pha_offset,
           "center": [self.posx_offset, self.posy_offset],
           }
    return par
Current interpolation parameter dictionary
def FromFile(cls, in_path):
    with open(in_path, "rb") as infile:
        in_data = json.load(infile)
    # Require both keys; testing tuple membership against the dict is wrong.
    if not all(key in in_data for key in ('trace', 'selectors')):
        raise ArgumentError("Invalid trace file format", keys=in_data.keys(),
                            expected=('trace', 'selectors'))
    selectors = [DataStreamSelector.FromString(x) for x in in_data['selectors']]
    readings = [IOTileReading(x['time'], DataStream.FromString(x['stream']).encode(),
                              x['value'], reading_id=x['reading_id'])
                for x in in_data['trace']]
    return SimulationTrace(readings, selectors=selectors)
Load a previously saved ascii representation of this simulation trace. Args: in_path (str): The path of the input file that we should load. Returns: SimulationTrace: The loaded trace object.
def set_matrix_dimensions(self, *args):
    self._image = None
    super(FileImage, self).set_matrix_dimensions(*args)
Subclassed to delete the cached image when matrix dimensions are changed.
def add_sample_meta(self, source, reference, method='', filename='', md5='',
                    sha1='', sha256='', size='', mimetype='', campaign='',
                    confidence='', description='', bucket_list=[]):
    data = {
        'api_key': self.api_key,
        'username': self.username,
        'source': source,
        'reference': reference,
        'method': method,
        'filename': filename,
        'md5': md5,
        'sha1': sha1,
        'sha256': sha256,
        'size': size,
        'mimetype': mimetype,
        'upload_type': 'meta',
        'campaign': campaign,
        'confidence': confidence,
        'bucket_list': ','.join(bucket_list),
    }
    r = requests.post('{0}/samples/'.format(self.url), data=data,
                      verify=self.verify, proxies=self.proxies)
    if r.status_code == 200:
        result_data = json.loads(r.text)
        return result_data
    else:
        log.error('Error with status code {0} and message '
                  '{1}'.format(r.status_code, r.text))
        return None
Adds a metadata sample. To add an actual file, use add_sample_file. Args: source: Source of the information reference: A reference where more information can be found method: The method for obtaining the sample. filename: The name of the file. md5: An MD5 hash of the file. sha1: SHA1 hash of the file. sha256: SHA256 hash of the file. size: size of the file. mimetype: The mimetype of the file. campaign: An associated campaign confidence: The campaign confidence bucket_list: A list of bucket list items to add upload_type: Either 'file' or 'meta' Returns: A JSON sample object or None if there was an error.
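A hedged usage sketch, assuming a client object (``crits``) exposing this method; the feed name, URL, and hash are placeholders:

result = crits.add_sample_meta(
    source='example-feed',
    reference='https://example.org/report/123',
    filename='dropper.exe',
    md5='d41d8cd98f00b204e9800998ecf8427e',
    mimetype='application/x-dosexec',
    bucket_list=['malware', 'dropper'],
)
if result is None:
    print('Upload failed; see the log for the HTTP status and message.')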
def get_mac_dot_app_dir(directory):
    return os.path.dirname(os.path.dirname(os.path.dirname(directory)))
Returns parent directory of mac .app Args: directory (str): Current directory Returns: (str): Parent directory of mac .app
def _add_monomer(self, monomer, mon_vector, move_direction):
    translate_by = self.molecule.cart_coords[self.end] + \
        self.link_distance * move_direction
    monomer.translate_sites(range(len(monomer)), translate_by)
    if not self.linear_chain:
        self._align_monomer(monomer, mon_vector, move_direction)
    does_cross = False
    for i, site in enumerate(monomer):
        try:
            self.molecule.append(site.specie, site.coords,
                                 properties=site.properties)
        except Exception:
            # Appending failed (e.g. sites too close), so the chain crosses
            # itself; roll back the partially added monomer.
            does_cross = True
            polymer_length = len(self.molecule)
            self.molecule.remove_sites(
                range(polymer_length - i, polymer_length))
            break
    if not does_cross:
        self.length += 1
        self.end += len(self.monomer)
extend the polymer molecule by adding a monomer along mon_vector direction Args: monomer (Molecule): monomer molecule mon_vector (numpy.array): monomer vector that points from head to tail. move_direction (numpy.array): direction along which the monomer will be positioned
def get_network(self, name, batch_size=None, callback=None):
    network_proto = nnabla_pb2.Network()
    network_proto.CopyFrom(self.network_dict[name])
    return NnpNetwork(network_proto, self._params, batch_size, callback=callback)
Create a variable graph of the network given by name Returns: NnpNetwork
def get_context(self, publish=False):
    context = self.project.DEFAULT_CONTEXT
    try:
        file = self.project.CONTEXT_SOURCE_FILE
        if re.search(r'(csv|CSV)$', file):
            context.update(self.get_context_from_csv())
        if re.search(r'(xlsx|XLSX|xls|XLS)$', file):
            context.update(self.get_context_from_xlsx())
    except AttributeError:
        context.update(self.get_context_from_gdoc())
    return context
Use optional CONTEXT_SOURCE_FILE setting to determine data source. Return the parsed data. Can be an http|https url or local file. Supports csv and excel files.
def default_update(self, step, T, E, acceptance, improvement):
    elapsed = time.time() - self.start
    if step == 0:
        print(' Temperature Energy Accept Improve Elapsed Remaining',
              file=sys.stderr)
        print('\r%12.5f %12.2f %s ' % (T, E, time_string(elapsed)),
              file=sys.stderr, end="\r")
        sys.stderr.flush()
    else:
        remain = (self.steps - step) * (elapsed / step)
        print('\r%12.5f %12.2f %7.2f%% %7.2f%% %s %s\r' %
              (T, E, 100.0 * acceptance, 100.0 * improvement,
               time_string(elapsed), time_string(remain)),
              file=sys.stderr, end="\r")
        sys.stderr.flush()
Default update, outputs to stderr. Prints the current temperature, energy, acceptance rate, improvement rate, elapsed time, and remaining time. The acceptance rate indicates the percentage of moves since the last update that were accepted by the Metropolis algorithm. It includes moves that decreased the energy, moves that left the energy unchanged, and moves that increased the energy yet were reached by thermal excitation. The improvement rate indicates the percentage of moves since the last update that strictly decreased the energy. At high temperatures it will include both moves that improved the overall state and moves that simply undid previously accepted moves that increased the energy by thermal excitation. At low temperatures it will tend toward zero as the moves that can decrease the energy are exhausted and moves that would increase the energy are no longer thermally accessible.
def _ParseCmdItem(self, cmd_input, template_file=None):
    fsm = textfsm.TextFSM(template_file)
    if not self._keys:
        self._keys = set(fsm.GetValuesByAttrib('Key'))
    table = texttable.TextTable()
    table.header = fsm.header
    for record in fsm.ParseText(cmd_input):
        table.Append(record)
    return table
Creates Texttable with output of command. Args: cmd_input: String, Device response. template_file: File object, template to parse with. Returns: TextTable containing command output. Raises: CliTableError: A template was not found for the given command.
def add_ref(self, wordlist):
    refname = wordlist[0][:-1]
    if refname in self.refs:
        raise ReferenceError(
            "[line {}]:{} already defined here (word) {} (line) {}".format(
                self.line_count, refname,
                self.refs[refname][0], self.refs[refname][1]))
    self.refs[refname] = (self.word_count, self.line_count)
Adds a reference.
def linkify_h_by_h(self):
    for host in self:
        new_parents = []
        for parent in getattr(host, 'parents', []):
            parent = parent.strip()
            o_parent = self.find_by_name(parent)
            if o_parent is not None:
                new_parents.append(o_parent.uuid)
            else:
                err = "the parent '%s' for the host '%s' is unknown!" % (parent, host.get_name())
                self.add_error(err)
        host.parents = new_parents
Link hosts with their parents :return: None
def symbol(self, index):
    if isinstance(index, str):
        return index
    elif (index < 0) or (index >= self.symtab.table_len):
        self.error("symbol table index out of range")
    sym = self.symtab.table[index]
    if sym.kind == SharedData.KINDS.LOCAL_VAR:
        return "-{0}(1:%14)".format(sym.attribute * 4 + 4)
    elif sym.kind == SharedData.KINDS.PARAMETER:
        return "{0}(1:%14)".format(8 + sym.attribute * 4)
    elif sym.kind == SharedData.KINDS.CONSTANT:
        return "${0}".format(sym.name)
    else:
        return "{0}".format(sym.name)
Generates symbol name from index
def state_machines_set_notification(self, model, prop_name, info):
    if info['method_name'] == '__setitem__':
        state_machine_m = info.args[1]
        self.observe_model(state_machine_m)
Observe all open state machines and their root states
def size(self, time):
    if self.start_time <= time <= self.end_time:
        return self.masks[time - self.start_time].sum()
    else:
        return 0
Gets the size of the object at a given time. Args: time: Time value being queried. Returns: size of the object in pixels
def when_connected(self):
    if self._client and not self._client.is_closed:
        return defer.succeed(self._client)
    else:
        return self._client_deferred
Retrieve the currently-connected Protocol, or the next one to connect. Returns: defer.Deferred: A Deferred that fires with a connected :class:`FedoraMessagingProtocolV2` instance. This is similar to the whenConnected method from the Twisted endpoints APIs, which sadly isn't available before Twisted 16.1.0, which in turn isn't available in EL7.
def translate_indirect(properties, context_module):
    assert is_iterable_typed(properties, Property)
    assert isinstance(context_module, basestring)
    result = []
    for p in properties:
        if p.value[0] == '@':
            q = qualify_jam_action(p.value[1:], context_module)
            get_manager().engine().register_bjam_action(q)
            result.append(Property(p.feature, '@' + q, p.condition))
        else:
            result.append(p)
    return result
Assumes that all feature values that start with '@' are names of rules, used in 'context-module'. Such rules can be either local to the module or global. Qualifies local rules with the name of the module.
def register_patches(self):
    if not self.__paths:
        return False
    unregistered_patches = []
    for path in self.paths:
        for file in foundations.walkers.files_walker(path, ("\.{0}$".format(self.__extension),), ("\._",)):
            name = foundations.strings.get_splitext_basename(file)
            if not self.register_patch(name, file):
                unregistered_patches.append(name)
    if not unregistered_patches:
        return True
    else:
        raise umbra.exceptions.PatchRegistrationError(
            "{0} | '{1}' patches failed to register!".format(
                self.__class__.__name__, ", ".join(unregistered_patches)))
Registers the patches. :return: Method success. :rtype: bool
def signed_in_users(session=None, today=None, full_name=True):
    if session is None:
        session = Session()
    if today is None:
        today = date.today()
    signed_in_users = (
        session
        .query(User)
        .filter(Entry.date == today)
        .filter(Entry.time_out.is_(None))
        .filter(User.user_id == Entry.user_id)
        .all()
    )
    session.close()
    return signed_in_users
Return list of names of currently signed in users. :param session: SQLAlchemy session through which to access the database. :param today: (optional) The current date as a `datetime.date` object. Used for testing. :param full_name: (optional) Whether to display full user names, or just first names. :return: List of currently signed in users.
def AgregarAjusteFisico(self, cantidad, cantidad_cabezas=None,
                        cantidad_kg_vivo=None, **kwargs):
    "Add fields to the item detail for a physical adjustment"
    d = {'cantidad': cantidad,
         'cantidadCabezas': cantidad_cabezas,
         'cantidadKgVivo': cantidad_kg_vivo,
         }
    item_liq = self.solicitud['itemDetalleAjusteLiquidacion'][-1]
    item_liq['ajusteFisico'] = d
    return True
Add fields to the item detail for a physical adjustment (ajuste fisico)
def get_all_results(starting_page):
    logging.info('Retrieving all results for {}'.format(starting_page))
    page = starting_page
    results = []
    while True:
        logging.debug('Getting data from: {}'.format(page))
        data = get_page(page)
        logging.debug('JSON data: {}'.format(data))
        results = results + data['results']
        if data['next']:
            page = data['next']
        else:
            break
    return results
Given starting API query for Open Humans, iterate to get all results. :param starting_page: The first page, starting from which results will be obtained.
def create_typedef(self, typedef_name, unused=None, with_defaults=True):
    return free_function_type_t.TYPEDEF_NAME_TEMPLATE % {
        'typedef_name': typedef_name,
        'return_type': self.return_type.build_decl_string(with_defaults),
        'arguments': ','.join(
            [_f(x, with_defaults) for x in self.arguments_types])}
Returns a string that contains valid C++ code defining a typedef to the function type :param typedef_name: the desired name of the typedef
def find_egg(self, egg_dist):
    site_packages = self.libdir[1]
    search_filename = "{0}.egg-link".format(egg_dist.project_name)
    try:
        user_site = site.getusersitepackages()
    except AttributeError:
        user_site = site.USER_SITE
    search_locations = [site_packages, user_site]
    for site_directory in search_locations:
        egg = os.path.join(site_directory, search_filename)
        if os.path.isfile(egg):
            return egg
Find an egg by name in the given environment
def _directory(self):
    if self._filename is None:
        return os.path.join(self._ROOT_DIR, 'config')
    else:
        return os.path.dirname(self._filename)
The directory for this AitConfig.
def _write_user_prefs(self, user_prefs):
    with open(self.userPrefs, "w") as f:
        for key, value in user_prefs.items():
            f.write('user_pref("%s", %s);\n' % (key, json.dumps(value)))
writes the current user prefs dictionary to disk
def delete_pod(name, namespace='default', **kwargs):
    cfg = _setup_conn(**kwargs)
    body = kubernetes.client.V1DeleteOptions(orphan_dependents=True)
    try:
        api_instance = kubernetes.client.CoreV1Api()
        api_response = api_instance.delete_namespaced_pod(
            name=name,
            namespace=namespace,
            body=body)
        return api_response.to_dict()
    except (ApiException, HTTPError) as exc:
        if isinstance(exc, ApiException) and exc.status == 404:
            return None
        else:
            log.exception(
                'Exception when calling '
                'CoreV1Api->delete_namespaced_pod'
            )
            raise CommandExecutionError(exc)
    finally:
        _cleanup(**cfg)
Deletes the kubernetes pod defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_pod guestbook-708336848-5nl8c default salt '*' kubernetes.delete_pod name=guestbook-708336848-5nl8c namespace=default
def free_symbols(self):
    return set([
        sym for sym in self.term.free_symbols
        if sym not in self.bound_symbols])
Set of all free symbols
def ask(message='Are you sure? [y/N]'):
    agree = False
    answer = raw_input(message).lower()
    if answer.startswith('y'):
        agree = True
    return agree
Asks the user for confirmation.
def better_sentences(func):
    @wraps(func)
    def wrapped(*args):
        sentences = func(*args)
        new_sentences = []
        for sentence in sentences:
            if '\n\n' in sentence:
                splits = sentence.split('\n\n')
                if len(splits) > 1:
                    # Drop very short fragments; filtering avoids mutating
                    # the list while iterating over it.
                    splits = [spl for spl in splits if len(spl) >= 20]
                new_sentences.extend(splits)
            else:
                new_sentences.append(sentence)
        return new_sentences
    return wrapped
takes care of some edge cases of sentence tokenization for cases when websites don't close sentences properly, usually after blockquotes, image captions or attributions
def update(self, ipv4s):
    data = {'ips': ipv4s}
    ipv4s_ids = [str(ipv4.get('id')) for ipv4 in ipv4s]
    return super(ApiIPv4, self).put('api/v3/ipv4/%s/' % ';'.join(ipv4s_ids), data)
Method to update ipv4's :param ipv4s: List containing ipv4's desired to be updated :return: None
def nxapi_request(commands, method='cli_show', **kwargs):
    client = NxapiClient(**kwargs)
    return client.request(method, commands)
Send exec and config commands to the NX-OS device over NX-API. commands The exec or config commands to be sent. method: ``cli_show_ascii``: Return raw text or unstructured output. ``cli_show``: Return structured output. ``cli_conf``: Send configuration commands to the device. Defaults to ``cli_show``. transport: ``https`` Specifies the type of connection transport to use. Valid values for the connection are ``http``, and ``https``. host: ``localhost`` The IP address or DNS host name of the device. username: ``admin`` The username to pass to the device to authenticate the NX-API connection. password The password to pass to the device to authenticate the NX-API connection. port The TCP port of the endpoint for the NX-API connection. If this keyword is not specified, the default value is automatically determined by the transport type (``80`` for ``http``, or ``443`` for ``https``). timeout: ``60`` Time in seconds to wait for the device to respond. Default: 60 seconds. verify: ``True`` Either a boolean, in which case it controls whether we verify the NX-API TLS certificate, or a string, in which case it must be a path to a CA bundle to use. Defaults to ``True``.
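A minimal call sketch using only the documented keyword arguments; the host address and credentials are placeholders:

output = nxapi_request(
    ['show version'],
    method='cli_show',
    transport='https',
    host='192.0.2.10',
    username='admin',
    password='secret',
    timeout=60,
    verify=False,
)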
def _convert_params(sql, params):
    args = [sql]
    if params is not None:
        if hasattr(params, 'keys'):
            args += [params]
        else:
            args += [list(params)]
    return args
Convert SQL and params args to DBAPI2.0 compliant format.
def _query_params(self):
    params = {}
    if self.generation is not None:
        params["generation"] = self.generation
    if self.user_project is not None:
        params["userProject"] = self.user_project
    return params
Default query parameters.
def __assert_not_empty(returned):
    result = "Pass"
    try:
        assert (returned), "value is empty"
    except AssertionError as err:
        result = "Fail: " + six.text_type(err)
    return result
Test if a returned value is not empty
def create_current_pb(self):
    pb = QtGui.QPushButton("Select current")
    self.selection_tabw.setCornerWidget(pb)
    return pb
Create a push button and place it in the corner of the tabwidget :returns: the created button :rtype: :class:`QtGui.QPushButton` :raises: None
def run(targets, config_dir='.', check_licenses=False):
    pylint_return_state = False
    flake8_return_state = False
    if check_licenses:
        run_license_checker(config_path=get_license_checker_config_path(config_dir))
    pylint_options = get_pylint_options(config_dir=config_dir)
    flake8_options = get_flake8_options(config_dir=config_dir)
    if targets:
        pylint_return_state = _run_command(command='pylint', targets=targets,
                                           options=pylint_options)
        flake8_return_state = _run_command(command='flake8', targets=targets,
                                           options=flake8_options)
    if not flake8_return_state and not pylint_return_state:
        sys.exit(0)
    else:
        sys.exit(1)
Runs `pylint` and `flake8` commands and exits based off the evaluation of both command results. :param targets: List[str] :param config_dir: str :param check_licenses: bool :return:
def precision_recall(self):
    return plot.precision_recall(self.y_true, self.y_score, ax=_gen_ax())
Precision-recall plot
def _start_data_json(self) -> str:
    rv = {
        'logging': {
            'paths': []
        },
        'wallet': {
        }
    }
    logger = LOGGER
    while not logger.level:
        logger = logger.parent
        if logger is None:
            break
    rv['logging']['level'] = logger.level
    logger = LOGGER
    log_paths = [realpath(h.baseFilename) for h in logger.handlers
                 if hasattr(h, 'baseFilename')]
    while not log_paths:
        logger = logger.parent
        if logger is None:
            break
        log_paths = [realpath(h.baseFilename) for h in logger.handlers
                     if hasattr(h, 'baseFilename')]
    for log_path in log_paths:
        rv['logging']['paths'].append(log_path)
    rv['wallet']['storage_type'] = self.wallet.storage_type
    rv['wallet']['config'] = self.wallet.config
    rv['wallet']['access_creds'] = self.wallet.access_creds
    return json.dumps(rv)
Output json with start data to write for external revocation registry builder process pickup. :return: logging and wallet init data json
def main(argv=None):
    opts = cmdparse.parse_args(argv)
    assert validate_opts.validate(opts)
    opts = normalize_opts.normalize(opts)
    if opts.verbose:
        messages.print_input_output(opts)
    file_paths = fileparse.get_file_list(opts)
    assert validate_files.validate(file_paths, opts)
    if opts.verbose and opts.is_dir:
        messages.print_files(opts, file_paths)
    collector = make_github_markdown_collector(opts)
    anchors, duplicate_tags = collector.collect(file_paths)
    assert validate_anchors.validate(anchors, duplicate_tags, opts)
    writer = make_github_markdown_writer(opts)
    counter = writer.write(file_paths, anchors, opts)
    if opts.verbose:
        if opts.is_dir:
            messages.print_modified_files(opts, anchors)
        messages.print_summary_stats(counter)
Main entry method for AnchorHub. Takes in command-line arguments, finds files to parse within the specified input directory, and outputs parsed files to the specified output directory. :param argv: a list of string command line arguments
def read_files(*files):
    text = ""
    for single_file in files:
        content = read(single_file)
        text = text + content + "\n"
    return text
Read files into setup
def _postprocess(self, filehandle, metadata):
    "Runs all attached postprocessors on the provided filehandle."
    for process in self._postprocessors:
        filehandle = process(filehandle, metadata)
    return filehandle
Runs all attached postprocessors on the provided filehandle.
def transformer_mlperf_tpu():
    hparams = transformer_base_v3()
    hparams.mlperf_mode = True
    hparams.symbol_modality_num_shards = 1
    hparams.max_length = 256
    hparams.batch_size = 2048
    hparams.hidden_size = 1024
    hparams.filter_size = 4096
    hparams.num_heads = 16
    hparams.attention_dropout_broadcast_dims = "0,1"
    hparams.relu_dropout_broadcast_dims = "1"
    hparams.layer_prepostprocess_dropout_broadcast_dims = "1"
    return hparams
HParams for Transformer model on TPU for MLPerf on TPU 2x2.
def after_flush_postexec(self, session, context):
    instances = self.instances[session]
    while instances:
        instance = instances.pop()
        if instance not in session:
            continue
        parent = self.get_parent_value(instance)
        while parent != NO_VALUE and parent is not None:
            instances.discard(parent)
            session.expire(parent, ['left', 'right', 'tree_id', 'level'])
            parent = self.get_parent_value(parent)
        else:
            session.expire(instance, ['left', 'right', 'tree_id', 'level'])
            self.expire_session_for_children(session, instance)
Event listener to recursively expire `left` and `right` attributes of the parents of all modified instances that are part of this flush.
def request(func=None, timeout=600):
    if func is None:
        return partial(request, timeout=timeout)

    @wraps(func)
    def wrapper(self, *args, **kwargs):
        params = func(self, *args, **kwargs)
        self = params.pop('self', None)
        entity = params.pop('entity', None)
        app_name = params.pop('app_name', None)
        request_id = unique_hex()
        params['request_id'] = request_id
        future = self._send_request(app_name, endpoint=func.__name__,
                                    entity=entity, params=params,
                                    timeout=timeout)
        return future

    wrapper.is_request = True
    return wrapper
use to request an api call from a specific endpoint
def stop(self):
    LOGGER.info('Shutting down controller')
    self.set_state(self.STATE_STOP_REQUESTED)
    signal.setitimer(signal.ITIMER_PROF, 0, 0)
    self._mcp.stop_processes()
    if self._mcp.is_running:
        LOGGER.info('Waiting up to 3 seconds for MCP to shut things down')
        signal.setitimer(signal.ITIMER_REAL, 3, 0)
        signal.pause()
        LOGGER.info('Post pause')
    if self._mcp.is_running:
        LOGGER.warning('MCP is taking too long, requesting process kills')
        self._mcp.stop_processes()
        del self._mcp
    else:
        LOGGER.info('MCP exited cleanly')
    self._stopped()
    LOGGER.info('Shutdown complete')
Shutdown the MCP and child processes cleanly
def from_dict(cls, tag=None):
    if tag is None:
        tag = {}
    # Use the class the classmethod was called on rather than hard-coding Tag.
    new_tag = cls()
    new_tag.name = tag['name']
    return new_tag
Create new Tag-object from dict. Suitable for creating objects from XML-RPC data. All available keys must exist.
def datetime_to_djd(time):
    if time.tzinfo is None:
        time_utc = pytz.utc.localize(time)
    else:
        time_utc = time.astimezone(pytz.utc)
    djd_start = pytz.utc.localize(dt.datetime(1899, 12, 31, 12))
    djd = (time_utc - djd_start).total_seconds() * 1.0 / (60 * 60 * 24)
    return djd
Converts a datetime to the Dublin Julian Day Parameters ---------- time : datetime.datetime time to convert Returns ------- float fractional days since 12/31/1899+0000
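A quick worked example, assuming the function above is importable: 2020-01-01 00:00 UTC falls 43829.5 days after the Dublin JD epoch of 1899-12-31 12:00 UTC.

import datetime as dt
import pytz

djd = datetime_to_djd(pytz.utc.localize(dt.datetime(2020, 1, 1)))
# -> 43829.5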
def relfreq(inlist, numbins=10, defaultreallimits=None):
    h, l, b, e = histogram(inlist, numbins, defaultreallimits)
    for i in range(len(h)):
        h[i] = h[i] / float(len(inlist))
    return h, l, b, e
Returns a relative frequency histogram, using the histogram function. Usage: lrelfreq(inlist,numbins=10,defaultreallimits=None) Returns: list of relative frequency bin values, lowerreallimit, binsize, extrapoints
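An independent check of the normalisation with plain numpy (not the module's own ``histogram`` helper): dividing each bin count by the number of observations makes the bins sum to 1 when all data fall within the limits.

import numpy as np

data = [1, 1, 2, 3, 3, 3]
counts, edges = np.histogram(data, bins=3)
rel = counts / float(len(data))   # each bin's share of all observations
assert abs(rel.sum() - 1.0) < 1e-9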
def as_dict(self) -> Dict[str, str]:
    items: Dict[str, str] = {}
    for k, v in self.items():
        if type(v) is str:
            items.update({k: v})
    return items
Export color register as dict.
def dremove(self, **kwds):
    filtered_dr = self.dfilter(**kwds)
    for item in filtered_dr:
        self.remove(item)
    return filtered_dr
Removes from the object any element that matches the given specification.
def load(cls, v):
    if v is None:
        return []
    if isinstance(v, list):
        return [Action(s) for s in v]
    elif isinstance(v, str):
        return [Action(v)]
    else:
        raise ParseError("Couldn't parse action: %r" % v)
Load the action from configuration