def drp_load(package, resource, confclass=None):
    data = pkgutil.get_data(package, resource)
    return drp_load_data(package, data, confclass=confclass)
Load the DRPS from a resource file.
def get_matrix(self, indices):
    new = numpy.empty(self.samples1.shape)
    for idx in range(len(indices)):
        if indices[idx]:
            new[idx] = self.samples1[idx]
        else:
            new[idx] = self.samples2[idx]
    if self.poly:
        new = self.poly(*new)
    return new
Retrieve Saltelli matrix.
def __insert_frond_RF(d_w, d_u, dfs_data):
    dfs_data['RF'].append((d_w, d_u))
    dfs_data['FG']['r'] += 1
    dfs_data['last_inserted_side'] = 'RF'
Encapsulates the process of inserting a frond uw into the right side frond group.
def get_all_stations(self, station_type=None):
    params = None
    if station_type and station_type in STATION_TYPE_TO_CODE_DICT:
        url = self.api_base_url + 'getAllStationsXML_WithStationType'
        params = {
            'stationType': STATION_TYPE_TO_CODE_DICT[station_type]
        }
    else:
        url = self.api_base_url + 'getAllStationsXML'
    response = requests.get(url, params=params, timeout=10)
    if response.status_code != 200:
        return []
    return self._parse_station_list(response.content)
Returns information of all stations. @param<optional> station_type: ['mainline', 'suburban', 'dart']
def fetchUrls(cls, url, data, urlSearch):
    searchUrls = []
    if cls.css:
        searchFun = data.cssselect
    else:
        searchFun = data.xpath
    searches = makeSequence(urlSearch)
    for search in searches:
        for match in searchFun(search):
            try:
                for attrib in html_link_attrs:
                    if attrib in match.attrib:
                        searchUrls.append(match.get(attrib))
            except AttributeError:
                searchUrls.append(str(match))
        if not cls.multipleImagesPerStrip and searchUrls:
            break
    if not searchUrls:
        raise ValueError("XPath %s not found at URL %s." % (searches, url))
    return searchUrls
Search all entries for the given XPath in an HTML page.
def get_stats_display_height(self, curse_msg):
    try:
        c = [i['msg'] for i in curse_msg['msgdict']].count('\n')
    except Exception as e:
        logger.debug('ERROR: Can not compute plugin height ({})'.format(e))
        return 0
    else:
        return c + 1
Return the height of the formatted curses message. The height is defined by the number of '\n' (new line).
def _find_log_index(f):
    global _last_asked, _log_cache
    (begin, end) = (0, 128)
    if _last_asked is not None:
        (lastn, lastval) = _last_asked
        if f >= lastval:
            if f <= _log_cache[lastn]:
                _last_asked = (lastn, f)
                return lastn
            elif f <= _log_cache[lastn + 1]:
                _last_asked = (lastn + 1, f)
                return lastn + 1
            begin = lastn
    if f > _log_cache[127] or f <= 0:
        return 128
    while begin != end:
        n = (begin + end) // 2
        c = _log_cache[n]
        cp = _log_cache[n - 1] if n != 0 else 0
        if cp < f <= c:
            _last_asked = (n, f)
            return n
        if f < c:
            end = n
        else:
            begin = n
    _last_asked = (begin, f)
    return begin
Look up the index of the frequency f in the frequency table. Return the nearest index.
def diskwarp_multi_fn(src_fn_list, res='first', extent='intersection', t_srs='first', r='cubic',
                      verbose=True, outdir=None, dst_ndv=None):
    if not iolib.fn_list_check(src_fn_list):
        sys.exit('Missing input file(s)')
    src_ds_list = [gdal.Open(fn, gdal.GA_ReadOnly) for fn in src_fn_list]
    return diskwarp_multi(src_ds_list, res, extent, t_srs, r,
                          verbose=verbose, outdir=outdir, dst_ndv=dst_ndv)
Helper function for diskwarp of multiple input filenames
def split_signature(klass, signature):
    if signature == '()':
        return []
    if not signature.startswith('('):
        return [signature]
    result = []
    head = ''
    tail = signature[1:-1]
    while tail:
        c = tail[0]
        head += c
        tail = tail[1:]
        if c in ('m', 'a'):
            continue
        if c in ('(', '{'):
            level = 1
            up = c
            if up == '(':
                down = ')'
            else:
                down = '}'
            while level > 0:
                c = tail[0]
                head += c
                tail = tail[1:]
                if c == up:
                    level += 1
                elif c == down:
                    level -= 1
        result.append(head)
        head = ''
    return result
Return a list of the element signatures of the topmost signature tuple. If the signature is not a tuple, it returns one element with the entire signature. If the signature is an empty tuple, the result is []. This is useful for e. g. iterating over method parameters which are passed as a single Variant.
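A minimal usage sketch of the behaviour described above (the owning class name `Signature` is hypothetical; the method works the same on whatever class it is bound to):

    # '(sas(i))' -> ['s', 'as', '(i)'] : arrays and nested tuples stay intact
    Signature.split_signature('(sas(i))')
    # a non-tuple signature is returned as a single element
    Signature.split_signature('a{sv}')   # -> ['a{sv}']
    # the empty tuple yields an empty list
    Signature.split_signature('()')      # -> []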
def _perform_merge(self, other):
    if len(other.value) > len(self.value):
        self.value = other.value[:]
        return True
Merges the longer string
def SetWriteBack(self, filename):
    try:
        self.writeback = self.LoadSecondaryConfig(filename)
        self.MergeData(self.writeback.RawData(), self.writeback_data)
    except IOError as e:
        logging.error("Unable to read writeback file: %s", e)
        return
    except Exception as we:
        if os.path.exists(filename):
            try:
                b = filename + ".bak"
                os.rename(filename, b)
                logging.warning("Broken writeback (%s) renamed to: %s", we, b)
            except Exception as e:
                logging.error("Unable to rename broken writeback: %s", e)
        raise we
    logging.debug("Configuration writeback is set to %s", filename)
Sets the config file which will receive any modifications. The main config file can be made writable, but directing all Set() operations into a secondary location. This secondary location will receive any updates and will override the options for this file. Args: filename: A filename which will receive updates. The file is parsed first and merged into the raw data from this object.
def fetch(self, async=False, callback=None):
    request = NURESTRequest(method=HTTP_METHOD_GET, url=self.get_resource_url())
    if async:
        return self.send_request(request=request, async=async,
                                 local_callback=self._did_fetch, remote_callback=callback)
    else:
        connection = self.send_request(request=request)
        return self._did_retrieve(connection)
Fetch all information about the current object Args: async (bool): Boolean to make an asynchronous call. Default is False callback (function): Callback method that will be triggered in case of asynchronous call Returns: tuple: (current_fetcher, callee_parent, fetched_objects, connection) Example: >>> entity = NUEntity(id="xxx-xxx-xxx-xxx") >>> entity.fetch() # will get the entity with id "xxx-xxx-xxx-xxx" >>> print entity.name "My Entity"
def setup_logging(self):
    self.logger = logging.getLogger()
    if os.path.exists('/dev/log'):
        handler = SysLogHandler('/dev/log')
    else:
        handler = SysLogHandler()
    self.logger.addHandler(handler)
Set up self.logger This function is called after load_configuration() and after changing to new user/group IDs (if configured). Logging to syslog using the root logger is configured by default, you can override this method if you want something else.
def get_field_label(self, field_name, field=None):
    label = None
    if field is not None:
        label = getattr(field, 'verbose_name', None)
        if label is None:
            label = getattr(field, 'name', None)
    if label is None:
        label = field_name
    return label.capitalize()
Return a label to display for a field
def append(self, frame_p):
    return lib.zmsg_append(self._as_parameter_, byref(zframe_p.from_param(frame_p)))
Add frame to the end of the message, i.e. after all other frames. Message takes ownership of frame, will destroy it when message is sent. Returns 0 on success. Deprecates zmsg_add, which did not nullify the caller's frame reference.
def get_numeric_feature_names(example):
    numeric_features = ('float_list', 'int64_list')
    features = get_example_features(example)
    return sorted([
        feature_name for feature_name in features
        if features[feature_name].WhichOneof('kind') in numeric_features
    ])
Returns a list of feature names for float and int64 type features. Args: example: An example. Returns: A list of strings of the names of numeric features.
def get_access_tokens(self, authorization_code):
    response = self.box_request.get_access_token(authorization_code)
    try:
        att = response.json()
    except Exception, ex:
        raise BoxHttpResponseError(ex)
    if response.status_code >= 400:
        raise BoxError(response.status_code, att)
    return att['access_token'], att['refresh_token']
From the authorization code, get the "access token" and the "refresh token" from Box. Args: authorization_code (str). Authorisation code emitted by Box at the url provided by the function :func:`get_authorization_url`. Returns: tuple. (access_token, refresh_token) Raises: BoxError: An error response is returned from Box (status_code >= 400). BoxHttpResponseError: Response from Box is malformed. requests.exceptions.*: Any connection related problem.
def clean_html(context, data):
    doc = _get_html_document(context, data)
    if doc is None:
        context.emit(data=data)
        return
    remove_paths = context.params.get('remove_paths')
    for path in ensure_list(remove_paths):
        for el in doc.findall(path):
            el.drop_tree()
    html_text = html.tostring(doc, pretty_print=True)
    content_hash = context.store_data(html_text)
    data['content_hash'] = content_hash
    context.emit(data=data)
Clean an HTML DOM and store the changed version.
def cleanup_defenses(self):
    print_header('CLEANING UP DEFENSES DATA')
    work_ancestor_key = self.datastore_client.key('WorkType', 'AllDefenses')
    keys_to_delete = [
        e.key
        for e in self.datastore_client.query_fetch(kind=u'ClassificationBatch')
    ] + [
        e.key
        for e in self.datastore_client.query_fetch(kind=u'Work', ancestor=work_ancestor_key)
    ]
    self._cleanup_keys_with_confirmation(keys_to_delete)
Cleans up all data about defense work in current round.
async def _delete_agent(self, agent_addr): self._available_agents = [agent for agent in self._available_agents if agent != agent_addr] del self._registered_agents[agent_addr] await self._recover_jobs(agent_addr)
Deletes an agent
def core_profile_check(self) -> None:
    profile_mask = self.info['GL_CONTEXT_PROFILE_MASK']
    if profile_mask != 1:
        warnings.warn('The window should request a CORE OpenGL profile')
    version_code = self.version_code
    if not version_code:
        major, minor = map(int, self.info['GL_VERSION'].split('.', 2)[:2])
        version_code = major * 100 + minor * 10
    if version_code < 330:
        warnings.warn('The window should support OpenGL 3.3+ (version_code=%d)' % version_code)
Core profile check. FOR DEBUG PURPOSES ONLY
def get_uint_info(self, field):
    length = ctypes.c_ulong()
    ret = ctypes.POINTER(ctypes.c_uint)()
    _check_call(_LIB.XGDMatrixGetUIntInfo(self.handle, c_str(field),
                                          ctypes.byref(length), ctypes.byref(ret)))
    return ctypes2numpy(ret, length.value, np.uint32)
Get unsigned integer property from the DMatrix. Parameters ---------- field: str The field name of the information Returns ------- info : array a numpy array of unsigned integer information of the data
def get_path(dest, file, cwd=None):
    if callable(dest):
        return dest(file)
    if not cwd:
        cwd = file.cwd
    if not os.path.isabs(dest):
        dest = os.path.join(cwd, dest)
    relative = os.path.relpath(file.path, file.base)
    return os.path.join(dest, relative)
Get the writing path of a file.
def metalarchives(song):
    artist = normalize(song.artist)
    title = normalize(song.title)
    url = 'https://www.metal-archives.com/search/ajax-advanced/searching/songs'
    url += f'/?songTitle={title}&bandName={artist}&ExactBandMatch=1'
    soup = get_url(url, parser='json')
    if not soup:
        return ''
    song_id_re = re.compile(r'lyricsLink_([0-9]*)')
    ids = set(re.search(song_id_re, a) for sub in soup['aaData'] for a in sub)
    if not ids:
        return ''
    if None in ids:
        ids.remove(None)
    ids = map(lambda a: a.group(1), ids)
    for song_id in ids:
        url = 'https://www.metal-archives.com/release/ajax-view-lyrics/id/{}'
        lyrics = get_url(url.format(song_id), parser='html')
        lyrics = lyrics.get_text().strip()
        if not re.search('lyrics not available', lyrics):
            return lyrics
    return ''
Returns the lyrics found in MetalArchives for the specified mp3 file or an empty string if not found.
def get_system_config_directory():
    if platform.system().lower() == 'windows':
        _cfg_directory = Path(os.getenv('APPDATA') or '~')
    elif platform.system().lower() == 'darwin':
        _cfg_directory = Path('~', 'Library', 'Preferences')
    else:
        _cfg_directory = Path(os.getenv('XDG_CONFIG_HOME') or '~/.config')
    logger.debug('Fetching config directory for {}.'.format(platform.system()))
    return _cfg_directory.joinpath(Path('mayalauncher/.config'))
Return platform specific config directory.
def _speak_none(self, element):
    element.set_attribute('role', 'presentation')
    element.set_attribute('aria-hidden', 'true')
    element.set_attribute(AccessibleCSSImplementation.DATA_SPEAK, 'none')
Do not speak any content of the element itself. :param element: The element. :type element: hatemile.util.html.htmldomelement.HTMLDOMElement
def save(self, filename=None, tc=None):
    if filename is None:
        filename = self.filename
    for sub in self.sub_workflows:
        sub.save()
    if tc is None:
        tc = '{}.tc.txt'.format(filename)
    p = os.path.dirname(tc)
    f = os.path.basename(tc)
    if not p:
        p = '.'
    tc = TransformationCatalog(p, f)
    for e in self._adag.executables.copy():
        tc.add(e)
        try:
            tc.add_container(e.container)
        except:
            pass
        self._adag.removeExecutable(e)
    f = open(filename, "w")
    self._adag.writeXML(f)
    tc.write()
Write this workflow to DAX file
def strip_encoding_cookie(filelike):
    it = iter(filelike)
    try:
        first = next(it)
        if not cookie_comment_re.match(first):
            yield first
        second = next(it)
        if not cookie_comment_re.match(second):
            yield second
    except StopIteration:
        return
    for line in it:
        yield line
Generator to pull lines from a text-mode file, skipping the encoding cookie if it is found in the first two lines.
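A short, self-contained sketch of how the generator might be driven (io.StringIO stands in for a real text-mode file; `cookie_comment_re` is assumed to match PEP 263 coding declarations):

    import io
    src = io.StringIO('# -*- coding: utf-8 -*-\nimport os\nprint(os.name)\n')
    cleaned = ''.join(strip_encoding_cookie(src))
    # the coding cookie on the first line is dropped, the remaining lines are kept verbatim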
def patch(self, url, callback, params=None, json=None, headers=None):
    return self.adapter.patch(url, callback, params=params, json=json, headers=headers)
Patch a URL. Args: url(string): URL for the request callback(func): The response callback function headers(dict): HTTP headers for the request Keyword Args: params(dict): Parameters for the request json(dict): JSON body for the request Returns: The result of the callback handling the response from the executed request
def intersects(self, other_grid_coordinates):
    ogc = other_grid_coordinates
    ax1, ay1, ax2, ay2 = self.ULC.lon, self.ULC.lat, self.LRC.lon, self.LRC.lat
    bx1, by1, bx2, by2 = ogc.ULC.lon, ogc.ULC.lat, ogc.LRC.lon, ogc.LRC.lat
    if ((ax1 <= bx2) and (ax2 >= bx1) and (ay1 >= by2) and (ay2 <= by1)):
        return True
    else:
        return False
Returns True if the GCs overlap.
def specAutoRange(self):
    trace_range = self.responsePlots.values()[0].viewRange()[0]
    vb = self.specPlot.getViewBox()
    vb.autoRange(padding=0)
    self.specPlot.setXlim(trace_range)
Auto adjusts the visible range of the spectrogram
def fix_missing_lang_tags(marc_xml, dom):
    def get_lang_tag(lang):
        lang_str = '\n <mods:language>\n'
        lang_str += ' <mods:languageTerm authority="iso639-2b" type="code">'
        lang_str += lang
        lang_str += '</mods:languageTerm>\n'
        lang_str += ' </mods:language>\n\n'
        lang_dom = dhtmlparser.parseString(lang_str)
        return first(lang_dom.find("mods:language"))

    for lang in reversed(marc_xml["041a0 "]):
        lang_tag = dom.find(
            "mods:languageTerm",
            fn=lambda x: x.getContent().strip().lower() == lang.lower()
        )
        if not lang_tag:
            insert_tag(
                get_lang_tag(lang),
                dom.find("mods:language"),
                get_mods_tag(dom)
            )
If the lang tags are missing, add them to the MODS. Lang tags are parsed from `marc_xml`.
def removeActor(self, a):
    if not self.initializedPlotter:
        save_int = self.interactive
        self.show(interactive=0)
        self.interactive = save_int
        return
    if self.renderer:
        self.renderer.RemoveActor(a)
        if hasattr(a, 'renderedAt'):
            ir = self.renderers.index(self.renderer)
            a.renderedAt.discard(ir)
        if a in self.actors:
            i = self.actors.index(a)
            del self.actors[i]
Remove ``vtkActor`` or actor index from current renderer.
def sanity(request, sysmeta_pyxb):
    _does_not_contain_replica_sections(sysmeta_pyxb)
    _is_not_archived(sysmeta_pyxb)
    _obsoleted_by_not_specified(sysmeta_pyxb)
    if 'HTTP_VENDOR_GMN_REMOTE_URL' in request.META:
        return
    _has_correct_file_size(request, sysmeta_pyxb)
    _is_supported_checksum_algorithm(sysmeta_pyxb)
    _is_correct_checksum(request, sysmeta_pyxb)
Check that sysmeta_pyxb is suitable for creating a new object and matches the uploaded sciobj bytes.
def get_children(self, id_):
    id_list = []
    for r in self._rls.get_relationships_by_genus_type_for_source(id_, self._relationship_type):
        id_list.append(r.get_destination_id())
    return IdList(id_list)
Gets the children of the given ``Id``. arg: id (osid.id.Id): the ``Id`` to query return: (osid.id.IdList) - the children of the ``id`` raise: NotFound - ``id`` is not found raise: NullArgument - ``id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
def connect(cls, dbname):
    # The CREATE TABLE schema string literals were stripped out of this dump;
    # the gaps are left in place rather than reconstructed.
    test_times_schema = ...
    setup_times_schema = ...
    schemas = [test_times_schema, setup_times_schema]
    db_file = '{}.db'.format(dbname)
    cls.connection = sqlite3.connect(db_file)
    for s in schemas:
        cls.connection.execute(s)
Create a new connection to the SQLite3 database. :param dbname: The database name :type dbname: str
def holiday_description(self):
    entry = self._holiday_entry()
    desc = entry.description
    return desc.hebrew.long if self.hebrew else desc.english
Return the holiday description. In case none exists, returns None.
def open_url(self, url):
    try:
        c = pycurl.Curl()
        c.setopt(pycurl.FAILONERROR, True)
        c.setopt(pycurl.URL, "%s/api/v0/%s" % (self.url, url))
        c.setopt(pycurl.HTTPHEADER, ["User-Agent: %s" % self.userAgent,
                                     "apiToken: %s" % self.apiToken])
        b = StringIO.StringIO()
        c.setopt(pycurl.WRITEFUNCTION, b.write)
        c.setopt(pycurl.FOLLOWLOCATION, 1)
        c.setopt(pycurl.MAXREDIRS, 5)
        c.setopt(pycurl.SSLVERSION, pycurl.SSLVERSION_SSLv3)
        c.setopt(pycurl.SSL_VERIFYPEER, 1)
        c.setopt(pycurl.SSL_VERIFYHOST, 2)
        c.perform()
        return b.getvalue()
    except pycurl.error, e:
        raise MyTimetableError(e)
Opens the URL with apiToken in the headers
def _build_generator_list(network):
    genos_mv = pd.DataFrame(columns=('id', 'obj'))
    genos_lv = pd.DataFrame(columns=('id', 'obj'))
    genos_lv_agg = pd.DataFrame(columns=('la_id', 'id', 'obj'))
    for geno in network.mv_grid.graph.nodes_by_attribute('generator'):
        genos_mv.loc[len(genos_mv)] = [int(geno.id), geno]
    for geno in network.mv_grid.graph.nodes_by_attribute('generator_aggr'):
        la_id = int(geno.id.split('-')[1].split('_')[-1])
        genos_lv_agg.loc[len(genos_lv_agg)] = [la_id, geno.id, geno]
    for lv_grid in network.mv_grid.lv_grids:
        for geno in lv_grid.generators:
            genos_lv.loc[len(genos_lv)] = [int(geno.id), geno]
    return genos_mv, genos_lv, genos_lv_agg
Builds DataFrames with all generators in MV and LV grids Returns ------- :pandas:`pandas.DataFrame<dataframe>` A DataFrame with id of and reference to MV generators :pandas:`pandas.DataFrame<dataframe>` A DataFrame with id of and reference to LV generators :pandas:`pandas.DataFrame<dataframe>` A DataFrame with id of and reference to aggregated LV generators
def is_open(self, refresh=False):
    if refresh:
        self.refresh()
    return self.get_level(refresh) > 0
Get curtains state. Refresh data from Vera if refresh is True, otherwise use local cache. Refresh is only needed if you're not using subscriptions.
def rotate(self):
    item = self._address_infos.pop(0)
    self._address_infos.append(item)
Move the first address to the last position.
def format_op_hdr():
    txt = 'Base Filename'.ljust(36) + ' '
    txt += 'Lines'.rjust(7) + ' '
    txt += 'Words'.rjust(7) + ' '
    txt += 'Unique'.ljust(8) + ''
    return txt
Build the header
def write_networking_file(version, pairs):
    vmnets = OrderedDict(sorted(pairs.items(), key=lambda t: t[0]))
    try:
        with open(VMWARE_NETWORKING_FILE, "w", encoding="utf-8") as f:
            f.write(version)
            for key, value in vmnets.items():
                f.write("answer {} {}\n".format(key, value))
    except OSError as e:
        raise SystemExit("Cannot open {}: {}".format(VMWARE_NETWORKING_FILE, e))
    if sys.platform.startswith("darwin"):
        if not os.path.exists("/Applications/VMware Fusion.app/Contents/Library/vmnet-cli"):
            raise SystemExit("VMware Fusion is not installed in Applications")
        os.system(r"/Applications/VMware\ Fusion.app/Contents/Library/vmnet-cli --configure")
        os.system(r"/Applications/VMware\ Fusion.app/Contents/Library/vmnet-cli --stop")
        os.system(r"/Applications/VMware\ Fusion.app/Contents/Library/vmnet-cli --start")
    else:
        os.system("vmware-networks --stop")
        os.system("vmware-networks --start")
Write the VMware networking file.
def get_command(self, ctx, cmd_name):
    if cmd_name not in self.all_cmds:
        return None
    return EventTypeSubCommand(self.events_lib, cmd_name, self.all_cmds[cmd_name])
gets the subcommands under the service name Parameters ---------- ctx : Context the context object passed into the method cmd_name : str the service name Returns ------- EventTypeSubCommand: returns subcommand if successful, None if not.
def from_grpc(operation, operations_stub, result_type, **kwargs):
    refresh = functools.partial(_refresh_grpc, operations_stub, operation.name)
    cancel = functools.partial(_cancel_grpc, operations_stub, operation.name)
    return Operation(operation, refresh, cancel, result_type, **kwargs)
Create an operation future using a gRPC client. This interacts with the long-running operations `service`_ (specific to a given API) via gRPC. .. _service: https://github.com/googleapis/googleapis/blob/\ 050400df0fdb16f63b63e9dee53819044bffc857/\ google/longrunning/operations.proto#L38 Args: operation (google.longrunning.operations_pb2.Operation): The operation. operations_stub (google.longrunning.operations_pb2.OperationsStub): The operations stub. result_type (:func:`type`): The protobuf result type. kwargs: Keyword args passed into the :class:`Operation` constructor. Returns: ~.api_core.operation.Operation: The operation future to track the given operation.
def generateAcceptHeader(*elements):
    parts = []
    for element in elements:
        if type(element) is str:
            qs = "1.0"
            mtype = element
        else:
            mtype, q = element
            q = float(q)
            if q > 1 or q <= 0:
                raise ValueError('Invalid preference factor: %r' % q)
            qs = '%0.1f' % (q,)
        parts.append((qs, mtype))
    parts.sort()
    chunks = []
    for q, mtype in parts:
        if q == '1.0':
            chunks.append(mtype)
        else:
            chunks.append('%s; q=%s' % (mtype, q))
    return ', '.join(chunks)
Generate an accept header value [str or (str, float)] -> str
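A quick illustration of the mapping: plain strings get an implicit quality of 1.0 and are rendered without a q parameter, while (type, q) pairs sort ahead of them by quality string.

    generateAcceptHeader('text/html', ('application/xml', 0.9))
    # -> 'application/xml; q=0.9, text/html'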
def set_mtime(self, name, mtime, size):
    self.check_write(name)
    os.utime(os.path.join(self.cur_dir, name), (-1, mtime))
Set modification time on file.
def send_async(
        self,
        queue_identifier: QueueIdentifier,
        message: Message,
):
    recipient = queue_identifier.recipient
    if not is_binary_address(recipient):
        raise ValueError('Invalid address {}'.format(pex(recipient)))
    if isinstance(message, (Delivered, Ping, Pong)):
        raise ValueError('Do not use send for {} messages'.format(message.__class__.__name__))
    messagedata = message.encode()
    if len(messagedata) > self.UDP_MAX_MESSAGE_SIZE:
        raise ValueError(
            'message size exceeds the maximum {}'.format(self.UDP_MAX_MESSAGE_SIZE),
        )
    message_id = message.message_identifier
    if message_id not in self.messageids_to_asyncresults:
        self.messageids_to_asyncresults[message_id] = AsyncResult()
    queue = self.get_queue_for(queue_identifier)
    queue.put((messagedata, message_id))
    assert queue.is_set()
    self.log.debug(
        'Message queued',
        queue_identifier=queue_identifier,
        queue_size=len(queue),
        message=message,
    )
Send a new ordered message to recipient. Messages that use the same `queue_identifier` are ordered.
def liouvillian(H, Ls=None):
    if Ls is None:
        Ls = []
    elif isinstance(Ls, Matrix):
        Ls = Ls.matrix.ravel().tolist()
    summands = [-I * commutator(H), ]
    summands.extend([lindblad(L) for L in Ls])
    return SuperOperatorPlus.create(*summands)
Return the Liouvillian super-operator associated with `H` and `Ls` The Liouvillian :math:`\mathcal{L}` generates the Markovian-dynamics of a system via the Master equation: .. math:: \dot{\rho} = \mathcal{L}\rho = -i[H,\rho] + \sum_{j=1}^n \mathcal{D}[L_j] \rho Args: H (Operator): The associated Hamilton operator Ls (sequence or Matrix): A sequence of Lindblad operators. Returns: SuperOperator: The Liouvillian super-operator.
def ignore_code(self, code):
    if len(code) < 4 and any(s.startswith(code) for s in self.options.select):
        return False
    return (code.startswith(self.options.ignore) and
            not code.startswith(self.options.select))
Check if the error code should be ignored. If 'options.select' contains a prefix of the error code, return False. Else, if 'options.ignore' contains a prefix of the error code, return True.
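A sketch of the select/ignore interaction (the `checker` object and its option values here are hypothetical):

    checker.options.ignore = ('E2',)
    checker.options.select = ('E211',)
    checker.ignore_code('E225')  # True  -- matches an ignore prefix only
    checker.ignore_code('E211')  # False -- explicitly selected codes win
    checker.ignore_code('E2')    # False -- short code that prefixes a selected code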
def put_path(self, url, path):
    cache_path = self._url_to_path(url)
    try:
        dir = os.path.dirname(cache_path)
        os.makedirs(dir)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise Error('Failed to create cache directories for %s' % cache_path)
    try:
        os.unlink(cache_path)
    except OSError:
        pass
    try:
        os.link(path, cache_path)
    except OSError:
        try:
            shutil.copyfile(path, cache_path)
        except IOError:
            raise Error('Failed to cache %s as %s for %s' % (path, cache_path, url))
Puts a resource already on disk into the disk cache. Args: url: The original url of the resource path: The resource already available on disk Raises: CacheError: If the file cannot be put in cache
def get_suggested_repositories(self):
    if self.suggested_repositories is None:
        repository_set = list()
        for term_count in range(5, 2, -1):
            query = self.__get_query_for_repos(term_count=term_count)
            repository_set.extend(self.__get_repos_for_query(query))
        catchy_repos = GitSuggest.minus(
            repository_set, self.user_starred_repositories
        )
        filtered_repos = []
        if len(catchy_repos) > 0:
            for repo in catchy_repos:
                if (
                    repo is not None
                    and repo.description is not None
                    and len(repo.description) <= GitSuggest.MAX_DESC_LEN
                ):
                    filtered_repos.append(repo)
        filtered_repos = sorted(
            filtered_repos,
            key=attrgetter("stargazers_count"),
            reverse=True,
        )
        self.suggested_repositories = GitSuggest.get_unique_repositories(
            filtered_repos
        )
    for repository in self.suggested_repositories:
        yield repository
Method to procure suggested repositories for the user. :return: Iterator to procure suggested repositories for the user.
def get_object_by_record(record):
    if not record:
        return None
    if record.get("uid"):
        return get_object_by_uid(record["uid"])
    if record.get("path"):
        return get_object_by_path(record["path"])
    if record.get("parent_path") and record.get("id"):
        path = "/".join([record["parent_path"], record["id"]])
        return get_object_by_path(path)
    logger.warn("get_object_by_record::No object found! record='%r'" % record)
    return None
Find an object by a given record Inspects request the record to locate an object :param record: A dictionary representation of an object :type record: dict :returns: Found Object or None :rtype: object
def createLocationEncoder(t, w=15):
    encoder = CoordinateEncoder(name="positionEncoder", n=t.l6CellCount, w=w)
    return encoder
A default coordinate encoder for encoding locations into sparse distributed representations.
def _update_evaluated_individuals_(self, result_score_list, eval_individuals_str, operator_counts, stats_dicts):
    for result_score, individual_str in zip(result_score_list, eval_individuals_str):
        if type(result_score) in [float, np.float64, np.float32]:
            self.evaluated_individuals_[individual_str] = self._combine_individual_stats(
                operator_counts[individual_str],
                result_score,
                stats_dicts[individual_str])
        else:
            raise ValueError('Scoring function does not return a float.')
Update self.evaluated_individuals_ and error message during pipeline evaluation. Parameters ---------- result_score_list: list A list of CV scores for evaluated pipelines eval_individuals_str: list A list of strings for evaluated pipelines operator_counts: dict A dict where 'key' is the string representation of an individual and 'value' is the number of operators in the pipeline stats_dicts: dict A dict where 'key' is the string representation of an individual and 'value' is a dict containing statistics about the individual Returns ------- None
def _read_stc(stc_file):
    hdr = _read_hdr_file(stc_file)
    stc_dtype = dtype([('segment_name', 'a256'),
                       ('start_stamp', '<i'),
                       ('end_stamp', '<i'),
                       ('sample_num', '<i'),
                       ('sample_span', '<i')])
    with stc_file.open('rb') as f:
        f.seek(352)
        hdr['next_segment'] = unpack('<i', f.read(4))[0]
        hdr['final'] = unpack('<i', f.read(4))[0]
        hdr['padding'] = unpack('<' + 'i' * 12, f.read(48))
        stamps = fromfile(f, dtype=stc_dtype)
    return hdr, stamps
Read Segment Table of Contents file. Returns ------- hdr : dict - next_segment : Sample frequency in Hertz - final : Number of channels stored - padding : Padding stamps : ndarray of dtype - segment_name : Name of ERD / ETC file segment - start_stamp : First sample stamp that is found in the ERD / ETC pair - end_stamp : Last sample stamp that is still found in the ERD / ETC pair - sample_num : Number of samples actually being recorded (gaps in the data are not included in this number) - sample_span : Number of samples in that .erd file Notes ----- The Segment Table of Contents file is an index into pairs of (raw data file / table of contents file). It is used for mapping samples file segments. EEG raw data is split into segments in order to break a single file size limit (used to be 2GB) while still allowing quick searches. This file ends in the extension '.stc'. Default segment size (size of ERD file after which it is closed and new [ERD / ETC] pair is opened) is 50MB. The file starts with a generic EEG file header, and is followed by a series of fixed length records called the STC entries. ERD segments are named according to the following schema: - <FIRST_NAME>, <LAST_NAME>_<GUID>.ERD (first) - <FIRST_NAME>, <LAST_NAME>_<GUID>.ETC (first) - <FIRST_NAME>, <LAST_NAME>_<GUID>_<INDEX>.ERD (second and subsequent) - <FIRST_NAME>, <LAST_NAME>_<GUID>_<INDEX>.ETC (second and subsequent) <INDEX> is formatted with "%03d" format specifier and starts at 1 (initial value being 0 and omitted for compatibility with the previous versions).
def requires_auth(func):
    @six.wraps(func)
    def wrapper(self, *args, **kwargs):
        if self.token_expired:
            self.authenticate()
        return func(self, *args, **kwargs)
    return wrapper
Handle authentication checks. .. py:decorator:: requires_auth Checks if the token has expired and performs authentication if needed.
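A hedged usage sketch showing how such a decorator would typically be applied inside a client class (the class and method names below are illustrative, not taken from the library):

    class ApiClient(object):
        token_expired = True

        def authenticate(self):
            # refresh credentials, then mark the token as valid
            self.token_expired = False

        @requires_auth
        def list_items(self):
            return 'authenticated call'

    ApiClient().list_items()  # authenticate() runs first, then the wrapped call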
def do_proplist(self, subcmd, opts, *args):
    print "'svn %s' opts: %s" % (subcmd, opts)
    print "'svn %s' args: %s" % (subcmd, args)
List all properties on files, dirs, or revisions. usage: 1. proplist [PATH...] 2. proplist --revprop -r REV [URL] 1. Lists versioned props in working copy. 2. Lists unversioned remote props on repos revision. ${cmd_option_list}
def distribution(self, **slice_kwargs):
    values = []
    keys = []
    for key, size in self.slice(count_only=True, **slice_kwargs):
        values.append(size)
        keys.append(key)
    return keys, values
Calculates the number of papers in each slice, as defined by ``slice_kwargs``. Examples -------- .. code-block:: python >>> corpus.distribution(step_size=1, window_size=1) [5, 5] Parameters ---------- slice_kwargs : kwargs Keyword arguments to be passed to :meth:`.Corpus.slice`\. Returns ------- list
def stream(self, accountID, **kwargs):
    request = Request(
        'GET',
        '/v3/accounts/{accountID}/transactions/stream'
    )
    request.set_path_param('accountID', accountID)
    request.set_stream(True)

    class Parser():
        def __init__(self, ctx):
            self.ctx = ctx

        def __call__(self, line):
            j = json.loads(line.decode('utf-8'))
            type = j.get("type")
            if type is None:
                return ("unknown", j)
            elif type == "HEARTBEAT":
                return (
                    "transaction.TransactionHeartbeat",
                    self.ctx.transaction.TransactionHeartbeat.from_dict(j, self.ctx)
                )
            transaction = self.ctx.transaction.Transaction.from_dict(j, self.ctx)
            return ("transaction.Transaction", transaction)

    request.set_line_parser(Parser(self.ctx))
    response = self.ctx.request(request)
    return response
Get a stream of Transactions for an Account starting from when the request is made. Args: accountID: Account Identifier Returns: v20.response.Response containing the results from submitting the request
def flatten(input_list):
    for el in input_list:
        if isinstance(el, collections.Iterable) \
                and not isinstance(el, basestring):
            for sub in flatten(el):
                yield sub
        else:
            yield el
Return a flattened generator from an input list. Usage: input_list = [['a'], ['b', 'c', 'd'], [['e']], ['f']] list(flatten(input_list)) >> ['a', 'b', 'c', 'd', 'e', 'f']
def search_shell(self):
    with self._lock:
        if self._shell is not None:
            return
        reference = self._context.get_service_reference(SERVICE_SHELL)
        if reference is not None:
            self.set_shell(reference)
Looks for a shell service
def create_assign_context_menu(self):
    menu = QMenu("AutoKey")
    self._build_menu(menu)
    self.setContextMenu(menu)
Create a context menu, then set the created QMenu as the context menu. This builds the menu with all required actions and signal-slot connections.
def write_terminal(matrix, version, out, border=None):
    with writable(out, 'wt') as f:
        write = f.write
        colours = ['\033[{0}m'.format(i) for i in (7, 49)]
        for row in matrix_iter(matrix, version, scale=1, border=border):
            prev_bit = -1
            cnt = 0
            for bit in row:
                if bit == prev_bit:
                    cnt += 1
                else:
                    if cnt:
                        write(colours[prev_bit])
                        write(' ' * cnt)
                        write('\033[0m')
                    prev_bit = bit
                    cnt = 1
            if cnt:
                write(colours[prev_bit])
                write(' ' * cnt)
                write('\033[0m')
            write('\n')
Function to write to a terminal which supports ANSI escape codes. :param matrix: The matrix to serialize. :param int version: The (Micro) QR code version. :param out: Filename or a file-like object supporting to write text. :param int border: Integer indicating the size of the quiet zone. If set to ``None`` (default), the recommended border size will be used (``4`` for QR Codes, ``2`` for a Micro QR Codes).
def XstarT_dot(self, M):
    if 0:
        pass
    else:
        RV = np.dot(self.Xstar().T, M)
    return RV
get dot product of Xhat and M
def flatten(l):
    for el in l:
        if _iterable_not_string(el):
            for s in flatten(el):
                yield s
        else:
            yield el
Flatten an arbitrarily nested sequence. Parameters ---------- l : sequence The non string sequence to flatten Notes ----- This doesn't consider strings sequences. Returns ------- flattened : generator
def check_web_config(config_fname):
    print("Looking for config file at {0} ...".format(config_fname))
    config = RawConfigParser()
    try:
        config.readfp(open(config_fname))
        return config
    except IOError:
        print("ERROR: Seems like the config file does not exist. Please call 'opensubmit-web configcreate' first, or specify a location with the '-c' option.")
        return None
Try to load the Django settings. If this does not work, than settings file does not exist. Returns: Loaded configuration, or None.
def root_parent(self, category=None):
    return next(filter(lambda c: c.is_root, self.hierarchy()))
Returns the topmost parent of the current category.
def apply(self, func, *args, **kwds):
    wrapped = self._wrapped_apply(func, *args, **kwds)
    n_repeats = 3
    timed = timeit.timeit(wrapped, number=n_repeats)
    samp_proc_est = timed / n_repeats
    est_apply_duration = samp_proc_est / self._SAMP_SIZE * self._nrows
    if est_apply_duration > self._dask_threshold:
        return self._dask_apply(func, *args, **kwds)
    else:
        if self._progress_bar:
            tqdm.pandas(desc="Pandas Apply")
            return self._obj_pd.progress_apply(func, *args, **kwds)
        else:
            return self._obj_pd.apply(func, *args, **kwds)
Apply the function to the transformed swifter object
def run(self):
    if not (self.table):
        raise Exception("table need to be specified")
    path = self.s3_load_path()
    output = self.output()
    connection = output.connect()
    cursor = connection.cursor()
    self.init_copy(connection)
    self.copy(cursor, path)
    self.post_copy(cursor)
    if self.enable_metadata_columns:
        self.post_copy_metacolumns(cursor)
    output.touch(connection)
    connection.commit()
    connection.close()
If the target table doesn't exist, self.create_table will be called to attempt to create the table.
def add_waveform(self, waveform):
    if not isinstance(waveform, PlotWaveform):
        self.log_exc(u"waveform must be an instance of PlotWaveform", None, True, TypeError)
    self.waveform = waveform
    self.log(u"Added waveform")
Add a waveform to the plot. :param waveform: the waveform to be added :type waveform: :class:`~aeneas.plotter.PlotWaveform` :raises: TypeError: if ``waveform`` is not an instance of :class:`~aeneas.plotter.PlotWaveform`
def next(self):
    try:
        results = self._stride_buffer.pop()
    except (IndexError, AttributeError):
        self._rebuffer()
        results = self._stride_buffer.pop()
    if not results:
        raise StopIteration
    return results
Returns the next sequence of results, given stride and n.
def warning(self, amplexception):
    msg = '\t' + str(amplexception).replace('\n', '\n\t')
    print('Warning:\n{:s}'.format(msg))
Receives notification of a warning.
def append(self, ldap_filter):
    if not isinstance(ldap_filter, (LDAPFilter, LDAPCriteria)):
        raise TypeError(
            "Invalid filter type: {0}".format(type(ldap_filter).__name__)
        )
    if len(self.subfilters) >= 1 and self.operator == NOT:
        raise ValueError("Not operator only handles one child")
    self.subfilters.append(ldap_filter)
Appends a filter or a criterion to this filter :param ldap_filter: An LDAP filter or criterion :raise TypeError: If the parameter is not of a known type :raise ValueError: If more than one filter is associated to a NOT operator
def current(cls):
    name = socket.getfqdn()
    ip = socket.gethostbyname(name)
    return cls(name, ip)
Helper method for getting the current peer of whichever host we're running on.
def align(self, input_path, output_path, directions, pipeline, filter_minimum):
    with tempfile.NamedTemporaryFile(prefix='for_conv_file', suffix='.fa') as fwd_fh:
        fwd_conv_file = fwd_fh.name
        with tempfile.NamedTemporaryFile(prefix='rev_conv_file', suffix='.fa') as rev_fh:
            rev_conv_file = rev_fh.name
            alignments = self._hmmalign(
                input_path, directions, pipeline,
                fwd_conv_file, rev_conv_file)
            alignment_result = self.alignment_correcter(alignments, output_path, filter_minimum)
            return alignment_result
align - Takes input path to fasta of unaligned reads, aligns them to a HMM, and returns the aligned reads in the output path Parameters ---------- input_path : str output_path : str reverse_direction : dict A dictionary of read names, with the entries being the complement strand of the read (True = forward, False = reverse) pipeline : str Either "P" or "D" corresponding to the protein and nucleotide (DNA) pipelines, respectively. Returns ------- N/A - output alignment path known.
def _process_added_port_event(self, port_name):
    LOG.info("Hyper-V VM vNIC added: %s", port_name)
    self._added_ports.add(port_name)
Callback for added ports.
def set_components(self, params):
    for key, value in params.items():
        if isinstance(value, pd.Series):
            new_function = self._timeseries_component(value)
        elif callable(value):
            new_function = value
        else:
            new_function = self._constant_component(value)
        func_name = utils.get_value_by_insensitive_key_or_value(key, self.components._namespace)
        if func_name is None:
            raise NameError('%s is not recognized as a model component' % key)
        if '_integ_' + func_name in dir(self.components):
            warnings.warn("Replacing the equation of stock {} with params".format(key),
                          stacklevel=2)
        setattr(self.components, func_name, new_function)
Set the value of exogenous model elements. Element values can be passed as keyword=value pairs in the function call. Values can be numeric type or pandas Series. Series will be interpolated by integrator. Examples -------- >>> model.set_components({'birth_rate': 10}) >>> model.set_components({'Birth Rate': 10}) >>> br = pandas.Series(index=range(30), values=np.sin(range(30))) >>> model.set_components({'birth_rate': br})
def add(self, rd, ttl=None):
    if self.rdclass != rd.rdclass or self.rdtype != rd.rdtype:
        raise IncompatibleTypes
    if not ttl is None:
        self.update_ttl(ttl)
    if self.rdtype == dns.rdatatype.RRSIG or \
       self.rdtype == dns.rdatatype.SIG:
        covers = rd.covers()
        if len(self) == 0 and self.covers == dns.rdatatype.NONE:
            self.covers = covers
        elif self.covers != covers:
            raise DifferingCovers
    if dns.rdatatype.is_singleton(rd.rdtype) and len(self) > 0:
        self.clear()
    super(Rdataset, self).add(rd)
Add the specified rdata to the rdataset. If the optional I{ttl} parameter is supplied, then self.update_ttl(ttl) will be called prior to adding the rdata. @param rd: The rdata @type rd: dns.rdata.Rdata object @param ttl: The TTL @type ttl: int
def _get_user(self, user):
    return ' '.join([user.username, user.first_name, user.last_name])
Generate user filtering tokens.
def merge_query(path, postmap, force_unicode=True):
    if postmap:
        p = postmap.copy()
    else:
        p = {}
    p.update(get_query(path, force_unicode=False))
    if force_unicode:
        p = _unicode(p)
    return p
Merges params parsed from the URI into the mapping from the POST body and returns a new dict with the values. This is a convenience function that gives us a dict a bit like PHP's $_REQUEST array. The original 'postmap' is preserved so the caller can identify a param's source if necessary.
def get_correctness_for_response(self, response):
    for answer in self.my_osid_object.get_answers():
        if self._is_match(response, answer):
            try:
                return answer.get_score()
            except AttributeError:
                return 100
    for answer in self.my_osid_object.get_wrong_answers():
        if self._is_match(response, answer):
            try:
                return answer.get_score()
            except AttributeError:
                return 0
    return 0
get measure of correctness available for a particular response
def scan_file(self, this_file):
    params = {'apikey': self.api_key}
    try:
        if type(this_file) == str and os.path.isfile(this_file):
            files = {'file': (this_file, open(this_file, 'rb'))}
        elif isinstance(this_file, StringIO.StringIO):
            files = {'file': this_file.read()}
        else:
            files = {'file': this_file}
    except TypeError as e:
        return dict(error=e.message)
    try:
        response = requests.post(self.base + 'file/scan', files=files, params=params,
                                 proxies=self.proxies)
    except requests.RequestException as e:
        return dict(error=e.message)
    return _return_response_and_status_code(response)
Submit a file to be scanned by VirusTotal :param this_file: File to be scanned (32MB file size limit) :return: JSON response that contains scan_id and permalink.
def _get_websocket(self, reuse=True):
    if self.ws and reuse:
        if self.ws.connected:
            return self.ws
        logging.debug("Stale connection, reconnecting.")
    self.ws = self._create_connection()
    return self.ws
Reuse existing connection or create a new connection.
def cli(conf):
    try:
        config = init_config(conf)
        debug = config.getboolean('DEFAULT', 'debug')
        conn = get_conn(config.get('DEFAULT', 'statusdb'))
        cur = conn.cursor()
        # The CREATE TABLE statement was stripped out of this dump; the gap is left in place.
        sqlstr = ...
        try:
            cur.execute('drop table client_status')
        except:
            pass
        cur.execute(sqlstr)
        print 'flush client status database'
        conn.commit()
        conn.close()
    except:
        traceback.print_exc()
OpenVPN status initdb method
def log_status(self):
    duration = time.time() - self.start_time
    checked, in_progress, queue = self.aggregator.urlqueue.status()
    num_urls = len(self.aggregator.result_cache)
    self.logger.log_status(checked, in_progress, queue, duration, num_urls)
Log a status message.
def find_log_dir(log_dir=None):
    if log_dir:
        dirs = [log_dir]
    elif FLAGS['log_dir'].value:
        dirs = [FLAGS['log_dir'].value]
    else:
        dirs = ['/tmp/', './']
    for d in dirs:
        if os.path.isdir(d) and os.access(d, os.W_OK):
            return d
    _absl_logger.fatal("Can't find a writable directory for logs, tried %s", dirs)
Returns the most suitable directory to put log files into. Args: log_dir: str|None, if specified, the logfile(s) will be created in that directory. Otherwise if the --log_dir command-line flag is provided, the logfile will be created in that directory. Otherwise the logfile will be created in a standard location.
def addLNT(LocalName, phenoId, predicate, g=None):
    if g is None:
        s = inspect.stack(0)
        checkCalledInside('LocalNameManager', s)
        g = s[1][0].f_locals
    addLN(LocalName, Phenotype(phenoId, predicate), g)
Add a local name for a phenotype from a pair of identifiers
def close(self):
    try:
        self.sock.shutdown(socket.SHUT_RDWR)
        self.sock.close()
    except socket.error:
        pass
Closes the tunnel.
def write(self, src, dest=None):
    if not src or not isinstance(src, string_types):
        raise ValueError('The src path must be a non-empty string, got {} of type {}.'.format(
            src, type(src)))
    if dest and not isinstance(dest, string_types):
        raise ValueError('The dest entry path must be a non-empty string, got {} of type {}.'.format(
            dest, type(dest)))
    if not os.path.isdir(src) and not dest:
        raise self.Error('Source file {} must have a jar destination specified'.format(src))
    self._add_entry(self.FileSystemEntry(src, dest))
Schedules a write of the file at ``src`` to the ``dest`` path in this jar. If the ``src`` is a file, then ``dest`` must be specified. If the ``src`` is a directory then by default all descendant files will be added to the jar as entries carrying their relative path. If ``dest`` is specified it will be prefixed to each descendant's relative path to form its jar entry path. :param string src: the path to the pre-existing source file or directory :param string dest: the path the source file or directory should have in this jar
def put(self, key, val, minutes):
    minutes = self._get_minutes(minutes)
    if minutes is not None:
        self._store.put(key, val, minutes)
Store an item in the cache. :param key: The cache key :type key: str :param val: The cache value :type val: mixed :param minutes: The lifetime in minutes of the cached value :type minutes: int|datetime
def _set_ip(self):
    self._ip = socket.gethostbyname(self._fqdn)
    log.debug('IP: %s' % self._ip)
Resolve FQDN to IP address
def flatten_reducer(
        flattened_list: list,
        entry: typing.Union[list, tuple, COMPONENT]
) -> list:
    if hasattr(entry, 'includes') and hasattr(entry, 'files'):
        flattened_list.append(entry)
    elif entry:
        flattened_list.extend(entry)
    return flattened_list
Flattens a list of COMPONENT instances to remove any lists or tuples of COMPONENTS contained within the list :param flattened_list: The existing flattened list that has been populated from previous calls of this reducer function :param entry: An entry to be reduced. Either a COMPONENT instance or a list/tuple of COMPONENT instances :return: The flattened list with the entry flatly added to it
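Since this is written as a reducer, a plausible way to drive it is through functools.reduce (the COMPONENT instances below are placeholders for objects exposing `includes` and `files`):

    import functools
    entries = [component_a, [component_b, component_c], (component_d,)]
    flat = functools.reduce(flatten_reducer, entries, [])
    # -> [component_a, component_b, component_c, component_d]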
def add_pyspark_path():
    try:
        spark_home = os.environ['SPARK_HOME']
        sys.path.append(os.path.join(spark_home, 'python'))
        py4j_src_zip = glob(os.path.join(spark_home, 'python', 'lib', 'py4j-*-src.zip'))
        if len(py4j_src_zip) == 0:
            raise ValueError('py4j source archive not found in %s'
                             % os.path.join(spark_home, 'python', 'lib'))
        else:
            py4j_src_zip = sorted(py4j_src_zip)[::-1]
            sys.path.append(py4j_src_zip[0])
    except KeyError:
        # The multi-line error message string was stripped out of this dump.
        logging.error()
        exit(-1)
    except ValueError as e:
        logging.error(str(e))
        exit(-1)
Add PySpark to the library path based on the value of SPARK_HOME.
def provide_session(self, start_new=False):
    if self.is_global:
        self._session_info = self._global_session_info
        self._session_start = self._global_session_start
    if self._session_info is None or start_new or \
            datetime.datetime.now() > self._session_start + self.SESSION_DURATION:
        self._start_new_session()
    return self._session_info
Makes sure that session is still valid and provides session info :param start_new: If `True` it will always create a new session. Otherwise it will create a new session only if no session exists or the previous session timed out. :type start_new: bool :return: Current session info :rtype: dict
def ctr_geom(geom, masses):
    import numpy as np
    shift = np.tile(ctr_mass(geom, masses), geom.shape[0] / 3)
    ctr_geom = geom - shift
    return ctr_geom
Returns geometry shifted to center of mass. Helper function to automate / encapsulate translation of a geometry to its center of mass. Parameters ---------- geom length-3N |npfloat_| -- Original coordinates of the atoms masses length-N OR length-3N |npfloat_| -- Atomic masses of the atoms. Length-3N option is to allow calculation of a per-coordinate perturbed value. Returns ------- ctr_geom length-3N |npfloat_| -- Atomic coordinates after shift to center of mass Raises ------ ~exceptions.ValueError If shapes of `geom` & `masses` are inconsistent
def supply(self, issuer):
    def _retrieve_jwks():
        jwks_uri = self._key_uri_supplier.supply(issuer)
        if not jwks_uri:
            raise UnauthenticatedException(u"Cannot find the `jwks_uri` for issuer "
                                           u"%s: either the issuer is unknown or "
                                           u"the OpenID discovery failed" % issuer)
        try:
            response = requests.get(jwks_uri)
            json_response = response.json()
        except Exception as exception:
            message = u"Cannot retrieve valid verification keys from the `jwks_uri`"
            raise UnauthenticatedException(message, exception)
        if u"keys" in json_response:
            jwks_keys = jwk.KEYS()
            jwks_keys.load_jwks(response.text)
            return jwks_keys._keys
        else:
            return _extract_x509_certificates(json_response)
    return self._jwks_cache.get_or_create(issuer, _retrieve_jwks)
Supplies the `Json Web Key Set` for the given issuer. Args: issuer: the issuer. Returns: The successfully retrieved Json Web Key Set. None is returned if the issuer is unknown or the retrieval process fails. Raises: UnauthenticatedException: When this method cannot supply JWKS for the given issuer (e.g. unknown issuer, HTTP request error).
def remember_forever(self, key, callback):
    val = self.get(key)
    if val is not None:
        return val
    val = value(callback)
    self.forever(key, val)
    return val
Get an item from the cache, or store the default value forever. :param key: The cache key :type key: str :param callback: The default function :type callback: mixed :rtype: mixed
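A minimal usage sketch (the `cache` instance and the slow query function are illustrative, not from the library):

    def load_user_names():
        return expensive_database_query()   # hypothetical slow call

    names = cache.remember_forever('user_names', load_user_names)
    # the first call stores the computed value forever; later calls return the cached value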
def calculate_input(self, buffer):
    if TriggerMode.ABBREVIATION in self.modes:
        if self._should_trigger_abbreviation(buffer):
            if self.immediate:
                return len(self._get_trigger_abbreviation(buffer))
            else:
                return len(self._get_trigger_abbreviation(buffer)) + 1
    if TriggerMode.HOTKEY in self.modes:
        if buffer == '':
            return len(self.modifiers) + 1
    return self.parent.calculate_input(buffer)
Calculate how many keystrokes were used in triggering this phrase.
def access_service_descriptor(price, consume_endpoint, service_endpoint, timeout, template_id):
    return (ServiceTypes.ASSET_ACCESS,
            {'price': price,
             'consumeEndpoint': consume_endpoint,
             'serviceEndpoint': service_endpoint,
             'timeout': timeout,
             'templateId': template_id})
Access service descriptor. :param price: Asset price, int :param consume_endpoint: url of the service provider, str :param service_endpoint: identifier of the service inside the asset DDO, str :param timeout: amount of time in seconds before the agreement expires, int :param template_id: id of the template use to create the service, str :return: Service descriptor.