docstring
stringlengths
52
499
function
stringlengths
67
35.2k
__index_level_0__
int64
52.6k
1.16M
初始化异常. Parameters: message (str): - 异常信息 ID (str): - 任务ID exception (str): - 异常栈信息 status_code (int): - 状态码
def __init__(self, message, ID, exception=None, status_code=None):
    """Initialize the exception.

    Args:
        message (str): Human-readable error message.
        ID (str): Task ID the failure belongs to.
        exception (str, optional): Stack-trace text of the underlying error.
        status_code (int, optional): Associated status code.
    """
    super().__init__(message, status_code)
    self.ID = ID
    self.EXCEPTION = exception
1,089,921
Expire the key, delete the value, and call the callback function if one is specified. Args: key: The ``TimedDict`` key
def expire_key(self, key):
    """Expire *key*: remove it from the mapping and invoke the optional callback.

    Args:
        key: The ``TimedDict`` key to expire.
    """
    expired_value = self.base_dict[key]
    del self[key]
    if self.callback is None:
        return
    self.callback(key, expired_value,
                  *self.callback_args, **self.callback_kwargs)
1,090,068
Return suffix from `path`. ``/home/xex/somefile.txt`` --> ``txt``. Args: path (str): Full file path. Returns: str: Suffix. Raises: UserWarning: When ``/`` is detected in suffix.
def _get_suffix(path):
    """Return the suffix of *path* (``/home/xex/somefile.txt`` -> ``txt``).

    Args:
        path (str): Full file path.

    Returns:
        str: Text after the last dot of the basename.

    Raises:
        UserWarning: When ``/`` is detected in the suffix.
    """
    suffix = os.path.basename(path).rsplit(".", 1)[-1]
    if "/" in suffix:
        raise UserWarning("Filename can't contain '/' in suffix (%s)!" % path)
    return suffix
1,090,116
Creates a map of letter use in a word. Args: word: a string to create a letter map from Returns: a dictionary of {letter: integer count of letter in word}
def _letter_map(word):
    """Build a frequency map of the letters in *word*.

    Args:
        word: a string to count letters from.

    Returns:
        dict: ``{letter: integer count of letter in word}``.
    """
    counts = {}
    for ch in word:
        counts[ch] = counts.get(ch, 0) + 1
    return counts
1,090,144
Finds anagrams in word. Args: word: the string to base our search off of sowpods: boolean to declare TWL or SOWPODS words file start: a string of starting characters to find anagrams based on end: a string of ending characters to find anagrams based on Yields: a tuple of (word, score) that can be made with the input_word
def anagrams_in_word(word, sowpods=False, start="", end=""):
    """Finds anagrams in word.

    Args:
        word: the string to base our search off of.
        sowpods: boolean to declare TWL or SOWPODS words file.
        start: a string of starting characters to find anagrams based on.
        end: a string of ending characters to find anagrams based on.

    Yields:
        a tuple of (word, score) that can be made with the input word.
    """
    input_letters, blanks, questions = blank_tiles(word)
    # Required start/end tiles count as available input letters too.
    for tile in start + end:
        input_letters.append(tile)
    # NOTE(review): the loop variable shadows the `word` parameter; this is
    # safe only because the parameter is not used after blank_tiles() above.
    for word in word_list(sowpods, start, end):
        lmap = _letter_map(input_letters)  # fresh letter pool per candidate
        used_blanks = 0
        for letter in word:
            if letter in lmap:
                lmap[letter] -= 1
                if lmap[letter] < 0:
                    # Letter pool exhausted; spend a blank/question tile.
                    used_blanks += 1
                    if used_blanks > (blanks + questions):
                        break
            else:
                used_blanks += 1
                if used_blanks > (blanks + questions):
                    break
        else:
            # Reached only when the inner loop did not break: word is makeable.
            yield (word, word_score(word, input_letters, questions))
1,090,145
Parse configuration values from the database. The extension must have been previously initialized. If a key is not found in the database, it will be created with the default value specified. Arguments: keys (list[str]): list of keys to parse. If the list is empty, then all the keys known to the application will be used. Returns: dict of the parsed config values.
def parse_conf(self, keys=None):
    """Parse configuration values from the database.

    The extension must have been previously initialized. If a key is not
    found in the database, it will be created with the default value
    specified in ``WAFFLE_CONFS``.

    Arguments:
        keys (list[str], optional): keys to parse. If empty or ``None``,
            all the keys known to the application will be used.

    Returns:
        dict: the parsed config values.
    """
    confs = self.app.config.get('WAFFLE_CONFS', {})
    # Fix: avoid the mutable default argument `keys=[]`; None/empty both
    # mean "all known keys", so callers passing [] behave the same.
    if not keys:
        keys = confs.keys()
    result = {}
    for key in keys:
        # Some things cannot be changed...
        if key.startswith('WAFFLE_'):
            continue
        # No arbitrary keys
        if key not in confs:
            continue
        stored_conf = self.configstore.get(key)
        if not stored_conf:
            # Store new record in database
            value = confs[key].get('default', '')
            stored_conf = self.configstore.put(key, util.serialize(value))
            self.configstore.commit()
        else:
            # Get stored value
            value = util.deserialize(stored_conf.get_value())
        result[stored_conf.get_key()] = value
    return result
1,090,358
Update database values and application configuration. The provided keys must be defined in the ``WAFFLE_CONFS`` setting. Arguments: new_values (dict): dict of configuration variables and their values The dict has the following structure: { 'MY_CONFIG_VAR' : <CONFIG_VAL>, 'MY_CONFIG_VAR1' : <CONFIG_VAL1> }
def update_db(self, new_values):
    """Update database values and application configuration.

    The provided keys must be defined in the ``WAFFLE_CONFS`` setting.

    Arguments:
        new_values (dict): configuration variables mapped to their new
            values, e.g. ``{'MY_CONFIG_VAR': <CONFIG_VAL>}``.
    """
    confs = self.app.config.get('WAFFLE_CONFS', {})
    to_update = {}
    for key, value in new_values.items():
        # Reserved extension settings may never be overridden.
        if key.startswith('WAFFLE_'):
            continue
        # Silently drop keys the application does not know about.
        if key not in confs:
            continue
        self.configstore.put(key, util.serialize(value))
        self.configstore.commit()
        to_update[key] = value
    if not to_update:
        return
    # Update config
    self.app.config.update(to_update)
    # Notify other processes
    if self.app.config.get('WAFFLE_MULTIPROC', False):
        self.notify(self)
1,090,359
Initialize the extension for the given application and store. Parse the configuration values stored in the database obtained from the ``WAFFLE_CONFS`` value of the configuration. Arguments: app: Flask application instance configstore (WaffleStore): database store.
def init_app(self, app, configstore):
    """Initialize the extension for the given application and store.

    Parses the configuration values stored in the database obtained from
    the ``WAFFLE_CONFS`` value of the configuration.

    Arguments:
        app: Flask application instance.
        configstore (WaffleStore): database store.
    """
    if not hasattr(app, 'extensions'):
        app.extensions = {}
    state = _WaffleState(app, configstore)
    self.state = state
    app.extensions['waffleconf'] = state
1,090,362
Support item access via dot notation. Args: __key: Key to fetch
def __getattr__(self, __key: Hashable) -> Any:
    """Support item access via dot notation.

    Args:
        __key: Key to fetch.

    Raises:
        AttributeError: If the key is absent.
    """
    try:
        value = self[__key]
    except KeyError:
        raise AttributeError(__key)
    return value
1,090,552
Support item assignment via dot notation. Args: __key: Key to set value for __value: Value to set key to
def __setattr__(self, __key: Hashable, __value: Any) -> None:
    """Support item assignment via dot notation.

    Args:
        __key: Key to set value for.
        __value: Value to set key to.

    Raises:
        AttributeError: If the underlying item assignment fails for any
            reason (any exception is translated).
    """
    try:
        self[__key] = __value
    except Exception as err:
        raise AttributeError(str(err))
1,090,553
Support item deletion via dot notation. Args: __key: Key to delete
def __delattr__(self, __key: Hashable) -> None:
    """Support item deletion via dot notation.

    Args:
        __key: Key to delete.

    Raises:
        AttributeError: If item deletion raises ``TypeError``.
    """
    try:
        del self[__key]
    except TypeError as err:
        # NOTE(review): a missing key raises KeyError, which is NOT
        # translated to AttributeError here — confirm the asymmetry with
        # __getattr__ is intentional.
        raise AttributeError(str(err))
1,090,554
Init a new FileSystem Cache Args: cache_dir maxsize. Maximum size of the cache, in GB
def __init__(
        self, dir=None, options=None, upstream=None, prefix='', **kwargs):
    """Init a new FileSystem Cache.

    Args:
        dir: absolute path to the cache directory.
        options: unused; kept for interface compatibility.
        upstream: upstream cache to delegate to.
        prefix: path prefix applied inside the cache.
    """
    from ambry.dbexceptions import ConfigurationError

    super(FsCache, self).__init__(upstream, **kwargs)

    self._cache_dir = dir

    # A relative path would resolve differently per process CWD.
    if not os.path.isabs(self._cache_dir):
        raise ConfigurationError(
            "Filesystem cache must have an absolute path. Got: '{}' ".format(
                self._cache_dir))

    self.prefix = prefix
1,090,581
Copy a file to the repository Args: source: Absolute path to the source file, or a file-like object rel_path: path relative to the root of the repository
def put(self, source, rel_path, metadata=None):
    """Copy a file to the repository.

    Args:
        source: absolute path to the source file, or a file-like object.
        rel_path: path relative to the root of the repository; may also be
            an object exposing ``cache_key`` (legacy behaviour).
        metadata: optional metadata forwarded to ``put_stream``.

    Returns:
        str: absolute path of the stored file.
    """
    # This case should probably be deprecated.
    # NOTE: `basestring` implies this module targets Python 2.
    if not isinstance(rel_path, basestring):
        rel_path = rel_path.cache_key

    sink = self.put_stream(rel_path, metadata=metadata)

    try:
        copy_file_or_flo(source, sink)
    except (KeyboardInterrupt, SystemExit):
        # Don't leave a half-written file behind on interruption.
        path_ = self.path(rel_path)
        if os.path.exists(path_):
            os.remove(path_)
        raise

    sink.close()

    return os.path.join(self.cache_dir, rel_path)
1,090,583
Init a new FileSystem Cache Args: cache_dir maxsize. Maximum size of the cache, in GB
def __init__(self, dir=dir, size=10000, upstream=None, **kwargs):
    """Init a new size-limited FileSystem Cache.

    Args:
        dir: cache directory. NOTE(review): the default is the ``dir``
            builtin, which is almost certainly unintended — callers should
            always pass a path explicitly; kept for interface compatibility.
        size: maximum size of the cache, in MB.
        upstream: upstream cache to delegate to.
    """
    from ambry.dbexceptions import ConfigurationError

    super(FsLimitedCache, self).__init__(dir, upstream=upstream, **kwargs)

    self._size = size
    # Fix: 1 MB is 1048576 (2**20) bytes; the previous constant 1048578
    # was a typo.
    self.maxsize = int(size) * 1048576  # size in MB
    self.readonly = False
    # NOTE(review): 'usreadonly' looks like a typo, but the attribute name
    # is part of the object's interface and is kept unchanged.
    self.usreadonly = False
    self._database = None
    self.use_db = True

    if not os.path.isdir(self.cache_dir):
        os.makedirs(self.cache_dir)

    if not os.path.isdir(self.cache_dir):
        raise ConfigurationError(
            "Cache dir '{}' is not valid".format(self.cache_dir))
1,090,595
If there are not size bytes of space left, delete files until there is Args: size: size of the current file this_rel_path: rel_pat to the current file, so we don't delete it.
def _free_up_space(self, size, this_rel_path=None):
    """If there are not *size* bytes of space left, delete files until there are.

    Oldest entries (by stored ``time``) are evicted first.

    Args:
        size: size of the incoming file, in bytes.
        this_rel_path: rel_path of the current file, so we don't delete it.
    """
    # Amount of space we are over ( bytes ) for next put
    space = self.size + size - self.maxsize

    if space <= 0:
        return

    removes = []

    # Collect oldest-first eviction candidates until enough space is freed.
    for row in self.database.execute("SELECT path, size, time FROM files ORDER BY time ASC"):
        if space > 0:
            removes.append(row[0])
            space -= row[1]
        else:
            break

    for rel_path in removes:
        if rel_path != this_rel_path:
            global_logger.debug("Deleting {}".format(rel_path))
            self.remove(rel_path)
1,090,599
Retrieve the prices of a list of equities as a DataFrame (columns = symbols) Arguments: symbols (list of str): Ticker symbols like "GOOG", "AAPL", etc e.g. ["AAPL", " slv ", "GLD", "GOOG", "$SPX", "XOM", "msft"] start (datetime): The date at the start of the period being analyzed. end (datetime): The date at the end of the period being analyzed. Yahoo data stops at 2013/1/1
def price_dataframe(symbols=('sne',),
                    start=datetime.datetime(2008, 1, 1),
                    end=datetime.datetime(2009, 12, 31),
                    price_type='actual_close',
                    cleaner=util.clean_dataframe,
                    ):
    """Retrieve the prices of a list of equities as a DataFrame (columns = symbols).

    Arguments:
        symbols (list of str): Ticker symbols like "GOOG", "AAPL", etc.
        start (datetime): The date at the start of the period being analyzed.
        end (datetime): The date at the end of the period being analyzed.
            Yahoo data stops at 2013/1/1.
        price_type (str or list of str): Price column(s) to retrieve.
        cleaner (callable): Post-processing applied to the result.
    """
    # NOTE(review): after normalization `price_type` is no longer used —
    # only the commented-out data fetch below consumed it.
    if isinstance(price_type, basestring):
        price_type = [price_type]
    # Fall back to the historical default window when None is passed.
    start = nlp.util.normalize_date(start or datetime.date(2008, 1, 1))
    end = nlp.util.normalize_date(end or datetime.date(2009, 12, 31))
    symbols = util.make_symbols(symbols)
    df = get_dataframes(symbols)
    # t = du.getNYSEdays(start, end, datetime.timedelta(hours=16))
    # df = clean_dataframes(dataobj.get_data(t, symbols, price_type))
    if not df or len(df) > 1:
        return cleaner(df)
    else:
        return cleaner(df[0])
1,090,676
Create a tar-file or a tar.gz at location: filename. params: gzip: if True - gzip the file, default = False dirs: dirs to be tared returns a 3-tuple with returncode (integer), terminal output (string) and the new filename.
def tar(filename, dirs=None, gzip=False):
    """Create a tar file (or tar.gz) at *filename*.

    params:
        filename: path of the archive to create.
        dirs: directory, or list of directories, to be tared.
        gzip: if True - gzip the file, default = False.

    returns a 3-tuple with returncode (integer), terminal output (string)
    and the new filename.
    """
    # Fix: avoid a mutable default argument.
    if dirs is None:
        dirs = []
    if gzip:
        cmd = 'tar czvf %s ' % filename
    else:
        cmd = 'tar cvf %s ' % filename
    # Fix: the original compared type(dirs) against the *string* 'list',
    # which is never equal, so list arguments were wrapped again and
    # stringified wholesale into the command line.
    if not isinstance(dirs, list):
        dirs = [dirs]
    cmd += ' '.join(str(x) for x in dirs)
    retcode, output = sh(cmd)
    return (retcode, output, filename)
1,090,941
Create a Thing. Args: name (str): name of the Thing. This corresponds to the AWS IoT Thing name. client (str): MQTT client connection to use. This can be set anytime before publishing Thing messages to the server.
def __init__(self, name, client=None):
    """Create a Thing.

    Args:
        name (str): name of the Thing. This corresponds to the AWS IoT
            Thing name.
        client (str): MQTT client connection to use. This can be set
            anytime before publishing Thing messages to the server.
    """
    self._name = name
    self.client = client
    self._state = None  # last state published via publish_state()
1,091,118
Publish thing state to AWS IoT. Args: state (dict): object state. Must be JSON serializable (i.e., not have circular references).
def publish_state(self, state):
    """Publish thing state to AWS IoT.

    Args:
        state (dict): object state. Must be JSON serializable
            (i.e., not have circular references).
    """
    payload = json.dumps({'state': {'reported': state}})
    self.client.publish(self.topic, payload)
    self._state = state
1,091,119
Gets the enum for the op code Args: op: value of the op code (will be casted to int) Returns: The enum that matches the op code
def parse(cls, op):
    """Gets the enum member for an op code.

    Args:
        op: value of the op code (will be cast to int).

    Returns:
        The enum member matching the op code, or None if none matches.
    """
    return next((member for member in cls if member.value == int(op)), None)
1,091,124
Runs the thread This method handles sending the heartbeat to the Discord websocket server, so the connection can remain open and the bot remain online for those commands that require it to be. Args: None
def run(self):
    """Runs the heartbeat thread.

    Sends periodic heartbeats to the Discord websocket server so the
    connection stays open and the bot remains online for commands that
    require it.
    """
    while self.should_run:
        try:
            # NOTE(review): `last_sequence` is not defined in this scope —
            # it likely should be an attribute (e.g. self.last_sequence).
            # As written this raises NameError, which is swallowed by the
            # except below, so no heartbeat is ever sent.
            self.logger.debug('Sending heartbeat, seq ' + last_sequence)
            self.ws.send(json.dumps({
                'op': 1,
                'd': last_sequence
            }))
        except Exception as e:
            self.logger.error(f'Got error in heartbeat: {str(e)}')
        finally:
            # Sleep in small ticks so a stop request is noticed promptly.
            elapsed = 0.0
            while elapsed < self.interval and self.should_run:
                time.sleep(self.TICK_INTERVAL)
                elapsed += self.TICK_INTERVAL
1,091,126
Sets up the internal logger Args: logging_level: what logging level to use log_to_console: whether or not to log to the console
def _setup_logger(self, logging_level: int, log_to_console: bool):
    """Set up the internal ``discord`` logger.

    Args:
        logging_level: what logging level to use.
        log_to_console: whether or not to also log to stdout.
    """
    logger = logging.getLogger('discord')
    logger.handlers = []
    logger.setLevel(logging_level)

    formatter = logging.Formatter(style='{',
                                  fmt='{asctime} [{levelname}] {message}',
                                  datefmt='%Y-%m-%d %H:%M:%S')

    file_handler = logging.FileHandler('pycord.log')
    file_handler.setFormatter(formatter)
    file_handler.setLevel(logging_level)
    logger.addHandler(file_handler)

    if log_to_console:
        stream_handler = logging.StreamHandler(sys.stdout)
        stream_handler.setFormatter(formatter)
        stream_handler.setLevel(logging_level)
        logger.addHandler(stream_handler)

    self.logger = logger
1,091,129
Make an HTTP request Args: path: the URI path (not including the base url, start with the first uri segment, like 'users/...') method: the HTTP method to use (GET, POST, PATCH, ...) data: the data to send as JSON data expected_status: expected HTTP status; other statuses received will raise an Exception Returns: Data from the endpoint's response
def _query(self, path: str, method: str, data: Dict[str, Any] = None,
           expected_status: int = 200) \
        -> Union[List[Dict[str, Any]], Dict[str, Any], None]:
    """Make an HTTP request against the Discord API.

    Args:
        path: the URI path (not including the base url; starts with the
            first uri segment, like 'users/...').
        method: the HTTP method to use (GET, POST, PATCH).
        data: the data to send as JSON data.
        expected_status: expected HTTP status; other statuses raise.

    Returns:
        Data from the endpoint's response, or None when *expected_status*
        is not 200.

    Raises:
        ValueError: on an unknown method or an unexpected response status.
    """
    url = Pycord.url_base + path
    self.logger.debug(f'Making {method} request to "{url}"')
    if method == 'GET':
        r = requests.get(url, headers=self._build_headers())
    elif method == 'POST':
        # Fix: a stray requests.get() after this line used to overwrite
        # the POST response, discarding its status and body.
        r = requests.post(url, headers=self._build_headers(), json=data)
    elif method == 'PATCH':
        r = requests.patch(url, headers=self._build_headers(), json=data)
    else:
        raise ValueError(f'Unknown HTTP method {method}')
    self.logger.debug(f'{method} response from "{url}" was "{r.status_code}"')
    if r.status_code != expected_status:
        raise ValueError(f'Non-{expected_status} {method} response from Discord API ({r.status_code}): {r.text}')
    if expected_status == 200:
        return r.json()
    return None
1,091,130
Callback for receiving errors from the websocket connection Args: ws: websocket connection error: exception raised
def _ws_on_error(self, ws: websocket.WebSocketApp, error: Exception):
    """Callback for receiving errors from the websocket connection.

    Args:
        ws: websocket connection.
        error: exception raised.
    """
    self.logger.error(f'Got error from websocket connection: {str(error)}')
1,091,132
Callback for closing the websocket connection Args: ws: websocket connection (now closed)
def _ws_on_close(self, ws: websocket.WebSocketApp):
    """Callback for the websocket connection closing; triggers a reconnect.

    Args:
        ws: websocket connection (now closed).
    """
    self.connected = False
    self.logger.error('Websocket closed')
    self._reconnect_websocket()
1,091,133
Callback for sending the initial authentication data This "payload" contains the required data to authenticate this websocket client as a suitable bot connection to the Discord websocket. Args: ws: websocket connection
def _ws_on_open(self, ws: websocket.WebSocketApp):
    """Callback for sending the initial authentication data.

    This payload contains the data required to authenticate this websocket
    client as a suitable bot connection to the Discord websocket.

    Args:
        ws: websocket connection.
    """
    identify_data = {
        'token': self.token,
        'properties': {
            '$os': sys.platform,
            '$browser': 'Pycord',
            '$device': 'Pycord',
            '$referrer': '',
            '$referring_domain': ''
        },
        'compress': True,
        'large_threshold': 250
    }
    payload = {
        'op': WebSocketEvent.IDENTIFY.value,
        'd': identify_data
    }
    self.logger.debug('Sending identify payload')
    ws.send(json.dumps(payload))
    self.connected = True
1,091,134
Call this method to make the connection to the Discord websocket This method is not blocking, so you'll probably want to call it after initializating your Pycord object, and then move on with your code. When you want to block on just maintaining the websocket connection, then call ``keep_running``, and it'll block until your application is interrupted. Args: None
def connect_to_websocket(self):
    """Make the connection to the Discord websocket.

    This method is not blocking, so you'll probably want to call it after
    initializing your Pycord object, and then move on with your code. When
    you want to block on just maintaining the websocket connection, call
    ``keep_running`` and it'll block until your application is interrupted.
    """
    self.logger.info('Making websocket connection')
    try:
        if hasattr(self, '_ws'):
            self._ws.close()
    except Exception:
        # Fix: narrowed a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed here.
        self.logger.debug('Couldn\'t terminate previous websocket connection')
    self._ws = websocket.WebSocketApp(
        self._get_websocket_address() + '?v=6&encoding=json',
        on_message=self._ws_on_message,
        on_error=self._ws_on_error,
        on_close=self._ws_on_close
    )
    self._ws.on_open = self._ws_on_open
    self._ws_run_forever_wrapper = WebSocketRunForeverWrapper(self.logger, self._ws)
    self._ws_run_forever_wrapper.start()
1,091,135
Disconnects from the websocket Args: None
def disconnect_from_websocket(self):
    """Disconnect from the websocket and stop the keep-alive thread."""
    self.logger.warning('Disconnecting from websocket')
    self.logger.info('Stopping keep alive thread')
    self._ws_keep_alive.stop()
    self._ws_keep_alive.join()
    self.logger.info('Stopped keep alive thread')
    try:
        self.logger.warning('Disconnecting from websocket')
        self._ws.close()
        self.logger.info('Closed websocket connection')
    except Exception:
        # Fix: narrowed a bare `except:`; closing may legitimately fail
        # when the socket is already gone, but KeyboardInterrupt/SystemExit
        # should propagate.
        self.logger.debug('Couldn\'t terminate previous websocket connection')
1,091,136
Updates the bot's status This is used to get the game that the bot is "playing" or to clear it. If you want to set a game, pass a name; if you want to clear it, either call this method without the optional ``name`` parameter or explicitly pass ``None``. Args: name: the game's name, or None
def set_status(self, name: str = None):
    """Update the bot's status (the game the bot is "playing").

    Pass a game name to set it; call with no argument (or explicitly pass
    ``None``) to clear it.

    Args:
        name: the game's name, or None.
    """
    game = {'name': name} if name else None
    payload = {
        'op': WebSocketEvent.STATUS_UPDATE.value,
        'd': {
            'game': game,
            'status': 'online',
            'afk': False,
            'since': 0.0
        }
    }
    data = json.dumps(payload, indent=2)
    self.logger.debug(f'Sending status update payload: {data}')
    self._ws.send(data)
1,091,137
Send a message to a channel For formatting options, see the documentation: https://discordapp.com/developers/docs/resources/channel#create-message Args: id: channel snowflake id message: your message (string) Returns: Dictionary object of the new message
def send_message(self, id: str, message: str) -> Dict[str, Any]:
    """Send a message to a channel.

    For formatting options, see the documentation:
    https://discordapp.com/developers/docs/resources/channel#create-message

    Args:
        id: channel snowflake id.
        message: your message (string).

    Returns:
        Dictionary object of the new message.

    Raises:
        ValueError: if the websocket is not connected.
    """
    if not self.connected:
        raise ValueError('Websocket not connected')
    endpoint = f'channels/{id}/messages'
    return self._query(endpoint, 'POST', {'content': message})
1,091,147
Set a cookie. Args: key (:obj:`str`): Cookie name value (:obj:`str`): Cookie value domain (:obj:`str`): Cookie domain path (:obj:`str`): Cookie value secure (:obj:`bool`): True if secure, False otherwise httponly (:obj:`bool`): True if it's a HTTP only cookie, False otherwise
def set_cookie(self, key, value, domain=None, path='/', secure=False,
               httponly=True):
    """Set a cookie.

    Args:
        key (:obj:`str`): Cookie name
        value (:obj:`str`): Cookie value
        domain (:obj:`str`): Cookie domain
        path (:obj:`str`): Cookie path
        secure (:obj:`bool`): True if secure, False otherwise
        httponly (:obj:`bool`): True if it's a HTTP only cookie, False
            otherwise
    """
    self._cookies[key] = value
    morsel = self._cookies[key]
    # Only truthy attributes are written onto the cookie morsel.
    for attr, attr_value in (('domain', domain), ('path', path),
                             ('secure', secure), ('httponly', httponly)):
        if attr_value:
            morsel[attr] = attr_value
1,091,181
Bakes the response and returns the content. Args: start_response (:obj:`callable`): Callback method that accepts status code and a list of tuples (pairs) containing headers' key and value respectively.
def bake(self, start_response):
    """Bake the response and return the content.

    Args:
        start_response (:obj:`callable`): Callback method that accepts a
            status code and a list of tuples (pairs) containing headers'
            key and value respectively.

    Returns:
        The body wrapped in a list (WSGI style) when it is bytes,
        otherwise the raw content.
    """
    # WSGI bodies must be bytes; encode text content up front.
    if isinstance(self._content, six.text_type):
        self._content = self._content.encode('utf8')
    if self._content_length is None:
        self._content_length = len(self._content)
    self._headers[HttpResponseHeaders.CONTENT_LENGTH] = \
        str(self._content_length)
    headers = list(self._headers.items())
    # Each cookie becomes its own Set-Cookie header.
    cookies = [(HttpResponseHeaders.SET_COOKIE, v.OutputString())
               for _, v in self._cookies.items()]
    if len(cookies):
        headers = list(headers) + cookies
    start_response(self._status_code, headers)
    if isinstance(self._content, six.binary_type):
        return [self._content]
    return self._content
1,091,183
Helper method to set a redirect response. Args: url (:obj:`str`): URL to redirect to status (:obj:`str`, optional): Status code of the response
def set_redirect(self, url, status=HttpStatusCodes.HTTP_303):
    """Helper method to set a redirect response.

    Args:
        url (:obj:`str`): URL to redirect to
        status (:obj:`str`, optional): Status code of the response
    """
    self.set_status(status)
    self.set_content('')  # redirect responses carry no body
    self.set_header(HttpResponseHeaders.LOCATION, url)
1,091,184
Helper method to set a JSON response. Args: obj (:obj:`object`): JSON serializable object status (:obj:`str`, optional): Status code of the response
def set_json(self, obj, status=HttpStatusCodes.HTTP_200):
    """Helper method to set a JSON response.

    Args:
        obj (:obj:`object`): JSON serializable object
        status (:obj:`str`, optional): Status code of the response
    """
    # Non-serializable values fall back to their str() representation.
    serialized = json.dumps(obj, sort_keys=True, default=lambda x: str(x))
    self.set_status(status)
    self.set_header(HttpResponseHeaders.CONTENT_TYPE, 'application/json')
    self.set_content(serialized)
1,091,185
Decorator routes an Rogo IntentRequest. Functions decorated as an intent are registered as the view function for the Intent's URL, and provide the backend responses to give your Skill its functionality. @ask.intent('WeatherIntent') def weather(city): return statement('I predict great weather for {}'.format(city)) Arguments: intent_name {str} -- Name of the intent request to be mapped to the decorated function
def intent(self, intent_name):
    """Decorator that routes a Rogo IntentRequest.

    Functions decorated as an intent are registered as the view function
    for the Intent's URL, and provide the backend responses to give your
    Skill its functionality.

        @ask.intent('WeatherIntent')
        def weather(city):
            return statement('I predict great weather for {}'.format(city))

    Arguments:
        intent_name {str} -- Name of the intent request to be mapped to
            the decorated function
    """
    def decorator(f):
        self._intent_view_funcs[intent_name] = f

        # NOTE(review): `wrapper` is defined but never returned or stored —
        # it is dead code; the undecorated `f` is returned below. Confirm
        # whether `return wrapper` was intended.
        @wraps(f)
        def wrapper(*args, **kw):
            self._flask_view_func(*args, **kw)
        return f
    return decorator
1,091,197
Remove "special" characters from beginning and the end of the `inp`. For example ``,a-sd,-/`` -> ``a-sd``. Args: inp (str): Input string. hairs (str): List of characters which should be removed. See :attr:`HAIRS` for details. Returns: str: Cleaned string.
def remove_hairs(inp, hairs=HAIRS):
    """Remove "special" characters from the beginning and end of *inp*.

    For example ``,a-sd,-/`` -> ``a-sd``.

    Args:
        inp (str): Input string.
        hairs (str): Characters which should be removed. See :attr:`HAIRS`
            for details.

    Returns:
        str: Cleaned string.
    """
    # Walk the bounds inward instead of repeatedly slicing the string.
    end = len(inp)
    while end and inp[end - 1] in hairs:
        end -= 1
    start = 0
    while start < end and inp[start] in hairs:
        start += 1
    return inp[start:end]
1,091,370
Parametrized decorator wrapping the :func:`remove_hairs` function. Args: hairs (str, default HAIRS): List of characters which should be removed. See :attr:`HAIRS` for details.
def remove_hairs_decorator(fn=None, hairs=HAIRS):
    """Parametrized decorator wrapping the :func:`remove_hairs` function.

    Usable both bare (``@remove_hairs_decorator``) and parametrized
    (``@remove_hairs_decorator(hairs="...")``).

    Args:
        hairs (str, default HAIRS): Characters which should be removed.
            See :attr:`HAIRS` for details.
    """
    def decorator_wrapper(func):
        @wraps(func)
        def wrapped(*args, **kwargs):
            return remove_hairs(func(*args, **kwargs), hairs)
        return wrapped

    # Bare usage: we were handed the function directly.
    if fn:
        return decorator_wrapper(fn)
    return decorator_wrapper
1,091,371
Returns all data entries for a particular key. Default is the main key. Args: key (str): key whose values to return (default: main key) Returns: List of all data entries for the key
def get_all(self, key=None):
    """Return all data entries for a particular key (default: main key).

    Args:
        key (str): key whose values to return.

    Returns:
        All data entries for the key; a set when the key is a scalar
        non-unique key, otherwise as stored.
    """
    if key is None:
        key = self.definition.main_key
    key = self.definition.key_synonyms.get(key, key)
    entries = self._get_all(key)
    if key in self.definition.scalar_nonunique_keys:
        entries = set(entries)
    return entries
1,091,452
Updates a Clip. Parameters: - args Dictionary of other fields Accepted fields can be found here: https://github.com/kippt/api-documentation/blob/master/objects/clip.md
def update(self, **args):
    """Update a Clip.

    Parameters:
        args: Dictionary of fields to change. Accepted fields can be
            found here:
            https://github.com/kippt/api-documentation/blob/master/objects/clip.md
    """
    # JSONify our data.
    payload = json.dumps(args)
    response = requests.put(
        "https://kippt.com/api/clips/%s" % (self.id),
        headers=self.kippt.header,
        data=payload)
    return (response.json())
1,091,545
Comment on a clip. Parameters: - body (Required)
def comment(self, body):
    """Comment on a clip.

    Parameters:
        body (str): comment text (required).
    """
    # Merge our url as a parameter and JSONify it.
    data = json.dumps({'body': body})
    # Fix: the original was missing the '%' string-format operator, so the
    # URL string literal was *called* with (self.id), raising TypeError.
    r = requests.post(
        "https://kippt.com/api/clips/%s/comments" % (self.id),
        headers=self.kippt.header,
        data=data
    )
    return (r.json())
1,091,547
Find the href destinations of all links at URL Arguments: - `url`: Return: list[str] Exceptions: None
def find_links(url):
    """Find the href destinations of all links at URL.

    Arguments:
        - `url`: page address; a protocol is prepended if missing.

    Return: list[str] of href values; relative hrefs are joined to *url*.

    Exceptions: None
    """
    url = protocolise(url)
    content = requests.get(url).content
    # NOTE: StringIO over raw bytes implies this module targets Python 2.
    flike = StringIO(content)
    root = html.parse(flike).getroot()
    atags = root.cssselect('a')
    hrefs = [a.attrib['href'] for a in atags]
    # !!! This does the wrong thing for bbc.co.uk/index.html
    hrefs = [h if h.startswith('http') else '/'.join([url, h]) for h in hrefs ]
    return hrefs
1,091,636
Simple nearest interpolator that interpolates based on the minima and maxima of points based on the passed resolution in res. Parameters: ----------- xs -- A collection of `ndim` arrays of points. res -- List of resolutions.
def simple_nearest_indices(xs, res):
    """Simple nearest interpolator over a regular grid spanning the data.

    The grid runs between the minima and maxima of the points in *xs*,
    with the resolutions given in *res*.

    Parameters:
    -----------
    xs  -- A collection of `ndim` arrays of points.
    res -- List of resolutions, one per dimension.
    """
    mins = [min(a) for a in xs]
    maxs = [max(a) for a in xs]
    axes = [np.linspace(lo, hi, r) for lo, hi, r in zip(mins, maxs, res)]
    grid = tuple(np.meshgrid(*axes, indexing='ij'))
    if type(xs) is not tuple:
        xs = tuple(xs)
    return nearest_indices(xs, grid)
1,091,659
Get a vector flds data. Parameters: ----------- d -- flds data. s -- key for the data.
def getvector(d, s):
    """Assemble the x, y, z components of a flds vector quantity.

    Parameters:
    -----------
    d -- flds data (mapping).
    s -- base key for the quantity; reads keys s+'x', s+'y', s+'z'.
    """
    return np.array([d[s + axis] for axis in ('x', 'y', 'z')])
1,091,775
Restrict data by indices. Parameters: ---------- d -- the flds/sclr data restrict -- a tuple of [xmin,xmax,...] etx
def restrict(d, restrict):
    """Restrict flds/sclr data arrays in-place by index ranges.

    Parameters:
    ----------
    d        -- the flds/sclr data dict.
    restrict -- a flat tuple of bounds: [xmin, xmax(, ymin, ymax(, zmin, zmax))].
    """
    notqs = ['t', 'xs', 'ys', 'zs', 'fd', 'sd']
    keys = [k for k in d if k not in notqs]
    if len(restrict) not in (2, 4, 6):
        raise ValueError("restrict of length {} is not valid".format(
            len(restrict)))
    if len(restrict) == 2:
        window = slice(restrict[0], restrict[1])
    else:
        # Pair up (min, max) bounds into a multi-dimensional slice.
        window = tuple(slice(lo, hi)
                       for lo, hi in zip(restrict[::2], restrict[1::2]))
    for k in keys:
        d[k] = d[k][window]
1,091,777
run command and show if success or failed Args: cmd: string Returns: bool: if this command run successfully
def runCmd(cls, cmd):
    """Run a shell command and report whether it succeeded.

    Args:
        cmd: command string.

    Returns:
        bool: whether this command ran successfully.
    """
    cit.echo(cmd, "command")
    exit_code = os.system(cmd)
    cls.checkResult(exit_code)
1,091,914
run command and return the str format stdout Args: cmd: string Returns: str: what the command's echo
def readCmd(cls, cmd):
    """Run a command and return its stdout as a string.

    Args:
        cmd: command string.

    Returns:
        str: the command's stdout, decoded.
    """
    proc = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE)
    stdout_data, _ = proc.communicate(input=None)
    return stdout_data.decode()
1,091,915
Check and update file compares with remote_url Args: file_: str. Local filename. Normally it's __file__ url: str. Remote url of raw file content. Normally it's https://raw.github.com/... Returns: bool: file updated or not
def updateFile(cls, file_, url):
    """Check and update a local file against its remote version.

    Args:
        file_: str. Local filename. Normally it's __file__.
        url: str. Remote url of raw file content. Normally it's
            https://raw.github.com/...

    Returns:
        bool: whether the file was updated.
    """
    def compare(s1, s2):
        # (identical?, size difference in bytes)
        return s1 == s2, len(s2) - len(s1)

    if not url or not file_:
        return False
    try:
        req = urllib.request.urlopen(url)
        raw_codes = req.read()
        with open(file_, 'rb') as f:
            # Normalize CRLF so the comparison ignores line-ending changes.
            current_codes = f.read().replace(b'\r', b'')
        is_same, diff = compare(current_codes, raw_codes)
        if is_same:
            cit.info("{} is already up-to-date.".format(file_))
            return False
        else:
            cit.ask("A new version is available. Update? (Diff: {})".format(diff))
            if cit.get_choice(['Yes', 'No']) == 'Yes':
                with open(file_, 'wb') as f:
                    f.write(raw_codes)
                cit.info("Update Success.")
                return True
            else:
                cit.warn("Update Canceled")
                return False
    except Exception as e:
        # Best-effort: any failure (network, IO) is reported, not raised.
        cit.err("{f} update failed: {e}".format(f=file_, e=e))
        return False
1,091,916
Get info by ajax Args: url: string Returns: dict: json decoded into a dict
def ajax(cls, url, param=None, method='get'):
    """Get info by ajax.

    Args:
        url: string endpoint.
        param: dict of query/body parameters.
        method: 'get' or 'post' (case-insensitive).

    Returns:
        dict: json decoded into a dict, or None when there is no response.

    Raises:
        Exception: if *method* is neither GET nor POST.
    """
    # Fix: avoid a mutable default argument (was `param={}`).
    param = urllib.parse.urlencode(param or {})
    if method.lower() == 'get':
        req = urllib.request.Request(url + '?' + param)
    elif method.lower() == 'post':
        param = param.encode('utf-8')
        req = urllib.request.Request(url, data=param)
    else:
        raise Exception("invalid method '{}' (GET/POST)".format(method))
    rsp = urllib.request.urlopen(req)
    if rsp:
        rsp_json = rsp.read().decode('utf-8')
        rsp_dict = json.loads(rsp_json)
        return rsp_dict
    return None
1,091,917
Get the Windows OS version running on the machine. Params: None Returns: The Windows OS version running on the machine (comparables with the values list in the class).
def get_version():
    """Get the Windows OS version running on the machine.

    Returns:
        The Windows OS version as (major, minor, sp_ver/build, is_server),
        comparable with the values list in the class, or NO_WIN when not
        running on Windows.
    """
    # Other OS check.
    # Fix: 'win' is a substring of 'darwin', so the old `'win' in
    # sys.platform` check treated macOS as Windows (and then crashed on
    # sys.getwindowsversion). Check the platform prefix instead.
    if not sys.platform.startswith('win'):
        return NO_WIN
    # Get infos
    win_ver = sys.getwindowsversion()
    try:
        # Python 3.6.x or upper -> Use 'platform_version' attribute
        major, minor, build = win_ver.platform_version
    except AttributeError:
        if sys.version_info < (3, 0):
            # Python 2.7.x -> Use 'platform' module to ensure the correct
            # values (seems that Win 10 is not correctly detected)
            from platform import _get_real_winver
            major, minor, build = _get_real_winver(win_ver.major,
                                                   win_ver.minor,
                                                   win_ver.build)
            major, minor, build = int(major), int(minor), int(build)  # 'long' to 'int'
        else:
            # Python 3.0.x - 3.5.x -> Keep 'sys.getwindowsversion()' values
            major, minor, build = win_ver.major, win_ver.minor, win_ver.build
    # Check is is server or not (it works only on Python 2.7.x or newer)
    try:
        is_server = 1 if win_ver.product_type == 3 else 0
    except AttributeError:
        is_server = 0
    # Parse Service Pack version (or Build number)
    try:
        if major == 10:
            # The OS is Windows 10 or Windows Server 2016,
            # so the service pack version is instead the Build number
            sp_ver = build
        else:
            sp_ver = win_ver.service_pack_major or 0
    except AttributeError:
        try:
            # Fix: rsplit() returns a list; int() of a list raised
            # TypeError (uncaught). Take the last element (the number in
            # e.g. 'Service Pack 1') before casting.
            sp_ver = int(win_ver.service_pack.rsplit(' ', 1)[-1])
        except (IndexError, ValueError):
            sp_ver = 0
    # Return the final version data
    return (major, minor, sp_ver, is_server)
1,092,166
Parse a docstring. Parse a docstring and extract three components; headline, description, and map of arguments to help texts. Args: doc: docstring. Returns: a dictionary.
def _parse_doc(doc):
    """Parse a docstring.

    Parse a docstring and extract three components: headline, description,
    and a map of arguments to help texts.

    Args:
        doc: docstring.

    Returns:
        a dictionary with keys ``headline``, ``description`` and ``args``.
    """
    lines = doc.split("\n")
    # Description block: everything before the first keyword section.
    descriptions = list(itertools.takewhile(_checker(_KEYWORDS), lines))

    if len(descriptions) < 3:
        description = lines[0]
    else:
        description = "{0}\n\n{1}".format(
            lines[0], textwrap.dedent("\n".join(descriptions[2:])))

    # Argument block: lines between the Args keyword and the next section.
    args = list(itertools.takewhile(
        _checker(_KEYWORDS_OTHERS),
        itertools.dropwhile(_checker(_KEYWORDS_ARGS), lines)))

    argmap = {}
    if len(args) > 1:
        for pair in args[1:]:
            kv = [v.strip() for v in pair.split(":")]
            if len(kv) >= 2:
                # Re-join any colons that were part of the help text itself.
                argmap[kv[0]] = ":".join(kv[1:])

    return dict(headline=descriptions[0], description=description, args=argmap)
1,092,219
Utility function to look up XDG basedir locations Args: __pkg: Package name __type: Location type
def __user_location(__pkg: str, type_) -> str:
    """Look up an XDG basedir location for a package.

    Args:
        __pkg: Package name.
        type_: Location type (key into ``__LOCATIONS``).
    """
    if ALLOW_DARWIN and sys.platform == 'darwin':
        user_dir = '~/Library/{}'.format(__LOCATIONS[type_][0])
    else:
        env_var = 'XDG_{}_HOME'.format(type_.upper())
        fallback = path.sep.join([getenv('HOME', ''), __LOCATIONS[type_][1]])
        user_dir = getenv(env_var, fallback)
    return path.expanduser(path.sep.join([user_dir, __pkg]))
1,092,234
Return all configs for given package. Args: __pkg: Package name __name: Configuration file name
def get_configs(__pkg: str, __name: str = 'config') -> List[str]:
    """Return all configs for the given package.

    Args:
        __pkg: Package name.
        __name: Configuration file name.
    """
    dirs = [user_config(__pkg), ]
    dirs.extend(path.expanduser(path.sep.join([d, __pkg]))
                for d in getenv('XDG_CONFIG_DIRS', '/etc/xdg').split(':'))
    # Lowest-priority directories first, so later entries can override.
    return [path.join(dname, __name)
            for dname in reversed(dirs)
            if path.exists(path.join(dname, __name))]
1,092,236
Return top-most data file for given package. Args: __pkg: Package name __name: Data file name
def get_data(__pkg: str, __name: str) -> str:
    """Return the top-most data file for the given package.

    Args:
        __pkg: Package name.
        __name: Data file name.

    Raises:
        FileNotFoundError: if no data directory contains the file.
    """
    for data_dir in get_data_dirs(__pkg):
        candidate = path.join(data_dir, __name)
        if path.exists(candidate):
            return candidate
    raise FileNotFoundError('No data file {!r} for {!r}'.format(__name, __pkg))
1,092,237
Return all data directories for given package. Args: __pkg: Package name
def get_data_dirs(__pkg: str) -> List[str]:
    """Return all existing data directories for the given package.

    Args:
        __pkg: Package name.
    """
    dirs = [user_data(__pkg), ]
    xdg_dirs = getenv('XDG_DATA_DIRS', '/usr/local/share/:/usr/share/')
    dirs.extend(path.expanduser(path.sep.join([d, __pkg]))
                for d in xdg_dirs.split(':'))
    return [d for d in dirs if path.isdir(d)]
1,092,238
Get a single model from the server. Args: model (string): The class as a string. model_id (string): The integer ID as a string. Returns: :class:`cinder_data.model.CinderModel`: A instance of the model.
def get_model(self, model, model_id):
    """Get a single model from the server.

    Args:
        model (string): The class as a string.
        model_id (string): The integer ID as a string.

    Returns:
        :class:`cinder_data.model.CinderModel`: An instance of the model.
    """
    model_class = self._get_model_class(model)
    return self._store.find_record(model_class, int(model_id))
1,092,449
Get all the models from the server. Args: model (string): The class as a string. page (string, optional): The page number as a string Returns: list: A list of instances of the requested model.
def get_models(self, model, page=None):
    """Get all the models from the server.

    Args:
        model (string): The class as a string.
        page (string, optional): The page number as a string.

    Returns:
        list: A list of instances of the requested model.
    """
    model_class = self._get_model_class(model)
    if page is None:
        return self._store.find_all(model_class)
    return self._store.find_all(model_class, params={'page': int(page)})
1,092,450
Checks the cell type to see if it represents the cell_type passed in. Args: cell_type: The type id for a cell match or None for empty match.
def check_cell_type(cell, cell_type):
    """Check whether *cell* matches *cell_type*.

    Args:
        cell: value under test.
        cell_type: the type for a cell match, or None for an empty match.
    """
    if cell_type is None or cell_type is type(None):
        # Empty match: None, or a blank string, counts as empty.
        return cell == None or (isinstance(cell, basestring) and not cell)
    return isinstance(cell, cell_type)
1,092,733
Performs a first step conversion of the cell to check its type or try to convert if a valid conversion exists. This version of conversion doesn't flag changes nor store cell units. Args: units: The dictionary holder for cell units. parens_as_neg: Converts numerics surrounded by parens to negative values
def auto_convert_cell_no_flags(cell, units=None, parens_as_neg=True):
    """First-pass conversion of *cell* without flagging changes or units.

    Args:
        cell: the cell value to check/convert.
        units: The dictionary holder for cell units.
        parens_as_neg: Converts numerics surrounded by parens to negative
            values.
    """
    if units is None:
        units = {}
    return auto_convert_cell(flagable=Flagable(), cell=cell, position=None,
                             worksheet=0, flags={}, units=units,
                             parens_as_neg=parens_as_neg)
1,092,734
Performs a first step conversion of the cell to check its type or try to convert if a valid conversion exists. Args: parens_as_neg: Converts numerics surrounded by parens to negative values
def auto_convert_cell(flagable, cell, position, worksheet, flags, units, parens_as_neg=True): conversion = cell # Is an numeric? if isinstance(cell, (int, float)): pass # Is a string? elif isinstance(cell, basestring): # Blank cell? if not cell: conversion = None else: conversion = auto_convert_string_cell(flagable, cell, position, worksheet, flags, units, parens_as_neg=parens_as_neg) # Is something else?? Convert to string elif cell != None: # Since we shouldn't get this event from most file types, # make this a warning level conversion flag flagable.flag_change(flags, 'warning', position, worksheet, flagable.FLAGS['unknown-to-string']) conversion = str(cell) # Empty cell? if not conversion: conversion = None else: # Otherwise we have an empty cell pass return conversion
1,092,735
Handles the string case of cell and attempts auto-conversion for auto_convert_cell. Args: parens_as_neg: Converts numerics surrounded by parens to negative values
def auto_convert_string_cell(flagable, cell_str, position, worksheet, flags, units, parens_as_neg=True): conversion = cell_str.strip() # Wrapped? if re.search(allregex.control_wrapping_regex, cell_str): # Drop the wrapping characters stripped_cell = cell_str.strip() mod_cell_str = stripped_cell[1:][:-1].strip() neg_mult = False # If the wrapping characters are '(' and ')' and the interior is a number, # then the number should be interpreted as a negative value if (stripped_cell[0] == '(' and stripped_cell[-1] == ')' and re.search(allregex.contains_numerical_regex, mod_cell_str)): # Flag for conversion to negative neg_mult = True flagable.flag_change(flags, 'interpreted', position, worksheet, flagable.FLAGS['removed-wrapping']) # Try again without wrapping converted_value = auto_convert_cell(flagable, mod_cell_str, position, worksheet, flags, units) neg_mult = neg_mult and check_cell_type(converted_value, get_cell_type(0)) if neg_mult and parens_as_neg: flagable.flag_change(flags, 'interpreted', position, worksheet, flagable.FLAGS['converted-wrapping-to-neg']) return -converted_value if neg_mult else converted_value # Is a string containing numbers? elif re.search(allregex.contains_numerical_regex, cell_str): conversion = auto_convert_numeric_string_cell(flagable, conversion, position, worksheet, flags, units) elif re.search(allregex.bool_regex, cell_str): flagable.flag_change(flags, 'interpreted', position, worksheet, flagable.FLAGS['bool-to-int']) conversion = 1 if re.search(allregex.true_bool_regex, cell_str) else 0 return conversion
1,092,736
Compute checksum for each file in `directory`, with exception of files specified in `blacklist`. Args: directory (str): Absolute or relative path to the directory. blacklist (list/set/tuple): List of blacklisted filenames. Only filenames are checked, not paths! Returns: str: Content of hashfile as it is specified in ABNF specification for \ project.
def generate_hashfile(directory, blacklist=_BLACKLIST):
    """Compute checksums for files in *directory* and render a hashfile.

    Files whose names appear in *blacklist* are skipped (only filenames
    are checked, not paths).

    Args:
        directory (str): Absolute or relative path to the directory.
        blacklist (list/set/tuple): Blacklisted filenames.

    Returns:
        str: Hashfile content, one "<checksum> <filename>" line per file,
        sorted by filename.
    """
    checksums = generate_checksums(directory, blacklist)
    lines = ["%s %s\n" % (digest, fname)
             for fname, digest in sorted(checksums.items())]
    return "".join(lines)
1,092,886
Sort based on position. Sort with s as a tuple of the sort indices and shape from first sort. Parameters: ----------- d -- the flds/sclr data s -- (si, shape) sorting and shaping data from firstsort.
def flds_sort(d, s):
    """Reorder and reshape every field array in *d* in place.

    Parameters:
        d: flds/sclr data dict; coordinate keys are left untouched.
        s: tuple ``(si, shape)`` — sort indices and target shape from
           the first sort.

    Returns:
        The same dict *d*, with each non-coordinate array sorted,
        reshaped, and squeezed.
    """
    reserved = ('t', 'xs', 'ys', 'zs', 'fd', 'sd')
    sort_indices, shape = s
    for label in [k for k in d.keys() if k not in reserved]:
        reordered = d[label][sort_indices].reshape(shape)
        d[label] = np.squeeze(reordered)
    return d
1,093,007
print start/title/end info before and after the function call Args: title: title will show after the start, if has any
def as_session(name_or_func):
    """Decorator that prints start/title/end info around a function call.

    Usable bare (``@as_session``, title derived from the function name)
    or with an explicit title (``@as_session('my title')``).
    """
    if not callable(name_or_func):
        session_name = name_or_func
    else:
        func = name_or_func
        # Derive a title: space out capitals and replace underscores.
        pieces = [(' ' + ch) if ch.isupper() else ch for ch in func.__name__]
        session_name = ''.join(pieces).replace('_', ' ')
        # deco(func) -> deco(name)(func)
        return as_session(session_name)(func)

    def decorate(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            start()
            title(session_name)
            outcome = func(*args, **kwargs)
            end()
            return outcome
        return wrapper
    return decorate
1,093,192
获取函数的签名. system.methodSignature('add') => [double, int, int] Parameters: method_name (str): - 要查看的函数名 Returns: (str): - 签名文本
def system_methodSignature(self, method_name: str) -> str:
    """Return the signature text of a registered callable.

    system.methodSignature('add') => "(a, b)"

    Parameters:
        method_name (str): Name of the callable to inspect.

    Returns:
        (str): Signature text, or an empty string when not found.
    """
    if method_name in self.funcs:
        target = self.funcs[method_name]
    elif self.instance is not None:
        # Fall back to attributes of the registered instance.
        try:
            target = resolve_dotted_attribute(
                self.instance, method_name, self.allow_dotted_names)
        except AttributeError:
            target = None
    else:
        target = None
    if target is None:
        return ""
    return str(inspect.signature(target))
1,093,260
注册一个实例用于执行,注意只能注册一个. Parameters: instance (Any): - 将一个类的实例注册到rpc allow_dotted_names (bool): 是否允许带`.`号的名字
def register_instance(self, instance: Any, allow_dotted_names: bool=False):
    """Register an instance whose methods may be called over RPC.

    Only one instance may be registered.

    Parameters:
        instance (Any): The instance to expose.
        allow_dotted_names (bool): Whether dotted method names are allowed.

    Raises:
        RuntimeError: If an instance is already registered.

    Returns:
        bool: True on success.
    """
    # Identity check against None: a registered instance that happens to
    # be falsy (defines __bool__/__len__) must still block a second
    # registration — the original truthiness test let it be replaced.
    if self.instance is not None:
        raise RuntimeError("can only register one instance")
    self.instance = instance
    self.allow_dotted_names = allow_dotted_names
    return True
1,093,262
注册函数. Parameters: name (Optional[str]): - 将函数注册到的名字,如果为None,name就用其原来的名字
def register_function(self, name: Optional[str]=None):
    """Return a decorator that registers a function for RPC dispatch.

    Parameters:
        name (Optional[str]): Name to register under; when None the
            function's own ``__name__`` is used.

    Returns:
        Callable: Decorator that stores the function in ``self.funcs``
        and returns it unchanged.
    """
    def wrap(function: Callable) -> Any:
        # Compute the key per call instead of mutating the enclosing
        # `name` (the original's `nonlocal name` made the first derived
        # name sticky, so reusing the decorator registered later
        # functions under the first function's name).
        key = function.__name__ if name is None else name
        self.funcs[key] = function
        return function
    return wrap
1,093,263
设置计算密集型任务的执行器. Parameters: executor (futures.Executor): - 函数调用的执行器
def set_executor(self, executor: futures.Executor):
    """Install the executor used for CPU-bound (non-coroutine) calls.

    Parameters:
        executor (futures.Executor): Executor for function calls.

    Returns:
        bool: True on success.
    """
    loop = self.loop
    loop.set_default_executor(executor)
    self._func_executor = executor
    return True
1,093,264
执行注册的函数或者实例的方法. 如果函数或者方法是协程则执行协程,如果是函数则使用执行器执行,默认使用的是多进程. Parameters: ID (str): 任务的ID method (str): 任务调用的函数名 args (Any): 位置参数 kwargs (Any): 关键字参数 Raise: (RPCRuntimeError): - 当执行调用后抛出了异常,那就算做RPC运行时异常 Return: (Any): - 被调用函数的返回
async def apply(self, ID: str, method: str, *args: Any, **kwargs: Any):
    """Execute a registered function or instance method.

    Coroutine functions are awaited; plain functions run in the loop's
    default executor; async generator functions are called and the
    generator is returned as-is; ``system.*`` methods run synchronously.

    Parameters:
        ID (str): Task ID.
        method (str): Name of the callable to invoke.
        args (Any): Positional arguments.
        kwargs (Any): Keyword arguments.

    Raises:
        ParamError: If args/kwargs do not bind to the target's signature.
        RPCRuntimeError: If the call itself raises.
        RpcException: If the target is not a supported callable kind.
        NotFindError: If *method* is not registered.

    Returns:
        Any: The called function's return value.
    """
    func = None
    try:
        # check to see if a matching function has been registered
        func = self.funcs[method]
    except KeyError:
        if self.instance is not None:
            # check for a _dispatch method
            try:
                func = resolve_dotted_attribute(
                    self.instance,
                    method,
                    self.allow_dotted_names)
            except AttributeError:
                pass
    if func is not None:
        # Validate the arguments against the target's signature up front
        # so binding failures are reported as ParamError, not a call error.
        sig = inspect.signature(func)
        try:
            sig.bind(*args, **kwargs)
        except:
            raise ParamError(
                "args can not bind to method {}".format(method), ID)
        if method.startswith("system."):
            # Introspection helpers run inline, never in an executor.
            try:
                result = func(*args, **kwargs)
            except Exception as e:
                raise RPCRuntimeError(
                    'Error:{} happend in method {}'.format(
                        e.__class__.__name__,
                        method
                    ),
                    ID
                )
            else:
                return result
        if inspect.iscoroutinefunction(func):
            try:
                result = await func(*args, **kwargs)
            except Exception as e:
                raise RPCRuntimeError(
                    'Error:{} happend in method {}'.format(
                        e.__class__.__name__,
                        method
                    ),
                    ID
                )
            else:
                return result
        elif inspect.isasyncgenfunction(func):
            # Hand the async generator back to the caller to iterate.
            result = func(*args, **kwargs)
            return result
        elif inspect.isfunction(func) or inspect.ismethod(func):
            # CPU-bound path: run in the loop's default executor.
            try:
                f = partial(func, *args, **kwargs)
                result = await self.loop.run_in_executor(None, f)
            except Exception as e:
                raise RPCRuntimeError(
                    'Error:{} happend in method {}'.format(
                        e.__class__.__name__,
                        method
                    ),
                    ID
                )
            else:
                return result
        else:
            raise RpcException('method "%s" is not supported' % method)
    else:
        raise NotFindError('method "%s" is not supported' % method, ID)
1,093,265
This method should be called every time through the main loop. It handles showing the up, over, and down states of the button. Parameters: | eventObj - the event object obtained by calling pygame.event.get() Returns: | False most of the time | True when the user clicks down and later up on the button.
def handleEvent(self, eventObj):
    """Process one pygame event for this button.

    Should be called every time through the main loop; tracks the up,
    over, and down states of the button.

    Parameters:
        eventObj: event object obtained by calling pygame.event.get()

    Returns:
        False most of the time; True when the user clicks down and
        later up on the button (or presses Enter when enterToActivate).
    """
    if self.enterToActivate:
        if eventObj.type == pygame.KEYDOWN:
            # Return or Enter key
            if eventObj.key == pygame.K_RETURN:
                return True
    if (eventObj.type not in (MOUSEMOTION, MOUSEBUTTONUP, MOUSEBUTTONDOWN)) or (not self.visible):
        # The button only cares bout mouse-related events (or no events, if it is invisible)
        return False
    if not self.isEnabled:
        return False
    clicked = False
    eventPointInButtonRect = self.rect.collidepoint(eventObj.pos)
    if (not self.mouseOverButton) and eventPointInButtonRect:
        # if mouse has entered the button:
        self.mouseOverButton = True
    elif self.mouseOverButton and (not eventPointInButtonRect):
        # if mouse has exited the button:
        self.mouseOverButton = False
    if eventPointInButtonRect:
        if eventObj.type == MOUSEBUTTONDOWN:
            self.buttonDown = True
            self.lastMouseDownOverButton = True
    else:
        if eventObj.type in (MOUSEBUTTONUP, MOUSEBUTTONDOWN):
            # if an up/down happens off the button, then the next up won't cause mouseClick()
            self.lastMouseDownOverButton = False
    if eventObj.type == MOUSEBUTTONDOWN:
        self.mouseIsDown = True
    # mouse up is handled whether or not it was over the button
    doMouseClick = False
    if eventObj.type == MOUSEBUTTONUP:
        self.mouseIsDown = False
        if self.lastMouseDownOverButton:
            doMouseClick = True
        self.lastMouseDownOverButton = False
        if self.buttonDown:
            self.buttonDown = False
        if doMouseClick:
            self.buttonDown = False
            clicked = True
            self.mouseOverButton = False  # forces redraw of up state after click
            if self.playSoundOnClick:
                self.soundOnClick.play()
    if clicked:
        # Notify the optional callback with this widget's nickname.
        if self.callBack is not None:
            self.callBack(self.nickname)
    return clicked
1,093,323
This method should be called every time through the main loop. It handles showing the up, over, and down states of the checkbox. Parameters: | eventObj - the event object obtained by calling pygame.event.get() Returns: | False most of the time | True when the user has toggled the checkbox.
def handleEvent(self, eventObj):
    """Process one pygame event for this checkbox.

    Should be called every time through the main loop; tracks the up,
    over, and down states and toggles the checkbox value on a full click.

    Parameters:
        eventObj: event object obtained by calling pygame.event.get()

    Returns:
        False most of the time; True when the user has toggled the
        checkbox.
    """
    if eventObj.type not in (MOUSEMOTION, MOUSEBUTTONUP, MOUSEBUTTONDOWN) or not self.visible:
        # The checkBox only cares bout mouse-related events (or no events, if it is invisible)
        return False
    if not self.isEnabled:
        return False
    clicked = False
    if (not self.mouseOverButton) and self.rect.collidepoint(eventObj.pos):
        # if mouse has entered the checkBox:
        self.mouseOverButton = True
    elif self.mouseOverButton and (not self.rect.collidepoint(eventObj.pos)):
        # if mouse has exited the checkBox:
        self.mouseOverButton = False
    if self.rect.collidepoint(eventObj.pos):
        if eventObj.type == MOUSEBUTTONDOWN:
            self.buttonDown = True
            self.lastMouseDownOverButton = True
    else:
        if eventObj.type in (MOUSEBUTTONUP, MOUSEBUTTONDOWN):
            # if an up/down happens off the checkBox, then the next up won't cause mouseClick()
            self.lastMouseDownOverButton = False
    if eventObj.type == MOUSEBUTTONDOWN:
        self.mouseIsDown = True
    # mouse up is handled whether or not it was over the checkBox
    doMouseClick = False
    if eventObj.type == MOUSEBUTTONUP:
        self.mouseIsDown = False
        if self.lastMouseDownOverButton:
            doMouseClick = True
        self.lastMouseDownOverButton = False
        if self.buttonDown:
            self.buttonDown = False
        if doMouseClick:
            self.buttonDown = False
            clicked = True
            if self.playSoundOnClick:
                self.soundOnClick.play()
            # switch state:
            self.value = not self.value
    return clicked
1,093,329
This method should be called every time through the main loop. It handles showing the up, over, and down states of the button. Parameters: | eventObj - the event object obtained by calling pygame.event.get() Returns: | False most of the time | True when the user selects a radio button from the group
def handleEvent(self, eventObj):
    """Process one pygame event for this radio button.

    Should be called every time through the main loop; tracks the up,
    over, and down states and, on a full click, turns this button on and
    all other buttons in the same group off.

    Parameters:
        eventObj: event object obtained by calling pygame.event.get()

    Returns:
        False most of the time; True when the user selects this radio
        button from the group.
    """
    if eventObj.type not in (MOUSEMOTION, MOUSEBUTTONUP, MOUSEBUTTONDOWN) or not self.visible:
        # The radioButton only cares bout mouse-related events (or no events, if it is invisible)
        return False
    if not self.isEnabled:
        return False
    clicked = False
    if (not self.mouseOverButton) and self.rect.collidepoint(eventObj.pos):
        # if mouse has entered the radioButton:
        self.mouseOverButton = True
    elif self.mouseOverButton and (not self.rect.collidepoint(eventObj.pos)):
        # if mouse has exited the radioButton:
        self.mouseOverButton = False
    if self.rect.collidepoint(eventObj.pos):
        if eventObj.type == MOUSEBUTTONDOWN:
            self.buttonDown = True
            self.lastMouseDownOverButton = True
    else:
        if eventObj.type in (MOUSEBUTTONUP, MOUSEBUTTONDOWN):
            # if an up/down happens off the radioButton, then the next up won't cause mouseClick()
            self.lastMouseDownOverButton = False
    if eventObj.type == MOUSEBUTTONDOWN:
        self.mouseIsDown = True
    # mouse up is handled whether or not it was over the radioButton
    doMouseClick = False
    if eventObj.type == MOUSEBUTTONUP:
        self.mouseIsDown = False
        if self.lastMouseDownOverButton:
            doMouseClick = True
        self.lastMouseDownOverButton = False
        if self.buttonDown:
            self.buttonDown = False
        if doMouseClick:
            self.buttonDown = False
            clicked = True
            # Turn all radio buttons in this group off
            for radioButton in PygWidgetsRadioButton.__PygWidgets__Radio__Buttons__Groups__Dicts__[self.group]:
                radioButton.setValue(False)
            self.setValue(True)  # And turn the current one (the one that was clicked) on
            if self.playSoundOnClick:
                self.soundOnClick.play()
    if clicked:
        # Notify the optional callback with this widget's nickname.
        if self.callBack is not None:
            self.callBack(self.nickname)
    return clicked
1,093,334
This method should be called every time through the main loop. It handles all of the keyboard key actions Parameters: | eventObj - the event object obtained by calling pygame.event.get() Returns: | False most of the time | True when the user clicks down and later up on the button.
def handleEvent(self, event):
    """Process one pygame event for this text-entry field.

    Should be called every time through the main loop; handles focus via
    mouse clicks, cursor positioning, text editing keys, and key-repeat
    via a USEREVENT timer.

    Parameters:
        event: event object obtained by calling pygame.event.get()

    Returns:
        False most of the time; True when the user presses Return/Enter,
        signalling that the text is available via getValue().
    """
    if not self.isEnabled:
        return False
    if (event.type == pygame.MOUSEBUTTONDOWN) and (event.button == 1):  # user clicked
        theX, theY = event.pos
        # if self.imageRect.collidepoint(pos):
        if self.imageRect.collidepoint(theX, theY):
            if not self.focus:
                self.focus = True  # give this field focus
            else:
                # Field already has focus, must position the cursor where the user clicked
                nPixelsFromLeft = theX - self.loc[0]
                nChars = len(self.text)
                lastCharOffset = self.font.size(self.text)[0]
                if nPixelsFromLeft >= lastCharOffset:
                    self.cursorPosition = nChars
                else:
                    # Scan character offsets until we pass the click point.
                    for thisCharNum in range(0, nChars):
                        thisCharOffset = self.font.size(self.text[:thisCharNum])[0]
                        if thisCharOffset >= nPixelsFromLeft:
                            self.cursorPosition = thisCharNum  # Found the proper position for the cursor
                            break
                self.cursorVisible = True  # Show the cursor at the click point
        else:
            self.focus = False
    if not self.focus:  # if this field does not have focus, don't do anything
        return False
    keyIsDown = False  # assume False
    if event.type == pygame.KEYDOWN:
        keyIsDown = True
        self.currentKey = event.key  # remember for potential repeating key
        self.unicodeOfKey = event.unicode  # remember for potential repeating key
    if event.type == pygame.USEREVENT:
        # This is a special signal to check for a repeating key
        # if the key is still down, repeat it
        keyPressedList = pygame.key.get_pressed()
        if (self.currentKey is not None) and (keyPressedList[self.currentKey]):
            # Key is still down
            keyIsDown = True
        else:
            # Key is up
            pygame.time.set_timer(pygame.USEREVENT, 0)  # kill the timer
            return False
    if keyIsDown:
        if self.currentKey in (pygame.K_RETURN, pygame.K_KP_ENTER):
            # User is done typing, return True to signal that text is available (via a call to getValue)
            self.focus = False
            self.currentKey = None
            self._updateImage()
            if self.callBack is not None:
                self.callBack(self.nickname)
            return True
        keyIsRepeatable = True  # assume it is repeatable unless specifically turned off
        if self.currentKey == pygame.K_DELETE:
            # forward delete: remove the character at the cursor
            self.text = self.text[:self.cursorPosition] + \
                        self.text[self.cursorPosition + 1:]
            self._updateImage()
        elif self.currentKey == pygame.K_BACKSPACE:
            # backspace: remove the character before the cursor
            self.text = self.text[:max(self.cursorPosition - 1, 0)] + \
                        self.text[self.cursorPosition:]
            # Subtract one from cursor_pos, but do not go below zero:
            self.cursorPosition = max(self.cursorPosition - 1, 0)
            self._updateImage()
        elif self.currentKey == pygame.K_RIGHT:
            if self.cursorPosition < len(self.text):
                self.cursorPosition = self.cursorPosition + 1
        elif self.currentKey == pygame.K_LEFT:
            if self.cursorPosition > 0:
                self.cursorPosition = self.cursorPosition - 1
        elif self.currentKey == pygame.K_END:
            self.cursorPosition = len(self.text)
            keyIsRepeatable = False
        elif self.currentKey == pygame.K_HOME:
            self.cursorPosition = 0
            keyIsRepeatable = False
        elif self.currentKey in [pygame.K_UP, pygame.K_DOWN]:
            return False  # ignore up arrow and down arrow
        else:  # standard key
            # If no special key is pressed, add unicode of key to input_string
            self.text = self.text[:self.cursorPosition] + \
                        self.unicodeOfKey + \
                        self.text[self.cursorPosition:]
            self.cursorPosition = self.cursorPosition + len(self.unicodeOfKey)
            self._updateImage()
        if keyIsRepeatable:
            # set up userevent to try to repeat key
            pygame.time.set_timer(pygame.USEREVENT, 200)  # wait for a short time before repeating
    return False
1,093,344
This method should be called every time through the main loop. It handles all of the dragging Parameters: | eventObj - the event object obtained by calling pygame.event.get() Returns: | False most of the time | True when the user finishes dragging by lifting up on the mouse.
def handleEvent(self, eventObj):
    """Process one pygame event for this dragger.

    Should be called every time through the main loop; handles all of the
    dragging state (grab, move, release).

    Parameters:
        eventObj: event object obtained by calling pygame.event.get()

    Returns:
        False most of the time; True when the user finishes dragging by
        lifting up on the mouse.
    """
    if not self.isEnabled:
        return False
    if eventObj.type not in (MOUSEMOTION, MOUSEBUTTONUP, MOUSEBUTTONDOWN) :
        # The dragger only cares about mouse-related events
        return False
    clicked = False
    if eventObj.type == MOUSEBUTTONDOWN:
        if self.rect.collidepoint(eventObj.pos):
            self.dragging = True
            # Remember the click offset inside the rect and the start point.
            self.deltaX = eventObj.pos[0] - self.rect.left
            self.deltaY = eventObj.pos[1] - self.rect.top
            self.startDraggingX = self.rect.left
            self.startDraggingY = self.rect.top
    elif eventObj.type == MOUSEBUTTONUP:
        if self.dragging:
            self.dragging = False
            clicked = True
            self.mouseUpLoc = (eventObj.pos[0], eventObj.pos[1])
            # Final position keeps the original grab offset.
            self.rect.left = eventObj.pos[0] - self.deltaX
            self.rect.top = eventObj.pos[1] - self.deltaY
            self.setLoc((self.rect.left, self.rect.top))
    elif eventObj.type == MOUSEMOTION:
        if self.dragging:
            self.rect.left = eventObj.pos[0] - self.deltaX
            self.rect.top = eventObj.pos[1] - self.deltaY
        else:
            self.mouseOver = self.rect.collidepoint(eventObj.pos)
    if clicked:
        # Notify the optional callback with this widget's nickname.
        if self.callBack is not None:
            self.callBack(self.nickname)
    return clicked
1,093,349
rotates the image a given number of degrees Parameters: | nDegrees - the number of degrees you want the image rotated (images start at zero degrees). | Positive numbers are clockwise, negative numbers are counter-clockwise
def rotate(self, nDegrees):
    """Rotate the image by *nDegrees* relative to its current angle.

    Parameters:
        nDegrees: degrees to rotate by; positive is clockwise,
            negative is counter-clockwise.
    """
    self.angle += nDegrees
    self._transmogrophy(self.angle, self.percent, self.scaleFromCenter,
                        self.flipH, self.flipV)
1,093,355
rotates the image to a given angle Parameters: | angle - the angle that you want the image rotated to. | Positive numbers are clockwise, negative numbers are counter-clockwise
def rotateTo(self, angle):
    """Rotate the image to an absolute *angle*.

    Parameters:
        angle: target angle; positive is clockwise, negative is
            counter-clockwise.
    """
    target_angle = angle
    self._transmogrophy(target_angle, self.percent, self.scaleFromCenter,
                        self.flipH, self.flipV)
1,093,356
scales an Image object Parameters: | percent - a percent of the original size | numbers bigger than 100 scale up | numbers less than 100 scale down | 100 scales to the original size Optional keyword parameters: | scaleFromCenter - should the image scale from the center or from the upper left hand corner | (default is True, scale from the center)
def scale(self, percent, scaleFromCenter=True):
    """Scale the image to *percent* of the original size.

    Parameters:
        percent: percentage of the original size (>100 scales up,
            <100 scales down, 100 restores original size).
        scaleFromCenter: scale from the center (default) rather than
            from the upper-left corner.
    """
    new_percent = percent
    self._transmogrophy(self.angle, new_percent, scaleFromCenter,
                        self.flipH, self.flipV)
1,093,357
Selects a different image to be shown. Parameters: | key - a key in the original dictionary to specify which image to show
def replace(self, key):
    """Select a different image (by *key*) to be shown.

    Parameters:
        key: a key in the original dictionary specifying which image
            to show.

    Raises:
        KeyError: if *key* is not in the collection of images.
    """
    if not (key in self.imagesDict):
        print('The key', key, 'was not found in the collection of images dictionary')
        raise KeyError
    # Work on a copy so the original stays pristine for future transforms.
    self.originalImage = self.imagesDict[key]
    self.image = self.originalImage.copy()
    # Set the rect of the image to appropriate values - using the current image
    # then scale and rotate
    self.rect = self.image.get_rect()
    self.rect.x = self.loc[0]
    self.rect.y = self.loc[1]
    self.scale(self.percent, self.scaleFromCenter)
    self.rotate(self.angle)
1,093,361
Return a human-readable dictionary from the inibin. Arguments: key_mapping -- Dictionary used for conversion. Supports nesting. Every other value should be a numeric inibin key, or a tuple of the key and a function to apply to the result. inibin -- The dictionary returned from reading an inibin. string_table -- Used to translate strings. Any string with a key in string_table will be replaced. Typically loaded from a fontconfig_*.txt.
def _fix_keys(key_mapping, inibin, string_table=None): if string_table is None: string_table = {} def walk(node, out_node): # Walk the nodes of the key mapping for key, value in node.items(): if isinstance(value, dict): if key not in out_node: out_node[key] = {} walk(value, out_node[key]) else: # Can either be just the index, or the index plus a function to apply func = None if isinstance(value, tuple): func = value[-1] index = value[0] else: index = value if index is None or index not in inibin: out_node[key] = None continue val = inibin[index] # Try numeric conversion # Inibins often store numbers in strings if isinstance(val, bytes): try: val = int(val) except ValueError: try: val = float(val) except ValueError: val = val.decode('utf8') # Check if value is a reference to a fontconfig key if val in string_table: val = string_table[val] # Apply the function if callable(func): val = func(val) out_node[key] = val out = {} walk(key_mapping, out) return out
1,093,523
Shifts indicies as needed to account for one based indexing Positive indicies need to be reduced by one to match with zero based indexing. Zero is not a valid input, and as such will throw a value error. Arguments: index - index to shift
def _setup_index(index): index = int(index) if index > 0: index -= 1 elif index == 0: # Zero indicies should not be allowed by default. raise ValueError return index
1,093,973
Processes positions to account for ranges Arguments: positions - list of positions and/or ranges to process
def _setup_positions(self, positions):
    """Expand range expressions in *positions*.

    Ranges look like 'a:b', ':b' (from 1), or 'a:' (open-ended; handled
    later by _cut_range). Separators are interleaved between adjacent
    numeric positions.

    Arguments:
        positions: list of positions and/or ranges to process.
    """
    updated_positions = []
    for i, position in enumerate(positions):
        ranger = re.search(r'(?P<start>-?\d*):(?P<end>\d*)', position)
        if ranger:
            if i > 0:
                updated_positions.append(self.separator)
            start = group_val(ranger.group('start'))
            end = group_val(ranger.group('end'))
            if start and end:
                # Bounded range: expand it fully.
                updated_positions.extend(self._extendrange(start, end + 1))
            # Since the number of positions on a line is unknown,
            # send input to cause exception that can be caught and call
            # _cut_range helper function
            elif ranger.group('start'):
                updated_positions.append([start])
            else:
                # ':b' — open start defaults to position 1.
                updated_positions.extend(self._extendrange(1, end + 1))
        else:
            updated_positions.append(positions[i])
            try:
                # Insert a separator between two consecutive numerics.
                if int(position) and int(positions[i+1]):
                    updated_positions.append(self.separator)
            except (ValueError, IndexError):
                pass
    return updated_positions
1,093,976
Performs cut for range from start position to end Arguments: line - input to cut start - start of range current_position - current position in main cut function
def _cut_range(self, line, start, current_position):
    """Perform a cut for an open-ended range from *start* to end of line.

    Arguments:
        line: input fields to cut.
        start: one-based start of the range.
        current_position: current position in the main cut function,
            used to decide whether a trailing separator is needed.
    """
    result = []
    try:
        for j in range(start, len(line)):
            index = _setup_index(j)
            try:
                result.append(line[index])
            except IndexError:
                # Out-of-range positions render as the invalid marker.
                result.append(self.invalid_pos)
            finally:
                # A separator always follows each emitted field.
                result.append(self.separator)
        result.append(line[-1])
    except IndexError:
        # Empty line: emit nothing for the range.
        pass
    try:
        # If another numeric position follows, separate it from us.
        int(self.positions[current_position+1])
        result.append(self.separator)
    except (ValueError, IndexError):
        pass
    return result
1,093,977
Creates list of values in a range with output delimiters. Arguments: start - range start end - range end
def _extendrange(self, start, end): range_positions = [] for i in range(start, end): if i != 0: range_positions.append(str(i)) if i < end: range_positions.append(self.separator) return range_positions
1,093,978
Format a relative time. Args: __timestamp: Event to generate relative timestamp against Returns: Human readable date and time offset
def human_timestamp(__timestamp: datetime.datetime) -> str:
    """Format a fuzzy, human readable offset for *__timestamp*.

    Args:
        __timestamp: Event to generate a relative timestamp against.
            Naive datetimes are assumed to be UTC.

    Returns:
        Human readable date and time offset, e.g. 'about two hours ago'.
    """
    words = '. a two three four five six seven eight nine ten'.split()
    units = [
        ('year', 60 * 60 * 24 * 365),
        ('month', 60 * 60 * 24 * 28),
        ('week', 60 * 60 * 24 * 7),
        ('day', 60 * 60 * 24),
        ('hour', 60 * 60),
        ('minute', 60),
        ('second', 1),
    ]
    if __timestamp.tzinfo is None:
        __timestamp = __timestamp.replace(tzinfo=datetime.timezone.utc)
    now = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc)
    seconds = int((now - __timestamp).total_seconds())
    # Pick the largest unit with a non-zero count.
    count = 0
    unit = None
    for unit, scale in units:
        count = seconds // scale
        if count:
            break
    else:
        count = 0  # Too small
    if count == 0:
        return 'right now'
    if count == 1:
        if unit in ('year', 'month', 'week'):
            return 'last {}'.format(unit)
        if unit == 'day':
            return 'yesterday'
        if unit == 'hour':
            return 'about an hour ago'
    amount = count if count > 10 else words[count]
    return 'about {} {}{} ago'.format(amount, unit, 's' if count > 1 else '')
1,094,154
Parse human readable frequency. Args: __delta: Frequency to parse
def parse_timedelta(__delta: str) -> datetime.timedelta:
    """Parse a human readable frequency (e.g. '2h', '1.5d') into a timedelta.

    Args:
        __delta: Frequency to parse.

    Raises:
        ValueError: If *__delta* does not match the frequency pattern.
    """
    # NOTE(review): `r` is not defined in this function; presumably it is
    # a module-level verbose regex capturing (value, units) with the unit
    # letter drawn from [hdwmy] -- confirm against the module source.
    match = re.fullmatch(r, __delta, re.IGNORECASE | re.VERBOSE)
    if not match:
        raise ValueError('Invalid ‘frequency’ value')
    value, units = match.groups()
    units_i = 'hdwmy'.index(units.lower())
    # hours per hour/day/week/month/year (a "month" is 28 days here)
    multiplier = (1, 24, 168, 672, 8760)
    return datetime.timedelta(hours=float(value) * multiplier[units_i])
1,094,155
Access nested value using dot separated keys Args: prop (:obj:`str`): Property in the form of dot separated keys Returns: Property value if exists, else `None`
def get_property(self, prop):
    """Access a nested value using dot separated keys.

    Args:
        prop (str): Property in the form of dot separated keys.

    Returns:
        The property value if every key exists, else None.
    """
    node = self
    for key in prop.split('.'):
        if key not in node:
            return None
        node = node[key]
    return node
1,094,724
Creates dict2 object from dict object Args: val (:obj:`dict`): Value to create from Returns: Equivalent dict2 object.
def from_dict(cls, val):
    """Recursively build a dict2 object from a plain dict.

    Args:
        val (dict): Value to create from; lists are converted
            element-wise, other values pass through unchanged.

    Returns:
        The equivalent dict2 object (or converted list / original value).
    """
    if isinstance(val, dict2):
        # Already converted — return as-is.
        return val
    if isinstance(val, dict):
        converted = cls()
        for key, item in val.items():
            converted[key] = cls.from_dict(item)
        return converted
    if isinstance(val, list):
        return [cls.from_dict(item) for item in val]
    return val
1,094,726
Creates dict object from dict2 object Args: val (:obj:`dict2`): Value to create from Returns: Equivalent dict object.
def to_dict(self, val=UNSET):
    """Recursively convert a dict2 object back into a plain dict.

    Args:
        val: Value to convert; defaults to *self* when omitted.
            Lists are converted element-wise, other values pass
            through unchanged.

    Returns:
        The equivalent plain dict (or converted list / original value).
    """
    if val is UNSET:
        val = self
    if isinstance(val, (dict2, dict)):
        plain = {}
        for key, item in val.items():
            plain[key] = self.to_dict(item)
        return plain
    if isinstance(val, list):
        return [self.to_dict(item) for item in val]
    return val
1,094,727
You can pass in any of the Summon Search API parameters (without the "s." prefix). For example to remove highlighting: result = api.search("Web", hl=False) See the Summon API documentation for the full list of possible parameters: http://api.summon.serialssolutions.com/help/api/search/parameters
def search(self, q, **kwargs):
    """Run a Summon Search API query.

    Any Summon search parameter may be passed as a keyword argument
    (without the "s." prefix), e.g. ``search("Web", hl=False)`` to
    disable highlighting. See the Summon API documentation for the full
    parameter list.
    """
    params = {"s.q": q}
    for key, value in kwargs.items():
        # Keyword args map onto "s."-prefixed API parameters.
        params["s." + key] = value
    return self._get("/2.0.0/search", params)
1,094,797
Processes a CMIP3 style file path. The standard CMIP3 directory structure: <experiment>/<variable_name>/<model>/<ensemble_member>/<CMOR filename>.nc Filename is of pattern: <model>-<experiment>-<variable_name>-<ensemble_member>.nc Arguments: fp (str): A file path conforming to CMIP3 spec. Returns: dict: Metadata as extracted from the file path.
def get_fp_meta(fp):
    """Extract metadata from a CMIP3 style file path.

    The directory structure is:
    <experiment>/<variable_name>/<model>/<ensemble_member>/<CMOR filename>.nc

    Directory-derived values are gathered first and then overridden by
    the (preferred) filename-derived values.

    Arguments:
        fp (str): A file path conforming to CMIP3 spec.

    Returns:
        dict: Metadata as extracted from the file path.
    """
    directory_attrs = list(DIR_ATTS)
    meta = get_dir_meta(fp, directory_attrs)
    # Filename-derived metadata wins over directory-derived metadata.
    meta.update(get_fname_meta(fp))
    return meta
1,094,849
Processes a CMIP3 style file name. Filename is of pattern: <model>-<experiment>-<variable_name>-<ensemble_member>.nc Arguments: fp (str): A file path/name conforming to DRS spec. Returns: dict: Metadata as extracted from the filename. .. _Data Reference Syntax: http://cmip-pcmdi.llnl.gov/cmip5/docs/cmip5_data_reference_syntax.pdf
def get_fname_meta(fp):
    """Extract metadata from a CMIP3 style file name.

    Filenames follow the pattern:
    <model>-<experiment>-<variable_name>-<ensemble_member>.nc

    Arguments:
        fp (str): A file path/name conforming to the spec.

    Returns:
        dict: Metadata as extracted from the filename.

    Raises:
        PathError: If the filename has too few '-'-separated parts.
    """
    # Strip any directory component, then the extension.
    if '/' in fp:
        fp = os.path.split(fp)[1]
    fname = os.path.splitext(fp)[0]
    parts = fname.split('-')
    result = {}
    try:
        for attr in FNAME_ATTS:
            result[attr] = parts.pop(0)
    except IndexError:
        raise PathError(fname)
    return result
1,094,850
Set global variables to values defined in `config_dict`. Args: config_dict (dict): dict with data, which are used to set `globals`. Note: `config_dict` have to be dictionary, or it is ignored. Also all variables, that are not already in globals, or are not types defined in :attr:`_ALLOWED` (str, int, ..) or starts with ``_`` are silently ignored.
def _substitute_globals(config_dict):
    """Set module-level constants to values defined in *config_dict*.

    Args:
        config_dict (dict): Data used to overwrite module globals.

    Note:
        Non-dict input is silently ignored. Keys that are not already
        module constants, or whose values are not of a type in
        ``_ALLOWED``, are silently skipped.
    """
    constants = _get_all_constants()
    # Silently ignore anything that isn't a plain dict.
    if type(config_dict) != dict:
        return
    for key, val in config_dict.iteritems():  # Python 2 dict API
        # Only known constants with allowed primitive types are replaced.
        if key in constants and type(val) in _ALLOWED:
            globals()[key] = val
1,095,296
Convert an integer value to a character. a-z then double aa-zz etc Args: value (int): integer index we're looking up capital (bool): whether we convert to capitals or not Returns (str): alphanumeric representation of the index
def _get_variation_id(value, capital=False): # Reinforcing type just in case a valid string was entered value = int(value) base_power = base_start = base_end = 0 while value >= base_end: base_power += 1 base_start = base_end base_end += pow(26, base_power) base_index = value - base_start # create alpha representation alphas = ['a'] * base_power for index in range(base_power - 1, -1, -1): alphas[index] = chr(int(97 + (base_index % 26))) base_index /= 26 characters = ''.join(alphas) return characters.upper() if capital else characters
1,095,602
Import file directly. This is a hack to import files from packages without importing <package>/__init__.py, its purpose is to allow import without requiring all the dependencies at this point. Args: package: Package to import from fname: File to import Returns: Imported module
def import_file(package: str, fname: str) -> ModuleType:
    """Import a module file directly, bypassing the package __init__.

    This is a hack to import files from packages without importing
    <package>/__init__.py; its purpose is to allow import without
    requiring all the dependencies at this point.

    Args:
        package: Package directory to import from.
        fname: File to import (e.g. 'mod.py').

    Returns:
        The imported module.
    """
    # Strip the '.py' suffix, not rstrip('.py'): rstrip removes a SET of
    # trailing characters, so e.g. 'happy.py'.rstrip('.py') == 'ha'.
    mod_name = fname[:-3] if fname.endswith('.py') else fname
    spec = spec_from_file_location(mod_name, '{}/{}'.format(package, fname))
    module = module_from_spec(spec)
    spec.loader.exec_module(module)
    return module
1,095,750
Prints the anagram results sorted by score to stdout. Args: input_word: the base word we searched on anagrams: generator of (word, score) from anagrams_in_word by_length: a boolean to declare printing by length instead of score
def pretty_print(input_word, anagrams, by_length=False):
    """Print anagram results to stdout, sorted by score (or by length).

    Args:
        input_word: the base word we searched on.
        anagrams: iterable of (word, score) pairs from anagrams_in_word.
        by_length: group output by word length instead of by score.
    """
    groups = {}
    if by_length:
        noun = "tiles"
        for word, score in anagrams:
            entry = "{0} ({1:d})".format(word, score)
            groups.setdefault(len(word), []).append(entry)
    else:
        noun = "points"
        for word, score in anagrams:
            groups.setdefault(score, []).append(word)
    print("Anagrams for {0}{1}:".format(input_word, " (score)" * by_length))
    if not valid_scrabble_word(input_word):
        print("{0} is not possible in Scrabble.".format(input_word))
    for group_key, words in sorted(groups.items(), reverse=True):
        print("{0:d} {1}: {2}".format(group_key, noun, ", ".join(words)))
1,095,946
Find closest tag for a git repository. Note: This defaults to `Semantic Version`_ tag matching. Args: __matcher: Glob-style tag pattern to match strict: Allow commit-ish, if no tag found git_dir: Repository to search Returns: Matching tag name .. _Semantic Version: http://semver.org/
def find_tag(__matcher: str = 'v[0-9]*', *, strict: bool = True,
             git_dir: str = '.') -> str:
    """Find the closest matching tag for a git repository.

    Defaults to Semantic Version style tag matching.

    Args:
        __matcher: Glob-style tag pattern to match.
        strict: When False, fall back to a commit hash if no tag matches.
        git_dir: Repository to search.

    Returns:
        The matching tag name (or commit-ish when not strict).
    """
    base_cmd = 'git describe --abbrev=12 --dirty'.split()
    with chdir(git_dir):
        try:
            raw = check_output(base_cmd + ['--match={}'.format(__matcher), ])
        except CalledProcessError:
            if strict:
                raise
            # No matching tag: fall back to the abbreviated commit hash.
            raw = check_output(base_cmd + ['--always', ])
    return raw.decode('ascii', 'replace').strip()
1,095,949
Helper function to add a bias column to the input array X Parameters: X (numpy.ndarray): The input data matrix. This must be a numpy.ndarray with 2 dimension wheres every row corresponds to one example of the data set, every column, one different feature. Returns: numpy.ndarray: The same input matrix X with an added (prefix) column of ones.
def add_bias(X):
    """Prefix a bias column of ones to the data matrix *X*.

    Parameters:
        X (numpy.ndarray): 2-D data matrix; rows are examples, columns
            are features.

    Returns:
        numpy.ndarray: *X* with an added leading column of ones, using
        the same dtype as *X*.
    """
    ones_column = numpy.ones((len(X), 1), dtype=X.dtype)
    return numpy.hstack((ones_column, X))
1,096,287
Unpack .zip archive in `file_obj` to given `path`. Make sure, that it fits into limits (see :attr:`._max_zipfiles` for details). Args: file_obj (file): Opened file-like object. path (str): Path into which the .zip will be unpacked. Raises: ValueError: If there is too many files in .zip archive.
def _unpack_zip(self, file_obj, path): old_cwd = os.getcwd() os.chdir(path) zip_obj = zipfile.ZipFile(file_obj) for cnt, zip_info in enumerate(zip_obj.infolist()): zip_obj.extract(zip_info) if cnt + 1 > self.max_zipfiles: os.chdir(old_cwd) msg = "Too many files in .zip " msg += "(self.max_zipfiles == {}, but {} given).".format( self.max_zipfiles, cnt + 1, ) raise ValueError(msg) os.chdir(old_cwd)
1,096,381
Add archive to the storage and unpack it. Args: zip_file_obj (file): Opened file-like object. Returns: obj: Path where the `zip_file_obj` was unpacked wrapped in \ :class:`.PathAndHash` structure. Raises: ValueError: If there is too many files in .zip archive. \ See :attr:`._max_zipfiles` for details. AssertionError: If the `zip_file_obj` is not file-like object.
def add_archive_as_dir(self, zip_file_obj):
    """Add the archive to the storage and unpack it into its own directory.

    Args:
        zip_file_obj (file): Opened file-like object.

    Returns:
        obj: :class:`.PathAndHash` with the path where `zip_file_obj`
            was unpacked and its hash.

    Raises:
        ValueError: If there are too many files in the .zip archive.
        AssertionError: If `zip_file_obj` is not a file-like object.
    """
    BalancedDiscStorage._check_interface(zip_file_obj)

    file_hash = self._get_hash(zip_file_obj)
    unpack_dir = os.path.join(self._create_dir_path(file_hash), file_hash)

    # Re-adding the same archive replaces the previously unpacked copy.
    if os.path.exists(unpack_dir):
        shutil.rmtree(unpack_dir)
    os.mkdir(unpack_dir)

    try:
        self._unpack_zip(zip_file_obj, unpack_dir)
    except Exception:
        # Don't leave a half-unpacked directory behind on failure.
        shutil.rmtree(unpack_dir)
        raise

    return PathAndHash(path=unpack_dir, hash=file_hash)
1,096,382
Wraps a WSGI app and handles uncaught exceptions and defined exceptions, and outputs the exception in a structured format. Parameters: - wsgi_app is the app.wsgi_app of flask, - app_name should be in the correct format, e.g. APP_NAME_1, - app_logger is the logger object
def register_app_for_error_handling(wsgi_app, app_name, app_logger, custom_logging_service=None):
    """Wrap a WSGI app so exceptions become structured JSON error responses.

    Both defined (:class:`RootException`) and uncaught exceptions are
    logged and serialized.

    Parameters:
        wsgi_app: the ``app.wsgi_app`` of flask.
        app_name: application name in the expected format, e.g. APP_NAME_1.
        app_logger: logger object used for error reporting.
        custom_logging_service: optional replacement for the default
            ``LoggingService``.
    """
    if custom_logging_service is None:
        custom_logging_service = LoggingService(app_logger)
    handler = ExceptionHandler(app_name, custom_logging_service)

    def wrapper(environ, start_response):
        try:
            return wsgi_app(environ, start_response)
        except RootException as err:
            # Known error: only the final traceback line is recorded.
            handler.update_with_exception_data(
                err, Request(environ), traceback.format_exc().splitlines()[-1])
        except Exception:
            # Unknown error: wrap the full traceback in a generic 500.
            err = RootException("FATAL_000", {}, {}, {}, status_code=500)
            err.error_message = "Unknown System Error"
            handler.update_with_exception_data(
                err, Request(environ), traceback.format_exc())

        body = json.dumps(handler.construct_error_details())
        error_response = Response(body,
                                  status=handler.get_http_status_code(),
                                  content_type='application/json')
        return error_response(environ, start_response)

    return wrapper
1,096,534
Return package path. Uses a UUID to generate the package's directory name. Args: book_id (str, default None): UUID of the book. prefix (str, default settings.TEMP_DIR): Where the package will be stored. Default :attr:`settings.TEMP_DIR`. Returns: str: Path to the root directory.
def _get_package_name(prefix=settings.TEMP_DIR, book_id=None):
    """Return the path to the package's root directory.

    Args:
        prefix (str, default settings.TEMP_DIR): Directory in which the
            package will be stored.
        book_id (str, default None): UUID of the book; a fresh UUID4 is
            generated when not given.

    Returns:
        str: Path to the root directory.
    """
    directory_name = str(uuid.uuid4()) if book_id is None else book_id
    return os.path.join(prefix, directory_name)
1,096,733