Dataset schema (column name, feature type, observed string-length range or number of classes):

  repo              stringlengths   7 to 54
  path              stringlengths   4 to 192
  url               stringlengths   87 to 284
  code              stringlengths   78 to 104k
  code_tokens       sequence
  docstring         stringlengths   1 to 46.9k
  docstring_tokens  sequence
  language          stringclasses   1 value
  partition         stringclasses   3 values
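The layout above matches a CodeSearchNet-style code/docstring corpus: each record holds a repository name, file path, source URL, the function body, its token sequence, the extracted docstring with its token sequence, a single language value (python), and a train/valid/test partition. As a hedged sketch of how a split with exactly these columns could be consumed, the snippet below reads it as JSON Lines with the Hugging Face datasets library; the file name python_train.jsonl is a placeholder, not the actual location of this dump.

# Minimal sketch, assuming the records below are stored as JSON Lines with
# exactly the columns listed above. The file name is a placeholder.
from datasets import load_dataset

ds = load_dataset("json", data_files="python_train.jsonl", split="train")

record = ds[0]
print(record["repo"], record["path"])
print(record["docstring"].splitlines()[0])    # first line of the extracted docstring
print(len(record["code_tokens"]), "code tokens")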
sveetch/boussole
boussole/conf/post_processor.py
https://github.com/sveetch/boussole/blob/22cc644e9d633f41ebfc167d427a71c1726cee21/boussole/conf/post_processor.py#L103-L126
def _validate_path(self, settings, name, value): """ Validate path exists Args: settings (dict): Current settings. name (str): Setting name. value (str): Path to validate. Raises: boussole.exceptions.SettingsInvalidError: If path does not exists. Returns: str: Validated path. """ if not os.path.exists(value): raise SettingsInvalidError("Path from setting '{name}' does not " "exists: {value}".format( name=name, value=value )) return value
[ "def", "_validate_path", "(", "self", ",", "settings", ",", "name", ",", "value", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "value", ")", ":", "raise", "SettingsInvalidError", "(", "\"Path from setting '{name}' does not \"", "\"exists: {value}\"", ".", "format", "(", "name", "=", "name", ",", "value", "=", "value", ")", ")", "return", "value" ]
Validate path exists Args: settings (dict): Current settings. name (str): Setting name. value (str): Path to validate. Raises: boussole.exceptions.SettingsInvalidError: If path does not exists. Returns: str: Validated path.
[ "Validate", "path", "exists" ]
python
train
wonambi-python/wonambi
wonambi/ioeeg/ktlx.py
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/ioeeg/ktlx.py#L626-L665
def _read_snc(snc_file): """Read Synchronization File and return sample stamp and time Returns ------- sampleStamp : list of int Sample number from start of study sampleTime : list of datetime.datetime File time representation of sampleStamp Notes ----- The synchronization file is used to calculate a FILETIME given a sample stamp (and vise-versa). Theoretically, it is possible to calculate a sample stamp's FILETIME given the FILETIME of sample stamp zero (when sampling started) and the sample rate. However, because the sample rate cannot be represented with full precision the accuracy of the FILETIME calculation is affected. To compensate for the lack of accuracy, the synchronization file maintains a sample stamp-to-computer time (called, MasterTime) mapping. Interpolation is then used to calculate a FILETIME given a sample stamp (and vise-versa). The attributes, sampleStamp and sampleTime, are used to predict (using interpolation) the FILETIME based upon a given sample stamp (and vise-versa). Currently, the only use for this conversion process is to enable correlation of EEG (sample_stamp) data with other sources of data such as Video (which works in FILETIME). """ snc_raw_dtype = dtype([('sampleStamp', '<i'), ('sampleTime', '<q')]) with snc_file.open('rb') as f: f.seek(352) # end of header snc_raw = fromfile(f, dtype=snc_raw_dtype) sampleStamp = snc_raw['sampleStamp'] sampleTime = asarray([_filetime_to_dt(x) for x in snc_raw['sampleTime']]) return sampleStamp, sampleTime
[ "def", "_read_snc", "(", "snc_file", ")", ":", "snc_raw_dtype", "=", "dtype", "(", "[", "(", "'sampleStamp'", ",", "'<i'", ")", ",", "(", "'sampleTime'", ",", "'<q'", ")", "]", ")", "with", "snc_file", ".", "open", "(", "'rb'", ")", "as", "f", ":", "f", ".", "seek", "(", "352", ")", "# end of header", "snc_raw", "=", "fromfile", "(", "f", ",", "dtype", "=", "snc_raw_dtype", ")", "sampleStamp", "=", "snc_raw", "[", "'sampleStamp'", "]", "sampleTime", "=", "asarray", "(", "[", "_filetime_to_dt", "(", "x", ")", "for", "x", "in", "snc_raw", "[", "'sampleTime'", "]", "]", ")", "return", "sampleStamp", ",", "sampleTime" ]
Read Synchronization File and return sample stamp and time Returns ------- sampleStamp : list of int Sample number from start of study sampleTime : list of datetime.datetime File time representation of sampleStamp Notes ----- The synchronization file is used to calculate a FILETIME given a sample stamp (and vise-versa). Theoretically, it is possible to calculate a sample stamp's FILETIME given the FILETIME of sample stamp zero (when sampling started) and the sample rate. However, because the sample rate cannot be represented with full precision the accuracy of the FILETIME calculation is affected. To compensate for the lack of accuracy, the synchronization file maintains a sample stamp-to-computer time (called, MasterTime) mapping. Interpolation is then used to calculate a FILETIME given a sample stamp (and vise-versa). The attributes, sampleStamp and sampleTime, are used to predict (using interpolation) the FILETIME based upon a given sample stamp (and vise-versa). Currently, the only use for this conversion process is to enable correlation of EEG (sample_stamp) data with other sources of data such as Video (which works in FILETIME).
[ "Read", "Synchronization", "File", "and", "return", "sample", "stamp", "and", "time" ]
python
train
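The notes in the wonambi docstring above lean on Windows FILETIME values, i.e. 100-nanosecond ticks counted from 1601-01-01 UTC. Below is a standalone sketch of that conversion for readers unfamiliar with the format; it is an assumption about what _filetime_to_dt does, not the wonambi source.

from datetime import datetime, timedelta

# Convert a Windows FILETIME (100-ns ticks since 1601-01-01) to a datetime.
# Sub-microsecond precision is dropped because datetime stores microseconds.
FILETIME_EPOCH = datetime(1601, 1, 1)

def filetime_to_dt(ticks):
    return FILETIME_EPOCH + timedelta(microseconds=ticks // 10)

# Round-trip check: 2020-01-01 expressed as FILETIME ticks.
delta = datetime(2020, 1, 1) - FILETIME_EPOCH
ticks = (delta // timedelta(microseconds=1)) * 10
print(filetime_to_dt(ticks))    # 2020-01-01 00:00:00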
pip-services3-python/pip-services3-commons-python
pip_services3_commons/random/RandomText.py
https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/random/RandomText.py#L148-L165
def name(): """ Generates a random person's name which has the following structure <optional prefix> <first name> <second name> <optional suffix> :return: a random name. """ result = "" if RandomBoolean.chance(3, 5): result += random.choice(_name_prefixes) + " " result += random.choice(_first_names) + " " + random.choice(_last_names) if RandomBoolean.chance(5, 10): result += " " + random.choice(_name_suffixes) return result
[ "def", "name", "(", ")", ":", "result", "=", "\"\"", "if", "RandomBoolean", ".", "chance", "(", "3", ",", "5", ")", ":", "result", "+=", "random", ".", "choice", "(", "_name_prefixes", ")", "+", "\" \"", "result", "+=", "random", ".", "choice", "(", "_first_names", ")", "+", "\" \"", "+", "random", ".", "choice", "(", "_last_names", ")", "if", "RandomBoolean", ".", "chance", "(", "5", ",", "10", ")", ":", "result", "+=", "\" \"", "+", "random", ".", "choice", "(", "_name_suffixes", ")", "return", "result" ]
Generates a random person's name which has the following structure <optional prefix> <first name> <second name> <optional suffix> :return: a random name.
[ "Generates", "a", "random", "person", "s", "name", "which", "has", "the", "following", "structure", "<optional", "prefix", ">", "<first", "name", ">", "<second", "name", ">", "<optional", "suffix", ">" ]
python
train
allenai/allennlp
allennlp/state_machines/states/lambda_grammar_statelet.py
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/state_machines/states/lambda_grammar_statelet.py#L102-L158
def take_action(self, production_rule: str) -> 'LambdaGrammarStatelet': """ Takes an action in the current grammar state, returning a new grammar state with whatever updates are necessary. The production rule is assumed to be formatted as "LHS -> RHS". This will update the non-terminal stack and the context-dependent actions. Updating the non-terminal stack involves popping the non-terminal that was expanded off of the stack, then pushing on any non-terminals in the production rule back on the stack. We push the non-terminals on in `reverse` order, so that the first non-terminal in the production rule gets popped off the stack first. For example, if our current ``nonterminal_stack`` is ``["r", "<e,r>", "d"]``, and ``action`` is ``d -> [<e,d>, e]``, the resulting stack will be ``["r", "<e,r>", "e", "<e,d>"]``. """ left_side, right_side = production_rule.split(' -> ') assert self._nonterminal_stack[-1] == left_side, (f"Tried to expand {self._nonterminal_stack[-1]}" f"but got rule {left_side} -> {right_side}") assert all(self._lambda_stacks[key][-1] == left_side for key in self._lambda_stacks) new_stack = self._nonterminal_stack[:-1] new_lambda_stacks = {key: self._lambda_stacks[key][:-1] for key in self._lambda_stacks} productions = self._get_productions_from_string(right_side) # Looking for lambda productions, but not for cells or columns with the word "lambda" in # them. if 'lambda' in productions[0] and 'fb:' not in productions[0]: production = productions[0] if production[0] == "'" and production[-1] == "'": # The production rule with a lambda is typically "<t,d> -> ['lambda x', d]". We # need to strip the quotes. production = production[1:-1] lambda_variable = production.split(' ')[1] # The left side must be formatted as "<t,d>", where "t" is the type of the lambda # variable, and "d" is the return type of the lambda function. We need to pull out the # "t" here. TODO(mattg): this is pretty limiting, but I'm not sure how general we # should make this. if len(left_side) != 5: raise NotImplementedError("Can't handle this type yet:", left_side) lambda_type = left_side[1] new_lambda_stacks[(lambda_type, lambda_variable)] = [] for production in reversed(productions): if self._is_nonterminal(production): new_stack.append(production) for lambda_stack in new_lambda_stacks.values(): lambda_stack.append(production) # If any of the lambda stacks have now become empty, we remove them from our dictionary. new_lambda_stacks = {key: new_lambda_stacks[key] for key in new_lambda_stacks if new_lambda_stacks[key]} return LambdaGrammarStatelet(nonterminal_stack=new_stack, lambda_stacks=new_lambda_stacks, valid_actions=self._valid_actions, context_actions=self._context_actions, is_nonterminal=self._is_nonterminal)
[ "def", "take_action", "(", "self", ",", "production_rule", ":", "str", ")", "->", "'LambdaGrammarStatelet'", ":", "left_side", ",", "right_side", "=", "production_rule", ".", "split", "(", "' -> '", ")", "assert", "self", ".", "_nonterminal_stack", "[", "-", "1", "]", "==", "left_side", ",", "(", "f\"Tried to expand {self._nonterminal_stack[-1]}\"", "f\"but got rule {left_side} -> {right_side}\"", ")", "assert", "all", "(", "self", ".", "_lambda_stacks", "[", "key", "]", "[", "-", "1", "]", "==", "left_side", "for", "key", "in", "self", ".", "_lambda_stacks", ")", "new_stack", "=", "self", ".", "_nonterminal_stack", "[", ":", "-", "1", "]", "new_lambda_stacks", "=", "{", "key", ":", "self", ".", "_lambda_stacks", "[", "key", "]", "[", ":", "-", "1", "]", "for", "key", "in", "self", ".", "_lambda_stacks", "}", "productions", "=", "self", ".", "_get_productions_from_string", "(", "right_side", ")", "# Looking for lambda productions, but not for cells or columns with the word \"lambda\" in", "# them.", "if", "'lambda'", "in", "productions", "[", "0", "]", "and", "'fb:'", "not", "in", "productions", "[", "0", "]", ":", "production", "=", "productions", "[", "0", "]", "if", "production", "[", "0", "]", "==", "\"'\"", "and", "production", "[", "-", "1", "]", "==", "\"'\"", ":", "# The production rule with a lambda is typically \"<t,d> -> ['lambda x', d]\". We", "# need to strip the quotes.", "production", "=", "production", "[", "1", ":", "-", "1", "]", "lambda_variable", "=", "production", ".", "split", "(", "' '", ")", "[", "1", "]", "# The left side must be formatted as \"<t,d>\", where \"t\" is the type of the lambda", "# variable, and \"d\" is the return type of the lambda function. We need to pull out the", "# \"t\" here. TODO(mattg): this is pretty limiting, but I'm not sure how general we", "# should make this.", "if", "len", "(", "left_side", ")", "!=", "5", ":", "raise", "NotImplementedError", "(", "\"Can't handle this type yet:\"", ",", "left_side", ")", "lambda_type", "=", "left_side", "[", "1", "]", "new_lambda_stacks", "[", "(", "lambda_type", ",", "lambda_variable", ")", "]", "=", "[", "]", "for", "production", "in", "reversed", "(", "productions", ")", ":", "if", "self", ".", "_is_nonterminal", "(", "production", ")", ":", "new_stack", ".", "append", "(", "production", ")", "for", "lambda_stack", "in", "new_lambda_stacks", ".", "values", "(", ")", ":", "lambda_stack", ".", "append", "(", "production", ")", "# If any of the lambda stacks have now become empty, we remove them from our dictionary.", "new_lambda_stacks", "=", "{", "key", ":", "new_lambda_stacks", "[", "key", "]", "for", "key", "in", "new_lambda_stacks", "if", "new_lambda_stacks", "[", "key", "]", "}", "return", "LambdaGrammarStatelet", "(", "nonterminal_stack", "=", "new_stack", ",", "lambda_stacks", "=", "new_lambda_stacks", ",", "valid_actions", "=", "self", ".", "_valid_actions", ",", "context_actions", "=", "self", ".", "_context_actions", ",", "is_nonterminal", "=", "self", ".", "_is_nonterminal", ")" ]
Takes an action in the current grammar state, returning a new grammar state with whatever updates are necessary. The production rule is assumed to be formatted as "LHS -> RHS". This will update the non-terminal stack and the context-dependent actions. Updating the non-terminal stack involves popping the non-terminal that was expanded off of the stack, then pushing on any non-terminals in the production rule back on the stack. We push the non-terminals on in `reverse` order, so that the first non-terminal in the production rule gets popped off the stack first. For example, if our current ``nonterminal_stack`` is ``["r", "<e,r>", "d"]``, and ``action`` is ``d -> [<e,d>, e]``, the resulting stack will be ``["r", "<e,r>", "e", "<e,d>"]``.
[ "Takes", "an", "action", "in", "the", "current", "grammar", "state", "returning", "a", "new", "grammar", "state", "with", "whatever", "updates", "are", "necessary", ".", "The", "production", "rule", "is", "assumed", "to", "be", "formatted", "as", "LHS", "-", ">", "RHS", "." ]
python
train
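The stack example in the take_action docstring above is easy to verify in isolation. The sketch below reproduces only the non-terminal bookkeeping it describes (pop the expanded symbol, push the right-hand-side symbols in reverse so the leftmost one is expanded first); it is not AllenNLP code, and the naive split on ", " plus the everything-is-a-nonterminal predicate are simplifying assumptions for this example.

# Standalone sketch of the stack update described above; not the AllenNLP method.
def take_action(nonterminal_stack, production_rule, is_nonterminal):
    left_side, right_side = production_rule.split(" -> ")
    assert nonterminal_stack[-1] == left_side
    new_stack = nonterminal_stack[:-1]
    # Naive stand-in for _get_productions_from_string: assumes symbols are
    # separated by ", " and contain no comma+space themselves.
    productions = [p.strip() for p in right_side.strip("[]").split(", ")]
    for production in reversed(productions):
        if is_nonterminal(production):
            new_stack.append(production)
    return new_stack

stack = ["r", "<e,r>", "d"]
print(take_action(stack, "d -> [<e,d>, e]", lambda s: True))
# ['r', '<e,r>', 'e', '<e,d>']  -- matches the docstring example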
serkanyersen/underscore.py
src/underscore.py
https://github.com/serkanyersen/underscore.py/blob/07c25c3f0f789536e4ad47aa315faccc0da9602f/src/underscore.py#L718-L732
def lastIndexOf(self, item): """ Return the position of the last occurrence of an item in an array, or -1 if the item is not included in the array. """ array = self.obj i = len(array) - 1 if not (self._clean.isList() or self._clean.isTuple()): return self._wrap(-1) while i > -1: if array[i] is item: return self._wrap(i) i -= 1 return self._wrap(-1)
[ "def", "lastIndexOf", "(", "self", ",", "item", ")", ":", "array", "=", "self", ".", "obj", "i", "=", "len", "(", "array", ")", "-", "1", "if", "not", "(", "self", ".", "_clean", ".", "isList", "(", ")", "or", "self", ".", "_clean", ".", "isTuple", "(", ")", ")", ":", "return", "self", ".", "_wrap", "(", "-", "1", ")", "while", "i", ">", "-", "1", ":", "if", "array", "[", "i", "]", "is", "item", ":", "return", "self", ".", "_wrap", "(", "i", ")", "i", "-=", "1", "return", "self", ".", "_wrap", "(", "-", "1", ")" ]
Return the position of the last occurrence of an item in an array, or -1 if the item is not included in the array.
[ "Return", "the", "position", "of", "the", "last", "occurrence", "of", "an", "item", "in", "an", "array", "or", "-", "1", "if", "the", "item", "is", "not", "included", "in", "the", "array", "." ]
python
train
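One detail worth noting in lastIndexOf above: the comparison uses `is` (object identity) rather than `==`, so equal-but-distinct objects (for example two separate lists with the same contents) are not matched. The standalone sketch below shows the same reverse scan with plain value equality for contrast; it is an illustration, not the underscore.py wrapper.

# Reverse scan for the last index of an item, using value equality.
def last_index_of(array, item):
    for i in range(len(array) - 1, -1, -1):
        if array[i] == item:      # the library above uses `is` here
            return i
    return -1

print(last_index_of([1, 2, 3, 2], 2))    # 3
print(last_index_of([1, 2, 3], 99))      # -1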
IdentityPython/pysaml2
src/saml2/client.py
https://github.com/IdentityPython/pysaml2/blob/d3aa78eeb7d37c12688f783cb4db1c7263a14ad6/src/saml2/client.py#L287-L293
def is_logged_in(self, name_id): """ Check if user is in the cache :param name_id: The identifier of the subject """ identity = self.users.get_identity(name_id)[0] return bool(identity)
[ "def", "is_logged_in", "(", "self", ",", "name_id", ")", ":", "identity", "=", "self", ".", "users", ".", "get_identity", "(", "name_id", ")", "[", "0", "]", "return", "bool", "(", "identity", ")" ]
Check if user is in the cache :param name_id: The identifier of the subject
[ "Check", "if", "user", "is", "in", "the", "cache" ]
python
train
kcallin/mqtt-codec
mqtt_codec/io.py
https://github.com/kcallin/mqtt-codec/blob/0f754250cc3f44f4376777e7e8b3676c5a4d413a/mqtt_codec/io.py#L439-L463
def read(self, max_bytes=1): """Read at most `max_bytes` from internal buffer. Parameters ----------- max_bytes: int Maximum number of bytes to read. Returns -------- bytes Bytes extracted from internal buffer. Length may be less than ``max_bytes``. On end-of file returns a bytes object with zero-length. """ if self.limit is None: b = self.__f.read(max_bytes) else: if self.__num_bytes_consumed + max_bytes > self.limit: max_bytes = self.limit - self.__num_bytes_consumed b = self.__f.read(max_bytes) self.__num_bytes_consumed += len(b) return b
[ "def", "read", "(", "self", ",", "max_bytes", "=", "1", ")", ":", "if", "self", ".", "limit", "is", "None", ":", "b", "=", "self", ".", "__f", ".", "read", "(", "max_bytes", ")", "else", ":", "if", "self", ".", "__num_bytes_consumed", "+", "max_bytes", ">", "self", ".", "limit", ":", "max_bytes", "=", "self", ".", "limit", "-", "self", ".", "__num_bytes_consumed", "b", "=", "self", ".", "__f", ".", "read", "(", "max_bytes", ")", "self", ".", "__num_bytes_consumed", "+=", "len", "(", "b", ")", "return", "b" ]
Read at most `max_bytes` from internal buffer. Parameters ----------- max_bytes: int Maximum number of bytes to read. Returns -------- bytes Bytes extracted from internal buffer. Length may be less than ``max_bytes``. On end-of file returns a bytes object with zero-length.
[ "Read", "at", "most", "max_bytes", "from", "internal", "buffer", "." ]
python
train
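The read method above clamps max_bytes against an optional byte limit before delegating to the wrapped stream. A standalone sketch of that pattern, with illustrative names rather than the mqtt_codec class:

import io

# Limit-aware reader: never hands out more bytes than the configured limit.
class LimitedReader:
    def __init__(self, f, limit=None):
        self._f = f
        self._limit = limit
        self._consumed = 0

    def read(self, max_bytes=1):
        if self._limit is not None:
            max_bytes = min(max_bytes, self._limit - self._consumed)
        b = self._f.read(max_bytes)
        self._consumed += len(b)
        return b

r = LimitedReader(io.BytesIO(b"abcdefgh"), limit=5)
print(r.read(3))     # b'abc'
print(r.read(10))    # b'de'   (clamped by the 5-byte limit)
print(r.read(10))    # b''     (limit exhausted)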
openai/baselines
baselines/her/her_sampler.py
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/her/her_sampler.py#L4-L63
def make_sample_her_transitions(replay_strategy, replay_k, reward_fun): """Creates a sample function that can be used for HER experience replay. Args: replay_strategy (in ['future', 'none']): the HER replay strategy; if set to 'none', regular DDPG experience replay is used replay_k (int): the ratio between HER replays and regular replays (e.g. k = 4 -> 4 times as many HER replays as regular replays are used) reward_fun (function): function to re-compute the reward with substituted goals """ if replay_strategy == 'future': future_p = 1 - (1. / (1 + replay_k)) else: # 'replay_strategy' == 'none' future_p = 0 def _sample_her_transitions(episode_batch, batch_size_in_transitions): """episode_batch is {key: array(buffer_size x T x dim_key)} """ T = episode_batch['u'].shape[1] rollout_batch_size = episode_batch['u'].shape[0] batch_size = batch_size_in_transitions # Select which episodes and time steps to use. episode_idxs = np.random.randint(0, rollout_batch_size, batch_size) t_samples = np.random.randint(T, size=batch_size) transitions = {key: episode_batch[key][episode_idxs, t_samples].copy() for key in episode_batch.keys()} # Select future time indexes proportional with probability future_p. These # will be used for HER replay by substituting in future goals. her_indexes = np.where(np.random.uniform(size=batch_size) < future_p) future_offset = np.random.uniform(size=batch_size) * (T - t_samples) future_offset = future_offset.astype(int) future_t = (t_samples + 1 + future_offset)[her_indexes] # Replace goal with achieved goal but only for the previously-selected # HER transitions (as defined by her_indexes). For the other transitions, # keep the original goal. future_ag = episode_batch['ag'][episode_idxs[her_indexes], future_t] transitions['g'][her_indexes] = future_ag # Reconstruct info dictionary for reward computation. info = {} for key, value in transitions.items(): if key.startswith('info_'): info[key.replace('info_', '')] = value # Re-compute reward since we may have substituted the goal. reward_params = {k: transitions[k] for k in ['ag_2', 'g']} reward_params['info'] = info transitions['r'] = reward_fun(**reward_params) transitions = {k: transitions[k].reshape(batch_size, *transitions[k].shape[1:]) for k in transitions.keys()} assert(transitions['u'].shape[0] == batch_size_in_transitions) return transitions return _sample_her_transitions
[ "def", "make_sample_her_transitions", "(", "replay_strategy", ",", "replay_k", ",", "reward_fun", ")", ":", "if", "replay_strategy", "==", "'future'", ":", "future_p", "=", "1", "-", "(", "1.", "/", "(", "1", "+", "replay_k", ")", ")", "else", ":", "# 'replay_strategy' == 'none'", "future_p", "=", "0", "def", "_sample_her_transitions", "(", "episode_batch", ",", "batch_size_in_transitions", ")", ":", "\"\"\"episode_batch is {key: array(buffer_size x T x dim_key)}\n \"\"\"", "T", "=", "episode_batch", "[", "'u'", "]", ".", "shape", "[", "1", "]", "rollout_batch_size", "=", "episode_batch", "[", "'u'", "]", ".", "shape", "[", "0", "]", "batch_size", "=", "batch_size_in_transitions", "# Select which episodes and time steps to use.", "episode_idxs", "=", "np", ".", "random", ".", "randint", "(", "0", ",", "rollout_batch_size", ",", "batch_size", ")", "t_samples", "=", "np", ".", "random", ".", "randint", "(", "T", ",", "size", "=", "batch_size", ")", "transitions", "=", "{", "key", ":", "episode_batch", "[", "key", "]", "[", "episode_idxs", ",", "t_samples", "]", ".", "copy", "(", ")", "for", "key", "in", "episode_batch", ".", "keys", "(", ")", "}", "# Select future time indexes proportional with probability future_p. These", "# will be used for HER replay by substituting in future goals.", "her_indexes", "=", "np", ".", "where", "(", "np", ".", "random", ".", "uniform", "(", "size", "=", "batch_size", ")", "<", "future_p", ")", "future_offset", "=", "np", ".", "random", ".", "uniform", "(", "size", "=", "batch_size", ")", "*", "(", "T", "-", "t_samples", ")", "future_offset", "=", "future_offset", ".", "astype", "(", "int", ")", "future_t", "=", "(", "t_samples", "+", "1", "+", "future_offset", ")", "[", "her_indexes", "]", "# Replace goal with achieved goal but only for the previously-selected", "# HER transitions (as defined by her_indexes). For the other transitions,", "# keep the original goal.", "future_ag", "=", "episode_batch", "[", "'ag'", "]", "[", "episode_idxs", "[", "her_indexes", "]", ",", "future_t", "]", "transitions", "[", "'g'", "]", "[", "her_indexes", "]", "=", "future_ag", "# Reconstruct info dictionary for reward computation.", "info", "=", "{", "}", "for", "key", ",", "value", "in", "transitions", ".", "items", "(", ")", ":", "if", "key", ".", "startswith", "(", "'info_'", ")", ":", "info", "[", "key", ".", "replace", "(", "'info_'", ",", "''", ")", "]", "=", "value", "# Re-compute reward since we may have substituted the goal.", "reward_params", "=", "{", "k", ":", "transitions", "[", "k", "]", "for", "k", "in", "[", "'ag_2'", ",", "'g'", "]", "}", "reward_params", "[", "'info'", "]", "=", "info", "transitions", "[", "'r'", "]", "=", "reward_fun", "(", "*", "*", "reward_params", ")", "transitions", "=", "{", "k", ":", "transitions", "[", "k", "]", ".", "reshape", "(", "batch_size", ",", "*", "transitions", "[", "k", "]", ".", "shape", "[", "1", ":", "]", ")", "for", "k", "in", "transitions", ".", "keys", "(", ")", "}", "assert", "(", "transitions", "[", "'u'", "]", ".", "shape", "[", "0", "]", "==", "batch_size_in_transitions", ")", "return", "transitions", "return", "_sample_her_transitions" ]
Creates a sample function that can be used for HER experience replay. Args: replay_strategy (in ['future', 'none']): the HER replay strategy; if set to 'none', regular DDPG experience replay is used replay_k (int): the ratio between HER replays and regular replays (e.g. k = 4 -> 4 times as many HER replays as regular replays are used) reward_fun (function): function to re-compute the reward with substituted goals
[ "Creates", "a", "sample", "function", "that", "can", "be", "used", "for", "HER", "experience", "replay", "." ]
python
valid
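The heart of the sampler above is the vectorised selection of which transitions get relabelled and which future time step supplies the substituted goal. The NumPy sketch below isolates just that selection with toy sizes; it is not the baselines buffer layout.

import numpy as np

# Toy sizes for illustration only.
T, rollout_batch_size, batch_size = 50, 8, 16
replay_k = 4
future_p = 1 - (1. / (1 + replay_k))     # 0.8: fraction of transitions relabelled

episode_idxs = np.random.randint(0, rollout_batch_size, batch_size)
t_samples = np.random.randint(T, size=batch_size)

# Choose which transitions get a substituted ("future") goal ...
her_indexes = np.where(np.random.uniform(size=batch_size) < future_p)
# ... and a future time step strictly after t_samples within the same episode.
future_offset = (np.random.uniform(size=batch_size) * (T - t_samples)).astype(int)
future_t = (t_samples + 1 + future_offset)[her_indexes]

assert np.all(future_t > t_samples[her_indexes])
assert np.all(future_t <= T)
print(len(her_indexes[0]), "of", batch_size, "transitions relabelled")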
linkhub-sdk/popbill.py
popbill/closedownService.py
https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/closedownService.py#L39-L51
def getUnitCost(self, CorpNum): """ 휴폐업조회 단가 확인. args CorpNum : 팝빌회원 사업자번호 return 발행단가 by float raise PopbillException """ result = self._httpget('/CloseDown/UnitCost', CorpNum) return float(result.unitCost)
[ "def", "getUnitCost", "(", "self", ",", "CorpNum", ")", ":", "result", "=", "self", ".", "_httpget", "(", "'/CloseDown/UnitCost'", ",", "CorpNum", ")", "return", "float", "(", "result", ".", "unitCost", ")" ]
휴폐업조회 단가 확인. args CorpNum : 팝빌회원 사업자번호 return 발행단가 by float raise PopbillException
[ "휴폐업조회", "단가", "확인", ".", "args", "CorpNum", ":", "팝빌회원", "사업자번호", "return", "발행단가", "by", "float", "raise", "PopbillException" ]
python
train
horazont/aioxmpp
aioxmpp/stream.py
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/stream.py#L1894-L1909
def stop(self): """ Send a signal to the main broker task to terminate. You have to check :attr:`running` and possibly wait for it to become :data:`False` --- the task takes at least one loop through the event loop to terminate. It is guarenteed that the task will not attempt to send stanzas over the existing `xmlstream` after a call to :meth:`stop` has been made. It is legal to call :meth:`stop` even if the task is already stopped. It is a no-op in that case. """ if not self.running: return self._logger.debug("sending stop signal to task") self._task.cancel()
[ "def", "stop", "(", "self", ")", ":", "if", "not", "self", ".", "running", ":", "return", "self", ".", "_logger", ".", "debug", "(", "\"sending stop signal to task\"", ")", "self", ".", "_task", ".", "cancel", "(", ")" ]
Send a signal to the main broker task to terminate. You have to check :attr:`running` and possibly wait for it to become :data:`False` --- the task takes at least one loop through the event loop to terminate. It is guarenteed that the task will not attempt to send stanzas over the existing `xmlstream` after a call to :meth:`stop` has been made. It is legal to call :meth:`stop` even if the task is already stopped. It is a no-op in that case.
[ "Send", "a", "signal", "to", "the", "main", "broker", "task", "to", "terminate", ".", "You", "have", "to", "check", ":", "attr", ":", "running", "and", "possibly", "wait", "for", "it", "to", "become", ":", "data", ":", "False", "---", "the", "task", "takes", "at", "least", "one", "loop", "through", "the", "event", "loop", "to", "terminate", "." ]
python
train
PredixDev/predixpy
predix/admin/cf/services.py
https://github.com/PredixDev/predixpy/blob/a0cb34cf40f716229351bb6d90d6ecace958c81f/predix/admin/cf/services.py#L50-L55
def delete_service_bindings(self, service_name): """ Remove service bindings to applications. """ instance = self.get_instance(service_name) return self.api.delete(instance['service_bindings_url'])
[ "def", "delete_service_bindings", "(", "self", ",", "service_name", ")", ":", "instance", "=", "self", ".", "get_instance", "(", "service_name", ")", "return", "self", ".", "api", ".", "delete", "(", "instance", "[", "'service_bindings_url'", "]", ")" ]
Remove service bindings to applications.
[ "Remove", "service", "bindings", "to", "applications", "." ]
python
train
thecynic/pylutron
pylutron/__init__.py
https://github.com/thecynic/pylutron/blob/4d9222c96ef7ac7ac458031c058ad93ec31cebbf/pylutron/__init__.py#L192-L227
def _parse_area(self, area_xml): """Parses an Area tag, which is effectively a room, depending on how the Lutron controller programming was done.""" area = Area(self._lutron, name=area_xml.get('Name'), integration_id=int(area_xml.get('IntegrationID')), occupancy_group_id=area_xml.get('OccupancyGroupAssignedToID')) for output_xml in area_xml.find('Outputs'): output = self._parse_output(output_xml) area.add_output(output) # device group in our case means keypad # device_group.get('Name') is the location of the keypad for device_group in area_xml.find('DeviceGroups'): if device_group.tag == 'DeviceGroup': devs = device_group.find('Devices') elif device_group.tag == 'Device': devs = [device_group] else: _LOGGER.info("Unknown tag in DeviceGroups child %s" % devs) devs = [] for device_xml in devs: if device_xml.tag != 'Device': continue if device_xml.get('DeviceType') in ( 'SEETOUCH_KEYPAD', 'SEETOUCH_TABLETOP_KEYPAD', 'PICO_KEYPAD', 'HYBRID_SEETOUCH_KEYPAD', 'MAIN_REPEATER'): keypad = self._parse_keypad(device_xml) area.add_keypad(keypad) elif device_xml.get('DeviceType') == 'MOTION_SENSOR': motion_sensor = self._parse_motion_sensor(device_xml) area.add_sensor(motion_sensor) #elif device_xml.get('DeviceType') == 'VISOR_CONTROL_RECEIVER': return area
[ "def", "_parse_area", "(", "self", ",", "area_xml", ")", ":", "area", "=", "Area", "(", "self", ".", "_lutron", ",", "name", "=", "area_xml", ".", "get", "(", "'Name'", ")", ",", "integration_id", "=", "int", "(", "area_xml", ".", "get", "(", "'IntegrationID'", ")", ")", ",", "occupancy_group_id", "=", "area_xml", ".", "get", "(", "'OccupancyGroupAssignedToID'", ")", ")", "for", "output_xml", "in", "area_xml", ".", "find", "(", "'Outputs'", ")", ":", "output", "=", "self", ".", "_parse_output", "(", "output_xml", ")", "area", ".", "add_output", "(", "output", ")", "# device group in our case means keypad", "# device_group.get('Name') is the location of the keypad", "for", "device_group", "in", "area_xml", ".", "find", "(", "'DeviceGroups'", ")", ":", "if", "device_group", ".", "tag", "==", "'DeviceGroup'", ":", "devs", "=", "device_group", ".", "find", "(", "'Devices'", ")", "elif", "device_group", ".", "tag", "==", "'Device'", ":", "devs", "=", "[", "device_group", "]", "else", ":", "_LOGGER", ".", "info", "(", "\"Unknown tag in DeviceGroups child %s\"", "%", "devs", ")", "devs", "=", "[", "]", "for", "device_xml", "in", "devs", ":", "if", "device_xml", ".", "tag", "!=", "'Device'", ":", "continue", "if", "device_xml", ".", "get", "(", "'DeviceType'", ")", "in", "(", "'SEETOUCH_KEYPAD'", ",", "'SEETOUCH_TABLETOP_KEYPAD'", ",", "'PICO_KEYPAD'", ",", "'HYBRID_SEETOUCH_KEYPAD'", ",", "'MAIN_REPEATER'", ")", ":", "keypad", "=", "self", ".", "_parse_keypad", "(", "device_xml", ")", "area", ".", "add_keypad", "(", "keypad", ")", "elif", "device_xml", ".", "get", "(", "'DeviceType'", ")", "==", "'MOTION_SENSOR'", ":", "motion_sensor", "=", "self", ".", "_parse_motion_sensor", "(", "device_xml", ")", "area", ".", "add_sensor", "(", "motion_sensor", ")", "#elif device_xml.get('DeviceType') == 'VISOR_CONTROL_RECEIVER':", "return", "area" ]
Parses an Area tag, which is effectively a room, depending on how the Lutron controller programming was done.
[ "Parses", "an", "Area", "tag", "which", "is", "effectively", "a", "room", "depending", "on", "how", "the", "Lutron", "controller", "programming", "was", "done", "." ]
python
train
OnroerendErfgoed/crabpy_pyramid
crabpy_pyramid/renderers/crab.py
https://github.com/OnroerendErfgoed/crabpy_pyramid/blob/b727ea55838d71575db96e987b536a0bac9f6a7a/crabpy_pyramid/renderers/crab.py#L252-L264
def item_deelgemeente_adapter(obj, request): """ Adapter for rendering a object of :class:`crabpy.gateway.crab.Deelgemeente` to json. """ return { 'id': obj.id, 'naam': obj.naam, 'gemeente': { 'id': obj.gemeente.id, 'naam': obj.gemeente.naam } }
[ "def", "item_deelgemeente_adapter", "(", "obj", ",", "request", ")", ":", "return", "{", "'id'", ":", "obj", ".", "id", ",", "'naam'", ":", "obj", ".", "naam", ",", "'gemeente'", ":", "{", "'id'", ":", "obj", ".", "gemeente", ".", "id", ",", "'naam'", ":", "obj", ".", "gemeente", ".", "naam", "}", "}" ]
Adapter for rendering a object of :class:`crabpy.gateway.crab.Deelgemeente` to json.
[ "Adapter", "for", "rendering", "a", "object", "of", ":", "class", ":", "crabpy", ".", "gateway", ".", "crab", ".", "Deelgemeente", "to", "json", "." ]
python
train
jtpaasch/simplygithub
simplygithub/internals/api.py
https://github.com/jtpaasch/simplygithub/blob/b77506275ec276ce90879bf1ea9299a79448b903/simplygithub/internals/api.py#L46-L71
def post_merge_request(profile, payload): """Do a POST request to Github's API to merge. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. payload A dict of information to pass to Github's API as the payload for a merge request, something like this:: { "base": <base>, "head": <head>, "commit_message": <mesg>} Returns: The response returned by the ``requests`` library when it does the POST request. """ repo = profile["repo"] url = GITHUB_API_BASE_URL + "repos/" + repo + "/merges" headers = get_headers(profile) response = requests.post(url, json=payload, headers=headers) return response
[ "def", "post_merge_request", "(", "profile", ",", "payload", ")", ":", "repo", "=", "profile", "[", "\"repo\"", "]", "url", "=", "GITHUB_API_BASE_URL", "+", "\"repos/\"", "+", "repo", "+", "\"/merges\"", "headers", "=", "get_headers", "(", "profile", ")", "response", "=", "requests", ".", "post", "(", "url", ",", "json", "=", "payload", ",", "headers", "=", "headers", ")", "return", "response" ]
Do a POST request to Github's API to merge. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. payload A dict of information to pass to Github's API as the payload for a merge request, something like this:: { "base": <base>, "head": <head>, "commit_message": <mesg>} Returns: The response returned by the ``requests`` library when it does the POST request.
[ "Do", "a", "POST", "request", "to", "Github", "s", "API", "to", "merge", "." ]
python
train
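For context, post_merge_request above targets GitHub's branch-merge endpoint (POST /repos/{owner}/{repo}/merges). The hedged sketch below makes the same call with plain requests; the repository and token are placeholders, and the explicit Authorization header stands in for the library's get_headers helper.

import requests

repo = "owner/project"     # placeholder
token = "ghp_example"      # placeholder
payload = {"base": "master",
           "head": "feature-branch",
           "commit_message": "Merge feature-branch into master"}

response = requests.post(
    "https://api.github.com/repos/" + repo + "/merges",
    json=payload,
    headers={"Authorization": "token " + token},
)
print(response.status_code)    # 201 when GitHub creates the merge commit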
saltstack/salt
salt/states/file.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/file.py#L6945-L7046
def accumulated(name, filename, text, **kwargs): ''' Prepare accumulator which can be used in template in file.managed state. Accumulator dictionary becomes available in template. It can also be used in file.blockreplace. name Accumulator name filename Filename which would receive this accumulator (see file.managed state documentation about ``name``) text String or list for adding in accumulator require_in / watch_in One of them required for sure we fill up accumulator before we manage the file. Probably the same as filename Example: Given the following: .. code-block:: yaml animals_doing_things: file.accumulated: - filename: /tmp/animal_file.txt - text: ' jumps over the lazy dog.' - require_in: - file: animal_file animal_file: file.managed: - name: /tmp/animal_file.txt - source: salt://animal_file.txt - template: jinja One might write a template for ``animal_file.txt`` like the following: .. code-block:: jinja The quick brown fox{% for animal in accumulator['animals_doing_things'] %}{{ animal }}{% endfor %} Collectively, the above states and template file will produce: .. code-block:: text The quick brown fox jumps over the lazy dog. Multiple accumulators can be "chained" together. .. note:: The 'accumulator' data structure is a Python dictionary. Do not expect any loop over the keys in a deterministic order! ''' ret = { 'name': name, 'changes': {}, 'result': True, 'comment': '' } if not name: return _error(ret, 'Must provide name to file.accumulated') if text is None: ret['result'] = False ret['comment'] = 'No text supplied for accumulator' return ret require_in = __low__.get('require_in', []) watch_in = __low__.get('watch_in', []) deps = require_in + watch_in if not [x for x in deps if 'file' in x]: ret['result'] = False ret['comment'] = 'Orphaned accumulator {0} in {1}:{2}'.format( name, __low__['__sls__'], __low__['__id__'] ) return ret if isinstance(text, six.string_types): text = (text,) elif isinstance(text, dict): text = (text,) accum_data, accum_deps = _load_accumulators() if filename not in accum_data: accum_data[filename] = {} if filename not in accum_deps: accum_deps[filename] = {} if name not in accum_deps[filename]: accum_deps[filename][name] = [] for accumulator in deps: accum_deps[filename][name].extend(six.itervalues(accumulator)) if name not in accum_data[filename]: accum_data[filename][name] = [] for chunk in text: if chunk not in accum_data[filename][name]: accum_data[filename][name].append(chunk) ret['comment'] = ('Accumulator {0} for file {1} ' 'was charged by text'.format(name, filename)) _persist_accummulators(accum_data, accum_deps) return ret
[ "def", "accumulated", "(", "name", ",", "filename", ",", "text", ",", "*", "*", "kwargs", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "True", ",", "'comment'", ":", "''", "}", "if", "not", "name", ":", "return", "_error", "(", "ret", ",", "'Must provide name to file.accumulated'", ")", "if", "text", "is", "None", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'No text supplied for accumulator'", "return", "ret", "require_in", "=", "__low__", ".", "get", "(", "'require_in'", ",", "[", "]", ")", "watch_in", "=", "__low__", ".", "get", "(", "'watch_in'", ",", "[", "]", ")", "deps", "=", "require_in", "+", "watch_in", "if", "not", "[", "x", "for", "x", "in", "deps", "if", "'file'", "in", "x", "]", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'Orphaned accumulator {0} in {1}:{2}'", ".", "format", "(", "name", ",", "__low__", "[", "'__sls__'", "]", ",", "__low__", "[", "'__id__'", "]", ")", "return", "ret", "if", "isinstance", "(", "text", ",", "six", ".", "string_types", ")", ":", "text", "=", "(", "text", ",", ")", "elif", "isinstance", "(", "text", ",", "dict", ")", ":", "text", "=", "(", "text", ",", ")", "accum_data", ",", "accum_deps", "=", "_load_accumulators", "(", ")", "if", "filename", "not", "in", "accum_data", ":", "accum_data", "[", "filename", "]", "=", "{", "}", "if", "filename", "not", "in", "accum_deps", ":", "accum_deps", "[", "filename", "]", "=", "{", "}", "if", "name", "not", "in", "accum_deps", "[", "filename", "]", ":", "accum_deps", "[", "filename", "]", "[", "name", "]", "=", "[", "]", "for", "accumulator", "in", "deps", ":", "accum_deps", "[", "filename", "]", "[", "name", "]", ".", "extend", "(", "six", ".", "itervalues", "(", "accumulator", ")", ")", "if", "name", "not", "in", "accum_data", "[", "filename", "]", ":", "accum_data", "[", "filename", "]", "[", "name", "]", "=", "[", "]", "for", "chunk", "in", "text", ":", "if", "chunk", "not", "in", "accum_data", "[", "filename", "]", "[", "name", "]", ":", "accum_data", "[", "filename", "]", "[", "name", "]", ".", "append", "(", "chunk", ")", "ret", "[", "'comment'", "]", "=", "(", "'Accumulator {0} for file {1} '", "'was charged by text'", ".", "format", "(", "name", ",", "filename", ")", ")", "_persist_accummulators", "(", "accum_data", ",", "accum_deps", ")", "return", "ret" ]
Prepare accumulator which can be used in template in file.managed state. Accumulator dictionary becomes available in template. It can also be used in file.blockreplace. name Accumulator name filename Filename which would receive this accumulator (see file.managed state documentation about ``name``) text String or list for adding in accumulator require_in / watch_in One of them required for sure we fill up accumulator before we manage the file. Probably the same as filename Example: Given the following: .. code-block:: yaml animals_doing_things: file.accumulated: - filename: /tmp/animal_file.txt - text: ' jumps over the lazy dog.' - require_in: - file: animal_file animal_file: file.managed: - name: /tmp/animal_file.txt - source: salt://animal_file.txt - template: jinja One might write a template for ``animal_file.txt`` like the following: .. code-block:: jinja The quick brown fox{% for animal in accumulator['animals_doing_things'] %}{{ animal }}{% endfor %} Collectively, the above states and template file will produce: .. code-block:: text The quick brown fox jumps over the lazy dog. Multiple accumulators can be "chained" together. .. note:: The 'accumulator' data structure is a Python dictionary. Do not expect any loop over the keys in a deterministic order!
[ "Prepare", "accumulator", "which", "can", "be", "used", "in", "template", "in", "file", ".", "managed", "state", ".", "Accumulator", "dictionary", "becomes", "available", "in", "template", ".", "It", "can", "also", "be", "used", "in", "file", ".", "blockreplace", "." ]
python
train
HumanCellAtlas/cloud-blobstore
cloud_blobstore/s3.py
https://github.com/HumanCellAtlas/cloud-blobstore/blob/b8a60e8e8c0da0e39dda084cb467a34cd2d1ef0a/cloud_blobstore/s3.py#L226-L246
def get_all_metadata( self, bucket: str, key: str ) -> dict: """ Retrieves all the metadata for a given object in a given bucket. :param bucket: the bucket the object resides in. :param key: the key of the object for which metadata is being retrieved. :return: the metadata """ try: return self.s3_client.head_object( Bucket=bucket, Key=key ) except botocore.exceptions.ClientError as ex: if str(ex.response['Error']['Code']) == \ str(requests.codes.not_found): raise BlobNotFoundError(f"Could not find s3://{bucket}/{key}") from ex raise BlobStoreUnknownError(ex)
[ "def", "get_all_metadata", "(", "self", ",", "bucket", ":", "str", ",", "key", ":", "str", ")", "->", "dict", ":", "try", ":", "return", "self", ".", "s3_client", ".", "head_object", "(", "Bucket", "=", "bucket", ",", "Key", "=", "key", ")", "except", "botocore", ".", "exceptions", ".", "ClientError", "as", "ex", ":", "if", "str", "(", "ex", ".", "response", "[", "'Error'", "]", "[", "'Code'", "]", ")", "==", "str", "(", "requests", ".", "codes", ".", "not_found", ")", ":", "raise", "BlobNotFoundError", "(", "f\"Could not find s3://{bucket}/{key}\"", ")", "from", "ex", "raise", "BlobStoreUnknownError", "(", "ex", ")" ]
Retrieves all the metadata for a given object in a given bucket. :param bucket: the bucket the object resides in. :param key: the key of the object for which metadata is being retrieved. :return: the metadata
[ "Retrieves", "all", "the", "metadata", "for", "a", "given", "object", "in", "a", "given", "bucket", ".", ":", "param", "bucket", ":", "the", "bucket", "the", "object", "resides", "in", ".", ":", "param", "key", ":", "the", "key", "of", "the", "object", "for", "which", "metadata", "is", "being", "retrieved", ".", ":", "return", ":", "the", "metadata" ]
python
train
briandilley/ebs-deploy
ebs_deploy/__init__.py
https://github.com/briandilley/ebs-deploy/blob/4178c9c1282a9025fb987dab3470bea28c202e10/ebs_deploy/__init__.py#L43-L55
def get(vals, key, default_val=None): """ Returns a dictionary value """ val = vals for part in key.split('.'): if isinstance(val, dict): val = val.get(part, None) if val is None: return default_val else: return default_val return val
[ "def", "get", "(", "vals", ",", "key", ",", "default_val", "=", "None", ")", ":", "val", "=", "vals", "for", "part", "in", "key", ".", "split", "(", "'.'", ")", ":", "if", "isinstance", "(", "val", ",", "dict", ")", ":", "val", "=", "val", ".", "get", "(", "part", ",", "None", ")", "if", "val", "is", "None", ":", "return", "default_val", "else", ":", "return", "default_val", "return", "val" ]
Returns a dictionary value
[ "Returns", "a", "dictionary", "value" ]
python
valid
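The helper above walks a dotted key through nested dictionaries and returns a default as soon as a segment is missing or the current value is no longer a dict. A standalone sketch with a usage example:

# Dotted-key lookup over nested dicts, mirroring the helper above.
def get(vals, key, default_val=None):
    val = vals
    for part in key.split('.'):
        if not isinstance(val, dict):
            return default_val
        val = val.get(part)
        if val is None:
            return default_val
    return val

config = {"app": {"env": {"name": "production"}}}
print(get(config, "app.env.name"))               # 'production'
print(get(config, "app.env.missing", "-"))       # '-'
print(get(config, "app.env.name.extra", "-"))    # '-' (a string is not a dict)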
geophysics-ubonn/reda
lib/reda/importers/sip04.py
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/importers/sip04.py#L30-L91
def import_sip04_data(data_filename): """Import RELEVANT data from the result files. Refer to the function :func:`reda.importers.sip04.import_sip04_data_all` for an importer that imports ALL data. Exported parameters: ================== ======================================================== key description ================== ======================================================== a First current electrode b Second current electrode m First potential electrode n Second potential electrode frequency Measurement frequency Temp_1 Temperature sensor 1 (optional) Temp_2 Temperature sensor 2 (optional) zt Complex Transfer Impedance (the measurement), mean value r Magnitude of mean measurements (=|zt|) rpha Resistance phase [mrad] zt_1 Complex Transfer Impedance, first repetition zt_2 Complex Transfer Impedance, second repetition zt_3 Complex Transfer Impedance, third repetition ContactResistance Contact resistance (mean value) ShuntResistance Shunt resistance used [Ohm] ================== ======================================================== Parameters ---------- data_filename : string Path to .mat or .csv file containing SIP-04 measurement results. Note that the .csv file does not contain all data contained in the .mat file! Returns ------- df : :py:class:`pandas.DataFrame` The data, contained in a DataFrame """ df_all = import_sip04_data_all(data_filename) columns_to_keep = [ 'a', 'b', 'm', 'n', 'frequency', 'Temp_1', 'Temp_2', 'Zm_1', 'Zm_2', 'Zm_3', 'Zg_m', 'zt', 'Rs', 'r', 'rpha', ] df = df_all[columns_to_keep] df = df.rename(columns={ 'Rs': 'ShuntResistance', 'Zg_m': 'ContactResistance', 'Zm_1': 'zt_1', 'Zm_2': 'zt_2', 'Zm_3': 'zt_3', }) return df
[ "def", "import_sip04_data", "(", "data_filename", ")", ":", "df_all", "=", "import_sip04_data_all", "(", "data_filename", ")", "columns_to_keep", "=", "[", "'a'", ",", "'b'", ",", "'m'", ",", "'n'", ",", "'frequency'", ",", "'Temp_1'", ",", "'Temp_2'", ",", "'Zm_1'", ",", "'Zm_2'", ",", "'Zm_3'", ",", "'Zg_m'", ",", "'zt'", ",", "'Rs'", ",", "'r'", ",", "'rpha'", ",", "]", "df", "=", "df_all", "[", "columns_to_keep", "]", "df", "=", "df", ".", "rename", "(", "columns", "=", "{", "'Rs'", ":", "'ShuntResistance'", ",", "'Zg_m'", ":", "'ContactResistance'", ",", "'Zm_1'", ":", "'zt_1'", ",", "'Zm_2'", ":", "'zt_2'", ",", "'Zm_3'", ":", "'zt_3'", ",", "}", ")", "return", "df" ]
Import RELEVANT data from the result files. Refer to the function :func:`reda.importers.sip04.import_sip04_data_all` for an importer that imports ALL data. Exported parameters: ================== ======================================================== key description ================== ======================================================== a First current electrode b Second current electrode m First potential electrode n Second potential electrode frequency Measurement frequency Temp_1 Temperature sensor 1 (optional) Temp_2 Temperature sensor 2 (optional) zt Complex Transfer Impedance (the measurement), mean value r Magnitude of mean measurements (=|zt|) rpha Resistance phase [mrad] zt_1 Complex Transfer Impedance, first repetition zt_2 Complex Transfer Impedance, second repetition zt_3 Complex Transfer Impedance, third repetition ContactResistance Contact resistance (mean value) ShuntResistance Shunt resistance used [Ohm] ================== ======================================================== Parameters ---------- data_filename : string Path to .mat or .csv file containing SIP-04 measurement results. Note that the .csv file does not contain all data contained in the .mat file! Returns ------- df : :py:class:`pandas.DataFrame` The data, contained in a DataFrame
[ "Import", "RELEVANT", "data", "from", "the", "result", "files", ".", "Refer", "to", "the", "function", ":", "func", ":", "reda", ".", "importers", ".", "sip04", ".", "import_sip04_data_all", "for", "an", "importer", "that", "imports", "ALL", "data", "." ]
python
train
mbedmicro/pyOCD
pyocd/probe/stlink/detect/linux.py
https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/probe/stlink/detect/linux.py#L87-L98
def _fat_mounts(self): """! Lists mounted devices with vfat file system (potential mbeds) @result Returns list of all mounted vfat devices @details Uses Linux shell command: 'mount' """ _stdout, _, retval = self._run_cli_process("mount") if not retval: for line in _stdout.splitlines(): if b"vfat" in line: match = self.mmp.search(line.decode("utf-8")) if match: yield match.group("dev"), match.group("dir")
[ "def", "_fat_mounts", "(", "self", ")", ":", "_stdout", ",", "_", ",", "retval", "=", "self", ".", "_run_cli_process", "(", "\"mount\"", ")", "if", "not", "retval", ":", "for", "line", "in", "_stdout", ".", "splitlines", "(", ")", ":", "if", "b\"vfat\"", "in", "line", ":", "match", "=", "self", ".", "mmp", ".", "search", "(", "line", ".", "decode", "(", "\"utf-8\"", ")", ")", "if", "match", ":", "yield", "match", ".", "group", "(", "\"dev\"", ")", ",", "match", ".", "group", "(", "\"dir\"", ")" ]
! Lists mounted devices with vfat file system (potential mbeds) @result Returns list of all mounted vfat devices @details Uses Linux shell command: 'mount'
[ "!", "Lists", "mounted", "devices", "with", "vfat", "file", "system", "(", "potential", "mbeds", ")" ]
python
train
addisonlynch/iexfinance
iexfinance/__init__.py
https://github.com/addisonlynch/iexfinance/blob/40f0bdcc51b329031d06178020fd774494250456/iexfinance/__init__.py#L136-L142
def get_market_last(symbols=None, **kwargs): """ MOVED to iexfinance.iexdata.get_last """ import warnings warnings.warn(WNG_MSG % ("get_market_last", "iexdata.get_last")) return Last(symbols, **kwargs).fetch()
[ "def", "get_market_last", "(", "symbols", "=", "None", ",", "*", "*", "kwargs", ")", ":", "import", "warnings", "warnings", ".", "warn", "(", "WNG_MSG", "%", "(", "\"get_market_last\"", ",", "\"iexdata.get_last\"", ")", ")", "return", "Last", "(", "symbols", ",", "*", "*", "kwargs", ")", ".", "fetch", "(", ")" ]
MOVED to iexfinance.iexdata.get_last
[ "MOVED", "to", "iexfinance", ".", "iexdata", ".", "get_last" ]
python
train
ravenac95/lxc4u
lxc4u/lxc.py
https://github.com/ravenac95/lxc4u/blob/4b5a9c8e25af97e5637db2f4c0c67d319ab0ed32/lxc4u/lxc.py#L56-L60
def start(self): """Start this LXC""" if self.status == 'RUNNING': raise LXCAlreadyStarted(self.name) self._service.start(self.name)
[ "def", "start", "(", "self", ")", ":", "if", "self", ".", "status", "==", "'RUNNING'", ":", "raise", "LXCAlreadyStarted", "(", "self", ".", "name", ")", "self", ".", "_service", ".", "start", "(", "self", ".", "name", ")" ]
Start this LXC
[ "Start", "this", "LXC" ]
python
train
miLibris/flask-rest-jsonapi
flask_rest_jsonapi/api.py
https://github.com/miLibris/flask-rest-jsonapi/blob/ecc8f2cd2b54cc0bfae7acd6cffcda0ba1140c43/flask_rest_jsonapi/api.py#L93-L124
def oauth_manager(self, oauth_manager): """Use the oauth manager to enable oauth for API :param oauth_manager: the oauth manager """ @self.app.before_request def before_request(): endpoint = request.endpoint resource = self.app.view_functions[endpoint].view_class if not getattr(resource, 'disable_oauth'): scopes = request.args.get('scopes') if getattr(resource, 'schema'): scopes = [self.build_scope(resource, request.method)] elif scopes: scopes = scopes.split(',') if scopes: scopes = scopes.split(',') valid, req = oauth_manager.verify_request(scopes) for func in oauth_manager._after_request_funcs: valid, req = func(valid, req) if not valid: if oauth_manager._invalid_response: return oauth_manager._invalid_response(req) return abort(401) request.oauth = req
[ "def", "oauth_manager", "(", "self", ",", "oauth_manager", ")", ":", "@", "self", ".", "app", ".", "before_request", "def", "before_request", "(", ")", ":", "endpoint", "=", "request", ".", "endpoint", "resource", "=", "self", ".", "app", ".", "view_functions", "[", "endpoint", "]", ".", "view_class", "if", "not", "getattr", "(", "resource", ",", "'disable_oauth'", ")", ":", "scopes", "=", "request", ".", "args", ".", "get", "(", "'scopes'", ")", "if", "getattr", "(", "resource", ",", "'schema'", ")", ":", "scopes", "=", "[", "self", ".", "build_scope", "(", "resource", ",", "request", ".", "method", ")", "]", "elif", "scopes", ":", "scopes", "=", "scopes", ".", "split", "(", "','", ")", "if", "scopes", ":", "scopes", "=", "scopes", ".", "split", "(", "','", ")", "valid", ",", "req", "=", "oauth_manager", ".", "verify_request", "(", "scopes", ")", "for", "func", "in", "oauth_manager", ".", "_after_request_funcs", ":", "valid", ",", "req", "=", "func", "(", "valid", ",", "req", ")", "if", "not", "valid", ":", "if", "oauth_manager", ".", "_invalid_response", ":", "return", "oauth_manager", ".", "_invalid_response", "(", "req", ")", "return", "abort", "(", "401", ")", "request", ".", "oauth", "=", "req" ]
Use the oauth manager to enable oauth for API :param oauth_manager: the oauth manager
[ "Use", "the", "oauth", "manager", "to", "enable", "oauth", "for", "API" ]
python
train
oceanprotocol/squid-py
squid_py/keeper/diagnostics.py
https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/keeper/diagnostics.py#L19-L66
def verify_contracts(): """ Verify that the contracts are deployed correctly in the network. :raise Exception: raise exception if the contracts are not deployed correctly. """ artifacts_path = ConfigProvider.get_config().keeper_path logger.info(f'Keeper contract artifacts (JSON abi files) at: {artifacts_path}') if os.environ.get('KEEPER_NETWORK_NAME'): logger.warning(f'The `KEEPER_NETWORK_NAME` env var is set to ' f'{os.environ.get("KEEPER_NETWORK_NAME")}. ' f'This enables the user to override the method of how the network name ' f'is inferred from network id.') # try to find contract with this network name contract_name = Diagnostics.TEST_CONTRACT_NAME network_id = Keeper.get_network_id() network_name = Keeper.get_network_name(network_id) logger.info(f'Using keeper contracts from network {network_name}, ' f'network id is {network_id}') logger.info(f'Looking for keeper contracts ending with ".{network_name}.json", ' f'e.g. "{contract_name}.{network_name}.json".') existing_contract_names = os.listdir(artifacts_path) try: ContractHandler.get(contract_name) except Exception as e: logger.error(e) logger.error(f'Cannot find the keeper contracts. \n' f'Current network id is {network_id} and network name is {network_name}.' f'Expected to find contracts ending with ".{network_name}.json",' f' e.g. "{contract_name}.{network_name}.json"') raise OceanKeeperContractsNotFound( f'Keeper contracts for keeper network {network_name} were not found ' f'in {artifacts_path}. \n' f'Found the following contracts: \n\t{existing_contract_names}' ) keeper = Keeper.get_instance() contracts = [keeper.dispenser, keeper.token, keeper.did_registry, keeper.agreement_manager, keeper.template_manager, keeper.condition_manager, keeper.access_secret_store_condition, keeper.sign_condition, keeper.lock_reward_condition, keeper.escrow_access_secretstore_template, keeper.escrow_reward_condition, keeper.hash_lock_condition ] addresses = '\n'.join([f'\t{c.name}: {c.address}' for c in contracts]) logging.info('Finished loading keeper contracts:\n' '%s', addresses)
[ "def", "verify_contracts", "(", ")", ":", "artifacts_path", "=", "ConfigProvider", ".", "get_config", "(", ")", ".", "keeper_path", "logger", ".", "info", "(", "f'Keeper contract artifacts (JSON abi files) at: {artifacts_path}'", ")", "if", "os", ".", "environ", ".", "get", "(", "'KEEPER_NETWORK_NAME'", ")", ":", "logger", ".", "warning", "(", "f'The `KEEPER_NETWORK_NAME` env var is set to '", "f'{os.environ.get(\"KEEPER_NETWORK_NAME\")}. '", "f'This enables the user to override the method of how the network name '", "f'is inferred from network id.'", ")", "# try to find contract with this network name", "contract_name", "=", "Diagnostics", ".", "TEST_CONTRACT_NAME", "network_id", "=", "Keeper", ".", "get_network_id", "(", ")", "network_name", "=", "Keeper", ".", "get_network_name", "(", "network_id", ")", "logger", ".", "info", "(", "f'Using keeper contracts from network {network_name}, '", "f'network id is {network_id}'", ")", "logger", ".", "info", "(", "f'Looking for keeper contracts ending with \".{network_name}.json\", '", "f'e.g. \"{contract_name}.{network_name}.json\".'", ")", "existing_contract_names", "=", "os", ".", "listdir", "(", "artifacts_path", ")", "try", ":", "ContractHandler", ".", "get", "(", "contract_name", ")", "except", "Exception", "as", "e", ":", "logger", ".", "error", "(", "e", ")", "logger", ".", "error", "(", "f'Cannot find the keeper contracts. \\n'", "f'Current network id is {network_id} and network name is {network_name}.'", "f'Expected to find contracts ending with \".{network_name}.json\",'", "f' e.g. \"{contract_name}.{network_name}.json\"'", ")", "raise", "OceanKeeperContractsNotFound", "(", "f'Keeper contracts for keeper network {network_name} were not found '", "f'in {artifacts_path}. \\n'", "f'Found the following contracts: \\n\\t{existing_contract_names}'", ")", "keeper", "=", "Keeper", ".", "get_instance", "(", ")", "contracts", "=", "[", "keeper", ".", "dispenser", ",", "keeper", ".", "token", ",", "keeper", ".", "did_registry", ",", "keeper", ".", "agreement_manager", ",", "keeper", ".", "template_manager", ",", "keeper", ".", "condition_manager", ",", "keeper", ".", "access_secret_store_condition", ",", "keeper", ".", "sign_condition", ",", "keeper", ".", "lock_reward_condition", ",", "keeper", ".", "escrow_access_secretstore_template", ",", "keeper", ".", "escrow_reward_condition", ",", "keeper", ".", "hash_lock_condition", "]", "addresses", "=", "'\\n'", ".", "join", "(", "[", "f'\\t{c.name}: {c.address}'", "for", "c", "in", "contracts", "]", ")", "logging", ".", "info", "(", "'Finished loading keeper contracts:\\n'", "'%s'", ",", "addresses", ")" ]
Verify that the contracts are deployed correctly in the network. :raise Exception: raise exception if the contracts are not deployed correctly.
[ "Verify", "that", "the", "contracts", "are", "deployed", "correctly", "in", "the", "network", "." ]
python
train
samjabrahams/anchorhub
anchorhub/util/hasattrs.py
https://github.com/samjabrahams/anchorhub/blob/5ade359b08297d4003a5f477389c01de9e634b54/anchorhub/util/hasattrs.py#L8-L21
def hasattrs(object, *names): """ Takes in an object and a variable length amount of named attributes, and checks to see if the object has each property. If any of the attributes are missing, this returns false. :param object: an object that may or may not contain the listed attributes :param names: a variable amount of attribute names to check for :return: True if the object contains each named attribute, false otherwise """ for name in names: if not hasattr(object, name): return False return True
[ "def", "hasattrs", "(", "object", ",", "*", "names", ")", ":", "for", "name", "in", "names", ":", "if", "not", "hasattr", "(", "object", ",", "name", ")", ":", "return", "False", "return", "True" ]
Takes in an object and a variable length amount of named attributes, and checks to see if the object has each property. If any of the attributes are missing, this returns false. :param object: an object that may or may not contain the listed attributes :param names: a variable amount of attribute names to check for :return: True if the object contains each named attribute, false otherwise
[ "Takes", "in", "an", "object", "and", "a", "variable", "length", "amount", "of", "named", "attributes", "and", "checks", "to", "see", "if", "the", "object", "has", "each", "property", ".", "If", "any", "of", "the", "attributes", "are", "missing", "this", "returns", "false", "." ]
python
train
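A minimal usage sketch for an `hasattrs`-style check, assuming only the standard library; the `Point` namedtuple is a made-up example and not part of anchorhub.

from collections import namedtuple

def hasattrs(obj, *names):
    # True only if every named attribute is present on obj.
    return all(hasattr(obj, name) for name in names)

Point = namedtuple("Point", ["x", "y"])
p = Point(1, 2)
print(hasattrs(p, "x", "y"))   # True
print(hasattrs(p, "x", "z"))   # False, "z" is missing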
msmbuilder/msmbuilder
msmbuilder/tpt/path.py
https://github.com/msmbuilder/msmbuilder/blob/556a93a170782f47be53f4a1e9d740fb1c8272b3/msmbuilder/tpt/path.py#L195-L318
def paths(sources, sinks, net_flux, remove_path='subtract', num_paths=np.inf, flux_cutoff=(1-1E-10)): """ Get the top N paths by iteratively performing Dijkstra's algorithm. Parameters ---------- sources : array_like, int One-dimensional list of nodes to define the source states. sinks : array_like, int One-dimensional list of nodes to define the sink states. net_flux : np.ndarray Net flux of the MSM remove_path : str or callable, optional Function for removing a path from the net flux matrix. (if str, one of {'subtract', 'bottleneck'}) See note below for more details. num_paths : int, optional Number of paths to find flux_cutoff : float, optional Quit looking for paths once the explained flux is greater than this cutoff (as a percentage of the total). Returns ------- paths : list List of paths. Each item is an array of nodes visited in the path. fluxes : np.ndarray, shape = [n_paths,] Flux of each path returned. Notes ----- The Dijkstra algorithm only allows for computing the *single* top flux pathway through the net flux matrix. If we want many paths, there are many ways of finding the *second* highest flux pathway. The algorithm proceeds as follows: 1. Using the Djikstra algorithm, find the highest flux pathway from the sources to the sink states 2. Remove that pathway from the net flux matrix by some criterion 3. Repeat (1) with the modified net flux matrix Currently, there are two schemes for step (2): - 'subtract' : Remove the path by subtracting the flux of the path from every edge in the path. This was suggested by Metzner, Schutte, and Vanden-Eijnden. Transition Path Theory for Markov Jump Processes. Multiscale Model. Simul. 7, 1192-1219 (2009). - 'bottleneck' : Remove the path by only removing the edge that corresponds to the bottleneck of the path. If a new scheme is desired, the user may pass a function that takes the net_flux and the path to remove and returns the new net flux matrix. See Also -------- msmbuilder.tpt.top_path : function for computing the single highest flux pathway through a network. References ---------- .. [1] Weinan, E. and Vanden-Eijnden, E. Towards a theory of transition paths. J. Stat. Phys. 123, 503-523 (2006). .. [2] Metzner, P., Schutte, C. & Vanden-Eijnden, E. Transition path theory for Markov jump processes. Multiscale Model. Simul. 7, 1192-1219 (2009). .. [3] Berezhkovskii, A., Hummer, G. & Szabo, A. Reactive flux and folding pathways in network models of coarse-grained protein dynamics. J. Chem. Phys. 130, 205102 (2009). .. [4] Dijkstra, E. W. A Note on Two Problems in Connexion with Graphs. Numeriche Mathematik 1, 269-271 (1959). .. [5] Noe, Frank, et al. "Constructing the equilibrium ensemble of folding pathways from short off-equilibrium simulations." PNAS 106.45 (2009): 19011-19016. 
""" if not callable(remove_path): if remove_path == 'subtract': remove_path = _subtract_path_flux elif remove_path == 'bottleneck': remove_path = _remove_bottleneck else: raise ValueError("remove_path_func (%s) must be a callable or one of ['subtract', 'bottleneck']" % str(remove_path)) net_flux = copy.copy(net_flux) paths = [] fluxes = [] total_flux = net_flux[sources, :].sum() # total flux is the total flux coming from the sources (or going into the sinks) not_done = True counter = 0 expl_flux = 0.0 while not_done: path, flux = top_path(sources, sinks, net_flux) if np.isinf(flux): break paths.append(path) fluxes.append(flux) expl_flux += flux / total_flux counter += 1 if counter >= num_paths or expl_flux >= flux_cutoff: break # modify the net_flux matrix net_flux = remove_path(net_flux, path) fluxes = np.array(fluxes) return paths, fluxes
[ "def", "paths", "(", "sources", ",", "sinks", ",", "net_flux", ",", "remove_path", "=", "'subtract'", ",", "num_paths", "=", "np", ".", "inf", ",", "flux_cutoff", "=", "(", "1", "-", "1E-10", ")", ")", ":", "if", "not", "callable", "(", "remove_path", ")", ":", "if", "remove_path", "==", "'subtract'", ":", "remove_path", "=", "_subtract_path_flux", "elif", "remove_path", "==", "'bottleneck'", ":", "remove_path", "=", "_remove_bottleneck", "else", ":", "raise", "ValueError", "(", "\"remove_path_func (%s) must be a callable or one of ['subtract', 'bottleneck']\"", "%", "str", "(", "remove_path", ")", ")", "net_flux", "=", "copy", ".", "copy", "(", "net_flux", ")", "paths", "=", "[", "]", "fluxes", "=", "[", "]", "total_flux", "=", "net_flux", "[", "sources", ",", ":", "]", ".", "sum", "(", ")", "# total flux is the total flux coming from the sources (or going into the sinks)", "not_done", "=", "True", "counter", "=", "0", "expl_flux", "=", "0.0", "while", "not_done", ":", "path", ",", "flux", "=", "top_path", "(", "sources", ",", "sinks", ",", "net_flux", ")", "if", "np", ".", "isinf", "(", "flux", ")", ":", "break", "paths", ".", "append", "(", "path", ")", "fluxes", ".", "append", "(", "flux", ")", "expl_flux", "+=", "flux", "/", "total_flux", "counter", "+=", "1", "if", "counter", ">=", "num_paths", "or", "expl_flux", ">=", "flux_cutoff", ":", "break", "# modify the net_flux matrix", "net_flux", "=", "remove_path", "(", "net_flux", ",", "path", ")", "fluxes", "=", "np", ".", "array", "(", "fluxes", ")", "return", "paths", ",", "fluxes" ]
Get the top N paths by iteratively performing Dijkstra's algorithm. Parameters ---------- sources : array_like, int One-dimensional list of nodes to define the source states. sinks : array_like, int One-dimensional list of nodes to define the sink states. net_flux : np.ndarray Net flux of the MSM remove_path : str or callable, optional Function for removing a path from the net flux matrix. (if str, one of {'subtract', 'bottleneck'}) See note below for more details. num_paths : int, optional Number of paths to find flux_cutoff : float, optional Quit looking for paths once the explained flux is greater than this cutoff (as a percentage of the total). Returns ------- paths : list List of paths. Each item is an array of nodes visited in the path. fluxes : np.ndarray, shape = [n_paths,] Flux of each path returned. Notes ----- The Dijkstra algorithm only allows for computing the *single* top flux pathway through the net flux matrix. If we want many paths, there are many ways of finding the *second* highest flux pathway. The algorithm proceeds as follows: 1. Using the Djikstra algorithm, find the highest flux pathway from the sources to the sink states 2. Remove that pathway from the net flux matrix by some criterion 3. Repeat (1) with the modified net flux matrix Currently, there are two schemes for step (2): - 'subtract' : Remove the path by subtracting the flux of the path from every edge in the path. This was suggested by Metzner, Schutte, and Vanden-Eijnden. Transition Path Theory for Markov Jump Processes. Multiscale Model. Simul. 7, 1192-1219 (2009). - 'bottleneck' : Remove the path by only removing the edge that corresponds to the bottleneck of the path. If a new scheme is desired, the user may pass a function that takes the net_flux and the path to remove and returns the new net flux matrix. See Also -------- msmbuilder.tpt.top_path : function for computing the single highest flux pathway through a network. References ---------- .. [1] Weinan, E. and Vanden-Eijnden, E. Towards a theory of transition paths. J. Stat. Phys. 123, 503-523 (2006). .. [2] Metzner, P., Schutte, C. & Vanden-Eijnden, E. Transition path theory for Markov jump processes. Multiscale Model. Simul. 7, 1192-1219 (2009). .. [3] Berezhkovskii, A., Hummer, G. & Szabo, A. Reactive flux and folding pathways in network models of coarse-grained protein dynamics. J. Chem. Phys. 130, 205102 (2009). .. [4] Dijkstra, E. W. A Note on Two Problems in Connexion with Graphs. Numeriche Mathematik 1, 269-271 (1959). .. [5] Noe, Frank, et al. "Constructing the equilibrium ensemble of folding pathways from short off-equilibrium simulations." PNAS 106.45 (2009): 19011-19016.
[ "Get", "the", "top", "N", "paths", "by", "iteratively", "performing", "Dijkstra", "s", "algorithm", "." ]
python
train
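A standalone sketch of the 'subtract' removal scheme described in the docstring above (subtract the path's bottleneck flux from every edge on the path); the helper name and the toy flux matrix are made up, and this is not msmbuilder's internal `_subtract_path_flux`.

import numpy as np

def subtract_path_flux(net_flux, path):
    # The flux carried by a path is its bottleneck (smallest edge);
    # removing the path means subtracting that flux from each edge on it.
    flux = min(net_flux[path[i], path[i + 1]] for i in range(len(path) - 1))
    new_flux = net_flux.copy()
    for i in range(len(path) - 1):
        new_flux[path[i], path[i + 1]] -= flux
    return new_flux

net = np.array([[0.0, 0.7, 0.3],
                [0.0, 0.0, 0.5],
                [0.0, 0.0, 0.0]])
print(subtract_path_flux(net, [0, 1, 2]))
# edge (0, 1) drops to 0.2 and edge (1, 2) to 0.0; edge (0, 2) is untouched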
avalente/appmetrics
appmetrics/statistics.py
https://github.com/avalente/appmetrics/blob/366fc7e1ca897e49a2227cbfa43bfa02a47f1acc/appmetrics/statistics.py#L458-L484
def get_histogram(data): """Return the histogram relative to the given data Assume that the data are already sorted """ count = len(data) if count < 2: raise StatisticsError('Too few data points ({}) for get_histogram'.format(count)) min_ = data[0] max_ = data[-1] std = stdev(data) bins = get_histogram_bins(min_, max_, std, count) res = {x: 0 for x in bins} for value in data: for bin_ in bins: if value <= bin_: res[bin_] += 1 break return sorted(iteritems(res))
[ "def", "get_histogram", "(", "data", ")", ":", "count", "=", "len", "(", "data", ")", "if", "count", "<", "2", ":", "raise", "StatisticsError", "(", "'Too few data points ({}) for get_histogram'", ".", "format", "(", "count", ")", ")", "min_", "=", "data", "[", "0", "]", "max_", "=", "data", "[", "-", "1", "]", "std", "=", "stdev", "(", "data", ")", "bins", "=", "get_histogram_bins", "(", "min_", ",", "max_", ",", "std", ",", "count", ")", "res", "=", "{", "x", ":", "0", "for", "x", "in", "bins", "}", "for", "value", "in", "data", ":", "for", "bin_", "in", "bins", ":", "if", "value", "<=", "bin_", ":", "res", "[", "bin_", "]", "+=", "1", "break", "return", "sorted", "(", "iteritems", "(", "res", ")", ")" ]
Return the histogram relative to the given data. Assume that the data are already sorted
[ "Return", "the", "histogram", "relative", "to", "the", "given", "data" ]
python
train
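A self-contained sketch of the counting step used in `get_histogram` above, with hand-picked bin edges standing in for the library's `get_histogram_bins` helper.

def count_into_bins(sorted_data, bin_edges):
    # Each value lands in the first bin whose upper edge is >= the value,
    # exactly like the inner loop of get_histogram.
    counts = {edge: 0 for edge in bin_edges}
    for value in sorted_data:
        for edge in bin_edges:
            if value <= edge:
                counts[edge] += 1
                break
    return sorted(counts.items())

print(count_into_bins([1, 2, 2, 3, 7, 9], [3, 6, 9]))
# [(3, 4), (6, 0), (9, 2)]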
log2timeline/plaso
plaso/parsers/esedb.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/esedb.py#L63-L104
def ParseFileObject(self, parser_mediator, file_object): """Parses an ESE database file-like object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): file-like object. """ esedb_file = pyesedb.file() try: esedb_file.open_file_object(file_object) except IOError as exception: parser_mediator.ProduceExtractionWarning( 'unable to open file with error: {0!s}'.format(exception)) return # Compare the list of available plugin objects. cache = ESEDBCache() try: table_names = frozenset(self._GetTableNames(esedb_file)) for plugin in self._plugins: if parser_mediator.abort: break if not plugin.required_tables.issubset(table_names): continue try: plugin.UpdateChainAndProcess( parser_mediator, cache=cache, database=esedb_file) except Exception as exception: # pylint: disable=broad-except parser_mediator.ProduceExtractionWarning(( 'plugin: {0:s} unable to parse ESE database with error: ' '{1!s}').format(plugin.NAME, exception)) finally: # TODO: explicitly clean up cache. esedb_file.close()
[ "def", "ParseFileObject", "(", "self", ",", "parser_mediator", ",", "file_object", ")", ":", "esedb_file", "=", "pyesedb", ".", "file", "(", ")", "try", ":", "esedb_file", ".", "open_file_object", "(", "file_object", ")", "except", "IOError", "as", "exception", ":", "parser_mediator", ".", "ProduceExtractionWarning", "(", "'unable to open file with error: {0!s}'", ".", "format", "(", "exception", ")", ")", "return", "# Compare the list of available plugin objects.", "cache", "=", "ESEDBCache", "(", ")", "try", ":", "table_names", "=", "frozenset", "(", "self", ".", "_GetTableNames", "(", "esedb_file", ")", ")", "for", "plugin", "in", "self", ".", "_plugins", ":", "if", "parser_mediator", ".", "abort", ":", "break", "if", "not", "plugin", ".", "required_tables", ".", "issubset", "(", "table_names", ")", ":", "continue", "try", ":", "plugin", ".", "UpdateChainAndProcess", "(", "parser_mediator", ",", "cache", "=", "cache", ",", "database", "=", "esedb_file", ")", "except", "Exception", "as", "exception", ":", "# pylint: disable=broad-except", "parser_mediator", ".", "ProduceExtractionWarning", "(", "(", "'plugin: {0:s} unable to parse ESE database with error: '", "'{1!s}'", ")", ".", "format", "(", "plugin", ".", "NAME", ",", "exception", ")", ")", "finally", ":", "# TODO: explicitly clean up cache.", "esedb_file", ".", "close", "(", ")" ]
Parses an ESE database file-like object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): file-like object.
[ "Parses", "an", "ESE", "database", "file", "-", "like", "object", "." ]
python
train
spotify/luigi
luigi/setup_logging.py
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/setup_logging.py#L157-L168
def _conf(cls, opts): """Setup logging via ini-file from logging_conf_file option.""" if not opts.logging_conf_file: return False if not os.path.exists(opts.logging_conf_file): # FileNotFoundError added only in Python 3.3 # https://docs.python.org/3/whatsnew/3.3.html#pep-3151-reworking-the-os-and-io-exception-hierarchy raise OSError("Error: Unable to locate specified logging configuration file!") logging.config.fileConfig(opts.logging_conf_file, disable_existing_loggers=False) return True
[ "def", "_conf", "(", "cls", ",", "opts", ")", ":", "if", "not", "opts", ".", "logging_conf_file", ":", "return", "False", "if", "not", "os", ".", "path", ".", "exists", "(", "opts", ".", "logging_conf_file", ")", ":", "# FileNotFoundError added only in Python 3.3", "# https://docs.python.org/3/whatsnew/3.3.html#pep-3151-reworking-the-os-and-io-exception-hierarchy", "raise", "OSError", "(", "\"Error: Unable to locate specified logging configuration file!\"", ")", "logging", ".", "config", ".", "fileConfig", "(", "opts", ".", "logging_conf_file", ",", "disable_existing_loggers", "=", "False", ")", "return", "True" ]
Setup logging via ini-file from logging_conf_file option.
[ "Setup", "logging", "via", "ini", "-", "file", "from", "logging_conf_file", "option", "." ]
python
train
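A hedged sketch of the same ini-file wiring using only the standard library; the file name is hypothetical, and the `disable_existing_loggers=False` choice mirrors the snippet above.

import logging.config
import os

def setup_logging_from_ini(path):
    # No path configured: report that nothing was set up.
    if not path:
        return False
    # A configured but missing file is an error rather than a silent no-op.
    if not os.path.exists(path):
        raise OSError("logging configuration file not found: %s" % path)
    logging.config.fileConfig(path, disable_existing_loggers=False)
    return True

# setup_logging_from_ini("logging.ini")  # hypothetical ini file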
lrq3000/pyFileFixity
pyFileFixity/lib/aux_funcs.py
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/aux_funcs.py#L331-L336
def group_files_by_size_simple(fileslist, nbgroups): # pragma: no cover """ Simple and fast files grouping strategy: just order by size, and group files n-by-n, so that files with the closest sizes are grouped together. In this strategy, there is only one file per subgroup, and thus there will often be remaining space left because there is no filling strategy here, but it's very fast. """ ford = sorted(fileslist.iteritems(), key=lambda x: x[1], reverse=True) ford = [[x[0]] for x in ford] return [group for group in grouper(nbgroups, ford)]
[ "def", "group_files_by_size_simple", "(", "fileslist", ",", "nbgroups", ")", ":", "# pragma: no cover", "ford", "=", "sorted", "(", "fileslist", ".", "iteritems", "(", ")", ",", "key", "=", "lambda", "x", ":", "x", "[", "1", "]", ",", "reverse", "=", "True", ")", "ford", "=", "[", "[", "x", "[", "0", "]", "]", "for", "x", "in", "ford", "]", "return", "[", "group", "for", "group", "in", "grouper", "(", "nbgroups", ",", "ford", ")", "]" ]
Simple and fast file grouping strategy: just order by size, and group files n-by-n, so that files with the closest sizes are grouped together. In this strategy, there is only one file per subgroup, and thus there will often be remaining space left because there is no filling strategy here, but it's very fast.
[ "Simple", "and", "fast", "files", "grouping", "strategy", ":", "just", "order", "by", "size", "and", "group", "files", "n", "-", "by", "-", "n", "so", "that", "files", "with", "the", "closest", "sizes", "are", "grouped", "together", ".", "In", "this", "strategy", "there", "is", "only", "one", "file", "per", "subgroup", "and", "thus", "there", "will", "often", "be", "remaining", "space", "left", "because", "there", "is", "no", "filling", "strategy", "here", "but", "it", "s", "very", "fast", "." ]
python
train
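A Python 3 sketch of the same strategy (order by size, cut the ordered list into consecutive chunks); the size dictionary is invented, and the one-file-per-subgroup nesting of the original is dropped for brevity.

def group_files_by_size_simple(sizes, nbgroups):
    # Largest files first, then consecutive chunks of nbgroups paths.
    ordered = [path for path, _ in
               sorted(sizes.items(), key=lambda kv: kv[1], reverse=True)]
    return [ordered[i:i + nbgroups] for i in range(0, len(ordered), nbgroups)]

sizes = {"a.bin": 500, "b.bin": 480, "c.bin": 120, "d.bin": 100}
print(group_files_by_size_simple(sizes, 2))
# [['a.bin', 'b.bin'], ['c.bin', 'd.bin']]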
elliterate/capybara.py
capybara/server.py
https://github.com/elliterate/capybara.py/blob/0c6ae449cc37e4445ec3cd6af95674533beedc6c/capybara/server.py#L68-L98
def boot(self): """ Boots a server for the app, if it isn't already booted. Returns: Server: This server. """ if not self.responsive: # Remember the port so we can reuse it if we try to serve this same app again. type(self)._ports[self.port_key] = self.port init_func = capybara.servers[capybara.server_name] init_args = (self.middleware, self.port, self.host) self.server_thread = Thread(target=init_func, args=init_args) # Inform Python that it shouldn't wait for this thread to terminate before # exiting. (It will still be appropriately terminated when the process exits.) self.server_thread.daemon = True self.server_thread.start() # Make sure the server actually starts and becomes responsive. timer = Timer(60) while not self.responsive: if timer.expired: raise RuntimeError("WSGI application timed out during boot") self.server_thread.join(0.1) return self
[ "def", "boot", "(", "self", ")", ":", "if", "not", "self", ".", "responsive", ":", "# Remember the port so we can reuse it if we try to serve this same app again.", "type", "(", "self", ")", ".", "_ports", "[", "self", ".", "port_key", "]", "=", "self", ".", "port", "init_func", "=", "capybara", ".", "servers", "[", "capybara", ".", "server_name", "]", "init_args", "=", "(", "self", ".", "middleware", ",", "self", ".", "port", ",", "self", ".", "host", ")", "self", ".", "server_thread", "=", "Thread", "(", "target", "=", "init_func", ",", "args", "=", "init_args", ")", "# Inform Python that it shouldn't wait for this thread to terminate before", "# exiting. (It will still be appropriately terminated when the process exits.)", "self", ".", "server_thread", ".", "daemon", "=", "True", "self", ".", "server_thread", ".", "start", "(", ")", "# Make sure the server actually starts and becomes responsive.", "timer", "=", "Timer", "(", "60", ")", "while", "not", "self", ".", "responsive", ":", "if", "timer", ".", "expired", ":", "raise", "RuntimeError", "(", "\"WSGI application timed out during boot\"", ")", "self", ".", "server_thread", ".", "join", "(", "0.1", ")", "return", "self" ]
Boots a server for the app, if it isn't already booted. Returns: Server: This server.
[ "Boots", "a", "server", "for", "the", "app", "if", "it", "isn", "t", "already", "booted", "." ]
python
test
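A stripped-down sketch of the boot-and-poll pattern shown above, using only the standard library; `serve` and `is_responsive` are placeholder callables, not Capybara APIs.

import threading
import time

def boot(serve, is_responsive, timeout=60.0):
    # Run the server in a daemon thread so it never blocks interpreter exit,
    # then poll until it answers or the deadline passes.
    thread = threading.Thread(target=serve, daemon=True)
    thread.start()
    deadline = time.monotonic() + timeout
    while not is_responsive():
        if time.monotonic() > deadline:
            raise RuntimeError("server did not become responsive in time")
        thread.join(0.1)
    return thread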
6809/MC6809
MC6809/components/mc6809_ops_logic.py
https://github.com/6809/MC6809/blob/6ba2f5106df46689017b5d0b6d84d43b7ee6a240/MC6809/components/mc6809_ops_logic.py#L172-L182
def instruction_LSL_memory(self, opcode, ea, m): """ Logical shift left memory location / Arithmetic shift of memory left """ r = self.LSL(m) # log.debug("$%x LSL memory value $%x << 1 = $%x and write it to $%x \t| %s" % ( # self.program_counter, # m, r, ea, # self.cfg.mem_info.get_shortest(ea) # )) return ea, r & 0xff
[ "def", "instruction_LSL_memory", "(", "self", ",", "opcode", ",", "ea", ",", "m", ")", ":", "r", "=", "self", ".", "LSL", "(", "m", ")", "# log.debug(\"$%x LSL memory value $%x << 1 = $%x and write it to $%x \\t| %s\" % (", "# self.program_counter,", "# m, r, ea,", "# self.cfg.mem_info.get_shortest(ea)", "# ))", "return", "ea", ",", "r", "&", "0xff" ]
Logical shift left memory location / Arithmetic shift of memory left
[ "Logical", "shift", "left", "memory", "location", "/", "Arithmetic", "shift", "of", "memory", "left" ]
python
train
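A plain-arithmetic sketch of an 8-bit logical shift left with the carry taken from bit 7; the real CPU core also updates the other condition-code flags, which is omitted here.

def lsl8(value):
    # Bit 7 falls into the carry, bit 0 is filled with zero,
    # and the result is masked back to one byte.
    carry = (value >> 7) & 1
    result = (value << 1) & 0xFF
    return result, carry

print(lsl8(0b1001_0110))   # (0b0010_1100, 1) == (44, 1)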
ska-sa/katversion
katversion/build.py
https://github.com/ska-sa/katversion/blob/f507e46e6c5610aec89a08dd480c9b3721da0f8a/katversion/build.py#L32-L58
def patch_init_py(base_dir, name, version): """Patch __init__.py to remove version check and append hard-coded version.""" # Ensure main package dir is there (may be absent in script-only packages) package_dir = os.path.join(base_dir, name) if not os.path.isdir(package_dir): os.makedirs(package_dir) # Open top-level __init__.py and read whole file init_py = os.path.join(package_dir, '__init__.py') log.info("patching %s to bake in version '%s'" % (init_py, version)) with open(init_py, 'r+') as init_file: lines = init_file.readlines() # Search for sentinels indicating version checking block try: begin = lines.index("# BEGIN VERSION CHECK\n") end = lines.index("# END VERSION CHECK\n") except ValueError: begin = end = len(lines) # Delete existing repo version checking block in file init_file.seek(0) init_file.writelines(lines[:begin] + lines[end+1:]) # Append new version attribute to ensure it is authoritative, but only # if it is not already there (this happens in pip sdist installs) version_cmd = "__version__ = '{0}'\n".format(version) if not lines or lines[-1] != version_cmd: init_file.write("\n# Automatically added by katversion\n") init_file.write(version_cmd) init_file.truncate()
[ "def", "patch_init_py", "(", "base_dir", ",", "name", ",", "version", ")", ":", "# Ensure main package dir is there (may be absent in script-only packages)", "package_dir", "=", "os", ".", "path", ".", "join", "(", "base_dir", ",", "name", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "package_dir", ")", ":", "os", ".", "makedirs", "(", "package_dir", ")", "# Open top-level __init__.py and read whole file", "init_py", "=", "os", ".", "path", ".", "join", "(", "package_dir", ",", "'__init__.py'", ")", "log", ".", "info", "(", "\"patching %s to bake in version '%s'\"", "%", "(", "init_py", ",", "version", ")", ")", "with", "open", "(", "init_py", ",", "'r+'", ")", "as", "init_file", ":", "lines", "=", "init_file", ".", "readlines", "(", ")", "# Search for sentinels indicating version checking block", "try", ":", "begin", "=", "lines", ".", "index", "(", "\"# BEGIN VERSION CHECK\\n\"", ")", "end", "=", "lines", ".", "index", "(", "\"# END VERSION CHECK\\n\"", ")", "except", "ValueError", ":", "begin", "=", "end", "=", "len", "(", "lines", ")", "# Delete existing repo version checking block in file", "init_file", ".", "seek", "(", "0", ")", "init_file", ".", "writelines", "(", "lines", "[", ":", "begin", "]", "+", "lines", "[", "end", "+", "1", ":", "]", ")", "# Append new version attribute to ensure it is authoritative, but only", "# if it is not already there (this happens in pip sdist installs)", "version_cmd", "=", "\"__version__ = '{0}'\\n\"", ".", "format", "(", "version", ")", "if", "not", "lines", "or", "lines", "[", "-", "1", "]", "!=", "version_cmd", ":", "init_file", ".", "write", "(", "\"\\n# Automatically added by katversion\\n\"", ")", "init_file", ".", "write", "(", "version_cmd", ")", "init_file", ".", "truncate", "(", ")" ]
Patch __init__.py to remove version check and append hard-coded version.
[ "Patch", "__init__", ".", "py", "to", "remove", "version", "check", "and", "append", "hard", "-", "coded", "version", "." ]
python
train
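A pure-string sketch of the sentinel handling in `patch_init_py` above, operating on a list of lines instead of a file; the sample lines are invented.

def strip_version_check(lines, version):
    # Drop everything between the BEGIN/END sentinels (inclusive) and
    # append a hard-coded __version__ unless it is already the last line.
    try:
        begin = lines.index("# BEGIN VERSION CHECK\n")
        end = lines.index("# END VERSION CHECK\n")
    except ValueError:
        begin = end = len(lines)
    out = lines[:begin] + lines[end + 1:]
    version_line = "__version__ = '{0}'\n".format(version)
    if not out or out[-1] != version_line:
        out.append(version_line)
    return out

print(strip_version_check(["import os\n",
                           "# BEGIN VERSION CHECK\n",
                           "check_repo()\n",
                           "# END VERSION CHECK\n"], "1.2.3"))
# ['import os\n', "__version__ = '1.2.3'\n"]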
spacetelescope/synphot_refactor
synphot/utils.py
https://github.com/spacetelescope/synphot_refactor/blob/9c064f3cff0c41dd8acadc0f67c6350931275b9f/synphot/utils.py#L54-L73
def validate_totalflux(totalflux): """Check integrated flux for invalid values. Parameters ---------- totalflux : float Integrated flux. Raises ------ synphot.exceptions.SynphotError Input is zero, negative, or not a number. """ if totalflux <= 0.0: raise exceptions.SynphotError('Integrated flux is <= 0') elif np.isnan(totalflux): raise exceptions.SynphotError('Integrated flux is NaN') elif np.isinf(totalflux): raise exceptions.SynphotError('Integrated flux is infinite')
[ "def", "validate_totalflux", "(", "totalflux", ")", ":", "if", "totalflux", "<=", "0.0", ":", "raise", "exceptions", ".", "SynphotError", "(", "'Integrated flux is <= 0'", ")", "elif", "np", ".", "isnan", "(", "totalflux", ")", ":", "raise", "exceptions", ".", "SynphotError", "(", "'Integrated flux is NaN'", ")", "elif", "np", ".", "isinf", "(", "totalflux", ")", ":", "raise", "exceptions", ".", "SynphotError", "(", "'Integrated flux is infinite'", ")" ]
Check integrated flux for invalid values. Parameters ---------- totalflux : float Integrated flux. Raises ------ synphot.exceptions.SynphotError Input is zero, negative, or not a number.
[ "Check", "integrated", "flux", "for", "invalid", "values", "." ]
python
train
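A minimal sketch of the same validation with a stock `ValueError` standing in for synphot's exception class.

import numpy as np

def validate_totalflux(totalflux):
    # NaN fails the <= 0 comparison, so the checks keep the same order as above.
    if totalflux <= 0.0:
        raise ValueError('Integrated flux is <= 0')
    elif np.isnan(totalflux):
        raise ValueError('Integrated flux is NaN')
    elif np.isinf(totalflux):
        raise ValueError('Integrated flux is infinite')

validate_totalflux(1.5)              # passes silently
# validate_totalflux(float('inf'))   # would raise ValueError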
etingof/pysnmp
pysnmp/entity/rfc3413/cmdrsp.py
https://github.com/etingof/pysnmp/blob/cde062dd42f67dfd2d7686286a322d40e9c3a4b7/pysnmp/entity/rfc3413/cmdrsp.py#L339-L375
def _getManagedObjectsInstances(self, varBinds, **context): """Iterate over Managed Objects fulfilling SNMP query. Returns ------- :py:class:`list` - List of Managed Objects Instances to respond with or `None` to indicate that not all objects have been gathered so far. """ rspVarBinds = context['rspVarBinds'] varBindsMap = context['varBindsMap'] rtrVarBinds = [] for idx, varBind in enumerate(varBinds): name, val = varBind if (exval.noSuchObject.isSameTypeWith(val) or exval.noSuchInstance.isSameTypeWith(val)): varBindsMap[len(rtrVarBinds)] = varBindsMap.pop(idx, idx) rtrVarBinds.append(varBind) else: rspVarBinds[varBindsMap.pop(idx, idx)] = varBind if rtrVarBinds: snmpEngine = context['snmpEngine'] # Need to unwind stack, can't recurse any more def callLater(*args): snmpEngine.transportDispatcher.unregisterTimerCbFun(callLater) mgmtFun = context['mgmtFun'] mgmtFun(*varBinds, **context) snmpEngine.transportDispatcher.registerTimerCbFun(callLater, 0.01) else: return rspVarBinds
[ "def", "_getManagedObjectsInstances", "(", "self", ",", "varBinds", ",", "*", "*", "context", ")", ":", "rspVarBinds", "=", "context", "[", "'rspVarBinds'", "]", "varBindsMap", "=", "context", "[", "'varBindsMap'", "]", "rtrVarBinds", "=", "[", "]", "for", "idx", ",", "varBind", "in", "enumerate", "(", "varBinds", ")", ":", "name", ",", "val", "=", "varBind", "if", "(", "exval", ".", "noSuchObject", ".", "isSameTypeWith", "(", "val", ")", "or", "exval", ".", "noSuchInstance", ".", "isSameTypeWith", "(", "val", ")", ")", ":", "varBindsMap", "[", "len", "(", "rtrVarBinds", ")", "]", "=", "varBindsMap", ".", "pop", "(", "idx", ",", "idx", ")", "rtrVarBinds", ".", "append", "(", "varBind", ")", "else", ":", "rspVarBinds", "[", "varBindsMap", ".", "pop", "(", "idx", ",", "idx", ")", "]", "=", "varBind", "if", "rtrVarBinds", ":", "snmpEngine", "=", "context", "[", "'snmpEngine'", "]", "# Need to unwind stack, can't recurse any more", "def", "callLater", "(", "*", "args", ")", ":", "snmpEngine", ".", "transportDispatcher", ".", "unregisterTimerCbFun", "(", "callLater", ")", "mgmtFun", "=", "context", "[", "'mgmtFun'", "]", "mgmtFun", "(", "*", "varBinds", ",", "*", "*", "context", ")", "snmpEngine", ".", "transportDispatcher", ".", "registerTimerCbFun", "(", "callLater", ",", "0.01", ")", "else", ":", "return", "rspVarBinds" ]
Iterate over Managed Objects fulfilling SNMP query. Returns ------- :py:class:`list` - List of Managed Objects Instances to respond with or `None` to indicate that not all objects have been gathered so far.
[ "Iterate", "over", "Managed", "Objects", "fulfilling", "SNMP", "query", "." ]
python
train
twilio/twilio-python
twilio/rest/api/v2010/account/signing_key.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/api/v2010/account/signing_key.py#L168-L177
def get_instance(self, payload): """ Build an instance of SigningKeyInstance :param dict payload: Payload response from the API :returns: twilio.rest.api.v2010.account.signing_key.SigningKeyInstance :rtype: twilio.rest.api.v2010.account.signing_key.SigningKeyInstance """ return SigningKeyInstance(self._version, payload, account_sid=self._solution['account_sid'], )
[ "def", "get_instance", "(", "self", ",", "payload", ")", ":", "return", "SigningKeyInstance", "(", "self", ".", "_version", ",", "payload", ",", "account_sid", "=", "self", ".", "_solution", "[", "'account_sid'", "]", ",", ")" ]
Build an instance of SigningKeyInstance :param dict payload: Payload response from the API :returns: twilio.rest.api.v2010.account.signing_key.SigningKeyInstance :rtype: twilio.rest.api.v2010.account.signing_key.SigningKeyInstance
[ "Build", "an", "instance", "of", "SigningKeyInstance" ]
python
train
INM-6/hybridLFPy
examples/Hagen_et_al_2016_cercor/plot_methods.py
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/Hagen_et_al_2016_cercor/plot_methods.py#L738-L853
def plot_signal_sum(ax, params, fname='LFPsum.h5', unit='mV', scaling_factor=1., ylabels=True, scalebar=True, vlimround=None, T=[800, 1000], ylim=[-1500, 0], color='k', fancy=False, label='', transient=200, clip_on=False, rasterized=True, **kwargs): ''' on axes plot the summed LFP contributions args: :: ax : matplotlib.axes.AxesSubplot object fname : str/np.ndarray, path to h5 file or ndim=2 numpy.ndarray unit : str, scalebar unit scaling_factor : float, scaling factor (e.g. to scale 10% data set up) ylabels : bool, show labels on y-axis scalebar : bool, show scalebar in plot vlimround : None/float, override autoscaling of data and scalebar T : list, [tstart, tstop], which timeinterval ylim : list of floats, see plt.gca().set_ylim color : str/colorspec tuple, color of shown lines fancy : bool, label : str, line labels rasterized : bool, rasterize line plots if true kwargs : additional keyword arguments passed to ax.plot() returns: :: vlimround : float, scalebar scaling factor, i.e., to match up plots ''' if type(fname) == str and os.path.isfile(fname): f = h5py.File(fname) #load data data = f['data'].value tvec = np.arange(data.shape[1]) * 1000. / f['srate'].value #for mean subtraction datameanaxis1 = f['data'].value[:, tvec >= transient].mean(axis=1) #close dataset f.close() elif type(fname) == np.ndarray and fname.ndim==2: data = fname tvec = np.arange(data.shape[1]) * params.dt_output datameanaxis1 = data[:, tvec >= transient].mean(axis=1) else: raise Exception, 'type(fname)={} not str or numpy.ndarray'.format(type(fname)) # slice slica = (tvec <= T[1]) & (tvec >= T[0]) data = data[:,slica] #subtract mean in each channel #dataT = data.T - data.mean(axis=1) dataT = data.T - datameanaxis1 data = dataT.T # normalize data = data*scaling_factor zvec = np.r_[params.electrodeParams['z']] zvec = np.r_[zvec, zvec[-1] + np.diff(zvec)[-1]] vlim = abs(data).max() if vlimround is None: vlimround = 2.**np.round(np.log2(vlim)) else: pass yticklabels=[] yticks = [] if fancy: colors=phlp.get_colors(data.shape[0]) else: colors = [color]*data.shape[0] for i, z in enumerate(params.electrodeParams['z']): if i == 0: ax.plot(tvec[slica], data[i] * 100 / vlimround + z, color=colors[i], rasterized=rasterized, label=label, clip_on=clip_on, **kwargs) else: ax.plot(tvec[slica], data[i] * 100 / vlimround + z, color=colors[i], rasterized=rasterized, clip_on=clip_on, **kwargs) yticklabels.append('ch. %i' % (i+1)) yticks.append(z) if scalebar: ax.plot([tvec[slica][-1], tvec[slica][-1]], [-1300, -1400], lw=2, color='k', clip_on=False) ax.text(tvec[slica][-1]+np.diff(T)*0.02, -1350, r'%g %s' % (vlimround, unit), color='k', rotation='vertical', va='center') ax.axis(ax.axis('tight')) ax.yaxis.set_ticks(yticks) if ylabels: ax.yaxis.set_ticklabels(yticklabels) else: ax.yaxis.set_ticklabels([]) for loc, spine in ax.spines.iteritems(): if loc in ['right', 'top']: spine.set_color('none') ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position('left') ax.set_xlabel(r'$t$ (ms)', labelpad=0.1) ax.set_ylim(ylim) return vlimround
[ "def", "plot_signal_sum", "(", "ax", ",", "params", ",", "fname", "=", "'LFPsum.h5'", ",", "unit", "=", "'mV'", ",", "scaling_factor", "=", "1.", ",", "ylabels", "=", "True", ",", "scalebar", "=", "True", ",", "vlimround", "=", "None", ",", "T", "=", "[", "800", ",", "1000", "]", ",", "ylim", "=", "[", "-", "1500", ",", "0", "]", ",", "color", "=", "'k'", ",", "fancy", "=", "False", ",", "label", "=", "''", ",", "transient", "=", "200", ",", "clip_on", "=", "False", ",", "rasterized", "=", "True", ",", "*", "*", "kwargs", ")", ":", "if", "type", "(", "fname", ")", "==", "str", "and", "os", ".", "path", ".", "isfile", "(", "fname", ")", ":", "f", "=", "h5py", ".", "File", "(", "fname", ")", "#load data", "data", "=", "f", "[", "'data'", "]", ".", "value", "tvec", "=", "np", ".", "arange", "(", "data", ".", "shape", "[", "1", "]", ")", "*", "1000.", "/", "f", "[", "'srate'", "]", ".", "value", "#for mean subtraction", "datameanaxis1", "=", "f", "[", "'data'", "]", ".", "value", "[", ":", ",", "tvec", ">=", "transient", "]", ".", "mean", "(", "axis", "=", "1", ")", "#close dataset", "f", ".", "close", "(", ")", "elif", "type", "(", "fname", ")", "==", "np", ".", "ndarray", "and", "fname", ".", "ndim", "==", "2", ":", "data", "=", "fname", "tvec", "=", "np", ".", "arange", "(", "data", ".", "shape", "[", "1", "]", ")", "*", "params", ".", "dt_output", "datameanaxis1", "=", "data", "[", ":", ",", "tvec", ">=", "transient", "]", ".", "mean", "(", "axis", "=", "1", ")", "else", ":", "raise", "Exception", ",", "'type(fname)={} not str or numpy.ndarray'", ".", "format", "(", "type", "(", "fname", ")", ")", "# slice", "slica", "=", "(", "tvec", "<=", "T", "[", "1", "]", ")", "&", "(", "tvec", ">=", "T", "[", "0", "]", ")", "data", "=", "data", "[", ":", ",", "slica", "]", "#subtract mean in each channel", "#dataT = data.T - data.mean(axis=1)", "dataT", "=", "data", ".", "T", "-", "datameanaxis1", "data", "=", "dataT", ".", "T", "# normalize", "data", "=", "data", "*", "scaling_factor", "zvec", "=", "np", ".", "r_", "[", "params", ".", "electrodeParams", "[", "'z'", "]", "]", "zvec", "=", "np", ".", "r_", "[", "zvec", ",", "zvec", "[", "-", "1", "]", "+", "np", ".", "diff", "(", "zvec", ")", "[", "-", "1", "]", "]", "vlim", "=", "abs", "(", "data", ")", ".", "max", "(", ")", "if", "vlimround", "is", "None", ":", "vlimround", "=", "2.", "**", "np", ".", "round", "(", "np", ".", "log2", "(", "vlim", ")", ")", "else", ":", "pass", "yticklabels", "=", "[", "]", "yticks", "=", "[", "]", "if", "fancy", ":", "colors", "=", "phlp", ".", "get_colors", "(", "data", ".", "shape", "[", "0", "]", ")", "else", ":", "colors", "=", "[", "color", "]", "*", "data", ".", "shape", "[", "0", "]", "for", "i", ",", "z", "in", "enumerate", "(", "params", ".", "electrodeParams", "[", "'z'", "]", ")", ":", "if", "i", "==", "0", ":", "ax", ".", "plot", "(", "tvec", "[", "slica", "]", ",", "data", "[", "i", "]", "*", "100", "/", "vlimround", "+", "z", ",", "color", "=", "colors", "[", "i", "]", ",", "rasterized", "=", "rasterized", ",", "label", "=", "label", ",", "clip_on", "=", "clip_on", ",", "*", "*", "kwargs", ")", "else", ":", "ax", ".", "plot", "(", "tvec", "[", "slica", "]", ",", "data", "[", "i", "]", "*", "100", "/", "vlimround", "+", "z", ",", "color", "=", "colors", "[", "i", "]", ",", "rasterized", "=", "rasterized", ",", "clip_on", "=", "clip_on", ",", "*", "*", "kwargs", ")", "yticklabels", ".", "append", "(", "'ch. 
%i'", "%", "(", "i", "+", "1", ")", ")", "yticks", ".", "append", "(", "z", ")", "if", "scalebar", ":", "ax", ".", "plot", "(", "[", "tvec", "[", "slica", "]", "[", "-", "1", "]", ",", "tvec", "[", "slica", "]", "[", "-", "1", "]", "]", ",", "[", "-", "1300", ",", "-", "1400", "]", ",", "lw", "=", "2", ",", "color", "=", "'k'", ",", "clip_on", "=", "False", ")", "ax", ".", "text", "(", "tvec", "[", "slica", "]", "[", "-", "1", "]", "+", "np", ".", "diff", "(", "T", ")", "*", "0.02", ",", "-", "1350", ",", "r'%g %s'", "%", "(", "vlimround", ",", "unit", ")", ",", "color", "=", "'k'", ",", "rotation", "=", "'vertical'", ",", "va", "=", "'center'", ")", "ax", ".", "axis", "(", "ax", ".", "axis", "(", "'tight'", ")", ")", "ax", ".", "yaxis", ".", "set_ticks", "(", "yticks", ")", "if", "ylabels", ":", "ax", ".", "yaxis", ".", "set_ticklabels", "(", "yticklabels", ")", "else", ":", "ax", ".", "yaxis", ".", "set_ticklabels", "(", "[", "]", ")", "for", "loc", ",", "spine", "in", "ax", ".", "spines", ".", "iteritems", "(", ")", ":", "if", "loc", "in", "[", "'right'", ",", "'top'", "]", ":", "spine", ".", "set_color", "(", "'none'", ")", "ax", ".", "xaxis", ".", "set_ticks_position", "(", "'bottom'", ")", "ax", ".", "yaxis", ".", "set_ticks_position", "(", "'left'", ")", "ax", ".", "set_xlabel", "(", "r'$t$ (ms)'", ",", "labelpad", "=", "0.1", ")", "ax", ".", "set_ylim", "(", "ylim", ")", "return", "vlimround" ]
on axes plot the summed LFP contributions args: :: ax : matplotlib.axes.AxesSubplot object fname : str/np.ndarray, path to h5 file or ndim=2 numpy.ndarray unit : str, scalebar unit scaling_factor : float, scaling factor (e.g. to scale 10% data set up) ylabels : bool, show labels on y-axis scalebar : bool, show scalebar in plot vlimround : None/float, override autoscaling of data and scalebar T : list, [tstart, tstop], which timeinterval ylim : list of floats, see plt.gca().set_ylim color : str/colorspec tuple, color of shown lines fancy : bool, label : str, line labels rasterized : bool, rasterize line plots if true kwargs : additional keyword arguments passed to ax.plot() returns: :: vlimround : float, scalebar scaling factor, i.e., to match up plots
[ "on", "axes", "plot", "the", "summed", "LFP", "contributions", "args", ":", "::", "ax", ":", "matplotlib", ".", "axes", ".", "AxesSubplot", "object", "fname", ":", "str", "/", "np", ".", "ndarray", "path", "to", "h5", "file", "or", "ndim", "=", "2", "numpy", ".", "ndarray", "unit", ":", "str", "scalebar", "unit", "scaling_factor", ":", "float", "scaling", "factor", "(", "e", ".", "g", ".", "to", "scale", "10%", "data", "set", "up", ")", "ylabels", ":", "bool", "show", "labels", "on", "y", "-", "axis", "scalebar", ":", "bool", "show", "scalebar", "in", "plot", "vlimround", ":", "None", "/", "float", "override", "autoscaling", "of", "data", "and", "scalebar", "T", ":", "list", "[", "tstart", "tstop", "]", "which", "timeinterval", "ylim", ":", "list", "of", "floats", "see", "plt", ".", "gca", "()", ".", "set_ylim", "color", ":", "str", "/", "colorspec", "tuple", "color", "of", "shown", "lines", "fancy", ":", "bool", "label", ":", "str", "line", "labels", "rasterized", ":", "bool", "rasterize", "line", "plots", "if", "true", "kwargs", ":", "additional", "keyword", "arguments", "passed", "to", "ax", ".", "plot", "()", "returns", ":", "::", "vlimround", ":", "float", "scalebar", "scaling", "factor", "i", ".", "e", ".", "to", "match", "up", "plots" ]
python
train
ajyoon/blur
examples/softlife/softlife.py
https://github.com/ajyoon/blur/blob/25fcf083af112bb003956a7a7e1c6ff7d8fef279/examples/softlife/softlife.py#L89-L97
def draw_canvas(): """Render the tkinter canvas based on the state of ``world``""" for x in range(len(world)): for y in range(len(world[x])): if world[x][y].value: color = world[x][y].color_alive.get_as_hex() else: color = world[x][y].color_dead.get_as_hex() canvas.itemconfig(canvas_grid[x][y], fill=color)
[ "def", "draw_canvas", "(", ")", ":", "for", "x", "in", "range", "(", "len", "(", "world", ")", ")", ":", "for", "y", "in", "range", "(", "len", "(", "world", "[", "x", "]", ")", ")", ":", "if", "world", "[", "x", "]", "[", "y", "]", ".", "value", ":", "color", "=", "world", "[", "x", "]", "[", "y", "]", ".", "color_alive", ".", "get_as_hex", "(", ")", "else", ":", "color", "=", "world", "[", "x", "]", "[", "y", "]", ".", "color_dead", ".", "get_as_hex", "(", ")", "canvas", ".", "itemconfig", "(", "canvas_grid", "[", "x", "]", "[", "y", "]", ",", "fill", "=", "color", ")" ]
Render the tkinter canvas based on the state of ``world``
[ "Render", "the", "tkinter", "canvas", "based", "on", "the", "state", "of", "world" ]
python
train
secdev/scapy
scapy/layers/tls/record_tls13.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/layers/tls/record_tls13.py#L172-L179
def _tls_auth_encrypt(self, s): """ Return the TLSCiphertext.encrypted_record for AEAD ciphers. """ wcs = self.tls_session.wcs write_seq_num = struct.pack("!Q", wcs.seq_num) wcs.seq_num += 1 return wcs.cipher.auth_encrypt(s, b"", write_seq_num)
[ "def", "_tls_auth_encrypt", "(", "self", ",", "s", ")", ":", "wcs", "=", "self", ".", "tls_session", ".", "wcs", "write_seq_num", "=", "struct", ".", "pack", "(", "\"!Q\"", ",", "wcs", ".", "seq_num", ")", "wcs", ".", "seq_num", "+=", "1", "return", "wcs", ".", "cipher", ".", "auth_encrypt", "(", "s", ",", "b\"\"", ",", "write_seq_num", ")" ]
Return the TLSCiphertext.encrypted_record for AEAD ciphers.
[ "Return", "the", "TLSCiphertext", ".", "encrypted_record", "for", "AEAD", "ciphers", "." ]
python
train
earwig/mwparserfromhell
mwparserfromhell/parser/tokenizer.py
https://github.com/earwig/mwparserfromhell/blob/98dc30902d35c714a70aca8e6616f49d71cb24cc/mwparserfromhell/parser/tokenizer.py#L903-L913
def _parse_tag(self): """Parse an HTML tag at the head of the wikicode string.""" reset = self._head self._head += 1 try: tag = self._really_parse_tag() except BadRoute: self._head = reset self._emit_text("<") else: self._emit_all(tag)
[ "def", "_parse_tag", "(", "self", ")", ":", "reset", "=", "self", ".", "_head", "self", ".", "_head", "+=", "1", "try", ":", "tag", "=", "self", ".", "_really_parse_tag", "(", ")", "except", "BadRoute", ":", "self", ".", "_head", "=", "reset", "self", ".", "_emit_text", "(", "\"<\"", ")", "else", ":", "self", ".", "_emit_all", "(", "tag", ")" ]
Parse an HTML tag at the head of the wikicode string.
[ "Parse", "an", "HTML", "tag", "at", "the", "head", "of", "the", "wikicode", "string", "." ]
python
train
SALib/SALib
src/SALib/analyze/rbd_fast.py
https://github.com/SALib/SALib/blob/9744d73bb17cfcffc8282c7dc4a727efdc4bea3f/src/SALib/analyze/rbd_fast.py#L85-L99
def permute_outputs(Y, X): """ Permute the output according to one of the inputs as in [_2] References ---------- .. [2] Elmar Plischke (2010) "An effective algorithm for computing global sensitivity indices (EASI) Reliability Engineering & System Safety", 95:4, 354-360. doi:10.1016/j.ress.2009.11.005 """ permutation_index = np.argsort(X) permutation_index = np.concatenate([permutation_index[::2], permutation_index[1::2][::-1]]) return Y[permutation_index]
[ "def", "permute_outputs", "(", "Y", ",", "X", ")", ":", "permutation_index", "=", "np", ".", "argsort", "(", "X", ")", "permutation_index", "=", "np", ".", "concatenate", "(", "[", "permutation_index", "[", ":", ":", "2", "]", ",", "permutation_index", "[", "1", ":", ":", "2", "]", "[", ":", ":", "-", "1", "]", "]", ")", "return", "Y", "[", "permutation_index", "]" ]
Permute the output according to one of the inputs as in [_2] References ---------- .. [2] Elmar Plischke (2010) "An effective algorithm for computing global sensitivity indices (EASI) Reliability Engineering & System Safety", 95:4, 354-360. doi:10.1016/j.ress.2009.11.005
[ "Permute", "the", "output", "according", "to", "one", "of", "the", "inputs", "as", "in", "[", "_2", "]", "References", "----------", "..", "[", "2", "]", "Elmar", "Plischke", "(", "2010", ")", "An", "effective", "algorithm", "for", "computing", "global", "sensitivity", "indices", "(", "EASI", ")", "Reliability", "Engineering", "&", "System", "Safety", "95", ":", "4", "354", "-", "360", ".", "doi", ":", "10", ".", "1016", "/", "j", ".", "ress", ".", "2009", ".", "11", ".", "005" ]
python
train
tensorflow/tensor2tensor
tensor2tensor/data_generators/algorithmic_math.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/algorithmic_math.py#L439-L477
def algebra_inverse(alphabet_size=26, min_depth=0, max_depth=2, nbr_cases=10000): """Generate the algebra inverse dataset. Each sample is a symbolic math equation involving unknown variables. The task is to solve for the given variable. The target is the resulting expression. Args: alphabet_size: How many possible variables there are. Max 52. min_depth: Minimum depth of the expression trees on both sides of the equals sign in the equation. max_depth: Maximum depth of the expression trees on both sides of the equals sign in the equation. nbr_cases: The number of cases to generate. Yields: A dictionary {"inputs": input-list, "targets": target-list} where input-list are the tokens encoding the variable to solve for and the math equation, and target-list is a list of tokens encoding the resulting math expression after solving for the variable. Raises: ValueError: If `max_depth` < `min_depth`. """ if max_depth < min_depth: raise ValueError("max_depth must be greater than or equal to min_depth. " "Got max_depth=%s, min_depth=%s" % (max_depth, min_depth)) alg_cfg = math_dataset_init(alphabet_size) for _ in range(nbr_cases): sample, target = generate_algebra_inverse_sample( alg_cfg.vlist, list(alg_cfg.ops.values()), alg_cfg.solve_ops, min_depth, max_depth) yield { "inputs": alg_cfg.int_encoder(sample), "targets": alg_cfg.int_encoder(target) }
[ "def", "algebra_inverse", "(", "alphabet_size", "=", "26", ",", "min_depth", "=", "0", ",", "max_depth", "=", "2", ",", "nbr_cases", "=", "10000", ")", ":", "if", "max_depth", "<", "min_depth", ":", "raise", "ValueError", "(", "\"max_depth must be greater than or equal to min_depth. \"", "\"Got max_depth=%s, min_depth=%s\"", "%", "(", "max_depth", ",", "min_depth", ")", ")", "alg_cfg", "=", "math_dataset_init", "(", "alphabet_size", ")", "for", "_", "in", "range", "(", "nbr_cases", ")", ":", "sample", ",", "target", "=", "generate_algebra_inverse_sample", "(", "alg_cfg", ".", "vlist", ",", "list", "(", "alg_cfg", ".", "ops", ".", "values", "(", ")", ")", ",", "alg_cfg", ".", "solve_ops", ",", "min_depth", ",", "max_depth", ")", "yield", "{", "\"inputs\"", ":", "alg_cfg", ".", "int_encoder", "(", "sample", ")", ",", "\"targets\"", ":", "alg_cfg", ".", "int_encoder", "(", "target", ")", "}" ]
Generate the algebra inverse dataset. Each sample is a symbolic math equation involving unknown variables. The task is to solve for the given variable. The target is the resulting expression. Args: alphabet_size: How many possible variables there are. Max 52. min_depth: Minimum depth of the expression trees on both sides of the equals sign in the equation. max_depth: Maximum depth of the expression trees on both sides of the equals sign in the equation. nbr_cases: The number of cases to generate. Yields: A dictionary {"inputs": input-list, "targets": target-list} where input-list are the tokens encoding the variable to solve for and the math equation, and target-list is a list of tokens encoding the resulting math expression after solving for the variable. Raises: ValueError: If `max_depth` < `min_depth`.
[ "Generate", "the", "algebra", "inverse", "dataset", "." ]
python
train
scot-dev/scot
scot/plainica.py
https://github.com/scot-dev/scot/blob/48598b79d4400dad893b134cd2194715511facda/scot/plainica.py#L29-L77
def plainica(x, reducedim=0.99, backend=None, random_state=None): """ Source decomposition with ICA. Apply ICA to the data x, with optional PCA dimensionality reduction. Parameters ---------- x : array, shape (n_trials, n_channels, n_samples) or (n_channels, n_samples) data set reducedim : {int, float, 'no_pca'}, optional A number of less than 1 in interpreted as the fraction of variance that should remain in the data. All components that describe in total less than `1-reducedim` of the variance are removed by the PCA step. An integer numer of 1 or greater is interpreted as the number of components to keep after applying the PCA. If set to 'no_pca' the PCA step is skipped. backend : dict-like, optional Specify backend to use. When set to None the backend configured in config.backend is used. Returns ------- result : ResultICA Source decomposition """ x = atleast_3d(x) t, m, l = np.shape(x) if backend is None: backend = scotbackend # pre-transform the data with PCA if reducedim == 'no pca': c = np.eye(m) d = np.eye(m) xpca = x else: c, d, xpca = backend['pca'](x, reducedim) # run on residuals ICA to estimate volume conduction mx, ux = backend['ica'](cat_trials(xpca), random_state=random_state) # correct (un)mixing matrix estimatees mx = mx.dot(d) ux = c.dot(ux) class Result: unmixing = ux mixing = mx return Result
[ "def", "plainica", "(", "x", ",", "reducedim", "=", "0.99", ",", "backend", "=", "None", ",", "random_state", "=", "None", ")", ":", "x", "=", "atleast_3d", "(", "x", ")", "t", ",", "m", ",", "l", "=", "np", ".", "shape", "(", "x", ")", "if", "backend", "is", "None", ":", "backend", "=", "scotbackend", "# pre-transform the data with PCA", "if", "reducedim", "==", "'no pca'", ":", "c", "=", "np", ".", "eye", "(", "m", ")", "d", "=", "np", ".", "eye", "(", "m", ")", "xpca", "=", "x", "else", ":", "c", ",", "d", ",", "xpca", "=", "backend", "[", "'pca'", "]", "(", "x", ",", "reducedim", ")", "# run on residuals ICA to estimate volume conduction", "mx", ",", "ux", "=", "backend", "[", "'ica'", "]", "(", "cat_trials", "(", "xpca", ")", ",", "random_state", "=", "random_state", ")", "# correct (un)mixing matrix estimatees", "mx", "=", "mx", ".", "dot", "(", "d", ")", "ux", "=", "c", ".", "dot", "(", "ux", ")", "class", "Result", ":", "unmixing", "=", "ux", "mixing", "=", "mx", "return", "Result" ]
Source decomposition with ICA. Apply ICA to the data x, with optional PCA dimensionality reduction. Parameters ---------- x : array, shape (n_trials, n_channels, n_samples) or (n_channels, n_samples) data set reducedim : {int, float, 'no_pca'}, optional A number less than 1 is interpreted as the fraction of variance that should remain in the data. All components that describe in total less than `1-reducedim` of the variance are removed by the PCA step. An integer number of 1 or greater is interpreted as the number of components to keep after applying the PCA. If set to 'no_pca' the PCA step is skipped. backend : dict-like, optional Specify backend to use. When set to None the backend configured in config.backend is used. Returns ------- result : ResultICA Source decomposition
[ "Source", "decomposition", "with", "ICA", "." ]
python
train
rackerlabs/simpl
simpl/config.py
https://github.com/rackerlabs/simpl/blob/60ed3336a931cd6a7a7246e60f26165d9dc7c99c/simpl/config.py#L378-L382
def init(cls, *args, **kwargs): """Initialize the config like as you would a regular dict.""" instance = cls() instance._values.update(dict(*args, **kwargs)) return instance
[ "def", "init", "(", "cls", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "instance", "=", "cls", "(", ")", "instance", ".", "_values", ".", "update", "(", "dict", "(", "*", "args", ",", "*", "*", "kwargs", ")", ")", "return", "instance" ]
Initialize the config as you would a regular dict.
[ "Initialize", "the", "config", "like", "as", "you", "would", "a", "regular", "dict", "." ]
python
train
sony/nnabla
python/src/nnabla/functions.py
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/functions.py#L62-L109
def max(x, axis=None, keepdims=False, with_index=False, only_index=False): """Reduce the input N-D array `x` along the given `axis` using the max operation. The `axis` argument may be a single integer to reduce over one axis, a tuple of integers to reduce over multiple axes, or ``None`` to reduce over all axes. If `keepdims` is ``True``, the output will keep all reduced dimensions with size 1. If `with_index` is True, result is a tuple ``(sorted, indices)`` or only ``indices`` if `only_index` is True. Setting `only_index` to True implies that `with_index` is also True. .. code-block:: python import numpy as np import nnabla as nn import nnabla.functions as F nn.set_auto_forward(True) x = nn.Variable.from_numpy_array(np.random.rand(2, 3, 4)) maxval = F.max(x, axis=1) assert np.allclose(maxval.d, np.max(x.d, axis=1)) maxval, indices = F.max(x, axis=1, with_index=True) assert np.allclose(maxval.d, np.max(x.d, axis=1)) assert np.all(indices.d == np.argmax(x.d, axis=1)) indices = F.max(x, axis=1, only_index=True) assert np.all(indices.d == np.argmax(x.d, axis=1)) Args: x (Variable): An input variable. axis (None, int or tuple of ints): Axis or axes along which max is calculated. The default value `None` will reduce all dimensions. keepdims(bool): Keep reduced axes as dimension with 1 element. with_index(bool): Return tuple of max values and index. only_index(bool): Return only the index of max values. Returns: ~nnabla.Variable: N-D array. """ from .function_bases import max as max_base if axis is None: axis = range(x.ndim) elif not hasattr(axis, '__iter__'): axis = [axis] n_outputs = 2 if with_index and not only_index else 1 return max_base(x, axis, keepdims, with_index, only_index, n_outputs)
[ "def", "max", "(", "x", ",", "axis", "=", "None", ",", "keepdims", "=", "False", ",", "with_index", "=", "False", ",", "only_index", "=", "False", ")", ":", "from", ".", "function_bases", "import", "max", "as", "max_base", "if", "axis", "is", "None", ":", "axis", "=", "range", "(", "x", ".", "ndim", ")", "elif", "not", "hasattr", "(", "axis", ",", "'__iter__'", ")", ":", "axis", "=", "[", "axis", "]", "n_outputs", "=", "2", "if", "with_index", "and", "not", "only_index", "else", "1", "return", "max_base", "(", "x", ",", "axis", ",", "keepdims", ",", "with_index", ",", "only_index", ",", "n_outputs", ")" ]
Reduce the input N-D array `x` along the given `axis` using the max operation. The `axis` argument may be a single integer to reduce over one axis, a tuple of integers to reduce over multiple axes, or ``None`` to reduce over all axes. If `keepdims` is ``True``, the output will keep all reduced dimensions with size 1. If `with_index` is True, result is a tuple ``(sorted, indices)`` or only ``indices`` if `only_index` is True. Setting `only_index` to True implies that `with_index` is also True. .. code-block:: python import numpy as np import nnabla as nn import nnabla.functions as F nn.set_auto_forward(True) x = nn.Variable.from_numpy_array(np.random.rand(2, 3, 4)) maxval = F.max(x, axis=1) assert np.allclose(maxval.d, np.max(x.d, axis=1)) maxval, indices = F.max(x, axis=1, with_index=True) assert np.allclose(maxval.d, np.max(x.d, axis=1)) assert np.all(indices.d == np.argmax(x.d, axis=1)) indices = F.max(x, axis=1, only_index=True) assert np.all(indices.d == np.argmax(x.d, axis=1)) Args: x (Variable): An input variable. axis (None, int or tuple of ints): Axis or axes along which max is calculated. The default value `None` will reduce all dimensions. keepdims(bool): Keep reduced axes as dimension with 1 element. with_index(bool): Return tuple of max values and index. only_index(bool): Return only the index of max values. Returns: ~nnabla.Variable: N-D array.
[ "Reduce", "the", "input", "N", "-", "D", "array", "x", "along", "the", "given", "axis", "using", "the", "max", "operation", ".", "The", "axis", "argument", "may", "be", "a", "single", "integer", "to", "reduce", "over", "one", "axis", "a", "tuple", "of", "integers", "to", "reduce", "over", "multiple", "axes", "or", "None", "to", "reduce", "over", "all", "axes", ".", "If", "keepdims", "is", "True", "the", "output", "will", "keep", "all", "reduced", "dimensions", "with", "size", "1", ".", "If", "with_index", "is", "True", "result", "is", "a", "tuple", "(", "sorted", "indices", ")", "or", "only", "indices", "if", "only_index", "is", "True", ".", "Setting", "only_index", "to", "True", "implies", "that", "with_index", "is", "also", "True", "." ]
python
train
classam/silly
silly/main.py
https://github.com/classam/silly/blob/f3202e997d5ebc9e4f98370b08665fd1178a9556/silly/main.py#L691-L730
def datetime(past=True, random=random): """ Returns a random datetime from the past... or the future! >>> mock_random.seed(0) >>> datetime(random=mock_random).isoformat() '1950-02-03T03:04:05' """ def year(): if past: return random.choice(range(1950,2005)) else: return _datetime.datetime.now().year + random.choice(range(1, 50)) def month(): return random.choice(range(1,12)) def day(): return random.choice(range(1,31)) def hour(): return random.choice(range(0,23)) def minute(): return random.choice(range(0,59)) def second(): return random.choice(range(0,59)) try: return _datetime.datetime(year=year(), month=month(), day=day(), hour=hour(), minute=minute(), second=second()) except ValueError: return datetime(past=past)
[ "def", "datetime", "(", "past", "=", "True", ",", "random", "=", "random", ")", ":", "def", "year", "(", ")", ":", "if", "past", ":", "return", "random", ".", "choice", "(", "range", "(", "1950", ",", "2005", ")", ")", "else", ":", "return", "_datetime", ".", "datetime", ".", "now", "(", ")", ".", "year", "+", "random", ".", "choice", "(", "range", "(", "1", ",", "50", ")", ")", "def", "month", "(", ")", ":", "return", "random", ".", "choice", "(", "range", "(", "1", ",", "12", ")", ")", "def", "day", "(", ")", ":", "return", "random", ".", "choice", "(", "range", "(", "1", ",", "31", ")", ")", "def", "hour", "(", ")", ":", "return", "random", ".", "choice", "(", "range", "(", "0", ",", "23", ")", ")", "def", "minute", "(", ")", ":", "return", "random", ".", "choice", "(", "range", "(", "0", ",", "59", ")", ")", "def", "second", "(", ")", ":", "return", "random", ".", "choice", "(", "range", "(", "0", ",", "59", ")", ")", "try", ":", "return", "_datetime", ".", "datetime", "(", "year", "=", "year", "(", ")", ",", "month", "=", "month", "(", ")", ",", "day", "=", "day", "(", ")", ",", "hour", "=", "hour", "(", ")", ",", "minute", "=", "minute", "(", ")", ",", "second", "=", "second", "(", ")", ")", "except", "ValueError", ":", "return", "datetime", "(", "past", "=", "past", ")" ]
Returns a random datetime from the past... or the future! >>> mock_random.seed(0) >>> datetime(random=mock_random).isoformat() '1950-02-03T03:04:05'
[ "Returns", "a", "random", "datetime", "from", "the", "past", "...", "or", "the", "future!" ]
python
train
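The record above relies on rejection sampling: draw random calendar components and retry whenever they do not form a valid date. A minimal standalone sketch of that idea, mirroring the original's ranges (so, like the source, it never produces December or the 31st); the helper name `random_past_datetime` is made up for illustration and is not part of the silly package.

.. code-block:: python

    import datetime as _datetime
    import random

    def random_past_datetime(rng=random):
        """Keep sampling calendar components until they form a valid date."""
        while True:
            try:
                return _datetime.datetime(
                    year=rng.choice(range(1950, 2005)),
                    month=rng.choice(range(1, 12)),
                    day=rng.choice(range(1, 31)),
                    hour=rng.choice(range(0, 23)),
                    minute=rng.choice(range(0, 59)),
                    second=rng.choice(range(0, 59)),
                )
            except ValueError:
                continue  # e.g. 30 February: invalid, so try again

    print(random_past_datetime().isoformat())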
PierreRust/apigpio
apigpio/apigpio.py
https://github.com/PierreRust/apigpio/blob/2b969f40e06219b43a43498d8baf87f5935ceab2/apigpio/apigpio.py#L767-L782
def set_bank_1(self, bits): """ Sets gpios 0-31 if the corresponding bit in bits is set. bits:= a 32 bit mask with 1 set if the corresponding gpio is to be set. A returned status of PI_SOME_PERMITTED indicates that the user is not allowed to write to one or more of the gpios. ... pi.set_bank_1(int("111110010000",2)) ... """ res = yield from self._pigpio_aio_command(_PI_CMD_BS1, bits, 0) return _u2i(res)
[ "def", "set_bank_1", "(", "self", ",", "bits", ")", ":", "res", "=", "yield", "from", "self", ".", "_pigpio_aio_command", "(", "_PI_CMD_BS1", ",", "bits", ",", "0", ")", "return", "_u2i", "(", "res", ")" ]
Sets gpios 0-31 if the corresponding bit in bits is set. bits:= a 32 bit mask with 1 set if the corresponding gpio is to be set. A returned status of PI_SOME_PERMITTED indicates that the user is not allowed to write to one or more of the gpios. ... pi.set_bank_1(int("111110010000",2)) ...
[ "Sets", "gpios", "0", "-", "31", "if", "the", "corresponding", "bit", "in", "bits", "is", "set", "." ]
python
train
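To make the bit-mask argument from the docstring example concrete, here is a small standalone sketch, with no pigpio connection involved, that shows which of gpios 0-31 a given mask would select:

.. code-block:: python

    mask = int("111110010000", 2)  # the mask from the docstring example

    # gpio n is selected when bit n of the mask is 1
    selected = [gpio for gpio in range(32) if mask & (1 << gpio)]
    print(selected)  # [4, 7, 8, 9, 10, 11]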
Ezhil-Language-Foundation/open-tamil
tamil/utf8.py
https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/tamil/utf8.py#L33-L37
def to_unicode_repr( _letter ): """ helpful in situations where browser/app may recognize Unicode encoding in the \u0b8e type syntax but not actual unicode glyph/code-point""" # Python 2-3 compatible return u"u'"+ u"".join( [ u"\\u%04x"%ord(l) for l in _letter ] ) + u"'"
[ "def", "to_unicode_repr", "(", "_letter", ")", ":", "# Python 2-3 compatible", "return", "u\"u'\"", "+", "u\"\"", ".", "join", "(", "[", "u\"\\\\u%04x\"", "%", "ord", "(", "l", ")", "for", "l", "in", "_letter", "]", ")", "+", "u\"'\"" ]
helpful in situations where browser/app may recognize Unicode encoding in the \u0b8e type syntax but not actual unicode glyph/code-point
[ "helpful", "in", "situations", "where", "browser", "/", "app", "may", "recognize", "Unicode", "encoding", "in", "the", "\\", "u0b8e", "type", "syntax", "but", "not", "actual", "unicode", "glyph", "/", "code", "-", "point" ]
python
train
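A quick self-contained check of what this helper returns for the Tamil letter எ (U+0B8E) mentioned in the docstring; the function body is restated here so the snippet runs without installing open-tamil.

.. code-block:: python

    def to_unicode_repr(_letter):
        return u"u'" + u"".join(u"\\u%04x" % ord(l) for l in _letter) + u"'"

    print(to_unicode_repr(u"\u0b8e"))  # -> u'\u0b8e'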
necaris/python3-openid
openid/consumer/consumer.py
https://github.com/necaris/python3-openid/blob/4911bbc196dfd6f9eda7155df9903d668720ecbf/openid/consumer/consumer.py#L1187-L1223
def _negotiateAssociation(self, endpoint): """Make association requests to the server, attempting to create a new association. @returns: a new association object @rtype: L{openid.association.Association} """ # Get our preferred session/association type from the negotiatior. assoc_type, session_type = self.negotiator.getAllowedType() try: assoc = self._requestAssociation(endpoint, assoc_type, session_type) except ServerError as why: supportedTypes = self._extractSupportedAssociationType( why, endpoint, assoc_type) if supportedTypes is not None: assoc_type, session_type = supportedTypes # Attempt to create an association from the assoc_type # and session_type that the server told us it # supported. try: assoc = self._requestAssociation(endpoint, assoc_type, session_type) except ServerError as why: # Do not keep trying, since it rejected the # association type that it told us to use. logging.error( 'Server %s refused its suggested association ' 'type: session_type=%s, assoc_type=%s' % ( endpoint.server_url, session_type, assoc_type)) return None else: return assoc else: return assoc
[ "def", "_negotiateAssociation", "(", "self", ",", "endpoint", ")", ":", "# Get our preferred session/association type from the negotiatior.", "assoc_type", ",", "session_type", "=", "self", ".", "negotiator", ".", "getAllowedType", "(", ")", "try", ":", "assoc", "=", "self", ".", "_requestAssociation", "(", "endpoint", ",", "assoc_type", ",", "session_type", ")", "except", "ServerError", "as", "why", ":", "supportedTypes", "=", "self", ".", "_extractSupportedAssociationType", "(", "why", ",", "endpoint", ",", "assoc_type", ")", "if", "supportedTypes", "is", "not", "None", ":", "assoc_type", ",", "session_type", "=", "supportedTypes", "# Attempt to create an association from the assoc_type", "# and session_type that the server told us it", "# supported.", "try", ":", "assoc", "=", "self", ".", "_requestAssociation", "(", "endpoint", ",", "assoc_type", ",", "session_type", ")", "except", "ServerError", "as", "why", ":", "# Do not keep trying, since it rejected the", "# association type that it told us to use.", "logging", ".", "error", "(", "'Server %s refused its suggested association '", "'type: session_type=%s, assoc_type=%s'", "%", "(", "endpoint", ".", "server_url", ",", "session_type", ",", "assoc_type", ")", ")", "return", "None", "else", ":", "return", "assoc", "else", ":", "return", "assoc" ]
Make association requests to the server, attempting to create a new association. @returns: a new association object @rtype: L{openid.association.Association}
[ "Make", "association", "requests", "to", "the", "server", "attempting", "to", "create", "a", "new", "association", "." ]
python
train
JnyJny/Geometry
Geometry/ellipse.py
https://github.com/JnyJny/Geometry/blob/3500f815fa56c535b36d1b6fd0afe69ce5d055be/Geometry/ellipse.py#L118-L122
def xAxisIsMinor(self): ''' Returns True if the minor axis is parallel to the X axis, boolean. ''' return min(self.radius.x, self.radius.y) == self.radius.x
[ "def", "xAxisIsMinor", "(", "self", ")", ":", "return", "min", "(", "self", ".", "radius", ".", "x", ",", "self", ".", "radius", ".", "y", ")", "==", "self", ".", "radius", ".", "x" ]
Returns True if the minor axis is parallel to the X axis, boolean.
[ "Returns", "True", "if", "the", "minor", "axis", "is", "parallel", "to", "the", "X", "axis", "boolean", "." ]
python
train
ly0/baidupcsapi
baidupcsapi/api.py
https://github.com/ly0/baidupcsapi/blob/6f6feeef0767a75b3b968924727460eb09242d76/baidupcsapi/api.py#L1348-L1393
def share(self, file_ids, pwd=None, **kwargs): """ 创建一个文件的分享链接 :param file_ids: 要分享的文件fid列表 :type file_ids: list :param pwd: 分享密码,没有则没有密码 :type pwd: str :return: requests.Response 对象 .. note:: 返回正确 { "errno": 0, "request_id": 请求识别号, "shareid": 分享识别号, "link": "分享地址", "shorturl": "段网址", "ctime": 创建时间, "premis": false } """ if pwd: data = { 'fid_list': json.dumps([int(fid) for fid in file_ids]), 'pwd': pwd, 'schannel': 4, 'channel_list': json.dumps([]) } else: data = { 'fid_list': json.dumps([int(fid) for fid in file_ids]), 'schannel': 0, 'channel_list': json.dumps([]) } url = 'http://pan.baidu.com/share/set' return self._request('share/set', '', url=url, data=data, **kwargs)
[ "def", "share", "(", "self", ",", "file_ids", ",", "pwd", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "pwd", ":", "data", "=", "{", "'fid_list'", ":", "json", ".", "dumps", "(", "[", "int", "(", "fid", ")", "for", "fid", "in", "file_ids", "]", ")", ",", "'pwd'", ":", "pwd", ",", "'schannel'", ":", "4", ",", "'channel_list'", ":", "json", ".", "dumps", "(", "[", "]", ")", "}", "else", ":", "data", "=", "{", "'fid_list'", ":", "json", ".", "dumps", "(", "[", "int", "(", "fid", ")", "for", "fid", "in", "file_ids", "]", ")", ",", "'schannel'", ":", "0", ",", "'channel_list'", ":", "json", ".", "dumps", "(", "[", "]", ")", "}", "url", "=", "'http://pan.baidu.com/share/set'", "return", "self", ".", "_request", "(", "'share/set'", ",", "''", ",", "url", "=", "url", ",", "data", "=", "data", ",", "*", "*", "kwargs", ")" ]
创建一个文件的分享链接 :param file_ids: 要分享的文件fid列表 :type file_ids: list :param pwd: 分享密码,没有则没有密码 :type pwd: str :return: requests.Response 对象 .. note:: 返回正确 { "errno": 0, "request_id": 请求识别号, "shareid": 分享识别号, "link": "分享地址", "shorturl": "段网址", "ctime": 创建时间, "premis": false }
[ "创建一个文件的分享链接" ]
python
train
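A hedged usage sketch for the `share` method in the record above. The client class name `PCS`, the credentials and the fid value are assumptions for illustration only; the JSON fields read at the end are the ones listed in the docstring.

.. code-block:: python

    from baidupcsapi import PCS  # assumed client class

    pcs = PCS('username', 'password')          # placeholder credentials
    resp = pcs.share([123456789], pwd='abcd')  # hypothetical fid and password
    info = resp.json()
    print(info['shareid'], info['link'], info['shorturl'])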
ereOn/azmq
azmq/socket.py
https://github.com/ereOn/azmq/blob/9f40d6d721eea7f7659ec6cc668811976db59854/azmq/socket.py#L445-L456
async def _fair_recv(self): """ Receive from all the existing peers, rotating the list of peers every time. :returns: The frames. """ with await self._read_lock: peer = await self._fair_get_in_peer() result = peer.inbox.read_nowait() return result
[ "async", "def", "_fair_recv", "(", "self", ")", ":", "with", "await", "self", ".", "_read_lock", ":", "peer", "=", "await", "self", ".", "_fair_get_in_peer", "(", ")", "result", "=", "peer", ".", "inbox", ".", "read_nowait", "(", ")", "return", "result" ]
Receive from all the existing peers, rotating the list of peers every time. :returns: The frames.
[ "Receive", "from", "all", "the", "existing", "peers", "rotating", "the", "list", "of", "peers", "every", "time", "." ]
python
train
icgood/pymap
pymap/parsing/primitives.py
https://github.com/icgood/pymap/blob/e77d9a54d760e3cbe044a548883bb4299ed61dc2/pymap/parsing/primitives.py#L188-L227
def build(cls, value: object, binary: bool = False, fallback: object = None) -> Union[Nil, 'String']: """Produce either a :class:`QuotedString` or :class:`LiteralString` based on the contents of ``data``. This is useful to improve readability of response data. Args: value: The string to serialize. binary: True if the string should be transmitted as binary. fallback: The default value to use if ``value`` is None. """ if value is None: if fallback is None: return Nil() else: return cls.build(fallback, binary) elif not value: return QuotedString(b'') elif isinstance(value, bytes): ascii_ = value elif isinstance(value, memoryview): ascii_ = bytes(value) elif hasattr(value, '__bytes__'): ascii_ = bytes(cast(SupportsBytes, value)) elif isinstance(value, str) or hasattr(value, '__str__'): value = str(value) try: ascii_ = bytes(value, 'ascii') except UnicodeEncodeError: ascii_ = bytes(value, 'utf-8', 'replace') return LiteralString(ascii_, binary) else: raise TypeError(value) if not binary and len(ascii_) < 64 \ and b'\n' not in ascii_ \ and b'\x00' not in ascii_: return QuotedString(ascii_) else: return LiteralString(ascii_, binary)
[ "def", "build", "(", "cls", ",", "value", ":", "object", ",", "binary", ":", "bool", "=", "False", ",", "fallback", ":", "object", "=", "None", ")", "->", "Union", "[", "Nil", ",", "'String'", "]", ":", "if", "value", "is", "None", ":", "if", "fallback", "is", "None", ":", "return", "Nil", "(", ")", "else", ":", "return", "cls", ".", "build", "(", "fallback", ",", "binary", ")", "elif", "not", "value", ":", "return", "QuotedString", "(", "b''", ")", "elif", "isinstance", "(", "value", ",", "bytes", ")", ":", "ascii_", "=", "value", "elif", "isinstance", "(", "value", ",", "memoryview", ")", ":", "ascii_", "=", "bytes", "(", "value", ")", "elif", "hasattr", "(", "value", ",", "'__bytes__'", ")", ":", "ascii_", "=", "bytes", "(", "cast", "(", "SupportsBytes", ",", "value", ")", ")", "elif", "isinstance", "(", "value", ",", "str", ")", "or", "hasattr", "(", "value", ",", "'__str__'", ")", ":", "value", "=", "str", "(", "value", ")", "try", ":", "ascii_", "=", "bytes", "(", "value", ",", "'ascii'", ")", "except", "UnicodeEncodeError", ":", "ascii_", "=", "bytes", "(", "value", ",", "'utf-8'", ",", "'replace'", ")", "return", "LiteralString", "(", "ascii_", ",", "binary", ")", "else", ":", "raise", "TypeError", "(", "value", ")", "if", "not", "binary", "and", "len", "(", "ascii_", ")", "<", "64", "and", "b'\\n'", "not", "in", "ascii_", "and", "b'\\x00'", "not", "in", "ascii_", ":", "return", "QuotedString", "(", "ascii_", ")", "else", ":", "return", "LiteralString", "(", "ascii_", ",", "binary", ")" ]
Produce either a :class:`QuotedString` or :class:`LiteralString` based on the contents of ``data``. This is useful to improve readability of response data. Args: value: The string to serialize. binary: True if the string should be transmitted as binary. fallback: The default value to use if ``value`` is None.
[ "Produce", "either", "a", ":", "class", ":", "QuotedString", "or", ":", "class", ":", "LiteralString", "based", "on", "the", "contents", "of", "data", ".", "This", "is", "useful", "to", "improve", "readability", "of", "response", "data", "." ]
python
train
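The interesting part of `String.build` is the final heuristic that picks a quoted string over a literal one. A standalone restatement of just that decision, with the class names replaced by plain labels so it runs on its own:

.. code-block:: python

    def choose_form(ascii_, binary=False):
        """Mirror of the quoted-vs-literal decision at the end of String.build."""
        if not binary and len(ascii_) < 64 and b'\n' not in ascii_ and b'\x00' not in ascii_:
            return 'quoted'
        return 'literal'

    print(choose_form(b'hello'))         # quoted
    print(choose_form(b'line1\nline2'))  # literal (contains a newline)
    print(choose_form(b'x' * 100))       # literal (64 bytes or more)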
apache/incubator-mxnet
example/gluon/lipnet/utils/align.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/gluon/lipnet/utils/align.py#L36-L52
def build(self, align_path): """ Build the align array """ file = open(align_path, 'r') lines = file.readlines() file.close() # words: list([op, ed, word]) words = [] for line in lines: _op, _ed, word = line.strip().split(' ') if word not in Align.skip_list: words.append((int(_op), int(_ed), word)) self.words = words self.n_words = len(words) self.sentence_str = " ".join([w[2] for w in self.words]) self.sentence_length = len(self.sentence_str)
[ "def", "build", "(", "self", ",", "align_path", ")", ":", "file", "=", "open", "(", "align_path", ",", "'r'", ")", "lines", "=", "file", ".", "readlines", "(", ")", "file", ".", "close", "(", ")", "# words: list([op, ed, word])", "words", "=", "[", "]", "for", "line", "in", "lines", ":", "_op", ",", "_ed", ",", "word", "=", "line", ".", "strip", "(", ")", ".", "split", "(", "' '", ")", "if", "word", "not", "in", "Align", ".", "skip_list", ":", "words", ".", "append", "(", "(", "int", "(", "_op", ")", ",", "int", "(", "_ed", ")", ",", "word", ")", ")", "self", ".", "words", "=", "words", "self", ".", "n_words", "=", "len", "(", "words", ")", "self", ".", "sentence_str", "=", "\" \"", ".", "join", "(", "[", "w", "[", "2", "]", "for", "w", "in", "self", ".", "words", "]", ")", "self", ".", "sentence_length", "=", "len", "(", "self", ".", "sentence_str", ")" ]
Build the align array
[ "Build", "the", "align", "array" ]
python
train
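The `.align` format consumed by `build` is simply `start end word` per line. A self-contained sketch of the same parsing; the real class keeps its skip list in `Align.skip_list`, whose exact contents are not shown here, so the `sil`/`sp` markers below are an assumption.

.. code-block:: python

    skip_list = ['sil', 'sp']  # assumed silence markers

    lines = ["0 23500 sil", "23500 29500 bin", "29500 34000 blue", "34000 35500 sil"]

    words = []
    for line in lines:
        _op, _ed, word = line.strip().split(' ')
        if word not in skip_list:
            words.append((int(_op), int(_ed), word))

    sentence = " ".join(w[2] for w in words)
    print(words)     # [(23500, 29500, 'bin'), (29500, 34000, 'blue')]
    print(sentence)  # bin blue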
HewlettPackard/python-hpOneView
hpOneView/resources/uncategorized/os_deployment_servers.py
https://github.com/HewlettPackard/python-hpOneView/blob/3c6219723ef25e6e0c83d44a89007f89bc325b89/hpOneView/resources/uncategorized/os_deployment_servers.py#L134-L151
def update(self, resource, force=False, timeout=-1): """ Updates the Deployment Server resource. The properties that are omitted (not included as part of the request body) are ignored. Args: resource (dict): Object to update. force: If set to true, the operation completes despite any problems with network connectivity or errors on the resource itself. The default is false. timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView, just stops waiting for its completion. Returns: Updated resource. """ return self._client.update(resource, timeout=timeout, force=force)
[ "def", "update", "(", "self", ",", "resource", ",", "force", "=", "False", ",", "timeout", "=", "-", "1", ")", ":", "return", "self", ".", "_client", ".", "update", "(", "resource", ",", "timeout", "=", "timeout", ",", "force", "=", "force", ")" ]
Updates the Deployment Server resource. The properties that are omitted (not included as part of the request body) are ignored. Args: resource (dict): Object to update. force: If set to true, the operation completes despite any problems with network connectivity or errors on the resource itself. The default is false. timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView, just stops waiting for its completion. Returns: Updated resource.
[ "Updates", "the", "Deployment", "Server", "resource", ".", "The", "properties", "that", "are", "omitted", "(", "not", "included", "as", "part", "of", "the", "request", "body", ")", "are", "ignored", "." ]
python
train
saltstack/salt
salt/renderers/stateconf.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/renderers/stateconf.py#L310-L331
def nvlist(thelist, names=None): ''' Given a list of items:: - whatever - name1: value1 - name2: - key: value - key: value return a generator that yields each (item, key, value) tuple, skipping items that are not name-value's(dictionaries) or those not in the list of matching names. The item in the returned tuple is the single-key dictionary. ''' # iterate over the list under the state dict. for nvitem in thelist: if isinstance(nvitem, dict): # then nvitem is a name-value item(a dict) of the list. name, value = next(six.iteritems(nvitem)) if names is None or name in names: yield nvitem, name, value
[ "def", "nvlist", "(", "thelist", ",", "names", "=", "None", ")", ":", "# iterate over the list under the state dict.", "for", "nvitem", "in", "thelist", ":", "if", "isinstance", "(", "nvitem", ",", "dict", ")", ":", "# then nvitem is a name-value item(a dict) of the list.", "name", ",", "value", "=", "next", "(", "six", ".", "iteritems", "(", "nvitem", ")", ")", "if", "names", "is", "None", "or", "name", "in", "names", ":", "yield", "nvitem", ",", "name", ",", "value" ]
Given a list of items:: - whatever - name1: value1 - name2: - key: value - key: value return a generator that yields each (item, key, value) tuple, skipping items that are not name-value's(dictionaries) or those not in the list of matching names. The item in the returned tuple is the single-key dictionary.
[ "Given", "a", "list", "of", "items", "::" ]
python
train
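A quick illustration of what `nvlist` yields for a list like the one in the docstring; `six.iteritems` is just the Python 2/3 way of iterating a dict's items, so the behaviour is easy to reproduce inline:

.. code-block:: python

    thelist = [
        'whatever',
        {'name1': 'value1'},
        {'name2': [{'key': 'value'}]},
    ]

    # Equivalent of: for nvitem, name, value in nvlist(thelist, names=['name2'])
    for nvitem in thelist:
        if isinstance(nvitem, dict):
            name, value = next(iter(nvitem.items()))
            if name in ['name2']:
                print(nvitem, name, value)
    # -> {'name2': [{'key': 'value'}]} name2 [{'key': 'value'}]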
nerdvegas/rez
src/build_utils/virtualenv/virtualenv.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/build_utils/virtualenv/virtualenv.py#L913-L937
def find_wheels(projects, search_dirs): """Find wheels from which we can import PROJECTS. Scan through SEARCH_DIRS for a wheel for each PROJECT in turn. Return a list of the first wheel found for each PROJECT """ wheels = [] # Look through SEARCH_DIRS for the first suitable wheel. Don't bother # about version checking here, as this is simply to get something we can # then use to install the correct version. for project in projects: for dirname in search_dirs: # This relies on only having "universal" wheels available. # The pattern could be tightened to require -py2.py3-none-any.whl. files = glob.glob(os.path.join(dirname, project + '-*.whl')) if files: wheels.append(os.path.abspath(files[0])) break else: # We're out of luck, so quit with a suitable error logger.fatal('Cannot find a wheel for %s' % (project,)) return wheels
[ "def", "find_wheels", "(", "projects", ",", "search_dirs", ")", ":", "wheels", "=", "[", "]", "# Look through SEARCH_DIRS for the first suitable wheel. Don't bother", "# about version checking here, as this is simply to get something we can", "# then use to install the correct version.", "for", "project", "in", "projects", ":", "for", "dirname", "in", "search_dirs", ":", "# This relies on only having \"universal\" wheels available.", "# The pattern could be tightened to require -py2.py3-none-any.whl.", "files", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "dirname", ",", "project", "+", "'-*.whl'", ")", ")", "if", "files", ":", "wheels", ".", "append", "(", "os", ".", "path", ".", "abspath", "(", "files", "[", "0", "]", ")", ")", "break", "else", ":", "# We're out of luck, so quit with a suitable error", "logger", ".", "fatal", "(", "'Cannot find a wheel for %s'", "%", "(", "project", ",", ")", ")", "return", "wheels" ]
Find wheels from which we can import PROJECTS. Scan through SEARCH_DIRS for a wheel for each PROJECT in turn. Return a list of the first wheel found for each PROJECT
[ "Find", "wheels", "from", "which", "we", "can", "import", "PROJECTS", "." ]
python
train
cloud-custodian/cloud-custodian
c7n/utils.py
https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/c7n/utils.py#L242-L251
def query_instances(session, client=None, **query): """Return a list of ec2 instances for the query. """ if client is None: client = session.client('ec2') p = client.get_paginator('describe_instances') results = p.paginate(**query) return list(itertools.chain( *[r["Instances"] for r in itertools.chain( *[pp['Reservations'] for pp in results])]))
[ "def", "query_instances", "(", "session", ",", "client", "=", "None", ",", "*", "*", "query", ")", ":", "if", "client", "is", "None", ":", "client", "=", "session", ".", "client", "(", "'ec2'", ")", "p", "=", "client", ".", "get_paginator", "(", "'describe_instances'", ")", "results", "=", "p", ".", "paginate", "(", "*", "*", "query", ")", "return", "list", "(", "itertools", ".", "chain", "(", "*", "[", "r", "[", "\"Instances\"", "]", "for", "r", "in", "itertools", ".", "chain", "(", "*", "[", "pp", "[", "'Reservations'", "]", "for", "pp", "in", "results", "]", ")", "]", ")", ")" ]
Return a list of ec2 instances for the query.
[ "Return", "a", "list", "of", "ec2", "instances", "for", "the", "query", "." ]
python
train
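A usage sketch for `query_instances` with a boto3 session; the region and the filter are examples only, and any extra keyword arguments are handed straight to the `describe_instances` paginator.

.. code-block:: python

    import boto3
    from c7n.utils import query_instances

    session = boto3.Session(region_name='us-east-1')  # example region
    instances = query_instances(
        session,
        Filters=[{'Name': 'instance-state-name', 'Values': ['running']}])
    print(len(instances))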
acristoffers/ahio
ahio/abstract_driver.py
https://github.com/acristoffers/ahio/blob/5d47f1697c173bd1cbdeac930546f97ad8570a38/ahio/abstract_driver.py#L418-L444
def set_pwm_frequency(self, frequency, pin=None): """Sets PWM frequency, if supported by hardware If the driver supports per pin frequency setting, set pin to the desired frequency. If not, passing None means set to all. If only per pin frequency is supported and pin is None, raise RuntimeError. If you're developing a driver, implement _set_pwm_frequency(self, frequency, pin). Raise RuntimeError if pin was set but is not supported by the platform. @arg frequency pwm frequency to be set, in Hz @arg pin if the the driver supports it, the pin that will use `frequency` as pwm frequency. None for all/global. @throw RuntimeError if pin is None on a per pin only hardware, or if it's a valid pin on a global only hardware. @throw KeyError if pin isn't mapped. """ if pin is None: self._set_pwm_frequency(frequency, None) else: pin_id = self._pin_mapping.get(pin, None) if pin_id: self._set_pwm_frequency(frequency, pin_id) else: raise KeyError('Requested pin is not mapped: %s' % pin)
[ "def", "set_pwm_frequency", "(", "self", ",", "frequency", ",", "pin", "=", "None", ")", ":", "if", "pin", "is", "None", ":", "self", ".", "_set_pwm_frequency", "(", "frequency", ",", "None", ")", "else", ":", "pin_id", "=", "self", ".", "_pin_mapping", ".", "get", "(", "pin", ",", "None", ")", "if", "pin_id", ":", "self", ".", "_set_pwm_frequency", "(", "frequency", ",", "pin_id", ")", "else", ":", "raise", "KeyError", "(", "'Requested pin is not mapped: %s'", "%", "pin", ")" ]
Sets PWM frequency, if supported by hardware If the driver supports per pin frequency setting, set pin to the desired frequency. If not, passing None means set to all. If only per pin frequency is supported and pin is None, raise RuntimeError. If you're developing a driver, implement _set_pwm_frequency(self, frequency, pin). Raise RuntimeError if pin was set but is not supported by the platform. @arg frequency pwm frequency to be set, in Hz @arg pin if the the driver supports it, the pin that will use `frequency` as pwm frequency. None for all/global. @throw RuntimeError if pin is None on a per pin only hardware, or if it's a valid pin on a global only hardware. @throw KeyError if pin isn't mapped.
[ "Sets", "PWM", "frequency", "if", "supported", "by", "hardware" ]
python
valid
CLARIAH/grlc
src/pagination.py
https://github.com/CLARIAH/grlc/blob/f5664e34f039010c00ef8ebb69917c05e8ce75d7/src/pagination.py#L12-L35
def buildPaginationHeader(resultCount, resultsPerPage, pageArg, url): '''Build link header for result pagination''' lastPage = resultCount / resultsPerPage if pageArg: page = int(pageArg) next_url = re.sub("page=[0-9]+", "page={}".format(page + 1), url) prev_url = re.sub("page=[0-9]+", "page={}".format(page - 1), url) first_url = re.sub("page=[0-9]+", "page=1", url) last_url = re.sub("page=[0-9]+", "page={}".format(lastPage), url) else: page = 1 next_url = url + "?page=2" prev_url = "" first_url = url + "?page=1" last_url = url + "?page={}".format(lastPage) if page == 1: headerLink = "<{}>; rel=next, <{}>; rel=last".format(next_url, last_url) elif page == lastPage: headerLink = "<{}>; rel=prev, <{}>; rel=first".format(prev_url, first_url) else: headerLink = "<{}>; rel=next, <{}>; rel=prev, <{}>; rel=first, <{}>; rel=last".format(next_url, prev_url, first_url, last_url) return headerLink
[ "def", "buildPaginationHeader", "(", "resultCount", ",", "resultsPerPage", ",", "pageArg", ",", "url", ")", ":", "lastPage", "=", "resultCount", "/", "resultsPerPage", "if", "pageArg", ":", "page", "=", "int", "(", "pageArg", ")", "next_url", "=", "re", ".", "sub", "(", "\"page=[0-9]+\"", ",", "\"page={}\"", ".", "format", "(", "page", "+", "1", ")", ",", "url", ")", "prev_url", "=", "re", ".", "sub", "(", "\"page=[0-9]+\"", ",", "\"page={}\"", ".", "format", "(", "page", "-", "1", ")", ",", "url", ")", "first_url", "=", "re", ".", "sub", "(", "\"page=[0-9]+\"", ",", "\"page=1\"", ",", "url", ")", "last_url", "=", "re", ".", "sub", "(", "\"page=[0-9]+\"", ",", "\"page={}\"", ".", "format", "(", "lastPage", ")", ",", "url", ")", "else", ":", "page", "=", "1", "next_url", "=", "url", "+", "\"?page=2\"", "prev_url", "=", "\"\"", "first_url", "=", "url", "+", "\"?page=1\"", "last_url", "=", "url", "+", "\"?page={}\"", ".", "format", "(", "lastPage", ")", "if", "page", "==", "1", ":", "headerLink", "=", "\"<{}>; rel=next, <{}>; rel=last\"", ".", "format", "(", "next_url", ",", "last_url", ")", "elif", "page", "==", "lastPage", ":", "headerLink", "=", "\"<{}>; rel=prev, <{}>; rel=first\"", ".", "format", "(", "prev_url", ",", "first_url", ")", "else", ":", "headerLink", "=", "\"<{}>; rel=next, <{}>; rel=prev, <{}>; rel=first, <{}>; rel=last\"", ".", "format", "(", "next_url", ",", "prev_url", ",", "first_url", ",", "last_url", ")", "return", "headerLink" ]
Build link header for result pagination
[ "Build", "link", "header", "for", "result", "pagination" ]
python
train
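To make the Link header shape concrete, here is a call for page 3 of 100 results at 10 per page; the import path is an assumption. Note that `lastPage` comes from a plain division, so it renders as `10` under Python 2 but `10.0` under Python 3.

.. code-block:: python

    from pagination import buildPaginationHeader  # assumed import path

    header = buildPaginationHeader(
        resultCount=100, resultsPerPage=10, pageArg="3",
        url="http://example.org/api/query?page=3")
    print(header)
    # <http://example.org/api/query?page=4>; rel=next,
    # <http://example.org/api/query?page=2>; rel=prev,
    # <http://example.org/api/query?page=1>; rel=first,
    # <http://example.org/api/query?page=10>; rel=last  (10.0 under Python 3)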
mongodb/mongo-python-driver
gridfs/__init__.py
https://github.com/mongodb/mongo-python-driver/blob/c29c21449e3aae74154207058cf85fd94018d4cd/gridfs/__init__.py#L578-L619
def upload_from_stream(self, filename, source, chunk_size_bytes=None, metadata=None, session=None): """Uploads a user file to a GridFS bucket. Reads the contents of the user file from `source` and uploads it to the file `filename`. Source can be a string or file-like object. For example:: my_db = MongoClient().test fs = GridFSBucket(my_db) file_id = fs.upload_from_stream( "test_file", "data I want to store!", chunk_size_bytes=4, metadata={"contentType": "text/plain"}) Returns the _id of the uploaded file. Raises :exc:`~gridfs.errors.NoFile` if no such version of that file exists. Raises :exc:`~ValueError` if `filename` is not a string. :Parameters: - `filename`: The name of the file to upload. - `source`: The source stream of the content to be uploaded. Must be a file-like object that implements :meth:`read` or a string. - `chunk_size_bytes` (options): The number of bytes per chunk of this file. Defaults to the chunk_size_bytes of :class:`GridFSBucket`. - `metadata` (optional): User data for the 'metadata' field of the files collection document. If not provided the metadata field will be omitted from the files collection document. - `session` (optional): a :class:`~pymongo.client_session.ClientSession` .. versionchanged:: 3.6 Added ``session`` parameter. """ with self.open_upload_stream( filename, chunk_size_bytes, metadata, session=session) as gin: gin.write(source) return gin._id
[ "def", "upload_from_stream", "(", "self", ",", "filename", ",", "source", ",", "chunk_size_bytes", "=", "None", ",", "metadata", "=", "None", ",", "session", "=", "None", ")", ":", "with", "self", ".", "open_upload_stream", "(", "filename", ",", "chunk_size_bytes", ",", "metadata", ",", "session", "=", "session", ")", "as", "gin", ":", "gin", ".", "write", "(", "source", ")", "return", "gin", ".", "_id" ]
Uploads a user file to a GridFS bucket. Reads the contents of the user file from `source` and uploads it to the file `filename`. Source can be a string or file-like object. For example:: my_db = MongoClient().test fs = GridFSBucket(my_db) file_id = fs.upload_from_stream( "test_file", "data I want to store!", chunk_size_bytes=4, metadata={"contentType": "text/plain"}) Returns the _id of the uploaded file. Raises :exc:`~gridfs.errors.NoFile` if no such version of that file exists. Raises :exc:`~ValueError` if `filename` is not a string. :Parameters: - `filename`: The name of the file to upload. - `source`: The source stream of the content to be uploaded. Must be a file-like object that implements :meth:`read` or a string. - `chunk_size_bytes` (options): The number of bytes per chunk of this file. Defaults to the chunk_size_bytes of :class:`GridFSBucket`. - `metadata` (optional): User data for the 'metadata' field of the files collection document. If not provided the metadata field will be omitted from the files collection document. - `session` (optional): a :class:`~pymongo.client_session.ClientSession` .. versionchanged:: 3.6 Added ``session`` parameter.
[ "Uploads", "a", "user", "file", "to", "a", "GridFS", "bucket", "." ]
python
train
SeleniumHQ/selenium
py/selenium/webdriver/remote/webdriver.py
https://github.com/SeleniumHQ/selenium/blob/df40c28b41d4b3953f90eaff84838a9ac052b84a/py/selenium/webdriver/remote/webdriver.py#L1016-L1045
def find_elements(self, by=By.ID, value=None): """ Find elements given a By strategy and locator. Prefer the find_elements_by_* methods when possible. :Usage: :: elements = driver.find_elements(By.CLASS_NAME, 'foo') :rtype: list of WebElement """ if self.w3c: if by == By.ID: by = By.CSS_SELECTOR value = '[id="%s"]' % value elif by == By.TAG_NAME: by = By.CSS_SELECTOR elif by == By.CLASS_NAME: by = By.CSS_SELECTOR value = ".%s" % value elif by == By.NAME: by = By.CSS_SELECTOR value = '[name="%s"]' % value # Return empty list if driver returns null # See https://github.com/SeleniumHQ/selenium/issues/4555 return self.execute(Command.FIND_ELEMENTS, { 'using': by, 'value': value})['value'] or []
[ "def", "find_elements", "(", "self", ",", "by", "=", "By", ".", "ID", ",", "value", "=", "None", ")", ":", "if", "self", ".", "w3c", ":", "if", "by", "==", "By", ".", "ID", ":", "by", "=", "By", ".", "CSS_SELECTOR", "value", "=", "'[id=\"%s\"]'", "%", "value", "elif", "by", "==", "By", ".", "TAG_NAME", ":", "by", "=", "By", ".", "CSS_SELECTOR", "elif", "by", "==", "By", ".", "CLASS_NAME", ":", "by", "=", "By", ".", "CSS_SELECTOR", "value", "=", "\".%s\"", "%", "value", "elif", "by", "==", "By", ".", "NAME", ":", "by", "=", "By", ".", "CSS_SELECTOR", "value", "=", "'[name=\"%s\"]'", "%", "value", "# Return empty list if driver returns null", "# See https://github.com/SeleniumHQ/selenium/issues/4555", "return", "self", ".", "execute", "(", "Command", ".", "FIND_ELEMENTS", ",", "{", "'using'", ":", "by", ",", "'value'", ":", "value", "}", ")", "[", "'value'", "]", "or", "[", "]" ]
Find elements given a By strategy and locator. Prefer the find_elements_by_* methods when possible. :Usage: :: elements = driver.find_elements(By.CLASS_NAME, 'foo') :rtype: list of WebElement
[ "Find", "elements", "given", "a", "By", "strategy", "and", "locator", ".", "Prefer", "the", "find_elements_by_", "*", "methods", "when", "possible", "." ]
python
train
RudolfCardinal/pythonlib
cardinal_pythonlib/sqlalchemy/orm_query.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sqlalchemy/orm_query.py#L202-L208
def count_star(self) -> int: """ Implements the ``COUNT(*)`` specialization. """ count_query = (self.statement.with_only_columns([func.count()]) .order_by(None)) return self.session.execute(count_query).scalar()
[ "def", "count_star", "(", "self", ")", "->", "int", ":", "count_query", "=", "(", "self", ".", "statement", ".", "with_only_columns", "(", "[", "func", ".", "count", "(", ")", "]", ")", ".", "order_by", "(", "None", ")", ")", "return", "self", ".", "session", ".", "execute", "(", "count_query", ")", ".", "scalar", "(", ")" ]
Implements the ``COUNT(*)`` specialization.
[ "Implements", "the", "COUNT", "(", "*", ")", "specialization", "." ]
python
train
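For context, the same COUNT(*) pattern written as a free function against a SQLAlchemy 1.x ORM query; `count_star` above packages exactly these two lines as a method, and the `User` query in the comment is hypothetical.

.. code-block:: python

    from sqlalchemy import func

    def count_star(session, query):
        """COUNT(*) over an ORM query without fetching any rows."""
        stmt = query.statement.with_only_columns([func.count()]).order_by(None)
        return session.execute(stmt).scalar()

    # usage with a hypothetical model:
    # n = count_star(session, session.query(User).filter(User.active == True))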
openxc/openxc-python
openxc/sinks/notifier.py
https://github.com/openxc/openxc-python/blob/4becb4a6310bd658c125195ef6ffea4deaf7d7e7/openxc/sinks/notifier.py#L33-L40
def unregister(self, measurement_class, callback): """Stop notifying ``callback`` of new values of ``measurement_class``. If the callback wasn't previously registered, this method will have no effect. """ self.callbacks[Measurement.name_from_class(measurement_class) ].remove(callback)
[ "def", "unregister", "(", "self", ",", "measurement_class", ",", "callback", ")", ":", "self", ".", "callbacks", "[", "Measurement", ".", "name_from_class", "(", "measurement_class", ")", "]", ".", "remove", "(", "callback", ")" ]
Stop notifying ``callback`` of new values of ``measurement_class``. If the callback wasn't previously registered, this method will have no effect.
[ "Stop", "notifying", "callback", "of", "new", "values", "of", "measurement_class", "." ]
python
train
moonso/vcftoolbox
vcftoolbox/header_parser.py
https://github.com/moonso/vcftoolbox/blob/438fb1d85a83812c389774b94802eb5921c89e3a/vcftoolbox/header_parser.py#L138-L240
def parse_meta_data(self, line): """Parse a vcf metadataline""" line = line.rstrip() logger.debug("Parsing metadata line:{0}".format(line)) line_info = line[2:].split('=') match = False if line_info[0] == 'fileformat': logger.debug("Parsing fileformat") try: self.fileformat = line_info[1] logger.debug("Found fileformat {0}".format(self.fileformat)) except IndexError: raise SyntaxError("fileformat must have a value") elif line_info[0] == 'INFO': match = self.info_pattern.match(line) if not match: raise SyntaxError("One of the INFO lines is malformed:{0}".format(line)) matches = [ match.group('id'), match.group('number'), match.group('type'), match.group('desc') ] # extra_info is a dictionary to check the metadata about the INFO values: self.extra_info[matches[0]] = dict( zip(self.header_keys['info'][1:], matches[1:]) ) info_line = dict(list(zip(self.header_keys['info'],matches))) if len(info_line['Description'].split('Format:')) > 1: info_line['Format'] = [ info.strip() for info in info_line['Description'].split('Format:') ][-1] self.info_lines.append(info_line) # Store the vep columns: if info_line['ID'] == 'CSQ': self.vep_columns = info_line.get('Format', '').split('|') if info_line['ID'] == 'ANN': self.snpeff_columns = [ annotation.strip("' ") for annotation in info_line.get('Description', '').split(':')[-1].split('|')] self.info_dict[match.group('id')] = line elif line_info[0] == 'FILTER': match = self.filter_pattern.match(line) if not match: raise SyntaxError("One of the FILTER lines is malformed: {0}".format(line)) matches = [match.group('id'), match.group('desc')] self.filter_lines.append(dict( list(zip(self.header_keys['filt'],matches))) ) self.filter_dict[match.group('id')] = line elif line_info[0] == 'contig': match = self.contig_pattern.match(line) if not match: print() raise SyntaxError("One of the contig lines is malformed: {0}".format(line)) matches = [match.group('id'), match.group('length')] self.contig_lines.append(dict( list(zip(self.header_keys['contig'],matches))) ) self.contig_dict[match.group('id')] = line elif line_info[0] == 'FORMAT': match = self.format_pattern.match(line) if not match: raise SyntaxError("One of the FORMAT lines is malformed: {0}".format(line)) matches = [ match.group('id'), match.group('number'), match.group('type'), match.group('desc') ] self.format_lines.append(dict( list(zip(self.header_keys['form'],matches))) ) self.format_dict[match.group('id')] = line elif line_info[0] == 'ALT': match = self.alt_pattern.match(line) if not match: raise SyntaxError("One of the ALT lines is malformed: {0}".format(line)) matches = [match.group('id'), match.group('desc')] self.alt_lines.append(dict( list(zip(self.header_keys['alt'],matches))) ) self.alt_dict[match.group('id')] = line else: match = self.meta_pattern.match(line) if not match: raise SyntaxError("One of the meta data lines is malformed: {0}".format(line)) self.other_lines.append({match.group('key'): match.group('val')}) self.other_dict[match.group('key')] = line
[ "def", "parse_meta_data", "(", "self", ",", "line", ")", ":", "line", "=", "line", ".", "rstrip", "(", ")", "logger", ".", "debug", "(", "\"Parsing metadata line:{0}\"", ".", "format", "(", "line", ")", ")", "line_info", "=", "line", "[", "2", ":", "]", ".", "split", "(", "'='", ")", "match", "=", "False", "if", "line_info", "[", "0", "]", "==", "'fileformat'", ":", "logger", ".", "debug", "(", "\"Parsing fileformat\"", ")", "try", ":", "self", ".", "fileformat", "=", "line_info", "[", "1", "]", "logger", ".", "debug", "(", "\"Found fileformat {0}\"", ".", "format", "(", "self", ".", "fileformat", ")", ")", "except", "IndexError", ":", "raise", "SyntaxError", "(", "\"fileformat must have a value\"", ")", "elif", "line_info", "[", "0", "]", "==", "'INFO'", ":", "match", "=", "self", ".", "info_pattern", ".", "match", "(", "line", ")", "if", "not", "match", ":", "raise", "SyntaxError", "(", "\"One of the INFO lines is malformed:{0}\"", ".", "format", "(", "line", ")", ")", "matches", "=", "[", "match", ".", "group", "(", "'id'", ")", ",", "match", ".", "group", "(", "'number'", ")", ",", "match", ".", "group", "(", "'type'", ")", ",", "match", ".", "group", "(", "'desc'", ")", "]", "# extra_info is a dictionary to check the metadata about the INFO values:", "self", ".", "extra_info", "[", "matches", "[", "0", "]", "]", "=", "dict", "(", "zip", "(", "self", ".", "header_keys", "[", "'info'", "]", "[", "1", ":", "]", ",", "matches", "[", "1", ":", "]", ")", ")", "info_line", "=", "dict", "(", "list", "(", "zip", "(", "self", ".", "header_keys", "[", "'info'", "]", ",", "matches", ")", ")", ")", "if", "len", "(", "info_line", "[", "'Description'", "]", ".", "split", "(", "'Format:'", ")", ")", ">", "1", ":", "info_line", "[", "'Format'", "]", "=", "[", "info", ".", "strip", "(", ")", "for", "info", "in", "info_line", "[", "'Description'", "]", ".", "split", "(", "'Format:'", ")", "]", "[", "-", "1", "]", "self", ".", "info_lines", ".", "append", "(", "info_line", ")", "# Store the vep columns:", "if", "info_line", "[", "'ID'", "]", "==", "'CSQ'", ":", "self", ".", "vep_columns", "=", "info_line", ".", "get", "(", "'Format'", ",", "''", ")", ".", "split", "(", "'|'", ")", "if", "info_line", "[", "'ID'", "]", "==", "'ANN'", ":", "self", ".", "snpeff_columns", "=", "[", "annotation", ".", "strip", "(", "\"' \"", ")", "for", "annotation", "in", "info_line", ".", "get", "(", "'Description'", ",", "''", ")", ".", "split", "(", "':'", ")", "[", "-", "1", "]", ".", "split", "(", "'|'", ")", "]", "self", ".", "info_dict", "[", "match", ".", "group", "(", "'id'", ")", "]", "=", "line", "elif", "line_info", "[", "0", "]", "==", "'FILTER'", ":", "match", "=", "self", ".", "filter_pattern", ".", "match", "(", "line", ")", "if", "not", "match", ":", "raise", "SyntaxError", "(", "\"One of the FILTER lines is malformed: {0}\"", ".", "format", "(", "line", ")", ")", "matches", "=", "[", "match", ".", "group", "(", "'id'", ")", ",", "match", ".", "group", "(", "'desc'", ")", "]", "self", ".", "filter_lines", ".", "append", "(", "dict", "(", "list", "(", "zip", "(", "self", ".", "header_keys", "[", "'filt'", "]", ",", "matches", ")", ")", ")", ")", "self", ".", "filter_dict", "[", "match", ".", "group", "(", "'id'", ")", "]", "=", "line", "elif", "line_info", "[", "0", "]", "==", "'contig'", ":", "match", "=", "self", ".", "contig_pattern", ".", "match", "(", "line", ")", "if", "not", "match", ":", "print", "(", ")", "raise", "SyntaxError", "(", "\"One of the contig lines is malformed: {0}\"", ".", "format", 
"(", "line", ")", ")", "matches", "=", "[", "match", ".", "group", "(", "'id'", ")", ",", "match", ".", "group", "(", "'length'", ")", "]", "self", ".", "contig_lines", ".", "append", "(", "dict", "(", "list", "(", "zip", "(", "self", ".", "header_keys", "[", "'contig'", "]", ",", "matches", ")", ")", ")", ")", "self", ".", "contig_dict", "[", "match", ".", "group", "(", "'id'", ")", "]", "=", "line", "elif", "line_info", "[", "0", "]", "==", "'FORMAT'", ":", "match", "=", "self", ".", "format_pattern", ".", "match", "(", "line", ")", "if", "not", "match", ":", "raise", "SyntaxError", "(", "\"One of the FORMAT lines is malformed: {0}\"", ".", "format", "(", "line", ")", ")", "matches", "=", "[", "match", ".", "group", "(", "'id'", ")", ",", "match", ".", "group", "(", "'number'", ")", ",", "match", ".", "group", "(", "'type'", ")", ",", "match", ".", "group", "(", "'desc'", ")", "]", "self", ".", "format_lines", ".", "append", "(", "dict", "(", "list", "(", "zip", "(", "self", ".", "header_keys", "[", "'form'", "]", ",", "matches", ")", ")", ")", ")", "self", ".", "format_dict", "[", "match", ".", "group", "(", "'id'", ")", "]", "=", "line", "elif", "line_info", "[", "0", "]", "==", "'ALT'", ":", "match", "=", "self", ".", "alt_pattern", ".", "match", "(", "line", ")", "if", "not", "match", ":", "raise", "SyntaxError", "(", "\"One of the ALT lines is malformed: {0}\"", ".", "format", "(", "line", ")", ")", "matches", "=", "[", "match", ".", "group", "(", "'id'", ")", ",", "match", ".", "group", "(", "'desc'", ")", "]", "self", ".", "alt_lines", ".", "append", "(", "dict", "(", "list", "(", "zip", "(", "self", ".", "header_keys", "[", "'alt'", "]", ",", "matches", ")", ")", ")", ")", "self", ".", "alt_dict", "[", "match", ".", "group", "(", "'id'", ")", "]", "=", "line", "else", ":", "match", "=", "self", ".", "meta_pattern", ".", "match", "(", "line", ")", "if", "not", "match", ":", "raise", "SyntaxError", "(", "\"One of the meta data lines is malformed: {0}\"", ".", "format", "(", "line", ")", ")", "self", ".", "other_lines", ".", "append", "(", "{", "match", ".", "group", "(", "'key'", ")", ":", "match", ".", "group", "(", "'val'", ")", "}", ")", "self", ".", "other_dict", "[", "match", ".", "group", "(", "'key'", ")", "]", "=", "line" ]
Parse a vcf metadataline
[ "Parse", "a", "vcf", "metadataline" ]
python
train
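The method is fed VCF meta lines such as `##INFO=<...>`; each branch keys off the text between `##` and the first `=`, and named regex groups pull out the fields stored in `info_lines`, `filter_lines`, and so on. A minimal sketch of the INFO case with an explicit pattern (the real `info_pattern` lives elsewhere in the class, so the regex below is only an approximation of it):

.. code-block:: python

    import re

    info_pattern = re.compile(
        r'\#\#INFO=<ID=(?P<id>[^,]+),Number=(?P<number>[^,]+),'
        r'Type=(?P<type>[^,]+),Description="(?P<desc>[^"]*)".*>')

    line = '##INFO=<ID=DP,Number=1,Type=Integer,Description="Total Depth">'
    m = info_pattern.match(line)
    print(m.group('id'), m.group('number'), m.group('type'), m.group('desc'))
    # DP 1 Integer Total Depth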
xlorepdarkhelm/config
xdh/_config.py
https://github.com/xlorepdarkhelm/config/blob/c973d02f6500c7719441e016bc9c3df84104e392/xdh/_config.py#L288-L302
def _attr_data_(self): "Special property containing the memoized data." try: return self.__attr_data except AttributeError: self.__attr_data = type( ''.join([type(self).__name__, 'EmptyData']), (), { '__module__': type(self).__module__, '__slots__': () } )() return self.__attr_data
[ "def", "_attr_data_", "(", "self", ")", ":", "try", ":", "return", "self", ".", "__attr_data", "except", "AttributeError", ":", "self", ".", "__attr_data", "=", "type", "(", "''", ".", "join", "(", "[", "type", "(", "self", ")", ".", "__name__", ",", "'EmptyData'", "]", ")", ",", "(", ")", ",", "{", "'__module__'", ":", "type", "(", "self", ")", ".", "__module__", ",", "'__slots__'", ":", "(", ")", "}", ")", "(", ")", "return", "self", ".", "__attr_data" ]
Special property containing the memoized data.
[ "Special", "property", "containing", "the", "memoized", "data", "." ]
python
train
ml4ai/delphi
delphi/translators/for2py/arrays.py
https://github.com/ml4ai/delphi/blob/6d03d8aafeab99610387c51b89c99738ff2abbe3/delphi/translators/for2py/arrays.py#L152-L162
def all_subs(bounds): """given a list of tuples specifying the bounds of an array, all_subs() returns a list of all the tuples of subscripts for that array.""" idx_list = [] for i in range(len(bounds)): this_dim = bounds[i] lo,hi = this_dim[0],this_dim[1] # bounds for this dimension this_dim_idxs = range(lo,hi+1) # indexes for this dimension idx_list.append(this_dim_idxs) return idx2subs(idx_list)
[ "def", "all_subs", "(", "bounds", ")", ":", "idx_list", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "bounds", ")", ")", ":", "this_dim", "=", "bounds", "[", "i", "]", "lo", ",", "hi", "=", "this_dim", "[", "0", "]", ",", "this_dim", "[", "1", "]", "# bounds for this dimension", "this_dim_idxs", "=", "range", "(", "lo", ",", "hi", "+", "1", ")", "# indexes for this dimension", "idx_list", ".", "append", "(", "this_dim_idxs", ")", "return", "idx2subs", "(", "idx_list", ")" ]
given a list of tuples specifying the bounds of an array, all_subs() returns a list of all the tuples of subscripts for that array.
[ "given", "a", "list", "of", "tuples", "specifying", "the", "bounds", "of", "an", "array", "all_subs", "()", "returns", "a", "list", "of", "all", "the", "tuples", "of", "subscripts", "for", "that", "array", "." ]
python
train
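To see what `all_subs` is meant to produce: the subscript tuples are the Cartesian product of each dimension's index range, assuming the `idx2subs` helper it delegates to forms exactly that product. A standalone equivalent:

.. code-block:: python

    from itertools import product

    def all_subs(bounds):
        """Cartesian product of the index range of every dimension."""
        ranges = [range(b[0], b[1] + 1) for b in bounds]
        return list(product(*ranges))

    print(all_subs([(1, 2), (1, 3)]))
    # [(1, 1), (1, 2), (1, 3), (2, 1), (2, 2), (2, 3)]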
EventRegistry/event-registry-python
eventregistry/examples/TopicPagesExamples.py
https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/examples/TopicPagesExamples.py#L30-L63
def createTopicPage2(): """ create a topic page directly, set the article threshold, restrict results to set concepts and keywords """ topic = TopicPage(er) topic.addCategory(er.getCategoryUri("renewable"), 50) topic.addKeyword("renewable energy", 30) topic.addConcept(er.getConceptUri("biofuel"), 50) topic.addConcept(er.getConceptUri("solar energy"), 50) # require that the results will mention at least one of the concepts and keywords specified # (even though they might have the category about renewable energy, that will not be enough # for an article to be among the results) topic.restrictToSetConceptsAndKeywords(True) # limit results to English, German and Spanish results topic.setLanguages(["eng", "deu", "spa"]) # get results that are at most 3 days old topic.setMaxDaysBack(3) # require that the articles that will be returned should get at least a total score of 30 points or more # based on the specified list of conditions topic.setArticleThreshold(30) # get first page of articles sorted by date (from most recent backward) to the topic page arts1 = topic.getArticles(page=1, sortBy="date", returnInfo=ReturnInfo( articleInfo = ArticleInfoFlags(concepts=True, categories=True) )) for art in arts1.get("articles", {}).get("results", []): print(art)
[ "def", "createTopicPage2", "(", ")", ":", "topic", "=", "TopicPage", "(", "er", ")", "topic", ".", "addCategory", "(", "er", ".", "getCategoryUri", "(", "\"renewable\"", ")", ",", "50", ")", "topic", ".", "addKeyword", "(", "\"renewable energy\"", ",", "30", ")", "topic", ".", "addConcept", "(", "er", ".", "getConceptUri", "(", "\"biofuel\"", ")", ",", "50", ")", "topic", ".", "addConcept", "(", "er", ".", "getConceptUri", "(", "\"solar energy\"", ")", ",", "50", ")", "# require that the results will mention at least one of the concepts and keywords specified", "# (even though they might have the category about renewable energy, that will not be enough", "# for an article to be among the results)", "topic", ".", "restrictToSetConceptsAndKeywords", "(", "True", ")", "# limit results to English, German and Spanish results", "topic", ".", "setLanguages", "(", "[", "\"eng\"", ",", "\"deu\"", ",", "\"spa\"", "]", ")", "# get results that are at most 3 days old", "topic", ".", "setMaxDaysBack", "(", "3", ")", "# require that the articles that will be returned should get at least a total score of 30 points or more", "# based on the specified list of conditions", "topic", ".", "setArticleThreshold", "(", "30", ")", "# get first page of articles sorted by date (from most recent backward) to the topic page", "arts1", "=", "topic", ".", "getArticles", "(", "page", "=", "1", ",", "sortBy", "=", "\"date\"", ",", "returnInfo", "=", "ReturnInfo", "(", "articleInfo", "=", "ArticleInfoFlags", "(", "concepts", "=", "True", ",", "categories", "=", "True", ")", ")", ")", "for", "art", "in", "arts1", ".", "get", "(", "\"articles\"", ",", "{", "}", ")", ".", "get", "(", "\"results\"", ",", "[", "]", ")", ":", "print", "(", "art", ")" ]
create a topic page directly, set the article threshold, restrict results to set concepts and keywords
[ "create", "a", "topic", "page", "directly", "set", "the", "article", "threshold", "restrict", "results", "to", "set", "concepts", "and", "keywords" ]
python
train
lepture/terminal
terminal/log.py
https://github.com/lepture/terminal/blob/5226d1cac53077f12624aa51f64de7b5b05d9cb8/terminal/log.py#L100-L113
def verbose(self): """ Make it the verbose log. A verbose log can be only shown when user want to see more logs. It works as:: log.verbose.warn('this is a verbose warn') log.verbose.info('this is a verbose info') """ log = copy.copy(self) log._is_verbose = True return log
[ "def", "verbose", "(", "self", ")", ":", "log", "=", "copy", ".", "copy", "(", "self", ")", "log", ".", "_is_verbose", "=", "True", "return", "log" ]
Make it the verbose log. A verbose log can be only shown when user want to see more logs. It works as:: log.verbose.warn('this is a verbose warn') log.verbose.info('this is a verbose info')
[ "Make", "it", "the", "verbose", "log", "." ]
python
train
evansde77/dockerstache
src/dockerstache/templates.py
https://github.com/evansde77/dockerstache/blob/929c102e9fffde322dbf17f8e69533a00976aacb/src/dockerstache/templates.py#L59-L81
def find_templates(input_dir): """ _find_templates_ traverse the input_dir structure and return a list of template files ending with .mustache :param input_dir: Path to start recursive search for mustache templates :returns: List of file paths corresponding to templates """ templates = [] def template_finder(result, dirname): for obj in os.listdir(dirname): if obj.endswith('.mustache'): result.append(os.path.join(dirname, obj)) dir_visitor( input_dir, functools.partial(template_finder, templates) ) return templates
[ "def", "find_templates", "(", "input_dir", ")", ":", "templates", "=", "[", "]", "def", "template_finder", "(", "result", ",", "dirname", ")", ":", "for", "obj", "in", "os", ".", "listdir", "(", "dirname", ")", ":", "if", "obj", ".", "endswith", "(", "'.mustache'", ")", ":", "result", ".", "append", "(", "os", ".", "path", ".", "join", "(", "dirname", ",", "obj", ")", ")", "dir_visitor", "(", "input_dir", ",", "functools", ".", "partial", "(", "template_finder", ",", "templates", ")", ")", "return", "templates" ]
_find_templates_ traverse the input_dir structure and return a list of template files ending with .mustache :param input_dir: Path to start recursive search for mustache templates :returns: List of file paths corresponding to templates
[ "_find_templates_" ]
python
train
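For comparison, the same search written with `os.walk`, which is roughly what the `dir_visitor`/`functools.partial` combination amounts to, assuming `dir_visitor` recurses over subdirectories:

.. code-block:: python

    import os

    def find_templates(input_dir):
        """Collect every *.mustache file under input_dir, recursively."""
        templates = []
        for dirname, _subdirs, files in os.walk(input_dir):
            for name in files:
                if name.endswith('.mustache'):
                    templates.append(os.path.join(dirname, name))
        return templates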
SuperCowPowers/workbench
workbench/workers/view_customer.py
https://github.com/SuperCowPowers/workbench/blob/710232756dd717f734253315e3d0b33c9628dafb/workbench/workers/view_customer.py#L7-L13
def execute(self, input_data): ''' Execute Method ''' # View on all the meta data files in the sample fields = ['filename', 'md5', 'length', 'customer', 'import_time', 'type_tag'] view = {key:input_data['meta'][key] for key in fields} return view
[ "def", "execute", "(", "self", ",", "input_data", ")", ":", "# View on all the meta data files in the sample", "fields", "=", "[", "'filename'", ",", "'md5'", ",", "'length'", ",", "'customer'", ",", "'import_time'", ",", "'type_tag'", "]", "view", "=", "{", "key", ":", "input_data", "[", "'meta'", "]", "[", "key", "]", "for", "key", "in", "fields", "}", "return", "view" ]
Execute Method
[ "Execute", "Method" ]
python
train
ambitioninc/django-manager-utils
manager_utils/manager_utils.py
https://github.com/ambitioninc/django-manager-utils/blob/1f111cb4846ed6cd6b78eca320a9dcc27826bf97/manager_utils/manager_utils.py#L375-L398
def sync(queryset, model_objs, unique_fields, update_fields=None, **kwargs): """ Performs a sync operation on a queryset, making the contents of the queryset match the contents of model_objs. This function calls bulk_upsert underneath the hood with sync=True. :type model_objs: list of :class:`Models<django:django.db.models.Model>` :param model_objs: The models to sync :type update_fields: list of str :param unique_fields: A list of fields that are used to determine if an object in objs matches a model from the queryset. :type update_fields: list of str :param update_fields: A list of fields used from the objects in objs as fields when updating existing models. If None, this function will only perform a bulk create for model_objs that do not currently exist in the database. :type native: bool :param native: A flag specifying whether to use postgres insert on conflict (upsert) when performing bulk upsert. """ return bulk_upsert(queryset, model_objs, unique_fields, update_fields=update_fields, sync=True, **kwargs)
[ "def", "sync", "(", "queryset", ",", "model_objs", ",", "unique_fields", ",", "update_fields", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "bulk_upsert", "(", "queryset", ",", "model_objs", ",", "unique_fields", ",", "update_fields", "=", "update_fields", ",", "sync", "=", "True", ",", "*", "*", "kwargs", ")" ]
Performs a sync operation on a queryset, making the contents of the queryset match the contents of model_objs. This function calls bulk_upsert underneath the hood with sync=True. :type model_objs: list of :class:`Models<django:django.db.models.Model>` :param model_objs: The models to sync :type update_fields: list of str :param unique_fields: A list of fields that are used to determine if an object in objs matches a model from the queryset. :type update_fields: list of str :param update_fields: A list of fields used from the objects in objs as fields when updating existing models. If None, this function will only perform a bulk create for model_objs that do not currently exist in the database. :type native: bool :param native: A flag specifying whether to use postgres insert on conflict (upsert) when performing bulk upsert.
[ "Performs", "a", "sync", "operation", "on", "a", "queryset", "making", "the", "contents", "of", "the", "queryset", "match", "the", "contents", "of", "model_objs", "." ]
python
train
biocore/burrito-fillings
bfillings/mothur.py
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/mothur.py#L252-L275
def _derive_log_path(self): """Guess logfile path produced by Mothur This method checks the working directory for log files generated by Mothur. It will raise an ApplicationError if no log file can be found. Mothur generates log files named in a nondeterministic way, using the current time. We return the log file with the most recent time, although this may lead to incorrect log file detection if you are running many instances of mothur simultaneously. """ filenames = listdir(self.WorkingDir) lognames = [ x for x in filenames if re.match( "^mothur\.\d+\.logfile$", x)] if not lognames: raise ApplicationError( 'No log file detected in directory %s. Contents: \n\t%s' % ( input_dir, '\n\t'.join(possible_logfiles))) most_recent_logname = sorted(lognames, reverse=True)[0] return path.join(self.WorkingDir, most_recent_logname)
[ "def", "_derive_log_path", "(", "self", ")", ":", "filenames", "=", "listdir", "(", "self", ".", "WorkingDir", ")", "lognames", "=", "[", "x", "for", "x", "in", "filenames", "if", "re", ".", "match", "(", "\"^mothur\\.\\d+\\.logfile$\"", ",", "x", ")", "]", "if", "not", "lognames", ":", "raise", "ApplicationError", "(", "'No log file detected in directory %s. Contents: \\n\\t%s'", "%", "(", "input_dir", ",", "'\\n\\t'", ".", "join", "(", "possible_logfiles", ")", ")", ")", "most_recent_logname", "=", "sorted", "(", "lognames", ",", "reverse", "=", "True", ")", "[", "0", "]", "return", "path", ".", "join", "(", "self", ".", "WorkingDir", ",", "most_recent_logname", ")" ]
Guess logfile path produced by Mothur This method checks the working directory for log files generated by Mothur. It will raise an ApplicationError if no log file can be found. Mothur generates log files named in a nondeterministic way, using the current time. We return the log file with the most recent time, although this may lead to incorrect log file detection if you are running many instances of mothur simultaneously.
[ "Guess", "logfile", "path", "produced", "by", "Mothur" ]
python
train
dnanexus/dx-toolkit
src/python/dxpy/workflow_builder.py
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/workflow_builder.py#L140-L163
def _version_exists(json_spec, name=None, version=None): """ Returns True if a global workflow with the given name and version already exists in the platform and the user has developer rights to the workflow. "name" and "version" can be passed if we already made a "describe" API call on the global workflow and so know the requested name and version already exists. """ requested_name = json_spec['name'] requested_version = json_spec['version'] if requested_name == name and requested_version == version: return True else: try: desc_output = dxpy.api.global_workflow_describe('globalworkflow-' + json_spec['name'], alias=json_spec['version'], input_params={"fields": {"name": True, "version": True}}) return desc_output['name'] == json_spec['name'] and desc_output['version'] == json_spec['version'] except dxpy.exceptions.DXAPIError: return False except: raise
[ "def", "_version_exists", "(", "json_spec", ",", "name", "=", "None", ",", "version", "=", "None", ")", ":", "requested_name", "=", "json_spec", "[", "'name'", "]", "requested_version", "=", "json_spec", "[", "'version'", "]", "if", "requested_name", "==", "name", "and", "requested_version", "==", "version", ":", "return", "True", "else", ":", "try", ":", "desc_output", "=", "dxpy", ".", "api", ".", "global_workflow_describe", "(", "'globalworkflow-'", "+", "json_spec", "[", "'name'", "]", ",", "alias", "=", "json_spec", "[", "'version'", "]", ",", "input_params", "=", "{", "\"fields\"", ":", "{", "\"name\"", ":", "True", ",", "\"version\"", ":", "True", "}", "}", ")", "return", "desc_output", "[", "'name'", "]", "==", "json_spec", "[", "'name'", "]", "and", "desc_output", "[", "'version'", "]", "==", "json_spec", "[", "'version'", "]", "except", "dxpy", ".", "exceptions", ".", "DXAPIError", ":", "return", "False", "except", ":", "raise" ]
Returns True if a global workflow with the given name and version already exists in the platform and the user has developer rights to the workflow. "name" and "version" can be passed if we already made a "describe" API call on the global workflow and so know the requested name and version already exists.
[ "Returns", "True", "if", "a", "global", "workflow", "with", "the", "given", "name", "and", "version", "already", "exists", "in", "the", "platform", "and", "the", "user", "has", "developer", "rights", "to", "the", "workflow", ".", "name", "and", "version", "can", "be", "passed", "if", "we", "already", "made", "a", "describe", "API", "call", "on", "the", "global", "workflow", "and", "so", "know", "the", "requested", "name", "and", "version", "already", "exists", "." ]
python
train
chaoss/grimoirelab-cereslib
cereslib/events/events.py
https://github.com/chaoss/grimoirelab-cereslib/blob/5110e6ca490a4f24bec3124286ebf51fd4e08bdd/cereslib/events/events.py#L464-L600
def eventize(self, granularity): """ This splits the JSON information found at self.events into the several events. For this there are three different levels of time consuming actions: 1-soft, 2-medium and 3-hard. Level 1 provides events about commits Level 2 provides events about files Level 3 provides other events (not used so far) :param granularity: Levels of time consuming actions to calculate events :type granularity: integer :returns: Pandas dataframe with splitted events. :rtype: pandas.DataFrame """ df_columns = {} # Init common columns self._init_common_fields(df_columns) # First level granularity df_columns[Git.COMMIT_ID] = [] df_columns[Git.COMMIT_EVENT] = [] df_columns[Git.COMMIT_DATE] = [] df_columns[Git.COMMIT_OWNER] = [] df_columns[Git.COMMIT_COMMITTER] = [] df_columns[Git.COMMIT_COMMITTER_DATE] = [] df_columns[Git.COMMIT_REPOSITORY] = [] df_columns[Git.COMMIT_MESSAGE] = [] df_columns[Git.COMMIT_NUM_FILES] = [] df_columns[Git.COMMIT_ADDED_LINES] = [] df_columns[Git.COMMIT_REMOVED_LINES] = [] df_columns[Git.COMMIT_HASH] = [] df_columns[Git.AUTHOR_DOMAIN] = [] # Second level of granularity df_columns[Git.FILE_FILES] = [] df_columns[Git.FILE_EVENT] = [] df_columns[Git.FILE_PATH] = [] df_columns[Git.FILE_ADDED_LINES] = [] df_columns[Git.FILE_REMOVED_LINES] = [] events = pandas.DataFrame() for item in self.items: commit_data = item["data"] if granularity == 1: self._add_common_fields(df_columns, item) self.__add_commit_info(df_columns, item) added_lines = 0 removed_lines = 0 files = commit_data["files"] df_columns[Git.COMMIT_NUM_FILES] = int(len(files)) for f in files: if "added" in f.keys() and f["added"] != "-": added_lines = added_lines + int(f["added"]) if "removed" in f.keys() and f["removed"] != "-": removed_lines = removed_lines + int(f["removed"]) df_columns[Git.COMMIT_ADDED_LINES] = added_lines df_columns[Git.COMMIT_REMOVED_LINES] = removed_lines # TODO: this will fail if no files are found in a commit (eg: merge) if granularity == 2: # Add extra info about files actions, if there were any if "files" in commit_data.keys(): files = commit_data["files"] nfiles = 0 for f in files: if "action" in f.keys(): nfiles += 1 for f in files: self._add_common_fields(df_columns, item) self.__add_commit_info(df_columns, item) df_columns[Git.FILE_FILES].append(nfiles) if "action" in f.keys(): df_columns[Git.FILE_EVENT].append(Git.EVENT_FILE + f["action"]) else: df_columns[Git.FILE_EVENT].append("-") if "file" in f.keys(): df_columns[Git.FILE_PATH].append(f["file"]) else: df_columns[Git.FILE_PATH].append("-") if "added" in f.keys(): if f["added"] == "-": df_columns[Git.FILE_ADDED_LINES].append(0) else: df_columns[Git.FILE_ADDED_LINES].append(int(f["added"])) else: df_columns[Git.FILE_ADDED_LINES].append(0) if "removed" in f.keys(): if f["removed"] == "-": df_columns[Git.FILE_REMOVED_LINES].append(0) else: df_columns[Git.FILE_REMOVED_LINES].append(int(f["removed"])) else: df_columns[Git.FILE_REMOVED_LINES].append(0) else: print("Merge found, doing nothing...") if granularity == 3: # TDB pass # Done in this way to have an order (and not a direct cast) self._add_common_events(events, df_columns) events[Git.COMMIT_ID] = df_columns[Git.COMMIT_ID] events[Git.COMMIT_EVENT] = df_columns[Git.COMMIT_EVENT] events[Git.COMMIT_DATE] = df_columns[Git.COMMIT_DATE] events[Git.COMMIT_OWNER] = df_columns[Git.COMMIT_OWNER] events[Git.COMMIT_COMMITTER] = df_columns[Git.COMMIT_COMMITTER] events[Git.COMMIT_COMMITTER_DATE] = df_columns[Git.COMMIT_COMMITTER_DATE] events[Git.COMMIT_REPOSITORY] = 
df_columns[Git.COMMIT_REPOSITORY] events[Git.COMMIT_MESSAGE] = df_columns[Git.COMMIT_MESSAGE] events[Git.COMMIT_HASH] = df_columns[Git.COMMIT_HASH] events[Git.AUTHOR_DOMAIN] = df_columns[Git.AUTHOR_DOMAIN] if granularity == 1: events[Git.COMMIT_NUM_FILES] = df_columns[Git.COMMIT_NUM_FILES] events[Git.COMMIT_ADDED_LINES] = df_columns[Git.COMMIT_ADDED_LINES] events[Git.COMMIT_REMOVED_LINES] = df_columns[Git.COMMIT_REMOVED_LINES] if granularity == 2: events[Git.FILE_FILES] = df_columns[Git.FILE_FILES] events[Git.FILE_EVENT] = df_columns[Git.FILE_EVENT] events[Git.FILE_PATH] = df_columns[Git.FILE_PATH] events[Git.FILE_ADDED_LINES] = df_columns[Git.FILE_ADDED_LINES] events[Git.FILE_REMOVED_LINES] = df_columns[Git.FILE_REMOVED_LINES] return events
[ "def", "eventize", "(", "self", ",", "granularity", ")", ":", "df_columns", "=", "{", "}", "# Init common columns", "self", ".", "_init_common_fields", "(", "df_columns", ")", "# First level granularity", "df_columns", "[", "Git", ".", "COMMIT_ID", "]", "=", "[", "]", "df_columns", "[", "Git", ".", "COMMIT_EVENT", "]", "=", "[", "]", "df_columns", "[", "Git", ".", "COMMIT_DATE", "]", "=", "[", "]", "df_columns", "[", "Git", ".", "COMMIT_OWNER", "]", "=", "[", "]", "df_columns", "[", "Git", ".", "COMMIT_COMMITTER", "]", "=", "[", "]", "df_columns", "[", "Git", ".", "COMMIT_COMMITTER_DATE", "]", "=", "[", "]", "df_columns", "[", "Git", ".", "COMMIT_REPOSITORY", "]", "=", "[", "]", "df_columns", "[", "Git", ".", "COMMIT_MESSAGE", "]", "=", "[", "]", "df_columns", "[", "Git", ".", "COMMIT_NUM_FILES", "]", "=", "[", "]", "df_columns", "[", "Git", ".", "COMMIT_ADDED_LINES", "]", "=", "[", "]", "df_columns", "[", "Git", ".", "COMMIT_REMOVED_LINES", "]", "=", "[", "]", "df_columns", "[", "Git", ".", "COMMIT_HASH", "]", "=", "[", "]", "df_columns", "[", "Git", ".", "AUTHOR_DOMAIN", "]", "=", "[", "]", "# Second level of granularity", "df_columns", "[", "Git", ".", "FILE_FILES", "]", "=", "[", "]", "df_columns", "[", "Git", ".", "FILE_EVENT", "]", "=", "[", "]", "df_columns", "[", "Git", ".", "FILE_PATH", "]", "=", "[", "]", "df_columns", "[", "Git", ".", "FILE_ADDED_LINES", "]", "=", "[", "]", "df_columns", "[", "Git", ".", "FILE_REMOVED_LINES", "]", "=", "[", "]", "events", "=", "pandas", ".", "DataFrame", "(", ")", "for", "item", "in", "self", ".", "items", ":", "commit_data", "=", "item", "[", "\"data\"", "]", "if", "granularity", "==", "1", ":", "self", ".", "_add_common_fields", "(", "df_columns", ",", "item", ")", "self", ".", "__add_commit_info", "(", "df_columns", ",", "item", ")", "added_lines", "=", "0", "removed_lines", "=", "0", "files", "=", "commit_data", "[", "\"files\"", "]", "df_columns", "[", "Git", ".", "COMMIT_NUM_FILES", "]", "=", "int", "(", "len", "(", "files", ")", ")", "for", "f", "in", "files", ":", "if", "\"added\"", "in", "f", ".", "keys", "(", ")", "and", "f", "[", "\"added\"", "]", "!=", "\"-\"", ":", "added_lines", "=", "added_lines", "+", "int", "(", "f", "[", "\"added\"", "]", ")", "if", "\"removed\"", "in", "f", ".", "keys", "(", ")", "and", "f", "[", "\"removed\"", "]", "!=", "\"-\"", ":", "removed_lines", "=", "removed_lines", "+", "int", "(", "f", "[", "\"removed\"", "]", ")", "df_columns", "[", "Git", ".", "COMMIT_ADDED_LINES", "]", "=", "added_lines", "df_columns", "[", "Git", ".", "COMMIT_REMOVED_LINES", "]", "=", "removed_lines", "# TODO: this will fail if no files are found in a commit (eg: merge)", "if", "granularity", "==", "2", ":", "# Add extra info about files actions, if there were any", "if", "\"files\"", "in", "commit_data", ".", "keys", "(", ")", ":", "files", "=", "commit_data", "[", "\"files\"", "]", "nfiles", "=", "0", "for", "f", "in", "files", ":", "if", "\"action\"", "in", "f", ".", "keys", "(", ")", ":", "nfiles", "+=", "1", "for", "f", "in", "files", ":", "self", ".", "_add_common_fields", "(", "df_columns", ",", "item", ")", "self", ".", "__add_commit_info", "(", "df_columns", ",", "item", ")", "df_columns", "[", "Git", ".", "FILE_FILES", "]", ".", "append", "(", "nfiles", ")", "if", "\"action\"", "in", "f", ".", "keys", "(", ")", ":", "df_columns", "[", "Git", ".", "FILE_EVENT", "]", ".", "append", "(", "Git", ".", "EVENT_FILE", "+", "f", "[", "\"action\"", "]", ")", "else", ":", "df_columns", "[", "Git", ".", "FILE_EVENT", "]", ".", 
"append", "(", "\"-\"", ")", "if", "\"file\"", "in", "f", ".", "keys", "(", ")", ":", "df_columns", "[", "Git", ".", "FILE_PATH", "]", ".", "append", "(", "f", "[", "\"file\"", "]", ")", "else", ":", "df_columns", "[", "Git", ".", "FILE_PATH", "]", ".", "append", "(", "\"-\"", ")", "if", "\"added\"", "in", "f", ".", "keys", "(", ")", ":", "if", "f", "[", "\"added\"", "]", "==", "\"-\"", ":", "df_columns", "[", "Git", ".", "FILE_ADDED_LINES", "]", ".", "append", "(", "0", ")", "else", ":", "df_columns", "[", "Git", ".", "FILE_ADDED_LINES", "]", ".", "append", "(", "int", "(", "f", "[", "\"added\"", "]", ")", ")", "else", ":", "df_columns", "[", "Git", ".", "FILE_ADDED_LINES", "]", ".", "append", "(", "0", ")", "if", "\"removed\"", "in", "f", ".", "keys", "(", ")", ":", "if", "f", "[", "\"removed\"", "]", "==", "\"-\"", ":", "df_columns", "[", "Git", ".", "FILE_REMOVED_LINES", "]", ".", "append", "(", "0", ")", "else", ":", "df_columns", "[", "Git", ".", "FILE_REMOVED_LINES", "]", ".", "append", "(", "int", "(", "f", "[", "\"removed\"", "]", ")", ")", "else", ":", "df_columns", "[", "Git", ".", "FILE_REMOVED_LINES", "]", ".", "append", "(", "0", ")", "else", ":", "print", "(", "\"Merge found, doing nothing...\"", ")", "if", "granularity", "==", "3", ":", "# TDB", "pass", "# Done in this way to have an order (and not a direct cast)", "self", ".", "_add_common_events", "(", "events", ",", "df_columns", ")", "events", "[", "Git", ".", "COMMIT_ID", "]", "=", "df_columns", "[", "Git", ".", "COMMIT_ID", "]", "events", "[", "Git", ".", "COMMIT_EVENT", "]", "=", "df_columns", "[", "Git", ".", "COMMIT_EVENT", "]", "events", "[", "Git", ".", "COMMIT_DATE", "]", "=", "df_columns", "[", "Git", ".", "COMMIT_DATE", "]", "events", "[", "Git", ".", "COMMIT_OWNER", "]", "=", "df_columns", "[", "Git", ".", "COMMIT_OWNER", "]", "events", "[", "Git", ".", "COMMIT_COMMITTER", "]", "=", "df_columns", "[", "Git", ".", "COMMIT_COMMITTER", "]", "events", "[", "Git", ".", "COMMIT_COMMITTER_DATE", "]", "=", "df_columns", "[", "Git", ".", "COMMIT_COMMITTER_DATE", "]", "events", "[", "Git", ".", "COMMIT_REPOSITORY", "]", "=", "df_columns", "[", "Git", ".", "COMMIT_REPOSITORY", "]", "events", "[", "Git", ".", "COMMIT_MESSAGE", "]", "=", "df_columns", "[", "Git", ".", "COMMIT_MESSAGE", "]", "events", "[", "Git", ".", "COMMIT_HASH", "]", "=", "df_columns", "[", "Git", ".", "COMMIT_HASH", "]", "events", "[", "Git", ".", "AUTHOR_DOMAIN", "]", "=", "df_columns", "[", "Git", ".", "AUTHOR_DOMAIN", "]", "if", "granularity", "==", "1", ":", "events", "[", "Git", ".", "COMMIT_NUM_FILES", "]", "=", "df_columns", "[", "Git", ".", "COMMIT_NUM_FILES", "]", "events", "[", "Git", ".", "COMMIT_ADDED_LINES", "]", "=", "df_columns", "[", "Git", ".", "COMMIT_ADDED_LINES", "]", "events", "[", "Git", ".", "COMMIT_REMOVED_LINES", "]", "=", "df_columns", "[", "Git", ".", "COMMIT_REMOVED_LINES", "]", "if", "granularity", "==", "2", ":", "events", "[", "Git", ".", "FILE_FILES", "]", "=", "df_columns", "[", "Git", ".", "FILE_FILES", "]", "events", "[", "Git", ".", "FILE_EVENT", "]", "=", "df_columns", "[", "Git", ".", "FILE_EVENT", "]", "events", "[", "Git", ".", "FILE_PATH", "]", "=", "df_columns", "[", "Git", ".", "FILE_PATH", "]", "events", "[", "Git", ".", "FILE_ADDED_LINES", "]", "=", "df_columns", "[", "Git", ".", "FILE_ADDED_LINES", "]", "events", "[", "Git", ".", "FILE_REMOVED_LINES", "]", "=", "df_columns", "[", "Git", ".", "FILE_REMOVED_LINES", "]", "return", "events" ]
This splits the JSON information found at self.events into the several events. For this there are three different levels of time consuming actions: 1-soft, 2-medium and 3-hard. Level 1 provides events about commits Level 2 provides events about files Level 3 provides other events (not used so far) :param granularity: Levels of time consuming actions to calculate events :type granularity: integer :returns: Pandas dataframe with split events. :rtype: pandas.DataFrame
[ "This", "splits", "the", "JSON", "information", "found", "at", "self", ".", "events", "into", "the", "several", "events", ".", "For", "this", "there", "are", "three", "different", "levels", "of", "time", "consuming", "actions", ":", "1", "-", "soft", "2", "-", "medium", "and", "3", "-", "hard", "." ]
python
train
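A minimal usage sketch for eventize (assumes the Git events class is constructed directly from a list of raw Perceval git items; the items variable is a placeholder): granularity=1 yields one row per commit, granularity=2 one row per file touched. The column-name constants are the ones referenced in the function body.

    from cereslib.events.events import Git

    git_events = Git(items)   # items: list of raw git items, each a dict with a "data" key
    commits_df = git_events.eventize(granularity=1)
    print(commits_df[[Git.COMMIT_HASH,
                      Git.COMMIT_ADDED_LINES,
                      Git.COMMIT_REMOVED_LINES]].head())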
aio-libs/aiomysql
aiomysql/sa/result.py
https://github.com/aio-libs/aiomysql/blob/131fb9f914739ff01a24b402d29bfd719f2d1a8b/aiomysql/sa/result.py#L379-L388
async def fetchall(self): """Fetch all rows, just like DB-API cursor.fetchall().""" try: rows = await self._cursor.fetchall() except AttributeError: self._non_result() else: ret = self._process_rows(rows) await self.close() return ret
[ "async", "def", "fetchall", "(", "self", ")", ":", "try", ":", "rows", "=", "await", "self", ".", "_cursor", ".", "fetchall", "(", ")", "except", "AttributeError", ":", "self", ".", "_non_result", "(", ")", "else", ":", "ret", "=", "self", ".", "_process_rows", "(", "rows", ")", "await", "self", ".", "close", "(", ")", "return", "ret" ]
Fetch all rows, just like DB-API cursor.fetchall().
[ "Fetch", "all", "rows", "just", "like", "DB", "-", "API", "cursor", ".", "fetchall", "()", "." ]
python
train
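Hedged sketch of calling fetchall() through aiomysql.sa (the table and column names are placeholders and engine creation is omitted): the result proxy is awaited once for all rows and then closed by fetchall() itself.

    async def list_users(engine):
        async with engine.acquire() as conn:
            result = await conn.execute("SELECT id, name FROM users")
            rows = await result.fetchall()   # fetches everything, then closes the result
            for row in rows:
                print(row['id'], row['name'])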
kkroening/ffmpeg-python
ffmpeg/_filters.py
https://github.com/kkroening/ffmpeg-python/blob/ac111dc3a976ddbb872bc7d6d4fe24a267c1a956/ffmpeg/_filters.py#L216-L354
def drawtext(stream, text=None, x=0, y=0, escape_text=True, **kwargs): """Draw a text string or text from a specified file on top of a video, using the libfreetype library. To enable compilation of this filter, you need to configure FFmpeg with ``--enable-libfreetype``. To enable default font fallback and the font option you need to configure FFmpeg with ``--enable-libfontconfig``. To enable the text_shaping option, you need to configure FFmpeg with ``--enable-libfribidi``. Args: box: Used to draw a box around text using the background color. The value must be either 1 (enable) or 0 (disable). The default value of box is 0. boxborderw: Set the width of the border to be drawn around the box using boxcolor. The default value of boxborderw is 0. boxcolor: The color to be used for drawing box around text. For the syntax of this option, check the "Color" section in the ffmpeg-utils manual. The default value of boxcolor is "white". line_spacing: Set the line spacing in pixels of the border to be drawn around the box using box. The default value of line_spacing is 0. borderw: Set the width of the border to be drawn around the text using bordercolor. The default value of borderw is 0. bordercolor: Set the color to be used for drawing border around text. For the syntax of this option, check the "Color" section in the ffmpeg-utils manual. The default value of bordercolor is "black". expansion: Select how the text is expanded. Can be either none, strftime (deprecated) or normal (default). See the Text expansion section below for details. basetime: Set a start time for the count. Value is in microseconds. Only applied in the deprecated strftime expansion mode. To emulate in normal expansion mode use the pts function, supplying the start time (in seconds) as the second argument. fix_bounds: If true, check and fix text coords to avoid clipping. fontcolor: The color to be used for drawing fonts. For the syntax of this option, check the "Color" section in the ffmpeg-utils manual. The default value of fontcolor is "black". fontcolor_expr: String which is expanded the same way as text to obtain dynamic fontcolor value. By default this option has empty value and is not processed. When this option is set, it overrides fontcolor option. font: The font family to be used for drawing text. By default Sans. fontfile: The font file to be used for drawing text. The path must be included. This parameter is mandatory if the fontconfig support is disabled. alpha: Draw the text applying alpha blending. The value can be a number between 0.0 and 1.0. The expression accepts the same variables x, y as well. The default value is 1. Please see fontcolor_expr. fontsize: The font size to be used for drawing text. The default value of fontsize is 16. text_shaping: If set to 1, attempt to shape the text (for example, reverse the order of right-to-left text and join Arabic characters) before drawing it. Otherwise, just draw the text exactly as given. By default 1 (if supported). ft_load_flags: The flags to be used for loading the fonts. The flags map the corresponding flags supported by libfreetype, and are a combination of the following values: * ``default`` * ``no_scale`` * ``no_hinting`` * ``render`` * ``no_bitmap`` * ``vertical_layout`` * ``force_autohint`` * ``crop_bitmap`` * ``pedantic`` * ``ignore_global_advance_width`` * ``no_recurse`` * ``ignore_transform`` * ``monochrome`` * ``linear_design`` * ``no_autohint`` Default value is "default". For more information consult the documentation for the FT_LOAD_* libfreetype flags. 
shadowcolor: The color to be used for drawing a shadow behind the drawn text. For the syntax of this option, check the "Color" section in the ffmpeg-utils manual. The default value of shadowcolor is "black". shadowx: The x offset for the text shadow position with respect to the position of the text. It can be either positive or negative values. The default value is "0". shadowy: The y offset for the text shadow position with respect to the position of the text. It can be either positive or negative values. The default value is "0". start_number: The starting frame number for the n/frame_num variable. The default value is "0". tabsize: The size in number of spaces to use for rendering the tab. Default value is 4. timecode: Set the initial timecode representation in "hh:mm:ss[:;.]ff" format. It can be used with or without text parameter. timecode_rate option must be specified. rate: Set the timecode frame rate (timecode only). timecode_rate: Alias for ``rate``. r: Alias for ``rate``. tc24hmax: If set to 1, the output of the timecode option will wrap around at 24 hours. Default is 0 (disabled). text: The text string to be drawn. The text must be a sequence of UTF-8 encoded characters. This parameter is mandatory if no file is specified with the parameter textfile. textfile: A text file containing text to be drawn. The text must be a sequence of UTF-8 encoded characters. This parameter is mandatory if no text string is specified with the parameter text. If both text and textfile are specified, an error is thrown. reload: If set to 1, the textfile will be reloaded before each frame. Be sure to update it atomically, or it may be read partially, or even fail. x: The expression which specifies the offset where text will be drawn within the video frame. It is relative to the left border of the output image. The default value is "0". y: The expression which specifies the offset where text will be drawn within the video frame. It is relative to the top border of the output image. The default value is "0". See below for the list of accepted constants and functions. Expression constants: The parameters for x and y are expressions containing the following constants and functions: - dar: input display aspect ratio, it is the same as ``(w / h) * sar`` - hsub: horizontal chroma subsample values. For example for the pixel format "yuv422p" hsub is 2 and vsub is 1. - vsub: vertical chroma subsample values. For example for the pixel format "yuv422p" hsub is 2 and vsub is 1. - line_h: the height of each text line - lh: Alias for ``line_h``. - main_h: the input height - h: Alias for ``main_h``. - H: Alias for ``main_h``. - main_w: the input width - w: Alias for ``main_w``. - W: Alias for ``main_w``. - ascent: the maximum distance from the baseline to the highest/upper grid coordinate used to place a glyph outline point, for all the rendered glyphs. It is a positive value, due to the grid's orientation with the Y axis upwards. - max_glyph_a: Alias for ``ascent``. - descent: the maximum distance from the baseline to the lowest grid coordinate used to place a glyph outline point, for all the rendered glyphs. This is a negative value, due to the grid's orientation, with the Y axis upwards. - max_glyph_d: Alias for ``descent``. - max_glyph_h: maximum glyph height, that is the maximum height for all the glyphs contained in the rendered text, it is equivalent to ascent - descent. - max_glyph_w: maximum glyph width, that is the maximum width for all the glyphs contained in the rendered text. 
- n: the number of input frame, starting from 0 - rand(min, max): return a random number included between min and max - sar: The input sample aspect ratio. - t: timestamp expressed in seconds, NAN if the input timestamp is unknown - text_h: the height of the rendered text - th: Alias for ``text_h``. - text_w: the width of the rendered text - tw: Alias for ``text_w``. - x: the x offset coordinates where the text is drawn. - y: the y offset coordinates where the text is drawn. These parameters allow the x and y expressions to refer each other, so you can for example specify ``y=x/dar``. Official documentation: `drawtext <https://ffmpeg.org/ffmpeg-filters.html#drawtext>`__ """ if text is not None: if escape_text: text = escape_chars(text, '\\\'%') kwargs['text'] = text if x != 0: kwargs['x'] = x if y != 0: kwargs['y'] = y return filter(stream, drawtext.__name__, **kwargs)
[ "def", "drawtext", "(", "stream", ",", "text", "=", "None", ",", "x", "=", "0", ",", "y", "=", "0", ",", "escape_text", "=", "True", ",", "*", "*", "kwargs", ")", ":", "if", "text", "is", "not", "None", ":", "if", "escape_text", ":", "text", "=", "escape_chars", "(", "text", ",", "'\\\\\\'%'", ")", "kwargs", "[", "'text'", "]", "=", "text", "if", "x", "!=", "0", ":", "kwargs", "[", "'x'", "]", "=", "x", "if", "y", "!=", "0", ":", "kwargs", "[", "'y'", "]", "=", "y", "return", "filter", "(", "stream", ",", "drawtext", ".", "__name__", ",", "*", "*", "kwargs", ")" ]
Draw a text string or text from a specified file on top of a video, using the libfreetype library. To enable compilation of this filter, you need to configure FFmpeg with ``--enable-libfreetype``. To enable default font fallback and the font option you need to configure FFmpeg with ``--enable-libfontconfig``. To enable the text_shaping option, you need to configure FFmpeg with ``--enable-libfribidi``. Args: box: Used to draw a box around text using the background color. The value must be either 1 (enable) or 0 (disable). The default value of box is 0. boxborderw: Set the width of the border to be drawn around the box using boxcolor. The default value of boxborderw is 0. boxcolor: The color to be used for drawing box around text. For the syntax of this option, check the "Color" section in the ffmpeg-utils manual. The default value of boxcolor is "white". line_spacing: Set the line spacing in pixels of the border to be drawn around the box using box. The default value of line_spacing is 0. borderw: Set the width of the border to be drawn around the text using bordercolor. The default value of borderw is 0. bordercolor: Set the color to be used for drawing border around text. For the syntax of this option, check the "Color" section in the ffmpeg-utils manual. The default value of bordercolor is "black". expansion: Select how the text is expanded. Can be either none, strftime (deprecated) or normal (default). See the Text expansion section below for details. basetime: Set a start time for the count. Value is in microseconds. Only applied in the deprecated strftime expansion mode. To emulate in normal expansion mode use the pts function, supplying the start time (in seconds) as the second argument. fix_bounds: If true, check and fix text coords to avoid clipping. fontcolor: The color to be used for drawing fonts. For the syntax of this option, check the "Color" section in the ffmpeg-utils manual. The default value of fontcolor is "black". fontcolor_expr: String which is expanded the same way as text to obtain dynamic fontcolor value. By default this option has empty value and is not processed. When this option is set, it overrides fontcolor option. font: The font family to be used for drawing text. By default Sans. fontfile: The font file to be used for drawing text. The path must be included. This parameter is mandatory if the fontconfig support is disabled. alpha: Draw the text applying alpha blending. The value can be a number between 0.0 and 1.0. The expression accepts the same variables x, y as well. The default value is 1. Please see fontcolor_expr. fontsize: The font size to be used for drawing text. The default value of fontsize is 16. text_shaping: If set to 1, attempt to shape the text (for example, reverse the order of right-to-left text and join Arabic characters) before drawing it. Otherwise, just draw the text exactly as given. By default 1 (if supported). ft_load_flags: The flags to be used for loading the fonts. The flags map the corresponding flags supported by libfreetype, and are a combination of the following values: * ``default`` * ``no_scale`` * ``no_hinting`` * ``render`` * ``no_bitmap`` * ``vertical_layout`` * ``force_autohint`` * ``crop_bitmap`` * ``pedantic`` * ``ignore_global_advance_width`` * ``no_recurse`` * ``ignore_transform`` * ``monochrome`` * ``linear_design`` * ``no_autohint`` Default value is "default". For more information consult the documentation for the FT_LOAD_* libfreetype flags. 
shadowcolor: The color to be used for drawing a shadow behind the drawn text. For the syntax of this option, check the "Color" section in the ffmpeg-utils manual. The default value of shadowcolor is "black". shadowx: The x offset for the text shadow position with respect to the position of the text. It can be either positive or negative values. The default value is "0". shadowy: The y offset for the text shadow position with respect to the position of the text. It can be either positive or negative values. The default value is "0". start_number: The starting frame number for the n/frame_num variable. The default value is "0". tabsize: The size in number of spaces to use for rendering the tab. Default value is 4. timecode: Set the initial timecode representation in "hh:mm:ss[:;.]ff" format. It can be used with or without text parameter. timecode_rate option must be specified. rate: Set the timecode frame rate (timecode only). timecode_rate: Alias for ``rate``. r: Alias for ``rate``. tc24hmax: If set to 1, the output of the timecode option will wrap around at 24 hours. Default is 0 (disabled). text: The text string to be drawn. The text must be a sequence of UTF-8 encoded characters. This parameter is mandatory if no file is specified with the parameter textfile. textfile: A text file containing text to be drawn. The text must be a sequence of UTF-8 encoded characters. This parameter is mandatory if no text string is specified with the parameter text. If both text and textfile are specified, an error is thrown. reload: If set to 1, the textfile will be reloaded before each frame. Be sure to update it atomically, or it may be read partially, or even fail. x: The expression which specifies the offset where text will be drawn within the video frame. It is relative to the left border of the output image. The default value is "0". y: The expression which specifies the offset where text will be drawn within the video frame. It is relative to the top border of the output image. The default value is "0". See below for the list of accepted constants and functions. Expression constants: The parameters for x and y are expressions containing the following constants and functions: - dar: input display aspect ratio, it is the same as ``(w / h) * sar`` - hsub: horizontal chroma subsample values. For example for the pixel format "yuv422p" hsub is 2 and vsub is 1. - vsub: vertical chroma subsample values. For example for the pixel format "yuv422p" hsub is 2 and vsub is 1. - line_h: the height of each text line - lh: Alias for ``line_h``. - main_h: the input height - h: Alias for ``main_h``. - H: Alias for ``main_h``. - main_w: the input width - w: Alias for ``main_w``. - W: Alias for ``main_w``. - ascent: the maximum distance from the baseline to the highest/upper grid coordinate used to place a glyph outline point, for all the rendered glyphs. It is a positive value, due to the grid's orientation with the Y axis upwards. - max_glyph_a: Alias for ``ascent``. - descent: the maximum distance from the baseline to the lowest grid coordinate used to place a glyph outline point, for all the rendered glyphs. This is a negative value, due to the grid's orientation, with the Y axis upwards. - max_glyph_d: Alias for ``descent``. - max_glyph_h: maximum glyph height, that is the maximum height for all the glyphs contained in the rendered text, it is equivalent to ascent - descent. - max_glyph_w: maximum glyph width, that is the maximum width for all the glyphs contained in the rendered text. 
- n: the number of input frame, starting from 0 - rand(min, max): return a random number included between min and max - sar: The input sample aspect ratio. - t: timestamp expressed in seconds, NAN if the input timestamp is unknown - text_h: the height of the rendered text - th: Alias for ``text_h``. - text_w: the width of the rendered text - tw: Alias for ``text_w``. - x: the x offset coordinates where the text is drawn. - y: the y offset coordinates where the text is drawn. These parameters allow the x and y expressions to refer each other, so you can for example specify ``y=x/dar``. Official documentation: `drawtext <https://ffmpeg.org/ffmpeg-filters.html#drawtext>`__
[ "Draw", "a", "text", "string", "or", "text", "from", "a", "specified", "file", "on", "top", "of", "a", "video", "using", "the", "libfreetype", "library", "." ]
python
train
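Example use of drawtext (file names are placeholders; the underlying ffmpeg binary must be built with libfreetype, as noted above): the filter is exposed as a method on any ffmpeg-python stream, so burning a static label onto a video is a short chain.

    import ffmpeg

    (
        ffmpeg
        .input('in.mp4')
        .drawtext(text='Hello, world', x=20, y=20, fontsize=36, fontcolor='white')
        .output('out.mp4')
        .run()
    )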
xtream1101/web-wrapper
web_wrapper/driver_selenium_phantomjs.py
https://github.com/xtream1101/web-wrapper/blob/2bfc63caa7d316564088951f01a490db493ea240/web_wrapper/driver_selenium_phantomjs.py#L92-L100
def _create_session(self): """ Creates a fresh session with no/default headers and proxies """ logger.debug("Create new phantomjs web driver") self.driver = webdriver.PhantomJS(desired_capabilities=self.dcap, **self.driver_args) self.set_cookies(self.current_cookies) self.driver.set_window_size(1920, 1080)
[ "def", "_create_session", "(", "self", ")", ":", "logger", ".", "debug", "(", "\"Create new phantomjs web driver\"", ")", "self", ".", "driver", "=", "webdriver", ".", "PhantomJS", "(", "desired_capabilities", "=", "self", ".", "dcap", ",", "*", "*", "self", ".", "driver_args", ")", "self", ".", "set_cookies", "(", "self", ".", "current_cookies", ")", "self", ".", "driver", ".", "set_window_size", "(", "1920", ",", "1080", ")" ]
Creates a fresh session with no/default headers and proxies
[ "Creates", "a", "fresh", "session", "with", "no", "/", "default", "headers", "and", "proxies" ]
python
train
Valassis-Digital-Media/spylon
spylon/spark/launcher.py
https://github.com/Valassis-Digital-Media/spylon/blob/ac00e285fa1c790674606b793819c3e5baee0d48/spylon/spark/launcher.py#L497-L522
def spark_context(self, application_name): """Create a spark context given the parameters configured in this class. The caller is responsible for calling ``.close`` on the resulting spark context Parameters ---------- application_name : string Returns ------- sc : SparkContext """ # initialize the spark configuration self._init_spark() import pyspark import pyspark.sql # initialize conf spark_conf = pyspark.SparkConf() for k, v in self._spark_conf_helper._conf_dict.items(): spark_conf.set(k, v) log.info("Starting SparkContext") return pyspark.SparkContext(appName=application_name, conf=spark_conf)
[ "def", "spark_context", "(", "self", ",", "application_name", ")", ":", "# initialize the spark configuration", "self", ".", "_init_spark", "(", ")", "import", "pyspark", "import", "pyspark", ".", "sql", "# initialize conf", "spark_conf", "=", "pyspark", ".", "SparkConf", "(", ")", "for", "k", ",", "v", "in", "self", ".", "_spark_conf_helper", ".", "_conf_dict", ".", "items", "(", ")", ":", "spark_conf", ".", "set", "(", "k", ",", "v", ")", "log", ".", "info", "(", "\"Starting SparkContext\"", ")", "return", "pyspark", ".", "SparkContext", "(", "appName", "=", "application_name", ",", "conf", "=", "spark_conf", ")" ]
Create a spark context given the parameters configured in this class. The caller is responsible for calling ``.close`` on the resulting spark context Parameters ---------- application_name : string Returns ------- sc : SparkContext
[ "Create", "a", "spark", "context", "given", "the", "parameters", "configured", "in", "this", "class", "." ]
python
train
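A minimal sketch of spark_context (assumes spylon's SparkConfiguration entry point created with its defaults; no extra Spark options are set): the caller owns the context's lifecycle, so stop it explicitly when done.

    from spylon.spark.launcher import SparkConfiguration

    conf = SparkConfiguration()
    sc = conf.spark_context('spylon-example')
    try:
        print(sc.parallelize(range(10)).sum())   # 45
    finally:
        sc.stop()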
aio-libs/aioredis
aioredis/commands/hash.py
https://github.com/aio-libs/aioredis/blob/e8c33e39558d4cc91cf70dde490d8b330c97dc2e/aioredis/commands/hash.py#L124-L135
def ihscan(self, key, *, match=None, count=None): """Incrementally iterate hash items using async for. Usage example: >>> async for name, val in redis.ihscan(key, match='something*'): ... print('Matched:', name, '->', val) """ return _ScanIter(lambda cur: self.hscan(key, cur, match=match, count=count))
[ "def", "ihscan", "(", "self", ",", "key", ",", "*", ",", "match", "=", "None", ",", "count", "=", "None", ")", ":", "return", "_ScanIter", "(", "lambda", "cur", ":", "self", ".", "hscan", "(", "key", ",", "cur", ",", "match", "=", "match", ",", "count", "=", "count", ")", ")" ]
Incrementally iterate hash items using async for. Usage example: >>> async for name, val in redis.ihscan(key, match='something*'): ... print('Matched:', name, '->', val)
[ "Incrementally", "iterate", "sorted", "set", "items", "using", "async", "for", "." ]
python
train
saltstack/salt
salt/states/boto_apigateway.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L953-L965
def deployment_label(self): ''' this property returns the deployment label dictionary (mainly used by stage description) ''' label = dict() label['swagger_info_object'] = self.info label['api_name'] = self.rest_api_name label['swagger_file'] = os.path.basename(self._swagger_file) label['swagger_file_md5sum'] = self.md5_filehash return label
[ "def", "deployment_label", "(", "self", ")", ":", "label", "=", "dict", "(", ")", "label", "[", "'swagger_info_object'", "]", "=", "self", ".", "info", "label", "[", "'api_name'", "]", "=", "self", ".", "rest_api_name", "label", "[", "'swagger_file'", "]", "=", "os", ".", "path", ".", "basename", "(", "self", ".", "_swagger_file", ")", "label", "[", "'swagger_file_md5sum'", "]", "=", "self", ".", "md5_filehash", "return", "label" ]
this property returns the deployment label dictionary (mainly used by stage description)
[ "this", "property", "returns", "the", "deployment", "label", "dictionary", "(", "mainly", "used", "by", "stage", "description", ")" ]
python
train
pypa/pipenv
pipenv/vendor/jinja2/utils.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/jinja2/utils.py#L287-L303
def unicode_urlencode(obj, charset='utf-8', for_qs=False): """URL escapes a single bytestring or unicode string with the given charset if applicable to URL safe quoting under all rules that need to be considered under all supported Python versions. If non strings are provided they are converted to their unicode representation first. """ if not isinstance(obj, string_types): obj = text_type(obj) if isinstance(obj, text_type): obj = obj.encode(charset) safe = not for_qs and b'/' or b'' rv = text_type(url_quote(obj, safe)) if for_qs: rv = rv.replace('%20', '+') return rv
[ "def", "unicode_urlencode", "(", "obj", ",", "charset", "=", "'utf-8'", ",", "for_qs", "=", "False", ")", ":", "if", "not", "isinstance", "(", "obj", ",", "string_types", ")", ":", "obj", "=", "text_type", "(", "obj", ")", "if", "isinstance", "(", "obj", ",", "text_type", ")", ":", "obj", "=", "obj", ".", "encode", "(", "charset", ")", "safe", "=", "not", "for_qs", "and", "b'/'", "or", "b''", "rv", "=", "text_type", "(", "url_quote", "(", "obj", ",", "safe", ")", ")", "if", "for_qs", ":", "rv", "=", "rv", ".", "replace", "(", "'%20'", ",", "'+'", ")", "return", "rv" ]
URL escapes a single bytestring or unicode string with the given charset if applicable to URL safe quoting under all rules that need to be considered under all supported Python versions. If non strings are provided they are converted to their unicode representation first.
[ "URL", "escapes", "a", "single", "bytestring", "or", "unicode", "string", "with", "the", "given", "charset", "if", "applicable", "to", "URL", "safe", "quoting", "under", "all", "rules", "that", "need", "to", "be", "considered", "under", "all", "supported", "Python", "versions", "." ]
python
train
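Quick illustration of unicode_urlencode's two modes (run against a stand-alone Jinja2 2.x install rather than the vendored copy; expected outputs shown as comments): by default '/' stays unescaped and spaces become %20, while for_qs=True quotes '/' as well and turns spaces into '+'.

    from jinja2.utils import unicode_urlencode

    print(unicode_urlencode(u'a b/\u00e4'))                # a%20b/%C3%A4
    print(unicode_urlencode(u'a b/\u00e4', for_qs=True))   # a+b%2F%C3%A4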
dwavesystems/dimod
dimod/reference/composites/roofduality.py
https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/reference/composites/roofduality.py#L54-L79
def sample(self, bqm, sampling_mode=True, **parameters): """Sample from the provided binary quadratic model. Uses the :func:`~dimod.roof_duality.fix_variables` function to determine which variables to fix. Args: bqm (:obj:`dimod.BinaryQuadraticModel`): Binary quadratic model to be sampled from. sampling_mode (bool, optional, default=True): In sampling mode, only roof-duality is used. When `sampling_mode` is false, strongly connected components are used to fix more variables, but in some optimal solutions these variables may take different values. **parameters: Parameters for the child sampler. Returns: :obj:`dimod.SampleSet` """ # use roof-duality to decide which variables to fix parameters['fixed_variables'] = fix_variables(bqm, sampling_mode=sampling_mode) return super(RoofDualityComposite, self).sample(bqm, **parameters)
[ "def", "sample", "(", "self", ",", "bqm", ",", "sampling_mode", "=", "True", ",", "*", "*", "parameters", ")", ":", "# use roof-duality to decide which variables to fix", "parameters", "[", "'fixed_variables'", "]", "=", "fix_variables", "(", "bqm", ",", "sampling_mode", "=", "sampling_mode", ")", "return", "super", "(", "RoofDualityComposite", ",", "self", ")", ".", "sample", "(", "bqm", ",", "*", "*", "parameters", ")" ]
Sample from the provided binary quadratic model. Uses the :func:`~dimod.roof_duality.fix_variables` function to determine which variables to fix. Args: bqm (:obj:`dimod.BinaryQuadraticModel`): Binary quadratic model to be sampled from. sampling_mode (bool, optional, default=True): In sampling mode, only roof-duality is used. When `sampling_mode` is false, strongly connected components are used to fix more variables, but in some optimal solutions these variables may take different values. **parameters: Parameters for the child sampler. Returns: :obj:`dimod.SampleSet`
[ "Sample", "from", "the", "provided", "binary", "quadratic", "model", "." ]
python
train
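Usage sketch for RoofDualityComposite (requires a dimod build that includes the compiled roof-duality extension; the tiny Ising problem is purely illustrative): roof duality fixes what it can, and the wrapped ExactSolver handles whatever variables remain.

    import dimod
    from dimod.reference.composites.roofduality import RoofDualityComposite

    bqm = dimod.BinaryQuadraticModel.from_ising({'a': 1.0}, {('a', 'b'): -1.0})
    sampler = RoofDualityComposite(dimod.ExactSolver())
    sampleset = sampler.sample(bqm, sampling_mode=True)
    print(sampleset.first.sample)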
LonamiWebs/Telethon
telethon/extensions/binaryreader.py
https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon/extensions/binaryreader.py#L60-L73
def read(self, length=None): """Read the given amount of bytes.""" if length is None: return self.reader.read() result = self.reader.read(length) if len(result) != length: raise BufferError( 'No more data left to read (need {}, got {}: {}); last read {}' .format(length, len(result), repr(result), repr(self._last)) ) self._last = result return result
[ "def", "read", "(", "self", ",", "length", "=", "None", ")", ":", "if", "length", "is", "None", ":", "return", "self", ".", "reader", ".", "read", "(", ")", "result", "=", "self", ".", "reader", ".", "read", "(", "length", ")", "if", "len", "(", "result", ")", "!=", "length", ":", "raise", "BufferError", "(", "'No more data left to read (need {}, got {}: {}); last read {}'", ".", "format", "(", "length", ",", "len", "(", "result", ")", ",", "repr", "(", "result", ")", ",", "repr", "(", "self", ".", "_last", ")", ")", ")", "self", ".", "_last", "=", "result", "return", "result" ]
Read the given amount of bytes.
[ "Read", "the", "given", "amount", "of", "bytes", "." ]
python
train
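Illustrative only (the byte string is arbitrary): BinaryReader wraps an in-memory buffer, read(n) returns exactly n bytes, a call with no length returns the rest, and asking for more than remains raises BufferError.

    from telethon.extensions import BinaryReader

    reader = BinaryReader(b'\x01\x02\x03\x04')
    print(reader.read(2))   # b'\x01\x02'
    print(reader.read())    # b'\x03\x04'  (no length means "read everything left")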
vertexproject/synapse
synapse/lib/migrate.py
https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/lib/migrate.py#L97-L112
async def setFormName(self, oldn, newn): ''' Rename a form within all the layers. ''' logger.info(f'Migrating [{oldn}] to [{newn}]') async with self.getTempSlab(): i = 0 async for buid, valu in self.getFormTodo(oldn): await self.editNodeNdef((oldn, valu), (newn, valu)) i = i + 1 if i and i % _progress == 0: logger.info(f'Migrated {i} buids.')
[ "async", "def", "setFormName", "(", "self", ",", "oldn", ",", "newn", ")", ":", "logger", ".", "info", "(", "f'Migrating [{oldn}] to [{newn}]'", ")", "async", "with", "self", ".", "getTempSlab", "(", ")", ":", "i", "=", "0", "async", "for", "buid", ",", "valu", "in", "self", ".", "getFormTodo", "(", "oldn", ")", ":", "await", "self", ".", "editNodeNdef", "(", "(", "oldn", ",", "valu", ")", ",", "(", "newn", ",", "valu", ")", ")", "i", "=", "i", "+", "1", "if", "i", "and", "i", "%", "_progress", "==", "0", ":", "logger", ".", "info", "(", "f'Migrated {i} buids.'", ")" ]
Rename a form within all the layers.
[ "Rename", "a", "form", "within", "all", "the", "layers", "." ]
python
train
fboender/ansible-cmdb
lib/mako/_ast_util.py
https://github.com/fboender/ansible-cmdb/blob/ebd960ac10684e8c9ec2b12751bba2c4c9504ab7/lib/mako/_ast_util.py#L87-L104
def to_source(node, indent_with=' ' * 4): """ This function can convert a node tree back into python sourcecode. This is useful for debugging purposes, especially if you're dealing with custom asts not generated by python itself. It could be that the sourcecode is evaluable when the AST itself is not compilable / evaluable. The reason for this is that the AST contains some more data than regular sourcecode does, which is dropped during conversion. Each level of indentation is replaced with `indent_with`. Per default this parameter is equal to four spaces as suggested by PEP 8, but it might be adjusted to match the application's styleguide. """ generator = SourceGenerator(indent_with) generator.visit(node) return ''.join(generator.result)
[ "def", "to_source", "(", "node", ",", "indent_with", "=", "' '", "*", "4", ")", ":", "generator", "=", "SourceGenerator", "(", "indent_with", ")", "generator", ".", "visit", "(", "node", ")", "return", "''", ".", "join", "(", "generator", ".", "result", ")" ]
This function can convert a node tree back into python sourcecode. This is useful for debugging purposes, especially if you're dealing with custom asts not generated by python itself. It could be that the sourcecode is evaluable when the AST itself is not compilable / evaluable. The reason for this is that the AST contains some more data than regular sourcecode does, which is dropped during conversion. Each level of indentation is replaced with `indent_with`. Per default this parameter is equal to four spaces as suggested by PEP 8, but it might be adjusted to match the application's styleguide.
[ "This", "function", "can", "convert", "a", "node", "tree", "back", "into", "python", "sourcecode", ".", "This", "is", "useful", "for", "debugging", "purposes", "especially", "if", "you", "re", "dealing", "with", "custom", "asts", "not", "generated", "by", "python", "itself", "." ]
python
train
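Round-trip sketch for to_source (assumes the bundled mako package is importable as mako; behaviour on very new Python versions may differ since the AST node classes have changed over time): parse a snippet with the standard library, then regenerate equivalent source from the tree.

    import ast
    from mako._ast_util import to_source

    tree = ast.parse("if x:\n    y = x\n")
    print(to_source(tree))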
SoCo/SoCo
soco/music_services/music_service.py
https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/soco/music_services/music_service.py#L488-L505
def get_data_for_name(cls, service_name): """Get the data relating to a named music service. Args: service_name (str): The name of the music service for which data is required. Returns: dict: Data relating to the music service. Raises: `MusicServiceException`: if the music service cannot be found. """ for service in cls._get_music_services_data().values(): if service_name == service["Name"]: return service raise MusicServiceException( "Unknown music service: '%s'" % service_name)
[ "def", "get_data_for_name", "(", "cls", ",", "service_name", ")", ":", "for", "service", "in", "cls", ".", "_get_music_services_data", "(", ")", ".", "values", "(", ")", ":", "if", "service_name", "==", "service", "[", "\"Name\"", "]", ":", "return", "service", "raise", "MusicServiceException", "(", "\"Unknown music service: '%s'\"", "%", "service_name", ")" ]
Get the data relating to a named music service. Args: service_name (str): The name of the music service for which data is required. Returns: dict: Data relating to the music service. Raises: `MusicServiceException`: if the music service cannot be found.
[ "Get", "the", "data", "relating", "to", "a", "named", "music", "service", "." ]
python
train
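Hedged example for get_data_for_name (requires a Sonos system on the local network, since the service list is fetched from a live device; 'Spotify' is just a commonly available service name):

    from soco.music_services.music_service import MusicService

    data = MusicService.get_data_for_name('Spotify')
    print(data['Name'], data.get('ServiceType'))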
sethmlarson/virtualbox-python
virtualbox/library.py
https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L26366-L26383
def dump_guest_core(self, filename, compression): """Takes a core dump of the guest. See include/VBox/dbgfcorefmt.h for details on the file format. in filename of type str The name of the output file. The file must not exist. in compression of type str Reserved for future compression method indicator. """ if not isinstance(filename, basestring): raise TypeError("filename can only be an instance of type basestring") if not isinstance(compression, basestring): raise TypeError("compression can only be an instance of type basestring") self._call("dumpGuestCore", in_p=[filename, compression])
[ "def", "dump_guest_core", "(", "self", ",", "filename", ",", "compression", ")", ":", "if", "not", "isinstance", "(", "filename", ",", "basestring", ")", ":", "raise", "TypeError", "(", "\"filename can only be an instance of type basestring\"", ")", "if", "not", "isinstance", "(", "compression", ",", "basestring", ")", ":", "raise", "TypeError", "(", "\"compression can only be an instance of type basestring\"", ")", "self", ".", "_call", "(", "\"dumpGuestCore\"", ",", "in_p", "=", "[", "filename", ",", "compression", "]", ")" ]
Takes a core dump of the guest. See include/VBox/dbgfcorefmt.h for details on the file format. in filename of type str The name of the output file. The file must not exist. in compression of type str Reserved for future compression method indicator.
[ "Takes", "a", "core", "dump", "of", "the", "guest", ".", "See", "include", "/", "VBox", "/", "dbgfcorefmt", ".", "h", "for", "details", "on", "the", "file", "format", "." ]
python
train
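A rough sketch of dump_guest_core (the VM name and output path are placeholders, the machine must already be running, and the create_session convenience helper is assumed): reach the debugger interface through the console and write the core file; the compression argument is reserved, so pass an empty string.

    import virtualbox

    vbox = virtualbox.VirtualBox()
    machine = vbox.find_machine('my-vm')        # placeholder VM name
    session = machine.create_session()          # shared lock on the running VM
    try:
        session.console.debugger.dump_guest_core('/tmp/guest.core', '')
    finally:
        session.unlock_machine()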
pywbem/pywbem
pywbem/mof_compiler.py
https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/pywbem/mof_compiler.py#L490-L614
def p_mp_createClass(p): """mp_createClass : classDeclaration """ # pylint: disable=too-many-branches,too-many-statements,too-many-locals ns = p.parser.handle.default_namespace cc = p[1] try: fixedNS = fixedRefs = fixedSuper = False while not fixedNS or not fixedRefs or not fixedSuper: try: if p.parser.verbose: p.parser.log( _format("Creating class {0!A}:{1!A}", ns, cc.classname)) p.parser.handle.CreateClass(cc) if p.parser.verbose: p.parser.log( _format("Created class {0!A}:{1!A}", ns, cc.classname)) p.parser.classnames[ns].append(cc.classname.lower()) break except CIMError as ce: ce.file_line = (p.parser.file, p.lexer.lineno) errcode = ce.status_code if errcode == CIM_ERR_INVALID_NAMESPACE: if fixedNS: raise if p.parser.verbose: p.parser.log( _format("Creating namespace {0!A}", ns)) p.parser.server.create_namespace(ns) fixedNS = True continue if not p.parser.search_paths: raise if errcode == CIM_ERR_INVALID_SUPERCLASS: if fixedSuper: raise moffile = p.parser.mofcomp.find_mof(cc.superclass) if not moffile: raise p.parser.mofcomp.compile_file(moffile, ns) fixedSuper = True elif errcode in [CIM_ERR_INVALID_PARAMETER, CIM_ERR_NOT_FOUND, CIM_ERR_FAILED]: if fixedRefs: raise if not p.parser.qualcache[ns]: for fname in ['qualifiers', 'qualifiers_optional']: qualfile = p.parser.mofcomp.find_mof(fname) if qualfile: p.parser.mofcomp.compile_file(qualfile, ns) if not p.parser.qualcache[ns]: # can't find qualifiers raise objects = list(cc.properties.values()) for meth in cc.methods.values(): objects += list(meth.parameters.values()) dep_classes = NocaseDict() # dict dep_class, ce for obj in objects: if obj.type not in ['reference', 'string']: continue if obj.type == 'reference': if obj.reference_class not in dep_classes: dep_classes[obj.reference_class] = ce elif obj.type == 'string': try: embedded_inst = \ obj.qualifiers['embeddedinstance'] except KeyError: continue if embedded_inst.value not in dep_classes: dep_classes[embedded_inst.value] = ce continue for cln, err in dep_classes.items(): if cln in p.parser.classnames[ns]: continue try: # don't limit it with LocalOnly=True, # PropertyList, IncludeQualifiers=False, ... # because of caching in case we're using the # special WBEMConnection subclass used for # removing schema elements p.parser.handle.GetClass(cln, LocalOnly=False, IncludeQualifiers=True) p.parser.classnames[ns].append(cln) except CIMError: moffile = p.parser.mofcomp.find_mof(cln) if not moffile: raise err try: if p.parser.verbose: p.parser.log( _format("Class {0!A} namespace {1!A} " "depends on class {2!A} which " "is not in repository.", cc.classname, ns, cln)) p.parser.mofcomp.compile_file(moffile, ns) except CIMError as ce: if ce.status_code == CIM_ERR_NOT_FOUND: raise err raise p.parser.classnames[ns].append(cln) fixedRefs = True else: raise except CIMError as ce: ce.file_line = (p.parser.file, p.lexer.lineno) if ce.status_code != CIM_ERR_ALREADY_EXISTS: raise if p.parser.verbose: p.parser.log( _format("Class {0!A} already exist. Modifying...", cc.classname)) try: p.parser.handle.ModifyClass(cc, ns) except CIMError as ce: p.parser.log( _format("Error modifying class {0!A}: {1}, {2}", cc.classname, ce.status_code, ce.status_description))
[ "def", "p_mp_createClass", "(", "p", ")", ":", "# pylint: disable=too-many-branches,too-many-statements,too-many-locals", "ns", "=", "p", ".", "parser", ".", "handle", ".", "default_namespace", "cc", "=", "p", "[", "1", "]", "try", ":", "fixedNS", "=", "fixedRefs", "=", "fixedSuper", "=", "False", "while", "not", "fixedNS", "or", "not", "fixedRefs", "or", "not", "fixedSuper", ":", "try", ":", "if", "p", ".", "parser", ".", "verbose", ":", "p", ".", "parser", ".", "log", "(", "_format", "(", "\"Creating class {0!A}:{1!A}\"", ",", "ns", ",", "cc", ".", "classname", ")", ")", "p", ".", "parser", ".", "handle", ".", "CreateClass", "(", "cc", ")", "if", "p", ".", "parser", ".", "verbose", ":", "p", ".", "parser", ".", "log", "(", "_format", "(", "\"Created class {0!A}:{1!A}\"", ",", "ns", ",", "cc", ".", "classname", ")", ")", "p", ".", "parser", ".", "classnames", "[", "ns", "]", ".", "append", "(", "cc", ".", "classname", ".", "lower", "(", ")", ")", "break", "except", "CIMError", "as", "ce", ":", "ce", ".", "file_line", "=", "(", "p", ".", "parser", ".", "file", ",", "p", ".", "lexer", ".", "lineno", ")", "errcode", "=", "ce", ".", "status_code", "if", "errcode", "==", "CIM_ERR_INVALID_NAMESPACE", ":", "if", "fixedNS", ":", "raise", "if", "p", ".", "parser", ".", "verbose", ":", "p", ".", "parser", ".", "log", "(", "_format", "(", "\"Creating namespace {0!A}\"", ",", "ns", ")", ")", "p", ".", "parser", ".", "server", ".", "create_namespace", "(", "ns", ")", "fixedNS", "=", "True", "continue", "if", "not", "p", ".", "parser", ".", "search_paths", ":", "raise", "if", "errcode", "==", "CIM_ERR_INVALID_SUPERCLASS", ":", "if", "fixedSuper", ":", "raise", "moffile", "=", "p", ".", "parser", ".", "mofcomp", ".", "find_mof", "(", "cc", ".", "superclass", ")", "if", "not", "moffile", ":", "raise", "p", ".", "parser", ".", "mofcomp", ".", "compile_file", "(", "moffile", ",", "ns", ")", "fixedSuper", "=", "True", "elif", "errcode", "in", "[", "CIM_ERR_INVALID_PARAMETER", ",", "CIM_ERR_NOT_FOUND", ",", "CIM_ERR_FAILED", "]", ":", "if", "fixedRefs", ":", "raise", "if", "not", "p", ".", "parser", ".", "qualcache", "[", "ns", "]", ":", "for", "fname", "in", "[", "'qualifiers'", ",", "'qualifiers_optional'", "]", ":", "qualfile", "=", "p", ".", "parser", ".", "mofcomp", ".", "find_mof", "(", "fname", ")", "if", "qualfile", ":", "p", ".", "parser", ".", "mofcomp", ".", "compile_file", "(", "qualfile", ",", "ns", ")", "if", "not", "p", ".", "parser", ".", "qualcache", "[", "ns", "]", ":", "# can't find qualifiers", "raise", "objects", "=", "list", "(", "cc", ".", "properties", ".", "values", "(", ")", ")", "for", "meth", "in", "cc", ".", "methods", ".", "values", "(", ")", ":", "objects", "+=", "list", "(", "meth", ".", "parameters", ".", "values", "(", ")", ")", "dep_classes", "=", "NocaseDict", "(", ")", "# dict dep_class, ce", "for", "obj", "in", "objects", ":", "if", "obj", ".", "type", "not", "in", "[", "'reference'", ",", "'string'", "]", ":", "continue", "if", "obj", ".", "type", "==", "'reference'", ":", "if", "obj", ".", "reference_class", "not", "in", "dep_classes", ":", "dep_classes", "[", "obj", ".", "reference_class", "]", "=", "ce", "elif", "obj", ".", "type", "==", "'string'", ":", "try", ":", "embedded_inst", "=", "obj", ".", "qualifiers", "[", "'embeddedinstance'", "]", "except", "KeyError", ":", "continue", "if", "embedded_inst", ".", "value", "not", "in", "dep_classes", ":", "dep_classes", "[", "embedded_inst", ".", "value", "]", "=", "ce", "continue", "for", "cln", ",", "err", "in", 
"dep_classes", ".", "items", "(", ")", ":", "if", "cln", "in", "p", ".", "parser", ".", "classnames", "[", "ns", "]", ":", "continue", "try", ":", "# don't limit it with LocalOnly=True,", "# PropertyList, IncludeQualifiers=False, ...", "# because of caching in case we're using the", "# special WBEMConnection subclass used for", "# removing schema elements", "p", ".", "parser", ".", "handle", ".", "GetClass", "(", "cln", ",", "LocalOnly", "=", "False", ",", "IncludeQualifiers", "=", "True", ")", "p", ".", "parser", ".", "classnames", "[", "ns", "]", ".", "append", "(", "cln", ")", "except", "CIMError", ":", "moffile", "=", "p", ".", "parser", ".", "mofcomp", ".", "find_mof", "(", "cln", ")", "if", "not", "moffile", ":", "raise", "err", "try", ":", "if", "p", ".", "parser", ".", "verbose", ":", "p", ".", "parser", ".", "log", "(", "_format", "(", "\"Class {0!A} namespace {1!A} \"", "\"depends on class {2!A} which \"", "\"is not in repository.\"", ",", "cc", ".", "classname", ",", "ns", ",", "cln", ")", ")", "p", ".", "parser", ".", "mofcomp", ".", "compile_file", "(", "moffile", ",", "ns", ")", "except", "CIMError", "as", "ce", ":", "if", "ce", ".", "status_code", "==", "CIM_ERR_NOT_FOUND", ":", "raise", "err", "raise", "p", ".", "parser", ".", "classnames", "[", "ns", "]", ".", "append", "(", "cln", ")", "fixedRefs", "=", "True", "else", ":", "raise", "except", "CIMError", "as", "ce", ":", "ce", ".", "file_line", "=", "(", "p", ".", "parser", ".", "file", ",", "p", ".", "lexer", ".", "lineno", ")", "if", "ce", ".", "status_code", "!=", "CIM_ERR_ALREADY_EXISTS", ":", "raise", "if", "p", ".", "parser", ".", "verbose", ":", "p", ".", "parser", ".", "log", "(", "_format", "(", "\"Class {0!A} already exist. Modifying...\"", ",", "cc", ".", "classname", ")", ")", "try", ":", "p", ".", "parser", ".", "handle", ".", "ModifyClass", "(", "cc", ",", "ns", ")", "except", "CIMError", "as", "ce", ":", "p", ".", "parser", ".", "log", "(", "_format", "(", "\"Error modifying class {0!A}: {1}, {2}\"", ",", "cc", ".", "classname", ",", "ce", ".", "status_code", ",", "ce", ".", "status_description", ")", ")" ]
mp_createClass : classDeclaration
[ "mp_createClass", ":", "classDeclaration" ]
python
train
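Illustrative sketch of the retry pattern used in the record above: attempt the create, repair at most one missing dependency per error class, then retry. The toy repository, exception, and class names below are all made up; this is not pywbem's API.

# Minimal sketch (not pywbem) of the create-with-one-shot-fixes loop: try to
# create a class in a toy repository and, on a missing-superclass error,
# compile the dependency once before retrying. All names are hypothetical.
class MissingSuperclass(Exception):
    pass

REPO = {"CIM_ManagedElement": {}}                            # toy class repository
MOF_SOURCES = {"CIM_LogicalElement": "CIM_ManagedElement"}   # class -> its superclass

def create_class(name, superclass):
    if superclass not in REPO:
        raise MissingSuperclass(superclass)
    REPO[name] = {"superclass": superclass}

def compile_dependency(name):
    # pretend to compile the MOF file for `name`, which registers it in REPO
    create_class(name, MOF_SOURCES[name])

def create_with_retry(name, superclass):
    fixed_super = False
    while True:
        try:
            create_class(name, superclass)
            return True
        except MissingSuperclass as exc:
            if fixed_super:            # only one repair attempt, then give up
                raise
            compile_dependency(str(exc))
            fixed_super = True

create_with_retry("My_Device", "CIM_LogicalElement")
print(sorted(REPO))   # ['CIM_LogicalElement', 'CIM_ManagedElement', 'My_Device']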
plaid/plaid-python
plaid/api/institutions.py
https://github.com/plaid/plaid-python/blob/c549c3108790266a3b344c47e0c83fff59146eeb/plaid/api/institutions.py#L26-L37
def get_by_id(self, institution_id, _options=None): ''' Fetch a single institution by id. :param str institution_id: ''' options = _options or {} return self.client.post_public_key('/institutions/get_by_id', { 'institution_id': institution_id, 'options': options, })
[ "def", "get_by_id", "(", "self", ",", "institution_id", ",", "_options", "=", "None", ")", ":", "options", "=", "_options", "or", "{", "}", "return", "self", ".", "client", ".", "post_public_key", "(", "'/institutions/get_by_id'", ",", "{", "'institution_id'", ":", "institution_id", ",", "'options'", ":", "options", ",", "}", ")" ]
Fetch a single institution by id. :param str institution_id:
[ "Fetch", "a", "single", "institution", "by", "id", "." ]
python
train
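For reference, a minimal sketch of the request body the method above assembles before posting it to '/institutions/get_by_id'; the institution id and option key below are made-up examples and no network call is made.

import json

def build_get_by_id_payload(institution_id, _options=None):
    # mirrors the defaulting behaviour above: a missing options argument becomes {}
    return {
        'institution_id': institution_id,
        'options': _options or {},
    }

payload = build_get_by_id_payload('ins_00000', {'include_display_data': True})
print(json.dumps(payload, indent=2, sort_keys=True))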
pycontribs/pyrax
pyrax/utils.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/utils.py#L277-L287
def random_unicode(length=20): """ Generates a random name; useful for testing. Returns an encoded string of the specified length containing unicode values up to code point 1000. """ def get_char(): return six.unichr(random.randint(32, 1000)) chars = u"".join([get_char() for ii in six.moves.range(length)]) return _join_chars(chars, length)
[ "def", "random_unicode", "(", "length", "=", "20", ")", ":", "def", "get_char", "(", ")", ":", "return", "six", ".", "unichr", "(", "random", ".", "randint", "(", "32", ",", "1000", ")", ")", "chars", "=", "u\"\"", ".", "join", "(", "[", "get_char", "(", ")", "for", "ii", "in", "six", ".", "moves", ".", "range", "(", "length", ")", "]", ")", "return", "_join_chars", "(", "chars", ",", "length", ")" ]
Generates a random name; useful for testing. Returns an encoded string of the specified length containing unicode values up to code point 1000.
[ "Generates", "a", "random", "name", ";", "useful", "for", "testing", "." ]
python
train
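A self-contained standard-library approximation of the helper above (random code points between 32 and 1000); pyrax's real version additionally post-processes the result through its internal _join_chars.

import random

def random_unicode_sketch(length=20):
    # build a string of `length` random code points in the 32..1000 range
    return u"".join(chr(random.randint(32, 1000)) for _ in range(length))

sample = random_unicode_sketch(10)
print(len(sample), repr(sample))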
hollenstein/maspy
maspy_resources/pparse.py
https://github.com/hollenstein/maspy/blob/f15fcfd24df306d8420540460d902aa3073ec133/maspy_resources/pparse.py#L167-L205
def cleanUpPparse(outputpath, rawfilename, mgf=False): """Delete temporary files generated by pparse, including the filetypes ".csv", ".ms1", ".ms2", ".xtract", the files "pParsePlusLog.txt" and "pParse.para" and optionally also the ".mgf" file generated by pParse. .. warning: When the parameter "mgf" is set to "True" all files ending with ".mgf" and containing the specified "filename" are deleted. This could potentially also affect MGF files not generated by pParse. :param outputpath: path to the output directory of pParse :param rawfilename: filename of the thermo ".raw" file :param mgf: bool, if True the ".mgf" file generated by pParse is also removed """ extensions = ['csv', 'ms1', 'ms2', 'xtract'] filename, fileext = os.path.splitext(os.path.basename(rawfilename)) additionalFiles = [aux.joinpath(outputpath, 'pParsePlusLog.txt'), aux.joinpath(outputpath, filename+'.pparse.para'), ] for ext in extensions: filepath = aux.joinpath(outputpath, '.'.join([filename, ext])) if os.path.isfile(filepath): print('Removing file: ', filepath) os.remove(filepath) for filepath in additionalFiles: if os.path.isfile(filepath): print('Removing file: ', filepath) os.remove(filepath) if mgf: for _filename in os.listdir(outputpath): _basename, _fileext = os.path.splitext(_filename) if _fileext.lower() != '.mgf': continue if _basename.find(basename) != -1 and _basename != basename: filepath = aux.joinpath(outputpath, _filename) print('Removing file: ', filepath) os.remove(filepath)
[ "def", "cleanUpPparse", "(", "outputpath", ",", "rawfilename", ",", "mgf", "=", "False", ")", ":", "extensions", "=", "[", "'csv'", ",", "'ms1'", ",", "'ms2'", ",", "'xtract'", "]", "filename", ",", "fileext", "=", "os", ".", "path", ".", "splitext", "(", "os", ".", "path", ".", "basename", "(", "rawfilename", ")", ")", "additionalFiles", "=", "[", "aux", ".", "joinpath", "(", "outputpath", ",", "'pParsePlusLog.txt'", ")", ",", "aux", ".", "joinpath", "(", "outputpath", ",", "filename", "+", "'.pparse.para'", ")", ",", "]", "for", "ext", "in", "extensions", ":", "filepath", "=", "aux", ".", "joinpath", "(", "outputpath", ",", "'.'", ".", "join", "(", "[", "filename", ",", "ext", "]", ")", ")", "if", "os", ".", "path", ".", "isfile", "(", "filepath", ")", ":", "print", "(", "'Removing file: '", ",", "filepath", ")", "os", ".", "remove", "(", "filepath", ")", "for", "filepath", "in", "additionalFiles", ":", "if", "os", ".", "path", ".", "isfile", "(", "filepath", ")", ":", "print", "(", "'Removing file: '", ",", "filepath", ")", "os", ".", "remove", "(", "filepath", ")", "if", "mgf", ":", "for", "_filename", "in", "os", ".", "listdir", "(", "outputpath", ")", ":", "_basename", ",", "_fileext", "=", "os", ".", "path", ".", "splitext", "(", "_filename", ")", "if", "_fileext", ".", "lower", "(", ")", "!=", "'.mgf'", ":", "continue", "if", "_basename", ".", "find", "(", "basename", ")", "!=", "-", "1", "and", "_basename", "!=", "basename", ":", "filepath", "=", "aux", ".", "joinpath", "(", "outputpath", ",", "_filename", ")", "print", "(", "'Removing file: '", ",", "filepath", ")", "os", ".", "remove", "(", "filepath", ")" ]
Delete temporary files generated by pparse, including the filetypes ".csv", ".ms1", ".ms2", ".xtract", the files "pParsePlusLog.txt" and "pParse.para" and optionally also the ".mgf" file generated by pParse. .. warning: When the parameter "mgf" is set to "True" all files ending with ".mgf" and containing the specified "filename" are deleted. This could potentially also affect MGF files not generated by pParse. :param outputpath: path to the output directory of pParse :param rawfilename: filename of the thermo ".raw" file :param mgf: bool, if True the ".mgf" file generated by pParse is also removed
[ "Delete", "temporary", "files", "generated", "by", "pparse", "including", "the", "filetypes", ".", "csv", ".", "ms1", ".", "ms2", ".", "xtract", "the", "files", "pParsePlusLog", ".", "txt", "and", "pParse", ".", "para", "and", "optionally", "also", "the", ".", "mgf", "file", "generated", "by", "pParse", "." ]
python
train
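A stand-alone sketch of the delete-sidecar-files-by-extension pattern, run in a throwaway directory with invented file names. Note that the original function's .mgf branch compares against a name called basename that does not appear to be defined in that function; the stem of the raw file name seems intended, and the sketch below uses that stem.

import os
import tempfile

def clean_sidecars(outputpath, rawfilename, extensions=('csv', 'ms1', 'ms2', 'xtract')):
    # remove "<stem>.<ext>" companions of the raw file, if they exist
    stem = os.path.splitext(os.path.basename(rawfilename))[0]
    for ext in extensions:
        path = os.path.join(outputpath, '{0}.{1}'.format(stem, ext))
        if os.path.isfile(path):
            print('Removing file:', path)
            os.remove(path)

with tempfile.TemporaryDirectory() as tmp:
    for name in ('run01.csv', 'run01.ms2', 'run01.raw', 'other.csv'):
        open(os.path.join(tmp, name), 'w').close()
    clean_sidecars(tmp, 'run01.raw')
    print(sorted(os.listdir(tmp)))   # ['other.csv', 'run01.raw']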
diamondman/proteusisc
proteusisc/drivers/digilentdriver.py
https://github.com/diamondman/proteusisc/blob/7622b7b04e63f9dc0f5a04429ff78d9a490c9c5c/proteusisc/drivers/digilentdriver.py#L170-L188
def _get_adv_trans_stats(self, cmd, return_tdo=False): """Utility function to fetch the transfer statistics for the last advanced transfer. Checking the stats appears to sync the controller. For details on the advanced transfer please refer to the documentation at http://diamondman.github.io/Adapt/cable_digilent_adept.html#bulk-requests """ t = time() code, res = self.bulkCommand(b'\x03\x02%c\x00'%(0x80|cmd), 10) if self._scanchain and self._scanchain._print_statistics: print("GET STATS TIME", time()-t)#pragma: no cover if len(res) == 4: count = struct.unpack('<I', res)[0] return count elif len(res) == 8: written, read = struct.unpack('<II', res) return written, read return res
[ "def", "_get_adv_trans_stats", "(", "self", ",", "cmd", ",", "return_tdo", "=", "False", ")", ":", "t", "=", "time", "(", ")", "code", ",", "res", "=", "self", ".", "bulkCommand", "(", "b'\\x03\\x02%c\\x00'", "%", "(", "0x80", "|", "cmd", ")", ",", "10", ")", "if", "self", ".", "_scanchain", "and", "self", ".", "_scanchain", ".", "_print_statistics", ":", "print", "(", "\"GET STATS TIME\"", ",", "time", "(", ")", "-", "t", ")", "#pragma: no cover", "if", "len", "(", "res", ")", "==", "4", ":", "count", "=", "struct", ".", "unpack", "(", "'<I'", ",", "res", ")", "[", "0", "]", "return", "count", "elif", "len", "(", "res", ")", "==", "8", ":", "written", ",", "read", "=", "struct", ".", "unpack", "(", "'<II'", ",", "res", ")", "return", "written", ",", "read", "return", "res" ]
Utility function to fetch the transfer statistics for the last advanced transfer. Checking the stats appears to sync the controller. For details on the advanced transfer please refer to the documentation at http://diamondman.github.io/Adapt/cable_digilent_adept.html#bulk-requests
[ "Utility", "function", "to", "fetch", "the", "transfer", "statistics", "for", "the", "last", "advanced", "transfer", ".", "Checking", "the", "stats", "appears", "to", "sync", "the", "controller", ".", "For", "details", "on", "the", "advanced", "transfer", "please", "refer", "to", "the", "documentation", "at", "http", ":", "//", "diamondman", ".", "github", ".", "io", "/", "Adapt", "/", "cable_digilent_adept", ".", "html#bulk", "-", "requests" ]
python
train
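The statistics reply above is dispatched on length: 4 bytes decode as one little-endian uint32 count, 8 bytes as a (written, read) pair. A runnable sketch with fabricated controller responses:

import struct

def decode_stats(res):
    if len(res) == 4:
        return struct.unpack('<I', res)[0]     # single count
    if len(res) == 8:
        return struct.unpack('<II', res)        # (written, read)
    return res                                   # unknown shape: pass through

print(decode_stats(struct.pack('<I', 42)))        # 42
print(decode_stats(struct.pack('<II', 128, 64)))  # (128, 64)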
spencerahill/aospy
aospy/automate.py
https://github.com/spencerahill/aospy/blob/2f6e775b9b9956c54af117fdcdce2c87196afb6c/aospy/automate.py#L302-L339
def _exec_calcs(calcs, parallelize=False, client=None, **compute_kwargs): """Execute the given calculations. Parameters ---------- calcs : Sequence of ``aospy.Calc`` objects parallelize : bool, default False Whether to submit the calculations in parallel or not client : distributed.Client or None The distributed Client used if parallelize is set to True; if None a distributed LocalCluster is used. compute_kwargs : dict of keyword arguments passed to ``Calc.compute`` Returns ------- A list of the values returned by each Calc object that was executed. """ if parallelize: def func(calc): """Wrap _compute_or_skip_on_error to require only the calc argument""" if 'write_to_tar' in compute_kwargs: compute_kwargs['write_to_tar'] = False return _compute_or_skip_on_error(calc, compute_kwargs) if client is None: n_workers = _n_workers_for_local_cluster(calcs) with distributed.LocalCluster(n_workers=n_workers) as cluster: with distributed.Client(cluster) as client: result = _submit_calcs_on_client(calcs, client, func) else: result = _submit_calcs_on_client(calcs, client, func) if compute_kwargs['write_to_tar']: _serial_write_to_tar(calcs) return result else: return [_compute_or_skip_on_error(calc, compute_kwargs) for calc in calcs]
[ "def", "_exec_calcs", "(", "calcs", ",", "parallelize", "=", "False", ",", "client", "=", "None", ",", "*", "*", "compute_kwargs", ")", ":", "if", "parallelize", ":", "def", "func", "(", "calc", ")", ":", "\"\"\"Wrap _compute_or_skip_on_error to require only the calc\n argument\"\"\"", "if", "'write_to_tar'", "in", "compute_kwargs", ":", "compute_kwargs", "[", "'write_to_tar'", "]", "=", "False", "return", "_compute_or_skip_on_error", "(", "calc", ",", "compute_kwargs", ")", "if", "client", "is", "None", ":", "n_workers", "=", "_n_workers_for_local_cluster", "(", "calcs", ")", "with", "distributed", ".", "LocalCluster", "(", "n_workers", "=", "n_workers", ")", "as", "cluster", ":", "with", "distributed", ".", "Client", "(", "cluster", ")", "as", "client", ":", "result", "=", "_submit_calcs_on_client", "(", "calcs", ",", "client", ",", "func", ")", "else", ":", "result", "=", "_submit_calcs_on_client", "(", "calcs", ",", "client", ",", "func", ")", "if", "compute_kwargs", "[", "'write_to_tar'", "]", ":", "_serial_write_to_tar", "(", "calcs", ")", "return", "result", "else", ":", "return", "[", "_compute_or_skip_on_error", "(", "calc", ",", "compute_kwargs", ")", "for", "calc", "in", "calcs", "]" ]
Execute the given calculations. Parameters ---------- calcs : Sequence of ``aospy.Calc`` objects parallelize : bool, default False Whether to submit the calculations in parallel or not client : distributed.Client or None The distributed Client used if parallelize is set to True; if None a distributed LocalCluster is used. compute_kwargs : dict of keyword arguments passed to ``Calc.compute`` Returns ------- A list of the values returned by each Calc object that was executed.
[ "Execute", "the", "given", "calculations", "." ]
python
train
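A dependency-free sketch of the same dispatch shape: each calculation goes through a wrapper that turns per-item failures into a skip, executed either serially or on a pool. concurrent.futures stands in for dask.distributed here; this is the pattern only, not aospy's API.

from concurrent.futures import ThreadPoolExecutor

def compute_or_skip(calc):
    try:
        return calc()
    except Exception as exc:          # mirror the "skip on error" behaviour
        print('skipping calc:', exc)
        return None

def exec_calcs(calcs, parallelize=False, max_workers=2):
    if parallelize:
        with ThreadPoolExecutor(max_workers=max_workers) as pool:
            return list(pool.map(compute_or_skip, calcs))
    return [compute_or_skip(calc) for calc in calcs]

calcs = [lambda: 1 + 1, lambda: 1 / 0, lambda: sum(range(5))]
print(exec_calcs(calcs))                    # [2, None, 10]
print(exec_calcs(calcs, parallelize=True))  # [2, None, 10]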
victorlei/smop
smop/parse.py
https://github.com/victorlei/smop/blob/bdad96b715d1dd75ce8ab4724f76b9b1bb1f61cd/smop/parse.py#L119-L140
def p_case_list(p): """ case_list : | CASE expr sep stmt_list_opt case_list | CASE expr error stmt_list_opt case_list | OTHERWISE stmt_list """ if len(p) == 1: p[0] = node.stmt_list() elif len(p) == 3: assert isinstance(p[2], node.stmt_list) p[0] = p[2] elif len(p) == 6: p[0] = node.if_stmt( cond_expr=node.expr( op="==", args=node.expr_list([p[2]])), then_stmt=p[4], else_stmt=p[5]) p[0].cond_expr.args.append( None) # None will be replaced using backpatch() else: assert 0
[ "def", "p_case_list", "(", "p", ")", ":", "if", "len", "(", "p", ")", "==", "1", ":", "p", "[", "0", "]", "=", "node", ".", "stmt_list", "(", ")", "elif", "len", "(", "p", ")", "==", "3", ":", "assert", "isinstance", "(", "p", "[", "2", "]", ",", "node", ".", "stmt_list", ")", "p", "[", "0", "]", "=", "p", "[", "2", "]", "elif", "len", "(", "p", ")", "==", "6", ":", "p", "[", "0", "]", "=", "node", ".", "if_stmt", "(", "cond_expr", "=", "node", ".", "expr", "(", "op", "=", "\"==\"", ",", "args", "=", "node", ".", "expr_list", "(", "[", "p", "[", "2", "]", "]", ")", ")", ",", "then_stmt", "=", "p", "[", "4", "]", ",", "else_stmt", "=", "p", "[", "5", "]", ")", "p", "[", "0", "]", ".", "cond_expr", ".", "args", ".", "append", "(", "None", ")", "# None will be replaced using backpatch()", "else", ":", "assert", "0" ]
case_list : | CASE expr sep stmt_list_opt case_list | CASE expr error stmt_list_opt case_list | OTHERWISE stmt_list
[ "case_list", ":", "|", "CASE", "expr", "sep", "stmt_list_opt", "case_list", "|", "CASE", "expr", "error", "stmt_list_opt", "case_list", "|", "OTHERWISE", "stmt_list" ]
python
train
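A plain-Python sketch of what the production above builds: (case value, body) pairs plus an optional otherwise branch become a nested if/elif chain keyed on the switch expression (which smop backpatches later). Tuples stand in for smop's node classes.

def cases_to_if_chain(switch_expr, cases, otherwise=None):
    # fold the case list from the back so the first case ends up outermost
    chain = otherwise
    for value, body in reversed(cases):
        chain = ('if', ('==', switch_expr, value), body, chain)
    return chain

tree = cases_to_if_chain('x', [(1, 'A'), (2, 'B')], otherwise='C')
print(tree)   # ('if', ('==', 'x', 1), 'A', ('if', ('==', 'x', 2), 'B', 'C'))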
knipknap/exscript
Exscript/util/mail.py
https://github.com/knipknap/exscript/blob/72718eee3e87b345d5a5255be9824e867e42927b/Exscript/util/mail.py#L417-L430
def from_template(filename, **kwargs): """ Like from_template_string(), but reads the template from the file with the given name instead. :type filename: string :param filename: The name of the template file. :type kwargs: str :param kwargs: Variables to replace in the template. :rtype: Mail :return: The resulting mail. """ with open(filename) as fp: return from_template_string(fp.read(), **kwargs)
[ "def", "from_template", "(", "filename", ",", "*", "*", "kwargs", ")", ":", "with", "open", "(", "filename", ")", "as", "fp", ":", "return", "from_template_string", "(", "fp", ".", "read", "(", ")", ",", "*", "*", "kwargs", ")" ]
Like from_template_string(), but reads the template from the file with the given name instead. :type filename: string :param filename: The name of the template file. :type kwargs: str :param kwargs: Variables to replace in the template. :rtype: Mail :return: The resulting mail.
[ "Like", "from_template_string", "()", "but", "reads", "the", "template", "from", "the", "file", "with", "the", "given", "name", "instead", "." ]
python
train
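A self-contained sketch of the read-file-then-substitute wrapper shown above, using string.Template in place of Exscript's own template engine and a temporary file so it runs anywhere.

import os
import tempfile
from string import Template

def render_template_file(filename, **kwargs):
    # read the template from disk and substitute the keyword arguments
    with open(filename) as fp:
        return Template(fp.read()).substitute(**kwargs)

with tempfile.NamedTemporaryFile('w', suffix='.tmpl', delete=False) as fp:
    fp.write('To: $recipient\nSubject: $subject\n')
    path = fp.name

print(render_template_file(path, recipient='ops@example.com', subject='backup ok'))
os.remove(path)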
eyeseast/python-metalsmyth
metalsmyth/plugins/template.py
https://github.com/eyeseast/python-metalsmyth/blob/8c99746d4987ab8ec88d6ba84b6092c51dfbbe3e/metalsmyth/plugins/template.py#L33-L54
def run(self, files, stack): "Render templates" # make stack available to all templates self.env.globals['stack'] = stack for filename, post in files.items(): # render content first post.content = self.env.from_string(post.content).render(post.metadata) # check for a template field if "template" in post.metadata: template = self.env.get_template(post['template']) # or use the default template elif hasattr(self, 'default_template'): template = self.default_template else: # no template, so bail continue # at this point, we have a template, so render post.content = template.render(post=post)
[ "def", "run", "(", "self", ",", "files", ",", "stack", ")", ":", "# make stack available to all templates", "self", ".", "env", ".", "globals", "[", "'stack'", "]", "=", "stack", "for", "filename", ",", "post", "in", "files", ".", "items", "(", ")", ":", "# render content first", "post", ".", "content", "=", "self", ".", "env", ".", "from_string", "(", "post", ".", "content", ")", ".", "render", "(", "post", ".", "metadata", ")", "# check for a template field", "if", "\"template\"", "in", "post", ".", "metadata", ":", "template", "=", "self", ".", "env", ".", "get_template", "(", "post", "[", "'template'", "]", ")", "# or use the default template", "elif", "hasattr", "(", "self", ",", "'default_template'", ")", ":", "template", "=", "self", ".", "default_template", "else", ":", "# no template, so bail", "continue", "# at this point, we have a template, so render", "post", ".", "content", "=", "template", ".", "render", "(", "post", "=", "post", ")" ]
Render templates
[ "Render", "templates" ]
python
train
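A minimal sketch of the plugin's two-pass rendering with jinja2 (assumed installed): the post body is first rendered against its own metadata, then wrapped by the template named in that metadata. The post and template below are made up.

from jinja2 import Environment, DictLoader

env = Environment(loader=DictLoader({
    'page.html': '<article>{{ post["content"] }}</article>',
}))

post = {
    'metadata': {'title': 'Hello', 'template': 'page.html'},
    'content': '<h1>{{ title }}</h1>',
}

# pass 1: render the body against its own metadata
post['content'] = env.from_string(post['content']).render(post['metadata'])
# pass 2: wrap it in the named page template, if any
template_name = post['metadata'].get('template')
if template_name:
    post['content'] = env.get_template(template_name).render(post=post)

print(post['content'])   # <article><h1>Hello</h1></article>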
jobovy/galpy
galpy/potential/BurkertPotential.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/potential/BurkertPotential.py#L178-L210
def _surfdens(self,R,z,phi=0.,t=0.): """ NAME: _surfdens PURPOSE: evaluate the surface density for this potential INPUT: R - Galactocentric cylindrical radius z - vertical height phi - azimuth t - time OUTPUT: the surface density HISTORY: 2018-08-19 - Written - Bovy (UofT) """ r= numpy.sqrt(R**2.+z**2.) x= r/self.a Rpa= numpy.sqrt(R**2.+self.a**2.) Rma= numpy.sqrt(R**2.-self.a**2.+0j) if Rma == 0: za= z/self.a return self.a**2./2.*((2.-2.*numpy.sqrt(za**2.+1) +numpy.sqrt(2.)*za\ *numpy.arctan(za/numpy.sqrt(2.)))/z +numpy.sqrt(2*za**2.+2.)\ *numpy.arctanh(za/numpy.sqrt(2.*(za**2.+1))) /numpy.sqrt(self.a**2.+z**2.)) else: return self.a**2.*(numpy.arctan(z/x/Rma)/Rma +numpy.arctanh(z/x/Rpa)/Rpa -numpy.arctan(z/Rma)/Rma +numpy.arctan(z/Rpa)/Rpa).real
[ "def", "_surfdens", "(", "self", ",", "R", ",", "z", ",", "phi", "=", "0.", ",", "t", "=", "0.", ")", ":", "r", "=", "numpy", ".", "sqrt", "(", "R", "**", "2.", "+", "z", "**", "2.", ")", "x", "=", "r", "/", "self", ".", "a", "Rpa", "=", "numpy", ".", "sqrt", "(", "R", "**", "2.", "+", "self", ".", "a", "**", "2.", ")", "Rma", "=", "numpy", ".", "sqrt", "(", "R", "**", "2.", "-", "self", ".", "a", "**", "2.", "+", "0j", ")", "if", "Rma", "==", "0", ":", "za", "=", "z", "/", "self", ".", "a", "return", "self", ".", "a", "**", "2.", "/", "2.", "*", "(", "(", "2.", "-", "2.", "*", "numpy", ".", "sqrt", "(", "za", "**", "2.", "+", "1", ")", "+", "numpy", ".", "sqrt", "(", "2.", ")", "*", "za", "*", "numpy", ".", "arctan", "(", "za", "/", "numpy", ".", "sqrt", "(", "2.", ")", ")", ")", "/", "z", "+", "numpy", ".", "sqrt", "(", "2", "*", "za", "**", "2.", "+", "2.", ")", "*", "numpy", ".", "arctanh", "(", "za", "/", "numpy", ".", "sqrt", "(", "2.", "*", "(", "za", "**", "2.", "+", "1", ")", ")", ")", "/", "numpy", ".", "sqrt", "(", "self", ".", "a", "**", "2.", "+", "z", "**", "2.", ")", ")", "else", ":", "return", "self", ".", "a", "**", "2.", "*", "(", "numpy", ".", "arctan", "(", "z", "/", "x", "/", "Rma", ")", "/", "Rma", "+", "numpy", ".", "arctanh", "(", "z", "/", "x", "/", "Rpa", ")", "/", "Rpa", "-", "numpy", ".", "arctan", "(", "z", "/", "Rma", ")", "/", "Rma", "+", "numpy", ".", "arctan", "(", "z", "/", "Rpa", ")", "/", "Rpa", ")", ".", "real" ]
NAME: _surfdens PURPOSE: evaluate the surface density for this potential INPUT: R - Galactocentric cylindrical radius z - vertical height phi - azimuth t - time OUTPUT: the surface density HISTORY: 2018-08-19 - Written - Bovy (UofT)
[ "NAME", ":", "_surfdens", "PURPOSE", ":", "evaluate", "the", "surface", "density", "for", "this", "potential", "INPUT", ":", "R", "-", "Galactocentric", "cylindrical", "radius", "z", "-", "vertical", "height", "phi", "-", "azimuth", "t", "-", "time", "OUTPUT", ":", "the", "surface", "density", "HISTORY", ":", "2018", "-", "08", "-", "19", "-", "Written", "-", "Bovy", "(", "UofT", ")" ]
python
train
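A small numpy sketch of the trick the branch above relies on: adding 0j before the square root keeps R < a from producing NaNs, and the physical value is recovered with .real at the end. The numbers are arbitrary and this is not the full surface-density formula.

import numpy

R, z, a = 0.5, 0.3, 1.0                      # R < a, so R**2 - a**2 < 0
Rma = numpy.sqrt(R**2 - a**2 + 0j)           # purely imaginary instead of NaN
Rpa = numpy.sqrt(R**2 + a**2)

term = numpy.arctan(z / Rma) / Rma + numpy.arctan(z / Rpa) / Rpa
print(Rma)          # ~0.866j
print(term.real)    # finite real value; the imaginary part vanishes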
kervi/kervi-core
kervi/values/__init__.py
https://github.com/kervi/kervi-core/blob/3c1e3c8a17a7b4d085d8a28b99180ff2a96b0e23/kervi/values/__init__.py#L401-L409
def value(self, new_value): """ Updates the value. If the change exceeds the change delta observers and linked values are notified. """ datetime_value = None if new_value: datetime_value = new_value.strftime("%Y-%m-%dT%H:%M:%SZ") self._set_value(datetime_value)
[ "def", "value", "(", "self", ",", "new_value", ")", ":", "datetime_value", "=", "None", "if", "new_value", ":", "datetime_value", "=", "new_value", ".", "strftime", "(", "\"%Y-%M-%dT%H:%M:%SZ\"", ")", "self", ".", "_set_value", "(", "datetime_value", ")" ]
Updates the value. If the change exceeds the change delta observers and linked values are notified.
[ "Updates", "the", "value", ".", "If", "the", "change", "exceeds", "the", "change", "delta", "observers", "and", "linked", "values", "are", "notified", "." ]
python
train
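A quick check of the timestamp string the setter above stores, using a fixed datetime so the output is reproducible; note strftime's easily confused %m (zero-padded month) and %M (zero-padded minute) fields.

from datetime import datetime

dt = datetime(2023, 4, 5, 6, 7, 8)
print(dt.strftime("%Y-%m-%dT%H:%M:%SZ"))     # 2023-04-05T06:07:08Z
print(dt.strftime("%m"), dt.strftime("%M"))  # 04 07  (month vs minute)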
apache/incubator-mxnet
python/mxnet/symbol/symbol.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/symbol/symbol.py#L817-L839
def list_inputs(self): """Lists all arguments and auxiliary states of this Symbol. Returns ------- inputs : list of str List of all inputs. Examples -------- >>> bn = mx.sym.BatchNorm(name='bn') >>> bn.list_arguments() ['bn_data', 'bn_gamma', 'bn_beta'] >>> bn.list_auxiliary_states() ['bn_moving_mean', 'bn_moving_var'] >>> bn.list_inputs() ['bn_data', 'bn_gamma', 'bn_beta', 'bn_moving_mean', 'bn_moving_var'] """ size = ctypes.c_uint() sarr = ctypes.POINTER(ctypes.c_char_p)() check_call(_LIB.NNSymbolListInputNames( self.handle, 0, ctypes.byref(size), ctypes.byref(sarr))) return [py_str(sarr[i]) for i in range(size.value)]
[ "def", "list_inputs", "(", "self", ")", ":", "size", "=", "ctypes", ".", "c_uint", "(", ")", "sarr", "=", "ctypes", ".", "POINTER", "(", "ctypes", ".", "c_char_p", ")", "(", ")", "check_call", "(", "_LIB", ".", "NNSymbolListInputNames", "(", "self", ".", "handle", ",", "0", ",", "ctypes", ".", "byref", "(", "size", ")", ",", "ctypes", ".", "byref", "(", "sarr", ")", ")", ")", "return", "[", "py_str", "(", "sarr", "[", "i", "]", ")", "for", "i", "in", "range", "(", "size", ".", "value", ")", "]" ]
Lists all arguments and auxiliary states of this Symbol. Returns ------- inputs : list of str List of all inputs. Examples -------- >>> bn = mx.sym.BatchNorm(name='bn') >>> bn.list_arguments() ['bn_data', 'bn_gamma', 'bn_beta'] >>> bn.list_auxiliary_states() ['bn_moving_mean', 'bn_moving_var'] >>> bn.list_inputs() ['bn_data', 'bn_gamma', 'bn_beta', 'bn_moving_mean', 'bn_moving_var']
[ "Lists", "all", "arguments", "and", "auxiliary", "states", "of", "this", "Symbol", "." ]
python
train
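A stand-alone illustration of the ctypes idiom above: a C call fills in a count and a char** array that Python then walks and decodes. The "C side" is faked with a ctypes array so the snippet runs without MXNet.

import ctypes

names = [b'bn_data', b'bn_gamma', b'bn_beta']
size = ctypes.c_uint(len(names))
arr = (ctypes.c_char_p * len(names))(*names)            # pretend C filled this in
sarr = ctypes.cast(arr, ctypes.POINTER(ctypes.c_char_p))

# walk the char** and decode each entry, just like list_inputs does
decoded = [sarr[i].decode('utf-8') for i in range(size.value)]
print(decoded)   # ['bn_data', 'bn_gamma', 'bn_beta']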
mardix/Mocha
mocha/extras/md.py
https://github.com/mardix/Mocha/blob/bce481cb31a0972061dd99bc548701411dcb9de3/mocha/extras/md.py#L25-L29
def run(self, root): "Find all images and append to markdown.images. " self.markdown.images = [] for image in root.getiterator("img"): self.markdown.images.append(image.attrib["src"])
[ "def", "run", "(", "self", ",", "root", ")", ":", "self", ".", "markdown", ".", "images", "=", "[", "]", "for", "image", "in", "root", ".", "getiterator", "(", "\"img\"", ")", ":", "self", ".", "markdown", ".", "images", ".", "append", "(", "image", ".", "attrib", "[", "\"src\"", "]", ")" ]
Find all images and append to markdown.images.
[ "Find", "all", "images", "and", "append", "to", "markdown", ".", "images", "." ]
python
train
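A stand-alone version of the image walk above using xml.etree, which also runs on current Python where Element.iter() replaces the removed getiterator().

import xml.etree.ElementTree as ET

html = '<div><p>hi</p><img src="a.png"/><span><img src="b.jpg"/></span></div>'
root = ET.fromstring(html)

# collect the src attribute of every <img> element in document order
images = [img.attrib['src'] for img in root.iter('img')]
print(images)   # ['a.png', 'b.jpg']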