repo: string (lengths 7-54)
path: string (lengths 4-192)
url: string (lengths 87-284)
code: string (lengths 78-104k)
code_tokens: list
docstring: string (lengths 1-46.9k)
docstring_tokens: list
language: string (1 distinct value)
partition: string (3 distinct values)
eventbrite/eventbrite-sdk-python
eventbrite/access_methods.py
https://github.com/eventbrite/eventbrite-sdk-python/blob/f2e5dc5aa1aa3e45766de13f16fd65722163d91a/eventbrite/access_methods.py#L486-L495
def delete_one_series(self, id, **data): """ DELETE /series/:id/ Deletes a repeating event series and all of its occurrences if the delete is permitted. In order for a delete to be permitted, there must be no pending or completed orders for any dates in the series. Returns a boolean indicating success or failure of the delete. .. _get-series-events: """ return self.delete("/series/{0}/".format(id), data=data)
[ "def", "delete_one_series", "(", "self", ",", "id", ",", "*", "*", "data", ")", ":", "return", "self", ".", "delete", "(", "\"/series/{0}/\"", ".", "format", "(", "id", ")", ",", "data", "=", "data", ")" ]
DELETE /series/:id/ Deletes a repeating event series and all of its occurrences if the delete is permitted. In order for a delete to be permitted, there must be no pending or completed orders for any dates in the series. Returns a boolean indicating success or failure of the delete. .. _get-series-events:
[ "DELETE", "/", "series", "/", ":", "id", "/", "Deletes", "a", "repeating", "event", "series", "and", "all", "of", "its", "occurrences", "if", "the", "delete", "is", "permitted", ".", "In", "order", "for", "a", "delete", "to", "be", "permitted", "there", "must", "be", "no", "pending", "or", "completed", "orders", "for", "any", "dates", "in", "the", "series", ".", "Returns", "a", "boolean", "indicating", "success", "or", "failure", "of", "the", "delete", ".", "..", "_get", "-", "series", "-", "events", ":" ]
python
train
pandas-dev/pandas
pandas/core/frame.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L4955-L5055
def nsmallest(self, n, columns, keep='first'): """ Return the first `n` rows ordered by `columns` in ascending order. Return the first `n` rows with the smallest values in `columns`, in ascending order. The columns that are not specified are returned as well, but not used for ordering. This method is equivalent to ``df.sort_values(columns, ascending=True).head(n)``, but more performant. Parameters ---------- n : int Number of items to retrieve. columns : list or str Column name or names to order by. keep : {'first', 'last', 'all'}, default 'first' Where there are duplicate values: - ``first`` : take the first occurrence. - ``last`` : take the last occurrence. - ``all`` : do not drop any duplicates, even it means selecting more than `n` items. .. versionadded:: 0.24.0 Returns ------- DataFrame See Also -------- DataFrame.nlargest : Return the first `n` rows ordered by `columns` in descending order. DataFrame.sort_values : Sort DataFrame by the values. DataFrame.head : Return the first `n` rows without re-ordering. Examples -------- >>> df = pd.DataFrame({'population': [59000000, 65000000, 434000, ... 434000, 434000, 337000, 11300, ... 11300, 11300], ... 'GDP': [1937894, 2583560 , 12011, 4520, 12128, ... 17036, 182, 38, 311], ... 'alpha-2': ["IT", "FR", "MT", "MV", "BN", ... "IS", "NR", "TV", "AI"]}, ... index=["Italy", "France", "Malta", ... "Maldives", "Brunei", "Iceland", ... "Nauru", "Tuvalu", "Anguilla"]) >>> df population GDP alpha-2 Italy 59000000 1937894 IT France 65000000 2583560 FR Malta 434000 12011 MT Maldives 434000 4520 MV Brunei 434000 12128 BN Iceland 337000 17036 IS Nauru 11300 182 NR Tuvalu 11300 38 TV Anguilla 11300 311 AI In the following example, we will use ``nsmallest`` to select the three rows having the smallest values in column "a". >>> df.nsmallest(3, 'population') population GDP alpha-2 Nauru 11300 182 NR Tuvalu 11300 38 TV Anguilla 11300 311 AI When using ``keep='last'``, ties are resolved in reverse order: >>> df.nsmallest(3, 'population', keep='last') population GDP alpha-2 Anguilla 11300 311 AI Tuvalu 11300 38 TV Nauru 11300 182 NR When using ``keep='all'``, all duplicate items are maintained: >>> df.nsmallest(3, 'population', keep='all') population GDP alpha-2 Nauru 11300 182 NR Tuvalu 11300 38 TV Anguilla 11300 311 AI To order by the largest values in column "a" and then "c", we can specify multiple columns like in the next example. >>> df.nsmallest(3, ['population', 'GDP']) population GDP alpha-2 Tuvalu 11300 38 TV Nauru 11300 182 NR Anguilla 11300 311 AI """ return algorithms.SelectNFrame(self, n=n, keep=keep, columns=columns).nsmallest()
[ "def", "nsmallest", "(", "self", ",", "n", ",", "columns", ",", "keep", "=", "'first'", ")", ":", "return", "algorithms", ".", "SelectNFrame", "(", "self", ",", "n", "=", "n", ",", "keep", "=", "keep", ",", "columns", "=", "columns", ")", ".", "nsmallest", "(", ")" ]
Return the first `n` rows ordered by `columns` in ascending order. Return the first `n` rows with the smallest values in `columns`, in ascending order. The columns that are not specified are returned as well, but not used for ordering. This method is equivalent to ``df.sort_values(columns, ascending=True).head(n)``, but more performant. Parameters ---------- n : int Number of items to retrieve. columns : list or str Column name or names to order by. keep : {'first', 'last', 'all'}, default 'first' Where there are duplicate values: - ``first`` : take the first occurrence. - ``last`` : take the last occurrence. - ``all`` : do not drop any duplicates, even it means selecting more than `n` items. .. versionadded:: 0.24.0 Returns ------- DataFrame See Also -------- DataFrame.nlargest : Return the first `n` rows ordered by `columns` in descending order. DataFrame.sort_values : Sort DataFrame by the values. DataFrame.head : Return the first `n` rows without re-ordering. Examples -------- >>> df = pd.DataFrame({'population': [59000000, 65000000, 434000, ... 434000, 434000, 337000, 11300, ... 11300, 11300], ... 'GDP': [1937894, 2583560 , 12011, 4520, 12128, ... 17036, 182, 38, 311], ... 'alpha-2': ["IT", "FR", "MT", "MV", "BN", ... "IS", "NR", "TV", "AI"]}, ... index=["Italy", "France", "Malta", ... "Maldives", "Brunei", "Iceland", ... "Nauru", "Tuvalu", "Anguilla"]) >>> df population GDP alpha-2 Italy 59000000 1937894 IT France 65000000 2583560 FR Malta 434000 12011 MT Maldives 434000 4520 MV Brunei 434000 12128 BN Iceland 337000 17036 IS Nauru 11300 182 NR Tuvalu 11300 38 TV Anguilla 11300 311 AI In the following example, we will use ``nsmallest`` to select the three rows having the smallest values in column "a". >>> df.nsmallest(3, 'population') population GDP alpha-2 Nauru 11300 182 NR Tuvalu 11300 38 TV Anguilla 11300 311 AI When using ``keep='last'``, ties are resolved in reverse order: >>> df.nsmallest(3, 'population', keep='last') population GDP alpha-2 Anguilla 11300 311 AI Tuvalu 11300 38 TV Nauru 11300 182 NR When using ``keep='all'``, all duplicate items are maintained: >>> df.nsmallest(3, 'population', keep='all') population GDP alpha-2 Nauru 11300 182 NR Tuvalu 11300 38 TV Anguilla 11300 311 AI To order by the largest values in column "a" and then "c", we can specify multiple columns like in the next example. >>> df.nsmallest(3, ['population', 'GDP']) population GDP alpha-2 Tuvalu 11300 38 TV Nauru 11300 182 NR Anguilla 11300 311 AI
[ "Return", "the", "first", "n", "rows", "ordered", "by", "columns", "in", "ascending", "order", "." ]
python
train
hirokiky/uiro
uiro/view.py
https://github.com/hirokiky/uiro/blob/8436976b21ac9b0eac4243768f5ada12479b9e00/uiro/view.py#L14-L22
def get_base_wrappers(method='get', template_name='', predicates=(), wrappers=()): """ basic View Wrappers used by view_config. """ wrappers += (preserve_view(MethodPredicate(method), *predicates),) if template_name: wrappers += (render_template(template_name),) return wrappers
[ "def", "get_base_wrappers", "(", "method", "=", "'get'", ",", "template_name", "=", "''", ",", "predicates", "=", "(", ")", ",", "wrappers", "=", "(", ")", ")", ":", "wrappers", "+=", "(", "preserve_view", "(", "MethodPredicate", "(", "method", ")", ",", "*", "predicates", ")", ",", ")", "if", "template_name", ":", "wrappers", "+=", "(", "render_template", "(", "template_name", ")", ",", ")", "return", "wrappers" ]
basic View Wrappers used by view_config.
[ "basic", "View", "Wrappers", "used", "by", "view_config", "." ]
python
train
huge-success/sanic
sanic/exceptions.py
https://github.com/huge-success/sanic/blob/6a4a3f617fdbe1d3ee8bdc9d1b12ad2d0b34acdd/sanic/exceptions.py#L124-L134
def add_status_code(code): """ Decorator used for adding exceptions to :class:`SanicException`. """ def class_decorator(cls): cls.status_code = code _sanic_exceptions[code] = cls return cls return class_decorator
[ "def", "add_status_code", "(", "code", ")", ":", "def", "class_decorator", "(", "cls", ")", ":", "cls", ".", "status_code", "=", "code", "_sanic_exceptions", "[", "code", "]", "=", "cls", "return", "cls", "return", "class_decorator" ]
Decorator used for adding exceptions to :class:`SanicException`.
[ "Decorator", "used", "for", "adding", "exceptions", "to", ":", "class", ":", "SanicException", "." ]
python
train
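A minimal usage sketch for the add_status_code decorator in the record above. The import path follows the record's sanic/exceptions.py, and the exception class and status code are invented for illustration:

from sanic.exceptions import SanicException, add_status_code

@add_status_code(418)
class ExampleTeapotError(SanicException):
    # Hypothetical subclass used only to show the registration side effect.
    pass

# After decoration the class carries the code and has been registered in the
# module-level _sanic_exceptions lookup table under key 418.
assert ExampleTeapotError.status_code == 418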
XuShaohua/bcloud
bcloud/auth.py
https://github.com/XuShaohua/bcloud/blob/4b54e0fdccf2b3013285fef05c97354cfa31697b/bcloud/auth.py#L82-L102
def get_UBI(cookie, tokens): '''检查登录历史, 可以获得一个Cookie - UBI. 返回的信息类似于: {"errInfo":{ "no": "0" }, "data": {'displayname':['[email protected]']}} ''' url = ''.join([ const.PASSPORT_URL, '?loginhistory', '&token=', tokens['token'], '&tpl=pp&apiver=v3', '&tt=', util.timestamp(), ]) headers={ 'Cookie': cookie.header_output(), 'Referer': const.REFERER, } req = net.urlopen(url, headers=headers) if req: return req.headers.get_all('Set-Cookie') else: return None
[ "def", "get_UBI", "(", "cookie", ",", "tokens", ")", ":", "url", "=", "''", ".", "join", "(", "[", "const", ".", "PASSPORT_URL", ",", "'?loginhistory'", ",", "'&token='", ",", "tokens", "[", "'token'", "]", ",", "'&tpl=pp&apiver=v3'", ",", "'&tt='", ",", "util", ".", "timestamp", "(", ")", ",", "]", ")", "headers", "=", "{", "'Cookie'", ":", "cookie", ".", "header_output", "(", ")", ",", "'Referer'", ":", "const", ".", "REFERER", ",", "}", "req", "=", "net", ".", "urlopen", "(", "url", ",", "headers", "=", "headers", ")", "if", "req", ":", "return", "req", ".", "headers", ".", "get_all", "(", "'Set-Cookie'", ")", "else", ":", "return", "None" ]
Check the login history; this yields a Cookie - UBI. The returned information looks like: {"errInfo":{ "no": "0" }, "data": {'displayname':['[email protected]']}}
[ "检查登录历史", "可以获得一个Cookie", "-", "UBI", ".", "返回的信息类似于", ":", "{", "errInfo", ":", "{", "no", ":", "0", "}", "data", ":", "{", "displayname", ":", "[", "xxx" ]
python
train
kwikteam/phy
phy/cluster/_history.py
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/cluster/_history.py#L28-L32
def current_item(self): """Return the current element.""" if self._history and self._index >= 0: self._check_index() return self._history[self._index]
[ "def", "current_item", "(", "self", ")", ":", "if", "self", ".", "_history", "and", "self", ".", "_index", ">=", "0", ":", "self", ".", "_check_index", "(", ")", "return", "self", ".", "_history", "[", "self", ".", "_index", "]" ]
Return the current element.
[ "Return", "the", "current", "element", "." ]
python
train
NASA-AMMOS/AIT-Core
ait/core/json.py
https://github.com/NASA-AMMOS/AIT-Core/blob/9d85bd9c738e7a6a6fbdff672bea708238b02a3a/ait/core/json.py#L26-L56
def slotsToJSON(obj, slots=None): """Converts the given Python object to one suitable for Javascript Object Notation (JSON) serialization via :func:`json.dump` or :func:`json.dumps`. This function delegates to :func:`toJSON`. Specifically only attributes in the list of *slots* are converted. If *slots* is not provided, it defaults to the object's ``__slots__` and any inherited ``__slots__``. To omit certain slots from serialization, the object may define a :meth:`__jsonOmit__(key, val)` method. When the method returns True for any particular slot name (i.e. key) and value combination, the slot will not serialized. """ if slots is None: slots = list(obj.__slots__) if hasattr(obj, '__slots__') else [ ] for base in obj.__class__.__bases__: if hasattr(base, '__slots__'): slots.extend(base.__slots__) testOmit = hasattr(obj, '__jsonOmit__') and callable(obj.__jsonOmit__) result = { } for slot in slots: key = slot[1:] if slot.startswith('_') else slot val = getattr(obj, slot, None) if testOmit is False or obj.__jsonOmit__(key, val) is False: result[key] = toJSON(val) return result
[ "def", "slotsToJSON", "(", "obj", ",", "slots", "=", "None", ")", ":", "if", "slots", "is", "None", ":", "slots", "=", "list", "(", "obj", ".", "__slots__", ")", "if", "hasattr", "(", "obj", ",", "'__slots__'", ")", "else", "[", "]", "for", "base", "in", "obj", ".", "__class__", ".", "__bases__", ":", "if", "hasattr", "(", "base", ",", "'__slots__'", ")", ":", "slots", ".", "extend", "(", "base", ".", "__slots__", ")", "testOmit", "=", "hasattr", "(", "obj", ",", "'__jsonOmit__'", ")", "and", "callable", "(", "obj", ".", "__jsonOmit__", ")", "result", "=", "{", "}", "for", "slot", "in", "slots", ":", "key", "=", "slot", "[", "1", ":", "]", "if", "slot", ".", "startswith", "(", "'_'", ")", "else", "slot", "val", "=", "getattr", "(", "obj", ",", "slot", ",", "None", ")", "if", "testOmit", "is", "False", "or", "obj", ".", "__jsonOmit__", "(", "key", ",", "val", ")", "is", "False", ":", "result", "[", "key", "]", "=", "toJSON", "(", "val", ")", "return", "result" ]
Converts the given Python object to one suitable for Javascript Object Notation (JSON) serialization via :func:`json.dump` or :func:`json.dumps`. This function delegates to :func:`toJSON`. Specifically only attributes in the list of *slots* are converted. If *slots* is not provided, it defaults to the object's ``__slots__` and any inherited ``__slots__``. To omit certain slots from serialization, the object may define a :meth:`__jsonOmit__(key, val)` method. When the method returns True for any particular slot name (i.e. key) and value combination, the slot will not serialized.
[ "Converts", "the", "given", "Python", "object", "to", "one", "suitable", "for", "Javascript", "Object", "Notation", "(", "JSON", ")", "serialization", "via", ":", "func", ":", "json", ".", "dump", "or", ":", "func", ":", "json", ".", "dumps", ".", "This", "function", "delegates", "to", ":", "func", ":", "toJSON", "." ]
python
train
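A small, hedged sketch of how slotsToJSON from the record above might be exercised. The Point class and its values are made up, the import path is taken from the record, and the expected output assumes toJSON passes plain numbers through unchanged:

from ait.core.json import slotsToJSON

class Point(object):
    # Hypothetical class: leading underscores are stripped from result keys.
    __slots__ = ['_x', '_y']

    def __init__(self, x, y):
        self._x, self._y = x, y

    def __jsonOmit__(self, key, val):
        # Omit any slot whose value is None from the serialized result.
        return val is None

print(slotsToJSON(Point(1, None)))   # expected: {'x': 1}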
Jammy2211/PyAutoLens
autolens/data/ccd.py
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/data/ccd.py#L688-L700
def setup_random_seed(seed): """Setup the random seed. If the input seed is -1, the code will use a random seed for every run. If it is \ positive, that seed is used for all runs, thereby giving reproducible results. Parameters ---------- seed : int The seed of the random number generator. """ if seed == -1: seed = np.random.randint(0, int(1e9)) # Use one seed, so all regions have identical column non-uniformity. np.random.seed(seed)
[ "def", "setup_random_seed", "(", "seed", ")", ":", "if", "seed", "==", "-", "1", ":", "seed", "=", "np", ".", "random", ".", "randint", "(", "0", ",", "int", "(", "1e9", ")", ")", "# Use one seed, so all regions have identical column non-uniformity.", "np", ".", "random", ".", "seed", "(", "seed", ")" ]
Setup the random seed. If the input seed is -1, the code will use a random seed for every run. If it is \ positive, that seed is used for all runs, thereby giving reproducible results. Parameters ---------- seed : int The seed of the random number generator.
[ "Setup", "the", "random", "seed", ".", "If", "the", "input", "seed", "is", "-", "1", "the", "code", "will", "use", "a", "random", "seed", "for", "every", "run", ".", "If", "it", "is", "\\", "positive", "that", "seed", "is", "used", "for", "all", "runs", "thereby", "giving", "reproducible", "results", "." ]
python
valid
watchforstock/evohome-client
evohomeclient2/__init__.py
https://github.com/watchforstock/evohome-client/blob/f1cb9273e97946d79c0651f00a218abbf7ada53a/evohomeclient2/__init__.py#L84-L93
def _headers(self): """Ensure the Authorization Header has a valid Access Token.""" if not self.access_token or not self.access_token_expires: self._basic_login() elif datetime.now() > self.access_token_expires - timedelta(seconds=30): self._basic_login() return {'Accept': HEADER_ACCEPT, 'Authorization': 'bearer ' + self.access_token}
[ "def", "_headers", "(", "self", ")", ":", "if", "not", "self", ".", "access_token", "or", "not", "self", ".", "access_token_expires", ":", "self", ".", "_basic_login", "(", ")", "elif", "datetime", ".", "now", "(", ")", ">", "self", ".", "access_token_expires", "-", "timedelta", "(", "seconds", "=", "30", ")", ":", "self", ".", "_basic_login", "(", ")", "return", "{", "'Accept'", ":", "HEADER_ACCEPT", ",", "'Authorization'", ":", "'bearer '", "+", "self", ".", "access_token", "}" ]
Ensure the Authorization Header has a valid Access Token.
[ "Ensure", "the", "Authorization", "Header", "has", "a", "valid", "Access", "Token", "." ]
python
train
brouberol/contexttimer
contexttimer/timeout.py
https://github.com/brouberol/contexttimer/blob/a866f420ed4c10f29abf252c58b11f9db6706100/contexttimer/timeout.py#L39-L80
def timeout(limit, handler): """A decorator ensuring that the decorated function tun time does not exceeds the argument limit. :args limit: the time limit :type limit: int :args handler: the handler function called when the decorated function times out. :type handler: callable Example: >>>def timeout_handler(limit, f, *args, **kwargs): ... print "{func} call timed out after {lim}s.".format( ... func=f.__name__, lim=limit) ... >>>@timeout(limit=5, handler=timeout_handler) ... def work(foo, bar, baz="spam") ... time.sleep(10) >>>work("foo", "bar", "baz") # time passes... work call timed out after 5s. >>> """ def wrapper(f): def wrapped_f(*args, **kwargs): old_handler = signal.getsignal(signal.SIGALRM) signal.signal(signal.SIGALRM, timeout_handler) signal.alarm(limit) try: res = f(*args, **kwargs) except Timeout: handler(limit, f, args, kwargs) else: return res finally: signal.signal(signal.SIGALRM, old_handler) signal.alarm(0) return wrapped_f return wrapper
[ "def", "timeout", "(", "limit", ",", "handler", ")", ":", "def", "wrapper", "(", "f", ")", ":", "def", "wrapped_f", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "old_handler", "=", "signal", ".", "getsignal", "(", "signal", ".", "SIGALRM", ")", "signal", ".", "signal", "(", "signal", ".", "SIGALRM", ",", "timeout_handler", ")", "signal", ".", "alarm", "(", "limit", ")", "try", ":", "res", "=", "f", "(", "*", "args", ",", "*", "*", "kwargs", ")", "except", "Timeout", ":", "handler", "(", "limit", ",", "f", ",", "args", ",", "kwargs", ")", "else", ":", "return", "res", "finally", ":", "signal", ".", "signal", "(", "signal", ".", "SIGALRM", ",", "old_handler", ")", "signal", ".", "alarm", "(", "0", ")", "return", "wrapped_f", "return", "wrapper" ]
A decorator ensuring that the decorated function tun time does not exceeds the argument limit. :args limit: the time limit :type limit: int :args handler: the handler function called when the decorated function times out. :type handler: callable Example: >>>def timeout_handler(limit, f, *args, **kwargs): ... print "{func} call timed out after {lim}s.".format( ... func=f.__name__, lim=limit) ... >>>@timeout(limit=5, handler=timeout_handler) ... def work(foo, bar, baz="spam") ... time.sleep(10) >>>work("foo", "bar", "baz") # time passes... work call timed out after 5s. >>>
[ "A", "decorator", "ensuring", "that", "the", "decorated", "function", "tun", "time", "does", "not", "exceeds", "the", "argument", "limit", "." ]
python
train
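The doctest embedded in the docstring above is close to, but not quite, runnable (Python 2 print statement, missing colon after the def line), and the handler is actually invoked as handler(limit, f, args, kwargs), with args and kwargs passed as single objects. A self-contained sketch along those lines, assuming the module's SIGALRM-based Timeout machinery referenced in the code (POSIX, main thread only):

import time
from contexttimer.timeout import timeout

def on_timeout(limit, f, args, kwargs):
    # Invoked by the decorator when f runs longer than `limit` seconds.
    print("{func} call timed out after {lim}s.".format(func=f.__name__, lim=limit))

@timeout(limit=2, handler=on_timeout)
def work():
    time.sleep(10)

work()   # after roughly 2 seconds: "work call timed out after 2s."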
tensorflow/tensor2tensor
tensor2tensor/data_generators/multi_problem.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/multi_problem.py#L38-L108
def normalize_example_nlp(task, example, is_infer, vocab_type, vocab_offset, max_input_length, max_target_length, fixed_train_length): """Normalize the examples from different tasks so they can be merged. This function is specific to NLP tasks and normalizes them so that in the end the example only has "targets" and "task_id". For tasks that originally have inputs, this is done by appending task_id to the inputs and prepending targets, so normalized_targets = inputs task_id targets. For classification tasks, targets are constructed by spelling out the class. Args: task: the Problem class of the task we are normalizing. example: a dictionary of tensors, the example to normalize. is_infer: bool, whether we are performing inference or not. vocab_type: the type of vocabulary in use. vocab_offset: integer, offset index for subword vocabularies. max_input_length: maximum length to cut inputs to. max_target_length: maximum length to cut targets to. fixed_train_length: set length to this size if > 0. Returns: a dictionary of tensors, like example, after normalizing, which in this case means that it only has "targets" and "task_id" as feature. """ if task.has_inputs: example["inputs"] = example["inputs"][:-1] # remove EOS token if hasattr(task, "class_labels"): if vocab_type == text_problems.VocabType.CHARACTER: # TODO(urvashik): handle the case where num_labels > 9 example["targets"] = tf.cast(discretization.int_to_bit( example["targets"], 1, base=10) + 50, tf.int64) example["targets"] = tf.squeeze(example["targets"], axis=[-1]) elif vocab_type == text_problems.VocabType.SUBWORD: example["targets"] = vocab_offset + example["targets"] else: # sequence with inputs and targets eg: summarization if task.has_inputs: if max_input_length > 0: example["inputs"] = example["inputs"][:max_input_length] # Do not truncate targets during inference with beam decoding. if max_target_length > 0 and not is_infer: example["targets"] = example["targets"][:max_target_length] def make_constant_shape(x, size): x = x[:size] xlen = tf.shape(x)[0] x = tf.pad(x, [[0, size - xlen]]) return tf.reshape(x, [size]) if task.has_inputs: if is_infer: concat_list = [example["inputs"], [task.task_id]] example["inputs"] = tf.concat(concat_list, axis=0) else: inputs = example.pop("inputs") concat_list = [inputs, [task.task_id], example["targets"]] example["targets"] = tf.concat(concat_list, axis=0) if fixed_train_length > 0: example["targets"] = make_constant_shape( example["targets"], fixed_train_length) else: concat_list = [[task.task_id], example["targets"]] example["targets"] = tf.concat(concat_list, axis=0) if not is_infer and fixed_train_length > 0: example["targets"] = make_constant_shape( example["targets"], fixed_train_length) example["task_id"] = tf.constant([task.task_id], dtype=tf.int64) return example
[ "def", "normalize_example_nlp", "(", "task", ",", "example", ",", "is_infer", ",", "vocab_type", ",", "vocab_offset", ",", "max_input_length", ",", "max_target_length", ",", "fixed_train_length", ")", ":", "if", "task", ".", "has_inputs", ":", "example", "[", "\"inputs\"", "]", "=", "example", "[", "\"inputs\"", "]", "[", ":", "-", "1", "]", "# remove EOS token", "if", "hasattr", "(", "task", ",", "\"class_labels\"", ")", ":", "if", "vocab_type", "==", "text_problems", ".", "VocabType", ".", "CHARACTER", ":", "# TODO(urvashik): handle the case where num_labels > 9", "example", "[", "\"targets\"", "]", "=", "tf", ".", "cast", "(", "discretization", ".", "int_to_bit", "(", "example", "[", "\"targets\"", "]", ",", "1", ",", "base", "=", "10", ")", "+", "50", ",", "tf", ".", "int64", ")", "example", "[", "\"targets\"", "]", "=", "tf", ".", "squeeze", "(", "example", "[", "\"targets\"", "]", ",", "axis", "=", "[", "-", "1", "]", ")", "elif", "vocab_type", "==", "text_problems", ".", "VocabType", ".", "SUBWORD", ":", "example", "[", "\"targets\"", "]", "=", "vocab_offset", "+", "example", "[", "\"targets\"", "]", "else", ":", "# sequence with inputs and targets eg: summarization", "if", "task", ".", "has_inputs", ":", "if", "max_input_length", ">", "0", ":", "example", "[", "\"inputs\"", "]", "=", "example", "[", "\"inputs\"", "]", "[", ":", "max_input_length", "]", "# Do not truncate targets during inference with beam decoding.", "if", "max_target_length", ">", "0", "and", "not", "is_infer", ":", "example", "[", "\"targets\"", "]", "=", "example", "[", "\"targets\"", "]", "[", ":", "max_target_length", "]", "def", "make_constant_shape", "(", "x", ",", "size", ")", ":", "x", "=", "x", "[", ":", "size", "]", "xlen", "=", "tf", ".", "shape", "(", "x", ")", "[", "0", "]", "x", "=", "tf", ".", "pad", "(", "x", ",", "[", "[", "0", ",", "size", "-", "xlen", "]", "]", ")", "return", "tf", ".", "reshape", "(", "x", ",", "[", "size", "]", ")", "if", "task", ".", "has_inputs", ":", "if", "is_infer", ":", "concat_list", "=", "[", "example", "[", "\"inputs\"", "]", ",", "[", "task", ".", "task_id", "]", "]", "example", "[", "\"inputs\"", "]", "=", "tf", ".", "concat", "(", "concat_list", ",", "axis", "=", "0", ")", "else", ":", "inputs", "=", "example", ".", "pop", "(", "\"inputs\"", ")", "concat_list", "=", "[", "inputs", ",", "[", "task", ".", "task_id", "]", ",", "example", "[", "\"targets\"", "]", "]", "example", "[", "\"targets\"", "]", "=", "tf", ".", "concat", "(", "concat_list", ",", "axis", "=", "0", ")", "if", "fixed_train_length", ">", "0", ":", "example", "[", "\"targets\"", "]", "=", "make_constant_shape", "(", "example", "[", "\"targets\"", "]", ",", "fixed_train_length", ")", "else", ":", "concat_list", "=", "[", "[", "task", ".", "task_id", "]", ",", "example", "[", "\"targets\"", "]", "]", "example", "[", "\"targets\"", "]", "=", "tf", ".", "concat", "(", "concat_list", ",", "axis", "=", "0", ")", "if", "not", "is_infer", "and", "fixed_train_length", ">", "0", ":", "example", "[", "\"targets\"", "]", "=", "make_constant_shape", "(", "example", "[", "\"targets\"", "]", ",", "fixed_train_length", ")", "example", "[", "\"task_id\"", "]", "=", "tf", ".", "constant", "(", "[", "task", ".", "task_id", "]", ",", "dtype", "=", "tf", ".", "int64", ")", "return", "example" ]
Normalize the examples from different tasks so they can be merged. This function is specific to NLP tasks and normalizes them so that in the end the example only has "targets" and "task_id". For tasks that originally have inputs, this is done by appending task_id to the inputs and prepending targets, so normalized_targets = inputs task_id targets. For classification tasks, targets are constructed by spelling out the class. Args: task: the Problem class of the task we are normalizing. example: a dictionary of tensors, the example to normalize. is_infer: bool, whether we are performing inference or not. vocab_type: the type of vocabulary in use. vocab_offset: integer, offset index for subword vocabularies. max_input_length: maximum length to cut inputs to. max_target_length: maximum length to cut targets to. fixed_train_length: set length to this size if > 0. Returns: a dictionary of tensors, like example, after normalizing, which in this case means that it only has "targets" and "task_id" as feature.
[ "Normalize", "the", "examples", "from", "different", "tasks", "so", "they", "can", "be", "merged", "." ]
python
train
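A plain-Python illustration (no TensorFlow) of the concatenation scheme the docstring above describes for a task that has inputs; the token ids are made up:

inputs = [11, 12, 13]     # source tokens, EOS already removed
targets = [21, 22]        # original target tokens
task_id = 7               # id of the originating problem

# During training the normalized example keeps only "targets" and "task_id":
normalized_targets = inputs + [task_id] + targets
print(normalized_targets)  # [11, 12, 13, 7, 21, 22]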
Dallinger/Dallinger
demos/dlgr/demos/mcmcp/models.py
https://github.com/Dallinger/Dallinger/blob/76ca8217c709989c116d0ebd8fca37bd22f591af/demos/dlgr/demos/mcmcp/models.py#L94-L103
def perturbed_contents(self): """Perturb the given animal.""" animal = json.loads(self.contents) for prop, prop_range in self.properties.items(): range = prop_range[1] - prop_range[0] jittered = animal[prop] + random.gauss(0, 0.1 * range) animal[prop] = max(min(jittered, prop_range[1]), prop_range[0]) return json.dumps(animal)
[ "def", "perturbed_contents", "(", "self", ")", ":", "animal", "=", "json", ".", "loads", "(", "self", ".", "contents", ")", "for", "prop", ",", "prop_range", "in", "self", ".", "properties", ".", "items", "(", ")", ":", "range", "=", "prop_range", "[", "1", "]", "-", "prop_range", "[", "0", "]", "jittered", "=", "animal", "[", "prop", "]", "+", "random", ".", "gauss", "(", "0", ",", "0.1", "*", "range", ")", "animal", "[", "prop", "]", "=", "max", "(", "min", "(", "jittered", ",", "prop_range", "[", "1", "]", ")", ",", "prop_range", "[", "0", "]", ")", "return", "json", ".", "dumps", "(", "animal", ")" ]
Perturb the given animal.
[ "Perturb", "the", "given", "animal", "." ]
python
train
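A standalone sketch of the jitter-and-clamp step that perturbed_contents above applies, outside of the Dallinger model class; the property ranges and animal values are invented:

import json
import random

properties = {'height': (0.0, 10.0), 'speed': (0.0, 100.0)}   # hypothetical ranges
animal = {'height': 4.2, 'speed': 55.0}

for prop, (lo, hi) in properties.items():
    # Gaussian jitter with sigma equal to 10% of the property's range,
    # then clamp the result back into [lo, hi].
    jittered = animal[prop] + random.gauss(0, 0.1 * (hi - lo))
    animal[prop] = max(min(jittered, hi), lo)

print(json.dumps(animal))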
veeti/decent
decent/validators.py
https://github.com/veeti/decent/blob/07b11536953b9cf4402c65f241706ab717b90bff/decent/validators.py#L92-L105
def Type(expected, message="Not of type {}"): """ Creates a validator that compares the type of the given value to ``expected``. This is a direct type() equality check. Also see ``Instance``, which is an isinstance() check. A custom message can be specified with ``message``. """ @wraps(Type) def built(value): if type(value) != expected: raise Error(message.format(expected.__name__)) return value return built
[ "def", "Type", "(", "expected", ",", "message", "=", "\"Not of type {}\"", ")", ":", "@", "wraps", "(", "Type", ")", "def", "built", "(", "value", ")", ":", "if", "type", "(", "value", ")", "!=", "expected", ":", "raise", "Error", "(", "message", ".", "format", "(", "expected", ".", "__name__", ")", ")", "return", "value", "return", "built" ]
Creates a validator that compares the type of the given value to ``expected``. This is a direct type() equality check. Also see ``Instance``, which is an isinstance() check. A custom message can be specified with ``message``.
[ "Creates", "a", "validator", "that", "compares", "the", "type", "of", "the", "given", "value", "to", "expected", ".", "This", "is", "a", "direct", "type", "()", "equality", "check", ".", "Also", "see", "Instance", "which", "is", "an", "isinstance", "()", "check", "." ]
python
train
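A short usage sketch for the Type validator above. The import path follows the record's decent/validators.py, and the failure branch assumes the library's Error renders its message when printed:

from decent.validators import Type

is_int = Type(int)

print(is_int(3))        # 3 -- the value is returned unchanged when the type matches

try:
    is_int("3")
except Exception as exc:
    print(exc)          # Not of type int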
oasiswork/zimsoap
zimsoap/client.py
https://github.com/oasiswork/zimsoap/blob/d1ea2eb4d50f263c9a16e5549af03f1eff3e295e/zimsoap/client.py#L1995-L2014
def get_filter_rules(self, way='in'): """ :param: way string discribing if filter is for 'in' or 'out' messages :returns: list of zobjects.FilterRule """ try: if way == 'in': filters = self.request( 'GetFilterRules')['filterRules']['filterRule'] elif way == 'out': filters = self.request( 'GetOutgoingFilterRules')['filterRules']['filterRule'] # Zimbra return a dict if there is only one instance if isinstance(filters, dict): filters = [filters] return [zobjects.FilterRule.from_dict(f) for f in filters] except KeyError: return []
[ "def", "get_filter_rules", "(", "self", ",", "way", "=", "'in'", ")", ":", "try", ":", "if", "way", "==", "'in'", ":", "filters", "=", "self", ".", "request", "(", "'GetFilterRules'", ")", "[", "'filterRules'", "]", "[", "'filterRule'", "]", "elif", "way", "==", "'out'", ":", "filters", "=", "self", ".", "request", "(", "'GetOutgoingFilterRules'", ")", "[", "'filterRules'", "]", "[", "'filterRule'", "]", "# Zimbra return a dict if there is only one instance", "if", "isinstance", "(", "filters", ",", "dict", ")", ":", "filters", "=", "[", "filters", "]", "return", "[", "zobjects", ".", "FilterRule", ".", "from_dict", "(", "f", ")", "for", "f", "in", "filters", "]", "except", "KeyError", ":", "return", "[", "]" ]
:param: way string discribing if filter is for 'in' or 'out' messages :returns: list of zobjects.FilterRule
[ ":", "param", ":", "way", "string", "discribing", "if", "filter", "is", "for", "in", "or", "out", "messages", ":", "returns", ":", "list", "of", "zobjects", ".", "FilterRule" ]
python
train
tech-pi/doufo
src/python/doufo/function.py
https://github.com/tech-pi/doufo/blob/3d375fef30670597768a6eef809b75b4b1b5a3fd/src/python/doufo/function.py#L329-L335
def flip(f: Callable) -> Function: """ flip order of first two arguments to function. """ nargs_, nouts_, ndefs_ = nargs(f), nouts(f), ndefs(f) return WrappedFunction(lambda *args, **kwargs: f(args[1], args[0], *args[2:], **kwargs), nargs=nargs_, nouts=nouts_, ndefs=ndefs_)
[ "def", "flip", "(", "f", ":", "Callable", ")", "->", "Function", ":", "nargs_", ",", "nouts_", ",", "ndefs_", "=", "nargs", "(", "f", ")", ",", "nouts", "(", "f", ")", ",", "ndefs", "(", "f", ")", "return", "WrappedFunction", "(", "lambda", "*", "args", ",", "*", "*", "kwargs", ":", "f", "(", "args", "[", "1", "]", ",", "args", "[", "0", "]", ",", "*", "args", "[", "2", ":", "]", ",", "*", "*", "kwargs", ")", ",", "nargs", "=", "nargs_", ",", "nouts", "=", "nouts_", ",", "ndefs", "=", "ndefs_", ")" ]
flip order of first two arguments to function.
[ "flip", "order", "of", "first", "two", "arguments", "to", "function", "." ]
python
train
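A quick usage sketch for flip above, assuming the import path in the record and that nargs/nouts/ndefs can be inferred for a plain lambda:

from doufo.function import flip

sub = lambda a, b: a - b
flipped = flip(sub)

print(sub(1, 5))       # -4
print(flipped(1, 5))   # 4 -- the first two arguments are swapped before calling sub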
theosysbio/means
src/means/approximation/mea/closure_normal.py
https://github.com/theosysbio/means/blob/fe164916a1d84ab2a4fa039871d38ccdf638b1db/src/means/approximation/mea/closure_normal.py#L61-L98
def _compute_one_closed_central_moment(self, moment, covariance_matrix): r""" Compute each row of closed central moment based on Isserlis' Theorem of calculating higher order moments of multivariate normal distribution in terms of covariance matrix :param moment: moment matrix :param covariance_matrix: matrix containing variances and covariances :return: each row of closed central moment """ # If moment order is odd, higher order moments equals 0 if moment.order % 2 != 0: return sp.Integer(0) # index of species idx = [i for i in range(len(moment.n_vector))] # repeat the index of a species as many time as its value in counter list_for_partition = reduce(operator.add, map(lambda i, c: [i] * c, idx, moment.n_vector)) # If moment order is even, :math: '\mathbb{E} [x_1x_2 \ldots x_2_n] = \sum \prod\mathbb{E} [x_ix_j] ' # e.g.:math: '\mathbb{E} [x_1x_2x_3x_4] = \mathbb{E} [x_1x_2] +\mathbb{E} [x_1x_3] +\mathbb{E} [x_1x_4] # +\mathbb{E} [x_2x_3]+\mathbb{E} [x_2x_4]+\mathbb{E} [x_3x_4]' # For second order moment, there is only one way of partitioning. Hence, no need to generate partitions if moment.order == 2: return covariance_matrix[list_for_partition[0], list_for_partition[1]] # For even moment order other than 2, generate a list of partitions of the indices of covariances else: each_row = [] for idx_pair in self._generate_partitions(list_for_partition): # Retrieve the pairs of covariances using the pairs of partitioned indices l = [covariance_matrix[i, j] for i,j in idx_pair] # Calculate the product of each pair of covariances each_row.append(product(l)) # The corresponding closed central moment of that moment order is the sum of the products return sum(each_row)
[ "def", "_compute_one_closed_central_moment", "(", "self", ",", "moment", ",", "covariance_matrix", ")", ":", "# If moment order is odd, higher order moments equals 0", "if", "moment", ".", "order", "%", "2", "!=", "0", ":", "return", "sp", ".", "Integer", "(", "0", ")", "# index of species", "idx", "=", "[", "i", "for", "i", "in", "range", "(", "len", "(", "moment", ".", "n_vector", ")", ")", "]", "# repeat the index of a species as many time as its value in counter", "list_for_partition", "=", "reduce", "(", "operator", ".", "add", ",", "map", "(", "lambda", "i", ",", "c", ":", "[", "i", "]", "*", "c", ",", "idx", ",", "moment", ".", "n_vector", ")", ")", "# If moment order is even, :math: '\\mathbb{E} [x_1x_2 \\ldots x_2_n] = \\sum \\prod\\mathbb{E} [x_ix_j] '", "# e.g.:math: '\\mathbb{E} [x_1x_2x_3x_4] = \\mathbb{E} [x_1x_2] +\\mathbb{E} [x_1x_3] +\\mathbb{E} [x_1x_4]", "# +\\mathbb{E} [x_2x_3]+\\mathbb{E} [x_2x_4]+\\mathbb{E} [x_3x_4]'", "# For second order moment, there is only one way of partitioning. Hence, no need to generate partitions", "if", "moment", ".", "order", "==", "2", ":", "return", "covariance_matrix", "[", "list_for_partition", "[", "0", "]", ",", "list_for_partition", "[", "1", "]", "]", "# For even moment order other than 2, generate a list of partitions of the indices of covariances", "else", ":", "each_row", "=", "[", "]", "for", "idx_pair", "in", "self", ".", "_generate_partitions", "(", "list_for_partition", ")", ":", "# Retrieve the pairs of covariances using the pairs of partitioned indices", "l", "=", "[", "covariance_matrix", "[", "i", ",", "j", "]", "for", "i", ",", "j", "in", "idx_pair", "]", "# Calculate the product of each pair of covariances", "each_row", ".", "append", "(", "product", "(", "l", ")", ")", "# The corresponding closed central moment of that moment order is the sum of the products", "return", "sum", "(", "each_row", ")" ]
r""" Compute each row of closed central moment based on Isserlis' Theorem of calculating higher order moments of multivariate normal distribution in terms of covariance matrix :param moment: moment matrix :param covariance_matrix: matrix containing variances and covariances :return: each row of closed central moment
[ "r", "Compute", "each", "row", "of", "closed", "central", "moment", "based", "on", "Isserlis", "Theorem", "of", "calculating", "higher", "order", "moments", "of", "multivariate", "normal", "distribution", "in", "terms", "of", "covariance", "matrix" ]
python
train
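For reference, the even-order identity the closure above applies (Isserlis' theorem for a zero-mean multivariate normal), written out for the fourth-order case:

E[x_1 x_2 x_3 x_4] = Cov(x_1, x_2) Cov(x_3, x_4) + Cov(x_1, x_3) Cov(x_2, x_4) + Cov(x_1, x_4) Cov(x_2, x_3)

The sum runs over all ways of partitioning the indices into pairs, with one covariance factor per pair; odd-order central moments vanish, which is why the method returns 0 whenever moment.order is odd.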
richardkiss/pycoin
pycoin/crack/ecdsa.py
https://github.com/richardkiss/pycoin/blob/1e8d0d9fe20ce0347b97847bb529cd1bd84c7442/pycoin/crack/ecdsa.py#L2-L7
def crack_secret_exponent_from_k(generator, signed_value, sig, k): """ Given a signature of a signed_value and a known k, return the secret exponent. """ r, s = sig return ((s * k - signed_value) * generator.inverse(r)) % generator.order()
[ "def", "crack_secret_exponent_from_k", "(", "generator", ",", "signed_value", ",", "sig", ",", "k", ")", ":", "r", ",", "s", "=", "sig", "return", "(", "(", "s", "*", "k", "-", "signed_value", ")", "*", "generator", ".", "inverse", "(", "r", ")", ")", "%", "generator", ".", "order", "(", ")" ]
Given a signature of a signed_value and a known k, return the secret exponent.
[ "Given", "a", "signature", "of", "a", "signed_value", "and", "a", "known", "k", "return", "the", "secret", "exponent", "." ]
python
train
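The one-liner above inverts the ECDSA signing relation: with s = k^(-1) * (z + r * d) mod n, where z is the signed value, d the secret exponent and n the group order, a known k gives d = (s * k - z) * r^(-1) mod n. A toy check of that algebra with plain modular arithmetic (small made-up numbers, not a real curve; pow(x, -1, n) needs Python 3.8+):

n = 101                       # toy group order (prime), purely illustrative
d, k, z, r = 27, 19, 55, 33   # secret exponent, nonce, signed value, toy "r"

s = pow(k, -1, n) * (z + r * d) % n           # forward: signing equation
recovered = (s * k - z) * pow(r, -1, n) % n   # backward: the crack formula above

print(recovered == d)         # True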
codenerix/django-codenerix
codenerix/multiforms.py
https://github.com/codenerix/django-codenerix/blob/1f5527b352141caaee902b37b2648791a06bd57d/codenerix/multiforms.py#L312-L327
def form_valid(self, form, forms): """ Called if all forms are valid. Creates a Recipe instance along with associated Ingredients and Instructions and then redirects to a success page. """ if self.object: form.save() for (formobj, linkerfield) in forms: if form != formobj: formobj.save() else: self.object = form.save() for (formobj, linkerfield) in forms: if form != formobj: setattr(formobj.instance, linkerfield, self.object) formobj.save() return HttpResponseRedirect(self.get_success_url())
[ "def", "form_valid", "(", "self", ",", "form", ",", "forms", ")", ":", "if", "self", ".", "object", ":", "form", ".", "save", "(", ")", "for", "(", "formobj", ",", "linkerfield", ")", "in", "forms", ":", "if", "form", "!=", "formobj", ":", "formobj", ".", "save", "(", ")", "else", ":", "self", ".", "object", "=", "form", ".", "save", "(", ")", "for", "(", "formobj", ",", "linkerfield", ")", "in", "forms", ":", "if", "form", "!=", "formobj", ":", "setattr", "(", "formobj", ".", "instance", ",", "linkerfield", ",", "self", ".", "object", ")", "formobj", ".", "save", "(", ")", "return", "HttpResponseRedirect", "(", "self", ".", "get_success_url", "(", ")", ")" ]
Called if all forms are valid. Creates a Recipe instance along with associated Ingredients and Instructions and then redirects to a success page.
[ "Called", "if", "all", "forms", "are", "valid", ".", "Creates", "a", "Recipe", "instance", "along", "with", "associated", "Ingredients", "and", "Instructions", "and", "then", "redirects", "to", "a", "success", "page", "." ]
python
train
trailofbits/manticore
manticore/native/cpu/x86.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/native/cpu/x86.py#L905-L934
def TEST(cpu, src1, src2): """ Logical compare. Computes the bit-wise logical AND of first operand (source 1 operand) and the second operand (source 2 operand) and sets the SF, ZF, and PF status flags according to the result. The result is then discarded:: TEMP = SRC1 AND SRC2; SF = MSB(TEMP); IF TEMP = 0 THEN ZF = 1; ELSE ZF = 0; FI: PF = BitwiseXNOR(TEMP[0:7]); CF = 0; OF = 0; (*AF is Undefined*) :param cpu: current CPU. :param src1: first operand. :param src2: second operand. """ # Defined Flags: szp temp = src1.read() & src2.read() cpu.SF = (temp & (1 << (src1.size - 1))) != 0 cpu.ZF = temp == 0 cpu.PF = cpu._calculate_parity_flag(temp) cpu.CF = False cpu.OF = False
[ "def", "TEST", "(", "cpu", ",", "src1", ",", "src2", ")", ":", "# Defined Flags: szp", "temp", "=", "src1", ".", "read", "(", ")", "&", "src2", ".", "read", "(", ")", "cpu", ".", "SF", "=", "(", "temp", "&", "(", "1", "<<", "(", "src1", ".", "size", "-", "1", ")", ")", ")", "!=", "0", "cpu", ".", "ZF", "=", "temp", "==", "0", "cpu", ".", "PF", "=", "cpu", ".", "_calculate_parity_flag", "(", "temp", ")", "cpu", ".", "CF", "=", "False", "cpu", ".", "OF", "=", "False" ]
Logical compare. Computes the bit-wise logical AND of first operand (source 1 operand) and the second operand (source 2 operand) and sets the SF, ZF, and PF status flags according to the result. The result is then discarded:: TEMP = SRC1 AND SRC2; SF = MSB(TEMP); IF TEMP = 0 THEN ZF = 1; ELSE ZF = 0; FI: PF = BitwiseXNOR(TEMP[0:7]); CF = 0; OF = 0; (*AF is Undefined*) :param cpu: current CPU. :param src1: first operand. :param src2: second operand.
[ "Logical", "compare", "." ]
python
valid
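A tiny standalone illustration (plain Python, no Manticore operand objects) of the flag arithmetic that the TEST implementation above performs, using made-up 8-bit operands:

src1, src2, size = 0b10010110, 0b11110000, 8   # hypothetical operand values and width

temp = src1 & src2
SF = (temp & (1 << (size - 1))) != 0           # sign flag: MSB of the AND result
ZF = temp == 0                                 # zero flag
PF = bin(temp & 0xFF).count('1') % 2 == 0      # x86 parity: set when the low byte has an even number of 1 bits
CF = OF = False                                # TEST always clears carry and overflow

print(temp, SF, ZF, PF)                        # 144 True False True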
tisimst/mcerp
mcerp/__init__.py
https://github.com/tisimst/mcerp/blob/2bb8260c9ad2d58a806847f1b627b6451e407de1/mcerp/__init__.py#L799-L809
def ChiSquared(k, tag=None): """ A Chi-Squared random variate Parameters ---------- k : int The degrees of freedom of the distribution (must be greater than one) """ assert int(k) == k and k >= 1, 'Chi-Squared "k" must be an integer greater than 0' return uv(ss.chi2(k), tag=tag)
[ "def", "ChiSquared", "(", "k", ",", "tag", "=", "None", ")", ":", "assert", "int", "(", "k", ")", "==", "k", "and", "k", ">=", "1", ",", "'Chi-Squared \"k\" must be an integer greater than 0'", "return", "uv", "(", "ss", ".", "chi2", "(", "k", ")", ",", "tag", "=", "tag", ")" ]
A Chi-Squared random variate Parameters ---------- k : int The degrees of freedom of the distribution (must be greater than one)
[ "A", "Chi", "-", "Squared", "random", "variate", "Parameters", "----------", "k", ":", "int", "The", "degrees", "of", "freedom", "of", "the", "distribution", "(", "must", "be", "greater", "than", "one", ")" ]
python
train
joke2k/faker
faker/utils/text.py
https://github.com/joke2k/faker/blob/965824b61132e52d92d1a6ce470396dbbe01c96c/faker/utils/text.py#L14-L35
def slugify(value, allow_dots=False, allow_unicode=False): """ Converts to lowercase, removes non-word characters (alphanumerics and underscores) and converts spaces to hyphens. Also strips leading and trailing whitespace. Modified to optionally allow dots. Adapted from Django 1.9 """ if allow_dots: pattern = _re_pattern_allow_dots else: pattern = _re_pattern value = six.text_type(value) if allow_unicode: value = unicodedata.normalize('NFKC', value) value = pattern.sub('', value).strip().lower() return _re_spaces.sub('-', value) value = unicodedata.normalize('NFKD', value).encode( 'ascii', 'ignore').decode('ascii') value = pattern.sub('', value).strip().lower() return _re_spaces.sub('-', value)
[ "def", "slugify", "(", "value", ",", "allow_dots", "=", "False", ",", "allow_unicode", "=", "False", ")", ":", "if", "allow_dots", ":", "pattern", "=", "_re_pattern_allow_dots", "else", ":", "pattern", "=", "_re_pattern", "value", "=", "six", ".", "text_type", "(", "value", ")", "if", "allow_unicode", ":", "value", "=", "unicodedata", ".", "normalize", "(", "'NFKC'", ",", "value", ")", "value", "=", "pattern", ".", "sub", "(", "''", ",", "value", ")", ".", "strip", "(", ")", ".", "lower", "(", ")", "return", "_re_spaces", ".", "sub", "(", "'-'", ",", "value", ")", "value", "=", "unicodedata", ".", "normalize", "(", "'NFKD'", ",", "value", ")", ".", "encode", "(", "'ascii'", ",", "'ignore'", ")", ".", "decode", "(", "'ascii'", ")", "value", "=", "pattern", ".", "sub", "(", "''", ",", "value", ")", ".", "strip", "(", ")", ".", "lower", "(", ")", "return", "_re_spaces", ".", "sub", "(", "'-'", ",", "value", ")" ]
Converts to lowercase, removes non-word characters (alphanumerics and underscores) and converts spaces to hyphens. Also strips leading and trailing whitespace. Modified to optionally allow dots. Adapted from Django 1.9
[ "Converts", "to", "lowercase", "removes", "non", "-", "word", "characters", "(", "alphanumerics", "and", "underscores", ")", "and", "converts", "spaces", "to", "hyphens", ".", "Also", "strips", "leading", "and", "trailing", "whitespace", ".", "Modified", "to", "optionally", "allow", "dots", "." ]
python
train
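A few illustrative calls to the slugify helper above; the import path is taken from the record and the comments show the expected results of the regex-and-hyphen behaviour the docstring describes:

from faker.utils.text import slugify

print(slugify("Hello, World!"))                  # hello-world
print(slugify("v1.2 release", allow_dots=True))  # v1.2-release (dots survive only with allow_dots)
print(slugify("Ça va bien"))                     # ca-va-bien (non-ASCII is transliterated unless allow_unicode=True)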
talkincode/toughlib
toughlib/apiutils.py
https://github.com/talkincode/toughlib/blob/1c2f7dde3a7f101248f1b5f5d428cc85466995cf/toughlib/apiutils.py#L98-L106
def parse_form_request(api_secret, request): """ >>> parse_form_request("123456",{"nonce": 1451122677, "msg": "helllo", "code": 0, "sign": "DB30F4D1112C20DFA736F65458F89C64"}) <Storage {'nonce': 1451122677, 'msg': 'helllo', 'code': 0, 'sign': 'DB30F4D1112C20DFA736F65458F89C64'}> """ if not check_sign(api_secret, request): raise SignError(u"message sign error") return Storage(request)
[ "def", "parse_form_request", "(", "api_secret", ",", "request", ")", ":", "if", "not", "check_sign", "(", "api_secret", ",", "request", ")", ":", "raise", "SignError", "(", "u\"message sign error\"", ")", "return", "Storage", "(", "request", ")" ]
>>> parse_form_request("123456",{"nonce": 1451122677, "msg": "helllo", "code": 0, "sign": "DB30F4D1112C20DFA736F65458F89C64"}) <Storage {'nonce': 1451122677, 'msg': 'helllo', 'code': 0, 'sign': 'DB30F4D1112C20DFA736F65458F89C64'}>
[ ">>>", "parse_form_request", "(", "123456", "{", "nonce", ":", "1451122677", "msg", ":", "helllo", "code", ":", "0", "sign", ":", "DB30F4D1112C20DFA736F65458F89C64", "}", ")", "<Storage", "{", "nonce", ":", "1451122677", "msg", ":", "helllo", "code", ":", "0", "sign", ":", "DB30F4D1112C20DFA736F65458F89C64", "}", ">" ]
python
train
inspirehep/inspire-schemas
inspire_schemas/utils.py
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/utils.py#L681-L707
def get_validation_errors(data, schema=None): """Validation errors for a given record. Args: data (dict): record to validate. schema (Union[dict, str]): schema to validate against. If it is a string, it is intepreted as the name of the schema to load (e.g. ``authors`` or ``jobs``). If it is ``None``, the schema is taken from ``data['$schema']``. If it is a dictionary, it is used directly. Yields: jsonschema.exceptions.ValidationError: validation errors. Raises: SchemaNotFound: if the given schema was not found. SchemaKeyNotFound: if ``schema`` is ``None`` and no ``$schema`` key was found in ``data``. jsonschema.SchemaError: if the schema is invalid. """ schema = _load_schema_for_record(data, schema) errors = Draft4Validator( schema, resolver=LocalRefResolver.from_schema(schema), format_checker=inspire_format_checker ) return errors.iter_errors(data)
[ "def", "get_validation_errors", "(", "data", ",", "schema", "=", "None", ")", ":", "schema", "=", "_load_schema_for_record", "(", "data", ",", "schema", ")", "errors", "=", "Draft4Validator", "(", "schema", ",", "resolver", "=", "LocalRefResolver", ".", "from_schema", "(", "schema", ")", ",", "format_checker", "=", "inspire_format_checker", ")", "return", "errors", ".", "iter_errors", "(", "data", ")" ]
Validation errors for a given record. Args: data (dict): record to validate. schema (Union[dict, str]): schema to validate against. If it is a string, it is intepreted as the name of the schema to load (e.g. ``authors`` or ``jobs``). If it is ``None``, the schema is taken from ``data['$schema']``. If it is a dictionary, it is used directly. Yields: jsonschema.exceptions.ValidationError: validation errors. Raises: SchemaNotFound: if the given schema was not found. SchemaKeyNotFound: if ``schema`` is ``None`` and no ``$schema`` key was found in ``data``. jsonschema.SchemaError: if the schema is invalid.
[ "Validation", "errors", "for", "a", "given", "record", "." ]
python
train
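A brief, hedged usage sketch for get_validation_errors above; the record literal is invented and almost certainly incomplete, and the exact messages depend on the bundled schemas:

from inspire_schemas.utils import get_validation_errors

record = {'titles': [{'title': 'A made-up literature record'}]}

# Validate against a named schema instead of a $schema key inside the record.
for error in get_validation_errors(record, schema='hep'):
    print(error.message)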
viniciuschiele/flask-apscheduler
flask_apscheduler/utils.py
https://github.com/viniciuschiele/flask-apscheduler/blob/cc52c39e1948c4e8de5da0d01db45f1779f61997/flask_apscheduler/utils.py#L26-L43
def job_to_dict(job): """Converts a job to an OrderedDict.""" data = OrderedDict() data['id'] = job.id data['name'] = job.name data['func'] = job.func_ref data['args'] = job.args data['kwargs'] = job.kwargs data.update(trigger_to_dict(job.trigger)) if not job.pending: data['misfire_grace_time'] = job.misfire_grace_time data['max_instances'] = job.max_instances data['next_run_time'] = None if job.next_run_time is None else job.next_run_time return data
[ "def", "job_to_dict", "(", "job", ")", ":", "data", "=", "OrderedDict", "(", ")", "data", "[", "'id'", "]", "=", "job", ".", "id", "data", "[", "'name'", "]", "=", "job", ".", "name", "data", "[", "'func'", "]", "=", "job", ".", "func_ref", "data", "[", "'args'", "]", "=", "job", ".", "args", "data", "[", "'kwargs'", "]", "=", "job", ".", "kwargs", "data", ".", "update", "(", "trigger_to_dict", "(", "job", ".", "trigger", ")", ")", "if", "not", "job", ".", "pending", ":", "data", "[", "'misfire_grace_time'", "]", "=", "job", ".", "misfire_grace_time", "data", "[", "'max_instances'", "]", "=", "job", ".", "max_instances", "data", "[", "'next_run_time'", "]", "=", "None", "if", "job", ".", "next_run_time", "is", "None", "else", "job", ".", "next_run_time", "return", "data" ]
Converts a job to an OrderedDict.
[ "Converts", "a", "job", "to", "an", "OrderedDict", "." ]
python
train
awslabs/aws-sam-cli
samcli/lib/build/workflow_config.py
https://github.com/awslabs/aws-sam-cli/blob/c05af5e7378c6f05f7d82ad3f0bca17204177db6/samcli/lib/build/workflow_config.py#L188-L216
def get_config(self, code_dir, project_dir): """ Finds a configuration by looking for a manifest in the given directories. Returns ------- samcli.lib.build.workflow_config.CONFIG A supported configuration if one is found Raises ------ ValueError If none of the supported manifests files are found """ # Search for manifest first in code directory and then in the project directory. # Search order is important here because we want to prefer the manifest present within the code directory over # a manifest present in project directory. search_dirs = [code_dir, project_dir] LOG.debug("Looking for a supported build workflow in following directories: %s", search_dirs) for config in self.configs: if any([self._has_manifest(config, directory) for directory in search_dirs]): return config raise ValueError("None of the supported manifests '{}' were found in the following paths '{}'".format( [config.manifest_name for config in self.configs], search_dirs))
[ "def", "get_config", "(", "self", ",", "code_dir", ",", "project_dir", ")", ":", "# Search for manifest first in code directory and then in the project directory.", "# Search order is important here because we want to prefer the manifest present within the code directory over", "# a manifest present in project directory.", "search_dirs", "=", "[", "code_dir", ",", "project_dir", "]", "LOG", ".", "debug", "(", "\"Looking for a supported build workflow in following directories: %s\"", ",", "search_dirs", ")", "for", "config", "in", "self", ".", "configs", ":", "if", "any", "(", "[", "self", ".", "_has_manifest", "(", "config", ",", "directory", ")", "for", "directory", "in", "search_dirs", "]", ")", ":", "return", "config", "raise", "ValueError", "(", "\"None of the supported manifests '{}' were found in the following paths '{}'\"", ".", "format", "(", "[", "config", ".", "manifest_name", "for", "config", "in", "self", ".", "configs", "]", ",", "search_dirs", ")", ")" ]
Finds a configuration by looking for a manifest in the given directories. Returns ------- samcli.lib.build.workflow_config.CONFIG A supported configuration if one is found Raises ------ ValueError If none of the supported manifests files are found
[ "Finds", "a", "configuration", "by", "looking", "for", "a", "manifest", "in", "the", "given", "directories", "." ]
python
train
goerz/better-apidoc
better_apidoc.py
https://github.com/goerz/better-apidoc/blob/bbf979e01d7eff1a597c2608ef2609d1e83e8001/better_apidoc.py#L160-L255
def _get_members( mod, typ=None, include_imported=False, out_format='names', in_list=None, known_refs=None): """Get (filtered) public/total members of the module or package `mod`. Returns: lists `public` and `items`. The lists contains the public and private + public members, as strings. """ roles = {'function': 'func', 'module': 'mod', 'class': 'class', 'exception': 'exc', 'data': 'data'} # not included, because they cannot occur at modul level: # 'method': 'meth', 'attribute': 'attr', 'instanceattribute': 'attr' out_formats = ['names', 'fullnames', 'refs', 'table'] if out_format not in out_formats: raise ValueError("out_format %s not in %r" % (out_format, out_formats)) def check_typ(typ, mod, member): """Check if mod.member is of the desired typ""" if inspect.ismodule(member): return False documenter = _get_documenter(APP, member, mod) if typ is None: return True if typ == getattr(documenter, 'objtype', None): return True if hasattr(documenter, 'directivetype'): return roles[typ] == getattr(documenter, 'directivetype') def is_local(mod, member, name): """Check whether mod.member is defined locally in module mod""" if hasattr(member, '__module__'): return getattr(member, '__module__') == mod.__name__ else: # we take missing __module__ to mean the member is a data object # it is recommended to filter data by e.g. __all__ return True if typ is not None and typ not in roles: raise ValueError("typ must be None or one of %s" % str(list(roles.keys()))) items = [] public = [] if known_refs is None: known_refs = {} elif isinstance(known_refs, str): known_refs = getattr(mod, known_refs) if in_list is not None: try: in_list = getattr(mod, in_list) except AttributeError: in_list = [] for name in dir(mod): if name.startswith('__'): continue try: member = safe_getattr(mod, name) except AttributeError: continue if check_typ(typ, mod, member): if in_list is not None: if name not in in_list: continue if not (include_imported or is_local(mod, member, name)): continue if out_format in ['table', 'refs']: documenter = _get_documenter(APP, member, mod) role = roles.get(documenter.objtype, 'obj') ref = _get_member_ref_str( name, obj=member, role=role, known_refs=known_refs) if out_format == 'table': docsummary = extract_summary(member) items.append((ref, docsummary)) if not name.startswith('_'): public.append((ref, docsummary)) elif out_format == 'refs': items.append(ref) if not name.startswith('_'): public.append(ref) elif out_format == 'fullnames': fullname = _get_fullname(name, obj=member) items.append(fullname) if not name.startswith('_'): public.append(fullname) else: assert out_format == 'names', str(out_format) items.append(name) if not name.startswith('_'): public.append(name) if out_format == 'table': return _assemble_table(public), _assemble_table(items) else: return public, items
[ "def", "_get_members", "(", "mod", ",", "typ", "=", "None", ",", "include_imported", "=", "False", ",", "out_format", "=", "'names'", ",", "in_list", "=", "None", ",", "known_refs", "=", "None", ")", ":", "roles", "=", "{", "'function'", ":", "'func'", ",", "'module'", ":", "'mod'", ",", "'class'", ":", "'class'", ",", "'exception'", ":", "'exc'", ",", "'data'", ":", "'data'", "}", "# not included, because they cannot occur at modul level:", "# 'method': 'meth', 'attribute': 'attr', 'instanceattribute': 'attr'", "out_formats", "=", "[", "'names'", ",", "'fullnames'", ",", "'refs'", ",", "'table'", "]", "if", "out_format", "not", "in", "out_formats", ":", "raise", "ValueError", "(", "\"out_format %s not in %r\"", "%", "(", "out_format", ",", "out_formats", ")", ")", "def", "check_typ", "(", "typ", ",", "mod", ",", "member", ")", ":", "\"\"\"Check if mod.member is of the desired typ\"\"\"", "if", "inspect", ".", "ismodule", "(", "member", ")", ":", "return", "False", "documenter", "=", "_get_documenter", "(", "APP", ",", "member", ",", "mod", ")", "if", "typ", "is", "None", ":", "return", "True", "if", "typ", "==", "getattr", "(", "documenter", ",", "'objtype'", ",", "None", ")", ":", "return", "True", "if", "hasattr", "(", "documenter", ",", "'directivetype'", ")", ":", "return", "roles", "[", "typ", "]", "==", "getattr", "(", "documenter", ",", "'directivetype'", ")", "def", "is_local", "(", "mod", ",", "member", ",", "name", ")", ":", "\"\"\"Check whether mod.member is defined locally in module mod\"\"\"", "if", "hasattr", "(", "member", ",", "'__module__'", ")", ":", "return", "getattr", "(", "member", ",", "'__module__'", ")", "==", "mod", ".", "__name__", "else", ":", "# we take missing __module__ to mean the member is a data object", "# it is recommended to filter data by e.g. 
__all__", "return", "True", "if", "typ", "is", "not", "None", "and", "typ", "not", "in", "roles", ":", "raise", "ValueError", "(", "\"typ must be None or one of %s\"", "%", "str", "(", "list", "(", "roles", ".", "keys", "(", ")", ")", ")", ")", "items", "=", "[", "]", "public", "=", "[", "]", "if", "known_refs", "is", "None", ":", "known_refs", "=", "{", "}", "elif", "isinstance", "(", "known_refs", ",", "str", ")", ":", "known_refs", "=", "getattr", "(", "mod", ",", "known_refs", ")", "if", "in_list", "is", "not", "None", ":", "try", ":", "in_list", "=", "getattr", "(", "mod", ",", "in_list", ")", "except", "AttributeError", ":", "in_list", "=", "[", "]", "for", "name", "in", "dir", "(", "mod", ")", ":", "if", "name", ".", "startswith", "(", "'__'", ")", ":", "continue", "try", ":", "member", "=", "safe_getattr", "(", "mod", ",", "name", ")", "except", "AttributeError", ":", "continue", "if", "check_typ", "(", "typ", ",", "mod", ",", "member", ")", ":", "if", "in_list", "is", "not", "None", ":", "if", "name", "not", "in", "in_list", ":", "continue", "if", "not", "(", "include_imported", "or", "is_local", "(", "mod", ",", "member", ",", "name", ")", ")", ":", "continue", "if", "out_format", "in", "[", "'table'", ",", "'refs'", "]", ":", "documenter", "=", "_get_documenter", "(", "APP", ",", "member", ",", "mod", ")", "role", "=", "roles", ".", "get", "(", "documenter", ".", "objtype", ",", "'obj'", ")", "ref", "=", "_get_member_ref_str", "(", "name", ",", "obj", "=", "member", ",", "role", "=", "role", ",", "known_refs", "=", "known_refs", ")", "if", "out_format", "==", "'table'", ":", "docsummary", "=", "extract_summary", "(", "member", ")", "items", ".", "append", "(", "(", "ref", ",", "docsummary", ")", ")", "if", "not", "name", ".", "startswith", "(", "'_'", ")", ":", "public", ".", "append", "(", "(", "ref", ",", "docsummary", ")", ")", "elif", "out_format", "==", "'refs'", ":", "items", ".", "append", "(", "ref", ")", "if", "not", "name", ".", "startswith", "(", "'_'", ")", ":", "public", ".", "append", "(", "ref", ")", "elif", "out_format", "==", "'fullnames'", ":", "fullname", "=", "_get_fullname", "(", "name", ",", "obj", "=", "member", ")", "items", ".", "append", "(", "fullname", ")", "if", "not", "name", ".", "startswith", "(", "'_'", ")", ":", "public", ".", "append", "(", "fullname", ")", "else", ":", "assert", "out_format", "==", "'names'", ",", "str", "(", "out_format", ")", "items", ".", "append", "(", "name", ")", "if", "not", "name", ".", "startswith", "(", "'_'", ")", ":", "public", ".", "append", "(", "name", ")", "if", "out_format", "==", "'table'", ":", "return", "_assemble_table", "(", "public", ")", ",", "_assemble_table", "(", "items", ")", "else", ":", "return", "public", ",", "items" ]
Get (filtered) public/total members of the module or package `mod`. Returns: lists `public` and `items`. The lists contain, respectively, the public members and the full (public and private) set of members, as strings.
[ "Get", "(", "filtered", ")", "public", "/", "total", "members", "of", "the", "module", "or", "package", "mod", "." ]
python
train
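A minimal, self-contained sketch of the locality filtering that _get_members performs above. The Sphinx-specific pieces (APP, _get_documenter, safe_getattr and the out_format variants) are left out, so this only approximates the 'names' output for a plain module; the stdlib json module stands in for mod.

import inspect
import json as mod

def list_member_names(mod, include_imported=False):
    public, items = [], []
    for name in dir(mod):
        if name.startswith('__'):
            continue
        member = getattr(mod, name)
        if inspect.ismodule(member):
            continue
        # a member is "local" when it reports this module in __module__;
        # objects without __module__ (plain data) are treated as local
        is_local = getattr(member, '__module__', mod.__name__) == mod.__name__
        if not (include_imported or is_local):
            continue
        items.append(name)
        if not name.startswith('_'):
            public.append(name)
    return public, items

print(list_member_names(mod))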
earlzo/hfut
hfut/util.py
https://github.com/earlzo/hfut/blob/09270a9647fba79f26fd1a8a3c53c0678b5257a1/hfut/util.py#L71-L95
def cal_gpa(grades): """ 根据成绩数组计算课程平均绩点和 gpa, 算法不一定与学校一致, 结果仅供参考 :param grades: :meth:`models.StudentSession.get_my_achievements` 返回的成绩数组 :return: 包含了课程平均绩点和 gpa 的元组 """ # 课程总数 courses_sum = len(grades) # 课程绩点和 points_sum = 0 # 学分和 credit_sum = 0 # 课程学分 x 课程绩点之和 gpa_points_sum = 0 for grade in grades: point = get_point(grade.get('补考成绩') or grade['成绩']) credit = float(grade['学分']) points_sum += point credit_sum += credit gpa_points_sum += credit * point ave_point = points_sum / courses_sum gpa = gpa_points_sum / credit_sum return round(ave_point, 5), round(gpa, 5)
[ "def", "cal_gpa", "(", "grades", ")", ":", "# 课程总数", "courses_sum", "=", "len", "(", "grades", ")", "# 课程绩点和", "points_sum", "=", "0", "# 学分和", "credit_sum", "=", "0", "# 课程学分 x 课程绩点之和", "gpa_points_sum", "=", "0", "for", "grade", "in", "grades", ":", "point", "=", "get_point", "(", "grade", ".", "get", "(", "'补考成绩') or gra", "d", "['", "绩'])", "", "", "", "", "credit", "=", "float", "(", "grade", "[", "'学分'])", "", "", "points_sum", "+=", "point", "credit_sum", "+=", "credit", "gpa_points_sum", "+=", "credit", "*", "point", "ave_point", "=", "points_sum", "/", "courses_sum", "gpa", "=", "gpa_points_sum", "/", "credit_sum", "return", "round", "(", "ave_point", ",", "5", ")", ",", "round", "(", "gpa", ",", "5", ")" ]
Compute the course average grade point and the GPA from an array of grades. The algorithm may not match the school's official one, so the result is for reference only. :param grades: the grade array returned by :meth:`models.StudentSession.get_my_achievements` :return: a tuple containing the course average grade point and the GPA
[ "根据成绩数组计算课程平均绩点和", "gpa", "算法不一定与学校一致", "结果仅供参考" ]
python
train
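A worked example of the arithmetic in cal_gpa above. get_point is not part of the snippet, so a hypothetical 4.0-scale mapping is used here purely for illustration; the dictionary keys ('成绩', '学分', '补考成绩') are the ones the real function reads.

def get_point(score):
    # hypothetical grade-to-point mapping, for illustration only
    score = float(score)
    if score >= 90: return 4.0
    if score >= 80: return 3.0
    if score >= 70: return 2.0
    if score >= 60: return 1.0
    return 0.0

grades = [
    {'成绩': '92', '学分': '3', '补考成绩': None},   # 4.0 points, 3 credits
    {'成绩': '75', '学分': '2', '补考成绩': None},   # 2.0 points, 2 credits
]
points_sum = sum(get_point(g.get('补考成绩') or g['成绩']) for g in grades)
credit_sum = sum(float(g['学分']) for g in grades)
gpa_points_sum = sum(float(g['学分']) * get_point(g.get('补考成绩') or g['成绩']) for g in grades)
ave_point = points_sum / len(grades)      # (4.0 + 2.0) / 2 = 3.0
gpa = gpa_points_sum / credit_sum         # (3*4.0 + 2*2.0) / 5 = 3.2
print(round(ave_point, 5), round(gpa, 5))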
miguelgrinberg/slam
slam/cli.py
https://github.com/miguelgrinberg/slam/blob/cf68a4bbc16d909718f8a9e71072b822e0a3d94b/slam/cli.py#L47-L56
def on_error(e): # pragma: no cover """Error handler RuntimeError or ValueError exceptions raised by commands will be handled by this function. """ exname = {'RuntimeError': 'Runtime error', 'Value Error': 'Value error'} sys.stderr.write('{}: {}\n'.format(exname[e.__class__.__name__], str(e))) sys.stderr.write('See file slam_error.log for additional details.\n') sys.exit(1)
[ "def", "on_error", "(", "e", ")", ":", "# pragma: no cover", "exname", "=", "{", "'RuntimeError'", ":", "'Runtime error'", ",", "'Value Error'", ":", "'Value error'", "}", "sys", ".", "stderr", ".", "write", "(", "'{}: {}\\n'", ".", "format", "(", "exname", "[", "e", ".", "__class__", ".", "__name__", "]", ",", "str", "(", "e", ")", ")", ")", "sys", ".", "stderr", ".", "write", "(", "'See file slam_error.log for additional details.\\n'", ")", "sys", ".", "exit", "(", "1", ")" ]
Error handler RuntimeError or ValueError exceptions raised by commands will be handled by this function.
[ "Error", "handler" ]
python
train
majerteam/sqla_inspect
sqla_inspect/py3o.py
https://github.com/majerteam/sqla_inspect/blob/67edb5541e6a56b0a657d3774d1e19c1110cd402/sqla_inspect/py3o.py#L262-L295
def _get_to_many_relationship_value(self, obj, column): """ Get the resulting datas for a One To many or a many to many relationship :param obj obj: The instance we manage :param dict column: The column description dictionnary :returns: The associated value """ related_key = column.get('related_key', None) related = getattr(obj, column['__col__'].key) value = {} if related: total = len(related) for index, rel_obj in enumerate(related): if related_key: compiled_res = self._get_formatted_val( rel_obj, related_key, column ) else: compiled_res = column['__prop__'].compile_obj( rel_obj ) value['item_%d' % index] = compiled_res value[str(index)] = compiled_res value["_" + str(index)] = compiled_res if index == 0: value['first'] = compiled_res if index == total - 1: value['last'] = compiled_res return value
[ "def", "_get_to_many_relationship_value", "(", "self", ",", "obj", ",", "column", ")", ":", "related_key", "=", "column", ".", "get", "(", "'related_key'", ",", "None", ")", "related", "=", "getattr", "(", "obj", ",", "column", "[", "'__col__'", "]", ".", "key", ")", "value", "=", "{", "}", "if", "related", ":", "total", "=", "len", "(", "related", ")", "for", "index", ",", "rel_obj", "in", "enumerate", "(", "related", ")", ":", "if", "related_key", ":", "compiled_res", "=", "self", ".", "_get_formatted_val", "(", "rel_obj", ",", "related_key", ",", "column", ")", "else", ":", "compiled_res", "=", "column", "[", "'__prop__'", "]", ".", "compile_obj", "(", "rel_obj", ")", "value", "[", "'item_%d'", "%", "index", "]", "=", "compiled_res", "value", "[", "str", "(", "index", ")", "]", "=", "compiled_res", "value", "[", "\"_\"", "+", "str", "(", "index", ")", "]", "=", "compiled_res", "if", "index", "==", "0", ":", "value", "[", "'first'", "]", "=", "compiled_res", "if", "index", "==", "total", "-", "1", ":", "value", "[", "'last'", "]", "=", "compiled_res", "return", "value" ]
Get the resulting data for a One To many or a many to many relationship :param obj obj: The instance we manage :param dict column: The column description dictionary :returns: The associated value
[ "Get", "the", "resulting", "datas", "for", "a", "One", "To", "many", "or", "a", "many", "to", "many", "relationship" ]
python
train
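A standalone sketch of the key scheme built above for to-many relationships: every related item is exposed under 'item_<i>', '<i>' and '_<i>', with 'first'/'last' aliases for the endpoints. Plain strings stand in for the compiled py3o values.

def to_many_value(compiled):
    value = {}
    total = len(compiled)
    for index, res in enumerate(compiled):
        value['item_%d' % index] = res
        value[str(index)] = res
        value['_' + str(index)] = res
        if index == 0:
            value['first'] = res
        if index == total - 1:
            value['last'] = res
    return value

print(to_many_value(['alice', 'bob', 'carol']))
# {'item_0': 'alice', '0': 'alice', '_0': 'alice', 'first': 'alice',
#  ..., 'item_2': 'carol', 'last': 'carol'}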
cloudera/cm_api
python/src/cm_api/endpoints/clusters.py
https://github.com/cloudera/cm_api/blob/5d2512375bd94684b4da36df9e0d9177865ffcbb/python/src/cm_api/endpoints/clusters.py#L140-L150
def update_cdh_version(self, new_cdh_version): """ Manually set the CDH version. @param new_cdh_version: New CDH version, e.g. 4.5.1 @return: An ApiCluster object @since: API v6 """ dic = self.to_json_dict() dic['fullVersion'] = new_cdh_version return self._put_cluster(dic)
[ "def", "update_cdh_version", "(", "self", ",", "new_cdh_version", ")", ":", "dic", "=", "self", ".", "to_json_dict", "(", ")", "dic", "[", "'fullVersion'", "]", "=", "new_cdh_version", "return", "self", ".", "_put_cluster", "(", "dic", ")" ]
Manually set the CDH version. @param new_cdh_version: New CDH version, e.g. 4.5.1 @return: An ApiCluster object @since: API v6
[ "Manually", "set", "the", "CDH", "version", "." ]
python
train
knipknap/exscript
Exscript/util/ipv4.py
https://github.com/knipknap/exscript/blob/72718eee3e87b345d5a5255be9824e867e42927b/Exscript/util/ipv4.py#L72-L86
def normalize_ip(ip): """ Transform the address into a fixed-length form, such as:: 192.168.0.1 -> 192.168.000.001 :type ip: string :param ip: An IP address. :rtype: string :return: The normalized IP. """ theip = ip.split('.') if len(theip) != 4: raise ValueError('ip should be 4 tuples') return '.'.join(str(int(l)).rjust(3, '0') for l in theip)
[ "def", "normalize_ip", "(", "ip", ")", ":", "theip", "=", "ip", ".", "split", "(", "'.'", ")", "if", "len", "(", "theip", ")", "!=", "4", ":", "raise", "ValueError", "(", "'ip should be 4 tuples'", ")", "return", "'.'", ".", "join", "(", "str", "(", "int", "(", "l", ")", ")", ".", "rjust", "(", "3", ",", "'0'", ")", "for", "l", "in", "theip", ")" ]
Transform the address into a fixed-length form, such as:: 192.168.0.1 -> 192.168.000.001 :type ip: string :param ip: An IP address. :rtype: string :return: The normalized IP.
[ "Transform", "the", "address", "into", "a", "fixed", "-", "length", "form", "such", "as", "::" ]
python
train
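normalize_ip above is self-contained, so it can be exercised directly, with the definition above in scope (or via `from Exscript.util.ipv4 import normalize_ip` if Exscript is installed):

assert normalize_ip('192.168.0.1') == '192.168.000.001'
assert normalize_ip('10.7.0.255') == '010.007.000.255'
try:
    normalize_ip('10.0.1')      # only three octets -> ValueError
except ValueError as exc:
    print(exc)                  # "ip should be 4 tuples"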
rh-marketingops/dwm
dwm/dwmmain.py
https://github.com/rh-marketingops/dwm/blob/66c7d18db857afbe5d574478ceaaad6159ae7469/dwm/dwmmain.py#L229-L242
def _norm_lookup(self, record, hist=None): """ Perform generic validation lookup :param dict record: dictionary of values to validate :param dict hist: existing input of history values """ record, hist = self.data_lookup_method(fields_list=self.fields, mongo_db_obj=self.mongo, hist=hist, record=record, lookup_type='normLookup') return record, hist
[ "def", "_norm_lookup", "(", "self", ",", "record", ",", "hist", "=", "None", ")", ":", "record", ",", "hist", "=", "self", ".", "data_lookup_method", "(", "fields_list", "=", "self", ".", "fields", ",", "mongo_db_obj", "=", "self", ".", "mongo", ",", "hist", "=", "hist", ",", "record", "=", "record", ",", "lookup_type", "=", "'normLookup'", ")", "return", "record", ",", "hist" ]
Perform generic validation lookup :param dict record: dictionary of values to validate :param dict hist: existing input of history values
[ "Perform", "generic", "validation", "lookup" ]
python
train
quantmind/pulsar
pulsar/async/monitor.py
https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/async/monitor.py#L153-L164
def stop_actors(self, monitor): """Maintain the number of workers by spawning or killing as required """ if monitor.cfg.workers: num_to_kill = len(self.managed_actors) - monitor.cfg.workers for i in range(num_to_kill, 0, -1): w, kage = 0, sys.maxsize for worker in self.managed_actors.values(): age = worker.impl.age if age < kage: w, kage = worker, age self.manage_actor(monitor, w, True)
[ "def", "stop_actors", "(", "self", ",", "monitor", ")", ":", "if", "monitor", ".", "cfg", ".", "workers", ":", "num_to_kill", "=", "len", "(", "self", ".", "managed_actors", ")", "-", "monitor", ".", "cfg", ".", "workers", "for", "i", "in", "range", "(", "num_to_kill", ",", "0", ",", "-", "1", ")", ":", "w", ",", "kage", "=", "0", ",", "sys", ".", "maxsize", "for", "worker", "in", "self", ".", "managed_actors", ".", "values", "(", ")", ":", "age", "=", "worker", ".", "impl", ".", "age", "if", "age", "<", "kage", ":", "w", ",", "kage", "=", "worker", ",", "age", "self", ".", "manage_actor", "(", "monitor", ",", "w", ",", "True", ")" ]
Maintain the number of workers by spawning or killing as required
[ "Maintain", "the", "number", "of", "workers", "by", "spawning", "or", "killing", "as", "required" ]
python
train
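A standalone sketch of the selection loop in stop_actors above: while there are more managed actors than cfg.workers allows, the actor with the smallest age value is chosen and stopped. A plain name-to-age dictionary stands in for the real actor objects, and deletion stands in for manage_actor(monitor, w, True).

managed_actors = {'worker-a': 3, 'worker-b': 1, 'worker-c': 7, 'worker-d': 2}
target_workers = 2

num_to_kill = len(managed_actors) - target_workers
for _ in range(num_to_kill, 0, -1):
    victim = min(managed_actors, key=managed_actors.get)   # smallest age, as above
    print('stopping', victim)
    del managed_actors[victim]

print('remaining:', sorted(managed_actors))                 # ['worker-a', 'worker-c']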
dmort27/panphon
panphon/_panphon.py
https://github.com/dmort27/panphon/blob/17eaa482e3edb211f3a8138137d76e4b9246d201/panphon/_panphon.py#L77-L101
def word2array(ft_names, word): """Converts `word` [[(value, feature),...],...] to a NumPy array Given a word consisting of lists of lists/sets of (value, feature) tuples, return a NumPy array where each row is a segment and each column is a feature. Args: ft_names (list): list of feature names (as strings) in order; this argument controls what features are included in the array that is output and their order vis-a-vis the columns of the array word (list): list of lists of feature tuples (output by FeatureTable.word_fts) Returns: ndarray: array in which each row is a segment and each column is a feature """ vdict = {'+': 1, '-': -1, '0': 0} def seg2col(seg): seg = dict([(k, v) for (v, k) in seg]) return [vdict[seg[ft]] for ft in ft_names] return numpy.array([seg2col(s) for s in word], order='F')
[ "def", "word2array", "(", "ft_names", ",", "word", ")", ":", "vdict", "=", "{", "'+'", ":", "1", ",", "'-'", ":", "-", "1", ",", "'0'", ":", "0", "}", "def", "seg2col", "(", "seg", ")", ":", "seg", "=", "dict", "(", "[", "(", "k", ",", "v", ")", "for", "(", "v", ",", "k", ")", "in", "seg", "]", ")", "return", "[", "vdict", "[", "seg", "[", "ft", "]", "]", "for", "ft", "in", "ft_names", "]", "return", "numpy", ".", "array", "(", "[", "seg2col", "(", "s", ")", "for", "s", "in", "word", "]", ",", "order", "=", "'F'", ")" ]
Converts `word` [[(value, feature),...],...] to a NumPy array Given a word consisting of lists of lists/sets of (value, feature) tuples, return a NumPy array where each row is a segment and each column is a feature. Args: ft_names (list): list of feature names (as strings) in order; this argument controls what features are included in the array that is output and their order vis-a-vis the columns of the array word (list): list of lists of feature tuples (output by FeatureTable.word_fts) Returns: ndarray: array in which each row is a segment and each column is a feature
[ "Converts", "word", "[[", "(", "value", "feature", ")", "...", "]", "...", "]", "to", "a", "NumPy", "array" ]
python
train
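word2array above needs only NumPy, so it can be exercised with hand-built (value, feature) tuples; in real use the word argument comes from FeatureTable.word_fts. With the definition above in scope:

ft_names = ['syl', 'son', 'cons']
word = [
    [('-', 'syl'), ('-', 'son'), ('+', 'cons')],   # consonant-like segment
    [('+', 'syl'), ('+', 'son'), ('-', 'cons')],   # vowel-like segment
]
print(word2array(ft_names, word))
# [[-1 -1  1]
#  [ 1  1 -1]]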
numenta/nupic
src/nupic/algorithms/backtracking_tm_shim.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/backtracking_tm_shim.py#L256-L278
def compute(self, bottomUpInput, enableLearn, computeInfOutput=None): """ (From `backtracking_tm.py`) Handle one compute, possibly learning. @param bottomUpInput The bottom-up input, typically from a spatial pooler @param enableLearn If true, perform learning @param computeInfOutput If None, default behavior is to disable the inference output when enableLearn is on. If true, compute the inference output If false, do not compute the inference output """ super(MonitoredTMShim, self).compute(set(bottomUpInput.nonzero()[0]), learn=enableLearn) numberOfCells = self.numberOfCells() activeState = numpy.zeros(numberOfCells) activeState[self.getActiveCells()] = 1 self.infActiveState["t"] = activeState output = numpy.zeros(numberOfCells) output[self.getPredictiveCells() + self.getActiveCells()] = 1 return output
[ "def", "compute", "(", "self", ",", "bottomUpInput", ",", "enableLearn", ",", "computeInfOutput", "=", "None", ")", ":", "super", "(", "MonitoredTMShim", ",", "self", ")", ".", "compute", "(", "set", "(", "bottomUpInput", ".", "nonzero", "(", ")", "[", "0", "]", ")", ",", "learn", "=", "enableLearn", ")", "numberOfCells", "=", "self", ".", "numberOfCells", "(", ")", "activeState", "=", "numpy", ".", "zeros", "(", "numberOfCells", ")", "activeState", "[", "self", ".", "getActiveCells", "(", ")", "]", "=", "1", "self", ".", "infActiveState", "[", "\"t\"", "]", "=", "activeState", "output", "=", "numpy", ".", "zeros", "(", "numberOfCells", ")", "output", "[", "self", ".", "getPredictiveCells", "(", ")", "+", "self", ".", "getActiveCells", "(", ")", "]", "=", "1", "return", "output" ]
(From `backtracking_tm.py`) Handle one compute, possibly learning. @param bottomUpInput The bottom-up input, typically from a spatial pooler @param enableLearn If true, perform learning @param computeInfOutput If None, default behavior is to disable the inference output when enableLearn is on. If true, compute the inference output If false, do not compute the inference output
[ "(", "From", "backtracking_tm", ".", "py", ")", "Handle", "one", "compute", "possibly", "learning", "." ]
python
valid
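A small NumPy sketch of how the output vector is assembled at the end of compute above: the predictive and active cell index lists are concatenated and the corresponding entries are set to 1. The temporal memory itself is not involved here; the index lists are made up.

import numpy

numberOfCells = 8
activeCells = [1, 4]
predictiveCells = [2, 5]

activeState = numpy.zeros(numberOfCells)
activeState[activeCells] = 1

output = numpy.zeros(numberOfCells)
output[predictiveCells + activeCells] = 1   # list concatenation, as in the shim
print(output)                               # [0. 1. 1. 0. 1. 1. 0. 0.]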
adrn/gala
gala/dynamics/orbit.py
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/dynamics/orbit.py#L733-L800
def align_circulation_with_z(self, circulation=None): """ If the input orbit is a tube orbit, this function aligns the circulation axis with the z axis and returns a copy. Parameters ---------- circulation : array_like (optional) Array of bits that specify the axis about which the orbit circulates. If not provided, will compute this using :meth:`~gala.dynamics.Orbit.circulation`. See that method for more information. Returns ------- orb : :class:`~gala.dynamics.Orbit` A copy of the original orbit object with circulation aligned with the z axis. """ if circulation is None: circulation = self.circulation() circulation = atleast_2d(circulation, insert_axis=1) cart = self.cartesian pos = cart.xyz vel = np.vstack((cart.v_x.value[None], cart.v_y.value[None], cart.v_z.value[None])) * cart.v_x.unit if pos.ndim < 3: pos = pos[...,np.newaxis] vel = vel[...,np.newaxis] if (circulation.shape[0] != self.ndim or circulation.shape[1] != pos.shape[2]): raise ValueError("Shape of 'circulation' array should match the " "shape of the position/velocity (minus the time " "axis).") new_pos = pos.copy() new_vel = vel.copy() for n in range(pos.shape[2]): if circulation[2,n] == 1 or np.all(circulation[:,n] == 0): # already circulating about z or box orbit continue if sum(circulation[:,n]) > 1: logger.warning("Circulation about multiple axes - are you sure " "the orbit has been integrated for long enough?") if circulation[0,n] == 1: circ = 0 elif circulation[1,n] == 1: circ = 1 else: raise RuntimeError("Should never get here...") new_pos[circ,:,n] = pos[2,:,n] new_pos[2,:,n] = pos[circ,:,n] new_vel[circ,:,n] = vel[2,:,n] new_vel[2,:,n] = vel[circ,:,n] return self.__class__(pos=new_pos.reshape(cart.xyz.shape), vel=new_vel.reshape(cart.xyz.shape), t=self.t, hamiltonian=self.hamiltonian)
[ "def", "align_circulation_with_z", "(", "self", ",", "circulation", "=", "None", ")", ":", "if", "circulation", "is", "None", ":", "circulation", "=", "self", ".", "circulation", "(", ")", "circulation", "=", "atleast_2d", "(", "circulation", ",", "insert_axis", "=", "1", ")", "cart", "=", "self", ".", "cartesian", "pos", "=", "cart", ".", "xyz", "vel", "=", "np", ".", "vstack", "(", "(", "cart", ".", "v_x", ".", "value", "[", "None", "]", ",", "cart", ".", "v_y", ".", "value", "[", "None", "]", ",", "cart", ".", "v_z", ".", "value", "[", "None", "]", ")", ")", "*", "cart", ".", "v_x", ".", "unit", "if", "pos", ".", "ndim", "<", "3", ":", "pos", "=", "pos", "[", "...", ",", "np", ".", "newaxis", "]", "vel", "=", "vel", "[", "...", ",", "np", ".", "newaxis", "]", "if", "(", "circulation", ".", "shape", "[", "0", "]", "!=", "self", ".", "ndim", "or", "circulation", ".", "shape", "[", "1", "]", "!=", "pos", ".", "shape", "[", "2", "]", ")", ":", "raise", "ValueError", "(", "\"Shape of 'circulation' array should match the \"", "\"shape of the position/velocity (minus the time \"", "\"axis).\"", ")", "new_pos", "=", "pos", ".", "copy", "(", ")", "new_vel", "=", "vel", ".", "copy", "(", ")", "for", "n", "in", "range", "(", "pos", ".", "shape", "[", "2", "]", ")", ":", "if", "circulation", "[", "2", ",", "n", "]", "==", "1", "or", "np", ".", "all", "(", "circulation", "[", ":", ",", "n", "]", "==", "0", ")", ":", "# already circulating about z or box orbit", "continue", "if", "sum", "(", "circulation", "[", ":", ",", "n", "]", ")", ">", "1", ":", "logger", ".", "warning", "(", "\"Circulation about multiple axes - are you sure \"", "\"the orbit has been integrated for long enough?\"", ")", "if", "circulation", "[", "0", ",", "n", "]", "==", "1", ":", "circ", "=", "0", "elif", "circulation", "[", "1", ",", "n", "]", "==", "1", ":", "circ", "=", "1", "else", ":", "raise", "RuntimeError", "(", "\"Should never get here...\"", ")", "new_pos", "[", "circ", ",", ":", ",", "n", "]", "=", "pos", "[", "2", ",", ":", ",", "n", "]", "new_pos", "[", "2", ",", ":", ",", "n", "]", "=", "pos", "[", "circ", ",", ":", ",", "n", "]", "new_vel", "[", "circ", ",", ":", ",", "n", "]", "=", "vel", "[", "2", ",", ":", ",", "n", "]", "new_vel", "[", "2", ",", ":", ",", "n", "]", "=", "vel", "[", "circ", ",", ":", ",", "n", "]", "return", "self", ".", "__class__", "(", "pos", "=", "new_pos", ".", "reshape", "(", "cart", ".", "xyz", ".", "shape", ")", ",", "vel", "=", "new_vel", ".", "reshape", "(", "cart", ".", "xyz", ".", "shape", ")", ",", "t", "=", "self", ".", "t", ",", "hamiltonian", "=", "self", ".", "hamiltonian", ")" ]
If the input orbit is a tube orbit, this function aligns the circulation axis with the z axis and returns a copy. Parameters ---------- circulation : array_like (optional) Array of bits that specify the axis about which the orbit circulates. If not provided, will compute this using :meth:`~gala.dynamics.Orbit.circulation`. See that method for more information. Returns ------- orb : :class:`~gala.dynamics.Orbit` A copy of the original orbit object with circulation aligned with the z axis.
[ "If", "the", "input", "orbit", "is", "a", "tube", "orbit", "this", "function", "aligns", "the", "circulation", "axis", "with", "the", "z", "axis", "and", "returns", "a", "copy", "." ]
python
train
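A minimal NumPy sketch of just the axis swap performed above for a single orbit: when the circulation bits say the orbit circulates about x, the x and z rows of the position (and, in the real method, velocity) arrays are exchanged. Units, multiple orbits and the box-orbit warning are ignored here.

import numpy as np

circulation = np.array([1, 0, 0])        # circulates about x
pos = np.array([[1., 2.],                # x(t)
                [3., 4.],                # y(t)
                [5., 6.]])               # z(t)

circ = int(np.argmax(circulation))       # axis of circulation (0 == x here)
new_pos = pos.copy()
new_pos[circ], new_pos[2] = pos[2], pos[circ]
print(new_pos)                           # x and z rows swapped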
syndbg/demonoid-api
demonoid/parser.py
https://github.com/syndbg/demonoid-api/blob/518aa389ac91b5243b92fc19923103f31041a61e/demonoid/parser.py#L154-L174
def parse_torrent_properties(table_datas): """ Static method that parses a given list of table data elements and using helper methods `Parser.is_subcategory`, `Parser.is_quality`, `Parser.is_language`, collects torrent properties. :param list lxml.HtmlElement table_datas: table_datas to parse :return: identified category, subcategory, quality and languages. :rtype: dict """ output = {'category': table_datas[0].text, 'subcategory': None, 'quality': None, 'language': None} for i in range(1, len(table_datas)): td = table_datas[i] url = td.get('href') params = Parser.get_params(url) if Parser.is_subcategory(params) and not output['subcategory']: output['subcategory'] = td.text elif Parser.is_quality(params) and not output['quality']: output['quality'] = td.text elif Parser.is_language(params) and not output['language']: output['language'] = td.text return output
[ "def", "parse_torrent_properties", "(", "table_datas", ")", ":", "output", "=", "{", "'category'", ":", "table_datas", "[", "0", "]", ".", "text", ",", "'subcategory'", ":", "None", ",", "'quality'", ":", "None", ",", "'language'", ":", "None", "}", "for", "i", "in", "range", "(", "1", ",", "len", "(", "table_datas", ")", ")", ":", "td", "=", "table_datas", "[", "i", "]", "url", "=", "td", ".", "get", "(", "'href'", ")", "params", "=", "Parser", ".", "get_params", "(", "url", ")", "if", "Parser", ".", "is_subcategory", "(", "params", ")", "and", "not", "output", "[", "'subcategory'", "]", ":", "output", "[", "'subcategory'", "]", "=", "td", ".", "text", "elif", "Parser", ".", "is_quality", "(", "params", ")", "and", "not", "output", "[", "'quality'", "]", ":", "output", "[", "'quality'", "]", "=", "td", ".", "text", "elif", "Parser", ".", "is_language", "(", "params", ")", "and", "not", "output", "[", "'language'", "]", ":", "output", "[", "'language'", "]", "=", "td", ".", "text", "return", "output" ]
Static method that parses a given list of table data elements and using helper methods `Parser.is_subcategory`, `Parser.is_quality`, `Parser.is_language`, collects torrent properties. :param list lxml.HtmlElement table_datas: table_datas to parse :return: identified category, subcategory, quality and languages. :rtype: dict
[ "Static", "method", "that", "parses", "a", "given", "list", "of", "table", "data", "elements", "and", "using", "helper", "methods", "Parser", ".", "is_subcategory", "Parser", ".", "is_quality", "Parser", ".", "is_language", "collects", "torrent", "properties", "." ]
python
train
shendo/websnort
websnort/runner.py
https://github.com/shendo/websnort/blob/19495e8834a111e889ba28efad8cd90cf55eb661/websnort/runner.py#L86-L126
def run(pcap): """ Runs all configured IDS instances against the supplied pcap. :param pcap: File path to pcap file to analyse :returns: Dict with details and results of run/s """ start = datetime.now() errors = [] status = STATUS_FAILED analyses = [] pool = ThreadPool(MAX_THREADS) try: if not is_pcap(pcap): raise Exception("Not a valid pcap file") runners = [] for conf in Config().modules.values(): runner = registry.get(conf['module']) if not runner: raise Exception("No module named: '{0}' found registered" .format(conf['module'])) runners.append(runner(conf)) # launch via worker pool analyses = [ pool.apply_async(_run_ids, (runner, pcap)) for runner in runners ] analyses = [ x.get() for x in analyses ] # were all runs successful? if all([ x['status'] == STATUS_SUCCESS for x in analyses ]): status = STATUS_SUCCESS # propagate any errors to the main list for run in [ x for x in analyses if x['status'] != STATUS_SUCCESS ]: errors.append("Failed to run {0}: {1}".format(run['name'], run['error'])) except Exception as ex: errors.append(str(ex)) return {'start': start, 'duration': duration(start), 'status': status, 'analyses': analyses, 'errors': errors, }
[ "def", "run", "(", "pcap", ")", ":", "start", "=", "datetime", ".", "now", "(", ")", "errors", "=", "[", "]", "status", "=", "STATUS_FAILED", "analyses", "=", "[", "]", "pool", "=", "ThreadPool", "(", "MAX_THREADS", ")", "try", ":", "if", "not", "is_pcap", "(", "pcap", ")", ":", "raise", "Exception", "(", "\"Not a valid pcap file\"", ")", "runners", "=", "[", "]", "for", "conf", "in", "Config", "(", ")", ".", "modules", ".", "values", "(", ")", ":", "runner", "=", "registry", ".", "get", "(", "conf", "[", "'module'", "]", ")", "if", "not", "runner", ":", "raise", "Exception", "(", "\"No module named: '{0}' found registered\"", ".", "format", "(", "conf", "[", "'module'", "]", ")", ")", "runners", ".", "append", "(", "runner", "(", "conf", ")", ")", "# launch via worker pool", "analyses", "=", "[", "pool", ".", "apply_async", "(", "_run_ids", ",", "(", "runner", ",", "pcap", ")", ")", "for", "runner", "in", "runners", "]", "analyses", "=", "[", "x", ".", "get", "(", ")", "for", "x", "in", "analyses", "]", "# were all runs successful?", "if", "all", "(", "[", "x", "[", "'status'", "]", "==", "STATUS_SUCCESS", "for", "x", "in", "analyses", "]", ")", ":", "status", "=", "STATUS_SUCCESS", "# propagate any errors to the main list", "for", "run", "in", "[", "x", "for", "x", "in", "analyses", "if", "x", "[", "'status'", "]", "!=", "STATUS_SUCCESS", "]", ":", "errors", ".", "append", "(", "\"Failed to run {0}: {1}\"", ".", "format", "(", "run", "[", "'name'", "]", ",", "run", "[", "'error'", "]", ")", ")", "except", "Exception", "as", "ex", ":", "errors", ".", "append", "(", "str", "(", "ex", ")", ")", "return", "{", "'start'", ":", "start", ",", "'duration'", ":", "duration", "(", "start", ")", ",", "'status'", ":", "status", ",", "'analyses'", ":", "analyses", ",", "'errors'", ":", "errors", ",", "}" ]
Runs all configured IDS instances against the supplied pcap. :param pcap: File path to pcap file to analyse :returns: Dict with details and results of run/s
[ "Runs", "all", "configured", "IDS", "instances", "against", "the", "supplied", "pcap", "." ]
python
train
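A standalone sketch of the fan-out pattern run() uses above: analyses are submitted to a ThreadPool with apply_async, collected with .get(), and the overall status is the conjunction of the per-run statuses. The _run_ids stand-in and the plain 'SUCCESS'/'FAILED' strings are placeholders for websnort's real runner and STATUS_* constants.

from multiprocessing.pool import ThreadPool

def _run_ids(runner, pcap):
    # placeholder for the real IDS invocation
    return {'name': runner, 'status': 'SUCCESS' if pcap.endswith('.pcap') else 'FAILED'}

pool = ThreadPool(4)
runners = ['snort', 'suricata']
async_results = [pool.apply_async(_run_ids, (r, 'traffic.pcap')) for r in runners]
analyses = [a.get() for a in async_results]

status = 'SUCCESS' if all(a['status'] == 'SUCCESS' for a in analyses) else 'FAILED'
print(status, analyses)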
inspirehep/inspire-query-parser
inspire_query_parser/visitors/elastic_search_visitor.py
https://github.com/inspirehep/inspire-query-parser/blob/9dde20d7caef89a48bb419b866f4535c88cfc00d/inspire_query_parser/visitors/elastic_search_visitor.py#L191-L250
def _generate_author_query(self, author_name): """Generates a query handling specifically authors. Notes: The match query is generic enough to return many results. Then, using the filter clause we truncate these so that we imitate legacy's behaviour on returning more "exact" results. E.g. Searching for `Smith, John` shouldn't return papers of 'Smith, Bob'. Additionally, doing a ``match`` with ``"operator": "and"`` in order to be even more exact in our search, by requiring that ``full_name`` field contains both """ name_variations = [name_variation.lower() for name_variation in generate_minimal_name_variations(author_name)] # When the query contains sufficient data, i.e. full names, e.g. ``Mele, Salvatore`` (and not ``Mele, S`` or # ``Mele``) we can improve our filtering in order to filter out results containing records with authors that # have the same non lastnames prefix, e.g. 'Mele, Samuele'. if author_name_contains_fullnames(author_name): specialized_author_filter = [ { 'bool': { 'must': [ { 'term': {ElasticSearchVisitor.AUTHORS_NAME_VARIATIONS_FIELD: names_variation[0]} }, generate_match_query( ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['author'], names_variation[1], with_operator_and=True ) ] } } for names_variation in product(name_variations, name_variations) ] else: # In the case of initials or even single lastname search, filter with only the name variations. specialized_author_filter = [ {'term': {ElasticSearchVisitor.AUTHORS_NAME_VARIATIONS_FIELD: name_variation}} for name_variation in name_variations ] query = { 'bool': { 'filter': { 'bool': { 'should': specialized_author_filter } }, 'must': { 'match': { ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['author']: author_name } } } } return generate_nested_query(ElasticSearchVisitor.AUTHORS_NESTED_QUERY_PATH, query)
[ "def", "_generate_author_query", "(", "self", ",", "author_name", ")", ":", "name_variations", "=", "[", "name_variation", ".", "lower", "(", ")", "for", "name_variation", "in", "generate_minimal_name_variations", "(", "author_name", ")", "]", "# When the query contains sufficient data, i.e. full names, e.g. ``Mele, Salvatore`` (and not ``Mele, S`` or", "# ``Mele``) we can improve our filtering in order to filter out results containing records with authors that", "# have the same non lastnames prefix, e.g. 'Mele, Samuele'.", "if", "author_name_contains_fullnames", "(", "author_name", ")", ":", "specialized_author_filter", "=", "[", "{", "'bool'", ":", "{", "'must'", ":", "[", "{", "'term'", ":", "{", "ElasticSearchVisitor", ".", "AUTHORS_NAME_VARIATIONS_FIELD", ":", "names_variation", "[", "0", "]", "}", "}", ",", "generate_match_query", "(", "ElasticSearchVisitor", ".", "KEYWORD_TO_ES_FIELDNAME", "[", "'author'", "]", ",", "names_variation", "[", "1", "]", ",", "with_operator_and", "=", "True", ")", "]", "}", "}", "for", "names_variation", "in", "product", "(", "name_variations", ",", "name_variations", ")", "]", "else", ":", "# In the case of initials or even single lastname search, filter with only the name variations.", "specialized_author_filter", "=", "[", "{", "'term'", ":", "{", "ElasticSearchVisitor", ".", "AUTHORS_NAME_VARIATIONS_FIELD", ":", "name_variation", "}", "}", "for", "name_variation", "in", "name_variations", "]", "query", "=", "{", "'bool'", ":", "{", "'filter'", ":", "{", "'bool'", ":", "{", "'should'", ":", "specialized_author_filter", "}", "}", ",", "'must'", ":", "{", "'match'", ":", "{", "ElasticSearchVisitor", ".", "KEYWORD_TO_ES_FIELDNAME", "[", "'author'", "]", ":", "author_name", "}", "}", "}", "}", "return", "generate_nested_query", "(", "ElasticSearchVisitor", ".", "AUTHORS_NESTED_QUERY_PATH", ",", "query", ")" ]
Generates a query handling specifically authors. Notes: The match query is generic enough to return many results. Then, using the filter clause we truncate these so that we imitate legacy's behaviour on returning more "exact" results. E.g. Searching for `Smith, John` shouldn't return papers of 'Smith, Bob'. Additionally, doing a ``match`` with ``"operator": "and"`` in order to be even more exact in our search, by requiring that ``full_name`` field contains both
[ "Generates", "a", "query", "handling", "specifically", "authors", "." ]
python
train
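The rough shape of the query built above for an initials-only search, with placeholder field names and hand-written name variations; in the real visitor the field names come from ElasticSearchVisitor constants, the variations from generate_minimal_name_variations, and generate_nested_query wraps the result in a nested query on the authors path.

import json

author_name = 'Smith, J.'
name_variations = ['smith', 'smith, j', 'smith, j.']   # illustrative only

query = {
    'bool': {
        'filter': {'bool': {'should': [
            {'term': {'authors.name_variations': v}} for v in name_variations
        ]}},
        'must': {'match': {'authors.full_name': author_name}},
    }
}
print(json.dumps(query, indent=2))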
hyperledger/sawtooth-core
validator/sawtooth_validator/networking/interconnect.py
https://github.com/hyperledger/sawtooth-core/blob/8cf473bc2207e51f02bd182d825158a57d72b098/validator/sawtooth_validator/networking/interconnect.py#L481-L590
def setup(self, socket_type, complete_or_error_queue): """Setup the asyncio event loop. Args: socket_type (int from zmq.*): One of zmq.DEALER or zmq.ROUTER complete_or_error_queue (queue.Queue): A way to propagate errors back to the calling thread. Needed since this function is directly used in Thread. Returns: None """ try: if self._secured: if self._server_public_key is None or \ self._server_private_key is None: raise LocalConfigurationError( "Attempting to start socket in secure mode, " "but complete server keys were not provided") self._event_loop = zmq.asyncio.ZMQEventLoop() asyncio.set_event_loop(self._event_loop) self._context = zmq.asyncio.Context() self._socket = self._context.socket(socket_type) self._socket.set(zmq.TCP_KEEPALIVE, 1) self._socket.set(zmq.TCP_KEEPALIVE_IDLE, self._connection_timeout) self._socket.set(zmq.TCP_KEEPALIVE_INTVL, self._heartbeat_interval) if socket_type == zmq.DEALER: self._socket.identity = "{}-{}".format( self._zmq_identity, hashlib.sha512(uuid.uuid4().hex.encode() ).hexdigest()[:23]).encode('ascii') if self._secured: # Generate ephemeral certificates for this connection public_key, secretkey = zmq.curve_keypair() self._socket.curve_publickey = public_key self._socket.curve_secretkey = secretkey self._socket.curve_serverkey = self._server_public_key self._socket.connect(self._address) elif socket_type == zmq.ROUTER: if self._secured: auth = AsyncioAuthenticator(self._context) self._auth = auth auth.start() auth.configure_curve(domain='*', location=zmq.auth.CURVE_ALLOW_ANY) self._socket.curve_secretkey = self._server_private_key self._socket.curve_publickey = self._server_public_key self._socket.curve_server = True try: self._socket.bind(self._address) except zmq.error.ZMQError as e: raise LocalConfigurationError( "Can't bind to {}: {}".format(self._address, str(e))) else: LOGGER.info("Listening on %s", self._address) self._dispatcher.add_send_message(self._connection, self.send_message) self._dispatcher.add_send_last_message(self._connection, self.send_last_message) asyncio.ensure_future(self._remove_expired_futures(), loop=self._event_loop) asyncio.ensure_future(self._receive_message(), loop=self._event_loop) asyncio.ensure_future(self._dispatch_message(), loop=self._event_loop) self._dispatcher_queue = asyncio.Queue() if self._monitor: self._monitor_fd = "inproc://monitor.s-{}".format( _generate_id()[0:5]) self._monitor_sock = self._socket.get_monitor_socket( zmq.EVENT_DISCONNECTED, addr=self._monitor_fd) asyncio.ensure_future(self._monitor_disconnects(), loop=self._event_loop) except Exception as e: # Put the exception on the queue where in start we are waiting # for it. complete_or_error_queue.put_nowait(e) self._close_sockets() raise if self._heartbeat: asyncio.ensure_future(self._do_heartbeat(), loop=self._event_loop) # Put a 'complete with the setup tasks' sentinel on the queue. complete_or_error_queue.put_nowait(_STARTUP_COMPLETE_SENTINEL) asyncio.ensure_future(self._notify_started(), loop=self._event_loop) self._event_loop.run_forever() # event_loop.stop called elsewhere will cause the loop to break out # of run_forever then it can be closed and the context destroyed. self._event_loop.close() self._close_sockets()
[ "def", "setup", "(", "self", ",", "socket_type", ",", "complete_or_error_queue", ")", ":", "try", ":", "if", "self", ".", "_secured", ":", "if", "self", ".", "_server_public_key", "is", "None", "or", "self", ".", "_server_private_key", "is", "None", ":", "raise", "LocalConfigurationError", "(", "\"Attempting to start socket in secure mode, \"", "\"but complete server keys were not provided\"", ")", "self", ".", "_event_loop", "=", "zmq", ".", "asyncio", ".", "ZMQEventLoop", "(", ")", "asyncio", ".", "set_event_loop", "(", "self", ".", "_event_loop", ")", "self", ".", "_context", "=", "zmq", ".", "asyncio", ".", "Context", "(", ")", "self", ".", "_socket", "=", "self", ".", "_context", ".", "socket", "(", "socket_type", ")", "self", ".", "_socket", ".", "set", "(", "zmq", ".", "TCP_KEEPALIVE", ",", "1", ")", "self", ".", "_socket", ".", "set", "(", "zmq", ".", "TCP_KEEPALIVE_IDLE", ",", "self", ".", "_connection_timeout", ")", "self", ".", "_socket", ".", "set", "(", "zmq", ".", "TCP_KEEPALIVE_INTVL", ",", "self", ".", "_heartbeat_interval", ")", "if", "socket_type", "==", "zmq", ".", "DEALER", ":", "self", ".", "_socket", ".", "identity", "=", "\"{}-{}\"", ".", "format", "(", "self", ".", "_zmq_identity", ",", "hashlib", ".", "sha512", "(", "uuid", ".", "uuid4", "(", ")", ".", "hex", ".", "encode", "(", ")", ")", ".", "hexdigest", "(", ")", "[", ":", "23", "]", ")", ".", "encode", "(", "'ascii'", ")", "if", "self", ".", "_secured", ":", "# Generate ephemeral certificates for this connection", "public_key", ",", "secretkey", "=", "zmq", ".", "curve_keypair", "(", ")", "self", ".", "_socket", ".", "curve_publickey", "=", "public_key", "self", ".", "_socket", ".", "curve_secretkey", "=", "secretkey", "self", ".", "_socket", ".", "curve_serverkey", "=", "self", ".", "_server_public_key", "self", ".", "_socket", ".", "connect", "(", "self", ".", "_address", ")", "elif", "socket_type", "==", "zmq", ".", "ROUTER", ":", "if", "self", ".", "_secured", ":", "auth", "=", "AsyncioAuthenticator", "(", "self", ".", "_context", ")", "self", ".", "_auth", "=", "auth", "auth", ".", "start", "(", ")", "auth", ".", "configure_curve", "(", "domain", "=", "'*'", ",", "location", "=", "zmq", ".", "auth", ".", "CURVE_ALLOW_ANY", ")", "self", ".", "_socket", ".", "curve_secretkey", "=", "self", ".", "_server_private_key", "self", ".", "_socket", ".", "curve_publickey", "=", "self", ".", "_server_public_key", "self", ".", "_socket", ".", "curve_server", "=", "True", "try", ":", "self", ".", "_socket", ".", "bind", "(", "self", ".", "_address", ")", "except", "zmq", ".", "error", ".", "ZMQError", "as", "e", ":", "raise", "LocalConfigurationError", "(", "\"Can't bind to {}: {}\"", ".", "format", "(", "self", ".", "_address", ",", "str", "(", "e", ")", ")", ")", "else", ":", "LOGGER", ".", "info", "(", "\"Listening on %s\"", ",", "self", ".", "_address", ")", "self", ".", "_dispatcher", ".", "add_send_message", "(", "self", ".", "_connection", ",", "self", ".", "send_message", ")", "self", ".", "_dispatcher", ".", "add_send_last_message", "(", "self", ".", "_connection", ",", "self", ".", "send_last_message", ")", "asyncio", ".", "ensure_future", "(", "self", ".", "_remove_expired_futures", "(", ")", ",", "loop", "=", "self", ".", "_event_loop", ")", "asyncio", ".", "ensure_future", "(", "self", ".", "_receive_message", "(", ")", ",", "loop", "=", "self", ".", "_event_loop", ")", "asyncio", ".", "ensure_future", "(", "self", ".", "_dispatch_message", "(", ")", ",", "loop", "=", "self", ".", "_event_loop", ")", 
"self", ".", "_dispatcher_queue", "=", "asyncio", ".", "Queue", "(", ")", "if", "self", ".", "_monitor", ":", "self", ".", "_monitor_fd", "=", "\"inproc://monitor.s-{}\"", ".", "format", "(", "_generate_id", "(", ")", "[", "0", ":", "5", "]", ")", "self", ".", "_monitor_sock", "=", "self", ".", "_socket", ".", "get_monitor_socket", "(", "zmq", ".", "EVENT_DISCONNECTED", ",", "addr", "=", "self", ".", "_monitor_fd", ")", "asyncio", ".", "ensure_future", "(", "self", ".", "_monitor_disconnects", "(", ")", ",", "loop", "=", "self", ".", "_event_loop", ")", "except", "Exception", "as", "e", ":", "# Put the exception on the queue where in start we are waiting", "# for it.", "complete_or_error_queue", ".", "put_nowait", "(", "e", ")", "self", ".", "_close_sockets", "(", ")", "raise", "if", "self", ".", "_heartbeat", ":", "asyncio", ".", "ensure_future", "(", "self", ".", "_do_heartbeat", "(", ")", ",", "loop", "=", "self", ".", "_event_loop", ")", "# Put a 'complete with the setup tasks' sentinel on the queue.", "complete_or_error_queue", ".", "put_nowait", "(", "_STARTUP_COMPLETE_SENTINEL", ")", "asyncio", ".", "ensure_future", "(", "self", ".", "_notify_started", "(", ")", ",", "loop", "=", "self", ".", "_event_loop", ")", "self", ".", "_event_loop", ".", "run_forever", "(", ")", "# event_loop.stop called elsewhere will cause the loop to break out", "# of run_forever then it can be closed and the context destroyed.", "self", ".", "_event_loop", ".", "close", "(", ")", "self", ".", "_close_sockets", "(", ")" ]
Setup the asyncio event loop. Args: socket_type (int from zmq.*): One of zmq.DEALER or zmq.ROUTER complete_or_error_queue (queue.Queue): A way to propagate errors back to the calling thread. Needed since this function is directly used in Thread. Returns: None
[ "Setup", "the", "asyncio", "event", "loop", "." ]
python
train
molmod/molmod
molmod/pairff.py
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/pairff.py#L282-L287
def esp(self): """Compute the electrostatic potential at each atom due to other atoms""" result = np.zeros(self.numc, float) for index1 in range(self.numc): result[index1] = self.esp_component(index1) return result
[ "def", "esp", "(", "self", ")", ":", "result", "=", "np", ".", "zeros", "(", "self", ".", "numc", ",", "float", ")", "for", "index1", "in", "range", "(", "self", ".", "numc", ")", ":", "result", "[", "index1", "]", "=", "self", ".", "esp_component", "(", "index1", ")", "return", "result" ]
Compute the electrostatic potential at each atom due to other atoms
[ "Compute", "the", "electrostatic", "potential", "at", "each", "atom", "due", "to", "other", "atoms" ]
python
train
WebarchivCZ/WA-KAT
src/wa_kat/templates/static/js/Lib/site-packages/components/output_picker.py
https://github.com/WebarchivCZ/WA-KAT/blob/16d064a3a775dc1d2713debda7847ded52dd2a06/src/wa_kat/templates/static/js/Lib/site-packages/components/output_picker.py#L52-L62
def show(cls, values=None): """ Show the interface for picking / downloading the datasets. """ if values: cls.set(values) cls.el.style.display = "block" cls.overlay.show() cls.overlay.el.bind("click", lambda x: cls.hide())
[ "def", "show", "(", "cls", ",", "values", "=", "None", ")", ":", "if", "values", ":", "cls", ".", "set", "(", "values", ")", "cls", ".", "el", ".", "style", ".", "display", "=", "\"block\"", "cls", ".", "overlay", ".", "show", "(", ")", "cls", ".", "overlay", ".", "el", ".", "bind", "(", "\"click\"", ",", "lambda", "x", ":", "cls", ".", "hide", "(", ")", ")" ]
Show the interface for picking / downloading the datasets.
[ "Show", "the", "interface", "for", "picking", "/", "downloading", "the", "datasets", "." ]
python
train
Qiskit/qiskit-terra
qiskit/tools/qcvv/fitters.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/tools/qcvv/fitters.py#L93-L119
def rb_epc(fit, rb_pattern): """Take the rb fit data and convert it into EPC (error per Clifford) Args: fit (dict): dictionary of the fit quantities (A, alpha, B) with the keys 'qn' where n is the qubit and subkeys 'fit', e.g. {'q0':{'fit': [1, 0, 0.9], 'fiterr': [0, 0, 0]}}} rb_pattern (list): (see randomized benchmarking functions). Pattern which specifies which qubits performing RB with which qubits. E.g. [[1],[0,2]] is Q1 doing 1Q RB simultaneously with Q0/Q2 doing 2Q RB Return: dict: updates the passed in fit dictionary with the epc """ for patterns in rb_pattern: for qubit in patterns: fitalpha = fit['q%d' % qubit]['fit'][1] fitalphaerr = fit['q%d' % qubit]['fiterr'][1] nrb = 2 ** len(patterns) fit['q%d' % qubit]['fit_calcs'] = {} fit['q%d' % qubit]['fit_calcs']['epc'] = [(nrb - 1) / nrb * (1 - fitalpha), fitalphaerr / fitalpha] fit['q%d' % qubit]['fit_calcs']['epc'][1] *= fit['q%d' % qubit]['fit_calcs']['epc'][0] return fit
[ "def", "rb_epc", "(", "fit", ",", "rb_pattern", ")", ":", "for", "patterns", "in", "rb_pattern", ":", "for", "qubit", "in", "patterns", ":", "fitalpha", "=", "fit", "[", "'q%d'", "%", "qubit", "]", "[", "'fit'", "]", "[", "1", "]", "fitalphaerr", "=", "fit", "[", "'q%d'", "%", "qubit", "]", "[", "'fiterr'", "]", "[", "1", "]", "nrb", "=", "2", "**", "len", "(", "patterns", ")", "fit", "[", "'q%d'", "%", "qubit", "]", "[", "'fit_calcs'", "]", "=", "{", "}", "fit", "[", "'q%d'", "%", "qubit", "]", "[", "'fit_calcs'", "]", "[", "'epc'", "]", "=", "[", "(", "nrb", "-", "1", ")", "/", "nrb", "*", "(", "1", "-", "fitalpha", ")", ",", "fitalphaerr", "/", "fitalpha", "]", "fit", "[", "'q%d'", "%", "qubit", "]", "[", "'fit_calcs'", "]", "[", "'epc'", "]", "[", "1", "]", "*=", "fit", "[", "'q%d'", "%", "qubit", "]", "[", "'fit_calcs'", "]", "[", "'epc'", "]", "[", "0", "]", "return", "fit" ]
Take the rb fit data and convert it into EPC (error per Clifford) Args: fit (dict): dictionary of the fit quantities (A, alpha, B) with the keys 'qn' where n is the qubit and subkeys 'fit', e.g. {'q0':{'fit': [1, 0, 0.9], 'fiterr': [0, 0, 0]}}} rb_pattern (list): (see randomized benchmarking functions). Pattern which specifies which qubits performing RB with which qubits. E.g. [[1],[0,2]] is Q1 doing 1Q RB simultaneously with Q0/Q2 doing 2Q RB Return: dict: updates the passed in fit dictionary with the epc
[ "Take", "the", "rb", "fit", "data", "and", "convert", "it", "into", "EPC", "(", "error", "per", "Clifford", ")" ]
python
test
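A worked example of the EPC arithmetic in rb_epc above for one qubit doing 1Q RB (nrb = 2**1 = 2): epc = (nrb - 1) / nrb * (1 - alpha), with the error bar scaled by alpha's relative error. The fit values are made up.

fit = {'q0': {'fit': [1.0, 0.98, 0.0], 'fiterr': [0.0, 0.005, 0.0]}}
rb_pattern = [[0]]

for patterns in rb_pattern:
    for qubit in patterns:
        alpha = fit['q%d' % qubit]['fit'][1]
        alpha_err = fit['q%d' % qubit]['fiterr'][1]
        nrb = 2 ** len(patterns)
        epc = (nrb - 1) / nrb * (1 - alpha)      # 0.5 * 0.02 = 0.01
        epc_err = (alpha_err / alpha) * epc      # ~5.1e-05
        print(epc, epc_err)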
tomduck/pandoc-tablenos
pandoc_tablenos.py
https://github.com/tomduck/pandoc-tablenos/blob/b3c7b6a259eec5fb7c8420033d05b32640f1f266/pandoc_tablenos.py#L89-L107
def attach_attrs_table(key, value, fmt, meta): """Extracts attributes and attaches them to element.""" # We can't use attach_attrs_factory() because Table is a block-level element if key in ['Table']: assert len(value) == 5 caption = value[0] # caption, align, x, head, body # Set n to the index where the attributes start n = 0 while n < len(caption) and not \ (caption[n]['t'] == 'Str' and caption[n]['c'].startswith('{')): n += 1 try: attrs = extract_attrs(caption, n) value.insert(0, attrs) except (ValueError, IndexError): pass
[ "def", "attach_attrs_table", "(", "key", ",", "value", ",", "fmt", ",", "meta", ")", ":", "# We can't use attach_attrs_factory() because Table is a block-level element", "if", "key", "in", "[", "'Table'", "]", ":", "assert", "len", "(", "value", ")", "==", "5", "caption", "=", "value", "[", "0", "]", "# caption, align, x, head, body", "# Set n to the index where the attributes start", "n", "=", "0", "while", "n", "<", "len", "(", "caption", ")", "and", "not", "(", "caption", "[", "n", "]", "[", "'t'", "]", "==", "'Str'", "and", "caption", "[", "n", "]", "[", "'c'", "]", ".", "startswith", "(", "'{'", ")", ")", ":", "n", "+=", "1", "try", ":", "attrs", "=", "extract_attrs", "(", "caption", ",", "n", ")", "value", ".", "insert", "(", "0", ",", "attrs", ")", "except", "(", "ValueError", ",", "IndexError", ")", ":", "pass" ]
Extracts attributes and attaches them to element.
[ "Extracts", "attributes", "and", "attaches", "them", "to", "element", "." ]
python
train
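A small sketch of the caption scan in attach_attrs_table above: n is advanced past the caption inlines until the first Str element whose content starts with '{', which is where extract_attrs would begin reading attributes. The inline dicts follow pandoc's JSON AST shape.

caption = [
    {'t': 'Str', 'c': 'Revenue'},
    {'t': 'Space'},
    {'t': 'Str', 'c': 'by'},
    {'t': 'Space'},
    {'t': 'Str', 'c': 'quarter.'},
    {'t': 'Space'},
    {'t': 'Str', 'c': '{#tbl:revenue}'},
]
n = 0
while n < len(caption) and not \
        (caption[n]['t'] == 'Str' and caption[n]['c'].startswith('{')):
    n += 1
print(n)   # 6 -> attributes start at the last inline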
bcbio/bcbio-nextgen
bcbio/heterogeneity/theta.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/theta.py#L100-L120
def _run_theta(cnv_info, data, work_dir, run_n3=True): """Run theta, calculating subpopulations and normal contamination. """ out = {"caller": "theta"} max_normal = "0.9" opts = ["-m", max_normal] n2_result = _safe_run_theta(cnv_info["theta_input"], os.path.join(work_dir, "n2"), ".n2.results", ["-n", "2"] + opts, data) if n2_result: out["estimate"] = n2_result if run_n3: n2_bounds = "%s.withBounds" % os.path.splitext(n2_result)[0] n3_result = _safe_run_theta(n2_bounds, os.path.join(work_dir, "n3"), ".n3.results", ["-n", "3", "--RESULTS", n2_result] + opts, data) if n3_result: best_result = _select_model(n2_bounds, n2_result, n3_result, os.path.join(work_dir, "n3"), data) out["estimate"] = best_result out["cnvs"] = _merge_theta_calls(n2_bounds, best_result, cnv_info["vrn_file"], data) return out
[ "def", "_run_theta", "(", "cnv_info", ",", "data", ",", "work_dir", ",", "run_n3", "=", "True", ")", ":", "out", "=", "{", "\"caller\"", ":", "\"theta\"", "}", "max_normal", "=", "\"0.9\"", "opts", "=", "[", "\"-m\"", ",", "max_normal", "]", "n2_result", "=", "_safe_run_theta", "(", "cnv_info", "[", "\"theta_input\"", "]", ",", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"n2\"", ")", ",", "\".n2.results\"", ",", "[", "\"-n\"", ",", "\"2\"", "]", "+", "opts", ",", "data", ")", "if", "n2_result", ":", "out", "[", "\"estimate\"", "]", "=", "n2_result", "if", "run_n3", ":", "n2_bounds", "=", "\"%s.withBounds\"", "%", "os", ".", "path", ".", "splitext", "(", "n2_result", ")", "[", "0", "]", "n3_result", "=", "_safe_run_theta", "(", "n2_bounds", ",", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"n3\"", ")", ",", "\".n3.results\"", ",", "[", "\"-n\"", ",", "\"3\"", ",", "\"--RESULTS\"", ",", "n2_result", "]", "+", "opts", ",", "data", ")", "if", "n3_result", ":", "best_result", "=", "_select_model", "(", "n2_bounds", ",", "n2_result", ",", "n3_result", ",", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"n3\"", ")", ",", "data", ")", "out", "[", "\"estimate\"", "]", "=", "best_result", "out", "[", "\"cnvs\"", "]", "=", "_merge_theta_calls", "(", "n2_bounds", ",", "best_result", ",", "cnv_info", "[", "\"vrn_file\"", "]", ",", "data", ")", "return", "out" ]
Run theta, calculating subpopulations and normal contamination.
[ "Run", "theta", "calculating", "subpopulations", "and", "normal", "contamination", "." ]
python
train
fermiPy/fermipy
fermipy/gtanalysis.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/gtanalysis.py#L4818-L4872
def weight_map(self): """Return 3-D weights map for this component as a Map object. Returns ------- map : `~fermipy.skymap.MapBase` """ # EAC we need the try blocks b/c older versions of the ST don't have some of these functions if isinstance(self.like, gtutils.SummedLikelihood): cmap = self.like.components[0].logLike.countsMap() try: p_method = cmap.projection().method() except AttributeError: p_method = 0 try: if self.like.components[0].logLike.has_weights(): wmap = self.like.components[0].logLike.weightMap() else: wmap = None except Exception: wmap = None else: cmap = self.like.logLike.countsMap() try: p_method = cmap.projection().method() except AttributeError: p_method = 0 try: if self.like.logLike.has_weights(): wmap = self.like.logLike.weightMap() else: wmap = None except Exception: wmap = None if p_method == 0: # WCS if wmap is None: z = np.ones((self.enumbins, self.npix, self.npix)) else: z = wmap.model() z = np.array(z).reshape(self.enumbins, self.npix, self.npix) return WcsNDMap(copy.deepcopy(self._geom), z) elif p_method == 1: # HPX nhpix = np.max(self.geom.npix) if wmap is None: z = np.ones((self.enumbins, nhpix)) else: z = wmap.model() z = np.array(z).reshape(self.enumbins, nhpix) return HpxNDMap(self.geom, z) else: self.logger.error('Did not recognize CountsMap type %i' % p_method, exc_info=True) return None
[ "def", "weight_map", "(", "self", ")", ":", "# EAC we need the try blocks b/c older versions of the ST don't have some of these functions", "if", "isinstance", "(", "self", ".", "like", ",", "gtutils", ".", "SummedLikelihood", ")", ":", "cmap", "=", "self", ".", "like", ".", "components", "[", "0", "]", ".", "logLike", ".", "countsMap", "(", ")", "try", ":", "p_method", "=", "cmap", ".", "projection", "(", ")", ".", "method", "(", ")", "except", "AttributeError", ":", "p_method", "=", "0", "try", ":", "if", "self", ".", "like", ".", "components", "[", "0", "]", ".", "logLike", ".", "has_weights", "(", ")", ":", "wmap", "=", "self", ".", "like", ".", "components", "[", "0", "]", ".", "logLike", ".", "weightMap", "(", ")", "else", ":", "wmap", "=", "None", "except", "Exception", ":", "wmap", "=", "None", "else", ":", "cmap", "=", "self", ".", "like", ".", "logLike", ".", "countsMap", "(", ")", "try", ":", "p_method", "=", "cmap", ".", "projection", "(", ")", ".", "method", "(", ")", "except", "AttributeError", ":", "p_method", "=", "0", "try", ":", "if", "self", ".", "like", ".", "logLike", ".", "has_weights", "(", ")", ":", "wmap", "=", "self", ".", "like", ".", "logLike", ".", "weightMap", "(", ")", "else", ":", "wmap", "=", "None", "except", "Exception", ":", "wmap", "=", "None", "if", "p_method", "==", "0", ":", "# WCS", "if", "wmap", "is", "None", ":", "z", "=", "np", ".", "ones", "(", "(", "self", ".", "enumbins", ",", "self", ".", "npix", ",", "self", ".", "npix", ")", ")", "else", ":", "z", "=", "wmap", ".", "model", "(", ")", "z", "=", "np", ".", "array", "(", "z", ")", ".", "reshape", "(", "self", ".", "enumbins", ",", "self", ".", "npix", ",", "self", ".", "npix", ")", "return", "WcsNDMap", "(", "copy", ".", "deepcopy", "(", "self", ".", "_geom", ")", ",", "z", ")", "elif", "p_method", "==", "1", ":", "# HPX", "nhpix", "=", "np", ".", "max", "(", "self", ".", "geom", ".", "npix", ")", "if", "wmap", "is", "None", ":", "z", "=", "np", ".", "ones", "(", "(", "self", ".", "enumbins", ",", "nhpix", ")", ")", "else", ":", "z", "=", "wmap", ".", "model", "(", ")", "z", "=", "np", ".", "array", "(", "z", ")", ".", "reshape", "(", "self", ".", "enumbins", ",", "nhpix", ")", "return", "HpxNDMap", "(", "self", ".", "geom", ",", "z", ")", "else", ":", "self", ".", "logger", ".", "error", "(", "'Did not recognize CountsMap type %i'", "%", "p_method", ",", "exc_info", "=", "True", ")", "return", "None" ]
Return 3-D weights map for this component as a Map object. Returns ------- map : `~fermipy.skymap.MapBase`
[ "Return", "3", "-", "D", "weights", "map", "for", "this", "component", "as", "a", "Map", "object", "." ]
python
train
Microsoft/nni
src/sdk/pynni/nni/bohb_advisor/bohb_advisor.py
https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/src/sdk/pynni/nni/bohb_advisor/bohb_advisor.py#L215-L237
def get_hyperparameter_configurations(self, num, r, config_generator): """generate num hyperparameter configurations from search space using Bayesian optimization Parameters ---------- num: int the number of hyperparameter configurations Returns ------- list a list of hyperparameter configurations. Format: [[key1, value1], [key2, value2], ...] """ global _KEY assert self.i == 0 hyperparameter_configs = dict() for _ in range(num): params_id = create_bracket_parameter_id(self.s, self.i) params = config_generator.get_config(r) params[_KEY] = r hyperparameter_configs[params_id] = params self._record_hyper_configs(hyperparameter_configs) return [[key, value] for key, value in hyperparameter_configs.items()]
[ "def", "get_hyperparameter_configurations", "(", "self", ",", "num", ",", "r", ",", "config_generator", ")", ":", "global", "_KEY", "assert", "self", ".", "i", "==", "0", "hyperparameter_configs", "=", "dict", "(", ")", "for", "_", "in", "range", "(", "num", ")", ":", "params_id", "=", "create_bracket_parameter_id", "(", "self", ".", "s", ",", "self", ".", "i", ")", "params", "=", "config_generator", ".", "get_config", "(", "r", ")", "params", "[", "_KEY", "]", "=", "r", "hyperparameter_configs", "[", "params_id", "]", "=", "params", "self", ".", "_record_hyper_configs", "(", "hyperparameter_configs", ")", "return", "[", "[", "key", ",", "value", "]", "for", "key", ",", "value", "in", "hyperparameter_configs", ".", "items", "(", ")", "]" ]
generate num hyperparameter configurations from search space using Bayesian optimization Parameters ---------- num: int the number of hyperparameter configurations Returns ------- list a list of hyperparameter configurations. Format: [[key1, value1], [key2, value2], ...]
[ "generate", "num", "hyperparameter", "configurations", "from", "search", "space", "using", "Bayesian", "optimization" ]
python
train
KrishnaswamyLab/PHATE
Python/phate/phate.py
https://github.com/KrishnaswamyLab/PHATE/blob/346a4597dcfc523f8bef99bce482e677282b6719/Python/phate/phate.py#L813-L863
def calculate_potential(self, t=None, t_max=100, plot_optimal_t=False, ax=None): """Calculates the diffusion potential Parameters ---------- t : int power to which the diffusion operator is powered sets the level of diffusion t_max : int, default: 100 Maximum value of `t` to test plot_optimal_t : boolean, default: False If true, plots the Von Neumann Entropy and knee point ax : matplotlib.Axes, default: None If plot=True and ax is not None, plots the VNE on the given axis Otherwise, creates a new axis and displays the plot Returns ------- diff_potential : array-like, shape=[n_samples, n_samples] The diffusion potential fit on the input data """ if t is None: t = self.t if self.diff_potential is None: if t == 'auto': t = self.optimal_t(t_max=t_max, plot=plot_optimal_t, ax=ax) else: t = self.t tasklogger.log_start("diffusion potential") # diffused diffusion operator diff_op_t = np.linalg.matrix_power(self.diff_op, t) if self.gamma == 1: # handling small values diff_op_t = diff_op_t + 1e-7 self.diff_potential = -1 * np.log(diff_op_t) elif self.gamma == -1: self.diff_potential = diff_op_t else: c = (1 - self.gamma) / 2 self.diff_potential = ((diff_op_t)**c) / c tasklogger.log_complete("diffusion potential") elif plot_optimal_t: self.optimal_t(t_max=t_max, plot=plot_optimal_t, ax=ax) return self.diff_potential
[ "def", "calculate_potential", "(", "self", ",", "t", "=", "None", ",", "t_max", "=", "100", ",", "plot_optimal_t", "=", "False", ",", "ax", "=", "None", ")", ":", "if", "t", "is", "None", ":", "t", "=", "self", ".", "t", "if", "self", ".", "diff_potential", "is", "None", ":", "if", "t", "==", "'auto'", ":", "t", "=", "self", ".", "optimal_t", "(", "t_max", "=", "t_max", ",", "plot", "=", "plot_optimal_t", ",", "ax", "=", "ax", ")", "else", ":", "t", "=", "self", ".", "t", "tasklogger", ".", "log_start", "(", "\"diffusion potential\"", ")", "# diffused diffusion operator", "diff_op_t", "=", "np", ".", "linalg", ".", "matrix_power", "(", "self", ".", "diff_op", ",", "t", ")", "if", "self", ".", "gamma", "==", "1", ":", "# handling small values", "diff_op_t", "=", "diff_op_t", "+", "1e-7", "self", ".", "diff_potential", "=", "-", "1", "*", "np", ".", "log", "(", "diff_op_t", ")", "elif", "self", ".", "gamma", "==", "-", "1", ":", "self", ".", "diff_potential", "=", "diff_op_t", "else", ":", "c", "=", "(", "1", "-", "self", ".", "gamma", ")", "/", "2", "self", ".", "diff_potential", "=", "(", "(", "diff_op_t", ")", "**", "c", ")", "/", "c", "tasklogger", ".", "log_complete", "(", "\"diffusion potential\"", ")", "elif", "plot_optimal_t", ":", "self", ".", "optimal_t", "(", "t_max", "=", "t_max", ",", "plot", "=", "plot_optimal_t", ",", "ax", "=", "ax", ")", "return", "self", ".", "diff_potential" ]
Calculates the diffusion potential Parameters ---------- t : int power to which the diffusion operator is powered sets the level of diffusion t_max : int, default: 100 Maximum value of `t` to test plot_optimal_t : boolean, default: False If true, plots the Von Neumann Entropy and knee point ax : matplotlib.Axes, default: None If plot=True and ax is not None, plots the VNE on the given axis Otherwise, creates a new axis and displays the plot Returns ------- diff_potential : array-like, shape=[n_samples, n_samples] The diffusion potential fit on the input data
[ "Calculates", "the", "diffusion", "potential" ]
python
train
klen/muffin-redis
muffin_redis.py
https://github.com/klen/muffin-redis/blob/b0cb8c1ba1511d501c2084def156710e75aaf781/muffin_redis.py#L126-L133
def publish(self, channel, message): """Publish message to channel. :returns: a coroutine """ if self.cfg.jsonpickle: message = jsonpickle.encode(message) return self.conn.publish(channel, message)
[ "def", "publish", "(", "self", ",", "channel", ",", "message", ")", ":", "if", "self", ".", "cfg", ".", "jsonpickle", ":", "message", "=", "jsonpickle", ".", "encode", "(", "message", ")", "return", "self", ".", "conn", ".", "publish", "(", "channel", ",", "message", ")" ]
Publish message to channel. :returns: a coroutine
[ "Publish", "message", "to", "channel", "." ]
python
train
linkedin/naarad
lib/luminol/src/luminol/algorithms/correlator_algorithms/cross_correlator.py
https://github.com/linkedin/naarad/blob/261e2c0760fd6a6b0ee59064180bd8e3674311fe/lib/luminol/src/luminol/algorithms/correlator_algorithms/cross_correlator.py#L94-L108
def _find_first_bigger(self, timestamps, target, lower_bound, upper_bound): """ Find the first element in timestamps whose value is bigger than target. param list values: list of timestamps(epoch number). param target: target value. param lower_bound: lower bound for binary search. param upper_bound: upper bound for binary search. """ while lower_bound < upper_bound: pos = lower_bound + (upper_bound - lower_bound) / 2 if timestamps[pos] > target: upper_bound = pos else: lower_bound = pos + 1 return pos
[ "def", "_find_first_bigger", "(", "self", ",", "timestamps", ",", "target", ",", "lower_bound", ",", "upper_bound", ")", ":", "while", "lower_bound", "<", "upper_bound", ":", "pos", "=", "lower_bound", "+", "(", "upper_bound", "-", "lower_bound", ")", "/", "2", "if", "timestamps", "[", "pos", "]", ">", "target", ":", "upper_bound", "=", "pos", "else", ":", "lower_bound", "=", "pos", "+", "1", "return", "pos" ]
Find the first element in timestamps whose value is bigger than target. param list values: list of timestamps(epoch number). param target: target value. param lower_bound: lower bound for binary search. param upper_bound: upper bound for binary search.
[ "Find", "the", "first", "element", "in", "timestamps", "whose", "value", "is", "bigger", "than", "target", ".", "param", "list", "values", ":", "list", "of", "timestamps", "(", "epoch", "number", ")", ".", "param", "target", ":", "target", "value", ".", "param", "lower_bound", ":", "lower", "bound", "for", "binary", "search", ".", "param", "upper_bound", ":", "upper", "bound", "for", "binary", "search", "." ]
python
valid
log2timeline/dfvfs
dfvfs/helpers/windows_path_resolver.py
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/helpers/windows_path_resolver.py#L261-L273
def SetEnvironmentVariable(self, name, value): """Sets an environment variable in the Windows path helper. Args: name (str): name of the environment variable without enclosing %-characters, e.g. SystemRoot as in %SystemRoot%. value (str): value of the environment variable. """ if isinstance(value, py2to3.STRING_TYPES): value = self._PathStripPrefix(value) if value is not None: self._environment_variables[name.upper()] = value
[ "def", "SetEnvironmentVariable", "(", "self", ",", "name", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "py2to3", ".", "STRING_TYPES", ")", ":", "value", "=", "self", ".", "_PathStripPrefix", "(", "value", ")", "if", "value", "is", "not", "None", ":", "self", ".", "_environment_variables", "[", "name", ".", "upper", "(", ")", "]", "=", "value" ]
Sets an environment variable in the Windows path helper. Args: name (str): name of the environment variable without enclosing %-characters, e.g. SystemRoot as in %SystemRoot%. value (str): value of the environment variable.
[ "Sets", "an", "environment", "variable", "in", "the", "Windows", "path", "helper", "." ]
python
train
MIT-LCP/wfdb-python
wfdb/plot/plot.py
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/plot/plot.py#L11-L123
def plot_items(signal=None, ann_samp=None, ann_sym=None, fs=None, time_units='samples', sig_name=None, sig_units=None, ylabel=None, title=None, sig_style=[''], ann_style=['r*'], ecg_grids=[], figsize=None, return_fig=False): """ Subplot individual channels of signals and/or annotations. Parameters ---------- signal : 1d or 2d numpy array, optional The uniformly sampled signal to be plotted. If signal.ndim is 1, it is assumed to be a one channel signal. If it is 2, axes 0 and 1, must represent time and channel number respectively. ann_samp: list, optional A list of annotation locations to plot, with each list item corresponding to a different channel. List items may be: - 1d numpy array, with values representing sample indices. Empty arrays are skipped. - list, with values representing sample indices. Empty lists are skipped. - None. For channels in which nothing is to be plotted. If `signal` is defined, the annotation locations will be overlaid on the signals, with the list index corresponding to the signal channel. The length of `annotation` does not have to match the number of channels of `signal`. ann_sym: list, optional A list of annotation symbols to plot, with each list item corresponding to a different channel. List items should be lists of strings. The symbols are plotted over the corresponding `ann_samp` index locations. fs : int or float, optional The sampling frequency of the signals and/or annotations. Used to calculate time intervals if `time_units` is not 'samples'. Also required for plotting ecg grids. time_units : str, optional The x axis unit. Allowed options are: 'samples', 'seconds', 'minutes', and 'hours'. sig_name : list, optional A list of strings specifying the signal names. Used with `sig_units` to form y labels, if `ylabel` is not set. sig_units : list, optional A list of strings specifying the units of each signal channel. Used with `sig_name` to form y labels, if `ylabel` is not set. This parameter is required for plotting ecg grids. ylabel : list, optional A list of strings specifying the final y labels. If this option is present, `sig_name` and `sig_units` will not be used for labels. title : str, optional The title of the graph. sig_style : list, optional A list of strings, specifying the style of the matplotlib plot for each signal channel. The list length should match the number of signal channels. If the list has a length of 1, the style will be used for all channels. ann_style : list, optional A list of strings, specifying the style of the matplotlib plot for each annotation channel. If the list has a length of 1, the style will be used for all channels. ecg_grids : list, optional A list of integers specifying channels in which to plot ecg grids. May also be set to 'all' for all channels. Major grids at 0.5mV, and minor grids at 0.125mV. All channels to be plotted with grids must have `sig_units` equal to 'uV', 'mV', or 'V'. figsize : tuple, optional Tuple pair specifying the width, and height of the figure. It is the 'figsize' argument passed into matplotlib.pyplot's `figure` function. return_fig : bool, optional Whether the figure is to be returned as an output argument. Returns ------- figure : matplotlib figure, optional The matplotlib figure generated. Only returned if the 'return_fig' parameter is set to True. Examples -------- >>> record = wfdb.rdrecord('sample-data/100', sampto=3000) >>> ann = wfdb.rdann('sample-data/100', 'atr', sampto=3000) >>> wfdb.plot_items(signal=record.p_signal, annotation=[ann.sample, ann.sample], title='MIT-BIH Record 100', time_units='seconds', figsize=(10,4), ecg_grids='all') """ # Figure out number of subplots required sig_len, n_sig, n_annot, n_subplots = get_plot_dims(signal, ann_samp) # Create figure fig, axes = create_figure(n_subplots, figsize) if signal is not None: plot_signal(signal, sig_len, n_sig, fs, time_units, sig_style, axes) if ann_samp is not None: plot_annotation(ann_samp, n_annot, ann_sym, signal, n_sig, fs, time_units, ann_style, axes) if ecg_grids: plot_ecg_grids(ecg_grids, fs, sig_units, time_units, axes) # Add title and axis labels. label_figure(axes, n_subplots, time_units, sig_name, sig_units, ylabel, title) plt.show(fig) if return_fig: return fig
[ "def", "plot_items", "(", "signal", "=", "None", ",", "ann_samp", "=", "None", ",", "ann_sym", "=", "None", ",", "fs", "=", "None", ",", "time_units", "=", "'samples'", ",", "sig_name", "=", "None", ",", "sig_units", "=", "None", ",", "ylabel", "=", "None", ",", "title", "=", "None", ",", "sig_style", "=", "[", "''", "]", ",", "ann_style", "=", "[", "'r*'", "]", ",", "ecg_grids", "=", "[", "]", ",", "figsize", "=", "None", ",", "return_fig", "=", "False", ")", ":", "# Figure out number of subplots required", "sig_len", ",", "n_sig", ",", "n_annot", ",", "n_subplots", "=", "get_plot_dims", "(", "signal", ",", "ann_samp", ")", "# Create figure", "fig", ",", "axes", "=", "create_figure", "(", "n_subplots", ",", "figsize", ")", "if", "signal", "is", "not", "None", ":", "plot_signal", "(", "signal", ",", "sig_len", ",", "n_sig", ",", "fs", ",", "time_units", ",", "sig_style", ",", "axes", ")", "if", "ann_samp", "is", "not", "None", ":", "plot_annotation", "(", "ann_samp", ",", "n_annot", ",", "ann_sym", ",", "signal", ",", "n_sig", ",", "fs", ",", "time_units", ",", "ann_style", ",", "axes", ")", "if", "ecg_grids", ":", "plot_ecg_grids", "(", "ecg_grids", ",", "fs", ",", "sig_units", ",", "time_units", ",", "axes", ")", "# Add title and axis labels.", "label_figure", "(", "axes", ",", "n_subplots", ",", "time_units", ",", "sig_name", ",", "sig_units", ",", "ylabel", ",", "title", ")", "plt", ".", "show", "(", "fig", ")", "if", "return_fig", ":", "return", "fig" ]
Subplot individual channels of signals and/or annotations. Parameters ---------- signal : 1d or 2d numpy array, optional The uniformly sampled signal to be plotted. If signal.ndim is 1, it is assumed to be a one channel signal. If it is 2, axes 0 and 1, must represent time and channel number respectively. ann_samp: list, optional A list of annotation locations to plot, with each list item corresponding to a different channel. List items may be: - 1d numpy array, with values representing sample indices. Empty arrays are skipped. - list, with values representing sample indices. Empty lists are skipped. - None. For channels in which nothing is to be plotted. If `signal` is defined, the annotation locations will be overlaid on the signals, with the list index corresponding to the signal channel. The length of `annotation` does not have to match the number of channels of `signal`. ann_sym: list, optional A list of annotation symbols to plot, with each list item corresponding to a different channel. List items should be lists of strings. The symbols are plotted over the corresponding `ann_samp` index locations. fs : int or float, optional The sampling frequency of the signals and/or annotations. Used to calculate time intervals if `time_units` is not 'samples'. Also required for plotting ecg grids. time_units : str, optional The x axis unit. Allowed options are: 'samples', 'seconds', 'minutes', and 'hours'. sig_name : list, optional A list of strings specifying the signal names. Used with `sig_units` to form y labels, if `ylabel` is not set. sig_units : list, optional A list of strings specifying the units of each signal channel. Used with `sig_name` to form y labels, if `ylabel` is not set. This parameter is required for plotting ecg grids. ylabel : list, optional A list of strings specifying the final y labels. If this option is present, `sig_name` and `sig_units` will not be used for labels. title : str, optional The title of the graph. sig_style : list, optional A list of strings, specifying the style of the matplotlib plot for each signal channel. The list length should match the number of signal channels. If the list has a length of 1, the style will be used for all channels. ann_style : list, optional A list of strings, specifying the style of the matplotlib plot for each annotation channel. If the list has a length of 1, the style will be used for all channels. ecg_grids : list, optional A list of integers specifying channels in which to plot ecg grids. May also be set to 'all' for all channels. Major grids at 0.5mV, and minor grids at 0.125mV. All channels to be plotted with grids must have `sig_units` equal to 'uV', 'mV', or 'V'. figsize : tuple, optional Tuple pair specifying the width, and height of the figure. It is the 'figsize' argument passed into matplotlib.pyplot's `figure` function. return_fig : bool, optional Whether the figure is to be returned as an output argument. Returns ------- figure : matplotlib figure, optional The matplotlib figure generated. Only returned if the 'return_fig' parameter is set to True. Examples -------- >>> record = wfdb.rdrecord('sample-data/100', sampto=3000) >>> ann = wfdb.rdann('sample-data/100', 'atr', sampto=3000) >>> wfdb.plot_items(signal=record.p_signal, annotation=[ann.sample, ann.sample], title='MIT-BIH Record 100', time_units='seconds', figsize=(10,4), ecg_grids='all')
[ "Subplot", "individual", "channels", "of", "signals", "and", "/", "or", "annotations", "." ]
python
train
retr0h/git-url-parse
giturlparse/parser.py
https://github.com/retr0h/git-url-parse/blob/98a5377aa8c8f3b8896f277c5c81558749feef58/giturlparse/parser.py#L78-L106
def parse(self): """ Parses a GIT URL and returns an object. Raises an exception on invalid URL. :returns: Parsed object :raise: :class:`.ParserError` """ d = { 'pathname': None, 'protocols': self._get_protocols(), 'protocol': 'ssh', 'href': self._url, 'resource': None, 'user': None, 'port': None, 'name': None, 'owner': None, } for regex in POSSIBLE_REGEXES: match = regex.search(self._url) if match: d.update(match.groupdict()) break else: msg = "Invalid URL '{}'".format(self._url) raise ParserError(msg) return Parsed(**d)
[ "def", "parse", "(", "self", ")", ":", "d", "=", "{", "'pathname'", ":", "None", ",", "'protocols'", ":", "self", ".", "_get_protocols", "(", ")", ",", "'protocol'", ":", "'ssh'", ",", "'href'", ":", "self", ".", "_url", ",", "'resource'", ":", "None", ",", "'user'", ":", "None", ",", "'port'", ":", "None", ",", "'name'", ":", "None", ",", "'owner'", ":", "None", ",", "}", "for", "regex", "in", "POSSIBLE_REGEXES", ":", "match", "=", "regex", ".", "search", "(", "self", ".", "_url", ")", "if", "match", ":", "d", ".", "update", "(", "match", ".", "groupdict", "(", ")", ")", "break", "else", ":", "msg", "=", "\"Invalid URL '{}'\"", ".", "format", "(", "self", ".", "_url", ")", "raise", "ParserError", "(", "msg", ")", "return", "Parsed", "(", "*", "*", "d", ")" ]
Parses a GIT URL and returns an object. Raises an exception on invalid URL. :returns: Parsed object :raise: :class:`.ParserError`
[ "Parses", "a", "GIT", "URL", "and", "returns", "an", "object", ".", "Raises", "an", "exception", "on", "invalid", "URL", "." ]
python
train
google/flatbuffers
python/flatbuffers/table.py
https://github.com/google/flatbuffers/blob/6cc30b3272d79c85db7d4871ac0aa69541dc89de/python/flatbuffers/table.py#L32-L41
def Offset(self, vtableOffset): """Offset provides access into the Table's vtable. Deprecated fields are ignored by checking the vtable's length.""" vtable = self.Pos - self.Get(N.SOffsetTFlags, self.Pos) vtableEnd = self.Get(N.VOffsetTFlags, vtable) if vtableOffset < vtableEnd: return self.Get(N.VOffsetTFlags, vtable + vtableOffset) return 0
[ "def", "Offset", "(", "self", ",", "vtableOffset", ")", ":", "vtable", "=", "self", ".", "Pos", "-", "self", ".", "Get", "(", "N", ".", "SOffsetTFlags", ",", "self", ".", "Pos", ")", "vtableEnd", "=", "self", ".", "Get", "(", "N", ".", "VOffsetTFlags", ",", "vtable", ")", "if", "vtableOffset", "<", "vtableEnd", ":", "return", "self", ".", "Get", "(", "N", ".", "VOffsetTFlags", ",", "vtable", "+", "vtableOffset", ")", "return", "0" ]
Offset provides access into the Table's vtable. Deprecated fields are ignored by checking the vtable's length.
[ "Offset", "provides", "access", "into", "the", "Table", "s", "vtable", "." ]
python
train
iskandr/serializable
serializable/helpers.py
https://github.com/iskandr/serializable/blob/6807dfd582567b3bda609910806b7429d8d53b44/serializable/helpers.py#L188-L224
def from_serializable_dict(x): """ Reconstruct a dictionary by recursively reconstructing all its keys and values. This is the most hackish part since we rely on key names such as __name__, __class__, __module__ as metadata about how to reconstruct an object. TODO: It would be cleaner to always wrap each object in a layer of type metadata and then have an inner dictionary which represents the flattened result of to_dict() for user-defined objects. """ if "__name__" in x: return _lookup_value(x.pop("__module__"), x.pop("__name__")) non_string_key_objects = [ from_json(serialized_key) for serialized_key in x.pop(SERIALIZED_DICTIONARY_KEYS_FIELD, []) ] converted_dict = type(x)() for k, v in x.items(): serialized_key_index = parse_serialized_keys_index(k) if serialized_key_index is not None: k = non_string_key_objects[serialized_key_index] converted_dict[k] = from_serializable_repr(v) if "__class__" in converted_dict: class_object = converted_dict.pop("__class__") if "__value__" in converted_dict: return class_object(converted_dict["__value__"]) elif hasattr(class_object, "from_dict"): return class_object.from_dict(converted_dict) else: return class_object(**converted_dict) return converted_dict
[ "def", "from_serializable_dict", "(", "x", ")", ":", "if", "\"__name__\"", "in", "x", ":", "return", "_lookup_value", "(", "x", ".", "pop", "(", "\"__module__\"", ")", ",", "x", ".", "pop", "(", "\"__name__\"", ")", ")", "non_string_key_objects", "=", "[", "from_json", "(", "serialized_key", ")", "for", "serialized_key", "in", "x", ".", "pop", "(", "SERIALIZED_DICTIONARY_KEYS_FIELD", ",", "[", "]", ")", "]", "converted_dict", "=", "type", "(", "x", ")", "(", ")", "for", "k", ",", "v", "in", "x", ".", "items", "(", ")", ":", "serialized_key_index", "=", "parse_serialized_keys_index", "(", "k", ")", "if", "serialized_key_index", "is", "not", "None", ":", "k", "=", "non_string_key_objects", "[", "serialized_key_index", "]", "converted_dict", "[", "k", "]", "=", "from_serializable_repr", "(", "v", ")", "if", "\"__class__\"", "in", "converted_dict", ":", "class_object", "=", "converted_dict", ".", "pop", "(", "\"__class__\"", ")", "if", "\"__value__\"", "in", "converted_dict", ":", "return", "class_object", "(", "converted_dict", "[", "\"__value__\"", "]", ")", "elif", "hasattr", "(", "class_object", ",", "\"from_dict\"", ")", ":", "return", "class_object", ".", "from_dict", "(", "converted_dict", ")", "else", ":", "return", "class_object", "(", "*", "*", "converted_dict", ")", "return", "converted_dict" ]
Reconstruct a dictionary by recursively reconstructing all its keys and values. This is the most hackish part since we rely on key names such as __name__, __class__, __module__ as metadata about how to reconstruct an object. TODO: It would be cleaner to always wrap each object in a layer of type metadata and then have an inner dictionary which represents the flattened result of to_dict() for user-defined objects.
[ "Reconstruct", "a", "dictionary", "by", "recursively", "reconstructing", "all", "its", "keys", "and", "values", "." ]
python
train
openai/baselines
baselines/common/tf_util.py
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/tf_util.py#L404-L416
def _check_shape(placeholder_shape, data_shape): ''' check if two shapes are compatible (i.e. differ only by dimensions of size 1, or by the batch dimension)''' return True squeezed_placeholder_shape = _squeeze_shape(placeholder_shape) squeezed_data_shape = _squeeze_shape(data_shape) for i, s_data in enumerate(squeezed_data_shape): s_placeholder = squeezed_placeholder_shape[i] if s_placeholder != -1 and s_data != s_placeholder: return False return True
[ "def", "_check_shape", "(", "placeholder_shape", ",", "data_shape", ")", ":", "return", "True", "squeezed_placeholder_shape", "=", "_squeeze_shape", "(", "placeholder_shape", ")", "squeezed_data_shape", "=", "_squeeze_shape", "(", "data_shape", ")", "for", "i", ",", "s_data", "in", "enumerate", "(", "squeezed_data_shape", ")", ":", "s_placeholder", "=", "squeezed_placeholder_shape", "[", "i", "]", "if", "s_placeholder", "!=", "-", "1", "and", "s_data", "!=", "s_placeholder", ":", "return", "False", "return", "True" ]
check if two shapes are compatible (i.e. differ only by dimensions of size 1, or by the batch dimension)
[ "check", "if", "two", "shapes", "are", "compatible", "(", "i", ".", "e", ".", "differ", "only", "by", "dimensions", "of", "size", "1", "or", "by", "the", "batch", "dimension", ")" ]
python
valid
timothyb0912/pylogit
pylogit/mixed_logit.py
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/mixed_logit.py#L358-L375
def convenience_calc_fisher_approx(self, params): """ Calculates the BHHH approximation of the Fisher Information Matrix for this model / dataset. Note that this function name is INCORRECT with regard to the actual actions performed. The Mixed Logit model uses a placeholder for the BHHH approximation of the Fisher Information Matrix because the BHHH approximation is already being used to approximate the hessian. This placeholder allows calculation of a value for the 'robust' standard errors, even though such a value is not useful since it is not correct... """ shapes, intercepts, betas = self.convenience_split_params(params) placeholder_bhhh = np.diag(-1 * np.ones(betas.shape[0])) return placeholder_bhhh
[ "def", "convenience_calc_fisher_approx", "(", "self", ",", "params", ")", ":", "shapes", ",", "intercepts", ",", "betas", "=", "self", ".", "convenience_split_params", "(", "params", ")", "placeholder_bhhh", "=", "np", ".", "diag", "(", "-", "1", "*", "np", ".", "ones", "(", "betas", ".", "shape", "[", "0", "]", ")", ")", "return", "placeholder_bhhh" ]
Calculates the BHHH approximation of the Fisher Information Matrix for this model / dataset. Note that this function name is INCORRECT with regard to the actual actions performed. The Mixed Logit model uses a placeholder for the BHHH approximation of the Fisher Information Matrix because the BHHH approximation is already being used to approximate the hessian. This placeholder allows calculation of a value for the 'robust' standard errors, even though such a value is not useful since it is not correct...
[ "Calculates", "the", "BHHH", "approximation", "of", "the", "Fisher", "Information", "Matrix", "for", "this", "model", "/", "dataset", ".", "Note", "that", "this", "function", "name", "is", "INCORRECT", "with", "regard", "to", "the", "actual", "actions", "performed", ".", "The", "Mixed", "Logit", "model", "uses", "a", "placeholder", "for", "the", "BHHH", "approximation", "of", "the", "Fisher", "Information", "Matrix", "because", "the", "BHHH", "approximation", "is", "already", "being", "used", "to", "approximate", "the", "hessian", "." ]
python
train
portfors-lab/sparkle
sparkle/gui/plotting/pyqtgraph_widgets.py
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/plotting/pyqtgraph_widgets.py#L403-L412
def fromFile(self, fname): """Displays a spectrogram of an audio file. Supported formats see :func:`sparkle.tools.audiotools.audioread` :param fname: file path of the audiofile to display :type fname: str :returns: float -- duration of audio recording (seconds) """ spec, f, bins, dur = audiotools.spectrogram(fname, **self.specgramArgs) self.updateImage(spec, bins, f) return dur
[ "def", "fromFile", "(", "self", ",", "fname", ")", ":", "spec", ",", "f", ",", "bins", ",", "dur", "=", "audiotools", ".", "spectrogram", "(", "fname", ",", "*", "*", "self", ".", "specgramArgs", ")", "self", ".", "updateImage", "(", "spec", ",", "bins", ",", "f", ")", "return", "dur" ]
Displays a spectrogram of an audio file. Supported formats see :func:`sparkle.tools.audiotools.audioread` :param fname: file path of the audiofile to display :type fname: str :returns: float -- duration of audio recording (seconds)
[ "Displays", "a", "spectrogram", "of", "an", "audio", "file", ".", "Supported", "formats", "see", ":", "func", ":", "sparkle", ".", "tools", ".", "audiotools", ".", "audioread" ]
python
train
jaraco/hgtools
hgtools/versioning.py
https://github.com/jaraco/hgtools/blob/bf5fe2324e5ae15e012487f95f0c97c3775c5d2e/hgtools/versioning.py#L111-L122
def get_tagged_version(self): """ Get the version of the local working set as a StrictVersion or None if no viable tag exists. If the local working set is itself the tagged commit and the tip and there are no local modifications, use the tag on the parent changeset. """ tags = list(self.get_tags()) if 'tip' in tags and not self.is_modified(): tags = self.get_parent_tags('tip') versions = self.__versions_from_tags(tags) return self.__best_version(versions)
[ "def", "get_tagged_version", "(", "self", ")", ":", "tags", "=", "list", "(", "self", ".", "get_tags", "(", ")", ")", "if", "'tip'", "in", "tags", "and", "not", "self", ".", "is_modified", "(", ")", ":", "tags", "=", "self", ".", "get_parent_tags", "(", "'tip'", ")", "versions", "=", "self", ".", "__versions_from_tags", "(", "tags", ")", "return", "self", ".", "__best_version", "(", "versions", ")" ]
Get the version of the local working set as a StrictVersion or None if no viable tag exists. If the local working set is itself the tagged commit and the tip and there are no local modifications, use the tag on the parent changeset.
[ "Get", "the", "version", "of", "the", "local", "working", "set", "as", "a", "StrictVersion", "or", "None", "if", "no", "viable", "tag", "exists", ".", "If", "the", "local", "working", "set", "is", "itself", "the", "tagged", "commit", "and", "the", "tip", "and", "there", "are", "no", "local", "modifications", "use", "the", "tag", "on", "the", "parent", "changeset", "." ]
python
train
apache/spark
python/pyspark/mllib/linalg/__init__.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L297-L316
def parse(s): """ Parse string representation back into the DenseVector. >>> DenseVector.parse(' [ 0.0,1.0,2.0, 3.0]') DenseVector([0.0, 1.0, 2.0, 3.0]) """ start = s.find('[') if start == -1: raise ValueError("Array should start with '['.") end = s.find(']') if end == -1: raise ValueError("Array should end with ']'.") s = s[start + 1: end] try: values = [float(val) for val in s.split(',') if val] except ValueError: raise ValueError("Unable to parse values from %s" % s) return DenseVector(values)
[ "def", "parse", "(", "s", ")", ":", "start", "=", "s", ".", "find", "(", "'['", ")", "if", "start", "==", "-", "1", ":", "raise", "ValueError", "(", "\"Array should start with '['.\"", ")", "end", "=", "s", ".", "find", "(", "']'", ")", "if", "end", "==", "-", "1", ":", "raise", "ValueError", "(", "\"Array should end with ']'.\"", ")", "s", "=", "s", "[", "start", "+", "1", ":", "end", "]", "try", ":", "values", "=", "[", "float", "(", "val", ")", "for", "val", "in", "s", ".", "split", "(", "','", ")", "if", "val", "]", "except", "ValueError", ":", "raise", "ValueError", "(", "\"Unable to parse values from %s\"", "%", "s", ")", "return", "DenseVector", "(", "values", ")" ]
Parse string representation back into the DenseVector. >>> DenseVector.parse(' [ 0.0,1.0,2.0, 3.0]') DenseVector([0.0, 1.0, 2.0, 3.0])
[ "Parse", "string", "representation", "back", "into", "the", "DenseVector", "." ]
python
train
cdeboever3/cdpybio
cdpybio/analysis.py
https://github.com/cdeboever3/cdpybio/blob/38efdf0e11d01bc00a135921cb91a19c03db5d5c/cdpybio/analysis.py#L704-L753
def plot_variance_explained(self, cumulative=False, xtick_start=1, xtick_spacing=1, num_pc=None): """ Plot amount of variance explained by each principal component. Parameters ---------- num_pc : int Number of principal components to plot. If None, plot all. cumulative : bool If True, include cumulative variance. xtick_start : int The first principal component to label on the x-axis. xtick_spacing : int The spacing between labels on the x-axis. """ import matplotlib.pyplot as plt from numpy import arange if num_pc: s_norm = self.s_norm[0:num_pc] else: s_norm = self.s_norm if cumulative: s_cumsum = s_norm.cumsum() plt.bar(range(s_cumsum.shape[0]), s_cumsum.values, label='Cumulative', color=(0.17254901960784313, 0.6274509803921569, 0.17254901960784313)) plt.bar(range(s_norm.shape[0]), s_norm.values, label='Per PC', color=(0.12156862745098039, 0.4666666666666667, 0.7058823529411765)) plt.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.ylabel('Variance') else: plt.bar(range(s_norm.shape[0]), s_norm.values, color=(0.12156862745098039, 0.4666666666666667, 0.7058823529411765)) plt.ylabel('Proportion variance explained') plt.xlabel('PC') plt.xlim(0, s_norm.shape[0]) tick_locs = arange(xtick_start - 1, s_norm.shape[0], step=xtick_spacing) # 0.8 is the width of the bars. tick_locs = tick_locs + 0.4 plt.xticks(tick_locs, arange(xtick_start, s_norm.shape[0] + 1, xtick_spacing))
[ "def", "plot_variance_explained", "(", "self", ",", "cumulative", "=", "False", ",", "xtick_start", "=", "1", ",", "xtick_spacing", "=", "1", ",", "num_pc", "=", "None", ")", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "from", "numpy", "import", "arange", "if", "num_pc", ":", "s_norm", "=", "self", ".", "s_norm", "[", "0", ":", "num_pc", "]", "else", ":", "s_norm", "=", "self", ".", "s_norm", "if", "cumulative", ":", "s_cumsum", "=", "s_norm", ".", "cumsum", "(", ")", "plt", ".", "bar", "(", "range", "(", "s_cumsum", ".", "shape", "[", "0", "]", ")", ",", "s_cumsum", ".", "values", ",", "label", "=", "'Cumulative'", ",", "color", "=", "(", "0.17254901960784313", ",", "0.6274509803921569", ",", "0.17254901960784313", ")", ")", "plt", ".", "bar", "(", "range", "(", "s_norm", ".", "shape", "[", "0", "]", ")", ",", "s_norm", ".", "values", ",", "label", "=", "'Per PC'", ",", "color", "=", "(", "0.12156862745098039", ",", "0.4666666666666667", ",", "0.7058823529411765", ")", ")", "plt", ".", "legend", "(", "loc", "=", "'center left'", ",", "bbox_to_anchor", "=", "(", "1", ",", "0.5", ")", ")", "plt", ".", "ylabel", "(", "'Variance'", ")", "else", ":", "plt", ".", "bar", "(", "range", "(", "s_norm", ".", "shape", "[", "0", "]", ")", ",", "s_norm", ".", "values", ",", "color", "=", "(", "0.12156862745098039", ",", "0.4666666666666667", ",", "0.7058823529411765", ")", ")", "plt", ".", "ylabel", "(", "'Proportion variance explained'", ")", "plt", ".", "xlabel", "(", "'PC'", ")", "plt", ".", "xlim", "(", "0", ",", "s_norm", ".", "shape", "[", "0", "]", ")", "tick_locs", "=", "arange", "(", "xtick_start", "-", "1", ",", "s_norm", ".", "shape", "[", "0", "]", ",", "step", "=", "xtick_spacing", ")", "# 0.8 is the width of the bars.", "tick_locs", "=", "tick_locs", "+", "0.4", "plt", ".", "xticks", "(", "tick_locs", ",", "arange", "(", "xtick_start", ",", "s_norm", ".", "shape", "[", "0", "]", "+", "1", ",", "xtick_spacing", ")", ")" ]
Plot amount of variance explained by each principal component. Parameters ---------- num_pc : int Number of principal components to plot. If None, plot all. cumulative : bool If True, include cumulative variance. xtick_start : int The first principal component to label on the x-axis. xtick_spacing : int The spacing between labels on the x-axis.
[ "Plot", "amount", "of", "variance", "explained", "by", "each", "principal", "component", ".", "Parameters", "----------", "num_pc", ":", "int", "Number", "of", "principal", "components", "to", "plot", ".", "If", "None", "plot", "all", ".", "cumulative", ":", "bool", "If", "True", "include", "cumulative", "variance", ".", "xtick_start", ":", "int", "The", "first", "principal", "component", "to", "label", "on", "the", "x", "-", "axis", ".", "xtick_spacing", ":", "int", "The", "spacing", "between", "labels", "on", "the", "x", "-", "axis", "." ]
python
train
jason-weirather/py-seq-tools
seqtools/align.py
https://github.com/jason-weirather/py-seq-tools/blob/f642c2c73ffef2acc83656a78059a476fc734ca1/seqtools/align.py#L363-L398
def get_SAM(self,min_intron_size=68): """Get a SAM object representation of the alignment. :returns: SAM representation :rtype: SAM """ from seqtools.format.sam import SAM #ar is target then query qname = self.alignment_ranges[0][1].chr flag = 0 if self.strand == '-': flag = 16 rname = self.alignment_ranges[0][0].chr pos = self.alignment_ranges[0][0].start mapq = 255 cigar = self.construct_cigar(min_intron_size) rnext = '*' pnext = 0 tlen = 0 # possible to set if we have a reference if self._options.reference: if rname in self._options.reference: tlen = len(self._options.reference[rname]) seq = self.query_sequence if not seq: seq = '*' qual = self.query_quality if not qual: qual = '*' #seq = '*' #qual = '*' if self.strand == '-': seq = rc(seq) qual = qual[::-1] ln = qname + "\t" + str(flag) + "\t" + rname + "\t" + \ str(pos) + "\t" + str(mapq) + "\t" + cigar + "\t" + \ rnext + "\t" + str(pnext) + "\t" + str(tlen) + "\t" + \ seq + "\t" + qual return SAM(ln,reference=self._reference)
[ "def", "get_SAM", "(", "self", ",", "min_intron_size", "=", "68", ")", ":", "from", "seqtools", ".", "format", ".", "sam", "import", "SAM", "#ar is target then query", "qname", "=", "self", ".", "alignment_ranges", "[", "0", "]", "[", "1", "]", ".", "chr", "flag", "=", "0", "if", "self", ".", "strand", "==", "'-'", ":", "flag", "=", "16", "rname", "=", "self", ".", "alignment_ranges", "[", "0", "]", "[", "0", "]", ".", "chr", "pos", "=", "self", ".", "alignment_ranges", "[", "0", "]", "[", "0", "]", ".", "start", "mapq", "=", "255", "cigar", "=", "self", ".", "construct_cigar", "(", "min_intron_size", ")", "rnext", "=", "'*'", "pnext", "=", "0", "tlen", "=", "0", "# possible to set if we have a reference", "if", "self", ".", "_options", ".", "reference", ":", "if", "rname", "in", "self", ".", "_options", ".", "reference", ":", "tlen", "=", "len", "(", "self", ".", "_options", ".", "reference", "[", "rname", "]", ")", "seq", "=", "self", ".", "query_sequence", "if", "not", "seq", ":", "seq", "=", "'*'", "qual", "=", "self", ".", "query_quality", "if", "not", "qual", ":", "qual", "=", "'*'", "#seq = '*'", "#qual = '*'", "if", "self", ".", "strand", "==", "'-'", ":", "seq", "=", "rc", "(", "seq", ")", "qual", "=", "qual", "[", ":", ":", "-", "1", "]", "ln", "=", "qname", "+", "\"\\t\"", "+", "str", "(", "flag", ")", "+", "\"\\t\"", "+", "rname", "+", "\"\\t\"", "+", "str", "(", "pos", ")", "+", "\"\\t\"", "+", "str", "(", "mapq", ")", "+", "\"\\t\"", "+", "cigar", "+", "\"\\t\"", "+", "rnext", "+", "\"\\t\"", "+", "str", "(", "pnext", ")", "+", "\"\\t\"", "+", "str", "(", "tlen", ")", "+", "\"\\t\"", "+", "seq", "+", "\"\\t\"", "+", "qual", "return", "SAM", "(", "ln", ",", "reference", "=", "self", ".", "_reference", ")" ]
Get a SAM object representation of the alignment. :returns: SAM representation :rtype: SAM
[ "Get", "a", "SAM", "object", "representation", "of", "the", "alignment", "." ]
python
train
Fantomas42/django-blog-zinnia
zinnia/views/mixins/entry_protection.py
https://github.com/Fantomas42/django-blog-zinnia/blob/b4949304b104a8e1a7a7a0773cbfd024313c3a15/zinnia/views/mixins/entry_protection.py#L57-L73
def post(self, request, *args, **kwargs): """ Do the login and password protection. """ self.object = self.get_object() self.login() if self.object.password: entry_password = self.request.POST.get('entry_password') if entry_password: if entry_password == self.object.password: self.request.session[self.session_key % self.object.pk] = self.object.password return self.get(request, *args, **kwargs) else: self.error = True return self.password() return self.get(request, *args, **kwargs)
[ "def", "post", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "object", "=", "self", ".", "get_object", "(", ")", "self", ".", "login", "(", ")", "if", "self", ".", "object", ".", "password", ":", "entry_password", "=", "self", ".", "request", ".", "POST", ".", "get", "(", "'entry_password'", ")", "if", "entry_password", ":", "if", "entry_password", "==", "self", ".", "object", ".", "password", ":", "self", ".", "request", ".", "session", "[", "self", ".", "session_key", "%", "self", ".", "object", ".", "pk", "]", "=", "self", ".", "object", ".", "password", "return", "self", ".", "get", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "self", ".", "error", "=", "True", "return", "self", ".", "password", "(", ")", "return", "self", ".", "get", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Do the login and password protection.
[ "Do", "the", "login", "and", "password", "protection", "." ]
python
train
vertexproject/synapse
synapse/lib/chop.py
https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/lib/chop.py#L72-L78
def tags(norm): ''' Divide a normalized tag string into hierarchical layers. ''' # this is ugly for speed.... parts = norm.split('.') return ['.'.join(parts[:i]) for i in range(1, len(parts) + 1)]
[ "def", "tags", "(", "norm", ")", ":", "# this is ugly for speed....", "parts", "=", "norm", ".", "split", "(", "'.'", ")", "return", "[", "'.'", ".", "join", "(", "parts", "[", ":", "i", "]", ")", "for", "i", "in", "range", "(", "1", ",", "len", "(", "parts", ")", "+", "1", ")", "]" ]
Divide a normalized tag string into hierarchical layers.
[ "Divide", "a", "normalized", "tag", "string", "into", "hierarchical", "layers", "." ]
python
train
Parisson/TimeSide
timeside/server/models.py
https://github.com/Parisson/TimeSide/blob/0618d75cd2f16021afcfd3d5b77f692adad76ea5/timeside/server/models.py#L184-L190
def get_uri(self): """Return the Item source""" if self.source_file and os.path.exists(self.source_file.path): return self.source_file.path elif self.source_url: return self.source_url return None
[ "def", "get_uri", "(", "self", ")", ":", "if", "self", ".", "source_file", "and", "os", ".", "path", ".", "exists", "(", "self", ".", "source_file", ".", "path", ")", ":", "return", "self", ".", "source_file", ".", "path", "elif", "self", ".", "source_url", ":", "return", "self", ".", "source_url", "return", "None" ]
Return the Item source
[ "Return", "the", "Item", "source" ]
python
train
bitesofcode/projexui
projexui/widgets/xtreewidget/xtreewidgetdelegate.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xtreewidget/xtreewidgetdelegate.py#L265-L299
def drawGrid(self, painter, opt, rect, index): """ Draws the grid lines for this delegate. :param painter | <QtGui.QPainter> opt | <QtGui.QStyleOptionItem> rect | <QtCore.QRect> index | <QtGui.QModelIndex> """ if not self.showGrid(): return painter.setBrush(QtCore.Qt.NoBrush) painter.setPen(self.gridPen()) size = self.gridPen().width() + 1 # draw the lines lines = [] # add the column line if self.showGridColumns(): lines.append(QtCore.QLine(rect.width() - size, 0, rect.width() - size, rect.height() - size)) # add the row line if (self.showGridRows()): lines.append(QtCore.QLine(0, rect.height() - size, rect.width() - size, rect.height() - size)) painter.drawLines(lines)
[ "def", "drawGrid", "(", "self", ",", "painter", ",", "opt", ",", "rect", ",", "index", ")", ":", "if", "not", "self", ".", "showGrid", "(", ")", ":", "return", "painter", ".", "setBrush", "(", "QtCore", ".", "Qt", ".", "NoBrush", ")", "painter", ".", "setPen", "(", "self", ".", "gridPen", "(", ")", ")", "size", "=", "self", ".", "gridPen", "(", ")", ".", "width", "(", ")", "+", "1", "# draw the lines\r", "lines", "=", "[", "]", "# add the column line\r", "if", "self", ".", "showGridColumns", "(", ")", ":", "lines", ".", "append", "(", "QtCore", ".", "QLine", "(", "rect", ".", "width", "(", ")", "-", "size", ",", "0", ",", "rect", ".", "width", "(", ")", "-", "size", ",", "rect", ".", "height", "(", ")", "-", "size", ")", ")", "# add the row line\r", "if", "(", "self", ".", "showGridRows", "(", ")", ")", ":", "lines", ".", "append", "(", "QtCore", ".", "QLine", "(", "0", ",", "rect", ".", "height", "(", ")", "-", "size", ",", "rect", ".", "width", "(", ")", "-", "size", ",", "rect", ".", "height", "(", ")", "-", "size", ")", ")", "painter", ".", "drawLines", "(", "lines", ")" ]
Draws the grid lines for this delegate. :param painter | <QtGui.QPainter> opt | <QtGui.QStyleOptionItem> rect | <QtCore.QRect> index | <QtGui.QModelIndex>
[ "Draws", "the", "grid", "lines", "for", "this", "delegate", ".", ":", "param", "painter", "|", "<QtGui", ".", "QPainter", ">", "opt", "|", "<QtGui", ".", "QStyleOptionItem", ">", "rect", "|", "<QtCore", ".", "QRect", ">", "index", "|", "<QtGui", ".", "QModelIndex", ">" ]
python
train
metagriffin/fso
fso/filesystemoverlay.py
https://github.com/metagriffin/fso/blob/c37701fbfdfde359a2044eb9420abe569a7b35e4/fso/filesystemoverlay.py#L374-L379
def _lexists(self, path): '''IMPORTANT: expects `path` to already be deref()'erenced.''' try: return bool(self._lstat(path)) except os.error: return False
[ "def", "_lexists", "(", "self", ",", "path", ")", ":", "try", ":", "return", "bool", "(", "self", ".", "_lstat", "(", "path", ")", ")", "except", "os", ".", "error", ":", "return", "False" ]
IMPORTANT: expects `path` to already be deref()'erenced.
[ "IMPORTANT", ":", "expects", "path", "to", "already", "be", "deref", "()", "erenced", "." ]
python
valid
deschler/django-modeltranslation
modeltranslation/translator.py
https://github.com/deschler/django-modeltranslation/blob/18fec04a5105cbd83fc3759f4fda20135b3a848c/modeltranslation/translator.py#L390-L428
def register(self, model_or_iterable, opts_class=None, **options): """ Registers the given model(s) with the given translation options. The model(s) should be Model classes, not instances. Fields declared for translation on a base class are inherited by subclasses. If the model or one of its subclasses is already registered for translation, this will raise an exception. """ if isinstance(model_or_iterable, ModelBase): model_or_iterable = [model_or_iterable] for model in model_or_iterable: # Ensure that a base is not registered after a subclass (_registry # is closed with respect to taking bases, so we can just check if # we've seen the model). if model in self._registry: if self._registry[model].registered: raise AlreadyRegistered( 'Model "%s" is already registered for translation' % model.__name__) else: descendants = [d.__name__ for d in self._registry.keys() if issubclass(d, model) and d != model] if descendants: raise DescendantRegistered( 'Model "%s" cannot be registered after its subclass' ' "%s"' % (model.__name__, descendants[0])) # Find inherited fields and create options instance for the model. opts = self._get_options_for_model(model, opts_class, **options) # If an exception is raised during registration, mark model as not-registered try: self._register_single_model(model, opts) except Exception: self._registry[model].registered = False raise
[ "def", "register", "(", "self", ",", "model_or_iterable", ",", "opts_class", "=", "None", ",", "*", "*", "options", ")", ":", "if", "isinstance", "(", "model_or_iterable", ",", "ModelBase", ")", ":", "model_or_iterable", "=", "[", "model_or_iterable", "]", "for", "model", "in", "model_or_iterable", ":", "# Ensure that a base is not registered after a subclass (_registry", "# is closed with respect to taking bases, so we can just check if", "# we've seen the model).", "if", "model", "in", "self", ".", "_registry", ":", "if", "self", ".", "_registry", "[", "model", "]", ".", "registered", ":", "raise", "AlreadyRegistered", "(", "'Model \"%s\" is already registered for translation'", "%", "model", ".", "__name__", ")", "else", ":", "descendants", "=", "[", "d", ".", "__name__", "for", "d", "in", "self", ".", "_registry", ".", "keys", "(", ")", "if", "issubclass", "(", "d", ",", "model", ")", "and", "d", "!=", "model", "]", "if", "descendants", ":", "raise", "DescendantRegistered", "(", "'Model \"%s\" cannot be registered after its subclass'", "' \"%s\"'", "%", "(", "model", ".", "__name__", ",", "descendants", "[", "0", "]", ")", ")", "# Find inherited fields and create options instance for the model.", "opts", "=", "self", ".", "_get_options_for_model", "(", "model", ",", "opts_class", ",", "*", "*", "options", ")", "# If an exception is raised during registration, mark model as not-registered", "try", ":", "self", ".", "_register_single_model", "(", "model", ",", "opts", ")", "except", "Exception", ":", "self", ".", "_registry", "[", "model", "]", ".", "registered", "=", "False", "raise" ]
Registers the given model(s) with the given translation options. The model(s) should be Model classes, not instances. Fields declared for translation on a base class are inherited by subclasses. If the model or one of its subclasses is already registered for translation, this will raise an exception.
[ "Registers", "the", "given", "model", "(", "s", ")", "with", "the", "given", "translation", "options", "." ]
python
train
StackStorm/pybind
pybind/slxos/v17r_1_01a/isis_state/router_isis_config/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17r_1_01a/isis_state/router_isis_config/__init__.py#L1219-L1242
def _set_l2_spf_timer(self, v, load=False): """ Setter method for l2_spf_timer, mapped from YANG variable /isis_state/router_isis_config/l2_spf_timer (container) If this variable is read-only (config: false) in the source YANG file, then _set_l2_spf_timer is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_l2_spf_timer() directly. YANG Description: Timer for IS-IS Level-2 SPF calculation for IPv4 """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=l2_spf_timer.l2_spf_timer, is_container='container', presence=False, yang_name="l2-spf-timer", rest_name="l2-spf-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-spf-timer-l2', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """l2_spf_timer must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=l2_spf_timer.l2_spf_timer, is_container='container', presence=False, yang_name="l2-spf-timer", rest_name="l2-spf-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-spf-timer-l2', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)""", }) self.__l2_spf_timer = t if hasattr(self, '_set'): self._set()
[ "def", "_set_l2_spf_timer", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "l2_spf_timer", ".", "l2_spf_timer", ",", "is_container", "=", "'container'", ",", "presence", "=", "False", ",", "yang_name", "=", "\"l2-spf-timer\"", ",", "rest_name", "=", "\"l2-spf-timer\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'callpoint'", ":", "u'isis-spf-timer-l2'", ",", "u'cli-suppress-show-path'", ":", "None", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-isis-operational'", ",", "defining_module", "=", "'brocade-isis-operational'", ",", "yang_type", "=", "'container'", ",", "is_config", "=", "False", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"l2_spf_timer must be of a type compatible with container\"\"\"", ",", "'defined-type'", ":", "\"container\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=l2_spf_timer.l2_spf_timer, is_container='container', presence=False, yang_name=\"l2-spf-timer\", rest_name=\"l2-spf-timer\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-spf-timer-l2', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)\"\"\"", ",", "}", ")", "self", ".", "__l2_spf_timer", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for l2_spf_timer, mapped from YANG variable /isis_state/router_isis_config/l2_spf_timer (container) If this variable is read-only (config: false) in the source YANG file, then _set_l2_spf_timer is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_l2_spf_timer() directly. YANG Description: Timer for IS-IS Level-2 SPF calculation for IPv4
[ "Setter", "method", "for", "l2_spf_timer", "mapped", "from", "YANG", "variable", "/", "isis_state", "/", "router_isis_config", "/", "l2_spf_timer", "(", "container", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_l2_spf_timer", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_l2_spf_timer", "()", "directly", "." ]
python
train
nvbn/thefuck
thefuck/corrector.py
https://github.com/nvbn/thefuck/blob/40ab4eb62db57627bff10cf029d29c94704086a2/thefuck/corrector.py#L8-L19
def get_loaded_rules(rules_paths): """Yields all available rules. :type rules_paths: [Path] :rtype: Iterable[Rule] """ for path in rules_paths: if path.name != '__init__.py': rule = Rule.from_path(path) if rule.is_enabled: yield rule
[ "def", "get_loaded_rules", "(", "rules_paths", ")", ":", "for", "path", "in", "rules_paths", ":", "if", "path", ".", "name", "!=", "'__init__.py'", ":", "rule", "=", "Rule", ".", "from_path", "(", "path", ")", "if", "rule", ".", "is_enabled", ":", "yield", "rule" ]
Yields all available rules. :type rules_paths: [Path] :rtype: Iterable[Rule]
[ "Yields", "all", "available", "rules", "." ]
python
train
has2k1/plotnine
plotnine/__init__.py
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/__init__.py#L22-L40
def _get_all_imports(): """ Return list of all the imports This prevents sub-modules (geoms, stats, utils, ...) from being imported into the user namespace by the following import statement from plotnine import * This is because `from Module import Something` leads to `Module` itself coming into the namespace!! """ import types lst = [name for name, obj in globals().items() if not (name.startswith('_') or name == 'absolute_import' or isinstance(obj, types.ModuleType))] return lst
[ "def", "_get_all_imports", "(", ")", ":", "import", "types", "lst", "=", "[", "name", "for", "name", ",", "obj", "in", "globals", "(", ")", ".", "items", "(", ")", "if", "not", "(", "name", ".", "startswith", "(", "'_'", ")", "or", "name", "==", "'absolute_import'", "or", "isinstance", "(", "obj", ",", "types", ".", "ModuleType", ")", ")", "]", "return", "lst" ]
Return list of all the imports This prevents sub-modules (geoms, stats, utils, ...) from being imported into the user namespace by the following import statement from plotnine import * This is because `from Module import Something` leads to `Module` itself coming into the namespace!!
[ "Return", "list", "of", "all", "the", "imports" ]
python
train
pymupdf/PyMuPDF
fitz/utils.py
https://github.com/pymupdf/PyMuPDF/blob/917f2d83482510e26ba0ff01fd2392c26f3a8e90/fitz/utils.py#L140-L288
def insertImage(page, rect, filename=None, pixmap=None, stream=None, rotate=0, keep_proportion = True, overlay=True): """Insert an image in a rectangle on the current page. Notes: Exactly one of filename, pixmap or stream must be provided. Args: rect: (rect-like) where to place the source image filename: (str) name of an image file pixmap: (obj) a Pixmap object stream: (bytes) an image in memory rotate: (int) degrees (multiple of 90) keep_proportion: (bool) whether to maintain aspect ratio overlay: (bool) put in foreground """ def calc_matrix(fw, fh, tr, rotate=0): """ Calculate transformation matrix for image insertion. Notes: The image will preserve its aspect ratio if and only if arguments fw, fh are both equal to 1. Args: fw, fh: width / height ratio factors of image - floats in (0,1]. At least one of them (corresponding to the longer side) is equal to 1. tr: target rect in PDF coordinates rotate: rotation angle in degrees Returns: Transformation matrix. """ # center point of target rect tmp = Point((tr.x1 + tr.x0) / 2., (tr.y1 + tr.y0) / 2.) rot = Matrix(rotate) # rotation matrix # matrix m moves image center to (0, 0), then rotates m = Matrix(1, 0, 0, 1, -0.5, -0.5) * rot #sr1 = sr * m # resulting image rect # -------------------------------------------------------------------- # calculate the scale matrix # -------------------------------------------------------------------- small = min(fw, fh) # factor of the smaller side if rotate not in (0, 180): fw, fh = fh, fw # width / height exchange their roles if fw < 1: # portrait if (float(tr.width) / fw) > (float(tr.height) / fh): w = tr.height * small h = tr.height else: w = tr.width h = tr.width / small elif fw != fh: # landscape if (float(tr.width) / fw) > (float(tr.height) / fh): w = tr.height / small h = tr.height else: w = tr.width h = tr.width * small else: # (treated as) equal sided w = tr.width h = tr.height m *= Matrix(w, h) # concat scale matrix m *= Matrix(1, 0, 0, 1, tmp.x, tmp.y) # concat move to target center return m # ------------------------------------------------------------------------- CheckParent(page) doc = page.parent if not doc.isPDF: raise ValueError("not a PDF") if bool(filename) + bool(stream) + bool(pixmap) != 1: raise ValueError("need exactly one of filename, pixmap, stream") if filename and not os.path.exists(filename): raise FileNotFoundError("No such file: '%s'" % filename) elif stream and type(stream) not in (bytes, bytearray, io.BytesIO): raise ValueError("stream must be bytes-like or BytesIO") elif pixmap and type(pixmap) is not Pixmap: raise ValueError("pixmap must be a Pixmap") while rotate < 0: rotate += 360 while rotate > 360: rotate -= 360 if rotate not in (0, 90, 180, 270): raise ValueError("bad rotate value") r = page.rect & rect if r.isEmpty or r.isInfinite: raise ValueError("rect must be finite and not empty") _imgpointer = None if keep_proportion is True: # for this we need the image dimension if pixmap: # this is the easy case w = pixmap.width h = pixmap.height elif stream: # use tool to access the information # we also pass through the generated fz_image address img_size = TOOLS.image_size(stream, keep_image=True) w, h = img_size[:2] stream = None # make sure this arg is NOT used _imgpointer = img_size[-1] # pointer to fz_image else: # worst case, we need to read the file ourselves img = open(filename, "rb") stream = img.read() img_size = TOOLS.image_size(stream, keep_image=True) w, h = img_size[:2] _imgpointer = img_size[-1] # pointer to fz_image stream = None # make sure this arg is NOT 
used filename = None # make sure this arg is NOT used img.close() # close image file maxf = max(w, h).__float__() fw = w / maxf fh = h / maxf else: fw = fh = 1.0 clip = r * ~page._getTransformation() # target rect in PDF coordinates matrix = calc_matrix(fw, fh, clip, rotate=rotate) ilst = [i[7] for i in doc.getPageImageList(page.number)] n = "fzImg" i = 0 _imgname = n + "0" while _imgname in ilst: i += 1 _imgname = n + str(i) page._insertImage( filename=filename, # image in file pixmap=pixmap, # image in pixmap stream=stream, # image in memory matrix=matrix, # generated matrix overlay=overlay, _imgname=_imgname, # generated PDF resource name _imgpointer=_imgpointer, # address of fz_image )
[ "def", "insertImage", "(", "page", ",", "rect", ",", "filename", "=", "None", ",", "pixmap", "=", "None", ",", "stream", "=", "None", ",", "rotate", "=", "0", ",", "keep_proportion", "=", "True", ",", "overlay", "=", "True", ")", ":", "def", "calc_matrix", "(", "fw", ",", "fh", ",", "tr", ",", "rotate", "=", "0", ")", ":", "\"\"\" Calculate transformation matrix for image insertion.\n\n Notes:\n The image will preserve its aspect ratio if and only if arguments\n fw, fh are both equal to 1.\n Args:\n fw, fh: width / height ratio factors of image - floats in (0,1].\n At least one of them (corresponding to the longer side) is equal to 1.\n tr: target rect in PDF coordinates\n rotate: rotation angle in degrees\n Returns:\n Transformation matrix.\n \"\"\"", "# center point of target rect", "tmp", "=", "Point", "(", "(", "tr", ".", "x1", "+", "tr", ".", "x0", ")", "/", "2.", ",", "(", "tr", ".", "y1", "+", "tr", ".", "y0", ")", "/", "2.", ")", "rot", "=", "Matrix", "(", "rotate", ")", "# rotation matrix", "# matrix m moves image center to (0, 0), then rotates", "m", "=", "Matrix", "(", "1", ",", "0", ",", "0", ",", "1", ",", "-", "0.5", ",", "-", "0.5", ")", "*", "rot", "#sr1 = sr * m # resulting image rect", "# --------------------------------------------------------------------", "# calculate the scale matrix", "# --------------------------------------------------------------------", "small", "=", "min", "(", "fw", ",", "fh", ")", "# factor of the smaller side", "if", "rotate", "not", "in", "(", "0", ",", "180", ")", ":", "fw", ",", "fh", "=", "fh", ",", "fw", "# width / height exchange their roles", "if", "fw", "<", "1", ":", "# portrait", "if", "(", "float", "(", "tr", ".", "width", ")", "/", "fw", ")", ">", "(", "float", "(", "tr", ".", "height", ")", "/", "fh", ")", ":", "w", "=", "tr", ".", "height", "*", "small", "h", "=", "tr", ".", "height", "else", ":", "w", "=", "tr", ".", "width", "h", "=", "tr", ".", "width", "/", "small", "elif", "fw", "!=", "fh", ":", "# landscape", "if", "(", "float", "(", "tr", ".", "width", ")", "/", "fw", ")", ">", "(", "float", "(", "tr", ".", "height", ")", "/", "fh", ")", ":", "w", "=", "tr", ".", "height", "/", "small", "h", "=", "tr", ".", "height", "else", ":", "w", "=", "tr", ".", "width", "h", "=", "tr", ".", "width", "*", "small", "else", ":", "# (treated as) equal sided", "w", "=", "tr", ".", "width", "h", "=", "tr", ".", "height", "m", "*=", "Matrix", "(", "w", ",", "h", ")", "# concat scale matrix", "m", "*=", "Matrix", "(", "1", ",", "0", ",", "0", ",", "1", ",", "tmp", ".", "x", ",", "tmp", ".", "y", ")", "# concat move to target center", "return", "m", "# -------------------------------------------------------------------------", "CheckParent", "(", "page", ")", "doc", "=", "page", ".", "parent", "if", "not", "doc", ".", "isPDF", ":", "raise", "ValueError", "(", "\"not a PDF\"", ")", "if", "bool", "(", "filename", ")", "+", "bool", "(", "stream", ")", "+", "bool", "(", "pixmap", ")", "!=", "1", ":", "raise", "ValueError", "(", "\"need exactly one of filename, pixmap, stream\"", ")", "if", "filename", "and", "not", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "raise", "FileNotFoundError", "(", "\"No such file: '%s'\"", "%", "filename", ")", "elif", "stream", "and", "type", "(", "stream", ")", "not", "in", "(", "bytes", ",", "bytearray", ",", "io", ".", "BytesIO", ")", ":", "raise", "ValueError", "(", "\"stream must be bytes-like or BytesIO\"", ")", "elif", "pixmap", "and", "type", "(", "pixmap", ")", "is", "not", "Pixmap", ":", 
"raise", "ValueError", "(", "\"pixmap must be a Pixmap\"", ")", "while", "rotate", "<", "0", ":", "rotate", "+=", "360", "while", "rotate", ">", "360", ":", "rotate", "-=", "360", "if", "rotate", "not", "in", "(", "0", ",", "90", ",", "180", ",", "270", ")", ":", "raise", "ValueError", "(", "\"bad rotate value\"", ")", "r", "=", "page", ".", "rect", "&", "rect", "if", "r", ".", "isEmpty", "or", "r", ".", "isInfinite", ":", "raise", "ValueError", "(", "\"rect must be finite and not empty\"", ")", "_imgpointer", "=", "None", "if", "keep_proportion", "is", "True", ":", "# for this we need the image dimension", "if", "pixmap", ":", "# this is the easy case", "w", "=", "pixmap", ".", "width", "h", "=", "pixmap", ".", "height", "elif", "stream", ":", "# use tool to access the information", "# we also pass through the generated fz_image address", "img_size", "=", "TOOLS", ".", "image_size", "(", "stream", ",", "keep_image", "=", "True", ")", "w", ",", "h", "=", "img_size", "[", ":", "2", "]", "stream", "=", "None", "# make sure this arg is NOT used", "_imgpointer", "=", "img_size", "[", "-", "1", "]", "# pointer to fz_image", "else", ":", "# worst case, we need to read the file ourselves", "img", "=", "open", "(", "filename", ",", "\"rb\"", ")", "stream", "=", "img", ".", "read", "(", ")", "img_size", "=", "TOOLS", ".", "image_size", "(", "stream", ",", "keep_image", "=", "True", ")", "w", ",", "h", "=", "img_size", "[", ":", "2", "]", "_imgpointer", "=", "img_size", "[", "-", "1", "]", "# pointer to fz_image", "stream", "=", "None", "# make sure this arg is NOT used", "filename", "=", "None", "# make sure this arg is NOT used", "img", ".", "close", "(", ")", "# close image file", "maxf", "=", "max", "(", "w", ",", "h", ")", ".", "__float__", "(", ")", "fw", "=", "w", "/", "maxf", "fh", "=", "h", "/", "maxf", "else", ":", "fw", "=", "fh", "=", "1.0", "clip", "=", "r", "*", "~", "page", ".", "_getTransformation", "(", ")", "# target rect in PDF coordinates", "matrix", "=", "calc_matrix", "(", "fw", ",", "fh", ",", "clip", ",", "rotate", "=", "rotate", ")", "ilst", "=", "[", "i", "[", "7", "]", "for", "i", "in", "doc", ".", "getPageImageList", "(", "page", ".", "number", ")", "]", "n", "=", "\"fzImg\"", "i", "=", "0", "_imgname", "=", "n", "+", "\"0\"", "while", "_imgname", "in", "ilst", ":", "i", "+=", "1", "_imgname", "=", "n", "+", "str", "(", "i", ")", "page", ".", "_insertImage", "(", "filename", "=", "filename", ",", "# image in file", "pixmap", "=", "pixmap", ",", "# image in pixmap", "stream", "=", "stream", ",", "# image in memory", "matrix", "=", "matrix", ",", "# generated matrix", "overlay", "=", "overlay", ",", "_imgname", "=", "_imgname", ",", "# generated PDF resource name", "_imgpointer", "=", "_imgpointer", ",", "# address of fz_image", ")" ]
Insert an image in a rectangle on the current page. Notes: Exactly one of filename, pixmap or stream must be provided. Args: rect: (rect-like) where to place the source image filename: (str) name of an image file pixmap: (obj) a Pixmap object stream: (bytes) an image in memory rotate: (int) degrees (multiple of 90) keep_proportion: (bool) whether to maintain aspect ratio overlay: (bool) put in foreground
[ "Insert", "an", "image", "in", "a", "rectangle", "on", "the", "current", "page", "." ]
python
train
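A hedged usage sketch for the insertImage record above: the PDF and image file names are placeholders, and it assumes the function is exposed as a Page method, as in PyMuPDF's fitz bindings.

import fitz  # PyMuPDF

doc = fitz.open("report.pdf")          # placeholder: an existing PDF
page = doc[0]                          # first page
rect = fitz.Rect(50, 50, 250, 250)     # target rectangle in page coordinates

# exactly one of filename / pixmap / stream may be given
page.insertImage(rect, filename="logo.png", rotate=0, keep_proportion=True)

doc.save("report-with-logo.pdf")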
onicagroup/runway
runway/hooks/staticsite/build_staticsite.py
https://github.com/onicagroup/runway/blob/3f3549ec3bf6e39b9f27d9738a1847f3a4369e7f/runway/hooks/staticsite/build_staticsite.py#L37-L54
def download_and_extract_to_mkdtemp(bucket, key, session=None): """Download zip archive and extract it to temporary directory.""" if session: s3_client = session.client('s3') else: s3_client = boto3.client('s3') transfer = S3Transfer(s3_client) filedes, temp_file = tempfile.mkstemp() os.close(filedes) transfer.download_file(bucket, key, temp_file) output_dir = tempfile.mkdtemp() zip_ref = zipfile.ZipFile(temp_file, 'r') zip_ref.extractall(output_dir) zip_ref.close() os.remove(temp_file) return output_dir
[ "def", "download_and_extract_to_mkdtemp", "(", "bucket", ",", "key", ",", "session", "=", "None", ")", ":", "if", "session", ":", "s3_client", "=", "session", ".", "client", "(", "'s3'", ")", "else", ":", "s3_client", "=", "boto3", ".", "client", "(", "'s3'", ")", "transfer", "=", "S3Transfer", "(", "s3_client", ")", "filedes", ",", "temp_file", "=", "tempfile", ".", "mkstemp", "(", ")", "os", ".", "close", "(", "filedes", ")", "transfer", ".", "download_file", "(", "bucket", ",", "key", ",", "temp_file", ")", "output_dir", "=", "tempfile", ".", "mkdtemp", "(", ")", "zip_ref", "=", "zipfile", ".", "ZipFile", "(", "temp_file", ",", "'r'", ")", "zip_ref", ".", "extractall", "(", "output_dir", ")", "zip_ref", ".", "close", "(", ")", "os", ".", "remove", "(", "temp_file", ")", "return", "output_dir" ]
Download zip archive and extract it to temporary directory.
[ "Download", "zip", "archive", "and", "extract", "it", "to", "temporary", "directory", "." ]
python
train
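A minimal usage sketch for the download_and_extract_to_mkdtemp record above; the bucket, key, and profile names are invented, and it assumes AWS credentials are available and that the key points at a zip archive.

import boto3
from runway.hooks.staticsite.build_staticsite import download_and_extract_to_mkdtemp

session = boto3.Session(profile_name="default")   # optional; None falls back to default credentials
extracted_dir = download_and_extract_to_mkdtemp(
    bucket="my-artifact-bucket",
    key="builds/site-v1.2.3.zip",
    session=session,
)
print("archive extracted to", extracted_dir)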
Phylliade/ikpy
contrib/transformations.py
https://github.com/Phylliade/ikpy/blob/60e36d6163136942bf520d952db17123c658d0b6/contrib/transformations.py#L71-L78
def list_to_quat(quatlist): """ Convert a quaternion in the form of a list in geometry_msgs/Quaternion :param quatlist: [x, y, z, w] :return: """ return geometry_msgs.msg.Quaternion( x=quatlist[0], y=quatlist[1], z=quatlist[2], w=quatlist[3])
[ "def", "list_to_quat", "(", "quatlist", ")", ":", "return", "geometry_msgs", ".", "msg", ".", "Quaternion", "(", "x", "=", "quatlist", "[", "0", "]", ",", "y", "=", "quatlist", "[", "1", "]", ",", "z", "=", "quatlist", "[", "2", "]", ",", "w", "=", "quatlist", "[", "3", "]", ")" ]
Convert a quaternion in the form of a list in geometry_msgs/Quaternion :param quatlist: [x, y, z, w] :return:
[ "Convert", "a", "quaternion", "in", "the", "form", "of", "a", "list", "in", "geometry_msgs", "/", "Quaternion", ":", "param", "quatlist", ":", "[", "x", "y", "z", "w", "]", ":", "return", ":" ]
python
train
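A tiny sketch of list_to_quat above; it assumes a ROS environment where geometry_msgs is importable, the import path is inferred from the record's path field, and the quaternion values are arbitrary.

from contrib.transformations import list_to_quat   # module path taken from the record

q = list_to_quat([0.0, 0.0, 0.7071, 0.7071])   # roughly a 90 degree rotation about z
print(q.x, q.y, q.z, q.w)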
mitsei/dlkit
dlkit/json_/resource/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/resource/sessions.py#L2970-L2989
def is_ancestor_of_bin(self, id_, bin_id): """Tests if an ``Id`` is an ancestor of a bin. arg: id (osid.id.Id): an ``Id`` arg: bin_id (osid.id.Id): the ``Id`` of a bin return: (boolean) - ``true`` if this ``id`` is an ancestor of ``bin_id,`` ``false`` otherwise raise: NotFound - ``bin_id`` is not found raise: NullArgument - ``id`` or ``bin_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* *implementation notes*: If ``id`` not found return ``false``. """ # Implemented from template for # osid.resource.BinHierarchySession.is_ancestor_of_bin if self._catalog_session is not None: return self._catalog_session.is_ancestor_of_catalog(id_=id_, catalog_id=bin_id) return self._hierarchy_session.is_ancestor(id_=id_, ancestor_id=bin_id)
[ "def", "is_ancestor_of_bin", "(", "self", ",", "id_", ",", "bin_id", ")", ":", "# Implemented from template for", "# osid.resource.BinHierarchySession.is_ancestor_of_bin", "if", "self", ".", "_catalog_session", "is", "not", "None", ":", "return", "self", ".", "_catalog_session", ".", "is_ancestor_of_catalog", "(", "id_", "=", "id_", ",", "catalog_id", "=", "bin_id", ")", "return", "self", ".", "_hierarchy_session", ".", "is_ancestor", "(", "id_", "=", "id_", ",", "ancestor_id", "=", "bin_id", ")" ]
Tests if an ``Id`` is an ancestor of a bin. arg: id (osid.id.Id): an ``Id`` arg: bin_id (osid.id.Id): the ``Id`` of a bin return: (boolean) - ``true`` if this ``id`` is an ancestor of ``bin_id,`` ``false`` otherwise raise: NotFound - ``bin_id`` is not found raise: NullArgument - ``id`` or ``bin_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* *implementation notes*: If ``id`` not found return ``false``.
[ "Tests", "if", "an", "Id", "is", "an", "ancestor", "of", "a", "bin", "." ]
python
train
wind-python/windpowerlib
windpowerlib/wind_speed.py
https://github.com/wind-python/windpowerlib/blob/421b316139743311b7cb68a69f6b53d2665f7e23/windpowerlib/wind_speed.py#L92-L171
def hellman(wind_speed, wind_speed_height, hub_height, roughness_length=None, hellman_exponent=None): r""" Calculates the wind speed at hub height using the hellman equation. It is assumed that the wind profile follows a power law. This function is carried out when the parameter `wind_speed_model` of an instance of the :class:`~.modelchain.ModelChain` class is 'hellman'. Parameters ---------- wind_speed : pandas.Series or numpy.array Wind speed time series. wind_speed_height : float Height for which the parameter `wind_speed` applies. hub_height : float Hub height of wind turbine. roughness_length : pandas.Series or numpy.array or float Roughness length. If given and `hellman_exponent` is None: `hellman_exponent` = 1 / ln(hub_height/roughness_length), otherwise `hellman_exponent` = 1/7. Default: None. hellman_exponent : None or float The Hellman exponent, which combines the increase in wind speed due to stability of atmospheric conditions and surface roughness into one constant. If None and roughness length is given `hellman_exponent` = 1 / ln(hub_height/roughness_length), otherwise `hellman_exponent` = 1/7. Default: None. Returns ------- pandas.Series or numpy.array Wind speed at hub height. Data type depends on type of `wind_speed`. Notes ----- The following equation is used [1]_, [2]_, [3]_: .. math:: v_{wind,hub}=v_{wind,data}\cdot \left(\frac{h_{hub}}{h_{data}} \right)^\alpha with: v: wind speed, h: height, :math:`\alpha`: Hellman exponent :math:`h_{data}` is the height in which the wind speed :math:`v_{wind,data}` is measured and :math:`v_{wind,hub}` is the wind speed at hub height :math:`h_{hub}` of the wind turbine. For the Hellman exponent :math:`\alpha` many studies use a value of 1/7 for onshore and a value of 1/9 for offshore. The Hellman exponent can also be calulated by the following equation [2]_, [3]_: .. math:: \alpha = \frac{1}{\ln\left(\frac{h_{hub}}{z_0} \right)} with: :math:`z_{0}`: roughness length Parameters `wind_speed_height`, `roughness_length`, `hub_height` and `obstacle_height` have to be of the same unit. References ---------- .. [1] Sharp, E.: "Spatiotemporal disaggregation of GB scenarios depicting increased wind capacity and electrified heat demand in dwellings". UCL, Energy Institute, 2015, p. 83 .. [2] Hau, E.: "Windkraftanlagen - Grundlagen, Technik, Einsatz, Wirtschaftlichkeit". 4. Auflage, Springer-Verlag, 2008, p. 517 .. [3] Quaschning V.: "Regenerative Energiesysteme". München, Hanser Verlag, 2011, p. 279 """ if hellman_exponent is None: if roughness_length is not None: # Return np.array if wind_speed is np.array if (isinstance(wind_speed, np.ndarray) and isinstance(roughness_length, pd.Series)): roughness_length = np.array(roughness_length) hellman_exponent = 1 / np.log(hub_height / roughness_length) else: hellman_exponent = 1/7 return wind_speed * (hub_height / wind_speed_height) ** hellman_exponent
[ "def", "hellman", "(", "wind_speed", ",", "wind_speed_height", ",", "hub_height", ",", "roughness_length", "=", "None", ",", "hellman_exponent", "=", "None", ")", ":", "if", "hellman_exponent", "is", "None", ":", "if", "roughness_length", "is", "not", "None", ":", "# Return np.array if wind_speed is np.array", "if", "(", "isinstance", "(", "wind_speed", ",", "np", ".", "ndarray", ")", "and", "isinstance", "(", "roughness_length", ",", "pd", ".", "Series", ")", ")", ":", "roughness_length", "=", "np", ".", "array", "(", "roughness_length", ")", "hellman_exponent", "=", "1", "/", "np", ".", "log", "(", "hub_height", "/", "roughness_length", ")", "else", ":", "hellman_exponent", "=", "1", "/", "7", "return", "wind_speed", "*", "(", "hub_height", "/", "wind_speed_height", ")", "**", "hellman_exponent" ]
r""" Calculates the wind speed at hub height using the hellman equation. It is assumed that the wind profile follows a power law. This function is carried out when the parameter `wind_speed_model` of an instance of the :class:`~.modelchain.ModelChain` class is 'hellman'. Parameters ---------- wind_speed : pandas.Series or numpy.array Wind speed time series. wind_speed_height : float Height for which the parameter `wind_speed` applies. hub_height : float Hub height of wind turbine. roughness_length : pandas.Series or numpy.array or float Roughness length. If given and `hellman_exponent` is None: `hellman_exponent` = 1 / ln(hub_height/roughness_length), otherwise `hellman_exponent` = 1/7. Default: None. hellman_exponent : None or float The Hellman exponent, which combines the increase in wind speed due to stability of atmospheric conditions and surface roughness into one constant. If None and roughness length is given `hellman_exponent` = 1 / ln(hub_height/roughness_length), otherwise `hellman_exponent` = 1/7. Default: None. Returns ------- pandas.Series or numpy.array Wind speed at hub height. Data type depends on type of `wind_speed`. Notes ----- The following equation is used [1]_, [2]_, [3]_: .. math:: v_{wind,hub}=v_{wind,data}\cdot \left(\frac{h_{hub}}{h_{data}} \right)^\alpha with: v: wind speed, h: height, :math:`\alpha`: Hellman exponent :math:`h_{data}` is the height in which the wind speed :math:`v_{wind,data}` is measured and :math:`v_{wind,hub}` is the wind speed at hub height :math:`h_{hub}` of the wind turbine. For the Hellman exponent :math:`\alpha` many studies use a value of 1/7 for onshore and a value of 1/9 for offshore. The Hellman exponent can also be calulated by the following equation [2]_, [3]_: .. math:: \alpha = \frac{1}{\ln\left(\frac{h_{hub}}{z_0} \right)} with: :math:`z_{0}`: roughness length Parameters `wind_speed_height`, `roughness_length`, `hub_height` and `obstacle_height` have to be of the same unit. References ---------- .. [1] Sharp, E.: "Spatiotemporal disaggregation of GB scenarios depicting increased wind capacity and electrified heat demand in dwellings". UCL, Energy Institute, 2015, p. 83 .. [2] Hau, E.: "Windkraftanlagen - Grundlagen, Technik, Einsatz, Wirtschaftlichkeit". 4. Auflage, Springer-Verlag, 2008, p. 517 .. [3] Quaschning V.: "Regenerative Energiesysteme". München, Hanser Verlag, 2011, p. 279
[ "r", "Calculates", "the", "wind", "speed", "at", "hub", "height", "using", "the", "hellman", "equation", "." ]
python
train
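A self-contained numerical sketch of the power-law extrapolation implemented by hellman() above; the measured wind speeds, heights, and roughness length are made up.

import numpy as np

wind_speed = np.array([4.0, 5.5, 7.2])   # m/s measured at 10 m
wind_speed_height = 10.0                 # m
hub_height = 100.0                       # m
roughness_length = 0.15                  # m (e.g. farmland with hedges)

# exponent from roughness length, as in the docstring:
# alpha = 1 / ln(hub_height / z0); otherwise the common default 1/7 is used
alpha = 1.0 / np.log(hub_height / roughness_length)

v_hub = wind_speed * (hub_height / wind_speed_height) ** alpha
print(v_hub)  # extrapolated wind speeds at 100 m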
BeyondTheClouds/enoslib
docs/tutorials/grid5000/virt/tuto_grid5000_virt.py
https://github.com/BeyondTheClouds/enoslib/blob/fb00be58e56a7848cfe482187d659744919fe2f7/docs/tutorials/grid5000/virt/tuto_grid5000_virt.py#L16-L23
def range_mac(mac_start, mac_end, step=1): """Iterate over mac addresses (given as string).""" start = int(EUI(mac_start)) end = int(EUI(mac_end)) for i_mac in range(start, end, step): mac = EUI(int(EUI(i_mac)) + 1) ip = ['10'] + [str(int(i, 2)) for i in mac.bits().split('-')[-3:]] yield str(mac).replace('-', ':'), '.'.join(ip)
[ "def", "range_mac", "(", "mac_start", ",", "mac_end", ",", "step", "=", "1", ")", ":", "start", "=", "int", "(", "EUI", "(", "mac_start", ")", ")", "end", "=", "int", "(", "EUI", "(", "mac_end", ")", ")", "for", "i_mac", "in", "range", "(", "start", ",", "end", ",", "step", ")", ":", "mac", "=", "EUI", "(", "int", "(", "EUI", "(", "i_mac", ")", ")", "+", "1", ")", "ip", "=", "[", "'10'", "]", "+", "[", "str", "(", "int", "(", "i", ",", "2", ")", ")", "for", "i", "in", "mac", ".", "bits", "(", ")", ".", "split", "(", "'-'", ")", "[", "-", "3", ":", "]", "]", "yield", "str", "(", "mac", ")", ".", "replace", "(", "'-'", ",", "':'", ")", ",", "'.'", ".", "join", "(", "ip", ")" ]
Iterate over mac addresses (given as string).
[ "Iterate", "over", "mac", "addresses", "(", "given", "as", "string", ")", "." ]
python
train
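An illustrative call of the range_mac() generator above; it assumes the EUI class it uses comes from the netaddr package, that range_mac itself is in scope (for example, copied from the tutorial script), and the MAC bounds are invented.

from itertools import islice

mac_start = "00:16:3e:00:00:01"   # placeholder lower bound
mac_end = "00:16:3e:00:00:ff"     # placeholder upper bound

# print the first few (mac, ip) pairs generated from the range
for mac, ip in islice(range_mac(mac_start, mac_end), 3):
    print(mac, "->", ip)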
waqasbhatti/astrobase
astrobase/periodbase/kbls.py
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/periodbase/kbls.py#L626-L1049
def bls_parallel_pfind( times, mags, errs, magsarefluxes=False, startp=0.1, # by default, search from 0.1 d to... endp=100.0, # ... 100.0 d -- don't search full timebase stepsize=1.0e-4, mintransitduration=0.01, # minimum transit length in phase maxtransitduration=0.4, # maximum transit length in phase nphasebins=200, autofreq=True, # figure out f0, nf, and df automatically nbestpeaks=5, periodepsilon=0.1, # 0.1 sigclip=10.0, verbose=True, nworkers=None, get_stats=True, ): '''Runs the Box Least Squares Fitting Search for transit-shaped signals. Based on eebls.f from Kovacs et al. 2002 and python-bls from Foreman-Mackey et al. 2015. Breaks up the full frequency space into chunks and passes them to parallel BLS workers. NOTE: the combined BLS spectrum produced by this function is not identical to that produced by running BLS in one shot for the entire frequency space. There are differences on the order of 1.0e-3 or so in the respective peak values, but peaks appear at the same frequencies for both methods. This is likely due to different aliasing caused by smaller chunks of the frequency space used by the parallel workers in this function. When in doubt, confirm results for this parallel implementation by comparing to those from the serial implementation above. Parameters ---------- times,mags,errs : np.array The magnitude/flux time-series to search for transits. magsarefluxes : bool If the input measurement values in `mags` and `errs` are in fluxes, set this to True. startp,endp : float The minimum and maximum periods to consider for the transit search. stepsize : float The step-size in frequency to use when constructing a frequency grid for the period search. mintransitduration,maxtransitduration : float The minimum and maximum transitdurations (in units of phase) to consider for the transit search. nphasebins : int The number of phase bins to use in the period search. autofreq : bool If this is True, the values of `stepsize` and `nphasebins` will be ignored, and these, along with a frequency-grid, will be determined based on the following relations:: nphasebins = int(ceil(2.0/mintransitduration)) if nphasebins > 3000: nphasebins = 3000 stepsize = 0.25*mintransitduration/(times.max()-times.min()) minfreq = 1.0/endp maxfreq = 1.0/startp nfreq = int(ceil((maxfreq - minfreq)/stepsize)) periodepsilon : float The fractional difference between successive values of 'best' periods when sorting by periodogram power to consider them as separate periods (as opposed to part of the same periodogram peak). This is used to avoid broad peaks in the periodogram and make sure the 'best' periods returned are all actually independent. nbestpeaks : int The number of 'best' peaks to return from the periodogram results, starting from the global maximum of the periodogram peak values. sigclip : float or int or sequence of two floats/ints or None If a single float or int, a symmetric sigma-clip will be performed using the number provided as the sigma-multiplier to cut out from the input time-series. If a list of two ints/floats is provided, the function will perform an 'asymmetric' sigma-clip. The first element in this list is the sigma value to use for fainter flux/mag values; the second element in this list is the sigma value to use for brighter flux/mag values. For example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma dimmings and greater than 3-sigma brightenings. 
Here the meaning of "dimming" and "brightening" is set by *physics* (not the magnitude system), which is why the `magsarefluxes` kwarg must be correctly set. If `sigclip` is None, no sigma-clipping will be performed, and the time-series (with non-finite elems removed) will be passed through to the output. verbose : bool If this is True, will indicate progress and details about the frequency grid used for the period search. nworkers : int or None The number of parallel workers to launch for period-search. If None, nworkers = NCPUS. get_stats : bool If True, runs :py:func:`.bls_stats_singleperiod` for each of the best periods in the output and injects the output into the output dict so you only have to run this function to get the periods and their stats. Returns ------- dict This function returns a dict, referred to as an `lspinfo` dict in other astrobase functions that operate on periodogram results. This is a standardized format across all astrobase period-finders, and is of the form below:: {'bestperiod': the best period value in the periodogram, 'bestlspval': the periodogram peak associated with the best period, 'nbestpeaks': the input value of nbestpeaks, 'nbestlspvals': nbestpeaks-size list of best period peak values, 'nbestperiods': nbestpeaks-size list of best periods, 'stats': list of stats dicts returned for each best period, 'lspvals': the full array of periodogram powers, 'frequencies': the full array of frequencies considered, 'periods': the full array of periods considered, 'blsresult': list of result dicts from eebls.f wrapper functions, 'stepsize': the actual stepsize used, 'nfreq': the actual nfreq used, 'nphasebins': the actual nphasebins used, 'mintransitduration': the input mintransitduration, 'maxtransitduration': the input maxtransitdurations, 'method':'bls' -> the name of the period-finder method, 'kwargs':{ dict of all of the input kwargs for record-keeping}} ''' # get rid of nans first and sigclip stimes, smags, serrs = sigclip_magseries(times, mags, errs, magsarefluxes=magsarefluxes, sigclip=sigclip) # make sure there are enough points to calculate a spectrum if len(stimes) > 9 and len(smags) > 9 and len(serrs) > 9: # if we're setting up everything automatically if autofreq: # figure out the best number of phasebins to use nphasebins = int(npceil(2.0/mintransitduration)) if nphasebins > 3000: nphasebins = 3000 # use heuristic to figure out best timestep stepsize = 0.25*mintransitduration/(stimes.max()-stimes.min()) # now figure out the frequencies to use minfreq = 1.0/endp maxfreq = 1.0/startp nfreq = int(npceil((maxfreq - minfreq)/stepsize)) # say what we're using if verbose: LOGINFO('min P: %s, max P: %s, nfreq: %s, ' 'minfreq: %s, maxfreq: %s' % (startp, endp, nfreq, minfreq, maxfreq)) LOGINFO('autofreq = True: using AUTOMATIC values for ' 'freq stepsize: %s, nphasebins: %s, ' 'min transit duration: %s, max transit duration: %s' % (stepsize, nphasebins, mintransitduration, maxtransitduration)) else: minfreq = 1.0/endp maxfreq = 1.0/startp nfreq = int(npceil((maxfreq - minfreq)/stepsize)) # say what we're using if verbose: LOGINFO('min P: %s, max P: %s, nfreq: %s, ' 'minfreq: %s, maxfreq: %s' % (startp, endp, nfreq, minfreq, maxfreq)) LOGINFO('autofreq = False: using PROVIDED values for ' 'freq stepsize: %s, nphasebins: %s, ' 'min transit duration: %s, max transit duration: %s' % (stepsize, nphasebins, mintransitduration, maxtransitduration)) # check the minimum frequency if minfreq < (1.0/(stimes.max() - stimes.min())): minfreq = 2.0/(stimes.max() - 
stimes.min()) if verbose: LOGWARNING('the requested max P = %.3f is larger than ' 'the time base of the observations = %.3f, ' ' will make minfreq = 2 x 1/timebase' % (endp, stimes.max() - stimes.min())) LOGINFO('new minfreq: %s, maxfreq: %s' % (minfreq, maxfreq)) ############################# ## NOW RUN BLS IN PARALLEL ## ############################# # fix number of CPUs if needed if not nworkers or nworkers > NCPUS: nworkers = NCPUS if verbose: LOGINFO('using %s workers...' % nworkers) # the frequencies array to be searched frequencies = minfreq + nparange(nfreq)*stepsize # break up the tasks into chunks csrem = int(fmod(nfreq, nworkers)) csint = int(float(nfreq/nworkers)) chunk_minfreqs, chunk_nfreqs = [], [] for x in range(nworkers): this_minfreqs = frequencies[x*csint] # handle usual nfreqs if x < (nworkers - 1): this_nfreqs = frequencies[x*csint:x*csint+csint].size else: this_nfreqs = frequencies[x*csint:x*csint+csint+csrem].size chunk_minfreqs.append(this_minfreqs) chunk_nfreqs.append(this_nfreqs) # populate the tasks list tasks = [(stimes, smags, chunk_minf, chunk_nf, stepsize, nphasebins, mintransitduration, maxtransitduration) for (chunk_nf, chunk_minf) in zip(chunk_minfreqs, chunk_nfreqs)] if verbose: for ind, task in enumerate(tasks): LOGINFO('worker %s: minfreq = %.6f, nfreqs = %s' % (ind+1, task[3], task[2])) LOGINFO('running...') # return tasks # start the pool pool = Pool(nworkers) results = pool.map(_parallel_bls_worker, tasks) pool.close() pool.join() del pool # now concatenate the output lsp arrays lsp = npconcatenate([x['power'] for x in results]) periods = 1.0/frequencies # find the nbestpeaks for the periodogram: 1. sort the lsp array # by highest value first 2. go down the values until we find # five values that are separated by at least periodepsilon in # period # make sure to get only the finite peaks in the periodogram # this is needed because BLS may produce infs for some peaks finitepeakind = npisfinite(lsp) finlsp = lsp[finitepeakind] finperiods = periods[finitepeakind] # make sure that finlsp has finite values before we work on it try: bestperiodind = npargmax(finlsp) except ValueError: LOGERROR('no finite periodogram values ' 'for this mag series, skipping...') return {'bestperiod':npnan, 'bestlspval':npnan, 'nbestpeaks':nbestpeaks, 'nbestlspvals':None, 'nbestperiods':None, 'lspvals':None, 'periods':None, 'blsresult':None, 'method':'bls', 'kwargs':{'startp':startp, 'endp':endp, 'stepsize':stepsize, 'mintransitduration':mintransitduration, 'maxtransitduration':maxtransitduration, 'nphasebins':nphasebins, 'autofreq':autofreq, 'periodepsilon':periodepsilon, 'nbestpeaks':nbestpeaks, 'sigclip':sigclip, 'magsarefluxes':magsarefluxes}} sortedlspind = npargsort(finlsp)[::-1] sortedlspperiods = finperiods[sortedlspind] sortedlspvals = finlsp[sortedlspind] # now get the nbestpeaks nbestperiods, nbestlspvals, peakcount = ( [finperiods[bestperiodind]], [finlsp[bestperiodind]], 1 ) prevperiod = sortedlspperiods[0] # find the best nbestpeaks in the lsp and their periods for period, lspval in zip(sortedlspperiods, sortedlspvals): if peakcount == nbestpeaks: break perioddiff = abs(period - prevperiod) bestperiodsdiff = [abs(period - x) for x in nbestperiods] # this ensures that this period is different from the last # period and from all the other existing best periods by # periodepsilon to make sure we jump to an entire different # peak in the periodogram if (perioddiff > (periodepsilon*prevperiod) and all(x > (periodepsilon*period) for x in bestperiodsdiff)): 
nbestperiods.append(period) nbestlspvals.append(lspval) peakcount = peakcount + 1 prevperiod = period # generate the return dict resultdict = { 'bestperiod':finperiods[bestperiodind], 'bestlspval':finlsp[bestperiodind], 'nbestpeaks':nbestpeaks, 'nbestlspvals':nbestlspvals, 'nbestperiods':nbestperiods, 'lspvals':lsp, 'frequencies':frequencies, 'periods':periods, 'blsresult':results, 'stepsize':stepsize, 'nfreq':nfreq, 'nphasebins':nphasebins, 'mintransitduration':mintransitduration, 'maxtransitduration':maxtransitduration, 'method':'bls', 'kwargs':{'startp':startp, 'endp':endp, 'stepsize':stepsize, 'mintransitduration':mintransitduration, 'maxtransitduration':maxtransitduration, 'nphasebins':nphasebins, 'autofreq':autofreq, 'periodepsilon':periodepsilon, 'nbestpeaks':nbestpeaks, 'sigclip':sigclip, 'magsarefluxes':magsarefluxes} } # get stats if requested if get_stats: resultdict['stats'] = [] for bp in nbestperiods.copy(): if verbose: LOGINFO("Getting stats for best period: %.6f" % bp) this_pstats = bls_stats_singleperiod( times, mags, errs, bp, magsarefluxes=resultdict['kwargs']['magsarefluxes'], sigclip=resultdict['kwargs']['sigclip'], nphasebins=resultdict['nphasebins'], mintransitduration=resultdict['mintransitduration'], maxtransitduration=resultdict['maxtransitduration'], verbose=verbose, ) resultdict['stats'].append(this_pstats) return resultdict else: LOGERROR('no good detections for these times and mags, skipping...') return {'bestperiod':npnan, 'bestlspval':npnan, 'nbestpeaks':nbestpeaks, 'nbestlspvals':None, 'nbestperiods':None, 'lspvals':None, 'periods':None, 'blsresult':None, 'stepsize':stepsize, 'nfreq':None, 'nphasebins':None, 'mintransitduration':mintransitduration, 'maxtransitduration':maxtransitduration, 'method':'bls', 'kwargs':{'startp':startp, 'endp':endp, 'stepsize':stepsize, 'mintransitduration':mintransitduration, 'maxtransitduration':maxtransitduration, 'nphasebins':nphasebins, 'autofreq':autofreq, 'periodepsilon':periodepsilon, 'nbestpeaks':nbestpeaks, 'sigclip':sigclip, 'magsarefluxes':magsarefluxes}}
[ "def", "bls_parallel_pfind", "(", "times", ",", "mags", ",", "errs", ",", "magsarefluxes", "=", "False", ",", "startp", "=", "0.1", ",", "# by default, search from 0.1 d to...", "endp", "=", "100.0", ",", "# ... 100.0 d -- don't search full timebase", "stepsize", "=", "1.0e-4", ",", "mintransitduration", "=", "0.01", ",", "# minimum transit length in phase", "maxtransitduration", "=", "0.4", ",", "# maximum transit length in phase", "nphasebins", "=", "200", ",", "autofreq", "=", "True", ",", "# figure out f0, nf, and df automatically", "nbestpeaks", "=", "5", ",", "periodepsilon", "=", "0.1", ",", "# 0.1", "sigclip", "=", "10.0", ",", "verbose", "=", "True", ",", "nworkers", "=", "None", ",", "get_stats", "=", "True", ",", ")", ":", "# get rid of nans first and sigclip", "stimes", ",", "smags", ",", "serrs", "=", "sigclip_magseries", "(", "times", ",", "mags", ",", "errs", ",", "magsarefluxes", "=", "magsarefluxes", ",", "sigclip", "=", "sigclip", ")", "# make sure there are enough points to calculate a spectrum", "if", "len", "(", "stimes", ")", ">", "9", "and", "len", "(", "smags", ")", ">", "9", "and", "len", "(", "serrs", ")", ">", "9", ":", "# if we're setting up everything automatically", "if", "autofreq", ":", "# figure out the best number of phasebins to use", "nphasebins", "=", "int", "(", "npceil", "(", "2.0", "/", "mintransitduration", ")", ")", "if", "nphasebins", ">", "3000", ":", "nphasebins", "=", "3000", "# use heuristic to figure out best timestep", "stepsize", "=", "0.25", "*", "mintransitduration", "/", "(", "stimes", ".", "max", "(", ")", "-", "stimes", ".", "min", "(", ")", ")", "# now figure out the frequencies to use", "minfreq", "=", "1.0", "/", "endp", "maxfreq", "=", "1.0", "/", "startp", "nfreq", "=", "int", "(", "npceil", "(", "(", "maxfreq", "-", "minfreq", ")", "/", "stepsize", ")", ")", "# say what we're using", "if", "verbose", ":", "LOGINFO", "(", "'min P: %s, max P: %s, nfreq: %s, '", "'minfreq: %s, maxfreq: %s'", "%", "(", "startp", ",", "endp", ",", "nfreq", ",", "minfreq", ",", "maxfreq", ")", ")", "LOGINFO", "(", "'autofreq = True: using AUTOMATIC values for '", "'freq stepsize: %s, nphasebins: %s, '", "'min transit duration: %s, max transit duration: %s'", "%", "(", "stepsize", ",", "nphasebins", ",", "mintransitduration", ",", "maxtransitduration", ")", ")", "else", ":", "minfreq", "=", "1.0", "/", "endp", "maxfreq", "=", "1.0", "/", "startp", "nfreq", "=", "int", "(", "npceil", "(", "(", "maxfreq", "-", "minfreq", ")", "/", "stepsize", ")", ")", "# say what we're using", "if", "verbose", ":", "LOGINFO", "(", "'min P: %s, max P: %s, nfreq: %s, '", "'minfreq: %s, maxfreq: %s'", "%", "(", "startp", ",", "endp", ",", "nfreq", ",", "minfreq", ",", "maxfreq", ")", ")", "LOGINFO", "(", "'autofreq = False: using PROVIDED values for '", "'freq stepsize: %s, nphasebins: %s, '", "'min transit duration: %s, max transit duration: %s'", "%", "(", "stepsize", ",", "nphasebins", ",", "mintransitduration", ",", "maxtransitduration", ")", ")", "# check the minimum frequency", "if", "minfreq", "<", "(", "1.0", "/", "(", "stimes", ".", "max", "(", ")", "-", "stimes", ".", "min", "(", ")", ")", ")", ":", "minfreq", "=", "2.0", "/", "(", "stimes", ".", "max", "(", ")", "-", "stimes", ".", "min", "(", ")", ")", "if", "verbose", ":", "LOGWARNING", "(", "'the requested max P = %.3f is larger than '", "'the time base of the observations = %.3f, '", "' will make minfreq = 2 x 1/timebase'", "%", "(", "endp", ",", "stimes", ".", "max", "(", ")", "-", "stimes", ".", "min", 
"(", ")", ")", ")", "LOGINFO", "(", "'new minfreq: %s, maxfreq: %s'", "%", "(", "minfreq", ",", "maxfreq", ")", ")", "#############################", "## NOW RUN BLS IN PARALLEL ##", "#############################", "# fix number of CPUs if needed", "if", "not", "nworkers", "or", "nworkers", ">", "NCPUS", ":", "nworkers", "=", "NCPUS", "if", "verbose", ":", "LOGINFO", "(", "'using %s workers...'", "%", "nworkers", ")", "# the frequencies array to be searched", "frequencies", "=", "minfreq", "+", "nparange", "(", "nfreq", ")", "*", "stepsize", "# break up the tasks into chunks", "csrem", "=", "int", "(", "fmod", "(", "nfreq", ",", "nworkers", ")", ")", "csint", "=", "int", "(", "float", "(", "nfreq", "/", "nworkers", ")", ")", "chunk_minfreqs", ",", "chunk_nfreqs", "=", "[", "]", ",", "[", "]", "for", "x", "in", "range", "(", "nworkers", ")", ":", "this_minfreqs", "=", "frequencies", "[", "x", "*", "csint", "]", "# handle usual nfreqs", "if", "x", "<", "(", "nworkers", "-", "1", ")", ":", "this_nfreqs", "=", "frequencies", "[", "x", "*", "csint", ":", "x", "*", "csint", "+", "csint", "]", ".", "size", "else", ":", "this_nfreqs", "=", "frequencies", "[", "x", "*", "csint", ":", "x", "*", "csint", "+", "csint", "+", "csrem", "]", ".", "size", "chunk_minfreqs", ".", "append", "(", "this_minfreqs", ")", "chunk_nfreqs", ".", "append", "(", "this_nfreqs", ")", "# populate the tasks list", "tasks", "=", "[", "(", "stimes", ",", "smags", ",", "chunk_minf", ",", "chunk_nf", ",", "stepsize", ",", "nphasebins", ",", "mintransitduration", ",", "maxtransitduration", ")", "for", "(", "chunk_nf", ",", "chunk_minf", ")", "in", "zip", "(", "chunk_minfreqs", ",", "chunk_nfreqs", ")", "]", "if", "verbose", ":", "for", "ind", ",", "task", "in", "enumerate", "(", "tasks", ")", ":", "LOGINFO", "(", "'worker %s: minfreq = %.6f, nfreqs = %s'", "%", "(", "ind", "+", "1", ",", "task", "[", "3", "]", ",", "task", "[", "2", "]", ")", ")", "LOGINFO", "(", "'running...'", ")", "# return tasks", "# start the pool", "pool", "=", "Pool", "(", "nworkers", ")", "results", "=", "pool", ".", "map", "(", "_parallel_bls_worker", ",", "tasks", ")", "pool", ".", "close", "(", ")", "pool", ".", "join", "(", ")", "del", "pool", "# now concatenate the output lsp arrays", "lsp", "=", "npconcatenate", "(", "[", "x", "[", "'power'", "]", "for", "x", "in", "results", "]", ")", "periods", "=", "1.0", "/", "frequencies", "# find the nbestpeaks for the periodogram: 1. sort the lsp array", "# by highest value first 2. 
go down the values until we find", "# five values that are separated by at least periodepsilon in", "# period", "# make sure to get only the finite peaks in the periodogram", "# this is needed because BLS may produce infs for some peaks", "finitepeakind", "=", "npisfinite", "(", "lsp", ")", "finlsp", "=", "lsp", "[", "finitepeakind", "]", "finperiods", "=", "periods", "[", "finitepeakind", "]", "# make sure that finlsp has finite values before we work on it", "try", ":", "bestperiodind", "=", "npargmax", "(", "finlsp", ")", "except", "ValueError", ":", "LOGERROR", "(", "'no finite periodogram values '", "'for this mag series, skipping...'", ")", "return", "{", "'bestperiod'", ":", "npnan", ",", "'bestlspval'", ":", "npnan", ",", "'nbestpeaks'", ":", "nbestpeaks", ",", "'nbestlspvals'", ":", "None", ",", "'nbestperiods'", ":", "None", ",", "'lspvals'", ":", "None", ",", "'periods'", ":", "None", ",", "'blsresult'", ":", "None", ",", "'method'", ":", "'bls'", ",", "'kwargs'", ":", "{", "'startp'", ":", "startp", ",", "'endp'", ":", "endp", ",", "'stepsize'", ":", "stepsize", ",", "'mintransitduration'", ":", "mintransitduration", ",", "'maxtransitduration'", ":", "maxtransitduration", ",", "'nphasebins'", ":", "nphasebins", ",", "'autofreq'", ":", "autofreq", ",", "'periodepsilon'", ":", "periodepsilon", ",", "'nbestpeaks'", ":", "nbestpeaks", ",", "'sigclip'", ":", "sigclip", ",", "'magsarefluxes'", ":", "magsarefluxes", "}", "}", "sortedlspind", "=", "npargsort", "(", "finlsp", ")", "[", ":", ":", "-", "1", "]", "sortedlspperiods", "=", "finperiods", "[", "sortedlspind", "]", "sortedlspvals", "=", "finlsp", "[", "sortedlspind", "]", "# now get the nbestpeaks", "nbestperiods", ",", "nbestlspvals", ",", "peakcount", "=", "(", "[", "finperiods", "[", "bestperiodind", "]", "]", ",", "[", "finlsp", "[", "bestperiodind", "]", "]", ",", "1", ")", "prevperiod", "=", "sortedlspperiods", "[", "0", "]", "# find the best nbestpeaks in the lsp and their periods", "for", "period", ",", "lspval", "in", "zip", "(", "sortedlspperiods", ",", "sortedlspvals", ")", ":", "if", "peakcount", "==", "nbestpeaks", ":", "break", "perioddiff", "=", "abs", "(", "period", "-", "prevperiod", ")", "bestperiodsdiff", "=", "[", "abs", "(", "period", "-", "x", ")", "for", "x", "in", "nbestperiods", "]", "# this ensures that this period is different from the last", "# period and from all the other existing best periods by", "# periodepsilon to make sure we jump to an entire different", "# peak in the periodogram", "if", "(", "perioddiff", ">", "(", "periodepsilon", "*", "prevperiod", ")", "and", "all", "(", "x", ">", "(", "periodepsilon", "*", "period", ")", "for", "x", "in", "bestperiodsdiff", ")", ")", ":", "nbestperiods", ".", "append", "(", "period", ")", "nbestlspvals", ".", "append", "(", "lspval", ")", "peakcount", "=", "peakcount", "+", "1", "prevperiod", "=", "period", "# generate the return dict", "resultdict", "=", "{", "'bestperiod'", ":", "finperiods", "[", "bestperiodind", "]", ",", "'bestlspval'", ":", "finlsp", "[", "bestperiodind", "]", ",", "'nbestpeaks'", ":", "nbestpeaks", ",", "'nbestlspvals'", ":", "nbestlspvals", ",", "'nbestperiods'", ":", "nbestperiods", ",", "'lspvals'", ":", "lsp", ",", "'frequencies'", ":", "frequencies", ",", "'periods'", ":", "periods", ",", "'blsresult'", ":", "results", ",", "'stepsize'", ":", "stepsize", ",", "'nfreq'", ":", "nfreq", ",", "'nphasebins'", ":", "nphasebins", ",", "'mintransitduration'", ":", "mintransitduration", ",", "'maxtransitduration'", ":", 
"maxtransitduration", ",", "'method'", ":", "'bls'", ",", "'kwargs'", ":", "{", "'startp'", ":", "startp", ",", "'endp'", ":", "endp", ",", "'stepsize'", ":", "stepsize", ",", "'mintransitduration'", ":", "mintransitduration", ",", "'maxtransitduration'", ":", "maxtransitduration", ",", "'nphasebins'", ":", "nphasebins", ",", "'autofreq'", ":", "autofreq", ",", "'periodepsilon'", ":", "periodepsilon", ",", "'nbestpeaks'", ":", "nbestpeaks", ",", "'sigclip'", ":", "sigclip", ",", "'magsarefluxes'", ":", "magsarefluxes", "}", "}", "# get stats if requested", "if", "get_stats", ":", "resultdict", "[", "'stats'", "]", "=", "[", "]", "for", "bp", "in", "nbestperiods", ".", "copy", "(", ")", ":", "if", "verbose", ":", "LOGINFO", "(", "\"Getting stats for best period: %.6f\"", "%", "bp", ")", "this_pstats", "=", "bls_stats_singleperiod", "(", "times", ",", "mags", ",", "errs", ",", "bp", ",", "magsarefluxes", "=", "resultdict", "[", "'kwargs'", "]", "[", "'magsarefluxes'", "]", ",", "sigclip", "=", "resultdict", "[", "'kwargs'", "]", "[", "'sigclip'", "]", ",", "nphasebins", "=", "resultdict", "[", "'nphasebins'", "]", ",", "mintransitduration", "=", "resultdict", "[", "'mintransitduration'", "]", ",", "maxtransitduration", "=", "resultdict", "[", "'maxtransitduration'", "]", ",", "verbose", "=", "verbose", ",", ")", "resultdict", "[", "'stats'", "]", ".", "append", "(", "this_pstats", ")", "return", "resultdict", "else", ":", "LOGERROR", "(", "'no good detections for these times and mags, skipping...'", ")", "return", "{", "'bestperiod'", ":", "npnan", ",", "'bestlspval'", ":", "npnan", ",", "'nbestpeaks'", ":", "nbestpeaks", ",", "'nbestlspvals'", ":", "None", ",", "'nbestperiods'", ":", "None", ",", "'lspvals'", ":", "None", ",", "'periods'", ":", "None", ",", "'blsresult'", ":", "None", ",", "'stepsize'", ":", "stepsize", ",", "'nfreq'", ":", "None", ",", "'nphasebins'", ":", "None", ",", "'mintransitduration'", ":", "mintransitduration", ",", "'maxtransitduration'", ":", "maxtransitduration", ",", "'method'", ":", "'bls'", ",", "'kwargs'", ":", "{", "'startp'", ":", "startp", ",", "'endp'", ":", "endp", ",", "'stepsize'", ":", "stepsize", ",", "'mintransitduration'", ":", "mintransitduration", ",", "'maxtransitduration'", ":", "maxtransitduration", ",", "'nphasebins'", ":", "nphasebins", ",", "'autofreq'", ":", "autofreq", ",", "'periodepsilon'", ":", "periodepsilon", ",", "'nbestpeaks'", ":", "nbestpeaks", ",", "'sigclip'", ":", "sigclip", ",", "'magsarefluxes'", ":", "magsarefluxes", "}", "}" ]
Runs the Box Least Squares Fitting Search for transit-shaped signals. Based on eebls.f from Kovacs et al. 2002 and python-bls from Foreman-Mackey et al. 2015. Breaks up the full frequency space into chunks and passes them to parallel BLS workers. NOTE: the combined BLS spectrum produced by this function is not identical to that produced by running BLS in one shot for the entire frequency space. There are differences on the order of 1.0e-3 or so in the respective peak values, but peaks appear at the same frequencies for both methods. This is likely due to different aliasing caused by smaller chunks of the frequency space used by the parallel workers in this function. When in doubt, confirm results for this parallel implementation by comparing to those from the serial implementation above. Parameters ---------- times,mags,errs : np.array The magnitude/flux time-series to search for transits. magsarefluxes : bool If the input measurement values in `mags` and `errs` are in fluxes, set this to True. startp,endp : float The minimum and maximum periods to consider for the transit search. stepsize : float The step-size in frequency to use when constructing a frequency grid for the period search. mintransitduration,maxtransitduration : float The minimum and maximum transitdurations (in units of phase) to consider for the transit search. nphasebins : int The number of phase bins to use in the period search. autofreq : bool If this is True, the values of `stepsize` and `nphasebins` will be ignored, and these, along with a frequency-grid, will be determined based on the following relations:: nphasebins = int(ceil(2.0/mintransitduration)) if nphasebins > 3000: nphasebins = 3000 stepsize = 0.25*mintransitduration/(times.max()-times.min()) minfreq = 1.0/endp maxfreq = 1.0/startp nfreq = int(ceil((maxfreq - minfreq)/stepsize)) periodepsilon : float The fractional difference between successive values of 'best' periods when sorting by periodogram power to consider them as separate periods (as opposed to part of the same periodogram peak). This is used to avoid broad peaks in the periodogram and make sure the 'best' periods returned are all actually independent. nbestpeaks : int The number of 'best' peaks to return from the periodogram results, starting from the global maximum of the periodogram peak values. sigclip : float or int or sequence of two floats/ints or None If a single float or int, a symmetric sigma-clip will be performed using the number provided as the sigma-multiplier to cut out from the input time-series. If a list of two ints/floats is provided, the function will perform an 'asymmetric' sigma-clip. The first element in this list is the sigma value to use for fainter flux/mag values; the second element in this list is the sigma value to use for brighter flux/mag values. For example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma dimmings and greater than 3-sigma brightenings. Here the meaning of "dimming" and "brightening" is set by *physics* (not the magnitude system), which is why the `magsarefluxes` kwarg must be correctly set. If `sigclip` is None, no sigma-clipping will be performed, and the time-series (with non-finite elems removed) will be passed through to the output. verbose : bool If this is True, will indicate progress and details about the frequency grid used for the period search. nworkers : int or None The number of parallel workers to launch for period-search. If None, nworkers = NCPUS. 
get_stats : bool If True, runs :py:func:`.bls_stats_singleperiod` for each of the best periods in the output and injects the output into the output dict so you only have to run this function to get the periods and their stats. Returns ------- dict This function returns a dict, referred to as an `lspinfo` dict in other astrobase functions that operate on periodogram results. This is a standardized format across all astrobase period-finders, and is of the form below:: {'bestperiod': the best period value in the periodogram, 'bestlspval': the periodogram peak associated with the best period, 'nbestpeaks': the input value of nbestpeaks, 'nbestlspvals': nbestpeaks-size list of best period peak values, 'nbestperiods': nbestpeaks-size list of best periods, 'stats': list of stats dicts returned for each best period, 'lspvals': the full array of periodogram powers, 'frequencies': the full array of frequencies considered, 'periods': the full array of periods considered, 'blsresult': list of result dicts from eebls.f wrapper functions, 'stepsize': the actual stepsize used, 'nfreq': the actual nfreq used, 'nphasebins': the actual nphasebins used, 'mintransitduration': the input mintransitduration, 'maxtransitduration': the input maxtransitdurations, 'method':'bls' -> the name of the period-finder method, 'kwargs':{ dict of all of the input kwargs for record-keeping}}
[ "Runs", "the", "Box", "Least", "Squares", "Fitting", "Search", "for", "transit", "-", "shaped", "signals", "." ]
python
valid
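A hedged usage sketch for bls_parallel_pfind above; the synthetic box-transit light curve is invented, and the import path follows the record's module.

import numpy as np
from astrobase.periodbase.kbls import bls_parallel_pfind

rng = np.random.default_rng(42)
times = np.sort(rng.uniform(0.0, 90.0, 4000))           # 90 d baseline
mags = 12.0 + 0.005 * rng.standard_normal(times.size)   # flat baseline + noise
in_transit = (np.mod(times, 3.5) / 3.5) < 0.02           # crude 2% duty cycle on a 3.5 d period
mags[in_transit] += 0.01                                  # 10 mmag dimming during transit

result = bls_parallel_pfind(
    times, mags, np.full_like(times, 0.005),
    startp=1.0, endp=20.0, autofreq=True,
    nworkers=4, get_stats=False, verbose=False,
)
print(result['bestperiod'], result['nbestperiods'])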
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/process.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/process.py#L2620-L2641
def is_address_reserved(self, address): """ Determines if an address belongs to a reserved page. @note: Returns always C{False} for kernel mode addresses. @type address: int @param address: Memory address to query. @rtype: bool @return: C{True} if the address belongs to a reserved page. @raise WindowsError: An exception is raised on error. """ try: mbi = self.mquery(address) except WindowsError: e = sys.exc_info()[1] if e.winerror == win32.ERROR_INVALID_PARAMETER: return False raise return mbi.is_reserved()
[ "def", "is_address_reserved", "(", "self", ",", "address", ")", ":", "try", ":", "mbi", "=", "self", ".", "mquery", "(", "address", ")", "except", "WindowsError", ":", "e", "=", "sys", ".", "exc_info", "(", ")", "[", "1", "]", "if", "e", ".", "winerror", "==", "win32", ".", "ERROR_INVALID_PARAMETER", ":", "return", "False", "raise", "return", "mbi", ".", "is_reserved", "(", ")" ]
Determines if an address belongs to a reserved page. @note: Returns always C{False} for kernel mode addresses. @type address: int @param address: Memory address to query. @rtype: bool @return: C{True} if the address belongs to a reserved page. @raise WindowsError: An exception is raised on error.
[ "Determines", "if", "an", "address", "belongs", "to", "a", "reserved", "page", "." ]
python
train
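A hedged, Windows-only sketch for the is_address_reserved record above; the PID and address are placeholders, and it assumes the usual winappdbg pattern of constructing a Process from a PID.

from winappdbg import Process

pid = 1234                       # placeholder process id
proc = Process(pid)
address = 0x7FFE0000             # placeholder user-mode address
print(proc.is_address_reserved(address))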
google/grr
grr/server/grr_response_server/databases/mysql_paths.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mysql_paths.py#L193-L199
def MultiWritePathInfos(self, path_infos): """Writes a collection of path info records for specified clients.""" try: self._MultiWritePathInfos(path_infos) except MySQLdb.IntegrityError as error: client_ids = list(iterkeys(path_infos)) raise db.AtLeastOneUnknownClientError(client_ids=client_ids, cause=error)
[ "def", "MultiWritePathInfos", "(", "self", ",", "path_infos", ")", ":", "try", ":", "self", ".", "_MultiWritePathInfos", "(", "path_infos", ")", "except", "MySQLdb", ".", "IntegrityError", "as", "error", ":", "client_ids", "=", "list", "(", "iterkeys", "(", "path_infos", ")", ")", "raise", "db", ".", "AtLeastOneUnknownClientError", "(", "client_ids", "=", "client_ids", ",", "cause", "=", "error", ")" ]
Writes a collection of path info records for specified clients.
[ "Writes", "a", "collection", "of", "path", "info", "records", "for", "specified", "clients", "." ]
python
train
infothrill/python-dyndnsc
dyndnsc/conf.py
https://github.com/infothrill/python-dyndnsc/blob/2196d48aa6098da9835a7611fbdb0b5f0fbf51e4/dyndnsc/conf.py#L76-L126
def collect_config(cfg): """ Construct configuration dictionary from configparser. Resolves presets and returns a dictionary containing: .. code-block:: bash { "client_name": { "detector": ("detector_name", detector_opts), "updater": [ ("updater_name", updater_opts), ... ] }, ... } :param cfg: ConfigParser """ collected_configs = {} _updater_str = "updater" _detector_str = "detector" _dash = "-" for client_name, client_cfg_dict in _iraw_client_configs(cfg): detector_name = None detector_options = {} updater_name = None updater_options = {} collected_config = {} for k in client_cfg_dict: if k.startswith(_detector_str + _dash): detector_options[ k.replace(_detector_str + _dash, "")] = client_cfg_dict[k] elif k == _updater_str: updater_name = client_cfg_dict.get(k) elif k == _detector_str: detector_name = client_cfg_dict.get(k) elif k.startswith(_updater_str + _dash): updater_options[ k.replace(_updater_str + _dash, "")] = client_cfg_dict[k] else: # options passed "as is" to the dyndnsc client collected_config[k] = client_cfg_dict[k] collected_config[_detector_str] = [(detector_name, detector_options)] collected_config[_updater_str] = [(updater_name, updater_options)] collected_configs[client_name] = collected_config return collected_configs
[ "def", "collect_config", "(", "cfg", ")", ":", "collected_configs", "=", "{", "}", "_updater_str", "=", "\"updater\"", "_detector_str", "=", "\"detector\"", "_dash", "=", "\"-\"", "for", "client_name", ",", "client_cfg_dict", "in", "_iraw_client_configs", "(", "cfg", ")", ":", "detector_name", "=", "None", "detector_options", "=", "{", "}", "updater_name", "=", "None", "updater_options", "=", "{", "}", "collected_config", "=", "{", "}", "for", "k", "in", "client_cfg_dict", ":", "if", "k", ".", "startswith", "(", "_detector_str", "+", "_dash", ")", ":", "detector_options", "[", "k", ".", "replace", "(", "_detector_str", "+", "_dash", ",", "\"\"", ")", "]", "=", "client_cfg_dict", "[", "k", "]", "elif", "k", "==", "_updater_str", ":", "updater_name", "=", "client_cfg_dict", ".", "get", "(", "k", ")", "elif", "k", "==", "_detector_str", ":", "detector_name", "=", "client_cfg_dict", ".", "get", "(", "k", ")", "elif", "k", ".", "startswith", "(", "_updater_str", "+", "_dash", ")", ":", "updater_options", "[", "k", ".", "replace", "(", "_updater_str", "+", "_dash", ",", "\"\"", ")", "]", "=", "client_cfg_dict", "[", "k", "]", "else", ":", "# options passed \"as is\" to the dyndnsc client", "collected_config", "[", "k", "]", "=", "client_cfg_dict", "[", "k", "]", "collected_config", "[", "_detector_str", "]", "=", "[", "(", "detector_name", ",", "detector_options", ")", "]", "collected_config", "[", "_updater_str", "]", "=", "[", "(", "updater_name", ",", "updater_options", ")", "]", "collected_configs", "[", "client_name", "]", "=", "collected_config", "return", "collected_configs" ]
Construct configuration dictionary from configparser. Resolves presets and returns a dictionary containing: .. code-block:: bash { "client_name": { "detector": ("detector_name", detector_opts), "updater": [ ("updater_name", updater_opts), ... ] }, ... } :param cfg: ConfigParser
[ "Construct", "configuration", "dictionary", "from", "configparser", "." ]
python
train
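A standalone illustration of the key-prefix convention that collect_config() above applies to each client section; the client dict and option names are invented, and this is a simplified restatement rather than the dyndnsc implementation.

client_cfg = {
    "detector": "iface",
    "detector-iface": "eth0",
    "updater": "dummy",
    "updater-token": "secret",
    "interval": "300",
}

# "detector-<opt>" and "updater-<opt>" keys become option dicts for the named plugin
detector_opts = {k.split("-", 1)[1]: v for k, v in client_cfg.items()
                 if k.startswith("detector-")}
updater_opts = {k.split("-", 1)[1]: v for k, v in client_cfg.items()
                if k.startswith("updater-")}

collected = {
    "interval": "300",                                   # passed through "as is"
    "detector": [(client_cfg["detector"], detector_opts)],
    "updater": [(client_cfg["updater"], updater_opts)],
}
print(collected)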
quantopian/alphalens
alphalens/utils.py
https://github.com/quantopian/alphalens/blob/d43eac871bb061e956df936794d3dd514da99e44/alphalens/utils.py#L856-L862
def get_forward_returns_columns(columns): """ Utility that detects and returns the columns that are forward returns """ pattern = re.compile(r"^(\d+([Dhms]|ms|us|ns))+$", re.IGNORECASE) valid_columns = [(pattern.match(col) is not None) for col in columns] return columns[valid_columns]
[ "def", "get_forward_returns_columns", "(", "columns", ")", ":", "pattern", "=", "re", ".", "compile", "(", "r\"^(\\d+([Dhms]|ms|us|ns))+$\"", ",", "re", ".", "IGNORECASE", ")", "valid_columns", "=", "[", "(", "pattern", ".", "match", "(", "col", ")", "is", "not", "None", ")", "for", "col", "in", "columns", "]", "return", "columns", "[", "valid_columns", "]" ]
Utility that detects and returns the columns that are forward returns
[ "Utility", "that", "detects", "and", "returns", "the", "columns", "that", "are", "forward", "returns" ]
python
train
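A minimal usage sketch of the column filter; the mixed column labels are made up, and the accepted forms follow the regular expression above (a digit count followed by D/h/m/s/ms/us/ns, possibly repeated).

import pandas as pd
from alphalens.utils import get_forward_returns_columns

columns = pd.Index(["1D", "5D", "10D", "factor", "factor_quantile", "1h30m"])
print(get_forward_returns_columns(columns))
# Index(['1D', '5D', '10D', '1h30m'], dtype='object')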
econ-ark/HARK
HARK/utilities.py
https://github.com/econ-ark/HARK/blob/3d184153a189e618a87c9540df1cd12044039cc5/HARK/utilities.py#L1106-L1147
def kernelRegression(x,y,bot=None,top=None,N=500,h=None): ''' Performs a non-parametric Nadaraya-Watson 1D kernel regression on given data with optionally specified range, number of points, and kernel bandwidth. Parameters ---------- x : np.array The independent variable in the kernel regression. y : np.array The dependent variable in the kernel regression. bot : float Minimum value of interest in the regression; defaults to min(x). top : float Maximum value of interest in the regression; defaults to max(y). N : int Number of points to compute. h : float The bandwidth of the (Epanechnikov) kernel. To-do: GENERALIZE. Returns ------- regression : LinearInterp A piecewise locally linear kernel regression: y = f(x). ''' # Fix omitted inputs if bot is None: bot = np.min(x) if top is None: top = np.max(x) if h is None: h = 2.0*(top - bot)/float(N) # This is an arbitrary default # Construct a local linear approximation x_vec = np.linspace(bot,top,num=N) y_vec = np.zeros_like(x_vec) + np.nan for j in range(N): x_here = x_vec[j] weights = epanechnikovKernel(x,x_here,h) y_vec[j] = np.dot(weights,y)/np.sum(weights) regression = interp1d(x_vec,y_vec,bounds_error=False,assume_sorted=True) return regression
[ "def", "kernelRegression", "(", "x", ",", "y", ",", "bot", "=", "None", ",", "top", "=", "None", ",", "N", "=", "500", ",", "h", "=", "None", ")", ":", "# Fix omitted inputs", "if", "bot", "is", "None", ":", "bot", "=", "np", ".", "min", "(", "x", ")", "if", "top", "is", "None", ":", "top", "=", "np", ".", "max", "(", "x", ")", "if", "h", "is", "None", ":", "h", "=", "2.0", "*", "(", "top", "-", "bot", ")", "/", "float", "(", "N", ")", "# This is an arbitrary default", "# Construct a local linear approximation", "x_vec", "=", "np", ".", "linspace", "(", "bot", ",", "top", ",", "num", "=", "N", ")", "y_vec", "=", "np", ".", "zeros_like", "(", "x_vec", ")", "+", "np", ".", "nan", "for", "j", "in", "range", "(", "N", ")", ":", "x_here", "=", "x_vec", "[", "j", "]", "weights", "=", "epanechnikovKernel", "(", "x", ",", "x_here", ",", "h", ")", "y_vec", "[", "j", "]", "=", "np", ".", "dot", "(", "weights", ",", "y", ")", "/", "np", ".", "sum", "(", "weights", ")", "regression", "=", "interp1d", "(", "x_vec", ",", "y_vec", ",", "bounds_error", "=", "False", ",", "assume_sorted", "=", "True", ")", "return", "regression" ]
Performs a non-parametric Nadaraya-Watson 1D kernel regression on given data with optionally specified range, number of points, and kernel bandwidth.

Parameters
----------
x : np.array
    The independent variable in the kernel regression.
y : np.array
    The dependent variable in the kernel regression.
bot : float
    Minimum value of interest in the regression; defaults to min(x).
top : float
    Maximum value of interest in the regression; defaults to max(x).
N : int
    Number of points to compute.
h : float
    The bandwidth of the (Epanechnikov) kernel. To-do: GENERALIZE.

Returns
-------
regression : LinearInterp
    A piecewise locally linear kernel regression: y = f(x).
[ "Performs", "a", "non", "-", "parametric", "Nadaraya", "-", "Watson", "1D", "kernel", "regression", "on", "given", "data", "with", "optionally", "specified", "range", "number", "of", "points", "and", "kernel", "bandwidth", "." ]
python
train
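A small usage sketch on synthetic data, assuming the helper is imported from HARK.utilities as the path above suggests; the bandwidth and evaluation grid are arbitrary.

import numpy as np
from HARK.utilities import kernelRegression

rng = np.random.RandomState(0)
x = rng.uniform(0.0, 10.0, size=500)
y = np.sin(x) + rng.normal(scale=0.3, size=500)

# Fit the Nadaraya-Watson regression on [0, 10] with an Epanechnikov bandwidth of 0.5.
f = kernelRegression(x, y, bot=0.0, top=10.0, N=200, h=0.5)
print(f(np.linspace(0.5, 9.5, 5)))  # smoothed estimates of sin(x) at a few points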
mitsei/dlkit
dlkit/json_/authorization/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/authorization/objects.py#L104-L122
def get_resource(self): """Gets the ``Resource`` for this authorization. return: (osid.resource.Resource) - the ``Resource`` raise: IllegalState - ``has_resource()`` is ``false`` raise: OperationFailed - unable to complete request *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.resource.Resource.get_avatar_template if not bool(self._my_map['resourceId']): raise errors.IllegalState('this Authorization has no resource') mgr = self._get_provider_manager('RESOURCE') if not mgr.supports_resource_lookup(): raise errors.OperationFailed('Resource does not support Resource lookup') lookup_session = mgr.get_resource_lookup_session(proxy=getattr(self, "_proxy", None)) lookup_session.use_federated_bin_view() osid_object = lookup_session.get_resource(self.get_resource_id()) return osid_object
[ "def", "get_resource", "(", "self", ")", ":", "# Implemented from template for osid.resource.Resource.get_avatar_template", "if", "not", "bool", "(", "self", ".", "_my_map", "[", "'resourceId'", "]", ")", ":", "raise", "errors", ".", "IllegalState", "(", "'this Authorization has no resource'", ")", "mgr", "=", "self", ".", "_get_provider_manager", "(", "'RESOURCE'", ")", "if", "not", "mgr", ".", "supports_resource_lookup", "(", ")", ":", "raise", "errors", ".", "OperationFailed", "(", "'Resource does not support Resource lookup'", ")", "lookup_session", "=", "mgr", ".", "get_resource_lookup_session", "(", "proxy", "=", "getattr", "(", "self", ",", "\"_proxy\"", ",", "None", ")", ")", "lookup_session", ".", "use_federated_bin_view", "(", ")", "osid_object", "=", "lookup_session", ".", "get_resource", "(", "self", ".", "get_resource_id", "(", ")", ")", "return", "osid_object" ]
Gets the ``Resource`` for this authorization. return: (osid.resource.Resource) - the ``Resource`` raise: IllegalState - ``has_resource()`` is ``false`` raise: OperationFailed - unable to complete request *compliance: mandatory -- This method must be implemented.*
[ "Gets", "the", "Resource", "for", "this", "authorization", "." ]
python
train
biolink/ontobio
ontobio/ontol.py
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/ontol.py#L491-L526
def descendants(self, node, relations=None, reflexive=False): """ Returns all descendants of specified node. The default implementation is to use networkx, but some implementations of the Ontology class may use a database or service backed implementation, for large graphs. Arguments --------- node : str identifier for node in ontology reflexive : bool if true, return query node in graph relations : list relation (object property) IDs used to filter Returns ------- list[str] descendant node IDs """ if reflexive: decs = self.descendants(node, relations, reflexive=False) decs.append(node) return decs g = None if relations is None: g = self.get_graph() else: g = self.get_filtered_graph(relations) if node in g: return list(nx.descendants(g, node)) else: return []
[ "def", "descendants", "(", "self", ",", "node", ",", "relations", "=", "None", ",", "reflexive", "=", "False", ")", ":", "if", "reflexive", ":", "decs", "=", "self", ".", "descendants", "(", "node", ",", "relations", ",", "reflexive", "=", "False", ")", "decs", ".", "append", "(", "node", ")", "return", "decs", "g", "=", "None", "if", "relations", "is", "None", ":", "g", "=", "self", ".", "get_graph", "(", ")", "else", ":", "g", "=", "self", ".", "get_filtered_graph", "(", "relations", ")", "if", "node", "in", "g", ":", "return", "list", "(", "nx", ".", "descendants", "(", "g", ",", "node", ")", ")", "else", ":", "return", "[", "]" ]
Returns all descendants of specified node. The default implementation is to use networkx, but some implementations of the Ontology class may use a database or service backed implementation, for large graphs. Arguments --------- node : str identifier for node in ontology reflexive : bool if true, return query node in graph relations : list relation (object property) IDs used to filter Returns ------- list[str] descendant node IDs
[ "Returns", "all", "descendants", "of", "specified", "node", "." ]
python
train
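A usage sketch assuming an Ontology handle obtained through ontobio's OntologyFactory (the usual entry point, though it may fetch the ontology from a remote service or a local cache); the term identifier is just an example.

from ontobio.ontol_factory import OntologyFactory

ont = OntologyFactory().create("go")  # may download or reuse a cached copy of GO

term = "GO:0008150"  # biological_process
subclasses = ont.descendants(term, relations=["subClassOf"])
with_self = ont.descendants(term, reflexive=True)  # includes the query term itself
print(len(subclasses), len(with_self))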
Azure/azure-storage-python
azure-storage-file/azure/storage/file/fileservice.py
https://github.com/Azure/azure-storage-python/blob/52327354b192cbcf6b7905118ec6b5d57fa46275/azure-storage-file/azure/storage/file/fileservice.py#L1555-L1597
def create_file(self, share_name, directory_name, file_name, content_length, content_settings=None, metadata=None, timeout=None): ''' Creates a new file. See create_file_from_* for high level functions that handle the creation and upload of large files with automatic chunking and progress notifications. :param str share_name: Name of existing share. :param str directory_name: The path to the directory. :param str file_name: Name of file to create or update. :param int content_length: Length of the file in bytes. :param ~azure.storage.file.models.ContentSettings content_settings: ContentSettings object used to set file properties. :param metadata: Name-value pairs associated with the file as metadata. :type metadata: dict(str, str) :param int timeout: The timeout parameter is expressed in seconds. ''' _validate_not_none('share_name', share_name) _validate_not_none('file_name', file_name) _validate_not_none('content_length', content_length) request = HTTPRequest() request.method = 'PUT' request.host_locations = self._get_host_locations() request.path = _get_path(share_name, directory_name, file_name) request.query = {'timeout': _int_to_str(timeout)} request.headers = { 'x-ms-content-length': _to_str(content_length), 'x-ms-type': 'file' } _add_metadata_headers(metadata, request) if content_settings is not None: request.headers.update(content_settings._to_headers()) self._perform_request(request)
[ "def", "create_file", "(", "self", ",", "share_name", ",", "directory_name", ",", "file_name", ",", "content_length", ",", "content_settings", "=", "None", ",", "metadata", "=", "None", ",", "timeout", "=", "None", ")", ":", "_validate_not_none", "(", "'share_name'", ",", "share_name", ")", "_validate_not_none", "(", "'file_name'", ",", "file_name", ")", "_validate_not_none", "(", "'content_length'", ",", "content_length", ")", "request", "=", "HTTPRequest", "(", ")", "request", ".", "method", "=", "'PUT'", "request", ".", "host_locations", "=", "self", ".", "_get_host_locations", "(", ")", "request", ".", "path", "=", "_get_path", "(", "share_name", ",", "directory_name", ",", "file_name", ")", "request", ".", "query", "=", "{", "'timeout'", ":", "_int_to_str", "(", "timeout", ")", "}", "request", ".", "headers", "=", "{", "'x-ms-content-length'", ":", "_to_str", "(", "content_length", ")", ",", "'x-ms-type'", ":", "'file'", "}", "_add_metadata_headers", "(", "metadata", ",", "request", ")", "if", "content_settings", "is", "not", "None", ":", "request", ".", "headers", ".", "update", "(", "content_settings", ".", "_to_headers", "(", ")", ")", "self", ".", "_perform_request", "(", "request", ")" ]
Creates a new file. See create_file_from_* for high level functions that handle the creation and upload of large files with automatic chunking and progress notifications. :param str share_name: Name of existing share. :param str directory_name: The path to the directory. :param str file_name: Name of file to create or update. :param int content_length: Length of the file in bytes. :param ~azure.storage.file.models.ContentSettings content_settings: ContentSettings object used to set file properties. :param metadata: Name-value pairs associated with the file as metadata. :type metadata: dict(str, str) :param int timeout: The timeout parameter is expressed in seconds.
[ "Creates", "a", "new", "file", "." ]
python
train
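A usage sketch against the azure-storage-file package this module ships in; the account credentials, share, and file names are placeholders. Note that create_file only reserves a file of the given length — content is written afterwards, for example with update_range or the create_file_from_* helpers the docstring points to.

from azure.storage.file import FileService, ContentSettings

service = FileService(account_name="myaccount", account_key="<key>")

service.create_file(
    share_name="myshare",
    directory_name="logs",
    file_name="app.log",
    content_length=1024,  # size in bytes; content is uploaded separately
    content_settings=ContentSettings(content_type="text/plain"),
    metadata={"origin": "example"},
)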
fastai/fastai
fastai/vision/gan.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/vision/gan.py#L226-L228
def wgan(cls, data:DataBunch, generator:nn.Module, critic:nn.Module, switcher:Callback=None, clip:float=0.01, **learn_kwargs): "Create a WGAN from `data`, `generator` and `critic`." return cls(data, generator, critic, NoopLoss(), WassersteinLoss(), switcher=switcher, clip=clip, **learn_kwargs)
[ "def", "wgan", "(", "cls", ",", "data", ":", "DataBunch", ",", "generator", ":", "nn", ".", "Module", ",", "critic", ":", "nn", ".", "Module", ",", "switcher", ":", "Callback", "=", "None", ",", "clip", ":", "float", "=", "0.01", ",", "*", "*", "learn_kwargs", ")", ":", "return", "cls", "(", "data", ",", "generator", ",", "critic", ",", "NoopLoss", "(", ")", ",", "WassersteinLoss", "(", ")", ",", "switcher", "=", "switcher", ",", "clip", "=", "clip", ",", "*", "*", "learn_kwargs", ")" ]
Create a WGAN from `data`, `generator` and `critic`.
[ "Create", "a", "WGAN", "from", "data", "generator", "and", "critic", "." ]
python
train
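A usage sketch assuming fastai v1's GAN API, where this classmethod lives on GANLearner; the data pipeline loosely follows fastai's WGAN tutorial, and the image folder, sizes, and hyperparameters are placeholders that will likely need adjusting.

from fastai.vision import *
from fastai.vision.gan import GANItemList, GANLearner, basic_generator, basic_critic

# Hypothetical folder of training images, cropped to 64x64; noise_sz is the generator input size.
data = (GANItemList.from_folder("path/to/images", noise_sz=100)
        .split_none()
        .label_from_func(noop)
        .transform(tfms=[[crop_pad(size=64)], []], size=64, tfm_y=True)
        .databunch(bs=64))

generator = basic_generator(in_size=64, n_channels=3, n_extra_layers=1)
critic = basic_critic(in_size=64, n_channels=3, n_extra_layers=1)

learn = GANLearner.wgan(data, generator, critic, clip=0.01)
learn.fit(1, 2e-4)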
openeemeter/eemeter
eemeter/caltrack/usage_per_day.py
https://github.com/openeemeter/eemeter/blob/e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0/eemeter/caltrack/usage_per_day.py#L1018-L1136
def get_single_cdd_only_candidate_model( data, minimum_non_zero_cdd, minimum_total_cdd, beta_cdd_maximum_p_value, weights_col, balance_point, ): """ Return a single candidate cdd-only model for a particular balance point. Parameters ---------- data : :any:`pandas.DataFrame` A DataFrame containing at least the column ``meter_value`` and ``cdd_<balance_point>`` DataFrames of this form can be made using the :any:`eemeter.create_caltrack_daily_design_matrix` or :any:`eemeter.create_caltrack_billing_design_matrix` methods. minimum_non_zero_cdd : :any:`int` Minimum allowable number of non-zero cooling degree day values. minimum_total_cdd : :any:`float` Minimum allowable total sum of cooling degree day values. beta_cdd_maximum_p_value : :any:`float` The maximum allowable p-value of the beta cdd parameter. weights_col : :any:`str` or None The name of the column (if any) in ``data`` to use as weights. balance_point : :any:`float` The cooling balance point for this model. Returns ------- candidate_model : :any:`CalTRACKUsagePerDayCandidateModel` A single cdd-only candidate model, with any associated warnings. """ model_type = "cdd_only" cdd_column = "cdd_%s" % balance_point formula = "meter_value ~ %s" % cdd_column if weights_col is None: weights = 1 else: weights = data[weights_col] period_days = weights degree_day_warnings = [] degree_day_warnings.extend( get_total_degree_day_too_low_warning( model_type, balance_point, "cdd", data[cdd_column], period_days, minimum_total_cdd, ) ) degree_day_warnings.extend( get_too_few_non_zero_degree_day_warning( model_type, balance_point, "cdd", data[cdd_column], minimum_non_zero_cdd ) ) if len(degree_day_warnings) > 0: return CalTRACKUsagePerDayCandidateModel( model_type=model_type, formula=formula, status="NOT ATTEMPTED", warnings=degree_day_warnings, ) try: model = smf.wls(formula=formula, data=data, weights=weights) except Exception as e: return get_fit_failed_candidate_model(model_type, formula) result = model.fit() r_squared_adj = result.rsquared_adj beta_cdd_p_value = result.pvalues[cdd_column] # CalTrack 3.3.1.3 model_params = { "intercept": result.params["Intercept"], "beta_cdd": result.params[cdd_column], "cooling_balance_point": balance_point, } model_warnings = [] # CalTrack 3.4.3.2 for parameter in ["intercept", "beta_cdd"]: model_warnings.extend( get_parameter_negative_warning(model_type, model_params, parameter) ) model_warnings.extend( get_parameter_p_value_too_high_warning( model_type, model_params, parameter, beta_cdd_p_value, beta_cdd_maximum_p_value, ) ) if len(model_warnings) > 0: status = "DISQUALIFIED" else: status = "QUALIFIED" return CalTRACKUsagePerDayCandidateModel( model_type=model_type, formula=formula, status=status, warnings=model_warnings, model_params=model_params, model=model, result=result, r_squared_adj=r_squared_adj, )
[ "def", "get_single_cdd_only_candidate_model", "(", "data", ",", "minimum_non_zero_cdd", ",", "minimum_total_cdd", ",", "beta_cdd_maximum_p_value", ",", "weights_col", ",", "balance_point", ",", ")", ":", "model_type", "=", "\"cdd_only\"", "cdd_column", "=", "\"cdd_%s\"", "%", "balance_point", "formula", "=", "\"meter_value ~ %s\"", "%", "cdd_column", "if", "weights_col", "is", "None", ":", "weights", "=", "1", "else", ":", "weights", "=", "data", "[", "weights_col", "]", "period_days", "=", "weights", "degree_day_warnings", "=", "[", "]", "degree_day_warnings", ".", "extend", "(", "get_total_degree_day_too_low_warning", "(", "model_type", ",", "balance_point", ",", "\"cdd\"", ",", "data", "[", "cdd_column", "]", ",", "period_days", ",", "minimum_total_cdd", ",", ")", ")", "degree_day_warnings", ".", "extend", "(", "get_too_few_non_zero_degree_day_warning", "(", "model_type", ",", "balance_point", ",", "\"cdd\"", ",", "data", "[", "cdd_column", "]", ",", "minimum_non_zero_cdd", ")", ")", "if", "len", "(", "degree_day_warnings", ")", ">", "0", ":", "return", "CalTRACKUsagePerDayCandidateModel", "(", "model_type", "=", "model_type", ",", "formula", "=", "formula", ",", "status", "=", "\"NOT ATTEMPTED\"", ",", "warnings", "=", "degree_day_warnings", ",", ")", "try", ":", "model", "=", "smf", ".", "wls", "(", "formula", "=", "formula", ",", "data", "=", "data", ",", "weights", "=", "weights", ")", "except", "Exception", "as", "e", ":", "return", "get_fit_failed_candidate_model", "(", "model_type", ",", "formula", ")", "result", "=", "model", ".", "fit", "(", ")", "r_squared_adj", "=", "result", ".", "rsquared_adj", "beta_cdd_p_value", "=", "result", ".", "pvalues", "[", "cdd_column", "]", "# CalTrack 3.3.1.3", "model_params", "=", "{", "\"intercept\"", ":", "result", ".", "params", "[", "\"Intercept\"", "]", ",", "\"beta_cdd\"", ":", "result", ".", "params", "[", "cdd_column", "]", ",", "\"cooling_balance_point\"", ":", "balance_point", ",", "}", "model_warnings", "=", "[", "]", "# CalTrack 3.4.3.2", "for", "parameter", "in", "[", "\"intercept\"", ",", "\"beta_cdd\"", "]", ":", "model_warnings", ".", "extend", "(", "get_parameter_negative_warning", "(", "model_type", ",", "model_params", ",", "parameter", ")", ")", "model_warnings", ".", "extend", "(", "get_parameter_p_value_too_high_warning", "(", "model_type", ",", "model_params", ",", "parameter", ",", "beta_cdd_p_value", ",", "beta_cdd_maximum_p_value", ",", ")", ")", "if", "len", "(", "model_warnings", ")", ">", "0", ":", "status", "=", "\"DISQUALIFIED\"", "else", ":", "status", "=", "\"QUALIFIED\"", "return", "CalTRACKUsagePerDayCandidateModel", "(", "model_type", "=", "model_type", ",", "formula", "=", "formula", ",", "status", "=", "status", ",", "warnings", "=", "model_warnings", ",", "model_params", "=", "model_params", ",", "model", "=", "model", ",", "result", "=", "result", ",", "r_squared_adj", "=", "r_squared_adj", ",", ")" ]
Return a single candidate cdd-only model for a particular balance point. Parameters ---------- data : :any:`pandas.DataFrame` A DataFrame containing at least the column ``meter_value`` and ``cdd_<balance_point>`` DataFrames of this form can be made using the :any:`eemeter.create_caltrack_daily_design_matrix` or :any:`eemeter.create_caltrack_billing_design_matrix` methods. minimum_non_zero_cdd : :any:`int` Minimum allowable number of non-zero cooling degree day values. minimum_total_cdd : :any:`float` Minimum allowable total sum of cooling degree day values. beta_cdd_maximum_p_value : :any:`float` The maximum allowable p-value of the beta cdd parameter. weights_col : :any:`str` or None The name of the column (if any) in ``data`` to use as weights. balance_point : :any:`float` The cooling balance point for this model. Returns ------- candidate_model : :any:`CalTRACKUsagePerDayCandidateModel` A single cdd-only candidate model, with any associated warnings.
[ "Return", "a", "single", "candidate", "cdd", "-", "only", "model", "for", "a", "particular", "balance", "point", "." ]
python
train
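A sketch with synthetic daily data, assuming the function is importable from eemeter.caltrack.usage_per_day as the path above suggests; in real use the data frame would come from eemeter's design-matrix helpers, and the thresholds below are arbitrary.

import numpy as np
import pandas as pd
from eemeter.caltrack.usage_per_day import get_single_cdd_only_candidate_model

# Synthetic year of daily usage that rises with cooling degree days above 65F.
rng = np.random.RandomState(1)
cdd_65 = rng.uniform(0.0, 12.0, size=365)
data = pd.DataFrame({
    "meter_value": 5.0 + 0.8 * cdd_65 + rng.normal(scale=0.5, size=365),
    "cdd_65": cdd_65,
})

candidate = get_single_cdd_only_candidate_model(
    data,
    minimum_non_zero_cdd=10,
    minimum_total_cdd=20,
    beta_cdd_maximum_p_value=0.1,
    weights_col=None,
    balance_point=65,
)
print(candidate.status)        # expected: QUALIFIED for this well-behaved data
print(candidate.model_params)  # intercept near 5, beta_cdd near 0.8, cooling_balance_point 65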
rueckstiess/mtools
mtools/mlaunch/mlaunch.py
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlaunch/mlaunch.py#L1433-L1446
def _create_paths(self, basedir, name=None): """Create datadir and subdir paths.""" if name: datapath = os.path.join(basedir, name) else: datapath = basedir dbpath = os.path.join(datapath, 'db') if not os.path.exists(dbpath): os.makedirs(dbpath) if self.args['verbose']: print('creating directory: %s' % dbpath) return datapath
[ "def", "_create_paths", "(", "self", ",", "basedir", ",", "name", "=", "None", ")", ":", "if", "name", ":", "datapath", "=", "os", ".", "path", ".", "join", "(", "basedir", ",", "name", ")", "else", ":", "datapath", "=", "basedir", "dbpath", "=", "os", ".", "path", ".", "join", "(", "datapath", ",", "'db'", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "dbpath", ")", ":", "os", ".", "makedirs", "(", "dbpath", ")", "if", "self", ".", "args", "[", "'verbose'", "]", ":", "print", "(", "'creating directory: %s'", "%", "dbpath", ")", "return", "datapath" ]
Create datadir and subdir paths.
[ "Create", "datadir", "and", "subdir", "paths", "." ]
python
train
AkihikoITOH/capybara
capybara/virtualenv/lib/python2.7/site.py
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site.py#L133-L141
def addbuilddir(): """Append ./build/lib.<platform> in case we're running in the build dir (especially for Guido :-)""" from distutils.util import get_platform s = "build/lib.%s-%.3s" % (get_platform(), sys.version) if hasattr(sys, 'gettotalrefcount'): s += '-pydebug' s = os.path.join(os.path.dirname(sys.path[-1]), s) sys.path.append(s)
[ "def", "addbuilddir", "(", ")", ":", "from", "distutils", ".", "util", "import", "get_platform", "s", "=", "\"build/lib.%s-%.3s\"", "%", "(", "get_platform", "(", ")", ",", "sys", ".", "version", ")", "if", "hasattr", "(", "sys", ",", "'gettotalrefcount'", ")", ":", "s", "+=", "'-pydebug'", "s", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "sys", ".", "path", "[", "-", "1", "]", ")", ",", "s", ")", "sys", ".", "path", ".", "append", "(", "s", ")" ]
Append ./build/lib.<platform> in case we're running in the build dir (especially for Guido :-)
[ "Append", ".", "/", "build", "/", "lib", ".", "<platform", ">", "in", "case", "we", "re", "running", "in", "the", "build", "dir", "(", "especially", "for", "Guido", ":", "-", ")" ]
python
test
proycon/pynlpl
pynlpl/formats/folia.py
https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L4601-L4611
def findspan(self, *words): """Returns the span element which spans over the specified words or morphemes. See also: :meth:`Word.findspans` """ for span in self.select(AbstractSpanAnnotation,None,True): if tuple(span.wrefs()) == words: return span raise NoSuchAnnotation
[ "def", "findspan", "(", "self", ",", "*", "words", ")", ":", "for", "span", "in", "self", ".", "select", "(", "AbstractSpanAnnotation", ",", "None", ",", "True", ")", ":", "if", "tuple", "(", "span", ".", "wrefs", "(", ")", ")", "==", "words", ":", "return", "span", "raise", "NoSuchAnnotation" ]
Returns the span element which spans over the specified words or morphemes. See also: :meth:`Word.findspans`
[ "Returns", "the", "span", "element", "which", "spans", "over", "the", "specified", "words", "or", "morphemes", "." ]
python
train
ansible/ansible-runner
ansible_runner/runner_config.py
https://github.com/ansible/ansible-runner/blob/8ce485480a5d0b602428d9d64a752e06fb46cdb8/ansible_runner/runner_config.py#L278-L288
def prepare_command(self): """ Determines if the literal ``ansible`` or ``ansible-playbook`` commands are given and if not calls :py:meth:`ansible_runner.runner_config.RunnerConfig.generate_ansible_command` """ try: cmdline_args = self.loader.load_file('args', string_types) self.command = shlex.split(cmdline_args.decode('utf-8')) self.execution_mode = ExecutionMode.RAW except ConfigurationError: self.command = self.generate_ansible_command()
[ "def", "prepare_command", "(", "self", ")", ":", "try", ":", "cmdline_args", "=", "self", ".", "loader", ".", "load_file", "(", "'args'", ",", "string_types", ")", "self", ".", "command", "=", "shlex", ".", "split", "(", "cmdline_args", ".", "decode", "(", "'utf-8'", ")", ")", "self", ".", "execution_mode", "=", "ExecutionMode", ".", "RAW", "except", "ConfigurationError", ":", "self", ".", "command", "=", "self", ".", "generate_ansible_command", "(", ")" ]
Determines if the literal ``ansible`` or ``ansible-playbook`` commands are given and if not calls :py:meth:`ansible_runner.runner_config.RunnerConfig.generate_ansible_command`
[ "Determines", "if", "the", "literal", "ansible", "or", "ansible", "-", "playbook", "commands", "are", "given", "and", "if", "not", "calls", ":", "py", ":", "meth", ":", "ansible_runner", ".", "runner_config", ".", "RunnerConfig", ".", "generate_ansible_command" ]
python
train
pandas-dev/pandas
pandas/core/dtypes/common.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/common.py#L1666-L1722
def is_extension_type(arr): """ Check whether an array-like is of a pandas extension class instance. Extension classes include categoricals, pandas sparse objects (i.e. classes represented within the pandas library and not ones external to it like scipy sparse matrices), and datetime-like arrays. Parameters ---------- arr : array-like The array-like to check. Returns ------- boolean Whether or not the array-like is of a pandas extension class instance. Examples -------- >>> is_extension_type([1, 2, 3]) False >>> is_extension_type(np.array([1, 2, 3])) False >>> >>> cat = pd.Categorical([1, 2, 3]) >>> >>> is_extension_type(cat) True >>> is_extension_type(pd.Series(cat)) True >>> is_extension_type(pd.SparseArray([1, 2, 3])) True >>> is_extension_type(pd.SparseSeries([1, 2, 3])) True >>> >>> from scipy.sparse import bsr_matrix >>> is_extension_type(bsr_matrix([1, 2, 3])) False >>> is_extension_type(pd.DatetimeIndex([1, 2, 3])) False >>> is_extension_type(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern")) True >>> >>> dtype = DatetimeTZDtype("ns", tz="US/Eastern") >>> s = pd.Series([], dtype=dtype) >>> is_extension_type(s) True """ if is_categorical(arr): return True elif is_sparse(arr): return True elif is_datetime64tz_dtype(arr): return True return False
[ "def", "is_extension_type", "(", "arr", ")", ":", "if", "is_categorical", "(", "arr", ")", ":", "return", "True", "elif", "is_sparse", "(", "arr", ")", ":", "return", "True", "elif", "is_datetime64tz_dtype", "(", "arr", ")", ":", "return", "True", "return", "False" ]
Check whether an array-like is of a pandas extension class instance. Extension classes include categoricals, pandas sparse objects (i.e. classes represented within the pandas library and not ones external to it like scipy sparse matrices), and datetime-like arrays. Parameters ---------- arr : array-like The array-like to check. Returns ------- boolean Whether or not the array-like is of a pandas extension class instance. Examples -------- >>> is_extension_type([1, 2, 3]) False >>> is_extension_type(np.array([1, 2, 3])) False >>> >>> cat = pd.Categorical([1, 2, 3]) >>> >>> is_extension_type(cat) True >>> is_extension_type(pd.Series(cat)) True >>> is_extension_type(pd.SparseArray([1, 2, 3])) True >>> is_extension_type(pd.SparseSeries([1, 2, 3])) True >>> >>> from scipy.sparse import bsr_matrix >>> is_extension_type(bsr_matrix([1, 2, 3])) False >>> is_extension_type(pd.DatetimeIndex([1, 2, 3])) False >>> is_extension_type(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern")) True >>> >>> dtype = DatetimeTZDtype("ns", tz="US/Eastern") >>> s = pd.Series([], dtype=dtype) >>> is_extension_type(s) True
[ "Check", "whether", "an", "array", "-", "like", "is", "of", "a", "pandas", "extension", "class", "instance", "." ]
python
train
annoviko/pyclustering
pyclustering/container/kdtree.py
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/container/kdtree.py#L547-L560
def children(self, node_parent): """! @brief Returns list of children of node. @param[in] node_parent (node): Node whose children are required. @return (list) Children of node. If node haven't got any child then None is returned. """ if node_parent.left is not None: yield node_parent.left if node_parent.right is not None: yield node_parent.right
[ "def", "children", "(", "self", ",", "node_parent", ")", ":", "if", "node_parent", ".", "left", "is", "not", "None", ":", "yield", "node_parent", ".", "left", "if", "node_parent", ".", "right", "is", "not", "None", ":", "yield", "node_parent", ".", "right" ]
!
@brief Returns list of children of node.

@param[in] node_parent (node): Node whose children are required.

@return (list) Children of node. If the node does not have any children then None is returned.
[ "!" ]
python
valid
ilevkivskyi/typing_inspect
typing_inspect.py
https://github.com/ilevkivskyi/typing_inspect/blob/fd81278cc440b6003f8298bcb22d5bc0f82ee3cd/typing_inspect.py#L93-L116
def is_tuple_type(tp): """Test if the type is a generic tuple type, including subclasses excluding non-generic classes. Examples:: is_tuple_type(int) == False is_tuple_type(tuple) == False is_tuple_type(Tuple) == True is_tuple_type(Tuple[str, int]) == True class MyClass(Tuple[str, int]): ... is_tuple_type(MyClass) == True For more general tests use issubclass(..., tuple), for more precise test (excluding subclasses) use:: get_origin(tp) is tuple # Tuple prior to Python 3.7 """ if NEW_TYPING: return (tp is Tuple or isinstance(tp, _GenericAlias) and tp.__origin__ is tuple or isinstance(tp, type) and issubclass(tp, Generic) and issubclass(tp, tuple)) return type(tp) is TupleMeta
[ "def", "is_tuple_type", "(", "tp", ")", ":", "if", "NEW_TYPING", ":", "return", "(", "tp", "is", "Tuple", "or", "isinstance", "(", "tp", ",", "_GenericAlias", ")", "and", "tp", ".", "__origin__", "is", "tuple", "or", "isinstance", "(", "tp", ",", "type", ")", "and", "issubclass", "(", "tp", ",", "Generic", ")", "and", "issubclass", "(", "tp", ",", "tuple", ")", ")", "return", "type", "(", "tp", ")", "is", "TupleMeta" ]
Test if the type is a generic tuple type, including subclasses excluding non-generic classes. Examples:: is_tuple_type(int) == False is_tuple_type(tuple) == False is_tuple_type(Tuple) == True is_tuple_type(Tuple[str, int]) == True class MyClass(Tuple[str, int]): ... is_tuple_type(MyClass) == True For more general tests use issubclass(..., tuple), for more precise test (excluding subclasses) use:: get_origin(tp) is tuple # Tuple prior to Python 3.7
[ "Test", "if", "the", "type", "is", "a", "generic", "tuple", "type", "including", "subclasses", "excluding", "non", "-", "generic", "classes", ".", "Examples", "::" ]
python
train
scot-dev/scot
scot/varbase.py
https://github.com/scot-dev/scot/blob/48598b79d4400dad893b134cd2194715511facda/scot/varbase.py#L114-L156
def from_yw(self, acms): """Determine VAR model from autocorrelation matrices by solving the Yule-Walker equations. Parameters ---------- acms : array, shape (n_lags, n_channels, n_channels) acms[l] contains the autocorrelation matrix at lag l. The highest lag must equal the model order. Returns ------- self : :class:`VAR` The :class:`VAR` object to facilitate method chaining (see usage example). """ if len(acms) != self.p + 1: raise ValueError("Number of autocorrelation matrices ({}) does not" " match model order ({}) + 1.".format(len(acms), self.p)) n_channels = acms[0].shape[0] acm = lambda l: acms[l] if l >= 0 else acms[-l].T r = np.concatenate(acms[1:], 0) rr = np.array([[acm(m-k) for k in range(self.p)] for m in range(self.p)]) rr = np.concatenate(np.concatenate(rr, -2), -1) c = sp.linalg.solve(rr, r) # calculate residual covariance r = acm(0) for k in range(self.p): bs = k * n_channels r -= np.dot(c[bs:bs + n_channels, :].T, acm(k + 1)) self.coef = np.concatenate([c[m::n_channels, :] for m in range(n_channels)]).T self.rescov = r return self
[ "def", "from_yw", "(", "self", ",", "acms", ")", ":", "if", "len", "(", "acms", ")", "!=", "self", ".", "p", "+", "1", ":", "raise", "ValueError", "(", "\"Number of autocorrelation matrices ({}) does not\"", "\" match model order ({}) + 1.\"", ".", "format", "(", "len", "(", "acms", ")", ",", "self", ".", "p", ")", ")", "n_channels", "=", "acms", "[", "0", "]", ".", "shape", "[", "0", "]", "acm", "=", "lambda", "l", ":", "acms", "[", "l", "]", "if", "l", ">=", "0", "else", "acms", "[", "-", "l", "]", ".", "T", "r", "=", "np", ".", "concatenate", "(", "acms", "[", "1", ":", "]", ",", "0", ")", "rr", "=", "np", ".", "array", "(", "[", "[", "acm", "(", "m", "-", "k", ")", "for", "k", "in", "range", "(", "self", ".", "p", ")", "]", "for", "m", "in", "range", "(", "self", ".", "p", ")", "]", ")", "rr", "=", "np", ".", "concatenate", "(", "np", ".", "concatenate", "(", "rr", ",", "-", "2", ")", ",", "-", "1", ")", "c", "=", "sp", ".", "linalg", ".", "solve", "(", "rr", ",", "r", ")", "# calculate residual covariance", "r", "=", "acm", "(", "0", ")", "for", "k", "in", "range", "(", "self", ".", "p", ")", ":", "bs", "=", "k", "*", "n_channels", "r", "-=", "np", ".", "dot", "(", "c", "[", "bs", ":", "bs", "+", "n_channels", ",", ":", "]", ".", "T", ",", "acm", "(", "k", "+", "1", ")", ")", "self", ".", "coef", "=", "np", ".", "concatenate", "(", "[", "c", "[", "m", ":", ":", "n_channels", ",", ":", "]", "for", "m", "in", "range", "(", "n_channels", ")", "]", ")", ".", "T", "self", ".", "rescov", "=", "r", "return", "self" ]
Determine VAR model from autocorrelation matrices by solving the Yule-Walker equations. Parameters ---------- acms : array, shape (n_lags, n_channels, n_channels) acms[l] contains the autocorrelation matrix at lag l. The highest lag must equal the model order. Returns ------- self : :class:`VAR` The :class:`VAR` object to facilitate method chaining (see usage example).
[ "Determine", "VAR", "model", "from", "autocorrelation", "matrices", "by", "solving", "the", "Yule", "-", "Walker", "equations", "." ]
python
train
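A sketch that recovers a known VAR(1) from sample autocorrelation matrices of simulated data; it assumes SCoT's concrete VAR class in scot.var inherits this method, and that acms[l] is the lag-l matrix E[x_t x_{t+l}^T] (the comments mark this convention explicitly).

import numpy as np
from scot.var import VAR

# Simulate a stable bivariate VAR(1): x[t] = A x[t-1] + e[t], with e ~ N(0, I).
rng = np.random.RandomState(42)
A = np.array([[0.5, 0.2],
              [0.0, 0.4]])
n = 20000
x = np.zeros((n, 2))
for t in range(1, n):
    x[t] = A.dot(x[t - 1]) + rng.normal(size=2)

# Sample autocorrelation matrices for lags 0..p, here taken as E[x_t x_{t+l}^T].
p = 1
acms = [x[:n - l].T.dot(x[l:]) / (n - l) for l in range(p + 1)]

var = VAR(p).from_yw(acms)
print(var.coef)    # should be close to A
print(var.rescov)  # should be close to the identity noise covariance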
awslabs/aws-sam-cli
samcli/cli/options.py
https://github.com/awslabs/aws-sam-cli/blob/c05af5e7378c6f05f7d82ad3f0bca17204177db6/samcli/cli/options.py#L11-L27
def debug_option(f): """ Configures --debug option for CLI :param f: Callback Function to be passed to Click """ def callback(ctx, param, value): state = ctx.ensure_object(Context) state.debug = value return value return click.option('--debug', expose_value=False, is_flag=True, envvar="SAM_DEBUG", help='Turn on debug logging to print debug message generated by SAM CLI.', callback=callback)(f)
[ "def", "debug_option", "(", "f", ")", ":", "def", "callback", "(", "ctx", ",", "param", ",", "value", ")", ":", "state", "=", "ctx", ".", "ensure_object", "(", "Context", ")", "state", ".", "debug", "=", "value", "return", "value", "return", "click", ".", "option", "(", "'--debug'", ",", "expose_value", "=", "False", ",", "is_flag", "=", "True", ",", "envvar", "=", "\"SAM_DEBUG\"", ",", "help", "=", "'Turn on debug logging to print debug message generated by SAM CLI.'", ",", "callback", "=", "callback", ")", "(", "f", ")" ]
Configures --debug option for CLI :param f: Callback Function to be passed to Click
[ "Configures", "--", "debug", "option", "for", "CLI" ]
python
train
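A sketch of how the decorator is typically stacked on a Click command; the deploy command is hypothetical, and since expose_value=False the --debug flag never reaches the command body — it only flips the debug flag on SAM CLI's own Context object.

import click
from samcli.cli.options import debug_option


@click.command()
@debug_option
def deploy():
    """Hypothetical command; run as `python thisfile.py --debug`."""
    click.echo("deploying...")


if __name__ == "__main__":
    deploy()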
nagius/snmp_passpersist
snmp_passpersist.py
https://github.com/nagius/snmp_passpersist/blob/8cc584d2e90c920ae98a318164a55bde209a18f7/snmp_passpersist.py#L231-L234
def add_cnt_64bit(self,oid,value,label=None):
    """Short helper to add a 64 bit counter value to the MIB subtree."""
    # Truncate integer to 64 bits max
    self.add_oid_entry(oid,'Counter64',int(value)%18446744073709551615,label=label)
[ "def", "add_cnt_64bit", "(", "self", ",", "oid", ",", "value", ",", "label", "=", "None", ")", ":", "# Truncate integer to 64 bits max", "self", ".", "add_oid_entry", "(", "oid", ",", "'Counter64'", ",", "int", "(", "value", ")", "%", "18446744073709551615", ",", "label", "=", "label", ")" ]
Short helper to add a 64 bit counter value to the MIB subtree.
[ "Short", "helper", "to", "add", "a", "64", "bit", "counter", "value", "to", "the", "MIB", "subtree", "." ]
python
train
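A usage sketch assuming the module's PassPersist class and the usual snmpd pass_persist workflow; the base OID and counter value are placeholders, and start() is meant to be driven by snmpd rather than run interactively.

import snmp_passpersist as snmp

pp = snmp.PassPersist("1.3.6.1.3.53.8")  # hypothetical base OID

def update():
    # Registers <base_oid>.1.0 as a Counter64; the value is reduced into range
    # exactly as the helper above does.
    pp.add_cnt_64bit("1.0", 123456789012345, label="bytesTotal")

pp.start(update, 30)  # refresh the subtree every 30 seconds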
Shinichi-Nakagawa/pitchpx
pitchpx/game/inning.py
https://github.com/Shinichi-Nakagawa/pitchpx/blob/5747402a0b3416f5e910b479e100df858f0b6440/pitchpx/game/inning.py#L351-L365
def result(cls, ab, pa, pitch_list): """ At Bat Result :param ab: at bat object(type:Beautifulsoup) :param pa: atbat data for plate appearance :param pitch_list: Pitching data :return: pa result value(dict) """ atbat = OrderedDict() atbat['ball_ct'] = MlbamUtil.get_attribute_stats(ab, 'b', int, None) atbat['strike_ct'] = MlbamUtil.get_attribute_stats(ab, 's', int, None) atbat['pitch_seq'] = ''.join([pitch['pitch_res'] for pitch in pitch_list]) atbat['pitch_type_seq'] = '|'.join([pitch['pitch_type'] for pitch in pitch_list]) atbat['battedball_cd'] = RetroSheet.battedball_cd(pa['event_cd'], pa['event_tx'], pa['ab_des']) return atbat
[ "def", "result", "(", "cls", ",", "ab", ",", "pa", ",", "pitch_list", ")", ":", "atbat", "=", "OrderedDict", "(", ")", "atbat", "[", "'ball_ct'", "]", "=", "MlbamUtil", ".", "get_attribute_stats", "(", "ab", ",", "'b'", ",", "int", ",", "None", ")", "atbat", "[", "'strike_ct'", "]", "=", "MlbamUtil", ".", "get_attribute_stats", "(", "ab", ",", "'s'", ",", "int", ",", "None", ")", "atbat", "[", "'pitch_seq'", "]", "=", "''", ".", "join", "(", "[", "pitch", "[", "'pitch_res'", "]", "for", "pitch", "in", "pitch_list", "]", ")", "atbat", "[", "'pitch_type_seq'", "]", "=", "'|'", ".", "join", "(", "[", "pitch", "[", "'pitch_type'", "]", "for", "pitch", "in", "pitch_list", "]", ")", "atbat", "[", "'battedball_cd'", "]", "=", "RetroSheet", ".", "battedball_cd", "(", "pa", "[", "'event_cd'", "]", ",", "pa", "[", "'event_tx'", "]", ",", "pa", "[", "'ab_des'", "]", ")", "return", "atbat" ]
At Bat Result :param ab: at bat object(type:Beautifulsoup) :param pa: atbat data for plate appearance :param pitch_list: Pitching data :return: pa result value(dict)
[ "At", "Bat", "Result", ":", "param", "ab", ":", "at", "bat", "object", "(", "type", ":", "Beautifulsoup", ")", ":", "param", "pa", ":", "atbat", "data", "for", "plate", "appearance", ":", "param", "pitch_list", ":", "Pitching", "data", ":", "return", ":", "pa", "result", "value", "(", "dict", ")" ]
python
train
O365/python-o365
O365/mailbox.py
https://github.com/O365/python-o365/blob/02a71cf3775cc6a3c042e003365d6a07c8c75a73/O365/mailbox.py#L405-L432
def copy_folder(self, to_folder): """ Copy this folder and it's contents to into another folder :param to_folder: the destination Folder/folder_id to copy into :type to_folder: mailbox.Folder or str :return: The new folder after copying :rtype: mailbox.Folder or None """ to_folder_id = to_folder.folder_id if isinstance(to_folder, Folder) else to_folder if self.root or not self.folder_id or not to_folder_id: return None url = self.build_url( self._endpoints.get('copy_folder').format(id=self.folder_id)) response = self.con.post(url, data={self._cc('destinationId'): to_folder_id}) if not response: return None folder = response.json() self_class = getattr(self, 'folder_constructor', type(self)) # Everything received from cloud must be passed as self._cloud_data_key return self_class(con=self.con, main_resource=self.main_resource, **{self._cloud_data_key: folder})
[ "def", "copy_folder", "(", "self", ",", "to_folder", ")", ":", "to_folder_id", "=", "to_folder", ".", "folder_id", "if", "isinstance", "(", "to_folder", ",", "Folder", ")", "else", "to_folder", "if", "self", ".", "root", "or", "not", "self", ".", "folder_id", "or", "not", "to_folder_id", ":", "return", "None", "url", "=", "self", ".", "build_url", "(", "self", ".", "_endpoints", ".", "get", "(", "'copy_folder'", ")", ".", "format", "(", "id", "=", "self", ".", "folder_id", ")", ")", "response", "=", "self", ".", "con", ".", "post", "(", "url", ",", "data", "=", "{", "self", ".", "_cc", "(", "'destinationId'", ")", ":", "to_folder_id", "}", ")", "if", "not", "response", ":", "return", "None", "folder", "=", "response", ".", "json", "(", ")", "self_class", "=", "getattr", "(", "self", ",", "'folder_constructor'", ",", "type", "(", "self", ")", ")", "# Everything received from cloud must be passed as self._cloud_data_key", "return", "self_class", "(", "con", "=", "self", ".", "con", ",", "main_resource", "=", "self", ".", "main_resource", ",", "*", "*", "{", "self", ".", "_cloud_data_key", ":", "folder", "}", ")" ]
Copy this folder and its contents into another folder

:param to_folder: the destination Folder/folder_id to copy into
:type to_folder: mailbox.Folder or str
:return: The new folder after copying
:rtype: mailbox.Folder or None
[ "Copy", "this", "folder", "and", "its", "contents", "into", "another", "folder" ]
python
train
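A usage sketch assuming the O365 Account/Mailbox flow; the credentials, scopes, and folder names are placeholders.

from O365 import Account

account = Account(("client_id", "client_secret"))
if not account.is_authenticated:
    account.authenticate(scopes=["basic", "message_all"])

mailbox = account.mailbox()
inbox = mailbox.inbox_folder()
archive = mailbox.get_folder(folder_name="Archive")

copied = inbox.copy_folder(archive)               # accepts a Folder...
# copied = inbox.copy_folder(archive.folder_id)   # ...or a folder id string
print(copied.name if copied else "copy failed")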