Dataset columns:
  repo: string (lengths 7-54)
  path: string (lengths 4-192)
  url: string (lengths 87-284)
  code: string (lengths 78-104k)
  code_tokens: list
  docstring: string (lengths 1-46.9k)
  docstring_tokens: list
  language: string (1 distinct value)
  partition: string (3 distinct values)
Azure/azure-cli-extensions
src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage/v2018_03_28/queue/queueservice.py
https://github.com/Azure/azure-cli-extensions/blob/3d4854205b0f0d882f688cfa12383d14506c2e35/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage/v2018_03_28/queue/queueservice.py#L636-L652
def exists(self, queue_name, timeout=None):
    '''
    Returns a boolean indicating whether the queue exists.

    :param str queue_name:
        The name of queue to check for existence.
    :param int timeout:
        The server timeout, expressed in seconds.
    :return: A boolean indicating whether the queue exists.
    :rtype: bool
    '''
    try:
        self.get_queue_metadata(queue_name, timeout=timeout)
        return True
    except AzureHttpError as ex:
        _dont_fail_not_exist(ex)
        return False
[ "def", "exists", "(", "self", ",", "queue_name", ",", "timeout", "=", "None", ")", ":", "try", ":", "self", ".", "get_queue_metadata", "(", "queue_name", ",", "timeout", "=", "timeout", ")", "return", "True", "except", "AzureHttpError", "as", "ex", ":", "_dont_fail_not_exist", "(", "ex", ")", "return", "False" ]
Returns a boolean indicating whether the queue exists. :param str queue_name: The name of queue to check for existence. :param int timeout: The server timeout, expressed in seconds. :return: A boolean indicating whether the queue exists. :rtype: bool
[ "Returns", "a", "boolean", "indicating", "whether", "the", "queue", "exists", "." ]
python
train
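A minimal usage sketch for the method above, assuming the legacy azure-storage QueueService client that this vendored module mirrors; the account name, key, and queue name are placeholders.

from azure.storage.queue import QueueService  # legacy azure-storage package (pre-v12 SDK)

svc = QueueService(account_name='myaccount', account_key='...')  # placeholder credentials
if not svc.exists('task-queue'):
    svc.create_queue('task-queue')  # exists() swallows the not-found error instead of raising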
cs50/check50
check50/internal.py
https://github.com/cs50/check50/blob/42c1f0c36baa6a24f69742d74551a9ea7a5ceb33/check50/internal.py#L63-L105
def load_config(check_dir):
    """
    Load configuration file from ``check_dir / ".cs50.yaml"``, applying
    defaults to unspecified values.

    :param check_dir: directory from which to load config file
    :type check_dir: str / Path
    :rtype: dict
    """
    # Defaults for top-level keys
    options = {
        "checks": "__init__.py",
        "dependencies": None,
        "translations": None
    }

    # Defaults for translation keys
    translation_options = {
        "localedir": "locale",
        "domain": "messages",
    }

    config_file = Path(check_dir) / ".cs50.yaml"
    with open(config_file) as f:
        config = lib50.config.load(f.read(), "check50")

    if isinstance(config, dict):
        options.update(config)

    if options["translations"]:
        if isinstance(options["translations"], dict):
            translation_options.update(options["translations"])
        options["translations"] = translation_options

    if isinstance(options["checks"], dict):
        # Compile simple checks
        with open(check_dir / "__init__.py", "w") as f:
            f.write(simple.compile(options["checks"]))
        options["checks"] = "__init__.py"

    return options
[ "def", "load_config", "(", "check_dir", ")", ":", "# Defaults for top-level keys", "options", "=", "{", "\"checks\"", ":", "\"__init__.py\"", ",", "\"dependencies\"", ":", "None", ",", "\"translations\"", ":", "None", "}", "# Defaults for translation keys", "translation_options", "=", "{", "\"localedir\"", ":", "\"locale\"", ",", "\"domain\"", ":", "\"messages\"", ",", "}", "config_file", "=", "Path", "(", "check_dir", ")", "/", "\".cs50.yaml\"", "with", "open", "(", "config_file", ")", "as", "f", ":", "config", "=", "lib50", ".", "config", ".", "load", "(", "f", ".", "read", "(", ")", ",", "\"check50\"", ")", "if", "isinstance", "(", "config", ",", "dict", ")", ":", "options", ".", "update", "(", "config", ")", "if", "options", "[", "\"translations\"", "]", ":", "if", "isinstance", "(", "options", "[", "\"translations\"", "]", ",", "dict", ")", ":", "translation_options", ".", "update", "(", "options", "[", "\"translations\"", "]", ")", "options", "[", "\"translations\"", "]", "=", "translation_options", "if", "isinstance", "(", "options", "[", "\"checks\"", "]", ",", "dict", ")", ":", "# Compile simple checks", "with", "open", "(", "check_dir", "/", "\"__init__.py\"", ",", "\"w\"", ")", "as", "f", ":", "f", ".", "write", "(", "simple", ".", "compile", "(", "options", "[", "\"checks\"", "]", ")", ")", "options", "[", "\"checks\"", "]", "=", "\"__init__.py\"", "return", "options" ]
Load configuration file from ``check_dir / ".cs50.yaml"``, applying defaults to unspecified values. :param check_dir: directory from which to load config file :type check_dir: str / Path :rtype: dict
[ "Load", "configuration", "file", "from", "check_dir", "/", ".", "cs50", ".", "yaml", "applying", "defaults", "to", "unspecified", "values", "." ]
python
train
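A hedged sketch of the defaults-merging behaviour described above; the directory layout and YAML content are hypothetical and only illustrate how unspecified keys fall back to the defaults.

# Suppose checks/exam/.cs50.yaml contains:
#   check50:
#     checks: exam.py
#     translations:
#       localedir: locale
# load_config("checks/exam") would then merge the file over the defaults and return
# {"checks": "exam.py", "dependencies": None,
#  "translations": {"localedir": "locale", "domain": "messages"}}
options = load_config("checks/exam")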
deepmind/sonnet
sonnet/python/modules/util.py
https://github.com/deepmind/sonnet/blob/00612ca3178964d86b556e062694d808ff81fcca/sonnet/python/modules/util.py#L491-L506
def _get_vars_to_collections(variables):
  """Returns a dict mapping variables to the collections they appear in."""
  var_to_collections = collections.defaultdict(lambda: [])
  if isinstance(variables, dict):
    variables = list(v for _, v in variable_map_items(variables))
  for graph in set(v.graph for v in variables):
    for collection_name in list(graph.collections):
      entries = set(entry for entry in graph.get_collection(collection_name)
                    if isinstance(entry, tf.Variable))
      # For legacy reasons, tf.GraphKeys.GLOBAL_VARIABLES == "variables".
      # Correcting for this here, to avoid confusion.
      if collection_name == tf.GraphKeys.GLOBAL_VARIABLES:
        collection_name = "global_variables"
      for var in entries.intersection(variables):
        var_to_collections[var].append(collection_name)
  return var_to_collections
[ "def", "_get_vars_to_collections", "(", "variables", ")", ":", "var_to_collections", "=", "collections", ".", "defaultdict", "(", "lambda", ":", "[", "]", ")", "if", "isinstance", "(", "variables", ",", "dict", ")", ":", "variables", "=", "list", "(", "v", "for", "_", ",", "v", "in", "variable_map_items", "(", "variables", ")", ")", "for", "graph", "in", "set", "(", "v", ".", "graph", "for", "v", "in", "variables", ")", ":", "for", "collection_name", "in", "list", "(", "graph", ".", "collections", ")", ":", "entries", "=", "set", "(", "entry", "for", "entry", "in", "graph", ".", "get_collection", "(", "collection_name", ")", "if", "isinstance", "(", "entry", ",", "tf", ".", "Variable", ")", ")", "# For legacy reasons, tf.GraphKeys.GLOBAL_VARIABLES == \"variables\".", "# Correcting for this here, to avoid confusion.", "if", "collection_name", "==", "tf", ".", "GraphKeys", ".", "GLOBAL_VARIABLES", ":", "collection_name", "=", "\"global_variables\"", "for", "var", "in", "entries", ".", "intersection", "(", "variables", ")", ":", "var_to_collections", "[", "var", "]", ".", "append", "(", "collection_name", ")", "return", "var_to_collections" ]
Returns a dict mapping variables to the collections they appear in.
[ "Returns", "a", "dict", "mapping", "variables", "to", "the", "collections", "they", "appear", "in", "." ]
python
train
ralphbean/bugwarrior
bugwarrior/db.py
https://github.com/ralphbean/bugwarrior/blob/b2a5108f7b40cb0c437509b64eaa28f941f7ac8b/bugwarrior/db.py#L91-L97
def hamdist(str1, str2):
    """Count the # of differences between equal length strings str1 and str2"""
    diffs = 0
    for ch1, ch2 in zip(str1, str2):
        if ch1 != ch2:
            diffs += 1
    return diffs
[ "def", "hamdist", "(", "str1", ",", "str2", ")", ":", "diffs", "=", "0", "for", "ch1", ",", "ch2", "in", "zip", "(", "str1", ",", "str2", ")", ":", "if", "ch1", "!=", "ch2", ":", "diffs", "+=", "1", "return", "diffs" ]
Count the # of differences between equal length strings str1 and str2
[ "Count", "the", "#", "of", "differences", "between", "equal", "length", "strings", "str1", "and", "str2" ]
python
test
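Two quick checks of the function above (assuming hamdist from this entry is in scope); both are classic Hamming-distance examples.

assert hamdist("karolin", "kathrin") == 3   # three positions differ
assert hamdist("1011101", "1001001") == 2   # works on any equal-length sequences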
cni/MRS
MRS/analysis.py
https://github.com/cni/MRS/blob/16098b3cf4830780efd787fee9efa46513850283/MRS/analysis.py#L809-L845
def simple_auc(spectrum, f_ppm, center=3.00, bandwidth=0.30):
    """
    Calculates area under the curve (no fitting)

    Parameters
    ----------
    spectrum : array of shape (n_transients, n_points)
       Typically the difference of the on/off spectra in each transient.

    center, bandwidth : float
       Determine the limits for the part of the spectrum for which we want
       to calculate the AUC.
       e.g. if center = 3.0, bandwidth = 0.3, lower and upper bounds will be
       2.85 and 3.15 respectively (center +/- bandwidth/2).

    Notes
    -----
    Default center and bandwidth are 3.0 and 0.3ppm respectively
    because of Sanacora 1999 pg 1045:
    "The GABA signal was integrated over a 0.30-ppm bandwidth at 3.00ppm"

    Ref: Sanacora, G., Mason, G. F., Rothman, D. L., Behar, K. L., Hyder, F.,
    Petroff, O. A., ... & Krystal, J. H. (1999). Reduced cortical
    {gamma}-aminobutyric acid levels in depressed patients determined by proton
    magnetic resonance spectroscopy. Archives of general psychiatry, 56(11),
    1043.

    """
    range = np.max(f_ppm) - np.min(f_ppm)
    dx = float(range) / float(len(f_ppm))
    # Slice indices must be integers; np.floor/np.ceil return floats.
    lb = int(np.floor((np.max(f_ppm) - float(center) + float(bandwidth) / 2) / dx))
    ub = int(np.ceil((np.max(f_ppm) - float(center) - float(bandwidth) / 2) / dx))
    auc = trapz(spectrum[ub:lb].real, dx=dx)
    return auc, ub, lb
[ "def", "simple_auc", "(", "spectrum", ",", "f_ppm", ",", "center", "=", "3.00", ",", "bandwidth", "=", "0.30", ")", ":", "range", "=", "np", ".", "max", "(", "f_ppm", ")", "-", "np", ".", "min", "(", "f_ppm", ")", "dx", "=", "float", "(", "range", ")", "/", "float", "(", "len", "(", "f_ppm", ")", ")", "lb", "=", "np", ".", "floor", "(", "(", "np", ".", "max", "(", "f_ppm", ")", "-", "float", "(", "center", ")", "+", "float", "(", "bandwidth", ")", "/", "2", ")", "/", "dx", ")", "ub", "=", "np", ".", "ceil", "(", "(", "np", ".", "max", "(", "f_ppm", ")", "-", "float", "(", "center", ")", "-", "float", "(", "bandwidth", ")", "/", "2", ")", "/", "dx", ")", "auc", "=", "trapz", "(", "spectrum", "[", "ub", ":", "lb", "]", ".", "real", ",", "dx", "=", "dx", ")", "return", "auc", ",", "ub", ",", "lb" ]
Calculates area under the curve (no fitting) Parameters ---------- spectrum : array of shape (n_transients, n_points) Typically the difference of the on/off spectra in each transient. center, bandwidth : float Determine the limits for the part of the spectrum for which we want to calculate the AUC. e.g. if center = 3.0, bandwidth = 0.3, lower and upper bounds will be 2.85 and 3.15 respectively (center +/- bandwidth/2). Notes ----- Default center and bandwidth are 3.0 and 0.3ppm respectively because of Sanacora 1999 pg 1045: "The GABA signal was integrated over a 0.30-ppm bandwidth at 3.00ppm" Ref: Sanacora, G., Mason, G. F., Rothman, D. L., Behar, K. L., Hyder, F., Petroff, O. A., ... & Krystal, J. H. (1999). Reduced cortical {gamma}-aminobutyric acid levels in depressed patients determined by proton magnetic resonance spectroscopy. Archives of general psychiatry, 56(11), 1043.
[ "Calculates", "area", "under", "the", "curve", "(", "no", "fitting", ")" ]
python
train
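A small synthetic example of calling the function above; the spectrum and ppm axis are made up, and simple_auc together with numpy's np alias is assumed to be in scope.

import numpy as np

f_ppm = np.linspace(4.0, 2.0, 512)          # descending ppm axis, as in MRS spectra
spectrum = np.ones(512, dtype=complex)       # flat dummy spectrum
auc, ub, lb = simple_auc(spectrum, f_ppm)    # integrates the default 2.85-3.15 ppm band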
eventbrite/eventbrite-sdk-python
eventbrite/access_methods.py
https://github.com/eventbrite/eventbrite-sdk-python/blob/f2e5dc5aa1aa3e45766de13f16fd65722163d91a/eventbrite/access_methods.py#L356-L363
def post_event_access_code(self, id, access_code_id, **data):
    """
    POST /events/:id/access_codes/:access_code_id/
    Updates an access code; returns the result as a :format:`access_code` as
    the key ``access_code``.
    """
    # Use distinct placeholders so access_code_id is actually interpolated into the path.
    return self.post("/events/{0}/access_codes/{1}/".format(id, access_code_id), data=data)
[ "def", "post_event_access_code", "(", "self", ",", "id", ",", "access_code_id", ",", "*", "*", "data", ")", ":", "return", "self", ".", "post", "(", "\"/events/{0}/access_codes/{0}/\"", ".", "format", "(", "id", ",", "access_code_id", ")", ",", "data", "=", "data", ")" ]
POST /events/:id/access_codes/:access_code_id/ Updates an access code; returns the result as a :format:`access_code` as the key ``access_code``.
[ "POST", "/", "events", "/", ":", "id", "/", "access_codes", "/", ":", "access_code_id", "/", "Updates", "an", "access", "code", ";", "returns", "the", "result", "as", "a", ":", "format", ":", "access_code", "as", "the", "key", "access_code", "." ]
python
train
Telefonica/toolium
toolium/driver_wrappers_pool.py
https://github.com/Telefonica/toolium/blob/56847c243b3a98876df74c184b75e43f8810e475/toolium/driver_wrappers_pool.py#L95-L111
def capture_screenshots(cls, name):
    """Capture a screenshot in each driver

    :param name: screenshot name suffix
    """
    screenshot_name = '{}_driver{}' if len(cls.driver_wrappers) > 1 else '{}'
    driver_index = 1
    for driver_wrapper in cls.driver_wrappers:
        if not driver_wrapper.driver:
            continue
        from toolium.jira import add_attachment
        try:
            add_attachment(driver_wrapper.utils.capture_screenshot(screenshot_name.format(name, driver_index)))
        except Exception:
            # Capture exceptions to avoid errors in teardown method due to session timeouts
            pass
        driver_index += 1
[ "def", "capture_screenshots", "(", "cls", ",", "name", ")", ":", "screenshot_name", "=", "'{}_driver{}'", "if", "len", "(", "cls", ".", "driver_wrappers", ")", ">", "1", "else", "'{}'", "driver_index", "=", "1", "for", "driver_wrapper", "in", "cls", ".", "driver_wrappers", ":", "if", "not", "driver_wrapper", ".", "driver", ":", "continue", "from", "toolium", ".", "jira", "import", "add_attachment", "try", ":", "add_attachment", "(", "driver_wrapper", ".", "utils", ".", "capture_screenshot", "(", "screenshot_name", ".", "format", "(", "name", ",", "driver_index", ")", ")", ")", "except", "Exception", ":", "# Capture exceptions to avoid errors in teardown method due to session timeouts", "pass", "driver_index", "+=", "1" ]
Capture a screenshot in each driver :param name: screenshot name suffix
[ "Capture", "a", "screenshot", "in", "each", "driver" ]
python
train
yahoo/TensorFlowOnSpark
examples/imagenet/inception/slim/scopes.py
https://github.com/yahoo/TensorFlowOnSpark/blob/5e4b6c185ab722fd0104ede0377e1149ea8d6f7c/examples/imagenet/inception/slim/scopes.py#L138-L157
def add_arg_scope(func):
  """Decorates a function with args so it can be used within an arg_scope.

  Args:
    func: function to decorate.

  Returns:
    A tuple with the decorated function func_with_args().
  """
  @functools.wraps(func)
  def func_with_args(*args, **kwargs):
    current_scope = _current_arg_scope()
    current_args = kwargs
    key_func = (func.__module__, func.__name__)
    if key_func in current_scope:
      current_args = current_scope[key_func].copy()
      current_args.update(kwargs)
    return func(*args, **current_args)
  _add_op(func)
  return func_with_args
[ "def", "add_arg_scope", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "func_with_args", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "current_scope", "=", "_current_arg_scope", "(", ")", "current_args", "=", "kwargs", "key_func", "=", "(", "func", ".", "__module__", ",", "func", ".", "__name__", ")", "if", "key_func", "in", "current_scope", ":", "current_args", "=", "current_scope", "[", "key_func", "]", ".", "copy", "(", ")", "current_args", ".", "update", "(", "kwargs", ")", "return", "func", "(", "*", "args", ",", "*", "*", "current_args", ")", "_add_op", "(", "func", ")", "return", "func_with_args" ]
Decorates a function with args so it can be used within an arg_scope. Args: func: function to decorate. Returns: A tuple with the decorated function func_with_args().
[ "Decorates", "a", "function", "with", "args", "so", "it", "can", "be", "used", "within", "an", "arg_scope", "." ]
python
train
PMEAL/porespy
porespy/visualization/__views__.py
https://github.com/PMEAL/porespy/blob/1e13875b56787d8f5b7ffdabce8c4342c33ba9f8/porespy/visualization/__views__.py#L49-L80
def sem(im, direction='X'):
    r"""
    Simulates an SEM photograph looking into the porous material in the
    specified direction.  Features are colored according to their depth into
    the image, so darker features are further away.

    Parameters
    ----------
    im : array_like
        ND-image of the porous material with the solid phase marked as 1 or
        True

    direction : string
        Specify the axis along which the camera will point.  Options are
        'X', 'Y', and 'Z'.

    Returns
    -------
    image : 2D-array
        A 2D greyscale image suitable for use in matplotlib\'s ```imshow```
        function.

    """
    im = sp.array(~im, dtype=int)
    if direction in ['Y', 'y']:
        im = sp.transpose(im, axes=[1, 0, 2])
    if direction in ['Z', 'z']:
        im = sp.transpose(im, axes=[2, 1, 0])
    t = im.shape[0]
    depth = sp.reshape(sp.arange(0, t), [t, 1, 1])
    im = im*depth
    im = sp.amax(im, axis=0)
    return im
[ "def", "sem", "(", "im", ",", "direction", "=", "'X'", ")", ":", "im", "=", "sp", ".", "array", "(", "~", "im", ",", "dtype", "=", "int", ")", "if", "direction", "in", "[", "'Y'", ",", "'y'", "]", ":", "im", "=", "sp", ".", "transpose", "(", "im", ",", "axes", "=", "[", "1", ",", "0", ",", "2", "]", ")", "if", "direction", "in", "[", "'Z'", ",", "'z'", "]", ":", "im", "=", "sp", ".", "transpose", "(", "im", ",", "axes", "=", "[", "2", ",", "1", ",", "0", "]", ")", "t", "=", "im", ".", "shape", "[", "0", "]", "depth", "=", "sp", ".", "reshape", "(", "sp", ".", "arange", "(", "0", ",", "t", ")", ",", "[", "t", ",", "1", ",", "1", "]", ")", "im", "=", "im", "*", "depth", "im", "=", "sp", ".", "amax", "(", "im", ",", "axis", "=", "0", ")", "return", "im" ]
r""" Simulates an SEM photograph looking into the porous material in the specified direction. Features are colored according to their depth into the image, so darker features are further away. Parameters ---------- im : array_like ND-image of the porous material with the solid phase marked as 1 or True direction : string Specify the axis along which the camera will point. Options are 'X', 'Y', and 'Z'. Returns ------- image : 2D-array A 2D greyscale image suitable for use in matplotlib\'s ```imshow``` function.
[ "r", "Simulates", "an", "SEM", "photograph", "looking", "into", "the", "porous", "material", "in", "the", "specified", "direction", ".", "Features", "are", "colored", "according", "to", "their", "depth", "into", "the", "image", "so", "darker", "features", "are", "further", "away", "." ]
python
train
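A usage sketch for the function above; the boolean image is random noise just to exercise the call, and sem plus the module's sp alias (scipy) are assumed to be importable.

import numpy as np

im = np.random.rand(40, 40, 40) > 0.5   # hypothetical 3D solid/void image
view = sem(im, direction='Y')            # 2D depth-coded projection along the chosen axis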
uogbuji/versa
tools/py/driver/postgres.py
https://github.com/uogbuji/versa/blob/f092ffc7ed363a5b170890955168500f32de0dd5/tools/py/driver/postgres.py#L113-L139
def _process_db_rows_iter(self, cursor):
    '''
    Turn the low-level rows from the result of a standard query join
    into higher-level statements, yielded iteratively. Note this might lead to
    idle transaction errors?
    '''
    #Be aware of: http://packages.python.org/psycopg2/faq.html#problems-with-transactions-handling
    #The results will come back grouped by the raw relationship IDs, in order
    for relid, relgroup in groupby(cursor, itemgetter(0)):
        curr_rel = None
        attrs = None
        #Each relgroup are the DB rows corresponding to a single relationship,
        #With redundant origin/rel/target but the sequence of attributes
        for row in relgroup:
            (rawid, origin, rel, target, a_name, a_val) = row
            #self._logger.debug('Row: {0}'.format(repr(row)))
            if not curr_rel:
                curr_rel = (origin, rel, target)
            if a_name:
                if not attrs:
                    attrs = {}
                    curr_rel = (origin, rel, target, attrs)
                attrs[a_name] = a_val
        yield curr_rel
    cursor.close()
    self._conn.rollback()  #Finish with the transaction
    return
[ "def", "_process_db_rows_iter", "(", "self", ",", "cursor", ")", ":", "#Be aware of: http://packages.python.org/psycopg2/faq.html#problems-with-transactions-handling", "#The results will come back grouped by the raw relationship IDs, in order", "for", "relid", ",", "relgroup", "in", "groupby", "(", "cursor", ",", "itemgetter", "(", "0", ")", ")", ":", "curr_rel", "=", "None", "attrs", "=", "None", "#Each relgroup are the DB rows corresponding to a single relationship,", "#With redundant origin/rel/target but the sequence of attributes", "for", "row", "in", "relgroup", ":", "(", "rawid", ",", "origin", ",", "rel", ",", "target", ",", "a_name", ",", "a_val", ")", "=", "row", "#self._logger.debug('Row: {0}'.format(repr(row)))", "if", "not", "curr_rel", ":", "curr_rel", "=", "(", "origin", ",", "rel", ",", "target", ")", "if", "a_name", ":", "if", "not", "attrs", ":", "attrs", "=", "{", "}", "curr_rel", "=", "(", "origin", ",", "rel", ",", "target", ",", "attrs", ")", "attrs", "[", "a_name", "]", "=", "a_val", "yield", "curr_rel", "cursor", ".", "close", "(", ")", "self", ".", "_conn", ".", "rollback", "(", ")", "#Finish with the transaction", "return" ]
Turn the low-level rows from the result of a standard query join into higher-level statements, yielded iteratively. Note this might lead to idle transaction errors?
[ "Turn", "the", "low", "-", "level", "rows", "from", "the", "result", "of", "a", "standard", "query", "join", "into", "higher", "-", "level", "statements", "yielded", "iteratively", ".", "Note", "this", "might", "lead", "to", "idle", "transaction", "errors?" ]
python
train
pantsbuild/pex
pex/vendor/_vendored/setuptools/setuptools/sandbox.py
https://github.com/pantsbuild/pex/blob/87b2129d860250d3b9edce75b9cb62f9789ee521/pex/vendor/_vendored/setuptools/setuptools/sandbox.py#L117-L131
def dump(type, exc):
    """
    Always return a dumped (pickled) type and exc. If exc can't be pickled,
    wrap it in UnpickleableException first.
    """
    try:
        return pickle.dumps(type), pickle.dumps(exc)
    except Exception:
        # get UnpickleableException inside the sandbox
        if "__PEX_UNVENDORED__" in __import__("os").environ:
            from setuptools.sandbox import UnpickleableException as cls  # vendor:skip
        else:
            from pex.third_party.setuptools.sandbox import UnpickleableException as cls
        return cls.dump(cls, cls(repr(exc)))
[ "def", "dump", "(", "type", ",", "exc", ")", ":", "try", ":", "return", "pickle", ".", "dumps", "(", "type", ")", ",", "pickle", ".", "dumps", "(", "exc", ")", "except", "Exception", ":", "# get UnpickleableException inside the sandbox", "if", "\"__PEX_UNVENDORED__\"", "in", "__import__", "(", "\"os\"", ")", ".", "environ", ":", "from", "setuptools", ".", "sandbox", "import", "UnpickleableException", "as", "cls", "# vendor:skip", "else", ":", "from", "pex", ".", "third_party", ".", "setuptools", ".", "sandbox", "import", "UnpickleableException", "as", "cls", "return", "cls", ".", "dump", "(", "cls", ",", "cls", "(", "repr", "(", "exc", ")", ")", ")" ]
Always return a dumped (pickled) type and exc. If exc can't be pickled, wrap it in UnpickleableException first.
[ "Always", "return", "a", "dumped", "(", "pickled", ")", "type", "and", "exc", ".", "If", "exc", "can", "t", "be", "pickled", "wrap", "it", "in", "UnpickleableException", "first", "." ]
python
train
ubernostrum/django-registration
src/django_registration/validators.py
https://github.com/ubernostrum/django-registration/blob/cf10b13423669346a1f4cfaa31aae0b42856b416/src/django_registration/validators.py#L237-L250
def validate_confusables(value):
    """
    Validator which disallows 'dangerous' usernames likely to represent
    homograph attacks.

    A username is 'dangerous' if it is mixed-script (as defined by
    Unicode 'Script' property) and contains one or more characters
    appearing in the Unicode Visually Confusable Characters file.

    """
    if not isinstance(value, six.text_type):
        return
    if confusables.is_dangerous(value):
        raise ValidationError(CONFUSABLE, code='invalid')
[ "def", "validate_confusables", "(", "value", ")", ":", "if", "not", "isinstance", "(", "value", ",", "six", ".", "text_type", ")", ":", "return", "if", "confusables", ".", "is_dangerous", "(", "value", ")", ":", "raise", "ValidationError", "(", "CONFUSABLE", ",", "code", "=", "'invalid'", ")" ]
Validator which disallows 'dangerous' usernames likely to represent homograph attacks. A username is 'dangerous' if it is mixed-script (as defined by Unicode 'Script' property) and contains one or more characters appearing in the Unicode Visually Confusable Characters file.
[ "Validator", "which", "disallows", "dangerous", "usernames", "likely", "to", "represent", "homograph", "attacks", "." ]
python
train
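A hedged usage sketch for the validator above; the test string mixes a Cyrillic letter into otherwise Latin text, which the confusable_homoglyphs check is expected to flag, so the validator should raise.

from django.core.exceptions import ValidationError
from django_registration.validators import validate_confusables

try:
    validate_confusables('p\u0430ypal')   # U+0430 is the Cyrillic small letter a
except ValidationError:
    print('rejected as a probable homograph')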
apache/spark
python/pyspark/serializers.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/serializers.py#L345-L352
def load_stream(self, stream):
    """
    Deserialize ArrowRecordBatches to an Arrow table and return as a list of pandas.Series.
    """
    batches = super(ArrowStreamPandasSerializer, self).load_stream(stream)
    import pyarrow as pa
    for batch in batches:
        yield [self.arrow_to_pandas(c) for c in pa.Table.from_batches([batch]).itercolumns()]
[ "def", "load_stream", "(", "self", ",", "stream", ")", ":", "batches", "=", "super", "(", "ArrowStreamPandasSerializer", ",", "self", ")", ".", "load_stream", "(", "stream", ")", "import", "pyarrow", "as", "pa", "for", "batch", "in", "batches", ":", "yield", "[", "self", ".", "arrow_to_pandas", "(", "c", ")", "for", "c", "in", "pa", ".", "Table", ".", "from_batches", "(", "[", "batch", "]", ")", ".", "itercolumns", "(", ")", "]" ]
Deserialize ArrowRecordBatches to an Arrow table and return as a list of pandas.Series.
[ "Deserialize", "ArrowRecordBatches", "to", "an", "Arrow", "table", "and", "return", "as", "a", "list", "of", "pandas", ".", "Series", "." ]
python
train
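The per-batch conversion the serializer performs, sketched outside Spark; plain to_pandas() stands in here for the serializer's arrow_to_pandas helper, which does extra type handling.

import pandas as pd
import pyarrow as pa

batch = pa.RecordBatch.from_pandas(pd.DataFrame({'x': [1, 2], 'y': [3.0, 4.0]}))
series = [col.to_pandas() for col in pa.Table.from_batches([batch]).itercolumns()]
# series is a list of pandas.Series, one per column of the record batch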
tornadoweb/tornado
tornado/ioloop.py
https://github.com/tornadoweb/tornado/blob/b8b481770bcdb333a69afde5cce7eaa449128326/tornado/ioloop.py#L548-L587
def add_timeout(
    self,
    deadline: Union[float, datetime.timedelta],
    callback: Callable[..., None],
    *args: Any,
    **kwargs: Any
) -> object:
    """Runs the ``callback`` at the time ``deadline`` from the I/O loop.

    Returns an opaque handle that may be passed to
    `remove_timeout` to cancel.

    ``deadline`` may be a number denoting a time (on the same
    scale as `IOLoop.time`, normally `time.time`), or a
    `datetime.timedelta` object for a deadline relative to the
    current time.  Since Tornado 4.0, `call_later` is a more
    convenient alternative for the relative case since it does not
    require a timedelta object.

    Note that it is not safe to call `add_timeout` from other threads.
    Instead, you must use `add_callback` to transfer control to the
    `IOLoop`'s thread, and then call `add_timeout` from there.

    Subclasses of IOLoop must implement either `add_timeout` or
    `call_at`; the default implementations of each will call
    the other.  `call_at` is usually easier to implement, but
    subclasses that wish to maintain compatibility with Tornado
    versions prior to 4.0 must use `add_timeout` instead.

    .. versionchanged:: 4.0
       Now passes through ``*args`` and ``**kwargs`` to the callback.
    """
    if isinstance(deadline, numbers.Real):
        return self.call_at(deadline, callback, *args, **kwargs)
    elif isinstance(deadline, datetime.timedelta):
        return self.call_at(
            self.time() + deadline.total_seconds(), callback, *args, **kwargs
        )
    else:
        raise TypeError("Unsupported deadline %r" % deadline)
[ "def", "add_timeout", "(", "self", ",", "deadline", ":", "Union", "[", "float", ",", "datetime", ".", "timedelta", "]", ",", "callback", ":", "Callable", "[", "...", ",", "None", "]", ",", "*", "args", ":", "Any", ",", "*", "*", "kwargs", ":", "Any", ")", "->", "object", ":", "if", "isinstance", "(", "deadline", ",", "numbers", ".", "Real", ")", ":", "return", "self", ".", "call_at", "(", "deadline", ",", "callback", ",", "*", "args", ",", "*", "*", "kwargs", ")", "elif", "isinstance", "(", "deadline", ",", "datetime", ".", "timedelta", ")", ":", "return", "self", ".", "call_at", "(", "self", ".", "time", "(", ")", "+", "deadline", ".", "total_seconds", "(", ")", ",", "callback", ",", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "raise", "TypeError", "(", "\"Unsupported deadline %r\"", "%", "deadline", ")" ]
Runs the ``callback`` at the time ``deadline`` from the I/O loop. Returns an opaque handle that may be passed to `remove_timeout` to cancel. ``deadline`` may be a number denoting a time (on the same scale as `IOLoop.time`, normally `time.time`), or a `datetime.timedelta` object for a deadline relative to the current time. Since Tornado 4.0, `call_later` is a more convenient alternative for the relative case since it does not require a timedelta object. Note that it is not safe to call `add_timeout` from other threads. Instead, you must use `add_callback` to transfer control to the `IOLoop`'s thread, and then call `add_timeout` from there. Subclasses of IOLoop must implement either `add_timeout` or `call_at`; the default implementations of each will call the other. `call_at` is usually easier to implement, but subclasses that wish to maintain compatibility with Tornado versions prior to 4.0 must use `add_timeout` instead. .. versionchanged:: 4.0 Now passes through ``*args`` and ``**kwargs`` to the callback.
[ "Runs", "the", "callback", "at", "the", "time", "deadline", "from", "the", "I", "/", "O", "loop", "." ]
python
train
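A short usage sketch for the method above, using a relative timedelta deadline; as the docstring notes, call_later(5, cb) would be the equivalent shorthand.

import datetime
from tornado.ioloop import IOLoop

loop = IOLoop.current()
handle = loop.add_timeout(datetime.timedelta(seconds=5), lambda: print("fired"))
loop.remove_timeout(handle)   # the returned handle cancels the pending callback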
elifesciences/elife-tools
elifetools/parseJATS.py
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L493-L508
def collection_year(soup):
    """
    Pub date of type collection will hold a year element for VOR articles
    """
    pub_date = first(raw_parser.pub_date(soup, pub_type="collection"))
    if not pub_date:
        pub_date = first(raw_parser.pub_date(soup, date_type="collection"))
    if not pub_date:
        return None

    year = None
    year_tag = raw_parser.year(pub_date)
    if year_tag:
        year = int(node_text(year_tag))

    return year
[ "def", "collection_year", "(", "soup", ")", ":", "pub_date", "=", "first", "(", "raw_parser", ".", "pub_date", "(", "soup", ",", "pub_type", "=", "\"collection\"", ")", ")", "if", "not", "pub_date", ":", "pub_date", "=", "first", "(", "raw_parser", ".", "pub_date", "(", "soup", ",", "date_type", "=", "\"collection\"", ")", ")", "if", "not", "pub_date", ":", "return", "None", "year", "=", "None", "year_tag", "=", "raw_parser", ".", "year", "(", "pub_date", ")", "if", "year_tag", ":", "year", "=", "int", "(", "node_text", "(", "year_tag", ")", ")", "return", "year" ]
Pub date of type collection will hold a year element for VOR articles
[ "Pub", "date", "of", "type", "collection", "will", "hold", "a", "year", "element", "for", "VOR", "articles" ]
python
train
zhmcclient/python-zhmcclient
zhmcclient/_metrics.py
https://github.com/zhmcclient/python-zhmcclient/blob/9657563e5d9184c51d3c903442a58b9725fdf335/zhmcclient/_metrics.py#L846-L915
def resource(self): """ :class:`~zhmcclient.BaseResource`: The Python resource object of the resource these metric values apply to. Raises: :exc:`~zhmcclient.NotFound`: No resource found for this URI in the management scope of the HMC. """ if self._resource is not None: return self._resource resource_class = self.metric_group_definition.resource_class resource_uri = self.resource_uri if resource_class == 'cpc': filter_args = {'object-uri': resource_uri} resource = self.client.cpcs.find(**filter_args) elif resource_class == 'logical-partition': for cpc in self.client.cpcs.list(): try: filter_args = {'object-uri': resource_uri} resource = cpc.lpars.find(**filter_args) break except NotFound: pass # Try next CPC else: raise elif resource_class == 'partition': for cpc in self.client.cpcs.list(): try: filter_args = {'object-uri': resource_uri} resource = cpc.partitions.find(**filter_args) break except NotFound: pass # Try next CPC else: raise elif resource_class == 'adapter': for cpc in self.client.cpcs.list(): try: filter_args = {'object-uri': resource_uri} resource = cpc.adapters.find(**filter_args) break except NotFound: pass # Try next CPC else: raise elif resource_class == 'nic': for cpc in self.client.cpcs.list(): found = False for partition in cpc.partitions.list(): try: filter_args = {'element-uri': resource_uri} resource = partition.nics.find(**filter_args) found = True break except NotFound: pass # Try next partition / next CPC if found: break else: raise else: raise ValueError( "Invalid resource class: {!r}".format(resource_class)) self._resource = resource return self._resource
[ "def", "resource", "(", "self", ")", ":", "if", "self", ".", "_resource", "is", "not", "None", ":", "return", "self", ".", "_resource", "resource_class", "=", "self", ".", "metric_group_definition", ".", "resource_class", "resource_uri", "=", "self", ".", "resource_uri", "if", "resource_class", "==", "'cpc'", ":", "filter_args", "=", "{", "'object-uri'", ":", "resource_uri", "}", "resource", "=", "self", ".", "client", ".", "cpcs", ".", "find", "(", "*", "*", "filter_args", ")", "elif", "resource_class", "==", "'logical-partition'", ":", "for", "cpc", "in", "self", ".", "client", ".", "cpcs", ".", "list", "(", ")", ":", "try", ":", "filter_args", "=", "{", "'object-uri'", ":", "resource_uri", "}", "resource", "=", "cpc", ".", "lpars", ".", "find", "(", "*", "*", "filter_args", ")", "break", "except", "NotFound", ":", "pass", "# Try next CPC", "else", ":", "raise", "elif", "resource_class", "==", "'partition'", ":", "for", "cpc", "in", "self", ".", "client", ".", "cpcs", ".", "list", "(", ")", ":", "try", ":", "filter_args", "=", "{", "'object-uri'", ":", "resource_uri", "}", "resource", "=", "cpc", ".", "partitions", ".", "find", "(", "*", "*", "filter_args", ")", "break", "except", "NotFound", ":", "pass", "# Try next CPC", "else", ":", "raise", "elif", "resource_class", "==", "'adapter'", ":", "for", "cpc", "in", "self", ".", "client", ".", "cpcs", ".", "list", "(", ")", ":", "try", ":", "filter_args", "=", "{", "'object-uri'", ":", "resource_uri", "}", "resource", "=", "cpc", ".", "adapters", ".", "find", "(", "*", "*", "filter_args", ")", "break", "except", "NotFound", ":", "pass", "# Try next CPC", "else", ":", "raise", "elif", "resource_class", "==", "'nic'", ":", "for", "cpc", "in", "self", ".", "client", ".", "cpcs", ".", "list", "(", ")", ":", "found", "=", "False", "for", "partition", "in", "cpc", ".", "partitions", ".", "list", "(", ")", ":", "try", ":", "filter_args", "=", "{", "'element-uri'", ":", "resource_uri", "}", "resource", "=", "partition", ".", "nics", ".", "find", "(", "*", "*", "filter_args", ")", "found", "=", "True", "break", "except", "NotFound", ":", "pass", "# Try next partition / next CPC", "if", "found", ":", "break", "else", ":", "raise", "else", ":", "raise", "ValueError", "(", "\"Invalid resource class: {!r}\"", ".", "format", "(", "resource_class", ")", ")", "self", ".", "_resource", "=", "resource", "return", "self", ".", "_resource" ]
:class:`~zhmcclient.BaseResource`: The Python resource object of the resource these metric values apply to. Raises: :exc:`~zhmcclient.NotFound`: No resource found for this URI in the management scope of the HMC.
[ ":", "class", ":", "~zhmcclient", ".", "BaseResource", ":", "The", "Python", "resource", "object", "of", "the", "resource", "these", "metric", "values", "apply", "to", "." ]
python
train
inveniosoftware/invenio-collections
invenio_collections/cli.py
https://github.com/inveniosoftware/invenio-collections/blob/f3adca45c6d00a4dbf1f48fd501e8a68fe347f2f/invenio_collections/cli.py#L41-L51
def dry_run(func):
    """Dry run: simulate sql execution."""
    @wraps(func)
    def inner(dry_run, *args, **kwargs):
        ret = func(dry_run=dry_run, *args, **kwargs)
        if not dry_run:
            db.session.commit()
        else:
            db.session.rollback()
        return ret
    return inner
[ "def", "dry_run", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "inner", "(", "dry_run", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "ret", "=", "func", "(", "dry_run", "=", "dry_run", ",", "*", "args", ",", "*", "*", "kwargs", ")", "if", "not", "dry_run", ":", "db", ".", "session", ".", "commit", "(", ")", "else", ":", "db", ".", "session", ".", "rollback", "(", ")", "return", "ret", "return", "inner" ]
Dry run: simulate sql execution.
[ "Dry", "run", ":", "simulate", "sql", "execution", "." ]
python
train
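A hypothetical command wrapped with the decorator above; Collection and db stand for Invenio's usual SQLAlchemy model and session handle and are only illustrative.

@dry_run
def create_collection(dry_run=False, name=None):
    db.session.add(Collection(name=name))   # hypothetical model
    return name

create_collection(dry_run=True, name='articles')   # runs the body, then rolls back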
trailofbits/manticore
examples/script/concolic.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/examples/script/concolic.py#L157-L184
def symbolic_run_get_cons(trace):
    '''
    Execute a symbolic run that follows a concrete run; return constraints generated
    and the stdin data produced
    '''

    m2 = Manticore.linux(prog, workspace_url='mem:')
    f = Follower(trace)
    m2.verbosity(VERBOSITY)
    m2.register_plugin(f)

    def on_term_testcase(mcore, state, stateid, err):
        with m2.locked_context() as ctx:
            readdata = []
            for name, fd, data in state.platform.syscall_trace:
                if name in ('_receive', '_read') and fd == 0:
                    readdata.append(data)
            ctx['readdata'] = readdata
            ctx['constraints'] = list(state.constraints.constraints)

    m2.subscribe('will_terminate_state', on_term_testcase)

    m2.run()

    constraints = m2.context['constraints']
    datas = m2.context['readdata']

    return constraints, datas
[ "def", "symbolic_run_get_cons", "(", "trace", ")", ":", "m2", "=", "Manticore", ".", "linux", "(", "prog", ",", "workspace_url", "=", "'mem:'", ")", "f", "=", "Follower", "(", "trace", ")", "m2", ".", "verbosity", "(", "VERBOSITY", ")", "m2", ".", "register_plugin", "(", "f", ")", "def", "on_term_testcase", "(", "mcore", ",", "state", ",", "stateid", ",", "err", ")", ":", "with", "m2", ".", "locked_context", "(", ")", "as", "ctx", ":", "readdata", "=", "[", "]", "for", "name", ",", "fd", ",", "data", "in", "state", ".", "platform", ".", "syscall_trace", ":", "if", "name", "in", "(", "'_receive'", ",", "'_read'", ")", "and", "fd", "==", "0", ":", "readdata", ".", "append", "(", "data", ")", "ctx", "[", "'readdata'", "]", "=", "readdata", "ctx", "[", "'constraints'", "]", "=", "list", "(", "state", ".", "constraints", ".", "constraints", ")", "m2", ".", "subscribe", "(", "'will_terminate_state'", ",", "on_term_testcase", ")", "m2", ".", "run", "(", ")", "constraints", "=", "m2", ".", "context", "[", "'constraints'", "]", "datas", "=", "m2", ".", "context", "[", "'readdata'", "]", "return", "constraints", ",", "datas" ]
Execute a symbolic run that follows a concrete run; return constraints generated and the stdin data produced
[ "Execute", "a", "symbolic", "run", "that", "follows", "a", "concrete", "run", ";", "return", "constraints", "generated", "and", "the", "stdin", "data", "produced" ]
python
valid
stefanfoulis/django-sendsms
sendsms/api.py
https://github.com/stefanfoulis/django-sendsms/blob/375f469789866853253eceba936ebcff98e83c07/sendsms/api.py#L14-L28
def send_sms(body, from_phone, to, flash=False, fail_silently=False,
             auth_user=None, auth_password=None, connection=None):
    """
    Easy wrapper for send a single SMS to a recipient list.

    :returns: the number of SMSs sent.
    """
    from sendsms.message import SmsMessage
    connection = connection or get_connection(
        username=auth_user,
        password=auth_password,
        fail_silently=fail_silently
    )
    return SmsMessage(body=body, from_phone=from_phone, to=to,
                      flash=flash, connection=connection).send()
[ "def", "send_sms", "(", "body", ",", "from_phone", ",", "to", ",", "flash", "=", "False", ",", "fail_silently", "=", "False", ",", "auth_user", "=", "None", ",", "auth_password", "=", "None", ",", "connection", "=", "None", ")", ":", "from", "sendsms", ".", "message", "import", "SmsMessage", "connection", "=", "connection", "or", "get_connection", "(", "username", "=", "auth_user", ",", "password", "=", "auth_password", ",", "fail_silently", "=", "fail_silently", ")", "return", "SmsMessage", "(", "body", "=", "body", ",", "from_phone", "=", "from_phone", ",", "to", "=", "to", ",", "flash", "=", "flash", ",", "connection", "=", "connection", ")", ".", "send", "(", ")" ]
Easy wrapper for send a single SMS to a recipient list. :returns: the number of SMSs sent.
[ "Easy", "wrapper", "for", "send", "a", "single", "SMS", "to", "a", "recipient", "list", "." ]
python
train
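A minimal call of the wrapper above; the backend and any credentials come from Django settings (e.g. SENDSMS_BACKEND), and the phone numbers are placeholders.

from sendsms import api

sent = api.send_sms(body='Ping', from_phone='+15550100', to=['+15550199'])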
openstack/proliantutils
proliantutils/utils.py
https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/utils.py#L138-L156
def validate_href(image_href):
    """Validate HTTP image reference.

    :param image_href: Image reference.
    :raises: exception.ImageRefValidationFailed if HEAD request failed or
        returned response code not equal to 200.
    :returns: Response to HEAD request.
    """
    try:
        response = requests.head(image_href)
        if response.status_code != http_client.OK:
            raise exception.ImageRefValidationFailed(
                image_href=image_href,
                reason=("Got HTTP code %s instead of 200 in response to "
                        "HEAD request." % response.status_code))
    except requests.RequestException as e:
        raise exception.ImageRefValidationFailed(image_href=image_href,
                                                 reason=e)
    return response
[ "def", "validate_href", "(", "image_href", ")", ":", "try", ":", "response", "=", "requests", ".", "head", "(", "image_href", ")", "if", "response", ".", "status_code", "!=", "http_client", ".", "OK", ":", "raise", "exception", ".", "ImageRefValidationFailed", "(", "image_href", "=", "image_href", ",", "reason", "=", "(", "\"Got HTTP code %s instead of 200 in response to \"", "\"HEAD request.\"", "%", "response", ".", "status_code", ")", ")", "except", "requests", ".", "RequestException", "as", "e", ":", "raise", "exception", ".", "ImageRefValidationFailed", "(", "image_href", "=", "image_href", ",", "reason", "=", "e", ")", "return", "response" ]
Validate HTTP image reference. :param image_href: Image reference. :raises: exception.ImageRefValidationFailed if HEAD request failed or returned response code not equal to 200. :returns: Response to HEAD request.
[ "Validate", "HTTP", "image", "reference", "." ]
python
train
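A usage sketch; the image URL is a placeholder, and ImageRefValidationFailed lives in proliantutils.exception as referenced by the code above.

from proliantutils import exception, utils

try:
    response = utils.validate_href('http://example.com/images/deploy.iso')
except exception.ImageRefValidationFailed as err:
    print(err)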
tilezen/tilequeue
tilequeue/query/rawr.py
https://github.com/tilezen/tilequeue/blob/d7b9484ab92e246eb2773949c784ebb37c731e28/tilequeue/query/rawr.py#L229-L239
def _snapping_round(num, eps, resolution):
    """
    Return num snapped to within eps of an integer, or int(resolution(num)).
    """
    rounded = round(num)
    delta = abs(num - rounded)
    if delta < eps:
        return int(rounded)
    else:
        return int(resolution(num))
[ "def", "_snapping_round", "(", "num", ",", "eps", ",", "resolution", ")", ":", "rounded", "=", "round", "(", "num", ")", "delta", "=", "abs", "(", "num", "-", "rounded", ")", "if", "delta", "<", "eps", ":", "return", "int", "(", "rounded", ")", "else", ":", "return", "int", "(", "resolution", "(", "num", ")", ")" ]
Return num snapped to within eps of an integer, or int(resolution(num)).
[ "Return", "num", "snapped", "to", "within", "eps", "of", "an", "integer", "or", "int", "(", "resolution", "(", "num", "))", "." ]
python
train
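Two illustrative calls of the helper above (assuming it is in scope); eps decides whether a value is close enough to an integer to snap.

import math

print(_snapping_round(4.9999, 1e-3, math.floor))   # 5: within eps of an integer, so snapped
print(_snapping_round(4.4, 1e-3, math.floor))      # 4: falls through to int(resolution(num))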
graphql-python/graphql-core-next
graphql/execution/execute.py
https://github.com/graphql-python/graphql-core-next/blob/073dce3f002f897d40f9348ffd8f107815160540/graphql/execution/execute.py#L537-L553
def does_fragment_condition_match(
    self,
    fragment: Union[FragmentDefinitionNode, InlineFragmentNode],
    type_: GraphQLObjectType,
) -> bool:
    """Determine if a fragment is applicable to the given type."""
    type_condition_node = fragment.type_condition
    if not type_condition_node:
        return True
    conditional_type = type_from_ast(self.schema, type_condition_node)
    if conditional_type is type_:
        return True
    if is_abstract_type(conditional_type):
        return self.schema.is_possible_type(
            cast(GraphQLAbstractType, conditional_type), type_
        )
    return False
[ "def", "does_fragment_condition_match", "(", "self", ",", "fragment", ":", "Union", "[", "FragmentDefinitionNode", ",", "InlineFragmentNode", "]", ",", "type_", ":", "GraphQLObjectType", ",", ")", "->", "bool", ":", "type_condition_node", "=", "fragment", ".", "type_condition", "if", "not", "type_condition_node", ":", "return", "True", "conditional_type", "=", "type_from_ast", "(", "self", ".", "schema", ",", "type_condition_node", ")", "if", "conditional_type", "is", "type_", ":", "return", "True", "if", "is_abstract_type", "(", "conditional_type", ")", ":", "return", "self", ".", "schema", ".", "is_possible_type", "(", "cast", "(", "GraphQLAbstractType", ",", "conditional_type", ")", ",", "type_", ")", "return", "False" ]
Determine if a fragment is applicable to the given type.
[ "Determine", "if", "a", "fragment", "is", "applicable", "to", "the", "given", "type", "." ]
python
train
mitsei/dlkit
dlkit/json_/resource/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/resource/objects.py#L185-L190
def _init_map(self, record_types=None, **kwargs):
    """Initialize form map"""
    osid_objects.OsidObjectForm._init_map(self, record_types=record_types)
    self._my_map['assignedBinIds'] = [str(kwargs['bin_id'])]
    self._my_map['group'] = self._group_default
    self._my_map['avatarId'] = self._avatar_default
[ "def", "_init_map", "(", "self", ",", "record_types", "=", "None", ",", "*", "*", "kwargs", ")", ":", "osid_objects", ".", "OsidObjectForm", ".", "_init_map", "(", "self", ",", "record_types", "=", "record_types", ")", "self", ".", "_my_map", "[", "'assignedBinIds'", "]", "=", "[", "str", "(", "kwargs", "[", "'bin_id'", "]", ")", "]", "self", ".", "_my_map", "[", "'group'", "]", "=", "self", ".", "_group_default", "self", ".", "_my_map", "[", "'avatarId'", "]", "=", "self", ".", "_avatar_default" ]
Initialize form map
[ "Initialize", "form", "map" ]
python
train
yyuu/botornado
boto/route53/record.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/route53/record.py#L65-L73
def to_xml(self):
    """Convert this ResourceRecordSet into XML
    to be saved via the ChangeResourceRecordSetsRequest"""
    changesXML = ""
    for change in self.changes:
        changeParams = {"action": change[0], "record": change[1].to_xml()}
        changesXML += self.ChangeXML % changeParams
    params = {"comment": self.comment, "changes": changesXML}
    return self.ChangeResourceRecordSetsBody % params
[ "def", "to_xml", "(", "self", ")", ":", "changesXML", "=", "\"\"", "for", "change", "in", "self", ".", "changes", ":", "changeParams", "=", "{", "\"action\"", ":", "change", "[", "0", "]", ",", "\"record\"", ":", "change", "[", "1", "]", ".", "to_xml", "(", ")", "}", "changesXML", "+=", "self", ".", "ChangeXML", "%", "changeParams", "params", "=", "{", "\"comment\"", ":", "self", ".", "comment", ",", "\"changes\"", ":", "changesXML", "}", "return", "self", ".", "ChangeResourceRecordSetsBody", "%", "params" ]
Convert this ResourceRecordSet into XML to be saved via the ChangeResourceRecordSetsRequest
[ "Convert", "this", "ResourceRecordSet", "into", "XML", "to", "be", "saved", "via", "the", "ChangeResourceRecordSetsRequest" ]
python
train
panzarino/mlbgame
mlbgame/__init__.py
https://github.com/panzarino/mlbgame/blob/0a2d10540de793fdc3b8476aa18f5cf3b53d0b54/mlbgame/__init__.py#L211-L215
def player_stats(game_id):
    """Return dictionary of player stats for game matching the game id."""
    # get information for that game
    data = mlbgame.stats.player_stats(game_id)
    return mlbgame.stats.Stats(data, game_id, True)
[ "def", "player_stats", "(", "game_id", ")", ":", "# get information for that game", "data", "=", "mlbgame", ".", "stats", ".", "player_stats", "(", "game_id", ")", "return", "mlbgame", ".", "stats", ".", "Stats", "(", "data", ",", "game_id", ",", "True", ")" ]
Return dictionary of player stats for game matching the game id.
[ "Return", "dictionary", "of", "player", "stats", "for", "game", "matching", "the", "game", "id", "." ]
python
train
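A usage sketch for the wrapper above; the game id is a placeholder in mlbgame's usual YYYY_MM_DD_awaycode_homecode_N format and would normally come from mlbgame.day().

import mlbgame

stats = mlbgame.player_stats('2016_08_02_nyamlb_nynmlb_1')   # placeholder game id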
saltstack/salt
salt/modules/bcache.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/bcache.py#L451-L561
def device(dev, stats=False, config=False, internals=False, superblock=False): ''' Check the state of a single bcache device CLI example: .. code-block:: bash salt '*' bcache.device bcache0 salt '*' bcache.device /dev/sdc stats=True :param stats: include statistics :param settings: include all settings :param internals: include all internals :param superblock: include superblock info ''' result = {} if not _sysfs_attr(_bcpath(dev), None, 'error', '{0} is not a bcache fo any kind'.format(dev)): return False elif _bcsys(dev, 'set'): # ---------------- It's the cache itself ---------------- result['uuid'] = uuid() base_attr = ['block_size', 'bucket_size', 'cache_available_percent', 'cache_replacement_policy', 'congested'] # ---------------- Parse through both the blockdev & the FS ---------------- result.update(_sysfs_parse(_bcpath(dev), base_attr, stats, config, internals)) result.update(_sysfs_parse(_fspath(), base_attr, stats, config, internals)) result.update(result.pop('base')) else: # ---------------- It's a backing device ---------------- back_uuid = uuid(dev) if back_uuid is not None: result['cache'] = back_uuid try: result['dev'] = os.path.basename(_bcsys(dev, 'dev')) except Exception: pass result['bdev'] = _bdev(dev) base_attr = ['cache_mode', 'running', 'state', 'writeback_running'] base_path = _bcpath(dev) result.update(_sysfs_parse(base_path, base_attr, stats, config, internals)) result.update(result.pop('base')) # ---------------- Modifications ---------------- state = [result['state']] if result.pop('running'): state.append('running') else: state.append('stopped') if 'writeback_running' in result: if result.pop('writeback_running'): state.append('writeback_running') else: state.append('writeback_stopped') result['state'] = state # ---------------- Statistics ---------------- if 'stats' in result: replre = r'(stats|cache)_' statres = result['stats'] for attr in result['stats']: if '/' not in attr: key = re.sub(replre, '', attr) statres[key] = statres.pop(attr) else: stat, key = attr.split('/', 1) stat = re.sub(replre, '', stat) key = re.sub(replre, '', key) if stat not in statres: statres[stat] = {} statres[stat][key] = statres.pop(attr) result['stats'] = statres # ---------------- Internals ---------------- if internals: interres = result.pop('inter_ro', {}) interres.update(result.pop('inter_rw', {})) if interres: for key in interres: if key.startswith('internal'): nkey = re.sub(r'internal[s/]*', '', key) interres[nkey] = interres.pop(key) key = nkey if key.startswith(('btree', 'writeback')): mkey, skey = re.split(r'_', key, maxsplit=1) if mkey not in interres: interres[mkey] = {} interres[mkey][skey] = interres.pop(key) result['internals'] = interres # ---------------- Config ---------------- if config: configres = result['config'] for key in configres: if key.startswith('writeback'): mkey, skey = re.split(r'_', key, maxsplit=1) if mkey not in configres: configres[mkey] = {} configres[mkey][skey] = configres.pop(key) result['config'] = configres # ---------------- Superblock ---------------- if superblock: result['superblock'] = super_(dev) return result
[ "def", "device", "(", "dev", ",", "stats", "=", "False", ",", "config", "=", "False", ",", "internals", "=", "False", ",", "superblock", "=", "False", ")", ":", "result", "=", "{", "}", "if", "not", "_sysfs_attr", "(", "_bcpath", "(", "dev", ")", ",", "None", ",", "'error'", ",", "'{0} is not a bcache fo any kind'", ".", "format", "(", "dev", ")", ")", ":", "return", "False", "elif", "_bcsys", "(", "dev", ",", "'set'", ")", ":", "# ---------------- It's the cache itself ----------------", "result", "[", "'uuid'", "]", "=", "uuid", "(", ")", "base_attr", "=", "[", "'block_size'", ",", "'bucket_size'", ",", "'cache_available_percent'", ",", "'cache_replacement_policy'", ",", "'congested'", "]", "# ---------------- Parse through both the blockdev & the FS ----------------", "result", ".", "update", "(", "_sysfs_parse", "(", "_bcpath", "(", "dev", ")", ",", "base_attr", ",", "stats", ",", "config", ",", "internals", ")", ")", "result", ".", "update", "(", "_sysfs_parse", "(", "_fspath", "(", ")", ",", "base_attr", ",", "stats", ",", "config", ",", "internals", ")", ")", "result", ".", "update", "(", "result", ".", "pop", "(", "'base'", ")", ")", "else", ":", "# ---------------- It's a backing device ----------------", "back_uuid", "=", "uuid", "(", "dev", ")", "if", "back_uuid", "is", "not", "None", ":", "result", "[", "'cache'", "]", "=", "back_uuid", "try", ":", "result", "[", "'dev'", "]", "=", "os", ".", "path", ".", "basename", "(", "_bcsys", "(", "dev", ",", "'dev'", ")", ")", "except", "Exception", ":", "pass", "result", "[", "'bdev'", "]", "=", "_bdev", "(", "dev", ")", "base_attr", "=", "[", "'cache_mode'", ",", "'running'", ",", "'state'", ",", "'writeback_running'", "]", "base_path", "=", "_bcpath", "(", "dev", ")", "result", ".", "update", "(", "_sysfs_parse", "(", "base_path", ",", "base_attr", ",", "stats", ",", "config", ",", "internals", ")", ")", "result", ".", "update", "(", "result", ".", "pop", "(", "'base'", ")", ")", "# ---------------- Modifications ----------------", "state", "=", "[", "result", "[", "'state'", "]", "]", "if", "result", ".", "pop", "(", "'running'", ")", ":", "state", ".", "append", "(", "'running'", ")", "else", ":", "state", ".", "append", "(", "'stopped'", ")", "if", "'writeback_running'", "in", "result", ":", "if", "result", ".", "pop", "(", "'writeback_running'", ")", ":", "state", ".", "append", "(", "'writeback_running'", ")", "else", ":", "state", ".", "append", "(", "'writeback_stopped'", ")", "result", "[", "'state'", "]", "=", "state", "# ---------------- Statistics ----------------", "if", "'stats'", "in", "result", ":", "replre", "=", "r'(stats|cache)_'", "statres", "=", "result", "[", "'stats'", "]", "for", "attr", "in", "result", "[", "'stats'", "]", ":", "if", "'/'", "not", "in", "attr", ":", "key", "=", "re", ".", "sub", "(", "replre", ",", "''", ",", "attr", ")", "statres", "[", "key", "]", "=", "statres", ".", "pop", "(", "attr", ")", "else", ":", "stat", ",", "key", "=", "attr", ".", "split", "(", "'/'", ",", "1", ")", "stat", "=", "re", ".", "sub", "(", "replre", ",", "''", ",", "stat", ")", "key", "=", "re", ".", "sub", "(", "replre", ",", "''", ",", "key", ")", "if", "stat", "not", "in", "statres", ":", "statres", "[", "stat", "]", "=", "{", "}", "statres", "[", "stat", "]", "[", "key", "]", "=", "statres", ".", "pop", "(", "attr", ")", "result", "[", "'stats'", "]", "=", "statres", "# ---------------- Internals ----------------", "if", "internals", ":", "interres", "=", "result", ".", "pop", "(", "'inter_ro'", ",", "{", "}", 
")", "interres", ".", "update", "(", "result", ".", "pop", "(", "'inter_rw'", ",", "{", "}", ")", ")", "if", "interres", ":", "for", "key", "in", "interres", ":", "if", "key", ".", "startswith", "(", "'internal'", ")", ":", "nkey", "=", "re", ".", "sub", "(", "r'internal[s/]*'", ",", "''", ",", "key", ")", "interres", "[", "nkey", "]", "=", "interres", ".", "pop", "(", "key", ")", "key", "=", "nkey", "if", "key", ".", "startswith", "(", "(", "'btree'", ",", "'writeback'", ")", ")", ":", "mkey", ",", "skey", "=", "re", ".", "split", "(", "r'_'", ",", "key", ",", "maxsplit", "=", "1", ")", "if", "mkey", "not", "in", "interres", ":", "interres", "[", "mkey", "]", "=", "{", "}", "interres", "[", "mkey", "]", "[", "skey", "]", "=", "interres", ".", "pop", "(", "key", ")", "result", "[", "'internals'", "]", "=", "interres", "# ---------------- Config ----------------", "if", "config", ":", "configres", "=", "result", "[", "'config'", "]", "for", "key", "in", "configres", ":", "if", "key", ".", "startswith", "(", "'writeback'", ")", ":", "mkey", ",", "skey", "=", "re", ".", "split", "(", "r'_'", ",", "key", ",", "maxsplit", "=", "1", ")", "if", "mkey", "not", "in", "configres", ":", "configres", "[", "mkey", "]", "=", "{", "}", "configres", "[", "mkey", "]", "[", "skey", "]", "=", "configres", ".", "pop", "(", "key", ")", "result", "[", "'config'", "]", "=", "configres", "# ---------------- Superblock ----------------", "if", "superblock", ":", "result", "[", "'superblock'", "]", "=", "super_", "(", "dev", ")", "return", "result" ]
Check the state of a single bcache device CLI example: .. code-block:: bash salt '*' bcache.device bcache0 salt '*' bcache.device /dev/sdc stats=True :param stats: include statistics :param settings: include all settings :param internals: include all internals :param superblock: include superblock info
[ "Check", "the", "state", "of", "a", "single", "bcache", "device" ]
python
train
arviz-devs/arviz
arviz/data/io_cmdstan.py
https://github.com/arviz-devs/arviz/blob/d04d8da07f029fd2931f48d2f7f324cf393e5277/arviz/data/io_cmdstan.py#L400-L568
def _read_output(path): """Read CmdStan output.csv. Parameters ---------- path : str Returns ------- List[DataFrame, DataFrame, List[str], List[str], List[str]] pandas.DataFrame Sample data pandas.DataFrame Sample stats List[str] Configuration information List[str] Adaptation information List[str] Timing info """ chains = [] configuration_info = [] adaptation_info = [] timing_info = [] i = 0 # Read (first) configuration and adaption with open(path, "r") as f_obj: column_names = False for i, line in enumerate(f_obj): line = line.strip() if line.startswith("#"): if column_names: adaptation_info.append(line.strip()) else: configuration_info.append(line.strip()) elif not column_names: column_names = True pconf = _process_configuration(configuration_info) if pconf["save_warmup"]: warmup_range = range(pconf["num_warmup"] // pconf["thin"]) for _, _ in zip(warmup_range, f_obj): continue else: break # Read data with open(path, "r") as f_obj: df = pd.read_csv(f_obj, comment="#") # split dataframe if header found multiple times if df.iloc[:, 0].dtype.kind == "O": first_col = df.columns[0] col_locations = first_col == df.loc[:, first_col] col_locations = list(col_locations.loc[col_locations].index) dfs = [] for idx, last_idx in zip(col_locations, [-1] + list(col_locations[:-1])): df_ = deepcopy(df.loc[last_idx + 1 : idx - 1, :]) for col in df_.columns: df_.loc[:, col] = pd.to_numeric(df_.loc[:, col]) if len(df_): dfs.append(df_.reset_index(drop=True)) df = df.loc[idx + 1 :, :] for col in df.columns: df.loc[:, col] = pd.to_numeric(df.loc[:, col]) dfs.append(df) else: dfs = [df] for j, df in enumerate(dfs): if j == 0: # Read timing info (first) from the end of the file line_num = i + df.shape[0] + 1 for k in range(5): line = linecache.getline(path, line_num + k).strip() if len(line): timing_info.append(line) configuration_info_len = len(configuration_info) adaptation_info_len = len(adaptation_info) timing_info_len = len(timing_info) num_of_samples = df.shape[0] header_count = 1 last_line_num = ( configuration_info_len + adaptation_info_len + timing_info_len + num_of_samples + header_count ) else: # header location found in the dataframe (not first) configuration_info = [] adaptation_info = [] timing_info = [] # line number for the next dataframe in csv line_num = last_line_num + 1 # row ranges config_start = line_num config_end = config_start + configuration_info_len # read configuration_info for reading_line in range(config_start, config_end): line = linecache.getline(path, reading_line) if line.startswith("#"): configuration_info.append(line) else: msg = ( "Invalid input file. " "Header information missing from combined csv. " "Configuration: {}".format(path) ) raise ValueError(msg) pconf = _process_configuration(configuration_info) warmup_rows = pconf["save_warmup"] * pconf["num_warmup"] // pconf["thin"] adaption_start = config_end + 1 + warmup_rows adaption_end = adaption_start + adaptation_info_len # read adaptation_info for reading_line in range(adaption_start, adaption_end): line = linecache.getline(path, reading_line) if line.startswith("#"): adaptation_info.append(line) else: msg = ( "Invalid input file. " "Header information missing from combined csv. 
" "Adaptation: {}".format(path) ) raise ValueError(msg) timing_start = adaption_end + len(df) - warmup_rows timing_end = timing_start + timing_info_len # read timing_info raise_timing_error = False for reading_line in range(timing_start, timing_end): line = linecache.getline(path, reading_line) if line.startswith("#"): timing_info.append(line) else: raise_timing_error = True break no_elapsed_time = not any("elapsed time" in row.lower() for row in timing_info) if raise_timing_error or no_elapsed_time: msg = ( "Invalid input file. " "Header information missing from combined csv. " "Timing: {}".format(path) ) raise ValueError(msg) last_line_num = reading_line # Remove warmup if pconf["save_warmup"]: saved_samples = pconf["num_samples"] // pconf["thin"] df = df.iloc[-saved_samples:, :] # Split data to sample_stats and sample sample_stats_columns = [col for col in df.columns if col.endswith("__")] sample_columns = [col for col in df.columns if col not in sample_stats_columns] sample_stats = df.loc[:, sample_stats_columns] sample_df = df.loc[:, sample_columns] chains.append((sample_df, sample_stats, configuration_info, adaptation_info, timing_info)) return chains
[ "def", "_read_output", "(", "path", ")", ":", "chains", "=", "[", "]", "configuration_info", "=", "[", "]", "adaptation_info", "=", "[", "]", "timing_info", "=", "[", "]", "i", "=", "0", "# Read (first) configuration and adaption", "with", "open", "(", "path", ",", "\"r\"", ")", "as", "f_obj", ":", "column_names", "=", "False", "for", "i", ",", "line", "in", "enumerate", "(", "f_obj", ")", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "line", ".", "startswith", "(", "\"#\"", ")", ":", "if", "column_names", ":", "adaptation_info", ".", "append", "(", "line", ".", "strip", "(", ")", ")", "else", ":", "configuration_info", ".", "append", "(", "line", ".", "strip", "(", ")", ")", "elif", "not", "column_names", ":", "column_names", "=", "True", "pconf", "=", "_process_configuration", "(", "configuration_info", ")", "if", "pconf", "[", "\"save_warmup\"", "]", ":", "warmup_range", "=", "range", "(", "pconf", "[", "\"num_warmup\"", "]", "//", "pconf", "[", "\"thin\"", "]", ")", "for", "_", ",", "_", "in", "zip", "(", "warmup_range", ",", "f_obj", ")", ":", "continue", "else", ":", "break", "# Read data", "with", "open", "(", "path", ",", "\"r\"", ")", "as", "f_obj", ":", "df", "=", "pd", ".", "read_csv", "(", "f_obj", ",", "comment", "=", "\"#\"", ")", "# split dataframe if header found multiple times", "if", "df", ".", "iloc", "[", ":", ",", "0", "]", ".", "dtype", ".", "kind", "==", "\"O\"", ":", "first_col", "=", "df", ".", "columns", "[", "0", "]", "col_locations", "=", "first_col", "==", "df", ".", "loc", "[", ":", ",", "first_col", "]", "col_locations", "=", "list", "(", "col_locations", ".", "loc", "[", "col_locations", "]", ".", "index", ")", "dfs", "=", "[", "]", "for", "idx", ",", "last_idx", "in", "zip", "(", "col_locations", ",", "[", "-", "1", "]", "+", "list", "(", "col_locations", "[", ":", "-", "1", "]", ")", ")", ":", "df_", "=", "deepcopy", "(", "df", ".", "loc", "[", "last_idx", "+", "1", ":", "idx", "-", "1", ",", ":", "]", ")", "for", "col", "in", "df_", ".", "columns", ":", "df_", ".", "loc", "[", ":", ",", "col", "]", "=", "pd", ".", "to_numeric", "(", "df_", ".", "loc", "[", ":", ",", "col", "]", ")", "if", "len", "(", "df_", ")", ":", "dfs", ".", "append", "(", "df_", ".", "reset_index", "(", "drop", "=", "True", ")", ")", "df", "=", "df", ".", "loc", "[", "idx", "+", "1", ":", ",", ":", "]", "for", "col", "in", "df", ".", "columns", ":", "df", ".", "loc", "[", ":", ",", "col", "]", "=", "pd", ".", "to_numeric", "(", "df", ".", "loc", "[", ":", ",", "col", "]", ")", "dfs", ".", "append", "(", "df", ")", "else", ":", "dfs", "=", "[", "df", "]", "for", "j", ",", "df", "in", "enumerate", "(", "dfs", ")", ":", "if", "j", "==", "0", ":", "# Read timing info (first) from the end of the file", "line_num", "=", "i", "+", "df", ".", "shape", "[", "0", "]", "+", "1", "for", "k", "in", "range", "(", "5", ")", ":", "line", "=", "linecache", ".", "getline", "(", "path", ",", "line_num", "+", "k", ")", ".", "strip", "(", ")", "if", "len", "(", "line", ")", ":", "timing_info", ".", "append", "(", "line", ")", "configuration_info_len", "=", "len", "(", "configuration_info", ")", "adaptation_info_len", "=", "len", "(", "adaptation_info", ")", "timing_info_len", "=", "len", "(", "timing_info", ")", "num_of_samples", "=", "df", ".", "shape", "[", "0", "]", "header_count", "=", "1", "last_line_num", "=", "(", "configuration_info_len", "+", "adaptation_info_len", "+", "timing_info_len", "+", "num_of_samples", "+", "header_count", ")", "else", ":", "# header location found in the 
dataframe (not first)", "configuration_info", "=", "[", "]", "adaptation_info", "=", "[", "]", "timing_info", "=", "[", "]", "# line number for the next dataframe in csv", "line_num", "=", "last_line_num", "+", "1", "# row ranges", "config_start", "=", "line_num", "config_end", "=", "config_start", "+", "configuration_info_len", "# read configuration_info", "for", "reading_line", "in", "range", "(", "config_start", ",", "config_end", ")", ":", "line", "=", "linecache", ".", "getline", "(", "path", ",", "reading_line", ")", "if", "line", ".", "startswith", "(", "\"#\"", ")", ":", "configuration_info", ".", "append", "(", "line", ")", "else", ":", "msg", "=", "(", "\"Invalid input file. \"", "\"Header information missing from combined csv. \"", "\"Configuration: {}\"", ".", "format", "(", "path", ")", ")", "raise", "ValueError", "(", "msg", ")", "pconf", "=", "_process_configuration", "(", "configuration_info", ")", "warmup_rows", "=", "pconf", "[", "\"save_warmup\"", "]", "*", "pconf", "[", "\"num_warmup\"", "]", "//", "pconf", "[", "\"thin\"", "]", "adaption_start", "=", "config_end", "+", "1", "+", "warmup_rows", "adaption_end", "=", "adaption_start", "+", "adaptation_info_len", "# read adaptation_info", "for", "reading_line", "in", "range", "(", "adaption_start", ",", "adaption_end", ")", ":", "line", "=", "linecache", ".", "getline", "(", "path", ",", "reading_line", ")", "if", "line", ".", "startswith", "(", "\"#\"", ")", ":", "adaptation_info", ".", "append", "(", "line", ")", "else", ":", "msg", "=", "(", "\"Invalid input file. \"", "\"Header information missing from combined csv. \"", "\"Adaptation: {}\"", ".", "format", "(", "path", ")", ")", "raise", "ValueError", "(", "msg", ")", "timing_start", "=", "adaption_end", "+", "len", "(", "df", ")", "-", "warmup_rows", "timing_end", "=", "timing_start", "+", "timing_info_len", "# read timing_info", "raise_timing_error", "=", "False", "for", "reading_line", "in", "range", "(", "timing_start", ",", "timing_end", ")", ":", "line", "=", "linecache", ".", "getline", "(", "path", ",", "reading_line", ")", "if", "line", ".", "startswith", "(", "\"#\"", ")", ":", "timing_info", ".", "append", "(", "line", ")", "else", ":", "raise_timing_error", "=", "True", "break", "no_elapsed_time", "=", "not", "any", "(", "\"elapsed time\"", "in", "row", ".", "lower", "(", ")", "for", "row", "in", "timing_info", ")", "if", "raise_timing_error", "or", "no_elapsed_time", ":", "msg", "=", "(", "\"Invalid input file. \"", "\"Header information missing from combined csv. 
\"", "\"Timing: {}\"", ".", "format", "(", "path", ")", ")", "raise", "ValueError", "(", "msg", ")", "last_line_num", "=", "reading_line", "# Remove warmup", "if", "pconf", "[", "\"save_warmup\"", "]", ":", "saved_samples", "=", "pconf", "[", "\"num_samples\"", "]", "//", "pconf", "[", "\"thin\"", "]", "df", "=", "df", ".", "iloc", "[", "-", "saved_samples", ":", ",", ":", "]", "# Split data to sample_stats and sample", "sample_stats_columns", "=", "[", "col", "for", "col", "in", "df", ".", "columns", "if", "col", ".", "endswith", "(", "\"__\"", ")", "]", "sample_columns", "=", "[", "col", "for", "col", "in", "df", ".", "columns", "if", "col", "not", "in", "sample_stats_columns", "]", "sample_stats", "=", "df", ".", "loc", "[", ":", ",", "sample_stats_columns", "]", "sample_df", "=", "df", ".", "loc", "[", ":", ",", "sample_columns", "]", "chains", ".", "append", "(", "(", "sample_df", ",", "sample_stats", ",", "configuration_info", ",", "adaptation_info", ",", "timing_info", ")", ")", "return", "chains" ]
Read CmdStan output.csv. Parameters ---------- path : str Returns ------- List[DataFrame, DataFrame, List[str], List[str], List[str]] pandas.DataFrame Sample data pandas.DataFrame Sample stats List[str] Configuration information List[str] Adaptation information List[str] Timing info
[ "Read", "CmdStan", "output", ".", "csv", "." ]
python
train
thomasdelaet/python-velbus
velbus/messages/set_date.py
https://github.com/thomasdelaet/python-velbus/blob/af2f8af43f1a24bf854eff9f3126fd7b5c41b3dd/velbus/messages/set_date.py#L35-L46
def populate(self, priority, address, rtr, data): """ :return: None """ assert isinstance(data, bytes) self.needs_low_priority(priority) self.needs_no_rtr(rtr) self.needs_data(data, 4) self.set_attributes(priority, address, rtr) self._day = data[0] self._mon = data[1] self._year = ((data[2] << 8) + data[3])
[ "def", "populate", "(", "self", ",", "priority", ",", "address", ",", "rtr", ",", "data", ")", ":", "assert", "isinstance", "(", "data", ",", "bytes", ")", "self", ".", "needs_low_priority", "(", "priority", ")", "self", ".", "needs_no_rtr", "(", "rtr", ")", "self", ".", "needs_data", "(", "data", ",", "4", ")", "self", ".", "set_attributes", "(", "priority", ",", "address", ",", "rtr", ")", "self", ".", "_day", "=", "data", "[", "0", "]", "self", ".", "_mon", "=", "data", "[", "1", "]", "self", ".", "_year", "=", "(", "(", "data", "[", "2", "]", "<<", "8", ")", "+", "data", "[", "3", "]", ")" ]
:return: None
[ ":", "return", ":", "None" ]
python
train
bird-house/twitcher
twitcher/utils.py
https://github.com/bird-house/twitcher/blob/e6a36b3aeeacf44eec537434b0fb87c09ab54b5f/twitcher/utils.py#L51-L62
def localize_datetime(dt, tz_name='UTC'): """Provide a timezone-aware object for a given datetime and timezone name """ tz_aware_dt = dt if dt.tzinfo is None: utc = pytz.timezone('UTC') aware = utc.localize(dt) timezone = pytz.timezone(tz_name) tz_aware_dt = aware.astimezone(timezone) else: logger.warn('tzinfo already set') return tz_aware_dt
[ "def", "localize_datetime", "(", "dt", ",", "tz_name", "=", "'UTC'", ")", ":", "tz_aware_dt", "=", "dt", "if", "dt", ".", "tzinfo", "is", "None", ":", "utc", "=", "pytz", ".", "timezone", "(", "'UTC'", ")", "aware", "=", "utc", ".", "localize", "(", "dt", ")", "timezone", "=", "pytz", ".", "timezone", "(", "tz_name", ")", "tz_aware_dt", "=", "aware", ".", "astimezone", "(", "timezone", ")", "else", ":", "logger", ".", "warn", "(", "'tzinfo already set'", ")", "return", "tz_aware_dt" ]
Provide a timezone-aware object for a given datetime and timezone name
[ "Provide", "a", "timzeone", "-", "aware", "object", "for", "a", "given", "datetime", "and", "timezone", "name" ]
python
valid
chemlab/chemlab
chemlab/mviewer/api/appeareance.py
https://github.com/chemlab/chemlab/blob/c8730966316d101e24f39ac3b96b51282aba0abe/chemlab/mviewer/api/appeareance.py#L19-L39
def change_background(color): """Setup the background color to *color*. Example:: change_background('black') change_background('white') change_background('#ffffff') You can call this function interactively by using:: change_color.interactive() A new dialog will popup with a color chooser. .. seealso:: :py:func:`chemlab.graphics.colors.parse_color` """ viewer.widget.background_color = colors.any_to_rgb(color) viewer.update()
[ "def", "change_background", "(", "color", ")", ":", "viewer", ".", "widget", ".", "background_color", "=", "colors", ".", "any_to_rgb", "(", "color", ")", "viewer", ".", "update", "(", ")" ]
Setup the background color to *color*. Example:: change_background('black') change_background('white') change_background('#ffffff') You can call this function interactively by using:: change_color.interactive() A new dialog will popup with a color chooser. .. seealso:: :py:func:`chemlab.graphics.colors.parse_color`
[ "Setup", "the", "background", "color", "to", "*", "color", "*", ".", "Example", "::" ]
python
train
arne-cl/discoursegraphs
src/discoursegraphs/readwrite/rst/heilman_sagae_2015.py
https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/readwrite/rst/heilman_sagae_2015.py#L210-L223
def get_tree_type(tree): """Return the (sub)tree type: 'root', 'nucleus', 'satellite', 'text' or 'leaf' Parameters ---------- tree : nltk.tree.ParentedTree a tree representing a rhetorical structure (or a part of it) """ if is_leaf_node(tree): return SubtreeType.leaf tree_type = tree.label().lower().split(':')[0] assert tree_type in SUBTREE_TYPES return tree_type
[ "def", "get_tree_type", "(", "tree", ")", ":", "if", "is_leaf_node", "(", "tree", ")", ":", "return", "SubtreeType", ".", "leaf", "tree_type", "=", "tree", ".", "label", "(", ")", ".", "lower", "(", ")", ".", "split", "(", "':'", ")", "[", "0", "]", "assert", "tree_type", "in", "SUBTREE_TYPES", "return", "tree_type" ]
Return the (sub)tree type: 'root', 'nucleus', 'satellite', 'text' or 'leaf' Parameters ---------- tree : nltk.tree.ParentedTree a tree representing a rhetorical structure (or a part of it)
[ "Return", "the", "(", "sub", ")", "tree", "type", ":", "root", "nucleus", "satellite", "text", "or", "leaf" ]
python
train
sassoo/goldman
goldman/stores/postgres/store.py
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/stores/postgres/store.py#L381-L407
def query(self, query, param=None): """ Perform a SQL based query This will abort on a failure to communicate with the database. :query: string query :params: parameters for the query :return: RecordList from psycopg2 """ with self.conn.cursor() as curs: print 'XXX QUERY', curs.mogrify(query, param) try: curs.execute(query, param) except BaseException as exc: msg = 'query: {}, param: {}, exc: {}'.format(query, param, exc) if hasattr(exc, 'pgcode'): msg = '{}, exc code: {}'.format(msg, exc.pgcode) print msg handle_exc(exc) result = curs.fetchall() return result
[ "def", "query", "(", "self", ",", "query", ",", "param", "=", "None", ")", ":", "with", "self", ".", "conn", ".", "cursor", "(", ")", "as", "curs", ":", "print", "'XXX QUERY'", ",", "curs", ".", "mogrify", "(", "query", ",", "param", ")", "try", ":", "curs", ".", "execute", "(", "query", ",", "param", ")", "except", "BaseException", "as", "exc", ":", "msg", "=", "'query: {}, param: {}, exc: {}'", ".", "format", "(", "query", ",", "param", ",", "exc", ")", "if", "hasattr", "(", "exc", ",", "'pgcode'", ")", ":", "msg", "=", "'{}, exc code: {}'", ".", "format", "(", "msg", ",", "exc", ".", "pgcode", ")", "print", "msg", "handle_exc", "(", "exc", ")", "result", "=", "curs", ".", "fetchall", "(", ")", "return", "result" ]
Perform a SQL based query This will abort on a failure to communicate with the database. :query: string query :params: parameters for the query :return: RecordList from psycopg2
[ "Perform", "a", "SQL", "based", "query" ]
python
train
portfors-lab/sparkle
sparkle/stim/auto_parameter_model.py
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/stim/auto_parameter_model.py#L301-L335
def ranges(self): """The expanded lists of values generated from the parameter fields :returns: list<list>, outer list is for each parameter, inner loops are that parameter's values to loop through """ steps = [] for p in self._parameters: # inclusive range if p['parameter'] == 'filename': steps.append(p['names']) else: if p['step'] > 0: start = p['start'] stop = p['stop'] if start > stop: step = p['step']*-1 else: step = p['step'] # nsteps = np.ceil(np.around(abs(start - stop), 4) / p['step']) nsteps = self.nStepsForParam(p) # print 'nsteps', np.around(abs(start - stop), 4), p['step'] # print 'start, stop, steps', start, stop, nsteps step_tmp = np.linspace(start, start+step*(nsteps-2), nsteps-1) # print 'step_tmp', step_tmp # if step_tmp[-1] != stop: step_tmp = np.append(step_tmp,stop) # print 'step range', step_tmp steps.append(np.around(step_tmp,4)) else: assert p['start'] == p['stop'] steps.append([p['start']]) return steps
[ "def", "ranges", "(", "self", ")", ":", "steps", "=", "[", "]", "for", "p", "in", "self", ".", "_parameters", ":", "# inclusive range", "if", "p", "[", "'parameter'", "]", "==", "'filename'", ":", "steps", ".", "append", "(", "p", "[", "'names'", "]", ")", "else", ":", "if", "p", "[", "'step'", "]", ">", "0", ":", "start", "=", "p", "[", "'start'", "]", "stop", "=", "p", "[", "'stop'", "]", "if", "start", ">", "stop", ":", "step", "=", "p", "[", "'step'", "]", "*", "-", "1", "else", ":", "step", "=", "p", "[", "'step'", "]", "# nsteps = np.ceil(np.around(abs(start - stop), 4) / p['step'])", "nsteps", "=", "self", ".", "nStepsForParam", "(", "p", ")", "# print 'nsteps', np.around(abs(start - stop), 4), p['step']", "# print 'start, stop, steps', start, stop, nsteps", "step_tmp", "=", "np", ".", "linspace", "(", "start", ",", "start", "+", "step", "*", "(", "nsteps", "-", "2", ")", ",", "nsteps", "-", "1", ")", "# print 'step_tmp', step_tmp", "# if step_tmp[-1] != stop:", "step_tmp", "=", "np", ".", "append", "(", "step_tmp", ",", "stop", ")", "# print 'step range', step_tmp", "steps", ".", "append", "(", "np", ".", "around", "(", "step_tmp", ",", "4", ")", ")", "else", ":", "assert", "p", "[", "'start'", "]", "==", "p", "[", "'stop'", "]", "steps", ".", "append", "(", "[", "p", "[", "'start'", "]", "]", ")", "return", "steps" ]
The expanded lists of values generated from the parameter fields :returns: list<list>, outer list is for each parameter, inner loops are that parameter's values to loop through
[ "The", "expanded", "lists", "of", "values", "generated", "from", "the", "parameter", "fields" ]
python
train
jobovy/galpy
galpy/util/bovy_plot.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/util/bovy_plot.py#L1083-L1114
def _add_ticks(xticks=True,yticks=True): """ NAME: _add_ticks PURPOSE: add minor axis ticks to a plot INPUT: (none; works on the current axes) OUTPUT: (none; works on the current axes) HISTORY: 2009-12-23 - Written - Bovy (NYU) """ ax=pyplot.gca() if xticks: xstep= ax.xaxis.get_majorticklocs() xstep= xstep[1]-xstep[0] ax.xaxis.set_minor_locator(ticker.MultipleLocator(xstep/5.)) if yticks: ystep= ax.yaxis.get_majorticklocs() ystep= ystep[1]-ystep[0] ax.yaxis.set_minor_locator(ticker.MultipleLocator(ystep/5.))
[ "def", "_add_ticks", "(", "xticks", "=", "True", ",", "yticks", "=", "True", ")", ":", "ax", "=", "pyplot", ".", "gca", "(", ")", "if", "xticks", ":", "xstep", "=", "ax", ".", "xaxis", ".", "get_majorticklocs", "(", ")", "xstep", "=", "xstep", "[", "1", "]", "-", "xstep", "[", "0", "]", "ax", ".", "xaxis", ".", "set_minor_locator", "(", "ticker", ".", "MultipleLocator", "(", "xstep", "/", "5.", ")", ")", "if", "yticks", ":", "ystep", "=", "ax", ".", "yaxis", ".", "get_majorticklocs", "(", ")", "ystep", "=", "ystep", "[", "1", "]", "-", "ystep", "[", "0", "]", "ax", ".", "yaxis", ".", "set_minor_locator", "(", "ticker", ".", "MultipleLocator", "(", "ystep", "/", "5.", ")", ")" ]
NAME: _add_ticks PURPOSE: add minor axis ticks to a plot INPUT: (none; works on the current axes) OUTPUT: (none; works on the current axes) HISTORY: 2009-12-23 - Written - Bovy (NYU)
[ "NAME", ":" ]
python
train
SectorLabs/django-postgres-extra
psqlextra/backend/hstore_required.py
https://github.com/SectorLabs/django-postgres-extra/blob/eef2ed5504d225858d4e4f5d77a838082ca6053e/psqlextra/backend/hstore_required.py#L134-L149
def _rename_hstore_required(self, old_table_name, new_table_name, old_field, new_field, key): """Renames an existing REQUIRED CONSTRAINT for the specified hstore key.""" old_name = self._required_constraint_name( old_table_name, old_field, key) new_name = self._required_constraint_name( new_table_name, new_field, key) sql = self.sql_hstore_required_rename.format( table=self.quote_name(new_table_name), old_name=self.quote_name(old_name), new_name=self.quote_name(new_name) ) self.execute(sql)
[ "def", "_rename_hstore_required", "(", "self", ",", "old_table_name", ",", "new_table_name", ",", "old_field", ",", "new_field", ",", "key", ")", ":", "old_name", "=", "self", ".", "_required_constraint_name", "(", "old_table_name", ",", "old_field", ",", "key", ")", "new_name", "=", "self", ".", "_required_constraint_name", "(", "new_table_name", ",", "new_field", ",", "key", ")", "sql", "=", "self", ".", "sql_hstore_required_rename", ".", "format", "(", "table", "=", "self", ".", "quote_name", "(", "new_table_name", ")", ",", "old_name", "=", "self", ".", "quote_name", "(", "old_name", ")", ",", "new_name", "=", "self", ".", "quote_name", "(", "new_name", ")", ")", "self", ".", "execute", "(", "sql", ")" ]
Renames an existing REQUIRED CONSTRAINT for the specified hstore key.
[ "Renames", "an", "existing", "REQUIRED", "CONSTRAINT", "for", "the", "specified", "hstore", "key", "." ]
python
test
Alignak-monitoring/alignak
alignak/scheduler.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/scheduler.py#L2203-L2227
def find_item_by_id(self, object_id): """Get item based on its id or uuid :param object_id: :type object_id: int | str :return: :rtype: alignak.objects.item.Item | None """ # Item id may be an item if isinstance(object_id, Item): return object_id # Item id should be a uuid string if not isinstance(object_id, string_types): logger.debug("Find an item by id, object_id is not int nor string: %s", object_id) return object_id for items in [self.hosts, self.services, self.actions, self.checks, self.hostgroups, self.servicegroups, self.contacts, self.contactgroups]: if object_id in items: return items[object_id] # raise AttributeError("Item with id %s not found" % object_id) # pragma: no cover, logger.error("Item with id %s not found", str(object_id)) # pragma: no cover, return None
[ "def", "find_item_by_id", "(", "self", ",", "object_id", ")", ":", "# Item id may be an item", "if", "isinstance", "(", "object_id", ",", "Item", ")", ":", "return", "object_id", "# Item id should be a uuid string", "if", "not", "isinstance", "(", "object_id", ",", "string_types", ")", ":", "logger", ".", "debug", "(", "\"Find an item by id, object_id is not int nor string: %s\"", ",", "object_id", ")", "return", "object_id", "for", "items", "in", "[", "self", ".", "hosts", ",", "self", ".", "services", ",", "self", ".", "actions", ",", "self", ".", "checks", ",", "self", ".", "hostgroups", ",", "self", ".", "servicegroups", ",", "self", ".", "contacts", ",", "self", ".", "contactgroups", "]", ":", "if", "object_id", "in", "items", ":", "return", "items", "[", "object_id", "]", "# raise AttributeError(\"Item with id %s not found\" % object_id) # pragma: no cover,", "logger", ".", "error", "(", "\"Item with id %s not found\"", ",", "str", "(", "object_id", ")", ")", "# pragma: no cover,", "return", "None" ]
Get item based on its id or uuid :param object_id: :type object_id: int | str :return: :rtype: alignak.objects.item.Item | None
[ "Get", "item", "based", "on", "its", "id", "or", "uuid" ]
python
train
spacetelescope/stsci.tools
lib/stsci/tools/configobj.py
https://github.com/spacetelescope/stsci.tools/blob/9a022503ad24ca54ce83331482dfa3ff6de9f403/lib/stsci/tools/configobj.py#L680-L693
def pop(self, key, default=MISSING): """ 'D.pop(k[,d]) -> v, remove specified key and return the corresponding value. If key is not found, d is returned if given, otherwise KeyError is raised' """ try: val = self[key] except KeyError: if default is MISSING: raise val = default else: del self[key] return val
[ "def", "pop", "(", "self", ",", "key", ",", "default", "=", "MISSING", ")", ":", "try", ":", "val", "=", "self", "[", "key", "]", "except", "KeyError", ":", "if", "default", "is", "MISSING", ":", "raise", "val", "=", "default", "else", ":", "del", "self", "[", "key", "]", "return", "val" ]
'D.pop(k[,d]) -> v, remove specified key and return the corresponding value. If key is not found, d is returned if given, otherwise KeyError is raised'
[ "D", ".", "pop", "(", "k", "[", "d", "]", ")", "-", ">", "v", "remove", "specified", "key", "and", "return", "the", "corresponding", "value", ".", "If", "key", "is", "not", "found", "d", "is", "returned", "if", "given", "otherwise", "KeyError", "is", "raised" ]
python
train
jealous/stockstats
stockstats.py
https://github.com/jealous/stockstats/blob/a479a504ea1906955feeb8519c34ef40eb48ec9b/stockstats.py#L233-L250
def _get_rsv(df, n_days): """ Calculate the RSV (Raw Stochastic Value) within N days This value is essential for calculating KDJs Current day is included in N :param df: data :param n_days: N days :return: None """ n_days = int(n_days) column_name = 'rsv_{}'.format(n_days) low_min = df['low'].rolling( min_periods=1, window=n_days, center=False).min() high_max = df['high'].rolling( min_periods=1, window=n_days, center=False).max() cv = (df['close'] - low_min) / (high_max - low_min) df[column_name] = cv.fillna(0).astype('float64') * 100
[ "def", "_get_rsv", "(", "df", ",", "n_days", ")", ":", "n_days", "=", "int", "(", "n_days", ")", "column_name", "=", "'rsv_{}'", ".", "format", "(", "n_days", ")", "low_min", "=", "df", "[", "'low'", "]", ".", "rolling", "(", "min_periods", "=", "1", ",", "window", "=", "n_days", ",", "center", "=", "False", ")", ".", "min", "(", ")", "high_max", "=", "df", "[", "'high'", "]", ".", "rolling", "(", "min_periods", "=", "1", ",", "window", "=", "n_days", ",", "center", "=", "False", ")", ".", "max", "(", ")", "cv", "=", "(", "df", "[", "'close'", "]", "-", "low_min", ")", "/", "(", "high_max", "-", "low_min", ")", "df", "[", "column_name", "]", "=", "cv", ".", "fillna", "(", "0", ")", ".", "astype", "(", "'float64'", ")", "*", "100" ]
Calculate the RSV (Raw Stochastic Value) within N days This value is essential for calculating KDJs Current day is included in N :param df: data :param n_days: N days :return: None
[ "Calculate", "the", "RSV", "(", "Raw", "Stochastic", "Value", ")", "within", "N", "days", "This", "value", "is", "essential", "for", "calculating", "KDJs", "Current", "day", "is", "included", "in", "N", ":", "param", "df", ":", "data", ":", "param", "n_days", ":", "N", "days", ":", "return", ":", "None" ]
python
train
aio-libs/aioftp
aioftp/client.py
https://github.com/aio-libs/aioftp/blob/b45395b1aba41301b898040acade7010e6878a08/aioftp/client.py#L198-L215
def check_codes(self, expected_codes, received_code, info): """ Checks if any of expected matches received. :param expected_codes: tuple of expected codes :type expected_codes: :py:class:`tuple` :param received_code: received code for matching :type received_code: :py:class:`aioftp.Code` :param info: list of response lines from server :type info: :py:class:`list` :raises aioftp.StatusCodeError: if received code does not match any expected code """ if not any(map(received_code.matches, expected_codes)): raise errors.StatusCodeError(expected_codes, received_code, info)
[ "def", "check_codes", "(", "self", ",", "expected_codes", ",", "received_code", ",", "info", ")", ":", "if", "not", "any", "(", "map", "(", "received_code", ".", "matches", ",", "expected_codes", ")", ")", ":", "raise", "errors", ".", "StatusCodeError", "(", "expected_codes", ",", "received_code", ",", "info", ")" ]
Checks if any of expected matches received. :param expected_codes: tuple of expected codes :type expected_codes: :py:class:`tuple` :param received_code: received code for matching :type received_code: :py:class:`aioftp.Code` :param info: list of response lines from server :type info: :py:class:`list` :raises aioftp.StatusCodeError: if received code does not match any expected code
[ "Checks", "if", "any", "of", "expected", "matches", "received", "." ]
python
valid
aws/sagemaker-python-sdk
src/sagemaker/utils.py
https://github.com/aws/sagemaker-python-sdk/blob/a9e724c7d3f5572b68c3903548c792a59d99799a/src/sagemaker/utils.py#L46-L62
def name_from_base(base, max_length=63, short=False): """Append a timestamp to the provided string. This function assures that the total length of the resulting string is not longer than the specified max length, trimming the input parameter if necessary. Args: base (str): String used as prefix to generate the unique name. max_length (int): Maximum length for the resulting string. short (bool): Whether or not to use a truncated timestamp. Returns: str: Input parameter with appended timestamp. """ timestamp = sagemaker_short_timestamp() if short else sagemaker_timestamp() trimmed_base = base[:max_length - len(timestamp) - 1] return '{}-{}'.format(trimmed_base, timestamp)
[ "def", "name_from_base", "(", "base", ",", "max_length", "=", "63", ",", "short", "=", "False", ")", ":", "timestamp", "=", "sagemaker_short_timestamp", "(", ")", "if", "short", "else", "sagemaker_timestamp", "(", ")", "trimmed_base", "=", "base", "[", ":", "max_length", "-", "len", "(", "timestamp", ")", "-", "1", "]", "return", "'{}-{}'", ".", "format", "(", "trimmed_base", ",", "timestamp", ")" ]
Append a timestamp to the provided string. This function assures that the total length of the resulting string is not longer than the specified max length, trimming the input parameter if necessary. Args: base (str): String used as prefix to generate the unique name. max_length (int): Maximum length for the resulting string. short (bool): Whether or not to use a truncated timestamp. Returns: str: Input parameter with appended timestamp.
[ "Append", "a", "timestamp", "to", "the", "provided", "string", "." ]
python
train
dbcli/cli_helpers
cli_helpers/tabular_output/preprocessors.py
https://github.com/dbcli/cli_helpers/blob/3ebd891ac0c02bad061182dbcb54a47fb21980ae/cli_helpers/tabular_output/preprocessors.py#L27-L40
def convert_to_string(data, headers, **_): """Convert all *data* and *headers* to strings. Binary data that cannot be decoded is converted to a hexadecimal representation via :func:`binascii.hexlify`. :param iterable data: An :term:`iterable` (e.g. list) of rows. :param iterable headers: The column headers. :return: The processed data and headers. :rtype: tuple """ return (([utils.to_string(v) for v in row] for row in data), [utils.to_string(h) for h in headers])
[ "def", "convert_to_string", "(", "data", ",", "headers", ",", "*", "*", "_", ")", ":", "return", "(", "(", "[", "utils", ".", "to_string", "(", "v", ")", "for", "v", "in", "row", "]", "for", "row", "in", "data", ")", ",", "[", "utils", ".", "to_string", "(", "h", ")", "for", "h", "in", "headers", "]", ")" ]
Convert all *data* and *headers* to strings. Binary data that cannot be decoded is converted to a hexadecimal representation via :func:`binascii.hexlify`. :param iterable data: An :term:`iterable` (e.g. list) of rows. :param iterable headers: The column headers. :return: The processed data and headers. :rtype: tuple
[ "Convert", "all", "*", "data", "*", "and", "*", "headers", "*", "to", "strings", "." ]
python
test
inveniosoftware/invenio-oauthclient
invenio_oauthclient/views/client.py
https://github.com/inveniosoftware/invenio-oauthclient/blob/2500dc6935738107617aeade79e050d7608004bb/invenio_oauthclient/views/client.py#L97-L128
def authorized(remote_app=None): """Authorized handler callback.""" if remote_app not in current_oauthclient.handlers: return abort(404) state_token = request.args.get('state') # Verify state parameter try: assert state_token # Checks authenticity and integrity of state and decodes the value. state = serializer.loads(state_token) # Verify that state is for this session, app and that next parameter # have not been modified. assert state['sid'] == _create_identifier() assert state['app'] == remote_app # Store next URL set_session_next_url(remote_app, state['next']) except (AssertionError, BadData): if current_app.config.get('OAUTHCLIENT_STATE_ENABLED', True) or ( not(current_app.debug or current_app.testing)): abort(403) try: handler = current_oauthclient.handlers[remote_app]() except OAuthException as e: if e.type == 'invalid_response': abort(500) else: raise return handler
[ "def", "authorized", "(", "remote_app", "=", "None", ")", ":", "if", "remote_app", "not", "in", "current_oauthclient", ".", "handlers", ":", "return", "abort", "(", "404", ")", "state_token", "=", "request", ".", "args", ".", "get", "(", "'state'", ")", "# Verify state parameter", "try", ":", "assert", "state_token", "# Checks authenticity and integrity of state and decodes the value.", "state", "=", "serializer", ".", "loads", "(", "state_token", ")", "# Verify that state is for this session, app and that next parameter", "# have not been modified.", "assert", "state", "[", "'sid'", "]", "==", "_create_identifier", "(", ")", "assert", "state", "[", "'app'", "]", "==", "remote_app", "# Store next URL", "set_session_next_url", "(", "remote_app", ",", "state", "[", "'next'", "]", ")", "except", "(", "AssertionError", ",", "BadData", ")", ":", "if", "current_app", ".", "config", ".", "get", "(", "'OAUTHCLIENT_STATE_ENABLED'", ",", "True", ")", "or", "(", "not", "(", "current_app", ".", "debug", "or", "current_app", ".", "testing", ")", ")", ":", "abort", "(", "403", ")", "try", ":", "handler", "=", "current_oauthclient", ".", "handlers", "[", "remote_app", "]", "(", ")", "except", "OAuthException", "as", "e", ":", "if", "e", ".", "type", "==", "'invalid_response'", ":", "abort", "(", "500", ")", "else", ":", "raise", "return", "handler" ]
Authorized handler callback.
[ "Authorized", "handler", "callback", "." ]
python
train
GoogleCloudPlatform/datastore-ndb-python
ndb/model.py
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/model.py#L3005-L3014
def _check_initialized(self): """Internal helper to check for uninitialized properties. Raises: BadValueError if it finds any. """ baddies = self._find_uninitialized() if baddies: raise datastore_errors.BadValueError( 'Entity has uninitialized properties: %s' % ', '.join(baddies))
[ "def", "_check_initialized", "(", "self", ")", ":", "baddies", "=", "self", ".", "_find_uninitialized", "(", ")", "if", "baddies", ":", "raise", "datastore_errors", ".", "BadValueError", "(", "'Entity has uninitialized properties: %s'", "%", "', '", ".", "join", "(", "baddies", ")", ")" ]
Internal helper to check for uninitialized properties. Raises: BadValueError if it finds any.
[ "Internal", "helper", "to", "check", "for", "uninitialized", "properties", "." ]
python
train
Kautenja/nes-py
nes_py/nes_env.py
https://github.com/Kautenja/nes-py/blob/a113885198d418f38fcf24b8f79ac508975788c2/nes_py/nes_env.py#L271-L307
def step(self, action): """ Run one frame of the NES and return the relevant observation data. Args: action (byte): the bitmap determining which buttons to press Returns: a tuple of: - state (np.ndarray): next frame as a result of the given action - reward (float) : amount of reward returned after given action - done (boolean): whether the episode has ended - info (dict): contains auxiliary diagnostic information """ # if the environment is done, raise an error if self.done: raise ValueError('cannot step in a done environment! call `reset`') # set the action on the controller self.controllers[0][:] = action # pass the action to the emulator as an unsigned byte _LIB.Step(self._env) # get the reward for this step reward = self._get_reward() # get the done flag for this step self.done = self._get_done() # get the info for this step info = self._get_info() # call the after step callback self._did_step(self.done) # bound the reward in [min, max] if reward < self.reward_range[0]: reward = self.reward_range[0] elif reward > self.reward_range[1]: reward = self.reward_range[1] # return the screen from the emulator and other relevant data return self.screen, reward, self.done, info
[ "def", "step", "(", "self", ",", "action", ")", ":", "# if the environment is done, raise an error", "if", "self", ".", "done", ":", "raise", "ValueError", "(", "'cannot step in a done environment! call `reset`'", ")", "# set the action on the controller", "self", ".", "controllers", "[", "0", "]", "[", ":", "]", "=", "action", "# pass the action to the emulator as an unsigned byte", "_LIB", ".", "Step", "(", "self", ".", "_env", ")", "# get the reward for this step", "reward", "=", "self", ".", "_get_reward", "(", ")", "# get the done flag for this step", "self", ".", "done", "=", "self", ".", "_get_done", "(", ")", "# get the info for this step", "info", "=", "self", ".", "_get_info", "(", ")", "# call the after step callback", "self", ".", "_did_step", "(", "self", ".", "done", ")", "# bound the reward in [min, max]", "if", "reward", "<", "self", ".", "reward_range", "[", "0", "]", ":", "reward", "=", "self", ".", "reward_range", "[", "0", "]", "elif", "reward", ">", "self", ".", "reward_range", "[", "1", "]", ":", "reward", "=", "self", ".", "reward_range", "[", "1", "]", "# return the screen from the emulator and other relevant data", "return", "self", ".", "screen", ",", "reward", ",", "self", ".", "done", ",", "info" ]
Run one frame of the NES and return the relevant observation data. Args: action (byte): the bitmap determining which buttons to press Returns: a tuple of: - state (np.ndarray): next frame as a result of the given action - reward (float) : amount of reward returned after given action - done (boolean): whether the episode has ended - info (dict): contains auxiliary diagnostic information
[ "Run", "one", "frame", "of", "the", "NES", "and", "return", "the", "relevant", "observation", "data", "." ]
python
train
tensorflow/tensor2tensor
tensor2tensor/models/research/lm_experiments.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/lm_experiments.py#L141-L149
def lmx_moe_h1k_f8k_x16(): """Transformer with mixture of experts. 890M Params.""" hparams = lmx_h1k_f4k() hparams.filter_size = 8192 hparams.ffn_layer = "local_moe_tpu" hparams.moe_num_experts = 16 hparams.weight_dtype = "bfloat16" hparams.batch_size = 8192 return hparams
[ "def", "lmx_moe_h1k_f8k_x16", "(", ")", ":", "hparams", "=", "lmx_h1k_f4k", "(", ")", "hparams", ".", "filter_size", "=", "8192", "hparams", ".", "ffn_layer", "=", "\"local_moe_tpu\"", "hparams", ".", "moe_num_experts", "=", "16", "hparams", ".", "weight_dtype", "=", "\"bfloat16\"", "hparams", ".", "batch_size", "=", "8192", "return", "hparams" ]
Transformer with mixture of experts. 890M Params.
[ "Transformer", "with", "mixture", "of", "experts", ".", "890M", "Params", "." ]
python
train
senaite/senaite.core
bika/lims/content/worksheet.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/content/worksheet.py#L532-L583
def get_suitable_slot_for_duplicate(self, src_slot): """Returns the suitable position for a duplicate analysis, taking into account if there is a WorksheetTemplate assigned to this worksheet. By default, returns a new slot at the end of the worksheet unless there is a slot defined for a duplicate of the src_slot in the worksheet template layout not yet used. :param src_slot: :return: suitable slot position for a duplicate of src_slot """ slot_from = to_int(src_slot, 0) if slot_from < 1: return -1 # Are the analyses from src_slot suitable for duplicates creation? container = self.get_container_at(slot_from) if not container or not IAnalysisRequest.providedBy(container): # We cannot create duplicates from analyses other than routine ones, # those that belong to an Analysis Request. return -1 occupied = self.get_slot_positions(type='all') wst = self.getWorksheetTemplate() if not wst: # No worksheet template assigned, add a new slot at the end of # the worksheet with the duplicate there slot_to = max(occupied) + 1 return slot_to # If there is a match with the layout defined in the Worksheet # Template, use that slot instead of adding a new one at the end of # the worksheet layout = wst.getLayout() for pos in layout: if pos['type'] != 'd' or to_int(pos['dup']) != slot_from: continue slot_to = int(pos['pos']) if slot_to in occupied: # Not an empty slot continue # This slot is empty, use it instead of adding a new # slot at the end of the worksheet return slot_to # Add a new slot at the end of the worksheet, but take into account # that a worksheet template is assigned, so we need to take care to # not override slots defined by its layout occupied.append(len(layout)) slot_to = max(occupied) + 1 return slot_to
[ "def", "get_suitable_slot_for_duplicate", "(", "self", ",", "src_slot", ")", ":", "slot_from", "=", "to_int", "(", "src_slot", ",", "0", ")", "if", "slot_from", "<", "1", ":", "return", "-", "1", "# Are the analyses from src_slot suitable for duplicates creation?", "container", "=", "self", ".", "get_container_at", "(", "slot_from", ")", "if", "not", "container", "or", "not", "IAnalysisRequest", ".", "providedBy", "(", "container", ")", ":", "# We cannot create duplicates from analyses other than routine ones,", "# those that belong to an Analysis Request.", "return", "-", "1", "occupied", "=", "self", ".", "get_slot_positions", "(", "type", "=", "'all'", ")", "wst", "=", "self", ".", "getWorksheetTemplate", "(", ")", "if", "not", "wst", ":", "# No worksheet template assigned, add a new slot at the end of", "# the worksheet with the duplicate there", "slot_to", "=", "max", "(", "occupied", ")", "+", "1", "return", "slot_to", "# If there is a match with the layout defined in the Worksheet", "# Template, use that slot instead of adding a new one at the end of", "# the worksheet", "layout", "=", "wst", ".", "getLayout", "(", ")", "for", "pos", "in", "layout", ":", "if", "pos", "[", "'type'", "]", "!=", "'d'", "or", "to_int", "(", "pos", "[", "'dup'", "]", ")", "!=", "slot_from", ":", "continue", "slot_to", "=", "int", "(", "pos", "[", "'pos'", "]", ")", "if", "slot_to", "in", "occupied", ":", "# Not an empty slot", "continue", "# This slot is empty, use it instead of adding a new", "# slot at the end of the worksheet", "return", "slot_to", "# Add a new slot at the end of the worksheet, but take into account", "# that a worksheet template is assigned, so we need to take care to", "# not override slots defined by its layout", "occupied", ".", "append", "(", "len", "(", "layout", ")", ")", "slot_to", "=", "max", "(", "occupied", ")", "+", "1", "return", "slot_to" ]
Returns the suitable position for a duplicate analysis, taking into account if there is a WorksheetTemplate assigned to this worksheet. By default, returns a new slot at the end of the worksheet unless there is a slot defined for a duplicate of the src_slot in the worksheet template layout not yet used. :param src_slot: :return: suitable slot position for a duplicate of src_slot
[ "Returns", "the", "suitable", "position", "for", "a", "duplicate", "analysis", "taking", "into", "account", "if", "there", "is", "a", "WorksheetTemplate", "assigned", "to", "this", "worksheet", "." ]
python
train
xeroc/python-graphenelib
graphenecommon/amount.py
https://github.com/xeroc/python-graphenelib/blob/8bb5396bc79998ee424cf3813af478304173f3a6/graphenecommon/amount.py#L134-L141
def copy(self): """ Copy the instance and make sure not to use a reference """ return self.__class__( amount=self["amount"], asset=self["asset"].copy(), blockchain_instance=self.blockchain, )
[ "def", "copy", "(", "self", ")", ":", "return", "self", ".", "__class__", "(", "amount", "=", "self", "[", "\"amount\"", "]", ",", "asset", "=", "self", "[", "\"asset\"", "]", ".", "copy", "(", ")", ",", "blockchain_instance", "=", "self", ".", "blockchain", ",", ")" ]
Copy the instance and make sure not to use a reference
[ "Copy", "the", "instance", "and", "make", "sure", "not", "to", "use", "a", "reference" ]
python
valid
SoCo/SoCo
soco/music_services/music_service.py
https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/soco/music_services/music_service.py#L76-L125
def get_soap_header(self): """Generate the SOAP authentication header for the related service. This header contains all the necessary authentication details. Returns: str: A string representation of the XML content of the SOAP header. """ # According to the SONOS SMAPI, this header must be sent with all # SOAP requests. Building this is an expensive operation (though # occasionally necessary), so f we have a cached value, return it if self._cached_soap_header is not None: return self._cached_soap_header music_service = self.music_service credentials_header = XML.Element( "credentials", {'xmlns': "http://www.sonos.com/Services/1.1"}) device_id = XML.SubElement(credentials_header, 'deviceId') device_id.text = self._device_id device_provider = XML.SubElement(credentials_header, 'deviceProvider') device_provider.text = 'Sonos' if music_service.account.oa_device_id: # OAuth account credentials are present. We must use them to # authenticate. login_token = XML.Element('loginToken') token = XML.SubElement(login_token, 'token') token.text = music_service.account.oa_device_id key = XML.SubElement(login_token, 'key') key.text = music_service.account.key household_id = XML.SubElement(login_token, 'householdId') household_id.text = self._device.household_id credentials_header.append(login_token) # otherwise, perhaps use DeviceLink or UserId auth elif music_service.auth_type in ['DeviceLink', 'UserId']: # We need a session ID from Sonos session_id = self._device.musicServices.GetSessionId([ ('ServiceId', music_service.service_id), ('Username', music_service.account.username) ])['SessionId'] session_elt = XML.Element('sessionId') session_elt.text = session_id credentials_header.append(session_elt) # Anonymous auth. No need for anything further. self._cached_soap_header = XML.tostring( credentials_header, encoding='utf-8').decode(encoding='utf-8') return self._cached_soap_header
[ "def", "get_soap_header", "(", "self", ")", ":", "# According to the SONOS SMAPI, this header must be sent with all", "# SOAP requests. Building this is an expensive operation (though", "# occasionally necessary), so f we have a cached value, return it", "if", "self", ".", "_cached_soap_header", "is", "not", "None", ":", "return", "self", ".", "_cached_soap_header", "music_service", "=", "self", ".", "music_service", "credentials_header", "=", "XML", ".", "Element", "(", "\"credentials\"", ",", "{", "'xmlns'", ":", "\"http://www.sonos.com/Services/1.1\"", "}", ")", "device_id", "=", "XML", ".", "SubElement", "(", "credentials_header", ",", "'deviceId'", ")", "device_id", ".", "text", "=", "self", ".", "_device_id", "device_provider", "=", "XML", ".", "SubElement", "(", "credentials_header", ",", "'deviceProvider'", ")", "device_provider", ".", "text", "=", "'Sonos'", "if", "music_service", ".", "account", ".", "oa_device_id", ":", "# OAuth account credentials are present. We must use them to", "# authenticate.", "login_token", "=", "XML", ".", "Element", "(", "'loginToken'", ")", "token", "=", "XML", ".", "SubElement", "(", "login_token", ",", "'token'", ")", "token", ".", "text", "=", "music_service", ".", "account", ".", "oa_device_id", "key", "=", "XML", ".", "SubElement", "(", "login_token", ",", "'key'", ")", "key", ".", "text", "=", "music_service", ".", "account", ".", "key", "household_id", "=", "XML", ".", "SubElement", "(", "login_token", ",", "'householdId'", ")", "household_id", ".", "text", "=", "self", ".", "_device", ".", "household_id", "credentials_header", ".", "append", "(", "login_token", ")", "# otherwise, perhaps use DeviceLink or UserId auth", "elif", "music_service", ".", "auth_type", "in", "[", "'DeviceLink'", ",", "'UserId'", "]", ":", "# We need a session ID from Sonos", "session_id", "=", "self", ".", "_device", ".", "musicServices", ".", "GetSessionId", "(", "[", "(", "'ServiceId'", ",", "music_service", ".", "service_id", ")", ",", "(", "'Username'", ",", "music_service", ".", "account", ".", "username", ")", "]", ")", "[", "'SessionId'", "]", "session_elt", "=", "XML", ".", "Element", "(", "'sessionId'", ")", "session_elt", ".", "text", "=", "session_id", "credentials_header", ".", "append", "(", "session_elt", ")", "# Anonymous auth. No need for anything further.", "self", ".", "_cached_soap_header", "=", "XML", ".", "tostring", "(", "credentials_header", ",", "encoding", "=", "'utf-8'", ")", ".", "decode", "(", "encoding", "=", "'utf-8'", ")", "return", "self", ".", "_cached_soap_header" ]
Generate the SOAP authentication header for the related service. This header contains all the necessary authentication details. Returns: str: A string representation of the XML content of the SOAP header.
[ "Generate", "the", "SOAP", "authentication", "header", "for", "the", "related", "service", "." ]
python
train
Nukesor/pueue
pueue/daemon/daemon.py
https://github.com/Nukesor/pueue/blob/f1d276360454d4dd2738658a13df1e20caa4b926/pueue/daemon/daemon.py#L128-L150
def read_config(self): """Read a previous configuration file or create a new one with default values.""" config_file = os.path.join(self.config_dir, 'pueue.ini') self.config = configparser.ConfigParser() # Try to get configuration file and return it # If this doesn't work, a new default config file will be created if os.path.exists(config_file): try: self.config.read(config_file) return except Exception: self.logger.error('Error while parsing config file. Deleting old config') self.logger.exception() self.config['default'] = { 'resumeAfterStart': False, 'maxProcesses': 1, 'customShell': 'default', } self.config['log'] = { 'logTime': 60*60*24*14, } self.write_config()
[ "def", "read_config", "(", "self", ")", ":", "config_file", "=", "os", ".", "path", ".", "join", "(", "self", ".", "config_dir", ",", "'pueue.ini'", ")", "self", ".", "config", "=", "configparser", ".", "ConfigParser", "(", ")", "# Try to get configuration file and return it", "# If this doesn't work, a new default config file will be created", "if", "os", ".", "path", ".", "exists", "(", "config_file", ")", ":", "try", ":", "self", ".", "config", ".", "read", "(", "config_file", ")", "return", "except", "Exception", ":", "self", ".", "logger", ".", "error", "(", "'Error while parsing config file. Deleting old config'", ")", "self", ".", "logger", ".", "exception", "(", ")", "self", ".", "config", "[", "'default'", "]", "=", "{", "'resumeAfterStart'", ":", "False", ",", "'maxProcesses'", ":", "1", ",", "'customShell'", ":", "'default'", ",", "}", "self", ".", "config", "[", "'log'", "]", "=", "{", "'logTime'", ":", "60", "*", "60", "*", "24", "*", "14", ",", "}", "self", ".", "write_config", "(", ")" ]
Read a previous configuration file or create a new one with default values.
[ "Read", "a", "previous", "configuration", "file", "or", "create", "a", "new", "with", "default", "values", "." ]
python
train
assemblerflow/flowcraft
flowcraft/generator/process.py
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/process.py#L518-L567
def set_secondary_channel(self, source, channel_list): """ General purpose method for setting a secondary channel This method allows a given source channel to be forked into one or more channels and sets those forks in the :py:attr:`Process.forks` attribute. Both the source and the channels in the ``channel_list`` argument must be the final channel strings, which means that this method should be called only after setting the main channels. If the source is not a main channel, this will simply create a fork or set for every channel in the ``channel_list`` argument list:: SOURCE_CHANNEL_1.into{SINK_1;SINK_2} If the source is a main channel, this will apply some changes to the output channel of the process, to avoid overlapping main output channels. For instance, forking the main output channel for process 2 would create a ``MAIN_2.into{...}``. The issue here is that the ``MAIN_2`` channel is expected as the input of the next process, but now is being used to create the fork. To solve this issue, the output channel is modified into ``_MAIN_2``, and the fork is set to the channels provided channels plus the ``MAIN_2`` channel:: _MAIN_2.into{MAIN_2;MAIN_5;...} Parameters ---------- source : str String with the name of the source channel channel_list : list List of channels that will receive a fork of the secondary channel """ logger.debug("Setting secondary channel for source '{}': {}".format( source, channel_list)) source = "{}_{}".format(source, self.pid) # Removes possible duplicate channels, when the fork is terminal channel_list = sorted(list(set(channel_list))) # When there is only one channel to fork into, use the 'set' operator # instead of 'into' op = "set" if len(channel_list) == 1 else "into" self.forks.append("\n{}.{}{{ {} }}\n".format( source, op, ";".join(channel_list))) logger.debug("Setting forks attribute to: {}".format(self.forks)) self._context = {**self._context, **{"forks": "\n".join(self.forks)}}
[ "def", "set_secondary_channel", "(", "self", ",", "source", ",", "channel_list", ")", ":", "logger", ".", "debug", "(", "\"Setting secondary channel for source '{}': {}\"", ".", "format", "(", "source", ",", "channel_list", ")", ")", "source", "=", "\"{}_{}\"", ".", "format", "(", "source", ",", "self", ".", "pid", ")", "# Removes possible duplicate channels, when the fork is terminal", "channel_list", "=", "sorted", "(", "list", "(", "set", "(", "channel_list", ")", ")", ")", "# When there is only one channel to fork into, use the 'set' operator", "# instead of 'into'", "op", "=", "\"set\"", "if", "len", "(", "channel_list", ")", "==", "1", "else", "\"into\"", "self", ".", "forks", ".", "append", "(", "\"\\n{}.{}{{ {} }}\\n\"", ".", "format", "(", "source", ",", "op", ",", "\";\"", ".", "join", "(", "channel_list", ")", ")", ")", "logger", ".", "debug", "(", "\"Setting forks attribute to: {}\"", ".", "format", "(", "self", ".", "forks", ")", ")", "self", ".", "_context", "=", "{", "*", "*", "self", ".", "_context", ",", "*", "*", "{", "\"forks\"", ":", "\"\\n\"", ".", "join", "(", "self", ".", "forks", ")", "}", "}" ]
General purpose method for setting a secondary channel This method allows a given source channel to be forked into one or more channels and sets those forks in the :py:attr:`Process.forks` attribute. Both the source and the channels in the ``channel_list`` argument must be the final channel strings, which means that this method should be called only after setting the main channels. If the source is not a main channel, this will simply create a fork or set for every channel in the ``channel_list`` argument list:: SOURCE_CHANNEL_1.into{SINK_1;SINK_2} If the source is a main channel, this will apply some changes to the output channel of the process, to avoid overlapping main output channels. For instance, forking the main output channel for process 2 would create a ``MAIN_2.into{...}``. The issue here is that the ``MAIN_2`` channel is expected as the input of the next process, but now is being used to create the fork. To solve this issue, the output channel is modified into ``_MAIN_2``, and the fork is set to the channels provided channels plus the ``MAIN_2`` channel:: _MAIN_2.into{MAIN_2;MAIN_5;...} Parameters ---------- source : str String with the name of the source channel channel_list : list List of channels that will receive a fork of the secondary channel
[ "General", "purpose", "method", "for", "setting", "a", "secondary", "channel" ]
python
test
rehandalal/buchner
buchner/helpers.py
https://github.com/rehandalal/buchner/blob/dc22a61c493b9d4a74d76e8b42a319aa13e385f3/buchner/helpers.py#L13-L19
def json_requested(): """Check if json is the preferred output format for the request.""" best = request.accept_mimetypes.best_match( ['application/json', 'text/html']) return (best == 'application/json' and request.accept_mimetypes[best] > request.accept_mimetypes['text/html'])
[ "def", "json_requested", "(", ")", ":", "best", "=", "request", ".", "accept_mimetypes", ".", "best_match", "(", "[", "'application/json'", ",", "'text/html'", "]", ")", "return", "(", "best", "==", "'application/json'", "and", "request", ".", "accept_mimetypes", "[", "best", "]", ">", "request", ".", "accept_mimetypes", "[", "'text/html'", "]", ")" ]
Check if json is the preferred output format for the request.
[ "Check", "if", "json", "is", "the", "preferred", "output", "format", "for", "the", "request", "." ]
python
train
ToFuProject/tofu
tofu/geom/_comp.py
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/geom/_comp.py#L45-L104
def _Struct_set_Poly(Poly, pos=None, extent=None, arrayorder='C', Type='Tor', Clock=False): """ Compute geometrical attributes of a Struct object """ # Make Poly closed, counter-clockwise, with '(cc,N)' layout and arrayorder Poly = _GG.Poly_Order(Poly, order='C', Clock=False, close=True, layout='(cc,N)', Test=True) assert Poly.shape[0]==2, "Arg Poly must be a 2D polygon !" fPfmt = np.ascontiguousarray if arrayorder=='C' else np.asfortranarray # Get all remarkable points and moments NP = Poly.shape[1]-1 P1Max = Poly[:,np.argmax(Poly[0,:])] P1Min = Poly[:,np.argmin(Poly[0,:])] P2Max = Poly[:,np.argmax(Poly[1,:])] P2Min = Poly[:,np.argmin(Poly[1,:])] BaryP = np.sum(Poly[:,:-1],axis=1,keepdims=False)/(Poly.shape[1]-1) BaryL = np.array([(P1Max[0]+P1Min[0])/2., (P2Max[1]+P2Min[1])/2.]) TorP = plg.Polygon(Poly.T) Surf = TorP.area() BaryS = np.array(TorP.center()).flatten() # Get lim-related indicators noccur = int(pos.size) Multi = noccur>1 # Get Tor-related quantities if Type.lower()=='lin': Vol, BaryV = None, None else: Vol, BaryV = _GG.Poly_VolAngTor(Poly) msg = "Pb. with volume computation for Ves object of type 'Tor' !" assert Vol>0., msg # Compute the non-normalized vector of each side of the Poly Vect = np.diff(Poly,n=1,axis=1) Vect = fPfmt(Vect) # Compute the normalised vectors directed inwards Vin = np.array([Vect[1,:],-Vect[0,:]]) if not _GG.Poly_isClockwise(Poly): Vin = -Vin Vin = Vin/np.hypot(Vin[0,:],Vin[1,:])[np.newaxis,:] Vin = fPfmt(Vin) poly = _GG.Poly_Order(Poly, order=arrayorder, Clock=Clock, close=False, layout='(cc,N)', Test=True) # Get bounding circle circC = BaryS r = np.sqrt(np.sum((poly-circC[:,np.newaxis])**2,axis=0)) circr = np.max(r) dout = {'Poly':poly, 'pos':pos, 'extent':extent, 'noccur':noccur, 'Multi':Multi, 'nP':NP, 'P1Max':P1Max, 'P1Min':P1Min, 'P2Max':P2Max, 'P2Min':P2Min, 'BaryP':BaryP, 'BaryL':BaryL, 'BaryS':BaryS, 'BaryV':BaryV, 'Surf':Surf, 'VolAng':Vol, 'Vect':Vect, 'VIn':Vin, 'circ-C':circC, 'circ-r':circr, 'Clock':Clock} return dout
[ "def", "_Struct_set_Poly", "(", "Poly", ",", "pos", "=", "None", ",", "extent", "=", "None", ",", "arrayorder", "=", "'C'", ",", "Type", "=", "'Tor'", ",", "Clock", "=", "False", ")", ":", "# Make Poly closed, counter-clockwise, with '(cc,N)' layout and arrayorder", "Poly", "=", "_GG", ".", "Poly_Order", "(", "Poly", ",", "order", "=", "'C'", ",", "Clock", "=", "False", ",", "close", "=", "True", ",", "layout", "=", "'(cc,N)'", ",", "Test", "=", "True", ")", "assert", "Poly", ".", "shape", "[", "0", "]", "==", "2", ",", "\"Arg Poly must be a 2D polygon !\"", "fPfmt", "=", "np", ".", "ascontiguousarray", "if", "arrayorder", "==", "'C'", "else", "np", ".", "asfortranarray", "# Get all remarkable points and moments", "NP", "=", "Poly", ".", "shape", "[", "1", "]", "-", "1", "P1Max", "=", "Poly", "[", ":", ",", "np", ".", "argmax", "(", "Poly", "[", "0", ",", ":", "]", ")", "]", "P1Min", "=", "Poly", "[", ":", ",", "np", ".", "argmin", "(", "Poly", "[", "0", ",", ":", "]", ")", "]", "P2Max", "=", "Poly", "[", ":", ",", "np", ".", "argmax", "(", "Poly", "[", "1", ",", ":", "]", ")", "]", "P2Min", "=", "Poly", "[", ":", ",", "np", ".", "argmin", "(", "Poly", "[", "1", ",", ":", "]", ")", "]", "BaryP", "=", "np", ".", "sum", "(", "Poly", "[", ":", ",", ":", "-", "1", "]", ",", "axis", "=", "1", ",", "keepdims", "=", "False", ")", "/", "(", "Poly", ".", "shape", "[", "1", "]", "-", "1", ")", "BaryL", "=", "np", ".", "array", "(", "[", "(", "P1Max", "[", "0", "]", "+", "P1Min", "[", "0", "]", ")", "/", "2.", ",", "(", "P2Max", "[", "1", "]", "+", "P2Min", "[", "1", "]", ")", "/", "2.", "]", ")", "TorP", "=", "plg", ".", "Polygon", "(", "Poly", ".", "T", ")", "Surf", "=", "TorP", ".", "area", "(", ")", "BaryS", "=", "np", ".", "array", "(", "TorP", ".", "center", "(", ")", ")", ".", "flatten", "(", ")", "# Get lim-related indicators", "noccur", "=", "int", "(", "pos", ".", "size", ")", "Multi", "=", "noccur", ">", "1", "# Get Tor-related quantities", "if", "Type", ".", "lower", "(", ")", "==", "'lin'", ":", "Vol", ",", "BaryV", "=", "None", ",", "None", "else", ":", "Vol", ",", "BaryV", "=", "_GG", ".", "Poly_VolAngTor", "(", "Poly", ")", "msg", "=", "\"Pb. 
with volume computation for Ves object of type 'Tor' !\"", "assert", "Vol", ">", "0.", ",", "msg", "# Compute the non-normalized vector of each side of the Poly", "Vect", "=", "np", ".", "diff", "(", "Poly", ",", "n", "=", "1", ",", "axis", "=", "1", ")", "Vect", "=", "fPfmt", "(", "Vect", ")", "# Compute the normalised vectors directed inwards", "Vin", "=", "np", ".", "array", "(", "[", "Vect", "[", "1", ",", ":", "]", ",", "-", "Vect", "[", "0", ",", ":", "]", "]", ")", "if", "not", "_GG", ".", "Poly_isClockwise", "(", "Poly", ")", ":", "Vin", "=", "-", "Vin", "Vin", "=", "Vin", "/", "np", ".", "hypot", "(", "Vin", "[", "0", ",", ":", "]", ",", "Vin", "[", "1", ",", ":", "]", ")", "[", "np", ".", "newaxis", ",", ":", "]", "Vin", "=", "fPfmt", "(", "Vin", ")", "poly", "=", "_GG", ".", "Poly_Order", "(", "Poly", ",", "order", "=", "arrayorder", ",", "Clock", "=", "Clock", ",", "close", "=", "False", ",", "layout", "=", "'(cc,N)'", ",", "Test", "=", "True", ")", "# Get bounding circle", "circC", "=", "BaryS", "r", "=", "np", ".", "sqrt", "(", "np", ".", "sum", "(", "(", "poly", "-", "circC", "[", ":", ",", "np", ".", "newaxis", "]", ")", "**", "2", ",", "axis", "=", "0", ")", ")", "circr", "=", "np", ".", "max", "(", "r", ")", "dout", "=", "{", "'Poly'", ":", "poly", ",", "'pos'", ":", "pos", ",", "'extent'", ":", "extent", ",", "'noccur'", ":", "noccur", ",", "'Multi'", ":", "Multi", ",", "'nP'", ":", "NP", ",", "'P1Max'", ":", "P1Max", ",", "'P1Min'", ":", "P1Min", ",", "'P2Max'", ":", "P2Max", ",", "'P2Min'", ":", "P2Min", ",", "'BaryP'", ":", "BaryP", ",", "'BaryL'", ":", "BaryL", ",", "'BaryS'", ":", "BaryS", ",", "'BaryV'", ":", "BaryV", ",", "'Surf'", ":", "Surf", ",", "'VolAng'", ":", "Vol", ",", "'Vect'", ":", "Vect", ",", "'VIn'", ":", "Vin", ",", "'circ-C'", ":", "circC", ",", "'circ-r'", ":", "circr", ",", "'Clock'", ":", "Clock", "}", "return", "dout" ]
Compute geometrical attributes of a Struct object
[ "Compute", "geometrical", "attributes", "of", "a", "Struct", "object" ]
python
train
python-cmd2/cmd2
cmd2/cmd2.py
https://github.com/python-cmd2/cmd2/blob/b22c0bd891ed08c8b09df56df9d91f48166a5e2a/cmd2/cmd2.py#L3763-L3784
def enable_command(self, command: str) -> None: """ Enable a command by restoring its functions :param command: the command being enabled """ # If the commands is already enabled, then return if command not in self.disabled_commands: return help_func_name = HELP_FUNC_PREFIX + command # Restore the command and help functions to their original values dc = self.disabled_commands[command] setattr(self, self.cmd_func_name(command), dc.command_function) if dc.help_function is None: delattr(self, help_func_name) else: setattr(self, help_func_name, dc.help_function) # Remove the disabled command entry del self.disabled_commands[command]
[ "def", "enable_command", "(", "self", ",", "command", ":", "str", ")", "->", "None", ":", "# If the commands is already enabled, then return", "if", "command", "not", "in", "self", ".", "disabled_commands", ":", "return", "help_func_name", "=", "HELP_FUNC_PREFIX", "+", "command", "# Restore the command and help functions to their original values", "dc", "=", "self", ".", "disabled_commands", "[", "command", "]", "setattr", "(", "self", ",", "self", ".", "cmd_func_name", "(", "command", ")", ",", "dc", ".", "command_function", ")", "if", "dc", ".", "help_function", "is", "None", ":", "delattr", "(", "self", ",", "help_func_name", ")", "else", ":", "setattr", "(", "self", ",", "help_func_name", ",", "dc", ".", "help_function", ")", "# Remove the disabled command entry", "del", "self", ".", "disabled_commands", "[", "command", "]" ]
Enable a command by restoring its functions :param command: the command being enabled
[ "Enable", "a", "command", "by", "restoring", "its", "functions", ":", "param", "command", ":", "the", "command", "being", "enabled" ]
python
train
senaite/senaite.core
bika/lims/content/client.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/content/client.py#L265-L283
def manage_delObjects(self, ids=None, REQUEST=None): """Overrides parent function. If the ids passed in are from Attachment types, the function ignores the DeleteObjects permission. For the rest of types, it works as usual (checks the permission) """ if ids is None: ids = [] if isinstance(ids, basestring): ids = [ids] for id in ids: item = self._getOb(id) if isinstance(item, Attachment): # Ignore DeleteObjects permission check continue if not _checkPermission(permissions.DeleteObjects, item): raise Unauthorized, ( "Do not have permissions to remove this object") return PortalFolder.manage_delObjects(self, ids, REQUEST=REQUEST)
[ "def", "manage_delObjects", "(", "self", ",", "ids", "=", "None", ",", "REQUEST", "=", "None", ")", ":", "if", "ids", "is", "None", ":", "ids", "=", "[", "]", "if", "isinstance", "(", "ids", ",", "basestring", ")", ":", "ids", "=", "[", "ids", "]", "for", "id", "in", "ids", ":", "item", "=", "self", ".", "_getOb", "(", "id", ")", "if", "isinstance", "(", "item", ",", "Attachment", ")", ":", "# Ignore DeleteObjects permission check", "continue", "if", "not", "_checkPermission", "(", "permissions", ".", "DeleteObjects", ",", "item", ")", ":", "raise", "Unauthorized", ",", "(", "\"Do not have permissions to remove this object\"", ")", "return", "PortalFolder", ".", "manage_delObjects", "(", "self", ",", "ids", ",", "REQUEST", "=", "REQUEST", ")" ]
Overrides parent function. If the ids passed in are from Attachment types, the function ignores the DeleteObjects permission. For the rest of types, it works as usual (checks the permission)
[ "Overrides", "parent", "function", ".", "If", "the", "ids", "passed", "in", "are", "from", "Attachment", "types", "the", "function", "ignores", "the", "DeleteObjects", "permission", ".", "For", "the", "rest", "of", "types", "it", "works", "as", "usual", "(", "checks", "the", "permission", ")" ]
python
train
GNS3/gns3-server
gns3server/compute/dynamips/nodes/router.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/dynamips/nodes/router.py#L638-L651
def set_clock_divisor(self, clock_divisor): """ Sets the clock divisor value. The higher is the value, the faster is the clock in the virtual machine. The default is 4, but it is often required to adjust it. :param clock_divisor: clock divisor value (integer) """ yield from self._hypervisor.send('vm set_clock_divisor "{name}" {clock}'.format(name=self._name, clock=clock_divisor)) log.info('Router "{name}" [{id}]: clock divisor updated from {old_clock} to {new_clock}'.format(name=self._name, id=self._id, old_clock=self._clock_divisor, new_clock=clock_divisor)) self._clock_divisor = clock_divisor
[ "def", "set_clock_divisor", "(", "self", ",", "clock_divisor", ")", ":", "yield", "from", "self", ".", "_hypervisor", ".", "send", "(", "'vm set_clock_divisor \"{name}\" {clock}'", ".", "format", "(", "name", "=", "self", ".", "_name", ",", "clock", "=", "clock_divisor", ")", ")", "log", ".", "info", "(", "'Router \"{name}\" [{id}]: clock divisor updated from {old_clock} to {new_clock}'", ".", "format", "(", "name", "=", "self", ".", "_name", ",", "id", "=", "self", ".", "_id", ",", "old_clock", "=", "self", ".", "_clock_divisor", ",", "new_clock", "=", "clock_divisor", ")", ")", "self", ".", "_clock_divisor", "=", "clock_divisor" ]
Sets the clock divisor value. The higher is the value, the faster is the clock in the virtual machine. The default is 4, but it is often required to adjust it. :param clock_divisor: clock divisor value (integer)
[ "Sets", "the", "clock", "divisor", "value", ".", "The", "higher", "is", "the", "value", "the", "faster", "is", "the", "clock", "in", "the", "virtual", "machine", ".", "The", "default", "is", "4", "but", "it", "is", "often", "required", "to", "adjust", "it", "." ]
python
train
rhayes777/PyAutoFit
autofit/tools/pipeline.py
https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/tools/pipeline.py#L38-L53
def add(self, phase_name, result): """ Add the result of a phase. Parameters ---------- phase_name: str The name of the phase result The result of that phase """ if phase_name in self.__result_dict: raise exc.PipelineException( "Results from a phase called {} already exist in the pipeline".format(phase_name)) self.__result_list.append(result) self.__result_dict[phase_name] = result
[ "def", "add", "(", "self", ",", "phase_name", ",", "result", ")", ":", "if", "phase_name", "in", "self", ".", "__result_dict", ":", "raise", "exc", ".", "PipelineException", "(", "\"Results from a phase called {} already exist in the pipeline\"", ".", "format", "(", "phase_name", ")", ")", "self", ".", "__result_list", ".", "append", "(", "result", ")", "self", ".", "__result_dict", "[", "phase_name", "]", "=", "result" ]
Add the result of a phase. Parameters ---------- phase_name: str The name of the phase result The result of that phase
[ "Add", "the", "result", "of", "a", "phase", "." ]
python
train
andy-z/ged4py
ged4py/detail/name.py
https://github.com/andy-z/ged4py/blob/d0e0cceaadf0a84cbf052705e3c27303b12e1757/ged4py/detail/name.py#L7-L35
def split_name(name): """Extracts pieces of name from full name string. Full name can have one of these formats: <NAME_TEXT> | /<NAME_TEXT>/ | <NAME_TEXT> /<NAME_TEXT>/ | /<NAME_TEXT>/ <NAME_TEXT> | <NAME_TEXT> /<NAME_TEXT>/ <NAME_TEXT> <NAME_TEXT> can include almost anything excluding commas, numbers, special characters (though some test files use numbers for the names). Text between slashes is considered a surname, outside slashes - given name. This method splits full name into pieces at slashes, e.g.: "First /Last/" -> ("First", "Last", "") "/Last/ First" -> ("", "Last", "First") "First /Last/ Jr." -> ("First", "Last", "Jr.") "First Jr." -> ("First Jr.", "", "") :param str name: Full name string. :return: 2-tuple `(given1, surname, given2)`, `surname` or `given` will be empty strings if they are not present in full string. """ given1, _, rem = name.partition("/") surname, _, given2 = rem.partition("/") return given1.strip(), surname.strip(), given2.strip()
[ "def", "split_name", "(", "name", ")", ":", "given1", ",", "_", ",", "rem", "=", "name", ".", "partition", "(", "\"/\"", ")", "surname", ",", "_", ",", "given2", "=", "rem", ".", "partition", "(", "\"/\"", ")", "return", "given1", ".", "strip", "(", ")", ",", "surname", ".", "strip", "(", ")", ",", "given2", ".", "strip", "(", ")" ]
Extracts pieces of name from full name string. Full name can have one of these formats: <NAME_TEXT> | /<NAME_TEXT>/ | <NAME_TEXT> /<NAME_TEXT>/ | /<NAME_TEXT>/ <NAME_TEXT> | <NAME_TEXT> /<NAME_TEXT>/ <NAME_TEXT> <NAME_TEXT> can include almost anything excluding commas, numbers, special characters (though some test files use numbers for the names). Text between slashes is considered a surname, outside slashes - given name. This method splits full name into pieces at slashes, e.g.: "First /Last/" -> ("First", "Last", "") "/Last/ First" -> ("", "Last", "First") "First /Last/ Jr." -> ("First", "Last", "Jr.") "First Jr." -> ("First Jr.", "", "") :param str name: Full name string. :return: 2-tuple `(given1, surname, given2)`, `surname` or `given` will be empty strings if they are not present in full string.
[ "Extracts", "pieces", "of", "name", "from", "full", "name", "string", "." ]
python
train
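A short usage sketch for the ``split_name`` record above, reusing the example mappings already given in its docstring (the import path follows the record's ``path`` field)::

    from ged4py.detail.name import split_name

    # (given name before the surname, surname, given name after the surname)
    assert split_name("First /Last/") == ("First", "Last", "")
    assert split_name("/Last/ First") == ("", "Last", "First")
    assert split_name("First /Last/ Jr.") == ("First", "Last", "Jr.")
    assert split_name("First Jr.") == ("First Jr.", "", "")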
tensorflow/cleverhans
examples/multigpu_advtrain/model.py
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/multigpu_advtrain/model.py#L206-L214
def set_device(self, device_name): """ Set the device before the next fprop to create a new graph on the specified device. """ device_name = unify_device_name(device_name) self.device_name = device_name for layer in self.layers: layer.device_name = device_name
[ "def", "set_device", "(", "self", ",", "device_name", ")", ":", "device_name", "=", "unify_device_name", "(", "device_name", ")", "self", ".", "device_name", "=", "device_name", "for", "layer", "in", "self", ".", "layers", ":", "layer", ".", "device_name", "=", "device_name" ]
Set the device before the next fprop to create a new graph on the specified device.
[ "Set", "the", "device", "before", "the", "next", "fprop", "to", "create", "a", "new", "graph", "on", "the", "specified", "device", "." ]
python
train
serhatbolsu/robotframework-appiumlibrary
AppiumLibrary/keywords/_element.py
https://github.com/serhatbolsu/robotframework-appiumlibrary/blob/91c808cf0602af6be8135ac529fa488fded04a85/AppiumLibrary/keywords/_element.py#L82-L88
def input_value(self, locator, text): """Sets the given value into text field identified by `locator`. This is an IOS only keyword, input value makes use of set_value See `introduction` for details about locating elements. """ self._info("Setting text '%s' into text field '%s'" % (text, locator)) self._element_input_value_by_locator(locator, text)
[ "def", "input_value", "(", "self", ",", "locator", ",", "text", ")", ":", "self", ".", "_info", "(", "\"Setting text '%s' into text field '%s'\"", "%", "(", "text", ",", "locator", ")", ")", "self", ".", "_element_input_value_by_locator", "(", "locator", ",", "text", ")" ]
Sets the given value into text field identified by `locator`. This is an IOS only keyword, input value makes use of set_value See `introduction` for details about locating elements.
[ "Sets", "the", "given", "value", "into", "text", "field", "identified", "by", "locator", ".", "This", "is", "an", "IOS", "only", "keyword", "input", "value", "makes", "use", "of", "set_value", "See", "introduction", "for", "details", "about", "locating", "elements", "." ]
python
train
saltstack/salt
salt/modules/xfs.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/xfs.py#L275-L297
def _xfs_prune_output(out, uuid): ''' Parse prune output. ''' data = {} cnt = [] cutpoint = False for line in [l.strip() for l in out.split("\n") if l]: if line.startswith("-"): if cutpoint: break else: cutpoint = True continue if cutpoint: cnt.append(line) for kset in [e for e in cnt[1:] if ':' in e]: key, val = [t.strip() for t in kset.split(":", 1)] data[key.lower().replace(" ", "_")] = val return data.get('uuid') == uuid and data or {}
[ "def", "_xfs_prune_output", "(", "out", ",", "uuid", ")", ":", "data", "=", "{", "}", "cnt", "=", "[", "]", "cutpoint", "=", "False", "for", "line", "in", "[", "l", ".", "strip", "(", ")", "for", "l", "in", "out", ".", "split", "(", "\"\\n\"", ")", "if", "l", "]", ":", "if", "line", ".", "startswith", "(", "\"-\"", ")", ":", "if", "cutpoint", ":", "break", "else", ":", "cutpoint", "=", "True", "continue", "if", "cutpoint", ":", "cnt", ".", "append", "(", "line", ")", "for", "kset", "in", "[", "e", "for", "e", "in", "cnt", "[", "1", ":", "]", "if", "':'", "in", "e", "]", ":", "key", ",", "val", "=", "[", "t", ".", "strip", "(", ")", "for", "t", "in", "kset", ".", "split", "(", "\":\"", ",", "1", ")", "]", "data", "[", "key", ".", "lower", "(", ")", ".", "replace", "(", "\" \"", ",", "\"_\"", ")", "]", "=", "val", "return", "data", ".", "get", "(", "'uuid'", ")", "==", "uuid", "and", "data", "or", "{", "}" ]
Parse prune output.
[ "Parse", "prune", "output", "." ]
python
train
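To show the shape of the dictionary the ``_xfs_prune_output`` record above produces, a sketch with an invented sample; the text below only exercises the parsing rules (real xfsdump/xfsrestore output will differ), and importing the private helper assumes a working salt environment::

    from salt.modules.xfs import _xfs_prune_output

    sample = """
    xfsdump: inventory
    ----------------------------
    session label: weekly
    UUID : 9fcd-11e0-aabb
    media label : backup0
    """

    _xfs_prune_output(sample, '9fcd-11e0-aabb')
    # -> {'uuid': '9fcd-11e0-aabb', 'media_label': 'backup0'}
    _xfs_prune_output(sample, 'some-other-uuid')   # -> {}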
idlesign/django-admirarchy
admirarchy/utils.py
https://github.com/idlesign/django-admirarchy/blob/723e4fd212fdebcc156492cb16b9d65356f5ca73/admirarchy/utils.py#L307-L349
def hook_get_results(self, changelist): """Triggered by `ChangeList.get_results()`.""" # Poor NestedSet guys they've punished themselves once chosen that approach, # and now we punish them again with all those DB hits. result_list = list(changelist.result_list) # Get children stats. filter_kwargs = {'%s' % self.left_field: models.F('%s' % self.right_field) - 1} # Leaf nodes only. filter_kwargs.update(self.get_immediate_children_filter(self.parent)) stats_qs = changelist.result_list.filter(**filter_kwargs).values_list('id') leafs = [item[0] for item in stats_qs] for result in result_list: if result.id in leafs: setattr(result, self.CHILD_COUNT_MODEL_ATTR, 0) else: setattr(result, self.CHILD_COUNT_MODEL_ATTR, '>1') # Too much pain to get real stats, so that'll suffice. if self.pid: # Render to upper level link. parent = self.parent filter_kwargs = { '%s__lt' % self.left_field: getattr(parent, self.left_field), '%s__gt' % self.right_field: getattr(parent, self.right_field), } try: granparent_id = changelist.model.objects.filter(**filter_kwargs).order_by('-%s' % self.left_field)[0].id except IndexError: granparent_id = None if granparent_id != parent.id: parent = changelist.model(pk=granparent_id) setattr(parent, self.UPPER_LEVEL_MODEL_ATTR, True) result_list = [parent] + result_list changelist.result_list = result_list
[ "def", "hook_get_results", "(", "self", ",", "changelist", ")", ":", "# Poor NestedSet guys they've punished themselves once chosen that approach,", "# and now we punish them again with all those DB hits.", "result_list", "=", "list", "(", "changelist", ".", "result_list", ")", "# Get children stats.", "filter_kwargs", "=", "{", "'%s'", "%", "self", ".", "left_field", ":", "models", ".", "F", "(", "'%s'", "%", "self", ".", "right_field", ")", "-", "1", "}", "# Leaf nodes only.", "filter_kwargs", ".", "update", "(", "self", ".", "get_immediate_children_filter", "(", "self", ".", "parent", ")", ")", "stats_qs", "=", "changelist", ".", "result_list", ".", "filter", "(", "*", "*", "filter_kwargs", ")", ".", "values_list", "(", "'id'", ")", "leafs", "=", "[", "item", "[", "0", "]", "for", "item", "in", "stats_qs", "]", "for", "result", "in", "result_list", ":", "if", "result", ".", "id", "in", "leafs", ":", "setattr", "(", "result", ",", "self", ".", "CHILD_COUNT_MODEL_ATTR", ",", "0", ")", "else", ":", "setattr", "(", "result", ",", "self", ".", "CHILD_COUNT_MODEL_ATTR", ",", "'>1'", ")", "# Too much pain to get real stats, so that'll suffice.", "if", "self", ".", "pid", ":", "# Render to upper level link.", "parent", "=", "self", ".", "parent", "filter_kwargs", "=", "{", "'%s__lt'", "%", "self", ".", "left_field", ":", "getattr", "(", "parent", ",", "self", ".", "left_field", ")", ",", "'%s__gt'", "%", "self", ".", "right_field", ":", "getattr", "(", "parent", ",", "self", ".", "right_field", ")", ",", "}", "try", ":", "granparent_id", "=", "changelist", ".", "model", ".", "objects", ".", "filter", "(", "*", "*", "filter_kwargs", ")", ".", "order_by", "(", "'-%s'", "%", "self", ".", "left_field", ")", "[", "0", "]", ".", "id", "except", "IndexError", ":", "granparent_id", "=", "None", "if", "granparent_id", "!=", "parent", ".", "id", ":", "parent", "=", "changelist", ".", "model", "(", "pk", "=", "granparent_id", ")", "setattr", "(", "parent", ",", "self", ".", "UPPER_LEVEL_MODEL_ATTR", ",", "True", ")", "result_list", "=", "[", "parent", "]", "+", "result_list", "changelist", ".", "result_list", "=", "result_list" ]
Triggered by `ChangeList.get_results()`.
[ "Triggered", "by", "ChangeList", ".", "get_results", "()", "." ]
python
train
boundary/pulse-api-cli
boundary/plugin_get.py
https://github.com/boundary/pulse-api-cli/blob/b01ca65b442eed19faac309c9d62bbc3cb2c098f/boundary/plugin_get.py#L32-L40
def get_arguments(self): """ Extracts the specific arguments of this CLI """ ApiCli.get_arguments(self) if self.args.plugin_name is not None: self.plugin_name = self.args.plugin_name self.path = "v1/plugins/{0}".format(self.plugin_name)
[ "def", "get_arguments", "(", "self", ")", ":", "ApiCli", ".", "get_arguments", "(", "self", ")", "if", "self", ".", "args", ".", "plugin_name", "is", "not", "None", ":", "self", ".", "plugin_name", "=", "self", ".", "args", ".", "plugin_name", "self", ".", "path", "=", "\"v1/plugins/{0}\"", ".", "format", "(", "self", ".", "plugin_name", ")" ]
Extracts the specific arguments of this CLI
[ "Extracts", "the", "specific", "arguments", "of", "this", "CLI" ]
python
test
pandas-dev/pandas
pandas/core/nanops.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/nanops.py#L1203-L1232
def _nanpercentile_1d(values, mask, q, na_value, interpolation): """ Wrapper for np.percentile that skips missing values, specialized to 1-dimensional case. Parameters ---------- values : array over which to find quantiles mask : ndarray[bool] locations in values that should be considered missing q : scalar or array of quantile indices to find na_value : scalar value to return for empty or all-null values interpolation : str Returns ------- quantiles : scalar or array """ # mask is Union[ExtensionArray, ndarray] values = values[~mask] if len(values) == 0: if lib.is_scalar(q): return na_value else: return np.array([na_value] * len(q), dtype=values.dtype) return np.percentile(values, q, interpolation=interpolation)
[ "def", "_nanpercentile_1d", "(", "values", ",", "mask", ",", "q", ",", "na_value", ",", "interpolation", ")", ":", "# mask is Union[ExtensionArray, ndarray]", "values", "=", "values", "[", "~", "mask", "]", "if", "len", "(", "values", ")", "==", "0", ":", "if", "lib", ".", "is_scalar", "(", "q", ")", ":", "return", "na_value", "else", ":", "return", "np", ".", "array", "(", "[", "na_value", "]", "*", "len", "(", "q", ")", ",", "dtype", "=", "values", ".", "dtype", ")", "return", "np", ".", "percentile", "(", "values", ",", "q", ",", "interpolation", "=", "interpolation", ")" ]
Wrapper for np.percentile that skips missing values, specialized to 1-dimensional case. Parameters ---------- values : array over which to find quantiles mask : ndarray[bool] locations in values that should be considered missing q : scalar or array of quantile indices to find na_value : scalar value to return for empty or all-null values interpolation : str Returns ------- quantiles : scalar or array
[ "Wraper", "for", "np", ".", "percentile", "that", "skips", "missing", "values", "specialized", "to", "1", "-", "dimensional", "case", "." ]
python
train
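A small numeric illustration of the mask-then-percentile step in the record above; it uses ``np.percentile`` directly rather than calling the private pandas helper::

    import numpy as np

    values = np.array([1.0, np.nan, 3.0, 4.0])
    mask = np.isnan(values)              # positions treated as missing

    # The non-empty path above reduces to: drop masked values, then take the percentile.
    np.percentile(values[~mask], 50)     # -> 3.0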
senaite/senaite.jsonapi
src/senaite/jsonapi/fieldmanagers.py
https://github.com/senaite/senaite.jsonapi/blob/871959f4b1c9edbb477e9456325527ca78e13ec6/src/senaite/jsonapi/fieldmanagers.py#L201-L213
def json_data(self, instance, default=None): """Get a JSON compatible value """ value = self.get(instance) out = [] for rel in value: if rel.isBroken(): logger.warn("Skipping broken relation {}".format(repr(rel))) continue obj = rel.to_object out.append(api.get_url_info(obj)) return out
[ "def", "json_data", "(", "self", ",", "instance", ",", "default", "=", "None", ")", ":", "value", "=", "self", ".", "get", "(", "instance", ")", "out", "=", "[", "]", "for", "rel", "in", "value", ":", "if", "rel", ".", "isBroken", "(", ")", ":", "logger", ".", "warn", "(", "\"Skipping broken relation {}\"", ".", "format", "(", "repr", "(", "rel", ")", ")", ")", "continue", "obj", "=", "rel", ".", "to_object", "out", ".", "append", "(", "api", ".", "get_url_info", "(", "obj", ")", ")", "return", "out" ]
Get a JSON compatible value
[ "Get", "a", "JSON", "compatible", "value" ]
python
train
JoeVirtual/KonFoo
konfoo/core.py
https://github.com/JoeVirtual/KonFoo/blob/0c62ef5c2bed4deaf908b34082e4de2544532fdc/konfoo/core.py#L2000-L2006
def name(self): """ Returns the type name of the `Stream` field (read-only).""" size = len(self) if size > 0: return self.item_type.name.capitalize() + str(size) else: return self.item_type.name.capitalize()
[ "def", "name", "(", "self", ")", ":", "size", "=", "len", "(", "self", ")", "if", "size", ">", "0", ":", "return", "self", ".", "item_type", ".", "name", ".", "capitalize", "(", ")", "+", "str", "(", "size", ")", "else", ":", "return", "self", ".", "item_type", ".", "name", ".", "capitalize", "(", ")" ]
Returns the type name of the `Stream` field (read-only).
[ "Returns", "the", "type", "name", "of", "the", "Stream", "field", "(", "read", "-", "only", ")", "." ]
python
train
Microsoft/knack
knack/log.py
https://github.com/Microsoft/knack/blob/5f1a480a33f103e2688c46eef59fb2d9eaf2baad/knack/log.py#L143-L152
def _determine_verbose_level(self, args): """ Get verbose level by reading the arguments. """ verbose_level = 0 for arg in args: if arg == CLILogging.VERBOSE_FLAG: verbose_level += 1 elif arg == CLILogging.DEBUG_FLAG: verbose_level += 2 # Use max verbose level if too much verbosity specified. return min(verbose_level, len(self.console_log_configs) - 1)
[ "def", "_determine_verbose_level", "(", "self", ",", "args", ")", ":", "verbose_level", "=", "0", "for", "arg", "in", "args", ":", "if", "arg", "==", "CLILogging", ".", "VERBOSE_FLAG", ":", "verbose_level", "+=", "1", "elif", "arg", "==", "CLILogging", ".", "DEBUG_FLAG", ":", "verbose_level", "+=", "2", "# Use max verbose level if too much verbosity specified.", "return", "min", "(", "verbose_level", ",", "len", "(", "self", ".", "console_log_configs", ")", "-", "1", ")" ]
Get verbose level by reading the arguments.
[ "Get", "verbose", "level", "by", "reading", "the", "arguments", "." ]
python
train
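An illustration of the counting rule in the record above, assuming the flags are ``--verbose`` and ``--debug`` (the real values live in ``knack.log.CLILogging``)::

    def verbose_level(args, num_console_log_configs=3):
        # One point per --verbose, two per --debug, capped by the number of
        # console log configurations, as in the method above.
        level = 0
        for arg in args:
            if arg == '--verbose':
                level += 1
            elif arg == '--debug':
                level += 2
        return min(level, num_console_log_configs - 1)

    # verbose_level(['--verbose'])            -> 1
    # verbose_level(['--verbose', '--debug']) -> 2 (capped)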
ayust/kitnirc
kitnirc/contrib/commands.py
https://github.com/ayust/kitnirc/blob/cf19fe39219da75f053e1a3976bf21331b6fefea/kitnirc/contrib/commands.py#L84-L97
def check_for_interest(self, client, recipient, message): """Determine whether this line is addressing us.""" for prefix in self.prefixes: if message.startswith(prefix): return True, message[len(prefix):] # Don't require a prefix if addressed in PM. # This comes after the prefix checks because # if the user does include a prefix, we want # to strip it, even in PM. if not isinstance(recipient, Channel): return True, message return False, None
[ "def", "check_for_interest", "(", "self", ",", "client", ",", "recipient", ",", "message", ")", ":", "for", "prefix", "in", "self", ".", "prefixes", ":", "if", "message", ".", "startswith", "(", "prefix", ")", ":", "return", "True", ",", "message", "[", "len", "(", "prefix", ")", ":", "]", "# Don't require a prefix if addressed in PM.", "# This comes after the prefix checks because", "# if the user does include a prefix, we want", "# to strip it, even in PM.", "if", "not", "isinstance", "(", "recipient", ",", "Channel", ")", ":", "return", "True", ",", "message", "return", "False", ",", "None" ]
Determine whether this line is addressing us.
[ "Determine", "whether", "this", "line", "is", "addressing", "us", "." ]
python
train
PiotrDabkowski/Js2Py
js2py/internals/operations.py
https://github.com/PiotrDabkowski/Js2Py/blob/c0fa43f5679cf91ca8986c5747fcb07a433dc584/js2py/internals/operations.py#L227-L253
def abstract_equality_op(self, other): ''' returns the result of JS == compare. result is PyJs type: bool''' tx, ty = Type(self), Type(other) if tx == ty: if tx == 'Undefined' or tx == 'Null': return True if tx == 'Number' or tx == 'String' or tx == 'Boolean': return self == other return self is other # Object elif (tx == 'Undefined' and ty == 'Null') or (ty == 'Undefined' and tx == 'Null'): return True elif tx == 'Number' and ty == 'String': return abstract_equality_op(self, to_number(other)) elif tx == 'String' and ty == 'Number': return abstract_equality_op(to_number(self), other) elif tx == 'Boolean': return abstract_equality_op(to_number(self), other) elif ty == 'Boolean': return abstract_equality_op(self, to_number(other)) elif (tx == 'String' or tx == 'Number') and is_object(other): return abstract_equality_op(self, to_primitive(other)) elif (ty == 'String' or ty == 'Number') and is_object(self): return abstract_equality_op(to_primitive(self), other) else: return False
[ "def", "abstract_equality_op", "(", "self", ",", "other", ")", ":", "tx", ",", "ty", "=", "Type", "(", "self", ")", ",", "Type", "(", "other", ")", "if", "tx", "==", "ty", ":", "if", "tx", "==", "'Undefined'", "or", "tx", "==", "'Null'", ":", "return", "True", "if", "tx", "==", "'Number'", "or", "tx", "==", "'String'", "or", "tx", "==", "'Boolean'", ":", "return", "self", "==", "other", "return", "self", "is", "other", "# Object", "elif", "(", "tx", "==", "'Undefined'", "and", "ty", "==", "'Null'", ")", "or", "(", "ty", "==", "'Undefined'", "and", "tx", "==", "'Null'", ")", ":", "return", "True", "elif", "tx", "==", "'Number'", "and", "ty", "==", "'String'", ":", "return", "abstract_equality_op", "(", "self", ",", "to_number", "(", "other", ")", ")", "elif", "tx", "==", "'String'", "and", "ty", "==", "'Number'", ":", "return", "abstract_equality_op", "(", "to_number", "(", "self", ")", ",", "other", ")", "elif", "tx", "==", "'Boolean'", ":", "return", "abstract_equality_op", "(", "to_number", "(", "self", ")", ",", "other", ")", "elif", "ty", "==", "'Boolean'", ":", "return", "abstract_equality_op", "(", "self", ",", "to_number", "(", "other", ")", ")", "elif", "(", "tx", "==", "'String'", "or", "tx", "==", "'Number'", ")", "and", "is_object", "(", "other", ")", ":", "return", "abstract_equality_op", "(", "self", ",", "to_primitive", "(", "other", ")", ")", "elif", "(", "ty", "==", "'String'", "or", "ty", "==", "'Number'", ")", "and", "is_object", "(", "self", ")", ":", "return", "abstract_equality_op", "(", "to_primitive", "(", "self", ")", ",", "other", ")", "else", ":", "return", "False" ]
returns the result of JS == compare. result is PyJs type: bool
[ "returns", "the", "result", "of", "JS", "==", "compare", ".", "result", "is", "PyJs", "type", ":", "bool" ]
python
valid
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_1/work_item_tracking/work_item_tracking_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_1/work_item_tracking/work_item_tracking_client.py#L1552-L1573
def read_reporting_discussions(self, project=None, continuation_token=None, max_page_size=None): """ReadReportingDiscussions. [Preview API] :param str project: Project ID or project name :param str continuation_token: :param int max_page_size: :rtype: :class:`<ReportingWorkItemRevisionsBatch> <azure.devops.v5_1.work-item-tracking.models.ReportingWorkItemRevisionsBatch>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') query_parameters = {} if continuation_token is not None: query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str') if max_page_size is not None: query_parameters['$maxPageSize'] = self._serialize.query('max_page_size', max_page_size, 'int') response = self._send(http_method='GET', location_id='4a644469-90c5-4fcc-9a9f-be0827d369ec', version='5.1-preview.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('ReportingWorkItemRevisionsBatch', response)
[ "def", "read_reporting_discussions", "(", "self", ",", "project", "=", "None", ",", "continuation_token", "=", "None", ",", "max_page_size", "=", "None", ")", ":", "route_values", "=", "{", "}", "if", "project", "is", "not", "None", ":", "route_values", "[", "'project'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'project'", ",", "project", ",", "'str'", ")", "query_parameters", "=", "{", "}", "if", "continuation_token", "is", "not", "None", ":", "query_parameters", "[", "'continuationToken'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'continuation_token'", ",", "continuation_token", ",", "'str'", ")", "if", "max_page_size", "is", "not", "None", ":", "query_parameters", "[", "'$maxPageSize'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'max_page_size'", ",", "max_page_size", ",", "'int'", ")", "response", "=", "self", ".", "_send", "(", "http_method", "=", "'GET'", ",", "location_id", "=", "'4a644469-90c5-4fcc-9a9f-be0827d369ec'", ",", "version", "=", "'5.1-preview.1'", ",", "route_values", "=", "route_values", ",", "query_parameters", "=", "query_parameters", ")", "return", "self", ".", "_deserialize", "(", "'ReportingWorkItemRevisionsBatch'", ",", "response", ")" ]
ReadReportingDiscussions. [Preview API] :param str project: Project ID or project name :param str continuation_token: :param int max_page_size: :rtype: :class:`<ReportingWorkItemRevisionsBatch> <azure.devops.v5_1.work-item-tracking.models.ReportingWorkItemRevisionsBatch>`
[ "ReadReportingDiscussions", ".", "[", "Preview", "API", "]", ":", "param", "str", "project", ":", "Project", "ID", "or", "project", "name", ":", "param", "str", "continuation_token", ":", ":", "param", "int", "max_page_size", ":", ":", "rtype", ":", ":", "class", ":", "<ReportingWorkItemRevisionsBatch", ">", "<azure", ".", "devops", ".", "v5_1", ".", "work", "-", "item", "-", "tracking", ".", "models", ".", "ReportingWorkItemRevisionsBatch", ">" ]
python
train
wtsi-hgi/python-common
hgicommon/collections.py
https://github.com/wtsi-hgi/python-common/blob/0376a6b574ff46e82e509e90b6cb3693a3dbb577/hgicommon/collections.py#L66-L89
def rename(self, key: Any, new_key: Any): """ Renames an item in this collection as a transaction. Will override if new key name already exists. :param key: the current name of the item :param new_key: the new name that the item should have """ if new_key == key: return required_locks = [self._key_locks[key], self._key_locks[new_key]] ordered_required_locks = sorted(required_locks, key=lambda x: id(x)) for lock in ordered_required_locks: lock.acquire() try: if key not in self._data: raise KeyError("Attribute to rename \"%s\" does not exist" % key) self._data[new_key] = self[key] del self._data[key] finally: for lock in required_locks: lock.release()
[ "def", "rename", "(", "self", ",", "key", ":", "Any", ",", "new_key", ":", "Any", ")", ":", "if", "new_key", "==", "key", ":", "return", "required_locks", "=", "[", "self", ".", "_key_locks", "[", "key", "]", ",", "self", ".", "_key_locks", "[", "new_key", "]", "]", "ordered_required_locks", "=", "sorted", "(", "required_locks", ",", "key", "=", "lambda", "x", ":", "id", "(", "x", ")", ")", "for", "lock", "in", "ordered_required_locks", ":", "lock", ".", "acquire", "(", ")", "try", ":", "if", "key", "not", "in", "self", ".", "_data", ":", "raise", "KeyError", "(", "\"Attribute to rename \\\"%s\\\" does not exist\"", "%", "key", ")", "self", ".", "_data", "[", "new_key", "]", "=", "self", "[", "key", "]", "del", "self", ".", "_data", "[", "key", "]", "finally", ":", "for", "lock", "in", "required_locks", ":", "lock", ".", "release", "(", ")" ]
Renames an item in this collection as a transaction. Will override if new key name already exists. :param key: the current name of the item :param new_key: the new name that the item should have
[ "Renames", "an", "item", "in", "this", "collection", "as", "a", "transaction", "." ]
python
valid
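The interesting detail in the record above is that both per-key locks are acquired in a globally consistent order (sorted by ``id``), which is what prevents two concurrent renames such as ``a -> b`` and ``b -> a`` from deadlocking. A minimal standalone sketch of that pattern::

    import threading

    lock_a, lock_b = threading.Lock(), threading.Lock()

    # Both threads sort the locks the same way, so they always try the same
    # lock first and cannot end up holding one lock each while waiting.
    for lock in sorted([lock_a, lock_b], key=id):
        lock.acquire()
    try:
        pass  # ... perform the rename transaction ...
    finally:
        for lock in (lock_a, lock_b):
            lock.release()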
UncleRus/regnupg
regnupg.py
https://github.com/UncleRus/regnupg/blob/c1acb5d459107c70e45967ec554831a5f2cd1aaf/regnupg.py#L909-L921
def sign(self, message, *args, **kwargs): ''' Make a signature. :param message: Message to sign :param key_id: Key for signing, default will be used if null :param passphrase: Key password :param clearsign: Make a clear text signature :param detach: Make a detached signature :param binary: If false, create ASCII armored output :rtype: SignResult ''' return self.sign_file(self.create_stream(message), *args, **kwargs)
[ "def", "sign", "(", "self", ",", "message", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "sign_file", "(", "self", ".", "create_stream", "(", "message", ")", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Make a signature. :param message: Message to sign :param key_id: Key for signing, default will be used if null :param passphrase: Key password :param clearsign: Make a clear text signature :param detach: Make a detached signature :param binary: If false, create ASCII armored output :rtype: SignResult
[ "Make", "a", "signature", "." ]
python
train
PGower/PyCanvas
pycanvas/apis/assignments.py
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/assignments.py#L865-L890
def batch_retrieve_overrides_in_course(self, course_id, assignment_overrides_id, assignment_overrides_assignment_id): """ Batch retrieve overrides in a course. Returns a list of specified overrides in this course, providing they target sections/groups/students visible to the current user. Returns null elements in the list for requests that were not found. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - assignment_overrides[id] """Ids of overrides to retrieve""" params["assignment_overrides[id]"] = assignment_overrides_id # REQUIRED - assignment_overrides[assignment_id] """Ids of assignments for each override""" params["assignment_overrides[assignment_id]"] = assignment_overrides_assignment_id self.logger.debug("GET /api/v1/courses/{course_id}/assignments/overrides with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/assignments/overrides".format(**path), data=data, params=params, all_pages=True)
[ "def", "batch_retrieve_overrides_in_course", "(", "self", ",", "course_id", ",", "assignment_overrides_id", ",", "assignment_overrides_assignment_id", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - course_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"course_id\"", "]", "=", "course_id", "# REQUIRED - assignment_overrides[id]\r", "\"\"\"Ids of overrides to retrieve\"\"\"", "params", "[", "\"assignment_overrides[id]\"", "]", "=", "assignment_overrides_id", "# REQUIRED - assignment_overrides[assignment_id]\r", "\"\"\"Ids of assignments for each override\"\"\"", "params", "[", "\"assignment_overrides[assignment_id]\"", "]", "=", "assignment_overrides_assignment_id", "self", ".", "logger", ".", "debug", "(", "\"GET /api/v1/courses/{course_id}/assignments/overrides with query params: {params} and form data: {data}\"", ".", "format", "(", "params", "=", "params", ",", "data", "=", "data", ",", "*", "*", "path", ")", ")", "return", "self", ".", "generic_request", "(", "\"GET\"", ",", "\"/api/v1/courses/{course_id}/assignments/overrides\"", ".", "format", "(", "*", "*", "path", ")", ",", "data", "=", "data", ",", "params", "=", "params", ",", "all_pages", "=", "True", ")" ]
Batch retrieve overrides in a course. Returns a list of specified overrides in this course, providing they target sections/groups/students visible to the current user. Returns null elements in the list for requests that were not found.
[ "Batch", "retrieve", "overrides", "in", "a", "course", ".", "Returns", "a", "list", "of", "specified", "overrides", "in", "this", "course", "providing", "they", "target", "sections", "/", "groups", "/", "students", "visible", "to", "the", "current", "user", ".", "Returns", "null", "elements", "in", "the", "list", "for", "requests", "that", "were", "not", "found", "." ]
python
train
danilobellini/audiolazy
audiolazy/lazy_analysis.py
https://github.com/danilobellini/audiolazy/blob/dba0a278937909980ed40b976d866b8e97c35dee/audiolazy/lazy_analysis.py#L615-L643
def clip(sig, low=-1., high=1.): """ Clips the signal up to both a lower and a higher limit. Parameters ---------- sig : The signal to be clipped, be it a Stream instance, a list or any iterable. low, high : Lower and higher clipping limit, "saturating" the input to them. Defaults to -1.0 and 1.0, respectively. These can be None when needed one-sided clipping. When both limits are set to None, the output will be a Stream that yields exactly the ``sig`` input data. Returns ------- Clipped signal as a Stream instance. """ if low is None: if high is None: return Stream(sig) return Stream(el if el < high else high for el in sig) if high is None: return Stream(el if el > low else low for el in sig) if high < low: raise ValueError("Higher clipping limit is smaller than lower one") return Stream(high if el > high else (low if el < low else el) for el in sig)
[ "def", "clip", "(", "sig", ",", "low", "=", "-", "1.", ",", "high", "=", "1.", ")", ":", "if", "low", "is", "None", ":", "if", "high", "is", "None", ":", "return", "Stream", "(", "sig", ")", "return", "Stream", "(", "el", "if", "el", "<", "high", "else", "high", "for", "el", "in", "sig", ")", "if", "high", "is", "None", ":", "return", "Stream", "(", "el", "if", "el", ">", "low", "else", "low", "for", "el", "in", "sig", ")", "if", "high", "<", "low", ":", "raise", "ValueError", "(", "\"Higher clipping limit is smaller than lower one\"", ")", "return", "Stream", "(", "high", "if", "el", ">", "high", "else", "(", "low", "if", "el", "<", "low", "else", "el", ")", "for", "el", "in", "sig", ")" ]
Clips the signal up to both a lower and a higher limit. Parameters ---------- sig : The signal to be clipped, be it a Stream instance, a list or any iterable. low, high : Lower and higher clipping limit, "saturating" the input to them. Defaults to -1.0 and 1.0, respectively. These can be None when one-sided clipping is needed. When both limits are set to None, the output will be a Stream that yields exactly the ``sig`` input data. Returns ------- Clipped signal as a Stream instance.
[ "Clips", "the", "signal", "up", "to", "both", "a", "lower", "and", "a", "higher", "limit", "." ]
python
train
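A short usage sketch for the ``clip`` record above (the import path follows the record's ``path`` field); the expected outputs follow directly from the branches of the function::

    from audiolazy.lazy_analysis import clip

    data = [-2.0, -0.5, 0.0, 0.7, 1.5]
    list(clip(data))                        # -> [-1.0, -0.5, 0.0, 0.7, 1.0]
    list(clip(data, low=None, high=1.0))    # -> [-2.0, -0.5, 0.0, 0.7, 1.0]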
svenevs/exhale
exhale/graph.py
https://github.com/svenevs/exhale/blob/fe7644829057af622e467bb529db6c03a830da99/exhale/graph.py#L1523-L1544
def reparentAll(self): ''' Fixes some of the parental relationships lost in parsing the Breathe graph. File relationships are recovered in :func:`~exhale.graph.ExhaleRoot.fileRefDiscovery`. This method simply calls in this order: 1. :func:`~exhale.graph.ExhaleRoot.reparentUnions` 2. :func:`~exhale.graph.ExhaleRoot.reparentClassLike` 3. :func:`~exhale.graph.ExhaleRoot.reparentDirectories` 4. :func:`~exhale.graph.ExhaleRoot.renameToNamespaceScopes` 5. :func:`~exhale.graph.ExhaleRoot.reparentNamespaces` ''' self.reparentUnions() self.reparentClassLike() self.reparentDirectories() self.renameToNamespaceScopes() self.reparentNamespaces() # make sure all children lists are unique (no duplicate children) for node in self.all_nodes: node.children = list(set(node.children))
[ "def", "reparentAll", "(", "self", ")", ":", "self", ".", "reparentUnions", "(", ")", "self", ".", "reparentClassLike", "(", ")", "self", ".", "reparentDirectories", "(", ")", "self", ".", "renameToNamespaceScopes", "(", ")", "self", ".", "reparentNamespaces", "(", ")", "# make sure all children lists are unique (no duplicate children)", "for", "node", "in", "self", ".", "all_nodes", ":", "node", ".", "children", "=", "list", "(", "set", "(", "node", ".", "children", ")", ")" ]
Fixes some of the parental relationships lost in parsing the Breathe graph. File relationships are recovered in :func:`~exhale.graph.ExhaleRoot.fileRefDiscovery`. This method simply calls in this order: 1. :func:`~exhale.graph.ExhaleRoot.reparentUnions` 2. :func:`~exhale.graph.ExhaleRoot.reparentClassLike` 3. :func:`~exhale.graph.ExhaleRoot.reparentDirectories` 4. :func:`~exhale.graph.ExhaleRoot.renameToNamespaceScopes` 5. :func:`~exhale.graph.ExhaleRoot.reparentNamespaces`
[ "Fixes", "some", "of", "the", "parental", "relationships", "lost", "in", "parsing", "the", "Breathe", "graph", ".", "File", "relationships", "are", "recovered", "in", ":", "func", ":", "~exhale", ".", "graph", ".", "ExhaleRoot", ".", "fileRefDiscovery", ".", "This", "method", "simply", "calls", "in", "this", "order", ":" ]
python
train
frictionlessdata/tableschema-py
tableschema/cli.py
https://github.com/frictionlessdata/tableschema-py/blob/9c5fa930319e7c5b10351f794091c5f9de5e8684/tableschema/cli.py#L58-L67
def validate(schema): """Validate that a supposed schema is in fact a Table Schema.""" try: tableschema.validate(schema) click.echo("Schema is valid") sys.exit(0) except tableschema.exceptions.ValidationError as exception: click.echo("Schema is not valid") click.echo(exception.errors) sys.exit(1)
[ "def", "validate", "(", "schema", ")", ":", "try", ":", "tableschema", ".", "validate", "(", "schema", ")", "click", ".", "echo", "(", "\"Schema is valid\"", ")", "sys", ".", "exit", "(", "0", ")", "except", "tableschema", ".", "exceptions", ".", "ValidationError", "as", "exception", ":", "click", ".", "echo", "(", "\"Schema is not valid\"", ")", "click", ".", "echo", "(", "exception", ".", "errors", ")", "sys", ".", "exit", "(", "1", ")" ]
Validate that a supposed schema is in fact a Table Schema.
[ "Validate", "that", "a", "supposed", "schema", "is", "in", "fact", "a", "Table", "Schema", "." ]
python
train
gem/oq-engine
openquake/calculators/getters.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/calculators/getters.py#L306-L326
def init(self): """ Initialize the computers. Should be called on the workers """ if hasattr(self, 'computers'): # init already called return with hdf5.File(self.rupgetter.filename, 'r') as parent: self.weights = parent['weights'].value self.computers = [] for ebr in self.rupgetter.get_ruptures(self.srcfilter): sitecol = self.sitecol.filtered(ebr.sids) try: computer = calc.gmf.GmfComputer( ebr, sitecol, self.oqparam.imtls, self.cmaker, self.oqparam.truncation_level, self.correl_model) except FarAwayRupture: # due to numeric errors, ruptures within the maximum_distance # when written, can be outside when read; I found a case with # a distance of 99.9996936 km over a maximum distance of 100 km continue self.computers.append(computer)
[ "def", "init", "(", "self", ")", ":", "if", "hasattr", "(", "self", ",", "'computers'", ")", ":", "# init already called", "return", "with", "hdf5", ".", "File", "(", "self", ".", "rupgetter", ".", "filename", ",", "'r'", ")", "as", "parent", ":", "self", ".", "weights", "=", "parent", "[", "'weights'", "]", ".", "value", "self", ".", "computers", "=", "[", "]", "for", "ebr", "in", "self", ".", "rupgetter", ".", "get_ruptures", "(", "self", ".", "srcfilter", ")", ":", "sitecol", "=", "self", ".", "sitecol", ".", "filtered", "(", "ebr", ".", "sids", ")", "try", ":", "computer", "=", "calc", ".", "gmf", ".", "GmfComputer", "(", "ebr", ",", "sitecol", ",", "self", ".", "oqparam", ".", "imtls", ",", "self", ".", "cmaker", ",", "self", ".", "oqparam", ".", "truncation_level", ",", "self", ".", "correl_model", ")", "except", "FarAwayRupture", ":", "# due to numeric errors, ruptures within the maximum_distance", "# when written, can be outside when read; I found a case with", "# a distance of 99.9996936 km over a maximum distance of 100 km", "continue", "self", ".", "computers", ".", "append", "(", "computer", ")" ]
Initialize the computers. Should be called on the workers
[ "Initialize", "the", "computers", ".", "Should", "be", "called", "on", "the", "workers" ]
python
train
lalinsky/python-phoenixdb
phoenixdb/__init__.py
https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/__init__.py#L44-L67
def connect(url, max_retries=None, **kwargs): """Connects to a Phoenix query server. :param url: URL to the Phoenix query server, e.g. ``http://localhost:8765/`` :param autocommit: Switch the connection to autocommit mode. :param readonly: Switch the connection to readonly mode. :param max_retries: The maximum number of retries in case there is a connection error. :param cursor_factory: If specified, the connection's :attr:`~phoenixdb.connection.Connection.cursor_factory` is set to it. :returns: :class:`~phoenixdb.connection.Connection` object. """ client = AvaticaClient(url, max_retries=max_retries) client.connect() return Connection(client, **kwargs)
[ "def", "connect", "(", "url", ",", "max_retries", "=", "None", ",", "*", "*", "kwargs", ")", ":", "client", "=", "AvaticaClient", "(", "url", ",", "max_retries", "=", "max_retries", ")", "client", ".", "connect", "(", ")", "return", "Connection", "(", "client", ",", "*", "*", "kwargs", ")" ]
Connects to a Phoenix query server. :param url: URL to the Phoenix query server, e.g. ``http://localhost:8765/`` :param autocommit: Switch the connection to autocommit mode. :param readonly: Switch the connection to readonly mode. :param max_retries: The maximum number of retries in case there is a connection error. :param cursor_factory: If specified, the connection's :attr:`~phoenixdb.connection.Connection.cursor_factory` is set to it. :returns: :class:`~phoenixdb.connection.Connection` object.
[ "Connects", "to", "a", "Phoenix", "query", "server", "." ]
python
train
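A minimal usage sketch for the ``connect`` record above, using only what the docstring documents; the URL is an example value, and the cursor call follows the standard DB-API shape::

    import phoenixdb

    conn = phoenixdb.connect('http://localhost:8765/', autocommit=True)
    cursor = conn.cursor()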
hvac/hvac
hvac/api/secrets_engines/aws.py
https://github.com/hvac/hvac/blob/cce5b86889193f622c2a72a4a1b7e1c9c8aff1ce/hvac/api/secrets_engines/aws.py#L70-L91
def rotate_root_iam_credentials(self, mount_point=DEFAULT_MOUNT_POINT): """Rotate static root IAM credentials. When you have configured Vault with static credentials, you can use this endpoint to have Vault rotate the access key it used. Note that, due to AWS eventual consistency, after calling this endpoint, subsequent calls from Vault to AWS may fail for a few seconds until AWS becomes consistent again. In order to call this endpoint, Vault's AWS access key MUST be the only access key on the IAM user; otherwise, generation of a new access key will fail. Once this method is called, Vault will now be the only entity that knows the AWS secret key is used to access AWS. Supported methods: POST: /{mount_point}/config/rotate-root. Produces: 200 application/json :return: The JSON response of the request. :rtype: dict """ api_path = '/v1/{mount_point}/config/rotate-root'.format(mount_point=mount_point) response = self._adapter.post( url=api_path, ) return response.json()
[ "def", "rotate_root_iam_credentials", "(", "self", ",", "mount_point", "=", "DEFAULT_MOUNT_POINT", ")", ":", "api_path", "=", "'/v1/{mount_point}/config/rotate-root'", ".", "format", "(", "mount_point", "=", "mount_point", ")", "response", "=", "self", ".", "_adapter", ".", "post", "(", "url", "=", "api_path", ",", ")", "return", "response", ".", "json", "(", ")" ]
Rotate static root IAM credentials. When you have configured Vault with static credentials, you can use this endpoint to have Vault rotate the access key it used. Note that, due to AWS eventual consistency, after calling this endpoint, subsequent calls from Vault to AWS may fail for a few seconds until AWS becomes consistent again. In order to call this endpoint, Vault's AWS access key MUST be the only access key on the IAM user; otherwise, generation of a new access key will fail. Once this method is called, Vault will now be the only entity that knows the AWS secret key is used to access AWS. Supported methods: POST: /{mount_point}/config/rotate-root. Produces: 200 application/json :return: The JSON response of the request. :rtype: dict
[ "Rotate", "static", "root", "IAM", "credentials", "." ]
python
train
chriso/gauged
gauged/drivers/__init__.py
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/drivers/__init__.py#L14-L57
def parse_dsn(dsn_string): """Parse a connection string and return the associated driver""" dsn = urlparse(dsn_string) scheme = dsn.scheme.split('+')[0] username = password = host = port = None host = dsn.netloc if '@' in host: username, host = host.split('@') if ':' in username: username, password = username.split(':') password = unquote(password) username = unquote(username) if ':' in host: host, port = host.split(':') port = int(port) database = dsn.path.split('?')[0][1:] query = dsn.path.split('?')[1] if '?' in dsn.path else dsn.query kwargs = dict(parse_qsl(query, True)) if scheme == 'sqlite': return SQLiteDriver, [dsn.path], {} elif scheme == 'mysql': kwargs['user'] = username or 'root' kwargs['db'] = database if port: kwargs['port'] = port if host: kwargs['host'] = host if password: kwargs['passwd'] = password return MySQLDriver, [], kwargs elif scheme == 'postgresql': kwargs['user'] = username or 'postgres' kwargs['database'] = database if port: kwargs['port'] = port if 'unix_socket' in kwargs: kwargs['host'] = kwargs.pop('unix_socket') elif host: kwargs['host'] = host if password: kwargs['password'] = password return PostgreSQLDriver, [], kwargs else: raise ValueError('Unknown driver %s' % dsn_string)
[ "def", "parse_dsn", "(", "dsn_string", ")", ":", "dsn", "=", "urlparse", "(", "dsn_string", ")", "scheme", "=", "dsn", ".", "scheme", ".", "split", "(", "'+'", ")", "[", "0", "]", "username", "=", "password", "=", "host", "=", "port", "=", "None", "host", "=", "dsn", ".", "netloc", "if", "'@'", "in", "host", ":", "username", ",", "host", "=", "host", ".", "split", "(", "'@'", ")", "if", "':'", "in", "username", ":", "username", ",", "password", "=", "username", ".", "split", "(", "':'", ")", "password", "=", "unquote", "(", "password", ")", "username", "=", "unquote", "(", "username", ")", "if", "':'", "in", "host", ":", "host", ",", "port", "=", "host", ".", "split", "(", "':'", ")", "port", "=", "int", "(", "port", ")", "database", "=", "dsn", ".", "path", ".", "split", "(", "'?'", ")", "[", "0", "]", "[", "1", ":", "]", "query", "=", "dsn", ".", "path", ".", "split", "(", "'?'", ")", "[", "1", "]", "if", "'?'", "in", "dsn", ".", "path", "else", "dsn", ".", "query", "kwargs", "=", "dict", "(", "parse_qsl", "(", "query", ",", "True", ")", ")", "if", "scheme", "==", "'sqlite'", ":", "return", "SQLiteDriver", ",", "[", "dsn", ".", "path", "]", ",", "{", "}", "elif", "scheme", "==", "'mysql'", ":", "kwargs", "[", "'user'", "]", "=", "username", "or", "'root'", "kwargs", "[", "'db'", "]", "=", "database", "if", "port", ":", "kwargs", "[", "'port'", "]", "=", "port", "if", "host", ":", "kwargs", "[", "'host'", "]", "=", "host", "if", "password", ":", "kwargs", "[", "'passwd'", "]", "=", "password", "return", "MySQLDriver", ",", "[", "]", ",", "kwargs", "elif", "scheme", "==", "'postgresql'", ":", "kwargs", "[", "'user'", "]", "=", "username", "or", "'postgres'", "kwargs", "[", "'database'", "]", "=", "database", "if", "port", ":", "kwargs", "[", "'port'", "]", "=", "port", "if", "'unix_socket'", "in", "kwargs", ":", "kwargs", "[", "'host'", "]", "=", "kwargs", ".", "pop", "(", "'unix_socket'", ")", "elif", "host", ":", "kwargs", "[", "'host'", "]", "=", "host", "if", "password", ":", "kwargs", "[", "'password'", "]", "=", "password", "return", "PostgreSQLDriver", ",", "[", "]", ",", "kwargs", "else", ":", "raise", "ValueError", "(", "'Unknown driver %s'", "%", "dsn_string", ")" ]
Parse a connection string and return the associated driver
[ "Parse", "a", "connection", "string", "and", "return", "the", "associated", "driver" ]
python
train
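A usage sketch for the ``parse_dsn`` record above; the DSN is an example value, and the expected result follows the MySQL branch of the function::

    from gauged.drivers import parse_dsn

    driver, args, kwargs = parse_dsn('mysql://root:secret@localhost:3306/stats')
    # driver -> MySQLDriver
    # args   -> []
    # kwargs -> {'user': 'root', 'passwd': 'secret', 'host': 'localhost',
    #            'port': 3306, 'db': 'stats'}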
obspy/vcr
vcr/core.py
https://github.com/obspy/vcr/blob/f961d3bffc57d1761b6de2fb1e67d5f464ebc6b6/vcr/core.py#L112-L122
def reset(cls): """ Reset to default settings """ cls.debug = False cls.disabled = False cls.overwrite = False cls.playback_only = False cls.recv_timeout = 5 cls.recv_endmarkers = [] cls.recv_size = None
[ "def", "reset", "(", "cls", ")", ":", "cls", ".", "debug", "=", "False", "cls", ".", "disabled", "=", "False", "cls", ".", "overwrite", "=", "False", "cls", ".", "playback_only", "=", "False", "cls", ".", "recv_timeout", "=", "5", "cls", ".", "recv_endmarkers", "=", "[", "]", "cls", ".", "recv_size", "=", "None" ]
Reset to default settings
[ "Reset", "to", "default", "settings" ]
python
train
eventable/vobject
docs/build/lib/vobject/icalendar.py
https://github.com/eventable/vobject/blob/498555a553155ea9b26aace93332ae79365ecb31/docs/build/lib/vobject/icalendar.py#L1208-L1219
def generateImplicitParameters(obj): """ Create default ACTION and TRIGGER if they're not set. """ try: obj.action except AttributeError: obj.add('action').value = 'AUDIO' try: obj.trigger except AttributeError: obj.add('trigger').value = datetime.timedelta(0)
[ "def", "generateImplicitParameters", "(", "obj", ")", ":", "try", ":", "obj", ".", "action", "except", "AttributeError", ":", "obj", ".", "add", "(", "'action'", ")", ".", "value", "=", "'AUDIO'", "try", ":", "obj", ".", "trigger", "except", "AttributeError", ":", "obj", ".", "add", "(", "'trigger'", ")", ".", "value", "=", "datetime", ".", "timedelta", "(", "0", ")" ]
Create default ACTION and TRIGGER if they're not set.
[ "Create", "default", "ACTION", "and", "TRIGGER", "if", "they", "re", "not", "set", "." ]
python
train
praekeltfoundation/marathon-acme
marathon_acme/clients/marathon_lb.py
https://github.com/praekeltfoundation/marathon-acme/blob/b1b71e3dde0ba30e575089280658bd32890e3325/marathon_acme/clients/marathon_lb.py#L29-L36
def _request(self, endpoint, *args, **kwargs): """ Perform a request to a specific endpoint. Raise an error if the status code indicates a client or server error. """ kwargs['url'] = endpoint return (super(MarathonLbClient, self).request(*args, **kwargs) .addCallback(raise_for_status))
[ "def", "_request", "(", "self", ",", "endpoint", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'url'", "]", "=", "endpoint", "return", "(", "super", "(", "MarathonLbClient", ",", "self", ")", ".", "request", "(", "*", "args", ",", "*", "*", "kwargs", ")", ".", "addCallback", "(", "raise_for_status", ")", ")" ]
Perform a request to a specific endpoint. Raise an error if the status code indicates a client or server error.
[ "Perform", "a", "request", "to", "a", "specific", "endpoint", ".", "Raise", "an", "error", "if", "the", "status", "code", "indicates", "a", "client", "or", "server", "error", "." ]
python
valid
eqcorrscan/EQcorrscan
eqcorrscan/core/subspace.py
https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/subspace.py#L234-L251
def energy_capture(self, stachans='all', size=(10, 7), show=False): """ Calculate the average percentage energy capture for this subspace. :return: Percentage energy capture :rtype: float """ if show: return subspace_fc_plot(detector=self, stachans=stachans, size=size, show=show) percent_capture = 0 if np.isinf(self.dimension): return 100 for channel in self.sigma: fc = np.sum(channel[0:self.dimension]) / np.sum(channel) percent_capture += fc else: return 100 * (percent_capture / len(self.sigma))
[ "def", "energy_capture", "(", "self", ",", "stachans", "=", "'all'", ",", "size", "=", "(", "10", ",", "7", ")", ",", "show", "=", "False", ")", ":", "if", "show", ":", "return", "subspace_fc_plot", "(", "detector", "=", "self", ",", "stachans", "=", "stachans", ",", "size", "=", "size", ",", "show", "=", "show", ")", "percent_capture", "=", "0", "if", "np", ".", "isinf", "(", "self", ".", "dimension", ")", ":", "return", "100", "for", "channel", "in", "self", ".", "sigma", ":", "fc", "=", "np", ".", "sum", "(", "channel", "[", "0", ":", "self", ".", "dimension", "]", ")", "/", "np", ".", "sum", "(", "channel", ")", "percent_capture", "+=", "fc", "else", ":", "return", "100", "*", "(", "percent_capture", "/", "len", "(", "self", ".", "sigma", ")", ")" ]
Calculate the average percentage energy capture for this subspace. :return: Percentage energy capture :rtype: float
[ "Calculate", "the", "average", "percentage", "energy", "capture", "for", "this", "subspace", "." ]
python
train
recurly/recurly-client-python
recurly/resource.py
https://github.com/recurly/recurly-client-python/blob/682217c4e85ec5c8d4e41519ee0620d2dc4d84d7/recurly/resource.py#L347-L360
def get(cls, uuid): """Return a `Resource` instance of this class identified by the given code or UUID. Only `Resource` classes with specified `member_path` attributes can be directly requested with this method. """ if not uuid: raise ValueError("get must have a value passed as an argument") uuid = quote(str(uuid)) url = recurly.base_uri() + (cls.member_path % (uuid,)) _resp, elem = cls.element_for_url(url) return cls.from_element(elem)
[ "def", "get", "(", "cls", ",", "uuid", ")", ":", "if", "not", "uuid", ":", "raise", "ValueError", "(", "\"get must have a value passed as an argument\"", ")", "uuid", "=", "quote", "(", "str", "(", "uuid", ")", ")", "url", "=", "recurly", ".", "base_uri", "(", ")", "+", "(", "cls", ".", "member_path", "%", "(", "uuid", ",", ")", ")", "_resp", ",", "elem", "=", "cls", ".", "element_for_url", "(", "url", ")", "return", "cls", ".", "from_element", "(", "elem", ")" ]
Return a `Resource` instance of this class identified by the given code or UUID. Only `Resource` classes with specified `member_path` attributes can be directly requested with this method.
[ "Return", "a", "Resource", "instance", "of", "this", "class", "identified", "by", "the", "given", "code", "or", "UUID", "." ]
python
train
thebigmunch/google-music
src/google_music/clients/mobileclient.py
https://github.com/thebigmunch/google-music/blob/d8a94dab462a1f063fbc1152187a73dc2f0e2a85/src/google_music/clients/mobileclient.py#L978-L996
def podcast(self, podcast_series_id, *, max_episodes=50): """Get information about a podcast series. Parameters: podcast_series_id (str): A podcast series ID. max_episodes (int, Optional): Include up to given number of episodes in returned dict. Default: ``50`` Returns: dict: Podcast series information. """ podcast_info = self._call( mc_calls.PodcastFetchSeries, podcast_series_id, max_episodes=max_episodes ).body return podcast_info
[ "def", "podcast", "(", "self", ",", "podcast_series_id", ",", "*", ",", "max_episodes", "=", "50", ")", ":", "podcast_info", "=", "self", ".", "_call", "(", "mc_calls", ".", "PodcastFetchSeries", ",", "podcast_series_id", ",", "max_episodes", "=", "max_episodes", ")", ".", "body", "return", "podcast_info" ]
Get information about a podcast series. Parameters: podcast_series_id (str): A podcast series ID. max_episodes (int, Optional): Include up to given number of episodes in returned dict. Default: ``50`` Returns: dict: Podcast series information.
[ "Get", "information", "about", "a", "podcast", "series", "." ]
python
train
bcbio/bcbio-nextgen
bcbio/rnaseq/bcbiornaseq.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/bcbiornaseq.py#L82-L107
def create_load_string(upload_dir, groups=None, organism=None): """ create the code necessary to load the bcbioRNAseq object """ libraryline = 'library(bcbioRNASeq)' load_template = Template( ('bcb <- bcbioRNASeq(uploadDir="$upload_dir",' 'interestingGroups=$groups,' 'organism="$organism")')) load_noorganism_template = Template( ('bcb <- bcbioRNASeq(uploadDir="$upload_dir",' 'interestingGroups=$groups,' 'organism=NULL)')) flatline = 'flat <- flatFiles(bcb)' saveline = 'saveData(bcb, flat, dir="data")' if groups: groups = _list2Rlist(groups) else: groups = _quotestring("sampleName") if organism: load_bcbio = load_template.substitute( upload_dir=upload_dir, groups=groups, organism=organism) else: load_bcbio = load_noorganism_template.substitute(upload_dir=upload_dir, groups=groups) return ";\n".join([libraryline, load_bcbio, flatline, saveline])
[ "def", "create_load_string", "(", "upload_dir", ",", "groups", "=", "None", ",", "organism", "=", "None", ")", ":", "libraryline", "=", "'library(bcbioRNASeq)'", "load_template", "=", "Template", "(", "(", "'bcb <- bcbioRNASeq(uploadDir=\"$upload_dir\",'", "'interestingGroups=$groups,'", "'organism=\"$organism\")'", ")", ")", "load_noorganism_template", "=", "Template", "(", "(", "'bcb <- bcbioRNASeq(uploadDir=\"$upload_dir\",'", "'interestingGroups=$groups,'", "'organism=NULL)'", ")", ")", "flatline", "=", "'flat <- flatFiles(bcb)'", "saveline", "=", "'saveData(bcb, flat, dir=\"data\")'", "if", "groups", ":", "groups", "=", "_list2Rlist", "(", "groups", ")", "else", ":", "groups", "=", "_quotestring", "(", "\"sampleName\"", ")", "if", "organism", ":", "load_bcbio", "=", "load_template", ".", "substitute", "(", "upload_dir", "=", "upload_dir", ",", "groups", "=", "groups", ",", "organism", "=", "organism", ")", "else", ":", "load_bcbio", "=", "load_noorganism_template", ".", "substitute", "(", "upload_dir", "=", "upload_dir", ",", "groups", "=", "groups", ")", "return", "\";\\n\"", ".", "join", "(", "[", "libraryline", ",", "load_bcbio", ",", "flatline", ",", "saveline", "]", ")" ]
create the code necessary to load the bcbioRNAseq object
[ "create", "the", "code", "necessary", "to", "load", "the", "bcbioRNAseq", "object" ]
python
train
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_1/gallery/gallery_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_1/gallery/gallery_client.py#L759-L775
def create_extension(self, upload_stream, **kwargs): """CreateExtension. [Preview API] :param object upload_stream: Stream to upload :rtype: :class:`<PublishedExtension> <azure.devops.v5_1.gallery.models.PublishedExtension>` """ if "callback" in kwargs: callback = kwargs["callback"] else: callback = None content = self._client.stream_upload(upload_stream, callback=callback) response = self._send(http_method='POST', location_id='a41192c8-9525-4b58-bc86-179fa549d80d', version='5.1-preview.2', content=content, media_type='application/octet-stream') return self._deserialize('PublishedExtension', response)
[ "def", "create_extension", "(", "self", ",", "upload_stream", ",", "*", "*", "kwargs", ")", ":", "if", "\"callback\"", "in", "kwargs", ":", "callback", "=", "kwargs", "[", "\"callback\"", "]", "else", ":", "callback", "=", "None", "content", "=", "self", ".", "_client", ".", "stream_upload", "(", "upload_stream", ",", "callback", "=", "callback", ")", "response", "=", "self", ".", "_send", "(", "http_method", "=", "'POST'", ",", "location_id", "=", "'a41192c8-9525-4b58-bc86-179fa549d80d'", ",", "version", "=", "'5.1-preview.2'", ",", "content", "=", "content", ",", "media_type", "=", "'application/octet-stream'", ")", "return", "self", ".", "_deserialize", "(", "'PublishedExtension'", ",", "response", ")" ]
CreateExtension. [Preview API] :param object upload_stream: Stream to upload :rtype: :class:`<PublishedExtension> <azure.devops.v5_1.gallery.models.PublishedExtension>`
[ "CreateExtension", ".", "[", "Preview", "API", "]", ":", "param", "object", "upload_stream", ":", "Stream", "to", "upload", ":", "rtype", ":", ":", "class", ":", "<PublishedExtension", ">", "<azure", ".", "devops", ".", "v5_1", ".", "gallery", ".", "models", ".", "PublishedExtension", ">" ]
python
train
toumorokoshi/transmute-core
transmute_core/frameworks/aiohttp/route.py
https://github.com/toumorokoshi/transmute-core/blob/a2c26625d5d8bab37e00038f9d615a26167fc7f4/transmute_core/frameworks/aiohttp/route.py#L6-L21
def add_route(app, fn, context=default_context): """ a decorator that adds a transmute route to the application. """ transmute_func = TransmuteFunction( fn, args_not_from_request=["request"] ) handler = create_handler(transmute_func, context=context) get_swagger_spec(app).add_func(transmute_func, context) for p in transmute_func.paths: aiohttp_path = _convert_to_aiohttp_path(p) resource = app.router.add_resource(aiohttp_path) for method in transmute_func.methods: resource.add_route(method, handler)
[ "def", "add_route", "(", "app", ",", "fn", ",", "context", "=", "default_context", ")", ":", "transmute_func", "=", "TransmuteFunction", "(", "fn", ",", "args_not_from_request", "=", "[", "\"request\"", "]", ")", "handler", "=", "create_handler", "(", "transmute_func", ",", "context", "=", "context", ")", "get_swagger_spec", "(", "app", ")", ".", "add_func", "(", "transmute_func", ",", "context", ")", "for", "p", "in", "transmute_func", ".", "paths", ":", "aiohttp_path", "=", "_convert_to_aiohttp_path", "(", "p", ")", "resource", "=", "app", ".", "router", ".", "add_resource", "(", "aiohttp_path", ")", "for", "method", "in", "transmute_func", ".", "methods", ":", "resource", ".", "add_route", "(", "method", ",", "handler", ")" ]
a decorator that adds a transmute route to the application.
[ "a", "decorator", "that", "adds", "a", "transmute", "route", "to", "the", "application", "." ]
python
train
ebroecker/canmatrix
src/canmatrix/formats/arxml.py
https://github.com/ebroecker/canmatrix/blob/d6150b7a648350f051a11c431e9628308c8d5593/src/canmatrix/formats/arxml.py#L771-L780
def fill_tree_from_xml(tag, ar_tree, namespace): # type: (_Element, ArTree, str) -> None """Parse the xml tree into ArTree objects.""" for child in tag: # type: _Element name_elem = child.find('./' + namespace + 'SHORT-NAME') # long_name = child.find('./' + namespace + 'LONG-NAME') if name_elem is not None and child is not None: fill_tree_from_xml(child, ar_tree.append_child(name_elem.text, child), namespace) if name_elem is None and child is not None: fill_tree_from_xml(child, ar_tree, namespace)
[ "def", "fill_tree_from_xml", "(", "tag", ",", "ar_tree", ",", "namespace", ")", ":", "# type: (_Element, ArTree, str) -> None", "for", "child", "in", "tag", ":", "# type: _Element", "name_elem", "=", "child", ".", "find", "(", "'./'", "+", "namespace", "+", "'SHORT-NAME'", ")", "# long_name = child.find('./' + namespace + 'LONG-NAME')", "if", "name_elem", "is", "not", "None", "and", "child", "is", "not", "None", ":", "fill_tree_from_xml", "(", "child", ",", "ar_tree", ".", "append_child", "(", "name_elem", ".", "text", ",", "child", ")", ",", "namespace", ")", "if", "name_elem", "is", "None", "and", "child", "is", "not", "None", ":", "fill_tree_from_xml", "(", "child", ",", "ar_tree", ",", "namespace", ")" ]
Parse the xml tree into ArTree objects.
[ "Parse", "the", "xml", "tree", "into", "ArTree", "objects", "." ]
python
train
Lagg/steamodd
steam/sim.py
https://github.com/Lagg/steamodd/blob/2e9ced4e7a6dbe3e09d5a648450bafc12b937b95/steam/sim.py#L35-L48
def get(self, key): """ Returns context data for a given app, can be an ID or a case insensitive name """ keystr = str(key) res = None try: res = self.ctx[keystr] except KeyError: for k, v in self.ctx.items(): if "name" in v and v["name"].lower() == keystr.lower(): res = v break return res
[ "def", "get", "(", "self", ",", "key", ")", ":", "keystr", "=", "str", "(", "key", ")", "res", "=", "None", "try", ":", "res", "=", "self", ".", "ctx", "[", "keystr", "]", "except", "KeyError", ":", "for", "k", ",", "v", "in", "self", ".", "ctx", ".", "items", "(", ")", ":", "if", "\"name\"", "in", "v", "and", "v", "[", "\"name\"", "]", ".", "lower", "(", ")", "==", "keystr", ".", "lower", "(", ")", ":", "res", "=", "v", "break", "return", "res" ]
Returns context data for a given app, can be an ID or a case insensitive name
[ "Returns", "context", "data", "for", "a", "given", "app", "can", "be", "an", "ID", "or", "a", "case", "insensitive", "name" ]
python
train
ansibleplaybookbundle/ansible-playbook-bundle
src/apb/cli.py
https://github.com/ansibleplaybookbundle/ansible-playbook-bundle/blob/585694be9b417f1a88354cbfe286bfd68c2c9494/src/apb/cli.py#L108-L165
def subcmd_init_parser(subcmd): """ init subcommand """ subcmd.add_argument( 'tag', action='store', help=u'Tag (org/name) or name of APB to initialize' ) subcmd.add_argument( '--force', action='store_true', dest='force', help=u'Force re-init on current directory', default=False ) subcmd.add_argument( '--dockerhost', action='store', help=u'set the dockerhost for this project', default="docker.io" ) subcmd.add_argument( '--async', action='store', dest='async', help=u'Specify asynchronous operation on application.', default='optional', choices=['required', 'optional', 'unsupported'] ) subcmd.add_argument( '--bindable', action='store_true', dest='bindable', help=u'Make application bindable on the spec.', default=False ) subcmd.add_argument( '--dep', '-d', action='append', dest='dependencies', help=u'Add image dependency to APB spec' ) for opt in SKIP_OPTIONS: subcmd.add_argument( '--skip-%s' % opt, action='store_true', dest='skip-%s' % opt, help=u'Specify which playbooks to not generate by default.', default=False ) return
[ "def", "subcmd_init_parser", "(", "subcmd", ")", ":", "subcmd", ".", "add_argument", "(", "'tag'", ",", "action", "=", "'store'", ",", "help", "=", "u'Tag (org/name) or name of APB to initialize'", ")", "subcmd", ".", "add_argument", "(", "'--force'", ",", "action", "=", "'store_true'", ",", "dest", "=", "'force'", ",", "help", "=", "u'Force re-init on current directory'", ",", "default", "=", "False", ")", "subcmd", ".", "add_argument", "(", "'--dockerhost'", ",", "action", "=", "'store'", ",", "help", "=", "u'set the dockerhost for this project'", ",", "default", "=", "\"docker.io\"", ")", "subcmd", ".", "add_argument", "(", "'--async'", ",", "action", "=", "'store'", ",", "dest", "=", "'async'", ",", "help", "=", "u'Specify asynchronous operation on application.'", ",", "default", "=", "'optional'", ",", "choices", "=", "[", "'required'", ",", "'optional'", ",", "'unsupported'", "]", ")", "subcmd", ".", "add_argument", "(", "'--bindable'", ",", "action", "=", "'store_true'", ",", "dest", "=", "'bindable'", ",", "help", "=", "u'Make application bindable on the spec.'", ",", "default", "=", "False", ")", "subcmd", ".", "add_argument", "(", "'--dep'", ",", "'-d'", ",", "action", "=", "'append'", ",", "dest", "=", "'dependencies'", ",", "help", "=", "u'Add image dependency to APB spec'", ")", "for", "opt", "in", "SKIP_OPTIONS", ":", "subcmd", ".", "add_argument", "(", "'--skip-%s'", "%", "opt", ",", "action", "=", "'store_true'", ",", "dest", "=", "'skip-%s'", "%", "opt", ",", "help", "=", "u'Specify which playbooks to not generate by default.'", ",", "default", "=", "False", ")", "return" ]
init subcommand
[ "init", "subcommand" ]
python
train
Metatab/tableintuit
tableintuit/rows.py
https://github.com/Metatab/tableintuit/blob/9a3d500d5d90e44e6637dd17ca4c8dae474d6d4c/tableintuit/rows.py#L189-L288
def run(self, head_rows, tail_rows=None, n_rows=None): """ Run the intuition process :param head_rows: A list of rows from the start of the file. Should have at least 30 rows :param tail_rows: A list of rows from the end of the file. Optional, but should have at least 30 rows :param n_rows: Total number of rows, if a subset was provided in head_rows :return: """ from .exceptions import RowIntuitError header_rows = [] found_header = False MIN_SKIP_ROWS = 30 try: data_pattern_skip_rows = min(MIN_SKIP_ROWS, len(head_rows) - 8) except TypeError: # Hopefully b/c head_rows is a generator, not a sequence raise RowIntuitError("Head_rows must be a sequence, not a generator or iterator") try: data_pattern, self.data_pattern_source, n_cols = self.data_pattern(head_rows[data_pattern_skip_rows:]) except Exception as e: logger.debug("Failed to find data pattern") raise patterns = ([('D', data_pattern), # More than 25% strings in row is header, if it isn't matched as data ('H', re.compile(r'X{{{},{}}}'.format(max(3, n_cols/8),max(3,n_cols/4)))), ] + list(self.patterns)) if self.debug: logger.debug("--- Patterns") for e in patterns: logger.debug(" {} {}".format(e[0], e[1].pattern)) for i, row in enumerate(head_rows): picture = self.picture(row) label = self.match_picture(picture, patterns) try: # If a header or data has more than half of the line is a continuous nulls, # it's probably a comment. if label != 'B' and len(re.search('_+', picture).group(0)) > len(row)/2: label = 'C' except AttributeError: pass # re not matched if not found_header and label == 'H': found_header = True if label is False: if found_header: label = 'D' else: # Could be a really wacky header found_header = True label = 'H' if self.debug: logger.debug("HEAD: {:<5} {} {} {}".format(i, label, picture, row)) if label == 'C': self.comment_lines.append(i) elif label == 'H': self.header_lines.append(i) header_rows.append(row) elif label == 'D': self.start_line = i self.headers = self.coalesce_headers(header_rows) break if tail_rows: from itertools import takewhile, islice for i, row in enumerate(islice(reversed(tail_rows), 0, 10)): picture = self.picture(row) label = self.match_picture(picture, patterns) logger.debug("TAIL: {:<5} {} {} {}".format(i, label, picture, row)) # Compute the data label for the end line, then reverse them. labels = reversed(list(self.match_picture(self.picture(row), patterns) for row in tail_rows)) # Count the number of lines, from the end, that are either comment or blank end_line = len(list(takewhile(lambda x: x == 'C' or x == 'B' or x == 'H', labels))) if end_line: self.end_line = n_rows-end_line-1 return self
[ "def", "run", "(", "self", ",", "head_rows", ",", "tail_rows", "=", "None", ",", "n_rows", "=", "None", ")", ":", "from", ".", "exceptions", "import", "RowIntuitError", "header_rows", "=", "[", "]", "found_header", "=", "False", "MIN_SKIP_ROWS", "=", "30", "try", ":", "data_pattern_skip_rows", "=", "min", "(", "MIN_SKIP_ROWS", ",", "len", "(", "head_rows", ")", "-", "8", ")", "except", "TypeError", ":", "# Hopefully b/c head_rows is a generator, not a sequence", "raise", "RowIntuitError", "(", "\"Head_rows must be a sequence, not a generator or iterator\"", ")", "try", ":", "data_pattern", ",", "self", ".", "data_pattern_source", ",", "n_cols", "=", "self", ".", "data_pattern", "(", "head_rows", "[", "data_pattern_skip_rows", ":", "]", ")", "except", "Exception", "as", "e", ":", "logger", ".", "debug", "(", "\"Failed to find data pattern\"", ")", "raise", "patterns", "=", "(", "[", "(", "'D'", ",", "data_pattern", ")", ",", "# More than 25% strings in row is header, if it isn't matched as data", "(", "'H'", ",", "re", ".", "compile", "(", "r'X{{{},{}}}'", ".", "format", "(", "max", "(", "3", ",", "n_cols", "/", "8", ")", ",", "max", "(", "3", ",", "n_cols", "/", "4", ")", ")", ")", ")", ",", "]", "+", "list", "(", "self", ".", "patterns", ")", ")", "if", "self", ".", "debug", ":", "logger", ".", "debug", "(", "\"--- Patterns\"", ")", "for", "e", "in", "patterns", ":", "logger", ".", "debug", "(", "\" {} {}\"", ".", "format", "(", "e", "[", "0", "]", ",", "e", "[", "1", "]", ".", "pattern", ")", ")", "for", "i", ",", "row", "in", "enumerate", "(", "head_rows", ")", ":", "picture", "=", "self", ".", "picture", "(", "row", ")", "label", "=", "self", ".", "match_picture", "(", "picture", ",", "patterns", ")", "try", ":", "# If a header or data has more than half of the line is a continuous nulls,", "# it's probably a comment.", "if", "label", "!=", "'B'", "and", "len", "(", "re", ".", "search", "(", "'_+'", ",", "picture", ")", ".", "group", "(", "0", ")", ")", ">", "len", "(", "row", ")", "/", "2", ":", "label", "=", "'C'", "except", "AttributeError", ":", "pass", "# re not matched", "if", "not", "found_header", "and", "label", "==", "'H'", ":", "found_header", "=", "True", "if", "label", "is", "False", ":", "if", "found_header", ":", "label", "=", "'D'", "else", ":", "# Could be a really wacky header", "found_header", "=", "True", "label", "=", "'H'", "if", "self", ".", "debug", ":", "logger", ".", "debug", "(", "\"HEAD: {:<5} {} {} {}\"", ".", "format", "(", "i", ",", "label", ",", "picture", ",", "row", ")", ")", "if", "label", "==", "'C'", ":", "self", ".", "comment_lines", ".", "append", "(", "i", ")", "elif", "label", "==", "'H'", ":", "self", ".", "header_lines", ".", "append", "(", "i", ")", "header_rows", ".", "append", "(", "row", ")", "elif", "label", "==", "'D'", ":", "self", ".", "start_line", "=", "i", "self", ".", "headers", "=", "self", ".", "coalesce_headers", "(", "header_rows", ")", "break", "if", "tail_rows", ":", "from", "itertools", "import", "takewhile", ",", "islice", "for", "i", ",", "row", "in", "enumerate", "(", "islice", "(", "reversed", "(", "tail_rows", ")", ",", "0", ",", "10", ")", ")", ":", "picture", "=", "self", ".", "picture", "(", "row", ")", "label", "=", "self", ".", "match_picture", "(", "picture", ",", "patterns", ")", "logger", ".", "debug", "(", "\"TAIL: {:<5} {} {} {}\"", ".", "format", "(", "i", ",", "label", ",", "picture", ",", "row", ")", ")", "# Compute the data label for the end line, then reverse them.", "labels", "=", "reversed", "(", "list", "(", "self", ".", "match_picture", "(", "self", ".", "picture", "(", "row", ")", ",", "patterns", ")", "for", "row", "in", "tail_rows", ")", ")", "# Count the number of lines, from the end, that are either comment or blank", "end_line", "=", "len", "(", "list", "(", "takewhile", "(", "lambda", "x", ":", "x", "==", "'C'", "or", "x", "==", "'B'", "or", "x", "==", "'H'", ",", "labels", ")", ")", ")", "if", "end_line", ":", "self", ".", "end_line", "=", "n_rows", "-", "end_line", "-", "1", "return", "self" ]
Run the intuition process :param head_rows: A list of rows from the start of the file. Should have at least 30 rows :param tail_rows: A list of rows from the end of the file. Optional, but should have at least 30 rows :param n_rows: Total number of rows, if a subset was provided in head_rows :return:
[ "Run", "the", "intuition", "process", ":", "param", "head_rows", ":", "A", "list", "of", "rows", "from", "the", "start", "of", "the", "file", ".", "Should", "have", "at", "least", "30", "rows", ":", "param", "tail_rows", ":", "A", "list", "of", "rows", "from", "the", "end", "of", "the", "file", ".", "Optional", "but", "should", "have", "at", "least", "30", "rows", ":", "param", "n_rows", ":", "Total", "number", "of", "rows", "if", "a", "subset", "was", "provided", "in", "head_rows", ":", "return", ":" ]
python
train
isislovecruft/python-gnupg
examples/make-8192-bit-key.py
https://github.com/isislovecruft/python-gnupg/blob/784571449032e811587249743e183fc5e908a673/examples/make-8192-bit-key.py#L158-L176
def displayNewKey(key): """Use ``gnupg.GPG.list_keys()`` to display details of the new key.""" if key.keyring: gpg.keyring = key.keyring if key.secring: gpg.secring = key.secring # Using '--fingerprint' twice will display subkey fingerprints too: gpg.options = ['--fingerprint', '--fingerprint'] keylist = gpg.list_keys(secret=True) # `result` is a `gnupg._parsers.ListKeys`, which is list-like, so iterate # over all the keys and display their info: for gpgkey in keylist: for k, v in gpgkey.items(): log.info("%s: %s" % (k.capitalize(), v)) return keylist
[ "def", "displayNewKey", "(", "key", ")", ":", "if", "key", ".", "keyring", ":", "gpg", ".", "keyring", "=", "key", ".", "keyring", "if", "key", ".", "secring", ":", "gpg", ".", "secring", "=", "key", ".", "secring", "# Using '--fingerprint' twice will display subkey fingerprints too:", "gpg", ".", "options", "=", "[", "'--fingerprint'", ",", "'--fingerprint'", "]", "keylist", "=", "gpg", ".", "list_keys", "(", "secret", "=", "True", ")", "# `result` is a `gnupg._parsers.ListKeys`, which is list-like, so iterate", "# over all the keys and display their info:", "for", "gpgkey", "in", "keylist", ":", "for", "k", ",", "v", "in", "gpgkey", ".", "items", "(", ")", ":", "log", ".", "info", "(", "\"%s: %s\"", "%", "(", "k", ".", "capitalize", "(", ")", ",", "v", ")", ")", "return", "keylist" ]
Use ``gnupg.GPG.list_keys()`` to display details of the new key.
[ "Use", "gnupg", ".", "GPG", ".", "list_keys", "()", "to", "display", "details", "of", "the", "new", "key", "." ]
python
train
ajenhl/tacl
tacl/sequence.py
https://github.com/ajenhl/tacl/blob/b8a343248e77f1c07a5a4ac133a9ad6e0b4781c2/tacl/sequence.py#L24-L42
def _format_alignment(self, a1, a2): """Returns `a1` marked up with HTML spans around characters that are also at the same index in `a2`. :param a1: text sequence from one witness :type a1: `str` :param a2: text sequence from another witness :type a2: `str` :rtype: `str` """ html = [] for index, char in enumerate(a1): output = self._substitutes.get(char, char) if a2[index] == char: html.append('<span class="match">{}</span>'.format(output)) elif char != '-': html.append(output) return ''.join(html)
[ "def", "_format_alignment", "(", "self", ",", "a1", ",", "a2", ")", ":", "html", "=", "[", "]", "for", "index", ",", "char", "in", "enumerate", "(", "a1", ")", ":", "output", "=", "self", ".", "_substitutes", ".", "get", "(", "char", ",", "char", ")", "if", "a2", "[", "index", "]", "==", "char", ":", "html", ".", "append", "(", "'<span class=\"match\">{}</span>'", ".", "format", "(", "output", ")", ")", "elif", "char", "!=", "'-'", ":", "html", ".", "append", "(", "output", ")", "return", "''", ".", "join", "(", "html", ")" ]
Returns `a1` marked up with HTML spans around characters that are also at the same index in `a2`. :param a1: text sequence from one witness :type a1: `str` :param a2: text sequence from another witness :type a2: `str` :rtype: `str`
[ "Returns", "a1", "marked", "up", "with", "HTML", "spans", "around", "characters", "that", "are", "also", "at", "the", "same", "index", "in", "a2", "." ]
python
train
fermiPy/fermipy
fermipy/srcmap_utils.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/srcmap_utils.py#L84-L112
def shift_to_coords(self, pix, fill_value=np.nan): """Create a new map that is shifted to the pixel coordinates ``pix``.""" pix_offset = self.get_offsets(pix) dpix = np.zeros(len(self.shape) - 1) for i in range(len(self.shape) - 1): x = self.rebin * (pix[i] - pix_offset[i + 1] ) + (self.rebin - 1.0) / 2. dpix[i] = x - self._pix_ref[i] pos = [pix_offset[i] + self.shape[i] // 2 for i in range(self.data.ndim)] s0, s1 = utils.overlap_slices(self.shape_out, self.shape, pos) k = np.zeros(self.data.shape) for i in range(k.shape[0]): k[i] = shift(self._data_spline[i], dpix, cval=np.nan, order=2, prefilter=False) for i in range(1, len(self.shape)): k = utils.sum_bins(k, i, self.rebin) k0 = np.ones(self.shape_out) * fill_value if k[s1].size == 0 or k0[s0].size == 0: return k0 k0[s0] = k[s1] return k0
[ "def", "shift_to_coords", "(", "self", ",", "pix", ",", "fill_value", "=", "np", ".", "nan", ")", ":", "pix_offset", "=", "self", ".", "get_offsets", "(", "pix", ")", "dpix", "=", "np", ".", "zeros", "(", "len", "(", "self", ".", "shape", ")", "-", "1", ")", "for", "i", "in", "range", "(", "len", "(", "self", ".", "shape", ")", "-", "1", ")", ":", "x", "=", "self", ".", "rebin", "*", "(", "pix", "[", "i", "]", "-", "pix_offset", "[", "i", "+", "1", "]", ")", "+", "(", "self", ".", "rebin", "-", "1.0", ")", "/", "2.", "dpix", "[", "i", "]", "=", "x", "-", "self", ".", "_pix_ref", "[", "i", "]", "pos", "=", "[", "pix_offset", "[", "i", "]", "+", "self", ".", "shape", "[", "i", "]", "//", "2", "for", "i", "in", "range", "(", "self", ".", "data", ".", "ndim", ")", "]", "s0", ",", "s1", "=", "utils", ".", "overlap_slices", "(", "self", ".", "shape_out", ",", "self", ".", "shape", ",", "pos", ")", "k", "=", "np", ".", "zeros", "(", "self", ".", "data", ".", "shape", ")", "for", "i", "in", "range", "(", "k", ".", "shape", "[", "0", "]", ")", ":", "k", "[", "i", "]", "=", "shift", "(", "self", ".", "_data_spline", "[", "i", "]", ",", "dpix", ",", "cval", "=", "np", ".", "nan", ",", "order", "=", "2", ",", "prefilter", "=", "False", ")", "for", "i", "in", "range", "(", "1", ",", "len", "(", "self", ".", "shape", ")", ")", ":", "k", "=", "utils", ".", "sum_bins", "(", "k", ",", "i", ",", "self", ".", "rebin", ")", "k0", "=", "np", ".", "ones", "(", "self", ".", "shape_out", ")", "*", "fill_value", "if", "k", "[", "s1", "]", ".", "size", "==", "0", "or", "k0", "[", "s0", "]", ".", "size", "==", "0", ":", "return", "k0", "k0", "[", "s0", "]", "=", "k", "[", "s1", "]", "return", "k0" ]
Create a new map that is shifted to the pixel coordinates ``pix``.
[ "Create", "a", "new", "map", "that", "is", "shifted", "to", "the", "pixel", "coordinates", "pix", "." ]
python
train
armstrong/armstrong.core.arm_sections
armstrong/core/arm_sections/utils.py
https://github.com/armstrong/armstrong.core.arm_sections/blob/39c999c93771da909359e53b35afefe4846f77cb/armstrong/core/arm_sections/utils.py#L30-L34
def get_section_relations(Section): """Find every relationship between section and the item model.""" all_rels = (Section._meta.get_all_related_objects() + Section._meta.get_all_related_many_to_many_objects()) return filter_item_rels(all_rels)
[ "def", "get_section_relations", "(", "Section", ")", ":", "all_rels", "=", "(", "Section", ".", "_meta", ".", "get_all_related_objects", "(", ")", "+", "Section", ".", "_meta", ".", "get_all_related_many_to_many_objects", "(", ")", ")", "return", "filter_item_rels", "(", "all_rels", ")" ]
Find every relationship between section and the item model.
[ "Find", "every", "relationship", "between", "section", "and", "the", "item", "model", "." ]
python
train
FutunnOpen/futuquant
futuquant/examples/learn/get_realtime_data.py
https://github.com/FutunnOpen/futuquant/blob/1512b321845f92ec9c578ce2689aa4e8482669e4/futuquant/examples/learn/get_realtime_data.py#L36-L65
def _example_cur_kline(quote_ctx): """ 获取当前K线,输出 股票代码,时间,开盘价,收盘价,最高价,最低价,成交量,成交额 """ # subscribe Kline stock_code_list = ["US.AAPL", "HK.00700"] sub_type_list = [ft.SubType.K_1M, ft.SubType.K_5M, ft.SubType.K_15M, ft.SubType.K_30M, ft.SubType.K_60M, ft.SubType.K_DAY, ft.SubType.K_WEEK, ft.SubType.K_MON] ret_status, ret_data = quote_ctx.subscribe(stock_code_list, sub_type_list) if ret_status != ft.RET_OK: print(ret_data) exit() ret_status, ret_data = quote_ctx.query_subscription() if ret_status == ft.RET_ERROR: print(ret_data) exit() print(ret_data) for code in stock_code_list: for ktype in [ft.SubType.K_DAY, ft.SubType.K_1M, ft.SubType.K_5M]: ret_code, ret_data = quote_ctx.get_cur_kline(code, 5, ktype) if ret_code == ft.RET_ERROR: print(code, ktype, ret_data) exit() kline_table = ret_data print("%s KLINE %s" % (code, ktype)) print(kline_table) print("\n\n")
[ "def", "_example_cur_kline", "(", "quote_ctx", ")", ":", "# subscribe Kline", "stock_code_list", "=", "[", "\"US.AAPL\"", ",", "\"HK.00700\"", "]", "sub_type_list", "=", "[", "ft", ".", "SubType", ".", "K_1M", ",", "ft", ".", "SubType", ".", "K_5M", ",", "ft", ".", "SubType", ".", "K_15M", ",", "ft", ".", "SubType", ".", "K_30M", ",", "ft", ".", "SubType", ".", "K_60M", ",", "ft", ".", "SubType", ".", "K_DAY", ",", "ft", ".", "SubType", ".", "K_WEEK", ",", "ft", ".", "SubType", ".", "K_MON", "]", "ret_status", ",", "ret_data", "=", "quote_ctx", ".", "subscribe", "(", "stock_code_list", ",", "sub_type_list", ")", "if", "ret_status", "!=", "ft", ".", "RET_OK", ":", "print", "(", "ret_data", ")", "exit", "(", ")", "ret_status", ",", "ret_data", "=", "quote_ctx", ".", "query_subscription", "(", ")", "if", "ret_status", "==", "ft", ".", "RET_ERROR", ":", "print", "(", "ret_data", ")", "exit", "(", ")", "print", "(", "ret_data", ")", "for", "code", "in", "stock_code_list", ":", "for", "ktype", "in", "[", "ft", ".", "SubType", ".", "K_DAY", ",", "ft", ".", "SubType", ".", "K_1M", ",", "ft", ".", "SubType", ".", "K_5M", "]", ":", "ret_code", ",", "ret_data", "=", "quote_ctx", ".", "get_cur_kline", "(", "code", ",", "5", ",", "ktype", ")", "if", "ret_code", "==", "ft", ".", "RET_ERROR", ":", "print", "(", "code", ",", "ktype", ",", "ret_data", ")", "exit", "(", ")", "kline_table", "=", "ret_data", "print", "(", "\"%s KLINE %s\"", "%", "(", "code", ",", "ktype", ")", ")", "print", "(", "kline_table", ")", "print", "(", "\"\\n\\n\"", ")" ]
Get the current K-line and output the stock code, time, open price, close price, high price, low price, volume and turnover
[ "Get", "the", "current", "K-line", "and", "output", "the", "stock", "code", "time", "open", "price", "close", "price", "high", "price", "low", "price", "volume", "and", "turnover" ]
python
train
christophercrouzet/gorilla
gorilla.py
https://github.com/christophercrouzet/gorilla/blob/10c68d04be28524f2cd7be22eddca77678e12860/gorilla.py#L668-L697
def get_decorator_data(obj, set_default=False): """Retrieve any decorator data from an object. Parameters ---------- obj : object Object. set_default : bool If no data is found, a default one is set on the object and returned, otherwise ``None`` is returned. Returns ------- gorilla.DecoratorData The decorator data or ``None``. """ if isinstance(obj, _CLASS_TYPES): datas = getattr(obj, _DECORATOR_DATA, {}) data = datas.setdefault(obj, None) if data is None and set_default: data = DecoratorData() datas[obj] = data setattr(obj, _DECORATOR_DATA, datas) else: data = getattr(obj, _DECORATOR_DATA, None) if data is None and set_default: data = DecoratorData() setattr(obj, _DECORATOR_DATA, data) return data
[ "def", "get_decorator_data", "(", "obj", ",", "set_default", "=", "False", ")", ":", "if", "isinstance", "(", "obj", ",", "_CLASS_TYPES", ")", ":", "datas", "=", "getattr", "(", "obj", ",", "_DECORATOR_DATA", ",", "{", "}", ")", "data", "=", "datas", ".", "setdefault", "(", "obj", ",", "None", ")", "if", "data", "is", "None", "and", "set_default", ":", "data", "=", "DecoratorData", "(", ")", "datas", "[", "obj", "]", "=", "data", "setattr", "(", "obj", ",", "_DECORATOR_DATA", ",", "datas", ")", "else", ":", "data", "=", "getattr", "(", "obj", ",", "_DECORATOR_DATA", ",", "None", ")", "if", "data", "is", "None", "and", "set_default", ":", "data", "=", "DecoratorData", "(", ")", "setattr", "(", "obj", ",", "_DECORATOR_DATA", ",", "data", ")", "return", "data" ]
Retrieve any decorator data from an object. Parameters ---------- obj : object Object. set_default : bool If no data is found, a default one is set on the object and returned, otherwise ``None`` is returned. Returns ------- gorilla.DecoratorData The decorator data or ``None``.
[ "Retrieve", "any", "decorator", "data", "from", "an", "object", "." ]
python
train
Gorialis/jishaku
jishaku/cog.py
https://github.com/Gorialis/jishaku/blob/fc7c479b9d510ede189a929c8aa6f7c8ef7f9a6e/jishaku/cog.py#L199-L214
async def jsk_tasks(self, ctx: commands.Context): """ Shows the currently running jishaku tasks. """ if not self.tasks: return await ctx.send("No currently running tasks.") paginator = commands.Paginator(max_size=1985) for task in self.tasks: paginator.add_line(f"{task.index}: `{task.ctx.command.qualified_name}`, invoked at " f"{task.ctx.message.created_at.strftime('%Y-%m-%d %H:%M:%S')} UTC") interface = PaginatorInterface(ctx.bot, paginator, owner=ctx.author) await interface.send_to(ctx)
[ "async", "def", "jsk_tasks", "(", "self", ",", "ctx", ":", "commands", ".", "Context", ")", ":", "if", "not", "self", ".", "tasks", ":", "return", "await", "ctx", ".", "send", "(", "\"No currently running tasks.\"", ")", "paginator", "=", "commands", ".", "Paginator", "(", "max_size", "=", "1985", ")", "for", "task", "in", "self", ".", "tasks", ":", "paginator", ".", "add_line", "(", "f\"{task.index}: `{task.ctx.command.qualified_name}`, invoked at \"", "f\"{task.ctx.message.created_at.strftime('%Y-%m-%d %H:%M:%S')} UTC\"", ")", "interface", "=", "PaginatorInterface", "(", "ctx", ".", "bot", ",", "paginator", ",", "owner", "=", "ctx", ".", "author", ")", "await", "interface", ".", "send_to", "(", "ctx", ")" ]
Shows the currently running jishaku tasks.
[ "Shows", "the", "currently", "running", "jishaku", "tasks", "." ]
python
train
rocky/python3-trepan
trepan/bwprocessor/main.py
https://github.com/rocky/python3-trepan/blob/14e91bc0acce090d67be145b1ac040cab92ac5f3/trepan/bwprocessor/main.py#L370-L403
def setup(self): """Initialization done before entering the debugger-command loop. In particular we set up the call stack used for local variable lookup and frame/up/down commands. We return True if we should NOT enter the debugger-command loop.""" self.forget() if self.settings('dbg_trepan'): self.frame = inspect.currentframe() pass if self.event in ['exception', 'c_exception']: exc_type, exc_value, exc_traceback = self.event_arg else: _, _, exc_traceback = (None, None, None,) # NOQA pass if self.frame or exc_traceback: self.stack, self.curindex = \ get_stack(self.frame, exc_traceback, None, self) self.curframe = self.stack[self.curindex][0] self.thread_name = Mthread.current_thread_name() else: self.stack = self.curframe = \ self.botframe = None pass if self.curframe: self.list_lineno = \ max(1, inspect.getlineno(self.curframe)) else: self.list_lineno = None pass # if self.execRcLines()==1: return True return False
[ "def", "setup", "(", "self", ")", ":", "self", ".", "forget", "(", ")", "if", "self", ".", "settings", "(", "'dbg_trepan'", ")", ":", "self", ".", "frame", "=", "inspect", ".", "currentframe", "(", ")", "pass", "if", "self", ".", "event", "in", "[", "'exception'", ",", "'c_exception'", "]", ":", "exc_type", ",", "exc_value", ",", "exc_traceback", "=", "self", ".", "event_arg", "else", ":", "_", ",", "_", ",", "exc_traceback", "=", "(", "None", ",", "None", ",", "None", ",", ")", "# NOQA", "pass", "if", "self", ".", "frame", "or", "exc_traceback", ":", "self", ".", "stack", ",", "self", ".", "curindex", "=", "get_stack", "(", "self", ".", "frame", ",", "exc_traceback", ",", "None", ",", "self", ")", "self", ".", "curframe", "=", "self", ".", "stack", "[", "self", ".", "curindex", "]", "[", "0", "]", "self", ".", "thread_name", "=", "Mthread", ".", "current_thread_name", "(", ")", "else", ":", "self", ".", "stack", "=", "self", ".", "curframe", "=", "self", ".", "botframe", "=", "None", "pass", "if", "self", ".", "curframe", ":", "self", ".", "list_lineno", "=", "max", "(", "1", ",", "inspect", ".", "getlineno", "(", "self", ".", "curframe", ")", ")", "else", ":", "self", ".", "list_lineno", "=", "None", "pass", "# if self.execRcLines()==1: return True", "return", "False" ]
Initialization done before entering the debugger-command loop. In particular we set up the call stack used for local variable lookup and frame/up/down commands. We return True if we should NOT enter the debugger-command loop.
[ "Initialization", "done", "before", "entering", "the", "debugger", "-", "command", "loop", ".", "In", "particular", "we", "set", "up", "the", "call", "stack", "used", "for", "local", "variable", "lookup", "and", "frame", "/", "up", "/", "down", "commands", "." ]
python
test