Dataset columns (type and observed value range per column):

  repo               stringlengths   7 - 55
  path               stringlengths   4 - 223
  url                stringlengths   87 - 315
  code               stringlengths   75 - 104k
  code_tokens        list
  docstring          stringlengths   1 - 46.9k
  docstring_tokens   list
  language           stringclasses   1 value
  partition          stringclasses   3 values
  avg_line_len       float64         7.91 - 980
apache/airflow
airflow/contrib/hooks/gcp_sql_hook.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_sql_hook.py#L524-L565
def start_proxy(self): """ Starts Cloud SQL Proxy. You have to remember to stop the proxy if you started it! """ self._download_sql_proxy_if_needed() if self.sql_proxy_process: raise AirflowException("The sql proxy is already running: {}".format( self.sql_proxy_process)) else: command_to_run = [self.sql_proxy_path] command_to_run.extend(self.command_line_parameters) try: self.log.info("Creating directory %s", self.cloud_sql_proxy_socket_directory) os.makedirs(self.cloud_sql_proxy_socket_directory) except OSError: # Needed for python 2 compatibility (exists_ok missing) pass command_to_run.extend(self._get_credential_parameters()) self.log.info("Running the command: `%s`", " ".join(command_to_run)) self.sql_proxy_process = Popen(command_to_run, stdin=PIPE, stdout=PIPE, stderr=PIPE) self.log.info("The pid of cloud_sql_proxy: %s", self.sql_proxy_process.pid) while True: line = self.sql_proxy_process.stderr.readline().decode('utf-8') return_code = self.sql_proxy_process.poll() if line == '' and return_code is not None: self.sql_proxy_process = None raise AirflowException( "The cloud_sql_proxy finished early with return code {}!".format( return_code)) if line != '': self.log.info(line) if "googleapi: Error" in line or "invalid instance name:" in line: self.stop_proxy() raise AirflowException( "Error when starting the cloud_sql_proxy {}!".format( line)) if "Ready for new connections" in line: return
[ "def", "start_proxy", "(", "self", ")", ":", "self", ".", "_download_sql_proxy_if_needed", "(", ")", "if", "self", ".", "sql_proxy_process", ":", "raise", "AirflowException", "(", "\"The sql proxy is already running: {}\"", ".", "format", "(", "self", ".", "sql_proxy_process", ")", ")", "else", ":", "command_to_run", "=", "[", "self", ".", "sql_proxy_path", "]", "command_to_run", ".", "extend", "(", "self", ".", "command_line_parameters", ")", "try", ":", "self", ".", "log", ".", "info", "(", "\"Creating directory %s\"", ",", "self", ".", "cloud_sql_proxy_socket_directory", ")", "os", ".", "makedirs", "(", "self", ".", "cloud_sql_proxy_socket_directory", ")", "except", "OSError", ":", "# Needed for python 2 compatibility (exists_ok missing)", "pass", "command_to_run", ".", "extend", "(", "self", ".", "_get_credential_parameters", "(", ")", ")", "self", ".", "log", ".", "info", "(", "\"Running the command: `%s`\"", ",", "\" \"", ".", "join", "(", "command_to_run", ")", ")", "self", ".", "sql_proxy_process", "=", "Popen", "(", "command_to_run", ",", "stdin", "=", "PIPE", ",", "stdout", "=", "PIPE", ",", "stderr", "=", "PIPE", ")", "self", ".", "log", ".", "info", "(", "\"The pid of cloud_sql_proxy: %s\"", ",", "self", ".", "sql_proxy_process", ".", "pid", ")", "while", "True", ":", "line", "=", "self", ".", "sql_proxy_process", ".", "stderr", ".", "readline", "(", ")", ".", "decode", "(", "'utf-8'", ")", "return_code", "=", "self", ".", "sql_proxy_process", ".", "poll", "(", ")", "if", "line", "==", "''", "and", "return_code", "is", "not", "None", ":", "self", ".", "sql_proxy_process", "=", "None", "raise", "AirflowException", "(", "\"The cloud_sql_proxy finished early with return code {}!\"", ".", "format", "(", "return_code", ")", ")", "if", "line", "!=", "''", ":", "self", ".", "log", ".", "info", "(", "line", ")", "if", "\"googleapi: Error\"", "in", "line", "or", "\"invalid instance name:\"", "in", "line", ":", "self", ".", "stop_proxy", "(", ")", "raise", "AirflowException", "(", "\"Error when starting the cloud_sql_proxy {}!\"", ".", "format", "(", "line", ")", ")", "if", "\"Ready for new connections\"", "in", "line", ":", "return" ]
Starts Cloud SQL Proxy. You have to remember to stop the proxy if you started it!
[ "Starts", "Cloud", "SQL", "Proxy", "." ]
python
test
48.190476
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py#L962-L966
def _AddStrMethod(message_descriptor, cls): """Helper for _AddMessageMethods().""" def __str__(self): return text_format.MessageToString(self) cls.__str__ = __str__
[ "def", "_AddStrMethod", "(", "message_descriptor", ",", "cls", ")", ":", "def", "__str__", "(", "self", ")", ":", "return", "text_format", ".", "MessageToString", "(", "self", ")", "cls", ".", "__str__", "=", "__str__" ]
Helper for _AddMessageMethods().
[ "Helper", "for", "_AddMessageMethods", "()", "." ]
python
train
34
marshmallow-code/marshmallow
src/marshmallow/fields.py
https://github.com/marshmallow-code/marshmallow/blob/a6b6c4151f1fbf16f3774d4052ca2bddf6903750/src/marshmallow/fields.py#L774-L783
def _validated(self, value): """Format the value or raise a :exc:`ValidationError` if an error occurs.""" if value is None: return None try: return self._format_num(value) except (TypeError, ValueError): self.fail('invalid', input=value) except OverflowError: self.fail('too_large', input=value)
[ "def", "_validated", "(", "self", ",", "value", ")", ":", "if", "value", "is", "None", ":", "return", "None", "try", ":", "return", "self", ".", "_format_num", "(", "value", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "self", ".", "fail", "(", "'invalid'", ",", "input", "=", "value", ")", "except", "OverflowError", ":", "self", ".", "fail", "(", "'too_large'", ",", "input", "=", "value", ")" ]
Format the value or raise a :exc:`ValidationError` if an error occurs.
[ "Format", "the", "value", "or", "raise", "a", ":", "exc", ":", "ValidationError", "if", "an", "error", "occurs", "." ]
python
train
37.4
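A minimal usage sketch of the behaviour the _validated helper above backs, assuming marshmallow 3.x where Schema.load() raises ValidationError directly (the record's code uses the older self.fail() API, so internals differ slightly across versions); the schema and field names below are hypothetical.

from marshmallow import Schema, fields, ValidationError

class MeasurementSchema(Schema):      # hypothetical schema for illustration
    reading = fields.Float()

try:
    MeasurementSchema().load({"reading": "not-a-number"})
except ValidationError as err:
    # the 'invalid' error raised inside the field's _validated surfaces here
    print(err.messages)               # e.g. {'reading': ['Not a valid number.']}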
ioos/pyoos
pyoos/utils/dataorg.py
https://github.com/ioos/pyoos/blob/908660385029ecd8eccda8ab3a6b20b47b915c77/pyoos/utils/dataorg.py#L4-L16
def flatten_element(p): """ Convenience function to return record-style time series representation from elements ('p') members in station element. member['standard'] is a standard_name parameter name, typically CF based. Ideally, member['value'] should already be floating point value, so it's ready to use. Useful with most pyoos collectors. """ rd = {"time": p.time} for member in p.members: rd[member["standard"]] = member["value"] return rd
[ "def", "flatten_element", "(", "p", ")", ":", "rd", "=", "{", "\"time\"", ":", "p", ".", "time", "}", "for", "member", "in", "p", ".", "members", ":", "rd", "[", "member", "[", "\"standard\"", "]", "]", "=", "member", "[", "\"value\"", "]", "return", "rd" ]
Convenience function to return record-style time series representation from elements ('p') members in station element. member['standard'] is a standard_name parameter name, typically CF based. Ideally, member['value'] should already be floating point value, so it's ready to use. Useful with most pyoos collectors.
[ "Convenience", "function", "to", "return", "record", "-", "style", "time", "series", "representation", "from", "elements", "(", "p", ")", "members", "in", "station", "element", ".", "member", "[", "standard", "]", "is", "a", "standard_name", "parameter", "name", "typically", "CF", "based", ".", "Ideally", "member", "[", "value", "]", "should", "already", "be", "floating", "point", "value", "so", "it", "s", "ready", "to", "use", ".", "Useful", "with", "most", "pyoos", "collectors", "." ]
python
train
37.230769
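A small sketch of the record-style dict flatten_element returns, using a stand-in element built with SimpleNamespace instead of a real pyoos collector result; the parameter names and values below are invented, and the import path follows the file path given in the record.

from types import SimpleNamespace
from pyoos.utils.dataorg import flatten_element

element = SimpleNamespace(
    time="2019-01-01T00:00:00Z",
    members=[
        {"standard": "sea_water_temperature", "value": 12.3},
        {"standard": "sea_water_salinity", "value": 35.1},
    ],
)
print(flatten_element(element))
# {'time': '2019-01-01T00:00:00Z', 'sea_water_temperature': 12.3, 'sea_water_salinity': 35.1}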
MartinThoma/hwrt
hwrt/utils.py
https://github.com/MartinThoma/hwrt/blob/725c21a3d0f5a30b8492cbc184b3688ceb364e1c/hwrt/utils.py#L635-L653
def evaluate_model_single_recording(model_file, recording): """ Evaluate a model for a single recording. Parameters ---------- model_file : string Model file (.tar) recording : The handwritten recording. """ (preprocessing_queue, feature_list, model, output_semantics) = load_model(model_file) results = evaluate_model_single_recording_preloaded(preprocessing_queue, feature_list, model, output_semantics, recording) return results
[ "def", "evaluate_model_single_recording", "(", "model_file", ",", "recording", ")", ":", "(", "preprocessing_queue", ",", "feature_list", ",", "model", ",", "output_semantics", ")", "=", "load_model", "(", "model_file", ")", "results", "=", "evaluate_model_single_recording_preloaded", "(", "preprocessing_queue", ",", "feature_list", ",", "model", ",", "output_semantics", ",", "recording", ")", "return", "results" ]
Evaluate a model for a single recording. Parameters ---------- model_file : string Model file (.tar) recording : The handwritten recording.
[ "Evaluate", "a", "model", "for", "a", "single", "recording", "." ]
python
train
36.789474
balabit/typesafety
typesafety/validator.py
https://github.com/balabit/typesafety/blob/452242dd93da9ebd53c173c243156d1351cd96fd/typesafety/validator.py#L107-L118
def undecorate(cls, function): ''' Remove validator decoration from a function. The `function` argument is the function to be cleaned from the validator decorator. ''' if cls.is_function_validated(function): return cls.get_function_validator(function).function return function
[ "def", "undecorate", "(", "cls", ",", "function", ")", ":", "if", "cls", ".", "is_function_validated", "(", "function", ")", ":", "return", "cls", ".", "get_function_validator", "(", "function", ")", ".", "function", "return", "function" ]
Remove validator decoration from a function. The `function` argument is the function to be cleaned from the validator decorator.
[ "Remove", "validator", "decoration", "from", "a", "function", "." ]
python
train
28
RealTimeWeb/datasets
preprocess/earthquakes/earthquakes.py
https://github.com/RealTimeWeb/datasets/blob/2fe5befd251c783744d000bd4763e277616a152f/preprocess/earthquakes/earthquakes.py#L513-L533
def _from_json(json_data): """ Creates a Report from json data. :param json_data: The raw json data to parse :type json_data: dict :returns: Report """ if 'bbox' in json_data: box = BoundingBox._from_json(json_data['bbox']) else: box = BoundingBox(Coordinate(0.,0.,0.), Coordinate(0.,0.,0.)) if 'features' in json_data and json_data['features']: quakes = list(map(Earthquake._from_json, json_data['features'])) else: quakes = [] try: title = json_data['metadata']['title'] except KeyError: raise USGSException("No report title information returned by server") return Report(box, quakes, title)
[ "def", "_from_json", "(", "json_data", ")", ":", "if", "'bbox'", "in", "json_data", ":", "box", "=", "BoundingBox", ".", "_from_json", "(", "json_data", "[", "'bbox'", "]", ")", "else", ":", "box", "=", "BoundingBox", "(", "Coordinate", "(", "0.", ",", "0.", ",", "0.", ")", ",", "Coordinate", "(", "0.", ",", "0.", ",", "0.", ")", ")", "if", "'features'", "in", "json_data", "and", "json_data", "[", "'features'", "]", ":", "quakes", "=", "list", "(", "map", "(", "Earthquake", ".", "_from_json", ",", "json_data", "[", "'features'", "]", ")", ")", "else", ":", "quakes", "=", "[", "]", "try", ":", "title", "=", "json_data", "[", "'metadata'", "]", "[", "'title'", "]", "except", "KeyError", ":", "raise", "USGSException", "(", "\"No report title information returned by server\"", ")", "return", "Report", "(", "box", ",", "quakes", ",", "title", ")" ]
Creates a Report from json data. :param json_data: The raw json data to parse :type json_data: dict :returns: Report
[ "Creates", "a", "Report", "from", "json", "data", ".", ":", "param", "json_data", ":", "The", "raw", "json", "data", "to", "parse", ":", "type", "json_data", ":", "dict", ":", "returns", ":", "Report" ]
python
train
36.095238
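A sketch of the minimal JSON shape _from_json accepts, with invented values; Report, BoundingBox and Earthquake are classes internal to the module above, and the call site is shown only as a hypothetical comment.

payload = {
    "metadata": {"title": "M2.5+ Earthquakes, Past Day"},  # required, otherwise USGSException is raised
    "features": [],                                        # list of GeoJSON feature dicts; empty here
    # "bbox" omitted -> a zeroed-out BoundingBox is substituted
}
# report = Report._from_json(payload)  # hypothetical call, assuming _from_json is a Report staticmethod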
mitsei/dlkit
dlkit/json_/repository/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/repository/objects.py#L979-L994
def set_source(self, source_id): """Sets the source. arg: source_id (osid.id.Id): the new publisher raise: InvalidArgument - ``source_id`` is invalid raise: NoAccess - ``Metadata.isReadOnly()`` is ``true`` raise: NullArgument - ``source_id`` is ``null`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.resource.ResourceForm.set_avatar_template if self.get_source_metadata().is_read_only(): raise errors.NoAccess() if not self._is_valid_id(source_id): raise errors.InvalidArgument() self._my_map['sourceId'] = str(source_id)
[ "def", "set_source", "(", "self", ",", "source_id", ")", ":", "# Implemented from template for osid.resource.ResourceForm.set_avatar_template", "if", "self", ".", "get_source_metadata", "(", ")", ".", "is_read_only", "(", ")", ":", "raise", "errors", ".", "NoAccess", "(", ")", "if", "not", "self", ".", "_is_valid_id", "(", "source_id", ")", ":", "raise", "errors", ".", "InvalidArgument", "(", ")", "self", ".", "_my_map", "[", "'sourceId'", "]", "=", "str", "(", "source_id", ")" ]
Sets the source. arg: source_id (osid.id.Id): the new publisher raise: InvalidArgument - ``source_id`` is invalid raise: NoAccess - ``Metadata.isReadOnly()`` is ``true`` raise: NullArgument - ``source_id`` is ``null`` *compliance: mandatory -- This method must be implemented.*
[ "Sets", "the", "source", "." ]
python
train
42.5625
bpython/curtsies
examples/tictactoeexample.py
https://github.com/bpython/curtsies/blob/223e42b97fbf6c86b479ed4f0963a067333c5a63/examples/tictactoeexample.py#L68-L94
def value(board, who='x'): """Returns the value of a board >>> b = Board(); b._rows = [['x', 'x', 'x'], ['x', 'x', 'x'], ['x', 'x', 'x']] >>> value(b) 1 >>> b = Board(); b._rows = [['o', 'o', 'o'], ['o', 'o', 'o'], ['o', 'o', 'o']] >>> value(b) -1 >>> b = Board(); b._rows = [['x', 'o', ' '], ['x', 'o', ' '], [' ', ' ', ' ']] >>> value(b) 1 >>> b._rows[0][2] = 'x' >>> value(b) -1 """ w = board.winner() if w == who: return 1 if w == opp(who): return -1 if board.turn == 9: return 0 if who == board.whose_turn: return max([value(b, who) for b in board.possible()]) else: return min([value(b, who) for b in board.possible()])
[ "def", "value", "(", "board", ",", "who", "=", "'x'", ")", ":", "w", "=", "board", ".", "winner", "(", ")", "if", "w", "==", "who", ":", "return", "1", "if", "w", "==", "opp", "(", "who", ")", ":", "return", "-", "1", "if", "board", ".", "turn", "==", "9", ":", "return", "0", "if", "who", "==", "board", ".", "whose_turn", ":", "return", "max", "(", "[", "value", "(", "b", ",", "who", ")", "for", "b", "in", "board", ".", "possible", "(", ")", "]", ")", "else", ":", "return", "min", "(", "[", "value", "(", "b", ",", "who", ")", "for", "b", "in", "board", ".", "possible", "(", ")", "]", ")" ]
Returns the value of a board >>> b = Board(); b._rows = [['x', 'x', 'x'], ['x', 'x', 'x'], ['x', 'x', 'x']] >>> value(b) 1 >>> b = Board(); b._rows = [['o', 'o', 'o'], ['o', 'o', 'o'], ['o', 'o', 'o']] >>> value(b) -1 >>> b = Board(); b._rows = [['x', 'o', ' '], ['x', 'o', ' '], [' ', ' ', ' ']] >>> value(b) 1 >>> b._rows[0][2] = 'x' >>> value(b) -1
[ "Returns", "the", "value", "of", "a", "board", ">>>", "b", "=", "Board", "()", ";", "b", ".", "_rows", "=", "[[", "x", "x", "x", "]", "[", "x", "x", "x", "]", "[", "x", "x", "x", "]]", ">>>", "value", "(", "b", ")", "1", ">>>", "b", "=", "Board", "()", ";", "b", ".", "_rows", "=", "[[", "o", "o", "o", "]", "[", "o", "o", "o", "]", "[", "o", "o", "o", "]]", ">>>", "value", "(", "b", ")", "-", "1", ">>>", "b", "=", "Board", "()", ";", "b", ".", "_rows", "=", "[[", "x", "o", "]", "[", "x", "o", "]", "[", "]]", ">>>", "value", "(", "b", ")", "1", ">>>", "b", ".", "_rows", "[", "0", "]", "[", "2", "]", "=", "x", ">>>", "value", "(", "b", ")", "-", "1" ]
python
train
26.666667
kadrlica/pymodeler
pymodeler/model.py
https://github.com/kadrlica/pymodeler/blob/f426c01416fd4b8fc3afeeb6d3b5d1cb0cb8f8e3/pymodeler/model.py#L292-L306
def _init_properties(self): """ Loop through the list of Properties, extract the derived and required properties and do the appropriate book-keeping """ self._missing = {} for k, p in self.params.items(): if p.required: self._missing[k] = p if isinstance(p, Derived): if p.loader is None: # Default to using _<param_name> p.loader = self.__getattribute__("_%s" % k) elif isinstance(p.loader, str): p.loader = self.__getattribute__(p.loader)
[ "def", "_init_properties", "(", "self", ")", ":", "self", ".", "_missing", "=", "{", "}", "for", "k", ",", "p", "in", "self", ".", "params", ".", "items", "(", ")", ":", "if", "p", ".", "required", ":", "self", ".", "_missing", "[", "k", "]", "=", "p", "if", "isinstance", "(", "p", ",", "Derived", ")", ":", "if", "p", ".", "loader", "is", "None", ":", "# Default to using _<param_name>", "p", ".", "loader", "=", "self", ".", "__getattribute__", "(", "\"_%s\"", "%", "k", ")", "elif", "isinstance", "(", "p", ".", "loader", ",", "str", ")", ":", "p", ".", "loader", "=", "self", ".", "__getattribute__", "(", "p", ".", "loader", ")" ]
Loop through the list of Properties, extract the derived and required properties and do the appropriate book-keeping
[ "Loop", "through", "the", "list", "of", "Properties", "extract", "the", "derived", "and", "required", "properties", "and", "do", "the", "appropriate", "book", "-", "keeping" ]
python
test
40.4
pypa/setuptools
setuptools/package_index.py
https://github.com/pypa/setuptools/blob/83c667e0b2a98193851c07115d1af65011ed0fb6/setuptools/package_index.py#L510-L524
def check_hash(self, checker, filename, tfp): """ checker is a ContentChecker """ checker.report( self.debug, "Validating %%s checksum for %s" % filename) if not checker.is_valid(): tfp.close() os.unlink(filename) raise DistutilsError( "%s validation failed for %s; " "possible download problem?" % (checker.hash.name, os.path.basename(filename)) )
[ "def", "check_hash", "(", "self", ",", "checker", ",", "filename", ",", "tfp", ")", ":", "checker", ".", "report", "(", "self", ".", "debug", ",", "\"Validating %%s checksum for %s\"", "%", "filename", ")", "if", "not", "checker", ".", "is_valid", "(", ")", ":", "tfp", ".", "close", "(", ")", "os", ".", "unlink", "(", "filename", ")", "raise", "DistutilsError", "(", "\"%s validation failed for %s; \"", "\"possible download problem?\"", "%", "(", "checker", ".", "hash", ".", "name", ",", "os", ".", "path", ".", "basename", "(", "filename", ")", ")", ")" ]
checker is a ContentChecker
[ "checker", "is", "a", "ContentChecker" ]
python
train
32.933333
pyviz/holoviews
holoviews/core/layout.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/layout.py#L201-L220
def dimension_values(self, dimension, expanded=True, flat=True): """Return the values along the requested dimension. Applies to the main object in the AdjointLayout. Args: dimension: The dimension to return values for expanded (bool, optional): Whether to expand values Whether to return the expanded values, behavior depends on the type of data: * Columnar: If false returns unique values * Geometry: If false returns scalar values per geometry * Gridded: If false returns 1D coordinates flat (bool, optional): Whether to flatten array Returns: NumPy array of values along the requested dimension """ dimension = self.get_dimension(dimension, strict=True).name return self.main.dimension_values(dimension, expanded, flat)
[ "def", "dimension_values", "(", "self", ",", "dimension", ",", "expanded", "=", "True", ",", "flat", "=", "True", ")", ":", "dimension", "=", "self", ".", "get_dimension", "(", "dimension", ",", "strict", "=", "True", ")", ".", "name", "return", "self", ".", "main", ".", "dimension_values", "(", "dimension", ",", "expanded", ",", "flat", ")" ]
Return the values along the requested dimension. Applies to the main object in the AdjointLayout. Args: dimension: The dimension to return values for expanded (bool, optional): Whether to expand values Whether to return the expanded values, behavior depends on the type of data: * Columnar: If false returns unique values * Geometry: If false returns scalar values per geometry * Gridded: If false returns 1D coordinates flat (bool, optional): Whether to flatten array Returns: NumPy array of values along the requested dimension
[ "Return", "the", "values", "along", "the", "requested", "dimension", "." ]
python
train
44.8
milesrichardson/ParsePy
parse_rest/installation.py
https://github.com/milesrichardson/ParsePy/blob/7c52d8a5dc63bb7c3b0b8c0c09d032b4bc7299ea/parse_rest/installation.py#L30-L49
def update_channels(cls, installation_id, channels_to_add=set(), channels_to_remove=set(), **kw): """ Allow an application to manually subscribe or unsubscribe an installation to a certain push channel in a unified operation. this is based on: https://www.parse.com/docs/rest#installations-updating installation_id: the installation id you'd like to add a channel to channels_to_add: the name of the channel you'd like to subscribe the user to channels_to_remove: the name of the channel you'd like to unsubscribe the user from """ installation_url = cls._get_installation_url(installation_id) current_config = cls.GET(installation_url) new_channels = list(set(current_config['channels']).union(channels_to_add).difference(channels_to_remove)) cls.PUT(installation_url, channels=new_channels)
[ "def", "update_channels", "(", "cls", ",", "installation_id", ",", "channels_to_add", "=", "set", "(", ")", ",", "channels_to_remove", "=", "set", "(", ")", ",", "*", "*", "kw", ")", ":", "installation_url", "=", "cls", ".", "_get_installation_url", "(", "installation_id", ")", "current_config", "=", "cls", ".", "GET", "(", "installation_url", ")", "new_channels", "=", "list", "(", "set", "(", "current_config", "[", "'channels'", "]", ")", ".", "union", "(", "channels_to_add", ")", ".", "difference", "(", "channels_to_remove", ")", ")", "cls", ".", "PUT", "(", "installation_url", ",", "channels", "=", "new_channels", ")" ]
Allow an application to manually subscribe or unsubscribe an installation to a certain push channel in a unified operation. this is based on: https://www.parse.com/docs/rest#installations-updating installation_id: the installation id you'd like to add a channel to channels_to_add: the name of the channel you'd like to subscribe the user to channels_to_remove: the name of the channel you'd like to unsubscribe the user from
[ "Allow", "an", "application", "to", "manually", "subscribe", "or", "unsubscribe", "an", "installation", "to", "a", "certain", "push", "channel", "in", "a", "unified", "operation", "." ]
python
train
45.3
twilio/twilio-python
twilio/rest/api/v2010/account/usage/__init__.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/api/v2010/account/usage/__init__.py#L91-L100
def get_instance(self, payload): """ Build an instance of UsageInstance :param dict payload: Payload response from the API :returns: twilio.rest.api.v2010.account.usage.UsageInstance :rtype: twilio.rest.api.v2010.account.usage.UsageInstance """ return UsageInstance(self._version, payload, account_sid=self._solution['account_sid'], )
[ "def", "get_instance", "(", "self", ",", "payload", ")", ":", "return", "UsageInstance", "(", "self", ".", "_version", ",", "payload", ",", "account_sid", "=", "self", ".", "_solution", "[", "'account_sid'", "]", ",", ")" ]
Build an instance of UsageInstance :param dict payload: Payload response from the API :returns: twilio.rest.api.v2010.account.usage.UsageInstance :rtype: twilio.rest.api.v2010.account.usage.UsageInstance
[ "Build", "an", "instance", "of", "UsageInstance" ]
python
train
38.3
dossier/dossier.fc
python/dossier/fc/feature_collection.py
https://github.com/dossier/dossier.fc/blob/3e969d0cb2592fc06afc1c849d2b22283450b5e2/python/dossier/fc/feature_collection.py#L451-L456
def total(self): ''' Returns sum of all counts in all features that are multisets. ''' feats = imap(lambda name: self[name], self._counters()) return sum(chain(*map(lambda mset: map(abs, mset.values()), feats)))
[ "def", "total", "(", "self", ")", ":", "feats", "=", "imap", "(", "lambda", "name", ":", "self", "[", "name", "]", ",", "self", ".", "_counters", "(", ")", ")", "return", "sum", "(", "chain", "(", "*", "map", "(", "lambda", "mset", ":", "map", "(", "abs", ",", "mset", ".", "values", "(", ")", ")", ",", "feats", ")", ")", ")" ]
Returns sum of all counts in all features that are multisets.
[ "Returns", "sum", "of", "all", "counts", "in", "all", "features", "that", "are", "multisets", "." ]
python
train
41
SiLab-Bonn/pyBAR
pybar/fei4/register_utils.py
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/fei4/register_utils.py#L598-L633
def read_pixel_register(self, pix_regs=None, dcs=range(40), overwrite_config=False): '''The function reads the pixel register, interprets the data and returns a masked numpy arrays with the data for the chosen pixel register. Pixels without any data are masked. Parameters ---------- pix_regs : iterable, string List of pixel register to read (e.g. Enable, C_High, ...). If None all are read: "EnableDigInj", "Imon", "Enable", "C_High", "C_Low", "TDAC", "FDAC" dcs : iterable, int List of double columns to read. overwrite_config : bool The read values overwrite the config in RAM if true. Returns ------- list of masked numpy.ndarrays ''' if pix_regs is None: pix_regs = ["EnableDigInj", "Imon", "Enable", "C_High", "C_Low", "TDAC", "FDAC"] self.register_utils.send_commands(self.register.get_commands("ConfMode")) result = [] for pix_reg in pix_regs: pixel_data = np.ma.masked_array(np.zeros(shape=(80, 336), dtype=np.uint32), mask=True) # the result pixel array, only pixel with data are not masked for dc in dcs: with self.readout(fill_buffer=True, callback=None, errback=None): self.register_utils.send_commands(self.register.get_commands("RdFrontEnd", name=[pix_reg], dcs=[dc])) data = self.read_data() interpret_pixel_data(data, dc, pixel_data, invert=False if pix_reg == "EnableDigInj" else True) if overwrite_config: self.register.set_pixel_register(pix_reg, pixel_data.data) result.append(pixel_data) return result
[ "def", "read_pixel_register", "(", "self", ",", "pix_regs", "=", "None", ",", "dcs", "=", "range", "(", "40", ")", ",", "overwrite_config", "=", "False", ")", ":", "if", "pix_regs", "is", "None", ":", "pix_regs", "=", "[", "\"EnableDigInj\"", ",", "\"Imon\"", ",", "\"Enable\"", ",", "\"C_High\"", ",", "\"C_Low\"", ",", "\"TDAC\"", ",", "\"FDAC\"", "]", "self", ".", "register_utils", ".", "send_commands", "(", "self", ".", "register", ".", "get_commands", "(", "\"ConfMode\"", ")", ")", "result", "=", "[", "]", "for", "pix_reg", "in", "pix_regs", ":", "pixel_data", "=", "np", ".", "ma", ".", "masked_array", "(", "np", ".", "zeros", "(", "shape", "=", "(", "80", ",", "336", ")", ",", "dtype", "=", "np", ".", "uint32", ")", ",", "mask", "=", "True", ")", "# the result pixel array, only pixel with data are not masked\r", "for", "dc", "in", "dcs", ":", "with", "self", ".", "readout", "(", "fill_buffer", "=", "True", ",", "callback", "=", "None", ",", "errback", "=", "None", ")", ":", "self", ".", "register_utils", ".", "send_commands", "(", "self", ".", "register", ".", "get_commands", "(", "\"RdFrontEnd\"", ",", "name", "=", "[", "pix_reg", "]", ",", "dcs", "=", "[", "dc", "]", ")", ")", "data", "=", "self", ".", "read_data", "(", ")", "interpret_pixel_data", "(", "data", ",", "dc", ",", "pixel_data", ",", "invert", "=", "False", "if", "pix_reg", "==", "\"EnableDigInj\"", "else", "True", ")", "if", "overwrite_config", ":", "self", ".", "register", ".", "set_pixel_register", "(", "pix_reg", ",", "pixel_data", ".", "data", ")", "result", ".", "append", "(", "pixel_data", ")", "return", "result" ]
The function reads the pixel register, interprets the data and returns a masked numpy arrays with the data for the chosen pixel register. Pixels without any data are masked. Parameters ---------- pix_regs : iterable, string List of pixel register to read (e.g. Enable, C_High, ...). If None all are read: "EnableDigInj", "Imon", "Enable", "C_High", "C_Low", "TDAC", "FDAC" dcs : iterable, int List of double columns to read. overwrite_config : bool The read values overwrite the config in RAM if true. Returns ------- list of masked numpy.ndarrays
[ "The", "function", "reads", "the", "pixel", "register", "interprets", "the", "data", "and", "returns", "a", "masked", "numpy", "arrays", "with", "the", "data", "for", "the", "chosen", "pixel", "register", ".", "Pixels", "without", "any", "data", "are", "masked", ".", "Parameters", "----------", "pix_regs", ":", "iterable", "string", "List", "of", "pixel", "register", "to", "read", "(", "e", ".", "g", ".", "Enable", "C_High", "...", ")", ".", "If", "None", "all", "are", "read", ":", "EnableDigInj", "Imon", "Enable", "C_High", "C_Low", "TDAC", "FDAC", "dcs", ":", "iterable", "int", "List", "of", "double", "columns", "to", "read", ".", "overwrite_config", ":", "bool", "The", "read", "values", "overwrite", "the", "config", "in", "RAM", "if", "true", ".", "Returns", "-------", "list", "of", "masked", "numpy", ".", "ndarrays" ]
python
train
45.277778
zblz/naima
naima/radiative.py
https://github.com/zblz/naima/blob/d6a6781d73bf58fd8269e8b0e3b70be22723cd5b/naima/radiative.py#L768-L819
def flux(self, photon_energy, distance=1 * u.kpc, seed=None): """Differential flux at a given distance from the source from a single seed photon field Parameters ---------- photon_energy : :class:`~astropy.units.Quantity` float or array Photon energy array. distance : :class:`~astropy.units.Quantity` float, optional Distance to the source. If set to 0, the intrinsic luminosity will be returned. Default is 1 kpc. seed : int, str or None Number or name of seed photon field for which the IC contribution is required. If set to None it will return the sum of all contributions (default). """ model = super(InverseCompton, self).flux( photon_energy, distance=distance ) if seed is not None: # Test seed argument if not isinstance(seed, int): if seed not in self.seed_photon_fields: raise ValueError( "Provided seed photon field name is not in" " the definition of the InverseCompton instance" ) else: seed = list(self.seed_photon_fields.keys()).index(seed) elif seed > len(self.seed_photon_fields): raise ValueError( "Provided seed photon field number is larger" " than the number of seed photon fields defined in the" " InverseCompton instance" ) if distance != 0: distance = validate_scalar( "distance", distance, physical_type="length" ) dfac = 4 * np.pi * distance.to("cm") ** 2 out_unit = "1/(s cm2 eV)" else: dfac = 1 out_unit = "1/(s eV)" model = (self.specic[seed] / dfac).to(out_unit) return model
[ "def", "flux", "(", "self", ",", "photon_energy", ",", "distance", "=", "1", "*", "u", ".", "kpc", ",", "seed", "=", "None", ")", ":", "model", "=", "super", "(", "InverseCompton", ",", "self", ")", ".", "flux", "(", "photon_energy", ",", "distance", "=", "distance", ")", "if", "seed", "is", "not", "None", ":", "# Test seed argument", "if", "not", "isinstance", "(", "seed", ",", "int", ")", ":", "if", "seed", "not", "in", "self", ".", "seed_photon_fields", ":", "raise", "ValueError", "(", "\"Provided seed photon field name is not in\"", "\" the definition of the InverseCompton instance\"", ")", "else", ":", "seed", "=", "list", "(", "self", ".", "seed_photon_fields", ".", "keys", "(", ")", ")", ".", "index", "(", "seed", ")", "elif", "seed", ">", "len", "(", "self", ".", "seed_photon_fields", ")", ":", "raise", "ValueError", "(", "\"Provided seed photon field number is larger\"", "\" than the number of seed photon fields defined in the\"", "\" InverseCompton instance\"", ")", "if", "distance", "!=", "0", ":", "distance", "=", "validate_scalar", "(", "\"distance\"", ",", "distance", ",", "physical_type", "=", "\"length\"", ")", "dfac", "=", "4", "*", "np", ".", "pi", "*", "distance", ".", "to", "(", "\"cm\"", ")", "**", "2", "out_unit", "=", "\"1/(s cm2 eV)\"", "else", ":", "dfac", "=", "1", "out_unit", "=", "\"1/(s eV)\"", "model", "=", "(", "self", ".", "specic", "[", "seed", "]", "/", "dfac", ")", ".", "to", "(", "out_unit", ")", "return", "model" ]
Differential flux at a given distance from the source from a single seed photon field Parameters ---------- photon_energy : :class:`~astropy.units.Quantity` float or array Photon energy array. distance : :class:`~astropy.units.Quantity` float, optional Distance to the source. If set to 0, the intrinsic luminosity will be returned. Default is 1 kpc. seed : int, str or None Number or name of seed photon field for which the IC contribution is required. If set to None it will return the sum of all contributions (default).
[ "Differential", "flux", "at", "a", "given", "distance", "from", "the", "source", "from", "a", "single", "seed", "photon", "field" ]
python
train
37.730769
dbcli/athenacli
athenacli/main.py
https://github.com/dbcli/athenacli/blob/bcab59e4953145866430083e902ed4d042d4ebba/athenacli/main.py#L643-L693
def cli(execute, region, aws_access_key_id, aws_secret_access_key, s3_staging_dir, athenaclirc, profile, database): '''A Athena terminal client with auto-completion and syntax highlighting. \b Examples: - athenacli - athenacli my_database ''' if (athenaclirc == ATHENACLIRC) and (not os.path.exists(os.path.expanduser(ATHENACLIRC))): err_msg = ''' Welcome to athenacli! It seems this is your first time to run athenacli, we generated a default config file for you %s Please change it accordingly, and run athenacli again. ''' % ATHENACLIRC print(err_msg) write_default_config(DEFAULT_CONFIG_FILE, ATHENACLIRC) sys.exit(1) if profile != 'default': os.environ['AWS_PROFILE'] = profile athenacli = AthenaCli( region=region, aws_access_key_id=aws_access_key_id, aws_secret_access_key= aws_secret_access_key, s3_staging_dir=s3_staging_dir, athenaclirc=athenaclirc, profile=profile, database=database ) # --execute argument if execute: if os.path.exists(execute): with open(execute) as f: query = f.read() else: query = execute try: athenacli.formatter.format_name = 'csv' athenacli.run_query(query) exit(0) except Exception as e: click.secho(str(e), err=True, fg='red') exit(1) athenacli.run_cli()
[ "def", "cli", "(", "execute", ",", "region", ",", "aws_access_key_id", ",", "aws_secret_access_key", ",", "s3_staging_dir", ",", "athenaclirc", ",", "profile", ",", "database", ")", ":", "if", "(", "athenaclirc", "==", "ATHENACLIRC", ")", "and", "(", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "expanduser", "(", "ATHENACLIRC", ")", ")", ")", ":", "err_msg", "=", "'''\n Welcome to athenacli!\n\n It seems this is your first time to run athenacli,\n we generated a default config file for you\n %s\n Please change it accordingly, and run athenacli again.\n '''", "%", "ATHENACLIRC", "print", "(", "err_msg", ")", "write_default_config", "(", "DEFAULT_CONFIG_FILE", ",", "ATHENACLIRC", ")", "sys", ".", "exit", "(", "1", ")", "if", "profile", "!=", "'default'", ":", "os", ".", "environ", "[", "'AWS_PROFILE'", "]", "=", "profile", "athenacli", "=", "AthenaCli", "(", "region", "=", "region", ",", "aws_access_key_id", "=", "aws_access_key_id", ",", "aws_secret_access_key", "=", "aws_secret_access_key", ",", "s3_staging_dir", "=", "s3_staging_dir", ",", "athenaclirc", "=", "athenaclirc", ",", "profile", "=", "profile", ",", "database", "=", "database", ")", "# --execute argument", "if", "execute", ":", "if", "os", ".", "path", ".", "exists", "(", "execute", ")", ":", "with", "open", "(", "execute", ")", "as", "f", ":", "query", "=", "f", ".", "read", "(", ")", "else", ":", "query", "=", "execute", "try", ":", "athenacli", ".", "formatter", ".", "format_name", "=", "'csv'", "athenacli", ".", "run_query", "(", "query", ")", "exit", "(", "0", ")", "except", "Exception", "as", "e", ":", "click", ".", "secho", "(", "str", "(", "e", ")", ",", "err", "=", "True", ",", "fg", "=", "'red'", ")", "exit", "(", "1", ")", "athenacli", ".", "run_cli", "(", ")" ]
A Athena terminal client with auto-completion and syntax highlighting. \b Examples: - athenacli - athenacli my_database
[ "A", "Athena", "terminal", "client", "with", "auto", "-", "completion", "and", "syntax", "highlighting", "." ]
python
train
29.27451
lsst-sqre/ltd-conveyor
ltdconveyor/s3/upload.py
https://github.com/lsst-sqre/ltd-conveyor/blob/c492937c4c1e050ccc4a0b9dcc38f9980d57e305/ltdconveyor/s3/upload.py#L213-L254
def upload_object(bucket_path, bucket, content='', metadata=None, acl=None, cache_control=None, content_type=None): """Upload an arbitrary object to an S3 bucket. Parameters ---------- bucket_path : `str` Destination path (also known as the key name) of the file in the S3 bucket. content : `str` or `bytes`, optional Object content. bucket : boto3 Bucket instance S3 bucket. metadata : `dict`, optional Header metadata values. These keys will appear in headers as ``x-amz-meta-*``. acl : `str`, optional A pre-canned access control list. See https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl Default is `None`, meaning that no ACL is applied to the object. cache_control : `str`, optional The cache-control header value. For example, ``'max-age=31536000'``. content_type : `str`, optional The object's content type (such as ``text/html``). If left unset, no MIME type is passed to boto3 (which defaults to ``binary/octet-stream``). """ obj = bucket.Object(bucket_path) # Object.put seems to be sensitive to None-type kwargs, so we filter first args = {} if metadata is not None and len(metadata) > 0: # avoid empty Metadata args['Metadata'] = metadata if acl is not None: args['ACL'] = acl if cache_control is not None: args['CacheControl'] = cache_control if content_type is not None: args['ContentType'] = content_type obj.put(Body=content, **args)
[ "def", "upload_object", "(", "bucket_path", ",", "bucket", ",", "content", "=", "''", ",", "metadata", "=", "None", ",", "acl", "=", "None", ",", "cache_control", "=", "None", ",", "content_type", "=", "None", ")", ":", "obj", "=", "bucket", ".", "Object", "(", "bucket_path", ")", "# Object.put seems to be sensitive to None-type kwargs, so we filter first", "args", "=", "{", "}", "if", "metadata", "is", "not", "None", "and", "len", "(", "metadata", ")", ">", "0", ":", "# avoid empty Metadata", "args", "[", "'Metadata'", "]", "=", "metadata", "if", "acl", "is", "not", "None", ":", "args", "[", "'ACL'", "]", "=", "acl", "if", "cache_control", "is", "not", "None", ":", "args", "[", "'CacheControl'", "]", "=", "cache_control", "if", "content_type", "is", "not", "None", ":", "args", "[", "'ContentType'", "]", "=", "content_type", "obj", ".", "put", "(", "Body", "=", "content", ",", "*", "*", "args", ")" ]
Upload an arbitrary object to an S3 bucket. Parameters ---------- bucket_path : `str` Destination path (also known as the key name) of the file in the S3 bucket. content : `str` or `bytes`, optional Object content. bucket : boto3 Bucket instance S3 bucket. metadata : `dict`, optional Header metadata values. These keys will appear in headers as ``x-amz-meta-*``. acl : `str`, optional A pre-canned access control list. See https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl Default is `None`, meaning that no ACL is applied to the object. cache_control : `str`, optional The cache-control header value. For example, ``'max-age=31536000'``. content_type : `str`, optional The object's content type (such as ``text/html``). If left unset, no MIME type is passed to boto3 (which defaults to ``binary/octet-stream``).
[ "Upload", "an", "arbitrary", "object", "to", "an", "S3", "bucket", "." ]
python
test
37.833333
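A minimal call sketch for upload_object, assuming boto3 credentials are already configured in the environment; the bucket name and key below are hypothetical.

import boto3
from ltdconveyor.s3.upload import upload_object

bucket = boto3.resource("s3").Bucket("example-docs-bucket")  # hypothetical bucket name
upload_object(
    "builds/main/index.html",       # bucket_path (key name), hypothetical
    bucket,
    content="<html>...</html>",
    cache_control="max-age=31536000",
    content_type="text/html",
)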
ekzhu/datasketch
datasketch/lsh.py
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/lsh.py#L136-L149
def insert(self, key, minhash, check_duplication=True): ''' Insert a key to the index, together with a MinHash (or weighted MinHash) of the set referenced by the key. :param str key: The identifier of the set. :param datasketch.MinHash minhash: The MinHash of the set. :param bool check_duplication: To avoid duplicate keys in the storage (`default=True`). It's recommended to not change the default, but if you want to avoid the overhead during insert you can set `check_duplication = False`. ''' self._insert(key, minhash, check_duplication=check_duplication, buffer=False)
[ "def", "insert", "(", "self", ",", "key", ",", "minhash", ",", "check_duplication", "=", "True", ")", ":", "self", ".", "_insert", "(", "key", ",", "minhash", ",", "check_duplication", "=", "check_duplication", ",", "buffer", "=", "False", ")" ]
Insert a key to the index, together with a MinHash (or weighted MinHash) of the set referenced by the key. :param str key: The identifier of the set. :param datasketch.MinHash minhash: The MinHash of the set. :param bool check_duplication: To avoid duplicate keys in the storage (`default=True`). It's recommended to not change the default, but if you want to avoid the overhead during insert you can set `check_duplication = False`.
[ "Insert", "a", "key", "to", "the", "index", "together", "with", "a", "MinHash", "(", "or", "weighted", "MinHash", ")", "of", "the", "set", "referenced", "by", "the", "key", "." ]
python
test
53.785714
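A short usage sketch for insert via the public datasketch API (MinHash plus MinHashLSH); the keys and tokens are made up, and with the default check_duplication=True a second insert under an existing key is rejected.

from datasketch import MinHash, MinHashLSH

def minhash_of(tokens, num_perm=128):
    # build a MinHash from an iterable of string tokens
    m = MinHash(num_perm=num_perm)
    for t in tokens:
        m.update(t.encode("utf8"))
    return m

lsh = MinHashLSH(threshold=0.5, num_perm=128)
lsh.insert("doc1", minhash_of(["a", "b", "c"]))
lsh.insert("doc2", minhash_of(["a", "b", "d"]))
print(lsh.query(minhash_of(["a", "b", "c"])))   # candidate keys, e.g. ['doc1', 'doc2']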
prechelt/typecheck-decorator
typecheck/framework.py
https://github.com/prechelt/typecheck-decorator/blob/4aa5a7f17235c70b5b787c9e80bb1f24d3f15933/typecheck/framework.py#L198-L210
def check(self, values, namespace): """specifying a plain tuple allows arguments that are tuples or lists; specifying a specialized (subclassed) tuple allows only that type; specifying a list allows only that list type.""" is_tuplish_type = (issubclass(self._cls, tg.Tuple) or issubclass(type(values), self._cls)) if (not _is_sequence(values) or not is_tuplish_type or len(values) != len(self._checks)): return False for thischeck, thisvalue in zip(self._checks, values): if not thischeck(thisvalue, namespace): return False return True
[ "def", "check", "(", "self", ",", "values", ",", "namespace", ")", ":", "is_tuplish_type", "=", "(", "issubclass", "(", "self", ".", "_cls", ",", "tg", ".", "Tuple", ")", "or", "issubclass", "(", "type", "(", "values", ")", ",", "self", ".", "_cls", ")", ")", "if", "(", "not", "_is_sequence", "(", "values", ")", "or", "not", "is_tuplish_type", "or", "len", "(", "values", ")", "!=", "len", "(", "self", ".", "_checks", ")", ")", ":", "return", "False", "for", "thischeck", ",", "thisvalue", "in", "zip", "(", "self", ".", "_checks", ",", "values", ")", ":", "if", "not", "thischeck", "(", "thisvalue", ",", "namespace", ")", ":", "return", "False", "return", "True" ]
specifying a plain tuple allows arguments that are tuples or lists; specifying a specialized (subclassed) tuple allows only that type; specifying a list allows only that list type.
[ "specifying", "a", "plain", "tuple", "allows", "arguments", "that", "are", "tuples", "or", "lists", ";", "specifying", "a", "specialized", "(", "subclassed", ")", "tuple", "allows", "only", "that", "type", ";", "specifying", "a", "list", "allows", "only", "that", "list", "type", "." ]
python
train
51
pydata/xarray
xarray/core/groupby.py
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/groupby.py#L72-L88
def _consolidate_slices(slices): """Consolidate adjacent slices in a list of slices. """ result = [] last_slice = slice(None) for slice_ in slices: if not isinstance(slice_, slice): raise ValueError('list element is not a slice: %r' % slice_) if (result and last_slice.stop == slice_.start and _is_one_or_none(last_slice.step) and _is_one_or_none(slice_.step)): last_slice = slice(last_slice.start, slice_.stop, slice_.step) result[-1] = last_slice else: result.append(slice_) last_slice = slice_ return result
[ "def", "_consolidate_slices", "(", "slices", ")", ":", "result", "=", "[", "]", "last_slice", "=", "slice", "(", "None", ")", "for", "slice_", "in", "slices", ":", "if", "not", "isinstance", "(", "slice_", ",", "slice", ")", ":", "raise", "ValueError", "(", "'list element is not a slice: %r'", "%", "slice_", ")", "if", "(", "result", "and", "last_slice", ".", "stop", "==", "slice_", ".", "start", "and", "_is_one_or_none", "(", "last_slice", ".", "step", ")", "and", "_is_one_or_none", "(", "slice_", ".", "step", ")", ")", ":", "last_slice", "=", "slice", "(", "last_slice", ".", "start", ",", "slice_", ".", "stop", ",", "slice_", ".", "step", ")", "result", "[", "-", "1", "]", "=", "last_slice", "else", ":", "result", ".", "append", "(", "slice_", ")", "last_slice", "=", "slice_", "return", "result" ]
Consolidate adjacent slices in a list of slices.
[ "Consolidate", "adjacent", "slices", "in", "a", "list", "of", "slices", "." ]
python
train
37.294118
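A tiny illustration of what _consolidate_slices does to adjacent slices; it is a private helper, so the import path below may differ between xarray versions.

from xarray.core.groupby import _consolidate_slices

print(_consolidate_slices([slice(0, 2), slice(2, 5), slice(7, 9)]))
# [slice(0, 5, None), slice(7, 9, None)]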
BerkeleyAutomation/perception
perception/colorized_phoxi_sensor.py
https://github.com/BerkeleyAutomation/perception/blob/03d9b37dd6b66896cdfe173905c9413c8c3c5df6/perception/colorized_phoxi_sensor.py#L145-L196
def _colorize(self, depth_im, color_im): """Colorize a depth image from the PhoXi using a color image from the webcam. Parameters ---------- depth_im : DepthImage The PhoXi depth image. color_im : ColorImage Corresponding color image. Returns ------- ColorImage A colorized image corresponding to the PhoXi depth image. """ # Project the point cloud into the webcam's frame target_shape = (depth_im.data.shape[0], depth_im.data.shape[1], 3) pc_depth = self._phoxi.ir_intrinsics.deproject(depth_im) pc_color = self._T_webcam_world.inverse().dot(self._T_phoxi_world).apply(pc_depth) # Sort the points by their distance from the webcam's apeture pc_data = pc_color.data.T dists = np.linalg.norm(pc_data, axis=1) order = np.argsort(dists) pc_data = pc_data[order] pc_color = PointCloud(pc_data.T, frame=self._webcam.color_intrinsics.frame) sorted_dists = dists[order] sorted_depths = depth_im.data.flatten()[order] # Generate image coordinates for each sorted point icds = self._webcam.color_intrinsics.project(pc_color).data.T # Create mask for points that are masked by others rounded_icds = np.array(icds / 3.0, dtype=np.uint32) unique_icds, unique_inds, unique_inv = np.unique(rounded_icds, axis=0, return_index=True, return_inverse=True) icd_depths = sorted_dists[unique_inds] min_depths_pp = icd_depths[unique_inv] depth_delta_mask = np.abs(min_depths_pp - sorted_dists) < 5e-3 # Create mask for points with missing depth or that lie outside the image valid_mask = np.logical_and(np.logical_and(icds[:,0] >= 0, icds[:,0] < self._webcam.color_intrinsics.width), np.logical_and(icds[:,1] >= 0, icds[:,1] < self._webcam.color_intrinsics.height)) valid_mask = np.logical_and(valid_mask, sorted_depths != 0.0) valid_mask = np.logical_and(valid_mask, depth_delta_mask) valid_icds = icds[valid_mask] colors = color_im.data[valid_icds[:,1],valid_icds[:,0],:] color_im_data = np.zeros((target_shape[0] * target_shape[1], target_shape[2]), dtype=np.uint8) color_im_data[valid_mask] = colors color_im_data[order] = color_im_data.copy() color_im_data = color_im_data.reshape(target_shape) return ColorImage(color_im_data, frame=self._frame)
[ "def", "_colorize", "(", "self", ",", "depth_im", ",", "color_im", ")", ":", "# Project the point cloud into the webcam's frame", "target_shape", "=", "(", "depth_im", ".", "data", ".", "shape", "[", "0", "]", ",", "depth_im", ".", "data", ".", "shape", "[", "1", "]", ",", "3", ")", "pc_depth", "=", "self", ".", "_phoxi", ".", "ir_intrinsics", ".", "deproject", "(", "depth_im", ")", "pc_color", "=", "self", ".", "_T_webcam_world", ".", "inverse", "(", ")", ".", "dot", "(", "self", ".", "_T_phoxi_world", ")", ".", "apply", "(", "pc_depth", ")", "# Sort the points by their distance from the webcam's apeture", "pc_data", "=", "pc_color", ".", "data", ".", "T", "dists", "=", "np", ".", "linalg", ".", "norm", "(", "pc_data", ",", "axis", "=", "1", ")", "order", "=", "np", ".", "argsort", "(", "dists", ")", "pc_data", "=", "pc_data", "[", "order", "]", "pc_color", "=", "PointCloud", "(", "pc_data", ".", "T", ",", "frame", "=", "self", ".", "_webcam", ".", "color_intrinsics", ".", "frame", ")", "sorted_dists", "=", "dists", "[", "order", "]", "sorted_depths", "=", "depth_im", ".", "data", ".", "flatten", "(", ")", "[", "order", "]", "# Generate image coordinates for each sorted point", "icds", "=", "self", ".", "_webcam", ".", "color_intrinsics", ".", "project", "(", "pc_color", ")", ".", "data", ".", "T", "# Create mask for points that are masked by others", "rounded_icds", "=", "np", ".", "array", "(", "icds", "/", "3.0", ",", "dtype", "=", "np", ".", "uint32", ")", "unique_icds", ",", "unique_inds", ",", "unique_inv", "=", "np", ".", "unique", "(", "rounded_icds", ",", "axis", "=", "0", ",", "return_index", "=", "True", ",", "return_inverse", "=", "True", ")", "icd_depths", "=", "sorted_dists", "[", "unique_inds", "]", "min_depths_pp", "=", "icd_depths", "[", "unique_inv", "]", "depth_delta_mask", "=", "np", ".", "abs", "(", "min_depths_pp", "-", "sorted_dists", ")", "<", "5e-3", "# Create mask for points with missing depth or that lie outside the image", "valid_mask", "=", "np", ".", "logical_and", "(", "np", ".", "logical_and", "(", "icds", "[", ":", ",", "0", "]", ">=", "0", ",", "icds", "[", ":", ",", "0", "]", "<", "self", ".", "_webcam", ".", "color_intrinsics", ".", "width", ")", ",", "np", ".", "logical_and", "(", "icds", "[", ":", ",", "1", "]", ">=", "0", ",", "icds", "[", ":", ",", "1", "]", "<", "self", ".", "_webcam", ".", "color_intrinsics", ".", "height", ")", ")", "valid_mask", "=", "np", ".", "logical_and", "(", "valid_mask", ",", "sorted_depths", "!=", "0.0", ")", "valid_mask", "=", "np", ".", "logical_and", "(", "valid_mask", ",", "depth_delta_mask", ")", "valid_icds", "=", "icds", "[", "valid_mask", "]", "colors", "=", "color_im", ".", "data", "[", "valid_icds", "[", ":", ",", "1", "]", ",", "valid_icds", "[", ":", ",", "0", "]", ",", ":", "]", "color_im_data", "=", "np", ".", "zeros", "(", "(", "target_shape", "[", "0", "]", "*", "target_shape", "[", "1", "]", ",", "target_shape", "[", "2", "]", ")", ",", "dtype", "=", "np", ".", "uint8", ")", "color_im_data", "[", "valid_mask", "]", "=", "colors", "color_im_data", "[", "order", "]", "=", "color_im_data", ".", "copy", "(", ")", "color_im_data", "=", "color_im_data", ".", "reshape", "(", "target_shape", ")", "return", "ColorImage", "(", "color_im_data", ",", "frame", "=", "self", ".", "_frame", ")" ]
Colorize a depth image from the PhoXi using a color image from the webcam. Parameters ---------- depth_im : DepthImage The PhoXi depth image. color_im : ColorImage Corresponding color image. Returns ------- ColorImage A colorized image corresponding to the PhoXi depth image.
[ "Colorize", "a", "depth", "image", "from", "the", "PhoXi", "using", "a", "color", "image", "from", "the", "webcam", "." ]
python
train
47.653846
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L10782-L11057
def imshow(data, photometric=None, planarconfig=None, bitspersample=None, interpolation=None, cmap=None, vmin=None, vmax=None, figure=None, title=None, dpi=96, subplot=None, maxdim=None, **kwargs): """Plot n-dimensional images using matplotlib.pyplot. Return figure, subplot and plot axis. Requires pyplot already imported C{from matplotlib import pyplot}. Parameters ---------- data : nd array The image data. photometric : {'MINISWHITE', 'MINISBLACK', 'RGB', or 'PALETTE'} The color space of the image data. planarconfig : {'CONTIG' or 'SEPARATE'} Defines how components of each pixel are stored. bitspersample : int Number of bits per channel in integer RGB images. interpolation : str The image interpolation method used in matplotlib.imshow. By default, 'nearest' will be used for image dimensions <= 512, else 'bilinear'. cmap : str or matplotlib.colors.Colormap The colormap maps non-RGBA scalar data to colors. vmin, vmax : scalar Data range covered by the colormap. By default, the complete range of the data is covered. figure : matplotlib.figure.Figure Matplotlib figure to use for plotting. title : str Window and subplot title. subplot : int A matplotlib.pyplot.subplot axis. maxdim : int Maximum image width and length. kwargs : dict Additional arguments for matplotlib.pyplot.imshow. """ # TODO: rewrite detection of isrgb, iscontig # TODO: use planarconfig if photometric is None: photometric = 'RGB' if maxdim is None: maxdim = 2**16 isrgb = photometric in ('RGB', 'YCBCR') # 'PALETTE', 'YCBCR' if data.dtype == 'float16': data = data.astype('float32') if data.dtype.kind == 'b': isrgb = False if isrgb and not (data.shape[-1] in (3, 4) or ( data.ndim > 2 and data.shape[-3] in (3, 4))): isrgb = False photometric = 'MINISBLACK' data = data.squeeze() if photometric in ('MINISWHITE', 'MINISBLACK', None): data = reshape_nd(data, 2) else: data = reshape_nd(data, 3) dims = data.ndim if dims < 2: raise ValueError('not an image') if dims == 2: dims = 0 isrgb = False else: if isrgb and data.shape[-3] in (3, 4): data = numpy.swapaxes(data, -3, -2) data = numpy.swapaxes(data, -2, -1) elif not isrgb and (data.shape[-1] < data.shape[-2] // 8 and data.shape[-1] < data.shape[-3] // 8 and data.shape[-1] < 5): data = numpy.swapaxes(data, -3, -1) data = numpy.swapaxes(data, -2, -1) isrgb = isrgb and data.shape[-1] in (3, 4) dims -= 3 if isrgb else 2 if interpolation is None: threshold = 512 elif isinstance(interpolation, int): threshold = interpolation else: threshold = 0 if isrgb: data = data[..., :maxdim, :maxdim, :maxdim] if threshold: if (data.shape[-2] > threshold or data.shape[-3] > threshold): interpolation = 'bilinear' else: interpolation = 'nearest' else: data = data[..., :maxdim, :maxdim] if threshold: if (data.shape[-1] > threshold or data.shape[-2] > threshold): interpolation = 'bilinear' else: interpolation = 'nearest' if photometric == 'PALETTE' and isrgb: datamax = data.max() if datamax > 255: data = data >> 8 # possible precision loss data = data.astype('B') elif data.dtype.kind in 'ui': if not (isrgb and data.dtype.itemsize <= 1) or bitspersample is None: try: bitspersample = int(math.ceil(math.log(data.max(), 2))) except Exception: bitspersample = data.dtype.itemsize * 8 elif not isinstance(bitspersample, inttypes): # bitspersample can be tuple, e.g. (5, 6, 5) bitspersample = data.dtype.itemsize * 8 datamax = 2**bitspersample if isrgb: if bitspersample < 8: data = data << (8 - bitspersample) elif bitspersample > 8: data = data >> (bitspersample - 8) # precision loss data = data.astype('B') elif data.dtype.kind == 'f': datamax = data.max() if isrgb and datamax > 1.0: if data.dtype.char == 'd': data = data.astype('f') data /= datamax else: data = data / datamax elif data.dtype.kind == 'b': datamax = 1 elif data.dtype.kind == 'c': data = numpy.absolute(data) datamax = data.max() if isrgb: vmin = 0 else: if vmax is None: vmax = datamax if vmin is None: if data.dtype.kind == 'i': dtmin = numpy.iinfo(data.dtype).min vmin = numpy.min(data) if vmin == dtmin: vmin = numpy.min(data[data > dtmin]) elif data.dtype.kind == 'f': dtmin = numpy.finfo(data.dtype).min vmin = numpy.min(data) if vmin == dtmin: vmin = numpy.min(data[data > dtmin]) else: vmin = 0 pyplot = sys.modules['matplotlib.pyplot'] if figure is None: pyplot.rc('font', family='sans-serif', weight='normal', size=8) figure = pyplot.figure(dpi=dpi, figsize=(10.3, 6.3), frameon=True, facecolor='1.0', edgecolor='w') try: figure.canvas.manager.window.title(title) except Exception: pass size = len(title.splitlines()) if title else 1 pyplot.subplots_adjust(bottom=0.03*(dims+2), top=0.98-size*0.03, left=0.1, right=0.95, hspace=0.05, wspace=0.0) if subplot is None: subplot = 111 subplot = pyplot.subplot(subplot) subplot.set_facecolor((0, 0, 0)) if title: try: title = unicode(title, 'Windows-1252') except TypeError: pass pyplot.title(title, size=11) if cmap is None: if data.dtype.char == '?': cmap = 'gray' elif data.dtype.kind in 'buf' or vmin == 0: cmap = 'viridis' else: cmap = 'coolwarm' if photometric == 'MINISWHITE': cmap += '_r' image = pyplot.imshow(numpy.atleast_2d(data[(0,) * dims].squeeze()), vmin=vmin, vmax=vmax, cmap=cmap, interpolation=interpolation, **kwargs) if not isrgb: pyplot.colorbar() # panchor=(0.55, 0.5), fraction=0.05 def format_coord(x, y): # callback function to format coordinate display in toolbar x = int(x + 0.5) y = int(y + 0.5) try: if dims: return '%s @ %s [%4i, %4i]' % ( curaxdat[1][y, x], current, y, x) return '%s @ [%4i, %4i]' % (data[y, x], y, x) except IndexError: return '' def none(event): return '' subplot.format_coord = format_coord image.get_cursor_data = none image.format_cursor_data = none if dims: current = list((0,) * dims) curaxdat = [0, data[tuple(current)].squeeze()] sliders = [pyplot.Slider( pyplot.axes([0.125, 0.03*(axis+1), 0.725, 0.025]), 'Dimension %i' % axis, 0, data.shape[axis]-1, 0, facecolor='0.5', valfmt='%%.0f [%i]' % data.shape[axis]) for axis in range(dims)] for slider in sliders: slider.drawon = False def set_image(current, sliders=sliders, data=data): # change image and redraw canvas curaxdat[1] = data[tuple(current)].squeeze() image.set_data(curaxdat[1]) for ctrl, index in zip(sliders, current): ctrl.eventson = False ctrl.set_val(index) ctrl.eventson = True figure.canvas.draw() def on_changed(index, axis, data=data, current=current): # callback function for slider change event index = int(round(index)) curaxdat[0] = axis if index == current[axis]: return if index >= data.shape[axis]: index = 0 elif index < 0: index = data.shape[axis] - 1 current[axis] = index set_image(current) def on_keypressed(event, data=data, current=current): # callback function for key press event key = event.key axis = curaxdat[0] if str(key) in '0123456789': on_changed(key, axis) elif key == 'right': on_changed(current[axis] + 1, axis) elif key == 'left': on_changed(current[axis] - 1, axis) elif key == 'up': curaxdat[0] = 0 if axis == len(data.shape)-1 else axis + 1 elif key == 'down': curaxdat[0] = len(data.shape)-1 if axis == 0 else axis - 1 elif key == 'end': on_changed(data.shape[axis] - 1, axis) elif key == 'home': on_changed(0, axis) figure.canvas.mpl_connect('key_press_event', on_keypressed) for axis, ctrl in enumerate(sliders): ctrl.on_changed(lambda k, a=axis: on_changed(k, a)) return figure, subplot, image
[ "def", "imshow", "(", "data", ",", "photometric", "=", "None", ",", "planarconfig", "=", "None", ",", "bitspersample", "=", "None", ",", "interpolation", "=", "None", ",", "cmap", "=", "None", ",", "vmin", "=", "None", ",", "vmax", "=", "None", ",", "figure", "=", "None", ",", "title", "=", "None", ",", "dpi", "=", "96", ",", "subplot", "=", "None", ",", "maxdim", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# TODO: rewrite detection of isrgb, iscontig", "# TODO: use planarconfig", "if", "photometric", "is", "None", ":", "photometric", "=", "'RGB'", "if", "maxdim", "is", "None", ":", "maxdim", "=", "2", "**", "16", "isrgb", "=", "photometric", "in", "(", "'RGB'", ",", "'YCBCR'", ")", "# 'PALETTE', 'YCBCR'", "if", "data", ".", "dtype", "==", "'float16'", ":", "data", "=", "data", ".", "astype", "(", "'float32'", ")", "if", "data", ".", "dtype", ".", "kind", "==", "'b'", ":", "isrgb", "=", "False", "if", "isrgb", "and", "not", "(", "data", ".", "shape", "[", "-", "1", "]", "in", "(", "3", ",", "4", ")", "or", "(", "data", ".", "ndim", ">", "2", "and", "data", ".", "shape", "[", "-", "3", "]", "in", "(", "3", ",", "4", ")", ")", ")", ":", "isrgb", "=", "False", "photometric", "=", "'MINISBLACK'", "data", "=", "data", ".", "squeeze", "(", ")", "if", "photometric", "in", "(", "'MINISWHITE'", ",", "'MINISBLACK'", ",", "None", ")", ":", "data", "=", "reshape_nd", "(", "data", ",", "2", ")", "else", ":", "data", "=", "reshape_nd", "(", "data", ",", "3", ")", "dims", "=", "data", ".", "ndim", "if", "dims", "<", "2", ":", "raise", "ValueError", "(", "'not an image'", ")", "if", "dims", "==", "2", ":", "dims", "=", "0", "isrgb", "=", "False", "else", ":", "if", "isrgb", "and", "data", ".", "shape", "[", "-", "3", "]", "in", "(", "3", ",", "4", ")", ":", "data", "=", "numpy", ".", "swapaxes", "(", "data", ",", "-", "3", ",", "-", "2", ")", "data", "=", "numpy", ".", "swapaxes", "(", "data", ",", "-", "2", ",", "-", "1", ")", "elif", "not", "isrgb", "and", "(", "data", ".", "shape", "[", "-", "1", "]", "<", "data", ".", "shape", "[", "-", "2", "]", "//", "8", "and", "data", ".", "shape", "[", "-", "1", "]", "<", "data", ".", "shape", "[", "-", "3", "]", "//", "8", "and", "data", ".", "shape", "[", "-", "1", "]", "<", "5", ")", ":", "data", "=", "numpy", ".", "swapaxes", "(", "data", ",", "-", "3", ",", "-", "1", ")", "data", "=", "numpy", ".", "swapaxes", "(", "data", ",", "-", "2", ",", "-", "1", ")", "isrgb", "=", "isrgb", "and", "data", ".", "shape", "[", "-", "1", "]", "in", "(", "3", ",", "4", ")", "dims", "-=", "3", "if", "isrgb", "else", "2", "if", "interpolation", "is", "None", ":", "threshold", "=", "512", "elif", "isinstance", "(", "interpolation", ",", "int", ")", ":", "threshold", "=", "interpolation", "else", ":", "threshold", "=", "0", "if", "isrgb", ":", "data", "=", "data", "[", "...", ",", ":", "maxdim", ",", ":", "maxdim", ",", ":", "maxdim", "]", "if", "threshold", ":", "if", "(", "data", ".", "shape", "[", "-", "2", "]", ">", "threshold", "or", "data", ".", "shape", "[", "-", "3", "]", ">", "threshold", ")", ":", "interpolation", "=", "'bilinear'", "else", ":", "interpolation", "=", "'nearest'", "else", ":", "data", "=", "data", "[", "...", ",", ":", "maxdim", ",", ":", "maxdim", "]", "if", "threshold", ":", "if", "(", "data", ".", "shape", "[", "-", "1", "]", ">", "threshold", "or", "data", ".", "shape", "[", "-", "2", "]", ">", "threshold", ")", ":", "interpolation", "=", "'bilinear'", "else", ":", "interpolation", "=", "'nearest'", "if", "photometric", "==", "'PALETTE'", 
"and", "isrgb", ":", "datamax", "=", "data", ".", "max", "(", ")", "if", "datamax", ">", "255", ":", "data", "=", "data", ">>", "8", "# possible precision loss", "data", "=", "data", ".", "astype", "(", "'B'", ")", "elif", "data", ".", "dtype", ".", "kind", "in", "'ui'", ":", "if", "not", "(", "isrgb", "and", "data", ".", "dtype", ".", "itemsize", "<=", "1", ")", "or", "bitspersample", "is", "None", ":", "try", ":", "bitspersample", "=", "int", "(", "math", ".", "ceil", "(", "math", ".", "log", "(", "data", ".", "max", "(", ")", ",", "2", ")", ")", ")", "except", "Exception", ":", "bitspersample", "=", "data", ".", "dtype", ".", "itemsize", "*", "8", "elif", "not", "isinstance", "(", "bitspersample", ",", "inttypes", ")", ":", "# bitspersample can be tuple, e.g. (5, 6, 5)", "bitspersample", "=", "data", ".", "dtype", ".", "itemsize", "*", "8", "datamax", "=", "2", "**", "bitspersample", "if", "isrgb", ":", "if", "bitspersample", "<", "8", ":", "data", "=", "data", "<<", "(", "8", "-", "bitspersample", ")", "elif", "bitspersample", ">", "8", ":", "data", "=", "data", ">>", "(", "bitspersample", "-", "8", ")", "# precision loss", "data", "=", "data", ".", "astype", "(", "'B'", ")", "elif", "data", ".", "dtype", ".", "kind", "==", "'f'", ":", "datamax", "=", "data", ".", "max", "(", ")", "if", "isrgb", "and", "datamax", ">", "1.0", ":", "if", "data", ".", "dtype", ".", "char", "==", "'d'", ":", "data", "=", "data", ".", "astype", "(", "'f'", ")", "data", "/=", "datamax", "else", ":", "data", "=", "data", "/", "datamax", "elif", "data", ".", "dtype", ".", "kind", "==", "'b'", ":", "datamax", "=", "1", "elif", "data", ".", "dtype", ".", "kind", "==", "'c'", ":", "data", "=", "numpy", ".", "absolute", "(", "data", ")", "datamax", "=", "data", ".", "max", "(", ")", "if", "isrgb", ":", "vmin", "=", "0", "else", ":", "if", "vmax", "is", "None", ":", "vmax", "=", "datamax", "if", "vmin", "is", "None", ":", "if", "data", ".", "dtype", ".", "kind", "==", "'i'", ":", "dtmin", "=", "numpy", ".", "iinfo", "(", "data", ".", "dtype", ")", ".", "min", "vmin", "=", "numpy", ".", "min", "(", "data", ")", "if", "vmin", "==", "dtmin", ":", "vmin", "=", "numpy", ".", "min", "(", "data", "[", "data", ">", "dtmin", "]", ")", "elif", "data", ".", "dtype", ".", "kind", "==", "'f'", ":", "dtmin", "=", "numpy", ".", "finfo", "(", "data", ".", "dtype", ")", ".", "min", "vmin", "=", "numpy", ".", "min", "(", "data", ")", "if", "vmin", "==", "dtmin", ":", "vmin", "=", "numpy", ".", "min", "(", "data", "[", "data", ">", "dtmin", "]", ")", "else", ":", "vmin", "=", "0", "pyplot", "=", "sys", ".", "modules", "[", "'matplotlib.pyplot'", "]", "if", "figure", "is", "None", ":", "pyplot", ".", "rc", "(", "'font'", ",", "family", "=", "'sans-serif'", ",", "weight", "=", "'normal'", ",", "size", "=", "8", ")", "figure", "=", "pyplot", ".", "figure", "(", "dpi", "=", "dpi", ",", "figsize", "=", "(", "10.3", ",", "6.3", ")", ",", "frameon", "=", "True", ",", "facecolor", "=", "'1.0'", ",", "edgecolor", "=", "'w'", ")", "try", ":", "figure", ".", "canvas", ".", "manager", ".", "window", ".", "title", "(", "title", ")", "except", "Exception", ":", "pass", "size", "=", "len", "(", "title", ".", "splitlines", "(", ")", ")", "if", "title", "else", "1", "pyplot", ".", "subplots_adjust", "(", "bottom", "=", "0.03", "*", "(", "dims", "+", "2", ")", ",", "top", "=", "0.98", "-", "size", "*", "0.03", ",", "left", "=", "0.1", ",", "right", "=", "0.95", ",", "hspace", "=", "0.05", ",", "wspace", "=", "0.0", ")", "if", "subplot", "is", "None", 
":", "subplot", "=", "111", "subplot", "=", "pyplot", ".", "subplot", "(", "subplot", ")", "subplot", ".", "set_facecolor", "(", "(", "0", ",", "0", ",", "0", ")", ")", "if", "title", ":", "try", ":", "title", "=", "unicode", "(", "title", ",", "'Windows-1252'", ")", "except", "TypeError", ":", "pass", "pyplot", ".", "title", "(", "title", ",", "size", "=", "11", ")", "if", "cmap", "is", "None", ":", "if", "data", ".", "dtype", ".", "char", "==", "'?'", ":", "cmap", "=", "'gray'", "elif", "data", ".", "dtype", ".", "kind", "in", "'buf'", "or", "vmin", "==", "0", ":", "cmap", "=", "'viridis'", "else", ":", "cmap", "=", "'coolwarm'", "if", "photometric", "==", "'MINISWHITE'", ":", "cmap", "+=", "'_r'", "image", "=", "pyplot", ".", "imshow", "(", "numpy", ".", "atleast_2d", "(", "data", "[", "(", "0", ",", ")", "*", "dims", "]", ".", "squeeze", "(", ")", ")", ",", "vmin", "=", "vmin", ",", "vmax", "=", "vmax", ",", "cmap", "=", "cmap", ",", "interpolation", "=", "interpolation", ",", "*", "*", "kwargs", ")", "if", "not", "isrgb", ":", "pyplot", ".", "colorbar", "(", ")", "# panchor=(0.55, 0.5), fraction=0.05", "def", "format_coord", "(", "x", ",", "y", ")", ":", "# callback function to format coordinate display in toolbar", "x", "=", "int", "(", "x", "+", "0.5", ")", "y", "=", "int", "(", "y", "+", "0.5", ")", "try", ":", "if", "dims", ":", "return", "'%s @ %s [%4i, %4i]'", "%", "(", "curaxdat", "[", "1", "]", "[", "y", ",", "x", "]", ",", "current", ",", "y", ",", "x", ")", "return", "'%s @ [%4i, %4i]'", "%", "(", "data", "[", "y", ",", "x", "]", ",", "y", ",", "x", ")", "except", "IndexError", ":", "return", "''", "def", "none", "(", "event", ")", ":", "return", "''", "subplot", ".", "format_coord", "=", "format_coord", "image", ".", "get_cursor_data", "=", "none", "image", ".", "format_cursor_data", "=", "none", "if", "dims", ":", "current", "=", "list", "(", "(", "0", ",", ")", "*", "dims", ")", "curaxdat", "=", "[", "0", ",", "data", "[", "tuple", "(", "current", ")", "]", ".", "squeeze", "(", ")", "]", "sliders", "=", "[", "pyplot", ".", "Slider", "(", "pyplot", ".", "axes", "(", "[", "0.125", ",", "0.03", "*", "(", "axis", "+", "1", ")", ",", "0.725", ",", "0.025", "]", ")", ",", "'Dimension %i'", "%", "axis", ",", "0", ",", "data", ".", "shape", "[", "axis", "]", "-", "1", ",", "0", ",", "facecolor", "=", "'0.5'", ",", "valfmt", "=", "'%%.0f [%i]'", "%", "data", ".", "shape", "[", "axis", "]", ")", "for", "axis", "in", "range", "(", "dims", ")", "]", "for", "slider", "in", "sliders", ":", "slider", ".", "drawon", "=", "False", "def", "set_image", "(", "current", ",", "sliders", "=", "sliders", ",", "data", "=", "data", ")", ":", "# change image and redraw canvas", "curaxdat", "[", "1", "]", "=", "data", "[", "tuple", "(", "current", ")", "]", ".", "squeeze", "(", ")", "image", ".", "set_data", "(", "curaxdat", "[", "1", "]", ")", "for", "ctrl", ",", "index", "in", "zip", "(", "sliders", ",", "current", ")", ":", "ctrl", ".", "eventson", "=", "False", "ctrl", ".", "set_val", "(", "index", ")", "ctrl", ".", "eventson", "=", "True", "figure", ".", "canvas", ".", "draw", "(", ")", "def", "on_changed", "(", "index", ",", "axis", ",", "data", "=", "data", ",", "current", "=", "current", ")", ":", "# callback function for slider change event", "index", "=", "int", "(", "round", "(", "index", ")", ")", "curaxdat", "[", "0", "]", "=", "axis", "if", "index", "==", "current", "[", "axis", "]", ":", "return", "if", "index", ">=", "data", ".", "shape", "[", "axis", "]", ":", "index", "=", "0", 
"elif", "index", "<", "0", ":", "index", "=", "data", ".", "shape", "[", "axis", "]", "-", "1", "current", "[", "axis", "]", "=", "index", "set_image", "(", "current", ")", "def", "on_keypressed", "(", "event", ",", "data", "=", "data", ",", "current", "=", "current", ")", ":", "# callback function for key press event", "key", "=", "event", ".", "key", "axis", "=", "curaxdat", "[", "0", "]", "if", "str", "(", "key", ")", "in", "'0123456789'", ":", "on_changed", "(", "key", ",", "axis", ")", "elif", "key", "==", "'right'", ":", "on_changed", "(", "current", "[", "axis", "]", "+", "1", ",", "axis", ")", "elif", "key", "==", "'left'", ":", "on_changed", "(", "current", "[", "axis", "]", "-", "1", ",", "axis", ")", "elif", "key", "==", "'up'", ":", "curaxdat", "[", "0", "]", "=", "0", "if", "axis", "==", "len", "(", "data", ".", "shape", ")", "-", "1", "else", "axis", "+", "1", "elif", "key", "==", "'down'", ":", "curaxdat", "[", "0", "]", "=", "len", "(", "data", ".", "shape", ")", "-", "1", "if", "axis", "==", "0", "else", "axis", "-", "1", "elif", "key", "==", "'end'", ":", "on_changed", "(", "data", ".", "shape", "[", "axis", "]", "-", "1", ",", "axis", ")", "elif", "key", "==", "'home'", ":", "on_changed", "(", "0", ",", "axis", ")", "figure", ".", "canvas", ".", "mpl_connect", "(", "'key_press_event'", ",", "on_keypressed", ")", "for", "axis", ",", "ctrl", "in", "enumerate", "(", "sliders", ")", ":", "ctrl", ".", "on_changed", "(", "lambda", "k", ",", "a", "=", "axis", ":", "on_changed", "(", "k", ",", "a", ")", ")", "return", "figure", ",", "subplot", ",", "image" ]
Plot n-dimensional images using matplotlib.pyplot. Return figure, subplot and plot axis. Requires pyplot already imported C{from matplotlib import pyplot}. Parameters ---------- data : nd array The image data. photometric : {'MINISWHITE', 'MINISBLACK', 'RGB', or 'PALETTE'} The color space of the image data. planarconfig : {'CONTIG' or 'SEPARATE'} Defines how components of each pixel are stored. bitspersample : int Number of bits per channel in integer RGB images. interpolation : str The image interpolation method used in matplotlib.imshow. By default, 'nearest' will be used for image dimensions <= 512, else 'bilinear'. cmap : str or matplotlib.colors.Colormap The colormap maps non-RGBA scalar data to colors. vmin, vmax : scalar Data range covered by the colormap. By default, the complete range of the data is covered. figure : matplotlib.figure.Figure Matplotlib figure to use for plotting. title : str Window and subplot title. subplot : int A matplotlib.pyplot.subplot axis. maxdim : int Maximum image width and length. kwargs : dict Additional arguments for matplotlib.pyplot.imshow.
[ "Plot", "n", "-", "dimensional", "images", "using", "matplotlib", ".", "pyplot", "." ]
python
train
34.362319
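A minimal usage sketch for the imshow helper above, assuming it is importable from a tifffile-like module and that matplotlib.pyplot has already been imported (the function looks pyplot up via sys.modules). The array shape, title and colormap are illustrative placeholders only.

import numpy
from matplotlib import pyplot  # must be imported before calling imshow

# Hypothetical 2-D grayscale image; imshow treats it as MINISBLACK data.
data = (numpy.random.rand(64, 64) * 255).astype('uint8')

figure, subplot, image = imshow(data, title='random test image', cmap='gray')
pyplot.show()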
hydraplatform/hydra-base
hydra_base/lib/notes.py
https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/lib/notes.py#L82-L97
def update_note(note, **kwargs): """ Update a note """ note_i = _get_note(note.id) if note.ref_key != note_i.ref_key: raise HydraError("Cannot convert a %s note to a %s note. Please create a new note instead."%(note_i.ref_key, note.ref_key)) note_i.set_ref(note.ref_key, note.ref_id) note_i.value = note.value db.DBSession.flush() return note_i
[ "def", "update_note", "(", "note", ",", "*", "*", "kwargs", ")", ":", "note_i", "=", "_get_note", "(", "note", ".", "id", ")", "if", "note", ".", "ref_key", "!=", "note_i", ".", "ref_key", ":", "raise", "HydraError", "(", "\"Cannot convert a %s note to a %s note. Please create a new note instead.\"", "%", "(", "note_i", ".", "ref_key", ",", "note", ".", "ref_key", ")", ")", "note_i", ".", "set_ref", "(", "note", ".", "ref_key", ",", "note", ".", "ref_id", ")", "note_i", ".", "value", "=", "note", ".", "value", "db", ".", "DBSession", ".", "flush", "(", ")", "return", "note_i" ]
Update a note
[ "Update", "a", "note" ]
python
train
23.625
wakatime/wakatime
wakatime/packages/pygments/lexers/rebol.py
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/lexers/rebol.py#L235-L244
def analyse_text(text):
    """
    Check if code contains REBOL header and so it is probably not R code
    """
    if re.match(r'^\s*REBOL\s*\[', text, re.IGNORECASE):
        # The code starts with REBOL header
        return 1.0
    elif re.search(r'\s*REBOL\s*\[', text, re.IGNORECASE):
        # The code contains REBOL header but also some text before it
        return 0.5
[ "def", "analyse_text", "(", "text", ")", ":", "if", "re", ".", "match", "(", "r'^\\s*REBOL\\s*\\['", ",", "text", ",", "re", ".", "IGNORECASE", ")", ":", "# The code starts with REBOL header", "return", "1.0", "elif", "re", ".", "search", "(", "r'\\s*REBOL\\s*['", ",", "text", ",", "re", ".", "IGNORECASE", ")", ":", "# The code contains REBOL header but also some text before it", "return", "0.5" ]
Check if code contains REBOL header and so it is probably not R code
[ "Check", "if", "code", "contains", "REBOL", "header", "and", "so", "it", "probably", "not", "R", "code" ]
python
train
40.3
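A quick illustration of the heuristic above, calling it as a plain function (in Pygments it lives on the REBOL lexer class). The sample strings are made up for this sketch.

print(analyse_text('REBOL [ Title: "demo" ]'))          # 1.0 - starts with a REBOL header
print(analyse_text('; note\nREBOL [ Title: "demo" ]'))  # 0.5 - header appears after other text
print(analyse_text('x <- c(1, 2, 3)'))                  # None - no REBOL header found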
John-Lin/pydcard
pydcard/pydcard.py
https://github.com/John-Lin/pydcard/blob/57b57cca2c69dc0c260f5b05ae440341860d8ce4/pydcard/pydcard.py#L9-L18
def pageassert(func):
    '''
    Decorator that asserts the page number is valid.
    '''
    @wraps(func)
    def wrapper(*args, **kwargs):
        if args[0] < 1 or args[0] > 40:
            raise ValueError('Page Number not found')
        return func(*args, **kwargs)
    return wrapper
[ "def", "pageassert", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "args", "[", "0", "]", "<", "1", "or", "args", "[", "0", "]", ">", "40", ":", "raise", "ValueError", "(", "'Page Number not found'", ")", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper" ]
Decorator that asserts the page number is valid.
[ "Decorator", "that", "assert", "page", "number" ]
python
train
26.7
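A small sketch of how the decorator above might be applied; the decorated function and its argument are hypothetical. The first positional argument is treated as the page number, and the decorator's module must import functools.wraps.

@pageassert
def get_page_posts(page_id):
    return 'posts for page %d' % page_id

get_page_posts(3)    # first positional argument is in range, call proceeds
get_page_posts(41)   # raises ValueError('Page Number not found')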
aksas/pypo4sel
core/pypo4sel/core/common.py
https://github.com/aksas/pypo4sel/blob/935b5f9bfa6682aefdef8a43ebcbcf274dec752c/core/pypo4sel/core/common.py#L226-L252
def build_locator(selector): """ - ID = "#valid_id" - CLASS_NAME = ".valid_class_name" - TAG_NAME = "valid_tag_name" - XPATH = start with "./" or "//" or "$x:" - LINK_TEXT = start with "$link_text:" - PARTIAL_LINK_TEXT = start with "$partial_link_text:" - NAME = "@valid_name_attribute_value" CSS_SELECTOR = all other that starts with *|.|#|[\w-]|\[|: :type selector: str|tuple :param selector: :rtype: tuple[selenium.webdriver.common.by.By, str] :return: """ if type(selector) is tuple: return selector if not isinstance(selector, six.string_types): raise InvalidSelectorException("Invalid locator values passed in") s = selector.strip() for test, by, index in selectors: if test(s): return by, s[index:] raise InvalidSelectorException("Invalid locator values passed in: {}".format(selector))
[ "def", "build_locator", "(", "selector", ")", ":", "if", "type", "(", "selector", ")", "is", "tuple", ":", "return", "selector", "if", "not", "isinstance", "(", "selector", ",", "six", ".", "string_types", ")", ":", "raise", "InvalidSelectorException", "(", "\"Invalid locator values passed in\"", ")", "s", "=", "selector", ".", "strip", "(", ")", "for", "test", ",", "by", ",", "index", "in", "selectors", ":", "if", "test", "(", "s", ")", ":", "return", "by", ",", "s", "[", "index", ":", "]", "raise", "InvalidSelectorException", "(", "\"Invalid locator values passed in: {}\"", ".", "format", "(", "selector", ")", ")" ]
- ID = "#valid_id" - CLASS_NAME = ".valid_class_name" - TAG_NAME = "valid_tag_name" - XPATH = start with "./" or "//" or "$x:" - LINK_TEXT = start with "$link_text:" - PARTIAL_LINK_TEXT = start with "$partial_link_text:" - NAME = "@valid_name_attribute_value" CSS_SELECTOR = all other that starts with *|.|#|[\w-]|\[|: :type selector: str|tuple :param selector: :rtype: tuple[selenium.webdriver.common.by.By, str] :return:
[ "-", "ID", "=", "#valid_id", "-", "CLASS_NAME", "=", ".", "valid_class_name", "-", "TAG_NAME", "=", "valid_tag_name", "-", "XPATH", "=", "start", "with", ".", "/", "or", "//", "or", "$x", ":", "-", "LINK_TEXT", "=", "start", "with", "$link_text", ":", "-", "PARTIAL_LINK_TEXT", "=", "start", "with", "$partial_link_text", ":", "-", "NAME", "=", "@valid_name_attribute_value" ]
python
train
32.666667
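The docstring above implies roughly the following mappings; the actual results depend on the module-level selectors table (not shown), so treat these as an illustrative sketch rather than guaranteed output.

from selenium.webdriver.common.by import By

build_locator('#login')              # expected: (By.ID, 'login')
build_locator('.btn-primary')        # expected: (By.CLASS_NAME, 'btn-primary')
build_locator('//div[@id="main"]')   # expected: (By.XPATH, '//div[@id="main"]')
build_locator('@username')           # expected: (By.NAME, 'username')
build_locator((By.CSS_SELECTOR, 'div#main'))  # tuples are returned unchanged
build_locator(42)                    # raises InvalidSelectorException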
acutesoftware/AIKIF
aikif/toolbox/text_tools.py
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/text_tools.py#L5-L25
def parse_text_to_table(txt): """ takes a blob of text and finds delimiter OR guesses the column positions to parse into a table. input: txt = blob of text, lines separated by \n output: res = table of text """ res = [] # resulting table delim = identify_delim(txt) print('txt to parse = ', txt, '\ndelim=',delim) if delim == '' or delim == ' ': fixed_split = identify_col_pos(txt) if fixed_split == []: res = [] else: res = parse_text_by_col_pos(txt, fixed_split) else: res = parse_text_by_delim(txt, delim) return res
[ "def", "parse_text_to_table", "(", "txt", ")", ":", "res", "=", "[", "]", "# resulting table\r", "delim", "=", "identify_delim", "(", "txt", ")", "print", "(", "'txt to parse = '", ",", "txt", ",", "'\\ndelim='", ",", "delim", ")", "if", "delim", "==", "''", "or", "delim", "==", "' '", ":", "fixed_split", "=", "identify_col_pos", "(", "txt", ")", "if", "fixed_split", "==", "[", "]", ":", "res", "=", "[", "]", "else", ":", "res", "=", "parse_text_by_col_pos", "(", "txt", ",", "fixed_split", ")", "else", ":", "res", "=", "parse_text_by_delim", "(", "txt", ",", "delim", ")", "return", "res" ]
takes a blob of text and finds delimiter OR guesses the column positions to parse into a table. input: txt = blob of text, lines separated by \n output: res = table of text
[ "takes", "a", "blob", "of", "text", "and", "finds", "delimiter", "OR", "guesses", "the", "column", "positions", "to", "parse", "into", "a", "table", ".", "input", ":", "txt", "=", "blob", "of", "text", "lines", "separated", "by", "\\", "n", "output", ":", "res", "=", "table", "of", "text" ]
python
train
26.904762
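A rough usage sketch, assuming identify_delim recognises the comma in this made-up input; the helper functions it calls are internal to the AIKIF module.

txt = 'name,age\nalice,30\nbob,25'
table = parse_text_to_table(txt)
# Expected result (if the comma is detected as the delimiter):
# [['name', 'age'], ['alice', '30'], ['bob', '25']]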
danilobellini/audiolazy
audiolazy/lazy_io.py
https://github.com/danilobellini/audiolazy/blob/dba0a278937909980ed40b976d866b8e97c35dee/audiolazy/lazy_io.py#L220-L250
def close(self): """ Destructor for this audio interface. Waits the threads to finish their streams, if desired. """ with self.halting: # Avoid simultaneous "close" threads if not self.finished: # Ignore all "close" calls, but the first, self.finished = True # and any call to play would raise ThreadError # Closes all playing AudioThread instances while True: with self.lock: # Ensure there's no other thread messing around try: thread = self._threads[0] # Needless to say: pop = deadlock except IndexError: # Empty list break # No more threads if not self.wait: thread.stop() thread.join() # Closes all recording RecStream instances while self._recordings: recst = self._recordings[-1] recst.stop() recst.take(inf) # Ensure it'll be closed # Finishes assert not self._pa._streams # No stream should survive self._pa.terminate()
[ "def", "close", "(", "self", ")", ":", "with", "self", ".", "halting", ":", "# Avoid simultaneous \"close\" threads", "if", "not", "self", ".", "finished", ":", "# Ignore all \"close\" calls, but the first,", "self", ".", "finished", "=", "True", "# and any call to play would raise ThreadError", "# Closes all playing AudioThread instances", "while", "True", ":", "with", "self", ".", "lock", ":", "# Ensure there's no other thread messing around", "try", ":", "thread", "=", "self", ".", "_threads", "[", "0", "]", "# Needless to say: pop = deadlock", "except", "IndexError", ":", "# Empty list", "break", "# No more threads", "if", "not", "self", ".", "wait", ":", "thread", ".", "stop", "(", ")", "thread", ".", "join", "(", ")", "# Closes all recording RecStream instances", "while", "self", ".", "_recordings", ":", "recst", "=", "self", ".", "_recordings", "[", "-", "1", "]", "recst", ".", "stop", "(", ")", "recst", ".", "take", "(", "inf", ")", "# Ensure it'll be closed", "# Finishes", "assert", "not", "self", ".", "_pa", ".", "_streams", "# No stream should survive", "self", ".", "_pa", ".", "terminate", "(", ")" ]
Destructor for this audio interface. Waits the threads to finish their streams, if desired.
[ "Destructor", "for", "this", "audio", "interface", ".", "Waits", "the", "threads", "to", "finish", "their", "streams", "if", "desired", "." ]
python
train
32.870968
hugapi/hug
hug/routing.py
https://github.com/hugapi/hug/blob/080901c81576657f82e2432fd4a82f1d0d2f370c/hug/routing.py#L256-L277
def allow_origins(self, *origins, methods=None, max_age=None, credentials=None, headers=None, **overrides): """Convenience method for quickly allowing other resources to access this one""" response_headers = {} if origins: @hug.response_middleware() def process_data(request, response, resource): if 'ORIGIN' in request.headers: origin = request.headers['ORIGIN'] if origin in origins: response.set_header('Access-Control-Allow-Origin', origin) else: response_headers['Access-Control-Allow-Origin'] = '*' if methods: response_headers['Access-Control-Allow-Methods'] = ', '.join(methods) if max_age: response_headers['Access-Control-Max-Age'] = max_age if credentials: response_headers['Access-Control-Allow-Credentials'] = str(credentials).lower() if headers: response_headers['Access-Control-Allow-Headers'] = headers return self.add_response_headers(response_headers, **overrides)
[ "def", "allow_origins", "(", "self", ",", "*", "origins", ",", "methods", "=", "None", ",", "max_age", "=", "None", ",", "credentials", "=", "None", ",", "headers", "=", "None", ",", "*", "*", "overrides", ")", ":", "response_headers", "=", "{", "}", "if", "origins", ":", "@", "hug", ".", "response_middleware", "(", ")", "def", "process_data", "(", "request", ",", "response", ",", "resource", ")", ":", "if", "'ORIGIN'", "in", "request", ".", "headers", ":", "origin", "=", "request", ".", "headers", "[", "'ORIGIN'", "]", "if", "origin", "in", "origins", ":", "response", ".", "set_header", "(", "'Access-Control-Allow-Origin'", ",", "origin", ")", "else", ":", "response_headers", "[", "'Access-Control-Allow-Origin'", "]", "=", "'*'", "if", "methods", ":", "response_headers", "[", "'Access-Control-Allow-Methods'", "]", "=", "', '", ".", "join", "(", "methods", ")", "if", "max_age", ":", "response_headers", "[", "'Access-Control-Max-Age'", "]", "=", "max_age", "if", "credentials", ":", "response_headers", "[", "'Access-Control-Allow-Credentials'", "]", "=", "str", "(", "credentials", ")", ".", "lower", "(", ")", "if", "headers", ":", "response_headers", "[", "'Access-Control-Allow-Headers'", "]", "=", "headers", "return", "self", ".", "add_response_headers", "(", "response_headers", ",", "*", "*", "overrides", ")" ]
Convenience method for quickly allowing other resources to access this one
[ "Convenience", "method", "for", "quickly", "allowing", "other", "resources", "to", "access", "this", "one" ]
python
train
49.909091
RedFantom/ttkwidgets
ttkwidgets/checkboxtreeview.py
https://github.com/RedFantom/ttkwidgets/blob/02150322060f867b6e59a175522ef84b09168019/ttkwidgets/checkboxtreeview.py#L133-L145
def tag_del(self, item, tag): """ Remove tag from the tags of item. :param item: item identifier :type item: str :param tag: tag name :type tag: str """ tags = list(self.item(item, "tags")) if tag in tags: tags.remove(tag) self.item(item, tags=tuple(tags))
[ "def", "tag_del", "(", "self", ",", "item", ",", "tag", ")", ":", "tags", "=", "list", "(", "self", ".", "item", "(", "item", ",", "\"tags\"", ")", ")", "if", "tag", "in", "tags", ":", "tags", ".", "remove", "(", "tag", ")", "self", ".", "item", "(", "item", ",", "tags", "=", "tuple", "(", "tags", ")", ")" ]
Remove tag from the tags of item. :param item: item identifier :type item: str :param tag: tag name :type tag: str
[ "Remove", "tag", "from", "the", "tags", "of", "item", ".", ":", "param", "item", ":", "item", "identifier", ":", "type", "item", ":", "str", ":", "param", "tag", ":", "tag", "name", ":", "type", "tag", ":", "str" ]
python
train
26.846154
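A minimal sketch using ttkwidgets' CheckboxTreeview, which provides tag_del; the item text and tag names are placeholders.

import tkinter as tk
from ttkwidgets import CheckboxTreeview

root = tk.Tk()
tree = CheckboxTreeview(root)
item = tree.insert('', 'end', text='task 1', tags=('urgent',))
tree.tag_del(item, 'urgent')      # 'urgent' is removed from the item's tags
tree.tag_del(item, 'missing')     # absent tags are silently ignored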
inspirehep/harvesting-kit
harvestingkit/inspire_cds_package/from_inspire.py
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/inspire_cds_package/from_inspire.py#L178-L230
def get_record(self): """Override the base.""" self.recid = self.get_recid() self.remove_controlfields() self.update_system_numbers() self.add_systemnumber("Inspire", recid=self.recid) self.add_control_number("003", "SzGeCERN") self.update_collections() self.update_languages() self.update_reportnumbers() self.update_authors() self.update_journals() self.update_subject_categories("INSPIRE", "SzGeCERN", "categories_cds") self.update_pagenumber() self.update_notes() self.update_experiments() self.update_isbn() self.update_dois() self.update_links_and_ffts() self.update_date() self.update_date_year() self.update_hidden_notes() self.update_oai_info() self.update_cnum() self.update_conference_info() self.fields_list = [ "909", "541", "961", "970", "690", "695", "981", ] self.strip_fields() if "ANNOUNCEMENT" in self.collections: self.update_conference_111() self.update_conference_links() record_add_field(self.record, "690", ind1="C", subfields=[("a", "CONFERENCE")]) if "THESIS" in self.collections: self.update_thesis_information() self.update_thesis_supervisors() if "PROCEEDINGS" in self.collections: # Special proceeding syntax self.update_title_to_proceeding() self.update_author_to_proceeding() record_add_field(self.record, "690", ind1="C", subfields=[("a", "CONFERENCE")]) # 690 tags if self.tag_as_cern: record_add_field(self.record, "690", ind1="C", subfields=[("a", "CERN")]) return self.record
[ "def", "get_record", "(", "self", ")", ":", "self", ".", "recid", "=", "self", ".", "get_recid", "(", ")", "self", ".", "remove_controlfields", "(", ")", "self", ".", "update_system_numbers", "(", ")", "self", ".", "add_systemnumber", "(", "\"Inspire\"", ",", "recid", "=", "self", ".", "recid", ")", "self", ".", "add_control_number", "(", "\"003\"", ",", "\"SzGeCERN\"", ")", "self", ".", "update_collections", "(", ")", "self", ".", "update_languages", "(", ")", "self", ".", "update_reportnumbers", "(", ")", "self", ".", "update_authors", "(", ")", "self", ".", "update_journals", "(", ")", "self", ".", "update_subject_categories", "(", "\"INSPIRE\"", ",", "\"SzGeCERN\"", ",", "\"categories_cds\"", ")", "self", ".", "update_pagenumber", "(", ")", "self", ".", "update_notes", "(", ")", "self", ".", "update_experiments", "(", ")", "self", ".", "update_isbn", "(", ")", "self", ".", "update_dois", "(", ")", "self", ".", "update_links_and_ffts", "(", ")", "self", ".", "update_date", "(", ")", "self", ".", "update_date_year", "(", ")", "self", ".", "update_hidden_notes", "(", ")", "self", ".", "update_oai_info", "(", ")", "self", ".", "update_cnum", "(", ")", "self", ".", "update_conference_info", "(", ")", "self", ".", "fields_list", "=", "[", "\"909\"", ",", "\"541\"", ",", "\"961\"", ",", "\"970\"", ",", "\"690\"", ",", "\"695\"", ",", "\"981\"", ",", "]", "self", ".", "strip_fields", "(", ")", "if", "\"ANNOUNCEMENT\"", "in", "self", ".", "collections", ":", "self", ".", "update_conference_111", "(", ")", "self", ".", "update_conference_links", "(", ")", "record_add_field", "(", "self", ".", "record", ",", "\"690\"", ",", "ind1", "=", "\"C\"", ",", "subfields", "=", "[", "(", "\"a\"", ",", "\"CONFERENCE\"", ")", "]", ")", "if", "\"THESIS\"", "in", "self", ".", "collections", ":", "self", ".", "update_thesis_information", "(", ")", "self", ".", "update_thesis_supervisors", "(", ")", "if", "\"PROCEEDINGS\"", "in", "self", ".", "collections", ":", "# Special proceeding syntax", "self", ".", "update_title_to_proceeding", "(", ")", "self", ".", "update_author_to_proceeding", "(", ")", "record_add_field", "(", "self", ".", "record", ",", "\"690\"", ",", "ind1", "=", "\"C\"", ",", "subfields", "=", "[", "(", "\"a\"", ",", "\"CONFERENCE\"", ")", "]", ")", "# 690 tags", "if", "self", ".", "tag_as_cern", ":", "record_add_field", "(", "self", ".", "record", ",", "\"690\"", ",", "ind1", "=", "\"C\"", ",", "subfields", "=", "[", "(", "\"a\"", ",", "\"CERN\"", ")", "]", ")", "return", "self", ".", "record" ]
Override the base.
[ "Override", "the", "base", "." ]
python
valid
33.698113
senaite/senaite.core
bika/lims/workflow/analysisservice/guards.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/workflow/analysisservice/guards.py#L24-L43
def guard_activate(analysis_service): """Returns whether the transition activate can be performed for the analysis service passed in """ calculation = analysis_service.getCalculation() if not calculation: return True # If the calculation is inactive, we cannot activate the service if not api.is_active(calculation): return False # All services that we depend on to calculate our result are active or we # don't depend on other services. dependencies = calculation.getDependentServices() for dependency in dependencies: if not api.is_active(dependency): return False return True
[ "def", "guard_activate", "(", "analysis_service", ")", ":", "calculation", "=", "analysis_service", ".", "getCalculation", "(", ")", "if", "not", "calculation", ":", "return", "True", "# If the calculation is inactive, we cannot activate the service", "if", "not", "api", ".", "is_active", "(", "calculation", ")", ":", "return", "False", "# All services that we depend on to calculate our result are active or we", "# don't depend on other services.", "dependencies", "=", "calculation", ".", "getDependentServices", "(", ")", "for", "dependency", "in", "dependencies", ":", "if", "not", "api", ".", "is_active", "(", "dependency", ")", ":", "return", "False", "return", "True" ]
Returns whether the transition activate can be performed for the analysis service passed in
[ "Returns", "whether", "the", "transition", "activate", "can", "be", "performed", "for", "the", "analysis", "service", "passed", "in" ]
python
train
32.3
Chilipp/docrep
docrep/__init__.py
https://github.com/Chilipp/docrep/blob/637971f76e1a6e1c70e36dcd1b02bbc37ba02487/docrep/__init__.py#L289-L330
def get_sections(self, s, base, sections=['Parameters', 'Other Parameters']): """ Method that extracts the specified sections out of the given string if (and only if) the docstring follows the numpy documentation guidelines [1]_. Note that the section either must appear in the :attr:`param_like_sections` or the :attr:`text_sections` attribute. Parameters ---------- s: str Docstring to split base: str base to use in the :attr:`sections` attribute sections: list of str sections to look for. Each section must be followed by a newline character ('\\n') and a bar of '-' (following the numpy (napoleon) docstring conventions). Returns ------- str The replaced string References ---------- .. [1] https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt See Also -------- delete_params, keep_params, delete_types, keep_types, delete_kwargs: For manipulating the docstring sections save_docstring: for saving an entire docstring """ params = self.params # Remove the summary and dedent the rest s = self._remove_summary(s) for section in sections: key = '%s.%s' % (base, section.lower().replace(' ', '_')) params[key] = self._get_section(s, section) return s
[ "def", "get_sections", "(", "self", ",", "s", ",", "base", ",", "sections", "=", "[", "'Parameters'", ",", "'Other Parameters'", "]", ")", ":", "params", "=", "self", ".", "params", "# Remove the summary and dedent the rest", "s", "=", "self", ".", "_remove_summary", "(", "s", ")", "for", "section", "in", "sections", ":", "key", "=", "'%s.%s'", "%", "(", "base", ",", "section", ".", "lower", "(", ")", ".", "replace", "(", "' '", ",", "'_'", ")", ")", "params", "[", "key", "]", "=", "self", ".", "_get_section", "(", "s", ",", "section", ")", "return", "s" ]
Method that extracts the specified sections out of the given string if (and only if) the docstring follows the numpy documentation guidelines [1]_. Note that the section either must appear in the :attr:`param_like_sections` or the :attr:`text_sections` attribute. Parameters ---------- s: str Docstring to split base: str base to use in the :attr:`sections` attribute sections: list of str sections to look for. Each section must be followed by a newline character ('\\n') and a bar of '-' (following the numpy (napoleon) docstring conventions). Returns ------- str The replaced string References ---------- .. [1] https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt See Also -------- delete_params, keep_params, delete_types, keep_types, delete_kwargs: For manipulating the docstring sections save_docstring: for saving an entire docstring
[ "Method", "that", "extracts", "the", "specified", "sections", "out", "of", "the", "given", "string", "if", "(", "and", "only", "if", ")", "the", "docstring", "follows", "the", "numpy", "documentation", "guidelines", "[", "1", "]", "_", ".", "Note", "that", "the", "section", "either", "must", "appear", "in", "the", ":", "attr", ":", "param_like_sections", "or", "the", ":", "attr", ":", "text_sections", "attribute", "." ]
python
train
35.285714
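A sketch of how get_sections is typically used through docrep's DocstringProcessor; the sample docstring and the 'demo' base name are invented for illustration.

from docrep import DocstringProcessor

docstrings = DocstringProcessor()

def compute(a, b=1):
    """Add two numbers.

    Parameters
    ----------
    a: int
        First operand
    b: int, optional
        Second operand
    """

docstrings.get_sections(compute.__doc__, 'demo')
# The extracted section should now be reachable as
# docstrings.params['demo.parameters'] for reuse in other docstrings.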
raiden-network/raiden
raiden/network/transport/matrix/client.py
https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/network/transport/matrix/client.py#L389-L408
def _sync(self, timeout_ms=30000): """ Reimplements MatrixClient._sync, add 'account_data' support to /sync """ response = self.api.sync(self.sync_token, timeout_ms) prev_sync_token = self.sync_token self.sync_token = response["next_batch"] if self._handle_thread is not None: # if previous _handle_thread is still running, wait for it and re-raise if needed self._handle_thread.get() is_first_sync = (prev_sync_token is None) self._handle_thread = gevent.Greenlet(self._handle_response, response, is_first_sync) self._handle_thread.name = ( f'GMatrixClient._sync user_id:{self.user_id} sync_token:{prev_sync_token}' ) self._handle_thread.link_exception(lambda g: self.sync_thread.kill(g.exception)) self._handle_thread.start() if self._post_hook_func is not None: self._post_hook_func(self.sync_token)
[ "def", "_sync", "(", "self", ",", "timeout_ms", "=", "30000", ")", ":", "response", "=", "self", ".", "api", ".", "sync", "(", "self", ".", "sync_token", ",", "timeout_ms", ")", "prev_sync_token", "=", "self", ".", "sync_token", "self", ".", "sync_token", "=", "response", "[", "\"next_batch\"", "]", "if", "self", ".", "_handle_thread", "is", "not", "None", ":", "# if previous _handle_thread is still running, wait for it and re-raise if needed", "self", ".", "_handle_thread", ".", "get", "(", ")", "is_first_sync", "=", "(", "prev_sync_token", "is", "None", ")", "self", ".", "_handle_thread", "=", "gevent", ".", "Greenlet", "(", "self", ".", "_handle_response", ",", "response", ",", "is_first_sync", ")", "self", ".", "_handle_thread", ".", "name", "=", "(", "f'GMatrixClient._sync user_id:{self.user_id} sync_token:{prev_sync_token}'", ")", "self", ".", "_handle_thread", ".", "link_exception", "(", "lambda", "g", ":", "self", ".", "sync_thread", ".", "kill", "(", "g", ".", "exception", ")", ")", "self", ".", "_handle_thread", ".", "start", "(", ")", "if", "self", ".", "_post_hook_func", "is", "not", "None", ":", "self", ".", "_post_hook_func", "(", "self", ".", "sync_token", ")" ]
Reimplements MatrixClient._sync, add 'account_data' support to /sync
[ "Reimplements", "MatrixClient", ".", "_sync", "add", "account_data", "support", "to", "/", "sync" ]
python
train
46.5
CybOXProject/mixbox
mixbox/signals.py
https://github.com/CybOXProject/mixbox/blob/9097dae7a433f5b98c18171c4a5598f69a7d30af/mixbox/signals.py#L59-L72
def __purge(): """Remove all dead signal receivers from the global receivers collection. Note: It is assumed that the caller holds the __lock. """ global __receivers newreceivers = collections.defaultdict(list) for signal, receivers in six.iteritems(__receivers): alive = [x for x in receivers if not __is_dead(x)] newreceivers[signal] = alive __receivers = newreceivers
[ "def", "__purge", "(", ")", ":", "global", "__receivers", "newreceivers", "=", "collections", ".", "defaultdict", "(", "list", ")", "for", "signal", ",", "receivers", "in", "six", ".", "iteritems", "(", "__receivers", ")", ":", "alive", "=", "[", "x", "for", "x", "in", "receivers", "if", "not", "__is_dead", "(", "x", ")", "]", "newreceivers", "[", "signal", "]", "=", "alive", "__receivers", "=", "newreceivers" ]
Remove all dead signal receivers from the global receivers collection. Note: It is assumed that the caller holds the __lock.
[ "Remove", "all", "dead", "signal", "receivers", "from", "the", "global", "receivers", "collection", "." ]
python
train
29.428571
Murali-group/halp
halp/directed_hypergraph.py
https://github.com/Murali-group/halp/blob/6eb27466ba84e2281e18f93b62aae5efb21ef8b3/halp/directed_hypergraph.py#L912-L924
def is_F_hypergraph(self): """Indicates whether the hypergraph is an F-hypergraph. In an F-hypergraph, all hyperedges are F-hyperedges -- that is, every hyperedge has exactly one node in the tail. :returns: bool -- True iff the hypergraph is an F-hypergraph. """ for hyperedge_id in self._hyperedge_attributes: tail = self.get_hyperedge_tail(hyperedge_id) if len(tail) > 1: return False return True
[ "def", "is_F_hypergraph", "(", "self", ")", ":", "for", "hyperedge_id", "in", "self", ".", "_hyperedge_attributes", ":", "tail", "=", "self", ".", "get_hyperedge_tail", "(", "hyperedge_id", ")", "if", "len", "(", "tail", ")", ">", "1", ":", "return", "False", "return", "True" ]
Indicates whether the hypergraph is an F-hypergraph. In an F-hypergraph, all hyperedges are F-hyperedges -- that is, every hyperedge has exactly one node in the tail. :returns: bool -- True iff the hypergraph is an F-hypergraph.
[ "Indicates", "whether", "the", "hypergraph", "is", "an", "F", "-", "hypergraph", ".", "In", "an", "F", "-", "hypergraph", "all", "hyperedges", "are", "F", "-", "hyperedges", "--", "that", "is", "every", "hyperedge", "has", "exactly", "one", "node", "in", "the", "tail", "." ]
python
train
37.230769
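A short sketch using halp's DirectedHypergraph, assuming the usual add_hyperedge(tail, head) API; the node names are arbitrary.

from halp.directed_hypergraph import DirectedHypergraph

H = DirectedHypergraph()
H.add_hyperedge({'a'}, {'b', 'c'})   # one node in the tail: an F-hyperedge
print(H.is_F_hypergraph())           # True

H.add_hyperedge({'b', 'c'}, {'d'})   # two nodes in the tail
print(H.is_F_hypergraph())           # False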
hbldh/pybankid
bankid/jsonclient.py
https://github.com/hbldh/pybankid/blob/1405f66e41f912cdda15e20aea08cdfa6b60480a/bankid/jsonclient.py#L79-L126
def authenticate( self, end_user_ip, personal_number=None, requirement=None, **kwargs ): """Request an authentication order. The :py:meth:`collect` method is used to query the status of the order. Note that personal number is not needed when authentication is to be done on the same device, provided that the returned ``autoStartToken`` is used to open the BankID Client. Example data returned: .. code-block:: json { "orderRef":"131daac9-16c6-4618-beb0-365768f37288", "autoStartToken":"7c40b5c9-fa74-49cf-b98c-bfe651f9a7c6" } :param end_user_ip: IP address of the user requesting the authentication. :type end_user_ip: str :param personal_number: The Swedish personal number in format YYYYMMDDXXXX. :type personal_number: str :param requirement: An optional dictionary stating how the signature must be created and verified. See BankID Relying Party Guidelines, section 13.5 for more details. :type requirement: dict :return: The order response. :rtype: dict :raises BankIDError: raises a subclass of this error when error has been returned from server. """ data = {"endUserIp": end_user_ip} if personal_number: data["personalNumber"] = personal_number if requirement and isinstance(requirement, dict): data["requirement"] = requirement # Handling potentially changed optional in-parameters. data.update(kwargs) response = self.client.post(self._auth_endpoint, json=data) if response.status_code == 200: return response.json() else: raise get_json_error_class(response)
[ "def", "authenticate", "(", "self", ",", "end_user_ip", ",", "personal_number", "=", "None", ",", "requirement", "=", "None", ",", "*", "*", "kwargs", ")", ":", "data", "=", "{", "\"endUserIp\"", ":", "end_user_ip", "}", "if", "personal_number", ":", "data", "[", "\"personalNumber\"", "]", "=", "personal_number", "if", "requirement", "and", "isinstance", "(", "requirement", ",", "dict", ")", ":", "data", "[", "\"requirement\"", "]", "=", "requirement", "# Handling potentially changed optional in-parameters.", "data", ".", "update", "(", "kwargs", ")", "response", "=", "self", ".", "client", ".", "post", "(", "self", ".", "_auth_endpoint", ",", "json", "=", "data", ")", "if", "response", ".", "status_code", "==", "200", ":", "return", "response", ".", "json", "(", ")", "else", ":", "raise", "get_json_error_class", "(", "response", ")" ]
Request an authentication order. The :py:meth:`collect` method is used to query the status of the order. Note that personal number is not needed when authentication is to be done on the same device, provided that the returned ``autoStartToken`` is used to open the BankID Client. Example data returned: .. code-block:: json { "orderRef":"131daac9-16c6-4618-beb0-365768f37288", "autoStartToken":"7c40b5c9-fa74-49cf-b98c-bfe651f9a7c6" } :param end_user_ip: IP address of the user requesting the authentication. :type end_user_ip: str :param personal_number: The Swedish personal number in format YYYYMMDDXXXX. :type personal_number: str :param requirement: An optional dictionary stating how the signature must be created and verified. See BankID Relying Party Guidelines, section 13.5 for more details. :type requirement: dict :return: The order response. :rtype: dict :raises BankIDError: raises a subclass of this error when error has been returned from server.
[ "Request", "an", "authentication", "order", ".", "The", ":", "py", ":", "meth", ":", "collect", "method", "is", "used", "to", "query", "the", "status", "of", "the", "order", "." ]
python
train
37.979167
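A hypothetical call against pybankid's JSON client; the certificate paths, IP address and personal number are placeholders, and the test_server flag assumes the BankID test environment.

from bankid import BankIDJSONClient

client = BankIDJSONClient(certificates=('certificate.pem', 'key.pem'),
                          test_server=True)
order = client.authenticate(end_user_ip='194.168.2.25',
                            personal_number='YYYYMMDDXXXX')
# order['orderRef'] is then polled with client.collect(order_ref=...) until
# the status becomes 'complete'.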
zerotk/easyfs
zerotk/easyfs/_easyfs.py
https://github.com/zerotk/easyfs/blob/140923db51fb91d5a5847ad17412e8bce51ba3da/zerotk/easyfs/_easyfs.py#L302-L349
def _DoCopyFile(source_filename, target_filename, copy_symlink=True): ''' :param unicode source_filename: The source filename. Schemas: local, ftp, http :param unicode target_filename: Target filename. Schemas: local, ftp :param copy_symlink: @see _CopyFileLocal :raises FileNotFoundError: If source_filename does not exist ''' from six.moves.urllib.parse import urlparse source_url = urlparse(source_filename) target_url = urlparse(target_filename) if _UrlIsLocal(source_url): if not Exists(source_filename): from ._exceptions import FileNotFoundError raise FileNotFoundError(source_filename) if _UrlIsLocal(target_url): # local to local _CopyFileLocal(source_filename, target_filename, copy_symlink=copy_symlink) elif target_url.scheme in ['ftp']: from ._exceptions import NotImplementedProtocol raise NotImplementedProtocol(target_url.scheme) else: from ._exceptions import NotImplementedProtocol raise NotImplementedProtocol(target_url.scheme) elif source_url.scheme in ['http', 'https', 'ftp']: if _UrlIsLocal(target_url): # HTTP/FTP to local from ._exceptions import NotImplementedProtocol raise NotImplementedProtocol(target_url.scheme) else: # HTTP/FTP to other ==> NotImplemented from ._exceptions import NotImplementedProtocol raise NotImplementedProtocol(target_url.scheme) else: from ._exceptions import NotImplementedProtocol # @Reimport raise NotImplementedProtocol(source_url.scheme)
[ "def", "_DoCopyFile", "(", "source_filename", ",", "target_filename", ",", "copy_symlink", "=", "True", ")", ":", "from", "six", ".", "moves", ".", "urllib", ".", "parse", "import", "urlparse", "source_url", "=", "urlparse", "(", "source_filename", ")", "target_url", "=", "urlparse", "(", "target_filename", ")", "if", "_UrlIsLocal", "(", "source_url", ")", ":", "if", "not", "Exists", "(", "source_filename", ")", ":", "from", ".", "_exceptions", "import", "FileNotFoundError", "raise", "FileNotFoundError", "(", "source_filename", ")", "if", "_UrlIsLocal", "(", "target_url", ")", ":", "# local to local", "_CopyFileLocal", "(", "source_filename", ",", "target_filename", ",", "copy_symlink", "=", "copy_symlink", ")", "elif", "target_url", ".", "scheme", "in", "[", "'ftp'", "]", ":", "from", ".", "_exceptions", "import", "NotImplementedProtocol", "raise", "NotImplementedProtocol", "(", "target_url", ".", "scheme", ")", "else", ":", "from", ".", "_exceptions", "import", "NotImplementedProtocol", "raise", "NotImplementedProtocol", "(", "target_url", ".", "scheme", ")", "elif", "source_url", ".", "scheme", "in", "[", "'http'", ",", "'https'", ",", "'ftp'", "]", ":", "if", "_UrlIsLocal", "(", "target_url", ")", ":", "# HTTP/FTP to local", "from", ".", "_exceptions", "import", "NotImplementedProtocol", "raise", "NotImplementedProtocol", "(", "target_url", ".", "scheme", ")", "else", ":", "# HTTP/FTP to other ==> NotImplemented", "from", ".", "_exceptions", "import", "NotImplementedProtocol", "raise", "NotImplementedProtocol", "(", "target_url", ".", "scheme", ")", "else", ":", "from", ".", "_exceptions", "import", "NotImplementedProtocol", "# @Reimport", "raise", "NotImplementedProtocol", "(", "source_url", ".", "scheme", ")" ]
:param unicode source_filename: The source filename. Schemas: local, ftp, http :param unicode target_filename: Target filename. Schemas: local, ftp :param copy_symlink: @see _CopyFileLocal :raises FileNotFoundError: If source_filename does not exist
[ ":", "param", "unicode", "source_filename", ":", "The", "source", "filename", ".", "Schemas", ":", "local", "ftp", "http" ]
python
valid
35.166667
paramiko/paramiko
paramiko/sftp_file.py
https://github.com/paramiko/paramiko/blob/cf7d49d66f3b1fbc8b0853518a54050182b3b5eb/paramiko/sftp_file.py#L343-L356
def truncate(self, size): """ Change the size of this file. This usually extends or shrinks the size of the file, just like the ``truncate()`` method on Python file objects. :param size: the new size of the file """ self.sftp._log( DEBUG, "truncate({}, {!r})".format(hexlify(self.handle), size) ) attr = SFTPAttributes() attr.st_size = size self.sftp._request(CMD_FSETSTAT, self.handle, attr)
[ "def", "truncate", "(", "self", ",", "size", ")", ":", "self", ".", "sftp", ".", "_log", "(", "DEBUG", ",", "\"truncate({}, {!r})\"", ".", "format", "(", "hexlify", "(", "self", ".", "handle", ")", ",", "size", ")", ")", "attr", "=", "SFTPAttributes", "(", ")", "attr", ".", "st_size", "=", "size", "self", ".", "sftp", ".", "_request", "(", "CMD_FSETSTAT", ",", "self", ".", "handle", ",", "attr", ")" ]
Change the size of this file. This usually extends or shrinks the size of the file, just like the ``truncate()`` method on Python file objects. :param size: the new size of the file
[ "Change", "the", "size", "of", "this", "file", ".", "This", "usually", "extends", "or", "shrinks", "the", "size", "of", "the", "file", "just", "like", "the", "truncate", "()", "method", "on", "Python", "file", "objects", "." ]
python
train
34.357143
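A brief sketch of truncating a remote file over SFTP with paramiko; the host, credentials and remote path are placeholders.

import paramiko

ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect('example.com', username='user', password='secret')  # placeholder credentials

sftp = ssh.open_sftp()
remote_file = sftp.open('/tmp/data.log', 'r+')
remote_file.truncate(0)   # shrink the remote file to zero bytes
remote_file.close()
sftp.close()
ssh.close()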
fastai/fastai
old/fastai/structured.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/old/fastai/structured.py#L139-L176
def apply_cats(df, trn): """Changes any columns of strings in df into categorical variables using trn as a template for the category codes. Parameters: ----------- df: A pandas dataframe. Any columns of strings will be changed to categorical values. The category codes are determined by trn. trn: A pandas dataframe. When creating a category for df, it looks up the what the category's code were in trn and makes those the category codes for df. Examples: --------- >>> df = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['a', 'b', 'a']}) >>> df col1 col2 0 1 a 1 2 b 2 3 a note the type of col2 is string >>> train_cats(df) >>> df col1 col2 0 1 a 1 2 b 2 3 a now the type of col2 is category {a : 1, b : 2} >>> df2 = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['b', 'a', 'a']}) >>> apply_cats(df2, df) col1 col2 0 1 b 1 2 a 2 3 a now the type of col is category {a : 1, b : 2} """ for n,c in df.items(): if (n in trn.columns) and (trn[n].dtype.name=='category'): df[n] = c.astype('category').cat.as_ordered() df[n].cat.set_categories(trn[n].cat.categories, ordered=True, inplace=True)
[ "def", "apply_cats", "(", "df", ",", "trn", ")", ":", "for", "n", ",", "c", "in", "df", ".", "items", "(", ")", ":", "if", "(", "n", "in", "trn", ".", "columns", ")", "and", "(", "trn", "[", "n", "]", ".", "dtype", ".", "name", "==", "'category'", ")", ":", "df", "[", "n", "]", "=", "c", ".", "astype", "(", "'category'", ")", ".", "cat", ".", "as_ordered", "(", ")", "df", "[", "n", "]", ".", "cat", ".", "set_categories", "(", "trn", "[", "n", "]", ".", "cat", ".", "categories", ",", "ordered", "=", "True", ",", "inplace", "=", "True", ")" ]
Changes any columns of strings in df into categorical variables using trn as a template for the category codes. Parameters: ----------- df: A pandas dataframe. Any columns of strings will be changed to categorical values. The category codes are determined by trn. trn: A pandas dataframe. When creating a category for df, it looks up the what the category's code were in trn and makes those the category codes for df. Examples: --------- >>> df = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['a', 'b', 'a']}) >>> df col1 col2 0 1 a 1 2 b 2 3 a note the type of col2 is string >>> train_cats(df) >>> df col1 col2 0 1 a 1 2 b 2 3 a now the type of col2 is category {a : 1, b : 2} >>> df2 = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['b', 'a', 'a']}) >>> apply_cats(df2, df) col1 col2 0 1 b 1 2 a 2 3 a now the type of col is category {a : 1, b : 2}
[ "Changes", "any", "columns", "of", "strings", "in", "df", "into", "categorical", "variables", "using", "trn", "as", "a", "template", "for", "the", "category", "codes", ".", "Parameters", ":", "-----------", "df", ":", "A", "pandas", "dataframe", ".", "Any", "columns", "of", "strings", "will", "be", "changed", "to", "categorical", "values", ".", "The", "category", "codes", "are", "determined", "by", "trn", ".", "trn", ":", "A", "pandas", "dataframe", ".", "When", "creating", "a", "category", "for", "df", "it", "looks", "up", "the", "what", "the", "category", "s", "code", "were", "in", "trn", "and", "makes", "those", "the", "category", "codes", "for", "df", ".", "Examples", ":", "---------", ">>>", "df", "=", "pd", ".", "DataFrame", "(", "{", "col1", ":", "[", "1", "2", "3", "]", "col2", ":", "[", "a", "b", "a", "]", "}", ")", ">>>", "df", "col1", "col2", "0", "1", "a", "1", "2", "b", "2", "3", "a", "note", "the", "type", "of", "col2", "is", "string", ">>>", "train_cats", "(", "df", ")", ">>>", "df", "col1", "col2", "0", "1", "a", "1", "2", "b", "2", "3", "a", "now", "the", "type", "of", "col2", "is", "category", "{", "a", ":", "1", "b", ":", "2", "}", ">>>", "df2", "=", "pd", ".", "DataFrame", "(", "{", "col1", ":", "[", "1", "2", "3", "]", "col2", ":", "[", "b", "a", "a", "]", "}", ")", ">>>", "apply_cats", "(", "df2", "df", ")", "col1", "col2", "0", "1", "b", "1", "2", "a", "2", "3", "a", "now", "the", "type", "of", "col", "is", "category", "{", "a", ":", "1", "b", ":", "2", "}" ]
python
train
34.552632
ace0/pyrelic
pyrelic/pbc.py
https://github.com/ace0/pyrelic/blob/f23d4e6586674675f72304d5938548267d6413bf/pyrelic/pbc.py#L139-L158
def mul_table(self, other):
    """
    Fast multiplication using the LWNAF precomputation table.
    """
    # Get a BigInt
    other = coerceBigInt(other)
    if not other:
        return NotImplemented
    other %= orderG2()

    # Building the precomputation table, if there is not one already.
    if not self._table:
        self._table = lwnafTable()
        librelic.ep2_mul_pre_lwnaf(byref(self._table), byref(self))

    result = G2Element()
    librelic.ep2_mul_fix_lwnaf(byref(result), byref(self._table), byref(other))
    return result
[ "def", "mul_table", "(", "self", ",", "other", ")", ":", "# Get a BigInt", "other", "=", "coerceBigInt", "(", "other", ")", "if", "not", "other", ":", "return", "NotImplemented", "other", "%=", "orderG2", "(", ")", "# Building the precomputation table, if there is not one already.", "if", "not", "self", ".", "_table", ":", "self", ".", "_table", "=", "lwnafTable", "(", ")", "librelic", ".", "ep2_mul_pre_lwnaf", "(", "byref", "(", "self", ".", "_table", ")", ",", "byref", "(", "self", ")", ")", "result", "=", "G2Element", "(", ")", "librelic", ".", "ep2_mul_fix_lwnaf", "(", "byref", "(", "result", ")", ",", "byref", "(", "self", ".", "_table", ")", ",", "byref", "(", "other", ")", ")", "return", "result" ]
Fast multiplication using the LWNAF precomputation table.
[ "Fast", "multiplication", "using", "a", "the", "LWNAF", "precomputation", "table", "." ]
python
train
30.3
googleapis/google-cloud-python
bigquery_storage/google/cloud/bigquery_storage_v1beta1/reader.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery_storage/google/cloud/bigquery_storage_v1beta1/reader.py#L337-L372
def to_dataframe(self, dtypes=None): """Create a :class:`pandas.DataFrame` of rows in the page.

    This method requires the pandas library to create a data frame and
    the fastavro library to parse row blocks.

    .. warning::
        DATETIME columns are not supported. They are currently
        parsed as strings in the fastavro library.

    Args:
        dtypes ( \
            Map[str, Union[str, pandas.Series.dtype]] \
        ):
            Optional. A dictionary of column names to pandas ``dtype``s. The
            provided ``dtype`` is used when constructing the series for
            the column specified. Otherwise, the default pandas behavior
            is used.

    Returns:
        pandas.DataFrame:
            A data frame of all rows in the stream.
    """
    if pandas is None:
        raise ImportError(_PANDAS_REQUIRED)

    if dtypes is None:
        dtypes = {}

    columns = collections.defaultdict(list)
    for row in self:
        for column in row:
            columns[column].append(row[column])
    for column in dtypes:
        columns[column] = pandas.Series(columns[column], dtype=dtypes[column])
    return pandas.DataFrame(columns, columns=self._column_names)
[ "def", "to_dataframe", "(", "self", ",", "dtypes", "=", "None", ")", ":", "if", "pandas", "is", "None", ":", "raise", "ImportError", "(", "_PANDAS_REQUIRED", ")", "if", "dtypes", "is", "None", ":", "dtypes", "=", "{", "}", "columns", "=", "collections", ".", "defaultdict", "(", "list", ")", "for", "row", "in", "self", ":", "for", "column", "in", "row", ":", "columns", "[", "column", "]", ".", "append", "(", "row", "[", "column", "]", ")", "for", "column", "in", "dtypes", ":", "columns", "[", "column", "]", "=", "pandas", ".", "Series", "(", "columns", "[", "column", "]", ",", "dtype", "=", "dtypes", "[", "column", "]", ")", "return", "pandas", ".", "DataFrame", "(", "columns", ",", "columns", "=", "self", ".", "_column_names", ")" ]
Create a :class:`pandas.DataFrame` of rows in the page. This method requires the pandas library to create a data frame and the fastavro library to parse row blocks. .. warning:: DATETIME columns are not supported. They are currently parsed as strings in the fastavro library. Args: dtypes ( \ Map[str, Union[str, pandas.Series.dtype]] \ ): Optional. A dictionary of column names pandas ``dtype``s. The provided ``dtype`` is used when constructing the series for the column specified. Otherwise, the default pandas behavior is used. Returns: pandas.DataFrame: A data frame of all rows in the stream.
[ "Create", "a", ":", "class", ":", "pandas", ".", "DataFrame", "of", "rows", "in", "the", "page", "." ]
python
train
35.777778
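A brief, hypothetical usage sketch for the to_dataframe method above. The reader, session, and the "ts" column name are placeholders, and the exact way pages are obtained may differ slightly between library versions.

# Assuming `reader` is a ReadRowsStream and `session` its matching read session:
rows = reader.rows(session)
for page in rows.pages:
    # Force one column to a specific dtype; other columns keep pandas defaults.
    df = page.to_dataframe(dtypes={"ts": "datetime64[ns]"})
    print(df.head())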
Autodesk/aomi
aomi/vault.py
https://github.com/Autodesk/aomi/blob/84da2dfb0424837adf9c4ddc1aa352e942bb7a4a/aomi/vault.py#L130-L151
def wrap_hvac(msg): """Error catching Vault API wrapper This decorator wraps API interactions with Vault. It will catch and return appropriate error output on common problems. Do we even need this now that we extend the hvac class?""" # pylint: disable=missing-docstring def wrap_call(func): # pylint: disable=missing-docstring def func_wrapper(self, vault_client): try: return func(self, vault_client) except (hvac.exceptions.InvalidRequest, hvac.exceptions.Forbidden) as vault_exception: if vault_exception.errors[0] == 'permission denied': emsg = "Permission denied %s from %s" % (msg, self.path) raise aomi.exceptions.AomiCredentials(emsg) else: raise return func_wrapper return wrap_call
[ "def", "wrap_hvac", "(", "msg", ")", ":", "# pylint: disable=missing-docstring", "def", "wrap_call", "(", "func", ")", ":", "# pylint: disable=missing-docstring", "def", "func_wrapper", "(", "self", ",", "vault_client", ")", ":", "try", ":", "return", "func", "(", "self", ",", "vault_client", ")", "except", "(", "hvac", ".", "exceptions", ".", "InvalidRequest", ",", "hvac", ".", "exceptions", ".", "Forbidden", ")", "as", "vault_exception", ":", "if", "vault_exception", ".", "errors", "[", "0", "]", "==", "'permission denied'", ":", "emsg", "=", "\"Permission denied %s from %s\"", "%", "(", "msg", ",", "self", ".", "path", ")", "raise", "aomi", ".", "exceptions", ".", "AomiCredentials", "(", "emsg", ")", "else", ":", "raise", "return", "func_wrapper", "return", "wrap_call" ]
Error catching Vault API wrapper This decorator wraps API interactions with Vault. It will catch and return appropriate error output on common problems. Do we even need this now that we extend the hvac class?
[ "Error", "catching", "Vault", "API", "wrapper", "This", "decorator", "wraps", "API", "interactions", "with", "Vault", ".", "It", "will", "catch", "and", "return", "appropriate", "error", "output", "on", "common", "problems", ".", "Do", "we", "even", "need", "this", "now", "that", "we", "extend", "the", "hvac", "class?" ]
python
train
40
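A small, hypothetical sketch of how a decorator like wrap_hvac above is typically applied. The SecretWriter class, its path attribute, and the secret value are invented for illustration.

class SecretWriter(object):
    path = 'secret/example'

    @wrap_hvac('writing a secret')
    def write(self, vault_client):
        # An hvac InvalidRequest/Forbidden 'permission denied' error raised here
        # is re-raised as aomi.exceptions.AomiCredentials with a readable message.
        return vault_client.write(self.path, value='hunter2')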
openfisca/openfisca-survey-manager
openfisca_survey_manager/scenarios.py
https://github.com/openfisca/openfisca-survey-manager/blob/bed6c65dc5e4ec2bdc9cda5b865fefd9e3d0c358/openfisca_survey_manager/scenarios.py#L1071-L1182
def summarize_variable(self, variable = None, use_baseline = False, weighted = False, force_compute = False): """ Prints a summary of a variable including its memory usage. :param string variable: the variable being summarized :param bool use_baseline: the tax-benefit-system considered :param bool weighted: whether the produced statistics should be weigthted or not :param bool force_compute: whether the computation of the variable should be forced Example: >>> from openfisca_survey_manager.tests.test_scenario import create_randomly_initialized_survey_scenario >>> survey_scenario = create_randomly_initialized_survey_scenario() >>> survey_scenario.summarize_variable(variable = "housing_occupancy_status", force_compute = True) <BLANKLINE> housing_occupancy_status: 1 periods * 5 cells * item size 2 (<type 'numpy.int16'>, default = HousingOccupancyStatus.tenant) = 10B Details: 2017-01: owner = 0.00e+00 (0.0%), tenant = 5.00e+00 (100.0%), free_lodger = 0.00e+00 (0.0%), homeless = 0.00e+00 (0.0%). >>> survey_scenario.summarize_variable(variable = "rent", force_compute = True) <BLANKLINE> rent: 2 periods * 5 cells * item size 4 (<type 'numpy.float32'>, default = 0) = 40B Details: 2017-01: mean = 562.385070801, min = 156.01864624, max = 950.714294434, mass = 2.81e+03, default = 0.0%, median = 598.658508301 2018-01: mean = 562.385070801, min = 156.01864624, max = 950.714294434, mass = 2.81e+03, default = 0.0%, median = 598.658508301 """ if use_baseline: simulation = self.baseline_simulation else: simulation = self.simulation tax_benefit_system = simulation.tax_benefit_system assert variable in tax_benefit_system.variables, "{} is not a valid variable".format(variable) variable_instance = tax_benefit_system.variables[variable] default_value = variable_instance.default_value value_type = variable_instance.value_type if weighted: weight_variable = self.weight_column_name_by_entity[variable_instance.entity.key] weights = simulation.calculate(weight_variable, simulation.period) infos = simulation.get_memory_usage(variables = [variable])['by_variable'].get(variable) if not infos: if force_compute: self.calculate_variable(variable = variable, period = simulation.period, use_baseline = use_baseline) self.summarize_variable(variable = variable, use_baseline = use_baseline, weighted = weighted) return else: print("{} is not computed yet. 
Use keyword argument force_compute = True".format(variable)) return header_line = "{}: {} periods * {} cells * item size {} ({}, default = {}) = {}".format( variable, infos['nb_arrays'], infos['nb_cells_by_array'], infos['cell_size'], infos['dtype'], default_value, humanize.naturalsize(infos['total_nb_bytes'], gnu = True), ) print("") print(header_line) print("Details:") holder = simulation.get_holder(variable) if holder is not None: if holder.variable.definition_period == ETERNITY: array = holder.get_array(ETERNITY) print("permanent: mean = {}, min = {}, max = {}, median = {}, default = {:.1%}".format( array.mean() if not weighted else np.average(array, weights = weights), array.min(), array.max(), np.median(array), ( (array == default_value).sum() / len(array) if not weighted else ((array == default_value) * weights).sum() / weights.sum() ) )) else: for period in sorted(holder.get_known_periods()): array = holder.get_array(period) if array.shape == (): print("{}: always = {}".format(period, array)) continue if value_type == Enum: possible_values = variable_instance.possible_values categories_by_index = dict(zip( range(len(possible_values._member_names_)), possible_values._member_names_ )) categories_type = pd.api.types.CategoricalDtype(categories = possible_values._member_names_, ordered = True) df = pd.DataFrame({variable: array}).replace(categories_by_index).astype(categories_type) df['weights'] = weights if weighted else 1 groupby = df.groupby(variable)['weights'].sum() total = groupby.sum() expr = [" {} = {:.2e} ({:.1%})".format(index, row, row / total) for index, row in groupby.iteritems()] print("{}:{}.".format(period, ",".join(expr))) continue print("{}: mean = {}, min = {}, max = {}, mass = {:.2e}, default = {:.1%}, median = {}".format( period, array.mean() if not weighted else np.average(array, weights = weights), array.min(), array.max(), array.sum() if not weighted else np.sum(array * weights), ( (array == default_value).sum() / len(array) if not weighted else ((array == default_value) * weights).sum() / weights.sum() ), np.median(array), ))
[ "def", "summarize_variable", "(", "self", ",", "variable", "=", "None", ",", "use_baseline", "=", "False", ",", "weighted", "=", "False", ",", "force_compute", "=", "False", ")", ":", "if", "use_baseline", ":", "simulation", "=", "self", ".", "baseline_simulation", "else", ":", "simulation", "=", "self", ".", "simulation", "tax_benefit_system", "=", "simulation", ".", "tax_benefit_system", "assert", "variable", "in", "tax_benefit_system", ".", "variables", ",", "\"{} is not a valid variable\"", ".", "format", "(", "variable", ")", "variable_instance", "=", "tax_benefit_system", ".", "variables", "[", "variable", "]", "default_value", "=", "variable_instance", ".", "default_value", "value_type", "=", "variable_instance", ".", "value_type", "if", "weighted", ":", "weight_variable", "=", "self", ".", "weight_column_name_by_entity", "[", "variable_instance", ".", "entity", ".", "key", "]", "weights", "=", "simulation", ".", "calculate", "(", "weight_variable", ",", "simulation", ".", "period", ")", "infos", "=", "simulation", ".", "get_memory_usage", "(", "variables", "=", "[", "variable", "]", ")", "[", "'by_variable'", "]", ".", "get", "(", "variable", ")", "if", "not", "infos", ":", "if", "force_compute", ":", "self", ".", "calculate_variable", "(", "variable", "=", "variable", ",", "period", "=", "simulation", ".", "period", ",", "use_baseline", "=", "use_baseline", ")", "self", ".", "summarize_variable", "(", "variable", "=", "variable", ",", "use_baseline", "=", "use_baseline", ",", "weighted", "=", "weighted", ")", "return", "else", ":", "print", "(", "\"{} is not computed yet. Use keyword argument force_compute = True\"", ".", "format", "(", "variable", ")", ")", "return", "header_line", "=", "\"{}: {} periods * {} cells * item size {} ({}, default = {}) = {}\"", ".", "format", "(", "variable", ",", "infos", "[", "'nb_arrays'", "]", ",", "infos", "[", "'nb_cells_by_array'", "]", ",", "infos", "[", "'cell_size'", "]", ",", "infos", "[", "'dtype'", "]", ",", "default_value", ",", "humanize", ".", "naturalsize", "(", "infos", "[", "'total_nb_bytes'", "]", ",", "gnu", "=", "True", ")", ",", ")", "print", "(", "\"\"", ")", "print", "(", "header_line", ")", "print", "(", "\"Details:\"", ")", "holder", "=", "simulation", ".", "get_holder", "(", "variable", ")", "if", "holder", "is", "not", "None", ":", "if", "holder", ".", "variable", ".", "definition_period", "==", "ETERNITY", ":", "array", "=", "holder", ".", "get_array", "(", "ETERNITY", ")", "print", "(", "\"permanent: mean = {}, min = {}, max = {}, median = {}, default = {:.1%}\"", ".", "format", "(", "array", ".", "mean", "(", ")", "if", "not", "weighted", "else", "np", ".", "average", "(", "array", ",", "weights", "=", "weights", ")", ",", "array", ".", "min", "(", ")", ",", "array", ".", "max", "(", ")", ",", "np", ".", "median", "(", "array", ")", ",", "(", "(", "array", "==", "default_value", ")", ".", "sum", "(", ")", "/", "len", "(", "array", ")", "if", "not", "weighted", "else", "(", "(", "array", "==", "default_value", ")", "*", "weights", ")", ".", "sum", "(", ")", "/", "weights", ".", "sum", "(", ")", ")", ")", ")", "else", ":", "for", "period", "in", "sorted", "(", "holder", ".", "get_known_periods", "(", ")", ")", ":", "array", "=", "holder", ".", "get_array", "(", "period", ")", "if", "array", ".", "shape", "==", "(", ")", ":", "print", "(", "\"{}: always = {}\"", ".", "format", "(", "period", ",", "array", ")", ")", "continue", "if", "value_type", "==", "Enum", ":", "possible_values", "=", "variable_instance", ".", 
"possible_values", "categories_by_index", "=", "dict", "(", "zip", "(", "range", "(", "len", "(", "possible_values", ".", "_member_names_", ")", ")", ",", "possible_values", ".", "_member_names_", ")", ")", "categories_type", "=", "pd", ".", "api", ".", "types", ".", "CategoricalDtype", "(", "categories", "=", "possible_values", ".", "_member_names_", ",", "ordered", "=", "True", ")", "df", "=", "pd", ".", "DataFrame", "(", "{", "variable", ":", "array", "}", ")", ".", "replace", "(", "categories_by_index", ")", ".", "astype", "(", "categories_type", ")", "df", "[", "'weights'", "]", "=", "weights", "if", "weighted", "else", "1", "groupby", "=", "df", ".", "groupby", "(", "variable", ")", "[", "'weights'", "]", ".", "sum", "(", ")", "total", "=", "groupby", ".", "sum", "(", ")", "expr", "=", "[", "\" {} = {:.2e} ({:.1%})\"", ".", "format", "(", "index", ",", "row", ",", "row", "/", "total", ")", "for", "index", ",", "row", "in", "groupby", ".", "iteritems", "(", ")", "]", "print", "(", "\"{}:{}.\"", ".", "format", "(", "period", ",", "\",\"", ".", "join", "(", "expr", ")", ")", ")", "continue", "print", "(", "\"{}: mean = {}, min = {}, max = {}, mass = {:.2e}, default = {:.1%}, median = {}\"", ".", "format", "(", "period", ",", "array", ".", "mean", "(", ")", "if", "not", "weighted", "else", "np", ".", "average", "(", "array", ",", "weights", "=", "weights", ")", ",", "array", ".", "min", "(", ")", ",", "array", ".", "max", "(", ")", ",", "array", ".", "sum", "(", ")", "if", "not", "weighted", "else", "np", ".", "sum", "(", "array", "*", "weights", ")", ",", "(", "(", "array", "==", "default_value", ")", ".", "sum", "(", ")", "/", "len", "(", "array", ")", "if", "not", "weighted", "else", "(", "(", "array", "==", "default_value", ")", "*", "weights", ")", ".", "sum", "(", ")", "/", "weights", ".", "sum", "(", ")", ")", ",", "np", ".", "median", "(", "array", ")", ",", ")", ")" ]
Prints a summary of a variable including its memory usage. :param string variable: the variable being summarized :param bool use_baseline: the tax-benefit-system considered :param bool weighted: whether the produced statistics should be weigthted or not :param bool force_compute: whether the computation of the variable should be forced Example: >>> from openfisca_survey_manager.tests.test_scenario import create_randomly_initialized_survey_scenario >>> survey_scenario = create_randomly_initialized_survey_scenario() >>> survey_scenario.summarize_variable(variable = "housing_occupancy_status", force_compute = True) <BLANKLINE> housing_occupancy_status: 1 periods * 5 cells * item size 2 (<type 'numpy.int16'>, default = HousingOccupancyStatus.tenant) = 10B Details: 2017-01: owner = 0.00e+00 (0.0%), tenant = 5.00e+00 (100.0%), free_lodger = 0.00e+00 (0.0%), homeless = 0.00e+00 (0.0%). >>> survey_scenario.summarize_variable(variable = "rent", force_compute = True) <BLANKLINE> rent: 2 periods * 5 cells * item size 4 (<type 'numpy.float32'>, default = 0) = 40B Details: 2017-01: mean = 562.385070801, min = 156.01864624, max = 950.714294434, mass = 2.81e+03, default = 0.0%, median = 598.658508301 2018-01: mean = 562.385070801, min = 156.01864624, max = 950.714294434, mass = 2.81e+03, default = 0.0%, median = 598.658508301
[ "Prints", "a", "summary", "of", "a", "variable", "including", "its", "memory", "usage", "." ]
python
train
53.678571
lowandrew/OLCTools
spadespipeline/typingclasses.py
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/typingclasses.py#L31-L42
def main(self): """ Run the necessary methods in the correct order """ if not os.path.isfile(self.gdcs_report): logging.info('Starting {} analysis pipeline'.format(self.analysistype)) # Run the analyses ShortKSippingMethods(self, self.cutoff) # Create the reports self.reporter() else: self.report_parse()
[ "def", "main", "(", "self", ")", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "self", ".", "gdcs_report", ")", ":", "logging", ".", "info", "(", "'Starting {} analysis pipeline'", ".", "format", "(", "self", ".", "analysistype", ")", ")", "# Run the analyses", "ShortKSippingMethods", "(", "self", ",", "self", ".", "cutoff", ")", "# Create the reports", "self", ".", "reporter", "(", ")", "else", ":", "self", ".", "report_parse", "(", ")" ]
Run the necessary methods in the correct order
[ "Run", "the", "necessary", "methods", "in", "the", "correct", "order" ]
python
train
33.833333
Azure/azure-cosmos-python
azure/cosmos/auth.py
https://github.com/Azure/azure-cosmos-python/blob/dd01b3c5d308c6da83cfcaa0ab7083351a476353/azure/cosmos/auth.py#L69-L114
def __GetAuthorizationTokenUsingMasterKey(verb, resource_id_or_fullname, resource_type, headers, master_key): """Gets the authorization token using `master_key. :param str verb: :param str resource_id_or_fullname: :param str resource_type: :param dict headers: :param str master_key: :return: The authorization token. :rtype: dict """ # decodes the master key which is encoded in base64 key = base64.b64decode(master_key) # Skipping lower casing of resource_id_or_fullname since it may now contain "ID" of the resource as part of the fullname text = '{verb}\n{resource_type}\n{resource_id_or_fullname}\n{x_date}\n{http_date}\n'.format( verb=(verb.lower() or ''), resource_type=(resource_type.lower() or ''), resource_id_or_fullname=(resource_id_or_fullname or ''), x_date=headers.get(http_constants.HttpHeaders.XDate, '').lower(), http_date=headers.get(http_constants.HttpHeaders.HttpDate, '').lower()) if six.PY2: body = text.decode('utf-8') digest = hmac.new(key, body, sha256).digest() signature = digest.encode('base64') else: # python 3 support body = text.encode('utf-8') digest = hmac.new(key, body, sha256).digest() signature = base64.encodebytes(digest).decode('utf-8') master_token = 'master' token_version = '1.0' return 'type={type}&ver={ver}&sig={sig}'.format(type=master_token, ver=token_version, sig=signature[:-1])
[ "def", "__GetAuthorizationTokenUsingMasterKey", "(", "verb", ",", "resource_id_or_fullname", ",", "resource_type", ",", "headers", ",", "master_key", ")", ":", "# decodes the master key which is encoded in base64 ", "key", "=", "base64", ".", "b64decode", "(", "master_key", ")", "# Skipping lower casing of resource_id_or_fullname since it may now contain \"ID\" of the resource as part of the fullname", "text", "=", "'{verb}\\n{resource_type}\\n{resource_id_or_fullname}\\n{x_date}\\n{http_date}\\n'", ".", "format", "(", "verb", "=", "(", "verb", ".", "lower", "(", ")", "or", "''", ")", ",", "resource_type", "=", "(", "resource_type", ".", "lower", "(", ")", "or", "''", ")", ",", "resource_id_or_fullname", "=", "(", "resource_id_or_fullname", "or", "''", ")", ",", "x_date", "=", "headers", ".", "get", "(", "http_constants", ".", "HttpHeaders", ".", "XDate", ",", "''", ")", ".", "lower", "(", ")", ",", "http_date", "=", "headers", ".", "get", "(", "http_constants", ".", "HttpHeaders", ".", "HttpDate", ",", "''", ")", ".", "lower", "(", ")", ")", "if", "six", ".", "PY2", ":", "body", "=", "text", ".", "decode", "(", "'utf-8'", ")", "digest", "=", "hmac", ".", "new", "(", "key", ",", "body", ",", "sha256", ")", ".", "digest", "(", ")", "signature", "=", "digest", ".", "encode", "(", "'base64'", ")", "else", ":", "# python 3 support", "body", "=", "text", ".", "encode", "(", "'utf-8'", ")", "digest", "=", "hmac", ".", "new", "(", "key", ",", "body", ",", "sha256", ")", ".", "digest", "(", ")", "signature", "=", "base64", ".", "encodebytes", "(", "digest", ")", ".", "decode", "(", "'utf-8'", ")", "master_token", "=", "'master'", "token_version", "=", "'1.0'", "return", "'type={type}&ver={ver}&sig={sig}'", ".", "format", "(", "type", "=", "master_token", ",", "ver", "=", "token_version", ",", "sig", "=", "signature", "[", ":", "-", "1", "]", ")" ]
Gets the authorization token using `master_key`. :param str verb: :param str resource_id_or_fullname: :param str resource_type: :param dict headers: :param str master_key: :return: The authorization token. :rtype: str
[ "Gets", "the", "authorization", "token", "using", "master_key", "." ]
python
train
38.021739
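For reference, a trimmed Python 3-only sketch of the signing recipe used by __GetAuthorizationTokenUsingMasterKey above (the Python 2 branch dropped, the HTTP date field left empty as is typical when x-ms-date is set); it returns just the raw signature rather than the full 'type=...&ver=...&sig=...' token.

import base64
import hmac
from hashlib import sha256

def sign_request(verb, resource_type, resource_link, x_date, master_key):
    key = base64.b64decode(master_key)
    # verb \n resource_type \n resource link \n x-ms-date \n (empty http date) \n
    text = '{}\n{}\n{}\n{}\n\n'.format(
        verb.lower(), resource_type.lower(), resource_link, x_date.lower())
    digest = hmac.new(key, text.encode('utf-8'), sha256).digest()
    # encodebytes appends a trailing newline, which the original code slices off.
    return base64.encodebytes(digest).decode('utf-8')[:-1]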
lowandrew/OLCTools
spadespipeline/mMLST.py
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/mMLST.py#L132-L153
def makedbthreads(self, folder): """ Setup and create threads for class :param folder: folder with sequence files with which to create blast databases """ # Create and start threads for each fasta file in the list for i in range(len(folder)): # Send the threads to makeblastdb threads = Thread(target=self.makeblastdb, args=()) # Set the daemon to true - something to do with thread management threads.setDaemon(True) # Start the threading threads.start() # Make blast databases for MLST files (if necessary) for alleledir in folder: # List comprehension to remove any previously created database files from list allelefiles = glob('{}/*.fasta'.format(alleledir)) # For each allele file for allelefile in allelefiles: # Add the fasta file to the queue self.dqueue.put(allelefile) self.dqueue.join()
[ "def", "makedbthreads", "(", "self", ",", "folder", ")", ":", "# Create and start threads for each fasta file in the list", "for", "i", "in", "range", "(", "len", "(", "folder", ")", ")", ":", "# Send the threads to makeblastdb", "threads", "=", "Thread", "(", "target", "=", "self", ".", "makeblastdb", ",", "args", "=", "(", ")", ")", "# Set the daemon to true - something to do with thread management", "threads", ".", "setDaemon", "(", "True", ")", "# Start the threading", "threads", ".", "start", "(", ")", "# Make blast databases for MLST files (if necessary)", "for", "alleledir", "in", "folder", ":", "# List comprehension to remove any previously created database files from list", "allelefiles", "=", "glob", "(", "'{}/*.fasta'", ".", "format", "(", "alleledir", ")", ")", "# For each allele file", "for", "allelefile", "in", "allelefiles", ":", "# Add the fasta file to the queue", "self", ".", "dqueue", ".", "put", "(", "allelefile", ")", "self", ".", "dqueue", ".", "join", "(", ")" ]
Setup and create threads for class :param folder: folder with sequence files with which to create blast databases
[ "Setup", "and", "create", "threads", "for", "class", ":", "param", "folder", ":", "folder", "with", "sequence", "files", "with", "which", "to", "create", "blast", "databases" ]
python
train
45.5
numenta/htmresearch
htmresearch/frameworks/layers/physical_objects.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/layers/physical_objects.py#L429-L444
def sampleLocationFromFeature(self, feature): """ Samples a location from one specific feature. This is only supported with three dimensions. """ if feature == "face": return self._sampleFromFaces() elif feature == "edge": return self._sampleFromEdges() elif feature == "vertex": return self._sampleFromVertices() elif feature == "random": return self.sampleLocation() else: raise NameError("No such feature in {}: {}".format(self, feature))
[ "def", "sampleLocationFromFeature", "(", "self", ",", "feature", ")", ":", "if", "feature", "==", "\"face\"", ":", "return", "self", ".", "_sampleFromFaces", "(", ")", "elif", "feature", "==", "\"edge\"", ":", "return", "self", ".", "_sampleFromEdges", "(", ")", "elif", "feature", "==", "\"vertex\"", ":", "return", "self", ".", "_sampleFromVertices", "(", ")", "elif", "feature", "==", "\"random\"", ":", "return", "self", ".", "sampleLocation", "(", ")", "else", ":", "raise", "NameError", "(", "\"No such feature in {}: {}\"", ".", "format", "(", "self", ",", "feature", ")", ")" ]
Samples a location from one specific feature. This is only supported with three dimensions.
[ "Samples", "a", "location", "from", "one", "specific", "feature", "." ]
python
train
30.8125
lsbardel/python-stdnet
stdnet/backends/redisb/__init__.py
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/backends/redisb/__init__.py#L346-L363
def related_lua_args(self): '''Generator of load_related arguments''' related = self.queryelem.select_related if related: meta = self.meta for rel in related: field = meta.dfields[rel] relmodel = field.relmodel bk = self.backend.basekey(relmodel._meta) if relmodel else '' fields = list(related[rel]) if meta.pkname() in fields: fields.remove(meta.pkname()) if not fields: fields.append('') ftype = field.type if field in meta.multifields else '' data = {'field': field.attname, 'type': ftype, 'bk': bk, 'fields': fields} yield field.name, data
[ "def", "related_lua_args", "(", "self", ")", ":", "related", "=", "self", ".", "queryelem", ".", "select_related", "if", "related", ":", "meta", "=", "self", ".", "meta", "for", "rel", "in", "related", ":", "field", "=", "meta", ".", "dfields", "[", "rel", "]", "relmodel", "=", "field", ".", "relmodel", "bk", "=", "self", ".", "backend", ".", "basekey", "(", "relmodel", ".", "_meta", ")", "if", "relmodel", "else", "''", "fields", "=", "list", "(", "related", "[", "rel", "]", ")", "if", "meta", ".", "pkname", "(", ")", "in", "fields", ":", "fields", ".", "remove", "(", "meta", ".", "pkname", "(", ")", ")", "if", "not", "fields", ":", "fields", ".", "append", "(", "''", ")", "ftype", "=", "field", ".", "type", "if", "field", "in", "meta", ".", "multifields", "else", "''", "data", "=", "{", "'field'", ":", "field", ".", "attname", ",", "'type'", ":", "ftype", ",", "'bk'", ":", "bk", ",", "'fields'", ":", "fields", "}", "yield", "field", ".", "name", ",", "data" ]
Generator of load_related arguments
[ "Generator", "of", "load_related", "arguments" ]
python
train
44.888889
secynic/ipwhois
ipwhois/scripts/ipwhois_cli.py
https://github.com/secynic/ipwhois/blob/b5d634d36b0b942d538d38d77b3bdcd815f155a0/ipwhois/scripts/ipwhois_cli.py#L762-L840
def generate_output_network(self, json_data=None, hr=True, show_name=False, colorize=True): """ The function for generating CLI output RDAP network results. Args: json_data (:obj:`dict`): The data to process. Defaults to None. hr (:obj:`bool`): Enable human readable key translations. Defaults to True. show_name (:obj:`bool`): Show human readable name (default is to only show short). Defaults to False. colorize (:obj:`bool`): Colorize the console output with ANSI colors. Defaults to True. Returns: str: The generated output. """ if json_data is None: json_data = {} output = generate_output( line='0', short=HR_RDAP['network']['_short'] if hr else 'network', name=HR_RDAP['network']['_name'] if (hr and show_name) else None, is_parent=True, colorize=colorize ) for key, val in json_data['network'].items(): if key in ['links', 'status']: output += self.generate_output_list( source='network', key=key, val=val, line='1', hr=hr, show_name=show_name, colorize=colorize ) elif key in ['notices', 'remarks']: output += self.generate_output_notices( source='network', key=key, val=val, line='1', hr=hr, show_name=show_name, colorize=colorize ) elif key == 'events': output += self.generate_output_events( source='network', key=key, val=val, line='1', hr=hr, show_name=show_name, colorize=colorize ) elif key not in ['raw']: output += generate_output( line='1', short=HR_RDAP['network'][key]['_short'] if hr else key, name=HR_RDAP['network'][key]['_name'] if ( hr and show_name) else None, value=val, colorize=colorize ) return output
[ "def", "generate_output_network", "(", "self", ",", "json_data", "=", "None", ",", "hr", "=", "True", ",", "show_name", "=", "False", ",", "colorize", "=", "True", ")", ":", "if", "json_data", "is", "None", ":", "json_data", "=", "{", "}", "output", "=", "generate_output", "(", "line", "=", "'0'", ",", "short", "=", "HR_RDAP", "[", "'network'", "]", "[", "'_short'", "]", "if", "hr", "else", "'network'", ",", "name", "=", "HR_RDAP", "[", "'network'", "]", "[", "'_name'", "]", "if", "(", "hr", "and", "show_name", ")", "else", "None", ",", "is_parent", "=", "True", ",", "colorize", "=", "colorize", ")", "for", "key", ",", "val", "in", "json_data", "[", "'network'", "]", ".", "items", "(", ")", ":", "if", "key", "in", "[", "'links'", ",", "'status'", "]", ":", "output", "+=", "self", ".", "generate_output_list", "(", "source", "=", "'network'", ",", "key", "=", "key", ",", "val", "=", "val", ",", "line", "=", "'1'", ",", "hr", "=", "hr", ",", "show_name", "=", "show_name", ",", "colorize", "=", "colorize", ")", "elif", "key", "in", "[", "'notices'", ",", "'remarks'", "]", ":", "output", "+=", "self", ".", "generate_output_notices", "(", "source", "=", "'network'", ",", "key", "=", "key", ",", "val", "=", "val", ",", "line", "=", "'1'", ",", "hr", "=", "hr", ",", "show_name", "=", "show_name", ",", "colorize", "=", "colorize", ")", "elif", "key", "==", "'events'", ":", "output", "+=", "self", ".", "generate_output_events", "(", "source", "=", "'network'", ",", "key", "=", "key", ",", "val", "=", "val", ",", "line", "=", "'1'", ",", "hr", "=", "hr", ",", "show_name", "=", "show_name", ",", "colorize", "=", "colorize", ")", "elif", "key", "not", "in", "[", "'raw'", "]", ":", "output", "+=", "generate_output", "(", "line", "=", "'1'", ",", "short", "=", "HR_RDAP", "[", "'network'", "]", "[", "key", "]", "[", "'_short'", "]", "if", "hr", "else", "key", ",", "name", "=", "HR_RDAP", "[", "'network'", "]", "[", "key", "]", "[", "'_name'", "]", "if", "(", "hr", "and", "show_name", ")", "else", "None", ",", "value", "=", "val", ",", "colorize", "=", "colorize", ")", "return", "output" ]
The function for generating CLI output RDAP network results. Args: json_data (:obj:`dict`): The data to process. Defaults to None. hr (:obj:`bool`): Enable human readable key translations. Defaults to True. show_name (:obj:`bool`): Show human readable name (default is to only show short). Defaults to False. colorize (:obj:`bool`): Colorize the console output with ANSI colors. Defaults to True. Returns: str: The generated output.
[ "The", "function", "for", "generating", "CLI", "output", "RDAP", "network", "results", "." ]
python
train
31.392405
juga0/dhcpcanon
dhcpcanon/dhcpcapfsm.py
https://github.com/juga0/dhcpcanon/blob/9f51a29e57fe93dc93fb22bb0ed12fcfe9557e59/dhcpcanon/dhcpcapfsm.py#L48-L70
def reset(self, iface=None, client_mac=None, xid=None, scriptfile=None): """Reset object attributes when state is INIT.""" logger.debug('Reseting attributes.') if iface is None: iface = conf.iface if client_mac is None: # scapy for python 3 returns byte, not tuple tempmac = get_if_raw_hwaddr(iface) if isinstance(tempmac, tuple) and len(tempmac) == 2: mac = tempmac[1] else: mac = tempmac client_mac = str2mac(mac) self.client = DHCPCAP(iface=iface, client_mac=client_mac, xid=xid) if scriptfile is not None: self.script = ClientScript(scriptfile) else: self.script = None self.time_sent_request = None self.discover_attempts = 0 self.request_attempts = 0 self.current_state = STATE_PREINIT self.offers = list()
[ "def", "reset", "(", "self", ",", "iface", "=", "None", ",", "client_mac", "=", "None", ",", "xid", "=", "None", ",", "scriptfile", "=", "None", ")", ":", "logger", ".", "debug", "(", "'Reseting attributes.'", ")", "if", "iface", "is", "None", ":", "iface", "=", "conf", ".", "iface", "if", "client_mac", "is", "None", ":", "# scapy for python 3 returns byte, not tuple", "tempmac", "=", "get_if_raw_hwaddr", "(", "iface", ")", "if", "isinstance", "(", "tempmac", ",", "tuple", ")", "and", "len", "(", "tempmac", ")", "==", "2", ":", "mac", "=", "tempmac", "[", "1", "]", "else", ":", "mac", "=", "tempmac", "client_mac", "=", "str2mac", "(", "mac", ")", "self", ".", "client", "=", "DHCPCAP", "(", "iface", "=", "iface", ",", "client_mac", "=", "client_mac", ",", "xid", "=", "xid", ")", "if", "scriptfile", "is", "not", "None", ":", "self", ".", "script", "=", "ClientScript", "(", "scriptfile", ")", "else", ":", "self", ".", "script", "=", "None", "self", ".", "time_sent_request", "=", "None", "self", ".", "discover_attempts", "=", "0", "self", ".", "request_attempts", "=", "0", "self", ".", "current_state", "=", "STATE_PREINIT", "self", ".", "offers", "=", "list", "(", ")" ]
Reset object attributes when state is INIT.
[ "Reset", "object", "attributes", "when", "state", "is", "INIT", "." ]
python
test
39.73913
neo4j-drivers/neobolt
neobolt/impl/python/routing.py
https://github.com/neo4j-drivers/neobolt/blob/724569d76e85777c4f5e30e8d0a18116bda4d8cd/neobolt/impl/python/routing.py#L403-L408
def remove_writer(self, address): """ Remove a writer address from the routing table, if present. """ log_debug("[#0000] C: <ROUTING> Removing writer %r", address) self.routing_table.writers.discard(address) log_debug("[#0000] C: <ROUTING> table=%r", self.routing_table)
[ "def", "remove_writer", "(", "self", ",", "address", ")", ":", "log_debug", "(", "\"[#0000] C: <ROUTING> Removing writer %r\"", ",", "address", ")", "self", ".", "routing_table", ".", "writers", ".", "discard", "(", "address", ")", "log_debug", "(", "\"[#0000] C: <ROUTING> table=%r\"", ",", "self", ".", "routing_table", ")" ]
Remove a writer address from the routing table, if present.
[ "Remove", "a", "writer", "address", "from", "the", "routing", "table", "if", "present", "." ]
python
train
51.166667
calvinku96/labreporthelper
labreporthelper/bestfit/bestfit.py
https://github.com/calvinku96/labreporthelper/blob/4d436241f389c02eb188c313190df62ab28c3763/labreporthelper/bestfit/bestfit.py#L108-L128
def get_rmse(self, data_x=None, data_y=None): """ Get Root Mean Square Error using self.bestfit_func args: x_min: scalar, default=min(x) minimum x value of the line x_max: scalar, default=max(x) maximum x value of the line resolution: int, default=1000 how many steps between x_min and x_max """ if data_x is None: data_x = np.array(self.args["x"]) if data_y is None: data_y = np.array(self.args["y"]) if len(data_x) != len(data_y): raise ValueError("Lengths of data_x and data_y are different") rmse_y = self.bestfit_func(data_x) return np.sqrt(np.mean((rmse_y - data_y) ** 2))
[ "def", "get_rmse", "(", "self", ",", "data_x", "=", "None", ",", "data_y", "=", "None", ")", ":", "if", "data_x", "is", "None", ":", "data_x", "=", "np", ".", "array", "(", "self", ".", "args", "[", "\"x\"", "]", ")", "if", "data_y", "is", "None", ":", "data_y", "=", "np", ".", "array", "(", "self", ".", "args", "[", "\"y\"", "]", ")", "if", "len", "(", "data_x", ")", "!=", "len", "(", "data_y", ")", ":", "raise", "ValueError", "(", "\"Lengths of data_x and data_y are different\"", ")", "rmse_y", "=", "self", ".", "bestfit_func", "(", "data_x", ")", "return", "np", ".", "sqrt", "(", "np", ".", "mean", "(", "(", "rmse_y", "-", "data_y", ")", "**", "2", ")", ")" ]
Get Root Mean Square Error using self.bestfit_func args: x_min: scalar, default=min(x) minimum x value of the line x_max: scalar, default=max(x) maximum x value of the line resolution: int, default=1000 how many steps between x_min and x_max
[ "Get", "Root", "Mean", "Square", "Error", "using", "self", ".", "bestfit_func" ]
python
train
36.142857
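A standalone numpy sketch of the RMSE computation performed by get_rmse above, with a made-up linear bestfit_func and arbitrary data values.

import numpy as np

data_x = np.array([0.0, 1.0, 2.0, 3.0])
data_y = np.array([0.1, 0.9, 2.2, 2.8])
bestfit_func = lambda x: 1.0 * x  # stand-in for the fitted model
rmse = np.sqrt(np.mean((bestfit_func(data_x) - data_y) ** 2))
print(rmse)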
trevisanj/f311
f311/hapi.py
https://github.com/trevisanj/f311/blob/9e502a3d1e1f74d4290a8a0bae9a34ef8d7b29f7/f311/hapi.py#L10086-L10099
def PROFILE_SDRAUTIAN(sg0,GamD,Gam0,Gam2,Shift0,Shift2,anuVC,sg): """ # Speed dependent Rautian profile based on HTP. # Input parameters: # sg0 : Unperturbed line position in cm-1 (Input). # GamD : Doppler HWHM in cm-1 (Input) # Gam0 : Speed-averaged line-width in cm-1 (Input). # Gam2 : Speed dependence of the line-width in cm-1 (Input). # anuVC : Velocity-changing frequency in cm-1 (Input). # Shift0 : Speed-averaged line-shift in cm-1 (Input). # Shift2 : Speed dependence of the line-shift in cm-1 (Input) # sg : Current WaveNumber of the Computation in cm-1 (Input). """ return pcqsdhc(sg0,GamD,Gam0,Gam2,Shift0,Shift2,anuVC,cZero,sg)
[ "def", "PROFILE_SDRAUTIAN", "(", "sg0", ",", "GamD", ",", "Gam0", ",", "Gam2", ",", "Shift0", ",", "Shift2", ",", "anuVC", ",", "sg", ")", ":", "return", "pcqsdhc", "(", "sg0", ",", "GamD", ",", "Gam0", ",", "Gam2", ",", "Shift0", ",", "Shift2", ",", "anuVC", ",", "cZero", ",", "sg", ")" ]
# Speed dependent Rautian profile based on HTP. # Input parameters: # sg0 : Unperturbed line position in cm-1 (Input). # GamD : Doppler HWHM in cm-1 (Input) # Gam0 : Speed-averaged line-width in cm-1 (Input). # Gam2 : Speed dependence of the line-width in cm-1 (Input). # anuVC : Velocity-changing frequency in cm-1 (Input). # Shift0 : Speed-averaged line-shift in cm-1 (Input). # Shift2 : Speed dependence of the line-shift in cm-1 (Input) # sg : Current WaveNumber of the Computation in cm-1 (Input).
[ "#", "Speed", "dependent", "Rautian", "profile", "based", "on", "HTP", ".", "#", "Input", "parameters", ":", "#", "sg0", ":", "Unperturbed", "line", "position", "in", "cm", "-", "1", "(", "Input", ")", ".", "#", "GamD", ":", "Doppler", "HWHM", "in", "cm", "-", "1", "(", "Input", ")", "#", "Gam0", ":", "Speed", "-", "averaged", "line", "-", "width", "in", "cm", "-", "1", "(", "Input", ")", ".", "#", "Gam2", ":", "Speed", "dependence", "of", "the", "line", "-", "width", "in", "cm", "-", "1", "(", "Input", ")", ".", "#", "anuVC", ":", "Velocity", "-", "changing", "frequency", "in", "cm", "-", "1", "(", "Input", ")", ".", "#", "Shift0", ":", "Speed", "-", "averaged", "line", "-", "shift", "in", "cm", "-", "1", "(", "Input", ")", ".", "#", "Shift2", ":", "Speed", "dependence", "of", "the", "line", "-", "shift", "in", "cm", "-", "1", "(", "Input", ")", "#", "sg", ":", "Current", "WaveNumber", "of", "the", "Computation", "in", "cm", "-", "1", "(", "Input", ")", "." ]
python
train
53.928571
Asana/python-asana
asana/resources/gen/teams.py
https://github.com/Asana/python-asana/blob/6deb7a34495db23f44858e53b6bb2c9eccff7872/asana/resources/gen/teams.py#L33-L45
def find_by_user(self, user, params={}, **options): """Returns the compact records for all teams to which user is assigned. Parameters ---------- user : {String} An identifier for the user. Can be one of an email address, the globally unique identifier for the user, or the keyword `me` to indicate the current user making the request. [params] : {Object} Parameters for the request - [organization] : {Id} The workspace or organization to filter teams on. """ path = "/users/%s/teams" % (user) return self.client.get_collection(path, params, **options)
[ "def", "find_by_user", "(", "self", ",", "user", ",", "params", "=", "{", "}", ",", "*", "*", "options", ")", ":", "path", "=", "\"/users/%s/teams\"", "%", "(", "user", ")", "return", "self", ".", "client", ".", "get_collection", "(", "path", ",", "params", ",", "*", "*", "options", ")" ]
Returns the compact records for all teams to which user is assigned. Parameters ---------- user : {String} An identifier for the user. Can be one of an email address, the globally unique identifier for the user, or the keyword `me` to indicate the current user making the request. [params] : {Object} Parameters for the request - [organization] : {Id} The workspace or organization to filter teams on.
[ "Returns", "the", "compact", "records", "for", "all", "teams", "to", "which", "user", "is", "assigned", "." ]
python
train
48.692308
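A hypothetical call of the generated find_by_user method through the Asana client; the access token and organization id are placeholders.

import asana

client = asana.Client.access_token('PERSONAL_ACCESS_TOKEN')
# 'me' resolves to the authenticated user; filter teams to a single organization.
for team in client.teams.find_by_user('me', {'organization': 1234567890}):
    print(team['name'])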
saltstack/salt
salt/cli/daemons.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cli/daemons.py#L570-L588
def start(self): ''' Start the actual syndic. If sub-classed, don't **ever** forget to run: super(YourSubClass, self).start() NOTE: Run any required code before calling `super()`. ''' super(Syndic, self).start() if check_user(self.config['user']): self.action_log_info('Starting up') self.verify_hash_type() try: self.syndic.tune_in() except KeyboardInterrupt: self.action_log_info('Stopping') self.shutdown()
[ "def", "start", "(", "self", ")", ":", "super", "(", "Syndic", ",", "self", ")", ".", "start", "(", ")", "if", "check_user", "(", "self", ".", "config", "[", "'user'", "]", ")", ":", "self", ".", "action_log_info", "(", "'Starting up'", ")", "self", ".", "verify_hash_type", "(", ")", "try", ":", "self", ".", "syndic", ".", "tune_in", "(", ")", "except", "KeyboardInterrupt", ":", "self", ".", "action_log_info", "(", "'Stopping'", ")", "self", ".", "shutdown", "(", ")" ]
Start the actual syndic. If sub-classed, don't **ever** forget to run: super(YourSubClass, self).start() NOTE: Run any required code before calling `super()`.
[ "Start", "the", "actual", "syndic", "." ]
python
train
29.368421
numba/llvmlite
llvmlite/ir/builder.py
https://github.com/numba/llvmlite/blob/fcadf8af11947f3fd041c5d6526c5bf231564883/llvmlite/ir/builder.py#L968-L973
def assume(self, cond): """ Optimizer hint: assume *cond* is always true. """ fn = self.module.declare_intrinsic("llvm.assume") return self.call(fn, [cond])
[ "def", "assume", "(", "self", ",", "cond", ")", ":", "fn", "=", "self", ".", "module", ".", "declare_intrinsic", "(", "\"llvm.assume\"", ")", "return", "self", ".", "call", "(", "fn", ",", "[", "cond", "]", ")" ]
Optimizer hint: assume *cond* is always true.
[ "Optimizer", "hint", ":", "assume", "*", "cond", "*", "is", "always", "true", "." ]
python
train
31.833333
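A minimal llvmlite sketch that emits llvm.assume through the assume helper above; the module and function names are arbitrary.

from llvmlite import ir

module = ir.Module(name="assume_demo")
fnty = ir.FunctionType(ir.VoidType(), [ir.IntType(1)])
func = ir.Function(module, fnty, name="use_assume")
builder = ir.IRBuilder(func.append_basic_block(name="entry"))
builder.assume(func.args[0])  # hint the optimizer that the i1 argument is true
builder.ret_void()
print(module)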
Tanganelli/CoAPthon3
coapthon/layers/forwardLayer.py
https://github.com/Tanganelli/CoAPthon3/blob/985763bfe2eb9e00f49ec100c5b8877c2ed7d531/coapthon/layers/forwardLayer.py#L83-L119
def _forward_request(transaction, destination, path): """ Forward requests. :type transaction: Transaction :param transaction: the transaction that owns the request :param destination: the destination of the request (IP, port) :param path: the path of the request. :rtype : Transaction :return: the edited transaction """ client = HelperClient(destination) request = Request() request.options = copy.deepcopy(transaction.request.options) del request.block2 del request.block1 del request.uri_path del request.proxy_uri del request.proxy_schema # TODO handle observing del request.observe # request.observe = transaction.request.observe request.uri_path = path request.destination = destination request.payload = transaction.request.payload request.code = transaction.request.code response = client.send_request(request) client.stop() if response is not None: transaction.response.payload = response.payload transaction.response.code = response.code transaction.response.options = response.options else: transaction.response.code = defines.Codes.SERVICE_UNAVAILABLE.number return transaction
[ "def", "_forward_request", "(", "transaction", ",", "destination", ",", "path", ")", ":", "client", "=", "HelperClient", "(", "destination", ")", "request", "=", "Request", "(", ")", "request", ".", "options", "=", "copy", ".", "deepcopy", "(", "transaction", ".", "request", ".", "options", ")", "del", "request", ".", "block2", "del", "request", ".", "block1", "del", "request", ".", "uri_path", "del", "request", ".", "proxy_uri", "del", "request", ".", "proxy_schema", "# TODO handle observing", "del", "request", ".", "observe", "# request.observe = transaction.request.observe", "request", ".", "uri_path", "=", "path", "request", ".", "destination", "=", "destination", "request", ".", "payload", "=", "transaction", ".", "request", ".", "payload", "request", ".", "code", "=", "transaction", ".", "request", ".", "code", "response", "=", "client", ".", "send_request", "(", "request", ")", "client", ".", "stop", "(", ")", "if", "response", "is", "not", "None", ":", "transaction", ".", "response", ".", "payload", "=", "response", ".", "payload", "transaction", ".", "response", ".", "code", "=", "response", ".", "code", "transaction", ".", "response", ".", "options", "=", "response", ".", "options", "else", ":", "transaction", ".", "response", ".", "code", "=", "defines", ".", "Codes", ".", "SERVICE_UNAVAILABLE", ".", "number", "return", "transaction" ]
Forward requests. :type transaction: Transaction :param transaction: the transaction that owns the request :param destination: the destination of the request (IP, port) :param path: the path of the request. :rtype : Transaction :return: the edited transaction
[ "Forward", "requests", "." ]
python
train
36.135135
apache/spark
python/pyspark/mllib/linalg/__init__.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L1328-L1344
def fromML(mat): """ Convert a matrix from the new mllib-local representation. This does NOT copy the data; it copies references. :param mat: a :py:class:`pyspark.ml.linalg.Matrix` :return: a :py:class:`pyspark.mllib.linalg.Matrix` .. versionadded:: 2.0.0 """ if isinstance(mat, newlinalg.DenseMatrix): return DenseMatrix(mat.numRows, mat.numCols, mat.values, mat.isTransposed) elif isinstance(mat, newlinalg.SparseMatrix): return SparseMatrix(mat.numRows, mat.numCols, mat.colPtrs, mat.rowIndices, mat.values, mat.isTransposed) else: raise TypeError("Unsupported matrix type %s" % type(mat))
[ "def", "fromML", "(", "mat", ")", ":", "if", "isinstance", "(", "mat", ",", "newlinalg", ".", "DenseMatrix", ")", ":", "return", "DenseMatrix", "(", "mat", ".", "numRows", ",", "mat", ".", "numCols", ",", "mat", ".", "values", ",", "mat", ".", "isTransposed", ")", "elif", "isinstance", "(", "mat", ",", "newlinalg", ".", "SparseMatrix", ")", ":", "return", "SparseMatrix", "(", "mat", ".", "numRows", ",", "mat", ".", "numCols", ",", "mat", ".", "colPtrs", ",", "mat", ".", "rowIndices", ",", "mat", ".", "values", ",", "mat", ".", "isTransposed", ")", "else", ":", "raise", "TypeError", "(", "\"Unsupported matrix type %s\"", "%", "type", "(", "mat", ")", ")" ]
Convert a matrix from the new mllib-local representation. This does NOT copy the data; it copies references. :param mat: a :py:class:`pyspark.ml.linalg.Matrix` :return: a :py:class:`pyspark.mllib.linalg.Matrix` .. versionadded:: 2.0.0
[ "Convert", "a", "matrix", "from", "the", "new", "mllib", "-", "local", "representation", ".", "This", "does", "NOT", "copy", "the", "data", ";", "it", "copies", "references", "." ]
python
train
42.705882
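A short sketch of converting a matrix from the newer pyspark.ml.linalg representation with fromML above; the values are arbitrary.

from pyspark.ml.linalg import Matrices as MLMatrices
from pyspark.mllib.linalg import Matrices as MLlibMatrices

new_mat = MLMatrices.dense(2, 2, [1.0, 3.0, 2.0, 4.0])  # column-major values
old_mat = MLlibMatrices.fromML(new_mat)                  # copies references, not data
print(type(old_mat).__name__)                            # DenseMatrix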
google/apitools
apitools/base/py/base_api.py
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/base_api.py#L689-L731
def _RunMethod(self, method_config, request, global_params=None, upload=None, upload_config=None, download=None): """Call this method with request.""" if upload is not None and download is not None: # TODO(craigcitro): This just involves refactoring the logic # below into callbacks that we can pass around; in particular, # the order should be that the upload gets the initial request, # and then passes its reply to a download if one exists, and # then that goes to ProcessResponse and is returned. raise exceptions.NotYetImplementedError( 'Cannot yet use both upload and download at once') http_request = self.PrepareHttpRequest( method_config, request, global_params, upload, upload_config, download) # TODO(craigcitro): Make num_retries customizable on Transfer # objects, and pass in self.__client.num_retries when initializing # an upload or download. if download is not None: download.InitializeDownload(http_request, client=self.client) return http_response = None if upload is not None: http_response = upload.InitializeUpload( http_request, client=self.client) if http_response is None: http = self.__client.http if upload and upload.bytes_http: http = upload.bytes_http opts = { 'retries': self.__client.num_retries, 'max_retry_wait': self.__client.max_retry_wait, } if self.__client.check_response_func: opts['check_response_func'] = self.__client.check_response_func if self.__client.retry_func: opts['retry_func'] = self.__client.retry_func http_response = http_wrapper.MakeRequest( http, http_request, **opts) return self.ProcessHttpResponse(method_config, http_response, request)
[ "def", "_RunMethod", "(", "self", ",", "method_config", ",", "request", ",", "global_params", "=", "None", ",", "upload", "=", "None", ",", "upload_config", "=", "None", ",", "download", "=", "None", ")", ":", "if", "upload", "is", "not", "None", "and", "download", "is", "not", "None", ":", "# TODO(craigcitro): This just involves refactoring the logic", "# below into callbacks that we can pass around; in particular,", "# the order should be that the upload gets the initial request,", "# and then passes its reply to a download if one exists, and", "# then that goes to ProcessResponse and is returned.", "raise", "exceptions", ".", "NotYetImplementedError", "(", "'Cannot yet use both upload and download at once'", ")", "http_request", "=", "self", ".", "PrepareHttpRequest", "(", "method_config", ",", "request", ",", "global_params", ",", "upload", ",", "upload_config", ",", "download", ")", "# TODO(craigcitro): Make num_retries customizable on Transfer", "# objects, and pass in self.__client.num_retries when initializing", "# an upload or download.", "if", "download", "is", "not", "None", ":", "download", ".", "InitializeDownload", "(", "http_request", ",", "client", "=", "self", ".", "client", ")", "return", "http_response", "=", "None", "if", "upload", "is", "not", "None", ":", "http_response", "=", "upload", ".", "InitializeUpload", "(", "http_request", ",", "client", "=", "self", ".", "client", ")", "if", "http_response", "is", "None", ":", "http", "=", "self", ".", "__client", ".", "http", "if", "upload", "and", "upload", ".", "bytes_http", ":", "http", "=", "upload", ".", "bytes_http", "opts", "=", "{", "'retries'", ":", "self", ".", "__client", ".", "num_retries", ",", "'max_retry_wait'", ":", "self", ".", "__client", ".", "max_retry_wait", ",", "}", "if", "self", ".", "__client", ".", "check_response_func", ":", "opts", "[", "'check_response_func'", "]", "=", "self", ".", "__client", ".", "check_response_func", "if", "self", ".", "__client", ".", "retry_func", ":", "opts", "[", "'retry_func'", "]", "=", "self", ".", "__client", ".", "retry_func", "http_response", "=", "http_wrapper", ".", "MakeRequest", "(", "http", ",", "http_request", ",", "*", "*", "opts", ")", "return", "self", ".", "ProcessHttpResponse", "(", "method_config", ",", "http_response", ",", "request", ")" ]
Call this method with request.
[ "Call", "this", "method", "with", "request", "." ]
python
train
46.72093
googleapis/google-cloud-python
datastore/google/cloud/datastore/_gapic.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/datastore/google/cloud/datastore/_gapic.py#L28-L49
def make_datastore_api(client): """Create an instance of the GAPIC Datastore API. :type client: :class:`~google.cloud.datastore.client.Client` :param client: The client that holds configuration details. :rtype: :class:`.datastore.v1.datastore_client.DatastoreClient` :returns: A datastore API instance with the proper credentials. """ parse_result = six.moves.urllib_parse.urlparse(client._base_url) host = parse_result.netloc if parse_result.scheme == "https": channel = make_secure_channel(client._credentials, DEFAULT_USER_AGENT, host) else: channel = insecure_channel(host) return datastore_client.DatastoreClient( channel=channel, client_info=client_info.ClientInfo( client_library_version=__version__, gapic_version=__version__ ), )
[ "def", "make_datastore_api", "(", "client", ")", ":", "parse_result", "=", "six", ".", "moves", ".", "urllib_parse", ".", "urlparse", "(", "client", ".", "_base_url", ")", "host", "=", "parse_result", ".", "netloc", "if", "parse_result", ".", "scheme", "==", "\"https\"", ":", "channel", "=", "make_secure_channel", "(", "client", ".", "_credentials", ",", "DEFAULT_USER_AGENT", ",", "host", ")", "else", ":", "channel", "=", "insecure_channel", "(", "host", ")", "return", "datastore_client", ".", "DatastoreClient", "(", "channel", "=", "channel", ",", "client_info", "=", "client_info", ".", "ClientInfo", "(", "client_library_version", "=", "__version__", ",", "gapic_version", "=", "__version__", ")", ",", ")" ]
Create an instance of the GAPIC Datastore API. :type client: :class:`~google.cloud.datastore.client.Client` :param client: The client that holds configuration details. :rtype: :class:`.datastore.v1.datastore_client.DatastoreClient` :returns: A datastore API instance with the proper credentials.
[ "Create", "an", "instance", "of", "the", "GAPIC", "Datastore", "API", "." ]
python
train
37.272727
apache/incubator-mxnet
python/mxnet/gluon/trainer.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/trainer.py#L258-L270
def set_learning_rate(self, lr): """Sets a new learning rate of the optimizer. Parameters ---------- lr : float The new learning rate of the optimizer. """ if not isinstance(self._optimizer, opt.Optimizer): raise UserWarning("Optimizer has to be defined before its learning " "rate is mutated.") else: self._optimizer.set_learning_rate(lr)
[ "def", "set_learning_rate", "(", "self", ",", "lr", ")", ":", "if", "not", "isinstance", "(", "self", ".", "_optimizer", ",", "opt", ".", "Optimizer", ")", ":", "raise", "UserWarning", "(", "\"Optimizer has to be defined before its learning \"", "\"rate is mutated.\"", ")", "else", ":", "self", ".", "_optimizer", ".", "set_learning_rate", "(", "lr", ")" ]
Sets a new learning rate of the optimizer. Parameters ---------- lr : float The new learning rate of the optimizer.
[ "Sets", "a", "new", "learning", "rate", "of", "the", "optimizer", "." ]
python
train
34.615385
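A minimal Gluon sketch showing set_learning_rate above used for a manual learning-rate decay; the network and values are arbitrary.

from mxnet import gluon

net = gluon.nn.Dense(1)
net.initialize()
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1})
# Halve the learning rate later in training, e.g. at an epoch boundary.
trainer.set_learning_rate(trainer.learning_rate * 0.5)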
tornadoweb/tornado
tornado/iostream.py
https://github.com/tornadoweb/tornado/blob/b8b481770bcdb333a69afde5cce7eaa449128326/tornado/iostream.py#L526-L563
def write(self, data: Union[bytes, memoryview]) -> "Future[None]": """Asynchronously write the given data to this stream. This method returns a `.Future` that resolves (with a result of ``None``) when the write has been completed. The ``data`` argument may be of type `bytes` or `memoryview`. .. versionchanged:: 4.0 Now returns a `.Future` if no callback is given. .. versionchanged:: 4.5 Added support for `memoryview` arguments. .. versionchanged:: 6.0 The ``callback`` argument was removed. Use the returned `.Future` instead. """ self._check_closed() if data: if ( self.max_write_buffer_size is not None and len(self._write_buffer) + len(data) > self.max_write_buffer_size ): raise StreamBufferFullError("Reached maximum write buffer size") self._write_buffer.append(data) self._total_write_index += len(data) future = Future() # type: Future[None] future.add_done_callback(lambda f: f.exception()) self._write_futures.append((self._total_write_index, future)) if not self._connecting: self._handle_write() if self._write_buffer: self._add_io_state(self.io_loop.WRITE) self._maybe_add_error_listener() return future
[ "def", "write", "(", "self", ",", "data", ":", "Union", "[", "bytes", ",", "memoryview", "]", ")", "->", "\"Future[None]\"", ":", "self", ".", "_check_closed", "(", ")", "if", "data", ":", "if", "(", "self", ".", "max_write_buffer_size", "is", "not", "None", "and", "len", "(", "self", ".", "_write_buffer", ")", "+", "len", "(", "data", ")", ">", "self", ".", "max_write_buffer_size", ")", ":", "raise", "StreamBufferFullError", "(", "\"Reached maximum write buffer size\"", ")", "self", ".", "_write_buffer", ".", "append", "(", "data", ")", "self", ".", "_total_write_index", "+=", "len", "(", "data", ")", "future", "=", "Future", "(", ")", "# type: Future[None]", "future", ".", "add_done_callback", "(", "lambda", "f", ":", "f", ".", "exception", "(", ")", ")", "self", ".", "_write_futures", ".", "append", "(", "(", "self", ".", "_total_write_index", ",", "future", ")", ")", "if", "not", "self", ".", "_connecting", ":", "self", ".", "_handle_write", "(", ")", "if", "self", ".", "_write_buffer", ":", "self", ".", "_add_io_state", "(", "self", ".", "io_loop", ".", "WRITE", ")", "self", ".", "_maybe_add_error_listener", "(", ")", "return", "future" ]
Asynchronously write the given data to this stream. This method returns a `.Future` that resolves (with a result of ``None``) when the write has been completed. The ``data`` argument may be of type `bytes` or `memoryview`. .. versionchanged:: 4.0 Now returns a `.Future` if no callback is given. .. versionchanged:: 4.5 Added support for `memoryview` arguments. .. versionchanged:: 6.0 The ``callback`` argument was removed. Use the returned `.Future` instead.
[ "Asynchronously", "write", "the", "given", "data", "to", "this", "stream", "." ]
python
train
36.973684
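A small coroutine sketch using the write method above on a client IOStream; the host, port, and payload are placeholders.

import socket
from tornado.iostream import IOStream
from tornado.ioloop import IOLoop

async def send(host, port, payload):
    stream = IOStream(socket.socket(socket.AF_INET, socket.SOCK_STREAM))
    await stream.connect((host, port))
    await stream.write(payload)  # resolves once the data has been written
    stream.close()

# IOLoop.current().run_sync(lambda: send("127.0.0.1", 8888, b"ping"))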
tensorflow/tensor2tensor
tensor2tensor/models/research/autoencoders.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/autoencoders.py#L1085-L1103
def autoencoder_residual(): """Residual autoencoder model.""" hparams = autoencoder_autoregressive() hparams.optimizer = "Adafactor" hparams.clip_grad_norm = 1.0 hparams.learning_rate_constant = 0.5 hparams.learning_rate_warmup_steps = 500 hparams.learning_rate_schedule = "constant * linear_warmup * rsqrt_decay" hparams.num_hidden_layers = 5 hparams.hidden_size = 64 hparams.max_hidden_size = 1024 hparams.add_hparam("num_residual_layers", 2) hparams.add_hparam("residual_kernel_height", 3) hparams.add_hparam("residual_kernel_width", 3) hparams.add_hparam("residual_filter_multiplier", 2.0) hparams.add_hparam("residual_dropout", 0.2) hparams.add_hparam("residual_use_separable_conv", int(True)) hparams.add_hparam("kl_beta", 1.0) return hparams
[ "def", "autoencoder_residual", "(", ")", ":", "hparams", "=", "autoencoder_autoregressive", "(", ")", "hparams", ".", "optimizer", "=", "\"Adafactor\"", "hparams", ".", "clip_grad_norm", "=", "1.0", "hparams", ".", "learning_rate_constant", "=", "0.5", "hparams", ".", "learning_rate_warmup_steps", "=", "500", "hparams", ".", "learning_rate_schedule", "=", "\"constant * linear_warmup * rsqrt_decay\"", "hparams", ".", "num_hidden_layers", "=", "5", "hparams", ".", "hidden_size", "=", "64", "hparams", ".", "max_hidden_size", "=", "1024", "hparams", ".", "add_hparam", "(", "\"num_residual_layers\"", ",", "2", ")", "hparams", ".", "add_hparam", "(", "\"residual_kernel_height\"", ",", "3", ")", "hparams", ".", "add_hparam", "(", "\"residual_kernel_width\"", ",", "3", ")", "hparams", ".", "add_hparam", "(", "\"residual_filter_multiplier\"", ",", "2.0", ")", "hparams", ".", "add_hparam", "(", "\"residual_dropout\"", ",", "0.2", ")", "hparams", ".", "add_hparam", "(", "\"residual_use_separable_conv\"", ",", "int", "(", "True", ")", ")", "hparams", ".", "add_hparam", "(", "\"kl_beta\"", ",", "1.0", ")", "return", "hparams" ]
Residual autoencoder model.
[ "Residual", "autoencoder", "model", "." ]
python
train
40.315789
Xion/taipan
taipan/collections/dicts.py
https://github.com/Xion/taipan/blob/f333f0287c8bd0915182c7d5308e5f05ef0cca78/taipan/collections/dicts.py#L320-L336
def mapkeys(function, dict_): """Return a new dictionary where the keys come from applying ``function`` to the keys of given dictionary. .. warning:: If ``function`` returns the same value for more than one key, it is undefined which key will be chosen for the resulting dictionary. :param function: Function taking a dictionary key, or None (corresponding to identity function) .. versionadded:: 0.0.2 """ ensure_mapping(dict_) function = identity() if function is None else ensure_callable(function) return dict_.__class__((function(k), v) for k, v in iteritems(dict_))
[ "def", "mapkeys", "(", "function", ",", "dict_", ")", ":", "ensure_mapping", "(", "dict_", ")", "function", "=", "identity", "(", ")", "if", "function", "is", "None", "else", "ensure_callable", "(", "function", ")", "return", "dict_", ".", "__class__", "(", "(", "function", "(", "k", ")", ",", "v", ")", "for", "k", ",", "v", "in", "iteritems", "(", "dict_", ")", ")" ]
Return a new dictionary where the keys come from applying ``function`` to the keys of given dictionary. .. warning:: If ``function`` returns the same value for more than one key, it is undefined which key will be chosen for the resulting dictionary. :param function: Function taking a dictionary key, or None (corresponding to identity function) .. versionadded:: 0.0.2
[ "Return", "a", "new", "dictionary", "where", "the", "keys", "come", "from", "applying", "function", "to", "the", "keys", "of", "given", "dictionary", "." ]
python
train
37.176471
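A short usage sketch for the mapkeys record above, assuming the taipan package is importable from the module path shown; the dictionaries are placeholders.

from taipan.collections.dicts import mapkeys  # module path as given in the record

print(mapkeys(str.upper, {"a": 1, "b": 2}))   # {'A': 1, 'B': 2}
print(mapkeys(None, {"a": 1}))                # None means identity: {'a': 1}
# As the docstring warns, colliding keys are resolved in an undefined order:
print(mapkeys(len, {"ab": 1, "cd": 2}))       # {2: 1} or {2: 2}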
BD2KGenomics/protect
src/protect/pipeline/ProTECT.py
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/pipeline/ProTECT.py#L94-L148
def _ensure_patient_group_is_ok(patient_object, patient_name=None): """ Ensure that the provided entries for the patient groups is formatted properly. :param set|dict patient_object: The values passed to the samples patient group :param str patient_name: Optional name for the set :raises ParameterError: If required entry doesnt exist """ from protect.addons.common import TCGAToGTEx assert isinstance(patient_object, (set, dict)), '%s,%s' % (patient_object, patient_name) # set(dict) = set of keys of the dict test_set = set(patient_object) if 'tumor_type' not in patient_object: raise ParameterError(('The patient entry for sample %s ' % patient_name) + 'does not contain a Tumor type.') elif patient_object['tumor_type'] not in TCGAToGTEx: raise ParameterError(('The patient entry for sample %s ' % patient_name) + 'does contains an invalid Tumor type. Please use one of the ' 'valid TCGA tumor types.') if {'tumor_dna_fastq_1', 'normal_dna_fastq_1', 'tumor_rna_fastq_1'}.issubset(test_set): # Best case scenario, we get all fastqs pass else: # We have less than 3 fastqs so we have to have a haplotype. if 'hla_haplotype_files' not in test_set: raise ParameterError(('The patient entry for sample %s ' % patient_name) + 'does not contain a hla_haplotype_files entry.\nCannot haplotype ' 'patient if all the input sequence files are not fastqs.') # Either we have a fastq and/or bam for the tumor and normal, or we need to be given a vcf if (({re.search('tumor_dna_((bam)|(fastq_1)).*', x) for x in test_set} == {None} or {re.search('normal_dna_((bam)|(fastq_1)).*', x) for x in test_set} == {None}) and ('mutation_vcf' not in test_set and 'fusion_bedpe' not in test_set)): raise ParameterError(('The patient entry for sample %s ' % patient_name) + 'does not contain a mutation_vcf or fusion_bedpe entry. If both ' 'tumor and normal DNA sequences (fastqs or bam) are not provided, ' 'a pre-computed vcf and/or bedpe must be provided.') # We have to be given a tumor rna fastq or bam unless we are processing ONLY fusions if {re.search('tumor_rna_((bam)|(fastq_1)).*', x) for x in test_set} == {None}: if 'mutation_vcf' not in test_set and 'fusion_bedpe' in test_set: # The only case where it is ok to not have the genome mapped rna. pass else: raise ParameterError(('The patient entry for sample %s ' % patient_name) + 'does not contain a tumor rna sequence data entry. We require ' 'either tumor_rna_fastq_1 or tumor_rna_bam.') # If we are given an RNA bam then it needs to have a corresponding transcriptome bam unless # we have also been provided expression values. if 'tumor_rna_bam' in test_set and 'tumor_rna_transcriptome_bam' not in test_set: if 'expression_files' not in test_set: raise ParameterError(('The patient entry for sample %s ' % patient_name + 'was provided a tumor rna bam with sequences mapped to the ' 'genome but was not provided a matching rna bam for the ' 'transcriptome or a tar containing expression values. ' 'We require either a matching transcriptome bam to estimate' 'expression, or the precomputed expression values.'))
[ "def", "_ensure_patient_group_is_ok", "(", "patient_object", ",", "patient_name", "=", "None", ")", ":", "from", "protect", ".", "addons", ".", "common", "import", "TCGAToGTEx", "assert", "isinstance", "(", "patient_object", ",", "(", "set", ",", "dict", ")", ")", ",", "'%s,%s'", "%", "(", "patient_object", ",", "patient_name", ")", "# set(dict) = set of keys of the dict", "test_set", "=", "set", "(", "patient_object", ")", "if", "'tumor_type'", "not", "in", "patient_object", ":", "raise", "ParameterError", "(", "(", "'The patient entry for sample %s '", "%", "patient_name", ")", "+", "'does not contain a Tumor type.'", ")", "elif", "patient_object", "[", "'tumor_type'", "]", "not", "in", "TCGAToGTEx", ":", "raise", "ParameterError", "(", "(", "'The patient entry for sample %s '", "%", "patient_name", ")", "+", "'does contains an invalid Tumor type. Please use one of the '", "'valid TCGA tumor types.'", ")", "if", "{", "'tumor_dna_fastq_1'", ",", "'normal_dna_fastq_1'", ",", "'tumor_rna_fastq_1'", "}", ".", "issubset", "(", "test_set", ")", ":", "# Best case scenario, we get all fastqs", "pass", "else", ":", "# We have less than 3 fastqs so we have to have a haplotype.", "if", "'hla_haplotype_files'", "not", "in", "test_set", ":", "raise", "ParameterError", "(", "(", "'The patient entry for sample %s '", "%", "patient_name", ")", "+", "'does not contain a hla_haplotype_files entry.\\nCannot haplotype '", "'patient if all the input sequence files are not fastqs.'", ")", "# Either we have a fastq and/or bam for the tumor and normal, or we need to be given a vcf", "if", "(", "(", "{", "re", ".", "search", "(", "'tumor_dna_((bam)|(fastq_1)).*'", ",", "x", ")", "for", "x", "in", "test_set", "}", "==", "{", "None", "}", "or", "{", "re", ".", "search", "(", "'normal_dna_((bam)|(fastq_1)).*'", ",", "x", ")", "for", "x", "in", "test_set", "}", "==", "{", "None", "}", ")", "and", "(", "'mutation_vcf'", "not", "in", "test_set", "and", "'fusion_bedpe'", "not", "in", "test_set", ")", ")", ":", "raise", "ParameterError", "(", "(", "'The patient entry for sample %s '", "%", "patient_name", ")", "+", "'does not contain a mutation_vcf or fusion_bedpe entry. If both '", "'tumor and normal DNA sequences (fastqs or bam) are not provided, '", "'a pre-computed vcf and/or bedpe must be provided.'", ")", "# We have to be given a tumor rna fastq or bam unless we are processing ONLY fusions", "if", "{", "re", ".", "search", "(", "'tumor_rna_((bam)|(fastq_1)).*'", ",", "x", ")", "for", "x", "in", "test_set", "}", "==", "{", "None", "}", ":", "if", "'mutation_vcf'", "not", "in", "test_set", "and", "'fusion_bedpe'", "in", "test_set", ":", "# The only case where it is ok to not have the genome mapped rna.", "pass", "else", ":", "raise", "ParameterError", "(", "(", "'The patient entry for sample %s '", "%", "patient_name", ")", "+", "'does not contain a tumor rna sequence data entry. 
We require '", "'either tumor_rna_fastq_1 or tumor_rna_bam.'", ")", "# If we are given an RNA bam then it needs to have a corresponding transcriptome bam unless", "# we have also been provided expression values.", "if", "'tumor_rna_bam'", "in", "test_set", "and", "'tumor_rna_transcriptome_bam'", "not", "in", "test_set", ":", "if", "'expression_files'", "not", "in", "test_set", ":", "raise", "ParameterError", "(", "(", "'The patient entry for sample %s '", "%", "patient_name", "+", "'was provided a tumor rna bam with sequences mapped to the '", "'genome but was not provided a matching rna bam for the '", "'transcriptome or a tar containing expression values. '", "'We require either a matching transcriptome bam to estimate'", "'expression, or the precomputed expression values.'", ")", ")" ]
Ensure that the provided entries for the patient groups is formatted properly. :param set|dict patient_object: The values passed to the samples patient group :param str patient_name: Optional name for the set :raises ParameterError: If required entry doesnt exist
[ "Ensure", "that", "the", "provided", "entries", "for", "the", "patient", "groups", "is", "formatted", "properly", "." ]
python
train
69.745455
onnx/onnxmltools
onnxmltools/convert/coreml/shape_calculators/neural_network/LSTM.py
https://github.com/onnx/onnxmltools/blob/d4e4c31990fc2d9fd1f92139f497d360914c9df2/onnxmltools/convert/coreml/shape_calculators/neural_network/LSTM.py#L12-L43
def calculate_lstm_output_shapes(operator): ''' See LSTM's conversion function for its output shapes. ''' check_input_and_output_numbers(operator, input_count_range=[1, 3], output_count_range=[1, 3]) check_input_and_output_types(operator, good_input_types=[FloatTensorType]) input_shape = operator.inputs[0].type.shape if len(input_shape) not in [2, 4]: raise RuntimeError('Input must be a 2-D tensor') params = operator.raw_operator.uniDirectionalLSTM # The following line is more accurate but it may break some tests # output_shape = ['None', params.outputVectorSize] if params.params.sequenceOutput else [1, params.outputVectorSize] output_shape = ['None', params.outputVectorSize] state_shape = [1, params.outputVectorSize] # TODO: Changing input shapes of an operator is dangerous, this should be move to Topology's _fix_shapes function if len(operator.inputs) > 1: Y_h_in = operator.inputs[1] # The initial hidden state of a single sequence Y_h_in.type.shape = state_shape if len(operator.inputs) > 2: Y_c_in = operator.inputs[2] # The initial cell state of a single sequence Y_c_in.type.shape = state_shape operator.outputs[0].type.shape = output_shape if len(operator.outputs) > 1: operator.outputs[1].type.shape = state_shape if len(operator.outputs) > 2: operator.outputs[2].type.shape = state_shape
[ "def", "calculate_lstm_output_shapes", "(", "operator", ")", ":", "check_input_and_output_numbers", "(", "operator", ",", "input_count_range", "=", "[", "1", ",", "3", "]", ",", "output_count_range", "=", "[", "1", ",", "3", "]", ")", "check_input_and_output_types", "(", "operator", ",", "good_input_types", "=", "[", "FloatTensorType", "]", ")", "input_shape", "=", "operator", ".", "inputs", "[", "0", "]", ".", "type", ".", "shape", "if", "len", "(", "input_shape", ")", "not", "in", "[", "2", ",", "4", "]", ":", "raise", "RuntimeError", "(", "'Input must be a 2-D tensor'", ")", "params", "=", "operator", ".", "raw_operator", ".", "uniDirectionalLSTM", "# The following line is more accurate but it may break some tests", "# output_shape = ['None', params.outputVectorSize] if params.params.sequenceOutput else [1, params.outputVectorSize]", "output_shape", "=", "[", "'None'", ",", "params", ".", "outputVectorSize", "]", "state_shape", "=", "[", "1", ",", "params", ".", "outputVectorSize", "]", "# TODO: Changing input shapes of an operator is dangerous, this should be move to Topology's _fix_shapes function", "if", "len", "(", "operator", ".", "inputs", ")", ">", "1", ":", "Y_h_in", "=", "operator", ".", "inputs", "[", "1", "]", "# The initial hidden state of a single sequence", "Y_h_in", ".", "type", ".", "shape", "=", "state_shape", "if", "len", "(", "operator", ".", "inputs", ")", ">", "2", ":", "Y_c_in", "=", "operator", ".", "inputs", "[", "2", "]", "# The initial cell state of a single sequence", "Y_c_in", ".", "type", ".", "shape", "=", "state_shape", "operator", ".", "outputs", "[", "0", "]", ".", "type", ".", "shape", "=", "output_shape", "if", "len", "(", "operator", ".", "outputs", ")", ">", "1", ":", "operator", ".", "outputs", "[", "1", "]", ".", "type", ".", "shape", "=", "state_shape", "if", "len", "(", "operator", ".", "outputs", ")", ">", "2", ":", "operator", ".", "outputs", "[", "2", "]", ".", "type", ".", "shape", "=", "state_shape" ]
See LSTM's conversion function for its output shapes.
[ "See", "LSTM", "s", "conversion", "function", "for", "its", "output", "shapes", "." ]
python
train
44.1875
Parsl/parsl
parsl/dataflow/usage_tracking/usage.py
https://github.com/Parsl/parsl/blob/d7afb3bc37f50dcf224ae78637944172edb35dac/parsl/dataflow/usage_tracking/usage.py#L156-L176
def construct_start_message(self): """Collect preliminary run info at the start of the DFK. Returns : - Message dict dumped as json string, ready for UDP """ uname = getpass.getuser().encode('latin1') hashed_username = hashlib.sha256(uname).hexdigest()[0:10] hname = socket.gethostname().encode('latin1') hashed_hostname = hashlib.sha256(hname).hexdigest()[0:10] message = {'uuid': self.uuid, 'uname': hashed_username, 'hname': hashed_hostname, 'test': self.test_mode, 'parsl_v': self.parsl_version, 'python_v': self.python_version, 'os': platform.system(), 'os_v': platform.release(), 'start': time.time()} return json.dumps(message)
[ "def", "construct_start_message", "(", "self", ")", ":", "uname", "=", "getpass", ".", "getuser", "(", ")", ".", "encode", "(", "'latin1'", ")", "hashed_username", "=", "hashlib", ".", "sha256", "(", "uname", ")", ".", "hexdigest", "(", ")", "[", "0", ":", "10", "]", "hname", "=", "socket", ".", "gethostname", "(", ")", ".", "encode", "(", "'latin1'", ")", "hashed_hostname", "=", "hashlib", ".", "sha256", "(", "hname", ")", ".", "hexdigest", "(", ")", "[", "0", ":", "10", "]", "message", "=", "{", "'uuid'", ":", "self", ".", "uuid", ",", "'uname'", ":", "hashed_username", ",", "'hname'", ":", "hashed_hostname", ",", "'test'", ":", "self", ".", "test_mode", ",", "'parsl_v'", ":", "self", ".", "parsl_version", ",", "'python_v'", ":", "self", ".", "python_version", ",", "'os'", ":", "platform", ".", "system", "(", ")", ",", "'os_v'", ":", "platform", ".", "release", "(", ")", ",", "'start'", ":", "time", ".", "time", "(", ")", "}", "return", "json", ".", "dumps", "(", "message", ")" ]
Collect preliminary run info at the start of the DFK. Returns : - Message dict dumped as json string, ready for UDP
[ "Collect", "preliminary", "run", "info", "at", "the", "start", "of", "the", "DFK", "." ]
python
valid
40.666667
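A standalone repetition of the anonymisation step used in the record above, runnable without Parsl: only a 10-character SHA-256 prefix of the user and host names is reported.

import getpass, hashlib, socket

hashed_username = hashlib.sha256(getpass.getuser().encode('latin1')).hexdigest()[0:10]
hashed_hostname = hashlib.sha256(socket.gethostname().encode('latin1')).hexdigest()[0:10]
print(hashed_username, hashed_hostname)   # two 10-character hex prefixes; the raw names never leave the machine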
gwastro/pycbc-glue
pycbc_glue/pipeline.py
https://github.com/gwastro/pycbc-glue/blob/a3e906bae59fbfd707c3ff82e5d008d939ec5e24/pycbc_glue/pipeline.py#L1199-L1206
def get_output_files(self): """ Return list of output files for this DAG node and its job. """ output_files = list(self.__output_files) if isinstance(self.job(), CondorDAGJob): output_files = output_files + self.job().get_output_files() return output_files
[ "def", "get_output_files", "(", "self", ")", ":", "output_files", "=", "list", "(", "self", ".", "__output_files", ")", "if", "isinstance", "(", "self", ".", "job", "(", ")", ",", "CondorDAGJob", ")", ":", "output_files", "=", "output_files", "+", "self", ".", "job", "(", ")", ".", "get_output_files", "(", ")", "return", "output_files" ]
Return list of output files for this DAG node and its job.
[ "Return", "list", "of", "output", "files", "for", "this", "DAG", "node", "and", "its", "job", "." ]
python
train
34.875
openid/python-openid
openid/yadis/accept.py
https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/yadis/accept.py#L74-L119
def matchTypes(accept_types, have_types): """Given the result of parsing an Accept: header, and the available MIME types, return the acceptable types with their quality markdowns. For example: >>> acceptable = parseAcceptHeader('text/html, text/plain; q=0.5') >>> matchTypes(acceptable, ['text/plain', 'text/html', 'image/jpeg']) [('text/html', 1.0), ('text/plain', 0.5)] Type signature: ([(str, str, float)], [str]) -> [(str, float)] """ if not accept_types: # Accept all of them default = 1 else: default = 0 match_main = {} match_sub = {} for (main, sub, q) in accept_types: if main == '*': default = max(default, q) continue elif sub == '*': match_main[main] = max(match_main.get(main, 0), q) else: match_sub[(main, sub)] = max(match_sub.get((main, sub), 0), q) accepted_list = [] order_maintainer = 0 for mtype in have_types: main, sub = mtype.split('/') if (main, sub) in match_sub: q = match_sub[(main, sub)] else: q = match_main.get(main, default) if q: accepted_list.append((1 - q, order_maintainer, q, mtype)) order_maintainer += 1 accepted_list.sort() return [(mtype, q) for (_, _, q, mtype) in accepted_list]
[ "def", "matchTypes", "(", "accept_types", ",", "have_types", ")", ":", "if", "not", "accept_types", ":", "# Accept all of them", "default", "=", "1", "else", ":", "default", "=", "0", "match_main", "=", "{", "}", "match_sub", "=", "{", "}", "for", "(", "main", ",", "sub", ",", "q", ")", "in", "accept_types", ":", "if", "main", "==", "'*'", ":", "default", "=", "max", "(", "default", ",", "q", ")", "continue", "elif", "sub", "==", "'*'", ":", "match_main", "[", "main", "]", "=", "max", "(", "match_main", ".", "get", "(", "main", ",", "0", ")", ",", "q", ")", "else", ":", "match_sub", "[", "(", "main", ",", "sub", ")", "]", "=", "max", "(", "match_sub", ".", "get", "(", "(", "main", ",", "sub", ")", ",", "0", ")", ",", "q", ")", "accepted_list", "=", "[", "]", "order_maintainer", "=", "0", "for", "mtype", "in", "have_types", ":", "main", ",", "sub", "=", "mtype", ".", "split", "(", "'/'", ")", "if", "(", "main", ",", "sub", ")", "in", "match_sub", ":", "q", "=", "match_sub", "[", "(", "main", ",", "sub", ")", "]", "else", ":", "q", "=", "match_main", ".", "get", "(", "main", ",", "default", ")", "if", "q", ":", "accepted_list", ".", "append", "(", "(", "1", "-", "q", ",", "order_maintainer", ",", "q", ",", "mtype", ")", ")", "order_maintainer", "+=", "1", "accepted_list", ".", "sort", "(", ")", "return", "[", "(", "mtype", ",", "q", ")", "for", "(", "_", ",", "_", ",", "q", ",", "mtype", ")", "in", "accepted_list", "]" ]
Given the result of parsing an Accept: header, and the available MIME types, return the acceptable types with their quality markdowns. For example: >>> acceptable = parseAcceptHeader('text/html, text/plain; q=0.5') >>> matchTypes(acceptable, ['text/plain', 'text/html', 'image/jpeg']) [('text/html', 1.0), ('text/plain', 0.5)] Type signature: ([(str, str, float)], [str]) -> [(str, float)]
[ "Given", "the", "result", "of", "parsing", "an", "Accept", ":", "header", "and", "the", "available", "MIME", "types", "return", "the", "acceptable", "types", "with", "their", "quality", "markdowns", "." ]
python
train
29.021739
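One more example in the spirit of the doctest above, exercising the wildcard branches; it assumes parseAcceptHeader from the same module (as the doctest does) and that the package is importable.

from openid.yadis.accept import parseAcceptHeader, matchTypes

acceptable = parseAcceptHeader('image/*; q=0.8, */*; q=0.1')
print(matchTypes(acceptable, ['image/png', 'text/html']))
# [('image/png', 0.8), ('text/html', 0.1)] -- 'image/*' matches on the main type, '*/*' supplies the default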
textbook/aslack
aslack/slack_bot/bot.py
https://github.com/textbook/aslack/blob/9ac6a44e4464180109fa4be130ad7a980a9d1acc/aslack/slack_bot/bot.py#L231-L245
def _respond(self, channel, text): """Respond to a message on the current socket. Args: channel (:py:class:`str`): The channel to send to. text (:py:class:`str`): The message text to send. """ result = self._format_message(channel, text) if result is not None: logger.info( 'Sending message: %r', truncate(result, max_len=50), ) self.socket.send_str(result)
[ "def", "_respond", "(", "self", ",", "channel", ",", "text", ")", ":", "result", "=", "self", ".", "_format_message", "(", "channel", ",", "text", ")", "if", "result", "is", "not", "None", ":", "logger", ".", "info", "(", "'Sending message: %r'", ",", "truncate", "(", "result", ",", "max_len", "=", "50", ")", ",", ")", "self", ".", "socket", ".", "send_str", "(", "result", ")" ]
Respond to a message on the current socket. Args: channel (:py:class:`str`): The channel to send to. text (:py:class:`str`): The message text to send.
[ "Respond", "to", "a", "message", "on", "the", "current", "socket", "." ]
python
valid
31.266667
spyder-ide/spyder
spyder/plugins/editor/plugin.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/plugin.py#L2189-L2194
def set_or_clear_breakpoint(self): """Set/Clear breakpoint""" editorstack = self.get_current_editorstack() if editorstack is not None: self.switch_to_plugin() editorstack.set_or_clear_breakpoint()
[ "def", "set_or_clear_breakpoint", "(", "self", ")", ":", "editorstack", "=", "self", ".", "get_current_editorstack", "(", ")", "if", "editorstack", "is", "not", "None", ":", "self", ".", "switch_to_plugin", "(", ")", "editorstack", ".", "set_or_clear_breakpoint", "(", ")" ]
Set/Clear breakpoint
[ "Set", "/", "Clear", "breakpoint" ]
python
train
40.666667
d11wtq/dockerpty
dockerpty/__init__.py
https://github.com/d11wtq/dockerpty/blob/f8d17d893c6758b7cc25825e99f6b02202632a97/dockerpty/__init__.py#L20-L30
def start(client, container, interactive=True, stdout=None, stderr=None, stdin=None, logs=None): """ Present the PTY of the container inside the current process. This is just a wrapper for PseudoTerminal(client, container).start() """ operation = RunOperation(client, container, interactive=interactive, stdout=stdout, stderr=stderr, stdin=stdin, logs=logs) PseudoTerminal(client, operation).start()
[ "def", "start", "(", "client", ",", "container", ",", "interactive", "=", "True", ",", "stdout", "=", "None", ",", "stderr", "=", "None", ",", "stdin", "=", "None", ",", "logs", "=", "None", ")", ":", "operation", "=", "RunOperation", "(", "client", ",", "container", ",", "interactive", "=", "interactive", ",", "stdout", "=", "stdout", ",", "stderr", "=", "stderr", ",", "stdin", "=", "stdin", ",", "logs", "=", "logs", ")", "PseudoTerminal", "(", "client", ",", "operation", ")", ".", "start", "(", ")" ]
Present the PTY of the container inside the current process. This is just a wrapper for PseudoTerminal(client, container).start()
[ "Present", "the", "PTY", "of", "the", "container", "inside", "the", "current", "process", "." ]
python
train
40.454545
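A hedged end-to-end sketch for the start() record above. It assumes docker-py's low-level APIClient, a running local Docker daemon, and a freshly created container; the image and command are placeholders, not prescribed by the record.

import docker
import dockerpty

client = docker.APIClient()                      # low-level client (docker.Client in older docker-py releases)
container = client.create_container(
    image='busybox:latest', stdin_open=True, tty=True, command='/bin/sh')
dockerpty.start(client, container)               # hands the container's PTY to the current terminal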
pypa/pipenv
pipenv/patched/notpip/_vendor/pkg_resources/__init__.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_vendor/pkg_resources/__init__.py#L3045-L3048
def ensure_directory(path): """Ensure that the parent directory of `path` exists""" dirname = os.path.dirname(path) py31compat.makedirs(dirname, exist_ok=True)
[ "def", "ensure_directory", "(", "path", ")", ":", "dirname", "=", "os", ".", "path", ".", "dirname", "(", "path", ")", "py31compat", ".", "makedirs", "(", "dirname", ",", "exist_ok", "=", "True", ")" ]
Ensure that the parent directory of `path` exists
[ "Ensure", "that", "the", "parent", "directory", "of", "path", "exists" ]
python
train
42
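A usage sketch: the record shows pipenv's vendored copy, but the same helper is importable from setuptools' public pkg_resources, which is assumed here; the path is a placeholder.

import os
from pkg_resources import ensure_directory  # public copy of the vendored helper shown above

ensure_directory('/tmp/demo/output/data.txt')   # creates /tmp/demo/output; the last component is treated as a file name
print(os.path.isdir('/tmp/demo/output'))        # True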
apache/incubator-heron
third_party/python/cpplint/cpplint.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/third_party/python/cpplint/cpplint.py#L3995-L4139
def CheckTrailingSemicolon(filename, clean_lines, linenum, error): """Looks for redundant trailing semicolon. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # Block bodies should not be followed by a semicolon. Due to C++11 # brace initialization, there are more places where semicolons are # required than not, so we use a whitelist approach to check these # rather than a blacklist. These are the places where "};" should # be replaced by just "}": # 1. Some flavor of block following closing parenthesis: # for (;;) {}; # while (...) {}; # switch (...) {}; # Function(...) {}; # if (...) {}; # if (...) else if (...) {}; # # 2. else block: # if (...) else {}; # # 3. const member function: # Function(...) const {}; # # 4. Block following some statement: # x = 42; # {}; # # 5. Block at the beginning of a function: # Function(...) { # {}; # } # # Note that naively checking for the preceding "{" will also match # braces inside multi-dimensional arrays, but this is fine since # that expression will not contain semicolons. # # 6. Block following another block: # while (true) {} # {}; # # 7. End of namespaces: # namespace {}; # # These semicolons seems far more common than other kinds of # redundant semicolons, possibly due to people converting classes # to namespaces. For now we do not warn for this case. # # Try matching case 1 first. match = Match(r'^(.*\)\s*)\{', line) if match: # Matched closing parenthesis (case 1). Check the token before the # matching opening parenthesis, and don't warn if it looks like a # macro. This avoids these false positives: # - macro that defines a base class # - multi-line macro that defines a base class # - macro that defines the whole class-head # # But we still issue warnings for macros that we know are safe to # warn, specifically: # - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P # - TYPED_TEST # - INTERFACE_DEF # - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED: # # We implement a whitelist of safe macros instead of a blacklist of # unsafe macros, even though the latter appears less frequently in # google code and would have been easier to implement. This is because # the downside for getting the whitelist wrong means some extra # semicolons, while the downside for getting the blacklist wrong # would result in compile errors. 
# # In addition to macros, we also don't want to warn on # - Compound literals # - Lambdas # - alignas specifier with anonymous structs # - decltype closing_brace_pos = match.group(1).rfind(')') opening_parenthesis = ReverseCloseExpression( clean_lines, linenum, closing_brace_pos) if opening_parenthesis[2] > -1: line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]] macro = Search(r'\b([A-Z_][A-Z0-9_]*)\s*$', line_prefix) func = Match(r'^(.*\])\s*$', line_prefix) if ((macro and macro.group(1) not in ( 'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST', 'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED', 'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or (func and not Search(r'\boperator\s*\[\s*\]', func.group(1))) or Search(r'\b(?:struct|union)\s+alignas\s*$', line_prefix) or Search(r'\bdecltype$', line_prefix) or Search(r'\s+=\s*$', line_prefix)): match = None if (match and opening_parenthesis[1] > 1 and Search(r'\]\s*$', clean_lines.elided[opening_parenthesis[1] - 1])): # Multi-line lambda-expression match = None else: # Try matching cases 2-3. match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line) if not match: # Try matching cases 4-6. These are always matched on separate lines. # # Note that we can't simply concatenate the previous line to the # current line and do a single match, otherwise we may output # duplicate warnings for the blank line case: # if (cond) { # // blank line # } prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0] if prevline and Search(r'[;{}]\s*$', prevline): match = Match(r'^(\s*)\{', line) # Check matching closing brace if match: (endline, endlinenum, endpos) = CloseExpression( clean_lines, linenum, len(match.group(1))) if endpos > -1 and Match(r'^\s*;', endline[endpos:]): # Current {} pair is eligible for semicolon check, and we have found # the redundant semicolon, output warning here. # # Note: because we are scanning forward for opening braces, and # outputting warnings for the matching closing brace, if there are # nested blocks with trailing semicolons, we will get the error # messages in reversed order. # We need to check the line forward for NOLINT raw_lines = clean_lines.raw_lines ParseNolintSuppressions(filename, raw_lines[endlinenum-1], endlinenum-1, error) ParseNolintSuppressions(filename, raw_lines[endlinenum], endlinenum, error) error(filename, endlinenum, 'readability/braces', 4, "You don't need a ; after a }")
[ "def", "CheckTrailingSemicolon", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "error", ")", ":", "line", "=", "clean_lines", ".", "elided", "[", "linenum", "]", "# Block bodies should not be followed by a semicolon. Due to C++11", "# brace initialization, there are more places where semicolons are", "# required than not, so we use a whitelist approach to check these", "# rather than a blacklist. These are the places where \"};\" should", "# be replaced by just \"}\":", "# 1. Some flavor of block following closing parenthesis:", "# for (;;) {};", "# while (...) {};", "# switch (...) {};", "# Function(...) {};", "# if (...) {};", "# if (...) else if (...) {};", "#", "# 2. else block:", "# if (...) else {};", "#", "# 3. const member function:", "# Function(...) const {};", "#", "# 4. Block following some statement:", "# x = 42;", "# {};", "#", "# 5. Block at the beginning of a function:", "# Function(...) {", "# {};", "# }", "#", "# Note that naively checking for the preceding \"{\" will also match", "# braces inside multi-dimensional arrays, but this is fine since", "# that expression will not contain semicolons.", "#", "# 6. Block following another block:", "# while (true) {}", "# {};", "#", "# 7. End of namespaces:", "# namespace {};", "#", "# These semicolons seems far more common than other kinds of", "# redundant semicolons, possibly due to people converting classes", "# to namespaces. For now we do not warn for this case.", "#", "# Try matching case 1 first.", "match", "=", "Match", "(", "r'^(.*\\)\\s*)\\{'", ",", "line", ")", "if", "match", ":", "# Matched closing parenthesis (case 1). Check the token before the", "# matching opening parenthesis, and don't warn if it looks like a", "# macro. This avoids these false positives:", "# - macro that defines a base class", "# - multi-line macro that defines a base class", "# - macro that defines the whole class-head", "#", "# But we still issue warnings for macros that we know are safe to", "# warn, specifically:", "# - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P", "# - TYPED_TEST", "# - INTERFACE_DEF", "# - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:", "#", "# We implement a whitelist of safe macros instead of a blacklist of", "# unsafe macros, even though the latter appears less frequently in", "# google code and would have been easier to implement. 
This is because", "# the downside for getting the whitelist wrong means some extra", "# semicolons, while the downside for getting the blacklist wrong", "# would result in compile errors.", "#", "# In addition to macros, we also don't want to warn on", "# - Compound literals", "# - Lambdas", "# - alignas specifier with anonymous structs", "# - decltype", "closing_brace_pos", "=", "match", ".", "group", "(", "1", ")", ".", "rfind", "(", "')'", ")", "opening_parenthesis", "=", "ReverseCloseExpression", "(", "clean_lines", ",", "linenum", ",", "closing_brace_pos", ")", "if", "opening_parenthesis", "[", "2", "]", ">", "-", "1", ":", "line_prefix", "=", "opening_parenthesis", "[", "0", "]", "[", "0", ":", "opening_parenthesis", "[", "2", "]", "]", "macro", "=", "Search", "(", "r'\\b([A-Z_][A-Z0-9_]*)\\s*$'", ",", "line_prefix", ")", "func", "=", "Match", "(", "r'^(.*\\])\\s*$'", ",", "line_prefix", ")", "if", "(", "(", "macro", "and", "macro", ".", "group", "(", "1", ")", "not", "in", "(", "'TEST'", ",", "'TEST_F'", ",", "'MATCHER'", ",", "'MATCHER_P'", ",", "'TYPED_TEST'", ",", "'EXCLUSIVE_LOCKS_REQUIRED'", ",", "'SHARED_LOCKS_REQUIRED'", ",", "'LOCKS_EXCLUDED'", ",", "'INTERFACE_DEF'", ")", ")", "or", "(", "func", "and", "not", "Search", "(", "r'\\boperator\\s*\\[\\s*\\]'", ",", "func", ".", "group", "(", "1", ")", ")", ")", "or", "Search", "(", "r'\\b(?:struct|union)\\s+alignas\\s*$'", ",", "line_prefix", ")", "or", "Search", "(", "r'\\bdecltype$'", ",", "line_prefix", ")", "or", "Search", "(", "r'\\s+=\\s*$'", ",", "line_prefix", ")", ")", ":", "match", "=", "None", "if", "(", "match", "and", "opening_parenthesis", "[", "1", "]", ">", "1", "and", "Search", "(", "r'\\]\\s*$'", ",", "clean_lines", ".", "elided", "[", "opening_parenthesis", "[", "1", "]", "-", "1", "]", ")", ")", ":", "# Multi-line lambda-expression", "match", "=", "None", "else", ":", "# Try matching cases 2-3.", "match", "=", "Match", "(", "r'^(.*(?:else|\\)\\s*const)\\s*)\\{'", ",", "line", ")", "if", "not", "match", ":", "# Try matching cases 4-6. 
These are always matched on separate lines.", "#", "# Note that we can't simply concatenate the previous line to the", "# current line and do a single match, otherwise we may output", "# duplicate warnings for the blank line case:", "# if (cond) {", "# // blank line", "# }", "prevline", "=", "GetPreviousNonBlankLine", "(", "clean_lines", ",", "linenum", ")", "[", "0", "]", "if", "prevline", "and", "Search", "(", "r'[;{}]\\s*$'", ",", "prevline", ")", ":", "match", "=", "Match", "(", "r'^(\\s*)\\{'", ",", "line", ")", "# Check matching closing brace", "if", "match", ":", "(", "endline", ",", "endlinenum", ",", "endpos", ")", "=", "CloseExpression", "(", "clean_lines", ",", "linenum", ",", "len", "(", "match", ".", "group", "(", "1", ")", ")", ")", "if", "endpos", ">", "-", "1", "and", "Match", "(", "r'^\\s*;'", ",", "endline", "[", "endpos", ":", "]", ")", ":", "# Current {} pair is eligible for semicolon check, and we have found", "# the redundant semicolon, output warning here.", "#", "# Note: because we are scanning forward for opening braces, and", "# outputting warnings for the matching closing brace, if there are", "# nested blocks with trailing semicolons, we will get the error", "# messages in reversed order.", "# We need to check the line forward for NOLINT", "raw_lines", "=", "clean_lines", ".", "raw_lines", "ParseNolintSuppressions", "(", "filename", ",", "raw_lines", "[", "endlinenum", "-", "1", "]", ",", "endlinenum", "-", "1", ",", "error", ")", "ParseNolintSuppressions", "(", "filename", ",", "raw_lines", "[", "endlinenum", "]", ",", "endlinenum", ",", "error", ")", "error", "(", "filename", ",", "endlinenum", ",", "'readability/braces'", ",", "4", ",", "\"You don't need a ; after a }\"", ")" ]
Looks for redundant trailing semicolon. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
[ "Looks", "for", "redundant", "trailing", "semicolon", "." ]
python
valid
37.951724
thespacedoctor/tastic
tastic/tastic.py
https://github.com/thespacedoctor/tastic/blob/a0a16cf329a50057906ac3f696bb60b6fcee25e0/tastic/tastic.py#L970-L1003
def del_tag( self, tag): """*delete a tag this taskpaper object* **Key Arguments:** - ``tag`` -- the tag to delete to the object **Usage:** .. code-block:: python aTask.del_tag("@due") """ if tag.replace("@", "") not in self.tags: return self.refresh oldContent = self.to_string(indentLevel=1) newTags = [] newTags[:] = [n for n in newTags if tag not in n] self.tags = newTags newContent = self.to_string(indentLevel=1) # ADD DIRECTLY TO CONTENT IF THE PROJECT IS BEING ADDED SPECIFICALLY TO # THIS OBJECT self.parent._update_document_tree( oldContent=oldContent, newContent=newContent ) self.refresh return None
[ "def", "del_tag", "(", "self", ",", "tag", ")", ":", "if", "tag", ".", "replace", "(", "\"@\"", ",", "\"\"", ")", "not", "in", "self", ".", "tags", ":", "return", "self", ".", "refresh", "oldContent", "=", "self", ".", "to_string", "(", "indentLevel", "=", "1", ")", "newTags", "=", "[", "]", "newTags", "[", ":", "]", "=", "[", "n", "for", "n", "in", "newTags", "if", "tag", "not", "in", "n", "]", "self", ".", "tags", "=", "newTags", "newContent", "=", "self", ".", "to_string", "(", "indentLevel", "=", "1", ")", "# ADD DIRECTLY TO CONTENT IF THE PROJECT IS BEING ADDED SPECIFICALLY TO", "# THIS OBJECT", "self", ".", "parent", ".", "_update_document_tree", "(", "oldContent", "=", "oldContent", ",", "newContent", "=", "newContent", ")", "self", ".", "refresh", "return", "None" ]
*delete a tag this taskpaper object* **Key Arguments:** - ``tag`` -- the tag to delete to the object **Usage:** .. code-block:: python aTask.del_tag("@due")
[ "*", "delete", "a", "tag", "this", "taskpaper", "object", "*" ]
python
train
24.235294
KelSolaar/Umbra
umbra/components/factory/script_editor/models.py
https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/components/factory/script_editor/models.py#L1087-L1108
def insert_pattern(self, pattern, index): """ Inserts given pattern into the Model. :param pattern: Pattern. :type pattern: unicode :param index: Insertion index. :type index: int :return: Method success. :rtype: bool """ LOGGER.debug("> Inserting '{0}' at '{1}' index.".format(pattern, index)) self.remove_pattern(pattern) self.beginInsertRows(self.get_node_index(self.root_node), index, index) pattern_node = PatternNode(name=pattern) self.root_node.insert_child(pattern_node, index) self.endInsertRows() self.pattern_inserted.emit(pattern_node) return True
[ "def", "insert_pattern", "(", "self", ",", "pattern", ",", "index", ")", ":", "LOGGER", ".", "debug", "(", "\"> Inserting '{0}' at '{1}' index.\"", ".", "format", "(", "pattern", ",", "index", ")", ")", "self", ".", "remove_pattern", "(", "pattern", ")", "self", ".", "beginInsertRows", "(", "self", ".", "get_node_index", "(", "self", ".", "root_node", ")", ",", "index", ",", "index", ")", "pattern_node", "=", "PatternNode", "(", "name", "=", "pattern", ")", "self", ".", "root_node", ".", "insert_child", "(", "pattern_node", ",", "index", ")", "self", ".", "endInsertRows", "(", ")", "self", ".", "pattern_inserted", ".", "emit", "(", "pattern_node", ")", "return", "True" ]
Inserts given pattern into the Model. :param pattern: Pattern. :type pattern: unicode :param index: Insertion index. :type index: int :return: Method success. :rtype: bool
[ "Inserts", "given", "pattern", "into", "the", "Model", "." ]
python
train
30.818182
mozilla/socorrolib
socorrolib/lib/context_tools.py
https://github.com/mozilla/socorrolib/blob/4ec08c6a4ee2c8a69150268afdd324f5f22b90c8/socorrolib/lib/context_tools.py#L13-L30
def temp_file_context(raw_dump_path, logger=None): """this contextmanager implements conditionally deleting a pathname at the end of a context if the pathname indicates that it is a temp file by having the word 'TEMPORARY' embedded in it.""" try: yield raw_dump_path finally: if 'TEMPORARY' in raw_dump_path: try: os.unlink(raw_dump_path) except OSError: if logger is None: logger = FakeLogger() logger.warning( 'unable to delete %s. manual deletion is required.', raw_dump_path, exc_info=True )
[ "def", "temp_file_context", "(", "raw_dump_path", ",", "logger", "=", "None", ")", ":", "try", ":", "yield", "raw_dump_path", "finally", ":", "if", "'TEMPORARY'", "in", "raw_dump_path", ":", "try", ":", "os", ".", "unlink", "(", "raw_dump_path", ")", "except", "OSError", ":", "if", "logger", "is", "None", ":", "logger", "=", "FakeLogger", "(", ")", "logger", ".", "warning", "(", "'unable to delete %s. manual deletion is required.'", ",", "raw_dump_path", ",", "exc_info", "=", "True", ")" ]
this contextmanager implements conditionally deleting a pathname at the end of a context if the pathname indicates that it is a temp file by having the word 'TEMPORARY' embedded in it.
[ "this", "contextmanager", "implements", "conditionally", "deleting", "a", "pathname", "at", "the", "end", "of", "a", "context", "if", "the", "pathname", "indicates", "that", "it", "is", "a", "temp", "file", "by", "having", "the", "word", "TEMPORARY", "embedded", "in", "it", "." ]
python
train
37.888889
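A usage sketch for the contextmanager above, assuming socorrolib is importable; the temp file is created here only to demonstrate the 'TEMPORARY' marker that opts a path in for deletion.

import os, tempfile
from socorrolib.lib.context_tools import temp_file_context

fd, path = tempfile.mkstemp(prefix='TEMPORARY.')  # the marker word is what triggers cleanup
os.close(fd)
with temp_file_context(path) as dump_path:
    print(os.path.exists(dump_path))              # True while the context is open
print(os.path.exists(path))                       # False -- unlinked on exit because of the marker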
boakley/robotframework-lint
rflint/parser/parser.py
https://github.com/boakley/robotframework-lint/blob/3e3578f4e39af9af9961aa0a715f146b74474091/rflint/parser/parser.py#L34-L55
def RobotFactory(path, parent=None): '''Return an instance of SuiteFile, ResourceFile, SuiteFolder Exactly which is returned depends on whether it's a file or folder, and if a file, the contents of the file. If there is a testcase table, this will return an instance of SuiteFile, otherwise it will return an instance of ResourceFile. ''' if os.path.isdir(path): return SuiteFolder(path, parent) else: rf = RobotFile(path, parent) for table in rf.tables: if isinstance(table, TestcaseTable): rf.__class__ = SuiteFile return rf rf.__class__ = ResourceFile return rf
[ "def", "RobotFactory", "(", "path", ",", "parent", "=", "None", ")", ":", "if", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "return", "SuiteFolder", "(", "path", ",", "parent", ")", "else", ":", "rf", "=", "RobotFile", "(", "path", ",", "parent", ")", "for", "table", "in", "rf", ".", "tables", ":", "if", "isinstance", "(", "table", ",", "TestcaseTable", ")", ":", "rf", ".", "__class__", "=", "SuiteFile", "return", "rf", "rf", ".", "__class__", "=", "ResourceFile", "return", "rf" ]
Return an instance of SuiteFile, ResourceFile, SuiteFolder Exactly which is returned depends on whether it's a file or folder, and if a file, the contents of the file. If there is a testcase table, this will return an instance of SuiteFile, otherwise it will return an instance of ResourceFile.
[ "Return", "an", "instance", "of", "SuiteFile", "ResourceFile", "SuiteFolder" ]
python
valid
30.181818
amcat/nlpipe
nlpipe/modules/corenlp.py
https://github.com/amcat/nlpipe/blob/e9dcf0214d5dc6ba3900b8d7359909e1e33f1ce7/nlpipe/modules/corenlp.py#L55-L65
def get_singleton(cls, annotators=None, **options): """ Get or create a corenlp parser with the given annotator and options Note: multiple parsers with the same annotator and different options are not supported. """ if annotators is not None: annotators = tuple(annotators) if annotators not in cls._singletons: cls._singletons[annotators] = cls(annotators, **options) return cls._singletons[annotators]
[ "def", "get_singleton", "(", "cls", ",", "annotators", "=", "None", ",", "*", "*", "options", ")", ":", "if", "annotators", "is", "not", "None", ":", "annotators", "=", "tuple", "(", "annotators", ")", "if", "annotators", "not", "in", "cls", ".", "_singletons", ":", "cls", ".", "_singletons", "[", "annotators", "]", "=", "cls", "(", "annotators", ",", "*", "*", "options", ")", "return", "cls", ".", "_singletons", "[", "annotators", "]" ]
Get or create a corenlp parser with the given annotator and options Note: multiple parsers with the same annotator and different options are not supported.
[ "Get", "or", "create", "a", "corenlp", "parser", "with", "the", "given", "annotator", "and", "options", "Note", ":", "multiple", "parsers", "with", "the", "same", "annotator", "and", "different", "options", "are", "not", "supported", "." ]
python
train
44.272727
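A stripped-down re-creation of the per-annotators caching pattern in the record above, runnable without CoreNLP; the Parser class here is a stand-in, not the real one.

class Parser:
    _singletons = {}   # keyed by the annotator tuple, as in the record above

    def __init__(self, annotators=None, **options):
        self.annotators = annotators

    @classmethod
    def get_singleton(cls, annotators=None, **options):
        if annotators is not None:
            annotators = tuple(annotators)     # lists are not hashable; tuples are
        if annotators not in cls._singletons:
            cls._singletons[annotators] = cls(annotators, **options)
        return cls._singletons[annotators]

a = Parser.get_singleton(["tokenize", "ssplit"])
b = Parser.get_singleton(["tokenize", "ssplit"])
print(a is b)   # True -- same annotators, same cached instance (differing options on later calls are ignored)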
elmotec/massedit
massedit.py
https://github.com/elmotec/massedit/blob/57e22787354896d63a8850312314b19aa0308906/massedit.py#L310-L315
def set_code_exprs(self, codes): """Convenience: sets all the code expressions at once.""" self.code_objs = dict() self._codes = [] for code in codes: self.append_code_expr(code)
[ "def", "set_code_exprs", "(", "self", ",", "codes", ")", ":", "self", ".", "code_objs", "=", "dict", "(", ")", "self", ".", "_codes", "=", "[", "]", "for", "code", "in", "codes", ":", "self", ".", "append_code_expr", "(", "code", ")" ]
Convenience: sets all the code expressions at once.
[ "Convenience", ":", "sets", "all", "the", "code", "expressions", "at", "once", "." ]
python
train
36.166667
Azure/azure-cosmos-table-python
azure-cosmosdb-table/azure/cosmosdb/table/tableservice.py
https://github.com/Azure/azure-cosmos-table-python/blob/a7b618f6bddc465c9fdf899ea2971dfe4d04fcf0/azure-cosmosdb-table/azure/cosmosdb/table/tableservice.py#L742-L804
def _query_entities(self, table_name, filter=None, select=None, max_results=None, marker=None, accept=TablePayloadFormat.JSON_MINIMAL_METADATA, property_resolver=None, timeout=None, _context=None): ''' Returns a list of entities under the specified table. Makes a single list request to the service. Used internally by the query_entities method. :param str table_name: The name of the table to query. :param str filter: Returns only entities that satisfy the specified filter. Note that no more than 15 discrete comparisons are permitted within a $filter string. See http://msdn.microsoft.com/en-us/library/windowsazure/dd894031.aspx for more information on constructing filters. :param str select: Returns only the desired properties of an entity from the set. :param int max_results: The maximum number of entities to return. :param obj marker: A dictionary which identifies the portion of the query to be returned with the next query operation. The operation returns a next_marker element within the response body if the list returned was not complete. This value may then be used as a query parameter in a subsequent call to request the next portion of the list of table. The marker value is opaque to the client. :param str accept: Specifies the accepted content type of the response payload. See :class:`~azure.storage.table.models.TablePayloadFormat` for possible values. :param property_resolver: A function which given the partition key, row key, property name, property value, and the property EdmType if returned by the service, returns the EdmType of the property. Generally used if accept is set to JSON_NO_METADATA. :type property_resolver: func(pk, rk, prop_name, prop_value, service_edm_type) :param int timeout: The server timeout, expressed in seconds. :return: A list of entities, potentially with a next_marker property. :rtype: list(:class:`~azure.storage.table.models.Entity`) ''' _validate_not_none('table_name', table_name) _validate_not_none('accept', accept) next_partition_key = None if marker is None else marker.get('nextpartitionkey') next_row_key = None if marker is None else marker.get('nextrowkey') request = HTTPRequest() request.method = 'GET' request.host_locations = self._get_host_locations(secondary=True) request.path = '/' + _to_str(table_name) + '()' request.headers = {'Accept': _to_str(accept)} request.query = { '$filter': _to_str(filter), '$select': _to_str(select), '$top': _int_to_str(max_results), 'NextPartitionKey': _to_str(next_partition_key), 'NextRowKey': _to_str(next_row_key), 'timeout': _int_to_str(timeout), } return self._perform_request(request, _convert_json_response_to_entities, [property_resolver, self.require_encryption, self.key_encryption_key, self.key_resolver_function], operation_context=_context)
[ "def", "_query_entities", "(", "self", ",", "table_name", ",", "filter", "=", "None", ",", "select", "=", "None", ",", "max_results", "=", "None", ",", "marker", "=", "None", ",", "accept", "=", "TablePayloadFormat", ".", "JSON_MINIMAL_METADATA", ",", "property_resolver", "=", "None", ",", "timeout", "=", "None", ",", "_context", "=", "None", ")", ":", "_validate_not_none", "(", "'table_name'", ",", "table_name", ")", "_validate_not_none", "(", "'accept'", ",", "accept", ")", "next_partition_key", "=", "None", "if", "marker", "is", "None", "else", "marker", ".", "get", "(", "'nextpartitionkey'", ")", "next_row_key", "=", "None", "if", "marker", "is", "None", "else", "marker", ".", "get", "(", "'nextrowkey'", ")", "request", "=", "HTTPRequest", "(", ")", "request", ".", "method", "=", "'GET'", "request", ".", "host_locations", "=", "self", ".", "_get_host_locations", "(", "secondary", "=", "True", ")", "request", ".", "path", "=", "'/'", "+", "_to_str", "(", "table_name", ")", "+", "'()'", "request", ".", "headers", "=", "{", "'Accept'", ":", "_to_str", "(", "accept", ")", "}", "request", ".", "query", "=", "{", "'$filter'", ":", "_to_str", "(", "filter", ")", ",", "'$select'", ":", "_to_str", "(", "select", ")", ",", "'$top'", ":", "_int_to_str", "(", "max_results", ")", ",", "'NextPartitionKey'", ":", "_to_str", "(", "next_partition_key", ")", ",", "'NextRowKey'", ":", "_to_str", "(", "next_row_key", ")", ",", "'timeout'", ":", "_int_to_str", "(", "timeout", ")", ",", "}", "return", "self", ".", "_perform_request", "(", "request", ",", "_convert_json_response_to_entities", ",", "[", "property_resolver", ",", "self", ".", "require_encryption", ",", "self", ".", "key_encryption_key", ",", "self", ".", "key_resolver_function", "]", ",", "operation_context", "=", "_context", ")" ]
Returns a list of entities under the specified table. Makes a single list request to the service. Used internally by the query_entities method. :param str table_name: The name of the table to query. :param str filter: Returns only entities that satisfy the specified filter. Note that no more than 15 discrete comparisons are permitted within a $filter string. See http://msdn.microsoft.com/en-us/library/windowsazure/dd894031.aspx for more information on constructing filters. :param str select: Returns only the desired properties of an entity from the set. :param int max_results: The maximum number of entities to return. :param obj marker: A dictionary which identifies the portion of the query to be returned with the next query operation. The operation returns a next_marker element within the response body if the list returned was not complete. This value may then be used as a query parameter in a subsequent call to request the next portion of the list of table. The marker value is opaque to the client. :param str accept: Specifies the accepted content type of the response payload. See :class:`~azure.storage.table.models.TablePayloadFormat` for possible values. :param property_resolver: A function which given the partition key, row key, property name, property value, and the property EdmType if returned by the service, returns the EdmType of the property. Generally used if accept is set to JSON_NO_METADATA. :type property_resolver: func(pk, rk, prop_name, prop_value, service_edm_type) :param int timeout: The server timeout, expressed in seconds. :return: A list of entities, potentially with a next_marker property. :rtype: list(:class:`~azure.storage.table.models.Entity`)
[ "Returns", "a", "list", "of", "entities", "under", "the", "specified", "table", ".", "Makes", "a", "single", "list", "request", "to", "the", "service", ".", "Used", "internally", "by", "the", "query_entities", "method", "." ]
python
train
54.269841
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/interactive.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/interactive.py#L1561-L1571
def do_trace(self, arg): """ t - trace at the current assembly instruction trace - trace at the current assembly instruction """ if arg: # XXX this check is to be removed raise CmdError("too many arguments") if self.lastEvent is None: raise CmdError("no current thread set") self.lastEvent.get_thread().set_tf() return True
[ "def", "do_trace", "(", "self", ",", "arg", ")", ":", "if", "arg", ":", "# XXX this check is to be removed", "raise", "CmdError", "(", "\"too many arguments\"", ")", "if", "self", ".", "lastEvent", "is", "None", ":", "raise", "CmdError", "(", "\"no current thread set\"", ")", "self", ".", "lastEvent", ".", "get_thread", "(", ")", ".", "set_tf", "(", ")", "return", "True" ]
t - trace at the current assembly instruction trace - trace at the current assembly instruction
[ "t", "-", "trace", "at", "the", "current", "assembly", "instruction", "trace", "-", "trace", "at", "the", "current", "assembly", "instruction" ]
python
train
36.818182
apple/turicreate
deps/src/libxml2-2.9.1/python/libxml2.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L5313-L5317
def parseExternalSubset(self, ExternalID, SystemID): """parse Markup declarations from an external subset [30] extSubset ::= textDecl? extSubsetDecl [31] extSubsetDecl ::= (markupdecl | conditionalSect | PEReference | S) * """ libxml2mod.xmlParseExternalSubset(self._o, ExternalID, SystemID)
[ "def", "parseExternalSubset", "(", "self", ",", "ExternalID", ",", "SystemID", ")", ":", "libxml2mod", ".", "xmlParseExternalSubset", "(", "self", ".", "_o", ",", "ExternalID", ",", "SystemID", ")" ]
parse Markup declarations from an external subset [30] extSubset ::= textDecl? extSubsetDecl [31] extSubsetDecl ::= (markupdecl | conditionalSect | PEReference | S) *
[ "parse", "Markup", "declarations", "from", "an", "external", "subset", "[", "30", "]", "extSubset", "::", "=", "textDecl?", "extSubsetDecl", "[", "31", "]", "extSubsetDecl", "::", "=", "(", "markupdecl", "|", "conditionalSect", "|", "PEReference", "|", "S", ")", "*" ]
python
train
65.2
aboSamoor/polyglot
polyglot/base.py
https://github.com/aboSamoor/polyglot/blob/d0d2aa8d06cec4e03bd96618ae960030f7069a17/polyglot/base.py#L159-L166
def read(self, size=None): """ Read `size` of bytes.""" if size is None: return self.buf.read() + self.open_file.read() contents = self.buf.read(size) if len(contents) < size: contents += self.open_file.read(size - len(contents)) return contents
[ "def", "read", "(", "self", ",", "size", "=", "None", ")", ":", "if", "size", "is", "None", ":", "return", "self", ".", "buf", ".", "read", "(", ")", "+", "self", ".", "open_file", ".", "read", "(", ")", "contents", "=", "self", ".", "buf", ".", "read", "(", "size", ")", "if", "len", "(", "contents", ")", "<", "size", ":", "contents", "+=", "self", ".", "open_file", ".", "read", "(", "size", "-", "len", "(", "contents", ")", ")", "return", "contents" ]
Read `size` of bytes.
[ "Read", "size", "of", "bytes", "." ]
python
train
33.75
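A self-contained illustration of the read-through behaviour in the record above, using in-memory streams as stand-ins for the pushback buffer (self.buf) and the underlying open file.

import io

class ReadThrough:
    def __init__(self, buffered: bytes, rest: bytes):
        self.buf = io.BytesIO(buffered)        # already-buffered bytes are served first
        self.open_file = io.BytesIO(rest)      # then the underlying file tops up the request

    def read(self, size=None):
        if size is None:
            return self.buf.read() + self.open_file.read()
        contents = self.buf.read(size)
        if len(contents) < size:
            contents += self.open_file.read(size - len(contents))
        return contents

r = ReadThrough(b"abc", b"defgh")
print(r.read(5))   # b'abcde' -- drains the buffer first, then the file
print(r.read())    # b'fgh'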
numenta/htmresearch
htmresearch/frameworks/thalamus/thalamus.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/thalamus/thalamus.py#L326-L343
def _preSynapticTRNCells(self, i, j): """ Given a relay cell at the given coordinate, return a list of the (x,y) coordinates of all TRN cells that project to it. This assumes a 3X3 fan-in. :param i, j: relay cell Coordinates :return: """ xmin = max(i - 1, 0) xmax = min(i + 2, self.trnWidth) ymin = max(j - 1, 0) ymax = min(j + 2, self.trnHeight) trnCells = [ (x, y) for x in range(xmin, xmax) for y in range(ymin, ymax) ] return trnCells
[ "def", "_preSynapticTRNCells", "(", "self", ",", "i", ",", "j", ")", ":", "xmin", "=", "max", "(", "i", "-", "1", ",", "0", ")", "xmax", "=", "min", "(", "i", "+", "2", ",", "self", ".", "trnWidth", ")", "ymin", "=", "max", "(", "j", "-", "1", ",", "0", ")", "ymax", "=", "min", "(", "j", "+", "2", ",", "self", ".", "trnHeight", ")", "trnCells", "=", "[", "(", "x", ",", "y", ")", "for", "x", "in", "range", "(", "xmin", ",", "xmax", ")", "for", "y", "in", "range", "(", "ymin", ",", "ymax", ")", "]", "return", "trnCells" ]
Given a relay cell at the given coordinate, return a list of the (x,y) coordinates of all TRN cells that project to it. This assumes a 3X3 fan-in. :param i, j: relay cell Coordinates :return:
[ "Given", "a", "relay", "cell", "at", "the", "given", "coordinate", "return", "a", "list", "of", "the", "(", "x", "y", ")", "coordinates", "of", "all", "TRN", "cells", "that", "project", "to", "it", ".", "This", "assumes", "a", "3X3", "fan", "-", "in", "." ]
python
train
26.777778
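A standalone version of the clamped 3x3 fan-in computed in the record above; the grid dimensions are placeholders.

def presynaptic_trn_cells(i, j, trn_width, trn_height):
    xmin, xmax = max(i - 1, 0), min(i + 2, trn_width)
    ymin, ymax = max(j - 1, 0), min(j + 2, trn_height)
    return [(x, y) for x in range(xmin, xmax) for y in range(ymin, ymax)]

print(len(presynaptic_trn_cells(5, 5, 32, 32)))   # 9 -- full 3x3 neighbourhood in the interior
print(presynaptic_trn_cells(0, 0, 32, 32))        # [(0, 0), (0, 1), (1, 0), (1, 1)] -- clipped at the corner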
suds-community/suds
suds/store.py
https://github.com/suds-community/suds/blob/6fb0a829337b5037a66c20aae6f89b41acd77e40/suds/store.py#L548-L570
def open(self, url): """ Open a document at the specified URL. The document URL's needs not contain a protocol identifier, and if it does, that protocol identifier is ignored when looking up the store content. Missing documents referenced using the internal 'suds' protocol are reported by raising an exception. For other protocols, None is returned instead. @param url: A document URL. @type url: str @return: Document content or None if not found. @rtype: bytes """ protocol, location = self.__split(url) content = self.__find(location) if protocol == 'suds' and content is None: raise Exception, 'location "%s" not in document store' % location return content
[ "def", "open", "(", "self", ",", "url", ")", ":", "protocol", ",", "location", "=", "self", ".", "__split", "(", "url", ")", "content", "=", "self", ".", "__find", "(", "location", ")", "if", "protocol", "==", "'suds'", "and", "content", "is", "None", ":", "raise", "Exception", ",", "'location \"%s\" not in document store'", "%", "location", "return", "content" ]
Open a document at the specified URL. The document URL's needs not contain a protocol identifier, and if it does, that protocol identifier is ignored when looking up the store content. Missing documents referenced using the internal 'suds' protocol are reported by raising an exception. For other protocols, None is returned instead. @param url: A document URL. @type url: str @return: Document content or None if not found. @rtype: bytes
[ "Open", "a", "document", "at", "the", "specified", "URL", "." ]
python
train
34.434783
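The contract described in the docstring, demonstrated with a self-contained stand-in rather than suds' actual DocumentStore (its private __split/__find helpers are not shown in this record); note the original uses Python 2 raise syntax, while the sketch below is Python 3.

class _MiniStore:
    """Not suds' API: a stand-in that mimics the open() contract above."""
    def __init__(self, documents):
        self._documents = documents                 # maps location -> bytes

    def open(self, url):
        protocol, _, location = url.partition('://')
        content = self._documents.get(location)
        if protocol == 'suds' and content is None:
            raise Exception('location "%s" not in document store' % location)
        return content

store = _MiniStore({'schema.xsd': b'<schema/>'})
assert store.open('suds://schema.xsd') == b'<schema/>'
assert store.open('http://schema.xsd') == b'<schema/>'   # protocol ignored on hits
assert store.open('http://missing.xsd') is None          # non-suds miss: None
# store.open('suds://missing.xsd') would raise, as the docstring describes.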
lpantano/seqcluster
seqcluster/libs/report.py
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/report.py#L64-L71
def _expand(dat, counts, start, end):
    """
    expand the same counts from start to end
    """
    for pos in range(start, end):
        for s in counts:
            dat[s][pos] += counts[s]
    return dat
[ "def", "_expand", "(", "dat", ",", "counts", ",", "start", ",", "end", ")", ":", "for", "pos", "in", "range", "(", "start", ",", "end", ")", ":", "for", "s", "in", "counts", ":", "dat", "[", "s", "]", "[", "pos", "]", "+=", "counts", "[", "s", "]", "return", "dat" ]
expand the same counts from start to end
[ "expand", "the", "same", "counts", "from", "start", "to", "end" ]
python
train
25.25
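A small usage example of _expand as defined above, with made-up sample names and positions, showing how each sample's count is smeared across the [start, end) interval.

dat = {"sample_a": [0, 0, 0, 0, 0], "sample_b": [0, 0, 0, 0, 0]}
counts = {"sample_a": 5, "sample_b": 2}
dat = _expand(dat, counts, start=1, end=4)
assert dat["sample_a"] == [0, 5, 5, 5, 0]
assert dat["sample_b"] == [0, 2, 2, 2, 0]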
mbedmicro/pyOCD
pyocd/probe/pydapaccess/dap_access_cmsis_dap.py
https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/probe/pydapaccess/dap_access_cmsis_dap.py#L1010-L1025
def _abort_all_transfers(self, exception):
    """
    Abort any ongoing transfers and clear all buffers
    """
    pending_reads = len(self._commands_to_read)

    # invalidate _transfer_list
    for transfer in self._transfer_list:
        transfer.add_error(exception)

    # clear all deferred buffers
    self._init_deferred_buffers()

    # finish all pending reads and ignore the data
    # Only do this if the error is a tranfer error.
    # Otherwise this could cause another exception
    if isinstance(exception, DAPAccessIntf.TransferError):
        for _ in range(pending_reads):
            self._interface.read()
[ "def", "_abort_all_transfers", "(", "self", ",", "exception", ")", ":", "pending_reads", "=", "len", "(", "self", ".", "_commands_to_read", ")", "# invalidate _transfer_list", "for", "transfer", "in", "self", ".", "_transfer_list", ":", "transfer", ".", "add_error", "(", "exception", ")", "# clear all deferred buffers", "self", ".", "_init_deferred_buffers", "(", ")", "# finish all pending reads and ignore the data", "# Only do this if the error is a tranfer error.", "# Otherwise this could cause another exception", "if", "isinstance", "(", "exception", ",", "DAPAccessIntf", ".", "TransferError", ")", ":", "for", "_", "in", "range", "(", "pending_reads", ")", ":", "self", ".", "_interface", ".", "read", "(", ")" ]
Abort any ongoing transfers and clear all buffers
[ "Abort", "any", "ongoing", "transfers", "and", "clear", "all", "buffers" ]
python
train
41.875
anthill/koala
koala/ast/__init__.py
https://github.com/anthill/koala/blob/393089fe081380506e73235db18a32b4e078d222/koala/ast/__init__.py#L271-L320
def build_ast(expression, debug = False):
    """build an AST from an Excel formula expression in reverse polish notation"""

    #use a directed graph to store the tree
    G = DiGraph()
    stack = []

    for n in expression:
        # Since the graph does not maintain the order of adding nodes/edges
        # add an extra attribute 'pos' so we can always sort to the correct order
        if isinstance(n,OperatorNode):
            if n.ttype == "operator-infix":
                arg2 = stack.pop()
                arg1 = stack.pop()
                # Hack to write the name of sheet in 2argument address
                if(n.tvalue == ':'):
                    if '!' in arg1.tvalue and arg2.ttype == 'operand' and '!' not in arg2.tvalue:
                        arg2.tvalue = arg1.tvalue.split('!')[0] + '!' + arg2.tvalue
                G.add_node(arg1,pos = 1)
                G.add_node(arg2,pos = 2)
                G.add_edge(arg1, n)
                G.add_edge(arg2, n)
            else:
                arg1 = stack.pop()
                G.add_node(arg1,pos = 1)
                G.add_edge(arg1, n)
        elif isinstance(n,FunctionNode):
            args = []
            for _ in range(n.num_args):
                try:
                    args.append(stack.pop())
                except:
                    raise Exception()
            #try:
            #    args = [stack.pop() for _ in range(n.num_args)]
            #except:
            #    print 'STACK', stack, type(n)
            #    raise Exception('prut')
            args.reverse()
            for i,a in enumerate(args):
                G.add_node(a,pos = i)
                G.add_edge(a,n)
        else:
            G.add_node(n,pos=0)
        stack.append(n)

    return G,stack.pop()
[ "def", "build_ast", "(", "expression", ",", "debug", "=", "False", ")", ":", "#use a directed graph to store the tree", "G", "=", "DiGraph", "(", ")", "stack", "=", "[", "]", "for", "n", "in", "expression", ":", "# Since the graph does not maintain the order of adding nodes/edges", "# add an extra attribute 'pos' so we can always sort to the correct order", "if", "isinstance", "(", "n", ",", "OperatorNode", ")", ":", "if", "n", ".", "ttype", "==", "\"operator-infix\"", ":", "arg2", "=", "stack", ".", "pop", "(", ")", "arg1", "=", "stack", ".", "pop", "(", ")", "# Hack to write the name of sheet in 2argument address", "if", "(", "n", ".", "tvalue", "==", "':'", ")", ":", "if", "'!'", "in", "arg1", ".", "tvalue", "and", "arg2", ".", "ttype", "==", "'operand'", "and", "'!'", "not", "in", "arg2", ".", "tvalue", ":", "arg2", ".", "tvalue", "=", "arg1", ".", "tvalue", ".", "split", "(", "'!'", ")", "[", "0", "]", "+", "'!'", "+", "arg2", ".", "tvalue", "G", ".", "add_node", "(", "arg1", ",", "pos", "=", "1", ")", "G", ".", "add_node", "(", "arg2", ",", "pos", "=", "2", ")", "G", ".", "add_edge", "(", "arg1", ",", "n", ")", "G", ".", "add_edge", "(", "arg2", ",", "n", ")", "else", ":", "arg1", "=", "stack", ".", "pop", "(", ")", "G", ".", "add_node", "(", "arg1", ",", "pos", "=", "1", ")", "G", ".", "add_edge", "(", "arg1", ",", "n", ")", "elif", "isinstance", "(", "n", ",", "FunctionNode", ")", ":", "args", "=", "[", "]", "for", "_", "in", "range", "(", "n", ".", "num_args", ")", ":", "try", ":", "args", ".", "append", "(", "stack", ".", "pop", "(", ")", ")", "except", ":", "raise", "Exception", "(", ")", "#try:", "# args = [stack.pop() for _ in range(n.num_args)]", "#except:", "# print 'STACK', stack, type(n)", "# raise Exception('prut')", "args", ".", "reverse", "(", ")", "for", "i", ",", "a", "in", "enumerate", "(", "args", ")", ":", "G", ".", "add_node", "(", "a", ",", "pos", "=", "i", ")", "G", ".", "add_edge", "(", "a", ",", "n", ")", "else", ":", "G", ".", "add_node", "(", "n", ",", "pos", "=", "0", ")", "stack", ".", "append", "(", "n", ")", "return", "G", ",", "stack", ".", "pop", "(", ")" ]
build an AST from an Excel formula expression in reverse polish notation
[ "build", "an", "AST", "from", "an", "Excel", "formula", "expression", "in", "reverse", "polish", "notation" ]
python
train
34.64
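A simplified, self-contained sketch of the same idea for readers unfamiliar with koala's node classes: plain (token, arity) tuples replace OperatorNode/FunctionNode, and networkx's DiGraph is assumed, matching the DiGraph used above.

from networkx import DiGraph

def rpn_to_graph(rpn_tokens):
    G, stack = DiGraph(), []
    for tok, arity in rpn_tokens:              # (token, number of arguments)
        if arity == 0:
            G.add_node(tok, pos=0)             # operand: a leaf of the tree
        else:
            args = [stack.pop() for _ in range(arity)][::-1]
            for pos, arg in enumerate(args):
                G.add_node(arg, pos=pos)
                G.add_edge(arg, tok)           # argument feeds the operator
        stack.append(tok)
    return G, stack.pop()

# "A1 + B1" in reverse polish notation: operands first, then the operator.
graph, root = rpn_to_graph([("A1", 0), ("B1", 0), ("+", 2)])
assert root == "+" and set(graph.predecessors("+")) == {"A1", "B1"}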
minhhoit/yacms
yacms/core/templatetags/yacms_tags.py
https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/core/templatetags/yacms_tags.py#L703-L729
def translate_url(context, language):
    """
    Translates the current URL for the given language code, eg:

        {% translate_url de %}
    """
    try:
        request = context["request"]
    except KeyError:
        return ""
    view = resolve(request.path)
    current_language = translation.get_language()
    translation.activate(language)
    try:
        url = reverse(view.func, args=view.args, kwargs=view.kwargs)
    except NoReverseMatch:
        try:
            url_name = (view.url_name if not view.namespace
                        else '%s:%s' % (view.namespace, view.url_name))
            url = reverse(url_name, args=view.args, kwargs=view.kwargs)
        except NoReverseMatch:
            url_name = "admin:" + view.url_name
            url = reverse(url_name, args=view.args, kwargs=view.kwargs)
    translation.activate(current_language)
    if context['request'].META["QUERY_STRING"]:
        url += "?" + context['request'].META["QUERY_STRING"]
    return url
[ "def", "translate_url", "(", "context", ",", "language", ")", ":", "try", ":", "request", "=", "context", "[", "\"request\"", "]", "except", "KeyError", ":", "return", "\"\"", "view", "=", "resolve", "(", "request", ".", "path", ")", "current_language", "=", "translation", ".", "get_language", "(", ")", "translation", ".", "activate", "(", "language", ")", "try", ":", "url", "=", "reverse", "(", "view", ".", "func", ",", "args", "=", "view", ".", "args", ",", "kwargs", "=", "view", ".", "kwargs", ")", "except", "NoReverseMatch", ":", "try", ":", "url_name", "=", "(", "view", ".", "url_name", "if", "not", "view", ".", "namespace", "else", "'%s:%s'", "%", "(", "view", ".", "namespace", ",", "view", ".", "url_name", ")", ")", "url", "=", "reverse", "(", "url_name", ",", "args", "=", "view", ".", "args", ",", "kwargs", "=", "view", ".", "kwargs", ")", "except", "NoReverseMatch", ":", "url_name", "=", "\"admin:\"", "+", "view", ".", "url_name", "url", "=", "reverse", "(", "url_name", ",", "args", "=", "view", ".", "args", ",", "kwargs", "=", "view", ".", "kwargs", ")", "translation", ".", "activate", "(", "current_language", ")", "if", "context", "[", "'request'", "]", ".", "META", "[", "\"QUERY_STRING\"", "]", ":", "url", "+=", "\"?\"", "+", "context", "[", "'request'", "]", ".", "META", "[", "\"QUERY_STRING\"", "]", "return", "url" ]
Translates the current URL for the given language code, eg: {% translate_url de %}
[ "Translates", "the", "current", "URL", "for", "the", "given", "language", "code", "eg", ":" ]
python
train
35.777778
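The heart of the tag is the activate-reverse-restore dance; a minimal standalone version is sketched below, assuming a configured Django project and a modern Django where reverse lives in django.urls (older releases, like the one yacms targets, import it from django.core.urlresolvers). The view name in the comment is hypothetical.

from django.urls import reverse
from django.utils import translation

def url_in_language(view_name, language, **kwargs):
    # Temporarily switch the active language so reverse() emits a
    # locale-prefixed URL, then restore whatever was active before.
    current = translation.get_language()
    translation.activate(language)
    try:
        return reverse(view_name, kwargs=kwargs)
    finally:
        translation.activate(current)

# Hypothetical usage, with i18n_patterns in play:
#   url_in_language("blog_post_list", "de")  ->  "/de/blog/"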
googleapis/google-cloud-python
bigtable/google/cloud/bigtable/row.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigtable/google/cloud/bigtable/row.py#L870-L918
def commit(self):
    """Makes a ``ReadModifyWriteRow`` API request.

    This commits modifications made by :meth:`append_cell_value` and
    :meth:`increment_cell_value`. If no modifications were made, makes
    no API request and just returns ``{}``.

    Modifies a row atomically, reading the latest existing
    timestamp / value from the specified columns and writing a new value
    by appending / incrementing. The new cell created uses either the
    current server time or the highest timestamp of a cell in that column
    (if it exceeds the server time).

    After committing the accumulated mutations, resets the local mutations.

    For example:

    .. literalinclude:: snippets_table.py
        :start-after: [START bigtable_row_commit]
        :end-before: [END bigtable_row_commit]

    :rtype: dict
    :returns: The new contents of all modified cells. Returned as a
              dictionary of column families, each of which holds a
              dictionary of columns. Each column contains a list of cells
              modified. Each cell is represented with a two-tuple with the
              value (in bytes) and the timestamp for the cell.
    :raises: :class:`ValueError <exceptions.ValueError>` if the number of
             mutations exceeds the :data:`MAX_MUTATIONS`.
    """
    num_mutations = len(self._rule_pb_list)
    if num_mutations == 0:
        return {}
    if num_mutations > MAX_MUTATIONS:
        raise ValueError(
            "%d total append mutations exceed the maximum "
            "allowable %d." % (num_mutations, MAX_MUTATIONS)
        )
    data_client = self._table._instance._client.table_data_client
    row_response = data_client.read_modify_write_row(
        table_name=self._table.name, row_key=self._row_key, rules=self._rule_pb_list
    )

    # Reset modifications after commit-ing request.
    self.clear()

    # NOTE: We expect row_response.key == self._row_key but don't check.
    return _parse_rmw_row_response(row_response)
[ "def", "commit", "(", "self", ")", ":", "num_mutations", "=", "len", "(", "self", ".", "_rule_pb_list", ")", "if", "num_mutations", "==", "0", ":", "return", "{", "}", "if", "num_mutations", ">", "MAX_MUTATIONS", ":", "raise", "ValueError", "(", "\"%d total append mutations exceed the maximum \"", "\"allowable %d.\"", "%", "(", "num_mutations", ",", "MAX_MUTATIONS", ")", ")", "data_client", "=", "self", ".", "_table", ".", "_instance", ".", "_client", ".", "table_data_client", "row_response", "=", "data_client", ".", "read_modify_write_row", "(", "table_name", "=", "self", ".", "_table", ".", "name", ",", "row_key", "=", "self", ".", "_row_key", ",", "rules", "=", "self", ".", "_rule_pb_list", ")", "# Reset modifications after commit-ing request.", "self", ".", "clear", "(", ")", "# NOTE: We expect row_response.key == self._row_key but don't check.", "return", "_parse_rmw_row_response", "(", "row_response", ")" ]
Makes a ``ReadModifyWriteRow`` API request. This commits modifications made by :meth:`append_cell_value` and :meth:`increment_cell_value`. If no modifications were made, makes no API request and just returns ``{}``. Modifies a row atomically, reading the latest existing timestamp / value from the specified columns and writing a new value by appending / incrementing. The new cell created uses either the current server time or the highest timestamp of a cell in that column (if it exceeds the server time). After committing the accumulated mutations, resets the local mutations. For example: .. literalinclude:: snippets_table.py :start-after: [START bigtable_row_commit] :end-before: [END bigtable_row_commit] :rtype: dict :returns: The new contents of all modified cells. Returned as a dictionary of column families, each of which holds a dictionary of columns. Each column contains a list of cells modified. Each cell is represented with a two-tuple with the value (in bytes) and the timestamp for the cell. :raises: :class:`ValueError <exceptions.ValueError>` if the number of mutations exceeds the :data:`MAX_MUTATIONS`.
[ "Makes", "a", "ReadModifyWriteRow", "API", "request", "." ]
python
train
42.938776
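An illustrative end-to-end use of commit(), loosely following the bigtable_row_commit snippet the docstring points at; it needs real GCP credentials and an existing table, so the project, instance, table, column family and row key below are placeholders.

from google.cloud import bigtable
from google.cloud.bigtable.row import AppendRow

client = bigtable.Client(project="my-project", admin=True)
table = client.instance("my-instance").table("my-table")

row = AppendRow(b"phone#4c410523", table)              # the class defined above
row.increment_cell_value("stats_summary", "views", 1)
row.append_cell_value("stats_summary", "note", b"!")
modified_cells = row.commit()   # {} if nothing was queued, per the docstring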
ucsb-cs/submit
submit/models.py
https://github.com/ucsb-cs/submit/blob/92810c81255a4fc6bbebac1ac8aae856fd576ffe/submit/models.py#L613-L616
def most_recent_submission(project, group):
    """Return the most recent submission for the user and project id."""
    return (Submission.query_by(project=project, group=group)
            .order_by(Submission.created_at.desc()).first())
[ "def", "most_recent_submission", "(", "project", ",", "group", ")", ":", "return", "(", "Submission", ".", "query_by", "(", "project", "=", "project", ",", "group", "=", "group", ")", ".", "order_by", "(", "Submission", ".", "created_at", ".", "desc", "(", ")", ")", ".", "first", "(", ")", ")" ]
Return the most recent submission for the user and project id.
[ "Return", "the", "most", "recent", "submission", "for", "the", "user", "and", "project", "id", "." ]
python
train
62
oscarbranson/latools
latools/D_obj.py
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/D_obj.py#L409-L427
def bkg_subtract(self, analyte, bkg, ind=None, focus_stage='despiked'):
    """
    Subtract provided background from signal (focus stage).

    Results is saved in new 'bkgsub' focus stage

    Returns
    -------
    None
    """
    if 'bkgsub' not in self.data.keys():
        self.data['bkgsub'] = Bunch()

    self.data['bkgsub'][analyte] = self.data[focus_stage][analyte] - bkg

    if ind is not None:
        self.data['bkgsub'][analyte][ind] = np.nan

    return
[ "def", "bkg_subtract", "(", "self", ",", "analyte", ",", "bkg", ",", "ind", "=", "None", ",", "focus_stage", "=", "'despiked'", ")", ":", "if", "'bkgsub'", "not", "in", "self", ".", "data", ".", "keys", "(", ")", ":", "self", ".", "data", "[", "'bkgsub'", "]", "=", "Bunch", "(", ")", "self", ".", "data", "[", "'bkgsub'", "]", "[", "analyte", "]", "=", "self", ".", "data", "[", "focus_stage", "]", "[", "analyte", "]", "-", "bkg", "if", "ind", "is", "not", "None", ":", "self", ".", "data", "[", "'bkgsub'", "]", "[", "analyte", "]", "[", "ind", "]", "=", "np", ".", "nan", "return" ]
Subtract provided background from signal (focus stage). Results is saved in new 'bkgsub' focus stage Returns ------- None
[ "Subtract", "provided", "background", "from", "signal", "(", "focus", "stage", ")", "." ]
python
test
26.631579
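The numerical operation underneath bkg_subtract, shown standalone with made-up signal and background arrays: subtract the background, then NaN-mask the indices passed as ind.

import numpy as np

signal = np.array([10., 12., 11., 50., 10.])        # made-up analyte trace
bkg = np.full(5, 9.)                                 # made-up background level
ind = np.array([False, False, False, True, False])   # e.g. a spike to exclude

bkgsub = signal - bkg
bkgsub[ind] = np.nan
print(bkgsub)   # [ 1.  3.  2. nan  1.]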
googleapis/google-cloud-python
logging/google/cloud/logging/_http.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/logging/google/cloud/logging/_http.py#L287-L322
def sink_update(
    self, project, sink_name, filter_, destination, unique_writer_identity=False
):
    """API call: update a sink resource.

    See
    https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/update

    :type project: str
    :param project: ID of the project containing the sink.

    :type sink_name: str
    :param sink_name: the name of the sink

    :type filter_: str
    :param filter_: the advanced logs filter expression defining the
                    entries exported by the sink.

    :type destination: str
    :param destination: destination URI for the entries exported by
                        the sink.

    :type unique_writer_identity: bool
    :param unique_writer_identity: (Optional) determines the kind of
                                   IAM identity returned as
                                   writer_identity in the new sink.

    :rtype: dict
    :returns: The returned (updated) resource.
    """
    target = "/projects/%s/sinks/%s" % (project, sink_name)
    data = {"name": sink_name, "filter": filter_, "destination": destination}
    query_params = {"uniqueWriterIdentity": unique_writer_identity}
    return self.api_request(
        method="PUT", path=target, query_params=query_params, data=data
    )
[ "def", "sink_update", "(", "self", ",", "project", ",", "sink_name", ",", "filter_", ",", "destination", ",", "unique_writer_identity", "=", "False", ")", ":", "target", "=", "\"/projects/%s/sinks/%s\"", "%", "(", "project", ",", "sink_name", ")", "data", "=", "{", "\"name\"", ":", "sink_name", ",", "\"filter\"", ":", "filter_", ",", "\"destination\"", ":", "destination", "}", "query_params", "=", "{", "\"uniqueWriterIdentity\"", ":", "unique_writer_identity", "}", "return", "self", ".", "api_request", "(", "method", "=", "\"PUT\"", ",", "path", "=", "target", ",", "query_params", "=", "query_params", ",", "data", "=", "data", ")" ]
API call: update a sink resource. See https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/update :type project: str :param project: ID of the project containing the sink. :type sink_name: str :param sink_name: the name of the sink :type filter_: str :param filter_: the advanced logs filter expression defining the entries exported by the sink. :type destination: str :param destination: destination URI for the entries exported by the sink. :type unique_writer_identity: bool :param unique_writer_identity: (Optional) determines the kind of IAM identity returned as writer_identity in the new sink. :rtype: dict :returns: The returned (updated) resource.
[ "API", "call", ":", "update", "a", "sink", "resource", "." ]
python
train
37.805556
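An illustrative view of the request the helper above builds: with made-up project and sink values, the PUT goes to the path below with the sink definition as the body and uniqueWriterIdentity as a query parameter.

project, sink_name = "my-project", "error-export"          # placeholders
filter_ = "severity>=ERROR"
destination = "storage.googleapis.com/my-log-bucket"

target = "/projects/%s/sinks/%s" % (project, sink_name)
data = {"name": sink_name, "filter": filter_, "destination": destination}
query_params = {"uniqueWriterIdentity": False}

# The helper then issues:
#   self.api_request(method="PUT", path=target, query_params=query_params, data=data)
print(target)   # /projects/my-project/sinks/error-export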
IdentityPython/pyop
src/pyop/provider.py
https://github.com/IdentityPython/pyop/blob/7b1385964f079c39752fce5f2dbcf458b8a92e56/src/pyop/provider.py#L116-L131
def parse_authentication_request(self, request_body, http_headers=None):
    # type: (str, Optional[Mapping[str, str]]) -> oic.oic.message.AuthorizationRequest
    """
    Parses and verifies an authentication request.

    :param request_body: urlencoded authentication request
    :param http_headers: http headers
    """
    auth_req = AuthorizationRequest().deserialize(request_body)

    for validator in self.authentication_request_validators:
        validator(auth_req)

    logger.debug('parsed authentication_request: %s', auth_req)
    return auth_req
[ "def", "parse_authentication_request", "(", "self", ",", "request_body", ",", "http_headers", "=", "None", ")", ":", "# type: (str, Optional[Mapping[str, str]]) -> oic.oic.message.AuthorizationRequest", "auth_req", "=", "AuthorizationRequest", "(", ")", ".", "deserialize", "(", "request_body", ")", "for", "validator", "in", "self", ".", "authentication_request_validators", ":", "validator", "(", "auth_req", ")", "logger", ".", "debug", "(", "'parsed authentication_request: %s'", ",", "auth_req", ")", "return", "auth_req" ]
Parses and verifies an authentication request. :param request_body: urlencoded authentication request :param http_headers: http headers
[ "Parses", "and", "verifies", "an", "authentication", "request", "." ]
python
train
37.0625
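A small hedged example of the deserialization step, using pyoidc's AuthorizationRequest directly on a made-up urlencoded body; the validators the method runs are provider-specific and omitted here.

from oic.oic.message import AuthorizationRequest

body = ("response_type=code&client_id=client1"
        "&redirect_uri=https%3A%2F%2Fclient.example.com%2Fcb&scope=openid")
auth_req = AuthorizationRequest().deserialize(body)   # urlencoded is the default
print(auth_req["client_id"])    # client1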
Yubico/python-pyhsm
pyhsm/tools/decrypt_aead.py
https://github.com/Yubico/python-pyhsm/blob/b6e2744d1ea15c352a0fc1d6ebc5950026b71311/pyhsm/tools/decrypt_aead.py#L264-L277
def walk_dir(path, args, state):
    """
    Check all files in `path' to see if there is any requests that
    we should send out on the bus.
    """
    if args.debug:
        sys.stderr.write("Walking %s\n" % path)

    for root, _dirs, files in os.walk(path):
        if not safe_process_files(root, files, args, state):
            return False
        if state.should_quit():
            return False
    return True
[ "def", "walk_dir", "(", "path", ",", "args", ",", "state", ")", ":", "if", "args", ".", "debug", ":", "sys", ".", "stderr", ".", "write", "(", "\"Walking %s\\n\"", "%", "path", ")", "for", "root", ",", "_dirs", ",", "files", "in", "os", ".", "walk", "(", "path", ")", ":", "if", "not", "safe_process_files", "(", "root", ",", "files", ",", "args", ",", "state", ")", ":", "return", "False", "if", "state", ".", "should_quit", "(", ")", ":", "return", "False", "return", "True" ]
Check all files in `path' to see if there is any requests that we should send out on the bus.
[ "Check", "all", "files", "in", "path", "to", "see", "if", "there", "is", "any", "requests", "that", "we", "should", "send", "out", "on", "the", "bus", "." ]
python
train
29.214286
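A stripped-down, self-contained variant of the traversal above: the bus-specific safe_process_files/state machinery is replaced by a plain callback (process_files here is a hypothetical stand-in).

import os
import sys

def walk_dir_simple(path, process_files, debug=False):
    if debug:
        sys.stderr.write("Walking %s\n" % path)
    for root, _dirs, files in os.walk(path):
        if not process_files(root, files):   # stop early on failure
            return False
    return True

# List every file under /tmp while never aborting the walk:
walk_dir_simple("/tmp", lambda root, files: print(root, files) or True)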
mila-iqia/fuel
fuel/converters/ilsvrc2010.py
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/converters/ilsvrc2010.py#L243-L280
def train_set_producer(socket, train_archive, patch_archive, wnid_map):
    """Load/send images from the training set TAR file or patch images.

    Parameters
    ----------
    socket : :class:`zmq.Socket`
        PUSH socket on which to send loaded images.
    train_archive : str or file-like object
        Filename or file handle for the TAR archive of training images.
    patch_archive : str or file-like object
        Filename or file handle for the TAR archive of patch images.
    wnid_map : dict
        A dictionary that maps WordNet IDs to 0-based class indices.
        Used to decode the filenames of the inner TAR files.

    """
    patch_images = extract_patch_images(patch_archive, 'train')
    num_patched = 0
    with tar_open(train_archive) as tar:
        for inner_tar_info in tar:
            with tar_open(tar.extractfile(inner_tar_info.name)) as inner:
                wnid = inner_tar_info.name.split('.')[0]
                class_index = wnid_map[wnid]
                filenames = sorted(info.name for info in inner
                                   if info.isfile())
                images_gen = (load_from_tar_or_patch(inner, filename,
                                                     patch_images)
                              for filename in filenames)
                pathless_filenames = (os.path.split(fn)[-1]
                                      for fn in filenames)
                stream = equizip(pathless_filenames, images_gen)
                for image_fn, (image_data, patched) in stream:
                    if patched:
                        num_patched += 1
                    socket.send_pyobj((image_fn, class_index), zmq.SNDMORE)
                    socket.send(image_data)
    if num_patched != len(patch_images):
        raise ValueError('not all patch images were used')
[ "def", "train_set_producer", "(", "socket", ",", "train_archive", ",", "patch_archive", ",", "wnid_map", ")", ":", "patch_images", "=", "extract_patch_images", "(", "patch_archive", ",", "'train'", ")", "num_patched", "=", "0", "with", "tar_open", "(", "train_archive", ")", "as", "tar", ":", "for", "inner_tar_info", "in", "tar", ":", "with", "tar_open", "(", "tar", ".", "extractfile", "(", "inner_tar_info", ".", "name", ")", ")", "as", "inner", ":", "wnid", "=", "inner_tar_info", ".", "name", ".", "split", "(", "'.'", ")", "[", "0", "]", "class_index", "=", "wnid_map", "[", "wnid", "]", "filenames", "=", "sorted", "(", "info", ".", "name", "for", "info", "in", "inner", "if", "info", ".", "isfile", "(", ")", ")", "images_gen", "=", "(", "load_from_tar_or_patch", "(", "inner", ",", "filename", ",", "patch_images", ")", "for", "filename", "in", "filenames", ")", "pathless_filenames", "=", "(", "os", ".", "path", ".", "split", "(", "fn", ")", "[", "-", "1", "]", "for", "fn", "in", "filenames", ")", "stream", "=", "equizip", "(", "pathless_filenames", ",", "images_gen", ")", "for", "image_fn", ",", "(", "image_data", ",", "patched", ")", "in", "stream", ":", "if", "patched", ":", "num_patched", "+=", "1", "socket", ".", "send_pyobj", "(", "(", "image_fn", ",", "class_index", ")", ",", "zmq", ".", "SNDMORE", ")", "socket", ".", "send", "(", "image_data", ")", "if", "num_patched", "!=", "len", "(", "patch_images", ")", ":", "raise", "ValueError", "(", "'not all patch images were used'", ")" ]
Load/send images from the training set TAR file or patch images. Parameters ---------- socket : :class:`zmq.Socket` PUSH socket on which to send loaded images. train_archive : str or file-like object Filename or file handle for the TAR archive of training images. patch_archive : str or file-like object Filename or file handle for the TAR archive of patch images. wnid_map : dict A dictionary that maps WordNet IDs to 0-based class indices. Used to decode the filenames of the inner TAR files.
[ "Load", "/", "send", "images", "from", "the", "training", "set", "TAR", "file", "or", "patch", "images", "." ]
python
train
47.315789
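The ZeroMQ wire format used above, isolated into a runnable pair of sockets: metadata goes out with send_pyobj and SNDMORE, followed by the raw image bytes (the filename and bytes below are made up).

import zmq

ctx = zmq.Context.instance()
push, pull = ctx.socket(zmq.PUSH), ctx.socket(zmq.PULL)
port = push.bind_to_random_port("tcp://127.0.0.1")
pull.connect("tcp://127.0.0.1:%d" % port)

# Same two-part message as the producer: (filename, class_index), then bytes.
push.send_pyobj(("n01440764_18.JPEG", 0), zmq.SNDMORE)
push.send(b"\xff\xd8 fake jpeg bytes")

assert pull.recv_pyobj() == ("n01440764_18.JPEG", 0)
image_bytes = pull.recv()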