Dataset columns (name, type, and observed length range):

  repo              string, length 7 to 54
  path              string, length 4 to 192
  url               string, length 87 to 284
  code              string, length 78 to 104k
  code_tokens       sequence
  docstring         string, length 1 to 46.9k
  docstring_tokens  sequence
  language          string, 1 distinct value
  partition         string, 3 distinct values
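The records below are preview rows following this schema, one field per line in column order. As a minimal sketch of how such a dataset might be consumed, assuming it is hosted on the Hugging Face Hub and read with the `datasets` library (the dataset identifier "org/code-docstring-pairs" is a placeholder, not the real name):

    # Minimal sketch, assuming availability through the Hugging Face `datasets` library;
    # "org/code-docstring-pairs" is a placeholder identifier for illustration only.
    from datasets import load_dataset

    # Stream the training partition so the large `code` fields (up to ~104k characters)
    # are not all loaded into memory at once.
    ds = load_dataset("org/code-docstring-pairs", split="train", streaming=True)

    for example in ds:
        # Each record pairs a function's source code with its docstring and token lists.
        print(example["repo"], example["path"])
        print(example["docstring"][:80])
        break

Streaming is only one option; for a small split, loading eagerly and converting to a pandas DataFrame works just as well.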
juju/charm-helpers
charmhelpers/fetch/ubuntu.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/fetch/ubuntu.py#L259-L267
def apt_purge(packages, fatal=False): """Purge one or more packages.""" cmd = ['apt-get', '--assume-yes', 'purge'] if isinstance(packages, six.string_types): cmd.append(packages) else: cmd.extend(packages) log("Purging {}".format(packages)) _run_apt_command(cmd, fatal)
[ "def", "apt_purge", "(", "packages", ",", "fatal", "=", "False", ")", ":", "cmd", "=", "[", "'apt-get'", ",", "'--assume-yes'", ",", "'purge'", "]", "if", "isinstance", "(", "packages", ",", "six", ".", "string_types", ")", ":", "cmd", ".", "append", "(", "packages", ")", "else", ":", "cmd", ".", "extend", "(", "packages", ")", "log", "(", "\"Purging {}\"", ".", "format", "(", "packages", ")", ")", "_run_apt_command", "(", "cmd", ",", "fatal", ")" ]
Purge one or more packages.
[ "Purge", "one", "or", "more", "packages", "." ]
python
train
rene-aguirre/pywinusb
pywinusb/hid/helpers.py
https://github.com/rene-aguirre/pywinusb/blob/954c4b2105d9f01cb0c50e24500bb747d4ecdc43/pywinusb/hid/helpers.py#L56-L71
def synchronized(lock): """ Synchronization decorator. Allos to set a mutex on any function """ @simple_decorator def wrap(function_target): """Decorator wrapper""" def new_function(*args, **kw): """Decorated function with Mutex""" lock.acquire() try: return function_target(*args, **kw) finally: lock.release() return new_function return wrap
[ "def", "synchronized", "(", "lock", ")", ":", "@", "simple_decorator", "def", "wrap", "(", "function_target", ")", ":", "\"\"\"Decorator wrapper\"\"\"", "def", "new_function", "(", "*", "args", ",", "*", "*", "kw", ")", ":", "\"\"\"Decorated function with Mutex\"\"\"", "lock", ".", "acquire", "(", ")", "try", ":", "return", "function_target", "(", "*", "args", ",", "*", "*", "kw", ")", "finally", ":", "lock", ".", "release", "(", ")", "return", "new_function", "return", "wrap" ]
Synchronization decorator. Allos to set a mutex on any function
[ "Synchronization", "decorator", ".", "Allos", "to", "set", "a", "mutex", "on", "any", "function" ]
python
train
Jammy2211/PyAutoLens
autolens/pipeline/phase.py
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/pipeline/phase.py#L1485-L1517
def run(self, data, results=None, mask=None, positions=None): """ Run a fit for each galaxy from the previous phase. Parameters ---------- data: LensData results: ResultsCollection Results from all previous phases mask: Mask The mask positions Returns ------- results: HyperGalaxyResults A collection of results, with one item per a galaxy """ model_image = results.last.unmasked_model_image galaxy_tuples = results.last.constant.name_instance_tuples_for_class(g.Galaxy) results_copy = copy.copy(results.last) for name, galaxy in galaxy_tuples: optimizer = self.optimizer.copy_with_name_extension(name) optimizer.variable.hyper_galaxy = g.HyperGalaxy galaxy_image = results.last.unmasked_image_for_galaxy(galaxy) optimizer.fit(self.__class__.Analysis(data, model_image, galaxy_image)) getattr(results_copy.variable, name).hyper_galaxy = optimizer.variable.hyper_galaxy getattr(results_copy.constant, name).hyper_galaxy = optimizer.constant.hyper_galaxy return results_copy
[ "def", "run", "(", "self", ",", "data", ",", "results", "=", "None", ",", "mask", "=", "None", ",", "positions", "=", "None", ")", ":", "model_image", "=", "results", ".", "last", ".", "unmasked_model_image", "galaxy_tuples", "=", "results", ".", "last", ".", "constant", ".", "name_instance_tuples_for_class", "(", "g", ".", "Galaxy", ")", "results_copy", "=", "copy", ".", "copy", "(", "results", ".", "last", ")", "for", "name", ",", "galaxy", "in", "galaxy_tuples", ":", "optimizer", "=", "self", ".", "optimizer", ".", "copy_with_name_extension", "(", "name", ")", "optimizer", ".", "variable", ".", "hyper_galaxy", "=", "g", ".", "HyperGalaxy", "galaxy_image", "=", "results", ".", "last", ".", "unmasked_image_for_galaxy", "(", "galaxy", ")", "optimizer", ".", "fit", "(", "self", ".", "__class__", ".", "Analysis", "(", "data", ",", "model_image", ",", "galaxy_image", ")", ")", "getattr", "(", "results_copy", ".", "variable", ",", "name", ")", ".", "hyper_galaxy", "=", "optimizer", ".", "variable", ".", "hyper_galaxy", "getattr", "(", "results_copy", ".", "constant", ",", "name", ")", ".", "hyper_galaxy", "=", "optimizer", ".", "constant", ".", "hyper_galaxy", "return", "results_copy" ]
Run a fit for each galaxy from the previous phase. Parameters ---------- data: LensData results: ResultsCollection Results from all previous phases mask: Mask The mask positions Returns ------- results: HyperGalaxyResults A collection of results, with one item per a galaxy
[ "Run", "a", "fit", "for", "each", "galaxy", "from", "the", "previous", "phase", "." ]
python
valid
pip-services3-python/pip-services3-commons-python
pip_services3_commons/run/Opener.py
https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/run/Opener.py#L36-L54
def is_opened(components): """ Checks if all components are opened. To be checked components must implement [[IOpenable]] interface. If they don't the call to this method returns true. :param components: a list of components that are to be checked. :return: true if all components are opened and false if at least one component is closed. """ if components == None: return True result = True for component in components: result = result and Opener.is_opened_one(component) return result
[ "def", "is_opened", "(", "components", ")", ":", "if", "components", "==", "None", ":", "return", "True", "result", "=", "True", "for", "component", "in", "components", ":", "result", "=", "result", "and", "Opener", ".", "is_opened_one", "(", "component", ")", "return", "result" ]
Checks if all components are opened. To be checked components must implement [[IOpenable]] interface. If they don't the call to this method returns true. :param components: a list of components that are to be checked. :return: true if all components are opened and false if at least one component is closed.
[ "Checks", "if", "all", "components", "are", "opened", "." ]
python
train
TestInABox/stackInABox
stackinabox/util/responses/core.py
https://github.com/TestInABox/stackInABox/blob/63ee457401e9a88d987f85f513eb512dcb12d984/stackinabox/util/responses/core.py#L44-L78
def registration(uri): """Responses handler registration. Registers a handler for a given URI with Responses so that it can be intercepted and handed to Stack-In-A-Box. :param uri: URI used for the base of the HTTP requests :returns: n/a """ # log the URI that is used to access the Stack-In-A-Box services logger.debug('Registering Stack-In-A-Box at {0} under Python Responses' .format(uri)) # tell Stack-In-A-Box what URI to match with StackInABox.update_uri(uri) # Build the regex for the URI and register all HTTP verbs # with Responses regex = re.compile('(http)?s?(://)?{0}:?(\d+)?/'.format(uri), re.I) METHODS = [ responses.DELETE, responses.GET, responses.HEAD, responses.OPTIONS, responses.PATCH, responses.POST, responses.PUT ] for method in METHODS: responses.add_callback(method, regex, callback=responses_callback)
[ "def", "registration", "(", "uri", ")", ":", "# log the URI that is used to access the Stack-In-A-Box services", "logger", ".", "debug", "(", "'Registering Stack-In-A-Box at {0} under Python Responses'", ".", "format", "(", "uri", ")", ")", "# tell Stack-In-A-Box what URI to match with", "StackInABox", ".", "update_uri", "(", "uri", ")", "# Build the regex for the URI and register all HTTP verbs", "# with Responses", "regex", "=", "re", ".", "compile", "(", "'(http)?s?(://)?{0}:?(\\d+)?/'", ".", "format", "(", "uri", ")", ",", "re", ".", "I", ")", "METHODS", "=", "[", "responses", ".", "DELETE", ",", "responses", ".", "GET", ",", "responses", ".", "HEAD", ",", "responses", ".", "OPTIONS", ",", "responses", ".", "PATCH", ",", "responses", ".", "POST", ",", "responses", ".", "PUT", "]", "for", "method", "in", "METHODS", ":", "responses", ".", "add_callback", "(", "method", ",", "regex", ",", "callback", "=", "responses_callback", ")" ]
Responses handler registration. Registers a handler for a given URI with Responses so that it can be intercepted and handed to Stack-In-A-Box. :param uri: URI used for the base of the HTTP requests :returns: n/a
[ "Responses", "handler", "registration", "." ]
python
train
edx/XBlock
xblock/django/request.py
https://github.com/edx/XBlock/blob/368bf46e2c0ee69bbb21817f428c4684936e18ee/xblock/django/request.py#L14-L24
def webob_to_django_response(webob_response): """Returns a django response to the `webob_response`""" from django.http import HttpResponse django_response = HttpResponse( webob_response.app_iter, content_type=webob_response.content_type, status=webob_response.status_code, ) for name, value in webob_response.headerlist: django_response[name] = value return django_response
[ "def", "webob_to_django_response", "(", "webob_response", ")", ":", "from", "django", ".", "http", "import", "HttpResponse", "django_response", "=", "HttpResponse", "(", "webob_response", ".", "app_iter", ",", "content_type", "=", "webob_response", ".", "content_type", ",", "status", "=", "webob_response", ".", "status_code", ",", ")", "for", "name", ",", "value", "in", "webob_response", ".", "headerlist", ":", "django_response", "[", "name", "]", "=", "value", "return", "django_response" ]
Returns a django response to the `webob_response`
[ "Returns", "a", "django", "response", "to", "the", "webob_response" ]
python
train
markovmodel/PyEMMA
pyemma/_ext/variational/estimators/running_moments.py
https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/_ext/variational/estimators/running_moments.py#L233-L308
def add(self, X, Y=None, weights=None): """ Add trajectory to estimate. Parameters ---------- X : ndarray(T, N) array of N time series. Y : ndarray(T, N) array of N time series, usually time shifted version of X. weights : None or float or ndarray(T, ): weights assigned to each trajectory point. If None, all data points have weight one. If float, the same weight will be given to all data points. If ndarray, each data point is assigned a separate weight. """ # check input T = X.shape[0] if Y is not None: assert Y.shape[0] == T, 'X and Y must have equal length' # Weights cannot be used for compute_YY: if weights is not None and self.compute_YY: raise ValueError('Use of weights is not implemented for compute_YY==True') if weights is not None: # Convert to array of length T if weights is a single number: if isinstance(weights, numbers.Real): weights = weights * np.ones(T, dtype=float) # Check appropriate length if weights is an array: elif isinstance(weights, np.ndarray): if len(weights) != T: raise ValueError('weights and X must have equal length. Was {} and {} respectively.'.format(len(weights), len(X))) else: raise TypeError('weights is of type %s, must be a number or ndarray' % (type(weights))) # estimate and add to storage if self.compute_XX and not self.compute_XY and not self.compute_YY: w, s_X, C_XX = moments_XX(X, remove_mean=self.remove_mean, weights=weights, sparse_mode=self.sparse_mode, modify_data=self.modify_data, column_selection=self.column_selection, diag_only=self.diag_only) if self.column_selection is not None: s_Xk = s_X[self.column_selection] else: s_Xk = s_X self.storage_XX.store(Moments(w, s_X, s_Xk, C_XX)) elif self.compute_XX and self.compute_XY and not self.compute_YY: assert Y is not None w, s_X, s_Y, C_XX, C_XY = moments_XXXY(X, Y, remove_mean=self.remove_mean, symmetrize=self.symmetrize, weights=weights, sparse_mode=self.sparse_mode, modify_data=self.modify_data, column_selection=self.column_selection, diag_only=self.diag_only) # make copy in order to get independently mergeable moments if self.column_selection is not None: s_Xk = s_X[self.column_selection] s_Yk = s_Y[self.column_selection] else: s_Xk = s_X s_Yk = s_Y self.storage_XX.store(Moments(w, s_X, s_Xk, C_XX)) self.storage_XY.store(Moments(w, s_X, s_Yk, C_XY)) else: # compute block assert Y is not None assert not self.symmetrize w, s, C = moments_block(X, Y, remove_mean=self.remove_mean, sparse_mode=self.sparse_mode, modify_data=self.modify_data, column_selection=self.column_selection, diag_only=self.diag_only) # make copy in order to get independently mergeable moments if self.column_selection is not None: s0k = s[0][self.column_selection] s1k = s[1][self.column_selection] else: s0k = s[0] s1k = s[1] if self.compute_XX: self.storage_XX.store(Moments(w, s[0], s0k, C[0][0])) if self.compute_XY: self.storage_XY.store(Moments(w, s[0], s1k, C[0][1])) self.storage_YY.store(Moments(w, s[1], s1k, C[1][1]))
[ "def", "add", "(", "self", ",", "X", ",", "Y", "=", "None", ",", "weights", "=", "None", ")", ":", "# check input", "T", "=", "X", ".", "shape", "[", "0", "]", "if", "Y", "is", "not", "None", ":", "assert", "Y", ".", "shape", "[", "0", "]", "==", "T", ",", "'X and Y must have equal length'", "# Weights cannot be used for compute_YY:", "if", "weights", "is", "not", "None", "and", "self", ".", "compute_YY", ":", "raise", "ValueError", "(", "'Use of weights is not implemented for compute_YY==True'", ")", "if", "weights", "is", "not", "None", ":", "# Convert to array of length T if weights is a single number:", "if", "isinstance", "(", "weights", ",", "numbers", ".", "Real", ")", ":", "weights", "=", "weights", "*", "np", ".", "ones", "(", "T", ",", "dtype", "=", "float", ")", "# Check appropriate length if weights is an array:", "elif", "isinstance", "(", "weights", ",", "np", ".", "ndarray", ")", ":", "if", "len", "(", "weights", ")", "!=", "T", ":", "raise", "ValueError", "(", "'weights and X must have equal length. Was {} and {} respectively.'", ".", "format", "(", "len", "(", "weights", ")", ",", "len", "(", "X", ")", ")", ")", "else", ":", "raise", "TypeError", "(", "'weights is of type %s, must be a number or ndarray'", "%", "(", "type", "(", "weights", ")", ")", ")", "# estimate and add to storage", "if", "self", ".", "compute_XX", "and", "not", "self", ".", "compute_XY", "and", "not", "self", ".", "compute_YY", ":", "w", ",", "s_X", ",", "C_XX", "=", "moments_XX", "(", "X", ",", "remove_mean", "=", "self", ".", "remove_mean", ",", "weights", "=", "weights", ",", "sparse_mode", "=", "self", ".", "sparse_mode", ",", "modify_data", "=", "self", ".", "modify_data", ",", "column_selection", "=", "self", ".", "column_selection", ",", "diag_only", "=", "self", ".", "diag_only", ")", "if", "self", ".", "column_selection", "is", "not", "None", ":", "s_Xk", "=", "s_X", "[", "self", ".", "column_selection", "]", "else", ":", "s_Xk", "=", "s_X", "self", ".", "storage_XX", ".", "store", "(", "Moments", "(", "w", ",", "s_X", ",", "s_Xk", ",", "C_XX", ")", ")", "elif", "self", ".", "compute_XX", "and", "self", ".", "compute_XY", "and", "not", "self", ".", "compute_YY", ":", "assert", "Y", "is", "not", "None", "w", ",", "s_X", ",", "s_Y", ",", "C_XX", ",", "C_XY", "=", "moments_XXXY", "(", "X", ",", "Y", ",", "remove_mean", "=", "self", ".", "remove_mean", ",", "symmetrize", "=", "self", ".", "symmetrize", ",", "weights", "=", "weights", ",", "sparse_mode", "=", "self", ".", "sparse_mode", ",", "modify_data", "=", "self", ".", "modify_data", ",", "column_selection", "=", "self", ".", "column_selection", ",", "diag_only", "=", "self", ".", "diag_only", ")", "# make copy in order to get independently mergeable moments", "if", "self", ".", "column_selection", "is", "not", "None", ":", "s_Xk", "=", "s_X", "[", "self", ".", "column_selection", "]", "s_Yk", "=", "s_Y", "[", "self", ".", "column_selection", "]", "else", ":", "s_Xk", "=", "s_X", "s_Yk", "=", "s_Y", "self", ".", "storage_XX", ".", "store", "(", "Moments", "(", "w", ",", "s_X", ",", "s_Xk", ",", "C_XX", ")", ")", "self", ".", "storage_XY", ".", "store", "(", "Moments", "(", "w", ",", "s_X", ",", "s_Yk", ",", "C_XY", ")", ")", "else", ":", "# compute block", "assert", "Y", "is", "not", "None", "assert", "not", "self", ".", "symmetrize", "w", ",", "s", ",", "C", "=", "moments_block", "(", "X", ",", "Y", ",", "remove_mean", "=", "self", ".", "remove_mean", ",", "sparse_mode", "=", "self", ".", "sparse_mode", ",", "modify_data", "=", "self", 
".", "modify_data", ",", "column_selection", "=", "self", ".", "column_selection", ",", "diag_only", "=", "self", ".", "diag_only", ")", "# make copy in order to get independently mergeable moments", "if", "self", ".", "column_selection", "is", "not", "None", ":", "s0k", "=", "s", "[", "0", "]", "[", "self", ".", "column_selection", "]", "s1k", "=", "s", "[", "1", "]", "[", "self", ".", "column_selection", "]", "else", ":", "s0k", "=", "s", "[", "0", "]", "s1k", "=", "s", "[", "1", "]", "if", "self", ".", "compute_XX", ":", "self", ".", "storage_XX", ".", "store", "(", "Moments", "(", "w", ",", "s", "[", "0", "]", ",", "s0k", ",", "C", "[", "0", "]", "[", "0", "]", ")", ")", "if", "self", ".", "compute_XY", ":", "self", ".", "storage_XY", ".", "store", "(", "Moments", "(", "w", ",", "s", "[", "0", "]", ",", "s1k", ",", "C", "[", "0", "]", "[", "1", "]", ")", ")", "self", ".", "storage_YY", ".", "store", "(", "Moments", "(", "w", ",", "s", "[", "1", "]", ",", "s1k", ",", "C", "[", "1", "]", "[", "1", "]", ")", ")" ]
Add trajectory to estimate. Parameters ---------- X : ndarray(T, N) array of N time series. Y : ndarray(T, N) array of N time series, usually time shifted version of X. weights : None or float or ndarray(T, ): weights assigned to each trajectory point. If None, all data points have weight one. If float, the same weight will be given to all data points. If ndarray, each data point is assigned a separate weight.
[ "Add", "trajectory", "to", "estimate", "." ]
python
train
DataONEorg/d1_python
lib_common/src/d1_common/checksum.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/checksum.py#L168-L189
def calculate_checksum_on_bytes( b, algorithm=d1_common.const.DEFAULT_CHECKSUM_ALGORITHM ): """Calculate the checksum of ``bytes``. Warning: This method requires the entire object to be buffered in (virtual) memory, which should normally be avoided in production code. Args: b: bytes Raw bytes algorithm: str Checksum algorithm, ``MD5`` or ``SHA1`` / ``SHA-1``. Returns: str : Checksum as a hexadecimal string, with length decided by the algorithm. """ checksum_calc = get_checksum_calculator_by_dataone_designator(algorithm) checksum_calc.update(b) return checksum_calc.hexdigest()
[ "def", "calculate_checksum_on_bytes", "(", "b", ",", "algorithm", "=", "d1_common", ".", "const", ".", "DEFAULT_CHECKSUM_ALGORITHM", ")", ":", "checksum_calc", "=", "get_checksum_calculator_by_dataone_designator", "(", "algorithm", ")", "checksum_calc", ".", "update", "(", "b", ")", "return", "checksum_calc", ".", "hexdigest", "(", ")" ]
Calculate the checksum of ``bytes``. Warning: This method requires the entire object to be buffered in (virtual) memory, which should normally be avoided in production code. Args: b: bytes Raw bytes algorithm: str Checksum algorithm, ``MD5`` or ``SHA1`` / ``SHA-1``. Returns: str : Checksum as a hexadecimal string, with length decided by the algorithm.
[ "Calculate", "the", "checksum", "of", "bytes", "." ]
python
train
NetEaseGame/ATX
atx/cmds/tcpproxy.py
https://github.com/NetEaseGame/ATX/blob/f4415c57b45cb0730e08899cbc92a2af1c047ffb/atx/cmds/tcpproxy.py#L16-L28
def main(forward=26944, host='127.0.0.1', listen=5555): ''' Args: - forward(int): local forward port - host(string): local forward host - listen(int): listen port ''' # HTTP->HTTP: On your computer, browse to "http://127.0.0.1:81/" and you'll get http://www.google.com server = maproxy.proxyserver.ProxyServer("127.0.0.1", forward) server.listen(listen) print("Local IP:", socket.gethostbyname(socket.gethostname())) print("0.0.0.0:{} -> {}:{}".format(listen, host, forward)) tornado.ioloop.IOLoop.instance().start()
[ "def", "main", "(", "forward", "=", "26944", ",", "host", "=", "'127.0.0.1'", ",", "listen", "=", "5555", ")", ":", "# HTTP->HTTP: On your computer, browse to \"http://127.0.0.1:81/\" and you'll get http://www.google.com", "server", "=", "maproxy", ".", "proxyserver", ".", "ProxyServer", "(", "\"127.0.0.1\"", ",", "forward", ")", "server", ".", "listen", "(", "listen", ")", "print", "(", "\"Local IP:\"", ",", "socket", ".", "gethostbyname", "(", "socket", ".", "gethostname", "(", ")", ")", ")", "print", "(", "\"0.0.0.0:{} -> {}:{}\"", ".", "format", "(", "listen", ",", "host", ",", "forward", ")", ")", "tornado", ".", "ioloop", ".", "IOLoop", ".", "instance", "(", ")", ".", "start", "(", ")" ]
Args: - forward(int): local forward port - host(string): local forward host - listen(int): listen port
[ "Args", ":", "-", "forward", "(", "int", ")", ":", "local", "forward", "port", "-", "host", "(", "string", ")", ":", "local", "forward", "host", "-", "listen", "(", "int", ")", ":", "listen", "port" ]
python
train
jlesquembre/autopilot
src/autopilot/main.py
https://github.com/jlesquembre/autopilot/blob/ca5f36269ba0173bd29c39db6971dac57a58513d/src/autopilot/main.py#L55-L82
def release(no_master, release_type): '''Releases a new version''' try: locale.setlocale(locale.LC_ALL, '') except: print("Warning: Unable to set locale. Expect encoding problems.") git.is_repo_clean(master=(not no_master)) config = utils.get_config() config.update(utils.get_dist_metadata()) config['project_dir'] = Path(os.getcwd()) config['release_type'] = release_type with tempfile.TemporaryDirectory(prefix='ap_tmp') as tmp_dir: config['tmp_dir'] = tmp_dir values = release_ui(config) if type(values) is not str: utils.release(project_name=config['project_name'], tmp_dir=tmp_dir, project_dir=config['project_dir'], pypi_servers=config['pypi_servers'], **values) print('New release options:') pprint.pprint(values) else: print(values)
[ "def", "release", "(", "no_master", ",", "release_type", ")", ":", "try", ":", "locale", ".", "setlocale", "(", "locale", ".", "LC_ALL", ",", "''", ")", "except", ":", "print", "(", "\"Warning: Unable to set locale. Expect encoding problems.\"", ")", "git", ".", "is_repo_clean", "(", "master", "=", "(", "not", "no_master", ")", ")", "config", "=", "utils", ".", "get_config", "(", ")", "config", ".", "update", "(", "utils", ".", "get_dist_metadata", "(", ")", ")", "config", "[", "'project_dir'", "]", "=", "Path", "(", "os", ".", "getcwd", "(", ")", ")", "config", "[", "'release_type'", "]", "=", "release_type", "with", "tempfile", ".", "TemporaryDirectory", "(", "prefix", "=", "'ap_tmp'", ")", "as", "tmp_dir", ":", "config", "[", "'tmp_dir'", "]", "=", "tmp_dir", "values", "=", "release_ui", "(", "config", ")", "if", "type", "(", "values", ")", "is", "not", "str", ":", "utils", ".", "release", "(", "project_name", "=", "config", "[", "'project_name'", "]", ",", "tmp_dir", "=", "tmp_dir", ",", "project_dir", "=", "config", "[", "'project_dir'", "]", ",", "pypi_servers", "=", "config", "[", "'pypi_servers'", "]", ",", "*", "*", "values", ")", "print", "(", "'New release options:'", ")", "pprint", ".", "pprint", "(", "values", ")", "else", ":", "print", "(", "values", ")" ]
Releases a new version
[ "Releases", "a", "new", "version" ]
python
train
testedminds/sand
sand/csv.py
https://github.com/testedminds/sand/blob/234f0eedb0742920cdf26da9bc84bf3f863a2f02/sand/csv.py#L23-L26
def csv_to_dicts(file, header=None): """Reads a csv and returns a List of Dicts with keys given by header row.""" with open(file) as csvfile: return [row for row in csv.DictReader(csvfile, fieldnames=header)]
[ "def", "csv_to_dicts", "(", "file", ",", "header", "=", "None", ")", ":", "with", "open", "(", "file", ")", "as", "csvfile", ":", "return", "[", "row", "for", "row", "in", "csv", ".", "DictReader", "(", "csvfile", ",", "fieldnames", "=", "header", ")", "]" ]
Reads a csv and returns a List of Dicts with keys given by header row.
[ "Reads", "a", "csv", "and", "returns", "a", "List", "of", "Dicts", "with", "keys", "given", "by", "header", "row", "." ]
python
train
liftoff/pyminifier
pyminifier/__main__.py
https://github.com/liftoff/pyminifier/blob/087ea7b0c8c964f1f907c3f350f5ce281798db86/pyminifier/__main__.py#L17-L171
def main(): """ Sets up our command line options, prints the usage/help (if warranted), and runs :py:func:`pyminifier.pyminify` with the given command line options. """ usage = '%prog [options] "<input file>"' if '__main__.py' in sys.argv[0]: # python -m pyminifier usage = 'pyminifier [options] "<input file>"' parser = OptionParser(usage=usage, version=__version__) parser.disable_interspersed_args() parser.add_option( "-o", "--outfile", dest="outfile", default=None, help="Save output to the given file.", metavar="<file path>" ) parser.add_option( "-d", "--destdir", dest="destdir", default="./minified", help=("Save output to the given directory. " "This option is required when handling multiple files. " "Defaults to './minified' and will be created if not present. "), metavar="<file path>" ) parser.add_option( "--nominify", action="store_true", dest="nominify", default=False, help="Don't bother minifying (only used with --pyz).", ) parser.add_option( "--use-tabs", action="store_true", dest="tabs", default=False, help="Use tabs for indentation instead of spaces.", ) parser.add_option( "--bzip2", action="store_true", dest="bzip2", default=False, help=("bzip2-compress the result into a self-executing python script. " "Only works on stand-alone scripts without implicit imports.") ) parser.add_option( "--gzip", action="store_true", dest="gzip", default=False, help=("gzip-compress the result into a self-executing python script. " "Only works on stand-alone scripts without implicit imports.") ) if lzma: parser.add_option( "--lzma", action="store_true", dest="lzma", default=False, help=("lzma-compress the result into a self-executing python script. " "Only works on stand-alone scripts without implicit imports.") ) parser.add_option( "--pyz", dest="pyz", default=None, help=("zip-compress the result into a self-executing python script. " "This will create a new file that includes any necessary implicit" " (local to the script) modules. Will include/process all files " "given as arguments to pyminifier.py on the command line."), metavar="<name of archive>.pyz" ) parser.add_option( "-O", "--obfuscate", action="store_true", dest="obfuscate", default=False, help=( "Obfuscate all function/method names, variables, and classes. " "Default is to NOT obfuscate." ) ) parser.add_option( "--obfuscate-classes", action="store_true", dest="obf_classes", default=False, help="Obfuscate class names." ) parser.add_option( "--obfuscate-functions", action="store_true", dest="obf_functions", default=False, help="Obfuscate function and method names." ) parser.add_option( "--obfuscate-variables", action="store_true", dest="obf_variables", default=False, help="Obfuscate variable names." ) parser.add_option( "--obfuscate-import-methods", action="store_true", dest="obf_import_methods", default=False, help="Obfuscate globally-imported mouled methods (e.g. 'Ag=re.compile')." ) parser.add_option( "--obfuscate-builtins", action="store_true", dest="obf_builtins", default=False, help="Obfuscate built-ins (i.e. True, False, object, Exception, etc)." ) parser.add_option( "--replacement-length", dest="replacement_length", default=1, help=( "The length of the random names that will be used when obfuscating " "identifiers." ), metavar="1" ) parser.add_option( "--nonlatin", action="store_true", dest="use_nonlatin", default=False, help=( "Use non-latin (unicode) characters in obfuscation (Python 3 only)." " WARNING: This results in some SERIOUSLY hard-to-read code." 
) ) parser.add_option( "--prepend", dest="prepend", default=None, help=( "Prepend the text in this file to the top of our output. " "e.g. A copyright notice." ), metavar="<file path>" ) options, files = parser.parse_args() if not files: parser.print_help() sys.exit(2) pyminify(options, files)
[ "def", "main", "(", ")", ":", "usage", "=", "'%prog [options] \"<input file>\"'", "if", "'__main__.py'", "in", "sys", ".", "argv", "[", "0", "]", ":", "# python -m pyminifier", "usage", "=", "'pyminifier [options] \"<input file>\"'", "parser", "=", "OptionParser", "(", "usage", "=", "usage", ",", "version", "=", "__version__", ")", "parser", ".", "disable_interspersed_args", "(", ")", "parser", ".", "add_option", "(", "\"-o\"", ",", "\"--outfile\"", ",", "dest", "=", "\"outfile\"", ",", "default", "=", "None", ",", "help", "=", "\"Save output to the given file.\"", ",", "metavar", "=", "\"<file path>\"", ")", "parser", ".", "add_option", "(", "\"-d\"", ",", "\"--destdir\"", ",", "dest", "=", "\"destdir\"", ",", "default", "=", "\"./minified\"", ",", "help", "=", "(", "\"Save output to the given directory. \"", "\"This option is required when handling multiple files. \"", "\"Defaults to './minified' and will be created if not present. \"", ")", ",", "metavar", "=", "\"<file path>\"", ")", "parser", ".", "add_option", "(", "\"--nominify\"", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "\"nominify\"", ",", "default", "=", "False", ",", "help", "=", "\"Don't bother minifying (only used with --pyz).\"", ",", ")", "parser", ".", "add_option", "(", "\"--use-tabs\"", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "\"tabs\"", ",", "default", "=", "False", ",", "help", "=", "\"Use tabs for indentation instead of spaces.\"", ",", ")", "parser", ".", "add_option", "(", "\"--bzip2\"", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "\"bzip2\"", ",", "default", "=", "False", ",", "help", "=", "(", "\"bzip2-compress the result into a self-executing python script. \"", "\"Only works on stand-alone scripts without implicit imports.\"", ")", ")", "parser", ".", "add_option", "(", "\"--gzip\"", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "\"gzip\"", ",", "default", "=", "False", ",", "help", "=", "(", "\"gzip-compress the result into a self-executing python script. \"", "\"Only works on stand-alone scripts without implicit imports.\"", ")", ")", "if", "lzma", ":", "parser", ".", "add_option", "(", "\"--lzma\"", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "\"lzma\"", ",", "default", "=", "False", ",", "help", "=", "(", "\"lzma-compress the result into a self-executing python script. \"", "\"Only works on stand-alone scripts without implicit imports.\"", ")", ")", "parser", ".", "add_option", "(", "\"--pyz\"", ",", "dest", "=", "\"pyz\"", ",", "default", "=", "None", ",", "help", "=", "(", "\"zip-compress the result into a self-executing python script. \"", "\"This will create a new file that includes any necessary implicit\"", "\" (local to the script) modules. Will include/process all files \"", "\"given as arguments to pyminifier.py on the command line.\"", ")", ",", "metavar", "=", "\"<name of archive>.pyz\"", ")", "parser", ".", "add_option", "(", "\"-O\"", ",", "\"--obfuscate\"", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "\"obfuscate\"", ",", "default", "=", "False", ",", "help", "=", "(", "\"Obfuscate all function/method names, variables, and classes. 
\"", "\"Default is to NOT obfuscate.\"", ")", ")", "parser", ".", "add_option", "(", "\"--obfuscate-classes\"", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "\"obf_classes\"", ",", "default", "=", "False", ",", "help", "=", "\"Obfuscate class names.\"", ")", "parser", ".", "add_option", "(", "\"--obfuscate-functions\"", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "\"obf_functions\"", ",", "default", "=", "False", ",", "help", "=", "\"Obfuscate function and method names.\"", ")", "parser", ".", "add_option", "(", "\"--obfuscate-variables\"", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "\"obf_variables\"", ",", "default", "=", "False", ",", "help", "=", "\"Obfuscate variable names.\"", ")", "parser", ".", "add_option", "(", "\"--obfuscate-import-methods\"", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "\"obf_import_methods\"", ",", "default", "=", "False", ",", "help", "=", "\"Obfuscate globally-imported mouled methods (e.g. 'Ag=re.compile').\"", ")", "parser", ".", "add_option", "(", "\"--obfuscate-builtins\"", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "\"obf_builtins\"", ",", "default", "=", "False", ",", "help", "=", "\"Obfuscate built-ins (i.e. True, False, object, Exception, etc).\"", ")", "parser", ".", "add_option", "(", "\"--replacement-length\"", ",", "dest", "=", "\"replacement_length\"", ",", "default", "=", "1", ",", "help", "=", "(", "\"The length of the random names that will be used when obfuscating \"", "\"identifiers.\"", ")", ",", "metavar", "=", "\"1\"", ")", "parser", ".", "add_option", "(", "\"--nonlatin\"", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "\"use_nonlatin\"", ",", "default", "=", "False", ",", "help", "=", "(", "\"Use non-latin (unicode) characters in obfuscation (Python 3 only).\"", "\" WARNING: This results in some SERIOUSLY hard-to-read code.\"", ")", ")", "parser", ".", "add_option", "(", "\"--prepend\"", ",", "dest", "=", "\"prepend\"", ",", "default", "=", "None", ",", "help", "=", "(", "\"Prepend the text in this file to the top of our output. \"", "\"e.g. A copyright notice.\"", ")", ",", "metavar", "=", "\"<file path>\"", ")", "options", ",", "files", "=", "parser", ".", "parse_args", "(", ")", "if", "not", "files", ":", "parser", ".", "print_help", "(", ")", "sys", ".", "exit", "(", "2", ")", "pyminify", "(", "options", ",", "files", ")" ]
Sets up our command line options, prints the usage/help (if warranted), and runs :py:func:`pyminifier.pyminify` with the given command line options.
[ "Sets", "up", "our", "command", "line", "options", "prints", "the", "usage", "/", "help", "(", "if", "warranted", ")", "and", "runs", ":", "py", ":", "func", ":", "pyminifier", ".", "pyminify", "with", "the", "given", "command", "line", "options", "." ]
python
train
dpkp/kafka-python
kafka/metrics/dict_reporter.py
https://github.com/dpkp/kafka-python/blob/f6a8a38937688ea2cc5dc13d3d1039493be5c9b5/kafka/metrics/dict_reporter.py#L21-L35
def snapshot(self): """ Return a nested dictionary snapshot of all metrics and their values at this time. Example: { 'category': { 'metric1_name': 42.0, 'metric2_name': 'foo' } } """ return dict((category, dict((name, metric.value()) for name, metric in list(metrics.items()))) for category, metrics in list(self._store.items()))
[ "def", "snapshot", "(", "self", ")", ":", "return", "dict", "(", "(", "category", ",", "dict", "(", "(", "name", ",", "metric", ".", "value", "(", ")", ")", "for", "name", ",", "metric", "in", "list", "(", "metrics", ".", "items", "(", ")", ")", ")", ")", "for", "category", ",", "metrics", "in", "list", "(", "self", ".", "_store", ".", "items", "(", ")", ")", ")" ]
Return a nested dictionary snapshot of all metrics and their values at this time. Example: { 'category': { 'metric1_name': 42.0, 'metric2_name': 'foo' } }
[ "Return", "a", "nested", "dictionary", "snapshot", "of", "all", "metrics", "and", "their", "values", "at", "this", "time", ".", "Example", ":", "{", "category", ":", "{", "metric1_name", ":", "42", ".", "0", "metric2_name", ":", "foo", "}", "}" ]
python
train
Asana/python-asana
asana/resources/gen/tasks.py
https://github.com/Asana/python-asana/blob/6deb7a34495db23f44858e53b6bb2c9eccff7872/asana/resources/gen/tasks.py#L178-L189
def add_dependencies(self, task, params={}, **options): """Marks a set of tasks as dependencies of this task, if they are not already dependencies. *A task can have at most 15 dependencies.* Parameters ---------- task : {Id} The task to add dependencies to. [data] : {Object} Data for the request - dependencies : {Array} An array of task IDs that this task should depend on. """ path = "/tasks/%s/addDependencies" % (task) return self.client.post(path, params, **options)
[ "def", "add_dependencies", "(", "self", ",", "task", ",", "params", "=", "{", "}", ",", "*", "*", "options", ")", ":", "path", "=", "\"/tasks/%s/addDependencies\"", "%", "(", "task", ")", "return", "self", ".", "client", ".", "post", "(", "path", ",", "params", ",", "*", "*", "options", ")" ]
Marks a set of tasks as dependencies of this task, if they are not already dependencies. *A task can have at most 15 dependencies.* Parameters ---------- task : {Id} The task to add dependencies to. [data] : {Object} Data for the request - dependencies : {Array} An array of task IDs that this task should depend on.
[ "Marks", "a", "set", "of", "tasks", "as", "dependencies", "of", "this", "task", "if", "they", "are", "not", "already", "dependencies", ".", "*", "A", "task", "can", "have", "at", "most", "15", "dependencies", ".", "*" ]
python
train
diffeo/rejester
rejester/_registry.py
https://github.com/diffeo/rejester/blob/5438a4a18be2801d7826c46e2079ba9639d2ecb4/rejester/_registry.py#L524-L579
def popitem(self, dict_name, priority_min='-inf', priority_max='+inf'): '''Select an item and remove it. The item comes from `dict_name`, and has the lowest score at least `priority_min` and at most `priority_max`. If some item is found, remove it from `dict_name` and return it. This runs as a single atomic operation but still requires a session lock. :param str dict_name: source dictionary :param float priority_min: lowest score :param float priority_max: highest score :return: pair of (key, value) if an item was popped, or :const:`None` ''' if self._session_lock_identifier is None: raise ProgrammerError('must acquire lock first') conn = redis.Redis(connection_pool=self.pool) script = conn.register_script(''' if redis.call("get", KEYS[1]) == ARGV[1] then -- remove next item of dict_name local next_key, next_priority = redis.call("zrangebyscore", KEYS[3], ARGV[2], ARGV[3], "WITHSCORES", "LIMIT", 0, 1)[1] if not next_key then return {} end redis.call("zrem", KEYS[3], next_key) local next_val = redis.call("hget", KEYS[2], next_key) -- zrem removed it from list, so also remove from hash redis.call("hdel", KEYS[2], next_key) return {next_key, next_val} else -- ERROR: No longer own the lock return -1 end ''') dict_name = self._namespace(dict_name) key_value = script(keys=[self._lock_name, dict_name, dict_name + "keys"], args=[self._session_lock_identifier, priority_min, priority_max]) if key_value == -1: raise KeyError( 'Registry failed to return an item from %s' % dict_name) if key_value == []: return None return self._decode(key_value[0]), self._decode(key_value[1])
[ "def", "popitem", "(", "self", ",", "dict_name", ",", "priority_min", "=", "'-inf'", ",", "priority_max", "=", "'+inf'", ")", ":", "if", "self", ".", "_session_lock_identifier", "is", "None", ":", "raise", "ProgrammerError", "(", "'must acquire lock first'", ")", "conn", "=", "redis", ".", "Redis", "(", "connection_pool", "=", "self", ".", "pool", ")", "script", "=", "conn", ".", "register_script", "(", "'''\n if redis.call(\"get\", KEYS[1]) == ARGV[1]\n then\n -- remove next item of dict_name\n local next_key, next_priority = redis.call(\"zrangebyscore\",\n KEYS[3], ARGV[2], ARGV[3], \"WITHSCORES\", \"LIMIT\", 0, 1)[1]\n\n if not next_key then\n return {}\n end\n \n redis.call(\"zrem\", KEYS[3], next_key)\n local next_val = redis.call(\"hget\", KEYS[2], next_key)\n -- zrem removed it from list, so also remove from hash\n redis.call(\"hdel\", KEYS[2], next_key)\n return {next_key, next_val}\n else\n -- ERROR: No longer own the lock\n return -1\n end\n '''", ")", "dict_name", "=", "self", ".", "_namespace", "(", "dict_name", ")", "key_value", "=", "script", "(", "keys", "=", "[", "self", ".", "_lock_name", ",", "dict_name", ",", "dict_name", "+", "\"keys\"", "]", ",", "args", "=", "[", "self", ".", "_session_lock_identifier", ",", "priority_min", ",", "priority_max", "]", ")", "if", "key_value", "==", "-", "1", ":", "raise", "KeyError", "(", "'Registry failed to return an item from %s'", "%", "dict_name", ")", "if", "key_value", "==", "[", "]", ":", "return", "None", "return", "self", ".", "_decode", "(", "key_value", "[", "0", "]", ")", ",", "self", ".", "_decode", "(", "key_value", "[", "1", "]", ")" ]
Select an item and remove it. The item comes from `dict_name`, and has the lowest score at least `priority_min` and at most `priority_max`. If some item is found, remove it from `dict_name` and return it. This runs as a single atomic operation but still requires a session lock. :param str dict_name: source dictionary :param float priority_min: lowest score :param float priority_max: highest score :return: pair of (key, value) if an item was popped, or :const:`None`
[ "Select", "an", "item", "and", "remove", "it", "." ]
python
train
simion/pip-upgrader
pip_upgrader/requirements_detector.py
https://github.com/simion/pip-upgrader/blob/716adca65d9ed56d4d416f94ede8a8e4fa8d640a/pip_upgrader/requirements_detector.py#L32-L45
def autodetect_files(self): """ Attempt to detect requirements files in the current working directory """ if self._is_valid_requirements_file('requirements.txt'): self.filenames.append('requirements.txt') if self._is_valid_requirements_file('requirements.pip'): # pragma: nocover self.filenames.append('requirements.pip') if os.path.isdir('requirements'): for filename in os.listdir('requirements'): file_path = os.path.join('requirements', filename) if self._is_valid_requirements_file(file_path): self.filenames.append(file_path) self._check_inclusions_recursively()
[ "def", "autodetect_files", "(", "self", ")", ":", "if", "self", ".", "_is_valid_requirements_file", "(", "'requirements.txt'", ")", ":", "self", ".", "filenames", ".", "append", "(", "'requirements.txt'", ")", "if", "self", ".", "_is_valid_requirements_file", "(", "'requirements.pip'", ")", ":", "# pragma: nocover", "self", ".", "filenames", ".", "append", "(", "'requirements.pip'", ")", "if", "os", ".", "path", ".", "isdir", "(", "'requirements'", ")", ":", "for", "filename", "in", "os", ".", "listdir", "(", "'requirements'", ")", ":", "file_path", "=", "os", ".", "path", ".", "join", "(", "'requirements'", ",", "filename", ")", "if", "self", ".", "_is_valid_requirements_file", "(", "file_path", ")", ":", "self", ".", "filenames", ".", "append", "(", "file_path", ")", "self", ".", "_check_inclusions_recursively", "(", ")" ]
Attempt to detect requirements files in the current working directory
[ "Attempt", "to", "detect", "requirements", "files", "in", "the", "current", "working", "directory" ]
python
test
nmdp-bioinformatics/SeqAnn
seqann/util.py
https://github.com/nmdp-bioinformatics/SeqAnn/blob/5ce91559b0a4fbe4fb7758e034eb258202632463/seqann/util.py#L257-L270
def deserialize_date(string): """ Deserializes string to date. :param string: str. :type string: str :return: date. :rtype: date """ try: from dateutil.parser import parse return parse(string).date() except ImportError: return string
[ "def", "deserialize_date", "(", "string", ")", ":", "try", ":", "from", "dateutil", ".", "parser", "import", "parse", "return", "parse", "(", "string", ")", ".", "date", "(", ")", "except", "ImportError", ":", "return", "string" ]
Deserializes string to date. :param string: str. :type string: str :return: date. :rtype: date
[ "Deserializes", "string", "to", "date", "." ]
python
train
aiidateam/aiida-codtools
aiida_codtools/common/cli.py
https://github.com/aiidateam/aiida-codtools/blob/da5e4259b7a2e86cf0cc3f997e11dd36d445fa94/aiida_codtools/common/cli.py#L170-L200
def run(self, daemon=False): """Launch the process with the given inputs, by default running in the current interpreter. :param daemon: boolean, if True, will submit the process instead of running it. """ from aiida.engine import launch # If daemon is True, submit the process and return if daemon: node = launch.submit(self.process, **self.inputs) echo.echo_info('Submitted {}<{}>'.format(self.process_name, node.pk)) return # Otherwise we run locally and wait for the process to finish echo.echo_info('Running {}'.format(self.process_name)) try: _, node = launch.run_get_node(self.process, **self.inputs) except Exception as exception: # pylint: disable=broad-except echo.echo_critical('an exception occurred during execution: {}'.format(str(exception))) if node.is_killed: echo.echo_critical('{}<{}> was killed'.format(self.process_name, node.pk)) elif not node.is_finished_ok: arguments = [self.process_name, node.pk, node.exit_status, node.exit_message] echo.echo_warning('{}<{}> failed with exit status {}: {}'.format(*arguments)) else: output = [] echo.echo_success('{}<{}> finished successfully\n'.format(self.process_name, node.pk)) for triple in sorted(node.get_outgoing().all(), key=lambda triple: triple.link_label): output.append([triple.link_label, '{}<{}>'.format(triple.node.__class__.__name__, triple.node.pk)]) echo.echo(tabulate.tabulate(output, headers=['Output label', 'Node']))
[ "def", "run", "(", "self", ",", "daemon", "=", "False", ")", ":", "from", "aiida", ".", "engine", "import", "launch", "# If daemon is True, submit the process and return", "if", "daemon", ":", "node", "=", "launch", ".", "submit", "(", "self", ".", "process", ",", "*", "*", "self", ".", "inputs", ")", "echo", ".", "echo_info", "(", "'Submitted {}<{}>'", ".", "format", "(", "self", ".", "process_name", ",", "node", ".", "pk", ")", ")", "return", "# Otherwise we run locally and wait for the process to finish", "echo", ".", "echo_info", "(", "'Running {}'", ".", "format", "(", "self", ".", "process_name", ")", ")", "try", ":", "_", ",", "node", "=", "launch", ".", "run_get_node", "(", "self", ".", "process", ",", "*", "*", "self", ".", "inputs", ")", "except", "Exception", "as", "exception", ":", "# pylint: disable=broad-except", "echo", ".", "echo_critical", "(", "'an exception occurred during execution: {}'", ".", "format", "(", "str", "(", "exception", ")", ")", ")", "if", "node", ".", "is_killed", ":", "echo", ".", "echo_critical", "(", "'{}<{}> was killed'", ".", "format", "(", "self", ".", "process_name", ",", "node", ".", "pk", ")", ")", "elif", "not", "node", ".", "is_finished_ok", ":", "arguments", "=", "[", "self", ".", "process_name", ",", "node", ".", "pk", ",", "node", ".", "exit_status", ",", "node", ".", "exit_message", "]", "echo", ".", "echo_warning", "(", "'{}<{}> failed with exit status {}: {}'", ".", "format", "(", "*", "arguments", ")", ")", "else", ":", "output", "=", "[", "]", "echo", ".", "echo_success", "(", "'{}<{}> finished successfully\\n'", ".", "format", "(", "self", ".", "process_name", ",", "node", ".", "pk", ")", ")", "for", "triple", "in", "sorted", "(", "node", ".", "get_outgoing", "(", ")", ".", "all", "(", ")", ",", "key", "=", "lambda", "triple", ":", "triple", ".", "link_label", ")", ":", "output", ".", "append", "(", "[", "triple", ".", "link_label", ",", "'{}<{}>'", ".", "format", "(", "triple", ".", "node", ".", "__class__", ".", "__name__", ",", "triple", ".", "node", ".", "pk", ")", "]", ")", "echo", ".", "echo", "(", "tabulate", ".", "tabulate", "(", "output", ",", "headers", "=", "[", "'Output label'", ",", "'Node'", "]", ")", ")" ]
Launch the process with the given inputs, by default running in the current interpreter. :param daemon: boolean, if True, will submit the process instead of running it.
[ "Launch", "the", "process", "with", "the", "given", "inputs", "by", "default", "running", "in", "the", "current", "interpreter", "." ]
python
train
RonenNess/grepfunc
grepfunc/grepfunc.py
https://github.com/RonenNess/grepfunc/blob/a5323d082c0a581e4b053ef85838c9a0101b43ff/grepfunc/grepfunc.py#L18-L39
def __fix_args(kwargs): """ Set all named arguments shortcuts and flags. """ kwargs.setdefault('fixed_strings', kwargs.get('F')) kwargs.setdefault('basic_regexp', kwargs.get('G')) kwargs.setdefault('extended_regexp', kwargs.get('E')) kwargs.setdefault('ignore_case', kwargs.get('i')) kwargs.setdefault('invert', kwargs.get('v')) kwargs.setdefault('words', kwargs.get('w')) kwargs.setdefault('line', kwargs.get('x')) kwargs.setdefault('count', kwargs.get('c')) kwargs.setdefault('max_count', kwargs.get('m')) kwargs.setdefault('after_context', kwargs.get('A')) kwargs.setdefault('before_context', kwargs.get('B')) kwargs.setdefault('quiet', kwargs.get('q')) kwargs.setdefault('byte_offset', kwargs.get('b')) kwargs.setdefault('only_matching', kwargs.get('o')) kwargs.setdefault('line_number', kwargs.get('n')) kwargs.setdefault('regex_flags', kwargs.get('r')) kwargs.setdefault('keep_eol', kwargs.get('k')) kwargs.setdefault('trim', kwargs.get('t'))
[ "def", "__fix_args", "(", "kwargs", ")", ":", "kwargs", ".", "setdefault", "(", "'fixed_strings'", ",", "kwargs", ".", "get", "(", "'F'", ")", ")", "kwargs", ".", "setdefault", "(", "'basic_regexp'", ",", "kwargs", ".", "get", "(", "'G'", ")", ")", "kwargs", ".", "setdefault", "(", "'extended_regexp'", ",", "kwargs", ".", "get", "(", "'E'", ")", ")", "kwargs", ".", "setdefault", "(", "'ignore_case'", ",", "kwargs", ".", "get", "(", "'i'", ")", ")", "kwargs", ".", "setdefault", "(", "'invert'", ",", "kwargs", ".", "get", "(", "'v'", ")", ")", "kwargs", ".", "setdefault", "(", "'words'", ",", "kwargs", ".", "get", "(", "'w'", ")", ")", "kwargs", ".", "setdefault", "(", "'line'", ",", "kwargs", ".", "get", "(", "'x'", ")", ")", "kwargs", ".", "setdefault", "(", "'count'", ",", "kwargs", ".", "get", "(", "'c'", ")", ")", "kwargs", ".", "setdefault", "(", "'max_count'", ",", "kwargs", ".", "get", "(", "'m'", ")", ")", "kwargs", ".", "setdefault", "(", "'after_context'", ",", "kwargs", ".", "get", "(", "'A'", ")", ")", "kwargs", ".", "setdefault", "(", "'before_context'", ",", "kwargs", ".", "get", "(", "'B'", ")", ")", "kwargs", ".", "setdefault", "(", "'quiet'", ",", "kwargs", ".", "get", "(", "'q'", ")", ")", "kwargs", ".", "setdefault", "(", "'byte_offset'", ",", "kwargs", ".", "get", "(", "'b'", ")", ")", "kwargs", ".", "setdefault", "(", "'only_matching'", ",", "kwargs", ".", "get", "(", "'o'", ")", ")", "kwargs", ".", "setdefault", "(", "'line_number'", ",", "kwargs", ".", "get", "(", "'n'", ")", ")", "kwargs", ".", "setdefault", "(", "'regex_flags'", ",", "kwargs", ".", "get", "(", "'r'", ")", ")", "kwargs", ".", "setdefault", "(", "'keep_eol'", ",", "kwargs", ".", "get", "(", "'k'", ")", ")", "kwargs", ".", "setdefault", "(", "'trim'", ",", "kwargs", ".", "get", "(", "'t'", ")", ")" ]
Set all named arguments shortcuts and flags.
[ "Set", "all", "named", "arguments", "shortcuts", "and", "flags", "." ]
python
train
theonion/django-bulbs
bulbs/api/permissions.py
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/api/permissions.py#L78-L86
def has_permission(self, request, view): """If method is GET, user can access, if method is PUT or POST user must be a superuser. """ has_permission = False if request.method == "GET" \ or request.method in ["PUT", "POST", "DELETE"] \ and request.user and request.user.is_superuser: has_permission = True return has_permission
[ "def", "has_permission", "(", "self", ",", "request", ",", "view", ")", ":", "has_permission", "=", "False", "if", "request", ".", "method", "==", "\"GET\"", "or", "request", ".", "method", "in", "[", "\"PUT\"", ",", "\"POST\"", ",", "\"DELETE\"", "]", "and", "request", ".", "user", "and", "request", ".", "user", ".", "is_superuser", ":", "has_permission", "=", "True", "return", "has_permission" ]
If method is GET, user can access, if method is PUT or POST user must be a superuser.
[ "If", "method", "is", "GET", "user", "can", "access", "if", "method", "is", "PUT", "or", "POST", "user", "must", "be", "a", "superuser", "." ]
python
train
volfpeter/graphscraper
src/graphscraper/spotifyartist.py
https://github.com/volfpeter/graphscraper/blob/11d407509956a282ee25190ed6491a162fc0fe7f/src/graphscraper/spotifyartist.py#L347-L382
def search_artists_by_name(self, artist_name: str, limit: int = 5) -> List[NameExternalIDPair]: """ Returns zero or more artist name - external ID pairs that match the specified artist name. Arguments: artist_name (str): The artist name to search in the Spotify API. limit (int): The maximum number of results to return. Returns: Zero or more artist name - external ID pairs. Raises: requests.HTTPError: If an HTTP error occurred during the request. SpotifyClientError: If an invalid item is found. """ response: requests.Response = requests.get( self._API_URL_TEMPLATE.format("search"), params={"q": artist_name, "type": "artist", "limit": limit}, headers={"Authorization": "Bearer {}".format(self._token.access_token)} ) # TODO: handle API rate limiting response.raise_for_status() if not response.text: return [] result: List[NameExternalIDPair] = [] data: List[Dict] = response.json()["artists"]["items"] for artist in data: artist = NameExternalIDPair(artist["name"].strip(), artist["id"].strip()) if not artist.name or not artist.external_id: raise SpotifyClientError("Name or ID is missing") result.append(artist) return result
[ "def", "search_artists_by_name", "(", "self", ",", "artist_name", ":", "str", ",", "limit", ":", "int", "=", "5", ")", "->", "List", "[", "NameExternalIDPair", "]", ":", "response", ":", "requests", ".", "Response", "=", "requests", ".", "get", "(", "self", ".", "_API_URL_TEMPLATE", ".", "format", "(", "\"search\"", ")", ",", "params", "=", "{", "\"q\"", ":", "artist_name", ",", "\"type\"", ":", "\"artist\"", ",", "\"limit\"", ":", "limit", "}", ",", "headers", "=", "{", "\"Authorization\"", ":", "\"Bearer {}\"", ".", "format", "(", "self", ".", "_token", ".", "access_token", ")", "}", ")", "# TODO: handle API rate limiting", "response", ".", "raise_for_status", "(", ")", "if", "not", "response", ".", "text", ":", "return", "[", "]", "result", ":", "List", "[", "NameExternalIDPair", "]", "=", "[", "]", "data", ":", "List", "[", "Dict", "]", "=", "response", ".", "json", "(", ")", "[", "\"artists\"", "]", "[", "\"items\"", "]", "for", "artist", "in", "data", ":", "artist", "=", "NameExternalIDPair", "(", "artist", "[", "\"name\"", "]", ".", "strip", "(", ")", ",", "artist", "[", "\"id\"", "]", ".", "strip", "(", ")", ")", "if", "not", "artist", ".", "name", "or", "not", "artist", ".", "external_id", ":", "raise", "SpotifyClientError", "(", "\"Name or ID is missing\"", ")", "result", ".", "append", "(", "artist", ")", "return", "result" ]
Returns zero or more artist name - external ID pairs that match the specified artist name. Arguments: artist_name (str): The artist name to search in the Spotify API. limit (int): The maximum number of results to return. Returns: Zero or more artist name - external ID pairs. Raises: requests.HTTPError: If an HTTP error occurred during the request. SpotifyClientError: If an invalid item is found.
[ "Returns", "zero", "or", "more", "artist", "name", "-", "external", "ID", "pairs", "that", "match", "the", "specified", "artist", "name", "." ]
python
train
Spinmob/spinmob
_functions.py
https://github.com/Spinmob/spinmob/blob/f037f5df07f194bcd4a01f4d9916e57b9e8fb45a/_functions.py#L35-L124
def coarsen_data(x, y, ey=None, ex=None, level=2, exponential=False): """ Coarsens the supplied data set. Returns coarsened arrays of x, y, along with quadrature-coarsened arrays of ey and ex if specified. Parameters ---------- x, y Data arrays. Can be lists (will convert to numpy arrays). These are coarsened by taking an average. ey=None, ex=None y and x uncertainties. Accepts arrays, lists, or numbers. These are coarsened by averaging in quadrature. level=2 For linear coarsening (default, see below), every n=level points will be averaged together (in quadrature for errors). For exponential coarsening, bins will be spaced by the specified scaling=level factor; for example, level=1.4 will group points within 40% of each other's x values. This is a great option for log-x plots, as the outcome will be evenly spaced. exponential=False If False, coarsen using linear spacing. If True, the bins will be exponentially spaced by the specified level. """ # Normal coarsening if not exponential: # Coarsen the data xc = coarsen_array(x, level, 'mean') yc = coarsen_array(y, level, 'mean') # Coarsen the y error in quadrature if not ey is None: if not is_iterable(ey): ey = [ey]*len(y) eyc = _n.sqrt(coarsen_array(_n.power(ey,2)/level, level, 'mean')) # Coarsen the x error in quadrature if not ex is None: if not is_iterable(ey): ex = [ex]*len(x) exc = _n.sqrt(coarsen_array(_n.power(ex,2)/level, level, 'mean')) # Exponential coarsen else: # Make sure the data are arrays x = _n.array(x) y = _n.array(y) # Create the new arrays to fill xc = [] yc = [] if not ey is None: if not is_iterable(ey): ey = _n.array([ey]*len(y)) eyc = [] if not ex is None: if not is_iterable(ex): ex = _n.array([ex]*len(x)) exc = [] # Find the first element that is greater than zero x0 = x[x>0][0] # Now loop over the exponential bins n = 0 while x0*level**n < x[-1]: # Get all the points between x[n] and x[n]*r mask = _n.logical_and(x0*level**n <= x, x < x0*level**(n+1)) # Only do something if points exist from this range! if len(x[mask]): # Take the average x value xc.append(_n.average(x[mask])) yc.append(_n.average(y[mask])) # do the errors in quadrature if not ey is None: eyc.append(_n.sqrt(_n.average((ey**2)[mask])/len(ey[mask]))) if not ex is None: exc.append(_n.sqrt(_n.average((ex**2)[mask])/len(ex[mask]))) # Increment the counter n += 1 # Done exponential loop # Done coarsening # Return depending on situation if ey is None and ex is None: return _n.array(xc), _n.array(yc) elif ex is None : return _n.array(xc), _n.array(yc), _n.array(eyc) elif ey is None : return _n.array(xc), _n.array(yc), _n.array(exc) else : return _n.array(xc), _n.array(yc), _n.array(eyc), _n.array(exc)
[ "def", "coarsen_data", "(", "x", ",", "y", ",", "ey", "=", "None", ",", "ex", "=", "None", ",", "level", "=", "2", ",", "exponential", "=", "False", ")", ":", "# Normal coarsening", "if", "not", "exponential", ":", "# Coarsen the data", "xc", "=", "coarsen_array", "(", "x", ",", "level", ",", "'mean'", ")", "yc", "=", "coarsen_array", "(", "y", ",", "level", ",", "'mean'", ")", "# Coarsen the y error in quadrature", "if", "not", "ey", "is", "None", ":", "if", "not", "is_iterable", "(", "ey", ")", ":", "ey", "=", "[", "ey", "]", "*", "len", "(", "y", ")", "eyc", "=", "_n", ".", "sqrt", "(", "coarsen_array", "(", "_n", ".", "power", "(", "ey", ",", "2", ")", "/", "level", ",", "level", ",", "'mean'", ")", ")", "# Coarsen the x error in quadrature", "if", "not", "ex", "is", "None", ":", "if", "not", "is_iterable", "(", "ey", ")", ":", "ex", "=", "[", "ex", "]", "*", "len", "(", "x", ")", "exc", "=", "_n", ".", "sqrt", "(", "coarsen_array", "(", "_n", ".", "power", "(", "ex", ",", "2", ")", "/", "level", ",", "level", ",", "'mean'", ")", ")", "# Exponential coarsen ", "else", ":", "# Make sure the data are arrays", "x", "=", "_n", ".", "array", "(", "x", ")", "y", "=", "_n", ".", "array", "(", "y", ")", "# Create the new arrays to fill", "xc", "=", "[", "]", "yc", "=", "[", "]", "if", "not", "ey", "is", "None", ":", "if", "not", "is_iterable", "(", "ey", ")", ":", "ey", "=", "_n", ".", "array", "(", "[", "ey", "]", "*", "len", "(", "y", ")", ")", "eyc", "=", "[", "]", "if", "not", "ex", "is", "None", ":", "if", "not", "is_iterable", "(", "ex", ")", ":", "ex", "=", "_n", ".", "array", "(", "[", "ex", "]", "*", "len", "(", "x", ")", ")", "exc", "=", "[", "]", "# Find the first element that is greater than zero ", "x0", "=", "x", "[", "x", ">", "0", "]", "[", "0", "]", "# Now loop over the exponential bins", "n", "=", "0", "while", "x0", "*", "level", "**", "n", "<", "x", "[", "-", "1", "]", ":", "# Get all the points between x[n] and x[n]*r", "mask", "=", "_n", ".", "logical_and", "(", "x0", "*", "level", "**", "n", "<=", "x", ",", "x", "<", "x0", "*", "level", "**", "(", "n", "+", "1", ")", ")", "# Only do something if points exist from this range!", "if", "len", "(", "x", "[", "mask", "]", ")", ":", "# Take the average x value", "xc", ".", "append", "(", "_n", ".", "average", "(", "x", "[", "mask", "]", ")", ")", "yc", ".", "append", "(", "_n", ".", "average", "(", "y", "[", "mask", "]", ")", ")", "# do the errors in quadrature", "if", "not", "ey", "is", "None", ":", "eyc", ".", "append", "(", "_n", ".", "sqrt", "(", "_n", ".", "average", "(", "(", "ey", "**", "2", ")", "[", "mask", "]", ")", "/", "len", "(", "ey", "[", "mask", "]", ")", ")", ")", "if", "not", "ex", "is", "None", ":", "exc", ".", "append", "(", "_n", ".", "sqrt", "(", "_n", ".", "average", "(", "(", "ex", "**", "2", ")", "[", "mask", "]", ")", "/", "len", "(", "ex", "[", "mask", "]", ")", ")", ")", "# Increment the counter", "n", "+=", "1", "# Done exponential loop", "# Done coarsening ", "# Return depending on situation", "if", "ey", "is", "None", "and", "ex", "is", "None", ":", "return", "_n", ".", "array", "(", "xc", ")", ",", "_n", ".", "array", "(", "yc", ")", "elif", "ex", "is", "None", ":", "return", "_n", ".", "array", "(", "xc", ")", ",", "_n", ".", "array", "(", "yc", ")", ",", "_n", ".", "array", "(", "eyc", ")", "elif", "ey", "is", "None", ":", "return", "_n", ".", "array", "(", "xc", ")", ",", "_n", ".", "array", "(", "yc", ")", ",", "_n", ".", "array", "(", "exc", ")", "else", ":", "return", "_n", ".", "array", 
"(", "xc", ")", ",", "_n", ".", "array", "(", "yc", ")", ",", "_n", ".", "array", "(", "eyc", ")", ",", "_n", ".", "array", "(", "exc", ")" ]
Coarsens the supplied data set. Returns coarsened arrays of x, y, along with quadrature-coarsened arrays of ey and ex if specified. Parameters ---------- x, y Data arrays. Can be lists (will convert to numpy arrays). These are coarsened by taking an average. ey=None, ex=None y and x uncertainties. Accepts arrays, lists, or numbers. These are coarsened by averaging in quadrature. level=2 For linear coarsening (default, see below), every n=level points will be averaged together (in quadrature for errors). For exponential coarsening, bins will be spaced by the specified scaling=level factor; for example, level=1.4 will group points within 40% of each other's x values. This is a great option for log-x plots, as the outcome will be evenly spaced. exponential=False If False, coarsen using linear spacing. If True, the bins will be exponentially spaced by the specified level.
[ "Coarsens", "the", "supplied", "data", "set", ".", "Returns", "coarsened", "arrays", "of", "x", "y", "along", "with", "quadrature", "-", "coarsened", "arrays", "of", "ey", "and", "ex", "if", "specified", ".", "Parameters", "----------", "x", "y", "Data", "arrays", ".", "Can", "be", "lists", "(", "will", "convert", "to", "numpy", "arrays", ")", ".", "These", "are", "coarsened", "by", "taking", "an", "average", ".", "ey", "=", "None", "ex", "=", "None", "y", "and", "x", "uncertainties", ".", "Accepts", "arrays", "lists", "or", "numbers", ".", "These", "are", "coarsened", "by", "averaging", "in", "quadrature", ".", "level", "=", "2", "For", "linear", "coarsening", "(", "default", "see", "below", ")", "every", "n", "=", "level", "points", "will", "be", "averaged", "together", "(", "in", "quadrature", "for", "errors", ")", ".", "For", "exponential", "coarsening", "bins", "will", "be", "spaced", "by", "the", "specified", "scaling", "=", "level", "factor", ";", "for", "example", "level", "=", "1", ".", "4", "will", "group", "points", "within", "40%", "of", "each", "other", "s", "x", "values", ".", "This", "is", "a", "great", "option", "for", "log", "-", "x", "plots", "as", "the", "outcome", "will", "be", "evenly", "spaced", ".", "exponential", "=", "False", "If", "False", "coarsen", "using", "linear", "spacing", ".", "If", "True", "the", "bins", "will", "be", "exponentially", "spaced", "by", "the", "specified", "level", "." ]
python
train
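The coarsen_data record above is easiest to grasp with a small usage sketch. This is a minimal example, assuming spinmob is installed and that coarsen_data is reachable at the package level (the record only shows it living in _functions.py, so the exact import location is an assumption):

import numpy as np
import spinmob  # assumption: coarsen_data is reachable as spinmob.coarsen_data

x = np.linspace(1, 100, 200)
y = np.sin(x)
ey = 0.1 * np.ones_like(y)          # constant y uncertainty

# Linear coarsening: average every 4 points; errors are combined in quadrature.
xc, yc, eyc = spinmob.coarsen_data(x, y, ey=ey, level=4)

# Exponential coarsening: bins grow by a factor of 1.4, which keeps the
# coarsened points evenly spaced on a log-x plot.
xe, ye, eye = spinmob.coarsen_data(x, y, ey=ey, level=1.4, exponential=True)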
nerdvegas/rez
src/build_utils/virtualenv/virtualenv.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/build_utils/virtualenv/virtualenv.py#L385-L390
def show_progress(self): """If we are in a progress scope, and no log messages have been shown, write out another '.'""" if self.in_progress_hanging: sys.stdout.write('.') sys.stdout.flush()
[ "def", "show_progress", "(", "self", ")", ":", "if", "self", ".", "in_progress_hanging", ":", "sys", ".", "stdout", ".", "write", "(", "'.'", ")", "sys", ".", "stdout", ".", "flush", "(", ")" ]
If we are in a progress scope, and no log messages have been shown, write out another '.'
[ "If", "we", "are", "in", "a", "progress", "scope", "and", "no", "log", "messages", "have", "been", "shown", "write", "out", "another", "." ]
python
train
bcbio/bcbio-nextgen
bcbio/qc/viral.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/viral.py#L59-L64
def get_files(data): """Retrieve pre-installed viral reference files. """ all_files = glob.glob(os.path.normpath(os.path.join(os.path.dirname(dd.get_ref_file(data)), os.pardir, "viral", "*"))) return sorted(all_files)
[ "def", "get_files", "(", "data", ")", ":", "all_files", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "dd", ".", "get_ref_file", "(", "data", ")", ")", ",", "os", ".", "pardir", ",", "\"viral\"", ",", "\"*\"", ")", ")", ")", "return", "sorted", "(", "all_files", ")" ]
Retrieve pre-installed viral reference files.
[ "Retrieve", "pre", "-", "installed", "viral", "reference", "files", "." ]
python
train
base4sistemas/satcfe
satcfe/clientelocal.py
https://github.com/base4sistemas/satcfe/blob/cb8e8815f4133d3e3d94cf526fa86767b4521ed9/satcfe/clientelocal.py#L163-L170
def extrair_logs(self): """Sobrepõe :meth:`~satcfe.base.FuncoesSAT.extrair_logs`. :return: Uma resposta SAT especializada em ``ExtrairLogs``. :rtype: satcfe.resposta.extrairlogs.RespostaExtrairLogs """ retorno = super(ClienteSATLocal, self).extrair_logs() return RespostaExtrairLogs.analisar(retorno)
[ "def", "extrair_logs", "(", "self", ")", ":", "retorno", "=", "super", "(", "ClienteSATLocal", ",", "self", ")", ".", "extrair_logs", "(", ")", "return", "RespostaExtrairLogs", ".", "analisar", "(", "retorno", ")" ]
Overrides :meth:`~satcfe.base.FuncoesSAT.extrair_logs`. :return: A SAT response specialized for ``ExtrairLogs``. :rtype: satcfe.resposta.extrairlogs.RespostaExtrairLogs
[ "Sobrepõe", ":", "meth", ":", "~satcfe", ".", "base", ".", "FuncoesSAT", ".", "extrair_logs", "." ]
python
train
theislab/anndata
anndata/base.py
https://github.com/theislab/anndata/blob/34f4eb63710628fbc15e7050e5efcac1d7806062/anndata/base.py#L225-L232
def _check_2d_shape(X): """Check shape of array or sparse matrix. Assure that X is always 2D: Unlike numpy we always deal with 2D arrays. """ if X.dtype.names is None and len(X.shape) != 2: raise ValueError('X needs to be 2-dimensional, not ' '{}-dimensional.'.format(len(X.shape)))
[ "def", "_check_2d_shape", "(", "X", ")", ":", "if", "X", ".", "dtype", ".", "names", "is", "None", "and", "len", "(", "X", ".", "shape", ")", "!=", "2", ":", "raise", "ValueError", "(", "'X needs to be 2-dimensional, not '", "'{}-dimensional.'", ".", "format", "(", "len", "(", "X", ".", "shape", ")", ")", ")" ]
Check shape of array or sparse matrix. Assure that X is always 2D: Unlike numpy we always deal with 2D arrays.
[ "Check", "shape", "of", "array", "or", "sparse", "matrix", "." ]
python
train
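Because _check_2d_shape is a private anndata helper, a hedged standalone sketch of the same check shows its behaviour without importing the package; the function below simply mirrors the logic in the record:

import numpy as np

def check_2d_shape(X):
    # Structured arrays (dtype with named fields) are exempt; anything else
    # must be exactly 2-dimensional.
    if X.dtype.names is None and len(X.shape) != 2:
        raise ValueError('X needs to be 2-dimensional, not '
                         '{}-dimensional.'.format(len(X.shape)))

check_2d_shape(np.zeros((3, 4)))   # passes silently
try:
    check_2d_shape(np.zeros(5))    # 1-D input, so this raises
except ValueError as err:
    print(err)                     # "X needs to be 2-dimensional, not 1-dimensional."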
hollenstein/maspy
maspy/proteindb.py
https://github.com/hollenstein/maspy/blob/f15fcfd24df306d8420540460d902aa3073ec133/maspy/proteindb.py#L293-L324
def load(cls, path, name): """Imports the specified ``proteindb`` file from the hard disk. :param path: filedirectory of the ``proteindb`` file :param name: filename without the file extension ".proteindb" .. note:: this generates rather large files, which actually take longer to import than to newly generate. Maybe saving / loading should be limited to the protein database whitout in silico digestion information. """ filepath = aux.joinpath(path, name + '.proteindb') with zipfile.ZipFile(filepath, 'r', allowZip64=True) as containerZip: #Convert the zipfile data into a str object, necessary since #containerZip.read() returns a bytes object. proteinsString = io.TextIOWrapper(containerZip.open('proteins'), encoding='utf-8' ).read() peptidesString = io.TextIOWrapper(containerZip.open('peptides'), encoding='utf-8' ).read() infoString = io.TextIOWrapper(containerZip.open('info'), encoding='utf-8' ).read() newInstance = cls() newInstance.proteins = json.loads(proteinsString, object_hook=ProteinSequence.jsonHook) newInstance.peptides = json.loads(peptidesString, object_hook=PeptideSequence.jsonHook) newInstance.info.update(json.loads(infoString)) return newInstance
[ "def", "load", "(", "cls", ",", "path", ",", "name", ")", ":", "filepath", "=", "aux", ".", "joinpath", "(", "path", ",", "name", "+", "'.proteindb'", ")", "with", "zipfile", ".", "ZipFile", "(", "filepath", ",", "'r'", ",", "allowZip64", "=", "True", ")", "as", "containerZip", ":", "#Convert the zipfile data into a str object, necessary since", "#containerZip.read() returns a bytes object.", "proteinsString", "=", "io", ".", "TextIOWrapper", "(", "containerZip", ".", "open", "(", "'proteins'", ")", ",", "encoding", "=", "'utf-8'", ")", ".", "read", "(", ")", "peptidesString", "=", "io", ".", "TextIOWrapper", "(", "containerZip", ".", "open", "(", "'peptides'", ")", ",", "encoding", "=", "'utf-8'", ")", ".", "read", "(", ")", "infoString", "=", "io", ".", "TextIOWrapper", "(", "containerZip", ".", "open", "(", "'info'", ")", ",", "encoding", "=", "'utf-8'", ")", ".", "read", "(", ")", "newInstance", "=", "cls", "(", ")", "newInstance", ".", "proteins", "=", "json", ".", "loads", "(", "proteinsString", ",", "object_hook", "=", "ProteinSequence", ".", "jsonHook", ")", "newInstance", ".", "peptides", "=", "json", ".", "loads", "(", "peptidesString", ",", "object_hook", "=", "PeptideSequence", ".", "jsonHook", ")", "newInstance", ".", "info", ".", "update", "(", "json", ".", "loads", "(", "infoString", ")", ")", "return", "newInstance" ]
Imports the specified ``proteindb`` file from the hard disk. :param path: filedirectory of the ``proteindb`` file :param name: filename without the file extension ".proteindb" .. note:: this generates rather large files, which actually take longer to import than to newly generate. Maybe saving / loading should be limited to the protein database without in silico digestion information.
[ "Imports", "the", "specified", "proteindb", "file", "from", "the", "hard", "disk", "." ]
python
train
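A hedged usage sketch for the load classmethod above; the class name ProteinDatabase and the directory layout are assumptions, since the record only shows the method body:

from maspy.proteindb import ProteinDatabase   # assumption: the class exposing load()

# Read a previously generated ".proteindb" container back from disk.
# Both the directory and the base name below are hypothetical.
proteindb = ProteinDatabase.load('/data/proteindb', 'uniprot_human')
print(len(proteindb.proteins), 'proteins,', len(proteindb.peptides), 'peptides')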
NeuroML/pyNeuroML
pyneuroml/pynml.py
https://github.com/NeuroML/pyNeuroML/blob/aeba2e3040b360bb26556f643cccbfb3dac3b8fb/pyneuroml/pynml.py#L419-L441
def quick_summary(nml2_doc): ''' Or better just use nml2_doc.summary(show_includes=False) ''' info = 'Contents of NeuroML 2 document: %s\n'%nml2_doc.id membs = inspect.getmembers(nml2_doc) for memb in membs: if isinstance(memb[1], list) and len(memb[1])>0 \ and not memb[0].endswith('_'): info+=' %s:\n ['%memb[0] for entry in memb[1]: extra = '???' extra = entry.name if hasattr(entry,'name') else extra extra = entry.href if hasattr(entry,'href') else extra extra = entry.id if hasattr(entry,'id') else extra info+=" %s (%s),"%(entry, extra) info+=']\n' return info
[ "def", "quick_summary", "(", "nml2_doc", ")", ":", "info", "=", "'Contents of NeuroML 2 document: %s\\n'", "%", "nml2_doc", ".", "id", "membs", "=", "inspect", ".", "getmembers", "(", "nml2_doc", ")", "for", "memb", "in", "membs", ":", "if", "isinstance", "(", "memb", "[", "1", "]", ",", "list", ")", "and", "len", "(", "memb", "[", "1", "]", ")", ">", "0", "and", "not", "memb", "[", "0", "]", ".", "endswith", "(", "'_'", ")", ":", "info", "+=", "' %s:\\n ['", "%", "memb", "[", "0", "]", "for", "entry", "in", "memb", "[", "1", "]", ":", "extra", "=", "'???'", "extra", "=", "entry", ".", "name", "if", "hasattr", "(", "entry", ",", "'name'", ")", "else", "extra", "extra", "=", "entry", ".", "href", "if", "hasattr", "(", "entry", ",", "'href'", ")", "else", "extra", "extra", "=", "entry", ".", "id", "if", "hasattr", "(", "entry", ",", "'id'", ")", "else", "extra", "info", "+=", "\" %s (%s),\"", "%", "(", "entry", ",", "extra", ")", "info", "+=", "']\\n'", "return", "info" ]
Or better just use nml2_doc.summary(show_includes=False)
[ "Or", "better", "just", "use", "nml2_doc", ".", "summary", "(", "show_includes", "=", "False", ")" ]
python
train
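A short sketch of calling quick_summary on a parsed NeuroML 2 document; it assumes pyNeuroML is installed, that read_neuroml2_file is available in the same pynml module, and that the file name below is hypothetical:

from pyneuroml import pynml   # assumption: import path matches the record

doc = pynml.read_neuroml2_file('network.nml')   # hypothetical NeuroML 2 file
print(pynml.quick_summary(doc))
# As the docstring notes, doc.summary(show_includes=False) gives similar output.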
pypa/pipenv
pipenv/vendor/pexpect/screen.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/pexpect/screen.py#L138-L144
def pretty (self): '''This returns a copy of the screen as a unicode string with an ASCII text box around the screen border. This is similar to __str__/__unicode__ except that it adds a box.''' top_bot = u'+' + u'-'*self.cols + u'+\n' return top_bot + u'\n'.join([u'|'+line+u'|' for line in unicode(self).split(u'\n')]) + u'\n' + top_bot
[ "def", "pretty", "(", "self", ")", ":", "top_bot", "=", "u'+'", "+", "u'-'", "*", "self", ".", "cols", "+", "u'+\\n'", "return", "top_bot", "+", "u'\\n'", ".", "join", "(", "[", "u'|'", "+", "line", "+", "u'|'", "for", "line", "in", "unicode", "(", "self", ")", ".", "split", "(", "u'\\n'", ")", "]", ")", "+", "u'\\n'", "+", "top_bot" ]
This returns a copy of the screen as a unicode string with an ASCII text box around the screen border. This is similar to __str__/__unicode__ except that it adds a box.
[ "This", "returns", "a", "copy", "of", "the", "screen", "as", "a", "unicode", "string", "with", "an", "ASCII", "text", "box", "around", "the", "screen", "border", ".", "This", "is", "similar", "to", "__str__", "/", "__unicode__", "except", "that", "it", "adds", "a", "box", "." ]
python
train
RedHatInsights/insights-core
insights/configtree/__init__.py
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/configtree/__init__.py#L165-L174
def find(self, *queries, **kwargs): """ Finds the first result found anywhere in the configuration. Pass `one=last` for the last result. Returns `None` if no results are found. """ kwargs["deep"] = True kwargs["roots"] = False if "one" not in kwargs: kwargs["one"] = first return self.select(*queries, **kwargs)
[ "def", "find", "(", "self", ",", "*", "queries", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "\"deep\"", "]", "=", "True", "kwargs", "[", "\"roots\"", "]", "=", "False", "if", "\"one\"", "not", "in", "kwargs", ":", "kwargs", "[", "\"one\"", "]", "=", "first", "return", "self", ".", "select", "(", "*", "queries", ",", "*", "*", "kwargs", ")" ]
Finds the first result found anywhere in the configuration. Pass `one=last` for the last result. Returns `None` if no results are found.
[ "Finds", "the", "first", "result", "found", "anywhere", "in", "the", "configuration", ".", "Pass", "one", "=", "last", "for", "the", "last", "result", ".", "Returns", "None", "if", "no", "results", "are", "found", "." ]
python
train
scanny/python-pptx
pptx/parts/image.py
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/parts/image.py#L230-L245
def ext(self): """ Canonical file extension for this image e.g. ``'png'``. The returned extension is all lowercase and is the canonical extension for the content type of this image, regardless of what extension may have been used in its filename, if any. """ ext_map = { 'BMP': 'bmp', 'GIF': 'gif', 'JPEG': 'jpg', 'PNG': 'png', 'TIFF': 'tiff', 'WMF': 'wmf' } format = self._format if format not in ext_map: tmpl = "unsupported image format, expected one of: %s, got '%s'" raise ValueError(tmpl % (ext_map.keys(), format)) return ext_map[format]
[ "def", "ext", "(", "self", ")", ":", "ext_map", "=", "{", "'BMP'", ":", "'bmp'", ",", "'GIF'", ":", "'gif'", ",", "'JPEG'", ":", "'jpg'", ",", "'PNG'", ":", "'png'", ",", "'TIFF'", ":", "'tiff'", ",", "'WMF'", ":", "'wmf'", "}", "format", "=", "self", ".", "_format", "if", "format", "not", "in", "ext_map", ":", "tmpl", "=", "\"unsupported image format, expected one of: %s, got '%s'\"", "raise", "ValueError", "(", "tmpl", "%", "(", "ext_map", ".", "keys", "(", ")", ",", "format", ")", ")", "return", "ext_map", "[", "format", "]" ]
Canonical file extension for this image e.g. ``'png'``. The returned extension is all lowercase and is the canonical extension for the content type of this image, regardless of what extension may have been used in its filename, if any.
[ "Canonical", "file", "extension", "for", "this", "image", "e", ".", "g", ".", "png", ".", "The", "returned", "extension", "is", "all", "lowercase", "and", "is", "the", "canonical", "extension", "for", "the", "content", "type", "of", "this", "image", "regardless", "of", "what", "extension", "may", "have", "been", "used", "in", "its", "filename", "if", "any", "." ]
python
train
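A brief usage sketch of the ext property above; it assumes python-pptx is installed and that Image.from_file is the way to build an Image part from a path (the file name is hypothetical):

from pptx.parts.image import Image   # assumption: import path matches the record

img = Image.from_file('logo.jpeg')   # hypothetical JPEG on disk
print(img.ext)                       # -> 'jpg', the canonical extension for JPEG content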
yahoo/TensorFlowOnSpark
examples/wide_deep/census_dataset.py
https://github.com/yahoo/TensorFlowOnSpark/blob/5e4b6c185ab722fd0104ede0377e1149ea8d6f7c/examples/wide_deep/census_dataset.py#L89-L157
def build_model_columns(): """Builds a set of wide and deep feature columns.""" # Continuous variable columns age = tf.feature_column.numeric_column('age') education_num = tf.feature_column.numeric_column('education_num') capital_gain = tf.feature_column.numeric_column('capital_gain') capital_loss = tf.feature_column.numeric_column('capital_loss') hours_per_week = tf.feature_column.numeric_column('hours_per_week') education = tf.feature_column.categorical_column_with_vocabulary_list( 'education', [ 'Bachelors', 'HS-grad', '11th', 'Masters', '9th', 'Some-college', 'Assoc-acdm', 'Assoc-voc', '7th-8th', 'Doctorate', 'Prof-school', '5th-6th', '10th', '1st-4th', 'Preschool', '12th']) marital_status = tf.feature_column.categorical_column_with_vocabulary_list( 'marital_status', [ 'Married-civ-spouse', 'Divorced', 'Married-spouse-absent', 'Never-married', 'Separated', 'Married-AF-spouse', 'Widowed']) relationship = tf.feature_column.categorical_column_with_vocabulary_list( 'relationship', [ 'Husband', 'Not-in-family', 'Wife', 'Own-child', 'Unmarried', 'Other-relative']) workclass = tf.feature_column.categorical_column_with_vocabulary_list( 'workclass', [ 'Self-emp-not-inc', 'Private', 'State-gov', 'Federal-gov', 'Local-gov', '?', 'Self-emp-inc', 'Without-pay', 'Never-worked']) # To show an example of hashing: occupation = tf.feature_column.categorical_column_with_hash_bucket( 'occupation', hash_bucket_size=_HASH_BUCKET_SIZE) # Transformations. age_buckets = tf.feature_column.bucketized_column( age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65]) # Wide columns and deep columns. base_columns = [ education, marital_status, relationship, workclass, occupation, age_buckets, ] crossed_columns = [ tf.feature_column.crossed_column( ['education', 'occupation'], hash_bucket_size=_HASH_BUCKET_SIZE), tf.feature_column.crossed_column( [age_buckets, 'education', 'occupation'], hash_bucket_size=_HASH_BUCKET_SIZE), ] wide_columns = base_columns + crossed_columns deep_columns = [ age, education_num, capital_gain, capital_loss, hours_per_week, tf.feature_column.indicator_column(workclass), tf.feature_column.indicator_column(education), tf.feature_column.indicator_column(marital_status), tf.feature_column.indicator_column(relationship), # To show an example of embedding tf.feature_column.embedding_column(occupation, dimension=8), ] return wide_columns, deep_columns
[ "def", "build_model_columns", "(", ")", ":", "# Continuous variable columns", "age", "=", "tf", ".", "feature_column", ".", "numeric_column", "(", "'age'", ")", "education_num", "=", "tf", ".", "feature_column", ".", "numeric_column", "(", "'education_num'", ")", "capital_gain", "=", "tf", ".", "feature_column", ".", "numeric_column", "(", "'capital_gain'", ")", "capital_loss", "=", "tf", ".", "feature_column", ".", "numeric_column", "(", "'capital_loss'", ")", "hours_per_week", "=", "tf", ".", "feature_column", ".", "numeric_column", "(", "'hours_per_week'", ")", "education", "=", "tf", ".", "feature_column", ".", "categorical_column_with_vocabulary_list", "(", "'education'", ",", "[", "'Bachelors'", ",", "'HS-grad'", ",", "'11th'", ",", "'Masters'", ",", "'9th'", ",", "'Some-college'", ",", "'Assoc-acdm'", ",", "'Assoc-voc'", ",", "'7th-8th'", ",", "'Doctorate'", ",", "'Prof-school'", ",", "'5th-6th'", ",", "'10th'", ",", "'1st-4th'", ",", "'Preschool'", ",", "'12th'", "]", ")", "marital_status", "=", "tf", ".", "feature_column", ".", "categorical_column_with_vocabulary_list", "(", "'marital_status'", ",", "[", "'Married-civ-spouse'", ",", "'Divorced'", ",", "'Married-spouse-absent'", ",", "'Never-married'", ",", "'Separated'", ",", "'Married-AF-spouse'", ",", "'Widowed'", "]", ")", "relationship", "=", "tf", ".", "feature_column", ".", "categorical_column_with_vocabulary_list", "(", "'relationship'", ",", "[", "'Husband'", ",", "'Not-in-family'", ",", "'Wife'", ",", "'Own-child'", ",", "'Unmarried'", ",", "'Other-relative'", "]", ")", "workclass", "=", "tf", ".", "feature_column", ".", "categorical_column_with_vocabulary_list", "(", "'workclass'", ",", "[", "'Self-emp-not-inc'", ",", "'Private'", ",", "'State-gov'", ",", "'Federal-gov'", ",", "'Local-gov'", ",", "'?'", ",", "'Self-emp-inc'", ",", "'Without-pay'", ",", "'Never-worked'", "]", ")", "# To show an example of hashing:", "occupation", "=", "tf", ".", "feature_column", ".", "categorical_column_with_hash_bucket", "(", "'occupation'", ",", "hash_bucket_size", "=", "_HASH_BUCKET_SIZE", ")", "# Transformations.", "age_buckets", "=", "tf", ".", "feature_column", ".", "bucketized_column", "(", "age", ",", "boundaries", "=", "[", "18", ",", "25", ",", "30", ",", "35", ",", "40", ",", "45", ",", "50", ",", "55", ",", "60", ",", "65", "]", ")", "# Wide columns and deep columns.", "base_columns", "=", "[", "education", ",", "marital_status", ",", "relationship", ",", "workclass", ",", "occupation", ",", "age_buckets", ",", "]", "crossed_columns", "=", "[", "tf", ".", "feature_column", ".", "crossed_column", "(", "[", "'education'", ",", "'occupation'", "]", ",", "hash_bucket_size", "=", "_HASH_BUCKET_SIZE", ")", ",", "tf", ".", "feature_column", ".", "crossed_column", "(", "[", "age_buckets", ",", "'education'", ",", "'occupation'", "]", ",", "hash_bucket_size", "=", "_HASH_BUCKET_SIZE", ")", ",", "]", "wide_columns", "=", "base_columns", "+", "crossed_columns", "deep_columns", "=", "[", "age", ",", "education_num", ",", "capital_gain", ",", "capital_loss", ",", "hours_per_week", ",", "tf", ".", "feature_column", ".", "indicator_column", "(", "workclass", ")", ",", "tf", ".", "feature_column", ".", "indicator_column", "(", "education", ")", ",", "tf", ".", "feature_column", ".", "indicator_column", "(", "marital_status", ")", ",", "tf", ".", "feature_column", ".", "indicator_column", "(", "relationship", ")", ",", "# To show an example of embedding", "tf", ".", "feature_column", ".", "embedding_column", "(", "occupation", ",", 
"dimension", "=", "8", ")", ",", "]", "return", "wide_columns", ",", "deep_columns" ]
Builds a set of wide and deep feature columns.
[ "Builds", "a", "set", "of", "wide", "and", "deep", "feature", "columns", "." ]
python
train
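The wide and deep column sets returned above are usually handed to a combined linear/DNN estimator. A hedged sketch using the TensorFlow 1.x estimator API follows; the model directory and the commented-out input function are placeholder assumptions:

import tensorflow as tf
import census_dataset   # assumption: the module from the record is on the path

wide_columns, deep_columns = census_dataset.build_model_columns()

estimator = tf.estimator.DNNLinearCombinedClassifier(
    model_dir='/tmp/census_model',            # hypothetical output directory
    linear_feature_columns=wide_columns,      # wide part: vocabulary, hash and crossed columns
    dnn_feature_columns=deep_columns,         # deep part: numeric, indicator and embedding columns
    dnn_hidden_units=[100, 75, 50, 25])

# estimator.train(input_fn=...)               # an input_fn over the census CSVs would go here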
fabioz/PyDev.Debugger
pydev_ipython/qt_loaders.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydev_ipython/qt_loaders.py#L85-L124
def has_binding(api): """Safely check for PyQt4 or PySide, without importing submodules Parameters ---------- api : str [ 'pyqtv1' | 'pyqt' | 'pyside' | 'pyqtdefault'] Which module to check for Returns ------- True if the relevant module appears to be importable """ # we can't import an incomplete pyside and pyqt4 # this will cause a crash in sip (#1431) # check for complete presence before importing module_name = {QT_API_PYSIDE: 'PySide', QT_API_PYQT: 'PyQt4', QT_API_PYQTv1: 'PyQt4', QT_API_PYQT_DEFAULT: 'PyQt4', QT_API_PYQT5: 'PyQt5', } module_name = module_name[api] import imp try: #importing top level PyQt4/PySide module is ok... mod = __import__(module_name) #...importing submodules is not imp.find_module('QtCore', mod.__path__) imp.find_module('QtGui', mod.__path__) imp.find_module('QtSvg', mod.__path__) #we can also safely check PySide version if api == QT_API_PYSIDE: return check_version(mod.__version__, '1.0.3') else: return True except ImportError: return False
[ "def", "has_binding", "(", "api", ")", ":", "# we can't import an incomplete pyside and pyqt4", "# this will cause a crash in sip (#1431)", "# check for complete presence before importing", "module_name", "=", "{", "QT_API_PYSIDE", ":", "'PySide'", ",", "QT_API_PYQT", ":", "'PyQt4'", ",", "QT_API_PYQTv1", ":", "'PyQt4'", ",", "QT_API_PYQT_DEFAULT", ":", "'PyQt4'", ",", "QT_API_PYQT5", ":", "'PyQt5'", ",", "}", "module_name", "=", "module_name", "[", "api", "]", "import", "imp", "try", ":", "#importing top level PyQt4/PySide module is ok...", "mod", "=", "__import__", "(", "module_name", ")", "#...importing submodules is not", "imp", ".", "find_module", "(", "'QtCore'", ",", "mod", ".", "__path__", ")", "imp", ".", "find_module", "(", "'QtGui'", ",", "mod", ".", "__path__", ")", "imp", ".", "find_module", "(", "'QtSvg'", ",", "mod", ".", "__path__", ")", "#we can also safely check PySide version", "if", "api", "==", "QT_API_PYSIDE", ":", "return", "check_version", "(", "mod", ".", "__version__", ",", "'1.0.3'", ")", "else", ":", "return", "True", "except", "ImportError", ":", "return", "False" ]
Safely check for PyQt4 or PySide, without importing submodules Parameters ---------- api : str [ 'pyqtv1' | 'pyqt' | 'pyside' | 'pyqtdefault'] Which module to check for Returns ------- True if the relevant module appears to be importable
[ "Safely", "check", "for", "PyQt4", "or", "PySide", "without", "importing", "submodules" ]
python
train
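A sketch of how a probe like has_binding is typically used to settle on one Qt binding before importing it; the QT_API_* constants are assumed to be importable from the same qt_loaders module, as the record's code suggests:

from pydev_ipython.qt_loaders import has_binding, QT_API_PYQT5, QT_API_PYSIDE

if has_binding(QT_API_PYQT5):
    api = QT_API_PYQT5          # complete PyQt5 install found
elif has_binding(QT_API_PYSIDE):
    api = QT_API_PYSIDE         # fall back to PySide if it is complete
else:
    raise ImportError("no complete Qt binding (PyQt5 or PySide) could be found")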
saltstack/salt
salt/netapi/rest_wsgi.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/rest_wsgi.py#L192-L203
def get_json(environ): ''' Return the request body as JSON ''' content_type = environ.get('CONTENT_TYPE', '') if content_type != 'application/json': raise HTTPError(406, 'JSON required') try: return salt.utils.json.loads(read_body(environ)) except ValueError as exc: raise HTTPError(400, exc)
[ "def", "get_json", "(", "environ", ")", ":", "content_type", "=", "environ", ".", "get", "(", "'CONTENT_TYPE'", ",", "''", ")", "if", "content_type", "!=", "'application/json'", ":", "raise", "HTTPError", "(", "406", ",", "'JSON required'", ")", "try", ":", "return", "salt", ".", "utils", ".", "json", ".", "loads", "(", "read_body", "(", "environ", ")", ")", "except", "ValueError", "as", "exc", ":", "raise", "HTTPError", "(", "400", ",", "exc", ")" ]
Return the request body as JSON
[ "Return", "the", "request", "body", "as", "JSON" ]
python
train
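The get_json helper expects a WSGI environ with a JSON content type. The self-contained sketch below builds such an environ by hand and parses it with a simplified stand-in for the salt helper (read_body is approximated by reading wsgi.input directly):

import io
import json

def get_json_standalone(environ):
    # Simplified stand-in for the salt helper: insist on a JSON content type,
    # then parse the request body from wsgi.input.
    if environ.get('CONTENT_TYPE', '') != 'application/json':
        raise ValueError('JSON required')
    length = int(environ.get('CONTENT_LENGTH') or 0)
    return json.loads(environ['wsgi.input'].read(length).decode('utf-8'))

body = json.dumps({'fun': 'test.ping'}).encode('utf-8')
environ = {
    'CONTENT_TYPE': 'application/json',
    'CONTENT_LENGTH': str(len(body)),
    'wsgi.input': io.BytesIO(body),
}
print(get_json_standalone(environ))   # -> {'fun': 'test.ping'}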
KelSolaar/Foundations
foundations/strings.py
https://github.com/KelSolaar/Foundations/blob/5c141330faf09dad70a12bc321f4c564917d0a91/foundations/strings.py#L62-L80
def get_nice_name(name): """ Converts a string to nice string: **currentLogText** -> **Current Log Text**. Usage:: >>> get_nice_name("getMeANiceName") u'Get Me A Nice Name' >>> get_nice_name("__getMeANiceName") u'__Get Me A Nice Name' :param name: Current string to be nicified. :type name: unicode :return: Nicified string. :rtype: unicode """ chunks = re.sub(r"(.)([A-Z][a-z]+)", r"\1 \2", name) return " ".join(element.title() for element in re.sub(r"([a-z0-9])([A-Z])", r"\1 \2", chunks).split())
[ "def", "get_nice_name", "(", "name", ")", ":", "chunks", "=", "re", ".", "sub", "(", "r\"(.)([A-Z][a-z]+)\"", ",", "r\"\\1 \\2\"", ",", "name", ")", "return", "\" \"", ".", "join", "(", "element", ".", "title", "(", ")", "for", "element", "in", "re", ".", "sub", "(", "r\"([a-z0-9])([A-Z])\"", ",", "r\"\\1 \\2\"", ",", "chunks", ")", ".", "split", "(", ")", ")" ]
Converts a string to nice string: **currentLogText** -> **Current Log Text**. Usage:: >>> get_nice_name("getMeANiceName") u'Get Me A Nice Name' >>> get_nice_name("__getMeANiceName") u'__Get Me A Nice Name' :param name: Current string to be nicified. :type name: unicode :return: Nicified string. :rtype: unicode
[ "Converts", "a", "string", "to", "nice", "string", ":", "**", "currentLogText", "**", "-", ">", "**", "Current", "Log", "Text", "**", "." ]
python
train
signetlabdei/sem
sem/cli.py
https://github.com/signetlabdei/sem/blob/5077dd7a6d15644a18790bb6fde320e905f0fef0/sem/cli.py#L349-L367
def query_parameters(param_list, defaults=None): """ Asks the user for parameters. If available, proposes some defaults. Args: param_list (list): List of parameters to ask the user for values. defaults (list): A list of proposed defaults. It must be a list of the same length as param_list. A value of None in one element of the list means that no default will be proposed for the corresponding parameter. """ script_params = collections.OrderedDict([k, []] for k in param_list) for param, default in zip(list(script_params.keys()), defaults): user_input = click.prompt("%s" % param, default=default) script_params[param] = ast.literal_eval(user_input) return script_params
[ "def", "query_parameters", "(", "param_list", ",", "defaults", "=", "None", ")", ":", "script_params", "=", "collections", ".", "OrderedDict", "(", "[", "k", ",", "[", "]", "]", "for", "k", "in", "param_list", ")", "for", "param", ",", "default", "in", "zip", "(", "list", "(", "script_params", ".", "keys", "(", ")", ")", ",", "defaults", ")", ":", "user_input", "=", "click", ".", "prompt", "(", "\"%s\"", "%", "param", ",", "default", "=", "default", ")", "script_params", "[", "param", "]", "=", "ast", ".", "literal_eval", "(", "user_input", ")", "return", "script_params" ]
Asks the user for parameters. If available, proposes some defaults. Args: param_list (list): List of parameters to ask the user for values. defaults (list): A list of proposed defaults. It must be a list of the same length as param_list. A value of None in one element of the list means that no default will be proposed for the corresponding parameter.
[ "Asks", "the", "user", "for", "parameters", ".", "If", "available", "proposes", "some", "defaults", "." ]
python
train
bram85/topydo
topydo/lib/WriteCommand.py
https://github.com/bram85/topydo/blob/b59fcfca5361869a6b78d4c9808c7c6cd0a18b58/topydo/lib/WriteCommand.py#L22-L77
def postprocess_input_todo(self, p_todo): """ Post-processes a parsed todo when adding it to the list. * It converts relative dates to absolute ones. * Automatically inserts a creation date if not present. * Handles more user-friendly dependencies with before:, partof: and after: tags """ def convert_date(p_tag): value = p_todo.tag_value(p_tag) if value: dateobj = relative_date_to_date(value) if dateobj: p_todo.set_tag(p_tag, dateobj.isoformat()) def add_dependencies(p_tag): for value in p_todo.tag_values(p_tag): try: dep = self.todolist.todo(value) if p_tag == 'after': self.todolist.add_dependency(p_todo, dep) elif p_tag == 'before' or p_tag == 'partof': self.todolist.add_dependency(dep, p_todo) elif p_tag.startswith('parent'): for parent in self.todolist.parents(dep): self.todolist.add_dependency(parent, p_todo) elif p_tag.startswith('child'): for child in self.todolist.children(dep): self.todolist.add_dependency(p_todo, child) except InvalidTodoException: pass p_todo.remove_tag(p_tag, value) convert_date(config().tag_start()) convert_date(config().tag_due()) keywords = [ 'after', 'before', 'child-of', 'childof', 'children-of', 'childrenof', 'parent-of', 'parentof', 'parents-of', 'parentsof', 'partof', ] for keyword in keywords: add_dependencies(keyword)
[ "def", "postprocess_input_todo", "(", "self", ",", "p_todo", ")", ":", "def", "convert_date", "(", "p_tag", ")", ":", "value", "=", "p_todo", ".", "tag_value", "(", "p_tag", ")", "if", "value", ":", "dateobj", "=", "relative_date_to_date", "(", "value", ")", "if", "dateobj", ":", "p_todo", ".", "set_tag", "(", "p_tag", ",", "dateobj", ".", "isoformat", "(", ")", ")", "def", "add_dependencies", "(", "p_tag", ")", ":", "for", "value", "in", "p_todo", ".", "tag_values", "(", "p_tag", ")", ":", "try", ":", "dep", "=", "self", ".", "todolist", ".", "todo", "(", "value", ")", "if", "p_tag", "==", "'after'", ":", "self", ".", "todolist", ".", "add_dependency", "(", "p_todo", ",", "dep", ")", "elif", "p_tag", "==", "'before'", "or", "p_tag", "==", "'partof'", ":", "self", ".", "todolist", ".", "add_dependency", "(", "dep", ",", "p_todo", ")", "elif", "p_tag", ".", "startswith", "(", "'parent'", ")", ":", "for", "parent", "in", "self", ".", "todolist", ".", "parents", "(", "dep", ")", ":", "self", ".", "todolist", ".", "add_dependency", "(", "parent", ",", "p_todo", ")", "elif", "p_tag", ".", "startswith", "(", "'child'", ")", ":", "for", "child", "in", "self", ".", "todolist", ".", "children", "(", "dep", ")", ":", "self", ".", "todolist", ".", "add_dependency", "(", "p_todo", ",", "child", ")", "except", "InvalidTodoException", ":", "pass", "p_todo", ".", "remove_tag", "(", "p_tag", ",", "value", ")", "convert_date", "(", "config", "(", ")", ".", "tag_start", "(", ")", ")", "convert_date", "(", "config", "(", ")", ".", "tag_due", "(", ")", ")", "keywords", "=", "[", "'after'", ",", "'before'", ",", "'child-of'", ",", "'childof'", ",", "'children-of'", ",", "'childrenof'", ",", "'parent-of'", ",", "'parentof'", ",", "'parents-of'", ",", "'parentsof'", ",", "'partof'", ",", "]", "for", "keyword", "in", "keywords", ":", "add_dependencies", "(", "keyword", ")" ]
Post-processes a parsed todo when adding it to the list. * It converts relative dates to absolute ones. * Automatically inserts a creation date if not present. * Handles more user-friendly dependencies with before:, partof: and after: tags
[ "Post", "-", "processes", "a", "parsed", "todo", "when", "adding", "it", "to", "the", "list", "." ]
python
train
hover2pi/svo_filters
svo_filters/svo.py
https://github.com/hover2pi/svo_filters/blob/f0587c4908baf636d4bdf030fa95029e8f31b975/svo_filters/svo.py#L799-L886
def filters(filter_directory=None, update=False, fmt='table', **kwargs): """ Get a list of the available filters Parameters ---------- filter_directory: str The directory containing the filter relative spectral response curves update: bool Check the filter directory for new filters and generate pickle of table fmt: str The format for the returned table Returns ------- list The list of band names """ if filter_directory is None: filter_directory = resource_filename('svo_filters', 'data/filters/') # Get the pickle path and make sure file exists p_path = os.path.join(filter_directory, 'filter_list.p') updated = False if not os.path.isfile(p_path): os.system('touch {}'.format(p_path)) if update: print('Loading filters into table...') # Get all the filters (except the pickle) files = glob(filter_directory+'*') files = [f for f in files if not f.endswith('.p')] bands = [os.path.basename(b) for b in files] tables = [] for band in bands: # Load the filter band = band.replace('.txt', '') filt = Filter(band, **kwargs) filt.Band = band # Put metadata into table with correct dtypes info = filt.info(True) vals = [float(i) if i.replace('.', '').replace('-', '') .replace('+', '').isnumeric() else i for i in info['Values']] dtypes = np.array([type(i) for i in vals]) table = at.Table(np.array([vals]), names=info['Attributes'], dtype=dtypes) tables.append(table) del filt, info, table # Write to the pickle with open(p_path, 'wb') as file: pickle.dump(at.vstack(tables), file) # Load the saved pickle data = {} if os.path.isfile(p_path): with open(p_path, 'rb') as file: data = pickle.load(file) # Return the data if data: if fmt == 'dict': data = {r[0]: {k: r[k].value if hasattr(r[k], 'unit') else r[k] for k in data.keys()[1:]} for r in data} else: # Add Band as index data.add_index('Band') return data # Or try to generate it once else: if not updated: updated = True filters(update=True) else: print('No filters found in', filter_directory)
[ "def", "filters", "(", "filter_directory", "=", "None", ",", "update", "=", "False", ",", "fmt", "=", "'table'", ",", "*", "*", "kwargs", ")", ":", "if", "filter_directory", "is", "None", ":", "filter_directory", "=", "resource_filename", "(", "'svo_filters'", ",", "'data/filters/'", ")", "# Get the pickle path and make sure file exists", "p_path", "=", "os", ".", "path", ".", "join", "(", "filter_directory", ",", "'filter_list.p'", ")", "updated", "=", "False", "if", "not", "os", ".", "path", ".", "isfile", "(", "p_path", ")", ":", "os", ".", "system", "(", "'touch {}'", ".", "format", "(", "p_path", ")", ")", "if", "update", ":", "print", "(", "'Loading filters into table...'", ")", "# Get all the filters (except the pickle)", "files", "=", "glob", "(", "filter_directory", "+", "'*'", ")", "files", "=", "[", "f", "for", "f", "in", "files", "if", "not", "f", ".", "endswith", "(", "'.p'", ")", "]", "bands", "=", "[", "os", ".", "path", ".", "basename", "(", "b", ")", "for", "b", "in", "files", "]", "tables", "=", "[", "]", "for", "band", "in", "bands", ":", "# Load the filter", "band", "=", "band", ".", "replace", "(", "'.txt'", ",", "''", ")", "filt", "=", "Filter", "(", "band", ",", "*", "*", "kwargs", ")", "filt", ".", "Band", "=", "band", "# Put metadata into table with correct dtypes", "info", "=", "filt", ".", "info", "(", "True", ")", "vals", "=", "[", "float", "(", "i", ")", "if", "i", ".", "replace", "(", "'.'", ",", "''", ")", ".", "replace", "(", "'-'", ",", "''", ")", ".", "replace", "(", "'+'", ",", "''", ")", ".", "isnumeric", "(", ")", "else", "i", "for", "i", "in", "info", "[", "'Values'", "]", "]", "dtypes", "=", "np", ".", "array", "(", "[", "type", "(", "i", ")", "for", "i", "in", "vals", "]", ")", "table", "=", "at", ".", "Table", "(", "np", ".", "array", "(", "[", "vals", "]", ")", ",", "names", "=", "info", "[", "'Attributes'", "]", ",", "dtype", "=", "dtypes", ")", "tables", ".", "append", "(", "table", ")", "del", "filt", ",", "info", ",", "table", "# Write to the pickle", "with", "open", "(", "p_path", ",", "'wb'", ")", "as", "file", ":", "pickle", ".", "dump", "(", "at", ".", "vstack", "(", "tables", ")", ",", "file", ")", "# Load the saved pickle", "data", "=", "{", "}", "if", "os", ".", "path", ".", "isfile", "(", "p_path", ")", ":", "with", "open", "(", "p_path", ",", "'rb'", ")", "as", "file", ":", "data", "=", "pickle", ".", "load", "(", "file", ")", "# Return the data", "if", "data", ":", "if", "fmt", "==", "'dict'", ":", "data", "=", "{", "r", "[", "0", "]", ":", "{", "k", ":", "r", "[", "k", "]", ".", "value", "if", "hasattr", "(", "r", "[", "k", "]", ",", "'unit'", ")", "else", "r", "[", "k", "]", "for", "k", "in", "data", ".", "keys", "(", ")", "[", "1", ":", "]", "}", "for", "r", "in", "data", "}", "else", ":", "# Add Band as index", "data", ".", "add_index", "(", "'Band'", ")", "return", "data", "# Or try to generate it once", "else", ":", "if", "not", "updated", ":", "updated", "=", "True", "filters", "(", "update", "=", "True", ")", "else", ":", "print", "(", "'No filters found in'", ",", "filter_directory", ")" ]
Get a list of the available filters Parameters ---------- filter_directory: str The directory containing the filter relative spectral response curves update: bool Check the filter directory for new filters and generate pickle of table fmt: str The format for the returned table Returns ------- list The list of band names
[ "Get", "a", "list", "of", "the", "available", "filters" ]
python
train
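A hedged usage sketch for the filters listing function above; it assumes the module is importable as svo_filters.svo, matching the record's path:

from svo_filters import svo   # assumption: module layout matches the record

bands = svo.filters()                  # astropy table of available bandpasses (default format)
band_dict = svo.filters(fmt='dict')    # or a dict keyed by band name
# svo.filters(update=True)             # rescan the filter directory and rebuild the cached pickle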
googleapis/oauth2client
oauth2client/contrib/flask_util.py
https://github.com/googleapis/oauth2client/blob/50d20532a748f18e53f7d24ccbe6647132c979a9/oauth2client/contrib/flask_util.py#L444-L453
def has_credentials(self): """Returns True if there are valid credentials for the current user.""" if not self.credentials: return False # Is the access token expired? If so, do we have an refresh token? elif (self.credentials.access_token_expired and not self.credentials.refresh_token): return False else: return True
[ "def", "has_credentials", "(", "self", ")", ":", "if", "not", "self", ".", "credentials", ":", "return", "False", "# Is the access token expired? If so, do we have an refresh token?", "elif", "(", "self", ".", "credentials", ".", "access_token_expired", "and", "not", "self", ".", "credentials", ".", "refresh_token", ")", ":", "return", "False", "else", ":", "return", "True" ]
Returns True if there are valid credentials for the current user.
[ "Returns", "True", "if", "there", "are", "valid", "credentials", "for", "the", "current", "user", "." ]
python
valid
BerkeleyAutomation/perception
perception/orthographic_intrinsics.py
https://github.com/BerkeleyAutomation/perception/blob/03d9b37dd6b66896cdfe173905c9413c8c3c5df6/perception/orthographic_intrinsics.py#L308-L339
def load(filename): """Load a CameraIntrinsics object from a file. Parameters ---------- filename : :obj:`str` The .intr file to load the object from. Returns ------- :obj:`CameraIntrinsics` The CameraIntrinsics object loaded from the file. Raises ------ ValueError If filename does not have the .intr extension. """ file_root, file_ext = os.path.splitext(filename) if file_ext.lower() != INTR_EXTENSION: raise ValueError('Extension %s not supported for CameraIntrinsics. Must be stored with extension %s' %(file_ext, INTR_EXTENSION)) f = open(filename, 'r') ci = json.load(f) f.close() return OrthographicIntrinsics(frame=ci['_frame'], vol_height=ci['_vol_height'], vol_width=ci['_vol_width'], vol_depth=ci['_vol_depth'], plane_height=ci['_plane_height'], plane_width=ci['_plane_width'], depth_scale=ci['_depth_scale'])
[ "def", "load", "(", "filename", ")", ":", "file_root", ",", "file_ext", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "if", "file_ext", ".", "lower", "(", ")", "!=", "INTR_EXTENSION", ":", "raise", "ValueError", "(", "'Extension %s not supported for CameraIntrinsics. Must be stored with extension %s'", "%", "(", "file_ext", ",", "INTR_EXTENSION", ")", ")", "f", "=", "open", "(", "filename", ",", "'r'", ")", "ci", "=", "json", ".", "load", "(", "f", ")", "f", ".", "close", "(", ")", "return", "OrthographicIntrinsics", "(", "frame", "=", "ci", "[", "'_frame'", "]", ",", "vol_height", "=", "ci", "[", "'_vol_height'", "]", ",", "vol_width", "=", "ci", "[", "'_vol_width'", "]", ",", "vol_depth", "=", "ci", "[", "'_vol_depth'", "]", ",", "plane_height", "=", "ci", "[", "'_plane_height'", "]", ",", "plane_width", "=", "ci", "[", "'_plane_width'", "]", ",", "depth_scale", "=", "ci", "[", "'_depth_scale'", "]", ")" ]
Load a CameraIntrinsics object from a file. Parameters ---------- filename : :obj:`str` The .intr file to load the object from. Returns ------- :obj:`CameraIntrinsics` The CameraIntrinsics object loaded from the file. Raises ------ ValueError If filename does not have the .intr extension.
[ "Load", "a", "CameraIntrinsics", "object", "from", "a", "file", "." ]
python
train
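A short sketch of loading intrinsics with the static method above; the import path follows the record and the file path is hypothetical. A file without the .intr extension would raise ValueError, as documented:

from perception.orthographic_intrinsics import OrthographicIntrinsics  # assumed import path

intr = OrthographicIntrinsics.load('/data/sensors/ortho_cam.intr')   # hypothetical .intr file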
tnkteja/myhelp
virtualEnvironment/lib/python2.7/site-packages/coverage/data.py
https://github.com/tnkteja/myhelp/blob/fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb/virtualEnvironment/lib/python2.7/site-packages/coverage/data.py#L152-L176
def _read_file(self, filename): """Return the stored coverage data from the given file. Returns two values, suitable for assigning to `self.lines` and `self.arcs`. """ lines = {} arcs = {} try: data = self.raw_data(filename) if isinstance(data, dict): # Unpack the 'lines' item. lines = dict([ (f, dict.fromkeys(linenos, None)) for f, linenos in iitems(data.get('lines', {})) ]) # Unpack the 'arcs' item. arcs = dict([ (f, dict.fromkeys(arcpairs, None)) for f, arcpairs in iitems(data.get('arcs', {})) ]) except Exception: pass return lines, arcs
[ "def", "_read_file", "(", "self", ",", "filename", ")", ":", "lines", "=", "{", "}", "arcs", "=", "{", "}", "try", ":", "data", "=", "self", ".", "raw_data", "(", "filename", ")", "if", "isinstance", "(", "data", ",", "dict", ")", ":", "# Unpack the 'lines' item.", "lines", "=", "dict", "(", "[", "(", "f", ",", "dict", ".", "fromkeys", "(", "linenos", ",", "None", ")", ")", "for", "f", ",", "linenos", "in", "iitems", "(", "data", ".", "get", "(", "'lines'", ",", "{", "}", ")", ")", "]", ")", "# Unpack the 'arcs' item.", "arcs", "=", "dict", "(", "[", "(", "f", ",", "dict", ".", "fromkeys", "(", "arcpairs", ",", "None", ")", ")", "for", "f", ",", "arcpairs", "in", "iitems", "(", "data", ".", "get", "(", "'arcs'", ",", "{", "}", ")", ")", "]", ")", "except", "Exception", ":", "pass", "return", "lines", ",", "arcs" ]
Return the stored coverage data from the given file. Returns two values, suitable for assigning to `self.lines` and `self.arcs`.
[ "Return", "the", "stored", "coverage", "data", "from", "the", "given", "file", "." ]
python
test
ghukill/pyfc4
pyfc4/models.py
https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/models.py#L1257-L1287
def _handle_object(self, object_input): ''' Method to handle possible values passed for adding, removing, modifying triples. Detects type of input and sets appropriate http://www.w3.org/2001/XMLSchema# datatype Args: object_input (str,int,datetime,): many possible inputs Returns: (rdflib.term.Literal): with appropriate datatype attribute ''' # if object is string, convert to rdflib.term.Literal with appropriate datatype if type(object_input) == str: return rdflib.term.Literal(object_input, datatype=rdflib.XSD.string) # integer elif type(object_input) == int: return rdflib.term.Literal(object_input, datatype=rdflib.XSD.int) # float elif type(object_input) == float: return rdflib.term.Literal(object_input, datatype=rdflib.XSD.float) # date elif type(object_input) == datetime.datetime: return rdflib.term.Literal(object_input, datatype=rdflib.XSD.date) else: return object_input
[ "def", "_handle_object", "(", "self", ",", "object_input", ")", ":", "# if object is string, convert to rdflib.term.Literal with appropriate datatype", "if", "type", "(", "object_input", ")", "==", "str", ":", "return", "rdflib", ".", "term", ".", "Literal", "(", "object_input", ",", "datatype", "=", "rdflib", ".", "XSD", ".", "string", ")", "# integer", "elif", "type", "(", "object_input", ")", "==", "int", ":", "return", "rdflib", ".", "term", ".", "Literal", "(", "object_input", ",", "datatype", "=", "rdflib", ".", "XSD", ".", "int", ")", "# float", "elif", "type", "(", "object_input", ")", "==", "float", ":", "return", "rdflib", ".", "term", ".", "Literal", "(", "object_input", ",", "datatype", "=", "rdflib", ".", "XSD", ".", "float", ")", "# date", "elif", "type", "(", "object_input", ")", "==", "datetime", ".", "datetime", ":", "return", "rdflib", ".", "term", ".", "Literal", "(", "object_input", ",", "datatype", "=", "rdflib", ".", "XSD", ".", "date", ")", "else", ":", "return", "object_input" ]
Method to handle possible values passed for adding, removing, modifying triples. Detects type of input and sets appropriate http://www.w3.org/2001/XMLSchema# datatype Args: object_input (str,int,datetime,): many possible inputs Returns: (rdflib.term.Literal): with appropriate datatype attribute
[ "Method", "to", "handle", "possible", "values", "passed", "for", "adding", "removing", "modifying", "triples", ".", "Detects", "type", "of", "input", "and", "sets", "appropriate", "http", ":", "//", "www", ".", "w3", ".", "org", "/", "2001", "/", "XMLSchema#", "datatype" ]
python
train
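The dispatch performed by _handle_object is easy to see in a standalone sketch that mirrors its type-to-XSD mapping; it only needs rdflib installed:

import datetime
import rdflib

def to_rdf_literal(value):
    # Mirror of the pyfc4 helper: map Python types to XSD-typed literals,
    # passing anything unrecognised through unchanged.
    if type(value) == str:
        return rdflib.term.Literal(value, datatype=rdflib.XSD.string)
    elif type(value) == int:
        return rdflib.term.Literal(value, datatype=rdflib.XSD.int)
    elif type(value) == float:
        return rdflib.term.Literal(value, datatype=rdflib.XSD.float)
    elif type(value) == datetime.datetime:
        return rdflib.term.Literal(value, datatype=rdflib.XSD.date)
    return value

print(to_rdf_literal("hello"))                         # literal with datatype xsd:string
print(to_rdf_literal(42))                              # literal with datatype xsd:int
print(to_rdf_literal(datetime.datetime(2020, 1, 1)))   # literal with datatype xsd:date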
pyca/pyopenssl
src/OpenSSL/SSL.py
https://github.com/pyca/pyopenssl/blob/1fbe064c50fd030948141d7d630673761525b0d0/src/OpenSSL/SSL.py#L1077-L1104
def set_verify(self, mode, callback): """ et the verification flags for this Context object to *mode* and specify that *callback* should be used for verification callbacks. :param mode: The verify mode, this should be one of :const:`VERIFY_NONE` and :const:`VERIFY_PEER`. If :const:`VERIFY_PEER` is used, *mode* can be OR:ed with :const:`VERIFY_FAIL_IF_NO_PEER_CERT` and :const:`VERIFY_CLIENT_ONCE` to further control the behaviour. :param callback: The Python callback to use. This should take five arguments: A Connection object, an X509 object, and three integer variables, which are in turn potential error number, error depth and return code. *callback* should return True if verification passes and False otherwise. :return: None See SSL_CTX_set_verify(3SSL) for further details. """ if not isinstance(mode, integer_types): raise TypeError("mode must be an integer") if not callable(callback): raise TypeError("callback must be callable") self._verify_helper = _VerifyHelper(callback) self._verify_callback = self._verify_helper.callback _lib.SSL_CTX_set_verify(self._context, mode, self._verify_callback)
[ "def", "set_verify", "(", "self", ",", "mode", ",", "callback", ")", ":", "if", "not", "isinstance", "(", "mode", ",", "integer_types", ")", ":", "raise", "TypeError", "(", "\"mode must be an integer\"", ")", "if", "not", "callable", "(", "callback", ")", ":", "raise", "TypeError", "(", "\"callback must be callable\"", ")", "self", ".", "_verify_helper", "=", "_VerifyHelper", "(", "callback", ")", "self", ".", "_verify_callback", "=", "self", ".", "_verify_helper", ".", "callback", "_lib", ".", "SSL_CTX_set_verify", "(", "self", ".", "_context", ",", "mode", ",", "self", ".", "_verify_callback", ")" ]
Set the verification flags for this Context object to *mode* and specify that *callback* should be used for verification callbacks. :param mode: The verify mode, this should be one of :const:`VERIFY_NONE` and :const:`VERIFY_PEER`. If :const:`VERIFY_PEER` is used, *mode* can be OR:ed with :const:`VERIFY_FAIL_IF_NO_PEER_CERT` and :const:`VERIFY_CLIENT_ONCE` to further control the behaviour. :param callback: The Python callback to use. This should take five arguments: A Connection object, an X509 object, and three integer variables, which are in turn potential error number, error depth and return code. *callback* should return True if verification passes and False otherwise. :return: None See SSL_CTX_set_verify(3SSL) for further details.
[ "et", "the", "verification", "flags", "for", "this", "Context", "object", "to", "*", "mode", "*", "and", "specify", "that", "*", "callback", "*", "should", "be", "used", "for", "verification", "callbacks", "." ]
python
test
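A hedged sketch of plugging a verification callback into a context with set_verify; the callback signature follows the docstring above, and the CA bundle path is hypothetical:

from OpenSSL import SSL

def verify_cb(connection, x509, errnum, errdepth, ok):
    # Print each certificate in the chain and accept exactly what OpenSSL
    # itself already considers valid (ok is truthy).
    print("depth %d: %s" % (errdepth, x509.get_subject().commonName))
    return bool(ok)

ctx = SSL.Context(SSL.TLSv1_2_METHOD)
ctx.set_verify(SSL.VERIFY_PEER | SSL.VERIFY_FAIL_IF_NO_PEER_CERT, verify_cb)
ctx.load_verify_locations(b'/etc/ssl/certs/ca-certificates.crt')   # hypothetical CA bundle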
moonso/loqusdb
loqusdb/commands/cli.py
https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/commands/cli.py#L62-L111
def cli(ctx, database, username, password, authdb, port, host, uri, verbose, config, test): """loqusdb: manage a local variant count database.""" loglevel = "INFO" if verbose: loglevel = "DEBUG" coloredlogs.install(level=loglevel) LOG.info("Running loqusdb version %s", __version__) configs = {} if config: try: configs = yaml.safe_load(config) except yaml.YAMLError as err: LOG.warning(err) ctx.abort() uri = configs.get('uri') or uri if test: uri = "mongomock://" try: client = get_client( host=configs.get('host') or host, port=configs.get('port') or port, username=configs.get('username') or username, password=configs.get('password') or password, authdb=authdb or database or 'loqusdb', uri=uri, ) except DB_Error as err: LOG.warning(err) ctx.abort() database = configs.get('db_name') or database if not database: database = 'loqusdb' if uri: uri_info = uri_parser.parse_uri(uri) database = uri_info.get('database') adapter = MongoAdapter(client, db_name=database) ctx.obj = {} ctx.obj['db'] = database ctx.obj['user'] = username ctx.obj['password'] = password ctx.obj['port'] = port ctx.obj['host'] = host ctx.obj['adapter'] = adapter ctx.obj['version'] = __version__
[ "def", "cli", "(", "ctx", ",", "database", ",", "username", ",", "password", ",", "authdb", ",", "port", ",", "host", ",", "uri", ",", "verbose", ",", "config", ",", "test", ")", ":", "loglevel", "=", "\"INFO\"", "if", "verbose", ":", "loglevel", "=", "\"DEBUG\"", "coloredlogs", ".", "install", "(", "level", "=", "loglevel", ")", "LOG", ".", "info", "(", "\"Running loqusdb version %s\"", ",", "__version__", ")", "configs", "=", "{", "}", "if", "config", ":", "try", ":", "configs", "=", "yaml", ".", "safe_load", "(", "config", ")", "except", "yaml", ".", "YAMLError", "as", "err", ":", "LOG", ".", "warning", "(", "err", ")", "ctx", ".", "abort", "(", ")", "uri", "=", "configs", ".", "get", "(", "'uri'", ")", "or", "uri", "if", "test", ":", "uri", "=", "\"mongomock://\"", "try", ":", "client", "=", "get_client", "(", "host", "=", "configs", ".", "get", "(", "'host'", ")", "or", "host", ",", "port", "=", "configs", ".", "get", "(", "'port'", ")", "or", "port", ",", "username", "=", "configs", ".", "get", "(", "'username'", ")", "or", "username", ",", "password", "=", "configs", ".", "get", "(", "'password'", ")", "or", "password", ",", "authdb", "=", "authdb", "or", "database", "or", "'loqusdb'", ",", "uri", "=", "uri", ",", ")", "except", "DB_Error", "as", "err", ":", "LOG", ".", "warning", "(", "err", ")", "ctx", ".", "abort", "(", ")", "database", "=", "configs", ".", "get", "(", "'db_name'", ")", "or", "database", "if", "not", "database", ":", "database", "=", "'loqusdb'", "if", "uri", ":", "uri_info", "=", "uri_parser", ".", "parse_uri", "(", "uri", ")", "database", "=", "uri_info", ".", "get", "(", "'database'", ")", "adapter", "=", "MongoAdapter", "(", "client", ",", "db_name", "=", "database", ")", "ctx", ".", "obj", "=", "{", "}", "ctx", ".", "obj", "[", "'db'", "]", "=", "database", "ctx", ".", "obj", "[", "'user'", "]", "=", "username", "ctx", ".", "obj", "[", "'password'", "]", "=", "password", "ctx", ".", "obj", "[", "'port'", "]", "=", "port", "ctx", ".", "obj", "[", "'host'", "]", "=", "host", "ctx", ".", "obj", "[", "'adapter'", "]", "=", "adapter", "ctx", ".", "obj", "[", "'version'", "]", "=", "__version__" ]
loqusdb: manage a local variant count database.
[ "loqusdb", ":", "manage", "a", "local", "variant", "count", "database", "." ]
python
train
saltstack/salt
salt/modules/win_groupadd.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_groupadd.py#L166-L200
def info(name): ''' Return information about a group Args: name (str): The name of the group for which to get information Returns: dict: A dictionary of information about the group CLI Example: .. code-block:: bash salt '*' group.info foo ''' try: groupObj = _get_group_object(name) gr_name = groupObj.Name gr_mem = [_get_username(x) for x in groupObj.members()] except pywintypes.com_error as exc: msg = 'Failed to access group {0}. {1}'.format( name, win32api.FormatMessage(exc.excepinfo[5])) log.debug(msg) return False if not gr_name: return False return {'name': gr_name, 'passwd': None, 'gid': None, 'members': gr_mem}
[ "def", "info", "(", "name", ")", ":", "try", ":", "groupObj", "=", "_get_group_object", "(", "name", ")", "gr_name", "=", "groupObj", ".", "Name", "gr_mem", "=", "[", "_get_username", "(", "x", ")", "for", "x", "in", "groupObj", ".", "members", "(", ")", "]", "except", "pywintypes", ".", "com_error", "as", "exc", ":", "msg", "=", "'Failed to access group {0}. {1}'", ".", "format", "(", "name", ",", "win32api", ".", "FormatMessage", "(", "exc", ".", "excepinfo", "[", "5", "]", ")", ")", "log", ".", "debug", "(", "msg", ")", "return", "False", "if", "not", "gr_name", ":", "return", "False", "return", "{", "'name'", ":", "gr_name", ",", "'passwd'", ":", "None", ",", "'gid'", ":", "None", ",", "'members'", ":", "gr_mem", "}" ]
Return information about a group Args: name (str): The name of the group for which to get information Returns: dict: A dictionary of information about the group CLI Example: .. code-block:: bash salt '*' group.info foo
[ "Return", "information", "about", "a", "group" ]
python
train
nickmckay/LiPD-utilities
Python/lipd/fetch_doi.py
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/fetch_doi.py#L165-L180
def clean_doi(doi_string): """ Use regex to extract all DOI ids from string (i.e. 10.1029/2005pa001215) :param str doi_string: Raw DOI string value from input file. Often not properly formatted. :return list: DOI ids. May contain 0, 1, or multiple ids. """ regex = re.compile(r'\b(10[.][0-9]{3,}(?:[.][0-9]+)*/(?:(?!["&\'<>,])\S)+)\b') try: # Returns a list of matching strings m = re.findall(regex, doi_string) except TypeError as e: # If doi_string is None type, return empty list print("TypeError cleaning DOI: {}, {}".format(doi_string, e)) m = [] return m
[ "def", "clean_doi", "(", "doi_string", ")", ":", "regex", "=", "re", ".", "compile", "(", "r'\\b(10[.][0-9]{3,}(?:[.][0-9]+)*/(?:(?![\"&\\'<>,])\\S)+)\\b'", ")", "try", ":", "# Returns a list of matching strings", "m", "=", "re", ".", "findall", "(", "regex", ",", "doi_string", ")", "except", "TypeError", "as", "e", ":", "# If doi_string is None type, return empty list", "print", "(", "\"TypeError cleaning DOI: {}, {}\"", ".", "format", "(", "doi_string", ",", "e", ")", ")", "m", "=", "[", "]", "return", "m" ]
Use regex to extract all DOI ids from string (i.e. 10.1029/2005pa001215) :param str doi_string: Raw DOI string value from input file. Often not properly formatted. :return list: DOI ids. May contain 0, 1, or multiple ids.
[ "Use", "regex", "to", "extract", "all", "DOI", "ids", "from", "string", "(", "i", ".", "e", ".", "10", ".", "1029", "/", "2005pa001215", ")" ]
python
train
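A quick, self-contained check of the DOI pattern documented in the clean_doi record above; the sample sentence is made up, but the regular expression is copied verbatim from the record.

import re

# Same pattern as in the clean_doi record: DOI ids such as 10.1029/2005pa001215.
DOI_REGEX = re.compile(r'\b(10[.][0-9]{3,}(?:[.][0-9]+)*/(?:(?!["&\'<>,])\S)+)\b')

def extract_dois(text):
    """Return every DOI found in text, or an empty list for non-string input."""
    try:
        return re.findall(DOI_REGEX, text)
    except TypeError:
        return []

print(extract_dois("See doi:10.1029/2005pa001215 and 10.1000/xyz123 for details."))
# ['10.1029/2005pa001215', '10.1000/xyz123']
print(extract_dois(None))
# []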
bfontaine/term2048
term2048/board.py
https://github.com/bfontaine/term2048/blob/8b5ce8b65f44f20a7ad36022a34dce56184070af/term2048/board.py#L103-L105
def getCol(self, x): """return the x-th column, starting at 0""" return [self.getCell(x, i) for i in self.__size_range]
[ "def", "getCol", "(", "self", ",", "x", ")", ":", "return", "[", "self", ".", "getCell", "(", "x", ",", "i", ")", "for", "i", "in", "self", ".", "__size_range", "]" ]
return the x-th column, starting at 0
[ "return", "the", "x", "-", "th", "column", "starting", "at", "0" ]
python
train
tensorflow/hub
examples/image_retraining/retrain.py
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/examples/image_retraining/retrain.py#L481-L544
def get_random_cached_bottlenecks(sess, image_lists, how_many, category, bottleneck_dir, image_dir, jpeg_data_tensor, decoded_image_tensor, resized_input_tensor, bottleneck_tensor, module_name): """Retrieves bottleneck values for cached images. If no distortions are being applied, this function can retrieve the cached bottleneck values directly from disk for images. It picks a random set of images from the specified category. Args: sess: Current TensorFlow Session. image_lists: OrderedDict of training images for each label. how_many: If positive, a random sample of this size will be chosen. If negative, all bottlenecks will be retrieved. category: Name string of which set to pull from - training, testing, or validation. bottleneck_dir: Folder string holding cached files of bottleneck values. image_dir: Root folder string of the subfolders containing the training images. jpeg_data_tensor: The layer to feed jpeg image data into. decoded_image_tensor: The output of decoding and resizing the image. resized_input_tensor: The input node of the recognition graph. bottleneck_tensor: The bottleneck output layer of the CNN graph. module_name: The name of the image module being used. Returns: List of bottleneck arrays, their corresponding ground truths, and the relevant filenames. """ class_count = len(image_lists.keys()) bottlenecks = [] ground_truths = [] filenames = [] if how_many >= 0: # Retrieve a random sample of bottlenecks. for unused_i in range(how_many): label_index = random.randrange(class_count) label_name = list(image_lists.keys())[label_index] image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1) image_name = get_image_path(image_lists, label_name, image_index, image_dir, category) bottleneck = get_or_create_bottleneck( sess, image_lists, label_name, image_index, image_dir, category, bottleneck_dir, jpeg_data_tensor, decoded_image_tensor, resized_input_tensor, bottleneck_tensor, module_name) bottlenecks.append(bottleneck) ground_truths.append(label_index) filenames.append(image_name) else: # Retrieve all bottlenecks. for label_index, label_name in enumerate(image_lists.keys()): for image_index, image_name in enumerate( image_lists[label_name][category]): image_name = get_image_path(image_lists, label_name, image_index, image_dir, category) bottleneck = get_or_create_bottleneck( sess, image_lists, label_name, image_index, image_dir, category, bottleneck_dir, jpeg_data_tensor, decoded_image_tensor, resized_input_tensor, bottleneck_tensor, module_name) bottlenecks.append(bottleneck) ground_truths.append(label_index) filenames.append(image_name) return bottlenecks, ground_truths, filenames
[ "def", "get_random_cached_bottlenecks", "(", "sess", ",", "image_lists", ",", "how_many", ",", "category", ",", "bottleneck_dir", ",", "image_dir", ",", "jpeg_data_tensor", ",", "decoded_image_tensor", ",", "resized_input_tensor", ",", "bottleneck_tensor", ",", "module_name", ")", ":", "class_count", "=", "len", "(", "image_lists", ".", "keys", "(", ")", ")", "bottlenecks", "=", "[", "]", "ground_truths", "=", "[", "]", "filenames", "=", "[", "]", "if", "how_many", ">=", "0", ":", "# Retrieve a random sample of bottlenecks.", "for", "unused_i", "in", "range", "(", "how_many", ")", ":", "label_index", "=", "random", ".", "randrange", "(", "class_count", ")", "label_name", "=", "list", "(", "image_lists", ".", "keys", "(", ")", ")", "[", "label_index", "]", "image_index", "=", "random", ".", "randrange", "(", "MAX_NUM_IMAGES_PER_CLASS", "+", "1", ")", "image_name", "=", "get_image_path", "(", "image_lists", ",", "label_name", ",", "image_index", ",", "image_dir", ",", "category", ")", "bottleneck", "=", "get_or_create_bottleneck", "(", "sess", ",", "image_lists", ",", "label_name", ",", "image_index", ",", "image_dir", ",", "category", ",", "bottleneck_dir", ",", "jpeg_data_tensor", ",", "decoded_image_tensor", ",", "resized_input_tensor", ",", "bottleneck_tensor", ",", "module_name", ")", "bottlenecks", ".", "append", "(", "bottleneck", ")", "ground_truths", ".", "append", "(", "label_index", ")", "filenames", ".", "append", "(", "image_name", ")", "else", ":", "# Retrieve all bottlenecks.", "for", "label_index", ",", "label_name", "in", "enumerate", "(", "image_lists", ".", "keys", "(", ")", ")", ":", "for", "image_index", ",", "image_name", "in", "enumerate", "(", "image_lists", "[", "label_name", "]", "[", "category", "]", ")", ":", "image_name", "=", "get_image_path", "(", "image_lists", ",", "label_name", ",", "image_index", ",", "image_dir", ",", "category", ")", "bottleneck", "=", "get_or_create_bottleneck", "(", "sess", ",", "image_lists", ",", "label_name", ",", "image_index", ",", "image_dir", ",", "category", ",", "bottleneck_dir", ",", "jpeg_data_tensor", ",", "decoded_image_tensor", ",", "resized_input_tensor", ",", "bottleneck_tensor", ",", "module_name", ")", "bottlenecks", ".", "append", "(", "bottleneck", ")", "ground_truths", ".", "append", "(", "label_index", ")", "filenames", ".", "append", "(", "image_name", ")", "return", "bottlenecks", ",", "ground_truths", ",", "filenames" ]
Retrieves bottleneck values for cached images. If no distortions are being applied, this function can retrieve the cached bottleneck values directly from disk for images. It picks a random set of images from the specified category. Args: sess: Current TensorFlow Session. image_lists: OrderedDict of training images for each label. how_many: If positive, a random sample of this size will be chosen. If negative, all bottlenecks will be retrieved. category: Name string of which set to pull from - training, testing, or validation. bottleneck_dir: Folder string holding cached files of bottleneck values. image_dir: Root folder string of the subfolders containing the training images. jpeg_data_tensor: The layer to feed jpeg image data into. decoded_image_tensor: The output of decoding and resizing the image. resized_input_tensor: The input node of the recognition graph. bottleneck_tensor: The bottleneck output layer of the CNN graph. module_name: The name of the image module being used. Returns: List of bottleneck arrays, their corresponding ground truths, and the relevant filenames.
[ "Retrieves", "bottleneck", "values", "for", "cached", "images", "." ]
python
train
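The sampling logic in the get_random_cached_bottlenecks record above boils down to: for each draw, pick a random label, then a random image under that label; a negative how_many means take everything. Below is a stripped-down sketch of just that selection step, with a toy image_lists dict and none of the TensorFlow or bottleneck-cache machinery (those parts are omitted on purpose).

import random

# Toy stand-in for image_lists: label -> category -> file names.
image_lists = {
    'roses':  {'training': ['rose1.jpg', 'rose2.jpg']},
    'tulips': {'training': ['tulip1.jpg', 'tulip2.jpg', 'tulip3.jpg']},
}

def sample_items(image_lists, how_many, category='training'):
    """Random (label, filename) pairs if how_many >= 0, otherwise every pair."""
    labels = list(image_lists.keys())
    picked = []
    if how_many >= 0:
        for _ in range(how_many):
            label = random.choice(labels)
            picked.append((label, random.choice(image_lists[label][category])))
    else:
        for label in labels:
            for filename in image_lists[label][category]:
                picked.append((label, filename))
    return picked

print(sample_items(image_lists, 4))    # four random draws (repeats are possible)
print(sample_items(image_lists, -1))   # all five items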
rosenbrockc/fortpy
fortpy/tramp.py
https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/tramp.py#L78-L86
def touch(self, filepath): """Touches the specified file so that its modified time changes.""" if self.is_ssh(filepath): self._check_ssh() remotepath = self._get_remote(filepath) stdin, stdout, stderr = self.ssh.exec_command("touch {}".format(remotepath)) stdin.close() else: os.system("touch {}".format(filepath))
[ "def", "touch", "(", "self", ",", "filepath", ")", ":", "if", "self", ".", "is_ssh", "(", "filepath", ")", ":", "self", ".", "_check_ssh", "(", ")", "remotepath", "=", "self", ".", "_get_remote", "(", "filepath", ")", "stdin", ",", "stdout", ",", "stderr", "=", "self", ".", "ssh", ".", "exec_command", "(", "\"touch {}\"", ".", "format", "(", "remotepath", ")", ")", "stdin", ".", "close", "(", ")", "else", ":", "os", ".", "system", "(", "\"touch {}\"", ".", "format", "(", "filepath", ")", ")" ]
Touches the specified file so that its modified time changes.
[ "Touches", "the", "specified", "file", "so", "that", "its", "modified", "time", "changes", "." ]
python
train
emencia/emencia-django-forum
forum/models.py
https://github.com/emencia/emencia-django-forum/blob/cda74ed7e5822675c340ee5ec71548d981bccd3b/forum/models.py#L161-L171
def get_paginated_position(self): """ Return the Post position in the paginated list """ # If Post list is not paginated if not settings.FORUM_THREAD_DETAIL_PAGINATE: return 0 count = Post.objects.filter(thread=self.thread_id, created__lt=self.created).count() + 1 return int(math.ceil(count / float(settings.FORUM_THREAD_DETAIL_PAGINATE)))
[ "def", "get_paginated_position", "(", "self", ")", ":", "# If Post list is not paginated", "if", "not", "settings", ".", "FORUM_THREAD_DETAIL_PAGINATE", ":", "return", "0", "count", "=", "Post", ".", "objects", ".", "filter", "(", "thread", "=", "self", ".", "thread_id", ",", "created__lt", "=", "self", ".", "created", ")", ".", "count", "(", ")", "+", "1", "return", "int", "(", "math", ".", "ceil", "(", "count", "/", "float", "(", "settings", ".", "FORUM_THREAD_DETAIL_PAGINATE", ")", ")", ")" ]
Return the Post position in the paginated list
[ "Return", "the", "Post", "position", "in", "the", "paginated", "list" ]
python
train
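The arithmetic behind get_paginated_position above is simply "number of earlier posts plus one, divided by the page size, rounded up". Here is a standalone version of that formula, with the Django model lookup replaced by a plain count argument.

import math

def paginated_position(posts_before, per_page):
    """Page number (1-based) on which the post appears; 0 if pagination is disabled."""
    if not per_page:
        return 0
    return int(math.ceil((posts_before + 1) / float(per_page)))

# 17 earlier posts and 10 posts per page: this post is the 18th, so page 2.
assert paginated_position(17, 10) == 2
# Exactly filling a page: the 10th post is still on page 1.
assert paginated_position(9, 10) == 1
print(paginated_position(17, 10), paginated_position(9, 10))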
Opentrons/opentrons
api/src/opentrons/server/endpoints/networking.py
https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/server/endpoints/networking.py#L155-L177
def _deduce_security(kwargs) -> nmcli.SECURITY_TYPES: """ Make sure that the security_type is known, or throw. """ # Security should be one of our valid strings sec_translation = { 'wpa-psk': nmcli.SECURITY_TYPES.WPA_PSK, 'none': nmcli.SECURITY_TYPES.NONE, 'wpa-eap': nmcli.SECURITY_TYPES.WPA_EAP, } if not kwargs.get('securityType'): if kwargs.get('psk') and kwargs.get('eapConfig'): raise ConfigureArgsError( 'Cannot deduce security type: psk and eap both passed') elif kwargs.get('psk'): kwargs['securityType'] = 'wpa-psk' elif kwargs.get('eapConfig'): kwargs['securityType'] = 'wpa-eap' else: kwargs['securityType'] = 'none' try: return sec_translation[kwargs['securityType']] except KeyError: raise ConfigureArgsError('securityType must be one of {}' .format(','.join(sec_translation.keys())))
[ "def", "_deduce_security", "(", "kwargs", ")", "->", "nmcli", ".", "SECURITY_TYPES", ":", "# Security should be one of our valid strings", "sec_translation", "=", "{", "'wpa-psk'", ":", "nmcli", ".", "SECURITY_TYPES", ".", "WPA_PSK", ",", "'none'", ":", "nmcli", ".", "SECURITY_TYPES", ".", "NONE", ",", "'wpa-eap'", ":", "nmcli", ".", "SECURITY_TYPES", ".", "WPA_EAP", ",", "}", "if", "not", "kwargs", ".", "get", "(", "'securityType'", ")", ":", "if", "kwargs", ".", "get", "(", "'psk'", ")", "and", "kwargs", ".", "get", "(", "'eapConfig'", ")", ":", "raise", "ConfigureArgsError", "(", "'Cannot deduce security type: psk and eap both passed'", ")", "elif", "kwargs", ".", "get", "(", "'psk'", ")", ":", "kwargs", "[", "'securityType'", "]", "=", "'wpa-psk'", "elif", "kwargs", ".", "get", "(", "'eapConfig'", ")", ":", "kwargs", "[", "'securityType'", "]", "=", "'wpa-eap'", "else", ":", "kwargs", "[", "'securityType'", "]", "=", "'none'", "try", ":", "return", "sec_translation", "[", "kwargs", "[", "'securityType'", "]", "]", "except", "KeyError", ":", "raise", "ConfigureArgsError", "(", "'securityType must be one of {}'", ".", "format", "(", "','", ".", "join", "(", "sec_translation", ".", "keys", "(", ")", ")", ")", ")" ]
Make sure that the security_type is known, or throw.
[ "Make", "sure", "that", "the", "security_type", "is", "known", "or", "throw", "." ]
python
train
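A runnable sketch of the deduction rules described in the _deduce_security record above. The SecurityTypes enum and ConfigureArgsError class below are local stand-ins for the Opentrons nmcli.SECURITY_TYPES and error type, so the example has no robot-stack dependencies.

import enum

class SecurityTypes(enum.Enum):          # stand-in for nmcli.SECURITY_TYPES
    WPA_PSK = 'wpa-psk'
    NONE = 'none'
    WPA_EAP = 'wpa-eap'

class ConfigureArgsError(ValueError):
    pass

def deduce_security(kwargs):
    """Infer the security type from psk/eapConfig when none is given explicitly."""
    translation = {
        'wpa-psk': SecurityTypes.WPA_PSK,
        'none': SecurityTypes.NONE,
        'wpa-eap': SecurityTypes.WPA_EAP,
    }
    if not kwargs.get('securityType'):
        if kwargs.get('psk') and kwargs.get('eapConfig'):
            raise ConfigureArgsError('Cannot deduce security type: psk and eap both passed')
        elif kwargs.get('psk'):
            kwargs['securityType'] = 'wpa-psk'
        elif kwargs.get('eapConfig'):
            kwargs['securityType'] = 'wpa-eap'
        else:
            kwargs['securityType'] = 'none'
    try:
        return translation[kwargs['securityType']]
    except KeyError:
        raise ConfigureArgsError('securityType must be one of {}'.format(','.join(translation)))

print(deduce_security({'psk': 'hunter2'}))   # SecurityTypes.WPA_PSK
print(deduce_security({}))                   # SecurityTypes.NONE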
saltstack/salt
salt/modules/namecheap_domains_dns.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/namecheap_domains_dns.py#L206-L235
def set_default(sld, tld): ''' Sets domain to use namecheap default DNS servers. Required for free services like Host record management, URL forwarding, email forwarding, dynamic DNS and other value added services. sld SLD of the domain name tld TLD of the domain name Returns ``True`` if the domain was successfully pointed at the default DNS servers. CLI Example: .. code-block:: bash salt 'my-minion' namecheap_domains_dns.set_default sld tld ''' opts = salt.utils.namecheap.get_opts('namecheap.domains.dns.setDefault') opts['SLD'] = sld opts['TLD'] = tld response_xml = salt.utils.namecheap.post_request(opts) if response_xml is None: return False dnsresult = response_xml.getElementsByTagName('DomainDNSSetDefaultResult')[0] return salt.utils.namecheap.string_to_value(dnsresult.getAttribute('Updated'))
[ "def", "set_default", "(", "sld", ",", "tld", ")", ":", "opts", "=", "salt", ".", "utils", ".", "namecheap", ".", "get_opts", "(", "'namecheap.domains.dns.setDefault'", ")", "opts", "[", "'SLD'", "]", "=", "sld", "opts", "[", "'TLD'", "]", "=", "tld", "response_xml", "=", "salt", ".", "utils", ".", "namecheap", ".", "post_request", "(", "opts", ")", "if", "response_xml", "is", "None", ":", "return", "False", "dnsresult", "=", "response_xml", ".", "getElementsByTagName", "(", "'DomainDNSSetDefaultResult'", ")", "[", "0", "]", "return", "salt", ".", "utils", ".", "namecheap", ".", "string_to_value", "(", "dnsresult", ".", "getAttribute", "(", "'Updated'", ")", ")" ]
Sets domain to use namecheap default DNS servers. Required for free services like Host record management, URL forwarding, email forwarding, dynamic DNS and other value added services. sld SLD of the domain name tld TLD of the domain name Returns ``True`` if the domain was successfully pointed at the default DNS servers. CLI Example: .. code-block:: bash salt 'my-minion' namecheap_domains_dns.set_default sld tld
[ "Sets", "domain", "to", "use", "namecheap", "default", "DNS", "servers", ".", "Required", "for", "free", "services", "like", "Host", "record", "management", "URL", "forwarding", "email", "forwarding", "dynamic", "DNS", "and", "other", "value", "added", "services", "." ]
python
train
zhanglab/psamm
psamm/lpsolver/cplex.py
https://github.com/zhanglab/psamm/blob/dc427848c4f9d109ca590f0afa024c63b685b3f4/psamm/lpsolver/cplex.py#L206-L221
def add_linear_constraints(self, *relations): """Add constraints to the problem Each constraint is represented by a Relation, and the expression in that relation can be a set expression. """ constraints = [] for relation in relations: if self._check_relation(relation): constraints.append(Constraint(self, None)) else: for name in self._add_constraints(relation): constraints.append(Constraint(self, name)) return constraints
[ "def", "add_linear_constraints", "(", "self", ",", "*", "relations", ")", ":", "constraints", "=", "[", "]", "for", "relation", "in", "relations", ":", "if", "self", ".", "_check_relation", "(", "relation", ")", ":", "constraints", ".", "append", "(", "Constraint", "(", "self", ",", "None", ")", ")", "else", ":", "for", "name", "in", "self", ".", "_add_constraints", "(", "relation", ")", ":", "constraints", ".", "append", "(", "Constraint", "(", "self", ",", "name", ")", ")", "return", "constraints" ]
Add constraints to the problem Each constraint is represented by a Relation, and the expression in that relation can be a set expression.
[ "Add", "constraints", "to", "the", "problem" ]
python
train
borntyping/python-riemann-client
riemann_client/transport.py
https://github.com/borntyping/python-riemann-client/blob/3e181d90bdf685afd21c1ec5ee20e6840b011ea5/riemann_client/transport.py#L122-L124
def connect(self): """Connects to the given host""" self.socket = socket.create_connection(self.address, self.timeout)
[ "def", "connect", "(", "self", ")", ":", "self", ".", "socket", "=", "socket", ".", "create_connection", "(", "self", ".", "address", ",", "self", ".", "timeout", ")" ]
Connects to the given host
[ "Connects", "to", "the", "given", "host" ]
python
train
jacebrowning/comparable
comparable/simple.py
https://github.com/jacebrowning/comparable/blob/48455e613650e22412d31109681368fcc479298d/comparable/simple.py#L113-L118
def similarity(self, other): """Get similarity as a ratio of the stripped text.""" logging.debug("comparing %r and %r...", self.stripped, other.stripped) ratio = SequenceMatcher(a=self.stripped, b=other.stripped).ratio() similarity = self.Similarity(ratio) return similarity
[ "def", "similarity", "(", "self", ",", "other", ")", ":", "logging", ".", "debug", "(", "\"comparing %r and %r...\"", ",", "self", ".", "stripped", ",", "other", ".", "stripped", ")", "ratio", "=", "SequenceMatcher", "(", "a", "=", "self", ".", "stripped", ",", "b", "=", "other", ".", "stripped", ")", ".", "ratio", "(", ")", "similarity", "=", "self", ".", "Similarity", "(", "ratio", ")", "return", "similarity" ]
Get similarity as a ratio of the stripped text.
[ "Get", "similarity", "as", "a", "ratio", "of", "the", "stripped", "text", "." ]
python
train
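The similarity record above ultimately delegates to difflib.SequenceMatcher; the snippet below demonstrates the ratio it computes, with plain lowercase strings standing in for the "stripped" text.

from difflib import SequenceMatcher

def similarity_ratio(a, b):
    """Ratio in [0, 1] describing how alike two (already normalised) strings are."""
    return SequenceMatcher(a=a, b=b).ratio()

print(similarity_ratio("the quick brown fox", "the quick brown fax"))  # ~0.947
print(similarity_ratio("apple", "orange"))                             # much lower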
RRZE-HPC/kerncraft
kerncraft/kernel.py
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L592-L603
def bytes_per_iteration(self): """ Consecutive bytes written out per high-level iterations (as counted by loop stack). Is used to compute number of iterations per cacheline. """ # TODO Find longst consecutive writes to any variable and use as basis var_name = list(self.destinations)[0] var_type = self.variables[var_name][0] # FIXME this is correct most of the time, but not guaranteed: # Multiplying datatype size with step increment of inner-most loop return self.datatypes_size[var_type] * self._loop_stack[-1][3]
[ "def", "bytes_per_iteration", "(", "self", ")", ":", "# TODO Find longst consecutive writes to any variable and use as basis", "var_name", "=", "list", "(", "self", ".", "destinations", ")", "[", "0", "]", "var_type", "=", "self", ".", "variables", "[", "var_name", "]", "[", "0", "]", "# FIXME this is correct most of the time, but not guaranteed:", "# Multiplying datatype size with step increment of inner-most loop", "return", "self", ".", "datatypes_size", "[", "var_type", "]", "*", "self", ".", "_loop_stack", "[", "-", "1", "]", "[", "3", "]" ]
Consecutive bytes written out per high-level iterations (as counted by loop stack). Is used to compute number of iterations per cacheline.
[ "Consecutive", "bytes", "written", "out", "per", "high", "-", "level", "iterations", "(", "as", "counted", "by", "loop", "stack", ")", "." ]
python
test
inasafe/inasafe
safe_extras/raven/contrib/django/client.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe_extras/raven/contrib/django/client.py#L87-L138
def install_sql_hook(): """If installed this causes Django's queries to be captured.""" try: from django.db.backends.utils import CursorWrapper except ImportError: from django.db.backends.util import CursorWrapper try: real_execute = CursorWrapper.execute real_executemany = CursorWrapper.executemany except AttributeError: # XXX(mitsuhiko): On some very old django versions (<1.6) this # trickery would have to look different but I can't be bothered. return def record_sql(vendor, alias, start, duration, sql, params): def processor(data): real_sql, real_params = format_sql(sql, params) if real_params: real_sql = real_sql % tuple(real_params) # maybe category to 'django.%s.%s' % (vendor, alias or # 'default') ? data.update({ 'message': real_sql, 'category': 'query', }) breadcrumbs.record(processor=processor) def record_many_sql(vendor, alias, start, sql, param_list): duration = time.time() - start for params in param_list: record_sql(vendor, alias, start, duration, sql, params) def execute(self, sql, params=None): start = time.time() try: return real_execute(self, sql, params) finally: record_sql(self.db.vendor, getattr(self.db, 'alias', None), start, time.time() - start, sql, params) def executemany(self, sql, param_list): start = time.time() try: return real_executemany(self, sql, param_list) finally: record_many_sql(self.db.vendor, getattr(self.db, 'alias', None), start, sql, param_list) CursorWrapper.execute = execute CursorWrapper.executemany = executemany breadcrumbs.ignore_logger('django.db.backends')
[ "def", "install_sql_hook", "(", ")", ":", "try", ":", "from", "django", ".", "db", ".", "backends", ".", "utils", "import", "CursorWrapper", "except", "ImportError", ":", "from", "django", ".", "db", ".", "backends", ".", "util", "import", "CursorWrapper", "try", ":", "real_execute", "=", "CursorWrapper", ".", "execute", "real_executemany", "=", "CursorWrapper", ".", "executemany", "except", "AttributeError", ":", "# XXX(mitsuhiko): On some very old django versions (<1.6) this", "# trickery would have to look different but I can't be bothered.", "return", "def", "record_sql", "(", "vendor", ",", "alias", ",", "start", ",", "duration", ",", "sql", ",", "params", ")", ":", "def", "processor", "(", "data", ")", ":", "real_sql", ",", "real_params", "=", "format_sql", "(", "sql", ",", "params", ")", "if", "real_params", ":", "real_sql", "=", "real_sql", "%", "tuple", "(", "real_params", ")", "# maybe category to 'django.%s.%s' % (vendor, alias or", "# 'default') ?", "data", ".", "update", "(", "{", "'message'", ":", "real_sql", ",", "'category'", ":", "'query'", ",", "}", ")", "breadcrumbs", ".", "record", "(", "processor", "=", "processor", ")", "def", "record_many_sql", "(", "vendor", ",", "alias", ",", "start", ",", "sql", ",", "param_list", ")", ":", "duration", "=", "time", ".", "time", "(", ")", "-", "start", "for", "params", "in", "param_list", ":", "record_sql", "(", "vendor", ",", "alias", ",", "start", ",", "duration", ",", "sql", ",", "params", ")", "def", "execute", "(", "self", ",", "sql", ",", "params", "=", "None", ")", ":", "start", "=", "time", ".", "time", "(", ")", "try", ":", "return", "real_execute", "(", "self", ",", "sql", ",", "params", ")", "finally", ":", "record_sql", "(", "self", ".", "db", ".", "vendor", ",", "getattr", "(", "self", ".", "db", ",", "'alias'", ",", "None", ")", ",", "start", ",", "time", ".", "time", "(", ")", "-", "start", ",", "sql", ",", "params", ")", "def", "executemany", "(", "self", ",", "sql", ",", "param_list", ")", ":", "start", "=", "time", ".", "time", "(", ")", "try", ":", "return", "real_executemany", "(", "self", ",", "sql", ",", "param_list", ")", "finally", ":", "record_many_sql", "(", "self", ".", "db", ".", "vendor", ",", "getattr", "(", "self", ".", "db", ",", "'alias'", ",", "None", ")", ",", "start", ",", "sql", ",", "param_list", ")", "CursorWrapper", ".", "execute", "=", "execute", "CursorWrapper", ".", "executemany", "=", "executemany", "breadcrumbs", ".", "ignore_logger", "(", "'django.db.backends'", ")" ]
If installed this causes Django's queries to be captured.
[ "If", "installed", "this", "causes", "Django", "s", "queries", "to", "be", "captured", "." ]
python
train
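The install_sql_hook record above works by swapping CursorWrapper.execute for a wrapper that times the real call inside try/finally and records a breadcrumb. The same wrap-and-record pattern is shown below, reduced to a toy class so it runs without Django or Raven.

import time

class Cursor:                        # toy stand-in for Django's CursorWrapper
    def execute(self, sql, params=None):
        time.sleep(0.01)             # pretend to talk to a database
        return 'rows'

recorded = []

def install_hook(cls):
    """Replace cls.execute with a timing wrapper that appends to `recorded`."""
    real_execute = cls.execute
    def execute(self, sql, params=None):
        start = time.time()
        try:
            return real_execute(self, sql, params)
        finally:
            recorded.append((sql, params, time.time() - start))
    cls.execute = execute

install_hook(Cursor)
Cursor().execute('SELECT 1')
print(recorded)    # [('SELECT 1', None, ~0.01)]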
lingthio/Flask-User
flask_user/token_manager.py
https://github.com/lingthio/Flask-User/blob/a379fa0a281789618c484b459cb41236779b95b1/flask_user/token_manager.py#L147-L177
def encode_data_items(self, *args): """ Encodes a list of integers and strings into a concatenated string. - encode string items as-is. - encode integer items as base-64 with a ``'~'`` prefix. - concatenate encoded items with a ``'|'`` separator. Example: ``encode_data_items('abc', 123, 'xyz')`` returns ``'abc|~B7|xyz'`` """ str_list = [] for arg in args: # encode string items as-is if isinstance(arg, str): arg_str = arg # encode integer items as base-64 strings with a '~' character in front elif isinstance(arg, int): arg_str = self.INTEGER_PREFIX + self.encode_int(arg) # convert other types to string else: arg_str = str(arg) str_list.append(arg_str) # Concatenate strings with '|' separators concatenated_str = self.SEPARATOR.join(str_list) return concatenated_str
[ "def", "encode_data_items", "(", "self", ",", "*", "args", ")", ":", "str_list", "=", "[", "]", "for", "arg", "in", "args", ":", "# encode string items as-is", "if", "isinstance", "(", "arg", ",", "str", ")", ":", "arg_str", "=", "arg", "# encode integer items as base-64 strings with a '~' character in front", "elif", "isinstance", "(", "arg", ",", "int", ")", ":", "arg_str", "=", "self", ".", "INTEGER_PREFIX", "+", "self", ".", "encode_int", "(", "arg", ")", "# convert other types to string", "else", ":", "arg_str", "=", "str", "(", "arg", ")", "str_list", ".", "append", "(", "arg_str", ")", "# Concatenate strings with '|' separators", "concatenated_str", "=", "self", ".", "SEPARATOR", ".", "join", "(", "str_list", ")", "return", "concatenated_str" ]
Encodes a list of integers and strings into a concatenated string. - encode string items as-is. - encode integer items as base-64 with a ``'~'`` prefix. - concatenate encoded items with a ``'|'`` separator. Example: ``encode_data_items('abc', 123, 'xyz')`` returns ``'abc|~B7|xyz'``
[ "Encodes", "a", "list", "of", "integers", "and", "strings", "into", "a", "concatenated", "string", "." ]
python
train
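A small sketch of the encoding scheme described in the encode_data_items record above. The exact base-64 alphabet used by Flask-User's encode_int is not shown in the record, so the A-Z/a-z/0-9/-_ ordering below is an assumption; with it, the docstring's example round-trips to 'abc|~B7|xyz' as advertised.

# Assumed alphabet: with it, 123 == 1*64 + 59 encodes to 'B7' as in the docstring.
ALPHABET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_'
SEPARATOR = '|'
INTEGER_PREFIX = '~'

def encode_int(value):
    """Encode a non-negative integer as a short base-64 string."""
    if value == 0:
        return ALPHABET[0]
    digits = []
    while value:
        value, rem = divmod(value, 64)
        digits.append(ALPHABET[rem])
    return ''.join(reversed(digits))

def encode_data_items(*args):
    """Strings as-is, ints as '~'-prefixed base-64, joined with '|'."""
    parts = []
    for arg in args:
        if isinstance(arg, str):
            parts.append(arg)
        elif isinstance(arg, int):
            parts.append(INTEGER_PREFIX + encode_int(arg))
        else:
            parts.append(str(arg))
    return SEPARATOR.join(parts)

print(encode_data_items('abc', 123, 'xyz'))   # abc|~B7|xyz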
log2timeline/dfvfs
dfvfs/file_io/ewf_file_io.py
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/file_io/ewf_file_io.py#L43-L82
def _OpenFileObject(self, path_spec): """Opens the file-like object defined by path specification. Args: path_spec (PathSpec): path specification. Returns: pyewf.handle: a file-like object or None. Raises: PathSpecError: if the path specification is invalid. """ if not path_spec.HasParent(): raise errors.PathSpecError( 'Unsupported path specification without parent.') parent_path_spec = path_spec.parent file_system = resolver.Resolver.OpenFileSystem( parent_path_spec, resolver_context=self._resolver_context) # Note that we cannot use pyewf's glob function since it does not # handle the file system abstraction dfvfs provides. segment_file_path_specs = ewf.EWFGlobPathSpec(file_system, path_spec) if not segment_file_path_specs: return None if parent_path_spec.IsSystemLevel(): # Typically the file-like object cache should have room for 127 items. self._resolver_context.SetMaximumNumberOfFileObjects( len(segment_file_path_specs) + 127) for segment_file_path_spec in segment_file_path_specs: file_object = resolver.Resolver.OpenFileObject( segment_file_path_spec, resolver_context=self._resolver_context) self._file_objects.append(file_object) ewf_handle = pyewf.handle() ewf_handle.open_file_objects(self._file_objects) return ewf_handle
[ "def", "_OpenFileObject", "(", "self", ",", "path_spec", ")", ":", "if", "not", "path_spec", ".", "HasParent", "(", ")", ":", "raise", "errors", ".", "PathSpecError", "(", "'Unsupported path specification without parent.'", ")", "parent_path_spec", "=", "path_spec", ".", "parent", "file_system", "=", "resolver", ".", "Resolver", ".", "OpenFileSystem", "(", "parent_path_spec", ",", "resolver_context", "=", "self", ".", "_resolver_context", ")", "# Note that we cannot use pyewf's glob function since it does not", "# handle the file system abstraction dfvfs provides.", "segment_file_path_specs", "=", "ewf", ".", "EWFGlobPathSpec", "(", "file_system", ",", "path_spec", ")", "if", "not", "segment_file_path_specs", ":", "return", "None", "if", "parent_path_spec", ".", "IsSystemLevel", "(", ")", ":", "# Typically the file-like object cache should have room for 127 items.", "self", ".", "_resolver_context", ".", "SetMaximumNumberOfFileObjects", "(", "len", "(", "segment_file_path_specs", ")", "+", "127", ")", "for", "segment_file_path_spec", "in", "segment_file_path_specs", ":", "file_object", "=", "resolver", ".", "Resolver", ".", "OpenFileObject", "(", "segment_file_path_spec", ",", "resolver_context", "=", "self", ".", "_resolver_context", ")", "self", ".", "_file_objects", ".", "append", "(", "file_object", ")", "ewf_handle", "=", "pyewf", ".", "handle", "(", ")", "ewf_handle", ".", "open_file_objects", "(", "self", ".", "_file_objects", ")", "return", "ewf_handle" ]
Opens the file-like object defined by path specification. Args: path_spec (PathSpec): path specification. Returns: pyewf.handle: a file-like object or None. Raises: PathSpecError: if the path specification is invalid.
[ "Opens", "the", "file", "-", "like", "object", "defined", "by", "path", "specification", "." ]
python
train
AkihikoITOH/capybara
capybara/virtualenv/lib/python2.7/site-packages/lxml/html/__init__.py
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/lxml/html/__init__.py#L165-L176
def _label__get(self): """ Get or set any <label> element associated with this element. """ id = self.get('id') if not id: return None result = _label_xpath(self, id=id) if not result: return None else: return result[0]
[ "def", "_label__get", "(", "self", ")", ":", "id", "=", "self", ".", "get", "(", "'id'", ")", "if", "not", "id", ":", "return", "None", "result", "=", "_label_xpath", "(", "self", ",", "id", "=", "id", ")", "if", "not", "result", ":", "return", "None", "else", ":", "return", "result", "[", "0", "]" ]
Get or set any <label> element associated with this element.
[ "Get", "or", "set", "any", "<label", ">", "element", "associated", "with", "this", "element", "." ]
python
test
saltstack/salt
salt/client/ssh/wrapper/state.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/ssh/wrapper/state.py#L108-L118
def _check_pillar(kwargs, pillar=None): ''' Check the pillar for errors, refuse to run the state if there are errors in the pillar and return the pillar errors ''' if kwargs.get('force'): return True pillar_dict = pillar if pillar is not None else __pillar__ if '_errors' in pillar_dict: return False return True
[ "def", "_check_pillar", "(", "kwargs", ",", "pillar", "=", "None", ")", ":", "if", "kwargs", ".", "get", "(", "'force'", ")", ":", "return", "True", "pillar_dict", "=", "pillar", "if", "pillar", "is", "not", "None", "else", "__pillar__", "if", "'_errors'", "in", "pillar_dict", ":", "return", "False", "return", "True" ]
Check the pillar for errors, refuse to run the state if there are errors in the pillar and return the pillar errors
[ "Check", "the", "pillar", "for", "errors", "refuse", "to", "run", "the", "state", "if", "there", "are", "errors", "in", "the", "pillar", "and", "return", "the", "pillar", "errors" ]
python
train
webrecorder/pywb
pywb/rewrite/templateview.py
https://github.com/webrecorder/pywb/blob/77f8bb647639dd66f6b92b7a9174c28810e4b1d9/pywb/rewrite/templateview.py#L200-L232
def render_to_string(self, env, **kwargs): """Render this template. :param dict env: The WSGI environment associated with the request causing this template to be rendered :param any kwargs: The keyword arguments to be supplied to the Jninja template render method :return: The rendered template :rtype: str """ template = None template_path = env.get(self.jenv.env_template_dir_key) if template_path: # jinja paths are not os paths, always use '/' as separator # https://github.com/pallets/jinja/issues/411 template_path = template_path + '/' + self.insert_file try: template = self.jenv.jinja_env.get_template(template_path) except TemplateNotFound as te: pass if not template: template = self.jenv.jinja_env.get_template(self.insert_file) params = env.get(self.jenv.env_template_params_key) if params: kwargs.update(params) kwargs['env'] = env kwargs['static_prefix'] = env.get('pywb.host_prefix', '') + env.get('pywb.app_prefix', '') + '/static' return template.render(**kwargs)
[ "def", "render_to_string", "(", "self", ",", "env", ",", "*", "*", "kwargs", ")", ":", "template", "=", "None", "template_path", "=", "env", ".", "get", "(", "self", ".", "jenv", ".", "env_template_dir_key", ")", "if", "template_path", ":", "# jinja paths are not os paths, always use '/' as separator", "# https://github.com/pallets/jinja/issues/411", "template_path", "=", "template_path", "+", "'/'", "+", "self", ".", "insert_file", "try", ":", "template", "=", "self", ".", "jenv", ".", "jinja_env", ".", "get_template", "(", "template_path", ")", "except", "TemplateNotFound", "as", "te", ":", "pass", "if", "not", "template", ":", "template", "=", "self", ".", "jenv", ".", "jinja_env", ".", "get_template", "(", "self", ".", "insert_file", ")", "params", "=", "env", ".", "get", "(", "self", ".", "jenv", ".", "env_template_params_key", ")", "if", "params", ":", "kwargs", ".", "update", "(", "params", ")", "kwargs", "[", "'env'", "]", "=", "env", "kwargs", "[", "'static_prefix'", "]", "=", "env", ".", "get", "(", "'pywb.host_prefix'", ",", "''", ")", "+", "env", ".", "get", "(", "'pywb.app_prefix'", ",", "''", ")", "+", "'/static'", "return", "template", ".", "render", "(", "*", "*", "kwargs", ")" ]
Render this template. :param dict env: The WSGI environment associated with the request causing this template to be rendered :param any kwargs: The keyword arguments to be supplied to the Jinja template render method :return: The rendered template :rtype: str
[ "Render", "this", "template", "." ]
python
train
KelSolaar/Umbra
umbra/ui/widgets/basic_QPlainTextEdit.py
https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/ui/widgets/basic_QPlainTextEdit.py#L449-L459
def get_next_character(self): """ Returns the character after the cursor. :return: Next cursor character. :rtype: QString """ cursor = self.textCursor() cursor.movePosition(QTextCursor.NextCharacter, QTextCursor.KeepAnchor) return cursor.selectedText()
[ "def", "get_next_character", "(", "self", ")", ":", "cursor", "=", "self", ".", "textCursor", "(", ")", "cursor", ".", "movePosition", "(", "QTextCursor", ".", "NextCharacter", ",", "QTextCursor", ".", "KeepAnchor", ")", "return", "cursor", ".", "selectedText", "(", ")" ]
Returns the character after the cursor. :return: Next cursor character. :rtype: QString
[ "Returns", "the", "character", "after", "the", "cursor", "." ]
python
train
bitesofcode/projexui
projexui/widgets/xchart/xchartrenderer.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xchart/xchartrenderer.py#L297-L318
def calculateDatasetItems(self, scene, datasets): """ Syncs the scene together with the given datasets to make sure we have the proper number of items, by removing non-existant datasets and adding new ones. :param scene | <XChartScene> datasets | <XChartDataset> :return {<XChartDataset>: <XChartDatasetItem>, ..} """ dataitems = scene.datasetItems() olditems = set(dataitems.keys()).difference(datasets) newitems = set(datasets).difference(dataitems.keys()) for dataset in olditems: scene.removeItem(dataitems[dataset]) for dataset in newitems: dataitems[dataset] = scene.addDataset(dataset) return dataitems
[ "def", "calculateDatasetItems", "(", "self", ",", "scene", ",", "datasets", ")", ":", "dataitems", "=", "scene", ".", "datasetItems", "(", ")", "olditems", "=", "set", "(", "dataitems", ".", "keys", "(", ")", ")", ".", "difference", "(", "datasets", ")", "newitems", "=", "set", "(", "datasets", ")", ".", "difference", "(", "dataitems", ".", "keys", "(", ")", ")", "for", "dataset", "in", "olditems", ":", "scene", ".", "removeItem", "(", "dataitems", "[", "dataset", "]", ")", "for", "dataset", "in", "newitems", ":", "dataitems", "[", "dataset", "]", "=", "scene", ".", "addDataset", "(", "dataset", ")", "return", "dataitems" ]
Syncs the scene together with the given datasets to make sure we have the proper number of items, by removing non-existent datasets and adding new ones. :param scene | <XChartScene> datasets | <XChartDataset> :return {<XChartDataset>: <XChartDatasetItem>, ..}
[ "Syncs", "the", "scene", "together", "with", "the", "given", "datasets", "to", "make", "sure", "we", "have", "the", "proper", "number", "of", "items", "by", "removing", "non", "-", "existent", "datasets", "and", "adding", "new", "ones", ".", ":", "param", "scene", "|", "<XChartScene", ">", "datasets", "|", "<XChartDataset", ">", ":", "return", "{", "<XChartDataset", ">", ":", "<XChartDatasetItem", ">", "..", "}" ]
python
train
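The reconcile step in calculateDatasetItems above is a pair of set differences: drop items whose dataset vanished, create items for new datasets, keep everything else. The same idea is sketched below with a plain dict standing in for the XChartScene (the Qt types are left out).

def sync_items(current_items, wanted_keys, make_item):
    """Drop items whose key disappeared, create items for new keys, keep the rest."""
    stale = set(current_items) - set(wanted_keys)
    fresh = set(wanted_keys) - set(current_items)
    for key in stale:
        del current_items[key]
    for key in fresh:
        current_items[key] = make_item(key)
    return current_items

items = {'cpu': 'item-cpu', 'mem': 'item-mem'}
print(sync_items(items, ['cpu', 'disk'], lambda k: 'item-' + k))
# {'cpu': 'item-cpu', 'disk': 'item-disk'}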
mitsei/dlkit
dlkit/json_/commenting/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/commenting/objects.py#L303-L318
def set_rating(self, grade_id): """Sets the rating. arg: grade_id (osid.id.Id): the new rating raise: InvalidArgument - ``grade_id`` is invalid raise: NoAccess - ``Metadata.isReadOnly()`` is ``true`` raise: NullArgument - ``grade_id`` is ``null`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.resource.ResourceForm.set_avatar_template if self.get_rating_metadata().is_read_only(): raise errors.NoAccess() if not self._is_valid_id(grade_id): raise errors.InvalidArgument() self._my_map['ratingId'] = str(grade_id)
[ "def", "set_rating", "(", "self", ",", "grade_id", ")", ":", "# Implemented from template for osid.resource.ResourceForm.set_avatar_template", "if", "self", ".", "get_rating_metadata", "(", ")", ".", "is_read_only", "(", ")", ":", "raise", "errors", ".", "NoAccess", "(", ")", "if", "not", "self", ".", "_is_valid_id", "(", "grade_id", ")", ":", "raise", "errors", ".", "InvalidArgument", "(", ")", "self", ".", "_my_map", "[", "'ratingId'", "]", "=", "str", "(", "grade_id", ")" ]
Sets the rating. arg: grade_id (osid.id.Id): the new rating raise: InvalidArgument - ``grade_id`` is invalid raise: NoAccess - ``Metadata.isReadOnly()`` is ``true`` raise: NullArgument - ``grade_id`` is ``null`` *compliance: mandatory -- This method must be implemented.*
[ "Sets", "the", "rating", "." ]
python
train
fjwCode/cerium
cerium/androiddriver.py
https://github.com/fjwCode/cerium/blob/f6e06e0dcf83a0bc924828e9d6cb81383ed2364f/cerium/androiddriver.py#L104-L118
def _execute(self, *args: str, **kwargs) -> tuple: '''Execute command.''' process = self.execute( args=args, options=merge_dict(self.options, kwargs)) command = ' '.join(process.args) if self._dev: output, error = process.communicate() print( "Debug Information", "Command: {!r}".format(command), "Output: {!r}".format(output.encode('utf-8')), "Error: {!r}".format(error.encode('utf-8')), sep='\n', end='\n{}\n'.format('=' * 80) ) return process.communicate()
[ "def", "_execute", "(", "self", ",", "*", "args", ":", "str", ",", "*", "*", "kwargs", ")", "->", "tuple", ":", "process", "=", "self", ".", "execute", "(", "args", "=", "args", ",", "options", "=", "merge_dict", "(", "self", ".", "options", ",", "kwargs", ")", ")", "command", "=", "' '", ".", "join", "(", "process", ".", "args", ")", "if", "self", ".", "_dev", ":", "output", ",", "error", "=", "process", ".", "communicate", "(", ")", "print", "(", "\"Debug Information\"", ",", "\"Command: {!r}\"", ".", "format", "(", "command", ")", ",", "\"Output: {!r}\"", ".", "format", "(", "output", ".", "encode", "(", "'utf-8'", ")", ")", ",", "\"Error: {!r}\"", ".", "format", "(", "error", ".", "encode", "(", "'utf-8'", ")", ")", ",", "sep", "=", "'\\n'", ",", "end", "=", "'\\n{}\\n'", ".", "format", "(", "'='", "*", "80", ")", ")", "return", "process", ".", "communicate", "(", ")" ]
Execute command.
[ "Execute", "command", "." ]
python
train
CS207-Final-Project-Group-10/cs207-FinalProject
solar_system/eight_planets.py
https://github.com/CS207-Final-Project-Group-10/cs207-FinalProject/blob/842e9c2d3ca1490cef18c086dfde81856d8d3a82/solar_system/eight_planets.py#L234-L275
def accel(q: np.ndarray): """ Compute the gravitational accelerations in the system q in row vector of 6 elements: sun (x, y, z), earth (x, y, z) """ # Infer number of dimensions from q dims: int = len(q) # Initialize acceleration as dimsx1 array a: np.ndarray = np.zeros(dims) # Iterate over each distinct pair of bodies for i in range(B): for j in range(i+1, B): # Masses of body i and j m0 = mass[i] m1 = mass[j] # Extract position of body i and j as 3-vectors pos_0 = q[slices[i]] pos_1 = q[slices[j]] # Displacement vector from body i to body j dv_01: np.ndarray = pos_1 - pos_0 # Distance from body i to j r_01: float = np.linalg.norm(dv_01) # Unit vector pointing from body i to body j udv_01 = dv_01 / r_01 # The force between these has magnitude G*m0*m1 / r^2 f_01: float = (G * m0 * m1) / (r_01 ** 2) # The force vectors are attractive a[slices[i]] += f_01 * udv_01 / m0 a[slices[j]] -= f_01 * udv_01 / m1 # Return the acceleration vector return a
[ "def", "accel", "(", "q", ":", "np", ".", "ndarray", ")", ":", "# Infer number of dimensions from q", "dims", ":", "int", "=", "len", "(", "q", ")", "# Initialize acceleration as dimsx1 array", "a", ":", "np", ".", "ndarray", "=", "np", ".", "zeros", "(", "dims", ")", "# Iterate over each distinct pair of bodies", "for", "i", "in", "range", "(", "B", ")", ":", "for", "j", "in", "range", "(", "i", "+", "1", ",", "B", ")", ":", "# Masses of body i and j", "m0", "=", "mass", "[", "i", "]", "m1", "=", "mass", "[", "j", "]", "# Extract position of body i and j as 3-vectors", "pos_0", "=", "q", "[", "slices", "[", "i", "]", "]", "pos_1", "=", "q", "[", "slices", "[", "j", "]", "]", "# Displacement vector from body i to body j", "dv_01", ":", "np", ".", "ndarray", "=", "pos_1", "-", "pos_0", "# Distance from body i to j", "r_01", ":", "float", "=", "np", ".", "linalg", ".", "norm", "(", "dv_01", ")", "# Unit vector pointing from body i to body j", "udv_01", "=", "dv_01", "/", "r_01", "# The force between these has magnitude G*m0*m1 / r^2", "f_01", ":", "float", "=", "(", "G", "*", "m0", "*", "m1", ")", "/", "(", "r_01", "**", "2", ")", "# The force vectors are attractive", "a", "[", "slices", "[", "i", "]", "]", "+=", "f_01", "*", "udv_01", "/", "m0", "a", "[", "slices", "[", "j", "]", "]", "-=", "f_01", "*", "udv_01", "/", "m1", "# Return the acceleration vector", "return", "a" ]
Compute the gravitational accelerations in the system q in row vector of 6 elements: sun (x, y, z), earth (x, y, z)
[ "Compute", "the", "gravitational", "accelerations", "in", "the", "system", "q", "in", "row", "vector", "of", "6", "elements", ":", "sun", "(", "x", "y", "z", ")", "earth", "(", "x", "y", "z", ")" ]
python
train
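Below is a two-body version of the pairwise loop in the accel record above, with made-up masses and G set to 1 purely for illustration. It keeps the same bookkeeping (a flat state vector, per-body slices, the attractive force applied with opposite signs) and checks that the mass-weighted accelerations cancel, as they should for an internal force.

import numpy as np

G = 1.0                       # toy gravitational constant
mass = [2.0, 1.0]             # two bodies
slices = [slice(0, 3), slice(3, 6)]

def accel(q):
    """Acceleration for a flat state vector q = [x0, y0, z0, x1, y1, z1]."""
    a = np.zeros(len(q))
    for i in range(len(mass)):
        for j in range(i + 1, len(mass)):
            dv = q[slices[j]] - q[slices[i]]         # displacement i -> j
            r = np.linalg.norm(dv)
            f = G * mass[i] * mass[j] / r**2         # force magnitude
            a[slices[i]] += f * (dv / r) / mass[i]   # i is pulled toward j
            a[slices[j]] -= f * (dv / r) / mass[j]   # j is pulled toward i
    return a

q = np.array([0.0, 0.0, 0.0, 2.0, 0.0, 0.0])
a = accel(q)
print(a)                                              # [0.25 0 0 -0.5 0 0]
# Momentum check: the mass-weighted accelerations cancel.
print(mass[0] * a[slices[0]] + mass[1] * a[slices[1]])   # ~[0, 0, 0]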
Pytwitcher/pytwitcherapi
src/pytwitcherapi/chat/connection.py
https://github.com/Pytwitcher/pytwitcherapi/blob/d53ac5ad5ca113ecb7da542e8cdcbbf8c762b336/src/pytwitcherapi/chat/connection.py#L187-L222
def _handle_message(self, tags, source, command, target, msg): """Construct the correct events and handle them :param tags: the tags of the message :type tags: :class:`list` of :class:`message.Tag` :param source: the sender of the message :type source: :class:`str` :param command: the event type :type command: :class:`str` :param target: the target of the message :type target: :class:`str` :param msg: the content :type msg: :class:`str` :returns: None :rtype: None :raises: None """ if isinstance(msg, tuple): if command in ["privmsg", "pubmsg"]: command = "ctcp" else: command = "ctcpreply" msg = list(msg) log.debug("tags: %s, command: %s, source: %s, target: %s, " "arguments: %s", tags, command, source, target, msg) event = Event3(command, source, target, msg, tags=tags) self._handle_event(event) if command == "ctcp" and msg[0] == "ACTION": event = Event3("action", source, target, msg[1:], tags=tags) self._handle_event(event) else: log.debug("tags: %s, command: %s, source: %s, target: %s, " "arguments: %s", tags, command, source, target, [msg]) event = Event3(command, source, target, [msg], tags=tags) self._handle_event(event)
[ "def", "_handle_message", "(", "self", ",", "tags", ",", "source", ",", "command", ",", "target", ",", "msg", ")", ":", "if", "isinstance", "(", "msg", ",", "tuple", ")", ":", "if", "command", "in", "[", "\"privmsg\"", ",", "\"pubmsg\"", "]", ":", "command", "=", "\"ctcp\"", "else", ":", "command", "=", "\"ctcpreply\"", "msg", "=", "list", "(", "msg", ")", "log", ".", "debug", "(", "\"tags: %s, command: %s, source: %s, target: %s, \"", "\"arguments: %s\"", ",", "tags", ",", "command", ",", "source", ",", "target", ",", "msg", ")", "event", "=", "Event3", "(", "command", ",", "source", ",", "target", ",", "msg", ",", "tags", "=", "tags", ")", "self", ".", "_handle_event", "(", "event", ")", "if", "command", "==", "\"ctcp\"", "and", "msg", "[", "0", "]", "==", "\"ACTION\"", ":", "event", "=", "Event3", "(", "\"action\"", ",", "source", ",", "target", ",", "msg", "[", "1", ":", "]", ",", "tags", "=", "tags", ")", "self", ".", "_handle_event", "(", "event", ")", "else", ":", "log", ".", "debug", "(", "\"tags: %s, command: %s, source: %s, target: %s, \"", "\"arguments: %s\"", ",", "tags", ",", "command", ",", "source", ",", "target", ",", "[", "msg", "]", ")", "event", "=", "Event3", "(", "command", ",", "source", ",", "target", ",", "[", "msg", "]", ",", "tags", "=", "tags", ")", "self", ".", "_handle_event", "(", "event", ")" ]
Construct the correct events and handle them :param tags: the tags of the message :type tags: :class:`list` of :class:`message.Tag` :param source: the sender of the message :type source: :class:`str` :param command: the event type :type command: :class:`str` :param target: the target of the message :type target: :class:`str` :param msg: the content :type msg: :class:`str` :returns: None :rtype: None :raises: None
[ "Construct", "the", "correct", "events", "and", "handle", "them" ]
python
train
adamrehn/ue4cli
ue4cli/UnrealManagerBase.py
https://github.com/adamrehn/ue4cli/blob/f1c34502c96059e36757b7433da7e98760a75a6f/ue4cli/UnrealManagerBase.py#L143-L153
def getDescriptor(self, dir): """ Detects the descriptor file for either an Unreal project or an Unreal plugin in the specified directory """ try: return self.getProjectDescriptor(dir) except: try: return self.getPluginDescriptor(dir) except: raise UnrealManagerException('could not detect an Unreal project or plugin in the directory "{}"'.format(dir))
[ "def", "getDescriptor", "(", "self", ",", "dir", ")", ":", "try", ":", "return", "self", ".", "getProjectDescriptor", "(", "dir", ")", "except", ":", "try", ":", "return", "self", ".", "getPluginDescriptor", "(", "dir", ")", "except", ":", "raise", "UnrealManagerException", "(", "'could not detect an Unreal project or plugin in the directory \"{}\"'", ".", "format", "(", "dir", ")", ")" ]
Detects the descriptor file for either an Unreal project or an Unreal plugin in the specified directory
[ "Detects", "the", "descriptor", "file", "for", "either", "an", "Unreal", "project", "or", "an", "Unreal", "plugin", "in", "the", "specified", "directory" ]
python
train
AustralianSynchrotron/lightflow
lightflow/models/task_data.py
https://github.com/AustralianSynchrotron/lightflow/blob/dc53dbc1d961e20fb144273baca258060705c03e/lightflow/models/task_data.py#L165-L177
def add_alias(self, alias, index): """ Add an alias pointing to the specified index. Args: alias (str): The alias that should point to the given index. index (int): The index of the dataset for which an alias should be added. Raises: DataInvalidIndex: If the index does not represent a valid dataset. """ if index >= len(self._datasets): raise DataInvalidIndex('A dataset with index {} does not exist'.format(index)) self._aliases[alias] = index
[ "def", "add_alias", "(", "self", ",", "alias", ",", "index", ")", ":", "if", "index", ">=", "len", "(", "self", ".", "_datasets", ")", ":", "raise", "DataInvalidIndex", "(", "'A dataset with index {} does not exist'", ".", "format", "(", "index", ")", ")", "self", ".", "_aliases", "[", "alias", "]", "=", "index" ]
Add an alias pointing to the specified index. Args: alias (str): The alias that should point to the given index. index (int): The index of the dataset for which an alias should be added. Raises: DataInvalidIndex: If the index does not represent a valid dataset.
[ "Add", "an", "alias", "pointing", "to", "the", "specified", "index", "." ]
python
train
evhub/coconut
coconut/compiler/matching.py
https://github.com/evhub/coconut/blob/ff97177344e7604e89a0a98a977a87ed2a56fc6d/coconut/compiler/matching.py#L131-L135
def get_checks(self, position=None): """Gets the checks at the position.""" if position is None: position = self.position return self.checkdefs[position][0]
[ "def", "get_checks", "(", "self", ",", "position", "=", "None", ")", ":", "if", "position", "is", "None", ":", "position", "=", "self", ".", "position", "return", "self", ".", "checkdefs", "[", "position", "]", "[", "0", "]" ]
Gets the checks at the position.
[ "Gets", "the", "checks", "at", "the", "position", "." ]
python
train
wummel/linkchecker
linkcheck/checker/urlbase.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/checker/urlbase.py#L657-L666
def add_url (self, url, line=0, column=0, page=0, name=u"", base=None): """Add new URL to queue.""" if base: base_ref = urlutil.url_norm(base)[0] else: base_ref = None url_data = get_url_from(url, self.recursion_level+1, self.aggregate, parent_url=self.url, base_ref=base_ref, line=line, column=column, page=page, name=name, parent_content_type=self.content_type) self.aggregate.urlqueue.put(url_data)
[ "def", "add_url", "(", "self", ",", "url", ",", "line", "=", "0", ",", "column", "=", "0", ",", "page", "=", "0", ",", "name", "=", "u\"\"", ",", "base", "=", "None", ")", ":", "if", "base", ":", "base_ref", "=", "urlutil", ".", "url_norm", "(", "base", ")", "[", "0", "]", "else", ":", "base_ref", "=", "None", "url_data", "=", "get_url_from", "(", "url", ",", "self", ".", "recursion_level", "+", "1", ",", "self", ".", "aggregate", ",", "parent_url", "=", "self", ".", "url", ",", "base_ref", "=", "base_ref", ",", "line", "=", "line", ",", "column", "=", "column", ",", "page", "=", "page", ",", "name", "=", "name", ",", "parent_content_type", "=", "self", ".", "content_type", ")", "self", ".", "aggregate", ".", "urlqueue", ".", "put", "(", "url_data", ")" ]
Add new URL to queue.
[ "Add", "new", "URL", "to", "queue", "." ]
python
train
raymontag/kppy
kppy/database.py
https://github.com/raymontag/kppy/blob/a43f1fff7d49da1da4b3d8628a1b3ebbaf47f43a/kppy/database.py#L1068-L1121
def _save_entry_field(self, field_type, entry): """This group packs a entry field""" if field_type == 0x0000: # Ignored pass elif field_type == 0x0001: if entry.uuid is not None: return (16, entry.uuid) elif field_type == 0x0002: if entry.group_id is not None: return (4, struct.pack('<I', entry.group_id)) elif field_type == 0x0003: if entry.image is not None: return (4, struct.pack('<I', entry.image)) elif field_type == 0x0004: if entry.title is not None: return (len(entry.title.encode())+1, (entry.title+'\0').encode()) elif field_type == 0x0005: if entry.url is not None: return (len(entry.url.encode())+1, (entry.url+'\0').encode()) elif field_type == 0x0006: if entry.username is not None: return (len(entry.username.encode())+1, (entry.username+'\0').encode()) elif field_type == 0x0007: if entry.password is not None: return (len(entry.password.encode())+1, (entry.password+'\0').encode()) elif field_type == 0x0008: if entry.comment is not None: return (len(entry.comment.encode())+1, (entry.comment+'\0').encode()) elif field_type == 0x0009: if entry.creation is not None: return (5, self._pack_date(entry.creation)) elif field_type == 0x000A: if entry.last_mod is not None: return (5, self._pack_date(entry.last_mod)) elif field_type == 0x000B: if entry.last_access is not None: return (5, self._pack_date(entry.last_access)) elif field_type == 0x000C: if entry.expire is not None: return (5, self._pack_date(entry.expire)) elif field_type == 0x000D: if entry.binary_desc is not None: return (len(entry.binary_desc.encode())+1, (entry.binary_desc+'\0').encode()) elif field_type == 0x000E: if entry.binary is not None: return (len(entry.binary), entry.binary) return False
[ "def", "_save_entry_field", "(", "self", ",", "field_type", ",", "entry", ")", ":", "if", "field_type", "==", "0x0000", ":", "# Ignored", "pass", "elif", "field_type", "==", "0x0001", ":", "if", "entry", ".", "uuid", "is", "not", "None", ":", "return", "(", "16", ",", "entry", ".", "uuid", ")", "elif", "field_type", "==", "0x0002", ":", "if", "entry", ".", "group_id", "is", "not", "None", ":", "return", "(", "4", ",", "struct", ".", "pack", "(", "'<I'", ",", "entry", ".", "group_id", ")", ")", "elif", "field_type", "==", "0x0003", ":", "if", "entry", ".", "image", "is", "not", "None", ":", "return", "(", "4", ",", "struct", ".", "pack", "(", "'<I'", ",", "entry", ".", "image", ")", ")", "elif", "field_type", "==", "0x0004", ":", "if", "entry", ".", "title", "is", "not", "None", ":", "return", "(", "len", "(", "entry", ".", "title", ".", "encode", "(", ")", ")", "+", "1", ",", "(", "entry", ".", "title", "+", "'\\0'", ")", ".", "encode", "(", ")", ")", "elif", "field_type", "==", "0x0005", ":", "if", "entry", ".", "url", "is", "not", "None", ":", "return", "(", "len", "(", "entry", ".", "url", ".", "encode", "(", ")", ")", "+", "1", ",", "(", "entry", ".", "url", "+", "'\\0'", ")", ".", "encode", "(", ")", ")", "elif", "field_type", "==", "0x0006", ":", "if", "entry", ".", "username", "is", "not", "None", ":", "return", "(", "len", "(", "entry", ".", "username", ".", "encode", "(", ")", ")", "+", "1", ",", "(", "entry", ".", "username", "+", "'\\0'", ")", ".", "encode", "(", ")", ")", "elif", "field_type", "==", "0x0007", ":", "if", "entry", ".", "password", "is", "not", "None", ":", "return", "(", "len", "(", "entry", ".", "password", ".", "encode", "(", ")", ")", "+", "1", ",", "(", "entry", ".", "password", "+", "'\\0'", ")", ".", "encode", "(", ")", ")", "elif", "field_type", "==", "0x0008", ":", "if", "entry", ".", "comment", "is", "not", "None", ":", "return", "(", "len", "(", "entry", ".", "comment", ".", "encode", "(", ")", ")", "+", "1", ",", "(", "entry", ".", "comment", "+", "'\\0'", ")", ".", "encode", "(", ")", ")", "elif", "field_type", "==", "0x0009", ":", "if", "entry", ".", "creation", "is", "not", "None", ":", "return", "(", "5", ",", "self", ".", "_pack_date", "(", "entry", ".", "creation", ")", ")", "elif", "field_type", "==", "0x000A", ":", "if", "entry", ".", "last_mod", "is", "not", "None", ":", "return", "(", "5", ",", "self", ".", "_pack_date", "(", "entry", ".", "last_mod", ")", ")", "elif", "field_type", "==", "0x000B", ":", "if", "entry", ".", "last_access", "is", "not", "None", ":", "return", "(", "5", ",", "self", ".", "_pack_date", "(", "entry", ".", "last_access", ")", ")", "elif", "field_type", "==", "0x000C", ":", "if", "entry", ".", "expire", "is", "not", "None", ":", "return", "(", "5", ",", "self", ".", "_pack_date", "(", "entry", ".", "expire", ")", ")", "elif", "field_type", "==", "0x000D", ":", "if", "entry", ".", "binary_desc", "is", "not", "None", ":", "return", "(", "len", "(", "entry", ".", "binary_desc", ".", "encode", "(", ")", ")", "+", "1", ",", "(", "entry", ".", "binary_desc", "+", "'\\0'", ")", ".", "encode", "(", ")", ")", "elif", "field_type", "==", "0x000E", ":", "if", "entry", ".", "binary", "is", "not", "None", ":", "return", "(", "len", "(", "entry", ".", "binary", ")", ",", "entry", ".", "binary", ")", "return", "False" ]
This group packs an entry field
[ "This", "group", "packs", "an", "entry", "field" ]
python
train
chimera0/accel-brain-code
Reinforcement-Learning/pyqlearning/q_learning.py
https://github.com/chimera0/accel-brain-code/blob/03661f6f544bed656269fcd4b3c23c9061629daa/Reinforcement-Learning/pyqlearning/q_learning.py#L355-L371
def update_q(self, state_key, action_key, reward_value, next_max_q): ''' Update Q-Value. Args: state_key: The key of state. action_key: The key of action. reward_value: R-Value(Reward). next_max_q: Maximum Q-Value. ''' # Now Q-Value. q = self.extract_q_df(state_key, action_key) # Update Q-Value. new_q = q + self.alpha_value * (reward_value + (self.gamma_value * next_max_q) - q) # Save updated Q-Value. self.save_q_df(state_key, action_key, new_q)
[ "def", "update_q", "(", "self", ",", "state_key", ",", "action_key", ",", "reward_value", ",", "next_max_q", ")", ":", "# Now Q-Value.", "q", "=", "self", ".", "extract_q_df", "(", "state_key", ",", "action_key", ")", "# Update Q-Value.", "new_q", "=", "q", "+", "self", ".", "alpha_value", "*", "(", "reward_value", "+", "(", "self", ".", "gamma_value", "*", "next_max_q", ")", "-", "q", ")", "# Save updated Q-Value.", "self", ".", "save_q_df", "(", "state_key", ",", "action_key", ",", "new_q", ")" ]
Update Q-Value. Args: state_key: The key of state. action_key: The key of action. reward_value: R-Value(Reward). next_max_q: Maximum Q-Value.
[ "Update", "Q", "-", "Value", "." ]
python
train
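The update above is the standard tabular Q-learning rule, Q(s, a) <- Q(s, a) + alpha * (R + gamma * max_a' Q(s', a') - Q(s, a)). A minimal standalone sketch of the same arithmetic follows; the alpha, gamma and Q values are illustrative assumptions, not values taken from the repository.

alpha_value = 0.1   # learning rate (assumed)
gamma_value = 0.9   # discount factor (assumed)

q = 0.5             # current Q(s, a)
reward_value = 1.0  # observed reward R
next_max_q = 0.8    # max over a' of Q(s', a')

# Q(s, a) <- Q(s, a) + alpha * (R + gamma * max_a' Q(s', a') - Q(s, a))
new_q = q + alpha_value * (reward_value + gamma_value * next_max_q - q)
print(new_q)  # 0.5 + 0.1 * (1.72 - 0.5) = 0.622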
pepkit/peppy
peppy/utils.py
https://github.com/pepkit/peppy/blob/f0f725e1557936b81c86573a77400e6f8da78f05/peppy/utils.py#L131-L150
def get_file_size(filename): """ Get size of all files in gigabytes (Gb). :param str | collections.Iterable[str] filename: A space-separated string or list of space-separated strings of absolute file paths. :return float: size of file(s), in gigabytes. """ if filename is None: return float(0) if type(filename) is list: return float(sum([get_file_size(x) for x in filename])) try: total_bytes = sum([float(os.stat(f).st_size) for f in filename.split(" ") if f is not '']) except OSError: # File not found return 0.0 else: return float(total_bytes) / (1024 ** 3)
[ "def", "get_file_size", "(", "filename", ")", ":", "if", "filename", "is", "None", ":", "return", "float", "(", "0", ")", "if", "type", "(", "filename", ")", "is", "list", ":", "return", "float", "(", "sum", "(", "[", "get_file_size", "(", "x", ")", "for", "x", "in", "filename", "]", ")", ")", "try", ":", "total_bytes", "=", "sum", "(", "[", "float", "(", "os", ".", "stat", "(", "f", ")", ".", "st_size", ")", "for", "f", "in", "filename", ".", "split", "(", "\" \"", ")", "if", "f", "is", "not", "''", "]", ")", "except", "OSError", ":", "# File not found", "return", "0.0", "else", ":", "return", "float", "(", "total_bytes", ")", "/", "(", "1024", "**", "3", ")" ]
Get size of all files in gigabytes (Gb). :param str | collections.Iterable[str] filename: A space-separated string or list of space-separated strings of absolute file paths. :return float: size of file(s), in gigabytes.
[ "Get", "size", "of", "all", "files", "in", "gigabytes", "(", "Gb", ")", "." ]
python
train
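A short usage sketch of the helper above, assuming peppy is importable from the path shown; the temporary file and the missing path are made up for the demonstration.

import tempfile

from peppy.utils import get_file_size  # helper shown above

with tempfile.NamedTemporaryFile(delete=False) as handle:
    handle.write(b"\0" * (1024 ** 2))  # write 1 MiB of zero bytes
    path = handle.name

print(get_file_size(path))                # ~0.0009766, i.e. 1 MiB expressed in GiB
print(get_file_size([path, path]))        # sizes in a list of paths are summed
print(get_file_size("missing_file.txt"))  # 0.0 -- a missing file counts as empty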
vertexproject/synapse
synapse/cortex.py
https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/cortex.py#L1604-L1682
async def streamstorm(self, text, opts=None, user=None): ''' Evaluate a storm query and yield result messages. Yields: ((str,dict)): Storm messages. ''' if opts is None: opts = {} MSG_QUEUE_SIZE = 1000 chan = asyncio.Queue(MSG_QUEUE_SIZE, loop=self.loop) if user is None: user = self.auth.getUserByName('root') # promote ourself to a synapse task synt = await self.boss.promote('storm', user=user, info={'query': text}) show = opts.get('show') async def runStorm(): cancelled = False tick = s_common.now() count = 0 try: # First, try text parsing. If this fails, we won't be able to get # a storm runtime in the snap, so catch and pass the `err` message # before handing a `fini` message along. self.getStormQuery(text) await chan.put(('init', {'tick': tick, 'text': text, 'task': synt.iden})) shownode = (show is None or 'node' in show) async with await self.snap(user=user) as snap: if show is None: snap.link(chan.put) else: [snap.on(n, chan.put) for n in show] if shownode: async for pode in snap.iterStormPodes(text, opts=opts, user=user): await chan.put(('node', pode)) count += 1 else: async for item in snap.storm(text, opts=opts, user=user): count += 1 except asyncio.CancelledError: logger.warning('Storm runtime cancelled.') cancelled = True raise except Exception as e: logger.exception('Error during storm execution') enfo = s_common.err(e) enfo[1].pop('esrc', None) enfo[1].pop('ename', None) await chan.put(('err', enfo)) finally: if cancelled: return tock = s_common.now() took = tock - tick await chan.put(('fini', {'tock': tock, 'took': took, 'count': count})) await synt.worker(runStorm()) while True: mesg = await chan.get() yield mesg if mesg[0] == 'fini': break
[ "async", "def", "streamstorm", "(", "self", ",", "text", ",", "opts", "=", "None", ",", "user", "=", "None", ")", ":", "if", "opts", "is", "None", ":", "opts", "=", "{", "}", "MSG_QUEUE_SIZE", "=", "1000", "chan", "=", "asyncio", ".", "Queue", "(", "MSG_QUEUE_SIZE", ",", "loop", "=", "self", ".", "loop", ")", "if", "user", "is", "None", ":", "user", "=", "self", ".", "auth", ".", "getUserByName", "(", "'root'", ")", "# promote ourself to a synapse task", "synt", "=", "await", "self", ".", "boss", ".", "promote", "(", "'storm'", ",", "user", "=", "user", ",", "info", "=", "{", "'query'", ":", "text", "}", ")", "show", "=", "opts", ".", "get", "(", "'show'", ")", "async", "def", "runStorm", "(", ")", ":", "cancelled", "=", "False", "tick", "=", "s_common", ".", "now", "(", ")", "count", "=", "0", "try", ":", "# First, try text parsing. If this fails, we won't be able to get", "# a storm runtime in the snap, so catch and pass the `err` message", "# before handing a `fini` message along.", "self", ".", "getStormQuery", "(", "text", ")", "await", "chan", ".", "put", "(", "(", "'init'", ",", "{", "'tick'", ":", "tick", ",", "'text'", ":", "text", ",", "'task'", ":", "synt", ".", "iden", "}", ")", ")", "shownode", "=", "(", "show", "is", "None", "or", "'node'", "in", "show", ")", "async", "with", "await", "self", ".", "snap", "(", "user", "=", "user", ")", "as", "snap", ":", "if", "show", "is", "None", ":", "snap", ".", "link", "(", "chan", ".", "put", ")", "else", ":", "[", "snap", ".", "on", "(", "n", ",", "chan", ".", "put", ")", "for", "n", "in", "show", "]", "if", "shownode", ":", "async", "for", "pode", "in", "snap", ".", "iterStormPodes", "(", "text", ",", "opts", "=", "opts", ",", "user", "=", "user", ")", ":", "await", "chan", ".", "put", "(", "(", "'node'", ",", "pode", ")", ")", "count", "+=", "1", "else", ":", "async", "for", "item", "in", "snap", ".", "storm", "(", "text", ",", "opts", "=", "opts", ",", "user", "=", "user", ")", ":", "count", "+=", "1", "except", "asyncio", ".", "CancelledError", ":", "logger", ".", "warning", "(", "'Storm runtime cancelled.'", ")", "cancelled", "=", "True", "raise", "except", "Exception", "as", "e", ":", "logger", ".", "exception", "(", "'Error during storm execution'", ")", "enfo", "=", "s_common", ".", "err", "(", "e", ")", "enfo", "[", "1", "]", ".", "pop", "(", "'esrc'", ",", "None", ")", "enfo", "[", "1", "]", ".", "pop", "(", "'ename'", ",", "None", ")", "await", "chan", ".", "put", "(", "(", "'err'", ",", "enfo", ")", ")", "finally", ":", "if", "cancelled", ":", "return", "tock", "=", "s_common", ".", "now", "(", ")", "took", "=", "tock", "-", "tick", "await", "chan", ".", "put", "(", "(", "'fini'", ",", "{", "'tock'", ":", "tock", ",", "'took'", ":", "took", ",", "'count'", ":", "count", "}", ")", ")", "await", "synt", ".", "worker", "(", "runStorm", "(", ")", ")", "while", "True", ":", "mesg", "=", "await", "chan", ".", "get", "(", ")", "yield", "mesg", "if", "mesg", "[", "0", "]", "==", "'fini'", ":", "break" ]
Evaluate a storm query and yield result messages. Yields: ((str,dict)): Storm messages.
[ "Evaluate", "a", "storm", "query", "and", "yield", "result", "messages", ".", "Yields", ":", "((", "str", "dict", "))", ":", "Storm", "messages", "." ]
python
train
Nachtfeuer/pipeline
spline/components/docker.py
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/docker.py#L37-L48
def update_environment_variables(self, filename): """Updating OS environment variables and current script path and filename.""" self.env.update({'PIPELINE_BASH_FILE_ORIGINAL': filename}) filename = os.path.join('/root/scripts', os.path.basename(filename)) self.env.update({'PIPELINE_BASH_FILE': filename}) # remove those keys for Docker since the paths inside Docker are different os_environ = os.environ.copy() for remove_variable in ['PATH', 'PYTHONPATH', 'JAVA_HOME', 'HOME']: os_environ.pop(remove_variable, None) self.env.update(os_environ)
[ "def", "update_environment_variables", "(", "self", ",", "filename", ")", ":", "self", ".", "env", ".", "update", "(", "{", "'PIPELINE_BASH_FILE_ORIGINAL'", ":", "filename", "}", ")", "filename", "=", "os", ".", "path", ".", "join", "(", "'/root/scripts'", ",", "os", ".", "path", ".", "basename", "(", "filename", ")", ")", "self", ".", "env", ".", "update", "(", "{", "'PIPELINE_BASH_FILE'", ":", "filename", "}", ")", "# remove those keys for Docker since the paths inside Docker are different", "os_environ", "=", "os", ".", "environ", ".", "copy", "(", ")", "for", "remove_variable", "in", "[", "'PATH'", ",", "'PYTHONPATH'", ",", "'JAVA_HOME'", ",", "'HOME'", "]", ":", "os_environ", ".", "pop", "(", "remove_variable", ",", "None", ")", "self", ".", "env", ".", "update", "(", "os_environ", ")" ]
Updating OS environment variables and current script path and filename.
[ "Updating", "OS", "environment", "variables", "and", "current", "script", "path", "and", "filename", "." ]
python
train
ssato/python-anytemplate
anytemplate/engine.py
https://github.com/ssato/python-anytemplate/blob/3e56baa914bd47f044083b20e33100f836443596/anytemplate/engine.py#L50-L57
def list_engines_by_priority(engines=None): """ Return a list of engines supported sorted by each priority. """ if engines is None: engines = ENGINES return sorted(engines, key=operator.methodcaller("priority"))
[ "def", "list_engines_by_priority", "(", "engines", "=", "None", ")", ":", "if", "engines", "is", "None", ":", "engines", "=", "ENGINES", "return", "sorted", "(", "engines", ",", "key", "=", "operator", ".", "methodcaller", "(", "\"priority\"", ")", ")" ]
Return a list of engines supported sorted by each priority.
[ "Return", "a", "list", "of", "engines", "supported", "sorted", "by", "each", "priority", "." ]
python
train
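Because the sort key is operator.methodcaller("priority"), anything with a priority() method can be passed in. A small usage sketch, assuming anytemplate is importable from the path shown; the engine class and priority values are made up.

from anytemplate.engine import list_engines_by_priority  # helper shown above

class FakeEngine:
    """Stand-in engine (assumption); only a priority() method is needed here."""
    def __init__(self, name, prio):
        self.name = name
        self._prio = prio

    def priority(self):
        return self._prio

engines = [FakeEngine("mako", 20), FakeEngine("string.Template", 10), FakeEngine("jinja2", 30)]
for engine in list_engines_by_priority(engines):
    print(engine.name)  # string.Template, mako, jinja2 -- ascending priority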
ARMmbed/mbed-cloud-sdk-python
src/mbed_cloud/pagination.py
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/pagination.py#L202-L211
def data(self): """Deprecated. Returns the data as a `list`""" import warnings warnings.warn( '`data` attribute is deprecated and will be removed in a future release, ' 'use %s as an iterable instead' % (PaginatedResponse,), category=DeprecationWarning, stacklevel=2 # log wherever '.data' is referenced, rather than this line ) return list(self)
[ "def", "data", "(", "self", ")", ":", "import", "warnings", "warnings", ".", "warn", "(", "'`data` attribute is deprecated and will be removed in a future release, '", "'use %s as an iterable instead'", "%", "(", "PaginatedResponse", ",", ")", ",", "category", "=", "DeprecationWarning", ",", "stacklevel", "=", "2", "# log wherever '.data' is referenced, rather than this line", ")", "return", "list", "(", "self", ")" ]
Deprecated. Returns the data as a `list`
[ "Deprecated", ".", "Returns", "the", "data", "as", "a", "list" ]
python
train
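The stacklevel=2 argument is what makes the warning point at the caller's '.data' access rather than at the property body. A minimal standalone sketch of the same pattern; the function name and message are made up.

import warnings

def deprecated_data():
    warnings.warn(
        "`data` is deprecated, iterate over the response instead",
        category=DeprecationWarning,
        stacklevel=2,  # report the warning at the caller's line, not this one
    )
    return []

warnings.simplefilter("always", DeprecationWarning)
deprecated_data()  # the emitted warning is attributed to this call site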
MacHu-GWU/uszipcode-project
uszipcode/pkg/sqlalchemy_mate/crud/inserting.py
https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/crud/inserting.py#L16-L61
def smart_insert(engine, table, data, minimal_size=5): """ An optimized Insert strategy. Guarantee successful and highest insertion speed. But ATOMIC WRITE IS NOT ENSURED IF THE PROGRAM IS INTERRUPTED. **中文文档** 在Insert中, 如果已经预知不会出现IntegrityError, 那么使用Bulk Insert的速度要 远远快于逐条Insert。而如果无法预知, 那么我们采用如下策略: 1. 尝试Bulk Insert, Bulk Insert由于在结束前不Commit, 所以速度很快。 2. 如果失败了, 那么对数据的条数开平方根, 进行分包, 然后对每个包重复该逻辑。 3. 若还是尝试失败, 则继续分包, 当分包的大小小于一定数量时, 则使用逐条插入。 直到成功为止。 该Insert策略在内存上需要额外的 sqrt(nbytes) 的开销, 跟原数据相比体积很小。 但时间上是各种情况下平均最优的。 """ insert = table.insert() if isinstance(data, list): # 首先进行尝试bulk insert try: engine.execute(insert, data) # 失败了 except IntegrityError: # 分析数据量 n = len(data) # 如果数据条数多于一定数量 if n >= minimal_size ** 2: # 则进行分包 n_chunk = math.floor(math.sqrt(n)) for chunk in grouper_list(data, n_chunk): smart_insert(engine, table, chunk, minimal_size) # 否则则一条条地逐条插入 else: for row in data: try: engine.execute(insert, row) except IntegrityError: pass else: try: engine.execute(insert, data) except IntegrityError: pass
[ "def", "smart_insert", "(", "engine", ",", "table", ",", "data", ",", "minimal_size", "=", "5", ")", ":", "insert", "=", "table", ".", "insert", "(", ")", "if", "isinstance", "(", "data", ",", "list", ")", ":", "# 首先进行尝试bulk insert", "try", ":", "engine", ".", "execute", "(", "insert", ",", "data", ")", "# 失败了", "except", "IntegrityError", ":", "# 分析数据量", "n", "=", "len", "(", "data", ")", "# 如果数据条数多于一定数量", "if", "n", ">=", "minimal_size", "**", "2", ":", "# 则进行分包", "n_chunk", "=", "math", ".", "floor", "(", "math", ".", "sqrt", "(", "n", ")", ")", "for", "chunk", "in", "grouper_list", "(", "data", ",", "n_chunk", ")", ":", "smart_insert", "(", "engine", ",", "table", ",", "chunk", ",", "minimal_size", ")", "# 否则则一条条地逐条插入", "else", ":", "for", "row", "in", "data", ":", "try", ":", "engine", ".", "execute", "(", "insert", ",", "row", ")", "except", "IntegrityError", ":", "pass", "else", ":", "try", ":", "engine", ".", "execute", "(", "insert", ",", "data", ")", "except", "IntegrityError", ":", "pass" ]
An optimized Insert strategy. Guarantee successful and highest insertion speed. But ATOMIC WRITE IS NOT ENSURED IF THE PROGRAM IS INTERRUPTED. **中文文档** 在Insert中, 如果已经预知不会出现IntegrityError, 那么使用Bulk Insert的速度要 远远快于逐条Insert。而如果无法预知, 那么我们采用如下策略: 1. 尝试Bulk Insert, Bulk Insert由于在结束前不Commit, 所以速度很快。 2. 如果失败了, 那么对数据的条数开平方根, 进行分包, 然后对每个包重复该逻辑。 3. 若还是尝试失败, 则继续分包, 当分包的大小小于一定数量时, 则使用逐条插入。 直到成功为止。 该Insert策略在内存上需要额外的 sqrt(nbytes) 的开销, 跟原数据相比体积很小。 但时间上是各种情况下平均最优的。
[ "An", "optimized", "Insert", "strategy", ".", "Guarantee", "successful", "and", "highest", "insertion", "speed", ".", "But", "ATOMIC", "WRITE", "IS", "NOT", "ENSURED", "IF", "THE", "PROGRAM", "IS", "INTERRUPTED", "." ]
python
train
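For readers who do not read Chinese, the docstring above describes the strategy: attempt one bulk insert; on IntegrityError split the batch into roughly sqrt(n)-sized chunks and recurse; once a chunk has fewer than minimal_size**2 rows, fall back to row-by-row inserts that skip duplicates. A usage sketch follows, assuming the module path mirrors the file path shown and a SQLAlchemy 1.x engine (the helper calls engine.execute directly, which was removed in SQLAlchemy 2.0); the table and row contents are made up.

from sqlalchemy import Column, Integer, MetaData, String, Table, create_engine

from uszipcode.pkg.sqlalchemy_mate.crud.inserting import smart_insert  # helper shown above

engine = create_engine("sqlite:///:memory:")
metadata = MetaData()
users = Table(
    "users", metadata,
    Column("id", Integer, primary_key=True),
    Column("name", String(32)),
)
metadata.create_all(engine)

rows = [{"id": i, "name": "user%d" % i} for i in range(100)]
rows.append({"id": 5, "name": "duplicate"})  # forces the IntegrityError fallback path

smart_insert(engine, users, rows, minimal_size=5)
result = engine.execute(users.select()).fetchall()
print(len(result))  # 100 -- the duplicate id was skipped, everything else was inserted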
aetros/aetros-cli
aetros/git.py
https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/git.py#L691-L699
def add_file(self, git_path, content): """ Add a new file as blob in the storage and add its tree entry into the index. :param git_path: str :param content: str """ blob_id = self.write_blob(content) self.add_index('100644', blob_id, git_path)
[ "def", "add_file", "(", "self", ",", "git_path", ",", "content", ")", ":", "blob_id", "=", "self", ".", "write_blob", "(", "content", ")", "self", ".", "add_index", "(", "'100644'", ",", "blob_id", ",", "git_path", ")" ]
Add a new file as blob in the storage and add its tree entry into the index. :param git_path: str :param content: str
[ "Add", "a", "new", "file", "as", "blob", "in", "the", "storage", "and", "add", "its", "tree", "entry", "into", "the", "index", ".", ":", "param", "git_path", ":", "str", ":", "param", "content", ":", "str" ]
python
train
dnanexus/dx-toolkit
src/python/dxpy/bindings/__init__.py
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/__init__.py#L378-L389
def add_types(self, types, **kwargs): """ :param types: Types to add to the object :type types: list of strings :raises: :class:`~dxpy.exceptions.DXAPIError` if the object is not in the "open" state Adds each of the specified types to the remote object. Takes no action for types that are already listed for the object. """ self._add_types(self._dxid, {"types": types}, **kwargs)
[ "def", "add_types", "(", "self", ",", "types", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_add_types", "(", "self", ".", "_dxid", ",", "{", "\"types\"", ":", "types", "}", ",", "*", "*", "kwargs", ")" ]
:param types: Types to add to the object :type types: list of strings :raises: :class:`~dxpy.exceptions.DXAPIError` if the object is not in the "open" state Adds each of the specified types to the remote object. Takes no action for types that are already listed for the object.
[ ":", "param", "types", ":", "Types", "to", "add", "to", "the", "object", ":", "type", "types", ":", "list", "of", "strings", ":", "raises", ":", ":", "class", ":", "~dxpy", ".", "exceptions", ".", "DXAPIError", "if", "the", "object", "is", "not", "in", "the", "open", "state" ]
python
train
gwastro/pycbc
pycbc/strain/recalibrate.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/strain/recalibrate.py#L343-L408
def adjust_strain(self, strain, delta_fs=None, delta_qinv=None, delta_fc=None, kappa_c=1.0, kappa_tst_re=1.0, kappa_tst_im=0.0, kappa_pu_re=1.0, kappa_pu_im=0.0): """Adjust the FrequencySeries strain by changing the time-dependent calibration parameters kappa_c(t), kappa_a(t), f_c(t), fs, and qinv. Parameters ---------- strain : FrequencySeries The strain data to be adjusted. delta_fc : float Change in coupled-cavity (CC) pole at time t. kappa_c : float Scalar correction factor for sensing function c0 at time t. kappa_tst_re : float Real part of scalar correction factor for actuation function A_{tst0} at time t. kappa_tst_im : float Imaginary part of scalar correction factor for actuation function A_tst0 at time t. kappa_pu_re : float Real part of scalar correction factor for actuation function A_{pu0} at time t. kappa_pu_im : float Imaginary part of scalar correction factor for actuation function A_{pu0} at time t. fs : float Spring frequency for signal recycling cavity. qinv : float Inverse quality factor for signal recycling cavity. Returns ------- strain_adjusted : FrequencySeries The adjusted strain. """ fc = self.fc0 + delta_fc if delta_fc else self.fc0 fs = self.fs0 + delta_fs if delta_fs else self.fs0 qinv = self.qinv0 + delta_qinv if delta_qinv else self.qinv0 # calculate adjusted response function r_adjusted = self.update_r(fs=fs, qinv=qinv, fc=fc, kappa_c=kappa_c, kappa_tst_re=kappa_tst_re, kappa_tst_im=kappa_tst_im, kappa_pu_re=kappa_pu_re, kappa_pu_im=kappa_pu_im) # calculate error function k = r_adjusted / self.r0 # decompose into amplitude and unwrapped phase k_amp = np.abs(k) k_phase = np.unwrap(np.angle(k)) # convert to FrequencySeries by interpolating then resampling order = 1 k_amp_off = UnivariateSpline(self.freq, k_amp, k=order, s=0) k_phase_off = UnivariateSpline(self.freq, k_phase, k=order, s=0) freq_even = strain.sample_frequencies.numpy() k_even_sample = k_amp_off(freq_even) * \ np.exp(1.0j * k_phase_off(freq_even)) strain_adjusted = FrequencySeries(strain.numpy() * \ k_even_sample, delta_f=strain.delta_f) return strain_adjusted
[ "def", "adjust_strain", "(", "self", ",", "strain", ",", "delta_fs", "=", "None", ",", "delta_qinv", "=", "None", ",", "delta_fc", "=", "None", ",", "kappa_c", "=", "1.0", ",", "kappa_tst_re", "=", "1.0", ",", "kappa_tst_im", "=", "0.0", ",", "kappa_pu_re", "=", "1.0", ",", "kappa_pu_im", "=", "0.0", ")", ":", "fc", "=", "self", ".", "fc0", "+", "delta_fc", "if", "delta_fc", "else", "self", ".", "fc0", "fs", "=", "self", ".", "fs0", "+", "delta_fs", "if", "delta_fs", "else", "self", ".", "fs0", "qinv", "=", "self", ".", "qinv0", "+", "delta_qinv", "if", "delta_qinv", "else", "self", ".", "qinv0", "# calculate adjusted response function", "r_adjusted", "=", "self", ".", "update_r", "(", "fs", "=", "fs", ",", "qinv", "=", "qinv", ",", "fc", "=", "fc", ",", "kappa_c", "=", "kappa_c", ",", "kappa_tst_re", "=", "kappa_tst_re", ",", "kappa_tst_im", "=", "kappa_tst_im", ",", "kappa_pu_re", "=", "kappa_pu_re", ",", "kappa_pu_im", "=", "kappa_pu_im", ")", "# calculate error function", "k", "=", "r_adjusted", "/", "self", ".", "r0", "# decompose into amplitude and unwrapped phase", "k_amp", "=", "np", ".", "abs", "(", "k", ")", "k_phase", "=", "np", ".", "unwrap", "(", "np", ".", "angle", "(", "k", ")", ")", "# convert to FrequencySeries by interpolating then resampling", "order", "=", "1", "k_amp_off", "=", "UnivariateSpline", "(", "self", ".", "freq", ",", "k_amp", ",", "k", "=", "order", ",", "s", "=", "0", ")", "k_phase_off", "=", "UnivariateSpline", "(", "self", ".", "freq", ",", "k_phase", ",", "k", "=", "order", ",", "s", "=", "0", ")", "freq_even", "=", "strain", ".", "sample_frequencies", ".", "numpy", "(", ")", "k_even_sample", "=", "k_amp_off", "(", "freq_even", ")", "*", "np", ".", "exp", "(", "1.0j", "*", "k_phase_off", "(", "freq_even", ")", ")", "strain_adjusted", "=", "FrequencySeries", "(", "strain", ".", "numpy", "(", ")", "*", "k_even_sample", ",", "delta_f", "=", "strain", ".", "delta_f", ")", "return", "strain_adjusted" ]
Adjust the FrequencySeries strain by changing the time-dependent calibration parameters kappa_c(t), kappa_a(t), f_c(t), fs, and qinv. Parameters ---------- strain : FrequencySeries The strain data to be adjusted. delta_fc : float Change in coupled-cavity (CC) pole at time t. kappa_c : float Scalar correction factor for sensing function c0 at time t. kappa_tst_re : float Real part of scalar correction factor for actuation function A_{tst0} at time t. kappa_tst_im : float Imaginary part of scalar correction factor for actuation function A_tst0 at time t. kappa_pu_re : float Real part of scalar correction factor for actuation function A_{pu0} at time t. kappa_pu_im : float Imaginary part of scalar correction factor for actuation function A_{pu0} at time t. fs : float Spring frequency for signal recycling cavity. qinv : float Inverse quality factor for signal recycling cavity. Returns ------- strain_adjusted : FrequencySeries The adjusted strain.
[ "Adjust", "the", "FrequencySeries", "strain", "by", "changing", "the", "time", "-", "dependent", "calibration", "parameters", "kappa_c", "(", "t", ")", "kappa_a", "(", "t", ")", "f_c", "(", "t", ")", "fs", "and", "qinv", "." ]
python
train
GNS3/gns3-server
gns3server/controller/gns3vm/virtualbox_gns3_vm.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/controller/gns3vm/virtualbox_gns3_vm.py#L153-L212
def start(self): """ Start the GNS3 VM. """ # get a NAT interface number nat_interface_number = yield from self._look_for_interface("nat") if nat_interface_number < 0: raise GNS3VMError("The GNS3 VM: {} must have a NAT interface configured in order to start".format(self.vmname)) hostonly_interface_number = yield from self._look_for_interface("hostonly") if hostonly_interface_number < 0: raise GNS3VMError("The GNS3 VM: {} must have a host only interface configured in order to start".format(self.vmname)) vboxnet = yield from self._look_for_vboxnet(hostonly_interface_number) if vboxnet is None: raise GNS3VMError("VirtualBox host-only network could not be found for interface {} on GNS3 VM".format(hostonly_interface_number)) if not (yield from self._check_dhcp_server(vboxnet)): raise GNS3VMError("DHCP must be enabled on VirtualBox host-only network: {} for GNS3 VM".format(vboxnet)) vm_state = yield from self._get_state() log.info('"{}" state is {}'.format(self._vmname, vm_state)) if vm_state == "poweroff": yield from self.set_vcpus(self.vcpus) yield from self.set_ram(self.ram) if vm_state in ("poweroff", "saved"): # start the VM if it is not running args = [self._vmname] if self._headless: args.extend(["--type", "headless"]) yield from self._execute("startvm", args) elif vm_state == "paused": args = [self._vmname, "resume"] yield from self._execute("controlvm", args) ip_address = "127.0.0.1" try: # get a random port on localhost with socket.socket() as s: s.bind((ip_address, 0)) api_port = s.getsockname()[1] except OSError as e: raise GNS3VMError("Error while getting random port: {}".format(e)) if (yield from self._check_vbox_port_forwarding()): # delete the GNS3VM NAT port forwarding rule if it exists log.info("Removing GNS3VM NAT port forwarding rule from interface {}".format(nat_interface_number)) yield from self._execute("controlvm", [self._vmname, "natpf{}".format(nat_interface_number), "delete", "GNS3VM"]) # add a GNS3VM NAT port forwarding rule to redirect 127.0.0.1 with random port to port 3080 in the VM log.info("Adding GNS3VM NAT port forwarding rule with port {} to interface {}".format(api_port, nat_interface_number)) yield from self._execute("controlvm", [self._vmname, "natpf{}".format(nat_interface_number), "GNS3VM,tcp,{},{},,3080".format(ip_address, api_port)]) self.ip_address = yield from self._get_ip(hostonly_interface_number, api_port) self.port = 3080 log.info("GNS3 VM has been started with IP {}".format(self.ip_address)) self.running = True
[ "def", "start", "(", "self", ")", ":", "# get a NAT interface number", "nat_interface_number", "=", "yield", "from", "self", ".", "_look_for_interface", "(", "\"nat\"", ")", "if", "nat_interface_number", "<", "0", ":", "raise", "GNS3VMError", "(", "\"The GNS3 VM: {} must have a NAT interface configured in order to start\"", ".", "format", "(", "self", ".", "vmname", ")", ")", "hostonly_interface_number", "=", "yield", "from", "self", ".", "_look_for_interface", "(", "\"hostonly\"", ")", "if", "hostonly_interface_number", "<", "0", ":", "raise", "GNS3VMError", "(", "\"The GNS3 VM: {} must have a host only interface configured in order to start\"", ".", "format", "(", "self", ".", "vmname", ")", ")", "vboxnet", "=", "yield", "from", "self", ".", "_look_for_vboxnet", "(", "hostonly_interface_number", ")", "if", "vboxnet", "is", "None", ":", "raise", "GNS3VMError", "(", "\"VirtualBox host-only network could not be found for interface {} on GNS3 VM\"", ".", "format", "(", "hostonly_interface_number", ")", ")", "if", "not", "(", "yield", "from", "self", ".", "_check_dhcp_server", "(", "vboxnet", ")", ")", ":", "raise", "GNS3VMError", "(", "\"DHCP must be enabled on VirtualBox host-only network: {} for GNS3 VM\"", ".", "format", "(", "vboxnet", ")", ")", "vm_state", "=", "yield", "from", "self", ".", "_get_state", "(", ")", "log", ".", "info", "(", "'\"{}\" state is {}'", ".", "format", "(", "self", ".", "_vmname", ",", "vm_state", ")", ")", "if", "vm_state", "==", "\"poweroff\"", ":", "yield", "from", "self", ".", "set_vcpus", "(", "self", ".", "vcpus", ")", "yield", "from", "self", ".", "set_ram", "(", "self", ".", "ram", ")", "if", "vm_state", "in", "(", "\"poweroff\"", ",", "\"saved\"", ")", ":", "# start the VM if it is not running", "args", "=", "[", "self", ".", "_vmname", "]", "if", "self", ".", "_headless", ":", "args", ".", "extend", "(", "[", "\"--type\"", ",", "\"headless\"", "]", ")", "yield", "from", "self", ".", "_execute", "(", "\"startvm\"", ",", "args", ")", "elif", "vm_state", "==", "\"paused\"", ":", "args", "=", "[", "self", ".", "_vmname", ",", "\"resume\"", "]", "yield", "from", "self", ".", "_execute", "(", "\"controlvm\"", ",", "args", ")", "ip_address", "=", "\"127.0.0.1\"", "try", ":", "# get a random port on localhost", "with", "socket", ".", "socket", "(", ")", "as", "s", ":", "s", ".", "bind", "(", "(", "ip_address", ",", "0", ")", ")", "api_port", "=", "s", ".", "getsockname", "(", ")", "[", "1", "]", "except", "OSError", "as", "e", ":", "raise", "GNS3VMError", "(", "\"Error while getting random port: {}\"", ".", "format", "(", "e", ")", ")", "if", "(", "yield", "from", "self", ".", "_check_vbox_port_forwarding", "(", ")", ")", ":", "# delete the GNS3VM NAT port forwarding rule if it exists", "log", ".", "info", "(", "\"Removing GNS3VM NAT port forwarding rule from interface {}\"", ".", "format", "(", "nat_interface_number", ")", ")", "yield", "from", "self", ".", "_execute", "(", "\"controlvm\"", ",", "[", "self", ".", "_vmname", ",", "\"natpf{}\"", ".", "format", "(", "nat_interface_number", ")", ",", "\"delete\"", ",", "\"GNS3VM\"", "]", ")", "# add a GNS3VM NAT port forwarding rule to redirect 127.0.0.1 with random port to port 3080 in the VM", "log", ".", "info", "(", "\"Adding GNS3VM NAT port forwarding rule with port {} to interface {}\"", ".", "format", "(", "api_port", ",", "nat_interface_number", ")", ")", "yield", "from", "self", ".", "_execute", "(", "\"controlvm\"", ",", "[", "self", ".", "_vmname", ",", "\"natpf{}\"", ".", "format", "(", 
"nat_interface_number", ")", ",", "\"GNS3VM,tcp,{},{},,3080\"", ".", "format", "(", "ip_address", ",", "api_port", ")", "]", ")", "self", ".", "ip_address", "=", "yield", "from", "self", ".", "_get_ip", "(", "hostonly_interface_number", ",", "api_port", ")", "self", ".", "port", "=", "3080", "log", ".", "info", "(", "\"GNS3 VM has been started with IP {}\"", ".", "format", "(", "self", ".", "ip_address", ")", ")", "self", ".", "running", "=", "True" ]
Start the GNS3 VM.
[ "Start", "the", "GNS3", "VM", "." ]
python
train
tanghaibao/jcvi
jcvi/assembly/hic.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/hic.py#L1043-L1075
def optimize_ordering(fwtour, clm, phase, cpus): """ Optimize the ordering of contigs by Genetic Algorithm (GA). """ from .chic import score_evaluate_M # Prepare input files tour_contigs = clm.active_contigs tour_sizes = clm.active_sizes tour_M = clm.M tour = clm.tour signs = clm.signs oo = clm.oo def callback(tour, gen, phase, oo): fitness = tour.fitness if hasattr(tour, "fitness") else None label = "GA{}-{}".format(phase, gen) if fitness: fitness = "{0}".format(fitness).split(",")[0].replace("(", "") label += "-" + fitness if gen % 20 == 0: print_tour(fwtour, tour, label, tour_contigs, oo, signs=signs) return tour callbacki = partial(callback, phase=phase, oo=oo) toolbox = GA_setup(tour) toolbox.register("evaluate", score_evaluate_M, tour_sizes=tour_sizes, tour_M=tour_M) tour, tour_fitness = GA_run(toolbox, ngen=1000, npop=100, cpus=cpus, callback=callbacki) clm.tour = tour return tour
[ "def", "optimize_ordering", "(", "fwtour", ",", "clm", ",", "phase", ",", "cpus", ")", ":", "from", ".", "chic", "import", "score_evaluate_M", "# Prepare input files", "tour_contigs", "=", "clm", ".", "active_contigs", "tour_sizes", "=", "clm", ".", "active_sizes", "tour_M", "=", "clm", ".", "M", "tour", "=", "clm", ".", "tour", "signs", "=", "clm", ".", "signs", "oo", "=", "clm", ".", "oo", "def", "callback", "(", "tour", ",", "gen", ",", "phase", ",", "oo", ")", ":", "fitness", "=", "tour", ".", "fitness", "if", "hasattr", "(", "tour", ",", "\"fitness\"", ")", "else", "None", "label", "=", "\"GA{}-{}\"", ".", "format", "(", "phase", ",", "gen", ")", "if", "fitness", ":", "fitness", "=", "\"{0}\"", ".", "format", "(", "fitness", ")", ".", "split", "(", "\",\"", ")", "[", "0", "]", ".", "replace", "(", "\"(\"", ",", "\"\"", ")", "label", "+=", "\"-\"", "+", "fitness", "if", "gen", "%", "20", "==", "0", ":", "print_tour", "(", "fwtour", ",", "tour", ",", "label", ",", "tour_contigs", ",", "oo", ",", "signs", "=", "signs", ")", "return", "tour", "callbacki", "=", "partial", "(", "callback", ",", "phase", "=", "phase", ",", "oo", "=", "oo", ")", "toolbox", "=", "GA_setup", "(", "tour", ")", "toolbox", ".", "register", "(", "\"evaluate\"", ",", "score_evaluate_M", ",", "tour_sizes", "=", "tour_sizes", ",", "tour_M", "=", "tour_M", ")", "tour", ",", "tour_fitness", "=", "GA_run", "(", "toolbox", ",", "ngen", "=", "1000", ",", "npop", "=", "100", ",", "cpus", "=", "cpus", ",", "callback", "=", "callbacki", ")", "clm", ".", "tour", "=", "tour", "return", "tour" ]
Optimize the ordering of contigs by Genetic Algorithm (GA).
[ "Optimize", "the", "ordering", "of", "contigs", "by", "Genetic", "Algorithm", "(", "GA", ")", "." ]
python
train
CTPUG/wafer
wafer/registration/views.py
https://github.com/CTPUG/wafer/blob/a20af3c399267f76373dc342f4d542a9bc457c35/wafer/registration/views.py#L12-L20
def redirect_profile(request): ''' The default destination from logging in, redirect to the actual profile URL ''' if request.user.is_authenticated: return HttpResponseRedirect(reverse('wafer_user_profile', args=(request.user.username,))) else: return redirect_to_login(next=reverse(redirect_profile))
[ "def", "redirect_profile", "(", "request", ")", ":", "if", "request", ".", "user", ".", "is_authenticated", ":", "return", "HttpResponseRedirect", "(", "reverse", "(", "'wafer_user_profile'", ",", "args", "=", "(", "request", ".", "user", ".", "username", ",", ")", ")", ")", "else", ":", "return", "redirect_to_login", "(", "next", "=", "reverse", "(", "redirect_profile", ")", ")" ]
The default destination from logging in, redirect to the actual profile URL
[ "The", "default", "destination", "from", "logging", "in", "redirect", "to", "the", "actual", "profile", "URL" ]
python
train
MartinThoma/hwrt
hwrt/filter_dataset.py
https://github.com/MartinThoma/hwrt/blob/725c21a3d0f5a30b8492cbc184b3688ceb364e1c/hwrt/filter_dataset.py#L179-L197
def read_csv(filepath): """ Read a CSV into a list of dictionarys. The first line of the CSV determines the keys of the dictionary. Parameters ---------- filepath : string Returns ------- list of dictionaries """ symbols = [] with open(filepath, 'rb') as csvfile: spamreader = csv.DictReader(csvfile, delimiter=',', quotechar='"') for row in spamreader: symbols.append(row) return symbols
[ "def", "read_csv", "(", "filepath", ")", ":", "symbols", "=", "[", "]", "with", "open", "(", "filepath", ",", "'rb'", ")", "as", "csvfile", ":", "spamreader", "=", "csv", ".", "DictReader", "(", "csvfile", ",", "delimiter", "=", "','", ",", "quotechar", "=", "'\"'", ")", "for", "row", "in", "spamreader", ":", "symbols", ".", "append", "(", "row", ")", "return", "symbols" ]
Read a CSV into a list of dictionaries. The first line of the CSV determines the keys of the dictionary. Parameters ---------- filepath : string Returns ------- list of dictionaries
[ "Read", "a", "CSV", "into", "a", "list", "of", "dictionarys", ".", "The", "first", "line", "of", "the", "CSV", "determines", "the", "keys", "of", "the", "dictionary", "." ]
python
train
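The helper opens the file in 'rb' mode, which is a Python 2 idiom; on Python 3, csv.DictReader expects a text stream. A standalone sketch of the same DictReader pattern in text mode, with made-up column names and rows:

import csv
import io

csv_text = "symbol_id,latex\n31,A\n32,B\n"  # made-up sample data
reader = csv.DictReader(io.StringIO(csv_text), delimiter=",", quotechar='"')
symbols = [row for row in reader]
print(symbols[0]["latex"])  # A -- each row becomes a dict keyed by the header line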
coyo8/parinx
parinx/parser.py
https://github.com/coyo8/parinx/blob/6493798ceba8089345d970f71be4a896eb6b081d/parinx/parser.py#L125-L151
def split_docstring(docstring): """ Separates the method's description and paramter's :return: Return description string and list of fields strings """ docstring_list = [line.strip() for line in docstring.splitlines()] description_list = list( takewhile(lambda line: not (line.startswith(':') or line.startswith('@inherit')), docstring_list)) description = ' '.join(description_list).strip() first_field_line_number = len(description_list) fields = [] if first_field_line_number >= len(docstring_list): return description, fields # only description, without any field last_field_lines = [docstring_list[first_field_line_number]] for line in docstring_list[first_field_line_number + 1:]: if line.strip().startswith(':') or line.strip().startswith('@inherit'): fields.append(' '.join(last_field_lines)) last_field_lines = [line] else: last_field_lines.append(line) fields.append(' '.join(last_field_lines)) return description, fields
[ "def", "split_docstring", "(", "docstring", ")", ":", "docstring_list", "=", "[", "line", ".", "strip", "(", ")", "for", "line", "in", "docstring", ".", "splitlines", "(", ")", "]", "description_list", "=", "list", "(", "takewhile", "(", "lambda", "line", ":", "not", "(", "line", ".", "startswith", "(", "':'", ")", "or", "line", ".", "startswith", "(", "'@inherit'", ")", ")", ",", "docstring_list", ")", ")", "description", "=", "' '", ".", "join", "(", "description_list", ")", ".", "strip", "(", ")", "first_field_line_number", "=", "len", "(", "description_list", ")", "fields", "=", "[", "]", "if", "first_field_line_number", ">=", "len", "(", "docstring_list", ")", ":", "return", "description", ",", "fields", "# only description, without any field", "last_field_lines", "=", "[", "docstring_list", "[", "first_field_line_number", "]", "]", "for", "line", "in", "docstring_list", "[", "first_field_line_number", "+", "1", ":", "]", ":", "if", "line", ".", "strip", "(", ")", ".", "startswith", "(", "':'", ")", "or", "line", ".", "strip", "(", ")", ".", "startswith", "(", "'@inherit'", ")", ":", "fields", ".", "append", "(", "' '", ".", "join", "(", "last_field_lines", ")", ")", "last_field_lines", "=", "[", "line", "]", "else", ":", "last_field_lines", ".", "append", "(", "line", ")", "fields", ".", "append", "(", "' '", ".", "join", "(", "last_field_lines", ")", ")", "return", "description", ",", "fields" ]
Separates the method's description and parameters. :return: Return description string and list of fields strings
[ "Separates", "the", "method", "s", "description", "and", "paramter", "s" ]
python
train
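A small usage sketch of the splitter above, assuming parinx is importable from the path shown; the docstring content is made up.

from parinx.parser import split_docstring  # helper shown above

docstring = """Compute a thing.

:param x: the input value
:return: the squared value
"""

description, fields = split_docstring(docstring)
print(description)  # Compute a thing.
print(fields)       # [':param x: the input value', ':return: the squared value']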
Kozea/cairocffi
cairocffi/context.py
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/context.py#L1356-L1368
def paint_with_alpha(self, alpha): """A drawing operator that paints the current source everywhere within the current clip region using a mask of constant alpha value alpha. The effect is similar to :meth:`paint`, but the drawing is faded out using the :obj:`alpha` value. :type alpha: float :param alpha: Alpha value, between 0 (transparent) and 1 (opaque). """ cairo.cairo_paint_with_alpha(self._pointer, alpha) self._check_status()
[ "def", "paint_with_alpha", "(", "self", ",", "alpha", ")", ":", "cairo", ".", "cairo_paint_with_alpha", "(", "self", ".", "_pointer", ",", "alpha", ")", "self", ".", "_check_status", "(", ")" ]
A drawing operator that paints the current source everywhere within the current clip region using a mask of constant alpha value alpha. The effect is similar to :meth:`paint`, but the drawing is faded out using the :obj:`alpha` value. :type alpha: float :param alpha: Alpha value, between 0 (transparent) and 1 (opaque).
[ "A", "drawing", "operator", "that", "paints", "the", "current", "source", "everywhere", "within", "the", "current", "clip", "region", "using", "a", "mask", "of", "constant", "alpha", "value", "alpha", ".", "The", "effect", "is", "similar", "to", ":", "meth", ":", "paint", "but", "the", "drawing", "is", "faded", "out", "using", "the", ":", "obj", ":", "alpha", "value", "." ]
python
train
nugget/python-insteonplm
insteonplm/tools.py
https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/tools.py#L258-L277
async def load_device_aldb(self, addr, clear=True): """Read the device ALDB.""" dev_addr = Address(addr) device = None if dev_addr == self.plm.address: device = self.plm else: device = self.plm.devices[dev_addr.id] if device: if clear: device.aldb.clear() device.read_aldb() await asyncio.sleep(1, loop=self.loop) while device.aldb.status == ALDBStatus.LOADING: await asyncio.sleep(1, loop=self.loop) if device.aldb.status == ALDBStatus.LOADED: _LOGGING.info('ALDB loaded for device %s', addr) self.print_device_aldb(addr) else: _LOGGING.error('Could not find device %s', addr)
[ "async", "def", "load_device_aldb", "(", "self", ",", "addr", ",", "clear", "=", "True", ")", ":", "dev_addr", "=", "Address", "(", "addr", ")", "device", "=", "None", "if", "dev_addr", "==", "self", ".", "plm", ".", "address", ":", "device", "=", "self", ".", "plm", "else", ":", "device", "=", "self", ".", "plm", ".", "devices", "[", "dev_addr", ".", "id", "]", "if", "device", ":", "if", "clear", ":", "device", ".", "aldb", ".", "clear", "(", ")", "device", ".", "read_aldb", "(", ")", "await", "asyncio", ".", "sleep", "(", "1", ",", "loop", "=", "self", ".", "loop", ")", "while", "device", ".", "aldb", ".", "status", "==", "ALDBStatus", ".", "LOADING", ":", "await", "asyncio", ".", "sleep", "(", "1", ",", "loop", "=", "self", ".", "loop", ")", "if", "device", ".", "aldb", ".", "status", "==", "ALDBStatus", ".", "LOADED", ":", "_LOGGING", ".", "info", "(", "'ALDB loaded for device %s'", ",", "addr", ")", "self", ".", "print_device_aldb", "(", "addr", ")", "else", ":", "_LOGGING", ".", "error", "(", "'Could not find device %s'", ",", "addr", ")" ]
Read the device ALDB.
[ "Read", "the", "device", "ALDB", "." ]
python
train
pyviz/holoviews
holoviews/core/util.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/util.py#L808-L823
def asarray(arraylike, strict=True): """ Converts arraylike objects to NumPy ndarray types. Errors if object is not arraylike and strict option is enabled. """ if isinstance(arraylike, np.ndarray): return arraylike elif isinstance(arraylike, list): return np.asarray(arraylike, dtype=object) elif not isinstance(arraylike, np.ndarray) and isinstance(arraylike, arraylike_types): return arraylike.values elif hasattr(arraylike, '__array__'): return np.asarray(arraylike) elif strict: raise ValueError('Could not convert %s type to array' % type(arraylike)) return arraylike
[ "def", "asarray", "(", "arraylike", ",", "strict", "=", "True", ")", ":", "if", "isinstance", "(", "arraylike", ",", "np", ".", "ndarray", ")", ":", "return", "arraylike", "elif", "isinstance", "(", "arraylike", ",", "list", ")", ":", "return", "np", ".", "asarray", "(", "arraylike", ",", "dtype", "=", "object", ")", "elif", "not", "isinstance", "(", "arraylike", ",", "np", ".", "ndarray", ")", "and", "isinstance", "(", "arraylike", ",", "arraylike_types", ")", ":", "return", "arraylike", ".", "values", "elif", "hasattr", "(", "arraylike", ",", "'__array__'", ")", ":", "return", "np", ".", "asarray", "(", "arraylike", ")", "elif", "strict", ":", "raise", "ValueError", "(", "'Could not convert %s type to array'", "%", "type", "(", "arraylike", ")", ")", "return", "arraylike" ]
Converts arraylike objects to NumPy ndarray types. Errors if object is not arraylike and strict option is enabled.
[ "Converts", "arraylike", "objects", "to", "NumPy", "ndarray", "types", ".", "Errors", "if", "object", "is", "not", "arraylike", "and", "strict", "option", "is", "enabled", "." ]
python
train
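A usage sketch of the coercion rules above, assuming holoviews is importable from the path shown; the HasArray class is a made-up example of an object exposing __array__.

import numpy as np

from holoviews.core.util import asarray  # helper shown above

class HasArray:
    """Made-up object: anything exposing __array__ goes through np.asarray."""
    def __array__(self):
        return np.array([1.0, 2.0, 3.0])

print(asarray(np.arange(3)))                    # ndarrays pass through unchanged
print(asarray([1, "a", None]).dtype)            # object -- lists become object arrays
print(asarray(HasArray()))                      # [1. 2. 3.]
print(asarray("not array-like", strict=False))  # returned as-is when strict is off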
geomet/geomet
geomet/wkb.py
https://github.com/geomet/geomet/blob/b82d7118113ab723751eba3de5df98c368423c2b/geomet/wkb.py#L414-L434
def _dump_linestring(obj, big_endian, meta): """ Dump a GeoJSON-like `dict` to a linestring WKB string. Input parameters and output are similar to :func:`_dump_point`. """ coords = obj['coordinates'] vertex = coords[0] # Infer the number of dimensions from the first vertex num_dims = len(vertex) wkb_string, byte_fmt, byte_order = _header_bytefmt_byteorder( 'LineString', num_dims, big_endian, meta ) # append number of vertices in linestring wkb_string += struct.pack('%sl' % byte_order, len(coords)) for vertex in coords: wkb_string += struct.pack(byte_fmt, *vertex) return wkb_string
[ "def", "_dump_linestring", "(", "obj", ",", "big_endian", ",", "meta", ")", ":", "coords", "=", "obj", "[", "'coordinates'", "]", "vertex", "=", "coords", "[", "0", "]", "# Infer the number of dimensions from the first vertex", "num_dims", "=", "len", "(", "vertex", ")", "wkb_string", ",", "byte_fmt", ",", "byte_order", "=", "_header_bytefmt_byteorder", "(", "'LineString'", ",", "num_dims", ",", "big_endian", ",", "meta", ")", "# append number of vertices in linestring", "wkb_string", "+=", "struct", ".", "pack", "(", "'%sl'", "%", "byte_order", ",", "len", "(", "coords", ")", ")", "for", "vertex", "in", "coords", ":", "wkb_string", "+=", "struct", ".", "pack", "(", "byte_fmt", ",", "*", "vertex", ")", "return", "wkb_string" ]
Dump a GeoJSON-like `dict` to a linestring WKB string. Input parameters and output are similar to :func:`_dump_point`.
[ "Dump", "a", "GeoJSON", "-", "like", "dict", "to", "a", "linestring", "WKB", "string", "." ]
python
train
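A hand-rolled sketch of the binary layout the dumper produces for a 2D little-endian LineString; the header packing normally lives in _header_bytefmt_byteorder (not shown), so the header bytes here are written out manually for illustration.

import struct

coords = [(0.0, 0.0), (1.0, 1.0), (2.0, 2.0)]  # LINESTRING (0 0, 1 1, 2 2)

wkb = struct.pack('<B', 1)              # byte order marker: 1 = little endian
wkb += struct.pack('<I', 2)             # geometry type: 2 = LineString
wkb += struct.pack('<l', len(coords))   # number of vertices, as in the code above
for vertex in coords:
    wkb += struct.pack('<2d', *vertex)  # each vertex as two little-endian doubles

print(wkb.hex())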
playpauseandstop/rororo
rororo/settings.py
https://github.com/playpauseandstop/rororo/blob/28a04e8028c29647941e727116335e9d6fd64c27/rororo/settings.py#L70-L94
def inject_settings(mixed: Union[str, Settings], context: MutableMapping[str, Any], fail_silently: bool = False) -> None: """Inject settings values to given context. :param mixed: Settings can be a string (that it will be read from Python path), Python module or dict-like instance. :param context: Context to assign settings key values. It should support dict-like item assingment. :param fail_silently: When enabled and reading settings from Python path ignore errors if given Python path couldn't be loaded. """ if isinstance(mixed, str): try: mixed = import_module(mixed) except Exception: if fail_silently: return raise for key, value in iter_settings(mixed): context[key] = value
[ "def", "inject_settings", "(", "mixed", ":", "Union", "[", "str", ",", "Settings", "]", ",", "context", ":", "MutableMapping", "[", "str", ",", "Any", "]", ",", "fail_silently", ":", "bool", "=", "False", ")", "->", "None", ":", "if", "isinstance", "(", "mixed", ",", "str", ")", ":", "try", ":", "mixed", "=", "import_module", "(", "mixed", ")", "except", "Exception", ":", "if", "fail_silently", ":", "return", "raise", "for", "key", ",", "value", "in", "iter_settings", "(", "mixed", ")", ":", "context", "[", "key", "]", "=", "value" ]
Inject settings values to given context. :param mixed: Settings can be a string (that it will be read from Python path), Python module or dict-like instance. :param context: Context to assign settings key values. It should support dict-like item assignment. :param fail_silently: When enabled and reading settings from Python path ignore errors if given Python path couldn't be loaded.
[ "Inject", "settings", "values", "to", "given", "context", "." ]
python
train
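A usage sketch, assuming rororo is importable from the path shown and that iter_settings (not shown) picks up upper-case module attributes; the stand-in settings module and its values are made up.

import types

from rororo.settings import inject_settings  # helper shown above

settings = types.ModuleType("settings")  # stand-in settings module (assumption)
settings.DEBUG = True
settings.DATABASE_URL = "sqlite:///db.sqlite3"

context = {}
inject_settings(settings, context)
print(context)

# A dotted path that cannot be imported is ignored when fail_silently is enabled:
inject_settings("no.such.module", context, fail_silently=True)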
kilometer-io/kilometer-python
kilometer/__init__.py
https://github.com/kilometer-io/kilometer-python/blob/22a720cfed5aa74d4957b4597f224cede2e7c0e5/kilometer/__init__.py#L44-L66
def add_user(self, user_id, custom_properties=None, headers=None, endpoint_url=None): """ Creates a new identified user if he doesn't exist. :param str user_id: identified user's ID :param dict custom_properties: user properties :param dict headers: custom request headers (if isn't set default values are used) :param str endpoint_url: where to send the request (if isn't set default value is used) :return: Response """ endpoint_url = endpoint_url or self._endpoint_url url = endpoint_url + '/users' headers = headers or self._default_headers() payload = {"user_id": user_id} if custom_properties is not None: payload["user_properties"] = custom_properties response = requests.post(url, headers=headers, json=payload) return response
[ "def", "add_user", "(", "self", ",", "user_id", ",", "custom_properties", "=", "None", ",", "headers", "=", "None", ",", "endpoint_url", "=", "None", ")", ":", "endpoint_url", "=", "endpoint_url", "or", "self", ".", "_endpoint_url", "url", "=", "endpoint_url", "+", "'/users'", "headers", "=", "headers", "or", "self", ".", "_default_headers", "(", ")", "payload", "=", "{", "\"user_id\"", ":", "user_id", "}", "if", "custom_properties", "is", "not", "None", ":", "payload", "[", "\"user_properties\"", "]", "=", "custom_properties", "response", "=", "requests", ".", "post", "(", "url", ",", "headers", "=", "headers", ",", "json", "=", "payload", ")", "return", "response" ]
Creates a new identified user if he doesn't exist. :param str user_id: identified user's ID :param dict custom_properties: user properties :param dict headers: custom request headers (if isn't set default values are used) :param str endpoint_url: where to send the request (if isn't set default value is used) :return: Response
[ "Creates", "a", "new", "identified", "user", "if", "he", "doesn", "t", "exist", "." ]
python
train
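The method boils down to a single POST against <endpoint_url>/users. A sketch of the equivalent raw request; the endpoint URL, headers and user data are assumptions (the real defaults come from the client instance).

import requests

endpoint_url = "https://api.example.com"        # assumed endpoint
headers = {"Content-Type": "application/json"}  # assumed headers

payload = {"user_id": "user-42", "user_properties": {"plan": "free"}}  # made-up user
response = requests.post(endpoint_url + "/users", headers=headers, json=payload)
print(response.status_code)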
lambdamusic/Ontospy
ontospy/core/ontospy.py
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/core/ontospy.py#L127-L142
def load_rdf(self, uri_or_path=None, data=None, file_obj=None, rdf_format="", verbose=False, hide_base_schemas=True, hide_implicit_types=True, hide_implicit_preds=True): """Load an RDF source into an ontospy/rdflib graph""" loader = RDFLoader(verbose=verbose) loader.load(uri_or_path, data, file_obj, rdf_format) self.rdflib_graph = loader.rdflib_graph self.sources = loader.sources_valid self.sparqlHelper = SparqlHelper(self.rdflib_graph) self.namespaces = sorted(self.rdflib_graph.namespaces())
[ "def", "load_rdf", "(", "self", ",", "uri_or_path", "=", "None", ",", "data", "=", "None", ",", "file_obj", "=", "None", ",", "rdf_format", "=", "\"\"", ",", "verbose", "=", "False", ",", "hide_base_schemas", "=", "True", ",", "hide_implicit_types", "=", "True", ",", "hide_implicit_preds", "=", "True", ")", ":", "loader", "=", "RDFLoader", "(", "verbose", "=", "verbose", ")", "loader", ".", "load", "(", "uri_or_path", ",", "data", ",", "file_obj", ",", "rdf_format", ")", "self", ".", "rdflib_graph", "=", "loader", ".", "rdflib_graph", "self", ".", "sources", "=", "loader", ".", "sources_valid", "self", ".", "sparqlHelper", "=", "SparqlHelper", "(", "self", ".", "rdflib_graph", ")", "self", ".", "namespaces", "=", "sorted", "(", "self", ".", "rdflib_graph", ".", "namespaces", "(", ")", ")" ]
Load an RDF source into an ontospy/rdflib graph
[ "Load", "an", "RDF", "source", "into", "an", "ontospy", "/", "rdflib", "graph" ]
python
train
openstack/horizon
openstack_dashboard/utils/settings.py
https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_dashboard/utils/settings.py#L41-L61
def import_dashboard_config(modules): """Imports configuration from all the modules and merges it.""" config = collections.defaultdict(dict) for module in modules: for submodule in import_submodules(module).values(): if hasattr(submodule, 'DASHBOARD'): dashboard = submodule.DASHBOARD config[dashboard].update(submodule.__dict__) elif (hasattr(submodule, 'PANEL') or hasattr(submodule, 'PANEL_GROUP') or hasattr(submodule, 'FEATURE')): # If enabled and local.enabled contains a same filename, # the file loaded later (i.e., local.enabled) will be used. name = submodule.__name__.rsplit('.', 1)[1] config[name] = submodule.__dict__ else: logging.warning("Skipping %s because it doesn't have DASHBOARD" ", PANEL, PANEL_GROUP, or FEATURE defined.", submodule.__name__) return sorted(config.items(), key=lambda c: c[1]['__name__'].rsplit('.', 1)[1])
[ "def", "import_dashboard_config", "(", "modules", ")", ":", "config", "=", "collections", ".", "defaultdict", "(", "dict", ")", "for", "module", "in", "modules", ":", "for", "submodule", "in", "import_submodules", "(", "module", ")", ".", "values", "(", ")", ":", "if", "hasattr", "(", "submodule", ",", "'DASHBOARD'", ")", ":", "dashboard", "=", "submodule", ".", "DASHBOARD", "config", "[", "dashboard", "]", ".", "update", "(", "submodule", ".", "__dict__", ")", "elif", "(", "hasattr", "(", "submodule", ",", "'PANEL'", ")", "or", "hasattr", "(", "submodule", ",", "'PANEL_GROUP'", ")", "or", "hasattr", "(", "submodule", ",", "'FEATURE'", ")", ")", ":", "# If enabled and local.enabled contains a same filename,", "# the file loaded later (i.e., local.enabled) will be used.", "name", "=", "submodule", ".", "__name__", ".", "rsplit", "(", "'.'", ",", "1", ")", "[", "1", "]", "config", "[", "name", "]", "=", "submodule", ".", "__dict__", "else", ":", "logging", ".", "warning", "(", "\"Skipping %s because it doesn't have DASHBOARD\"", "\", PANEL, PANEL_GROUP, or FEATURE defined.\"", ",", "submodule", ".", "__name__", ")", "return", "sorted", "(", "config", ".", "items", "(", ")", ",", "key", "=", "lambda", "c", ":", "c", "[", "1", "]", "[", "'__name__'", "]", ".", "rsplit", "(", "'.'", ",", "1", ")", "[", "1", "]", ")" ]
Imports configuration from all the modules and merges it.
[ "Imports", "configuration", "from", "all", "the", "modules", "and", "merges", "it", "." ]
python
train
yamins81/tabular
tabular/tab.py
https://github.com/yamins81/tabular/blob/1caf091c8c395960a9ad7078f95158b533cc52dd/tabular/tab.py#L658-L671
def renamecol(self, old, new): """ Rename column or color in-place. Method wraps:: tabular.spreadsheet.renamecol(self, old, new) """ spreadsheet.renamecol(self,old,new) for x in self.coloring.keys(): if old in self.coloring[x]: ind = self.coloring[x].index(old) self.coloring[x][ind] = new
[ "def", "renamecol", "(", "self", ",", "old", ",", "new", ")", ":", "spreadsheet", ".", "renamecol", "(", "self", ",", "old", ",", "new", ")", "for", "x", "in", "self", ".", "coloring", ".", "keys", "(", ")", ":", "if", "old", "in", "self", ".", "coloring", "[", "x", "]", ":", "ind", "=", "self", ".", "coloring", "[", "x", "]", ".", "index", "(", "old", ")", "self", ".", "coloring", "[", "x", "]", "[", "ind", "]", "=", "new" ]
Rename column or color in-place. Method wraps:: tabular.spreadsheet.renamecol(self, old, new)
[ "Rename", "column", "or", "color", "in", "-", "place", "." ]
python
train
redhat-openstack/python-tripleo-helper
tripleohelper/server.py
https://github.com/redhat-openstack/python-tripleo-helper/blob/bfa165538335edb1088170c7a92f097167225c81/tripleohelper/server.py#L54-L103
def enable_user(self, user): """Enable the root account on the remote host. Since the host may have been deployed using a Cloud image, it may not be possible to use the 'root' account. This method ensure the root account is enable, if this is not the case, it will try to get the name of admin user and use it to re-enable the root account. """ if user in self.ssh_pool._ssh_clients: return if user == 'root': _root_ssh_client = ssh.SshClient( hostname=self.hostname, user='root', key_filename=self._key_filename, via_ip=self.via_ip) # connect as a root user _root_ssh_client.start() result, _ = _root_ssh_client.run('uname -a') image_user = None # check if root is not allowed if 'Please login as the user "cloud-user"' in result: image_user = 'cloud-user' _root_ssh_client.stop() elif 'Please login as the user "fedora" rather than the user "root"' in result: image_user = 'fedora' _root_ssh_client.stop() elif 'Please login as the user "centos" rather than the user "root"' in result: image_user = 'centos' _root_ssh_client.stop() if image_user: self.enable_user(image_user) LOG.info('enabling the root user') _cmd = "sudo sed -i 's,.*ssh-rsa,ssh-rsa,' /root/.ssh/authorized_keys" self.ssh_pool.run(image_user, _cmd) _root_ssh_client.start() self.ssh_pool.add_ssh_client('root', _root_ssh_client) return # add the cloud user to the ssh pool self.ssh_pool.build_ssh_client( hostname=self.hostname, user=user, key_filename=self._key_filename, via_ip=self.via_ip)
[ "def", "enable_user", "(", "self", ",", "user", ")", ":", "if", "user", "in", "self", ".", "ssh_pool", ".", "_ssh_clients", ":", "return", "if", "user", "==", "'root'", ":", "_root_ssh_client", "=", "ssh", ".", "SshClient", "(", "hostname", "=", "self", ".", "hostname", ",", "user", "=", "'root'", ",", "key_filename", "=", "self", ".", "_key_filename", ",", "via_ip", "=", "self", ".", "via_ip", ")", "# connect as a root user", "_root_ssh_client", ".", "start", "(", ")", "result", ",", "_", "=", "_root_ssh_client", ".", "run", "(", "'uname -a'", ")", "image_user", "=", "None", "# check if root is not allowed", "if", "'Please login as the user \"cloud-user\"'", "in", "result", ":", "image_user", "=", "'cloud-user'", "_root_ssh_client", ".", "stop", "(", ")", "elif", "'Please login as the user \"fedora\" rather than the user \"root\"'", "in", "result", ":", "image_user", "=", "'fedora'", "_root_ssh_client", ".", "stop", "(", ")", "elif", "'Please login as the user \"centos\" rather than the user \"root\"'", "in", "result", ":", "image_user", "=", "'centos'", "_root_ssh_client", ".", "stop", "(", ")", "if", "image_user", ":", "self", ".", "enable_user", "(", "image_user", ")", "LOG", ".", "info", "(", "'enabling the root user'", ")", "_cmd", "=", "\"sudo sed -i 's,.*ssh-rsa,ssh-rsa,' /root/.ssh/authorized_keys\"", "self", ".", "ssh_pool", ".", "run", "(", "image_user", ",", "_cmd", ")", "_root_ssh_client", ".", "start", "(", ")", "self", ".", "ssh_pool", ".", "add_ssh_client", "(", "'root'", ",", "_root_ssh_client", ")", "return", "# add the cloud user to the ssh pool", "self", ".", "ssh_pool", ".", "build_ssh_client", "(", "hostname", "=", "self", ".", "hostname", ",", "user", "=", "user", ",", "key_filename", "=", "self", ".", "_key_filename", ",", "via_ip", "=", "self", ".", "via_ip", ")" ]
Enable the root account on the remote host. Since the host may have been deployed using a Cloud image, it may not be possible to use the 'root' account. This method ensures the root account is enabled; if this is not the case, it will try to get the name of the admin user and use it to re-enable the root account.
[ "Enable", "the", "root", "account", "on", "the", "remote", "host", "." ]
python
train
saltstack/salt
salt/states/keystore.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/keystore.py#L27-L151
def managed(name, passphrase, entries, force_remove=False): ''' Create or manage a java keystore. name The path to the keystore file passphrase The password to the keystore entries A list containing an alias, certificate, and optional private_key. The certificate and private_key can be a file or a string .. code-block:: yaml - entries: - alias: hostname2 certificate: /path/to/cert.crt private_key: /path/to/key.key - alias: stringhost certificate: | -----BEGIN CERTIFICATE----- MIICEjCCAXsCAg36MA0GCSqGSIb3DQEBBQUAMIGbMQswCQYDVQQGEwJKUDEOMAwG ... 2VguKv4SWjRFoRkIfIlHX0qVviMhSlNy2ioFLy7JcPZb+v3ftDGywUqcBiVDoea0 -----END CERTIFICATE----- force_remove If True will cause the state to remove any entries found in the keystore which are not defined in the state. The default is False. Example .. code-block:: yaml define_keystore: keystore.managed: - name: /path/to/keystore - passphrase: changeit - force_remove: True - entries: - alias: hostname1 certificate: /path/to/cert.crt - alias: remotehost certificate: /path/to/cert2.crt private_key: /path/to/key2.key - alias: pillarhost certificate: {{ salt.pillar.get('path:to:cert') }} ''' ret = {'changes': {}, 'comment': '', 'name': name, 'result': True} keep_list = [] old_aliases = [] if force_remove: if os.path.exists(name): existing_entries = __salt__['keystore.list'](name, passphrase) for entry in existing_entries: old_aliases.append(entry.get('alias')) log.debug("Existing aliases list: %s", old_aliases) for entry in entries: update_entry = True existing_entry = None if os.path.exists(name): if force_remove: keep_list.append(entry['alias']) existing_entry = __salt__['keystore.list'](name, passphrase, entry['alias']) if existing_entry: existing_sha1 = existing_entry[0]['sha1'] new_sha1 = __salt__['x509.read_certificate'](entry['certificate'])['SHA1 Finger Print'] if existing_sha1 == new_sha1: update_entry = False if update_entry: if __opts__['test']: ret['result'] = None if existing_entry: ret['comment'] += "Alias {0} would have been updated\n".format(entry['alias']) else: ret['comment'] += "Alias {0} would have been added\n".format(entry['alias']) else: if existing_entry: result = __salt__['keystore.remove'](entry['alias'], name, passphrase) result = __salt__['keystore.add'](entry['alias'], name, passphrase, entry['certificate'], private_key=entry.get('private_key', None) ) if result: ret['changes'][entry['alias']] = "Updated" ret['comment'] += "Alias {0} updated.\n".format(entry['alias']) else: result = __salt__['keystore.add'](entry['alias'], name, passphrase, entry['certificate'], private_key=entry.get('private_key', None) ) if result: ret['changes'][entry['alias']] = "Added" ret['comment'] += "Alias {0} added.\n".format(entry['alias']) if force_remove: # Determine which aliases need to be removed remove_list = list(set(old_aliases) - set(keep_list)) log.debug("Will remove: %s", remove_list) for alias_name in remove_list: if __opts__['test']: ret['comment'] += "Alias {0} would have been removed".format(alias_name) ret['result'] = None else: __salt__['keystore.remove'](alias_name, name, passphrase) ret['changes'][alias_name] = "Removed" ret['comment'] += "Alias {0} removed.\n".format(alias_name) if not ret['changes'] and not ret['comment']: ret['comment'] = "No changes made.\n" return ret
[ "def", "managed", "(", "name", ",", "passphrase", ",", "entries", ",", "force_remove", "=", "False", ")", ":", "ret", "=", "{", "'changes'", ":", "{", "}", ",", "'comment'", ":", "''", ",", "'name'", ":", "name", ",", "'result'", ":", "True", "}", "keep_list", "=", "[", "]", "old_aliases", "=", "[", "]", "if", "force_remove", ":", "if", "os", ".", "path", ".", "exists", "(", "name", ")", ":", "existing_entries", "=", "__salt__", "[", "'keystore.list'", "]", "(", "name", ",", "passphrase", ")", "for", "entry", "in", "existing_entries", ":", "old_aliases", ".", "append", "(", "entry", ".", "get", "(", "'alias'", ")", ")", "log", ".", "debug", "(", "\"Existing aliases list: %s\"", ",", "old_aliases", ")", "for", "entry", "in", "entries", ":", "update_entry", "=", "True", "existing_entry", "=", "None", "if", "os", ".", "path", ".", "exists", "(", "name", ")", ":", "if", "force_remove", ":", "keep_list", ".", "append", "(", "entry", "[", "'alias'", "]", ")", "existing_entry", "=", "__salt__", "[", "'keystore.list'", "]", "(", "name", ",", "passphrase", ",", "entry", "[", "'alias'", "]", ")", "if", "existing_entry", ":", "existing_sha1", "=", "existing_entry", "[", "0", "]", "[", "'sha1'", "]", "new_sha1", "=", "__salt__", "[", "'x509.read_certificate'", "]", "(", "entry", "[", "'certificate'", "]", ")", "[", "'SHA1 Finger Print'", "]", "if", "existing_sha1", "==", "new_sha1", ":", "update_entry", "=", "False", "if", "update_entry", ":", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'result'", "]", "=", "None", "if", "existing_entry", ":", "ret", "[", "'comment'", "]", "+=", "\"Alias {0} would have been updated\\n\"", ".", "format", "(", "entry", "[", "'alias'", "]", ")", "else", ":", "ret", "[", "'comment'", "]", "+=", "\"Alias {0} would have been added\\n\"", ".", "format", "(", "entry", "[", "'alias'", "]", ")", "else", ":", "if", "existing_entry", ":", "result", "=", "__salt__", "[", "'keystore.remove'", "]", "(", "entry", "[", "'alias'", "]", ",", "name", ",", "passphrase", ")", "result", "=", "__salt__", "[", "'keystore.add'", "]", "(", "entry", "[", "'alias'", "]", ",", "name", ",", "passphrase", ",", "entry", "[", "'certificate'", "]", ",", "private_key", "=", "entry", ".", "get", "(", "'private_key'", ",", "None", ")", ")", "if", "result", ":", "ret", "[", "'changes'", "]", "[", "entry", "[", "'alias'", "]", "]", "=", "\"Updated\"", "ret", "[", "'comment'", "]", "+=", "\"Alias {0} updated.\\n\"", ".", "format", "(", "entry", "[", "'alias'", "]", ")", "else", ":", "result", "=", "__salt__", "[", "'keystore.add'", "]", "(", "entry", "[", "'alias'", "]", ",", "name", ",", "passphrase", ",", "entry", "[", "'certificate'", "]", ",", "private_key", "=", "entry", ".", "get", "(", "'private_key'", ",", "None", ")", ")", "if", "result", ":", "ret", "[", "'changes'", "]", "[", "entry", "[", "'alias'", "]", "]", "=", "\"Added\"", "ret", "[", "'comment'", "]", "+=", "\"Alias {0} added.\\n\"", ".", "format", "(", "entry", "[", "'alias'", "]", ")", "if", "force_remove", ":", "# Determine which aliases need to be removed", "remove_list", "=", "list", "(", "set", "(", "old_aliases", ")", "-", "set", "(", "keep_list", ")", ")", "log", ".", "debug", "(", "\"Will remove: %s\"", ",", "remove_list", ")", "for", "alias_name", "in", "remove_list", ":", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'comment'", "]", "+=", "\"Alias {0} would have been removed\"", ".", "format", "(", "alias_name", ")", "ret", "[", "'result'", "]", "=", "None", "else", ":", "__salt__", "[", 
"'keystore.remove'", "]", "(", "alias_name", ",", "name", ",", "passphrase", ")", "ret", "[", "'changes'", "]", "[", "alias_name", "]", "=", "\"Removed\"", "ret", "[", "'comment'", "]", "+=", "\"Alias {0} removed.\\n\"", ".", "format", "(", "alias_name", ")", "if", "not", "ret", "[", "'changes'", "]", "and", "not", "ret", "[", "'comment'", "]", ":", "ret", "[", "'comment'", "]", "=", "\"No changes made.\\n\"", "return", "ret" ]
Create or manage a java keystore. name The path to the keystore file passphrase The password to the keystore entries A list containing an alias, certificate, and optional private_key. The certificate and private_key can be a file or a string .. code-block:: yaml - entries: - alias: hostname2 certificate: /path/to/cert.crt private_key: /path/to/key.key - alias: stringhost certificate: | -----BEGIN CERTIFICATE----- MIICEjCCAXsCAg36MA0GCSqGSIb3DQEBBQUAMIGbMQswCQYDVQQGEwJKUDEOMAwG ... 2VguKv4SWjRFoRkIfIlHX0qVviMhSlNy2ioFLy7JcPZb+v3ftDGywUqcBiVDoea0 -----END CERTIFICATE----- force_remove If True will cause the state to remove any entries found in the keystore which are not defined in the state. The default is False. Example .. code-block:: yaml define_keystore: keystore.managed: - name: /path/to/keystore - passphrase: changeit - force_remove: True - entries: - alias: hostname1 certificate: /path/to/cert.crt - alias: remotehost certificate: /path/to/cert2.crt private_key: /path/to/key2.key - alias: pillarhost certificate: {{ salt.pillar.get('path:to:cert') }}
[ "Create", "or", "manage", "a", "java", "keystore", "." ]
python
train
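A small, self-contained sketch of the force_remove pruning step in keystore.managed above; the alias names are hypothetical.

# Aliases currently present in the keystore (hypothetical values from keystore.list).
old_aliases = ['hostname1', 'remotehost', 'stale-cert']
# Aliases declared in the state, collected into keep_list while iterating over entries.
keep_list = ['hostname1', 'remotehost']
# Anything on disk but not declared in the state is scheduled for removal.
remove_list = list(set(old_aliases) - set(keep_list))
print(remove_list)  # -> ['stale-cert']; each entry would be passed to keystore.remove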
Jajcus/pyxmpp2
pyxmpp2/ext/vcard.py
https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/ext/vcard.py#L729-L745
def as_xml(self,parent): """Create vcard-tmp XML representation of the field. :Parameters: - `parent`: parent node for the element :Types: - `parent`: `libxml2.xmlNode` :return: xml node with the field data. :returntype: `libxml2.xmlNode`""" n=parent.newChild(None,"TEL",None) for t in ("home","work","voice","fax","pager","msg","cell","video", "bbs","modem","isdn","pcs","pref"): if t in self.type: n.newChild(None,t.upper(),None) n.newTextChild(None,"NUMBER",to_utf8(self.number)) return n
[ "def", "as_xml", "(", "self", ",", "parent", ")", ":", "n", "=", "parent", ".", "newChild", "(", "None", ",", "\"TEL\"", ",", "None", ")", "for", "t", "in", "(", "\"home\"", ",", "\"work\"", ",", "\"voice\"", ",", "\"fax\"", ",", "\"pager\"", ",", "\"msg\"", ",", "\"cell\"", ",", "\"video\"", ",", "\"bbs\"", ",", "\"modem\"", ",", "\"isdn\"", ",", "\"pcs\"", ",", "\"pref\"", ")", ":", "if", "t", "in", "self", ".", "type", ":", "n", ".", "newChild", "(", "None", ",", "t", ".", "upper", "(", ")", ",", "None", ")", "n", ".", "newTextChild", "(", "None", ",", "\"NUMBER\"", ",", "to_utf8", "(", "self", ".", "number", ")", ")", "return", "n" ]
Create vcard-tmp XML representation of the field. :Parameters: - `parent`: parent node for the element :Types: - `parent`: `libxml2.xmlNode` :return: xml node with the field data. :returntype: `libxml2.xmlNode`
[ "Create", "vcard", "-", "tmp", "XML", "representation", "of", "the", "field", "." ]
python
valid
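A runnable stand-in for the element construction done by as_xml above, using the libxml2 Python bindings directly; the tel_types and tel_number values are hypothetical, and the library's to_utf8 helper is skipped here for simplicity.

import libxml2

# Hypothetical field values standing in for self.type and self.number.
tel_types = ["work", "voice"]
tel_number = "+1 555 0100"

doc = libxml2.newDoc("1.0")
vcard = doc.newChild(None, "vCard", None)  # plays the role of the 'parent' argument

# Same element construction as the method body above.
n = vcard.newChild(None, "TEL", None)
for t in ("home", "work", "voice", "fax", "pager", "msg", "cell", "video",
          "bbs", "modem", "isdn", "pcs", "pref"):
    if t in tel_types:
        n.newChild(None, t.upper(), None)
n.newTextChild(None, "NUMBER", tel_number)
print(doc.serialize())  # contains <TEL><WORK/><VOICE/><NUMBER>+1 555 0100</NUMBER></TEL>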