Dataset columns (name, type, observed min/max where given):
  repo              string, 7 to 55 chars
  path              string, 4 to 223 chars
  url               string, 87 to 315 chars
  code              string, 75 to 104k chars
  code_tokens       list
  docstring         string, 1 to 46.9k chars
  docstring_tokens  list
  language          string, 1 distinct value
  partition         string, 3 distinct values
  avg_line_len      float64, 7.91 to 980
docker/docker-py
docker/models/containers.py
https://github.com/docker/docker-py/blob/613d6aad83acc9931ff2ecfd6a6c7bd8061dc125/docker/models/containers.py#L487-L508
def wait(self, **kwargs):
    """
    Block until the container stops, then return its exit code. Similar to
    the ``docker wait`` command.

    Args:
        timeout (int): Request timeout
        condition (str): Wait until a container state reaches the given
            condition, either ``not-running`` (default), ``next-exit``,
            or ``removed``

    Returns:
        (dict): The API's response as a Python dictionary, including
            the container's exit code under the ``StatusCode`` attribute.

    Raises:
        :py:class:`requests.exceptions.ReadTimeout`
            If the timeout is exceeded.
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    return self.client.api.wait(self.id, **kwargs)
[ "def", "wait", "(", "self", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "client", ".", "api", ".", "wait", "(", "self", ".", "id", ",", "*", "*", "kwargs", ")" ]
Block until the container stops, then return its exit code. Similar to the ``docker wait`` command. Args: timeout (int): Request timeout condition (str): Wait until a container state reaches the given condition, either ``not-running`` (default), ``next-exit``, or ``removed`` Returns: (dict): The API's response as a Python dictionary, including the container's exit code under the ``StatusCode`` attribute. Raises: :py:class:`requests.exceptions.ReadTimeout` If the timeout is exceeded. :py:class:`docker.errors.APIError` If the server returns an error.
[ "Block", "until", "the", "container", "stops", "then", "return", "its", "exit", "code", ".", "Similar", "to", "the", "docker", "wait", "command", "." ]
python
train
37.318182
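A minimal usage sketch for the wait() method above, assuming a local Docker daemon and the docker SDK are available; the image and timeout are illustrative only:

import docker

client = docker.from_env()
# Start a short-lived container and block until it exits.
container = client.containers.run("alpine", "true", detach=True)
result = container.wait(condition="not-running", timeout=30)
# The exit code is reported under the StatusCode key, per the docstring above.
print(result["StatusCode"])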
cloudmesh/cloudmesh-common
cloudmesh/common/util.py
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/util.py#L68-L83
def search(lines, pattern):
    """
    return all lines that match the pattern
    #TODO: we need an example

    :param lines:
    :param pattern:
    :return:
    """
    p = pattern.replace("*", ".*")
    test = re.compile(p)
    result = []
    for l in lines:
        if test.search(l):
            result.append(l)
    return result
[ "def", "search", "(", "lines", ",", "pattern", ")", ":", "p", "=", "pattern", ".", "replace", "(", "\"*\"", ",", "\".*\"", ")", "test", "=", "re", ".", "compile", "(", "p", ")", "result", "=", "[", "]", "for", "l", "in", "lines", ":", "if", "test", ".", "search", "(", "l", ")", ":", "result", ".", "append", "(", "l", ")", "return", "result" ]
return all lines that match the pattern #TODO: we need an example :param lines: :param pattern: :return:
[ "return", "all", "lines", "that", "match", "the", "pattern", "#TODO", ":", "we", "need", "an", "example", ":", "param", "lines", ":", ":", "param", "pattern", ":", ":", "return", ":" ]
python
train
20.6875
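A small usage sketch of search() above (importable from cloudmesh.common.util per the path column); the sample lines are made up. Note that '*' is the only glob character rewritten, so the rest of the pattern is treated as a regular expression:

from cloudmesh.common.util import search

# '*' becomes '.*', then re.search is applied to each line.
lines = ["app.log", "notes.txt", "archive/app.log"]
print(search(lines, "*.log"))   # -> ['app.log', 'archive/app.log']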
opendatateam/udata
udata/search/__init__.py
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/search/__init__.py#L173-L181
def register(adapter):
    '''Register a search adapter'''
    # register the class in the catalog
    if adapter.model and adapter.model not in adapter_catalog:
        adapter_catalog[adapter.model] = adapter
        # Automatically (re|un)index objects on save/delete
        post_save.connect(reindex_model_on_save, sender=adapter.model)
        post_delete.connect(unindex_model_on_delete, sender=adapter.model)
    return adapter
[ "def", "register", "(", "adapter", ")", ":", "# register the class in the catalog", "if", "adapter", ".", "model", "and", "adapter", ".", "model", "not", "in", "adapter_catalog", ":", "adapter_catalog", "[", "adapter", ".", "model", "]", "=", "adapter", "# Automatically (re|un)index objects on save/delete", "post_save", ".", "connect", "(", "reindex_model_on_save", ",", "sender", "=", "adapter", ".", "model", ")", "post_delete", ".", "connect", "(", "unindex_model_on_delete", ",", "sender", "=", "adapter", ".", "model", ")", "return", "adapter" ]
Register a search adapter
[ "Register", "a", "search", "adapter" ]
python
train
47.444444
benzrf/parthial
parthial/context.py
https://github.com/benzrf/parthial/blob/ab1e316aec87ed34dda0ec0e145fe0c8cc8e907f/parthial/context.py#L79-L92
def rec_new(self, val):
    """Recursively add a new value and its children to me.

    Args:
        val (LispVal): The value to be added.

    Returns:
        LispVal: The added value.
    """
    if val not in self.things:
        for child in val.children():
            self.rec_new(child)
        self.new(val)
    return val
[ "def", "rec_new", "(", "self", ",", "val", ")", ":", "if", "val", "not", "in", "self", ".", "things", ":", "for", "child", "in", "val", ".", "children", "(", ")", ":", "self", ".", "rec_new", "(", "child", ")", "self", ".", "new", "(", "val", ")", "return", "val" ]
Recursively add a new value and its children to me. Args: val (LispVal): The value to be added. Returns: LispVal: The added value.
[ "Recursively", "add", "a", "new", "value", "and", "its", "children", "to", "me", "." ]
python
train
25.928571
gwastro/pycbc
pycbc/inference/sampler/emcee.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/inference/sampler/emcee.py#L166-L186
def write_results(self, filename):
    """Writes samples, model stats, acceptance fraction, and random state
    to the given file.

    Parameters
    -----------
    filename : str
        The file to write to. The file is opened using the ``io`` class
        in an append state.
    """
    with self.io(filename, 'a') as fp:
        # write samples
        fp.write_samples(self.samples, self.model.variable_params,
                         last_iteration=self.niterations)
        # write stats
        fp.write_samples(self.model_stats, last_iteration=self.niterations)
        # write acceptance
        fp.write_acceptance_fraction(self._sampler.acceptance_fraction)
        # write random state
        fp.write_random_state(state=self._sampler.random_state)
[ "def", "write_results", "(", "self", ",", "filename", ")", ":", "with", "self", ".", "io", "(", "filename", ",", "'a'", ")", "as", "fp", ":", "# write samples", "fp", ".", "write_samples", "(", "self", ".", "samples", ",", "self", ".", "model", ".", "variable_params", ",", "last_iteration", "=", "self", ".", "niterations", ")", "# write stats", "fp", ".", "write_samples", "(", "self", ".", "model_stats", ",", "last_iteration", "=", "self", ".", "niterations", ")", "# write accpetance", "fp", ".", "write_acceptance_fraction", "(", "self", ".", "_sampler", ".", "acceptance_fraction", ")", "# write random state", "fp", ".", "write_random_state", "(", "state", "=", "self", ".", "_sampler", ".", "random_state", ")" ]
Writes samples, model stats, acceptance fraction, and random state to the given file. Parameters ----------- filename : str The file to write to. The file is opened using the ``io`` class in an append state.
[ "Writes", "samples", "model", "stats", "acceptance", "fraction", "and", "random", "state", "to", "the", "given", "file", "." ]
python
train
40.571429
jrief/django-websocket-redis
ws4redis/redis_store.py
https://github.com/jrief/django-websocket-redis/blob/abcddaad2f579d71dbf375e5e34bc35eef795a81/ws4redis/redis_store.py#L15-L26
def _wrap_users(users, request):
    """
    Returns a list with the given list of users and/or the currently logged in user,
    if the list contains the magic item SELF.
    """
    result = set()
    for u in users:
        if u is SELF and is_authenticated(request):
            result.add(request.user.get_username())
        else:
            result.add(u)
    return result
[ "def", "_wrap_users", "(", "users", ",", "request", ")", ":", "result", "=", "set", "(", ")", "for", "u", "in", "users", ":", "if", "u", "is", "SELF", "and", "is_authenticated", "(", "request", ")", ":", "result", ".", "add", "(", "request", ".", "user", ".", "get_username", "(", ")", ")", "else", ":", "result", ".", "add", "(", "u", ")", "return", "result" ]
Returns a list with the given list of users and/or the currently logged in user, if the list contains the magic item SELF.
[ "Returns", "a", "list", "with", "the", "given", "list", "of", "users", "and", "/", "or", "the", "currently", "logged", "in", "user", "if", "the", "list", "contains", "the", "magic", "item", "SELF", "." ]
python
train
30.75
daskol/nls
nls/model.py
https://github.com/daskol/nls/blob/00bb4555e4f56e222dc6f54faf2e286567519626/nls/model.py#L521-L532
def restore(self, filename):
    """Restore object from mat-file.

    TODO: determine format specification
    """
    matfile = loadmat(filename)
    if matfile['dim'] == 1:
        matfile['solution'] = matfile['solution'][0, :]
    self.elapsed_time = matfile['elapsed_time'][0, 0]
    self.solution = matfile['solution']
    return self
[ "def", "restore", "(", "self", ",", "filename", ")", ":", "matfile", "=", "loadmat", "(", "filename", ")", "if", "matfile", "[", "'dim'", "]", "==", "1", ":", "matfile", "[", "'solution'", "]", "=", "matfile", "[", "'solution'", "]", "[", "0", ",", ":", "]", "self", ".", "elapsed_time", "=", "matfile", "[", "'elapsed_time'", "]", "[", "0", ",", "0", "]", "self", ".", "solution", "=", "matfile", "[", "'solution'", "]", "return", "self" ]
Restore object from mat-file. TODO: determine format specification
[ "Restore", "object", "from", "mat", "-", "file", ".", "TODO", ":", "determine", "format", "specification" ]
python
train
30
SheffieldML/GPy
GPy/kern/src/todo/eq_ode1.py
https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/kern/src/todo/eq_ode1.py#L461-L556
def _compute_H(self, t, index, t2, index2, update_derivatives=False, stationary=False): """Helper function for computing part of the ode1 covariance function. :param t: first time input. :type t: array :param index: Indices of first output. :type index: array of int :param t2: second time input. :type t2: array :param index2: Indices of second output. :type index2: array of int :param update_derivatives: whether to update derivatives (default is False) :return h : result of this subcomponent of the kernel for the given values. :rtype: ndarray """ if stationary: raise NotImplementedError, "Error, stationary version of this covariance not yet implemented." # Vector of decays and delays associated with each output. Decay = self.decay[index] Decay2 = self.decay[index2] t_mat = t[:, None] t2_mat = t2[None, :] if self.delay is not None: Delay = self.delay[index] Delay2 = self.delay[index2] t_mat-=Delay[:, None] t2_mat-=Delay2[None, :] diff_t = (t_mat - t2_mat) inv_sigma_diff_t = 1./self.sigma*diff_t half_sigma_decay_i = 0.5*self.sigma*Decay[:, None] ln_part_1, sign1 = ln_diff_erfs(half_sigma_decay_i + t2_mat/self.sigma, half_sigma_decay_i - inv_sigma_diff_t, return_sign=True) ln_part_2, sign2 = ln_diff_erfs(half_sigma_decay_i, half_sigma_decay_i - t_mat/self.sigma, return_sign=True) h = sign1*np.exp(half_sigma_decay_i *half_sigma_decay_i -Decay[:, None]*diff_t+ln_part_1 -np.log(Decay[:, None] + Decay2[None, :])) h -= sign2*np.exp(half_sigma_decay_i*half_sigma_decay_i -Decay[:, None]*t_mat-Decay2[None, :]*t2_mat+ln_part_2 -np.log(Decay[:, None] + Decay2[None, :])) if update_derivatives: sigma2 = self.sigma*self.sigma # Update ith decay gradient dh_ddecay = ((0.5*Decay[:, None]*sigma2*(Decay[:, None] + Decay2[None, :])-1)*h + (-diff_t*sign1*np.exp( half_sigma_decay_i*half_sigma_decay_i-Decay[:, None]*diff_t+ln_part_1 ) +t_mat*sign2*np.exp( half_sigma_decay_i*half_sigma_decay_i-Decay[:, None]*t_mat - Decay2*t2_mat+ln_part_2)) +self.sigma/np.sqrt(np.pi)*( -np.exp( -diff_t*diff_t/sigma2 )+np.exp( -t2_mat*t2_mat/sigma2-Decay[:, None]*t_mat )+np.exp( -t_mat*t_mat/sigma2-Decay2[None, :]*t2_mat )-np.exp( -(Decay[:, None]*t_mat + Decay2[None, :]*t2_mat) ) )) self._dh_ddecay = (dh_ddecay/(Decay[:, None]+Decay2[None, :])).real # Update jth decay gradient dh_ddecay2 = (t2_mat*sign2 *np.exp( half_sigma_decay_i*half_sigma_decay_i -(Decay[:, None]*t_mat + Decay2[None, :]*t2_mat) +ln_part_2 ) -h) self._dh_ddecay2 = (dh_ddecay/(Decay[:, None] + Decay2[None, :])).real # Update sigma gradient self._dh_dsigma = (half_sigma_decay_i*Decay[:, None]*h + 2/(np.sqrt(np.pi) *(Decay[:, None]+Decay2[None, :])) *((-diff_t/sigma2-Decay[:, None]/2) *np.exp(-diff_t*diff_t/sigma2) + (-t2_mat/sigma2+Decay[:, None]/2) *np.exp(-t2_mat*t2_mat/sigma2-Decay[:, None]*t_mat) - (-t_mat/sigma2-Decay[:, None]/2) *np.exp(-t_mat*t_mat/sigma2-Decay2[None, :]*t2_mat) - Decay[:, None]/2 *np.exp(-(Decay[:, None]*t_mat+Decay2[None, :]*t2_mat)))) return h
[ "def", "_compute_H", "(", "self", ",", "t", ",", "index", ",", "t2", ",", "index2", ",", "update_derivatives", "=", "False", ",", "stationary", "=", "False", ")", ":", "if", "stationary", ":", "raise", "NotImplementedError", ",", "\"Error, stationary version of this covariance not yet implemented.\"", "# Vector of decays and delays associated with each output.", "Decay", "=", "self", ".", "decay", "[", "index", "]", "Decay2", "=", "self", ".", "decay", "[", "index2", "]", "t_mat", "=", "t", "[", ":", ",", "None", "]", "t2_mat", "=", "t2", "[", "None", ",", ":", "]", "if", "self", ".", "delay", "is", "not", "None", ":", "Delay", "=", "self", ".", "delay", "[", "index", "]", "Delay2", "=", "self", ".", "delay", "[", "index2", "]", "t_mat", "-=", "Delay", "[", ":", ",", "None", "]", "t2_mat", "-=", "Delay2", "[", "None", ",", ":", "]", "diff_t", "=", "(", "t_mat", "-", "t2_mat", ")", "inv_sigma_diff_t", "=", "1.", "/", "self", ".", "sigma", "*", "diff_t", "half_sigma_decay_i", "=", "0.5", "*", "self", ".", "sigma", "*", "Decay", "[", ":", ",", "None", "]", "ln_part_1", ",", "sign1", "=", "ln_diff_erfs", "(", "half_sigma_decay_i", "+", "t2_mat", "/", "self", ".", "sigma", ",", "half_sigma_decay_i", "-", "inv_sigma_diff_t", ",", "return_sign", "=", "True", ")", "ln_part_2", ",", "sign2", "=", "ln_diff_erfs", "(", "half_sigma_decay_i", ",", "half_sigma_decay_i", "-", "t_mat", "/", "self", ".", "sigma", ",", "return_sign", "=", "True", ")", "h", "=", "sign1", "*", "np", ".", "exp", "(", "half_sigma_decay_i", "*", "half_sigma_decay_i", "-", "Decay", "[", ":", ",", "None", "]", "*", "diff_t", "+", "ln_part_1", "-", "np", ".", "log", "(", "Decay", "[", ":", ",", "None", "]", "+", "Decay2", "[", "None", ",", ":", "]", ")", ")", "h", "-=", "sign2", "*", "np", ".", "exp", "(", "half_sigma_decay_i", "*", "half_sigma_decay_i", "-", "Decay", "[", ":", ",", "None", "]", "*", "t_mat", "-", "Decay2", "[", "None", ",", ":", "]", "*", "t2_mat", "+", "ln_part_2", "-", "np", ".", "log", "(", "Decay", "[", ":", ",", "None", "]", "+", "Decay2", "[", "None", ",", ":", "]", ")", ")", "if", "update_derivatives", ":", "sigma2", "=", "self", ".", "sigma", "*", "self", ".", "sigma", "# Update ith decay gradient", "dh_ddecay", "=", "(", "(", "0.5", "*", "Decay", "[", ":", ",", "None", "]", "*", "sigma2", "*", "(", "Decay", "[", ":", ",", "None", "]", "+", "Decay2", "[", "None", ",", ":", "]", ")", "-", "1", ")", "*", "h", "+", "(", "-", "diff_t", "*", "sign1", "*", "np", ".", "exp", "(", "half_sigma_decay_i", "*", "half_sigma_decay_i", "-", "Decay", "[", ":", ",", "None", "]", "*", "diff_t", "+", "ln_part_1", ")", "+", "t_mat", "*", "sign2", "*", "np", ".", "exp", "(", "half_sigma_decay_i", "*", "half_sigma_decay_i", "-", "Decay", "[", ":", ",", "None", "]", "*", "t_mat", "-", "Decay2", "*", "t2_mat", "+", "ln_part_2", ")", ")", "+", "self", ".", "sigma", "/", "np", ".", "sqrt", "(", "np", ".", "pi", ")", "*", "(", "-", "np", ".", "exp", "(", "-", "diff_t", "*", "diff_t", "/", "sigma2", ")", "+", "np", ".", "exp", "(", "-", "t2_mat", "*", "t2_mat", "/", "sigma2", "-", "Decay", "[", ":", ",", "None", "]", "*", "t_mat", ")", "+", "np", ".", "exp", "(", "-", "t_mat", "*", "t_mat", "/", "sigma2", "-", "Decay2", "[", "None", ",", ":", "]", "*", "t2_mat", ")", "-", "np", ".", "exp", "(", "-", "(", "Decay", "[", ":", ",", "None", "]", "*", "t_mat", "+", "Decay2", "[", "None", ",", ":", "]", "*", "t2_mat", ")", ")", ")", ")", "self", ".", "_dh_ddecay", "=", "(", "dh_ddecay", "/", "(", "Decay", "[", ":", ",", "None", "]", "+", 
"Decay2", "[", "None", ",", ":", "]", ")", ")", ".", "real", "# Update jth decay gradient", "dh_ddecay2", "=", "(", "t2_mat", "*", "sign2", "*", "np", ".", "exp", "(", "half_sigma_decay_i", "*", "half_sigma_decay_i", "-", "(", "Decay", "[", ":", ",", "None", "]", "*", "t_mat", "+", "Decay2", "[", "None", ",", ":", "]", "*", "t2_mat", ")", "+", "ln_part_2", ")", "-", "h", ")", "self", ".", "_dh_ddecay2", "=", "(", "dh_ddecay", "/", "(", "Decay", "[", ":", ",", "None", "]", "+", "Decay2", "[", "None", ",", ":", "]", ")", ")", ".", "real", "# Update sigma gradient", "self", ".", "_dh_dsigma", "=", "(", "half_sigma_decay_i", "*", "Decay", "[", ":", ",", "None", "]", "*", "h", "+", "2", "/", "(", "np", ".", "sqrt", "(", "np", ".", "pi", ")", "*", "(", "Decay", "[", ":", ",", "None", "]", "+", "Decay2", "[", "None", ",", ":", "]", ")", ")", "*", "(", "(", "-", "diff_t", "/", "sigma2", "-", "Decay", "[", ":", ",", "None", "]", "/", "2", ")", "*", "np", ".", "exp", "(", "-", "diff_t", "*", "diff_t", "/", "sigma2", ")", "+", "(", "-", "t2_mat", "/", "sigma2", "+", "Decay", "[", ":", ",", "None", "]", "/", "2", ")", "*", "np", ".", "exp", "(", "-", "t2_mat", "*", "t2_mat", "/", "sigma2", "-", "Decay", "[", ":", ",", "None", "]", "*", "t_mat", ")", "-", "(", "-", "t_mat", "/", "sigma2", "-", "Decay", "[", ":", ",", "None", "]", "/", "2", ")", "*", "np", ".", "exp", "(", "-", "t_mat", "*", "t_mat", "/", "sigma2", "-", "Decay2", "[", "None", ",", ":", "]", "*", "t2_mat", ")", "-", "Decay", "[", ":", ",", "None", "]", "/", "2", "*", "np", ".", "exp", "(", "-", "(", "Decay", "[", ":", ",", "None", "]", "*", "t_mat", "+", "Decay2", "[", "None", ",", ":", "]", "*", "t2_mat", ")", ")", ")", ")", "return", "h" ]
Helper function for computing part of the ode1 covariance function. :param t: first time input. :type t: array :param index: Indices of first output. :type index: array of int :param t2: second time input. :type t2: array :param index2: Indices of second output. :type index2: array of int :param update_derivatives: whether to update derivatives (default is False) :return h : result of this subcomponent of the kernel for the given values. :rtype: ndarray
[ "Helper", "function", "for", "computing", "part", "of", "the", "ode1", "covariance", "function", "." ]
python
train
45.125
totalgood/nlpia
src/nlpia/loaders.py
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/loaders.py#L1268-L1289
def load_geo_adwords(filename='AdWords API Location Criteria 2017-06-26.csv.gz'):
    """ WARN: Not a good source of city names. This table has many errors, even after cleaning"""
    df = pd.read_csv(filename, header=0, index_col=0, low_memory=False)
    df.columns = [c.replace(' ', '_').lower() for c in df.columns]
    canonical = pd.DataFrame([list(row) for row in df.canonical_name.str.split(',').values])

    def cleaner(row):
        cleaned = pd.np.array(
            [s for i, s in enumerate(row.values)
             if s not in ('Downtown', None) and (i > 3 or row[i + 1] != s)])
        if len(cleaned) == 2:
            cleaned = [cleaned[0], None, cleaned[1], None, None]
        else:
            cleaned = list(cleaned) + [None] * (5 - len(cleaned))
        if not pd.np.all(pd.np.array(row.values)[:3] == pd.np.array(cleaned)[:3]):
            logger.info('{} => {}'.format(row.values, cleaned))
        return list(cleaned)

    cleancanon = canonical.apply(cleaner, axis=1)
    cleancanon.columns = 'city region country extra extra2'.split()
    df['region'] = cleancanon.region
    df['country'] = cleancanon.country
    return df
[ "def", "load_geo_adwords", "(", "filename", "=", "'AdWords API Location Criteria 2017-06-26.csv.gz'", ")", ":", "df", "=", "pd", ".", "read_csv", "(", "filename", ",", "header", "=", "0", ",", "index_col", "=", "0", ",", "low_memory", "=", "False", ")", "df", ".", "columns", "=", "[", "c", ".", "replace", "(", "' '", ",", "'_'", ")", ".", "lower", "(", ")", "for", "c", "in", "df", ".", "columns", "]", "canonical", "=", "pd", ".", "DataFrame", "(", "[", "list", "(", "row", ")", "for", "row", "in", "df", ".", "canonical_name", ".", "str", ".", "split", "(", "','", ")", ".", "values", "]", ")", "def", "cleaner", "(", "row", ")", ":", "cleaned", "=", "pd", ".", "np", ".", "array", "(", "[", "s", "for", "i", ",", "s", "in", "enumerate", "(", "row", ".", "values", ")", "if", "s", "not", "in", "(", "'Downtown'", ",", "None", ")", "and", "(", "i", ">", "3", "or", "row", "[", "i", "+", "1", "]", "!=", "s", ")", "]", ")", "if", "len", "(", "cleaned", ")", "==", "2", ":", "cleaned", "=", "[", "cleaned", "[", "0", "]", ",", "None", ",", "cleaned", "[", "1", "]", ",", "None", ",", "None", "]", "else", ":", "cleaned", "=", "list", "(", "cleaned", ")", "+", "[", "None", "]", "*", "(", "5", "-", "len", "(", "cleaned", ")", ")", "if", "not", "pd", ".", "np", ".", "all", "(", "pd", ".", "np", ".", "array", "(", "row", ".", "values", ")", "[", ":", "3", "]", "==", "pd", ".", "np", ".", "array", "(", "cleaned", ")", "[", ":", "3", "]", ")", ":", "logger", ".", "info", "(", "'{} => {}'", ".", "format", "(", "row", ".", "values", ",", "cleaned", ")", ")", "return", "list", "(", "cleaned", ")", "cleancanon", "=", "canonical", ".", "apply", "(", "cleaner", ",", "axis", "=", "1", ")", "cleancanon", ".", "columns", "=", "'city region country extra extra2'", ".", "split", "(", ")", "df", "[", "'region'", "]", "=", "cleancanon", ".", "region", "df", "[", "'country'", "]", "=", "cleancanon", ".", "country", "return", "df" ]
WARN: Not a good source of city names. This table has many errors, even after cleaning
[ "WARN", ":", "Not", "a", "good", "source", "of", "city", "names", ".", "This", "table", "has", "many", "errors", "even", "after", "cleaning" ]
python
train
50.772727
bitesofcode/projexui
projexui/widgets/xiconbutton.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xiconbutton.py#L146-L166
def buildIcon(icon):
    """
    Builds an icon from the inputed information.

    :param icon | <variant>
    """
    if icon is None:
        return QIcon()

    if type(icon) == buffer:
        try:
            icon = QIcon(projexui.generatePixmap(icon))
        except:
            icon = QIcon()
    else:
        try:
            icon = QIcon(icon)
        except:
            icon = QIcon()

    return icon
[ "def", "buildIcon", "(", "icon", ")", ":", "if", "icon", "is", "None", ":", "return", "QIcon", "(", ")", "if", "type", "(", "icon", ")", "==", "buffer", ":", "try", ":", "icon", "=", "QIcon", "(", "projexui", ".", "generatePixmap", "(", "icon", ")", ")", "except", ":", "icon", "=", "QIcon", "(", ")", "else", ":", "try", ":", "icon", "=", "QIcon", "(", "icon", ")", "except", ":", "icon", "=", "QIcon", "(", ")", "return", "icon" ]
Builds an icon from the inputed information. :param icon | <variant>
[ "Builds", "an", "icon", "from", "the", "inputed", "information", ".", ":", "param", "icon", "|", "<variant", ">" ]
python
train
24.333333
proteanhq/protean
src/protean/core/entity.py
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/core/entity.py#L533-L574
def update(self, *data, **kwargs) -> 'Entity':
    """Update a Record in the repository.

    Also performs unique validations before creating the entity.

    Supports both dictionary and keyword argument updates to the entity::

        dog.update({'age': 10})

        dog.update(age=10)

    :param data: Dictionary of values to be updated for the entity
    :param kwargs: keyword arguments with key-value pairs to be updated
    """
    logger.debug(f'Updating existing `{self.__class__.__name__}` object with id {self.id}')

    # Fetch Model class and connected repository from Repository Factory
    model_cls = repo_factory.get_model(self.__class__)
    repository = repo_factory.get_repository(self.__class__)

    try:
        # Update entity's data attributes
        self._update_data(*data, **kwargs)

        # Do unique checks, update the record and return the Entity
        self._validate_unique(create=False)

        # Perform Pre-Save Actions
        self.pre_save()

        repository.update(model_cls.from_entity(self))

        # Set Entity status to saved
        self.state_.mark_saved()

        # Perform Post-Save Actions
        self.post_save()

        return self
    except Exception:
        # FIXME Log Exception
        raise
[ "def", "update", "(", "self", ",", "*", "data", ",", "*", "*", "kwargs", ")", "->", "'Entity'", ":", "logger", ".", "debug", "(", "f'Updating existing `{self.__class__.__name__}` object with id {self.id}'", ")", "# Fetch Model class and connected repository from Repository Factory", "model_cls", "=", "repo_factory", ".", "get_model", "(", "self", ".", "__class__", ")", "repository", "=", "repo_factory", ".", "get_repository", "(", "self", ".", "__class__", ")", "try", ":", "# Update entity's data attributes", "self", ".", "_update_data", "(", "*", "data", ",", "*", "*", "kwargs", ")", "# Do unique checks, update the record and return the Entity", "self", ".", "_validate_unique", "(", "create", "=", "False", ")", "# Perform Pre-Save Actions", "self", ".", "pre_save", "(", ")", "repository", ".", "update", "(", "model_cls", ".", "from_entity", "(", "self", ")", ")", "# Set Entity status to saved", "self", ".", "state_", ".", "mark_saved", "(", ")", "# Perform Post-Save Actions", "self", ".", "post_save", "(", ")", "return", "self", "except", "Exception", ":", "# FIXME Log Exception", "raise" ]
Update a Record in the repository. Also performs unique validations before creating the entity. Supports both dictionary and keyword argument updates to the entity:: dog.update({'age': 10}) dog.update(age=10) :param data: Dictionary of values to be updated for the entity :param kwargs: keyword arguments with key-value pairs to be updated
[ "Update", "a", "Record", "in", "the", "repository", "." ]
python
train
31.785714
saulpw/visidata
visidata/canvas.py
https://github.com/saulpw/visidata/blob/32771e0cea6c24fc7902683d14558391395c591f/visidata/canvas.py#L428-L480
def _recursive_bezier(self, x1, y1, x2, y2, x3, y3, attr, row, level=0):
    'from http://www.antigrain.com/research/adaptive_bezier/'
    m_approximation_scale = 10.0
    m_distance_tolerance = (0.5 / m_approximation_scale) ** 2
    m_angle_tolerance = 1 * 2*math.pi/360  # 15 degrees in rads
    curve_angle_tolerance_epsilon = 0.01
    curve_recursion_limit = 32
    curve_collinearity_epsilon = 1e-30

    if level > curve_recursion_limit:
        return

    # Calculate all the mid-points of the line segments
    x12 = (x1 + x2) / 2
    y12 = (y1 + y2) / 2
    x23 = (x2 + x3) / 2
    y23 = (y2 + y3) / 2
    x123 = (x12 + x23) / 2
    y123 = (y12 + y23) / 2

    dx = x3-x1
    dy = y3-y1
    d = abs(((x2 - x3) * dy - (y2 - y3) * dx))

    if d > curve_collinearity_epsilon:
        # Regular care
        if d*d <= m_distance_tolerance * (dx*dx + dy*dy):
            # If the curvature doesn't exceed the distance_tolerance value, we tend to finish subdivisions.
            if m_angle_tolerance < curve_angle_tolerance_epsilon:
                self.point(x123, y123, attr, row)
                return

            # Angle & Cusp Condition
            da = abs(math.atan2(y3 - y2, x3 - x2) - math.atan2(y2 - y1, x2 - x1))
            if da >= math.pi:
                da = 2*math.pi - da

            if da < m_angle_tolerance:
                # Finally we can stop the recursion
                self.point(x123, y123, attr, row)
                return
    else:
        # Collinear case
        dx = x123 - (x1 + x3) / 2
        dy = y123 - (y1 + y3) / 2
        if dx*dx + dy*dy <= m_distance_tolerance:
            self.point(x123, y123, attr, row)
            return

    # Continue subdivision
    self._recursive_bezier(x1, y1, x12, y12, x123, y123, attr, row, level + 1)
    self._recursive_bezier(x123, y123, x23, y23, x3, y3, attr, row, level + 1)
[ "def", "_recursive_bezier", "(", "self", ",", "x1", ",", "y1", ",", "x2", ",", "y2", ",", "x3", ",", "y3", ",", "attr", ",", "row", ",", "level", "=", "0", ")", ":", "m_approximation_scale", "=", "10.0", "m_distance_tolerance", "=", "(", "0.5", "/", "m_approximation_scale", ")", "**", "2", "m_angle_tolerance", "=", "1", "*", "2", "*", "math", ".", "pi", "/", "360", "# 15 degrees in rads", "curve_angle_tolerance_epsilon", "=", "0.01", "curve_recursion_limit", "=", "32", "curve_collinearity_epsilon", "=", "1e-30", "if", "level", ">", "curve_recursion_limit", ":", "return", "# Calculate all the mid-points of the line segments", "x12", "=", "(", "x1", "+", "x2", ")", "/", "2", "y12", "=", "(", "y1", "+", "y2", ")", "/", "2", "x23", "=", "(", "x2", "+", "x3", ")", "/", "2", "y23", "=", "(", "y2", "+", "y3", ")", "/", "2", "x123", "=", "(", "x12", "+", "x23", ")", "/", "2", "y123", "=", "(", "y12", "+", "y23", ")", "/", "2", "dx", "=", "x3", "-", "x1", "dy", "=", "y3", "-", "y1", "d", "=", "abs", "(", "(", "(", "x2", "-", "x3", ")", "*", "dy", "-", "(", "y2", "-", "y3", ")", "*", "dx", ")", ")", "if", "d", ">", "curve_collinearity_epsilon", ":", "# Regular care", "if", "d", "*", "d", "<=", "m_distance_tolerance", "*", "(", "dx", "*", "dx", "+", "dy", "*", "dy", ")", ":", "# If the curvature doesn't exceed the distance_tolerance value, we tend to finish subdivisions.", "if", "m_angle_tolerance", "<", "curve_angle_tolerance_epsilon", ":", "self", ".", "point", "(", "x123", ",", "y123", ",", "attr", ",", "row", ")", "return", "# Angle & Cusp Condition", "da", "=", "abs", "(", "math", ".", "atan2", "(", "y3", "-", "y2", ",", "x3", "-", "x2", ")", "-", "math", ".", "atan2", "(", "y2", "-", "y1", ",", "x2", "-", "x1", ")", ")", "if", "da", ">=", "math", ".", "pi", ":", "da", "=", "2", "*", "math", ".", "pi", "-", "da", "if", "da", "<", "m_angle_tolerance", ":", "# Finally we can stop the recursion", "self", ".", "point", "(", "x123", ",", "y123", ",", "attr", ",", "row", ")", "return", "else", ":", "# Collinear case", "dx", "=", "x123", "-", "(", "x1", "+", "x3", ")", "/", "2", "dy", "=", "y123", "-", "(", "y1", "+", "y3", ")", "/", "2", "if", "dx", "*", "dx", "+", "dy", "*", "dy", "<=", "m_distance_tolerance", ":", "self", ".", "point", "(", "x123", ",", "y123", ",", "attr", ",", "row", ")", "return", "# Continue subdivision", "self", ".", "_recursive_bezier", "(", "x1", ",", "y1", ",", "x12", ",", "y12", ",", "x123", ",", "y123", ",", "attr", ",", "row", ",", "level", "+", "1", ")", "self", ".", "_recursive_bezier", "(", "x123", ",", "y123", ",", "x23", ",", "y23", ",", "x3", ",", "y3", ",", "attr", ",", "row", ",", "level", "+", "1", ")" ]
from http://www.antigrain.com/research/adaptive_bezier/
[ "from", "http", ":", "//", "www", ".", "antigrain", ".", "com", "/", "research", "/", "adaptive_bezier", "/" ]
python
train
37.754717
ambitioninc/django-regex-field
regex_field/fields.py
https://github.com/ambitioninc/django-regex-field/blob/0cf6f5f627002175e40474f75f76128830ae3cdf/regex_field/fields.py#L102-L108
def run_validators(self, value):
    """
    Make sure value is a string so it can run through django validators
    """
    value = self.to_python(value)
    value = self.value_to_string(value)
    return super(RegexField, self).run_validators(value)
[ "def", "run_validators", "(", "self", ",", "value", ")", ":", "value", "=", "self", ".", "to_python", "(", "value", ")", "value", "=", "self", ".", "value_to_string", "(", "value", ")", "return", "super", "(", "RegexField", ",", "self", ")", ".", "run_validators", "(", "value", ")" ]
Make sure value is a string so it can run through django validators
[ "Make", "sure", "value", "is", "a", "string", "so", "it", "can", "run", "through", "django", "validators" ]
python
train
38.428571
tensorflow/probability
tensorflow_probability/python/optimizer/nelder_mead.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/nelder_mead.py#L459-L470
def _accept_reflected_fn(simplex, objective_values, worst_index, reflected,
                         objective_at_reflected):
    """Creates the condition function pair for a reflection to be accepted."""
    def _replace_worst_with_reflected():
        next_simplex = _replace_at_index(simplex, worst_index, reflected)
        next_objective_values = _replace_at_index(objective_values, worst_index,
                                                  objective_at_reflected)
        return False, next_simplex, next_objective_values, 0
    return _replace_worst_with_reflected
[ "def", "_accept_reflected_fn", "(", "simplex", ",", "objective_values", ",", "worst_index", ",", "reflected", ",", "objective_at_reflected", ")", ":", "def", "_replace_worst_with_reflected", "(", ")", ":", "next_simplex", "=", "_replace_at_index", "(", "simplex", ",", "worst_index", ",", "reflected", ")", "next_objective_values", "=", "_replace_at_index", "(", "objective_values", ",", "worst_index", ",", "objective_at_reflected", ")", "return", "False", ",", "next_simplex", ",", "next_objective_values", ",", "0", "return", "_replace_worst_with_reflected" ]
Creates the condition function pair for a reflection to be accepted.
[ "Creates", "the", "condition", "function", "pair", "for", "a", "reflection", "to", "be", "accepted", "." ]
python
test
51.5
DataONEorg/d1_python
lib_common/src/d1_common/xml.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/xml.py#L238-L255
def are_equal_or_superset(superset_tree, base_tree):
    """Return True if ``superset_tree`` is equal to or a superset of ``base_tree``

    - Checks that all elements and attributes in ``superset_tree`` are present
      and contain the same values as in ``base_tree``. For elements, also checks
      that the order is the same.

    - Can be used for checking if one XML document is based on another, as long
      as all the information in ``base_tree`` is also present and unmodified in
      ``superset_tree``.
    """
    try:
        _compare_attr(superset_tree, base_tree)
        _compare_text(superset_tree, base_tree)
    except CompareError as e:
        logger.debug(str(e))
        return False
    return True
[ "def", "are_equal_or_superset", "(", "superset_tree", ",", "base_tree", ")", ":", "try", ":", "_compare_attr", "(", "superset_tree", ",", "base_tree", ")", "_compare_text", "(", "superset_tree", ",", "base_tree", ")", "except", "CompareError", "as", "e", ":", "logger", ".", "debug", "(", "str", "(", "e", ")", ")", "return", "False", "return", "True" ]
Return True if ``superset_tree`` is equal to or a superset of ``base_tree`` - Checks that all elements and attributes in ``superset_tree`` are present and contain the same values as in ``base_tree``. For elements, also checks that the order is the same. - Can be used for checking if one XML document is based on another, as long as all the information in ``base_tree`` is also present and unmodified in ``superset_tree``.
[ "Return", "True", "if", "superset_tree", "is", "equal", "to", "or", "a", "superset", "of", "base_tree" ]
python
train
39.333333
jkeyes/python-docraptor
docraptor/__init__.py
https://github.com/jkeyes/python-docraptor/blob/4be5b641f92820539b2c42165fec9251a6603dea/docraptor/__init__.py#L142-L153
def download(self, download_key, raise_exception_on_failure=False):
    """Download the file represented by the download_key."""
    query = {"output": "json", "user_credentials": self.api_key}
    resp = requests.get(
        "%sdownload/%s" % (self._url, download_key),
        params=query,
        timeout=self._timeout,
    )
    if raise_exception_on_failure and resp.status_code != 200:
        raise DocumentDownloadFailure(resp.content, resp.status_code)
    return resp
[ "def", "download", "(", "self", ",", "download_key", ",", "raise_exception_on_failure", "=", "False", ")", ":", "query", "=", "{", "\"output\"", ":", "\"json\"", ",", "\"user_credentials\"", ":", "self", ".", "api_key", "}", "resp", "=", "requests", ".", "get", "(", "\"%sdownload/%s\"", "%", "(", "self", ".", "_url", ",", "download_key", ")", ",", "params", "=", "query", ",", "timeout", "=", "self", ".", "_timeout", ",", ")", "if", "raise_exception_on_failure", "and", "resp", ".", "status_code", "!=", "200", ":", "raise", "DocumentDownloadFailure", "(", "resp", ".", "content", ",", "resp", ".", "status_code", ")", "return", "resp" ]
Download the file represented by the download_key.
[ "Download", "the", "file", "represented", "by", "the", "download_key", "." ]
python
train
42.416667
DataBiosphere/toil
src/toil/jobStores/fileJobStore.py
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/jobStores/fileJobStore.py#L628-L642
def _getTempFile(self, jobStoreID=None):
    """
    :rtype : file-descriptor, string, string is the absolute path to a temporary file within
    the given job's (referenced by jobStoreID's) temporary file directory. The file-descriptor
    is integer pointing to open operating system file handle. Should be closed using os.close()
    after writing some material to the file.
    """
    if jobStoreID != None:
        # Make a temporary file within the job's directory
        self._checkJobStoreId(jobStoreID)
        return tempfile.mkstemp(suffix=".tmp",
                                dir=os.path.join(self._getAbsPath(jobStoreID), "g"))
    else:
        # Make a temporary file within the temporary file structure
        return tempfile.mkstemp(prefix="tmp", suffix=".tmp", dir=self._getTempSharedDir())
[ "def", "_getTempFile", "(", "self", ",", "jobStoreID", "=", "None", ")", ":", "if", "jobStoreID", "!=", "None", ":", "# Make a temporary file within the job's directory", "self", ".", "_checkJobStoreId", "(", "jobStoreID", ")", "return", "tempfile", ".", "mkstemp", "(", "suffix", "=", "\".tmp\"", ",", "dir", "=", "os", ".", "path", ".", "join", "(", "self", ".", "_getAbsPath", "(", "jobStoreID", ")", ",", "\"g\"", ")", ")", "else", ":", "# Make a temporary file within the temporary file structure", "return", "tempfile", ".", "mkstemp", "(", "prefix", "=", "\"tmp\"", ",", "suffix", "=", "\".tmp\"", ",", "dir", "=", "self", ".", "_getTempSharedDir", "(", ")", ")" ]
:rtype : file-descriptor, string, string is the absolute path to a temporary file within the given job's (referenced by jobStoreID's) temporary file directory. The file-descriptor is integer pointing to open operating system file handle. Should be closed using os.close() after writing some material to the file.
[ ":", "rtype", ":", "file", "-", "descriptor", "string", "string", "is", "the", "absolute", "path", "to", "a", "temporary", "file", "within", "the", "given", "job", "s", "(", "referenced", "by", "jobStoreID", "s", ")", "temporary", "file", "directory", ".", "The", "file", "-", "descriptor", "is", "integer", "pointing", "to", "open", "operating", "system", "file", "handle", ".", "Should", "be", "closed", "using", "os", ".", "close", "()", "after", "writing", "some", "material", "to", "the", "file", "." ]
python
train
56.8
Azure/blobxfer
blobxfer/util.py
https://github.com/Azure/blobxfer/blob/3eccbe7530cc6a20ab2d30f9e034b6f021817f34/blobxfer/util.py#L326-L337
def parse_blob_snapshot_parameter(url):
    # type: (str) -> str
    """Retrieves the blob snapshot parameter from a url
    :param url str: blob url
    :rtype: str
    :return: snapshot parameter
    """
    if blob_is_snapshot(url):
        tmp = url.split('?snapshot=')
        if len(tmp) == 2:
            return tmp[0], tmp[1]
    return None
[ "def", "parse_blob_snapshot_parameter", "(", "url", ")", ":", "# type: (str) -> str", "if", "blob_is_snapshot", "(", "url", ")", ":", "tmp", "=", "url", ".", "split", "(", "'?snapshot='", ")", "if", "len", "(", "tmp", ")", "==", "2", ":", "return", "tmp", "[", "0", "]", ",", "tmp", "[", "1", "]", "return", "None" ]
Retrieves the blob snapshot parameter from a url :param url str: blob url :rtype: str :return: snapshot parameter
[ "Retrieves", "the", "blob", "snapshot", "parameter", "from", "a", "url", ":", "param", "url", "str", ":", "blob", "url", ":", "rtype", ":", "str", ":", "return", ":", "snapshot", "parameter" ]
python
train
28.166667
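A usage sketch for parse_blob_snapshot_parameter() above, assuming blobxfer is installed and that blob_is_snapshot() accepts the '?snapshot=' form shown; the account, container, and snapshot values are placeholders:

from blobxfer.util import parse_blob_snapshot_parameter

url = ("https://myaccount.blob.core.windows.net/mycontainer/blob.dat"
       "?snapshot=2017-02-23T22:21:14.8121864Z")
# Despite the "-> str" type comment, a matching url yields a (base, snapshot) pair.
base, snapshot = parse_blob_snapshot_parameter(url)
print(base)      # https://myaccount.blob.core.windows.net/mycontainer/blob.dat
print(snapshot)  # 2017-02-23T22:21:14.8121864Z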
nikhilkumarsingh/content-downloader
ctdl/ctdl.py
https://github.com/nikhilkumarsingh/content-downloader/blob/8b14af3a6eadcc43581e0425dc1d218208de12df/ctdl/ctdl.py#L216-L224
def show_filetypes(extensions):
    """
    function to show valid file extensions
    """
    for item in extensions.items():
        val = item[1]
        if type(item[1]) == list:
            val = ", ".join(str(x) for x in item[1])
        print("{0:4}: {1}".format(val, item[0]))
[ "def", "show_filetypes", "(", "extensions", ")", ":", "for", "item", "in", "extensions", ".", "items", "(", ")", ":", "val", "=", "item", "[", "1", "]", "if", "type", "(", "item", "[", "1", "]", ")", "==", "list", ":", "val", "=", "\", \"", ".", "join", "(", "str", "(", "x", ")", "for", "x", "in", "item", "[", "1", "]", ")", "print", "(", "\"{0:4}: {1}\"", ".", "format", "(", "val", ",", "item", "[", "0", "]", ")", ")" ]
function to show valid file extensions
[ "function", "to", "show", "valid", "file", "extensions" ]
python
train
26.333333
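An illustration of show_filetypes() above with a made-up extensions mapping, assuming the ctdl package is importable; note the function prints each value before its key:

from ctdl.ctdl import show_filetypes

# Values may be a single extension or a list; lists are joined with ", ".
extensions = {'documents': ['pdf', 'docx'], 'audio': 'mp3'}
show_filetypes(extensions)
# prints lines of the form "pdf, docx: documents" and "mp3 : audio"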
bachiraoun/pyrep
OldRepository.py
https://github.com/bachiraoun/pyrep/blob/0449bf2fad3e3e8dda855d4686a8869efeefd433/OldRepository.py#L292-L322
def get_pickling_errors(obj, seen=None):
    """Investigate pickling errors."""
    if seen == None:
        seen = []
    if hasattr(obj, "__getstate__"):
        state = obj.__getstate__()
    #elif hasattr(obj, "__dict__"):
    #    state = obj.__dict__
    else:
        return None
    #try:
    #    state = obj.__getstate__()
    #except AttributeError as e:
    #    #state = obj.__dict__
    #    return str(e)
    if state == None:
        return 'object state is None'
    if isinstance(state, tuple):
        if not isinstance(state[0], dict):
            state = state[1]
        else:
            state = state[0].update(state[1])
    result = {}
    for i in state:
        try:
            pickle.dumps(state[i], protocol=2)
        except pickle.PicklingError as e:
            if not state[i] in seen:
                seen.append(state[i])
                result[i] = get_pickling_errors(state[i], seen)
    return result
[ "def", "get_pickling_errors", "(", "obj", ",", "seen", "=", "None", ")", ":", "if", "seen", "==", "None", ":", "seen", "=", "[", "]", "if", "hasattr", "(", "obj", ",", "\"__getstate__\"", ")", ":", "state", "=", "obj", ".", "__getstate__", "(", ")", "#elif hasattr(obj, \"__dict__\"):", "# state = obj.__dict__", "else", ":", "return", "None", "#try:", "# state = obj.__getstate__()", "#except AttributeError as e:", "# #state = obj.__dict__", "# return str(e)", "if", "state", "==", "None", ":", "return", "'object state is None'", "if", "isinstance", "(", "state", ",", "tuple", ")", ":", "if", "not", "isinstance", "(", "state", "[", "0", "]", ",", "dict", ")", ":", "state", "=", "state", "[", "1", "]", "else", ":", "state", "=", "state", "[", "0", "]", ".", "update", "(", "state", "[", "1", "]", ")", "result", "=", "{", "}", "for", "i", "in", "state", ":", "try", ":", "pickle", ".", "dumps", "(", "state", "[", "i", "]", ",", "protocol", "=", "2", ")", "except", "pickle", ".", "PicklingError", "as", "e", ":", "if", "not", "state", "[", "i", "]", "in", "seen", ":", "seen", ".", "append", "(", "state", "[", "i", "]", ")", "result", "[", "i", "]", "=", "get_pickling_errors", "(", "state", "[", "i", "]", ",", "seen", ")", "return", "result" ]
Investigate pickling errors.
[ "Investigate", "pickling", "errors", "." ]
python
valid
29.064516
benedictpaten/sonLib
bioio.py
https://github.com/benedictpaten/sonLib/blob/1decb75bb439b70721ec776f685ce98e25217d26/bioio.py#L569-L584
def destroyTempDir(self, tempDir):
    """Removes a temporary directory in the temp file dir, checking its in the temp file tree.

    The dir will be removed regardless of if it is empty.
    """
    #Do basic assertions for goodness of the function
    assert os.path.isdir(tempDir)
    assert os.path.commonprefix((self.rootDir, tempDir)) == self.rootDir #Checks file is part of tree
    #Update stats.
    self.tempFilesDestroyed += 1
    #Do the actual removal
    try:
        os.rmdir(tempDir)
    except OSError:
        shutil.rmtree(tempDir)
        #system("rm -rf %s" % tempDir)
    self.__destroyFile(tempDir)
[ "def", "destroyTempDir", "(", "self", ",", "tempDir", ")", ":", "#Do basic assertions for goodness of the function", "assert", "os", ".", "path", ".", "isdir", "(", "tempDir", ")", "assert", "os", ".", "path", ".", "commonprefix", "(", "(", "self", ".", "rootDir", ",", "tempDir", ")", ")", "==", "self", ".", "rootDir", "#Checks file is part of tree", "#Update stats.", "self", ".", "tempFilesDestroyed", "+=", "1", "#Do the actual removal", "try", ":", "os", ".", "rmdir", "(", "tempDir", ")", "except", "OSError", ":", "shutil", ".", "rmtree", "(", "tempDir", ")", "#system(\"rm -rf %s\" % tempDir)", "self", ".", "__destroyFile", "(", "tempDir", ")" ]
Removes a temporary directory in the temp file dir, checking its in the temp file tree. The dir will be removed regardless of if it is empty.
[ "Removes", "a", "temporary", "directory", "in", "the", "temp", "file", "dir", "checking", "its", "in", "the", "temp", "file", "tree", ".", "The", "dir", "will", "be", "removed", "regardless", "of", "if", "it", "is", "empty", "." ]
python
train
41.625
rstoneback/pysat
pysat/_constellation.py
https://github.com/rstoneback/pysat/blob/4ae1afd80e15e4449397d39dce8c3e969c32c422/pysat/_constellation.py#L254-L451
def difference(self, instrument1, instrument2, bounds, data_labels, cost_function): """ Calculates the difference in signals from multiple instruments within the given bounds. Parameters ---------- instrument1 : Instrument Information must already be loaded into the instrument. instrument2 : Instrument Information must already be loaded into the instrument. bounds : list of tuples in the form (inst1_label, inst2_label, min, max, max_difference) inst1_label are inst2_label are labels for the data in instrument1 and instrument2 min and max are bounds on the data considered max_difference is the maximum difference between two points for the difference to be calculated data_labels : list of tuples of data labels The first key is used to access data in s1 and the second data in s2. cost_function : function function that operates on two rows of the instrument data. used to determine the distance between two points for finding closest points Returns ------- data_df: pandas DataFrame Each row has a point from instrument1, with the keys preceded by '1_', and a point within bounds on that point from instrument2 with the keys preceded by '2_', and the difference between the instruments' data for all the labels in data_labels Created as part of a Spring 2018 UTDesign project. """ """ Draft Pseudocode ---------------- Check integrity of inputs. Let STD_LABELS be the constant tuple: ("time", "lat", "long", "alt") Note: modify so that user can override labels for time, lat, long, data for each satelite. // We only care about the data currently loaded into each object. Let start be the later of the datetime of the first piece of data loaded into s1, the first piece of data loaded into s2, and the user supplied start bound. Let end be the later of the datetime of the first piece of data loaded into s1, the first piece of data loaded into s2, and the user supplied end bound. If start is after end, raise an error. // Let data be the 2D array of deques holding each piece // of data, sorted into bins by lat/long/alt. Let s1_data (resp s2_data) be data from s1.data, s2.data filtered by user-provided lat/long/alt bounds, time bounds calculated. Let data be a dictionary of lists with the keys [ dl1 for dl1, dl2 in data_labels ] + STD_LABELS + [ lb+"2" for lb in STD_LABELS ] For each piece of data s1_point in s1_data: # Hopefully np.where is very good, because this # runs O(n) times. # We could try reusing selections, maybe, if needed. # This would probably involve binning. Let s2_near be the data from s2.data within certain bounds on lat/long/alt/time using 8 statements to numpy.where. We can probably get those defaults from the user or handy constants / config? # We could try a different algorithm for closest pairs # of points. Let distance be the numpy array representing the distance between s1_point and each point in s2_near. # S: Difference for others: change this line. For each of those, calculate the spatial difference from the s1 using lat/long/alt. If s2_near is empty; break loop. Let s2_nearest be the point in s2_near corresponding to the lowest distance. Append to data: a point, indexed by the time from s1_point, containing the following data: # note Let n be the length of data["time"]. For each key in data: Assert len(data[key]) == n End for. # Create data row to pass to pandas. Let row be an empty dict. For dl1, dl2 in data_labels: Append s1_point[dl1] - s2_nearest[dl2] to data[dl1]. 
For key in STD_LABELS: Append s1_point[translate[key]] to data[key] key = key+"2" Append s2_nearest[translate[key]] to data[key] Let data_df be a pandas dataframe created from the data in data. return { 'data': data_df, 'start':start, 'end':end } """ labels = [dl1 for dl1, dl2 in data_labels] + ['1_'+b[0] for b in bounds] + ['2_'+b[1] for b in bounds] + ['dist'] data = {label: [] for label in labels} # Apply bounds inst1 = instrument1.data inst2 = instrument2.data for b in bounds: label1 = b[0] label2 = b[1] low = b[2] high = b[3] data1 = inst1[label1] ind1 = np.where((data1 >= low) & (data1 < high)) inst1 = inst1.iloc[ind1] data2 = inst2[label2] ind2 = np.where((data2 >= low) & (data2 < high)) inst2 = inst2.iloc[ind2] for i, s1_point in inst1.iterrows(): # Gets points in instrument2 within the given bounds s2_near = instrument2.data for b in bounds: label1 = b[0] label2 = b[1] s1_val = s1_point[label1] max_dist = b[4] minbound = s1_val - max_dist maxbound = s1_val + max_dist data2 = s2_near[label2] indices = np.where((data2 >= minbound) & (data2 < maxbound)) s2_near = s2_near.iloc[indices] # Finds nearest point to s1_point in s2_near s2_nearest = None min_dist = float('NaN') for j, s2_point in s2_near.iterrows(): dist = cost_function(s1_point, s2_point) if dist < min_dist or min_dist != min_dist: min_dist = dist s2_nearest = s2_point data['dist'].append(min_dist) # Append difference to data dict for dl1, dl2 in data_labels: if s2_nearest is not None: data[dl1].append(s1_point[dl1] - s2_nearest[dl2]) else: data[dl1].append(float('NaN')) # Append the rest of the row for b in bounds: label1 = b[0] label2 = b[1] data['1_'+label1].append(s1_point[label1]) if s2_nearest is not None: data['2_'+label2].append(s2_nearest[label2]) else: data['2_'+label2].append(float('NaN')) data_df = pds.DataFrame(data=data) return data_df
[ "def", "difference", "(", "self", ",", "instrument1", ",", "instrument2", ",", "bounds", ",", "data_labels", ",", "cost_function", ")", ":", "\"\"\"\n Draft Pseudocode\n ----------------\n Check integrity of inputs.\n\n Let STD_LABELS be the constant tuple:\n (\"time\", \"lat\", \"long\", \"alt\")\n\n Note: modify so that user can override labels for time,\n lat, long, data for each satelite.\n\n // We only care about the data currently loaded\n into each object.\n\n Let start be the later of the datetime of the\n first piece of data loaded into s1, the first\n piece of data loaded into s2, and the user\n supplied start bound.\n\n Let end be the later of the datetime of the first\n piece of data loaded into s1, the first piece\n of data loaded into s2, and the user supplied\n end bound.\n\n If start is after end, raise an error.\n\n // Let data be the 2D array of deques holding each piece\n // of data, sorted into bins by lat/long/alt.\n\n Let s1_data (resp s2_data) be data from s1.data, s2.data\n filtered by user-provided lat/long/alt bounds, time bounds\n calculated.\n\n Let data be a dictionary of lists with the keys\n [ dl1 for dl1, dl2 in data_labels ] +\n STD_LABELS +\n [ lb+\"2\" for lb in STD_LABELS ]\n\n For each piece of data s1_point in s1_data:\n\n # Hopefully np.where is very good, because this\n # runs O(n) times.\n # We could try reusing selections, maybe, if needed.\n # This would probably involve binning.\n Let s2_near be the data from s2.data within certain\n bounds on lat/long/alt/time using 8 statements to\n numpy.where. We can probably get those defaults from\n the user or handy constants / config?\n\n # We could try a different algorithm for closest pairs\n # of points.\n\n Let distance be the numpy array representing the\n distance between s1_point and each point in s2_near.\n\n # S: Difference for others: change this line.\n For each of those, calculate the spatial difference\n from the s1 using lat/long/alt. 
If s2_near is\n empty; break loop.\n\n Let s2_nearest be the point in s2_near corresponding\n to the lowest distance.\n\n Append to data: a point, indexed by the time from\n s1_point, containing the following data:\n\n # note\n Let n be the length of data[\"time\"].\n For each key in data:\n Assert len(data[key]) == n\n End for.\n\n # Create data row to pass to pandas.\n Let row be an empty dict.\n For dl1, dl2 in data_labels:\n Append s1_point[dl1] - s2_nearest[dl2] to data[dl1].\n\n For key in STD_LABELS:\n Append s1_point[translate[key]] to data[key]\n key = key+\"2\"\n Append s2_nearest[translate[key]] to data[key]\n\n Let data_df be a pandas dataframe created from the data\n in data.\n\n return { 'data': data_df, 'start':start, 'end':end }\n \"\"\"", "labels", "=", "[", "dl1", "for", "dl1", ",", "dl2", "in", "data_labels", "]", "+", "[", "'1_'", "+", "b", "[", "0", "]", "for", "b", "in", "bounds", "]", "+", "[", "'2_'", "+", "b", "[", "1", "]", "for", "b", "in", "bounds", "]", "+", "[", "'dist'", "]", "data", "=", "{", "label", ":", "[", "]", "for", "label", "in", "labels", "}", "# Apply bounds", "inst1", "=", "instrument1", ".", "data", "inst2", "=", "instrument2", ".", "data", "for", "b", "in", "bounds", ":", "label1", "=", "b", "[", "0", "]", "label2", "=", "b", "[", "1", "]", "low", "=", "b", "[", "2", "]", "high", "=", "b", "[", "3", "]", "data1", "=", "inst1", "[", "label1", "]", "ind1", "=", "np", ".", "where", "(", "(", "data1", ">=", "low", ")", "&", "(", "data1", "<", "high", ")", ")", "inst1", "=", "inst1", ".", "iloc", "[", "ind1", "]", "data2", "=", "inst2", "[", "label2", "]", "ind2", "=", "np", ".", "where", "(", "(", "data2", ">=", "low", ")", "&", "(", "data2", "<", "high", ")", ")", "inst2", "=", "inst2", ".", "iloc", "[", "ind2", "]", "for", "i", ",", "s1_point", "in", "inst1", ".", "iterrows", "(", ")", ":", "# Gets points in instrument2 within the given bounds", "s2_near", "=", "instrument2", ".", "data", "for", "b", "in", "bounds", ":", "label1", "=", "b", "[", "0", "]", "label2", "=", "b", "[", "1", "]", "s1_val", "=", "s1_point", "[", "label1", "]", "max_dist", "=", "b", "[", "4", "]", "minbound", "=", "s1_val", "-", "max_dist", "maxbound", "=", "s1_val", "+", "max_dist", "data2", "=", "s2_near", "[", "label2", "]", "indices", "=", "np", ".", "where", "(", "(", "data2", ">=", "minbound", ")", "&", "(", "data2", "<", "maxbound", ")", ")", "s2_near", "=", "s2_near", ".", "iloc", "[", "indices", "]", "# Finds nearest point to s1_point in s2_near", "s2_nearest", "=", "None", "min_dist", "=", "float", "(", "'NaN'", ")", "for", "j", ",", "s2_point", "in", "s2_near", ".", "iterrows", "(", ")", ":", "dist", "=", "cost_function", "(", "s1_point", ",", "s2_point", ")", "if", "dist", "<", "min_dist", "or", "min_dist", "!=", "min_dist", ":", "min_dist", "=", "dist", "s2_nearest", "=", "s2_point", "data", "[", "'dist'", "]", ".", "append", "(", "min_dist", ")", "# Append difference to data dict", "for", "dl1", ",", "dl2", "in", "data_labels", ":", "if", "s2_nearest", "is", "not", "None", ":", "data", "[", "dl1", "]", ".", "append", "(", "s1_point", "[", "dl1", "]", "-", "s2_nearest", "[", "dl2", "]", ")", "else", ":", "data", "[", "dl1", "]", ".", "append", "(", "float", "(", "'NaN'", ")", ")", "# Append the rest of the row", "for", "b", "in", "bounds", ":", "label1", "=", "b", "[", "0", "]", "label2", "=", "b", "[", "1", "]", "data", "[", "'1_'", "+", "label1", "]", ".", "append", "(", "s1_point", "[", "label1", "]", ")", "if", "s2_nearest", "is", "not", "None", ":", 
"data", "[", "'2_'", "+", "label2", "]", ".", "append", "(", "s2_nearest", "[", "label2", "]", ")", "else", ":", "data", "[", "'2_'", "+", "label2", "]", ".", "append", "(", "float", "(", "'NaN'", ")", ")", "data_df", "=", "pds", ".", "DataFrame", "(", "data", "=", "data", ")", "return", "data_df" ]
Calculates the difference in signals from multiple instruments within the given bounds. Parameters ---------- instrument1 : Instrument Information must already be loaded into the instrument. instrument2 : Instrument Information must already be loaded into the instrument. bounds : list of tuples in the form (inst1_label, inst2_label, min, max, max_difference) inst1_label are inst2_label are labels for the data in instrument1 and instrument2 min and max are bounds on the data considered max_difference is the maximum difference between two points for the difference to be calculated data_labels : list of tuples of data labels The first key is used to access data in s1 and the second data in s2. cost_function : function function that operates on two rows of the instrument data. used to determine the distance between two points for finding closest points Returns ------- data_df: pandas DataFrame Each row has a point from instrument1, with the keys preceded by '1_', and a point within bounds on that point from instrument2 with the keys preceded by '2_', and the difference between the instruments' data for all the labels in data_labels Created as part of a Spring 2018 UTDesign project.
[ "Calculates", "the", "difference", "in", "signals", "from", "multiple", "instruments", "within", "the", "given", "bounds", "." ]
python
train
35.131313
portfoliome/foil
foil/iteration.py
https://github.com/portfoliome/foil/blob/b66d8cf4ab048a387d8c7a033b47e922ed6917d6/foil/iteration.py#L5-L12
def chunks(items, chunksize):
    """Turn generator sequence into sequence of chunks."""
    items = iter(items)
    for first in items:
        chunk = chain((first,), islice(items, chunksize - 1))
        yield chunk
        deque(chunk, 0)
[ "def", "chunks", "(", "items", ",", "chunksize", ")", ":", "items", "=", "iter", "(", "items", ")", "for", "first", "in", "items", ":", "chunk", "=", "chain", "(", "(", "first", ",", ")", ",", "islice", "(", "items", ",", "chunksize", "-", "1", ")", ")", "yield", "chunk", "deque", "(", "chunk", ",", "0", ")" ]
Turn generator sequence into sequence of chunks.
[ "Turn", "generator", "sequence", "into", "sequence", "of", "chunks", "." ]
python
train
29.5
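A quick usage sketch for the chunks() generator documented in the record above; each yielded chunk is itself an iterator, and the trailing deque call drains whatever the caller left unconsumed so the next chunk starts at the right element:

    from collections import deque
    from itertools import chain, islice

    def chunks(items, chunksize):
        items = iter(items)
        for first in items:
            chunk = chain((first,), islice(items, chunksize - 1))
            yield chunk
            deque(chunk, 0)  # exhaust leftovers without storing them

    for chunk in chunks(range(10), 4):
        print(list(chunk))
    # [0, 1, 2, 3]
    # [4, 5, 6, 7]
    # [8, 9]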
pypa/pipenv
pipenv/vendor/distlib/manifest.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/manifest.py#L84-L92
def add(self, item): """ Add a file to the manifest. :param item: The pathname to add. This can be relative to the base. """ if not item.startswith(self.prefix): item = os.path.join(self.base, item) self.files.add(os.path.normpath(item))
[ "def", "add", "(", "self", ",", "item", ")", ":", "if", "not", "item", ".", "startswith", "(", "self", ".", "prefix", ")", ":", "item", "=", "os", ".", "path", ".", "join", "(", "self", ".", "base", ",", "item", ")", "self", ".", "files", ".", "add", "(", "os", ".", "path", ".", "normpath", "(", "item", ")", ")" ]
Add a file to the manifest. :param item: The pathname to add. This can be relative to the base.
[ "Add", "a", "file", "to", "the", "manifest", "." ]
python
train
32.222222
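A hedged usage sketch for the add() method in the record above, via distlib's public Manifest class (the project path is hypothetical):

    from distlib.manifest import Manifest

    m = Manifest('/path/to/project')        # base directory for relative entries
    m.add('README.txt')                     # relative: joined onto the base
    m.add('/path/to/project/setup.py')      # already under the prefix: kept as-is
    print(sorted(m.files))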
mojaie/chorus
chorus/draw/svg.py
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/draw/svg.py#L80-L87
def save(self, path): """Save svg as file(.svg) Args: path (str): destination to save file """ with open(path, 'w') as f: f.write(self.contents())
[ "def", "save", "(", "self", ",", "path", ")", ":", "with", "open", "(", "path", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "self", ".", "contents", "(", ")", ")" ]
Save svg as file(.svg) Args: path (str): destination to save file
[ "Save", "svg", "as", "file", "(", ".", "svg", ")" ]
python
train
24.5
ekmmetering/ekmmeters
ekmmeters.py
https://github.com/ekmmetering/ekmmeters/blob/b3748bdf30263bfa46ea40157bdf8df2522e1904/ekmmeters.py#L3141-L3169
def request(self, send_terminator = False): """Required request() override for v3 and standard method to read meter. Args: send_terminator (bool): Send termination string at end of read. Returns: bool: CRC request flag result from most recent read """ self.m_a_crc = False start_context = self.getContext() self.setContext("request[v3A]") try: self.m_serial_port.write("2f3f".decode("hex") + self.m_meter_address + "210d0a".decode("hex")) self.m_raw_read_a = self.m_serial_port.getResponse(self.getContext()) unpacked_read_a = self.unpackStruct(self.m_raw_read_a, self.m_blk_a) self.convertData(unpacked_read_a, self.m_blk_a, 1) self.m_a_crc = self.crcMeterRead(self.m_raw_read_a, self.m_blk_a) if send_terminator: self.serialPostEnd() self.calculateFields() self.makeReturnFormat() except: ekm_log(traceback.format_exc(sys.exc_info())) self.setContext(start_context) return self.m_a_crc
[ "def", "request", "(", "self", ",", "send_terminator", "=", "False", ")", ":", "self", ".", "m_a_crc", "=", "False", "start_context", "=", "self", ".", "getContext", "(", ")", "self", ".", "setContext", "(", "\"request[v3A]\"", ")", "try", ":", "self", ".", "m_serial_port", ".", "write", "(", "\"2f3f\"", ".", "decode", "(", "\"hex\"", ")", "+", "self", ".", "m_meter_address", "+", "\"210d0a\"", ".", "decode", "(", "\"hex\"", ")", ")", "self", ".", "m_raw_read_a", "=", "self", ".", "m_serial_port", ".", "getResponse", "(", "self", ".", "getContext", "(", ")", ")", "unpacked_read_a", "=", "self", ".", "unpackStruct", "(", "self", ".", "m_raw_read_a", ",", "self", ".", "m_blk_a", ")", "self", ".", "convertData", "(", "unpacked_read_a", ",", "self", ".", "m_blk_a", ",", "1", ")", "self", ".", "m_a_crc", "=", "self", ".", "crcMeterRead", "(", "self", ".", "m_raw_read_a", ",", "self", ".", "m_blk_a", ")", "if", "send_terminator", ":", "self", ".", "serialPostEnd", "(", ")", "self", ".", "calculateFields", "(", ")", "self", ".", "makeReturnFormat", "(", ")", "except", ":", "ekm_log", "(", "traceback", ".", "format_exc", "(", "sys", ".", "exc_info", "(", ")", ")", ")", "self", ".", "setContext", "(", "start_context", ")", "return", "self", ".", "m_a_crc" ]
Required request() override for v3 and standard method to read meter. Args: send_terminator (bool): Send termination string at end of read. Returns: bool: CRC request flag result from most recent read
[ "Required", "request", "()", "override", "for", "v3", "and", "standard", "method", "to", "read", "meter", "." ]
python
test
40.413793
devopshq/youtrack
youtrack/connection.py
https://github.com/devopshq/youtrack/blob/c4ec19aca253ae30ac8eee7976a2f330e480a73b/youtrack/connection.py#L312-L315
def get_user(self, login): """ http://confluence.jetbrains.net/display/YTD2/GET+user """ return youtrack.User(self._get("/admin/user/" + urlquote(login.encode('utf8'))), self)
[ "def", "get_user", "(", "self", ",", "login", ")", ":", "return", "youtrack", ".", "User", "(", "self", ".", "_get", "(", "\"/admin/user/\"", "+", "urlquote", "(", "login", ".", "encode", "(", "'utf8'", ")", ")", ")", ",", "self", ")" ]
http://confluence.jetbrains.net/display/YTD2/GET+user
[ "http", ":", "//", "confluence", ".", "jetbrains", ".", "net", "/", "display", "/", "YTD2", "/", "GET", "+", "user" ]
python
train
49
dstufft/crust
crust/query.py
https://github.com/dstufft/crust/blob/5d4011ecace12fd3f68a03a17dbefb78390a9fc0/crust/query.py#L335-L343
def iterator(self): """ An iterator over the results from applying this QuerySet to the api. """ for item in self.query.results(): obj = self.resource(**item) yield obj
[ "def", "iterator", "(", "self", ")", ":", "for", "item", "in", "self", ".", "query", ".", "results", "(", ")", ":", "obj", "=", "self", ".", "resource", "(", "*", "*", "item", ")", "yield", "obj" ]
An iterator over the results from applying this QuerySet to the api.
[ "An", "iterator", "over", "the", "results", "from", "applying", "this", "QuerySet", "to", "the", "api", "." ]
python
train
24.222222
J535D165/recordlinkage
recordlinkage/datasets/febrl.py
https://github.com/J535D165/recordlinkage/blob/87a5f4af904e0834047cd07ff1c70146b1e6d693/recordlinkage/datasets/febrl.py#L177-L217
def load_febrl4(return_links=False): """Load the FEBRL 4 datasets. The Freely Extensible Biomedical Record Linkage (Febrl) package is distributed with a dataset generator and four datasets generated with the generator. This function returns the fourth Febrl dataset as a :class:`pandas.DataFrame`. *"Generated as one data set with 10000 records (5000 originals and 5000 duplicates, with one duplicate per original), the originals have been split from the duplicates, into dataset4a.csv (containing the 5000 original records) and dataset4b.csv (containing the 5000 duplicate records) These two data sets can be used for testing linkage procedures."* Parameters ---------- return_links: bool When True, the function returns also the true links. Returns ------- (pandas.DataFrame, pandas.DataFrame) A :class:`pandas.DataFrame` with Febrl dataset4a.csv and a pandas dataframe with Febrl dataset4b.csv. When return_links is True, the function returns also the true links. """ df_a = _febrl_load_data('dataset4a.csv') df_b = _febrl_load_data('dataset4b.csv') if return_links: links = pandas.MultiIndex.from_arrays([ ["rec-{}-org".format(i) for i in range(0, 5000)], ["rec-{}-dup-0".format(i) for i in range(0, 5000)]] ) return df_a, df_b, links else: return df_a, df_b
[ "def", "load_febrl4", "(", "return_links", "=", "False", ")", ":", "df_a", "=", "_febrl_load_data", "(", "'dataset4a.csv'", ")", "df_b", "=", "_febrl_load_data", "(", "'dataset4b.csv'", ")", "if", "return_links", ":", "links", "=", "pandas", ".", "MultiIndex", ".", "from_arrays", "(", "[", "[", "\"rec-{}-org\"", ".", "format", "(", "i", ")", "for", "i", "in", "range", "(", "0", ",", "5000", ")", "]", ",", "[", "\"rec-{}-dup-0\"", ".", "format", "(", "i", ")", "for", "i", "in", "range", "(", "0", ",", "5000", ")", "]", "]", ")", "return", "df_a", ",", "df_b", ",", "links", "else", ":", "return", "df_a", ",", "df_b" ]
Load the FEBRL 4 datasets. The Freely Extensible Biomedical Record Linkage (Febrl) package is distributed with a dataset generator and four datasets generated with the generator. This function returns the fourth Febrl dataset as a :class:`pandas.DataFrame`. *"Generated as one data set with 10000 records (5000 originals and 5000 duplicates, with one duplicate per original), the originals have been split from the duplicates, into dataset4a.csv (containing the 5000 original records) and dataset4b.csv (containing the 5000 duplicate records) These two data sets can be used for testing linkage procedures."* Parameters ---------- return_links: bool When True, the function returns also the true links. Returns ------- (pandas.DataFrame, pandas.DataFrame) A :class:`pandas.DataFrame` with Febrl dataset4a.csv and a pandas dataframe with Febrl dataset4b.csv. When return_links is True, the function returns also the true links.
[ "Load", "the", "FEBRL", "4", "datasets", "." ]
python
train
35.756098
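Usage sketch for the record above; the FEBRL CSV files ship with recordlinkage, so this runs offline:

    from recordlinkage.datasets import load_febrl4

    df_a, df_b, true_links = load_febrl4(return_links=True)
    print(len(df_a), len(df_b), len(true_links))   # 5000 5000 5000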
rigetti/pyquil
pyquil/gates.py
https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/gates.py#L749-L761
def GT(classical_reg1, classical_reg2, classical_reg3): """ Produce an GT instruction. :param classical_reg1: Memory address to which to store the comparison result. :param classical_reg2: Left comparison operand. :param classical_reg3: Right comparison operand. :return: A ClassicalGreaterThan instance. """ classical_reg1, classical_reg2, classical_reg3 = prepare_ternary_operands(classical_reg1, classical_reg2, classical_reg3) return ClassicalGreaterThan(classical_reg1, classical_reg2, classical_reg3)
[ "def", "GT", "(", "classical_reg1", ",", "classical_reg2", ",", "classical_reg3", ")", ":", "classical_reg1", ",", "classical_reg2", ",", "classical_reg3", "=", "prepare_ternary_operands", "(", "classical_reg1", ",", "classical_reg2", ",", "classical_reg3", ")", "return", "ClassicalGreaterThan", "(", "classical_reg1", ",", "classical_reg2", ",", "classical_reg3", ")" ]
Produce an GT instruction. :param classical_reg1: Memory address to which to store the comparison result. :param classical_reg2: Left comparison operand. :param classical_reg3: Right comparison operand. :return: A ClassicalGreaterThan instance.
[ "Produce", "an", "GT", "instruction", "." ]
python
train
52.846154
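A hedged sketch of how the GT instruction in the record above might sit inside a pyquil Program; the declared memory names are made up, and the exact declare() signature should be checked against the pyquil version in use:

    from pyquil import Program
    from pyquil.gates import GT

    p = Program()
    flag = p.declare('flag', 'BIT')      # receives the comparison result
    a = p.declare('a', 'INTEGER')
    b = p.declare('b', 'INTEGER')
    p += GT(flag, a, b)                  # flag <- (a > b)
    print(p)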
saltstack/salt
salt/engines/libvirt_events.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/engines/libvirt_events.py#L294-L303
def _domain_event_io_error_cb(conn, domain, srcpath, devalias, action, reason, opaque): ''' Domain I/O Error events handler ''' _salt_send_domain_event(opaque, conn, domain, opaque['event'], { 'srcPath': srcpath, 'dev': devalias, 'action': _get_libvirt_enum_string('VIR_DOMAIN_EVENT_IO_ERROR_', action), 'reason': reason })
[ "def", "_domain_event_io_error_cb", "(", "conn", ",", "domain", ",", "srcpath", ",", "devalias", ",", "action", ",", "reason", ",", "opaque", ")", ":", "_salt_send_domain_event", "(", "opaque", ",", "conn", ",", "domain", ",", "opaque", "[", "'event'", "]", ",", "{", "'srcPath'", ":", "srcpath", ",", "'dev'", ":", "devalias", ",", "'action'", ":", "_get_libvirt_enum_string", "(", "'VIR_DOMAIN_EVENT_IO_ERROR_'", ",", "action", ")", ",", "'reason'", ":", "reason", "}", ")" ]
Domain I/O Error events handler
[ "Domain", "I", "/", "O", "Error", "events", "handler" ]
python
train
36.6
kata198/func_timeout
func_timeout/dafunc.py
https://github.com/kata198/func_timeout/blob/b427da2517266b31aa0d17c46e9cbeb5add8ef73/func_timeout/dafunc.py#L33-L111
def func_timeout(timeout, func, args=(), kwargs=None): ''' func_timeout - Runs the given function for up to #timeout# seconds. Raises any exceptions #func# would raise, returns what #func# would return (unless timeout is exceeded), in which case it raises FunctionTimedOut @param timeout <float> - Maximum number of seconds to run #func# before terminating @param func <function> - The function to call @param args <tuple> - Any ordered arguments to pass to the function @param kwargs <dict/None> - Keyword arguments to pass to the function. @raises - FunctionTimedOut if #timeout# is exceeded, otherwise anything #func# could raise will be raised If the timeout is exceeded, FunctionTimedOut will be raised within the context of the called function every two seconds until it terminates, but will not block the calling thread (a new thread will be created to perform the join). If possible, you should try/except FunctionTimedOut to return cleanly, but in most cases it will 'just work'. @return - The return value that #func# gives ''' if not kwargs: kwargs = {} if not args: args = () ret = [] exception = [] isStopped = False def funcwrap(args2, kwargs2): try: ret.append( func(*args2, **kwargs2) ) except FunctionTimedOut: # Don't print traceback to stderr if we time out pass except Exception as e: exc_info = sys.exc_info() if isStopped is False: # Assemble the alternate traceback, excluding this function # from the trace (by going to next frame) # Pytohn3 reads native from __traceback__, # python2 has a different form for "raise" e.__traceback__ = exc_info[2].tb_next exception.append( e ) thread = StoppableThread(target=funcwrap, args=(args, kwargs)) thread.daemon = True thread.start() thread.join(timeout) stopException = None if thread.isAlive(): isStopped = True class FunctionTimedOutTempType(FunctionTimedOut): def __init__(self): return FunctionTimedOut.__init__(self, '', timeout, func, args, kwargs) FunctionTimedOutTemp = type('FunctionTimedOut' + str( hash( "%d_%d_%d_%d" %(id(timeout), id(func), id(args), id(kwargs))) ), FunctionTimedOutTempType.__bases__, dict(FunctionTimedOutTempType.__dict__)) stopException = FunctionTimedOutTemp thread._stopThread(stopException) thread.join(min(.1, timeout / 50.0)) raise FunctionTimedOut('', timeout, func, args, kwargs) else: # We can still cleanup the thread here.. # Still give a timeout... just... cuz.. thread.join(.5) if exception: raise_exception(exception) if ret: return ret[0]
[ "def", "func_timeout", "(", "timeout", ",", "func", ",", "args", "=", "(", ")", ",", "kwargs", "=", "None", ")", ":", "if", "not", "kwargs", ":", "kwargs", "=", "{", "}", "if", "not", "args", ":", "args", "=", "(", ")", "ret", "=", "[", "]", "exception", "=", "[", "]", "isStopped", "=", "False", "def", "funcwrap", "(", "args2", ",", "kwargs2", ")", ":", "try", ":", "ret", ".", "append", "(", "func", "(", "*", "args2", ",", "*", "*", "kwargs2", ")", ")", "except", "FunctionTimedOut", ":", "# Don't print traceback to stderr if we time out", "pass", "except", "Exception", "as", "e", ":", "exc_info", "=", "sys", ".", "exc_info", "(", ")", "if", "isStopped", "is", "False", ":", "# Assemble the alternate traceback, excluding this function", "# from the trace (by going to next frame)", "# Pytohn3 reads native from __traceback__, ", "# python2 has a different form for \"raise\"", "e", ".", "__traceback__", "=", "exc_info", "[", "2", "]", ".", "tb_next", "exception", ".", "append", "(", "e", ")", "thread", "=", "StoppableThread", "(", "target", "=", "funcwrap", ",", "args", "=", "(", "args", ",", "kwargs", ")", ")", "thread", ".", "daemon", "=", "True", "thread", ".", "start", "(", ")", "thread", ".", "join", "(", "timeout", ")", "stopException", "=", "None", "if", "thread", ".", "isAlive", "(", ")", ":", "isStopped", "=", "True", "class", "FunctionTimedOutTempType", "(", "FunctionTimedOut", ")", ":", "def", "__init__", "(", "self", ")", ":", "return", "FunctionTimedOut", ".", "__init__", "(", "self", ",", "''", ",", "timeout", ",", "func", ",", "args", ",", "kwargs", ")", "FunctionTimedOutTemp", "=", "type", "(", "'FunctionTimedOut'", "+", "str", "(", "hash", "(", "\"%d_%d_%d_%d\"", "%", "(", "id", "(", "timeout", ")", ",", "id", "(", "func", ")", ",", "id", "(", "args", ")", ",", "id", "(", "kwargs", ")", ")", ")", ")", ",", "FunctionTimedOutTempType", ".", "__bases__", ",", "dict", "(", "FunctionTimedOutTempType", ".", "__dict__", ")", ")", "stopException", "=", "FunctionTimedOutTemp", "thread", ".", "_stopThread", "(", "stopException", ")", "thread", ".", "join", "(", "min", "(", ".1", ",", "timeout", "/", "50.0", ")", ")", "raise", "FunctionTimedOut", "(", "''", ",", "timeout", ",", "func", ",", "args", ",", "kwargs", ")", "else", ":", "# We can still cleanup the thread here..", "# Still give a timeout... just... cuz..", "thread", ".", "join", "(", ".5", ")", "if", "exception", ":", "raise_exception", "(", "exception", ")", "if", "ret", ":", "return", "ret", "[", "0", "]" ]
func_timeout - Runs the given function for up to #timeout# seconds. Raises any exceptions #func# would raise, returns what #func# would return (unless timeout is exceeded), in which case it raises FunctionTimedOut @param timeout <float> - Maximum number of seconds to run #func# before terminating @param func <function> - The function to call @param args <tuple> - Any ordered arguments to pass to the function @param kwargs <dict/None> - Keyword arguments to pass to the function. @raises - FunctionTimedOut if #timeout# is exceeded, otherwise anything #func# could raise will be raised If the timeout is exceeded, FunctionTimedOut will be raised within the context of the called function every two seconds until it terminates, but will not block the calling thread (a new thread will be created to perform the join). If possible, you should try/except FunctionTimedOut to return cleanly, but in most cases it will 'just work'. @return - The return value that #func# gives
[ "func_timeout", "-", "Runs", "the", "given", "function", "for", "up", "to", "#timeout#", "seconds", "." ]
python
train
36.43038
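The documented call pattern for func_timeout in the record above, as a small runnable sketch:

    import time
    from func_timeout import FunctionTimedOut, func_timeout

    def slow_add(a, b, delay=5):
        time.sleep(delay)
        return a + b

    try:
        result = func_timeout(2, slow_add, args=(1, 2), kwargs={'delay': 5})
    except FunctionTimedOut:
        result = None   # the call exceeded the 2 second budget
    print(result)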
lappis-unb/salic-ml
src/salicml/data/query.py
https://github.com/lappis-unb/salic-ml/blob/1b3ebc4f8067740999897ccffd9892dc94482a93/src/salicml/data/query.py#L100-L112
def register(self, category): """ Usage: @metrics.register('finance') def approved_funds(pronac, data): return metric_from_data_and_pronac_number(data, pronac) """ def decorator(func): name = func.__name__ key = f'{category}.{name}' self._metrics[key] = func return func return decorator
[ "def", "register", "(", "self", ",", "category", ")", ":", "def", "decorator", "(", "func", ")", ":", "name", "=", "func", ".", "__name__", "key", "=", "f'{category}.{name}'", "self", ".", "_metrics", "[", "key", "]", "=", "func", "return", "func", "return", "decorator" ]
Usage: @metrics.register('finance') def approved_funds(pronac, data): return metric_from_data_and_pronac_number(data, pronac)
[ "Usage", ":" ]
python
train
31
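The record above registers metric functions under a 'category.name' key. A self-contained sketch of that registry-decorator pattern (this Metrics class is illustrative, not salic-ml's real implementation):

    class Metrics:
        def __init__(self):
            self._metrics = {}

        def register(self, category):
            def decorator(func):
                self._metrics[f'{category}.{func.__name__}'] = func
                return func
            return decorator

    metrics = Metrics()

    @metrics.register('finance')
    def approved_funds(pronac, data):
        return data.get(pronac, 0)

    print(list(metrics._metrics))   # ['finance.approved_funds']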
box/flaky
flaky/flaky_nose_plugin.py
https://github.com/box/flaky/blob/c23126f09b2cc5a4071cfa43a11272927e9c0fcd/flaky/flaky_nose_plugin.py#L129-L153
def handleError(self, test, err): """ Baseclass override. Called when a test raises an exception. If the test isn't going to be rerun again, then report the error to the nose test result. :param test: The test that has raised an error :type test: :class:`nose.case.Test` :param err: Information about the test failure (from sys.exc_info()) :type err: `tuple` of `class`, :class:`Exception`, `traceback` :return: True, if the test will be rerun; False, if nose should handle it. :rtype: `bool` """ # pylint:disable=invalid-name want_error = self._handle_test_error_or_failure(test, err) if not want_error and id(test) in self._tests_that_reran: self._nose_result.addError(test, err) return want_error or None
[ "def", "handleError", "(", "self", ",", "test", ",", "err", ")", ":", "# pylint:disable=invalid-name", "want_error", "=", "self", ".", "_handle_test_error_or_failure", "(", "test", ",", "err", ")", "if", "not", "want_error", "and", "id", "(", "test", ")", "in", "self", ".", "_tests_that_reran", ":", "self", ".", "_nose_result", ".", "addError", "(", "test", ",", "err", ")", "return", "want_error", "or", "None" ]
Baseclass override. Called when a test raises an exception. If the test isn't going to be rerun again, then report the error to the nose test result. :param test: The test that has raised an error :type test: :class:`nose.case.Test` :param err: Information about the test failure (from sys.exc_info()) :type err: `tuple` of `class`, :class:`Exception`, `traceback` :return: True, if the test will be rerun; False, if nose should handle it. :rtype: `bool`
[ "Baseclass", "override", ".", "Called", "when", "a", "test", "raises", "an", "exception", "." ]
python
train
35.56
openego/ding0
ding0/flexopt/check_tech_constraints.py
https://github.com/openego/ding0/blob/e2d6528f96255e4bb22ba15514a4f1883564ed5d/ding0/flexopt/check_tech_constraints.py#L736-L783
def get_voltage_at_bus_bar(grid, tree): """ Determine voltage level at bus bar of MV-LV substation Parameters ---------- grid : LVGridDing0 Ding0 grid object tree : :networkx:`NetworkX Graph Obj< >` Tree of grid topology: Returns ------- :any:`list` Voltage at bus bar. First item refers to load case, second item refers to voltage in feedin (generation) case """ # voltage at substation bus bar r_mv_grid, x_mv_grid = get_mv_impedance(grid) r_trafo = sum([tr.r for tr in grid._station._transformers]) x_trafo = sum([tr.x for tr in grid._station._transformers]) cos_phi_load = cfg_ding0.get('assumptions', 'cos_phi_load') cos_phi_feedin = cfg_ding0.get('assumptions', 'cos_phi_gen') v_nom = cfg_ding0.get('assumptions', 'lv_nominal_voltage') # loads and generators connected to bus bar bus_bar_load = sum( [node.peak_load for node in tree.successors(grid._station) if isinstance(node, LVLoadDing0)]) / cos_phi_load bus_bar_generation = sum( [node.capacity for node in tree.successors(grid._station) if isinstance(node, GeneratorDing0)]) / cos_phi_feedin v_delta_load_case_bus_bar = voltage_delta_vde(v_nom, bus_bar_load, (r_mv_grid + r_trafo), (x_mv_grid + x_trafo), cos_phi_load) v_delta_gen_case_bus_bar = voltage_delta_vde(v_nom, bus_bar_generation, (r_mv_grid + r_trafo), -(x_mv_grid + x_trafo), cos_phi_feedin) return v_delta_load_case_bus_bar, v_delta_gen_case_bus_bar
[ "def", "get_voltage_at_bus_bar", "(", "grid", ",", "tree", ")", ":", "# voltage at substation bus bar", "r_mv_grid", ",", "x_mv_grid", "=", "get_mv_impedance", "(", "grid", ")", "r_trafo", "=", "sum", "(", "[", "tr", ".", "r", "for", "tr", "in", "grid", ".", "_station", ".", "_transformers", "]", ")", "x_trafo", "=", "sum", "(", "[", "tr", ".", "x", "for", "tr", "in", "grid", ".", "_station", ".", "_transformers", "]", ")", "cos_phi_load", "=", "cfg_ding0", ".", "get", "(", "'assumptions'", ",", "'cos_phi_load'", ")", "cos_phi_feedin", "=", "cfg_ding0", ".", "get", "(", "'assumptions'", ",", "'cos_phi_gen'", ")", "v_nom", "=", "cfg_ding0", ".", "get", "(", "'assumptions'", ",", "'lv_nominal_voltage'", ")", "# loads and generators connected to bus bar", "bus_bar_load", "=", "sum", "(", "[", "node", ".", "peak_load", "for", "node", "in", "tree", ".", "successors", "(", "grid", ".", "_station", ")", "if", "isinstance", "(", "node", ",", "LVLoadDing0", ")", "]", ")", "/", "cos_phi_load", "bus_bar_generation", "=", "sum", "(", "[", "node", ".", "capacity", "for", "node", "in", "tree", ".", "successors", "(", "grid", ".", "_station", ")", "if", "isinstance", "(", "node", ",", "GeneratorDing0", ")", "]", ")", "/", "cos_phi_feedin", "v_delta_load_case_bus_bar", "=", "voltage_delta_vde", "(", "v_nom", ",", "bus_bar_load", ",", "(", "r_mv_grid", "+", "r_trafo", ")", ",", "(", "x_mv_grid", "+", "x_trafo", ")", ",", "cos_phi_load", ")", "v_delta_gen_case_bus_bar", "=", "voltage_delta_vde", "(", "v_nom", ",", "bus_bar_generation", ",", "(", "r_mv_grid", "+", "r_trafo", ")", ",", "-", "(", "x_mv_grid", "+", "x_trafo", ")", ",", "cos_phi_feedin", ")", "return", "v_delta_load_case_bus_bar", ",", "v_delta_gen_case_bus_bar" ]
Determine voltage level at bus bar of MV-LV substation Parameters ---------- grid : LVGridDing0 Ding0 grid object tree : :networkx:`NetworkX Graph Obj< >` Tree of grid topology: Returns ------- :any:`list` Voltage at bus bar. First item refers to load case, second item refers to voltage in feedin (generation) case
[ "Determine", "voltage", "level", "at", "bus", "bar", "of", "MV", "-", "LV", "substation" ]
python
train
39.354167
trailofbits/manticore
manticore/core/executor.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/core/executor.py#L136-L141
def _visited_callback(self, state, pc, instr): """ Maintain our own copy of the visited set """ pc = state.platform.current.PC with self.locked_context('visited', dict) as ctx: ctx[pc] = ctx.get(pc, 0) + 1
[ "def", "_visited_callback", "(", "self", ",", "state", ",", "pc", ",", "instr", ")", ":", "pc", "=", "state", ".", "platform", ".", "current", ".", "PC", "with", "self", ".", "locked_context", "(", "'visited'", ",", "dict", ")", "as", "ctx", ":", "ctx", "[", "pc", "]", "=", "ctx", ".", "get", "(", "pc", ",", "0", ")", "+", "1" ]
Maintain our own copy of the visited set
[ "Maintain", "our", "own", "copy", "of", "the", "visited", "set" ]
python
valid
40.666667
fastai/fastai
examples/train_imagenet.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/examples/train_imagenet.py#L22-L60
def main( gpu:Param("GPU to run on", str)=None ): """Distributed training of Imagenet. Fastest speed is if you run with: python -m fastai.launch""" path = Path('/mnt/fe2_disk/') tot_epochs,size,bs,lr = 60,224,256,3e-1 dirname = 'imagenet' gpu = setup_distrib(gpu) if gpu is None: bs *= torch.cuda.device_count() n_gpus = num_distrib() or 1 workers = min(12, num_cpus()//n_gpus) data = get_data(path/dirname, size, bs, workers) b_its = len(data.train_dl)//n_gpus # Using bs 256 on single GPU as baseline, scale the LR linearly tot_bs = bs*n_gpus bs_rat = tot_bs/256 lr *= bs_rat ph1 = (TrainingPhase(tot_epochs*0.10*b_its) .schedule_hp('lr', (lr/10,lr), anneal=annealing_cos)) ph2 = (TrainingPhase(tot_epochs*0.90*b_its) .schedule_hp('lr', (lr,lr/1e5), anneal=annealing_cos)) opt_func = partial(optim.Adam, eps=0.1, betas=(0.9,0.99)) learn = Learner(data, models.xresnet50(), metrics=[accuracy,top_k_accuracy], wd=1e-3, opt_func=opt_func, bn_wd=False, true_wd=True, loss_func = LabelSmoothingCrossEntropy()).mixup(alpha=0.2) learn.callback_fns += [ partial(GeneralScheduler, phases=(ph1,ph2)), partial(SaveModelCallback, every='epoch', name='model') ] learn.split(lambda m: (children(m)[-2],)) if gpu is None: learn.model = nn.DataParallel(learn.model) else: learn.to_distributed(gpu) learn.to_fp16(dynamic=True) learn.fit(tot_epochs, 1) if rank_distrib(): time.sleep(1) learn.save('done')
[ "def", "main", "(", "gpu", ":", "Param", "(", "\"GPU to run on\"", ",", "str", ")", "=", "None", ")", ":", "path", "=", "Path", "(", "'/mnt/fe2_disk/'", ")", "tot_epochs", ",", "size", ",", "bs", ",", "lr", "=", "60", ",", "224", ",", "256", ",", "3e-1", "dirname", "=", "'imagenet'", "gpu", "=", "setup_distrib", "(", "gpu", ")", "if", "gpu", "is", "None", ":", "bs", "*=", "torch", ".", "cuda", ".", "device_count", "(", ")", "n_gpus", "=", "num_distrib", "(", ")", "or", "1", "workers", "=", "min", "(", "12", ",", "num_cpus", "(", ")", "//", "n_gpus", ")", "data", "=", "get_data", "(", "path", "/", "dirname", ",", "size", ",", "bs", ",", "workers", ")", "b_its", "=", "len", "(", "data", ".", "train_dl", ")", "//", "n_gpus", "# Using bs 256 on single GPU as baseline, scale the LR linearly", "tot_bs", "=", "bs", "*", "n_gpus", "bs_rat", "=", "tot_bs", "/", "256", "lr", "*=", "bs_rat", "ph1", "=", "(", "TrainingPhase", "(", "tot_epochs", "*", "0.10", "*", "b_its", ")", ".", "schedule_hp", "(", "'lr'", ",", "(", "lr", "/", "10", ",", "lr", ")", ",", "anneal", "=", "annealing_cos", ")", ")", "ph2", "=", "(", "TrainingPhase", "(", "tot_epochs", "*", "0.90", "*", "b_its", ")", ".", "schedule_hp", "(", "'lr'", ",", "(", "lr", ",", "lr", "/", "1e5", ")", ",", "anneal", "=", "annealing_cos", ")", ")", "opt_func", "=", "partial", "(", "optim", ".", "Adam", ",", "eps", "=", "0.1", ",", "betas", "=", "(", "0.9", ",", "0.99", ")", ")", "learn", "=", "Learner", "(", "data", ",", "models", ".", "xresnet50", "(", ")", ",", "metrics", "=", "[", "accuracy", ",", "top_k_accuracy", "]", ",", "wd", "=", "1e-3", ",", "opt_func", "=", "opt_func", ",", "bn_wd", "=", "False", ",", "true_wd", "=", "True", ",", "loss_func", "=", "LabelSmoothingCrossEntropy", "(", ")", ")", ".", "mixup", "(", "alpha", "=", "0.2", ")", "learn", ".", "callback_fns", "+=", "[", "partial", "(", "GeneralScheduler", ",", "phases", "=", "(", "ph1", ",", "ph2", ")", ")", ",", "partial", "(", "SaveModelCallback", ",", "every", "=", "'epoch'", ",", "name", "=", "'model'", ")", "]", "learn", ".", "split", "(", "lambda", "m", ":", "(", "children", "(", "m", ")", "[", "-", "2", "]", ",", ")", ")", "if", "gpu", "is", "None", ":", "learn", ".", "model", "=", "nn", ".", "DataParallel", "(", "learn", ".", "model", ")", "else", ":", "learn", ".", "to_distributed", "(", "gpu", ")", "learn", ".", "to_fp16", "(", "dynamic", "=", "True", ")", "learn", ".", "fit", "(", "tot_epochs", ",", "1", ")", "if", "rank_distrib", "(", ")", ":", "time", ".", "sleep", "(", "1", ")", "learn", ".", "save", "(", "'done'", ")" ]
Distributed training of Imagenet. Fastest speed is if you run with: python -m fastai.launch
[ "Distributed", "training", "of", "Imagenet", ".", "Fastest", "speed", "is", "if", "you", "run", "with", ":", "python", "-", "m", "fastai", ".", "launch" ]
python
train
39.230769
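One detail in the record above worth spelling out is the linear learning-rate scaling: batch size 256 on a single GPU is the baseline, and the rate grows in proportion to the total batch size across GPUs. For example:

    base_lr, base_bs = 3e-1, 256
    bs_per_gpu, n_gpus = 256, 8
    lr = base_lr * (bs_per_gpu * n_gpus) / base_bs
    print(lr)   # 2.4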
smnorris/pgdata
pgdata/database.py
https://github.com/smnorris/pgdata/blob/8b0294024d5ef30b4ae9184888e2cc7004d1784e/pgdata/database.py#L111-L117
def tables_in_schema(self, schema): """Get a listing of all tables in given schema """ sql = """SELECT table_name FROM information_schema.tables WHERE table_schema = %s""" return [t[0] for t in self.query(sql, (schema,)).fetchall()]
[ "def", "tables_in_schema", "(", "self", ",", "schema", ")", ":", "sql", "=", "\"\"\"SELECT table_name\n FROM information_schema.tables\n WHERE table_schema = %s\"\"\"", "return", "[", "t", "[", "0", "]", "for", "t", "in", "self", ".", "query", "(", "sql", ",", "(", "schema", ",", ")", ")", ".", "fetchall", "(", ")", "]" ]
Get a listing of all tables in given schema
[ "Get", "a", "listing", "of", "all", "tables", "in", "given", "schema" ]
python
train
41.714286
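The record above wraps a standard information_schema query; roughly the same listing can be done directly with psycopg2 (the connection string is hypothetical):

    import psycopg2

    conn = psycopg2.connect('dbname=mydb user=me')   # hypothetical DSN
    with conn.cursor() as cur:
        cur.execute(
            "SELECT table_name FROM information_schema.tables WHERE table_schema = %s",
            ('public',),
        )
        tables = [row[0] for row in cur.fetchall()]
    print(tables)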
edwards-lab/libGWAS
libgwas/boundary.py
https://github.com/edwards-lab/libGWAS/blob/d68c9a083d443dfa5d7c5112de29010909cfe23f/libgwas/boundary.py#L179-L196
def LoadSNPs(self, snps=[]): """Define the SNP inclusions (by RSID). This overrides true boundary \ definition. :param snps: array of RSIDs :return: None This doesn't define RSID ranges, so it throws InvalidBoundarySpec if it encounters what appears to be a range (SNP contains a "-") """ for snp in snps: bounds = snp.split("-") if len(bounds) == 1: if bounds[0] != "": self.target_rs.append(bounds[0]) else: raise InvalidBoundarySpec(snp)
[ "def", "LoadSNPs", "(", "self", ",", "snps", "=", "[", "]", ")", ":", "for", "snp", "in", "snps", ":", "bounds", "=", "snp", ".", "split", "(", "\"-\"", ")", "if", "len", "(", "bounds", ")", "==", "1", ":", "if", "bounds", "[", "0", "]", "!=", "\"\"", ":", "self", ".", "target_rs", ".", "append", "(", "bounds", "[", "0", "]", ")", "else", ":", "raise", "InvalidBoundarySpec", "(", "snp", ")" ]
Define the SNP inclusions (by RSID). This overrides true boundary \ definition. :param snps: array of RSIDs :return: None This doesn't define RSID ranges, so it throws InvalidBoundarySpec if it encounters what appears to be a range (SNP contains a "-")
[ "Define", "the", "SNP", "inclusions", "(", "by", "RSID", ")", ".", "This", "overrides", "true", "boundary", "\\", "definition", "." ]
python
train
32.333333
NuGrid/NuGridPy
nugridpy/mesa.py
https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/mesa.py#L3725-L3774
def _read_mesafile(filename,data_rows=0,only='all'): """ private routine that is not directly called by the user""" f=open(filename,'r') vv=[] v=[] lines = [] line = '' for i in range(0,6): line = f.readline() lines.extend([line]) hval = lines[2].split() hlist = lines[1].split() header_attr = {} for a,b in zip(hlist,hval): header_attr[a] = float(b) if only is 'header_attr': return header_attr cols = {} colnum = lines[4].split() colname = lines[5].split() for a,b in zip(colname,colnum): cols[a] = int(b) data = [] old_percent = 0 for i in range(data_rows): # writing reading status percent = int(i*100/np.max([1, data_rows-1])) if percent >= old_percent + 5: sys.stdout.flush() sys.stdout.write("\r reading " + "...%d%%" % percent) old_percent = percent line = f.readline() v=line.split() try: vv=np.array(v,dtype='float64') except ValueError: for item in v: if item.__contains__('.') and not item.__contains__('E'): v[v.index(item)]='0' data.append(vv) print(' \n') f.close() a=np.array(data) data = [] return header_attr, cols, a
[ "def", "_read_mesafile", "(", "filename", ",", "data_rows", "=", "0", ",", "only", "=", "'all'", ")", ":", "f", "=", "open", "(", "filename", ",", "'r'", ")", "vv", "=", "[", "]", "v", "=", "[", "]", "lines", "=", "[", "]", "line", "=", "''", "for", "i", "in", "range", "(", "0", ",", "6", ")", ":", "line", "=", "f", ".", "readline", "(", ")", "lines", ".", "extend", "(", "[", "line", "]", ")", "hval", "=", "lines", "[", "2", "]", ".", "split", "(", ")", "hlist", "=", "lines", "[", "1", "]", ".", "split", "(", ")", "header_attr", "=", "{", "}", "for", "a", ",", "b", "in", "zip", "(", "hlist", ",", "hval", ")", ":", "header_attr", "[", "a", "]", "=", "float", "(", "b", ")", "if", "only", "is", "'header_attr'", ":", "return", "header_attr", "cols", "=", "{", "}", "colnum", "=", "lines", "[", "4", "]", ".", "split", "(", ")", "colname", "=", "lines", "[", "5", "]", ".", "split", "(", ")", "for", "a", ",", "b", "in", "zip", "(", "colname", ",", "colnum", ")", ":", "cols", "[", "a", "]", "=", "int", "(", "b", ")", "data", "=", "[", "]", "old_percent", "=", "0", "for", "i", "in", "range", "(", "data_rows", ")", ":", "# writing reading status", "percent", "=", "int", "(", "i", "*", "100", "/", "np", ".", "max", "(", "[", "1", ",", "data_rows", "-", "1", "]", ")", ")", "if", "percent", ">=", "old_percent", "+", "5", ":", "sys", ".", "stdout", ".", "flush", "(", ")", "sys", ".", "stdout", ".", "write", "(", "\"\\r reading \"", "+", "\"...%d%%\"", "%", "percent", ")", "old_percent", "=", "percent", "line", "=", "f", ".", "readline", "(", ")", "v", "=", "line", ".", "split", "(", ")", "try", ":", "vv", "=", "np", ".", "array", "(", "v", ",", "dtype", "=", "'float64'", ")", "except", "ValueError", ":", "for", "item", "in", "v", ":", "if", "item", ".", "__contains__", "(", "'.'", ")", "and", "not", "item", ".", "__contains__", "(", "'E'", ")", ":", "v", "[", "v", ".", "index", "(", "item", ")", "]", "=", "'0'", "data", ".", "append", "(", "vv", ")", "print", "(", "' \\n'", ")", "f", ".", "close", "(", ")", "a", "=", "np", ".", "array", "(", "data", ")", "data", "=", "[", "]", "return", "header_attr", ",", "cols", ",", "a" ]
private routine that is not directly called by the user
[ "private", "routine", "that", "is", "not", "directly", "called", "by", "the", "user" ]
python
train
25.86
kensho-technologies/grift
grift/property_types.py
https://github.com/kensho-technologies/grift/blob/b8767d1604c1a0a25eace6cdd04b53b57afa9757/grift/property_types.py#L14-L23
def to_native(self, value): """Return the value as a dict, raising error if conversion to dict is not possible""" if isinstance(value, dict): return value elif isinstance(value, six.string_types): native_value = json.loads(value) if isinstance(native_value, dict): return native_value else: raise ConversionError(u'Cannot load value as a dict: {}'.format(value))
[ "def", "to_native", "(", "self", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "dict", ")", ":", "return", "value", "elif", "isinstance", "(", "value", ",", "six", ".", "string_types", ")", ":", "native_value", "=", "json", ".", "loads", "(", "value", ")", "if", "isinstance", "(", "native_value", ",", "dict", ")", ":", "return", "native_value", "else", ":", "raise", "ConversionError", "(", "u'Cannot load value as a dict: {}'", ".", "format", "(", "value", ")", ")" ]
Return the value as a dict, raising error if conversion to dict is not possible
[ "Return", "the", "value", "as", "a", "dict", "raising", "error", "if", "conversion", "to", "dict", "is", "not", "possible" ]
python
train
45.7
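The conversion logic in the record above is small enough to restate as a standalone sketch (ValueError stands in for the library's ConversionError, which isn't shown here):

    import json

    def to_dict(value):
        # Accept a dict as-is, or parse a JSON string that encodes a dict.
        if isinstance(value, dict):
            return value
        elif isinstance(value, str):
            loaded = json.loads(value)
            if isinstance(loaded, dict):
                return loaded
        else:
            raise ValueError('Cannot load value as a dict: {}'.format(value))

    print(to_dict('{"debug": true}'))   # {'debug': True}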
quantumlib/Cirq
cirq/protocols/decompose.py
https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/protocols/decompose.py#L348-L376
def decompose_once_with_qubits(val: Any, qubits: Iterable['cirq.Qid'], default=RaiseTypeErrorIfNotProvided): """Decomposes a value into operations on the given qubits. This method is used when decomposing gates, which don't know which qubits they are being applied to unless told. It decomposes the gate exactly once, instead of decomposing it and then continuing to decomposing the decomposed operations recursively until some criteria is met. Args: val: The value to call `._decompose_(qubits=qubits)` on, if possible. qubits: The value to pass into the named `qubits` parameter of `val._decompose_`. default: A default result to use if the value doesn't have a `_decompose_` method or that method returns `NotImplemented` or `None`. If not specified, undecomposable values cause a `TypeError`. Returns: The result of `val._decompose_(qubits=qubits)`, if `val` has a `_decompose_` method and it didn't return `NotImplemented` or `None`. Otherwise `default` is returned, if it was specified. Otherwise an error is raised. TypeError: `val` didn't have a `_decompose_` method (or that method returned `NotImplemented` or `None`) and `default` wasn't set. """ return decompose_once(val, default, qubits=tuple(qubits))
[ "def", "decompose_once_with_qubits", "(", "val", ":", "Any", ",", "qubits", ":", "Iterable", "[", "'cirq.Qid'", "]", ",", "default", "=", "RaiseTypeErrorIfNotProvided", ")", ":", "return", "decompose_once", "(", "val", ",", "default", ",", "qubits", "=", "tuple", "(", "qubits", ")", ")" ]
Decomposes a value into operations on the given qubits. This method is used when decomposing gates, which don't know which qubits they are being applied to unless told. It decomposes the gate exactly once, instead of decomposing it and then continuing to decomposing the decomposed operations recursively until some criteria is met. Args: val: The value to call `._decompose_(qubits=qubits)` on, if possible. qubits: The value to pass into the named `qubits` parameter of `val._decompose_`. default: A default result to use if the value doesn't have a `_decompose_` method or that method returns `NotImplemented` or `None`. If not specified, undecomposable values cause a `TypeError`. Returns: The result of `val._decompose_(qubits=qubits)`, if `val` has a `_decompose_` method and it didn't return `NotImplemented` or `None`. Otherwise `default` is returned, if it was specified. Otherwise an error is raised. TypeError: `val` didn't have a `_decompose_` method (or that method returned `NotImplemented` or `None`) and `default` wasn't set.
[ "Decomposes", "a", "value", "into", "operations", "on", "the", "given", "qubits", "." ]
python
train
48.310345
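A hedged usage sketch for the record above, assuming TOFFOLI defines a one-step decomposition (it does in typical cirq releases); otherwise the default/TypeError behaviour described in the docstring applies:

    import cirq

    qubits = cirq.LineQubit.range(3)
    ops = cirq.decompose_once_with_qubits(cirq.TOFFOLI, qubits)
    for op in ops:
        print(op)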
pgmpy/pgmpy
pgmpy/factors/discrete/JointProbabilityDistribution.py
https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/factors/discrete/JointProbabilityDistribution.py#L327-L370
def is_imap(self, model): """ Checks whether the given BayesianModel is Imap of JointProbabilityDistribution Parameters ----------- model : An instance of BayesianModel Class, for which you want to check the Imap Returns -------- boolean : True if given bayesian model is Imap for Joint Probability Distribution False otherwise Examples -------- >>> from pgmpy.models import BayesianModel >>> from pgmpy.factors.discrete import TabularCPD >>> from pgmpy.factors.discrete import JointProbabilityDistribution >>> bm = BayesianModel([('diff', 'grade'), ('intel', 'grade')]) >>> diff_cpd = TabularCPD('diff', 2, [[0.2], [0.8]]) >>> intel_cpd = TabularCPD('intel', 3, [[0.5], [0.3], [0.2]]) >>> grade_cpd = TabularCPD('grade', 3, ... [[0.1,0.1,0.1,0.1,0.1,0.1], ... [0.1,0.1,0.1,0.1,0.1,0.1], ... [0.8,0.8,0.8,0.8,0.8,0.8]], ... evidence=['diff', 'intel'], ... evidence_card=[2, 3]) >>> bm.add_cpds(diff_cpd, intel_cpd, grade_cpd) >>> val = [0.01, 0.01, 0.08, 0.006, 0.006, 0.048, 0.004, 0.004, 0.032, 0.04, 0.04, 0.32, 0.024, 0.024, 0.192, 0.016, 0.016, 0.128] >>> JPD = JointProbabilityDistribution(['diff', 'intel', 'grade'], [2, 3, 3], val) >>> JPD.is_imap(bm) True """ from pgmpy.models import BayesianModel if not isinstance(model, BayesianModel): raise TypeError("model must be an instance of BayesianModel") factors = [cpd.to_factor() for cpd in model.get_cpds()] factor_prod = six.moves.reduce(mul, factors) JPD_fact = DiscreteFactor(self.variables, self.cardinality, self.values) if JPD_fact == factor_prod: return True else: return False
[ "def", "is_imap", "(", "self", ",", "model", ")", ":", "from", "pgmpy", ".", "models", "import", "BayesianModel", "if", "not", "isinstance", "(", "model", ",", "BayesianModel", ")", ":", "raise", "TypeError", "(", "\"model must be an instance of BayesianModel\"", ")", "factors", "=", "[", "cpd", ".", "to_factor", "(", ")", "for", "cpd", "in", "model", ".", "get_cpds", "(", ")", "]", "factor_prod", "=", "six", ".", "moves", ".", "reduce", "(", "mul", ",", "factors", ")", "JPD_fact", "=", "DiscreteFactor", "(", "self", ".", "variables", ",", "self", ".", "cardinality", ",", "self", ".", "values", ")", "if", "JPD_fact", "==", "factor_prod", ":", "return", "True", "else", ":", "return", "False" ]
Checks whether the given BayesianModel is Imap of JointProbabilityDistribution Parameters ----------- model : An instance of BayesianModel Class, for which you want to check the Imap Returns -------- boolean : True if given bayesian model is Imap for Joint Probability Distribution False otherwise Examples -------- >>> from pgmpy.models import BayesianModel >>> from pgmpy.factors.discrete import TabularCPD >>> from pgmpy.factors.discrete import JointProbabilityDistribution >>> bm = BayesianModel([('diff', 'grade'), ('intel', 'grade')]) >>> diff_cpd = TabularCPD('diff', 2, [[0.2], [0.8]]) >>> intel_cpd = TabularCPD('intel', 3, [[0.5], [0.3], [0.2]]) >>> grade_cpd = TabularCPD('grade', 3, ... [[0.1,0.1,0.1,0.1,0.1,0.1], ... [0.1,0.1,0.1,0.1,0.1,0.1], ... [0.8,0.8,0.8,0.8,0.8,0.8]], ... evidence=['diff', 'intel'], ... evidence_card=[2, 3]) >>> bm.add_cpds(diff_cpd, intel_cpd, grade_cpd) >>> val = [0.01, 0.01, 0.08, 0.006, 0.006, 0.048, 0.004, 0.004, 0.032, 0.04, 0.04, 0.32, 0.024, 0.024, 0.192, 0.016, 0.016, 0.128] >>> JPD = JointProbabilityDistribution(['diff', 'intel', 'grade'], [2, 3, 3], val) >>> JPD.is_imap(bm) True
[ "Checks", "whether", "the", "given", "BayesianModel", "is", "Imap", "of", "JointProbabilityDistribution" ]
python
train
45.045455
perrygeo/simanneal
examples/salesman.py
https://github.com/perrygeo/simanneal/blob/293bc81b5bc4bf0ba7760a0e4df5ba97fdcf2881/examples/salesman.py#L7-L13
def distance(a, b): """Calculates distance between two latitude-longitude coordinates.""" R = 3963 # radius of Earth (miles) lat1, lon1 = math.radians(a[0]), math.radians(a[1]) lat2, lon2 = math.radians(b[0]), math.radians(b[1]) return math.acos(math.sin(lat1) * math.sin(lat2) + math.cos(lat1) * math.cos(lat2) * math.cos(lon1 - lon2)) * R
[ "def", "distance", "(", "a", ",", "b", ")", ":", "R", "=", "3963", "# radius of Earth (miles)", "lat1", ",", "lon1", "=", "math", ".", "radians", "(", "a", "[", "0", "]", ")", ",", "math", ".", "radians", "(", "a", "[", "1", "]", ")", "lat2", ",", "lon2", "=", "math", ".", "radians", "(", "b", "[", "0", "]", ")", ",", "math", ".", "radians", "(", "b", "[", "1", "]", ")", "return", "math", ".", "acos", "(", "math", ".", "sin", "(", "lat1", ")", "*", "math", ".", "sin", "(", "lat2", ")", "+", "math", ".", "cos", "(", "lat1", ")", "*", "math", ".", "cos", "(", "lat2", ")", "*", "math", ".", "cos", "(", "lon1", "-", "lon2", ")", ")", "*", "R" ]
Calculates distance between two latitude-longitude coordinates.
[ "Calculates", "distance", "between", "two", "latitude", "-", "longitude", "coordinates", "." ]
python
train
53.714286
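The record above is the spherical law of cosines, d = R * arccos(sin(lat1)*sin(lat2) + cos(lat1)*cos(lat2)*cos(lon1 - lon2)). A runnable check of the same formula:

    import math

    def distance(a, b):
        # Great-circle distance in miles via the spherical law of cosines.
        R = 3963  # Earth radius (miles)
        lat1, lon1 = map(math.radians, a)
        lat2, lon2 = map(math.radians, b)
        return R * math.acos(math.sin(lat1) * math.sin(lat2) +
                             math.cos(lat1) * math.cos(lat2) * math.cos(lon1 - lon2))

    print(round(distance((40.7128, -74.0060), (34.0522, -118.2437))))   # NYC -> LA, roughly 2450 miles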
geopy/geopy
geopy/geocoders/photon.py
https://github.com/geopy/geopy/blob/02c838d965e76497f3c3d61f53808c86b5c58224/geopy/geocoders/photon.py#L205-L215
def _parse_json(cls, resources, exactly_one=True): """ Parse display name, latitude, and longitude from a JSON response. """ if not len(resources['features']): # pragma: no cover return None if exactly_one: return cls.parse_resource(resources['features'][0]) else: return [cls.parse_resource(resource) for resource in resources['features']]
[ "def", "_parse_json", "(", "cls", ",", "resources", ",", "exactly_one", "=", "True", ")", ":", "if", "not", "len", "(", "resources", "[", "'features'", "]", ")", ":", "# pragma: no cover", "return", "None", "if", "exactly_one", ":", "return", "cls", ".", "parse_resource", "(", "resources", "[", "'features'", "]", "[", "0", "]", ")", "else", ":", "return", "[", "cls", ".", "parse_resource", "(", "resource", ")", "for", "resource", "in", "resources", "[", "'features'", "]", "]" ]
Parse display name, latitude, and longitude from a JSON response.
[ "Parse", "display", "name", "latitude", "and", "longitude", "from", "a", "JSON", "response", "." ]
python
train
39.545455
mitsei/dlkit
dlkit/handcar/learning/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/learning/objects.py#L371-L390
def get_next_objective(self): """Gets the next Objective in this list. return: (osid.learning.Objective) - the next Objective in this list. The has_next() method should be used to test that a next Objective is available before calling this method. raise: IllegalState - no more elements available in this list raise: OperationFailed - unable to complete request compliance: mandatory - This method must be implemented. """ try: next_object = next(self) except StopIteration: raise IllegalState('no more elements available in this list') except Exception: # Need to specify exceptions here! raise OperationFailed() else: return next_object
[ "def", "get_next_objective", "(", "self", ")", ":", "try", ":", "next_object", "=", "next", "(", "self", ")", "except", "StopIteration", ":", "raise", "IllegalState", "(", "'no more elements available in this list'", ")", "except", "Exception", ":", "# Need to specify exceptions here!", "raise", "OperationFailed", "(", ")", "else", ":", "return", "next_object" ]
Gets the next Objective in this list. return: (osid.learning.Objective) - the next Objective in this list. The has_next() method should be used to test that a next Objective is available before calling this method. raise: IllegalState - no more elements available in this list raise: OperationFailed - unable to complete request compliance: mandatory - This method must be implemented.
[ "Gets", "the", "next", "Objective", "in", "this", "list", "." ]
python
train
40
hydpy-dev/hydpy
hydpy/models/hstream/hstream_states.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/models/hstream/hstream_states.py#L57-L69
def refweights(self): """A |numpy| |numpy.ndarray| with equal weights for all segment junctions.. >>> from hydpy.models.hstream import * >>> parameterstep('1d') >>> states.qjoints.shape = 5 >>> states.qjoints.refweights array([ 0.2, 0.2, 0.2, 0.2, 0.2]) """ # pylint: disable=unsubscriptable-object # due to a pylint bug (see https://github.com/PyCQA/pylint/issues/870) return numpy.full(self.shape, 1./self.shape[0], dtype=float)
[ "def", "refweights", "(", "self", ")", ":", "# pylint: disable=unsubscriptable-object", "# due to a pylint bug (see https://github.com/PyCQA/pylint/issues/870)", "return", "numpy", ".", "full", "(", "self", ".", "shape", ",", "1.", "/", "self", ".", "shape", "[", "0", "]", ",", "dtype", "=", "float", ")" ]
A |numpy| |numpy.ndarray| with equal weights for all segment junctions.. >>> from hydpy.models.hstream import * >>> parameterstep('1d') >>> states.qjoints.shape = 5 >>> states.qjoints.refweights array([ 0.2, 0.2, 0.2, 0.2, 0.2])
[ "A", "|numpy|", "|numpy", ".", "ndarray|", "with", "equal", "weights", "for", "all", "segment", "junctions", ".." ]
python
train
39.307692
ynop/audiomate
audiomate/corpus/io/tuda.py
https://github.com/ynop/audiomate/blob/61727920b23a708293c3d526fa3000d4de9c6c21/audiomate/corpus/io/tuda.py#L167-L227
def load_file(folder_path, idx, corpus): """ Load speaker, file, utterance, labels for the file with the given id. """ xml_path = os.path.join(folder_path, '{}.xml'.format(idx)) wav_paths = glob.glob(os.path.join(folder_path, '{}_*.wav'.format(idx))) if len(wav_paths) == 0: return [] xml_file = open(xml_path, 'r', encoding='utf-8') soup = BeautifulSoup(xml_file, 'lxml') transcription = soup.recording.cleaned_sentence.string transcription_raw = soup.recording.sentence.string gender = soup.recording.gender.string is_native = soup.recording.muttersprachler.string age_class = soup.recording.ageclass.string speaker_idx = soup.recording.speaker_id.string if speaker_idx not in corpus.issuers.keys(): start_age_class = int(age_class.split('-')[0]) if start_age_class < 12: age_group = issuers.AgeGroup.CHILD elif start_age_class < 18: age_group = issuers.AgeGroup.YOUTH elif start_age_class < 65: age_group = issuers.AgeGroup.ADULT else: age_group = issuers.AgeGroup.SENIOR native_lang = None if is_native == 'Ja': native_lang = 'deu' issuer = issuers.Speaker(speaker_idx, gender=issuers.Gender(gender), age_group=age_group, native_language=native_lang) corpus.import_issuers(issuer) utt_ids = [] for wav_path in wav_paths: wav_name = os.path.split(wav_path)[1] wav_idx = os.path.splitext(wav_name)[0] corpus.new_file(wav_path, wav_idx) utt = corpus.new_utterance(wav_idx, wav_idx, speaker_idx) utt.set_label_list(annotations.LabelList.create_single( transcription, idx=audiomate.corpus.LL_WORD_TRANSCRIPT )) utt.set_label_list(annotations.LabelList.create_single( transcription_raw, idx=audiomate.corpus.LL_WORD_TRANSCRIPT_RAW )) utt_ids.append(wav_idx) return utt_ids
[ "def", "load_file", "(", "folder_path", ",", "idx", ",", "corpus", ")", ":", "xml_path", "=", "os", ".", "path", ".", "join", "(", "folder_path", ",", "'{}.xml'", ".", "format", "(", "idx", ")", ")", "wav_paths", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "folder_path", ",", "'{}_*.wav'", ".", "format", "(", "idx", ")", ")", ")", "if", "len", "(", "wav_paths", ")", "==", "0", ":", "return", "[", "]", "xml_file", "=", "open", "(", "xml_path", ",", "'r'", ",", "encoding", "=", "'utf-8'", ")", "soup", "=", "BeautifulSoup", "(", "xml_file", ",", "'lxml'", ")", "transcription", "=", "soup", ".", "recording", ".", "cleaned_sentence", ".", "string", "transcription_raw", "=", "soup", ".", "recording", ".", "sentence", ".", "string", "gender", "=", "soup", ".", "recording", ".", "gender", ".", "string", "is_native", "=", "soup", ".", "recording", ".", "muttersprachler", ".", "string", "age_class", "=", "soup", ".", "recording", ".", "ageclass", ".", "string", "speaker_idx", "=", "soup", ".", "recording", ".", "speaker_id", ".", "string", "if", "speaker_idx", "not", "in", "corpus", ".", "issuers", ".", "keys", "(", ")", ":", "start_age_class", "=", "int", "(", "age_class", ".", "split", "(", "'-'", ")", "[", "0", "]", ")", "if", "start_age_class", "<", "12", ":", "age_group", "=", "issuers", ".", "AgeGroup", ".", "CHILD", "elif", "start_age_class", "<", "18", ":", "age_group", "=", "issuers", ".", "AgeGroup", ".", "YOUTH", "elif", "start_age_class", "<", "65", ":", "age_group", "=", "issuers", ".", "AgeGroup", ".", "ADULT", "else", ":", "age_group", "=", "issuers", ".", "AgeGroup", ".", "SENIOR", "native_lang", "=", "None", "if", "is_native", "==", "'Ja'", ":", "native_lang", "=", "'deu'", "issuer", "=", "issuers", ".", "Speaker", "(", "speaker_idx", ",", "gender", "=", "issuers", ".", "Gender", "(", "gender", ")", ",", "age_group", "=", "age_group", ",", "native_language", "=", "native_lang", ")", "corpus", ".", "import_issuers", "(", "issuer", ")", "utt_ids", "=", "[", "]", "for", "wav_path", "in", "wav_paths", ":", "wav_name", "=", "os", ".", "path", ".", "split", "(", "wav_path", ")", "[", "1", "]", "wav_idx", "=", "os", ".", "path", ".", "splitext", "(", "wav_name", ")", "[", "0", "]", "corpus", ".", "new_file", "(", "wav_path", ",", "wav_idx", ")", "utt", "=", "corpus", ".", "new_utterance", "(", "wav_idx", ",", "wav_idx", ",", "speaker_idx", ")", "utt", ".", "set_label_list", "(", "annotations", ".", "LabelList", ".", "create_single", "(", "transcription", ",", "idx", "=", "audiomate", ".", "corpus", ".", "LL_WORD_TRANSCRIPT", ")", ")", "utt", ".", "set_label_list", "(", "annotations", ".", "LabelList", ".", "create_single", "(", "transcription_raw", ",", "idx", "=", "audiomate", ".", "corpus", ".", "LL_WORD_TRANSCRIPT_RAW", ")", ")", "utt_ids", ".", "append", "(", "wav_idx", ")", "return", "utt_ids" ]
Load speaker, file, utterance, labels for the file with the given id.
[ "Load", "speaker", "file", "utterance", "labels", "for", "the", "file", "with", "the", "given", "id", "." ]
python
train
36.852459
robinagist/ezo
ezo/core/lib.py
https://github.com/robinagist/ezo/blob/fae896daa1c896c7c50f2c9cfe3f7f9cdb3fc986/ezo/core/lib.py#L317-L370
def response(self, response_data): ''' called by the event handler with the result data :param response_data: result data :return: ''' if "address" not in response_data: return None, "address missing from response_data payload" if "function" not in response_data: return None, "method missing from response_data payload" if "params" not in response_data: return None, "params missing from response_data payload" if "target" not in response_data: return None, "target missing from response_data payload" address = self._ezo.w3.toChecksumAddress(response_data["address"]) account = self._ezo.w3.toChecksumAddress(get_account(self._ezo.config, response_data["target"])) self._ezo.w3.eth.accounts[0] = account tx_dict = dict() tx_dict["account"] = account tx_dict["from"] = account password = os.environ['EZO_PASSWORD'] if 'EZO_PASSWORD' in os.environ else None u_state = self._ezo.w3.personal.unlockAccount(account, password) if not self.contract_obj: try: self.contract_obj = self._ezo.w3.eth.contract(address=address, abi=self.abi) except Exception as e: return None, e method = response_data["function"] params = response_data["params"] contract_func = self.contract_obj.functions[method] try: if not params: tx_dict["gas"] = contract_func().estimateGas() + 1000 tx_hash = contract_func().transact(tx_dict) else: tx_dict["gas"] = contract_func(*params).estimateGas() + 1000 tx_hash = contract_func(*params).transact(tx_dict) receipt = self._ezo.w3.eth.waitForTransactionReceipt(tx_hash) except Exception as e: return None, "error executing transaction: {}".format(e) # finally: # self._ezo.w3.personal.lockAccount(account) return receipt, None
[ "def", "response", "(", "self", ",", "response_data", ")", ":", "if", "\"address\"", "not", "in", "response_data", ":", "return", "None", ",", "\"address missing from response_data payload\"", "if", "\"function\"", "not", "in", "response_data", ":", "return", "None", ",", "\"method missing from response_data payload\"", "if", "\"params\"", "not", "in", "response_data", ":", "return", "None", ",", "\"params missing from response_data payload\"", "if", "\"target\"", "not", "in", "response_data", ":", "return", "None", ",", "\"target missing from response_data payload\"", "address", "=", "self", ".", "_ezo", ".", "w3", ".", "toChecksumAddress", "(", "response_data", "[", "\"address\"", "]", ")", "account", "=", "self", ".", "_ezo", ".", "w3", ".", "toChecksumAddress", "(", "get_account", "(", "self", ".", "_ezo", ".", "config", ",", "response_data", "[", "\"target\"", "]", ")", ")", "self", ".", "_ezo", ".", "w3", ".", "eth", ".", "accounts", "[", "0", "]", "=", "account", "tx_dict", "=", "dict", "(", ")", "tx_dict", "[", "\"account\"", "]", "=", "account", "tx_dict", "[", "\"from\"", "]", "=", "account", "password", "=", "os", ".", "environ", "[", "'EZO_PASSWORD'", "]", "if", "'EZO_PASSWORD'", "in", "os", ".", "environ", "else", "None", "u_state", "=", "self", ".", "_ezo", ".", "w3", ".", "personal", ".", "unlockAccount", "(", "account", ",", "password", ")", "if", "not", "self", ".", "contract_obj", ":", "try", ":", "self", ".", "contract_obj", "=", "self", ".", "_ezo", ".", "w3", ".", "eth", ".", "contract", "(", "address", "=", "address", ",", "abi", "=", "self", ".", "abi", ")", "except", "Exception", "as", "e", ":", "return", "None", ",", "e", "method", "=", "response_data", "[", "\"function\"", "]", "params", "=", "response_data", "[", "\"params\"", "]", "contract_func", "=", "self", ".", "contract_obj", ".", "functions", "[", "method", "]", "try", ":", "if", "not", "params", ":", "tx_dict", "[", "\"gas\"", "]", "=", "contract_func", "(", ")", ".", "estimateGas", "(", ")", "+", "1000", "tx_hash", "=", "contract_func", "(", ")", ".", "transact", "(", "tx_dict", ")", "else", ":", "tx_dict", "[", "\"gas\"", "]", "=", "contract_func", "(", "*", "params", ")", ".", "estimateGas", "(", ")", "+", "1000", "tx_hash", "=", "contract_func", "(", "*", "params", ")", ".", "transact", "(", "tx_dict", ")", "receipt", "=", "self", ".", "_ezo", ".", "w3", ".", "eth", ".", "waitForTransactionReceipt", "(", "tx_hash", ")", "except", "Exception", "as", "e", ":", "return", "None", ",", "\"error executing transaction: {}\"", ".", "format", "(", "e", ")", "# finally:", "# self._ezo.w3.personal.lockAccount(account)", "return", "receipt", ",", "None" ]
called by the event handler with the result data :param response_data: result data :return:
[ "called", "by", "the", "event", "handler", "with", "the", "result", "data", ":", "param", "response_data", ":", "result", "data", ":", "return", ":" ]
python
train
37.537037
ggravlingen/pytradfri
pytradfri/api/libcoap_api.py
https://github.com/ggravlingen/pytradfri/blob/63750fa8fb27158c013d24865cdaa7fb82b3ab53/pytradfri/api/libcoap_api.py#L151-L169
def generate_psk(self, security_key): """ Generate and set a psk from the security key. """ if not self._psk: # Backup the real identity. existing_psk_id = self._psk_id # Set the default identity and security key for generation. self._psk_id = 'Client_identity' self._psk = security_key # Ask the Gateway to generate the psk for the identity. self._psk = self.request(Gateway().generate_psk(existing_psk_id)) # Restore the real identity. self._psk_id = existing_psk_id return self._psk
[ "def", "generate_psk", "(", "self", ",", "security_key", ")", ":", "if", "not", "self", ".", "_psk", ":", "# Backup the real identity.", "existing_psk_id", "=", "self", ".", "_psk_id", "# Set the default identity and security key for generation.", "self", ".", "_psk_id", "=", "'Client_identity'", "self", ".", "_psk", "=", "security_key", "# Ask the Gateway to generate the psk for the identity.", "self", ".", "_psk", "=", "self", ".", "request", "(", "Gateway", "(", ")", ".", "generate_psk", "(", "existing_psk_id", ")", ")", "# Restore the real identity.", "self", ".", "_psk_id", "=", "existing_psk_id", "return", "self", ".", "_psk" ]
Generate and set a psk from the security key.
[ "Generate", "and", "set", "a", "psk", "from", "the", "security", "key", "." ]
python
train
32.578947
ktdreyer/txkoji
txkoji/connection.py
https://github.com/ktdreyer/txkoji/blob/a7de380f29f745bf11730b27217208f6d4da7733/txkoji/connection.py#L373-L390
def listTasks(self, opts={}, queryOpts={}): """ Get information about all Koji tasks. Calls "listTasks" XML-RPC. :param dict opts: Eg. {'state': [task_states.OPEN]} :param dict queryOpts: Eg. {'order' : 'priority,create_time'} :returns: deferred that when fired returns a list of Task objects. """ opts['decode'] = True # decode xmlrpc data in "request" data = yield self.call('listTasks', opts, queryOpts) tasks = [] for tdata in data: task = Task.fromDict(tdata) task.connection = self tasks.append(task) defer.returnValue(tasks)
[ "def", "listTasks", "(", "self", ",", "opts", "=", "{", "}", ",", "queryOpts", "=", "{", "}", ")", ":", "opts", "[", "'decode'", "]", "=", "True", "# decode xmlrpc data in \"request\"", "data", "=", "yield", "self", ".", "call", "(", "'listTasks'", ",", "opts", ",", "queryOpts", ")", "tasks", "=", "[", "]", "for", "tdata", "in", "data", ":", "task", "=", "Task", ".", "fromDict", "(", "tdata", ")", "task", ".", "connection", "=", "self", "tasks", ".", "append", "(", "task", ")", "defer", ".", "returnValue", "(", "tasks", ")" ]
Get information about all Koji tasks. Calls "listTasks" XML-RPC. :param dict opts: Eg. {'state': [task_states.OPEN]} :param dict queryOpts: Eg. {'order' : 'priority,create_time'} :returns: deferred that when fired returns a list of Task objects.
[ "Get", "information", "about", "all", "Koji", "tasks", "." ]
python
train
36.055556
guyskk/pybeautifier
pybeautifier.py
https://github.com/guyskk/pybeautifier/blob/bf9ce19d059c3364c690947d91077183b0adb4fc/pybeautifier.py#L72-L116
def handle(client, request): """ Handle format request request struct: { 'data': 'data_need_format', 'formaters': [ { 'name': 'formater_name', 'config': {} # None or dict }, ... # formaters ] } if no formaters, use autopep8 formater and it's default config """ formaters = request.get('formaters', None) if not formaters: formaters = [{'name': 'autopep8'}] logging.debug('formaters: ' + json.dumps(formaters, indent=4)) data = request.get('data', None) if not isinstance(data, str): return send(client, 'invalid data', None) max_line_length = None for formater in formaters: max_line_length = formater.get('config', {}).get('max_line_length') if max_line_length: break for formater in formaters: name = formater.get('name', None) config = formater.get('config', {}) if name not in FORMATERS: return send(client, 'formater {} not support'.format(name), None) formater = FORMATERS[name] if formater is None: return send(client, 'formater {} not installed'.format(name), None) if name == 'isort' and max_line_length: config.setdefault('line_length', max_line_length) data = formater(data, **config) return send(client, None, data)
[ "def", "handle", "(", "client", ",", "request", ")", ":", "formaters", "=", "request", ".", "get", "(", "'formaters'", ",", "None", ")", "if", "not", "formaters", ":", "formaters", "=", "[", "{", "'name'", ":", "'autopep8'", "}", "]", "logging", ".", "debug", "(", "'formaters: '", "+", "json", ".", "dumps", "(", "formaters", ",", "indent", "=", "4", ")", ")", "data", "=", "request", ".", "get", "(", "'data'", ",", "None", ")", "if", "not", "isinstance", "(", "data", ",", "str", ")", ":", "return", "send", "(", "client", ",", "'invalid data'", ",", "None", ")", "max_line_length", "=", "None", "for", "formater", "in", "formaters", ":", "max_line_length", "=", "formater", ".", "get", "(", "'config'", ",", "{", "}", ")", ".", "get", "(", "'max_line_length'", ")", "if", "max_line_length", ":", "break", "for", "formater", "in", "formaters", ":", "name", "=", "formater", ".", "get", "(", "'name'", ",", "None", ")", "config", "=", "formater", ".", "get", "(", "'config'", ",", "{", "}", ")", "if", "name", "not", "in", "FORMATERS", ":", "return", "send", "(", "client", ",", "'formater {} not support'", ".", "format", "(", "name", ")", ",", "None", ")", "formater", "=", "FORMATERS", "[", "name", "]", "if", "formater", "is", "None", ":", "return", "send", "(", "client", ",", "'formater {} not installed'", ".", "format", "(", "name", ")", ",", "None", ")", "if", "name", "==", "'isort'", "and", "max_line_length", ":", "config", ".", "setdefault", "(", "'line_length'", ",", "max_line_length", ")", "data", "=", "formater", "(", "data", ",", "*", "*", "config", ")", "return", "send", "(", "client", ",", "None", ",", "data", ")" ]
Handle format request request struct: { 'data': 'data_need_format', 'formaters': [ { 'name': 'formater_name', 'config': {} # None or dict }, ... # formaters ] } if no formaters, use autopep8 formater and it's default config
[ "Handle", "format", "request" ]
python
train
31.577778
fred49/argtoolbox
argtoolbox/argtoolbox.py
https://github.com/fred49/argtoolbox/blob/e32ad6265567d5a1891df3c3425423774dafab41/argtoolbox/argtoolbox.py#L414-L420
def add_element(self, elt): """Helper to add a element to the current section. The Element name will be used as an identifier.""" if not isinstance(elt, Element): raise TypeError("argument should be a subclass of Element") self.elements[elt.get_name()] = elt return elt
[ "def", "add_element", "(", "self", ",", "elt", ")", ":", "if", "not", "isinstance", "(", "elt", ",", "Element", ")", ":", "raise", "TypeError", "(", "\"argument should be a subclass of Element\"", ")", "self", ".", "elements", "[", "elt", ".", "get_name", "(", ")", "]", "=", "elt", "return", "elt" ]
Helper to add a element to the current section. The Element name will be used as an identifier.
[ "Helper", "to", "add", "a", "element", "to", "the", "current", "section", ".", "The", "Element", "name", "will", "be", "used", "as", "an", "identifier", "." ]
python
train
45
sffjunkie/astral
src/astral.py
https://github.com/sffjunkie/astral/blob/b0aa63fce692357cd33c2bf36c69ed5b6582440c/src/astral.py#L837-L876
def sunrise(self, date=None, local=True, use_elevation=True): """Return sunrise time. Calculates the time in the morning when the sun is a 0.833 degrees below the horizon. This is to account for refraction. :param date: The date for which to calculate the sunrise time. If no date is specified then the current date will be used. :type date: :class:`~datetime.date` :param local: True = Time to be returned in location's time zone; False = Time to be returned in UTC. If not specified then the time will be returned in local time :type local: bool :param use_elevation: True = Return times that allow for the location's elevation; False = Return times that don't use elevation. If not specified then times will take elevation into account. :type use_elevation: bool :returns: The date and time at which sunrise occurs. :rtype: :class:`~datetime.datetime` """ if local and self.timezone is None: raise ValueError("Local time requested but Location has no timezone set.") if self.astral is None: self.astral = Astral() if date is None: date = datetime.date.today() elevation = self.elevation if use_elevation else 0 sunrise = self.astral.sunrise_utc(date, self.latitude, self.longitude, elevation) if local: return sunrise.astimezone(self.tz) else: return sunrise
[ "def", "sunrise", "(", "self", ",", "date", "=", "None", ",", "local", "=", "True", ",", "use_elevation", "=", "True", ")", ":", "if", "local", "and", "self", ".", "timezone", "is", "None", ":", "raise", "ValueError", "(", "\"Local time requested but Location has no timezone set.\"", ")", "if", "self", ".", "astral", "is", "None", ":", "self", ".", "astral", "=", "Astral", "(", ")", "if", "date", "is", "None", ":", "date", "=", "datetime", ".", "date", ".", "today", "(", ")", "elevation", "=", "self", ".", "elevation", "if", "use_elevation", "else", "0", "sunrise", "=", "self", ".", "astral", ".", "sunrise_utc", "(", "date", ",", "self", ".", "latitude", ",", "self", ".", "longitude", ",", "elevation", ")", "if", "local", ":", "return", "sunrise", ".", "astimezone", "(", "self", ".", "tz", ")", "else", ":", "return", "sunrise" ]
Return sunrise time. Calculates the time in the morning when the sun is a 0.833 degrees below the horizon. This is to account for refraction. :param date: The date for which to calculate the sunrise time. If no date is specified then the current date will be used. :type date: :class:`~datetime.date` :param local: True = Time to be returned in location's time zone; False = Time to be returned in UTC. If not specified then the time will be returned in local time :type local: bool :param use_elevation: True = Return times that allow for the location's elevation; False = Return times that don't use elevation. If not specified then times will take elevation into account. :type use_elevation: bool :returns: The date and time at which sunrise occurs. :rtype: :class:`~datetime.datetime`
[ "Return", "sunrise", "time", "." ]
python
train
39.35
inveniosoftware/invenio-records-files
invenio_records_files/api.py
https://github.com/inveniosoftware/invenio-records-files/blob/c410eba986ea43be7e97082d5dcbbdc19ccec39c/invenio_records_files/api.py#L80-L98
def _writable(method): """Check that record is in defined status. :param method: Method to be decorated. :returns: Function decorated. """ @wraps(method) def wrapper(self, *args, **kwargs): """Send record for indexing. :returns: Execution result of the decorated method. :raises InvalidOperationError: It occurs when the bucket is locked or deleted. """ if self.bucket.locked or self.bucket.deleted: raise InvalidOperationError() return method(self, *args, **kwargs) return wrapper
[ "def", "_writable", "(", "method", ")", ":", "@", "wraps", "(", "method", ")", "def", "wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"Send record for indexing.\n\n :returns: Execution result of the decorated method.\n\n :raises InvalidOperationError: It occurs when the bucket is locked or\n deleted.\n \"\"\"", "if", "self", ".", "bucket", ".", "locked", "or", "self", ".", "bucket", ".", "deleted", ":", "raise", "InvalidOperationError", "(", ")", "return", "method", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper" ]
Check that record is in defined status. :param method: Method to be decorated. :returns: Function decorated.
[ "Check", "that", "record", "is", "in", "defined", "status", "." ]
python
train
29.789474
mmp2/megaman
doc/sphinxext/numpy_ext/autodoc_enhancements.py
https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/doc/sphinxext/numpy_ext/autodoc_enhancements.py#L9-L55
def type_object_attrgetter(obj, attr, *defargs): """ This implements an improved attrgetter for type objects (i.e. classes) that can handle class attributes that are implemented as properties on a metaclass. Normally `getattr` on a class with a `property` (say, "foo"), would return the `property` object itself. However, if the class has a metaclass which *also* defines a `property` named "foo", ``getattr(cls, 'foo')`` will find the "foo" property on the metaclass and resolve it. For the purposes of autodoc we just want to document the "foo" property defined on the class, not on the metaclass. For example:: >>> class Meta(type): ... @property ... def foo(cls): ... return 'foo' ... >>> class MyClass(metaclass=Meta): ... @property ... def foo(self): ... \"\"\"Docstring for MyClass.foo property.\"\"\" ... return 'myfoo' ... >>> getattr(MyClass, 'foo') 'foo' >>> type_object_attrgetter(MyClass, 'foo') <property at 0x...> >>> type_object_attrgetter(MyClass, 'foo').__doc__ 'Docstring for MyClass.foo property.' The last line of the example shows the desired behavior for the purposes of autodoc. """ for base in obj.__mro__: if attr in base.__dict__: if isinstance(base.__dict__[attr], property): # Note, this should only be used for properties--for any other # type of descriptor (classmethod, for example) this can mess # up existing expectations of what getattr(cls, ...) returns return base.__dict__[attr] break return getattr(obj, attr, *defargs)
[ "def", "type_object_attrgetter", "(", "obj", ",", "attr", ",", "*", "defargs", ")", ":", "for", "base", "in", "obj", ".", "__mro__", ":", "if", "attr", "in", "base", ".", "__dict__", ":", "if", "isinstance", "(", "base", ".", "__dict__", "[", "attr", "]", ",", "property", ")", ":", "# Note, this should only be used for properties--for any other", "# type of descriptor (classmethod, for example) this can mess", "# up existing expectations of what getattr(cls, ...) returns", "return", "base", ".", "__dict__", "[", "attr", "]", "break", "return", "getattr", "(", "obj", ",", "attr", ",", "*", "defargs", ")" ]
This implements an improved attrgetter for type objects (i.e. classes) that can handle class attributes that are implemented as properties on a metaclass. Normally `getattr` on a class with a `property` (say, "foo"), would return the `property` object itself. However, if the class has a metaclass which *also* defines a `property` named "foo", ``getattr(cls, 'foo')`` will find the "foo" property on the metaclass and resolve it. For the purposes of autodoc we just want to document the "foo" property defined on the class, not on the metaclass. For example:: >>> class Meta(type): ... @property ... def foo(cls): ... return 'foo' ... >>> class MyClass(metaclass=Meta): ... @property ... def foo(self): ... \"\"\"Docstring for MyClass.foo property.\"\"\" ... return 'myfoo' ... >>> getattr(MyClass, 'foo') 'foo' >>> type_object_attrgetter(MyClass, 'foo') <property at 0x...> >>> type_object_attrgetter(MyClass, 'foo').__doc__ 'Docstring for MyClass.foo property.' The last line of the example shows the desired behavior for the purposes of autodoc.
[ "This", "implements", "an", "improved", "attrgetter", "for", "type", "objects", "(", "i", ".", "e", ".", "classes", ")", "that", "can", "handle", "class", "attributes", "that", "are", "implemented", "as", "properties", "on", "a", "metaclass", "." ]
python
train
37.361702
titusjan/argos
argos/qt/registrytable.py
https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/qt/registrytable.py#L265-L271
def setCurrentRegItem(self, regItem): """ Sets the current registry item. """ rowIndex = self.model().indexFromItem(regItem) if not rowIndex.isValid(): logger.warn("Can't select {!r} in table".format(regItem)) self.setCurrentIndex(rowIndex)
[ "def", "setCurrentRegItem", "(", "self", ",", "regItem", ")", ":", "rowIndex", "=", "self", ".", "model", "(", ")", ".", "indexFromItem", "(", "regItem", ")", "if", "not", "rowIndex", ".", "isValid", "(", ")", ":", "logger", ".", "warn", "(", "\"Can't select {!r} in table\"", ".", "format", "(", "regItem", ")", ")", "self", ".", "setCurrentIndex", "(", "rowIndex", ")" ]
Sets the current registry item.
[ "Sets", "the", "current", "registry", "item", "." ]
python
train
40.857143
DataKitchen/DKCloudCommand
DKCloudCommand/modules/DKCloudAPI.py
https://github.com/DataKitchen/DKCloudCommand/blob/1cf9cb08ab02f063eef6b5c4b327af142991daa3/DKCloudCommand/modules/DKCloudAPI.py#L858-L901
def recipe_status(self, kitchen, recipe, local_dir=None): """ gets the status of a recipe :param self: DKCloudAPI :param kitchen: string :param recipe: string :param local_dir: string -- :rtype: dict """ rc = DKReturnCode() if kitchen is None or isinstance(kitchen, basestring) is False: rc.set(rc.DK_FAIL, 'issue with kitchen parameter') return rc if recipe is None or isinstance(recipe, basestring) is False: rc.set(rc.DK_FAIL, 'issue with recipe parameter') return rc url = '%s/v2/recipe/tree/%s/%s' % (self.get_url_for_direct_rest_call(), kitchen, recipe) try: response = requests.get(url, headers=self._get_common_headers()) rdict = self._get_json(response) pass except (RequestException, ValueError, TypeError), c: s = "get_recipe: exception: %s" % str(c) rc.set(rc.DK_FAIL, s) return rc if DKCloudAPI._valid_response(response): # Now get the local sha. if local_dir is None: check_path = os.getcwd() else: if os.path.isdir(local_dir) is False: print 'Local path %s does not exist' % local_dir return None else: check_path = local_dir local_sha = get_directory_sha(check_path) remote_sha = rdict['recipes'][recipe] rv = compare_sha(remote_sha, local_sha) rc.set(rc.DK_SUCCESS, None, rv) else: arc = DKAPIReturnCode(rdict, response) rc.set(rc.DK_FAIL, arc.get_message()) return rc
[ "def", "recipe_status", "(", "self", ",", "kitchen", ",", "recipe", ",", "local_dir", "=", "None", ")", ":", "rc", "=", "DKReturnCode", "(", ")", "if", "kitchen", "is", "None", "or", "isinstance", "(", "kitchen", ",", "basestring", ")", "is", "False", ":", "rc", ".", "set", "(", "rc", ".", "DK_FAIL", ",", "'issue with kitchen parameter'", ")", "return", "rc", "if", "recipe", "is", "None", "or", "isinstance", "(", "recipe", ",", "basestring", ")", "is", "False", ":", "rc", ".", "set", "(", "rc", ".", "DK_FAIL", ",", "'issue with recipe parameter'", ")", "return", "rc", "url", "=", "'%s/v2/recipe/tree/%s/%s'", "%", "(", "self", ".", "get_url_for_direct_rest_call", "(", ")", ",", "kitchen", ",", "recipe", ")", "try", ":", "response", "=", "requests", ".", "get", "(", "url", ",", "headers", "=", "self", ".", "_get_common_headers", "(", ")", ")", "rdict", "=", "self", ".", "_get_json", "(", "response", ")", "pass", "except", "(", "RequestException", ",", "ValueError", ",", "TypeError", ")", ",", "c", ":", "s", "=", "\"get_recipe: exception: %s\"", "%", "str", "(", "c", ")", "rc", ".", "set", "(", "rc", ".", "DK_FAIL", ",", "s", ")", "return", "rc", "if", "DKCloudAPI", ".", "_valid_response", "(", "response", ")", ":", "# Now get the local sha.", "if", "local_dir", "is", "None", ":", "check_path", "=", "os", ".", "getcwd", "(", ")", "else", ":", "if", "os", ".", "path", ".", "isdir", "(", "local_dir", ")", "is", "False", ":", "print", "'Local path %s does not exist'", "%", "local_dir", "return", "None", "else", ":", "check_path", "=", "local_dir", "local_sha", "=", "get_directory_sha", "(", "check_path", ")", "remote_sha", "=", "rdict", "[", "'recipes'", "]", "[", "recipe", "]", "rv", "=", "compare_sha", "(", "remote_sha", ",", "local_sha", ")", "rc", ".", "set", "(", "rc", ".", "DK_SUCCESS", ",", "None", ",", "rv", ")", "else", ":", "arc", "=", "DKAPIReturnCode", "(", "rdict", ",", "response", ")", "rc", ".", "set", "(", "rc", ".", "DK_FAIL", ",", "arc", ".", "get_message", "(", ")", ")", "return", "rc" ]
gets the status of a recipe :param self: DKCloudAPI :param kitchen: string :param recipe: string :param local_dir: string -- :rtype: dict
[ "gets", "the", "status", "of", "a", "recipe", ":", "param", "self", ":", "DKCloudAPI", ":", "param", "kitchen", ":", "string", ":", "param", "recipe", ":", "string", ":", "param", "local_dir", ":", "string", "--", ":", "rtype", ":", "dict" ]
python
train
39.931818
Frzk/Ellis
ellis_actions/nftables.py
https://github.com/Frzk/Ellis/blob/39ce8987cbc503354cf1f45927344186a8b18363/ellis_actions/nftables.py#L26-L48
async def add(self, setname, ip, timeout): """ Adds the given IP address to the specified set. If timeout is specified, the IP will stay in the set for the given duration. Else it will stay in the set during the set default timeout. timeout must be given in seconds. The resulting command looks like this: ``nft add element inet firewall ellis_blacklist4 { 192.0.2.10 timeout 30s }`` """ # We have to double-quote the '{' '}' at both ends for `format` to work. if timeout > 0: to_ban = "{{ {0} timeout {1}s }}".format(ip, timeout) else: to_ban = "{{ {0} }}".format(ip) args = ['add', 'element', self.table_family, self.table_name, setname, to_ban] return await self.start(__class__.CMD, *args)
[ "async", "def", "add", "(", "self", ",", "setname", ",", "ip", ",", "timeout", ")", ":", "# We have to double-quote the '{' '}' at both ends for `format` to work.", "if", "timeout", ">", "0", ":", "to_ban", "=", "\"{{ {0} timeout {1}s }}\"", ".", "format", "(", "ip", ",", "timeout", ")", "else", ":", "to_ban", "=", "\"{{ {0} }}\".", "f", "ormat(", "i", "p)", "", "args", "=", "[", "'add'", ",", "'element'", ",", "self", ".", "table_family", ",", "self", ".", "table_name", ",", "setname", ",", "to_ban", "]", "return", "await", "self", ".", "start", "(", "__class__", ".", "CMD", ",", "*", "args", ")" ]
Adds the given IP address to the specified set. If timeout is specified, the IP will stay in the set for the given duration. Else it will stay in the set during the set default timeout. timeout must be given in seconds. The resulting command looks like this: ``nft add element inet firewall ellis_blacklist4 { 192.0.2.10 timeout 30s }``
[ "Adds", "the", "given", "IP", "address", "to", "the", "specified", "set", "." ]
python
train
35.043478
annoviko/pyclustering
pyclustering/nnet/examples/syncpr_examples.py
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/nnet/examples/syncpr_examples.py#L93-L103
def small_abc_image_recognition(): """! @brief Trains network using letters 'A', 'B', 'C', and recognize each of them with and without noise. """ images = []; images += IMAGE_SYMBOL_SAMPLES.LIST_IMAGES_SYMBOL_A; images += IMAGE_SYMBOL_SAMPLES.LIST_IMAGES_SYMBOL_B; images += IMAGE_SYMBOL_SAMPLES.LIST_IMAGES_SYMBOL_C; template_recognition_image(images, 250, 25);
[ "def", "small_abc_image_recognition", "(", ")", ":", "images", "=", "[", "]", "images", "+=", "IMAGE_SYMBOL_SAMPLES", ".", "LIST_IMAGES_SYMBOL_A", "images", "+=", "IMAGE_SYMBOL_SAMPLES", ".", "LIST_IMAGES_SYMBOL_B", "images", "+=", "IMAGE_SYMBOL_SAMPLES", ".", "LIST_IMAGES_SYMBOL_C", "template_recognition_image", "(", "images", ",", "250", ",", "25", ")" ]
! @brief Trains network using letters 'A', 'B', 'C', and recognize each of them with and without noise.
[ "!" ]
python
valid
36.727273
DataONEorg/d1_python
gmn/src/d1_gmn/app/views/external.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/views/external.py#L417-L435
def post_refresh_system_metadata(request): """MNStorage.systemMetadataChanged(session, did, serialVersion, dateSysMetaLastModified) → boolean.""" d1_gmn.app.views.assert_db.post_has_mime_parts( request, ( ('field', 'pid'), ('field', 'serialVersion'), ('field', 'dateSysMetaLastModified'), ), ) d1_gmn.app.views.assert_db.is_existing_object(request.POST['pid']) d1_gmn.app.models.sysmeta_refresh_queue( request.POST['pid'], request.POST['serialVersion'], request.POST['dateSysMetaLastModified'], 'queued', ).save() return d1_gmn.app.views.util.http_response_with_boolean_true_type()
[ "def", "post_refresh_system_metadata", "(", "request", ")", ":", "d1_gmn", ".", "app", ".", "views", ".", "assert_db", ".", "post_has_mime_parts", "(", "request", ",", "(", "(", "'field'", ",", "'pid'", ")", ",", "(", "'field'", ",", "'serialVersion'", ")", ",", "(", "'field'", ",", "'dateSysMetaLastModified'", ")", ",", ")", ",", ")", "d1_gmn", ".", "app", ".", "views", ".", "assert_db", ".", "is_existing_object", "(", "request", ".", "POST", "[", "'pid'", "]", ")", "d1_gmn", ".", "app", ".", "models", ".", "sysmeta_refresh_queue", "(", "request", ".", "POST", "[", "'pid'", "]", ",", "request", ".", "POST", "[", "'serialVersion'", "]", ",", "request", ".", "POST", "[", "'dateSysMetaLastModified'", "]", ",", "'queued'", ",", ")", ".", "save", "(", ")", "return", "d1_gmn", ".", "app", ".", "views", ".", "util", ".", "http_response_with_boolean_true_type", "(", ")" ]
MNStorage.systemMetadataChanged(session, did, serialVersion, dateSysMetaLastModified) → boolean.
[ "MNStorage", ".", "systemMetadataChanged", "(", "session", "did", "serialVersion", "dateSysMetaLastModified", ")", "→", "boolean", "." ]
python
train
36.157895
jonathf/chaospy
chaospy/distributions/operators/multiply.py
https://github.com/jonathf/chaospy/blob/25ecfa7bf5608dc10c0b31d142ded0e3755f5d74/chaospy/distributions/operators/multiply.py#L501-L513
def mul(left, right): """ Distribution multiplication. Args: left (Dist, numpy.ndarray) : left hand side. right (Dist, numpy.ndarray) : right hand side. """ from .mv_mul import MvMul length = max(left, right) if length == 1: return Mul(left, right) return MvMul(left, right)
[ "def", "mul", "(", "left", ",", "right", ")", ":", "from", ".", "mv_mul", "import", "MvMul", "length", "=", "max", "(", "left", ",", "right", ")", "if", "length", "==", "1", ":", "return", "Mul", "(", "left", ",", "right", ")", "return", "MvMul", "(", "left", ",", "right", ")" ]
Distribution multiplication. Args: left (Dist, numpy.ndarray) : left hand side. right (Dist, numpy.ndarray) : right hand side.
[ "Distribution", "multiplication", "." ]
python
train
24.538462
blockstack/blockstack-core
blockstack/lib/nameset/namedb.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/nameset/namedb.py#L182-L220
def borrow_readwrite_instance(cls, working_dir, block_number, expected_snapshots={}): """ Get a read/write database handle to the blockstack db. At most one such handle can exist within the program. When the caller is done with the handle, it should call release_readwrite_instance() Returns the handle on success Returns None if we can't set up the db. Aborts if there is another read/write handle out there somewhere. """ global blockstack_db, blockstack_db_lastblock, blockstack_db_lock import virtualchain_hooks db_path = virtualchain.get_db_filename(virtualchain_hooks, working_dir) blockstack_db_lock.acquire() try: assert blockstack_db is None, "Borrowing violation" except Exception, e: log.exception(e) log.error("FATAL: Borrowing violation") os.abort() db = BlockstackDB(db_path, DISPOSITION_RW, working_dir, get_genesis_block(), expected_snapshots=expected_snapshots) rc = db.db_setup() if not rc: db.close() blockstack_db_lock.release() log.error("Failed to set up virtualchain state engine") return None blockstack_db = db blockstack_db_lastblock = block_number blockstack_db_lock.release() return blockstack_db
[ "def", "borrow_readwrite_instance", "(", "cls", ",", "working_dir", ",", "block_number", ",", "expected_snapshots", "=", "{", "}", ")", ":", "global", "blockstack_db", ",", "blockstack_db_lastblock", ",", "blockstack_db_lock", "import", "virtualchain_hooks", "db_path", "=", "virtualchain", ".", "get_db_filename", "(", "virtualchain_hooks", ",", "working_dir", ")", "blockstack_db_lock", ".", "acquire", "(", ")", "try", ":", "assert", "blockstack_db", "is", "None", ",", "\"Borrowing violation\"", "except", "Exception", ",", "e", ":", "log", ".", "exception", "(", "e", ")", "log", ".", "error", "(", "\"FATAL: Borrowing violation\"", ")", "os", ".", "abort", "(", ")", "db", "=", "BlockstackDB", "(", "db_path", ",", "DISPOSITION_RW", ",", "working_dir", ",", "get_genesis_block", "(", ")", ",", "expected_snapshots", "=", "expected_snapshots", ")", "rc", "=", "db", ".", "db_setup", "(", ")", "if", "not", "rc", ":", "db", ".", "close", "(", ")", "blockstack_db_lock", ".", "release", "(", ")", "log", ".", "error", "(", "\"Failed to set up virtualchain state engine\"", ")", "return", "None", "blockstack_db", "=", "db", "blockstack_db_lastblock", "=", "block_number", "blockstack_db_lock", ".", "release", "(", ")", "return", "blockstack_db" ]
Get a read/write database handle to the blockstack db. At most one such handle can exist within the program. When the caller is done with the handle, it should call release_readwrite_instance() Returns the handle on success Returns None if we can't set up the db. Aborts if there is another read/write handle out there somewhere.
[ "Get", "a", "read", "/", "write", "database", "handle", "to", "the", "blockstack", "db", ".", "At", "most", "one", "such", "handle", "can", "exist", "within", "the", "program", "." ]
python
train
35.051282
spotify/luigi
luigi/parameter.py
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/parameter.py#L790-L802
def parse(self, input): """ Parses a time delta from the input. See :py:class:`TimeDeltaParameter` for details on supported formats. """ result = self._parseIso8601(input) if not result: result = self._parseSimple(input) if result is not None: return result else: raise ParameterException("Invalid time delta - could not parse %s" % input)
[ "def", "parse", "(", "self", ",", "input", ")", ":", "result", "=", "self", ".", "_parseIso8601", "(", "input", ")", "if", "not", "result", ":", "result", "=", "self", ".", "_parseSimple", "(", "input", ")", "if", "result", "is", "not", "None", ":", "return", "result", "else", ":", "raise", "ParameterException", "(", "\"Invalid time delta - could not parse %s\"", "%", "input", ")" ]
Parses a time delta from the input. See :py:class:`TimeDeltaParameter` for details on supported formats.
[ "Parses", "a", "time", "delta", "from", "the", "input", "." ]
python
train
32.923077
ioos/wicken
wicken/xml_dogma.py
https://github.com/ioos/wicken/blob/5e1097e27788a5cbfbcd7ecd90542b8ea2185a56/wicken/xml_dogma.py#L193-L208
def _eval_xpath(self, xpath): """ Evaluates xpath expressions. Either string or XPath object. """ if isinstance(xpath, etree.XPath): result = xpath(self._dataObject) else: result = self._dataObject.xpath(xpath,namespaces=self._namespaces) #print 'Xpath expression:', xpath #print etree.tostring(self._dataObject) #print 'Got Result: \n%s\n End Result' % result return result
[ "def", "_eval_xpath", "(", "self", ",", "xpath", ")", ":", "if", "isinstance", "(", "xpath", ",", "etree", ".", "XPath", ")", ":", "result", "=", "xpath", "(", "self", ".", "_dataObject", ")", "else", ":", "result", "=", "self", ".", "_dataObject", ".", "xpath", "(", "xpath", ",", "namespaces", "=", "self", ".", "_namespaces", ")", "#print 'Xpath expression:', xpath", "#print etree.tostring(self._dataObject)", "#print 'Got Result: \\n%s\\n End Result' % result", "return", "result" ]
Evaluates xpath expressions. Either string or XPath object.
[ "Evaluates", "xpath", "expressions", "." ]
python
train
29.25
openstack/networking-cisco
networking_cisco/apps/saf/agent/dfa_agent.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/agent/dfa_agent.py#L134-L138
def setup_client_rpc(self): """Setup RPC client for dfa agent.""" # Setup RPC client. self.clnt = rpc.DfaRpcClient(self._url, constants.DFA_SERVER_QUEUE, exchange=constants.DFA_EXCHANGE)
[ "def", "setup_client_rpc", "(", "self", ")", ":", "# Setup RPC client.", "self", ".", "clnt", "=", "rpc", ".", "DfaRpcClient", "(", "self", ".", "_url", ",", "constants", ".", "DFA_SERVER_QUEUE", ",", "exchange", "=", "constants", ".", "DFA_EXCHANGE", ")" ]
Setup RPC client for dfa agent.
[ "Setup", "RPC", "client", "for", "dfa", "agent", "." ]
python
train
48.6
apache/incubator-superset
superset/views/api.py
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/api.py#L58-L72
def query_form_data(self): """ Get the formdata stored in the database for existing slice. params: slice_id: integer """ form_data = {} slice_id = request.args.get('slice_id') if slice_id: slc = db.session.query(models.Slice).filter_by(id=slice_id).one_or_none() if slc: form_data = slc.form_data.copy() update_time_range(form_data) return json.dumps(form_data)
[ "def", "query_form_data", "(", "self", ")", ":", "form_data", "=", "{", "}", "slice_id", "=", "request", ".", "args", ".", "get", "(", "'slice_id'", ")", "if", "slice_id", ":", "slc", "=", "db", ".", "session", ".", "query", "(", "models", ".", "Slice", ")", ".", "filter_by", "(", "id", "=", "slice_id", ")", ".", "one_or_none", "(", ")", "if", "slc", ":", "form_data", "=", "slc", ".", "form_data", ".", "copy", "(", ")", "update_time_range", "(", "form_data", ")", "return", "json", ".", "dumps", "(", "form_data", ")" ]
Get the formdata stored in the database for existing slice. params: slice_id: integer
[ "Get", "the", "formdata", "stored", "in", "the", "database", "for", "existing", "slice", ".", "params", ":", "slice_id", ":", "integer" ]
python
train
30.733333
saltstack/salt
salt/modules/iptables.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/iptables.py#L977-L1046
def _parse_conf(conf_file=None, in_mem=False, family='ipv4'): ''' If a file is not passed in, and the correct one for this OS is not detected, return False ''' if _conf() and not conf_file and not in_mem: conf_file = _conf(family) rules = '' if conf_file: with salt.utils.files.fopen(conf_file, 'r') as ifile: rules = ifile.read() elif in_mem: cmd = '{0}-save' . format(_iptables_cmd(family)) rules = __salt__['cmd.run'](cmd) else: raise SaltException('A file was not found to parse') ret = {} table = '' parser = _parser() for line in rules.splitlines(): line = salt.utils.stringutils.to_unicode(line) if line.startswith('*'): table = line.replace('*', '') ret[table] = {} elif line.startswith(':'): comps = line.split() chain = comps[0].replace(':', '') ret[table][chain] = {} ret[table][chain]['policy'] = comps[1] counters = comps[2].replace('[', '').replace(']', '') (pcount, bcount) = counters.split(':') ret[table][chain]['packet count'] = pcount ret[table][chain]['byte count'] = bcount ret[table][chain]['rules'] = [] ret[table][chain]['rules_comment'] = {} elif line.startswith('-A'): args = salt.utils.args.shlex_split(line) index = 0 while index + 1 < len(args): swap = args[index] == '!' and args[index + 1].startswith('-') if swap: args[index], args[index + 1] = args[index + 1], args[index] if args[index].startswith('-'): index += 1 if args[index].startswith('-') or (args[index] == '!' and not swap): args.insert(index, '') else: while (index + 1 < len(args) and args[index + 1] != '!' and not args[index + 1].startswith('-')): args[index] += ' {0}'.format(args.pop(index + 1)) index += 1 if args[-1].startswith('-'): args.append('') parsed_args = [] opts, _ = parser.parse_known_args(args) parsed_args = vars(opts) ret_args = {} chain = parsed_args['append'] for arg in parsed_args: if parsed_args[arg] and arg is not 'append': ret_args[arg] = parsed_args[arg] if parsed_args['comment'] is not None: comment = parsed_args['comment'][0].strip('"') ret[table][chain[0]]['rules_comment'][comment] = ret_args ret[table][chain[0]]['rules'].append(ret_args) return ret
[ "def", "_parse_conf", "(", "conf_file", "=", "None", ",", "in_mem", "=", "False", ",", "family", "=", "'ipv4'", ")", ":", "if", "_conf", "(", ")", "and", "not", "conf_file", "and", "not", "in_mem", ":", "conf_file", "=", "_conf", "(", "family", ")", "rules", "=", "''", "if", "conf_file", ":", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "conf_file", ",", "'r'", ")", "as", "ifile", ":", "rules", "=", "ifile", ".", "read", "(", ")", "elif", "in_mem", ":", "cmd", "=", "'{0}-save'", ".", "format", "(", "_iptables_cmd", "(", "family", ")", ")", "rules", "=", "__salt__", "[", "'cmd.run'", "]", "(", "cmd", ")", "else", ":", "raise", "SaltException", "(", "'A file was not found to parse'", ")", "ret", "=", "{", "}", "table", "=", "''", "parser", "=", "_parser", "(", ")", "for", "line", "in", "rules", ".", "splitlines", "(", ")", ":", "line", "=", "salt", ".", "utils", ".", "stringutils", ".", "to_unicode", "(", "line", ")", "if", "line", ".", "startswith", "(", "'*'", ")", ":", "table", "=", "line", ".", "replace", "(", "'*'", ",", "''", ")", "ret", "[", "table", "]", "=", "{", "}", "elif", "line", ".", "startswith", "(", "':'", ")", ":", "comps", "=", "line", ".", "split", "(", ")", "chain", "=", "comps", "[", "0", "]", ".", "replace", "(", "':'", ",", "''", ")", "ret", "[", "table", "]", "[", "chain", "]", "=", "{", "}", "ret", "[", "table", "]", "[", "chain", "]", "[", "'policy'", "]", "=", "comps", "[", "1", "]", "counters", "=", "comps", "[", "2", "]", ".", "replace", "(", "'['", ",", "''", ")", ".", "replace", "(", "']'", ",", "''", ")", "(", "pcount", ",", "bcount", ")", "=", "counters", ".", "split", "(", "':'", ")", "ret", "[", "table", "]", "[", "chain", "]", "[", "'packet count'", "]", "=", "pcount", "ret", "[", "table", "]", "[", "chain", "]", "[", "'byte count'", "]", "=", "bcount", "ret", "[", "table", "]", "[", "chain", "]", "[", "'rules'", "]", "=", "[", "]", "ret", "[", "table", "]", "[", "chain", "]", "[", "'rules_comment'", "]", "=", "{", "}", "elif", "line", ".", "startswith", "(", "'-A'", ")", ":", "args", "=", "salt", ".", "utils", ".", "args", ".", "shlex_split", "(", "line", ")", "index", "=", "0", "while", "index", "+", "1", "<", "len", "(", "args", ")", ":", "swap", "=", "args", "[", "index", "]", "==", "'!'", "and", "args", "[", "index", "+", "1", "]", ".", "startswith", "(", "'-'", ")", "if", "swap", ":", "args", "[", "index", "]", ",", "args", "[", "index", "+", "1", "]", "=", "args", "[", "index", "+", "1", "]", ",", "args", "[", "index", "]", "if", "args", "[", "index", "]", ".", "startswith", "(", "'-'", ")", ":", "index", "+=", "1", "if", "args", "[", "index", "]", ".", "startswith", "(", "'-'", ")", "or", "(", "args", "[", "index", "]", "==", "'!'", "and", "not", "swap", ")", ":", "args", ".", "insert", "(", "index", ",", "''", ")", "else", ":", "while", "(", "index", "+", "1", "<", "len", "(", "args", ")", "and", "args", "[", "index", "+", "1", "]", "!=", "'!'", "and", "not", "args", "[", "index", "+", "1", "]", ".", "startswith", "(", "'-'", ")", ")", ":", "args", "[", "index", "]", "+=", "' {0}'", ".", "format", "(", "args", ".", "pop", "(", "index", "+", "1", ")", ")", "index", "+=", "1", "if", "args", "[", "-", "1", "]", ".", "startswith", "(", "'-'", ")", ":", "args", ".", "append", "(", "''", ")", "parsed_args", "=", "[", "]", "opts", ",", "_", "=", "parser", ".", "parse_known_args", "(", "args", ")", "parsed_args", "=", "vars", "(", "opts", ")", "ret_args", "=", "{", "}", "chain", "=", "parsed_args", "[", "'append'", "]", "for", "arg", 
"in", "parsed_args", ":", "if", "parsed_args", "[", "arg", "]", "and", "arg", "is", "not", "'append'", ":", "ret_args", "[", "arg", "]", "=", "parsed_args", "[", "arg", "]", "if", "parsed_args", "[", "'comment'", "]", "is", "not", "None", ":", "comment", "=", "parsed_args", "[", "'comment'", "]", "[", "0", "]", ".", "strip", "(", "'\"'", ")", "ret", "[", "table", "]", "[", "chain", "[", "0", "]", "]", "[", "'rules_comment'", "]", "[", "comment", "]", "=", "ret_args", "ret", "[", "table", "]", "[", "chain", "[", "0", "]", "]", "[", "'rules'", "]", ".", "append", "(", "ret_args", ")", "return", "ret" ]
If a file is not passed in, and the correct one for this OS is not detected, return False
[ "If", "a", "file", "is", "not", "passed", "in", "and", "the", "correct", "one", "for", "this", "OS", "is", "not", "detected", "return", "False" ]
python
train
40.885714
pinterest/thrift-tools
thrift_tools/thrift_struct.py
https://github.com/pinterest/thrift-tools/blob/64e74aec89e2491c781fc62d1c45944dc15aba28/thrift_tools/thrift_struct.py#L33-L43
def is_isomorphic_to(self, other): """ Returns true if all fields of other struct are isomorphic to this struct's fields """ return (isinstance(other, self.__class__) and len(self.fields) == len(other.fields) and all(a.is_isomorphic_to(b) for a, b in zip(self.fields, other.fields)))
[ "def", "is_isomorphic_to", "(", "self", ",", "other", ")", ":", "return", "(", "isinstance", "(", "other", ",", "self", ".", "__class__", ")", "and", "len", "(", "self", ".", "fields", ")", "==", "len", "(", "other", ".", "fields", ")", "and", "all", "(", "a", ".", "is_isomorphic_to", "(", "b", ")", "for", "a", ",", "b", "in", "zip", "(", "self", ".", "fields", ",", "other", ".", "fields", ")", ")", ")" ]
Returns true if all fields of other struct are isomorphic to this struct's fields
[ "Returns", "true", "if", "all", "fields", "of", "other", "struct", "are", "isomorphic", "to", "this", "struct", "s", "fields" ]
python
valid
39.545455
MatiasSM/fcb
fcb/processing/filesystem/Compressor.py
https://github.com/MatiasSM/fcb/blob/92a6c535287ea1c1ef986954a5d66e7905fb6221/fcb/processing/filesystem/Compressor.py#L110-L123
def can_add_new_content(self, block, file_info): """ new content from file_info can be added into block iff - file count limit hasn't been reached for the block - there is enough space to completely fit the info into the block - OR the info can be split and some info can fit into the block """ return ((self._max_files_per_container == 0 or self._max_files_per_container > len(block.content_file_infos)) and (self.does_content_fit(file_info, block) or # check if we can fit some content by splitting the file # Note: if max size was unlimited, does_content_fit would have been True (block.content_size < self._max_container_content_size_in_bytes and (self._should_split_small_files or not self._is_small_file(file_info)))))
[ "def", "can_add_new_content", "(", "self", ",", "block", ",", "file_info", ")", ":", "return", "(", "(", "self", ".", "_max_files_per_container", "==", "0", "or", "self", ".", "_max_files_per_container", ">", "len", "(", "block", ".", "content_file_infos", ")", ")", "and", "(", "self", ".", "does_content_fit", "(", "file_info", ",", "block", ")", "or", "# check if we can fit some content by splitting the file", "# Note: if max size was unlimited, does_content_fit would have been True", "(", "block", ".", "content_size", "<", "self", ".", "_max_container_content_size_in_bytes", "and", "(", "self", ".", "_should_split_small_files", "or", "not", "self", ".", "_is_small_file", "(", "file_info", ")", ")", ")", ")", ")" ]
new content from file_info can be added into block iff - file count limit hasn't been reached for the block - there is enough space to completely fit the info into the block - OR the info can be split and some info can fit into the block
[ "new", "content", "from", "file_info", "can", "be", "added", "into", "block", "iff", "-", "file", "count", "limit", "hasn", "t", "been", "reached", "for", "the", "block", "-", "there", "is", "enough", "space", "to", "completely", "fit", "the", "info", "into", "the", "block", "-", "OR", "the", "info", "can", "be", "split", "and", "some", "info", "can", "fit", "into", "the", "block" ]
python
train
63.5
smarie/python-valid8
valid8/entry_points_annotations.py
https://github.com/smarie/python-valid8/blob/5e15d1de11602933c5114eb9f73277ad91d97800/valid8/entry_points_annotations.py#L442-L469
def validate_arg(f, arg_name, *validation_func, # type: ValidationFuncs **kwargs ): # type: (...) -> Callable """ A decorator to apply function input validation for the given argument name, with the provided base validation function(s). You may use several such decorators on a given function as long as they are stacked on top of each other (no external decorator in the middle) :param arg_name: :param validation_func: the base validation function or list of base validation functions to use. A callable, a tuple(callable, help_msg_str), a tuple(callable, failure_type), or a list of several such elements. Nested lists are supported and indicate an implicit `and_` (such as the main list). Tuples indicate an implicit `_failure_raiser`. [mini_lambda](https://smarie.github.io/python-mini-lambda/) expressions can be used instead of callables, they will be transformed to functions automatically. :param error_type: a subclass of ValidationError to raise in case of validation failure. By default a ValidationError will be raised with the provided help_msg :param help_msg: an optional help message to be used in the raised error in case of validation failure. :param none_policy: describes how None values should be handled. See `NoneArgPolicy` for the various possibilities. Default is `NoneArgPolicy.ACCEPT_IF_OPTIONAl_ELSE_VALIDATE`. :param kw_context_args: optional contextual information to store in the exception, and that may be also used to format the help message :return: a function decorator, able to transform a function into a function that will perform input validation before executing the function's code everytime it is executed. """ return decorate_with_validation(f, arg_name, *validation_func, **kwargs)
[ "def", "validate_arg", "(", "f", ",", "arg_name", ",", "*", "validation_func", ",", "# type: ValidationFuncs", "*", "*", "kwargs", ")", ":", "# type: (...) -> Callable", "return", "decorate_with_validation", "(", "f", ",", "arg_name", ",", "*", "validation_func", ",", "*", "*", "kwargs", ")" ]
A decorator to apply function input validation for the given argument name, with the provided base validation function(s). You may use several such decorators on a given function as long as they are stacked on top of each other (no external decorator in the middle) :param arg_name: :param validation_func: the base validation function or list of base validation functions to use. A callable, a tuple(callable, help_msg_str), a tuple(callable, failure_type), or a list of several such elements. Nested lists are supported and indicate an implicit `and_` (such as the main list). Tuples indicate an implicit `_failure_raiser`. [mini_lambda](https://smarie.github.io/python-mini-lambda/) expressions can be used instead of callables, they will be transformed to functions automatically. :param error_type: a subclass of ValidationError to raise in case of validation failure. By default a ValidationError will be raised with the provided help_msg :param help_msg: an optional help message to be used in the raised error in case of validation failure. :param none_policy: describes how None values should be handled. See `NoneArgPolicy` for the various possibilities. Default is `NoneArgPolicy.ACCEPT_IF_OPTIONAl_ELSE_VALIDATE`. :param kw_context_args: optional contextual information to store in the exception, and that may be also used to format the help message :return: a function decorator, able to transform a function into a function that will perform input validation before executing the function's code everytime it is executed.
[ "A", "decorator", "to", "apply", "function", "input", "validation", "for", "the", "given", "argument", "name", "with", "the", "provided", "base", "validation", "function", "(", "s", ")", ".", "You", "may", "use", "several", "such", "decorators", "on", "a", "given", "function", "as", "long", "as", "they", "are", "stacked", "on", "top", "of", "each", "other", "(", "no", "external", "decorator", "in", "the", "middle", ")" ]
python
train
67.464286
pytroll/satpy
satpy/readers/iasi_l2.py
https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/readers/iasi_l2.py#L143-L162
def read_geo(fid, key): """Read geolocation and related datasets.""" dsid = GEO_NAMES[key.name] add_epoch = False if "time" in key.name: days = fid["/L1C/" + dsid["day"]].value msecs = fid["/L1C/" + dsid["msec"]].value data = _form_datetimes(days, msecs) add_epoch = True dtype = np.float64 else: data = fid["/L1C/" + dsid].value dtype = np.float32 data = xr.DataArray(da.from_array(data, chunks=CHUNK_SIZE), name=key.name, dims=['y', 'x']).astype(dtype) if add_epoch: data.attrs['sensing_time_epoch'] = EPOCH return data
[ "def", "read_geo", "(", "fid", ",", "key", ")", ":", "dsid", "=", "GEO_NAMES", "[", "key", ".", "name", "]", "add_epoch", "=", "False", "if", "\"time\"", "in", "key", ".", "name", ":", "days", "=", "fid", "[", "\"/L1C/\"", "+", "dsid", "[", "\"day\"", "]", "]", ".", "value", "msecs", "=", "fid", "[", "\"/L1C/\"", "+", "dsid", "[", "\"msec\"", "]", "]", ".", "value", "data", "=", "_form_datetimes", "(", "days", ",", "msecs", ")", "add_epoch", "=", "True", "dtype", "=", "np", ".", "float64", "else", ":", "data", "=", "fid", "[", "\"/L1C/\"", "+", "dsid", "]", ".", "value", "dtype", "=", "np", ".", "float32", "data", "=", "xr", ".", "DataArray", "(", "da", ".", "from_array", "(", "data", ",", "chunks", "=", "CHUNK_SIZE", ")", ",", "name", "=", "key", ".", "name", ",", "dims", "=", "[", "'y'", ",", "'x'", "]", ")", ".", "astype", "(", "dtype", ")", "if", "add_epoch", ":", "data", ".", "attrs", "[", "'sensing_time_epoch'", "]", "=", "EPOCH", "return", "data" ]
Read geolocation and related datasets.
[ "Read", "geolocation", "and", "related", "datasets", "." ]
python
train
31.2
hazelcast/hazelcast-python-client
hazelcast/proxy/list.py
https://github.com/hazelcast/hazelcast-python-client/blob/3f6639443c23d6d036aa343f8e094f052250d2c1/hazelcast/proxy/list.py#L194-L204
def last_index_of(self, item): """ Returns the last index of specified items's occurrences in this list. If specified item is not present in this list, returns -1. :param item: (object), the specified item to be searched for. :return: (int), the last index of specified items's occurrences, -1 if item is not present in this list. """ check_not_none(item, "Value can't be None") item_data = self._to_data(item) return self._encode_invoke(list_last_index_of_codec, value=item_data)
[ "def", "last_index_of", "(", "self", ",", "item", ")", ":", "check_not_none", "(", "item", ",", "\"Value can't be None\"", ")", "item_data", "=", "self", ".", "_to_data", "(", "item", ")", "return", "self", ".", "_encode_invoke", "(", "list_last_index_of_codec", ",", "value", "=", "item_data", ")" ]
Returns the last index of specified items's occurrences in this list. If specified item is not present in this list, returns -1. :param item: (object), the specified item to be searched for. :return: (int), the last index of specified items's occurrences, -1 if item is not present in this list.
[ "Returns", "the", "last", "index", "of", "specified", "items", "s", "occurrences", "in", "this", "list", ".", "If", "specified", "item", "is", "not", "present", "in", "this", "list", "returns", "-", "1", "." ]
python
train
49.363636
LuminosoInsight/python-ftfy
ftfy/fixes.py
https://github.com/LuminosoInsight/python-ftfy/blob/476acc6ad270bffe07f97d4f7cf2139acdc69633/ftfy/fixes.py#L469-L489
def fix_surrogates(text): """ Replace 16-bit surrogate codepoints with the characters they represent (when properly paired), or with \ufffd otherwise. >>> high_surrogate = chr(0xd83d) >>> low_surrogate = chr(0xdca9) >>> print(fix_surrogates(high_surrogate + low_surrogate)) 💩 >>> print(fix_surrogates(low_surrogate + high_surrogate)) �� The above doctest had to be very carefully written, because even putting the Unicode escapes of the surrogates in the docstring was causing various tools to fail, which I think just goes to show why this fixer is necessary. """ if SURROGATE_RE.search(text): text = SURROGATE_PAIR_RE.sub(convert_surrogate_pair, text) text = SURROGATE_RE.sub('\ufffd', text) return text
[ "def", "fix_surrogates", "(", "text", ")", ":", "if", "SURROGATE_RE", ".", "search", "(", "text", ")", ":", "text", "=", "SURROGATE_PAIR_RE", ".", "sub", "(", "convert_surrogate_pair", ",", "text", ")", "text", "=", "SURROGATE_RE", ".", "sub", "(", "'\\ufffd'", ",", "text", ")", "return", "text" ]
Replace 16-bit surrogate codepoints with the characters they represent (when properly paired), or with \ufffd otherwise. >>> high_surrogate = chr(0xd83d) >>> low_surrogate = chr(0xdca9) >>> print(fix_surrogates(high_surrogate + low_surrogate)) 💩 >>> print(fix_surrogates(low_surrogate + high_surrogate)) �� The above doctest had to be very carefully written, because even putting the Unicode escapes of the surrogates in the docstring was causing various tools to fail, which I think just goes to show why this fixer is necessary.
[ "Replace", "16", "-", "bit", "surrogate", "codepoints", "with", "the", "characters", "they", "represent", "(", "when", "properly", "paired", ")", "or", "with", "\\", "ufffd", "otherwise", "." ]
python
train
37.666667
sentinel-hub/sentinelhub-py
sentinelhub/time_utils.py
https://github.com/sentinel-hub/sentinelhub-py/blob/08a83b7f1e289187159a643336995d8369860fea/sentinelhub/time_utils.py#L113-L130
def parse_time(time_input): """ Parse input time/date string into ISO 8601 string :param time_input: time/date to parse :type time_input: str or datetime.date or datetime.datetime :return: parsed string in ISO 8601 format :rtype: str """ if isinstance(time_input, datetime.date): return time_input.isoformat() # datetime.date only returns date, datetime.datetime also returns time if len(time_input) < 8: raise ValueError('Invalid time string {}.\n' 'Please specify time in formats YYYY-MM-DD or YYYY-MM-DDTHH:MM:SS'.format(time_input)) time = dateutil.parser.parse(time_input) if len(time_input) <= 10: return time.date().isoformat() return time.isoformat()
[ "def", "parse_time", "(", "time_input", ")", ":", "if", "isinstance", "(", "time_input", ",", "datetime", ".", "date", ")", ":", "return", "time_input", ".", "isoformat", "(", ")", "# datetime.date only returns date, datetime.datetime also returns time", "if", "len", "(", "time_input", ")", "<", "8", ":", "raise", "ValueError", "(", "'Invalid time string {}.\\n'", "'Please specify time in formats YYYY-MM-DD or YYYY-MM-DDTHH:MM:SS'", ".", "format", "(", "time_input", ")", ")", "time", "=", "dateutil", ".", "parser", ".", "parse", "(", "time_input", ")", "if", "len", "(", "time_input", ")", "<=", "10", ":", "return", "time", ".", "date", "(", ")", ".", "isoformat", "(", ")", "return", "time", ".", "isoformat", "(", ")" ]
Parse input time/date string into ISO 8601 string :param time_input: time/date to parse :type time_input: str or datetime.date or datetime.datetime :return: parsed string in ISO 8601 format :rtype: str
[ "Parse", "input", "time", "/", "date", "string", "into", "ISO", "8601", "string" ]
python
train
40.944444
UCSBarchlab/PyRTL
pyrtl/simulation.py
https://github.com/UCSBarchlab/PyRTL/blob/0988e5c9c10ededd5e1f58d5306603f9edf4b3e2/pyrtl/simulation.py#L664-L694
def _render_val_with_prev(self, w, n, current_val, symbol_len): """Return a string encoding the given value in a waveform. :param w: The WireVector we are rendering to a waveform :param n: An integer from 0 to segment_len-1 :param current_val: the value to be rendered :param symbol_len: and integer for how big to draw the current value Returns a string of printed length symbol_len that will draw the representation of current_val. The input prior_val is used to render transitions. """ sl = symbol_len-1 if len(w) > 1: out = self._revstart if current_val != self.prior_val: out += self._x + hex(current_val).rstrip('L').ljust(sl)[:sl] elif n == 0: out += hex(current_val).rstrip('L').ljust(symbol_len)[:symbol_len] else: out += ' '*symbol_len out += self._revstop else: pretty_map = { (0, 0): self._low + self._low * sl, (0, 1): self._up + self._high * sl, (1, 0): self._down + self._low * sl, (1, 1): self._high + self._high * sl, } out = pretty_map[(self.prior_val, current_val)] return out
[ "def", "_render_val_with_prev", "(", "self", ",", "w", ",", "n", ",", "current_val", ",", "symbol_len", ")", ":", "sl", "=", "symbol_len", "-", "1", "if", "len", "(", "w", ")", ">", "1", ":", "out", "=", "self", ".", "_revstart", "if", "current_val", "!=", "self", ".", "prior_val", ":", "out", "+=", "self", ".", "_x", "+", "hex", "(", "current_val", ")", ".", "rstrip", "(", "'L'", ")", ".", "ljust", "(", "sl", ")", "[", ":", "sl", "]", "elif", "n", "==", "0", ":", "out", "+=", "hex", "(", "current_val", ")", ".", "rstrip", "(", "'L'", ")", ".", "ljust", "(", "symbol_len", ")", "[", ":", "symbol_len", "]", "else", ":", "out", "+=", "' '", "*", "symbol_len", "out", "+=", "self", ".", "_revstop", "else", ":", "pretty_map", "=", "{", "(", "0", ",", "0", ")", ":", "self", ".", "_low", "+", "self", ".", "_low", "*", "sl", ",", "(", "0", ",", "1", ")", ":", "self", ".", "_up", "+", "self", ".", "_high", "*", "sl", ",", "(", "1", ",", "0", ")", ":", "self", ".", "_down", "+", "self", ".", "_low", "*", "sl", ",", "(", "1", ",", "1", ")", ":", "self", ".", "_high", "+", "self", ".", "_high", "*", "sl", ",", "}", "out", "=", "pretty_map", "[", "(", "self", ".", "prior_val", ",", "current_val", ")", "]", "return", "out" ]
Return a string encoding the given value in a waveform.

        :param w: The WireVector we are rendering to a waveform
        :param n: An integer from 0 to segment_len-1
        :param current_val: the value to be rendered
        :param symbol_len: an integer for how big to draw the current value

        Returns a string of printed length symbol_len that will draw the
        representation of current_val.  The input prior_val is used
        to render transitions.
[ "Return", "a", "string", "encoding", "the", "given", "value", "in", "a", "waveform", "." ]
python
train
41.290323
lionel/counterparts
counterparts.py
https://github.com/lionel/counterparts/blob/20db9852feff531f854972f76b412c442b2fafbf/counterparts.py#L199-L222
def no_counterpart_found(string, options, rc_so_far): """Takes action determined by options.else_action. Unless told to raise an exception, this function returns the errno that is supposed to be returned in this case. :param string: The lookup string. :param options: ArgumentParser or equivalent to provide options.else_action, options.else_errno, options.no_newline :param rc_so_far: Becomes set to the value set in options. """ logger.debug("options.else_action: %s", options.else_action) if options.else_action == "passthrough": format_list = [string] output_fd = sys.stdout elif options.else_action == "exception": raise KeyError("No counterpart found for: %s" % (string)) elif options.else_action == "error": format_list = ["# No counterpart found for: %s" % (string)] output_fd = sys.stderr if not options.no_newline: format_list.append("\n") output_fd.write("".join(format_list)) return options.else_errno
[ "def", "no_counterpart_found", "(", "string", ",", "options", ",", "rc_so_far", ")", ":", "logger", ".", "debug", "(", "\"options.else_action: %s\"", ",", "options", ".", "else_action", ")", "if", "options", ".", "else_action", "==", "\"passthrough\"", ":", "format_list", "=", "[", "string", "]", "output_fd", "=", "sys", ".", "stdout", "elif", "options", ".", "else_action", "==", "\"exception\"", ":", "raise", "KeyError", "(", "\"No counterpart found for: %s\"", "%", "(", "string", ")", ")", "elif", "options", ".", "else_action", "==", "\"error\"", ":", "format_list", "=", "[", "\"# No counterpart found for: %s\"", "%", "(", "string", ")", "]", "output_fd", "=", "sys", ".", "stderr", "if", "not", "options", ".", "no_newline", ":", "format_list", ".", "append", "(", "\"\\n\"", ")", "output_fd", ".", "write", "(", "\"\"", ".", "join", "(", "format_list", ")", ")", "return", "options", ".", "else_errno" ]
Takes action determined by options.else_action. Unless told to raise an exception, this function returns the errno that is supposed to be returned in this case. :param string: The lookup string. :param options: ArgumentParser or equivalent to provide options.else_action, options.else_errno, options.no_newline :param rc_so_far: Becomes set to the value set in options.
[ "Takes", "action", "determined", "by", "options", ".", "else_action", ".", "Unless", "told", "to", "raise", "an", "exception", "this", "function", "returns", "the", "errno", "that", "is", "supposed", "to", "be", "returned", "in", "this", "case", "." ]
python
train
41.875
pantsbuild/pants
contrib/go/src/python/pants/contrib/go/tasks/go_workspace_task.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/contrib/go/src/python/pants/contrib/go/tasks/go_workspace_task.py#L36-L53
def ensure_workspace(self, target): """Ensures that an up-to-date Go workspace exists for the given target. Creates any necessary symlinks to source files based on the target and its transitive dependencies, and removes any symlinks which do not correspond to any needed dep. """ gopath = self.get_gopath(target) for d in ('bin', 'pkg', 'src'): safe_mkdir(os.path.join(gopath, d)) required_links = set() for dep in target.closure(): if not isinstance(dep, GoTarget): continue if self.is_remote_lib(dep): self._symlink_remote_lib(gopath, dep, required_links) else: self._symlink_local_src(gopath, dep, required_links) self.remove_unused_links(os.path.join(gopath, 'src'), required_links)
[ "def", "ensure_workspace", "(", "self", ",", "target", ")", ":", "gopath", "=", "self", ".", "get_gopath", "(", "target", ")", "for", "d", "in", "(", "'bin'", ",", "'pkg'", ",", "'src'", ")", ":", "safe_mkdir", "(", "os", ".", "path", ".", "join", "(", "gopath", ",", "d", ")", ")", "required_links", "=", "set", "(", ")", "for", "dep", "in", "target", ".", "closure", "(", ")", ":", "if", "not", "isinstance", "(", "dep", ",", "GoTarget", ")", ":", "continue", "if", "self", ".", "is_remote_lib", "(", "dep", ")", ":", "self", ".", "_symlink_remote_lib", "(", "gopath", ",", "dep", ",", "required_links", ")", "else", ":", "self", ".", "_symlink_local_src", "(", "gopath", ",", "dep", ",", "required_links", ")", "self", ".", "remove_unused_links", "(", "os", ".", "path", ".", "join", "(", "gopath", ",", "'src'", ")", ",", "required_links", ")" ]
Ensures that an up-to-date Go workspace exists for the given target. Creates any necessary symlinks to source files based on the target and its transitive dependencies, and removes any symlinks which do not correspond to any needed dep.
[ "Ensures", "that", "an", "up", "-", "to", "-", "date", "Go", "workspace", "exists", "for", "the", "given", "target", "." ]
python
train
41.888889
royi1000/py-libhdate
hdate/common.py
https://github.com/royi1000/py-libhdate/blob/12af759fb69f1d6403abed3762beaf5ace16a34b/hdate/common.py#L66-L69
def timezone(self, value): """Set the timezone.""" self._timezone = (value if isinstance(value, datetime.tzinfo) else tz.gettz(value))
[ "def", "timezone", "(", "self", ",", "value", ")", ":", "self", ".", "_timezone", "=", "(", "value", "if", "isinstance", "(", "value", ",", "datetime", ".", "tzinfo", ")", "else", "tz", ".", "gettz", "(", "value", ")", ")" ]
Set the timezone.
[ "Set", "the", "timezone", "." ]
python
train
43.25
pypa/pipenv
pipenv/resolver.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/resolver.py#L391-L433
def constraint_from_parent_conflicts(self): """ Given a resolved entry with multiple parent dependencies with different constraints, searches for the resolution that satisfies all of the parent constraints. :return: A new **InstallRequirement** satisfying all parent constraints :raises: :exc:`~pipenv.exceptions.DependencyConflict` if resolution is impossible """ # ensure that we satisfy the parent dependencies of this dep from pipenv.vendor.packaging.specifiers import Specifier parent_dependencies = set() has_mismatch = False can_use_original = True for p in self.parent_deps: # updated dependencies should be satisfied since they were resolved already if p.is_updated: continue # parents with no requirements can't conflict if not p.requirements: continue needed = p.requirements.get("dependencies", []) entry_ref = p.get_dependency(self.name) required = entry_ref.get("required_version", "*") required = self.clean_specifier(required) parent_requires = self.make_requirement(self.name, required) parent_dependencies.add("{0} => {1} ({2})".format(p.name, self.name, required)) if not parent_requires.requirement.specifier.contains(self.original_version): can_use_original = False if not parent_requires.requirement.specifier.contains(self.updated_version): has_mismatch = True if has_mismatch and not can_use_original: from pipenv.exceptions import DependencyConflict msg = ( "Cannot resolve {0} ({1}) due to conflicting parent dependencies: " "\n\t{2}".format( self.name, self.updated_version, "\n\t".join(parent_dependencies) ) ) raise DependencyConflict(msg) elif can_use_original: return self.lockfile_entry.as_ireq() return self.entry.as_ireq()
[ "def", "constraint_from_parent_conflicts", "(", "self", ")", ":", "# ensure that we satisfy the parent dependencies of this dep", "from", "pipenv", ".", "vendor", ".", "packaging", ".", "specifiers", "import", "Specifier", "parent_dependencies", "=", "set", "(", ")", "has_mismatch", "=", "False", "can_use_original", "=", "True", "for", "p", "in", "self", ".", "parent_deps", ":", "# updated dependencies should be satisfied since they were resolved already", "if", "p", ".", "is_updated", ":", "continue", "# parents with no requirements can't conflict", "if", "not", "p", ".", "requirements", ":", "continue", "needed", "=", "p", ".", "requirements", ".", "get", "(", "\"dependencies\"", ",", "[", "]", ")", "entry_ref", "=", "p", ".", "get_dependency", "(", "self", ".", "name", ")", "required", "=", "entry_ref", ".", "get", "(", "\"required_version\"", ",", "\"*\"", ")", "required", "=", "self", ".", "clean_specifier", "(", "required", ")", "parent_requires", "=", "self", ".", "make_requirement", "(", "self", ".", "name", ",", "required", ")", "parent_dependencies", ".", "add", "(", "\"{0} => {1} ({2})\"", ".", "format", "(", "p", ".", "name", ",", "self", ".", "name", ",", "required", ")", ")", "if", "not", "parent_requires", ".", "requirement", ".", "specifier", ".", "contains", "(", "self", ".", "original_version", ")", ":", "can_use_original", "=", "False", "if", "not", "parent_requires", ".", "requirement", ".", "specifier", ".", "contains", "(", "self", ".", "updated_version", ")", ":", "has_mismatch", "=", "True", "if", "has_mismatch", "and", "not", "can_use_original", ":", "from", "pipenv", ".", "exceptions", "import", "DependencyConflict", "msg", "=", "(", "\"Cannot resolve {0} ({1}) due to conflicting parent dependencies: \"", "\"\\n\\t{2}\"", ".", "format", "(", "self", ".", "name", ",", "self", ".", "updated_version", ",", "\"\\n\\t\"", ".", "join", "(", "parent_dependencies", ")", ")", ")", "raise", "DependencyConflict", "(", "msg", ")", "elif", "can_use_original", ":", "return", "self", ".", "lockfile_entry", ".", "as_ireq", "(", ")", "return", "self", ".", "entry", ".", "as_ireq", "(", ")" ]
Given a resolved entry with multiple parent dependencies with different constraints, searches for the resolution that satisfies all of the parent constraints. :return: A new **InstallRequirement** satisfying all parent constraints :raises: :exc:`~pipenv.exceptions.DependencyConflict` if resolution is impossible
[ "Given", "a", "resolved", "entry", "with", "multiple", "parent", "dependencies", "with", "different", "constraints", "searches", "for", "the", "resolution", "that", "satisfies", "all", "of", "the", "parent", "constraints", "." ]
python
train
48.348837
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_interface_ext.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_interface_ext.py#L706-L720
def get_interface_detail_input_request_type_get_next_request_last_rcvd_interface_interface_name(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_interface_detail = ET.Element("get_interface_detail") config = get_interface_detail input = ET.SubElement(get_interface_detail, "input") request_type = ET.SubElement(input, "request-type") get_next_request = ET.SubElement(request_type, "get-next-request") last_rcvd_interface = ET.SubElement(get_next_request, "last-rcvd-interface") interface_name = ET.SubElement(last_rcvd_interface, "interface-name") interface_name.text = kwargs.pop('interface_name') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "get_interface_detail_input_request_type_get_next_request_last_rcvd_interface_interface_name", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_interface_detail", "=", "ET", ".", "Element", "(", "\"get_interface_detail\"", ")", "config", "=", "get_interface_detail", "input", "=", "ET", ".", "SubElement", "(", "get_interface_detail", ",", "\"input\"", ")", "request_type", "=", "ET", ".", "SubElement", "(", "input", ",", "\"request-type\"", ")", "get_next_request", "=", "ET", ".", "SubElement", "(", "request_type", ",", "\"get-next-request\"", ")", "last_rcvd_interface", "=", "ET", ".", "SubElement", "(", "get_next_request", ",", "\"last-rcvd-interface\"", ")", "interface_name", "=", "ET", ".", "SubElement", "(", "last_rcvd_interface", ",", "\"interface-name\"", ")", "interface_name", ".", "text", "=", "kwargs", ".", "pop", "(", "'interface_name'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
52.8
savvastj/nbashots
nbashots/api.py
https://github.com/savvastj/nbashots/blob/76ece28d717f10b25eb0fc681b317df6ef6b5157/nbashots/api.py#L218-L222
def get_shots(self): """Returns the shot chart data as a pandas DataFrame.""" shots = self.response.json()['resultSets'][0]['rowSet'] headers = self.response.json()['resultSets'][0]['headers'] return pd.DataFrame(shots, columns=headers)
[ "def", "get_shots", "(", "self", ")", ":", "shots", "=", "self", ".", "response", ".", "json", "(", ")", "[", "'resultSets'", "]", "[", "0", "]", "[", "'rowSet'", "]", "headers", "=", "self", ".", "response", ".", "json", "(", ")", "[", "'resultSets'", "]", "[", "0", "]", "[", "'headers'", "]", "return", "pd", ".", "DataFrame", "(", "shots", ",", "columns", "=", "headers", ")" ]
Returns the shot chart data as a pandas DataFrame.
[ "Returns", "the", "shot", "chart", "data", "as", "a", "pandas", "DataFrame", "." ]
python
train
52.8
tensorflow/tensor2tensor
tensor2tensor/trax/trax.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/trax.py#L152-L161
def save_state(state, output_dir, keep=False): """Save State and optionally gin config.""" params_file = os.path.join(output_dir, "model.pkl") with gfile.GFile(params_file, "wb") as f: pickle.dump((state.params, state.step, state.history), f) if keep: params_file = os.path.join(output_dir, "model_{}.pkl".format(state.step)) with gfile.GFile(params_file, "wb") as f: pickle.dump((state.params, state.step, state.history), f) log("Model saved to %s" % params_file, stdout=False)
[ "def", "save_state", "(", "state", ",", "output_dir", ",", "keep", "=", "False", ")", ":", "params_file", "=", "os", ".", "path", ".", "join", "(", "output_dir", ",", "\"model.pkl\"", ")", "with", "gfile", ".", "GFile", "(", "params_file", ",", "\"wb\"", ")", "as", "f", ":", "pickle", ".", "dump", "(", "(", "state", ".", "params", ",", "state", ".", "step", ",", "state", ".", "history", ")", ",", "f", ")", "if", "keep", ":", "params_file", "=", "os", ".", "path", ".", "join", "(", "output_dir", ",", "\"model_{}.pkl\"", ".", "format", "(", "state", ".", "step", ")", ")", "with", "gfile", ".", "GFile", "(", "params_file", ",", "\"wb\"", ")", "as", "f", ":", "pickle", ".", "dump", "(", "(", "state", ".", "params", ",", "state", ".", "step", ",", "state", ".", "history", ")", ",", "f", ")", "log", "(", "\"Model saved to %s\"", "%", "params_file", ",", "stdout", "=", "False", ")" ]
Save State and optionally gin config.
[ "Save", "State", "and", "optionally", "gin", "config", "." ]
python
train
49.7
hydrosquall/tiingo-python
tiingo/api.py
https://github.com/hydrosquall/tiingo-python/blob/9bb98ca9d24f2e4db651cf0590e4b47184546482/tiingo/api.py#L207-L275
def get_dataframe(self, tickers, startDate=None, endDate=None, metric_name=None, frequency='daily'): """ Return a pandas.DataFrame of historical prices for one or more ticker symbols. By default, return latest EOD Composite Price for a list of stock tickers. On average, each feed contains 3 data sources. Supported tickers + Available Day Ranges are here: https://apimedia.tiingo.com/docs/tiingo/daily/supported_tickers.zip or from the TiingoClient.list_tickers() method. Args: tickers (string/list): One or more unique identifiers for a stock ticker. startDate (string): Start of ticker range in YYYY-MM-DD format. endDate (string): End of ticker range in YYYY-MM-DD format. metric_name (string): Optional parameter specifying metric to be returned for each ticker. In the event of a single ticker, this is optional and if not specified all of the available data will be returned. In the event of a list of tickers, this parameter is required. frequency (string): Resample frequency (defaults to daily). """ valid_columns = ['open', 'high', 'low', 'close', 'volume', 'adjOpen', 'adjHigh', 'adjLow', 'adjClose', 'adjVolume', 'divCash', 'splitFactor'] if metric_name is not None and metric_name not in valid_columns: raise APIColumnNameError('Valid data items are: ' + str(valid_columns)) params = { 'format': 'json', 'resampleFreq': frequency } if startDate: params['startDate'] = startDate if endDate: params['endDate'] = endDate if pandas_is_installed: if type(tickers) is str: stock = tickers url = self._get_url(stock, frequency) response = self._request('GET', url, params=params) df = pd.DataFrame(response.json()) if metric_name is not None: prices = df[metric_name] prices.index = df['date'] else: prices = df prices.index = df['date'] del (prices['date']) else: prices = pd.DataFrame() for stock in tickers: url = self._get_url(stock, frequency) response = self._request('GET', url, params=params) df = pd.DataFrame(response.json()) df.index = df['date'] df.rename(index=str, columns={metric_name: stock}, inplace=True) prices = pd.concat([prices, df[stock]], axis=1) prices.index = pd.to_datetime(prices.index) return prices else: error_message = ("Pandas is not installed, but .get_ticker_price() was " "called with fmt=pandas. In order to install tiingo with " "pandas, reinstall with pandas as an optional dependency. \n" "Install tiingo with pandas dependency: \'pip install tiingo[pandas]\'\n" "Alternatively, just install pandas: pip install pandas.") raise InstallPandasException(error_message)
[ "def", "get_dataframe", "(", "self", ",", "tickers", ",", "startDate", "=", "None", ",", "endDate", "=", "None", ",", "metric_name", "=", "None", ",", "frequency", "=", "'daily'", ")", ":", "valid_columns", "=", "[", "'open'", ",", "'high'", ",", "'low'", ",", "'close'", ",", "'volume'", ",", "'adjOpen'", ",", "'adjHigh'", ",", "'adjLow'", ",", "'adjClose'", ",", "'adjVolume'", ",", "'divCash'", ",", "'splitFactor'", "]", "if", "metric_name", "is", "not", "None", "and", "metric_name", "not", "in", "valid_columns", ":", "raise", "APIColumnNameError", "(", "'Valid data items are: '", "+", "str", "(", "valid_columns", ")", ")", "params", "=", "{", "'format'", ":", "'json'", ",", "'resampleFreq'", ":", "frequency", "}", "if", "startDate", ":", "params", "[", "'startDate'", "]", "=", "startDate", "if", "endDate", ":", "params", "[", "'endDate'", "]", "=", "endDate", "if", "pandas_is_installed", ":", "if", "type", "(", "tickers", ")", "is", "str", ":", "stock", "=", "tickers", "url", "=", "self", ".", "_get_url", "(", "stock", ",", "frequency", ")", "response", "=", "self", ".", "_request", "(", "'GET'", ",", "url", ",", "params", "=", "params", ")", "df", "=", "pd", ".", "DataFrame", "(", "response", ".", "json", "(", ")", ")", "if", "metric_name", "is", "not", "None", ":", "prices", "=", "df", "[", "metric_name", "]", "prices", ".", "index", "=", "df", "[", "'date'", "]", "else", ":", "prices", "=", "df", "prices", ".", "index", "=", "df", "[", "'date'", "]", "del", "(", "prices", "[", "'date'", "]", ")", "else", ":", "prices", "=", "pd", ".", "DataFrame", "(", ")", "for", "stock", "in", "tickers", ":", "url", "=", "self", ".", "_get_url", "(", "stock", ",", "frequency", ")", "response", "=", "self", ".", "_request", "(", "'GET'", ",", "url", ",", "params", "=", "params", ")", "df", "=", "pd", ".", "DataFrame", "(", "response", ".", "json", "(", ")", ")", "df", ".", "index", "=", "df", "[", "'date'", "]", "df", ".", "rename", "(", "index", "=", "str", ",", "columns", "=", "{", "metric_name", ":", "stock", "}", ",", "inplace", "=", "True", ")", "prices", "=", "pd", ".", "concat", "(", "[", "prices", ",", "df", "[", "stock", "]", "]", ",", "axis", "=", "1", ")", "prices", ".", "index", "=", "pd", ".", "to_datetime", "(", "prices", ".", "index", ")", "return", "prices", "else", ":", "error_message", "=", "(", "\"Pandas is not installed, but .get_ticker_price() was \"", "\"called with fmt=pandas. In order to install tiingo with \"", "\"pandas, reinstall with pandas as an optional dependency. \\n\"", "\"Install tiingo with pandas dependency: \\'pip install tiingo[pandas]\\'\\n\"", "\"Alternatively, just install pandas: pip install pandas.\"", ")", "raise", "InstallPandasException", "(", "error_message", ")" ]
Return a pandas.DataFrame of historical prices for one or more ticker symbols. By default, return latest EOD Composite Price for a list of stock tickers. On average, each feed contains 3 data sources. Supported tickers + Available Day Ranges are here: https://apimedia.tiingo.com/docs/tiingo/daily/supported_tickers.zip or from the TiingoClient.list_tickers() method. Args: tickers (string/list): One or more unique identifiers for a stock ticker. startDate (string): Start of ticker range in YYYY-MM-DD format. endDate (string): End of ticker range in YYYY-MM-DD format. metric_name (string): Optional parameter specifying metric to be returned for each ticker. In the event of a single ticker, this is optional and if not specified all of the available data will be returned. In the event of a list of tickers, this parameter is required. frequency (string): Resample frequency (defaults to daily).
[ "Return", "a", "pandas", ".", "DataFrame", "of", "historical", "prices", "for", "one", "or", "more", "ticker", "symbols", "." ]
python
test
49.130435
interedition/collatex
collatex-pythonport/collatex/linsuffarr.py
https://github.com/interedition/collatex/blob/76dd1fcc36047bc66a87d31142e72e98b5347821/collatex-pythonport/collatex/linsuffarr.py#L297-L351
def _longestCommonPrefix(seq1, seq2, start1=0, start2=0): """ Returns the length of the longest common prefix of seq1 starting at offset start1 and seq2 starting at offset start2. >>> _longestCommonPrefix("abcdef", "abcghj") 3 >>> _longestCommonPrefix("abcghj", "abcdef") 3 >>> _longestCommonPrefix("miss", "") 0 >>> _longestCommonPrefix("", "mr") 0 >>> _longestCommonPrefix(range(128), range(128)) 128 >>> _longestCommonPrefix("abcabcabc", "abcdefabcdef", 0, 6) 3 >>> _longestCommonPrefix("abcdefabcdef", "abcabcabc", 6, 0) 3 >>> _longestCommonPrefix("abc", "abcabc", 1, 4) 2 >>> _longestCommonPrefix("abcabc", "abc", 4, 1) 2 """ len1 = len(seq1) - start1 len2 = len(seq2) - start2 # We set seq2 as the shortest sequence if len1 < len2: seq1, seq2 = seq2, seq1 start1, start2 = start2, start1 len1, len2 = len2, len1 # if seq2 is empty returns 0 if len2 == 0: return 0 i = 0 pos2 = start2 for i in range(min(len1, len2)): # print seq1, seq2, start1, start2 if seq1[start1 + i] != seq2[start2 + i]: return i # we have reached the end of seq2 (need to increment i) return i + 1
[ "def", "_longestCommonPrefix", "(", "seq1", ",", "seq2", ",", "start1", "=", "0", ",", "start2", "=", "0", ")", ":", "len1", "=", "len", "(", "seq1", ")", "-", "start1", "len2", "=", "len", "(", "seq2", ")", "-", "start2", "# We set seq2 as the shortest sequence", "if", "len1", "<", "len2", ":", "seq1", ",", "seq2", "=", "seq2", ",", "seq1", "start1", ",", "start2", "=", "start2", ",", "start1", "len1", ",", "len2", "=", "len2", ",", "len1", "# if seq2 is empty returns 0", "if", "len2", "==", "0", ":", "return", "0", "i", "=", "0", "pos2", "=", "start2", "for", "i", "in", "range", "(", "min", "(", "len1", ",", "len2", ")", ")", ":", "# print seq1, seq2, start1, start2", "if", "seq1", "[", "start1", "+", "i", "]", "!=", "seq2", "[", "start2", "+", "i", "]", ":", "return", "i", "# we have reached the end of seq2 (need to increment i)", "return", "i", "+", "1" ]
Returns the length of the longest common prefix of seq1 starting at offset start1 and seq2 starting at offset start2. >>> _longestCommonPrefix("abcdef", "abcghj") 3 >>> _longestCommonPrefix("abcghj", "abcdef") 3 >>> _longestCommonPrefix("miss", "") 0 >>> _longestCommonPrefix("", "mr") 0 >>> _longestCommonPrefix(range(128), range(128)) 128 >>> _longestCommonPrefix("abcabcabc", "abcdefabcdef", 0, 6) 3 >>> _longestCommonPrefix("abcdefabcdef", "abcabcabc", 6, 0) 3 >>> _longestCommonPrefix("abc", "abcabc", 1, 4) 2 >>> _longestCommonPrefix("abcabc", "abc", 4, 1) 2
[ "Returns", "the", "length", "of", "the", "longest", "common", "prefix", "of", "seq1", "starting", "at", "offset", "start1", "and", "seq2", "starting", "at", "offset", "start2", "." ]
python
train
22.327273
holtjma/msbwt
MUS/MSBWTGen.py
https://github.com/holtjma/msbwt/blob/7503346ec072ddb89520db86fef85569a9ba093a/MUS/MSBWTGen.py#L1098-L1167
def compressBWTPoolProcess(tup): ''' During compression, each available process will calculate a subportion of the BWT independently using this function. This process takes the chunk and rewrites it into a given filename using the technique described in the compressBWT(...) function header ''' #pull the tuple info inputFN = tup[0] startIndex = tup[1] endIndex = tup[2] tempFN = tup[3] #this shouldn't happen if startIndex == endIndex: print 'ERROR: EQUAL INDICES' return None #load the file bwt = np.load(inputFN, 'r') #create bit spacings letterBits = 3 numberBits = 8-letterBits numPower = 2**numberBits mask = 255 >> letterBits #search for the places they're different whereSol = np.add(startIndex+1, np.where(bwt[startIndex:endIndex-1] != bwt[startIndex+1:endIndex])[0]) #this is the difference between two adjacent ones deltas = np.zeros(dtype='<u4', shape=(whereSol.shape[0]+1,)) if whereSol.shape[0] == 0: deltas[0] = endIndex-startIndex else: deltas[0] = whereSol[0]-startIndex deltas[1:-1] = np.subtract(whereSol[1:], whereSol[0:-1]) deltas[-1] = endIndex - whereSol[-1] #calculate the number of bytes we need to store this information size = 0 byteCount = 0 lastCount = 1 while lastCount > 0: lastCount = np.where(deltas >= 2**(numberBits*byteCount))[0].shape[0] size += lastCount byteCount += 1 #create the file ret = np.lib.format.open_memmap(tempFN, 'w+', '<u1', (size,)) retIndex = 0 c = bwt[startIndex] startChar = c delta = deltas[0] while delta > 0: ret[retIndex] = ((delta & mask) << letterBits)+c delta /= numPower retIndex += 1 #fill in the values based on the bit functions for i in xrange(0, whereSol.shape[0]): c = bwt[whereSol[i]] delta = deltas[i+1] while delta > 0: ret[retIndex] = ((delta & mask) << letterBits)+c delta /= numPower retIndex += 1 endChar = c #return a lot of information so we can easily combine the results return (size, startChar, deltas[0], endChar, deltas[-1], tempFN)
[ "def", "compressBWTPoolProcess", "(", "tup", ")", ":", "#pull the tuple info", "inputFN", "=", "tup", "[", "0", "]", "startIndex", "=", "tup", "[", "1", "]", "endIndex", "=", "tup", "[", "2", "]", "tempFN", "=", "tup", "[", "3", "]", "#this shouldn't happen", "if", "startIndex", "==", "endIndex", ":", "print", "'ERROR: EQUAL INDICES'", "return", "None", "#load the file", "bwt", "=", "np", ".", "load", "(", "inputFN", ",", "'r'", ")", "#create bit spacings", "letterBits", "=", "3", "numberBits", "=", "8", "-", "letterBits", "numPower", "=", "2", "**", "numberBits", "mask", "=", "255", ">>", "letterBits", "#search for the places they're different", "whereSol", "=", "np", ".", "add", "(", "startIndex", "+", "1", ",", "np", ".", "where", "(", "bwt", "[", "startIndex", ":", "endIndex", "-", "1", "]", "!=", "bwt", "[", "startIndex", "+", "1", ":", "endIndex", "]", ")", "[", "0", "]", ")", "#this is the difference between two adjacent ones", "deltas", "=", "np", ".", "zeros", "(", "dtype", "=", "'<u4'", ",", "shape", "=", "(", "whereSol", ".", "shape", "[", "0", "]", "+", "1", ",", ")", ")", "if", "whereSol", ".", "shape", "[", "0", "]", "==", "0", ":", "deltas", "[", "0", "]", "=", "endIndex", "-", "startIndex", "else", ":", "deltas", "[", "0", "]", "=", "whereSol", "[", "0", "]", "-", "startIndex", "deltas", "[", "1", ":", "-", "1", "]", "=", "np", ".", "subtract", "(", "whereSol", "[", "1", ":", "]", ",", "whereSol", "[", "0", ":", "-", "1", "]", ")", "deltas", "[", "-", "1", "]", "=", "endIndex", "-", "whereSol", "[", "-", "1", "]", "#calculate the number of bytes we need to store this information", "size", "=", "0", "byteCount", "=", "0", "lastCount", "=", "1", "while", "lastCount", ">", "0", ":", "lastCount", "=", "np", ".", "where", "(", "deltas", ">=", "2", "**", "(", "numberBits", "*", "byteCount", ")", ")", "[", "0", "]", ".", "shape", "[", "0", "]", "size", "+=", "lastCount", "byteCount", "+=", "1", "#create the file", "ret", "=", "np", ".", "lib", ".", "format", ".", "open_memmap", "(", "tempFN", ",", "'w+'", ",", "'<u1'", ",", "(", "size", ",", ")", ")", "retIndex", "=", "0", "c", "=", "bwt", "[", "startIndex", "]", "startChar", "=", "c", "delta", "=", "deltas", "[", "0", "]", "while", "delta", ">", "0", ":", "ret", "[", "retIndex", "]", "=", "(", "(", "delta", "&", "mask", ")", "<<", "letterBits", ")", "+", "c", "delta", "/=", "numPower", "retIndex", "+=", "1", "#fill in the values based on the bit functions", "for", "i", "in", "xrange", "(", "0", ",", "whereSol", ".", "shape", "[", "0", "]", ")", ":", "c", "=", "bwt", "[", "whereSol", "[", "i", "]", "]", "delta", "=", "deltas", "[", "i", "+", "1", "]", "while", "delta", ">", "0", ":", "ret", "[", "retIndex", "]", "=", "(", "(", "delta", "&", "mask", ")", "<<", "letterBits", ")", "+", "c", "delta", "/=", "numPower", "retIndex", "+=", "1", "endChar", "=", "c", "#return a lot of information so we can easily combine the results", "return", "(", "size", ",", "startChar", ",", "deltas", "[", "0", "]", ",", "endChar", ",", "deltas", "[", "-", "1", "]", ",", "tempFN", ")" ]
During compression, each available process will calculate a subportion of the BWT independently using this function. This process takes the chunk and rewrites it into a given filename using the technique described in the compressBWT(...) function header
[ "During", "compression", "each", "available", "process", "will", "calculate", "a", "subportion", "of", "the", "BWT", "independently", "using", "this", "function", ".", "This", "process", "takes", "the", "chunk", "and", "rewrites", "it", "into", "a", "given", "filename", "using", "the", "technique", "described", "in", "the", "compressBWT", "(", "...", ")", "function", "header" ]
python
train
31.7
mushkevych/scheduler
synergy/scheduler/garbage_collector.py
https://github.com/mushkevych/scheduler/blob/6740331360f49083c208085fb5a60ce80ebf418b/synergy/scheduler/garbage_collector.py#L132-L136
def flush_one(self, process_name, ignore_priority=False): """ method iterates over the reprocessing queue for the given process and re-submits UOW whose waiting time has expired """ q = self.reprocess_uows[process_name] self._flush_queue(q, ignore_priority)
[ "def", "flush_one", "(", "self", ",", "process_name", ",", "ignore_priority", "=", "False", ")", ":", "q", "=", "self", ".", "reprocess_uows", "[", "process_name", "]", "self", ".", "_flush_queue", "(", "q", ",", "ignore_priority", ")" ]
method iterates over the reprocessing queue for the given process and re-submits UOW whose waiting time has expired
[ "method", "iterates", "over", "the", "reprocessing", "queue", "for", "the", "given", "process", "and", "re", "-", "submits", "UOW", "whose", "waiting", "time", "has", "expired" ]
python
train
57.8
clalancette/pycdlib
pycdlib/udf.py
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/udf.py#L1013-L1037
def record(self): # type: () -> bytes ''' A method to generate the string representing this UDF Primary Volume Descriptor. Parameters: None. Returns: A string representing this UDF Primary Volume Descriptor. ''' if not self._initialized: raise pycdlibexception.PyCdlibInternalError('UDF Primary Volume Descriptor not initialized') rec = struct.pack(self.FMT, b'\x00' * 16, self.vol_desc_seqnum, self.desc_num, self.vol_ident, 1, 1, 2, self.max_interchange_level, 1, 1, self.vol_set_ident, self.desc_char_set, self.explanatory_char_set, self.vol_abstract_length, self.vol_abstract_extent, self.vol_copyright_length, self.vol_copyright_extent, self.app_ident.record(), self.recording_date.record(), self.impl_ident.record(), self.implementation_use, self.predecessor_vol_desc_location, 0, b'\x00' * 22)[16:] return self.desc_tag.record(rec) + rec
[ "def", "record", "(", "self", ")", ":", "# type: () -> bytes", "if", "not", "self", ".", "_initialized", ":", "raise", "pycdlibexception", ".", "PyCdlibInternalError", "(", "'UDF Primary Volume Descriptor not initialized'", ")", "rec", "=", "struct", ".", "pack", "(", "self", ".", "FMT", ",", "b'\\x00'", "*", "16", ",", "self", ".", "vol_desc_seqnum", ",", "self", ".", "desc_num", ",", "self", ".", "vol_ident", ",", "1", ",", "1", ",", "2", ",", "self", ".", "max_interchange_level", ",", "1", ",", "1", ",", "self", ".", "vol_set_ident", ",", "self", ".", "desc_char_set", ",", "self", ".", "explanatory_char_set", ",", "self", ".", "vol_abstract_length", ",", "self", ".", "vol_abstract_extent", ",", "self", ".", "vol_copyright_length", ",", "self", ".", "vol_copyright_extent", ",", "self", ".", "app_ident", ".", "record", "(", ")", ",", "self", ".", "recording_date", ".", "record", "(", ")", ",", "self", ".", "impl_ident", ".", "record", "(", ")", ",", "self", ".", "implementation_use", ",", "self", ".", "predecessor_vol_desc_location", ",", "0", ",", "b'\\x00'", "*", "22", ")", "[", "16", ":", "]", "return", "self", ".", "desc_tag", ".", "record", "(", "rec", ")", "+", "rec" ]
A method to generate the string representing this UDF Primary Volume Descriptor. Parameters: None. Returns: A string representing this UDF Primary Volume Descriptor.
[ "A", "method", "to", "generate", "the", "string", "representing", "this", "UDF", "Primary", "Volume", "Descriptor", "." ]
python
train
46.64
NYUCCL/psiTurk
psiturk/psiturk_shell.py
https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/psiturk_shell.py#L1029-L1051
def do_worker(self, arg): """ Usage: worker approve (--all | --hit <hit_id> ... | <assignment_id> ...) [--all-studies] [--force] worker reject (--hit <hit_id> | <assignment_id> ...) worker unreject (--hit <hit_id> | <assignment_id> ...) worker bonus (--amount <amount> | --auto) (--hit <hit_id> | <assignment_id> ...) worker list [--submitted | --approved | --rejected] [(--hit <hit_id>)] [--all-studies] worker help """ if arg['approve']: self.worker_approve(arg['--all'], arg['<hit_id>'], arg['<assignment_id>'], arg['--all-studies'], arg['--force']) elif arg['reject']: self.amt_services_wrapper.worker_reject(arg['<hit_id>'], arg['<assignment_id>']) elif arg['unreject']: self.amt_services_wrapper.worker_unreject(arg['<hit_id>'], arg['<assignment_id>']) elif arg['list']: self.worker_list(arg['--submitted'], arg['--approved'], arg['--rejected'], arg['<hit_id>'], arg['--all-studies']) elif arg['bonus']: self.amt_services_wrapper.worker_bonus(arg['<hit_id>'], arg['--auto'], arg['<amount>'], '', arg['<assignment_id>']) else: self.help_worker()
[ "def", "do_worker", "(", "self", ",", "arg", ")", ":", "if", "arg", "[", "'approve'", "]", ":", "self", ".", "worker_approve", "(", "arg", "[", "'--all'", "]", ",", "arg", "[", "'<hit_id>'", "]", ",", "arg", "[", "'<assignment_id>'", "]", ",", "arg", "[", "'--all-studies'", "]", ",", "arg", "[", "'--force'", "]", ")", "elif", "arg", "[", "'reject'", "]", ":", "self", ".", "amt_services_wrapper", ".", "worker_reject", "(", "arg", "[", "'<hit_id>'", "]", ",", "arg", "[", "'<assignment_id>'", "]", ")", "elif", "arg", "[", "'unreject'", "]", ":", "self", ".", "amt_services_wrapper", ".", "worker_unreject", "(", "arg", "[", "'<hit_id>'", "]", ",", "arg", "[", "'<assignment_id>'", "]", ")", "elif", "arg", "[", "'list'", "]", ":", "self", ".", "worker_list", "(", "arg", "[", "'--submitted'", "]", ",", "arg", "[", "'--approved'", "]", ",", "arg", "[", "'--rejected'", "]", ",", "arg", "[", "'<hit_id>'", "]", ",", "arg", "[", "'--all-studies'", "]", ")", "elif", "arg", "[", "'bonus'", "]", ":", "self", ".", "amt_services_wrapper", ".", "worker_bonus", "(", "arg", "[", "'<hit_id>'", "]", ",", "arg", "[", "'--auto'", "]", ",", "arg", "[", "'<amount>'", "]", ",", "''", ",", "arg", "[", "'<assignment_id>'", "]", ")", "else", ":", "self", ".", "help_worker", "(", ")" ]
Usage: worker approve (--all | --hit <hit_id> ... | <assignment_id> ...) [--all-studies] [--force] worker reject (--hit <hit_id> | <assignment_id> ...) worker unreject (--hit <hit_id> | <assignment_id> ...) worker bonus (--amount <amount> | --auto) (--hit <hit_id> | <assignment_id> ...) worker list [--submitted | --approved | --rejected] [(--hit <hit_id>)] [--all-studies] worker help
[ "Usage", ":", "worker", "approve", "(", "--", "all", "|", "--", "hit", "<hit_id", ">", "...", "|", "<assignment_id", ">", "...", ")", "[", "--", "all", "-", "studies", "]", "[", "--", "force", "]", "worker", "reject", "(", "--", "hit", "<hit_id", ">", "|", "<assignment_id", ">", "...", ")", "worker", "unreject", "(", "--", "hit", "<hit_id", ">", "|", "<assignment_id", ">", "...", ")", "worker", "bonus", "(", "--", "amount", "<amount", ">", "|", "--", "auto", ")", "(", "--", "hit", "<hit_id", ">", "|", "<assignment_id", ">", "...", ")", "worker", "list", "[", "--", "submitted", "|", "--", "approved", "|", "--", "rejected", "]", "[", "(", "--", "hit", "<hit_id", ">", ")", "]", "[", "--", "all", "-", "studies", "]", "worker", "help" ]
python
train
54.913043
ace0/pyrelic
pyrelic/pbc.py
https://github.com/ace0/pyrelic/blob/f23d4e6586674675f72304d5938548267d6413bf/pyrelic/pbc.py#L450-L457
def serializeG1(x, compress=True): """ Converts G1 element @x into an array of bytes. If @compress is True, the point will be compressed resulting in a much shorter string of bytes. """ assertType(x, G1Element) return _serialize(x, compress, librelic.g1_size_bin_abi, librelic.g1_write_bin_abi)
[ "def", "serializeG1", "(", "x", ",", "compress", "=", "True", ")", ":", "assertType", "(", "x", ",", "G1Element", ")", "return", "_serialize", "(", "x", ",", "compress", ",", "librelic", ".", "g1_size_bin_abi", ",", "librelic", ".", "g1_write_bin_abi", ")" ]
Converts G1 element @x into an array of bytes. If @compress is True, the point will be compressed resulting in a much shorter string of bytes.
[ "Converts", "G1", "element" ]
python
train
40
senaite/senaite.core
bika/lims/exportimport/instruments/agilent/masshunter/masshunter.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/exportimport/instruments/agilent/masshunter/masshunter.py#L317-L374
def parse_quantitationesultsline(self, line): """ Parses quantitation result lines Please see samples/GC-MS output.txt [MS Quantitative Results] section """ if line == ',,,,,,,,,,,,,,,,,,': return 0 if line.startswith('SampleID'): self._end_header = True self._quantitationresultsheader = [token.strip() for token in line.split(self.COMMAS) if token.strip()] return 0 splitted = [token.strip() for token in line.split(self.COMMAS)] quantitation = {'DefaultResult': 'FinalConcentration'} for colname in self._quantitationresultsheader: quantitation[colname] = '' for i in range(len(splitted)): token = splitted[i] if i < len(self._quantitationresultsheader): colname = self._quantitationresultsheader[i] if colname in self.QUANTITATIONRESULTS_NUMERICHEADERS: try: quantitation[colname] = float(token) except ValueError: self.warn( "No valid number ${token} in column " "${index} (${column_name})", mapping={"token": token, "index": str(i + 1), "column_name": colname}, numline=self._numline, line=line) quantitation[colname] = token else: quantitation[colname] = token elif token: self.err("Orphan value in column ${index} (${token})", mapping={"index": str(i+1), "token": token}, numline=self._numline, line=line) result = quantitation[quantitation['DefaultResult']] column_name = quantitation['DefaultResult'] result = self.zeroValueDefaultInstrumentResults(column_name, result, line) quantitation[quantitation['DefaultResult']] = result d = datetime.strptime(quantitation['AcqDateTime'], "%m/%d/%Y %H:%M") quantitation['AcqDateTime'] = d val = re.sub(r"\W", "", quantitation['Compound']) self._addRawResult(quantitation['DataFileName'], values={val: quantitation}, override=False)
[ "def", "parse_quantitationesultsline", "(", "self", ",", "line", ")", ":", "if", "line", "==", "',,,,,,,,,,,,,,,,,,'", ":", "return", "0", "if", "line", ".", "startswith", "(", "'SampleID'", ")", ":", "self", ".", "_end_header", "=", "True", "self", ".", "_quantitationresultsheader", "=", "[", "token", ".", "strip", "(", ")", "for", "token", "in", "line", ".", "split", "(", "self", ".", "COMMAS", ")", "if", "token", ".", "strip", "(", ")", "]", "return", "0", "splitted", "=", "[", "token", ".", "strip", "(", ")", "for", "token", "in", "line", ".", "split", "(", "self", ".", "COMMAS", ")", "]", "quantitation", "=", "{", "'DefaultResult'", ":", "'FinalConcentration'", "}", "for", "colname", "in", "self", ".", "_quantitationresultsheader", ":", "quantitation", "[", "colname", "]", "=", "''", "for", "i", "in", "range", "(", "len", "(", "splitted", ")", ")", ":", "token", "=", "splitted", "[", "i", "]", "if", "i", "<", "len", "(", "self", ".", "_quantitationresultsheader", ")", ":", "colname", "=", "self", ".", "_quantitationresultsheader", "[", "i", "]", "if", "colname", "in", "self", ".", "QUANTITATIONRESULTS_NUMERICHEADERS", ":", "try", ":", "quantitation", "[", "colname", "]", "=", "float", "(", "token", ")", "except", "ValueError", ":", "self", ".", "warn", "(", "\"No valid number ${token} in column \"", "\"${index} (${column_name})\"", ",", "mapping", "=", "{", "\"token\"", ":", "token", ",", "\"index\"", ":", "str", "(", "i", "+", "1", ")", ",", "\"column_name\"", ":", "colname", "}", ",", "numline", "=", "self", ".", "_numline", ",", "line", "=", "line", ")", "quantitation", "[", "colname", "]", "=", "token", "else", ":", "quantitation", "[", "colname", "]", "=", "token", "elif", "token", ":", "self", ".", "err", "(", "\"Orphan value in column ${index} (${token})\"", ",", "mapping", "=", "{", "\"index\"", ":", "str", "(", "i", "+", "1", ")", ",", "\"token\"", ":", "token", "}", ",", "numline", "=", "self", ".", "_numline", ",", "line", "=", "line", ")", "result", "=", "quantitation", "[", "quantitation", "[", "'DefaultResult'", "]", "]", "column_name", "=", "quantitation", "[", "'DefaultResult'", "]", "result", "=", "self", ".", "zeroValueDefaultInstrumentResults", "(", "column_name", ",", "result", ",", "line", ")", "quantitation", "[", "quantitation", "[", "'DefaultResult'", "]", "]", "=", "result", "d", "=", "datetime", ".", "strptime", "(", "quantitation", "[", "'AcqDateTime'", "]", ",", "\"%m/%d/%Y %H:%M\"", ")", "quantitation", "[", "'AcqDateTime'", "]", "=", "d", "val", "=", "re", ".", "sub", "(", "r\"\\W\"", ",", "\"\"", ",", "quantitation", "[", "'Compound'", "]", ")", "self", ".", "_addRawResult", "(", "quantitation", "[", "'DataFileName'", "]", ",", "values", "=", "{", "val", ":", "quantitation", "}", ",", "override", "=", "False", ")" ]
Parses quantitation result lines Please see samples/GC-MS output.txt [MS Quantitative Results] section
[ "Parses", "quantitation", "result", "lines", "Please", "see", "samples", "/", "GC", "-", "MS", "output", ".", "txt", "[", "MS", "Quantitative", "Results", "]", "section" ]
python
train
43.965517
zeroSteiner/AdvancedHTTPServer
advancedhttpserver.py
https://github.com/zeroSteiner/AdvancedHTTPServer/blob/8c53cf7e1ddbf7ae9f573c82c5fe5f6992db7b5a/advancedhttpserver.py#L920-L965
def respond_list_directory(self, dir_path, query=None): """ Respond to the client with an HTML page listing the contents of the specified directory. :param str dir_path: The path of the directory to list the contents of. """ del query try: dir_contents = os.listdir(dir_path) except os.error: self.respond_not_found() return if os.path.normpath(dir_path) != self.__config['serve_files_root']: dir_contents.append('..') dir_contents.sort(key=lambda a: a.lower()) displaypath = html.escape(urllib.parse.unquote(self.path), quote=True) f = io.BytesIO() encoding = sys.getfilesystemencoding() f.write(b'<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n') f.write(b'<html>\n<title>Directory listing for ' + displaypath.encode(encoding) + b'</title>\n') f.write(b'<body>\n<h2>Directory listing for ' + displaypath.encode(encoding) + b'</h2>\n') f.write(b'<hr>\n<ul>\n') for name in dir_contents: fullname = os.path.join(dir_path, name) displayname = linkname = name # Append / for directories or @ for symbolic links if os.path.isdir(fullname): displayname = name + "/" linkname = name + "/" if os.path.islink(fullname): displayname = name + "@" # Note: a link to a directory displays with @ and links with / f.write(('<li><a href="' + urllib.parse.quote(linkname) + '">' + html.escape(displayname, quote=True) + '</a>\n').encode(encoding)) f.write(b'</ul>\n<hr>\n</body>\n</html>\n') length = f.tell() f.seek(0) self.send_response(200) self.send_header('Content-Type', 'text/html; charset=' + encoding) self.send_header('Content-Length', length) self.end_headers() shutil.copyfileobj(f, self.wfile) f.close() return
[ "def", "respond_list_directory", "(", "self", ",", "dir_path", ",", "query", "=", "None", ")", ":", "del", "query", "try", ":", "dir_contents", "=", "os", ".", "listdir", "(", "dir_path", ")", "except", "os", ".", "error", ":", "self", ".", "respond_not_found", "(", ")", "return", "if", "os", ".", "path", ".", "normpath", "(", "dir_path", ")", "!=", "self", ".", "__config", "[", "'serve_files_root'", "]", ":", "dir_contents", ".", "append", "(", "'..'", ")", "dir_contents", ".", "sort", "(", "key", "=", "lambda", "a", ":", "a", ".", "lower", "(", ")", ")", "displaypath", "=", "html", ".", "escape", "(", "urllib", ".", "parse", ".", "unquote", "(", "self", ".", "path", ")", ",", "quote", "=", "True", ")", "f", "=", "io", ".", "BytesIO", "(", ")", "encoding", "=", "sys", ".", "getfilesystemencoding", "(", ")", "f", ".", "write", "(", "b'<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML 3.2 Final//EN\">\\n'", ")", "f", ".", "write", "(", "b'<html>\\n<title>Directory listing for '", "+", "displaypath", ".", "encode", "(", "encoding", ")", "+", "b'</title>\\n'", ")", "f", ".", "write", "(", "b'<body>\\n<h2>Directory listing for '", "+", "displaypath", ".", "encode", "(", "encoding", ")", "+", "b'</h2>\\n'", ")", "f", ".", "write", "(", "b'<hr>\\n<ul>\\n'", ")", "for", "name", "in", "dir_contents", ":", "fullname", "=", "os", ".", "path", ".", "join", "(", "dir_path", ",", "name", ")", "displayname", "=", "linkname", "=", "name", "# Append / for directories or @ for symbolic links", "if", "os", ".", "path", ".", "isdir", "(", "fullname", ")", ":", "displayname", "=", "name", "+", "\"/\"", "linkname", "=", "name", "+", "\"/\"", "if", "os", ".", "path", ".", "islink", "(", "fullname", ")", ":", "displayname", "=", "name", "+", "\"@\"", "# Note: a link to a directory displays with @ and links with /", "f", ".", "write", "(", "(", "'<li><a href=\"'", "+", "urllib", ".", "parse", ".", "quote", "(", "linkname", ")", "+", "'\">'", "+", "html", ".", "escape", "(", "displayname", ",", "quote", "=", "True", ")", "+", "'</a>\\n'", ")", ".", "encode", "(", "encoding", ")", ")", "f", ".", "write", "(", "b'</ul>\\n<hr>\\n</body>\\n</html>\\n'", ")", "length", "=", "f", ".", "tell", "(", ")", "f", ".", "seek", "(", "0", ")", "self", ".", "send_response", "(", "200", ")", "self", ".", "send_header", "(", "'Content-Type'", ",", "'text/html; charset='", "+", "encoding", ")", "self", ".", "send_header", "(", "'Content-Length'", ",", "length", ")", "self", ".", "end_headers", "(", ")", "shutil", ".", "copyfileobj", "(", "f", ",", "self", ".", "wfile", ")", "f", ".", "close", "(", ")", "return" ]
Respond to the client with an HTML page listing the contents of the specified directory. :param str dir_path: The path of the directory to list the contents of.
[ "Respond", "to", "the", "client", "with", "an", "HTML", "page", "listing", "the", "contents", "of", "the", "specified", "directory", "." ]
python
train
36.478261
OCHA-DAP/hdx-python-utilities
src/hdx/utilities/dictandlist.py
https://github.com/OCHA-DAP/hdx-python-utilities/blob/9c89e0aa5afac2c002b02a2d8f0e5b91eeb3d2a3/src/hdx/utilities/dictandlist.py#L339-L360
def read_list_from_csv(filepath, dict_form=False, headers=None, **kwargs): # type: (str, bool, Union[int, List[int], List[str], None], Any) -> List[Union[Dict, List]] """Read a list of rows in dict or list form from a csv. (The headers argument is either a row number or list of row numbers (in case of multi-line headers) to be considered as headers (rows start counting at 1), or the actual headers defined a list of strings. If not set, all rows will be treated as containing values.) Args: filepath (str): Path to read from dict_form (bool): Return in dict form. Defaults to False. headers (Union[int, List[int], List[str], None]): Row number of headers. Defaults to None. **kwargs: Other arguments to pass to Tabulator Stream Returns: List[Union[Dict, List]]: List of rows in dict or list form """ stream = Stream(filepath, headers=headers, **kwargs) stream.open() result = stream.read(keyed=dict_form) stream.close() return result
[ "def", "read_list_from_csv", "(", "filepath", ",", "dict_form", "=", "False", ",", "headers", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# type: (str, bool, Union[int, List[int], List[str], None], Any) -> List[Union[Dict, List]]", "stream", "=", "Stream", "(", "filepath", ",", "headers", "=", "headers", ",", "*", "*", "kwargs", ")", "stream", ".", "open", "(", ")", "result", "=", "stream", ".", "read", "(", "keyed", "=", "dict_form", ")", "stream", ".", "close", "(", ")", "return", "result" ]
Read a list of rows in dict or list form from a csv. (The headers argument is either a row number or list of row numbers (in case of multi-line headers) to be considered as headers (rows start counting at 1), or the actual headers defined a list of strings. If not set, all rows will be treated as containing values.) Args: filepath (str): Path to read from dict_form (bool): Return in dict form. Defaults to False. headers (Union[int, List[int], List[str], None]): Row number of headers. Defaults to None. **kwargs: Other arguments to pass to Tabulator Stream Returns: List[Union[Dict, List]]: List of rows in dict or list form
[ "Read", "a", "list", "of", "rows", "in", "dict", "or", "list", "form", "from", "a", "csv", ".", "(", "The", "headers", "argument", "is", "either", "a", "row", "number", "or", "list", "of", "row", "numbers", "(", "in", "case", "of", "multi", "-", "line", "headers", ")", "to", "be", "considered", "as", "headers", "(", "rows", "start", "counting", "at", "1", ")", "or", "the", "actual", "headers", "defined", "a", "list", "of", "strings", ".", "If", "not", "set", "all", "rows", "will", "be", "treated", "as", "containing", "values", ".", ")" ]
python
train
46.318182
MAVENSDC/cdflib
cdflib/epochs.py
https://github.com/MAVENSDC/cdflib/blob/d237c60e5db67db0f92d96054209c25c4042465c/cdflib/epochs.py#L207-L260
def compute(datetimes, to_np=None): # @NoSelf """ Computes the provided date/time components into CDF epoch value(s). For CDF_EPOCH: For computing into CDF_EPOCH value, each date/time elements should have exactly seven (7) components, as year, month, day, hour, minute, second and millisecond, in a list. For example: [[2017,1,1,1,1,1,111],[2017,2,2,2,2,2,222]] Or, call function compute_epoch directly, instead, with at least three (3) first (up to seven) components. The last component, if not the 7th, can be a float that can have a fraction of the unit. For CDF_EPOCH16: They should have exactly ten (10) components, as year, month, day, hour, minute, second, millisecond, microsecond, nanosecond and picosecond, in a list. For example: [[2017,1,1,1,1,1,123,456,789,999],[2017,2,2,2,2,2,987,654,321,999]] Or, call function compute_epoch directly, instead, with at least three (3) first (up to ten) components. The last component, if not the 10th, can be a float that can have a fraction of the unit. For TT2000: Each TT2000 typed date/time should have exactly nine (9) components, as year, month, day, hour, minute, second, millisecond, microsecond, and nanosecond, in a list. For example: [[2017,1,1,1,1,1,123,456,789],[2017,2,2,2,2,2,987,654,321]] Or, call function compute_tt2000 directly, instead, with at least three (3) first (up to nine) components. The last component, if not the 9th, can be a float that can have a fraction of the unit. Specify to_np to True, if the result should be in numpy class. """ if not isinstance(datetimes, (list, tuple, np.ndarray)): raise TypeError('datetime must be in list form') if isinstance(datetimes[0], numbers.Number): items = len(datetimes) elif isinstance(datetimes[0], (list, tuple, np.ndarray)): items = len(datetimes[0]) else: print('Unknown input') return if (items == 7): return CDFepoch.compute_epoch(datetimes, to_np) elif (items == 10): return CDFepoch.compute_epoch16(datetimes, to_np) elif (items == 9): return CDFepoch.compute_tt2000(datetimes, to_np) else: print('Unknown input') return
[ "def", "compute", "(", "datetimes", ",", "to_np", "=", "None", ")", ":", "# @NoSelf", "if", "not", "isinstance", "(", "datetimes", ",", "(", "list", ",", "tuple", ",", "np", ".", "ndarray", ")", ")", ":", "raise", "TypeError", "(", "'datetime must be in list form'", ")", "if", "isinstance", "(", "datetimes", "[", "0", "]", ",", "numbers", ".", "Number", ")", ":", "items", "=", "len", "(", "datetimes", ")", "elif", "isinstance", "(", "datetimes", "[", "0", "]", ",", "(", "list", ",", "tuple", ",", "np", ".", "ndarray", ")", ")", ":", "items", "=", "len", "(", "datetimes", "[", "0", "]", ")", "else", ":", "print", "(", "'Unknown input'", ")", "return", "if", "(", "items", "==", "7", ")", ":", "return", "CDFepoch", ".", "compute_epoch", "(", "datetimes", ",", "to_np", ")", "elif", "(", "items", "==", "10", ")", ":", "return", "CDFepoch", ".", "compute_epoch16", "(", "datetimes", ",", "to_np", ")", "elif", "(", "items", "==", "9", ")", ":", "return", "CDFepoch", ".", "compute_tt2000", "(", "datetimes", ",", "to_np", ")", "else", ":", "print", "(", "'Unknown input'", ")", "return" ]
Computes the provided date/time components into CDF epoch value(s). For CDF_EPOCH: For computing into CDF_EPOCH value, each date/time elements should have exactly seven (7) components, as year, month, day, hour, minute, second and millisecond, in a list. For example: [[2017,1,1,1,1,1,111],[2017,2,2,2,2,2,222]] Or, call function compute_epoch directly, instead, with at least three (3) first (up to seven) components. The last component, if not the 7th, can be a float that can have a fraction of the unit. For CDF_EPOCH16: They should have exactly ten (10) components, as year, month, day, hour, minute, second, millisecond, microsecond, nanosecond and picosecond, in a list. For example: [[2017,1,1,1,1,1,123,456,789,999],[2017,2,2,2,2,2,987,654,321,999]] Or, call function compute_epoch directly, instead, with at least three (3) first (up to ten) components. The last component, if not the 10th, can be a float that can have a fraction of the unit. For TT2000: Each TT2000 typed date/time should have exactly nine (9) components, as year, month, day, hour, minute, second, millisecond, microsecond, and nanosecond, in a list. For example: [[2017,1,1,1,1,1,123,456,789],[2017,2,2,2,2,2,987,654,321]] Or, call function compute_tt2000 directly, instead, with at least three (3) first (up to nine) components. The last component, if not the 9th, can be a float that can have a fraction of the unit. Specify to_np to True, if the result should be in numpy class.
[ "Computes", "the", "provided", "date", "/", "time", "components", "into", "CDF", "epoch", "value", "(", "s", ")", "." ]
python
train
47.796296
ff0000/scarlet
scarlet/cache/manager.py
https://github.com/ff0000/scarlet/blob/6c37befd810916a2d7ffff2cdb2dab57bcb6d12e/scarlet/cache/manager.py#L43-L64
def register_model(self, key, *models, **kwargs): """ Register a cache_group with this manager. Use this method to register more simple groups where all models share the same parameters. Any arguments are treated as models that you would like to register. Any keyword arguments received are passed to the register method when registering each model. :param key: The key to register this group as. \ Raises an exception if the key is already registered. """ cache_group = CacheGroup(key) for model in models: cache_group.register(model, **kwargs) self.register_cache(cache_group)
[ "def", "register_model", "(", "self", ",", "key", ",", "*", "models", ",", "*", "*", "kwargs", ")", ":", "cache_group", "=", "CacheGroup", "(", "key", ")", "for", "model", "in", "models", ":", "cache_group", ".", "register", "(", "model", ",", "*", "*", "kwargs", ")", "self", ".", "register_cache", "(", "cache_group", ")" ]
Register a cache_group with this manager. Use this method to register more simple groups where all models share the same parameters. Any arguments are treated as models that you would like to register. Any keyword arguments received are passed to the register method when registering each model. :param key: The key to register this group as. \ Raises an exception if the key is already registered.
[ "Register", "a", "cache_group", "with", "this", "manager", "." ]
python
train
31.227273
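A minimal usage sketch for register_model (not part of the record above); cache_manager stands for whatever manager instance the application exposes, and Post and Comment are hypothetical models, none of which come from the source.

from blog.models import Post, Comment  # hypothetical Django models

# Creates CacheGroup('blog'), registers both models with it, then registers
# the group with the manager; raises if the 'blog' key is already registered.
cache_manager.register_model('blog', Post, Comment)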
memphis-iis/GLUDB
gludb/backends/dynamodb.py
https://github.com/memphis-iis/GLUDB/blob/25692528ff6fe8184a3570f61f31f1a90088a388/gludb/backends/dynamodb.py#L177-L195
def save(self, obj): """Required functionality.""" if not obj.id: obj.id = uuid() stored_data = { 'id': obj.id, 'value': obj.to_data() } index_vals = obj.indexes() or {} for key in obj.__class__.index_names() or []: val = index_vals.get(key, '') stored_data[key] = DynamoMappings.map_index_val(val) table = self.get_class_table(obj.__class__) item = Item(table, data=stored_data) item.save(overwrite=True)
[ "def", "save", "(", "self", ",", "obj", ")", ":", "if", "not", "obj", ".", "id", ":", "obj", ".", "id", "=", "uuid", "(", ")", "stored_data", "=", "{", "'id'", ":", "obj", ".", "id", ",", "'value'", ":", "obj", ".", "to_data", "(", ")", "}", "index_vals", "=", "obj", ".", "indexes", "(", ")", "or", "{", "}", "for", "key", "in", "obj", ".", "__class__", ".", "index_names", "(", ")", "or", "[", "]", ":", "val", "=", "index_vals", ".", "get", "(", "key", ",", "''", ")", "stored_data", "[", "key", "]", "=", "DynamoMappings", ".", "map_index_val", "(", "val", ")", "table", "=", "self", ".", "get_class_table", "(", "obj", ".", "__class__", ")", "item", "=", "Item", "(", "table", ",", "data", "=", "stored_data", ")", "item", ".", "save", "(", "overwrite", "=", "True", ")" ]
Required functionality.
[ "Required", "functionality", "." ]
python
train
27.526316
aaugustin/websockets
src/websockets/server.py
https://github.com/aaugustin/websockets/blob/17b3f47549b6f752a1be07fa1ba3037cb59c7d56/src/websockets/server.py#L639-L647
def close(self) -> None: """ Close the server and terminate connections with close code 1001. This method is idempotent. """ if self.close_task is None: self.close_task = self.loop.create_task(self._close())
[ "def", "close", "(", "self", ")", "->", "None", ":", "if", "self", ".", "close_task", "is", "None", ":", "self", ".", "close_task", "=", "self", ".", "loop", ".", "create_task", "(", "self", ".", "_close", "(", ")", ")" ]
Close the server and terminate connections with close code 1001. This method is idempotent.
[ "Close", "the", "server", "and", "terminate", "connections", "with", "close", "code", "1001", "." ]
python
train
28.111111
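A shutdown sketch for the close() method documented above; websockets.serve() and wait_closed() are assumed to be the companion API, and the handler and port here are placeholders.

import asyncio
import websockets

async def handler(websocket, path):      # placeholder echo handler
    async for message in websocket:
        await websocket.send(message)

async def main():
    server = await websockets.serve(handler, "localhost", 8765)
    try:
        await asyncio.sleep(3600)        # serve for a while
    finally:
        server.close()                   # idempotent: schedules _close() once
        await server.wait_closed()       # connections are closed with code 1001

asyncio.run(main())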
juju/charm-helpers
charmhelpers/core/hookenv.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/core/hookenv.py#L1215-L1221
def _run_atexit(): '''Hook frameworks must invoke this after the main hook body has successfully completed. Do not invoke it if the hook fails.''' global _atexit for callback, args, kwargs in reversed(_atexit): callback(*args, **kwargs) del _atexit[:]
[ "def", "_run_atexit", "(", ")", ":", "global", "_atexit", "for", "callback", ",", "args", ",", "kwargs", "in", "reversed", "(", "_atexit", ")", ":", "callback", "(", "*", "args", ",", "*", "*", "kwargs", ")", "del", "_atexit", "[", ":", "]" ]
Hook frameworks must invoke this after the main hook body has successfully completed. Do not invoke it if the hook fails.
[ "Hook", "frameworks", "must", "invoke", "this", "after", "the", "main", "hook", "body", "has", "successfully", "completed", ".", "Do", "not", "invoke", "it", "if", "the", "hook", "fails", "." ]
python
train
39
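A sketch of the calling pattern the docstring above describes; hookenv.atexit() is assumed to be the public registration helper paired with _run_atexit(), and do_hook_work() is an illustrative stand-in for the hook body.

from charmhelpers.core import hookenv

def cleanup(path):
    hookenv.log('removing %s' % path)

def main():
    hookenv.atexit(cleanup, '/tmp/scratch')  # register deferred callback
    do_hook_work()                           # hypothetical hook body; may raise
    hookenv._run_atexit()                    # call only after the body succeeds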
otto-torino/django-baton
baton/views.py
https://github.com/otto-torino/django-baton/blob/e791b5db3a0814bb49d8dfbdfb989d45e03594b7/baton/views.py#L15-L17
def dispatch(self, *args, **kwargs): """ Only staff members can access this view """ return super(GetAppListJsonView, self).dispatch(*args, **kwargs)
[ "def", "dispatch", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "super", "(", "GetAppListJsonView", ",", "self", ")", ".", "dispatch", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Only staff members can access this view
[ "Only", "staff", "members", "can", "access", "this", "view" ]
python
train
54.333333
sirfoga/pyhal
hal/streams/pretty_table.py
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/streams/pretty_table.py#L69-L90
def _calculate_optimal_column_widths(self): """Calculates widths of columns :return: Length of longest data in each column (labels and data) """ columns = len(self.data[0]) # number of columns str_labels = [parse_colorama(str(l)) for l in self.labels] # labels as strings str_data = [[parse_colorama(str(col)) for col in row] for row in self.data] # values as strings widths = [0] * columns # length of longest string in each column for row in str_data: # calculate max width in each column widths = [max(w, len(c)) for w, c in zip(widths, row)] # check if label name is longer than data for col, label in enumerate(str_labels): if len(label) > widths[col]: widths[col] = len(label) self.widths = widths
[ "def", "_calculate_optimal_column_widths", "(", "self", ")", ":", "columns", "=", "len", "(", "self", ".", "data", "[", "0", "]", ")", "# number of columns", "str_labels", "=", "[", "parse_colorama", "(", "str", "(", "l", ")", ")", "for", "l", "in", "self", ".", "labels", "]", "# labels as strings", "str_data", "=", "[", "[", "parse_colorama", "(", "str", "(", "col", ")", ")", "for", "col", "in", "row", "]", "for", "row", "in", "self", ".", "data", "]", "# values as strings", "widths", "=", "[", "0", "]", "*", "columns", "# length of longest string in each column", "for", "row", "in", "str_data", ":", "# calculate max width in each column", "widths", "=", "[", "max", "(", "w", ",", "len", "(", "c", ")", ")", "for", "w", ",", "c", "in", "zip", "(", "widths", ",", "row", ")", "]", "# check if label name is longer than data", "for", "col", ",", "label", "in", "enumerate", "(", "str_labels", ")", ":", "if", "len", "(", "label", ")", ">", "widths", "[", "col", "]", ":", "widths", "[", "col", "]", "=", "len", "(", "label", ")", "self", ".", "widths", "=", "widths" ]
Calculates widths of columns :return: Length of longest data in each column (labels and data)
[ "Calculates", "widths", "of", "columns" ]
python
train
39.454545
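A standalone rework of the max-per-column logic in the record above, with the colour-code stripping left out; the sample labels and rows are invented for illustration.

labels = ["name", "id"]
data = [["alice", "7"], ["bob", "1234"]]

widths = [0] * len(data[0])
for row in data:                                   # longest cell per column
    widths = [max(w, len(c)) for w, c in zip(widths, row)]
for col, label in enumerate(labels):               # a label can be wider than its data
    widths[col] = max(widths[col], len(label))

print(widths)  # [5, 4]: "alice" wins column 0, "1234" wins column 1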