Dataset columns (name: type, value range):

repo: string, length 7-55
path: string, length 4-223
url: string, length 87-315
code: string, length 75-104k
code_tokens: sequence
docstring: string, length 1-46.9k
docstring_tokens: sequence
language: string, 1 distinct value
partition: string, 3 distinct values
avg_line_len: float64, 7.91-980

jssimporter/python-jss
jss/distribution_point.py
https://github.com/jssimporter/python-jss/blob/b95185d74e0c0531b0b563f280d4129e21d5fe5d/jss/distribution_point.py#L770-L808
def exists_using_casper(self, filename):
    """Check for the existence of a package file.

    Unlike other DistributionPoint types, JDS and CDP types have no
    documented interface for checking whether the server and its
    children have a complete copy of a file. The best we can do is
    check for an object using the API /packages URL--JSS.Package() or
    /scripts and look for matches on the filename.

    If this is not enough, this method uses the results of the
    casper.jxml page to determine if a package exists. This is an
    undocumented feature and as such should probably not be relied
    upon.

    Please note, scripts are not listed per-distributionserver like
    packages. For scripts, the best you can do is use the regular
    exists method. It will test for whether the file exists on ALL
    configured distribution servers. This may register False if the
    JDS is busy syncing them.
    """
    casper_results = casper.Casper(self.connection["jss"])
    distribution_servers = casper_results.find("distributionservers")

    # Step one: Build a list of sets of all package names.
    all_packages = []
    for distribution_server in distribution_servers:
        packages = set()
        for package in distribution_server.findall("packages/package"):
            packages.add(os.path.basename(package.find("fileURL").text))
        all_packages.append(packages)

    # Step two: Intersect the sets.
    base_set = all_packages.pop()
    for packages in all_packages:
        base_set = base_set.intersection(packages)

    # Step three: Check for membership.
    return filename in base_set
language: python | partition: train | avg_line_len: 43.974359

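The check above boils down to plain set intersection; a minimal self-contained sketch of the same membership test, with hypothetical package names:

    # Hypothetical per-server package sets; only files present on every
    # server survive the intersection.
    all_packages = [{"a.pkg", "b.pkg"}, {"a.pkg", "c.pkg"}]
    common = set.intersection(*all_packages)
    print("a.pkg" in common)  # True
    print("b.pkg" in common)  # False
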
tcalmant/ipopo
pelix/ipopo/core.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/ipopo/core.py#L654-L687
def bundle_changed(self, event):
    # type: (BundleEvent) -> None
    """
    A bundle event has been triggered

    :param event: The bundle event
    """
    kind = event.get_kind()
    bundle = event.get_bundle()

    if kind == BundleEvent.STOPPING_PRECLEAN:
        # A bundle is gone, remove its factories after the deactivator has
        # been called. That way, the deactivator can kill manually started
        # components.
        self._unregister_bundle_factories(bundle)
    elif kind == BundleEvent.STARTED:
        # A bundle is starting, register its factories before its activator
        # is called. That way, the activator can use the registered
        # factories.
        self._register_bundle_factories(bundle)
    elif kind == BundleEvent.UPDATE_BEGIN:
        # A bundle will be updated, store its auto-restart components
        self._autorestart_store_components(bundle)
    elif kind == BundleEvent.UPDATED:
        # Update has finished, restart stored components
        self._autorestart_components(bundle)
        self._autorestart_clear_components(bundle)
    elif kind == BundleEvent.UPDATE_FAILED:
        # Update failed, clean the stored components
        self._autorestart_clear_components(bundle)
language: python | partition: train | avg_line_len: 38.705882

mrcagney/gtfstk
gtfstk/helpers.py
https://github.com/mrcagney/gtfstk/blob/c91494e6fefc02523889655a0dc92d1c0eee8d03/gtfstk/helpers.py#L39-L70
def timestr_to_seconds(
    x: Union[dt.date, str], *, inverse: bool = False, mod24: bool = False
) -> int:
    """
    Given an HH:MM:SS time string ``x``, return the number of seconds
    past midnight that it represents.
    In keeping with GTFS standards, the hours entry may be greater
    than 23.

    If ``mod24``, then return the number of seconds modulo ``24*3600``.

    If ``inverse``, then do the inverse operation.
    In this case, if ``mod24`` also, then first take the number of
    seconds modulo ``24*3600``.
    """
    if not inverse:
        try:
            hours, mins, seconds = x.split(":")
            result = int(hours) * 3600 + int(mins) * 60 + int(seconds)
            if mod24:
                result %= 24 * 3600
        except Exception:  # narrowed from a bare ``except:``; bad input yields NaN
            result = np.nan
    else:
        try:
            seconds = int(x)
            if mod24:
                seconds %= 24 * 3600
            hours, remainder = divmod(seconds, 3600)
            mins, secs = divmod(remainder, 60)
            # Note: the inverse path returns a str, despite the ``-> int`` hint.
            result = f"{hours:02d}:{mins:02d}:{secs:02d}"
        except Exception:
            result = np.nan
    return result
language: python | partition: train | avg_line_len: 33.96875

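A few example calls to timestr_to_seconds as defined above (a sketch; assumes the module's usual numpy import for the NaN fallback):

    timestr_to_seconds("08:30:15")                       # 30615
    timestr_to_seconds("25:00:00", mod24=True)           # 90000 % 86400 == 3600
    timestr_to_seconds(3600, inverse=True)               # "01:00:00"
    timestr_to_seconds(90000, inverse=True, mod24=True)  # "01:00:00"
    timestr_to_seconds("not a time")                     # nan
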
fermiPy/fermipy
fermipy/diffuse/gt_srcmaps_catalog.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/diffuse/gt_srcmaps_catalog.py#L141-L199
def build_job_configs(self, args):
    """Hook to build job configurations
    """
    job_configs = {}

    components = Component.build_from_yamlfile(args['comp'])

    NAME_FACTORY.update_base_dict(args['data'])

    if self._comp_dict is None or self._comp_dict_file != args['library']:
        self._comp_dict_file = args['library']
        self._comp_dict = make_catalog_comp_dict(
            sources=self._comp_dict_file,
            basedir=NAME_FACTORY.base_dict['basedir'])
    else:
        print("Using cached catalog dict from %s" % args['library'])

    catalog_info_dict = self._comp_dict['catalog_info_dict']
    comp_info_dict = self._comp_dict['comp_info_dict']

    n_src_per_job = args['nsrc']

    if args['make_xml']:
        SrcmapsCatalog_SG._make_xml_files(catalog_info_dict, comp_info_dict)

    for catalog_name, catalog_info in catalog_info_dict.items():
        n_cat_src = len(catalog_info.catalog.table)
        n_job = int(math.ceil(float(n_cat_src) / n_src_per_job))

        for comp in components:
            zcut = "zmax%i" % comp.zmax
            key = comp.make_key('{ebin_name}_{evtype_name}')
            name_keys = dict(zcut=zcut,
                             sourcekey=catalog_name,
                             ebin=comp.ebin_name,
                             psftype=comp.evtype_name,
                             coordsys=comp.coordsys,
                             irf_ver=NAME_FACTORY.irf_ver(),
                             mktime='none',
                             fullpath=True)

            for i_job in range(n_job):
                full_key = "%s_%02i" % (key, i_job)
                srcmin = i_job * n_src_per_job
                srcmax = min(srcmin + n_src_per_job, n_cat_src)
                outfile = NAME_FACTORY.srcmaps(**name_keys).replace(
                    '.fits', "_%02i.fits" % (i_job))
                logfile = make_nfs_path(outfile.replace('.fits', '.log'))
                job_configs[full_key] = dict(
                    cmap=NAME_FACTORY.ccube(**name_keys),
                    expcube=NAME_FACTORY.ltcube(**name_keys),
                    irfs=NAME_FACTORY.irfs(**name_keys),
                    bexpmap=NAME_FACTORY.bexpcube(**name_keys),
                    outfile=outfile,
                    logfile=logfile,
                    srcmdl=catalog_info.srcmdl_name,
                    evtype=comp.evtype,
                    srcmin=srcmin,
                    srcmax=srcmax)

    return job_configs
language: python | partition: train | avg_line_len: 48.20339

Erotemic/utool
utool/util_list.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L1772-L1823
def take(list_, index_list):
    """ Selects a subset of a list based on a list of indices.
    This is similar to np.take, but pure python.

    Args:
        list_ (list): some indexable object
        index_list (list, slice, int): some indexing object

    Returns:
        list or scalar: subset of the list

    CommandLine:
        python -m utool.util_list --test-take

    SeeAlso:
        ut.dict_take
        ut.dict_subset
        ut.none_take
        ut.compress

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_list import *  # NOQA
        >>> list_ = [0, 1, 2, 3]
        >>> index_list = [2, 0]
        >>> result = take(list_, index_list)
        >>> print(result)
        [2, 0]

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_list import *  # NOQA
        >>> list_ = [0, 1, 2, 3]
        >>> index = 2
        >>> result = take(list_, index)
        >>> print(result)
        2

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_list import *  # NOQA
        >>> list_ = [0, 1, 2, 3]
        >>> index = slice(1, None, 2)
        >>> result = take(list_, index)
        >>> print(result)
        [1, 3]
    """
    try:
        return [list_[index] for index in index_list]
    except TypeError:
        return list_[index_list]
language: python | partition: train | avg_line_len: 24.365385

GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/input_readers.py
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/input_readers.py#L589-L604
def validate(cls, mapper_spec):
    """Inherit docs."""
    super(RawDatastoreInputReader, cls).validate(mapper_spec)
    params = _get_params(mapper_spec)
    entity_kind = params[cls.ENTITY_KIND_PARAM]
    if "." in entity_kind:
        logging.warning(
            # Trailing space added; the original concatenation ran the two
            # sentences together ("reader %s.Assuming").
            ". detected in entity kind %s specified for reader %s. "
            "Assuming entity kind contains the dot.",
            entity_kind, cls.__name__)
    if cls.FILTERS_PARAM in params:
        filters = params[cls.FILTERS_PARAM]
        for f in filters:
            if f[1] != "=":
                raise BadReaderParamsError(
                    "Only equality filters are supported: %s", f)
language: python | partition: train | avg_line_len: 38.5625

mapmyfitness/jtime
jtime/utils.py
https://github.com/mapmyfitness/jtime/blob/402fb6b40ac7a78c23fd02fac50c6dbe49e5ebfd/jtime/utils.py#L6-L13
def get_input(input_func, input_str):
    """
    Get input from the user given an input function and an input string
    """
    val = input_func("Please enter your {0}: ".format(input_str))

    while not val or not len(val.strip()):
        val = input_func("You didn't enter a valid {0}, please try again: ".format(input_str))

    return val
language: python | partition: train | avg_line_len: 42.125

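A hypothetical call site, assuming Python 3's built-in input as the input function:

    # Prompts "Please enter your username: " and re-prompts until the
    # user types something non-blank.
    username = get_input(input, "username")
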
ThomasChiroux/attowiki
src/attowiki/tools.py
https://github.com/ThomasChiroux/attowiki/blob/6c93c420305490d324fdc95a7b40b2283a222183/src/attowiki/tools.py#L30-L40
def attowiki_distro_path():
    """return the absolute complete path where attowiki is located

    .. todo:: use pkg_resources ?
    """
    attowiki_path = os.path.abspath(__file__)
    if attowiki_path[-1] != '/':
        attowiki_path = attowiki_path[:attowiki_path.rfind('/')]
    else:
        attowiki_path = attowiki_path[:attowiki_path[:-1].rfind('/')]
    return attowiki_path
language: python | partition: train | avg_line_len: 34.181818

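For reference, the slicing above computes what os.path.dirname already provides; a sketch of the equivalent (assuming POSIX-style separators, as the original does):

    import os
    attowiki_path = os.path.dirname(os.path.abspath(__file__))
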
nitely/v8-cffi
v8cffi/context.py
https://github.com/nitely/v8-cffi/blob/e3492e7eaacb30be75999c24413aa15eeab57a5d/v8cffi/context.py#L22-L38
def _is_utf_8(txt):
    """
    Check a string is utf-8 encoded

    :param bytes txt: utf-8 string
    :return: Whether the string\
    is utf-8 encoded or not
    :rtype: bool
    """
    assert isinstance(txt, six.binary_type)

    try:
        _ = six.text_type(txt, 'utf-8')
    except (TypeError, UnicodeDecodeError):
        # Invalid byte sequences raise UnicodeDecodeError; the original
        # caught UnicodeEncodeError, which never fires when decoding.
        return False
    else:
        return True
language: python | partition: train | avg_line_len: 21.058824

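Example probes (a sketch assuming six is installed and the function is in scope):

    _is_utf_8("héllo".encode("utf-8"))  # True
    _is_utf_8(b"\xff\xfe")              # False: not a valid UTF-8 sequence
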
lins05/slackbot
slackbot/dispatcher.py
https://github.com/lins05/slackbot/blob/7195d46b9e1dc4ecfae0bdcaa91461202689bfe5/slackbot/dispatcher.py#L214-L230
def reply_webapi(self, text, attachments=None, as_user=True, in_thread=None):
    """
    Send a reply to the sender using Web API

    (This function supports formatted message
    when using a bot integration)
    If the message was sent in a thread, answer in a thread by default.
    """
    if in_thread is None:
        in_thread = 'thread_ts' in self.body

    if in_thread:
        self.send_webapi(text, attachments=attachments, as_user=as_user,
                         thread_ts=self.thread_ts)
    else:
        text = self.gen_reply(text)
        self.send_webapi(text, attachments=attachments, as_user=as_user)
language: python | partition: train | avg_line_len: 38.411765

molmod/molmod
molmod/io/cml.py
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/io/cml.py#L166-L195
def _dump_cml_molecule(f, molecule):
    """Dump a single molecule to a CML file

    Arguments:
     | ``f`` -- a file-like object
     | ``molecule`` -- a Molecule instance
    """
    extra = getattr(molecule, "extra", {})
    attr_str = " ".join("%s='%s'" % (key, value) for key, value in extra.items())
    f.write(" <molecule id='%s' %s>\n" % (molecule.title, attr_str))
    f.write("  <atomArray>\n")
    atoms_extra = getattr(molecule, "atoms_extra", {})
    for counter, number, coordinate in zip(range(molecule.size),
                                           molecule.numbers,
                                           molecule.coordinates/angstrom):
        atom_extra = atoms_extra.get(counter, {})
        attr_str = " ".join("%s='%s'" % (key, value) for key, value in atom_extra.items())
        f.write("   <atom id='a%i' elementType='%s' x3='%s' y3='%s' z3='%s' %s />\n" % (
            counter, periodic[number].symbol, coordinate[0], coordinate[1],
            coordinate[2], attr_str,
        ))
    f.write("  </atomArray>\n")
    if molecule.graph is not None:
        bonds_extra = getattr(molecule, "bonds_extra", {})
        f.write("  <bondArray>\n")
        for edge in molecule.graph.edges:
            bond_extra = bonds_extra.get(edge, {})
            attr_str = " ".join("%s='%s'" % (key, value) for key, value in bond_extra.items())
            i1, i2 = edge
            f.write("   <bond atomRefs2='a%i a%i' %s />\n" % (i1, i2, attr_str))
        f.write("  </bondArray>\n")
    f.write(" </molecule>\n")
language: python | partition: train | avg_line_len: 47.933333

f213/rumetr-client
rumetr/roometr.py
https://github.com/f213/rumetr-client/blob/5180152bcb2eed8246b88035db7c0bb1fe603166/rumetr/roometr.py#L61-L70
def appt_exists(self, complex: str, house: str, appt: str) -> bool:
    """
    Shortcut to check if appt exists in our database.
    """
    try:
        self.check_appt(complex, house, appt)
    except exceptions.RumetrApptNotFound:
        return False

    return True
language: python | partition: train | avg_line_len: 29.5

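Caller code for this EAFP-style existence check might look like the following sketch (the client variable and identifiers are hypothetical):

    # "client" is an already-configured Rumetr API wrapper.
    if client.appt_exists("my-complex", "house-1", "appt-17"):
        print("apartment already uploaded")
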
mozilla/socorrolib
socorrolib/lib/threaded_task_manager.py
https://github.com/mozilla/socorrolib/blob/4ec08c6a4ee2c8a69150268afdd324f5f22b90c8/socorrolib/lib/threaded_task_manager.py#L201-L214
def _kill_worker_threads(self):
    """This function coerces the consumer/worker threads to kill
    themselves. When called by the queuing thread, one death token will
    be placed on the queue for each thread. Each worker thread is always
    looking for the death token. When it encounters it, it immediately
    runs to completion without drawing anything more off the queue.

    This is a blocking call. The thread using this function will wait
    for all the worker threads to die."""
    for x in range(self.number_of_threads):
        self.task_queue.put((None, None))
    self.logger.debug("waiting for standard worker threads to stop")
    for t in self.thread_list:
        t.join()
language: python | partition: train | avg_line_len: 52.5

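The same death-token (poison pill) shutdown in a self-contained sketch; this is generic Python, not socorrolib's API:

    import queue
    import threading

    task_queue = queue.Queue()

    def worker():
        while True:
            task = task_queue.get()
            if task is None:       # the death token
                break              # run to completion without drawing more work
            task()                 # hypothetical: tasks are plain callables

    threads = [threading.Thread(target=worker) for _ in range(4)]
    for t in threads:
        t.start()
    for _ in threads:
        task_queue.put(None)       # one death token per worker thread
    for t in threads:
        t.join()                   # blocking: wait for all workers to die
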
google/mobly
mobly/controllers/monsoon.py
https://github.com/google/mobly/blob/38ba2cf7d29a20e6a2fca1718eecb337df38db26/mobly/controllers/monsoon.py#L846-L884
def measure_power(self, hz, duration, tag, offset=30):
    """Measure power consumption of the attached device.

    Because it takes some time for the device to calm down after the usb
    connection is cut, an offset is set for each measurement. The
    default is 30s. The total time taken to measure will be
    (duration + offset).

    Args:
        hz: Number of samples to take per second.
        duration: Number of seconds to take samples for in each step.
        tag: A string that's the name of the collected data group.
        offset: The number of seconds of initial data to discard.

    Returns:
        A MonsoonData object with the measured power data.
    """
    num = duration * hz
    oset = offset * hz
    data = None
    self.usb("auto")
    time.sleep(1)
    with self.dut.handle_usb_disconnect():
        time.sleep(1)
        try:
            data = self.take_samples(hz, num, sample_offset=oset)
            if not data:
                raise MonsoonError(
                    "No data was collected in measurement %s." % tag)
            data.tag = tag
            self.dut.log.info("Measurement summary: %s", repr(data))
            return data
        finally:
            self.mon.StopDataCollection()
            self.log.info("Finished taking samples, reconnecting to dut.")
            self.usb("on")
            self.dut.adb.wait_for_device(timeout=DEFAULT_TIMEOUT_USB_ON)
            # Wait for device to come back online.
            time.sleep(10)
            self.dut.log.info("Dut reconnected.")
language: python | partition: train | avg_line_len: 42

user-cont/conu
conu/backend/docker/image.py
https://github.com/user-cont/conu/blob/08caae7bb6bdd265b55bb106c3da6a7946a5a352/conu/backend/docker/image.py#L639-L662
def has_pkgs_signed_with(self, allowed_keys):
    """
    Check signature of packages installed in image.
    Raises exception when

    * rpm binary is not installed in image
    * parsing of rpm fails
    * there are packages in image that are not signed with one of allowed keys

    :param allowed_keys: list of allowed keys
    :return: bool
    """
    if not allowed_keys or not isinstance(allowed_keys, list):
        raise ConuException("allowed_keys must be a list")
    command = ['rpm', '-qa', '--qf', '%{name} %{SIGPGP:pgpsig}\n']
    cont = self.run_via_binary(command=command)
    try:
        out = cont.logs_unicode()[:-1].split('\n')
        check_signatures(out, allowed_keys)
    finally:
        cont.stop()
        cont.delete()
    return True
language: python | partition: train | avg_line_len: 34.333333

talpor/django-dashing
dashing/views.py
https://github.com/talpor/django-dashing/blob/1edb9ac5d7b7079f079a1e85552bfdfc5e1a93f6/dashing/views.py#L12-L20
def check_permissions(self, request):
    """
    Check if the request should be permitted.
    Raises an appropriate exception if the request is not permitted.
    """
    permissions = [permission() for permission in self.permission_classes]
    for permission in permissions:
        if not permission.has_permission(request):
            raise PermissionDenied()
language: python | partition: train | avg_line_len: 43.333333

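Any object exposing a has_permission(request) method works as a permission class here; a hypothetical example (the class name and staff check are illustrative, not django-dashing's API):

    class IsStaff:
        def has_permission(self, request):
            return bool(getattr(request.user, "is_staff", False))
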
IdentityPython/pysaml2
src/saml2/sigver.py
https://github.com/IdentityPython/pysaml2/blob/d3aa78eeb7d37c12688f783cb4db1c7263a14ad6/src/saml2/sigver.py#L1338-L1349
def encrypt_assertion(self, statement, enc_key, template,
                      key_type='des-192', node_xpath=None):
    """
    Will encrypt an assertion

    :param statement: A XML document that contains the assertion to encrypt
    :param enc_key: File name of a file containing the encryption key
    :param template: A template for the encryption part to be added.
    :param key_type: The type of session key to use.
    :return: The encrypted text
    """
    return self.crypto.encrypt_assertion(
        statement, enc_key, template, key_type, node_xpath)
language: python | partition: train | avg_line_len: 48.083333

gbiggs/rtctree
rtctree/component.py
https://github.com/gbiggs/rtctree/blob/bd725a47ac87c259c8bce06156ccc9ab71111c26/rtctree/component.py#L699-L718
def state_in_ec(self, ec_index):
    '''Get the state of the component in an execution context.

    @param ec_index The index of the execution context to check the
                    state in. This index is into the total array of
                    contexts, that is both owned and participating
                    contexts. If the value of ec_index is greater than
                    the length of @ref owned_ecs, that length is
                    subtracted from ec_index and the result used as an
                    index into @ref participating_ecs.

    '''
    with self._mutex:
        if ec_index >= len(self.owned_ecs):
            ec_index -= len(self.owned_ecs)
            if ec_index >= len(self.participating_ecs):
                raise exceptions.BadECIndexError(ec_index)
            return self.participating_ec_states[ec_index]
        else:
            return self.owned_ec_states[ec_index]
language: python | partition: train | avg_line_len: 48.3

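The combined-index convention is easier to see in isolation; a self-contained sketch with hypothetical owned/participating state lists:

    owned = ["owned0", "owned1"]
    participating = ["part0", "part1", "part2"]

    def state_at(ec_index):
        # Indices past the owned contexts fall through to the
        # participating contexts, as in state_in_ec above.
        if ec_index >= len(owned):
            ec_index -= len(owned)
            if ec_index >= len(participating):
                raise IndexError(ec_index)
            return participating[ec_index]
        return owned[ec_index]

    print(state_at(3))  # "part1" (3 - 2 owned -> participating[1])
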
phoebe-project/phoebe2
phoebe/dynamics/nbody.py
https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/dynamics/nbody.py#L321-L381
def dynamics_from_bundle_bs(b, times, compute=None, return_roche_euler=False, **kwargs):
    """
    Parse parameters in the bundle and call :func:`dynamics`.

    See :func:`dynamics` for more detailed information.

    NOTE: you must either provide compute (the label) OR all relevant
    options as kwargs (ltte)

    Args:
        b: (Bundle) the bundle with a set hierarchy
        times: (list or array) times at which to run the dynamics
        stepsize: (float, optional) stepsize for the integration
            [default: 0.01]
        orbiterror: (float, optional) orbiterror for the integration
            [default: 1e-16]
        ltte: (bool, default False) whether to account for light travel
            time effects.

    Returns:
        t, xs, ys, zs, vxs, vys, vzs.  t is a numpy array of all times,
        the remaining are a list of numpy arrays (a numpy array per
        star - in order given by b.hierarchy.get_stars()) for the
        cartesian positions and velocities of each star at those same
        times.
    """
    stepsize = 0.01
    orbiterror = 1e-16
    computeps = b.get_compute(compute, check_visible=False, force_ps=True)
    ltte = computeps.get_value('ltte', check_visible=False, **kwargs)

    hier = b.hierarchy

    starrefs = hier.get_stars()
    orbitrefs = hier.get_orbits()

    def mean_anom(t0, t0_perpass, period):
        # TODO: somehow make this into a constraint where t0 and mean anom
        # are both in the compute options if dynamic_method==nbody
        # (one is constrained from the other and the orbit.... nvm, this gets ugly)
        return 2 * np.pi * (t0 - t0_perpass) / period

    masses = [b.get_value('mass', u.solMass, component=component,
              context='component') * c.G.to('AU3 / (Msun d2)').value
              for component in starrefs]  # GM
    smas = [b.get_value('sma', u.AU, component=component,
            context='component') for component in orbitrefs]
    eccs = [b.get_value('ecc', component=component,
            context='component') for component in orbitrefs]
    incls = [b.get_value('incl', u.rad, component=component,
             context='component') for component in orbitrefs]
    per0s = [b.get_value('per0', u.rad, component=component,
             context='component') for component in orbitrefs]
    long_ans = [b.get_value('long_an', u.rad, component=component,
                context='component') for component in orbitrefs]
    t0_perpasses = [b.get_value('t0_perpass', u.d, component=component,
                    context='component') for component in orbitrefs]
    periods = [b.get_value('period', u.d, component=component,
               context='component') for component in orbitrefs]

    vgamma = b.get_value('vgamma', context='system', unit=u.solRad/u.d)
    t0 = b.get_value('t0', context='system', unit=u.d)

    # mean_anoms = [mean_anom(t0, t0_perpass, period) for t0_perpass, period in zip(t0_perpasses, periods)]
    mean_anoms = [b.get_value('mean_anom', u.rad, component=component,
                  context='component') for component in orbitrefs]

    return dynamics_bs(times, masses, smas, eccs, incls, per0s, long_ans,
                       mean_anoms, t0, vgamma, stepsize, orbiterror, ltte,
                       return_roche_euler=return_roche_euler)
language: python | partition: train | avg_line_len: 50.311475

aleontiev/dj
dj/application.py
https://github.com/aleontiev/dj/blob/0612d442fdd8d472aea56466568b9857556ecb51/dj/application.py#L67-L89
def parse_application_name(setup_filename):
    """Parse a setup.py file for the name.

    Returns:
        name, or None
    """
    name = None  # avoid a NameError when no setup() call or name kwarg is found
    with open(setup_filename, 'rt') as setup_file:
        fst = RedBaron(setup_file.read())
        for node in fst:
            if (
                node.type == 'atomtrailers' and
                str(node.name) == 'setup'
            ):
                for call in node.call:
                    if str(call.name) == 'name':
                        value = call.value
                        if hasattr(value, 'to_python'):
                            value = value.to_python()
                        name = str(value)
                        break
                if name:
                    break
    return name
language: python | partition: train | avg_line_len: 35.304348

sethmlarson/virtualbox-python
virtualbox/library.py
https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L30256-L30273
def open_ext_pack_file(self, path):
    """Attempts to open an extension pack file in preparation for
    installation.

    in path of type str
        The path of the extension pack tarball. This can optionally be
        followed by a "::SHA-256=hex-digit" of the tarball.

    return file_p of type :class:`IExtPackFile`
        The interface of the extension pack file object.
    """
    if not isinstance(path, basestring):
        raise TypeError("path can only be an instance of type basestring")
    file_p = self._call("openExtPackFile", in_p=[path])
    file_p = IExtPackFile(file_p)
    return file_p
language: python | partition: train | avg_line_len: 37.222222

pypa/pipenv
pipenv/vendor/orderedmultidict/orderedmultidict.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/orderedmultidict/orderedmultidict.py#L274-L282
def getlist(self, key, default=[]):
    """
    Returns: The list of values for <key> if <key> is in the dictionary,
    else <default>. If <default> is not provided, an empty list is
    returned.
    """
    # Note: the mutable default [] is shared across calls; mutating the
    # list returned for a missing key would leak state between calls.
    if key in self:
        return [node.value for node in self._map[key]]
    return default
language: python | partition: train | avg_line_len: 35.888889

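Usage sketch, assuming the package's omdict constructor accepts a list of key/value pairs:

    omd = omdict([(1, 1), (1, 11), (2, 2)])
    omd.getlist(1)        # [1, 11]
    omd.getlist(3)        # [] (the shared default)
    omd.getlist(3, None)  # None
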
weld-project/weld
python/grizzly/grizzly/grizzly_impl.py
https://github.com/weld-project/weld/blob/8ddd6db6b28878bef0892da44b1d2002b564389c/python/grizzly/grizzly/grizzly_impl.py#L376-L407
def sort(expr, field=None, keytype=None, ascending=True):
    """
    Sorts the vector.
    If the field parameter is provided then the sort operates on a
    vector of structs where the sort key is the field of the struct.

    Args:
        expr (WeldObject)
        field (Int)
    """
    weld_obj = WeldObject(encoder_, decoder_)

    expr_var = weld_obj.update(expr)
    if isinstance(expr, WeldObject):
        expr_var = expr.obj_id
        weld_obj.dependencies[expr_var] = expr

    if field is not None:
        key_str = "x.$%s" % field
    else:
        key_str = "x"

    if not ascending:
        # The type is not necessarily f64.
        key_str = key_str + "* %s(-1)" % keytype

    weld_template = """
    sort(%(expr)s, |x| %(key)s)
    """

    weld_obj.weld_code = weld_template % {"expr": expr_var, "key": key_str}
    return weld_obj
language: python | partition: train | avg_line_len: 25.8125

pycontribs/pyrax
pyrax/clouddns.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/clouddns.py#L163-L167
def list_subdomains(self, limit=None, offset=None):
    """
    Returns a list of all subdomains for this domain.
    """
    return self.manager.list_subdomains(self, limit=limit, offset=offset)
language: python | partition: train | avg_line_len: 41.4

JelteF/PyLaTeX
pylatex/tikz.py
https://github.com/JelteF/PyLaTeX/blob/62d9d9912ce8445e6629cdbcb80ad86143a1ed23/pylatex/tikz.py#L89-L103
def from_str(cls, coordinate):
    """Build a TikZCoordinate object from a string."""
    m = cls._coordinate_str_regex.match(coordinate)

    if m is None:
        raise ValueError('invalid coordinate string')

    if m.group(1) == '++':
        relative = True
    else:
        relative = False

    return TikZCoordinate(
        float(m.group(2)), float(m.group(4)), relative=relative)
language: python | partition: train | avg_line_len: 27.8

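A hedged example, assuming the class regex accepts the usual TikZ coordinate syntax:

    c = TikZCoordinate.from_str("(1.0,2.5)")  # absolute: x=1.0, y=2.5
    r = TikZCoordinate.from_str("++(0,1)")    # relative=True
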
numenta/htmresearch
htmresearch/support/network_text_data_generator.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/support/network_text_data_generator.py#L185-L231
def saveData(self, dataOutputFile, categoriesOutputFile):
    """
    Save the processed data and the associated category mapping.

    @param dataOutputFile        (str)  Location to save data
    @param categoriesOutputFile  (str)  Location to save category map
    @return                      (str)  Path to the saved data file iff
                                        saveData() is successful.
    """
    if self.records is None:
        return False

    if not dataOutputFile.endswith("csv"):
        raise TypeError("data output file must be csv.")
    if not categoriesOutputFile.endswith("json"):
        raise TypeError("category output file must be json")

    # Ensure directory exists
    dataOutputDirectory = os.path.dirname(dataOutputFile)
    if not os.path.exists(dataOutputDirectory):
        os.makedirs(dataOutputDirectory)

    categoriesOutputDirectory = os.path.dirname(categoriesOutputFile)
    if not os.path.exists(categoriesOutputDirectory):
        os.makedirs(categoriesOutputDirectory)

    with open(dataOutputFile, "w") as f:
        # Header
        writer = csv.DictWriter(f, fieldnames=self.fieldNames)
        writer.writeheader()

        # Types
        writer.writerow(self.types)

        # Special characters
        writer.writerow(self.specials)

        for data in self.records:
            for record in data:
                writer.writerow(record)

    with open(categoriesOutputFile, "w") as f:
        f.write(json.dumps(self.categoryToId,
                           sort_keys=True,
                           indent=4,
                           separators=(",", ": ")))

    return dataOutputFile
[ "def", "saveData", "(", "self", ",", "dataOutputFile", ",", "categoriesOutputFile", ")", ":", "if", "self", ".", "records", "is", "None", ":", "return", "False", "if", "not", "dataOutputFile", ".", "endswith", "(", "\"csv\"", ")", ":", "raise", "TypeError", "(", "\"data output file must be csv.\"", ")", "if", "not", "categoriesOutputFile", ".", "endswith", "(", "\"json\"", ")", ":", "raise", "TypeError", "(", "\"category output file must be json\"", ")", "# Ensure directory exists", "dataOutputDirectory", "=", "os", ".", "path", ".", "dirname", "(", "dataOutputFile", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "dataOutputDirectory", ")", ":", "os", ".", "makedirs", "(", "dataOutputDirectory", ")", "categoriesOutputDirectory", "=", "os", ".", "path", ".", "dirname", "(", "categoriesOutputFile", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "categoriesOutputDirectory", ")", ":", "os", ".", "makedirs", "(", "categoriesOutputDirectory", ")", "with", "open", "(", "dataOutputFile", ",", "\"w\"", ")", "as", "f", ":", "# Header", "writer", "=", "csv", ".", "DictWriter", "(", "f", ",", "fieldnames", "=", "self", ".", "fieldNames", ")", "writer", ".", "writeheader", "(", ")", "# Types", "writer", ".", "writerow", "(", "self", ".", "types", ")", "# Special characters", "writer", ".", "writerow", "(", "self", ".", "specials", ")", "for", "data", "in", "self", ".", "records", ":", "for", "record", "in", "data", ":", "writer", ".", "writerow", "(", "record", ")", "with", "open", "(", "categoriesOutputFile", ",", "\"w\"", ")", "as", "f", ":", "f", ".", "write", "(", "json", ".", "dumps", "(", "self", ".", "categoryToId", ",", "sort_keys", "=", "True", ",", "indent", "=", "4", ",", "separators", "=", "(", "\",\"", ",", "\": \"", ")", ")", ")", "return", "dataOutputFile" ]
Save the processed data and the associated category mapping.

@param dataOutputFile        (str)  Location to save data
@param categoriesOutputFile  (str)  Location to save category map
@return                      (str)  Path to the saved data file iff saveData() is successful.
[ "Save", "the", "processed", "data", "and", "the", "associated", "category", "mapping", "." ]
python
train
33.255319
bcbio/bcbio-nextgen
bcbio/ngsalign/alignprep.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/alignprep.py#L502-L525
def _cram_to_fastq_region(cram_file, work_dir, base_name, region, data):
    """Convert CRAM to fastq in a specified region.
    """
    ref_file = tz.get_in(["reference", "fasta", "base"], data)
    resources = config_utils.get_resources("bamtofastq", data["config"])
    cores = tz.get_in(["config", "algorithm", "num_cores"], data, 1)
    max_mem = config_utils.convert_to_bytes(resources.get("memory", "1G")) * cores
    rext = "-%s" % region.replace(":", "_").replace("-", "_") if region else "full"
    out_s, out_p1, out_p2, out_o1, out_o2 = [os.path.join(work_dir, "%s%s-%s.fq.gz" %
                                                          (base_name, rext, fext))
                                             for fext in ["s1", "p1", "p2", "o1", "o2"]]
    if not utils.file_exists(out_p1):
        with file_transaction(data, out_s, out_p1, out_p2, out_o1, out_o2) as \
                (tx_out_s, tx_out_p1, tx_out_p2, tx_out_o1, tx_out_o2):
            cram_file = objectstore.cl_input(cram_file)
            sortprefix = "%s-sort" % utils.splitext_plus(tx_out_s)[0]
            cmd = ("bamtofastq filename={cram_file} inputformat=cram T={sortprefix} "
                   "gz=1 collate=1 colsbs={max_mem} exclude=SECONDARY,SUPPLEMENTARY "
                   "F={tx_out_p1} F2={tx_out_p2} S={tx_out_s} O={tx_out_o1} O2={tx_out_o2} "
                   "reference={ref_file}")
            if region:
                cmd += " ranges='{region}'"
            do.run(cmd.format(**locals()), "CRAM to fastq %s" % region if region else "")
    return [[out_p1, out_p2, out_s]]
[ "def", "_cram_to_fastq_region", "(", "cram_file", ",", "work_dir", ",", "base_name", ",", "region", ",", "data", ")", ":", "ref_file", "=", "tz", ".", "get_in", "(", "[", "\"reference\"", ",", "\"fasta\"", ",", "\"base\"", "]", ",", "data", ")", "resources", "=", "config_utils", ".", "get_resources", "(", "\"bamtofastq\"", ",", "data", "[", "\"config\"", "]", ")", "cores", "=", "tz", ".", "get_in", "(", "[", "\"config\"", ",", "\"algorithm\"", ",", "\"num_cores\"", "]", ",", "data", ",", "1", ")", "max_mem", "=", "config_utils", ".", "convert_to_bytes", "(", "resources", ".", "get", "(", "\"memory\"", ",", "\"1G\"", ")", ")", "*", "cores", "rext", "=", "\"-%s\"", "%", "region", ".", "replace", "(", "\":\"", ",", "\"_\"", ")", ".", "replace", "(", "\"-\"", ",", "\"_\"", ")", "if", "region", "else", "\"full\"", "out_s", ",", "out_p1", ",", "out_p2", ",", "out_o1", ",", "out_o2", "=", "[", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"%s%s-%s.fq.gz\"", "%", "(", "base_name", ",", "rext", ",", "fext", ")", ")", "for", "fext", "in", "[", "\"s1\"", ",", "\"p1\"", ",", "\"p2\"", ",", "\"o1\"", ",", "\"o2\"", "]", "]", "if", "not", "utils", ".", "file_exists", "(", "out_p1", ")", ":", "with", "file_transaction", "(", "data", ",", "out_s", ",", "out_p1", ",", "out_p2", ",", "out_o1", ",", "out_o2", ")", "as", "(", "tx_out_s", ",", "tx_out_p1", ",", "tx_out_p2", ",", "tx_out_o1", ",", "tx_out_o2", ")", ":", "cram_file", "=", "objectstore", ".", "cl_input", "(", "cram_file", ")", "sortprefix", "=", "\"%s-sort\"", "%", "utils", ".", "splitext_plus", "(", "tx_out_s", ")", "[", "0", "]", "cmd", "=", "(", "\"bamtofastq filename={cram_file} inputformat=cram T={sortprefix} \"", "\"gz=1 collate=1 colsbs={max_mem} exclude=SECONDARY,SUPPLEMENTARY \"", "\"F={tx_out_p1} F2={tx_out_p2} S={tx_out_s} O={tx_out_o1} O2={tx_out_o2} \"", "\"reference={ref_file}\"", ")", "if", "region", ":", "cmd", "+=", "\" ranges='{region}'\"", "do", ".", "run", "(", "cmd", ".", "format", "(", "*", "*", "locals", "(", ")", ")", ",", "\"CRAM to fastq %s\"", "%", "region", "if", "region", "else", "\"\"", ")", "return", "[", "[", "out_p1", ",", "out_p2", ",", "out_s", "]", "]" ]
Convert CRAM to fastq in a specified region.
[ "Convert", "CRAM", "to", "fastq", "in", "a", "specified", "region", "." ]
python
train
64.75
happyleavesaoc/python-voobly
voobly/__init__.py
https://github.com/happyleavesaoc/python-voobly/blob/83b4ab7d630a00459c2a64e55e3ac85c7be38194/voobly/__init__.py#L255-L263
def ladders(session, game_id):
    """Get a list of ladder IDs."""
    if isinstance(game_id, str):
        game_id = lookup_game_id(game_id)
    lobbies = get_lobbies(session, game_id)
    ladder_ids = set()
    for lobby in lobbies:
        ladder_ids |= set(lobby['ladders'])
    return list(ladder_ids)
[ "def", "ladders", "(", "session", ",", "game_id", ")", ":", "if", "isinstance", "(", "game_id", ",", "str", ")", ":", "game_id", "=", "lookup_game_id", "(", "game_id", ")", "lobbies", "=", "get_lobbies", "(", "session", ",", "game_id", ")", "ladder_ids", "=", "set", "(", ")", "for", "lobby", "in", "lobbies", ":", "ladder_ids", "|=", "set", "(", "lobby", "[", "'ladders'", "]", ")", "return", "list", "(", "ladder_ids", ")" ]
Get a list of ladder IDs.
[ "Get", "a", "list", "of", "ladder", "IDs", "." ]
python
train
33.111111
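The function above unions each lobby's 'ladders' list; a self-contained sketch of the same pattern with fabricated lobby data (no Voobly session needed).

# The lobby dicts below are invented stand-ins for get_lobbies() output.
lobbies = [
    {'ladders': [131, 132]},
    {'ladders': [132, 163]},
]
ladder_ids = set()
for lobby in lobbies:
    ladder_ids |= set(lobby['ladders'])
print(sorted(ladder_ids))  # [131, 132, 163] -- duplicates collapse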
google/grr
grr/server/grr_response_server/flow_base.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/flow_base.py#L582-L615
def ProcessAllReadyRequests(self):
    """Processes all requests that are due to run.

    Returns:
      The number of processed requests.
    """
    request_dict = data_store.REL_DB.ReadFlowRequestsReadyForProcessing(
        self.rdf_flow.client_id,
        self.rdf_flow.flow_id,
        next_needed_request=self.rdf_flow.next_request_to_process)
    if not request_dict:
        return 0

    processed = 0
    while self.rdf_flow.next_request_to_process in request_dict:
        request, responses = request_dict[self.rdf_flow.next_request_to_process]
        self.RunStateMethod(request.next_state, request, responses)
        self.rdf_flow.next_request_to_process += 1
        processed += 1
        self.completed_requests.append(request)

    if processed and self.IsRunning() and not self.outstanding_requests:
        self.RunStateMethod("End")
        if (self.rdf_flow.flow_state == self.rdf_flow.FlowState.RUNNING and
                not self.outstanding_requests):
            self.MarkDone()

    self.PersistState()

    if not self.IsRunning():
        # All requests and responses can now be deleted.
        self._ClearAllRequestsAndResponses()

    return processed
[ "def", "ProcessAllReadyRequests", "(", "self", ")", ":", "request_dict", "=", "data_store", ".", "REL_DB", ".", "ReadFlowRequestsReadyForProcessing", "(", "self", ".", "rdf_flow", ".", "client_id", ",", "self", ".", "rdf_flow", ".", "flow_id", ",", "next_needed_request", "=", "self", ".", "rdf_flow", ".", "next_request_to_process", ")", "if", "not", "request_dict", ":", "return", "0", "processed", "=", "0", "while", "self", ".", "rdf_flow", ".", "next_request_to_process", "in", "request_dict", ":", "request", ",", "responses", "=", "request_dict", "[", "self", ".", "rdf_flow", ".", "next_request_to_process", "]", "self", ".", "RunStateMethod", "(", "request", ".", "next_state", ",", "request", ",", "responses", ")", "self", ".", "rdf_flow", ".", "next_request_to_process", "+=", "1", "processed", "+=", "1", "self", ".", "completed_requests", ".", "append", "(", "request", ")", "if", "processed", "and", "self", ".", "IsRunning", "(", ")", "and", "not", "self", ".", "outstanding_requests", ":", "self", ".", "RunStateMethod", "(", "\"End\"", ")", "if", "(", "self", ".", "rdf_flow", ".", "flow_state", "==", "self", ".", "rdf_flow", ".", "FlowState", ".", "RUNNING", "and", "not", "self", ".", "outstanding_requests", ")", ":", "self", ".", "MarkDone", "(", ")", "self", ".", "PersistState", "(", ")", "if", "not", "self", ".", "IsRunning", "(", ")", ":", "# All requests and responses can now be deleted.", "self", ".", "_ClearAllRequestsAndResponses", "(", ")", "return", "processed" ]
Processes all requests that are due to run.

Returns:
  The number of processed requests.
[ "Processes", "all", "requests", "that", "are", "due", "to", "run", "." ]
python
train
33.088235
quantopian/zipline
zipline/pipeline/loaders/blaze/core.py
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/blaze/core.py#L881-L913
def register_column(self,
                    column,
                    expr,
                    deltas=None,
                    checkpoints=None,
                    odo_kwargs=None):
    """Explicitly map a single bound column to a collection of blaze
    expressions. The expressions need to have ``timestamp`` and ``as_of``
    columns.

    Parameters
    ----------
    column : BoundColumn
        The pipeline dataset to map to the given expressions.
    expr : Expr
        The baseline values.
    deltas : Expr, optional
        The deltas for the data.
    checkpoints : Expr, optional
        The forward fill checkpoints for the data.
    odo_kwargs : dict, optional
        The keyword arguments to forward to the odo calls internally.

    See Also
    --------
    :func:`zipline.pipeline.loaders.blaze.from_blaze`
    """
    self._table_expressions[column] = ExprData(
        expr,
        deltas,
        checkpoints,
        odo_kwargs,
    )
[ "def", "register_column", "(", "self", ",", "column", ",", "expr", ",", "deltas", "=", "None", ",", "checkpoints", "=", "None", ",", "odo_kwargs", "=", "None", ")", ":", "self", ".", "_table_expressions", "[", "column", "]", "=", "ExprData", "(", "expr", ",", "deltas", ",", "checkpoints", ",", "odo_kwargs", ",", ")" ]
Explicitly map a single bound column to a collection of blaze expressions.
The expressions need to have ``timestamp`` and ``as_of`` columns.

Parameters
----------
column : BoundColumn
    The pipeline dataset to map to the given expressions.
expr : Expr
    The baseline values.
deltas : Expr, optional
    The deltas for the data.
checkpoints : Expr, optional
    The forward fill checkpoints for the data.
odo_kwargs : dict, optional
    The keyword arguments to forward to the odo calls internally.

See Also
--------
:func:`zipline.pipeline.loaders.blaze.from_blaze`
[ "Explicitly", "map", "a", "single", "bound", "column", "to", "a", "collection", "of", "blaze", "expressions", ".", "The", "expressions", "need", "to", "have", "timestamp", "and", "as_of", "columns", "." ]
python
train
32
samuelcolvin/pydantic
pydantic/class_validators.py
https://github.com/samuelcolvin/pydantic/blob/bff8a1789dfde2c38928cced6640887b53615aa3/pydantic/class_validators.py#L32-L63
def validator(
    *fields: str, pre: bool = False, whole: bool = False, always: bool = False, check_fields: bool = True
) -> Callable[[AnyCallable], classmethod]:
    """
    Decorate methods on the class indicating that they should be used to validate fields
    :param fields: which field(s) the method should be called on
    :param pre: whether or not this validator should be called before the standard validators (else after)
    :param whole: for complex objects (sets, lists etc.) whether to validate individual elements or the whole object
    :param always: whether this method and other validators should be called even if the value is missing
    :param check_fields: whether to check that the fields actually exist on the model
    """
    if not fields:
        raise ConfigError('validator with no fields specified')
    elif isinstance(fields[0], FunctionType):
        raise ConfigError(
            "validators should be used with fields and keyword arguments, not bare. "
            "E.g. usage should be `@validator('<field_name>', ...)`"
        )

    def dec(f: AnyCallable) -> classmethod:
        # avoid validators with duplicated names since without this validators can be overwritten silently
        # which generally isn't the intended behaviour, don't run in ipython - see #312
        if not in_ipython():  # pragma: no branch
            ref = f.__module__ + '.' + f.__qualname__
            if ref in _FUNCS:
                raise ConfigError(f'duplicate validator function "{ref}"')
            _FUNCS.add(ref)
        f_cls = classmethod(f)
        f_cls.__validator_config = fields, Validator(f, pre, whole, always, check_fields)  # type: ignore
        return f_cls

    return dec
[ "def", "validator", "(", "*", "fields", ":", "str", ",", "pre", ":", "bool", "=", "False", ",", "whole", ":", "bool", "=", "False", ",", "always", ":", "bool", "=", "False", ",", "check_fields", ":", "bool", "=", "True", ")", "->", "Callable", "[", "[", "AnyCallable", "]", ",", "classmethod", "]", ":", "if", "not", "fields", ":", "raise", "ConfigError", "(", "'validator with no fields specified'", ")", "elif", "isinstance", "(", "fields", "[", "0", "]", ",", "FunctionType", ")", ":", "raise", "ConfigError", "(", "\"validators should be used with fields and keyword arguments, not bare. \"", "\"E.g. usage should be `@validator('<field_name>', ...)`\"", ")", "def", "dec", "(", "f", ":", "AnyCallable", ")", "->", "classmethod", ":", "# avoid validators with duplicated names since without this validators can be overwritten silently", "# which generally isn't the intended behaviour, don't run in ipython - see #312", "if", "not", "in_ipython", "(", ")", ":", "# pragma: no branch", "ref", "=", "f", ".", "__module__", "+", "'.'", "+", "f", ".", "__qualname__", "if", "ref", "in", "_FUNCS", ":", "raise", "ConfigError", "(", "f'duplicate validator function \"{ref}\"'", ")", "_FUNCS", ".", "add", "(", "ref", ")", "f_cls", "=", "classmethod", "(", "f", ")", "f_cls", ".", "__validator_config", "=", "fields", ",", "Validator", "(", "f", ",", "pre", ",", "whole", ",", "always", ",", "check_fields", ")", "# type: ignore", "return", "f_cls", "return", "dec" ]
Decorate methods on the class indicating that they should be used to validate fields

:param fields: which field(s) the method should be called on
:param pre: whether or not this validator should be called before the standard validators (else after)
:param whole: for complex objects (sets, lists etc.) whether to validate individual elements or the whole object
:param always: whether this method and other validators should be called even if the value is missing
:param check_fields: whether to check that the fields actually exist on the model
[ "Decorate", "methods", "on", "the", "class", "indicating", "that", "they", "should", "be", "used", "to", "validate", "fields", ":", "param", "fields", ":", "which", "field", "(", "s", ")", "the", "method", "should", "be", "called", "on", ":", "param", "pre", ":", "whether", "or", "not", "this", "validator", "should", "be", "called", "before", "the", "standard", "validators", "(", "else", "after", ")", ":", "param", "whole", ":", "for", "complex", "objects", "(", "sets", "lists", "etc", ".", ")", "whether", "to", "validate", "individual", "elements", "or", "the", "whole", "object", ":", "param", "always", ":", "whether", "this", "method", "and", "other", "validators", "should", "be", "called", "even", "if", "the", "value", "is", "missing", ":", "param", "check_fields", ":", "whether", "to", "check", "that", "the", "fields", "actually", "exist", "on", "the", "model" ]
python
train
52.8125
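A hedged usage sketch for the decorator above, following the @validator('<field_name>', ...) form its own error message prescribes; the User model and field are invented.

# Sketch against the pre-v1 pydantic API shown in the record.
from pydantic import BaseModel, validator

class User(BaseModel):
    name: str

    @validator('name', pre=True, always=True)
    def name_not_blank(cls, v):
        if not str(v).strip():
            raise ValueError('name must not be blank')
        return v

User(name=' Ada ')   # passes; a blank name would raise a validation error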
mandiant/ioc_writer
examples/openioc_to_yara/openioc_to_yara.py
https://github.com/mandiant/ioc_writer/blob/712247f3a10bdc2584fa18ac909fc763f71df21a/examples/openioc_to_yara/openioc_to_yara.py#L430-L443
def write_yara(self, output_file):
    """
    Write out yara signatures to a file.
    """
    fout = open(output_file, 'wb')
    fout.write('\n')
    for iocid in self.yara_signatures:
        signature = self.yara_signatures[iocid]
        fout.write(signature)
        fout.write('\n')
    fout.close()
    return True
[ "def", "write_yara", "(", "self", ",", "output_file", ")", ":", "fout", "=", "open", "(", "output_file", ",", "'wb'", ")", "fout", ".", "write", "(", "'\\n'", ")", "for", "iocid", "in", "self", ".", "yara_signatures", ":", "signature", "=", "self", ".", "yara_signatures", "[", "iocid", "]", "fout", ".", "write", "(", "signature", ")", "fout", ".", "write", "(", "'\\n'", ")", "fout", ".", "close", "(", ")", "return", "True" ]
Write out yara signatures to a file.
[ "Write", "out", "yara", "signatures", "to", "a", "file", "." ]
python
train
25.357143
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_common_def.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_common_def.py#L716-L734
def ip_rtm_config_route_static_route_oif_vrf_static_route_oif_name(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    ip = ET.SubElement(config, "ip", xmlns="urn:brocade.com:mgmt:brocade-common-def")
    rtm_config = ET.SubElement(ip, "rtm-config", xmlns="urn:brocade.com:mgmt:brocade-rtm")
    route = ET.SubElement(rtm_config, "route")
    static_route_oif_vrf = ET.SubElement(route, "static-route-oif-vrf")
    static_route_next_vrf_dest_key = ET.SubElement(static_route_oif_vrf, "static-route-next-vrf-dest")
    static_route_next_vrf_dest_key.text = kwargs.pop('static_route_next_vrf_dest')
    next_hop_vrf_key = ET.SubElement(static_route_oif_vrf, "next-hop-vrf")
    next_hop_vrf_key.text = kwargs.pop('next_hop_vrf')
    static_route_oif_type_key = ET.SubElement(static_route_oif_vrf, "static-route-oif-type")
    static_route_oif_type_key.text = kwargs.pop('static_route_oif_type')
    static_route_oif_name = ET.SubElement(static_route_oif_vrf, "static-route-oif-name")
    static_route_oif_name.text = kwargs.pop('static_route_oif_name')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "ip_rtm_config_route_static_route_oif_vrf_static_route_oif_name", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "ip", "=", "ET", ".", "SubElement", "(", "config", ",", "\"ip\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-common-def\"", ")", "rtm_config", "=", "ET", ".", "SubElement", "(", "ip", ",", "\"rtm-config\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-rtm\"", ")", "route", "=", "ET", ".", "SubElement", "(", "rtm_config", ",", "\"route\"", ")", "static_route_oif_vrf", "=", "ET", ".", "SubElement", "(", "route", ",", "\"static-route-oif-vrf\"", ")", "static_route_next_vrf_dest_key", "=", "ET", ".", "SubElement", "(", "static_route_oif_vrf", ",", "\"static-route-next-vrf-dest\"", ")", "static_route_next_vrf_dest_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'static_route_next_vrf_dest'", ")", "next_hop_vrf_key", "=", "ET", ".", "SubElement", "(", "static_route_oif_vrf", ",", "\"next-hop-vrf\"", ")", "next_hop_vrf_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'next_hop_vrf'", ")", "static_route_oif_type_key", "=", "ET", ".", "SubElement", "(", "static_route_oif_vrf", ",", "\"static-route-oif-type\"", ")", "static_route_oif_type_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'static_route_oif_type'", ")", "static_route_oif_name", "=", "ET", ".", "SubElement", "(", "static_route_oif_vrf", ",", "\"static-route-oif-name\"", ")", "static_route_oif_name", ".", "text", "=", "kwargs", ".", "pop", "(", "'static_route_oif_name'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
64.263158
foremast/foremast
src/foremast/runner.py
https://github.com/foremast/foremast/blob/fb70f29b8ce532f061685a17d120486e47b215ba/src/foremast/runner.py#L129-L132
def create_archaius(self):
    """Create S3 bucket for Archaius."""
    utils.banner("Creating S3")
    s3.init_properties(env=self.env, app=self.app)
[ "def", "create_archaius", "(", "self", ")", ":", "utils", ".", "banner", "(", "\"Creating S3\"", ")", "s3", ".", "init_properties", "(", "env", "=", "self", ".", "env", ",", "app", "=", "self", ".", "app", ")" ]
Create S3 bucket for Archaius.
[ "Create", "S3", "bucket", "for", "Archaius", "." ]
python
train
39.75
kivy/python-for-android
pythonforandroid/bootstraps/pygame/build/buildlib/jinja2.egg/jinja2/compiler.py
https://github.com/kivy/python-for-android/blob/8e0e8056bc22e4d5bd3398a6b0301f38ff167933/pythonforandroid/bootstraps/pygame/build/buildlib/jinja2.egg/jinja2/compiler.py#L188-L194
def copy(self):
    """Create a copy of the current one."""
    rv = object.__new__(self.__class__)
    rv.__dict__.update(self.__dict__)
    rv.identifiers = object.__new__(self.identifiers.__class__)
    rv.identifiers.__dict__.update(self.identifiers.__dict__)
    return rv
[ "def", "copy", "(", "self", ")", ":", "rv", "=", "object", ".", "__new__", "(", "self", ".", "__class__", ")", "rv", ".", "__dict__", ".", "update", "(", "self", ".", "__dict__", ")", "rv", ".", "identifiers", "=", "object", ".", "__new__", "(", "self", ".", "identifiers", ".", "__class__", ")", "rv", ".", "identifiers", ".", "__dict__", ".", "update", "(", "self", ".", "identifiers", ".", "__dict__", ")", "return", "rv" ]
Create a copy of the current one.
[ "Create", "a", "copy", "of", "the", "current", "one", "." ]
python
train
42.142857
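The copy above clones a compiler frame without running __init__; a self-contained sketch of the same object.__new__ plus __dict__.update idiom, with invented stand-in classes instead of the jinja2 types.

class Identifiers(object):
    def __init__(self):
        self.declared = set()

class Frame(object):
    def __init__(self):
        self.identifiers = Identifiers()

    def copy(self):
        rv = object.__new__(self.__class__)       # skip __init__
        rv.__dict__.update(self.__dict__)         # shallow attribute copy
        rv.identifiers = object.__new__(self.identifiers.__class__)
        rv.identifiers.__dict__.update(self.identifiers.__dict__)
        return rv

f = Frame()
g = f.copy()
g.identifiers.declared = {'x'}                    # rebind on the copy only
print(f.identifiers.declared)                     # set() -- original intact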
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/util/utility.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/util/utility.py#L141-L151
def split_action_id (id):
    """ Splits an id in the toolset and specific rule parts. E.g.
        'gcc.compile.c++' returns ('gcc', 'compile.c++')
    """
    assert isinstance(id, basestring)
    split = id.split ('.', 1)
    toolset = split [0]
    name = ''
    if len (split) > 1:
        name = split [1]
    return (toolset, name)
[ "def", "split_action_id", "(", "id", ")", ":", "assert", "isinstance", "(", "id", ",", "basestring", ")", "split", "=", "id", ".", "split", "(", "'.'", ",", "1", ")", "toolset", "=", "split", "[", "0", "]", "name", "=", "''", "if", "len", "(", "split", ")", ">", "1", ":", "name", "=", "split", "[", "1", "]", "return", "(", "toolset", ",", "name", ")" ]
Splits an id in the toolset and specific rule parts. E.g. 'gcc.compile.c++' returns ('gcc', 'compile.c++')
[ "Splits", "an", "id", "in", "the", "toolset", "and", "specific", "rule", "parts", ".", "E", ".", "g", ".", "gcc", ".", "compile", ".", "c", "++", "returns", "(", "gcc", "compile", ".", "c", "++", ")" ]
python
train
29.818182
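The docstring's own example, exercised directly (Python 2, since the assert relies on basestring); assumes split_action_id from the record above is in scope.

print split_action_id('gcc.compile.c++')   # ('gcc', 'compile.c++')
print split_action_id('gcc')               # ('gcc', '')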
hubo1016/vlcp
vlcp/utils/http.py
https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/utils/http.py#L632-L689
def routeevent(self, path, routinemethod, container = None, host = None, vhost = None, method = [b'GET', b'HEAD']):
    '''
    Route specified path to a routine factory

    :param path: path to match, can be a regular expression

    :param routinemethod: factory function routinemethod(event), event is the HttpRequestEvent

    :param container: routine container. If None, default to self for bound method, or event.connection if not

    :param host: if specified, only response to request to specified host

    :param vhost: if specified, only response to request to specified vhost.
                  If not specified, response to dispatcher default vhost.

    :param method: if specified, response to specified methods
    '''
    regm = re.compile(path + b'$')
    if vhost is None:
        vhost = self.vhost
    if container is None:
        container = getattr(routinemethod, '__self__', None)
    def ismatch(event):
        # Check vhost
        if vhost is not None and getattr(event.createby, 'vhost', '') != vhost:
            return False
        # First parse the path
        # RFC said we should accept absolute path
        psplit = urlsplit(event.path)
        if psplit.path[:1] != b'/':
            # For security reason, ignore unrecognized path
            return False
        if psplit.netloc and host is not None and host != psplit.netloc:
            # Maybe a proxy request, ignore it
            return False
        if getattr(event.createby, 'unquoteplus', True):
            realpath = unquote_plus_to_bytes(psplit.path)
        else:
            realpath = unquote_to_bytes(psplit.path)
        m = regm.match(realpath)
        if m is None:
            return False
        event.realpath = realpath
        event.querystring = psplit.query
        event.path_match = m
        return True
    def func(event, scheduler):
        try:
            if event.canignore:
                # Already processed
                return
            event.canignore = True
            c = event.connection if container is None else container
            c.subroutine(routinemethod(event), False)
        except Exception:
            pass
    for m in method:
        self.registerHandler(HttpRequestEvent.createMatcher(host, None, m, _ismatch = ismatch), func)
[ "def", "routeevent", "(", "self", ",", "path", ",", "routinemethod", ",", "container", "=", "None", ",", "host", "=", "None", ",", "vhost", "=", "None", ",", "method", "=", "[", "b'GET'", ",", "b'HEAD'", "]", ")", ":", "regm", "=", "re", ".", "compile", "(", "path", "+", "b'$'", ")", "if", "vhost", "is", "None", ":", "vhost", "=", "self", ".", "vhost", "if", "container", "is", "None", ":", "container", "=", "getattr", "(", "routinemethod", ",", "'__self__'", ",", "None", ")", "def", "ismatch", "(", "event", ")", ":", "# Check vhost", "if", "vhost", "is", "not", "None", "and", "getattr", "(", "event", ".", "createby", ",", "'vhost'", ",", "''", ")", "!=", "vhost", ":", "return", "False", "# First parse the path", "# RFC said we should accept absolute path", "psplit", "=", "urlsplit", "(", "event", ".", "path", ")", "if", "psplit", ".", "path", "[", ":", "1", "]", "!=", "b'/'", ":", "# For security reason, ignore unrecognized path", "return", "False", "if", "psplit", ".", "netloc", "and", "host", "is", "not", "None", "and", "host", "!=", "psplit", ".", "netloc", ":", "# Maybe a proxy request, ignore it", "return", "False", "if", "getattr", "(", "event", ".", "createby", ",", "'unquoteplus'", ",", "True", ")", ":", "realpath", "=", "unquote_plus_to_bytes", "(", "psplit", ".", "path", ")", "else", ":", "realpath", "=", "unquote_to_bytes", "(", "psplit", ".", "path", ")", "m", "=", "regm", ".", "match", "(", "realpath", ")", "if", "m", "is", "None", ":", "return", "False", "event", ".", "realpath", "=", "realpath", "event", ".", "querystring", "=", "psplit", ".", "query", "event", ".", "path_match", "=", "m", "return", "True", "def", "func", "(", "event", ",", "scheduler", ")", ":", "try", ":", "if", "event", ".", "canignore", ":", "# Already processed", "return", "event", ".", "canignore", "=", "True", "c", "=", "event", ".", "connection", "if", "container", "is", "None", "else", "container", "c", ".", "subroutine", "(", "routinemethod", "(", "event", ")", ",", "False", ")", "except", "Exception", ":", "pass", "for", "m", "in", "method", ":", "self", ".", "registerHandler", "(", "HttpRequestEvent", ".", "createMatcher", "(", "host", ",", "None", ",", "m", ",", "_ismatch", "=", "ismatch", ")", ",", "func", ")" ]
Route specified path to a routine factory

:param path: path to match, can be a regular expression

:param routinemethod: factory function routinemethod(event), event is the HttpRequestEvent

:param container: routine container. If None, default to self for bound method, or event.connection if not

:param host: if specified, only response to request to specified host

:param vhost: if specified, only response to request to specified vhost. If not specified, response to dispatcher default vhost.

:param method: if specified, response to specified methods
[ "Route", "specified", "path", "to", "a", "routine", "factory", ":", "param", "path", ":", "path", "to", "match", "can", "be", "a", "regular", "expression", ":", "param", "routinemethod", ":", "factory", "function", "routinemethod", "(", "event", ")", "event", "is", "the", "HttpRequestEvent", ":", "param", "container", ":", "routine", "container", ".", "If", "None", "default", "to", "self", "for", "bound", "method", "or", "event", ".", "connection", "if", "not", ":", "param", "host", ":", "if", "specified", "only", "response", "to", "request", "to", "specified", "host", ":", "param", "vhost", ":", "if", "specified", "only", "response", "to", "request", "to", "specified", "vhost", ".", "If", "not", "specified", "response", "to", "dispatcher", "default", "vhost", ".", ":", "param", "method", ":", "if", "specified", "response", "to", "specified", "methods" ]
python
train
42.87931
ewiger/mlab
src/mlab/mlabwrap.py
https://github.com/ewiger/mlab/blob/72a98adf6499f548848ad44c604f74d68f07fe4f/src/mlab/mlabwrap.py#L651-L664
def saveVarsInMat(filename, varNamesStr, outOf=None, **opts):
    """Hacky convenience function to dump a couple of python variables in a .mat
    file. See `awmstools.saveVars`.
    """
    from mlabwrap import mlab
    filename, varnames, outOf = __saveVarsHelper(
        filename, varNamesStr, outOf, '.mat', **opts)
    try:
        for varname in varnames:
            mlab._set(varname, outOf[varname])
        mlab._do("save('%s','%s')" % (filename, "', '".join(varnames)), nout=0)
    finally:
        assert varnames
        mlab._do("clear('%s')" % "', '".join(varnames), nout=0)
[ "def", "saveVarsInMat", "(", "filename", ",", "varNamesStr", ",", "outOf", "=", "None", ",", "*", "*", "opts", ")", ":", "from", "mlabwrap", "import", "mlab", "filename", ",", "varnames", ",", "outOf", "=", "__saveVarsHelper", "(", "filename", ",", "varNamesStr", ",", "outOf", ",", "'.mat'", ",", "*", "*", "opts", ")", "try", ":", "for", "varname", "in", "varnames", ":", "mlab", ".", "_set", "(", "varname", ",", "outOf", "[", "varname", "]", ")", "mlab", ".", "_do", "(", "\"save('%s','%s')\"", "%", "(", "filename", ",", "\"', '\"", ".", "join", "(", "varnames", ")", ")", ",", "nout", "=", "0", ")", "finally", ":", "assert", "varnames", "mlab", ".", "_do", "(", "\"clear('%s')\"", "%", "\"', '\"", ".", "join", "(", "varnames", ")", ",", "nout", "=", "0", ")" ]
Hacky convenience function to dump a couple of python variables in a .mat file. See `awmstools.saveVars`.
[ "Hacky", "convenience", "function", "to", "dump", "a", "couple", "of", "python", "variables", "in", "a", ".", "mat", "file", ".", "See", "awmstools", ".", "saveVars", "." ]
python
train
41.428571
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/tools/mavplayback.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/tools/mavplayback.py#L37-L41
def LoadImage(filename):
    '''return an image from the images/ directory'''
    app_dir = os.path.dirname(os.path.realpath(__file__))
    path = os.path.join(app_dir, 'images', filename)
    return Tkinter.PhotoImage(file=path)
[ "def", "LoadImage", "(", "filename", ")", ":", "app_dir", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "realpath", "(", "__file__", ")", ")", "path", "=", "os", ".", "path", ".", "join", "(", "app_dir", ",", "'images'", ",", "filename", ")", "return", "Tkinter", ".", "PhotoImage", "(", "file", "=", "path", ")" ]
return an image from the images/ directory
[ "return", "an", "image", "from", "the", "images", "/", "directory" ]
python
train
45
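Hedged usage for the helper above: PhotoImage needs a Tk root to exist first, and an images/ directory containing the named file is assumed next to the module.

import Tkinter

root = Tkinter.Tk()
img = LoadImage('play.gif')            # hypothetical file name
Tkinter.Label(root, image=img).pack()  # keep img referenced to avoid GC
root.mainloop()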
markuskiller/textblob-de
textblob_de/ext/_pattern/text/tree.py
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/tree.py#L1064-L1068
def from_xml(cls, xml):
    """ Returns a new Text from the given XML string.
    """
    s = parse_string(xml)
    return Sentence(s.split("\n")[0], token=s.tags, language=s.language)
[ "def", "from_xml", "(", "cls", ",", "xml", ")", ":", "s", "=", "parse_string", "(", "xml", ")", "return", "Sentence", "(", "s", ".", "split", "(", "\"\\n\"", ")", "[", "0", "]", ",", "token", "=", "s", ".", "tags", ",", "language", "=", "s", ".", "language", ")" ]
Returns a new Text from the given XML string.
[ "Returns", "a", "new", "Text", "from", "the", "given", "XML", "string", "." ]
python
train
39.2
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_aaa.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_aaa.py#L722-L738
def rule_command_cmdlist_interface_h_interface_ge_leaf_interface_gigabitethernet_leaf(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    rule = ET.SubElement(config, "rule", xmlns="urn:brocade.com:mgmt:brocade-aaa")
    index_key = ET.SubElement(rule, "index")
    index_key.text = kwargs.pop('index')
    command = ET.SubElement(rule, "command")
    cmdlist = ET.SubElement(command, "cmdlist")
    interface_h = ET.SubElement(cmdlist, "interface-h")
    interface_ge_leaf = ET.SubElement(interface_h, "interface-ge-leaf")
    interface = ET.SubElement(interface_ge_leaf, "interface")
    gigabitethernet_leaf = ET.SubElement(interface, "gigabitethernet-leaf")
    gigabitethernet_leaf.text = kwargs.pop('gigabitethernet_leaf')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "rule_command_cmdlist_interface_h_interface_ge_leaf_interface_gigabitethernet_leaf", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "rule", "=", "ET", ".", "SubElement", "(", "config", ",", "\"rule\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-aaa\"", ")", "index_key", "=", "ET", ".", "SubElement", "(", "rule", ",", "\"index\"", ")", "index_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'index'", ")", "command", "=", "ET", ".", "SubElement", "(", "rule", ",", "\"command\"", ")", "cmdlist", "=", "ET", ".", "SubElement", "(", "command", ",", "\"cmdlist\"", ")", "interface_h", "=", "ET", ".", "SubElement", "(", "cmdlist", ",", "\"interface-h\"", ")", "interface_ge_leaf", "=", "ET", ".", "SubElement", "(", "interface_h", ",", "\"interface-ge-leaf\"", ")", "interface", "=", "ET", ".", "SubElement", "(", "interface_ge_leaf", ",", "\"interface\"", ")", "gigabitethernet_leaf", "=", "ET", ".", "SubElement", "(", "interface", ",", "\"gigabitethernet-leaf\"", ")", "gigabitethernet_leaf", ".", "text", "=", "kwargs", ".", "pop", "(", "'gigabitethernet_leaf'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
52.529412
NeilGirdhar/rectangle
rectangle/rectangle.py
https://github.com/NeilGirdhar/rectangle/blob/b0ca25e199cf6e331aef7fd99bda5ba10ae98753/rectangle/rectangle.py#L53-L67
def transformed(self, t):
    """
    Transforms an m-dimensional Rect using t, an nxn matrix that can
    transform vectors in the form: [x, y, z, …, 1]. The Rect is padded
    to n dimensions.
    """
    assert t.shape[0] == t.shape[1]
    extra_dimensions = t.shape[0] - self.dimensions - 1

    def transform(a):
        return t.dot(np.concatenate(
            (a, [0] * extra_dimensions, [1]), axis=0
        ))[:self.dimensions]

    return Rect(transform(self.mins), transform(self.maxes))
[ "def", "transformed", "(", "self", ",", "t", ")", ":", "assert", "t", ".", "shape", "[", "0", "]", "==", "t", ".", "shape", "[", "1", "]", "extra_dimensions", "=", "t", ".", "shape", "[", "0", "]", "-", "self", ".", "dimensions", "-", "1", "def", "transform", "(", "a", ")", ":", "return", "t", ".", "dot", "(", "np", ".", "concatenate", "(", "(", "a", ",", "[", "0", "]", "*", "extra_dimensions", ",", "[", "1", "]", ")", ",", "axis", "=", "0", ")", ")", "[", ":", "self", ".", "dimensions", "]", "return", "Rect", "(", "transform", "(", "self", ".", "mins", ")", ",", "transform", "(", "self", ".", "maxes", ")", ")" ]
Transforms an m-dimensional Rect using t, an nxn matrix that can transform vectors in the form: [x, y, z, …, 1]. The Rect is padded to n dimensions.
[ "Transforms", "an", "m", "-", "dimensional", "Rect", "using", "t", "an", "nxn", "matrix", "that", "can", "transform", "vectors", "in", "the", "form", ":", "[", "x", "y", "z", "…", "1", "]", ".", "The", "Rect", "is", "padded", "to", "n", "dimensions", "." ]
python
train
36.466667
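A worked numeric check of the pad-then-transform idea above, written as a standalone function so it runs without the Rect class; the 4x4 translation matrix is an assumption.

import numpy as np

def transform_point(t, a, dimensions):
    # Pad [x, y] to [x, y, 0, 1], apply t, then keep the first two entries.
    extra = t.shape[0] - dimensions - 1
    return t.dot(np.concatenate((a, [0] * extra, [1]), axis=0))[:dimensions]

t = np.eye(4)
t[:2, 3] = [10.0, 20.0]                              # translate x, y
print(transform_point(t, np.array([1.0, 2.0]), 2))   # [11. 22.]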
SHTOOLS/SHTOOLS
pyshtools/shclasses/shcoeffsgrid.py
https://github.com/SHTOOLS/SHTOOLS/blob/9a115cf83002df2ddec6b7f41aeb6be688e285de/pyshtools/shclasses/shcoeffsgrid.py#L2881-L2904
def _expand(self, normalization, csphase, **kwargs):
    """Expand the grid into real spherical harmonics."""
    if normalization.lower() == '4pi':
        norm = 1
    elif normalization.lower() == 'schmidt':
        norm = 2
    elif normalization.lower() == 'unnorm':
        norm = 3
    elif normalization.lower() == 'ortho':
        norm = 4
    else:
        raise ValueError(
            "The normalization must be '4pi', 'ortho', 'schmidt', " +
            "or 'unnorm'. Input value was {:s}."
            .format(repr(normalization))
            )

    cilm = _shtools.SHExpandDH(self.data, norm=norm, csphase=csphase,
                               sampling=self.sampling, **kwargs)
    coeffs = SHCoeffs.from_array(cilm,
                                 normalization=normalization.lower(),
                                 csphase=csphase, copy=False)
    return coeffs
[ "def", "_expand", "(", "self", ",", "normalization", ",", "csphase", ",", "*", "*", "kwargs", ")", ":", "if", "normalization", ".", "lower", "(", ")", "==", "'4pi'", ":", "norm", "=", "1", "elif", "normalization", ".", "lower", "(", ")", "==", "'schmidt'", ":", "norm", "=", "2", "elif", "normalization", ".", "lower", "(", ")", "==", "'unnorm'", ":", "norm", "=", "3", "elif", "normalization", ".", "lower", "(", ")", "==", "'ortho'", ":", "norm", "=", "4", "else", ":", "raise", "ValueError", "(", "\"The normalization must be '4pi', 'ortho', 'schmidt', \"", "+", "\"or 'unnorm'. Input value was {:s}.\"", ".", "format", "(", "repr", "(", "normalization", ")", ")", ")", "cilm", "=", "_shtools", ".", "SHExpandDH", "(", "self", ".", "data", ",", "norm", "=", "norm", ",", "csphase", "=", "csphase", ",", "sampling", "=", "self", ".", "sampling", ",", "*", "*", "kwargs", ")", "coeffs", "=", "SHCoeffs", ".", "from_array", "(", "cilm", ",", "normalization", "=", "normalization", ".", "lower", "(", ")", ",", "csphase", "=", "csphase", ",", "copy", "=", "False", ")", "return", "coeffs" ]
Expand the grid into real spherical harmonics.
[ "Expand", "the", "grid", "into", "real", "spherical", "harmonics", "." ]
python
train
40.791667
arista-eosplus/pyeapi
pyeapi/api/interfaces.py
https://github.com/arista-eosplus/pyeapi/blob/96a74faef1fe3bd79c4e900aed29c9956a0587d6/pyeapi/api/interfaces.py#L672-L711
def set_members(self, name, members, mode=None):
    """Configures the array of member interfaces for the Port-Channel

    Args:
        name(str): The Port-Channel interface name to configure the member
            interfaces

        members(list): The list of Ethernet interfaces that should be
            member interfaces

        mode(str): The LACP mode to configure the member interfaces to.
            Valid values are 'on', 'passive', 'active'. When there are
            existing channel-group members and their lacp mode differs
            from this attribute, all of those members will be removed and
            then re-added using the specified lacp mode. If this attribute
            is omitted, the existing lacp mode will be used for new
            member additions.

    Returns:
        True if the operation succeeds otherwise False
    """
    commands = list()
    grpid = re.search(r'(\d+)', name).group()

    current_members = self.get_members(name)
    lacp_mode = self.get_lacp_mode(name)
    if mode and mode != lacp_mode:
        lacp_mode = mode
        self.set_lacp_mode(grpid, lacp_mode)

    # remove members from the current port-channel interface
    for member in set(current_members).difference(members):
        commands.append('interface %s' % member)
        commands.append('no channel-group %s' % grpid)

    # add new member interfaces to the port-channel interface
    for member in set(members).difference(current_members):
        commands.append('interface %s' % member)
        commands.append('channel-group %s mode %s' % (grpid, lacp_mode))

    return self.configure(commands) if commands else True
[ "def", "set_members", "(", "self", ",", "name", ",", "members", ",", "mode", "=", "None", ")", ":", "commands", "=", "list", "(", ")", "grpid", "=", "re", ".", "search", "(", "r'(\\d+)'", ",", "name", ")", ".", "group", "(", ")", "current_members", "=", "self", ".", "get_members", "(", "name", ")", "lacp_mode", "=", "self", ".", "get_lacp_mode", "(", "name", ")", "if", "mode", "and", "mode", "!=", "lacp_mode", ":", "lacp_mode", "=", "mode", "self", ".", "set_lacp_mode", "(", "grpid", ",", "lacp_mode", ")", "# remove members from the current port-channel interface", "for", "member", "in", "set", "(", "current_members", ")", ".", "difference", "(", "members", ")", ":", "commands", ".", "append", "(", "'interface %s'", "%", "member", ")", "commands", ".", "append", "(", "'no channel-group %s'", "%", "grpid", ")", "# add new member interfaces to the port-channel interface", "for", "member", "in", "set", "(", "members", ")", ".", "difference", "(", "current_members", ")", ":", "commands", ".", "append", "(", "'interface %s'", "%", "member", ")", "commands", ".", "append", "(", "'channel-group %s mode %s'", "%", "(", "grpid", ",", "lacp_mode", ")", ")", "return", "self", ".", "configure", "(", "commands", ")", "if", "commands", "else", "True" ]
Configures the array of member interfaces for the Port-Channel

Args:
    name(str): The Port-Channel interface name to configure the member
        interfaces

    members(list): The list of Ethernet interfaces that should be
        member interfaces

    mode(str): The LACP mode to configure the member interfaces to.
        Valid values are 'on', 'passive', 'active'. When there are
        existing channel-group members and their lacp mode differs from
        this attribute, all of those members will be removed and then
        re-added using the specified lacp mode. If this attribute is
        omitted, the existing lacp mode will be used for new member
        additions.

Returns:
    True if the operation succeeds otherwise False
[ "Configures", "the", "array", "of", "member", "interfaces", "for", "the", "Port", "-", "Channel" ]
python
train
43.6
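A hedged usage sketch via pyeapi's documented connect flow; the profile name is a placeholder, and routing the call through the top-level interfaces API is an assumption about pyeapi's dispatch.

import pyeapi

node = pyeapi.connect_to('veos01')   # hypothetical profile in ~/.eapi.conf
intf = node.api('interfaces')
intf.set_members('Port-Channel1', ['Ethernet1', 'Ethernet2'],
                 mode='active')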
JukeboxPipeline/jukebox-core
src/jukeboxcore/launcher.py
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/launcher.py#L116-L135
def list(self, args, unknown):
    """List all addons that can be launched

    :param args: arguments from the launch parser
    :type args: Namespace
    :param unknown: list of unknown arguments
    :type unknown: list
    :returns: None
    :rtype: None
    :raises: None
    """
    pm = plugins.PluginManager.get()
    plugs = pm.get_all_plugins()
    if not plugs:
        print "No standalone addons found!"
        return
    print "Addons:"
    for p in plugs:
        if isinstance(p, plugins.JB_StandalonePlugin):
            print "\t%s" % p.__class__.__name__
[ "def", "list", "(", "self", ",", "args", ",", "unknown", ")", ":", "pm", "=", "plugins", ".", "PluginManager", ".", "get", "(", ")", "plugs", "=", "pm", ".", "get_all_plugins", "(", ")", "if", "not", "plugs", ":", "print", "\"No standalone addons found!\"", "return", "print", "\"Addons:\"", "for", "p", "in", "plugs", ":", "if", "isinstance", "(", "p", ",", "plugins", ".", "JB_StandalonePlugin", ")", ":", "print", "\"\\t%s\"", "%", "p", ".", "__class__", ".", "__name__" ]
List all addons that can be launched

:param args: arguments from the launch parser
:type args: Namespace
:param unknown: list of unknown arguments
:type unknown: list
:returns: None
:rtype: None
:raises: None
[ "List", "all", "addons", "that", "can", "be", "launched" ]
python
train
31.3
kata198/AdvancedHTMLParser
AdvancedHTMLParser/Parser.py
https://github.com/kata198/AdvancedHTMLParser/blob/06aeea5d8e2ea86e155aae0fc237623d3e9b7f9d/AdvancedHTMLParser/Parser.py#L1254-L1274
def getElementsByClassName(self, className, root='root', useIndex=True):
    '''
        getElementsByClassName - Searches and returns all elements containing a given class name.

        @param className <str> - A one-word class name

        @param root <AdvancedTag/'root'> - Search starting at a specific node, if provided.
            if string 'root', the root of the parsed tree will be used.

        @param useIndex <bool> If useIndex is True and class names are indexed [see constructor]
            only the index will be used. Otherwise a full search is performed.
    '''
    (root, isFromRoot) = self._handleRootArg(root)

    if useIndex is True and self.indexClassNames is True:
        elements = self._classNameMap.get(className, [])
        if isFromRoot is False:
            _hasTagInParentLine = self._hasTagInParentLine
            elements = [x for x in elements if _hasTagInParentLine(x, root)]
        return TagCollection(elements)

    return AdvancedHTMLParser.getElementsByClassName(self, className, root)
[ "def", "getElementsByClassName", "(", "self", ",", "className", ",", "root", "=", "'root'", ",", "useIndex", "=", "True", ")", ":", "(", "root", ",", "isFromRoot", ")", "=", "self", ".", "_handleRootArg", "(", "root", ")", "if", "useIndex", "is", "True", "and", "self", ".", "indexClassNames", "is", "True", ":", "elements", "=", "self", ".", "_classNameMap", ".", "get", "(", "className", ",", "[", "]", ")", "if", "isFromRoot", "is", "False", ":", "_hasTagInParentLine", "=", "self", ".", "_hasTagInParentLine", "elements", "=", "[", "x", "for", "x", "in", "elements", "if", "_hasTagInParentLine", "(", "x", ",", "root", ")", "]", "return", "TagCollection", "(", "elements", ")", "return", "AdvancedHTMLParser", ".", "getElementsByClassName", "(", "self", ",", "className", ",", "root", ")" ]
getElementsByClassName - Searches and returns all elements containing a given class name.

@param className <str> - A one-word class name

@param root <AdvancedTag/'root'> - Search starting at a specific node, if provided. if string 'root', the root of the parsed tree will be used.

@param useIndex <bool> If useIndex is True and class names are indexed [see constructor] only the index will be used. Otherwise a full search is performed.
[ "getElementsByClassName", "-", "Searches", "and", "returns", "all", "elements", "containing", "a", "given", "class", "name", "." ]
python
train
51.190476
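A hedged sketch; the HTML snippet is invented, and the IndexedAdvancedHTMLParser class name is an assumption about which parser variant owns the indexed method above.

from AdvancedHTMLParser import IndexedAdvancedHTMLParser

parser = IndexedAdvancedHTMLParser()
parser.parseStr('<div class="note">a</div><div class="note">b</div>')
for tag in parser.getElementsByClassName('note'):
    print(tag.innerHTML)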
chemlab/chemlab
chemlab/core/base.py
https://github.com/chemlab/chemlab/blob/c8730966316d101e24f39ac3b96b51282aba0abe/chemlab/core/base.py#L153-L162
def copy_from(self, other):
    """Copy properties from another ChemicalEntity
    """
    # Need to copy all attributes, fields, relations
    self.__attributes__ = {k: v.copy() for k, v in other.__attributes__.items()}
    self.__fields__ = {k: v.copy() for k, v in other.__fields__.items()}
    self.__relations__ = {k: v.copy() for k, v in other.__relations__.items()}
    self.maps = {k: m.copy() for k, m in other.maps.items()}
    self.dimensions = other.dimensions.copy()
[ "def", "copy_from", "(", "self", ",", "other", ")", ":", "# Need to copy all attributes, fields, relations", "self", ".", "__attributes__", "=", "{", "k", ":", "v", ".", "copy", "(", ")", "for", "k", ",", "v", "in", "other", ".", "__attributes__", ".", "items", "(", ")", "}", "self", ".", "__fields__", "=", "{", "k", ":", "v", ".", "copy", "(", ")", "for", "k", ",", "v", "in", "other", ".", "__fields__", ".", "items", "(", ")", "}", "self", ".", "__relations__", "=", "{", "k", ":", "v", ".", "copy", "(", ")", "for", "k", ",", "v", "in", "other", ".", "__relations__", ".", "items", "(", ")", "}", "self", ".", "maps", "=", "{", "k", ":", "m", ".", "copy", "(", ")", "for", "k", ",", "m", "in", "other", ".", "maps", ".", "items", "(", ")", "}", "self", ".", "dimensions", "=", "other", ".", "dimensions", ".", "copy", "(", ")" ]
Copy properties from another ChemicalEntity
[ "Copy", "properties", "from", "another", "ChemicalEntity" ]
python
train
51.1
bcbio/bcbio-nextgen
bcbio/heterogeneity/bubbletree.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/bubbletree.py#L162-L172
def max_normal_germline_depth(in_file, params, somatic_info):
    """Calculate threshold for excluding potential heterozygotes based on normal depth.
    """
    bcf_in = pysam.VariantFile(in_file)
    depths = []
    for rec in bcf_in:
        stats = _is_possible_loh(rec, bcf_in, params, somatic_info)
        if tz.get_in(["normal", "depth"], stats):
            depths.append(tz.get_in(["normal", "depth"], stats))
    if depths:
        return np.median(depths) * NORMAL_FILTER_PARAMS["max_depth_percent"]
[ "def", "max_normal_germline_depth", "(", "in_file", ",", "params", ",", "somatic_info", ")", ":", "bcf_in", "=", "pysam", ".", "VariantFile", "(", "in_file", ")", "depths", "=", "[", "]", "for", "rec", "in", "bcf_in", ":", "stats", "=", "_is_possible_loh", "(", "rec", ",", "bcf_in", ",", "params", ",", "somatic_info", ")", "if", "tz", ".", "get_in", "(", "[", "\"normal\"", ",", "\"depth\"", "]", ",", "stats", ")", ":", "depths", ".", "append", "(", "tz", ".", "get_in", "(", "[", "\"normal\"", ",", "\"depth\"", "]", ",", "stats", ")", ")", "if", "depths", ":", "return", "np", ".", "median", "(", "depths", ")", "*", "NORMAL_FILTER_PARAMS", "[", "\"max_depth_percent\"", "]" ]
Calculate threshold for excluding potential heterozygotes based on normal depth.
[ "Calculate", "threshold", "for", "excluding", "potential", "heterozygotes", "based", "on", "normal", "depth", "." ]
python
train
45.545455
docker/docker-py
docker/api/image.py
https://github.com/docker/docker-py/blob/613d6aad83acc9931ff2ecfd6a6c7bd8061dc125/docker/api/image.py#L211-L225
def import_image_from_image(self, image, repository=None, tag=None,
                            changes=None):
    """
    Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but
    only supports importing from another image, like the ``FROM``
    Dockerfile parameter.

    Args:
        image (str): Image name to import from
        repository (str): The repository to create
        tag (str): The tag to apply
    """
    return self.import_image(
        image=image, repository=repository, tag=tag, changes=changes
    )
[ "def", "import_image_from_image", "(", "self", ",", "image", ",", "repository", "=", "None", ",", "tag", "=", "None", ",", "changes", "=", "None", ")", ":", "return", "self", ".", "import_image", "(", "image", "=", "image", ",", "repository", "=", "repository", ",", "tag", "=", "tag", ",", "changes", "=", "changes", ")" ]
Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only
supports importing from another image, like the ``FROM`` Dockerfile
parameter.

Args:
    image (str): Image name to import from
    repository (str): The repository to create
    tag (str): The tag to apply
[ "Like", ":", "py", ":", "meth", ":", "~docker", ".", "api", ".", "image", ".", "ImageApiMixin", ".", "import_image", "but", "only", "supports", "importing", "from", "another", "image", "like", "the", "FROM", "Dockerfile", "parameter", "." ]
python
train
38.4
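Hedged usage: ImageApiMixin methods surface on docker-py's low-level APIClient; the socket URL and image names are placeholders.

import docker

client = docker.APIClient(base_url='unix://var/run/docker.sock')
client.import_image_from_image('alpine:3.9',
                               repository='myorg/alpine',
                               tag='imported')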
desbma/sacad
sacad/sources/lastfm.py
https://github.com/desbma/sacad/blob/a7a010c4d9618a0c90927f1acb530101ca05fac4/sacad/sources/lastfm.py#L55-L96
async def parseResults(self, api_data):
    """ See CoverSource.parseResults. """
    results = []

    # get xml results list
    xml_text = api_data.decode("utf-8")
    xml_root = xml.etree.ElementTree.fromstring(xml_text)
    status = xml_root.get("status")
    if status != "ok":
        raise Exception("Unexpected Last.fm response status: %s" % (status))
    img_elements = xml_root.findall("album/image")

    # build results from xml
    thumbnail_url = None
    thumbnail_size = None
    for img_element in img_elements:
        img_url = img_element.text
        if not img_url:
            # last.fm returns empty image tag for size it does not have
            continue
        lfm_size = img_element.get("size")
        if lfm_size == "mega":
            check_metadata = CoverImageMetadata.SIZE
        else:
            check_metadata = CoverImageMetadata.NONE
        try:
            size = __class__.SIZES[lfm_size]
        except KeyError:
            continue
        if (size[0] <= MAX_THUMBNAIL_SIZE) and ((thumbnail_size is None) or (size[0] < thumbnail_size)):
            thumbnail_url = img_url
            thumbnail_size = size[0]
        format = os.path.splitext(img_url)[1][1:].lower()
        format = SUPPORTED_IMG_FORMATS[format]
        results.append(LastFmCoverSourceResult(img_url,
                                               size,
                                               format,
                                               thumbnail_url=thumbnail_url,
                                               source=self,
                                               check_metadata=check_metadata))

    return results
[ "async", "def", "parseResults", "(", "self", ",", "api_data", ")", ":", "results", "=", "[", "]", "# get xml results list", "xml_text", "=", "api_data", ".", "decode", "(", "\"utf-8\"", ")", "xml_root", "=", "xml", ".", "etree", ".", "ElementTree", ".", "fromstring", "(", "xml_text", ")", "status", "=", "xml_root", ".", "get", "(", "\"status\"", ")", "if", "status", "!=", "\"ok\"", ":", "raise", "Exception", "(", "\"Unexpected Last.fm response status: %s\"", "%", "(", "status", ")", ")", "img_elements", "=", "xml_root", ".", "findall", "(", "\"album/image\"", ")", "# build results from xml", "thumbnail_url", "=", "None", "thumbnail_size", "=", "None", "for", "img_element", "in", "img_elements", ":", "img_url", "=", "img_element", ".", "text", "if", "not", "img_url", ":", "# last.fm returns empty image tag for size it does not have", "continue", "lfm_size", "=", "img_element", ".", "get", "(", "\"size\"", ")", "if", "lfm_size", "==", "\"mega\"", ":", "check_metadata", "=", "CoverImageMetadata", ".", "SIZE", "else", ":", "check_metadata", "=", "CoverImageMetadata", ".", "NONE", "try", ":", "size", "=", "__class__", ".", "SIZES", "[", "lfm_size", "]", "except", "KeyError", ":", "continue", "if", "(", "size", "[", "0", "]", "<=", "MAX_THUMBNAIL_SIZE", ")", "and", "(", "(", "thumbnail_size", "is", "None", ")", "or", "(", "size", "[", "0", "]", "<", "thumbnail_size", ")", ")", ":", "thumbnail_url", "=", "img_url", "thumbnail_size", "=", "size", "[", "0", "]", "format", "=", "os", ".", "path", ".", "splitext", "(", "img_url", ")", "[", "1", "]", "[", "1", ":", "]", ".", "lower", "(", ")", "format", "=", "SUPPORTED_IMG_FORMATS", "[", "format", "]", "results", ".", "append", "(", "LastFmCoverSourceResult", "(", "img_url", ",", "size", ",", "format", ",", "thumbnail_url", "=", "thumbnail_url", ",", "source", "=", "self", ",", "check_metadata", "=", "check_metadata", ")", ")", "return", "results" ]
See CoverSource.parseResults.
[ "See", "CoverSource", ".", "parseResults", "." ]
python
train
37
CybOXProject/mixbox
mixbox/fields.py
https://github.com/CybOXProject/mixbox/blob/9097dae7a433f5b98c18171c4a5598f69a7d30af/mixbox/fields.py#L50-L62
def iterfields(klass):
    """Iterate over the input class members and yield its TypedFields.

    Args:
        klass: A class (usually an Entity subclass).

    Yields:
        (class attribute name, TypedField instance) tuples.
    """
    is_field = lambda x: isinstance(x, TypedField)

    for name, field in inspect.getmembers(klass, predicate=is_field):
        yield name, field
[ "def", "iterfields", "(", "klass", ")", ":", "is_field", "=", "lambda", "x", ":", "isinstance", "(", "x", ",", "TypedField", ")", "for", "name", ",", "field", "in", "inspect", ".", "getmembers", "(", "klass", ",", "predicate", "=", "is_field", ")", ":", "yield", "name", ",", "field" ]
Iterate over the input class members and yield its TypedFields.

Args:
    klass: A class (usually an Entity subclass).

Yields:
    (class attribute name, TypedField instance) tuples.
[ "Iterate", "over", "the", "input", "class", "members", "and", "yield", "its", "TypedFields", "." ]
python
train
28.769231
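A hedged usage sketch; the Person entity is invented, and importing Entity from mixbox.entities is an assumption about the package layout.

from mixbox.entities import Entity
from mixbox.fields import TypedField, iterfields

class Person(Entity):
    name = TypedField("Name")
    age = TypedField("Age")

# Yields (attribute name, TypedField instance) pairs, per the docstring.
for attr, field in iterfields(Person):
    print(attr, field)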
icgood/pymap
pymap/mime/parsed.py
https://github.com/icgood/pymap/blob/e77d9a54d760e3cbe044a548883bb4299ed61dc2/pymap/mime/parsed.py#L79-L84
def subject(self) -> Optional[UnstructuredHeader]:
    """The ``Subject`` header."""
    try:
        return cast(UnstructuredHeader, self[b'subject'][0])
    except (KeyError, IndexError):
        return None
[ "def", "subject", "(", "self", ")", "->", "Optional", "[", "UnstructuredHeader", "]", ":", "try", ":", "return", "cast", "(", "UnstructuredHeader", ",", "self", "[", "b'subject'", "]", "[", "0", "]", ")", "except", "(", "KeyError", ",", "IndexError", ")", ":", "return", "None" ]
The ``Subject`` header.
[ "The", "Subject", "header", "." ]
python
train
37.333333
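The property above guards both a missing key (KeyError) and an empty header list (IndexError); a standalone mimic of that lookup-with-fallback pattern, with a plain dict standing in for the pymap header mapping.

headers = {b'subject': []}           # key present but list empty
try:
    subject = headers[b'subject'][0]
except (KeyError, IndexError):
    subject = None
print(subject)                       # None -- the IndexError branch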
lsst-sqre/documenteer
documenteer/sphinxext/lssttasks/taskutils.py
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/lssttasks/taskutils.py#L141-L163
def typestring(obj):
    """Make a string for the object's type

    Parameters
    ----------
    obj : obj
        Python object.

    Returns
    -------
    `str`
        String representation of the object's type. This is the type's
        importable namespace.

    Examples
    --------
    >>> import docutils.nodes
    >>> para = docutils.nodes.paragraph()
    >>> typestring(para)
    'docutils.nodes.paragraph'
    """
    obj_type = type(obj)
    return '.'.join((obj_type.__module__, obj_type.__name__))
[ "def", "typestring", "(", "obj", ")", ":", "obj_type", "=", "type", "(", "obj", ")", "return", "'.'", ".", "join", "(", "(", "obj_type", ".", "__module__", ",", "obj_type", ".", "__name__", ")", ")" ]
Make a string for the object's type

Parameters
----------
obj : obj
    Python object.

Returns
-------
`str`
    String representation of the object's type. This is the type's
    importable namespace.

Examples
--------
>>> import docutils.nodes
>>> para = docutils.nodes.paragraph()
>>> typestring(para)
'docutils.nodes.paragraph'
[ "Make", "a", "string", "for", "the", "object", "s", "type" ]
python
train
21.521739
Ex-Mente/auxi.0
auxi/modelling/process/materials/thermo.py
https://github.com/Ex-Mente/auxi.0/blob/2dcdae74154f136f8ca58289fe5b20772f215046/auxi/modelling/process/materials/thermo.py#L1588-L1595
def clone(self): """Create a complete copy of the stream. :returns: A new MaterialStream object.""" result = copy.copy(self) result._compound_mfrs = copy.deepcopy(self._compound_mfrs) return result
[ "def", "clone", "(", "self", ")", ":", "result", "=", "copy", ".", "copy", "(", "self", ")", "result", ".", "_compound_mfrs", "=", "copy", ".", "deepcopy", "(", "self", ".", "_compound_mfrs", ")", "return", "result" ]
Create a complete copy of the stream. :returns: A new MaterialStream object.
[ "Create", "a", "complete", "copy", "of", "the", "stream", "." ]
python
valid
29
inveniosoftware-attic/invenio-utils
invenio_utils/html.py
https://github.com/inveniosoftware-attic/invenio-utils/blob/9a1c6db4e3f1370901f329f510480dd8df188296/invenio_utils/html.py#L86-L108
def nmtoken_from_string(text): """ Returns a Nmtoken from a string. It is useful to produce XHTML valid values for the 'name' attribute of an anchor. CAUTION: the function is not injective: 2 different texts might lead to the same result. This is improbable on a single page. Nmtoken is the type that is a mixture of characters supported in attributes such as 'name' in HTML 'a' tag. For example, <a name="Articles%20%26%20Preprints"> should be transformed to <a name="Articles372037263720Preprints"> using this function. http://www.w3.org/TR/2000/REC-xml-20001006#NT-Nmtoken Also note that this function filters more characters than specified by the definition of Nmtoken ('CombiningChar' and 'Extender' charsets are filtered out). """ text = text.replace('-', '--') return ''.join([(((not char.isalnum() and char not in [ '.', '-', '_', ':' ]) and str(ord(char))) or char) for char in text])
[ "def", "nmtoken_from_string", "(", "text", ")", ":", "text", "=", "text", ".", "replace", "(", "'-'", ",", "'--'", ")", "return", "''", ".", "join", "(", "[", "(", "(", "(", "not", "char", ".", "isalnum", "(", ")", "and", "char", "not", "in", "[", "'.'", ",", "'-'", ",", "'_'", ",", "':'", "]", ")", "and", "str", "(", "ord", "(", "char", ")", ")", ")", "or", "char", ")", "for", "char", "in", "text", "]", ")" ]
Returns a Nmtoken from a string. It is useful to produce XHTML valid values for the 'name' attribute of an anchor. CAUTION: the function is not injective: 2 different texts might lead to the same result. This is improbable on a single page. Nmtoken is the type that is a mixture of characters supported in attributes such as 'name' in HTML 'a' tag. For example, <a name="Articles%20%26%20Preprints"> should be transformed to <a name="Articles372037263720Preprints"> using this function. http://www.w3.org/TR/2000/REC-xml-20001006#NT-Nmtoken Also note that this function filters more characters than specified by the definition of Nmtoken ('CombiningChar' and 'Extender' charsets are filtered out).
[ "Returns", "a", "Nmtoken", "from", "a", "string", ".", "It", "is", "useful", "to", "produce", "XHTML", "valid", "values", "for", "the", "name", "attribute", "of", "an", "anchor", "." ]
python
train
41.304348
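The function above is self-contained, so its behavior is easy to check directly (assuming nmtoken_from_string() is pasted into scope):

print(nmtoken_from_string('Articles & Preprints'))
# -> 'Articles323832Preprints'  (' ' becomes '32', '&' becomes '38')
print(nmtoken_from_string('a-b'))
# -> 'a--b'  ('-' is doubled before encoding)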
tk0miya/tk.phpautodoc
src/phply/phpparse.py
https://github.com/tk0miya/tk.phpautodoc/blob/cf789f64abaf76351485cee231a075227e665fb6/src/phply/phpparse.py#L214-L220
def p_statement_break(p): '''statement : BREAK SEMI | BREAK expr SEMI''' if len(p) == 3: p[0] = ast.Break(None, lineno=p.lineno(1)) else: p[0] = ast.Break(p[2], lineno=p.lineno(1))
[ "def", "p_statement_break", "(", "p", ")", ":", "if", "len", "(", "p", ")", "==", "3", ":", "p", "[", "0", "]", "=", "ast", ".", "Break", "(", "None", ",", "lineno", "=", "p", ".", "lineno", "(", "1", ")", ")", "else", ":", "p", "[", "0", "]", "=", "ast", ".", "Break", "(", "p", "[", "2", "]", ",", "lineno", "=", "p", ".", "lineno", "(", "1", ")", ")" ]
statement : BREAK SEMI | BREAK expr SEMI
[ "statement", ":", "BREAK", "SEMI", "|", "BREAK", "expr", "SEMI" ]
python
train
31.285714
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/model.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/model.py#L324-L330
def path(self): """str: URL path for the model's APIs.""" return "/projects/%s/datasets/%s/models/%s" % ( self._proto.project_id, self._proto.dataset_id, self._proto.model_id, )
[ "def", "path", "(", "self", ")", ":", "return", "\"/projects/%s/datasets/%s/models/%s\"", "%", "(", "self", ".", "_proto", ".", "project_id", ",", "self", ".", "_proto", ".", "dataset_id", ",", "self", ".", "_proto", ".", "model_id", ",", ")" ]
str: URL path for the model's APIs.
[ "str", ":", "URL", "path", "for", "the", "model", "s", "APIs", "." ]
python
train
33
xtream1101/cutil
cutil/__init__.py
https://github.com/xtream1101/cutil/blob/2e4d1f00e66154b44d4ccffb9b1db3f37e87f2e8/cutil/__init__.py#L224-L229
def split_into(max_num_chunks, list_to_chunk): """ Yields chunks of the list, splitting it into at most max_num_chunks chunks """ max_chunk_size = math.ceil(len(list_to_chunk) / max_num_chunks) return chunks_of(max_chunk_size, list_to_chunk)
[ "def", "split_into", "(", "max_num_chunks", ",", "list_to_chunk", ")", ":", "max_chunk_size", "=", "math", ".", "ceil", "(", "len", "(", "list_to_chunk", ")", "/", "max_num_chunks", ")", "return", "chunks_of", "(", "max_chunk_size", ",", "list_to_chunk", ")" ]
Yields chunks of the list, splitting it into at most max_num_chunks chunks
[ "Yields", "the", "list", "with", "a", "max", "total", "size", "of", "max_num_chunks" ]
python
train
39.5
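split_into() delegates to a chunks_of() helper that is not shown in this record; the sketch below defines a plausible chunks_of() (an assumption, not cutil's actual code) so the pair can run end to end:

import math

def chunks_of(chunk_size, items):
    # plausible stand-in: yield consecutive slices of at most chunk_size items
    for i in range(0, len(items), chunk_size):
        yield items[i:i + chunk_size]

def split_into(max_num_chunks, list_to_chunk):  # copied from the record above
    max_chunk_size = math.ceil(len(list_to_chunk) / max_num_chunks)
    return chunks_of(max_chunk_size, list_to_chunk)

print(list(split_into(3, list(range(10)))))
# -> [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]  (at most 3 chunks)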
zqfang/GSEApy
gseapy/algorithm.py
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/algorithm.py#L191-L253
def ranking_metric_tensor(exprs, method, permutation_num, pos, neg, classes, ascending, rs=np.random.RandomState()): """Build shuffled ranking matrix when permutation_type eq to phenotype. :param exprs: gene_expression DataFrame, gene_name indexed. :param str method: calculate correlation or ranking. methods including: 1. 'signal_to_noise'. 2. 't_test'. 3. 'ratio_of_classes' (also referred to as fold change). 4. 'diff_of_classes'. 5. 'log2_ratio_of_classes'. :param int permuation_num: how many times of classes is being shuffled :param str pos: one of labels of phenotype's names. :param str neg: one of labels of phenotype's names. :param list classes: a list of phenotype labels, to specify which column of dataframe belongs to what class of phenotype. :param bool ascending: bool. Sort ascending vs. descending. :return: returns two 2d ndarray with shape (nperm, gene_num). | cor_mat_indices: the indices of sorted and permutated (exclude last row) ranking matrix. | cor_mat: sorted and permutated (exclude last row) ranking matrix. """ # S: samples, G: gene number G, S = exprs.shape # genes = exprs.index.values expr_mat = exprs.values.T perm_cor_tensor = np.tile(expr_mat, (permutation_num+1,1,1)) # random shuffle on the first dim, last matrix is not shuffled for arr in perm_cor_tensor[:-1]: rs.shuffle(arr) classes = np.array(classes) pos = classes == pos neg = classes == neg pos_cor_mean = perm_cor_tensor[:,pos,:].mean(axis=1) neg_cor_mean = perm_cor_tensor[:,neg,:].mean(axis=1) pos_cor_std = perm_cor_tensor[:,pos,:].std(axis=1, ddof=1) neg_cor_std = perm_cor_tensor[:,neg,:].std(axis=1, ddof=1) if method == 'signal_to_noise': cor_mat = (pos_cor_mean - neg_cor_mean)/(pos_cor_std + neg_cor_std) elif method == 't_test': denom = 1.0/G cor_mat = (pos_cor_mean - neg_cor_mean)/ np.sqrt(denom*pos_cor_std**2 + denom*neg_cor_std**2) elif method == 'ratio_of_classes': cor_mat = pos_cor_mean / neg_cor_mean elif method == 'diff_of_classes': cor_mat = pos_cor_mean - neg_cor_mean elif method == 'log2_ratio_of_classes': cor_mat = np.log2(pos_cor_mean / neg_cor_mean) else: logging.error("Please provide correct method name!!!") sys.exit(0) # return matix[nperm+1, perm_cors] cor_mat_ind = cor_mat.argsort() # ndarray: sort in place cor_mat.sort() # genes_mat = genes.take(cor_mat_ind) if ascending: return cor_mat_ind, cor_mat # descending order of ranking and genes # return genes_mat[:,::-1], cor_mat[:,::-1] return cor_mat_ind[:, ::-1], cor_mat[:, ::-1]
[ "def", "ranking_metric_tensor", "(", "exprs", ",", "method", ",", "permutation_num", ",", "pos", ",", "neg", ",", "classes", ",", "ascending", ",", "rs", "=", "np", ".", "random", ".", "RandomState", "(", ")", ")", ":", "# S: samples, G: gene number", "G", ",", "S", "=", "exprs", ".", "shape", "# genes = exprs.index.values", "expr_mat", "=", "exprs", ".", "values", ".", "T", "perm_cor_tensor", "=", "np", ".", "tile", "(", "expr_mat", ",", "(", "permutation_num", "+", "1", ",", "1", ",", "1", ")", ")", "# random shuffle on the first dim, last matrix is not shuffled", "for", "arr", "in", "perm_cor_tensor", "[", ":", "-", "1", "]", ":", "rs", ".", "shuffle", "(", "arr", ")", "classes", "=", "np", ".", "array", "(", "classes", ")", "pos", "=", "classes", "==", "pos", "neg", "=", "classes", "==", "neg", "pos_cor_mean", "=", "perm_cor_tensor", "[", ":", ",", "pos", ",", ":", "]", ".", "mean", "(", "axis", "=", "1", ")", "neg_cor_mean", "=", "perm_cor_tensor", "[", ":", ",", "neg", ",", ":", "]", ".", "mean", "(", "axis", "=", "1", ")", "pos_cor_std", "=", "perm_cor_tensor", "[", ":", ",", "pos", ",", ":", "]", ".", "std", "(", "axis", "=", "1", ",", "ddof", "=", "1", ")", "neg_cor_std", "=", "perm_cor_tensor", "[", ":", ",", "neg", ",", ":", "]", ".", "std", "(", "axis", "=", "1", ",", "ddof", "=", "1", ")", "if", "method", "==", "'signal_to_noise'", ":", "cor_mat", "=", "(", "pos_cor_mean", "-", "neg_cor_mean", ")", "/", "(", "pos_cor_std", "+", "neg_cor_std", ")", "elif", "method", "==", "'t_test'", ":", "denom", "=", "1.0", "/", "G", "cor_mat", "=", "(", "pos_cor_mean", "-", "neg_cor_mean", ")", "/", "np", ".", "sqrt", "(", "denom", "*", "pos_cor_std", "**", "2", "+", "denom", "*", "neg_cor_std", "**", "2", ")", "elif", "method", "==", "'ratio_of_classes'", ":", "cor_mat", "=", "pos_cor_mean", "/", "neg_cor_mean", "elif", "method", "==", "'diff_of_classes'", ":", "cor_mat", "=", "pos_cor_mean", "-", "neg_cor_mean", "elif", "method", "==", "'log2_ratio_of_classes'", ":", "cor_mat", "=", "np", ".", "log2", "(", "pos_cor_mean", "/", "neg_cor_mean", ")", "else", ":", "logging", ".", "error", "(", "\"Please provide correct method name!!!\"", ")", "sys", ".", "exit", "(", "0", ")", "# return matix[nperm+1, perm_cors]", "cor_mat_ind", "=", "cor_mat", ".", "argsort", "(", ")", "# ndarray: sort in place", "cor_mat", ".", "sort", "(", ")", "# genes_mat = genes.take(cor_mat_ind)", "if", "ascending", ":", "return", "cor_mat_ind", ",", "cor_mat", "# descending order of ranking and genes", "# return genes_mat[:,::-1], cor_mat[:,::-1]", "return", "cor_mat_ind", "[", ":", ",", ":", ":", "-", "1", "]", ",", "cor_mat", "[", ":", ",", ":", ":", "-", "1", "]" ]
Build shuffled ranking matrix when permutation_type eq to phenotype. :param exprs: gene_expression DataFrame, gene_name indexed. :param str method: calculate correlation or ranking. methods including: 1. 'signal_to_noise'. 2. 't_test'. 3. 'ratio_of_classes' (also referred to as fold change). 4. 'diff_of_classes'. 5. 'log2_ratio_of_classes'. :param int permutation_num: how many times the classes are shuffled :param str pos: one of labels of phenotype's names. :param str neg: one of labels of phenotype's names. :param list classes: a list of phenotype labels, to specify which column of dataframe belongs to what class of phenotype. :param bool ascending: bool. Sort ascending vs. descending. :return: returns two 2d ndarrays with shape (nperm, gene_num). | cor_mat_indices: the indices of the sorted and permuted (excluding the last row) ranking matrix. | cor_mat: sorted and permuted (excluding the last row) ranking matrix.
[ "Build", "shuffled", "ranking", "matrix", "when", "permutation_type", "eq", "to", "phenotype", "." ]
python
test
46.063492
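A toy numpy illustration of the permutation-tensor idea at the heart of ranking_metric_tensor() (tile the expression matrix, shuffle every copy except the last, take class-masked means); this is a reduced sketch, not the full GSEApy function:

import numpy as np

rs = np.random.RandomState(0)
expr_mat = rs.rand(4, 3)                         # 4 samples x 3 genes
perm = np.tile(expr_mat, (5 + 1, 1, 1))          # 5 permutations + 1 unshuffled copy
for arr in perm[:-1]:                            # the last copy keeps the true labels
    rs.shuffle(arr)                              # shuffles rows (samples) in place

classes = np.array(['pos', 'pos', 'neg', 'neg'])
pos, neg = classes == 'pos', classes == 'neg'
s2n = ((perm[:, pos, :].mean(axis=1) - perm[:, neg, :].mean(axis=1))
       / (perm[:, pos, :].std(axis=1, ddof=1) + perm[:, neg, :].std(axis=1, ddof=1)))
print(s2n.shape)                                 # (6, 3): one signal-to-noise row per permutation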
rbuffat/pyepw
pyepw/epw.py
https://github.com/rbuffat/pyepw/blob/373d4d3c8386c8d35789f086ac5f6018c2711745/pyepw/epw.py#L2074-L2094
def ws_db004(self, value=None): """ Corresponds to IDD Field `ws_db004` Mean wind speed coincident with 0.4% dry-bulb temperature Args: value (float): value for IDD Field `ws_db004` Unit: m/s if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value """ if value is not None: try: value = float(value) except ValueError: raise ValueError('value {} need to be of type float ' 'for field `ws_db004`'.format(value)) self._ws_db004 = value
[ "def", "ws_db004", "(", "self", ",", "value", "=", "None", ")", ":", "if", "value", "is", "not", "None", ":", "try", ":", "value", "=", "float", "(", "value", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'value {} need to be of type float '", "'for field `ws_db004`'", ".", "format", "(", "value", ")", ")", "self", ".", "_ws_db004", "=", "value" ]
Corresponds to IDD Field `ws_db004` Mean wind speed coincident with 0.4% dry-bulb temperature Args: value (float): value for IDD Field `ws_db004` Unit: m/s if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
[ "Corresponds", "to", "IDD", "Field", "ws_db004", "Mean", "wind", "speed", "coincident", "with", "0", ".", "4%", "dry", "-", "bulb", "temperature" ]
python
train
35
apache/spark
python/pyspark/sql/dataframe.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/dataframe.py#L958-L971
def alias(self, alias): """Returns a new :class:`DataFrame` with an alias set. :param alias: string, an alias name to be set for the DataFrame. >>> from pyspark.sql.functions import * >>> df_as1 = df.alias("df_as1") >>> df_as2 = df.alias("df_as2") >>> joined_df = df_as1.join(df_as2, col("df_as1.name") == col("df_as2.name"), 'inner') >>> joined_df.select("df_as1.name", "df_as2.name", "df_as2.age").collect() [Row(name=u'Bob', name=u'Bob', age=5), Row(name=u'Alice', name=u'Alice', age=2)] """ assert isinstance(alias, basestring), "alias should be a string" return DataFrame(getattr(self._jdf, "as")(alias), self.sql_ctx)
[ "def", "alias", "(", "self", ",", "alias", ")", ":", "assert", "isinstance", "(", "alias", ",", "basestring", ")", ",", "\"alias should be a string\"", "return", "DataFrame", "(", "getattr", "(", "self", ".", "_jdf", ",", "\"as\"", ")", "(", "alias", ")", ",", "self", ".", "sql_ctx", ")" ]
Returns a new :class:`DataFrame` with an alias set. :param alias: string, an alias name to be set for the DataFrame. >>> from pyspark.sql.functions import * >>> df_as1 = df.alias("df_as1") >>> df_as2 = df.alias("df_as2") >>> joined_df = df_as1.join(df_as2, col("df_as1.name") == col("df_as2.name"), 'inner') >>> joined_df.select("df_as1.name", "df_as2.name", "df_as2.age").collect() [Row(name=u'Bob', name=u'Bob', age=5), Row(name=u'Alice', name=u'Alice', age=2)]
[ "Returns", "a", "new", ":", "class", ":", "DataFrame", "with", "an", "alias", "set", "." ]
python
train
50
wandb/client
wandb/vendor/prompt_toolkit/buffer.py
https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/buffer.py#L1373-L1415
def reshape_text(buffer, from_row, to_row): """ Reformat text, taking the width into account. `to_row` is included. (Vi 'gq' operator.) """ lines = buffer.text.splitlines(True) lines_before = lines[:from_row] lines_after = lines[to_row + 1:] lines_to_reformat = lines[from_row:to_row + 1] if lines_to_reformat: # Take indentation from the first line. length = re.search(r'^\s*', lines_to_reformat[0]).end() indent = lines_to_reformat[0][:length].replace('\n', '') # Now, take all the 'words' from the lines to be reshaped. words = ''.join(lines_to_reformat).split() # And reshape. width = (buffer.text_width or 80) - len(indent) reshaped_text = [indent] current_width = 0 for w in words: if current_width: if len(w) + current_width + 1 > width: reshaped_text.append('\n') reshaped_text.append(indent) current_width = 0 else: reshaped_text.append(' ') current_width += 1 reshaped_text.append(w) current_width += len(w) if reshaped_text[-1] != '\n': reshaped_text.append('\n') # Apply result. buffer.document = Document( text=''.join(lines_before + reshaped_text + lines_after), cursor_position=len(''.join(lines_before + reshaped_text)))
[ "def", "reshape_text", "(", "buffer", ",", "from_row", ",", "to_row", ")", ":", "lines", "=", "buffer", ".", "text", ".", "splitlines", "(", "True", ")", "lines_before", "=", "lines", "[", ":", "from_row", "]", "lines_after", "=", "lines", "[", "to_row", "+", "1", ":", "]", "lines_to_reformat", "=", "lines", "[", "from_row", ":", "to_row", "+", "1", "]", "if", "lines_to_reformat", ":", "# Take indentation from the first line.", "length", "=", "re", ".", "search", "(", "r'^\\s*'", ",", "lines_to_reformat", "[", "0", "]", ")", ".", "end", "(", ")", "indent", "=", "lines_to_reformat", "[", "0", "]", "[", ":", "length", "]", ".", "replace", "(", "'\\n'", ",", "''", ")", "# Now, take all the 'words' from the lines to be reshaped.", "words", "=", "''", ".", "join", "(", "lines_to_reformat", ")", ".", "split", "(", ")", "# And reshape.", "width", "=", "(", "buffer", ".", "text_width", "or", "80", ")", "-", "len", "(", "indent", ")", "reshaped_text", "=", "[", "indent", "]", "current_width", "=", "0", "for", "w", "in", "words", ":", "if", "current_width", ":", "if", "len", "(", "w", ")", "+", "current_width", "+", "1", ">", "width", ":", "reshaped_text", ".", "append", "(", "'\\n'", ")", "reshaped_text", ".", "append", "(", "indent", ")", "current_width", "=", "0", "else", ":", "reshaped_text", ".", "append", "(", "' '", ")", "current_width", "+=", "1", "reshaped_text", ".", "append", "(", "w", ")", "current_width", "+=", "len", "(", "w", ")", "if", "reshaped_text", "[", "-", "1", "]", "!=", "'\\n'", ":", "reshaped_text", ".", "append", "(", "'\\n'", ")", "# Apply result.", "buffer", ".", "document", "=", "Document", "(", "text", "=", "''", ".", "join", "(", "lines_before", "+", "reshaped_text", "+", "lines_after", ")", ",", "cursor_position", "=", "len", "(", "''", ".", "join", "(", "lines_before", "+", "reshaped_text", ")", ")", ")" ]
Reformat text, taking the width into account. `to_row` is included. (Vi 'gq' operator.)
[ "Reformat", "text", "taking", "the", "width", "into", "account", ".", "to_row", "is", "included", ".", "(", "Vi", "gq", "operator", ".", ")" ]
python
train
33.674419
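The rewrap core of reshape_text(), extracted here as a standalone sketch so it runs without prompt_toolkit's Buffer and Document objects (the restructuring is mine; the word-packing logic mirrors the record above):

import re

def rewrap(lines, width=80):
    # take indentation from the first line, then re-pack the words
    indent = lines[0][:re.search(r'^\s*', lines[0]).end()].replace('\n', '')
    words = ''.join(lines).split()
    out, current = [indent], 0
    for w in words:
        if current and len(w) + current + 1 > width - len(indent):
            out.append('\n' + indent)            # line full: break and re-indent
            current = 0
        elif current:
            out.append(' ')
            current += 1
        out.append(w)
        current += len(w)
    return ''.join(out) + '\n'

print(rewrap(['  one two three four five six\n'], width=18))
# ->   one two three
#      four five six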
ulule/django-badgify
badgify/registry.py
https://github.com/ulule/django-badgify/blob/1bf233ffeb6293ee659454de7b3794682128b6ca/badgify/registry.py#L90-L109
def get_recipe_instances_for_badges(self, badges): """ Takes a list of badge slugs and returns a tuple: ``(valid, invalid)``. """ from .exceptions import BadgeNotFound valid, invalid = [], [] if not isinstance(badges, (list, tuple)): badges = [badges] for badge in badges: try: recipe = self.get_recipe_instance(badge) valid.append(recipe) except BadgeNotFound: logger.debug('✘ Badge "%s" has not been registered', badge) invalid.append(badge) return (valid, invalid)
[ "def", "get_recipe_instances_for_badges", "(", "self", ",", "badges", ")", ":", "from", ".", "exceptions", "import", "BadgeNotFound", "valid", ",", "invalid", "=", "[", "]", ",", "[", "]", "if", "not", "isinstance", "(", "badges", ",", "(", "list", ",", "tuple", ")", ")", ":", "badges", "=", "[", "badges", "]", "for", "badge", "in", "badges", ":", "try", ":", "recipe", "=", "self", ".", "get_recipe_instance", "(", "badge", ")", "valid", ".", "append", "(", "recipe", ")", "except", "BadgeNotFound", ":", "logger", ".", "debug", "(", "'✘ Badge \"%s\" has not been registered', ", "b", "dge)", "", "invalid", ".", "append", "(", "badge", ")", "return", "(", "valid", ",", "invalid", ")" ]
Takes a list of badge slugs and returns a tuple: ``(valid, invalid)``.
[ "Takes", "a", "list", "of", "badge", "slugs", "and", "returns", "a", "tuple", ":", "(", "valid", "invalid", ")", "." ]
python
train
30.8
coghost/izen
izen/chaos.py
https://github.com/coghost/izen/blob/432db017f99dd2ba809e1ba1792145ab6510263d/izen/chaos.py#L80-L108
def aes_encrypt(self, plain, sec_key, enable_b64=True): """ 使用 ``aes`` 加密数据, 并由 ``base64编码`` 加密后的数据 - ``sec_key`` 加密 ``msg``, 最后选择 ``是否由base64编码数据`` - msg长度为16位数, 不足则补 'ascii \\0' .. warning:: msg长度为16位数, 不足则补 'ascii \\0' :param plain: :type plain: str :param sec_key: :type sec_key: str :param enable_b64: :type enable_b64: bool :return: :rtype: """ plain = helper.to_str(plain) sec_key = helper.to_str(sec_key) # 如果msg长度不为16倍数, 需要补位 '\0' plain += '\0' * (self.bs - len(plain) % self.bs) # 使用生成的 key, iv 加密 plain = helper.to_bytes(plain) cipher = self.aes_obj(sec_key).encrypt(plain) # 是否返回 base64 编码数据 cip = base64.b64encode(cipher) if enable_b64 else cipher return helper.to_str(cip)
[ "def", "aes_encrypt", "(", "self", ",", "plain", ",", "sec_key", ",", "enable_b64", "=", "True", ")", ":", "plain", "=", "helper", ".", "to_str", "(", "plain", ")", "sec_key", "=", "helper", ".", "to_str", "(", "sec_key", ")", "# 如果msg长度不为16倍数, 需要补位 '\\0'", "plain", "+=", "'\\0'", "*", "(", "self", ".", "bs", "-", "len", "(", "plain", ")", "%", "self", ".", "bs", ")", "# 使用生成的 key, iv 加密", "plain", "=", "helper", ".", "to_bytes", "(", "plain", ")", "cipher", "=", "self", ".", "aes_obj", "(", "sec_key", ")", ".", "encrypt", "(", "plain", ")", "# 是否返回 base64 编码数据", "cip", "=", "base64", ".", "b64encode", "(", "cipher", ")", "if", "enable_b64", "else", "cipher", "return", "helper", ".", "to_str", "(", "cip", ")" ]
Encrypt data with ``aes`` and base64-encode the encrypted data - encrypt ``msg`` with ``sec_key``, then choose ``whether to base64-encode the result`` - the length of msg must be a multiple of 16; if not, it is padded with ASCII '\\0' .. warning:: the length of msg must be a multiple of 16; if not, it is padded with ASCII '\\0' :param plain: :type plain: str :param sec_key: :type sec_key: str :param enable_b64: :type enable_b64: bool :return: :rtype:
[ "使用", "aes", "加密数据", "并由", "base64编码", "加密后的数据" ]
python
train
30.034483
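izen's aes_obj() construction is not shown in this record; the sketch below assumes it wraps ECB-mode AES from pycryptodome (an assumption) and reproduces the zero-pad / encrypt / base64 flow:

import base64
from Crypto.Cipher import AES  # pycryptodome

def aes_encrypt(plain, sec_key, enable_b64=True, bs=AES.block_size):
    # pad with ASCII NUL so the length is a multiple of the 16-byte block size
    plain = plain + '\0' * (bs - len(plain) % bs)
    cipher = AES.new(sec_key.encode(), AES.MODE_ECB).encrypt(plain.encode())
    return base64.b64encode(cipher).decode() if enable_b64 else cipher

print(aes_encrypt('hello', '0123456789abcdef'))  # AES keys must be 16/24/32 bytes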
openstack/networking-cisco
networking_cisco/plugins/cisco/device_manager/scheduler/hosting_device_cfg_agent_scheduler.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/plugins/cisco/device_manager/scheduler/hosting_device_cfg_agent_scheduler.py#L37-L54
def auto_schedule_hosting_devices(self, plugin, context, agent_host): """Schedules unassociated hosting devices to Cisco cfg agent. Schedules hosting devices to agent running on <agent_host>. """ query = context.session.query(bc.Agent) query = query.filter_by(agent_type=c_constants.AGENT_TYPE_CFG, host=agent_host, admin_state_up=True) try: cfg_agent_db = query.one() except (exc.MultipleResultsFound, exc.NoResultFound): LOG.debug('No enabled Cisco cfg agent on host %s', agent_host) return if cfg_agentschedulers_db.CfgAgentSchedulerDbMixin.is_agent_down( cfg_agent_db.heartbeat_timestamp): LOG.warning('Cisco cfg agent %s is not alive', cfg_agent_db.id) return cfg_agent_db
[ "def", "auto_schedule_hosting_devices", "(", "self", ",", "plugin", ",", "context", ",", "agent_host", ")", ":", "query", "=", "context", ".", "session", ".", "query", "(", "bc", ".", "Agent", ")", "query", "=", "query", ".", "filter_by", "(", "agent_type", "=", "c_constants", ".", "AGENT_TYPE_CFG", ",", "host", "=", "agent_host", ",", "admin_state_up", "=", "True", ")", "try", ":", "cfg_agent_db", "=", "query", ".", "one", "(", ")", "except", "(", "exc", ".", "MultipleResultsFound", ",", "exc", ".", "NoResultFound", ")", ":", "LOG", ".", "debug", "(", "'No enabled Cisco cfg agent on host %s'", ",", "agent_host", ")", "return", "if", "cfg_agentschedulers_db", ".", "CfgAgentSchedulerDbMixin", ".", "is_agent_down", "(", "cfg_agent_db", ".", "heartbeat_timestamp", ")", ":", "LOG", ".", "warning", "(", "'Cisco cfg agent %s is not alive'", ",", "cfg_agent_db", ".", "id", ")", "return", "cfg_agent_db" ]
Schedules unassociated hosting devices to Cisco cfg agent. Schedules hosting devices to agent running on <agent_host>.
[ "Schedules", "unassociated", "hosting", "devices", "to", "Cisco", "cfg", "agent", "." ]
python
train
47.388889
bitprophet/spec
spec/__init__.py
https://github.com/bitprophet/spec/blob/d9646c5daf8e479937f970d21ebe185ad936a35a/spec/__init__.py#L32-L67
def eq_(result, expected, msg=None): """ Shadow of the Nose builtin which presents easier to read multiline output. """ params = {'expected': expected, 'result': result} aka = """ --------------------------------- aka ----------------------------------------- Expected: %(expected)r Got: %(result)r """ % params default_msg = """ Expected: %(expected)s Got: %(result)s """ % params if ( (repr(result) != six.text_type(result)) or (repr(expected) != six.text_type(expected)) ): default_msg += aka assertion_msg = msg or default_msg # This assert will bubble up to Nose's failure handling, which at some # point calls explicit str() - which will UnicodeDecodeError on any non # ASCII text. # To work around this, we make sure Unicode strings become bytestrings # beforehand, with explicit encode. if isinstance(assertion_msg, six.text_type): assertion_msg = assertion_msg.encode('utf-8') assert result == expected, assertion_msg
[ "def", "eq_", "(", "result", ",", "expected", ",", "msg", "=", "None", ")", ":", "params", "=", "{", "'expected'", ":", "expected", ",", "'result'", ":", "result", "}", "aka", "=", "\"\"\"\n\n--------------------------------- aka -----------------------------------------\n\nExpected:\n%(expected)r\n\nGot:\n%(result)r\n\"\"\"", "%", "params", "default_msg", "=", "\"\"\"\nExpected:\n%(expected)s\n\nGot:\n%(result)s\n\"\"\"", "%", "params", "if", "(", "(", "repr", "(", "result", ")", "!=", "six", ".", "text_type", "(", "result", ")", ")", "or", "(", "repr", "(", "expected", ")", "!=", "six", ".", "text_type", "(", "expected", ")", ")", ")", ":", "default_msg", "+=", "aka", "assertion_msg", "=", "msg", "or", "default_msg", "# This assert will bubble up to Nose's failure handling, which at some", "# point calls explicit str() - which will UnicodeDecodeError on any non", "# ASCII text.", "# To work around this, we make sure Unicode strings become bytestrings", "# beforehand, with explicit encode.", "if", "isinstance", "(", "assertion_msg", ",", "six", ".", "text_type", ")", ":", "assertion_msg", "=", "assertion_msg", ".", "encode", "(", "'utf-8'", ")", "assert", "result", "==", "expected", ",", "assertion_msg" ]
Shadow of the Nose builtin which presents easier to read multiline output.
[ "Shadow", "of", "the", "Nose", "builtin", "which", "presents", "easier", "to", "read", "multiline", "output", "." ]
python
valid
27.527778
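Typical use of eq_(), assuming the spec package is installed; the multiline Expected/Got message only shows up when the assertion fails:

from spec import eq_

eq_(2 + 2, 4)                      # passes silently
try:
    eq_('apple', 'orange')
except AssertionError as err:
    print(err)                     # prints Expected:/Got: on separate lines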
annoviko/pyclustering
pyclustering/utils/__init__.py
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/utils/__init__.py#L107-L131
def rgb2gray(image_rgb_array): """! @brief Returns image as 1-dimension (gray colored) matrix, where one element of list describes pixel. @details Luma coding is used for transformation and that is calculated directly from gamma-compressed primary intensities as a weighted sum: \f[Y = 0.2989R + 0.587G + 0.114B\f] @param[in] image_rgb_array (list): Image represented by RGB list. @return (list) Image as gray colored matrix, where one element of list describes pixel. @code colored_image = read_image(file_name); gray_image = rgb2gray(colored_image); @endcode @see read_image() """ image_gray_array = [0.0] * len(image_rgb_array); for index in range(0, len(image_rgb_array), 1): image_gray_array[index] = float(image_rgb_array[index][0]) * 0.2989 + float(image_rgb_array[index][1]) * 0.5870 + float(image_rgb_array[index][2]) * 0.1140; return image_gray_array;
[ "def", "rgb2gray", "(", "image_rgb_array", ")", ":", "image_gray_array", "=", "[", "0.0", "]", "*", "len", "(", "image_rgb_array", ")", "for", "index", "in", "range", "(", "0", ",", "len", "(", "image_rgb_array", ")", ",", "1", ")", ":", "image_gray_array", "[", "index", "]", "=", "float", "(", "image_rgb_array", "[", "index", "]", "[", "0", "]", ")", "*", "0.2989", "+", "float", "(", "image_rgb_array", "[", "index", "]", "[", "1", "]", ")", "*", "0.5870", "+", "float", "(", "image_rgb_array", "[", "index", "]", "[", "2", "]", ")", "*", "0.1140", "return", "image_gray_array" ]
! @brief Returns image as 1-dimension (gray colored) matrix, where one element of list describes pixel. @details Luma coding is used for transformation and that is calculated directly from gamma-compressed primary intensities as a weighted sum: \f[Y = 0.2989R + 0.587G + 0.114B\f] @param[in] image_rgb_array (list): Image represented by RGB list. @return (list) Image as gray colored matrix, where one element of list describes pixel. @code colored_image = read_image(file_name); gray_image = rgb2gray(colored_image); @endcode @see read_image()
[ "!" ]
python
valid
39.08
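rgb2gray() operates on plain lists, so a smoke test needs nothing beyond the function above being in scope:

red_pixel = [255, 0, 0]
green_pixel = [0, 255, 0]
print(rgb2gray([red_pixel, green_pixel]))
# -> [76.2195, 149.685]  (0.2989 * 255 and 0.5870 * 255)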
theno/fabsetup
fabsetup/fabfile/setup/__init__.py
https://github.com/theno/fabsetup/blob/ced728abff93551ba5677e63bc1bdc0ef5ca5777/fabsetup/fabfile/setup/__init__.py#L428-L445
def telegram(): '''Install Telegram desktop client for linux (x64). More infos: https://telegram.org https://desktop.telegram.org/ ''' if not exists('~/bin/Telegram', msg='Download and install Telegram:'): run('mkdir -p /tmp/telegram') run('cd /tmp/telegram && wget https://telegram.org/dl/desktop/linux') run('cd /tmp/telegram && tar xf linux') with warn_only(): run('mv /tmp/telegram/Telegram ~/bin') run('rm -rf /tmp/telegram') else: print('skip download, dir ~/bin/Telegram already exists') run('ln -snf ~/bin/Telegram/Telegram ~/bin/telegram', msg="\nCreate executable 'telegram':")
[ "def", "telegram", "(", ")", ":", "if", "not", "exists", "(", "'~/bin/Telegram'", ",", "msg", "=", "'Download and install Telegram:'", ")", ":", "run", "(", "'mkdir -p /tmp/telegram'", ")", "run", "(", "'cd /tmp/telegram && wget https://telegram.org/dl/desktop/linux'", ")", "run", "(", "'cd /tmp/telegram && tar xf linux'", ")", "with", "warn_only", "(", ")", ":", "run", "(", "'mv /tmp/telegram/Telegram ~/bin'", ")", "run", "(", "'rm -rf /tmp/telegram'", ")", "else", ":", "print", "(", "'skip download, dir ~/bin/Telegram already exists'", ")", "run", "(", "'ln -snf ~/bin/Telegram/Telegram ~/bin/telegram'", ",", "msg", "=", "\"\\nCreate executable 'telegram':\"", ")" ]
Install Telegram desktop client for linux (x64). More info: https://telegram.org https://desktop.telegram.org/
[ "Install", "Telegram", "desktop", "client", "for", "linux", "(", "x64", ")", "." ]
python
train
38.055556
pavoni/pyvera
pyvera/__init__.py
https://github.com/pavoni/pyvera/blob/e05e3d13f76153444787d31948feb5419d77a8c8/pyvera/__init__.py#L1116-L1123
def set_fan_mode(self, mode): """Set the fan mode""" self.set_service_value( self.thermostat_fan_service, 'Mode', 'NewMode', mode) self.set_cache_value('fanmode', mode)
[ "def", "set_fan_mode", "(", "self", ",", "mode", ")", ":", "self", ".", "set_service_value", "(", "self", ".", "thermostat_fan_service", ",", "'Mode'", ",", "'NewMode'", ",", "mode", ")", "self", ".", "set_cache_value", "(", "'fanmode'", ",", "mode", ")" ]
Set the fan mode
[ "Set", "the", "fan", "mode" ]
python
train
29.125
CellProfiler/centrosome
centrosome/smooth.py
https://github.com/CellProfiler/centrosome/blob/7bd9350a2d4ae1b215b81eabcecfe560bbb1f32a/centrosome/smooth.py#L70-L99
def fit_polynomial(pixel_data, mask, clip=True): '''Return an "image" which is a polynomial fit to the pixel data Fit the image to the polynomial Ax**2+By**2+Cxy+Dx+Ey+F pixel_data - a two-dimensional numpy array to be fitted mask - a mask of pixels whose intensities should be considered in the least squares fit clip - if True, clip the output array so that pixels less than zero in the fitted image are zero and pixels that are greater than one are one. ''' mask = np.logical_and(mask,pixel_data > 0) if not np.any(mask): return pixel_data x,y = np.mgrid[0:pixel_data.shape[0],0:pixel_data.shape[1]] x2 = x*x y2 = y*y xy = x*y o = np.ones(pixel_data.shape) a = np.array([x[mask],y[mask],x2[mask],y2[mask],xy[mask],o[mask]]) coeffs = scipy.linalg.lstsq(a.transpose(),pixel_data[mask])[0] output_pixels = np.sum([coeff * index for coeff, index in zip(coeffs, [x,y,x2,y2,xy,o])],0) if clip: output_pixels[output_pixels > 1] = 1 output_pixels[output_pixels < 0] = 0 return output_pixels
[ "def", "fit_polynomial", "(", "pixel_data", ",", "mask", ",", "clip", "=", "True", ")", ":", "mask", "=", "np", ".", "logical_and", "(", "mask", ",", "pixel_data", ">", "0", ")", "if", "not", "np", ".", "any", "(", "mask", ")", ":", "return", "pixel_data", "x", ",", "y", "=", "np", ".", "mgrid", "[", "0", ":", "pixel_data", ".", "shape", "[", "0", "]", ",", "0", ":", "pixel_data", ".", "shape", "[", "1", "]", "]", "x2", "=", "x", "*", "x", "y2", "=", "y", "*", "y", "xy", "=", "x", "*", "y", "o", "=", "np", ".", "ones", "(", "pixel_data", ".", "shape", ")", "a", "=", "np", ".", "array", "(", "[", "x", "[", "mask", "]", ",", "y", "[", "mask", "]", ",", "x2", "[", "mask", "]", ",", "y2", "[", "mask", "]", ",", "xy", "[", "mask", "]", ",", "o", "[", "mask", "]", "]", ")", "coeffs", "=", "scipy", ".", "linalg", ".", "lstsq", "(", "a", ".", "transpose", "(", ")", ",", "pixel_data", "[", "mask", "]", ")", "[", "0", "]", "output_pixels", "=", "np", ".", "sum", "(", "[", "coeff", "*", "index", "for", "coeff", ",", "index", "in", "zip", "(", "coeffs", ",", "[", "x", ",", "y", ",", "x2", ",", "y2", ",", "xy", ",", "o", "]", ")", "]", ",", "0", ")", "if", "clip", ":", "output_pixels", "[", "output_pixels", ">", "1", "]", "=", "1", "output_pixels", "[", "output_pixels", "<", "0", "]", "=", "0", "return", "output_pixels" ]
Return an "image" which is a polynomial fit to the pixel data Fit the image to the polynomial Ax**2+By**2+Cxy+Dx+Ey+F pixel_data - a two-dimensional numpy array to be fitted mask - a mask of pixels whose intensities should be considered in the least squares fit clip - if True, clip the output array so that pixels less than zero in the fitted image are zero and pixels that are greater than one are one.
[ "Return", "an", "image", "which", "is", "a", "polynomial", "fit", "to", "the", "pixel", "data", "Fit", "the", "image", "to", "the", "polynomial", "Ax", "**", "2", "+", "By", "**", "2", "+", "Cxy", "+", "Dx", "+", "Ey", "+", "F", "pixel_data", "-", "a", "two", "-", "dimensional", "numpy", "array", "to", "be", "fitted", "mask", "-", "a", "mask", "of", "pixels", "whose", "intensities", "should", "be", "considered", "in", "the", "least", "squares", "fit", "clip", "-", "if", "True", "clip", "the", "output", "array", "so", "that", "pixels", "less", "than", "zero", "in", "the", "fitted", "image", "are", "zero", "and", "pixels", "that", "are", "greater", "than", "one", "are", "one", "." ]
python
train
38.033333
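A quick sanity check for fit_polynomial(): fit an exact quadratic surface and confirm the residual is negligible. This assumes centrosome is installed; otherwise paste the function above into scope:

import numpy as np
from centrosome.smooth import fit_polynomial

x, y = np.mgrid[0:20, 0:20]
surface = (0.001 * x * x + 0.002 * y * y + 0.01 * x) / 2.0   # exact quadratic, values within [0, 1]
mask = np.ones(surface.shape, dtype=bool)
fitted = fit_polynomial(surface, mask, clip=False)
print(np.abs(fitted - surface).max())                        # ~1e-15: least squares recovers it exactly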
4degrees/clique
source/clique/__init__.py
https://github.com/4degrees/clique/blob/af1d4fef1d60c30a870257199a4d98597d15417d/source/clique/__init__.py#L23-L206
def assemble( iterable, patterns=None, minimum_items=2, case_sensitive=True, assume_padded_when_ambiguous=False ): '''Assemble items in *iterable* into discrete collections. *patterns* may be specified as a list of regular expressions to limit the returned collection possibilities. Use this when interested in collections that only match specific patterns. Each pattern must contain the expression from :py:data:`DIGITS_PATTERN` exactly once. A selection of common expressions are available in :py:data:`PATTERNS`. .. note:: If a pattern is supplied as a string it will be automatically compiled to a :py:class:`re.RegexObject` instance for convenience. When *patterns* is not specified, collections are formed by examining all possible groupings of the items in *iterable* based around common numerical components. *minimum_items* dictates the minimum number of items a collection must have in order to be included in the result. The default is 2, filtering out single item collections. If *case_sensitive* is False, then items will be treated as part of the same collection when they only differ in casing. To avoid ambiguity, the resulting collection will always be lowercase. For example, "item.0001.dpx" and "Item.0002.dpx" would be part of the same collection, "item.%04d.dpx". .. note:: Any compiled *patterns* will also respect the set case sensitivity. For certain collections it may be ambiguous whether they are padded or not. For example, 1000-1010 can be considered either an unpadded collection or a four padded collection. By default, Clique is conservative and assumes that the collection is unpadded. To change this behaviour, set *assume_padded_when_ambiguous* to True and any ambiguous collection will have a relevant padding set. .. note:: *assume_padded_when_ambiguous* has no effect on collections that are unambiguous. For example, 1-100 will always be considered unpadded regardless of the *assume_padded_when_ambiguous* setting. Return tuple of two lists (collections, remainder) where 'collections' is a list of assembled :py:class:`~clique.collection.Collection` instances and 'remainder' is a list of items that did not belong to any collection. ''' collection_map = defaultdict(set) collections = [] remainder = [] # Compile patterns. flags = 0 if not case_sensitive: flags |= re.IGNORECASE compiled_patterns = [] if patterns is not None: if not patterns: return collections, list(iterable) for pattern in patterns: if isinstance(pattern, basestring): compiled_patterns.append(re.compile(pattern, flags=flags)) else: compiled_patterns.append(pattern) else: compiled_patterns.append(re.compile(DIGITS_PATTERN, flags=flags)) # Process iterable. for item in iterable: matched = False for pattern in compiled_patterns: for match in pattern.finditer(item): index = match.group('index') head = item[:match.start('index')] tail = item[match.end('index'):] if not case_sensitive: head = head.lower() tail = tail.lower() padding = match.group('padding') if padding: padding = len(index) else: padding = 0 key = (head, tail, padding) collection_map[key].add(int(index)) matched = True if not matched: remainder.append(item) # Form collections. merge_candidates = [] for (head, tail, padding), indexes in collection_map.items(): collection = Collection(head, tail, padding, indexes) collections.append(collection) if collection.padding == 0: merge_candidates.append(collection) # Merge together collections that align on padding boundaries. For example, # 0998-0999 and 1000-1001 can be merged into 0998-1001. Note that only # indexes within the padding width limit are merged. If a collection is # entirely merged into another then it will not be included as a separate # collection in the results. fully_merged = [] for collection in collections: if collection.padding == 0: continue for candidate in merge_candidates: if ( candidate.head == collection.head and candidate.tail == collection.tail ): merged_index_count = 0 for index in candidate.indexes: if len(str(abs(index))) == collection.padding: collection.indexes.add(index) merged_index_count += 1 if merged_index_count == len(candidate.indexes): fully_merged.append(candidate) # Filter out fully merged collections. collections = [collection for collection in collections if collection not in fully_merged] # Filter out collections that do not have at least as many indexes as # minimum_items. In addition, add any members of a filtered collection, # which are not members of an unfiltered collection, to the remainder. filtered = [] remainder_candidates = [] for collection in collections: if len(collection.indexes) >= minimum_items: filtered.append(collection) else: for member in collection: remainder_candidates.append(member) for candidate in remainder_candidates: # Check if candidate has already been added to remainder to avoid # duplicate entries. if candidate in remainder: continue has_membership = False for collection in filtered: if candidate in collection: has_membership = True break if not has_membership: remainder.append(candidate) # Set padding for all ambiguous collections according to the # assume_padded_when_ambiguous setting. if assume_padded_when_ambiguous: for collection in filtered: if ( not collection.padding and collection.indexes ): indexes = list(collection.indexes) first_index_width = len(str(indexes[0])) last_index_width = len(str(indexes[-1])) if first_index_width == last_index_width: collection.padding = first_index_width return filtered, remainder
[ "def", "assemble", "(", "iterable", ",", "patterns", "=", "None", ",", "minimum_items", "=", "2", ",", "case_sensitive", "=", "True", ",", "assume_padded_when_ambiguous", "=", "False", ")", ":", "collection_map", "=", "defaultdict", "(", "set", ")", "collections", "=", "[", "]", "remainder", "=", "[", "]", "# Compile patterns.", "flags", "=", "0", "if", "not", "case_sensitive", ":", "flags", "|=", "re", ".", "IGNORECASE", "compiled_patterns", "=", "[", "]", "if", "patterns", "is", "not", "None", ":", "if", "not", "patterns", ":", "return", "collections", ",", "list", "(", "iterable", ")", "for", "pattern", "in", "patterns", ":", "if", "isinstance", "(", "pattern", ",", "basestring", ")", ":", "compiled_patterns", ".", "append", "(", "re", ".", "compile", "(", "pattern", ",", "flags", "=", "flags", ")", ")", "else", ":", "compiled_patterns", ".", "append", "(", "pattern", ")", "else", ":", "compiled_patterns", ".", "append", "(", "re", ".", "compile", "(", "DIGITS_PATTERN", ",", "flags", "=", "flags", ")", ")", "# Process iterable.", "for", "item", "in", "iterable", ":", "matched", "=", "False", "for", "pattern", "in", "compiled_patterns", ":", "for", "match", "in", "pattern", ".", "finditer", "(", "item", ")", ":", "index", "=", "match", ".", "group", "(", "'index'", ")", "head", "=", "item", "[", ":", "match", ".", "start", "(", "'index'", ")", "]", "tail", "=", "item", "[", "match", ".", "end", "(", "'index'", ")", ":", "]", "if", "not", "case_sensitive", ":", "head", "=", "head", ".", "lower", "(", ")", "tail", "=", "tail", ".", "lower", "(", ")", "padding", "=", "match", ".", "group", "(", "'padding'", ")", "if", "padding", ":", "padding", "=", "len", "(", "index", ")", "else", ":", "padding", "=", "0", "key", "=", "(", "head", ",", "tail", ",", "padding", ")", "collection_map", "[", "key", "]", ".", "add", "(", "int", "(", "index", ")", ")", "matched", "=", "True", "if", "not", "matched", ":", "remainder", ".", "append", "(", "item", ")", "# Form collections.", "merge_candidates", "=", "[", "]", "for", "(", "head", ",", "tail", ",", "padding", ")", ",", "indexes", "in", "collection_map", ".", "items", "(", ")", ":", "collection", "=", "Collection", "(", "head", ",", "tail", ",", "padding", ",", "indexes", ")", "collections", ".", "append", "(", "collection", ")", "if", "collection", ".", "padding", "==", "0", ":", "merge_candidates", ".", "append", "(", "collection", ")", "# Merge together collections that align on padding boundaries. For example,", "# 0998-0999 and 1000-1001 can be merged into 0998-1001. Note that only", "# indexes within the padding width limit are merged. 
If a collection is", "# entirely merged into another then it will not be included as a separate", "# collection in the results.", "fully_merged", "=", "[", "]", "for", "collection", "in", "collections", ":", "if", "collection", ".", "padding", "==", "0", ":", "continue", "for", "candidate", "in", "merge_candidates", ":", "if", "(", "candidate", ".", "head", "==", "collection", ".", "head", "and", "candidate", ".", "tail", "==", "collection", ".", "tail", ")", ":", "merged_index_count", "=", "0", "for", "index", "in", "candidate", ".", "indexes", ":", "if", "len", "(", "str", "(", "abs", "(", "index", ")", ")", ")", "==", "collection", ".", "padding", ":", "collection", ".", "indexes", ".", "add", "(", "index", ")", "merged_index_count", "+=", "1", "if", "merged_index_count", "==", "len", "(", "candidate", ".", "indexes", ")", ":", "fully_merged", ".", "append", "(", "candidate", ")", "# Filter out fully merged collections.", "collections", "=", "[", "collection", "for", "collection", "in", "collections", "if", "collection", "not", "in", "fully_merged", "]", "# Filter out collections that do not have at least as many indexes as", "# minimum_items. In addition, add any members of a filtered collection,", "# which are not members of an unfiltered collection, to the remainder.", "filtered", "=", "[", "]", "remainder_candidates", "=", "[", "]", "for", "collection", "in", "collections", ":", "if", "len", "(", "collection", ".", "indexes", ")", ">=", "minimum_items", ":", "filtered", ".", "append", "(", "collection", ")", "else", ":", "for", "member", "in", "collection", ":", "remainder_candidates", ".", "append", "(", "member", ")", "for", "candidate", "in", "remainder_candidates", ":", "# Check if candidate has already been added to remainder to avoid", "# duplicate entries.", "if", "candidate", "in", "remainder", ":", "continue", "has_membership", "=", "False", "for", "collection", "in", "filtered", ":", "if", "candidate", "in", "collection", ":", "has_membership", "=", "True", "break", "if", "not", "has_membership", ":", "remainder", ".", "append", "(", "candidate", ")", "# Set padding for all ambiguous collections according to the", "# assume_padded_when_ambiguous setting.", "if", "assume_padded_when_ambiguous", ":", "for", "collection", "in", "filtered", ":", "if", "(", "not", "collection", ".", "padding", "and", "collection", ".", "indexes", ")", ":", "indexes", "=", "list", "(", "collection", ".", "indexes", ")", "first_index_width", "=", "len", "(", "str", "(", "indexes", "[", "0", "]", ")", ")", "last_index_width", "=", "len", "(", "str", "(", "indexes", "[", "-", "1", "]", ")", ")", "if", "first_index_width", "==", "last_index_width", ":", "collection", ".", "padding", "=", "first_index_width", "return", "filtered", ",", "remainder" ]
Assemble items in *iterable* into discrete collections. *patterns* may be specified as a list of regular expressions to limit the returned collection possibilities. Use this when interested in collections that only match specific patterns. Each pattern must contain the expression from :py:data:`DIGITS_PATTERN` exactly once. A selection of common expressions are available in :py:data:`PATTERNS`. .. note:: If a pattern is supplied as a string it will be automatically compiled to a :py:class:`re.RegexObject` instance for convenience. When *patterns* is not specified, collections are formed by examining all possible groupings of the items in *iterable* based around common numerical components. *minimum_items* dictates the minimum number of items a collection must have in order to be included in the result. The default is 2, filtering out single item collections. If *case_sensitive* is False, then items will be treated as part of the same collection when they only differ in casing. To avoid ambiguity, the resulting collection will always be lowercase. For example, "item.0001.dpx" and "Item.0002.dpx" would be part of the same collection, "item.%04d.dpx". .. note:: Any compiled *patterns* will also respect the set case sensitivity. For certain collections it may be ambiguous whether they are padded or not. For example, 1000-1010 can be considered either an unpadded collection or a four padded collection. By default, Clique is conservative and assumes that the collection is unpadded. To change this behaviour, set *assume_padded_when_ambiguous* to True and any ambiguous collection will have a relevant padding set. .. note:: *assume_padded_when_ambiguous* has no effect on collections that are unambiguous. For example, 1-100 will always be considered unpadded regardless of the *assume_padded_when_ambiguous* setting. Return tuple of two lists (collections, remainder) where 'collections' is a list of assembled :py:class:`~clique.collection.Collection` instances and 'remainder' is a list of items that did not belong to any collection.
[ "Assemble", "items", "in", "*", "iterable", "*", "into", "discreet", "collections", "." ]
python
train
35.880435
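Typical use of clique.assemble() on a mixed file listing, assuming the clique package is installed:

import clique

collections, remainder = clique.assemble([
    'shot.0001.exr', 'shot.0002.exr', 'shot.0003.exr', 'notes.txt',
])
for collection in collections:
    print(collection)          # e.g. shot.%04d.exr [1-3]
print(remainder)               # ['notes.txt']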
MacHu-GWU/single_file_module-project
sfm/binarysearch.py
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/binarysearch.py#L50-L71
def find_lt(array, x): """ Find rightmost value less than x. :type array: list :param array: an iterable object that supports indexing :param x: a comparable value Example:: >>> find_lt([0, 1, 2, 3], 2.5) 2 **Chinese documentation** Find the largest value that is less than x. """ i = bisect.bisect_left(array, x) if i: return array[i - 1] raise ValueError
[ "def", "find_lt", "(", "array", ",", "x", ")", ":", "i", "=", "bisect", ".", "bisect_left", "(", "array", ",", "x", ")", "if", "i", ":", "return", "array", "[", "i", "-", "1", "]", "raise", "ValueError" ]
Find rightmost value less than x. :type array: list :param array: an iterable object that supports indexing :param x: a comparable value Example:: >>> find_lt([0, 1, 2, 3], 2.5) 2 **Chinese documentation** Find the largest value that is less than x.
[ "Find", "rightmost", "value", "less", "than", "x", "." ]
python
train
16.318182
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py#L8251-L8263
def serial_udb_extra_f5_send(self, sue_YAWKP_AILERON, sue_YAWKD_AILERON, sue_ROLLKP, sue_ROLLKD, sue_YAW_STABILIZATION_AILERON, sue_AILERON_BOOST, force_mavlink1=False): ''' Backwards compatible version of SERIAL_UDB_EXTRA F5: format sue_YAWKP_AILERON : Serial UDB YAWKP_AILERON Gain for Proportional control of navigation (float) sue_YAWKD_AILERON : Serial UDB YAWKD_AILERON Gain for Rate control of navigation (float) sue_ROLLKP : Serial UDB Extra ROLLKP Gain for Proportional control of roll stabilization (float) sue_ROLLKD : Serial UDB Extra ROLLKD Gain for Rate control of roll stabilization (float) sue_YAW_STABILIZATION_AILERON : YAW_STABILIZATION_AILERON Proportional control (float) sue_AILERON_BOOST : Gain For Boosting Manual Aileron control When Plane Stabilized (float) ''' return self.send(self.serial_udb_extra_f5_encode(sue_YAWKP_AILERON, sue_YAWKD_AILERON, sue_ROLLKP, sue_ROLLKD, sue_YAW_STABILIZATION_AILERON, sue_AILERON_BOOST), force_mavlink1=force_mavlink1)
[ "def", "serial_udb_extra_f5_send", "(", "self", ",", "sue_YAWKP_AILERON", ",", "sue_YAWKD_AILERON", ",", "sue_ROLLKP", ",", "sue_ROLLKD", ",", "sue_YAW_STABILIZATION_AILERON", ",", "sue_AILERON_BOOST", ",", "force_mavlink1", "=", "False", ")", ":", "return", "self", ".", "send", "(", "self", ".", "serial_udb_extra_f5_encode", "(", "sue_YAWKP_AILERON", ",", "sue_YAWKD_AILERON", ",", "sue_ROLLKP", ",", "sue_ROLLKD", ",", "sue_YAW_STABILIZATION_AILERON", ",", "sue_AILERON_BOOST", ")", ",", "force_mavlink1", "=", "force_mavlink1", ")" ]
Backwards compatible version of SERIAL_UDB_EXTRA F5: format sue_YAWKP_AILERON : Serial UDB YAWKP_AILERON Gain for Proportional control of navigation (float) sue_YAWKD_AILERON : Serial UDB YAWKD_AILERON Gain for Rate control of navigation (float) sue_ROLLKP : Serial UDB Extra ROLLKP Gain for Proportional control of roll stabilization (float) sue_ROLLKD : Serial UDB Extra ROLLKD Gain for Rate control of roll stabilization (float) sue_YAW_STABILIZATION_AILERON : YAW_STABILIZATION_AILERON Proportional control (float) sue_AILERON_BOOST : Gain For Boosting Manual Aileron control When Plane Stabilized (float)
[ "Backwards", "compatible", "version", "of", "SERIAL_UDB_EXTRA", "F5", ":", "format" ]
python
train
91.538462
karan/TPB
tpb/tpb.py
https://github.com/karan/TPB/blob/f424a73a10d4bcf4e363d7e7e8cb915a3a057671/tpb/tpb.py#L292-L300
def search(self, query, page=0, order=7, category=0, multipage=False): """ Searches TPB for query and returns a list of paginated Torrents capable of changing query, categories and orders. """ search = Search(self.base_url, query, page, order, category) if multipage: search.multipage() return search
[ "def", "search", "(", "self", ",", "query", ",", "page", "=", "0", ",", "order", "=", "7", ",", "category", "=", "0", ",", "multipage", "=", "False", ")", ":", "search", "=", "Search", "(", "self", ".", "base_url", ",", "query", ",", "page", ",", "order", ",", "category", ")", "if", "multipage", ":", "search", ".", "multipage", "(", ")", "return", "search" ]
Searches TPB for query and returns a list of paginated Torrents capable of changing query, categories and orders.
[ "Searches", "TPB", "for", "query", "and", "returns", "a", "list", "of", "paginated", "Torrents", "capable", "of", "changing", "query", "categories", "and", "orders", "." ]
python
train
40
SoCo/SoCo
soco/music_services/music_service.py
https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/soco/music_services/music_service.py#L695-L726
def search(self, category, term='', index=0, count=100): """Search for an item in a category. Args: category (str): The search category to use. Standard Sonos search categories are 'artists', 'albums', 'tracks', 'playlists', 'genres', 'stations', 'tags'. Not all are available for each music service. Call available_search_categories for a list for this service. term (str): The term to search for. index (int): The starting index. Default 0. count (int): The maximum number of items to return. Default 100. Returns: ~collections.OrderedDict: The search results, or `None`. See also: The Sonos `search API <http://musicpartners.sonos.com/node/86>`_ """ search_category = self._get_search_prefix_map().get(category, None) if search_category is None: raise MusicServiceException( "%s does not support the '%s' search category" % ( self.service_name, category)) response = self.soap_client.call( 'search', [ ('id', search_category), ('term', term), ('index', index), ('count', count)]) return parse_response(self, response, category)
[ "def", "search", "(", "self", ",", "category", ",", "term", "=", "''", ",", "index", "=", "0", ",", "count", "=", "100", ")", ":", "search_category", "=", "self", ".", "_get_search_prefix_map", "(", ")", ".", "get", "(", "category", ",", "None", ")", "if", "search_category", "is", "None", ":", "raise", "MusicServiceException", "(", "\"%s does not support the '%s' search category\"", "%", "(", "self", ".", "service_name", ",", "category", ")", ")", "response", "=", "self", ".", "soap_client", ".", "call", "(", "'search'", ",", "[", "(", "'id'", ",", "search_category", ")", ",", "(", "'term'", ",", "term", ")", ",", "(", "'index'", ",", "index", ")", ",", "(", "'count'", ",", "count", ")", "]", ")", "return", "parse_response", "(", "self", ",", "response", ",", "category", ")" ]
Search for an item in a category. Args: category (str): The search category to use. Standard Sonos search categories are 'artists', 'albums', 'tracks', 'playlists', 'genres', 'stations', 'tags'. Not all are available for each music service. Call available_search_categories for a list for this service. term (str): The term to search for. index (int): The starting index. Default 0. count (int): The maximum number of items to return. Default 100. Returns: ~collections.OrderedDict: The search results, or `None`. See also: The Sonos `search API <http://musicpartners.sonos.com/node/86>`_
[ "Search", "for", "an", "item", "in", "a", "category", "." ]
python
train
41.125
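A hedged usage sketch for MusicService.search(); it needs a live Sonos household with the named service configured, so treat it as illustrative rather than runnable offline ('TuneIn' here is just an example service name):

from soco.music_services import MusicService

service = MusicService('TuneIn')
print(service.available_search_categories)   # categories this service supports
results = service.search(category='stations', term='jazz', index=0, count=10)
print(results)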
Spinmob/spinmob
_plotting_mess.py
https://github.com/Spinmob/spinmob/blob/f037f5df07f194bcd4a01f4d9916e57b9e8fb45a/_plotting_mess.py#L1167-L1207
def image_file(path=None, zscript='self[1:]', xscript='[0,1]', yscript='d[0]', g=None, **kwargs): """ Loads an data file and plots it with color. Data file must have columns of the same length! Parameters ---------- path=None Path to data file. zscript='self[1:]' Determines how to get data from the columns xscript='[0,1]', yscript='d[0]' Determine the x and y arrays used for setting the axes bounds g=None Optional dictionary of globals for the scripts See spinmob.plot.image.data() for additional optional keyword arguments. See spinmob.data.databox.execute_script() for more information about scripts. """ if 'delimiter' in kwargs: delimiter = kwargs.pop('delimiter') else: delimiter = None d = _data.load(paths=path, delimiter = delimiter) if d is None or len(d) == 0: return # allows the user to overwrite the defaults default_kwargs = dict(xlabel = str(xscript), ylabel = str(yscript), title = d.path, clabel = str(zscript)) default_kwargs.update(kwargs) # get the data X = d(xscript, g) Y = d(yscript, g) Z = _n.array(d(zscript, g)) # Z = Z.transpose() # plot! image_data(Z, X, Y, **default_kwargs)
[ "def", "image_file", "(", "path", "=", "None", ",", "zscript", "=", "'self[1:]'", ",", "xscript", "=", "'[0,1]'", ",", "yscript", "=", "'d[0]'", ",", "g", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "'delimiter'", "in", "kwargs", ":", "delimiter", "=", "kwargs", ".", "pop", "(", "'delimiter'", ")", "else", ":", "delimiter", "=", "None", "d", "=", "_data", ".", "load", "(", "paths", "=", "path", ",", "delimiter", "=", "delimiter", ")", "if", "d", "is", "None", "or", "len", "(", "d", ")", "==", "0", ":", "return", "# allows the user to overwrite the defaults", "default_kwargs", "=", "dict", "(", "xlabel", "=", "str", "(", "xscript", ")", ",", "ylabel", "=", "str", "(", "yscript", ")", ",", "title", "=", "d", ".", "path", ",", "clabel", "=", "str", "(", "zscript", ")", ")", "default_kwargs", ".", "update", "(", "kwargs", ")", "# get the data", "X", "=", "d", "(", "xscript", ",", "g", ")", "Y", "=", "d", "(", "yscript", ",", "g", ")", "Z", "=", "_n", ".", "array", "(", "d", "(", "zscript", ",", "g", ")", ")", "# Z = Z.transpose()", "# plot!", "image_data", "(", "Z", ",", "X", ",", "Y", ",", "*", "*", "default_kwargs", ")" ]
Loads a data file and plots it with color. Data file must have columns of the same length!

    Parameters
    ----------
    path=None
        Path to data file.
    zscript='self[1:]'
        Determines how to get data from the columns
    xscript='[0,1]', yscript='d[0]'
        Determine the x and y arrays used for setting the axes bounds
    g=None
        Optional dictionary of globals for the scripts

    See spinmob.plot.image.data() for additional optional keyword arguments.
    See spinmob.data.databox.execute_script() for more information about scripts.
[ "Loads", "an", "data", "file", "and", "plots", "it", "with", "color", ".", "Data", "file", "must", "have", "columns", "of", "the", "same", "length!" ]
python
train
32.170732
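A hedged usage sketch for image_file. It assumes the function is exposed as spinmob.plot.image.file, which the docstring's reference to spinmob.plot.image.data() suggests but does not confirm, and 'surface.dat' is a hypothetical whitespace-delimited data file whose columns all share the same length:

import spinmob as sm

sm.plot.image.file(
    path='surface.dat',   # hypothetical file; a dialog opens if path is None
    zscript='self[1:]',   # every column after the first becomes z data
    xscript='[0,1]',      # x axis bounds
    yscript='d[0]',       # first column supplies the y axis
    xlabel='x position')  # extra kwargs override the generated defaults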
hubo1016/vlcp
vlcp/event/lock.py
https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/lock.py#L65-L76
def trylock(self):
        "Try to acquire the lock and return True; if the lock cannot be acquired at this moment, return False."
        if self.locked:
            return True
        if self.lockroutine:
            return False
        waiter = self.scheduler.send(LockEvent(self.context, self.key, self))
        if waiter:
            return False
        else:
            self.locked = True
            return True
[ "def", "trylock", "(", "self", ")", ":", "if", "self", ".", "locked", ":", "return", "True", "if", "self", ".", "lockroutine", ":", "return", "False", "waiter", "=", "self", ".", "scheduler", ".", "send", "(", "LockEvent", "(", "self", ".", "context", ",", "self", ".", "key", ",", "self", ")", ")", "if", "waiter", ":", "return", "False", "else", ":", "self", ".", "locked", "=", "True", "return", "True" ]
Try to acquire the lock and return True; if the lock cannot be acquired at this moment, return False.
[ "Try", "to", "acquire", "lock", "and", "return", "True", ";", "if", "cannot", "acquire", "the", "lock", "at", "this", "moment", "return", "False", "." ]
python
train
33.666667
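A sketch of the non-blocking acquire pattern trylock enables. The Lock construction and the work functions below are placeholders, not the full vlcp bootstrap:

lock = Lock(my_key, scheduler)      # hypothetical construction
if lock.trylock():
    try:
        do_critical_work()          # hypothetical protected section
    finally:
        lock.unlock()
else:
    schedule_retry()                # another routine holds the lock; back off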
apple/turicreate
deps/src/libxml2-2.9.1/python/libxml2.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L4363-L4368
def newDocProp(self, name, value): """Create a new property carried by a document. """ ret = libxml2mod.xmlNewDocProp(self._o, name, value) if ret is None:raise treeError('xmlNewDocProp() failed') __tmp = xmlAttr(_obj=ret) return __tmp
[ "def", "newDocProp", "(", "self", ",", "name", ",", "value", ")", ":", "ret", "=", "libxml2mod", ".", "xmlNewDocProp", "(", "self", ".", "_o", ",", "name", ",", "value", ")", "if", "ret", "is", "None", ":", "raise", "treeError", "(", "'xmlNewDocProp() failed'", ")", "__tmp", "=", "xmlAttr", "(", "_obj", "=", "ret", ")", "return", "__tmp" ]
Create a new property carried by a document.
[ "Create", "a", "new", "property", "carried", "by", "a", "document", "." ]
python
train
45
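A short sketch with the libxml2 Python bindings, assuming the module-level newDoc() constructor; the property is created carried by the document but not yet attached to any element:

import libxml2

doc = libxml2.newDoc("1.0")
prop = doc.newDocProp("lang", "en")   # wrapped xmlAttr
print(prop.name)                      # -> lang
doc.freeDoc()                         # libxml2 documents need an explicit free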
cloud-custodian/cloud-custodian
tools/c7n_gcp/c7n_gcp/client.py
https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/tools/c7n_gcp/c7n_gcp/client.py#L377-L392
def execute_command(self, verb, verb_arguments):
        """Executes command (ex. add) via a dedicated http object.

        Async APIs may take minutes to complete. Therefore, callers are
        encouraged to leverage concurrent.futures (or similar) to place long
        running commands on separate threads.

        Args:
            verb (str): Method to execute on the component (ex. get, list).
            verb_arguments (dict): key-value pairs to be passed to
                _build_request.

        Returns:
            dict: An async operation Service Response.
        """
        request = self._build_request(verb, verb_arguments)

        return self._execute(request)
[ "def", "execute_command", "(", "self", ",", "verb", ",", "verb_arguments", ")", ":", "request", "=", "self", ".", "_build_request", "(", "verb", ",", "verb_arguments", ")", "return", "self", ".", "_execute", "(", "request", ")" ]
Executes command (ex. add) via a dedicated http object.

        Async APIs may take minutes to complete. Therefore, callers are
        encouraged to leverage concurrent.futures (or similar) to place long
        running commands on separate threads.

        Args:
            verb (str): Method to execute on the component (ex. get, list).
            verb_arguments (dict): key-value pairs to be passed to
                _build_request.

        Returns:
            dict: An async operation Service Response.
[ "Executes", "command", "(", "ex", ".", "add", ")", "via", "a", "dedicated", "http", "object", "." ]
python
train
40.9375
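A hedged sketch of calling execute_command through a c7n_gcp Session; the project, zone, and instance names are placeholders:

from c7n_gcp.client import Session

session = Session()
client = session.client('compute', 'v1', 'instances')
# Starts an async stop; the response describes the long-running
# operation, not the final instance state.
operation = client.execute_command('stop', {
    'project': 'my-project',
    'zone': 'us-central1-a',
    'instance': 'my-instance'})
print(operation.get('name'))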
huggingface/pytorch-pretrained-BERT
examples/run_openai_gpt.py
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/examples/run_openai_gpt.py#L56-L64
def load_rocstories_dataset(dataset_path): """ Output a list of tuples(story, 1st continuation, 2nd continuation, label) """ with open(dataset_path, encoding='utf_8') as f: f = csv.reader(f) output = [] next(f) # skip the first line for line in tqdm(f): output.append((' '.join(line[1:5]), line[5], line[6], int(line[-1])-1)) return output
[ "def", "load_rocstories_dataset", "(", "dataset_path", ")", ":", "with", "open", "(", "dataset_path", ",", "encoding", "=", "'utf_8'", ")", "as", "f", ":", "f", "=", "csv", ".", "reader", "(", "f", ")", "output", "=", "[", "]", "next", "(", "f", ")", "# skip the first line", "for", "line", "in", "tqdm", "(", "f", ")", ":", "output", ".", "append", "(", "(", "' '", ".", "join", "(", "line", "[", "1", ":", "5", "]", ")", ",", "line", "[", "5", "]", ",", "line", "[", "6", "]", ",", "int", "(", "line", "[", "-", "1", "]", ")", "-", "1", ")", ")", "return", "output" ]
Output a list of tuples(story, 1st continuation, 2nd continuation, label)
[ "Output", "a", "list", "of", "tuples", "(", "story", "1st", "continuation", "2nd", "continuation", "label", ")" ]
python
train
43
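A self-contained check of the loader above, writing a tiny CSV in the expected layout (header row, four story sentences in columns 1-4, two candidate endings, and a 1-based label in the last column):

import csv

rows = [
    ['id', 's1', 's2', 's3', 's4', 'ending1', 'ending2', 'label'],
    ['0', 'A', 'B', 'C', 'D', 'good end', 'bad end', '1'],
]
with open('tiny_rocstories.csv', 'w', encoding='utf_8', newline='') as f:
    csv.writer(f).writerows(rows)

print(load_rocstories_dataset('tiny_rocstories.csv'))
# -> [('A B C D', 'good end', 'bad end', 0)]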
juiceinc/recipe
recipe/core.py
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/core.py#L526-L531
def as_table(self, name=None): """ Return an alias to a table """ if name is None: name = self._id return alias(self.subquery(), name=name)
[ "def", "as_table", "(", "self", ",", "name", "=", "None", ")", ":", "if", "name", "is", "None", ":", "name", "=", "self", ".", "_id", "return", "alias", "(", "self", ".", "subquery", "(", ")", ",", "name", "=", "name", ")" ]
Return an alias to a table
[ "Return", "an", "alias", "to", "a", "table" ]
python
train
29.666667
aichaos/rivescript-python
rivescript/rivescript.py
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/rivescript/rivescript.py#L165-L192
def load_directory(self, directory, ext=None): """Load RiveScript documents from a directory. :param str directory: The directory of RiveScript documents to load replies from. :param []str ext: List of file extensions to consider as RiveScript documents. The default is ``[".rive", ".rs"]``. """ self._say("Loading from directory: " + directory) if ext is None: # Use the default extensions - .rive is preferable. ext = ['.rive', '.rs'] elif type(ext) == str: # Backwards compatibility for ext being a string value. ext = [ext] if not os.path.isdir(directory): self._warn("Error: " + directory + " is not a directory.") return for root, subdirs, files in os.walk(directory): for file in files: for extension in ext: if file.lower().endswith(extension): # Load this file. self.load_file(os.path.join(root, file)) break
[ "def", "load_directory", "(", "self", ",", "directory", ",", "ext", "=", "None", ")", ":", "self", ".", "_say", "(", "\"Loading from directory: \"", "+", "directory", ")", "if", "ext", "is", "None", ":", "# Use the default extensions - .rive is preferable.", "ext", "=", "[", "'.rive'", ",", "'.rs'", "]", "elif", "type", "(", "ext", ")", "==", "str", ":", "# Backwards compatibility for ext being a string value.", "ext", "=", "[", "ext", "]", "if", "not", "os", ".", "path", ".", "isdir", "(", "directory", ")", ":", "self", ".", "_warn", "(", "\"Error: \"", "+", "directory", "+", "\" is not a directory.\"", ")", "return", "for", "root", ",", "subdirs", ",", "files", "in", "os", ".", "walk", "(", "directory", ")", ":", "for", "file", "in", "files", ":", "for", "extension", "in", "ext", ":", "if", "file", ".", "lower", "(", ")", ".", "endswith", "(", "extension", ")", ":", "# Load this file.", "self", ".", "load_file", "(", "os", ".", "path", ".", "join", "(", "root", ",", "file", ")", ")", "break" ]
Load RiveScript documents from a directory. :param str directory: The directory of RiveScript documents to load replies from. :param []str ext: List of file extensions to consider as RiveScript documents. The default is ``[".rive", ".rs"]``.
[ "Load", "RiveScript", "documents", "from", "a", "directory", "." ]
python
train
38.642857
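A usage sketch for load_directory, following the standard RiveScript workflow; 'brain' and 'extra' are placeholder directories:

from rivescript import RiveScript

bot = RiveScript()
bot.load_directory('brain')                # default extensions: .rive and .rs
bot.load_directory('extra', ext=['.txt'])  # custom extension list
bot.sort_replies()                         # required before asking for replies
print(bot.reply('localuser', 'hello bot'))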
aichaos/rivescript-python
rivescript/brain.py
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/rivescript/brain.py#L421-L507
def reply_regexp(self, user, regexp): """Prepares a trigger for the regular expression engine. :param str user: The user ID invoking a reply. :param str regexp: The original trigger text to be turned into a regexp. :return regexp: The final regexp object.""" if regexp in self.master._regexc["trigger"]: # Already compiled this one! return self.master._regexc["trigger"][regexp] # If the trigger is simply '*' then the * there needs to become (.*?) # to match the blank string too. regexp = re.sub(RE.zero_star, r'<zerowidthstar>', regexp) # Filter in arrays. arrays = re.findall(RE.array, regexp) for array in arrays: rep = '' if array in self.master._array: rep = r'(?:' + '|'.join(self.expand_array(array)) + ')' regexp = re.sub(r'\@' + re.escape(array) + r'\b', rep, regexp) # Simple replacements. regexp = regexp.replace('*', '(.+?)') # Convert * into (.+?) regexp = regexp.replace('#', '(\d+?)') # Convert # into (\d+?) regexp = regexp.replace('_', '(\w+?)') # Convert _ into (\w+?) regexp = re.sub(RE.weight, '', regexp) # Remove {weight} tags, allow spaces before the bracket regexp = regexp.replace('<zerowidthstar>', r'(.*?)') # Optionals. optionals = re.findall(RE.optionals, regexp) for match in optionals: parts = match.split("|") new = [] for p in parts: p = r'(?:\\s|\\b)+{}(?:\\s|\\b)+'.format(p.strip()) new.append(p) # If this optional had a star or anything in it, make it # non-matching. pipes = '|'.join(new) pipes = pipes.replace(r'(.+?)', r'(?:.+?)') pipes = pipes.replace(r'(\d+?)', r'(?:\d+?)') pipes = pipes.replace(r'([A-Za-z]+?)', r'(?:[A-Za-z]+?)') regexp = re.sub(r'\s*\[' + re.escape(match) + '\]\s*', '(?:' + pipes + r'|(?:\\s|\\b))', regexp) # _ wildcards can't match numbers! regexp = re.sub(RE.literal_w, r'[^\\s\\d]', regexp) # Filter in bot variables. bvars = re.findall(RE.bot_tag, regexp) for var in bvars: rep = '' if var in self.master._var: rep = self.format_message(self.master._var[var]) regexp = regexp.replace('<bot {var}>'.format(var=var), rep) # Filter in user variables. uvars = re.findall(RE.get_tag, regexp) for var in uvars: rep = '' value = self.master.get_uservar(user, var) if value not in [None, "undefined"]: rep = utils.strip_nasties(value) regexp = regexp.replace('<get {var}>'.format(var=var), rep) # Filter in <input> and <reply> tags. This is a slow process, so only # do it if we have to! if '<input' in regexp or '<reply' in regexp: history = self.master.get_uservar(user, "__history__") for type in ['input', 'reply']: tags = re.findall(r'<' + type + r'([0-9])>', regexp) for index in tags: rep = self.format_message(history[type][int(index) - 1]) regexp = regexp.replace('<{type}{index}>'.format(type=type, index=index), rep) regexp = regexp.replace('<{type}>'.format(type=type), self.format_message(history[type][0])) # TODO: the Perl version doesn't do just <input>/<reply> in trigs! if self.utf8: return re.compile(r'^' + regexp.lower() + r'$', re.UNICODE) else: return re.compile(r'^' + regexp.lower() + r'$')
[ "def", "reply_regexp", "(", "self", ",", "user", ",", "regexp", ")", ":", "if", "regexp", "in", "self", ".", "master", ".", "_regexc", "[", "\"trigger\"", "]", ":", "# Already compiled this one!", "return", "self", ".", "master", ".", "_regexc", "[", "\"trigger\"", "]", "[", "regexp", "]", "# If the trigger is simply '*' then the * there needs to become (.*?)", "# to match the blank string too.", "regexp", "=", "re", ".", "sub", "(", "RE", ".", "zero_star", ",", "r'<zerowidthstar>'", ",", "regexp", ")", "# Filter in arrays.", "arrays", "=", "re", ".", "findall", "(", "RE", ".", "array", ",", "regexp", ")", "for", "array", "in", "arrays", ":", "rep", "=", "''", "if", "array", "in", "self", ".", "master", ".", "_array", ":", "rep", "=", "r'(?:'", "+", "'|'", ".", "join", "(", "self", ".", "expand_array", "(", "array", ")", ")", "+", "')'", "regexp", "=", "re", ".", "sub", "(", "r'\\@'", "+", "re", ".", "escape", "(", "array", ")", "+", "r'\\b'", ",", "rep", ",", "regexp", ")", "# Simple replacements.", "regexp", "=", "regexp", ".", "replace", "(", "'*'", ",", "'(.+?)'", ")", "# Convert * into (.+?)", "regexp", "=", "regexp", ".", "replace", "(", "'#'", ",", "'(\\d+?)'", ")", "# Convert # into (\\d+?)", "regexp", "=", "regexp", ".", "replace", "(", "'_'", ",", "'(\\w+?)'", ")", "# Convert _ into (\\w+?)", "regexp", "=", "re", ".", "sub", "(", "RE", ".", "weight", ",", "''", ",", "regexp", ")", "# Remove {weight} tags, allow spaces before the bracket", "regexp", "=", "regexp", ".", "replace", "(", "'<zerowidthstar>'", ",", "r'(.*?)'", ")", "# Optionals.", "optionals", "=", "re", ".", "findall", "(", "RE", ".", "optionals", ",", "regexp", ")", "for", "match", "in", "optionals", ":", "parts", "=", "match", ".", "split", "(", "\"|\"", ")", "new", "=", "[", "]", "for", "p", "in", "parts", ":", "p", "=", "r'(?:\\\\s|\\\\b)+{}(?:\\\\s|\\\\b)+'", ".", "format", "(", "p", ".", "strip", "(", ")", ")", "new", ".", "append", "(", "p", ")", "# If this optional had a star or anything in it, make it", "# non-matching.", "pipes", "=", "'|'", ".", "join", "(", "new", ")", "pipes", "=", "pipes", ".", "replace", "(", "r'(.+?)'", ",", "r'(?:.+?)'", ")", "pipes", "=", "pipes", ".", "replace", "(", "r'(\\d+?)'", ",", "r'(?:\\d+?)'", ")", "pipes", "=", "pipes", ".", "replace", "(", "r'([A-Za-z]+?)'", ",", "r'(?:[A-Za-z]+?)'", ")", "regexp", "=", "re", ".", "sub", "(", "r'\\s*\\['", "+", "re", ".", "escape", "(", "match", ")", "+", "'\\]\\s*'", ",", "'(?:'", "+", "pipes", "+", "r'|(?:\\\\s|\\\\b))'", ",", "regexp", ")", "# _ wildcards can't match numbers!", "regexp", "=", "re", ".", "sub", "(", "RE", ".", "literal_w", ",", "r'[^\\\\s\\\\d]'", ",", "regexp", ")", "# Filter in bot variables.", "bvars", "=", "re", ".", "findall", "(", "RE", ".", "bot_tag", ",", "regexp", ")", "for", "var", "in", "bvars", ":", "rep", "=", "''", "if", "var", "in", "self", ".", "master", ".", "_var", ":", "rep", "=", "self", ".", "format_message", "(", "self", ".", "master", ".", "_var", "[", "var", "]", ")", "regexp", "=", "regexp", ".", "replace", "(", "'<bot {var}>'", ".", "format", "(", "var", "=", "var", ")", ",", "rep", ")", "# Filter in user variables.", "uvars", "=", "re", ".", "findall", "(", "RE", ".", "get_tag", ",", "regexp", ")", "for", "var", "in", "uvars", ":", "rep", "=", "''", "value", "=", "self", ".", "master", ".", "get_uservar", "(", "user", ",", "var", ")", "if", "value", "not", "in", "[", "None", ",", "\"undefined\"", "]", ":", "rep", "=", "utils", ".", "strip_nasties", "(", "value", ")", "regexp", "=", "regexp", 
".", "replace", "(", "'<get {var}>'", ".", "format", "(", "var", "=", "var", ")", ",", "rep", ")", "# Filter in <input> and <reply> tags. This is a slow process, so only", "# do it if we have to!", "if", "'<input'", "in", "regexp", "or", "'<reply'", "in", "regexp", ":", "history", "=", "self", ".", "master", ".", "get_uservar", "(", "user", ",", "\"__history__\"", ")", "for", "type", "in", "[", "'input'", ",", "'reply'", "]", ":", "tags", "=", "re", ".", "findall", "(", "r'<'", "+", "type", "+", "r'([0-9])>'", ",", "regexp", ")", "for", "index", "in", "tags", ":", "rep", "=", "self", ".", "format_message", "(", "history", "[", "type", "]", "[", "int", "(", "index", ")", "-", "1", "]", ")", "regexp", "=", "regexp", ".", "replace", "(", "'<{type}{index}>'", ".", "format", "(", "type", "=", "type", ",", "index", "=", "index", ")", ",", "rep", ")", "regexp", "=", "regexp", ".", "replace", "(", "'<{type}>'", ".", "format", "(", "type", "=", "type", ")", ",", "self", ".", "format_message", "(", "history", "[", "type", "]", "[", "0", "]", ")", ")", "# TODO: the Perl version doesn't do just <input>/<reply> in trigs!", "if", "self", ".", "utf8", ":", "return", "re", ".", "compile", "(", "r'^'", "+", "regexp", ".", "lower", "(", ")", "+", "r'$'", ",", "re", ".", "UNICODE", ")", "else", ":", "return", "re", ".", "compile", "(", "r'^'", "+", "regexp", ".", "lower", "(", ")", "+", "r'$'", ")" ]
Prepares a trigger for the regular expression engine. :param str user: The user ID invoking a reply. :param str regexp: The original trigger text to be turned into a regexp. :return regexp: The final regexp object.
[ "Prepares", "a", "trigger", "for", "the", "regular", "expression", "engine", "." ]
python
train
42.850575
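A standalone sketch of just the "Simple replacements" step of reply_regexp, separated from the RiveScript internals so it runs on its own; arrays, optionals, and the <bot>/<get>/<input>/<reply> tags are omitted:

import re

def simple_trigger_regexp(trigger):
    trigger = trigger.replace('*', '(.+?)')    # * -> any text
    trigger = trigger.replace('#', r'(\d+?)')  # # -> digits only
    # _ -> word characters; the real method later excludes digits too.
    trigger = trigger.replace('_', r'(\w+?)')
    return re.compile('^' + trigger.lower() + '$')

pattern = simple_trigger_regexp('my name is *')
print(pattern.match('my name is alice').group(1))  # -> 'alice'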
pycampers/zproc
zproc/exceptions.py
https://github.com/pycampers/zproc/blob/352a3c7166e2ccc3597c28385a8354b5a22afdc2/zproc/exceptions.py#L86-L94
def exception_to_signal(sig: Union[SignalException, signal.Signals]): """ Rollback any changes done by :py:func:`signal_to_exception`. """ if isinstance(sig, SignalException): signum = sig.signum else: signum = sig.value signal.signal(signum, signal.SIG_DFL)
[ "def", "exception_to_signal", "(", "sig", ":", "Union", "[", "SignalException", ",", "signal", ".", "Signals", "]", ")", ":", "if", "isinstance", "(", "sig", ",", "SignalException", ")", ":", "signum", "=", "sig", ".", "signum", "else", ":", "signum", "=", "sig", ".", "value", "signal", ".", "signal", "(", "signum", ",", "signal", ".", "SIG_DFL", ")" ]
Rollback any changes done by :py:func:`signal_to_exception`.
[ "Rollback", "any", "changes", "done", "by", ":", "py", ":", "func", ":", "signal_to_exception", "." ]
python
train
32.222222
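A round-trip sketch pairing exception_to_signal with the companion signal_to_exception named in the docstring; the import path follows the module shown above, and the work/cleanup calls are placeholders:

import signal
from zproc.exceptions import SignalException, signal_to_exception, exception_to_signal

signal_to_exception(signal.SIGTERM)  # SIGTERM now raises SignalException
try:
    do_work()                        # hypothetical long-running job
except SignalException as e:
    cleanup()                        # hypothetical shutdown hook
    exception_to_signal(e)           # restore the default SIGTERM handler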
gc3-uzh-ch/elasticluster
elasticluster/providers/ec2_boto.py
https://github.com/gc3-uzh-ch/elasticluster/blob/e6345633308c76de13b889417df572815aabe744/elasticluster/providers/ec2_boto.py#L415-L434
def is_instance_running(self, instance_id): """Checks if the instance is up and running. :param str instance_id: instance identifier :return: bool - True if running, False otherwise """ instance = self._load_instance(instance_id) if instance.update() == "running": # If the instance is up&running, ensure it has an IP # address. if not instance.ip_address and self.request_floating_ip: log.debug("Public ip address has to be assigned through " "elasticluster.") self._allocate_address(instance) instance.update() return True else: return False
[ "def", "is_instance_running", "(", "self", ",", "instance_id", ")", ":", "instance", "=", "self", ".", "_load_instance", "(", "instance_id", ")", "if", "instance", ".", "update", "(", ")", "==", "\"running\"", ":", "# If the instance is up&running, ensure it has an IP", "# address.", "if", "not", "instance", ".", "ip_address", "and", "self", ".", "request_floating_ip", ":", "log", ".", "debug", "(", "\"Public ip address has to be assigned through \"", "\"elasticluster.\"", ")", "self", ".", "_allocate_address", "(", "instance", ")", "instance", ".", "update", "(", ")", "return", "True", "else", ":", "return", "False" ]
Checks if the instance is up and running. :param str instance_id: instance identifier :return: bool - True if running, False otherwise
[ "Checks", "if", "the", "instance", "is", "up", "and", "running", "." ]
python
train
35.85
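A polling helper built around is_instance_running, since the method reports only a single snapshot; the provider and instance id are placeholders for an already-configured elasticluster EC2 provider:

import time

def wait_until_running(provider, instance_id, interval=10, timeout=600):
    # Poll until the provider reports "running" or the timeout expires.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if provider.is_instance_running(instance_id):
            return True
        time.sleep(interval)
    return False

# wait_until_running(ec2_provider, 'i-0123456789abcdef0')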
fprimex/zdesk
zdesk/zdesk_api.py
https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L2603-L2607
def organization_subscription_delete(self, id, **kwargs): "https://developer.zendesk.com/rest_api/docs/core/organization_subscriptions#delete-organization-subscription" api_path = "/api/v2/organization_subscriptions/{id}.json" api_path = api_path.format(id=id) return self.call(api_path, method="DELETE", **kwargs)
[ "def", "organization_subscription_delete", "(", "self", ",", "id", ",", "*", "*", "kwargs", ")", ":", "api_path", "=", "\"/api/v2/organization_subscriptions/{id}.json\"", "api_path", "=", "api_path", ".", "format", "(", "id", "=", "id", ")", "return", "self", ".", "call", "(", "api_path", ",", "method", "=", "\"DELETE\"", ",", "*", "*", "kwargs", ")" ]
https://developer.zendesk.com/rest_api/docs/core/organization_subscriptions#delete-organization-subscription
[ "https", ":", "//", "developer", ".", "zendesk", ".", "com", "/", "rest_api", "/", "docs", "/", "core", "/", "organization_subscriptions#delete", "-", "organization", "-", "subscription" ]
python
train
68.4
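A usage sketch, assuming zdesk credentials; the subdomain, email, token, and subscription id are placeholders:

from zdesk import Zendesk

zd = Zendesk('https://example.zendesk.com', 'agent@example.com',
             'api-token-here', True)  # True -> the password is an API token
zd.organization_subscription_delete(12345)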
chimera0/accel-brain-code
Reinforcement-Learning/demo/demo_maze_multi_agent_deep_q_network.py
https://github.com/chimera0/accel-brain-code/blob/03661f6f544bed656269fcd4b3c23c9061629daa/Reinforcement-Learning/demo/demo_maze_multi_agent_deep_q_network.py#L73-L128
def inference(self, state_arr, limit=1000):
        '''
        Inference.

        Args:
            state_arr:    `np.ndarray` of state.
            limit:        The maximum number of inference steps.

        Returns:
            list of `np.ndarray` of an optimal route.
        '''
        self.__inferencing_flag = True

        agent_x, agent_y = np.where(state_arr[0] == 1)
        agent_x, agent_y = agent_x[0], agent_y[0]
        self.__create_enemy(self.__map_arr)
        result_list = [(agent_x, agent_y, 0.0)]
        result_val_list = [agent_x, agent_y]
        for e in range(self.__enemy_num):
            result_val_list.append(self.__enemy_pos_list[e][0])
            result_val_list.append(self.__enemy_pos_list[e][1])
        result_val_list.append(0.0)
        result_list.append(tuple(result_val_list))

        self.t = 0
        while self.t < limit:
            next_action_arr = self.extract_possible_actions(state_arr)
            next_q_arr = self.function_approximator.inference_q(next_action_arr)
            action_arr, q = self.select_action(next_action_arr, next_q_arr)
            self.__move_enemy(action_arr)

            agent_x, agent_y = np.where(action_arr[0] == 1)
            agent_x, agent_y = agent_x[0], agent_y[0]
            result_val_list = [agent_x, agent_y]
            for e in range(self.__enemy_num):
                result_val_list.append(self.__enemy_pos_list[e][0])
                result_val_list.append(self.__enemy_pos_list[e][1])
            try:
                result_val_list.append(q[0])
            except IndexError:
                result_val_list.append(q)
            result_list.append(tuple(result_val_list))

            # Update State.
            state_arr = self.update_state(state_arr, action_arr)

            # Episode.
            self.t += 1

            # Check.
            end_flag = self.check_the_end_flag(state_arr)
            if end_flag is True:
                break

        return result_list
[ "def", "inference", "(", "self", ",", "state_arr", ",", "limit", "=", "1000", ")", ":", "self", ".", "__inferencing_flag", "=", "True", "agent_x", ",", "agent_y", "=", "np", ".", "where", "(", "state_arr", "[", "0", "]", "==", "1", ")", "agent_x", ",", "agent_y", "=", "agent_x", "[", "0", "]", ",", "agent_y", "[", "0", "]", "self", ".", "__create_enemy", "(", "self", ".", "__map_arr", ")", "result_list", "=", "[", "(", "agent_x", ",", "agent_y", ",", "0.0", ")", "]", "result_val_list", "=", "[", "agent_x", ",", "agent_y", "]", "for", "e", "in", "range", "(", "self", ".", "__enemy_num", ")", ":", "result_val_list", ".", "append", "(", "self", ".", "__enemy_pos_list", "[", "e", "]", "[", "0", "]", ")", "result_val_list", ".", "append", "(", "self", ".", "__enemy_pos_list", "[", "e", "]", "[", "1", "]", ")", "result_val_list", ".", "append", "(", "0.0", ")", "result_list", ".", "append", "(", "tuple", "(", "result_val_list", ")", ")", "self", ".", "t", "=", "0", "while", "self", ".", "t", "<", "limit", ":", "next_action_arr", "=", "self", ".", "extract_possible_actions", "(", "state_arr", ")", "next_q_arr", "=", "self", ".", "function_approximator", ".", "inference_q", "(", "next_action_arr", ")", "action_arr", ",", "q", "=", "self", ".", "select_action", "(", "next_action_arr", ",", "next_q_arr", ")", "self", ".", "__move_enemy", "(", "action_arr", ")", "agent_x", ",", "agent_y", "=", "np", ".", "where", "(", "action_arr", "[", "0", "]", "==", "1", ")", "agent_x", ",", "agent_y", "=", "agent_x", "[", "0", "]", ",", "agent_y", "[", "0", "]", "result_val_list", "=", "[", "agent_x", ",", "agent_y", "]", "for", "e", "in", "range", "(", "self", ".", "__enemy_num", ")", ":", "result_val_list", ".", "append", "(", "self", ".", "__enemy_pos_list", "[", "e", "]", "[", "0", "]", ")", "result_val_list", ".", "append", "(", "self", ".", "__enemy_pos_list", "[", "e", "]", "[", "1", "]", ")", "try", ":", "result_val_list", ".", "append", "(", "q", "[", "0", "]", ")", "except", "IndexError", ":", "result_val_list", ".", "append", "(", "q", ")", "result_list", ".", "append", "(", "tuple", "(", "result_val_list", ")", ")", "# Update State.", "state_arr", "=", "self", ".", "update_state", "(", "state_arr", ",", "action_arr", ")", "# Epsode.", "self", ".", "t", "+=", "1", "# Check.", "end_flag", "=", "self", ".", "check_the_end_flag", "(", "state_arr", ")", "if", "end_flag", "is", "True", ":", "break", "return", "result_list" ]
Inference.

Args:
    state_arr:    `np.ndarray` of state.
    limit:        The maximum number of inference steps.

Returns:
    list of `np.ndarray` of an optimal route.
[ "Infernce", ".", "Args", ":", "state_arr", ":", "np", ".", "ndarray", "of", "state", ".", "limit", ":", "The", "number", "of", "inferencing", ".", "Returns", ":", "list", "of", "np", ".", "ndarray", "of", "an", "optimal", "route", "." ]
python
train
34.785714
Jammy2211/PyAutoLens
autolens/data/array/grids.py
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/data/array/grids.py#L614-L629
def from_mask_and_sub_grid_size(cls, mask, sub_grid_size=1):
    """Setup a sub-grid of the unmasked pixels, using a mask and a specified sub-grid size. The center of \
    every unmasked pixel's sub-pixels give the grid's (y,x) arc-second coordinates.

    Parameters
    -----------
    mask : Mask
        The mask whose masked pixels are used to setup the sub-pixel grid_stack.
    sub_grid_size : int
        The size (sub_grid_size x sub_grid_size) of each unmasked pixel's sub-grid.
    """
    sub_grid_masked = grid_util.sub_grid_1d_masked_from_mask_pixel_scales_and_sub_grid_size(
        mask=mask, pixel_scales=mask.pixel_scales, sub_grid_size=sub_grid_size)

    return SubGrid(sub_grid_masked, mask, sub_grid_size)
[ "def", "from_mask_and_sub_grid_size", "(", "cls", ",", "mask", ",", "sub_grid_size", "=", "1", ")", ":", "sub_grid_masked", "=", "grid_util", ".", "sub_grid_1d_masked_from_mask_pixel_scales_and_sub_grid_size", "(", "mask", "=", "mask", ",", "pixel_scales", "=", "mask", ".", "pixel_scales", ",", "sub_grid_size", "=", "sub_grid_size", ")", "return", "SubGrid", "(", "sub_grid_masked", ",", "mask", ",", "sub_grid_size", ")" ]
Setup a sub-grid of the unmasked pixels, using a mask and a specified sub-grid size. The center of \
every unmasked pixel's sub-pixels give the grid's (y,x) arc-second coordinates.

Parameters
-----------
mask : Mask
    The mask whose masked pixels are used to setup the sub-pixel grid_stack.
sub_grid_size : int
    The size (sub_grid_size x sub_grid_size) of each unmasked pixel's sub-grid.
[ "Setup", "a", "sub", "-", "grid", "of", "the", "unmasked", "pixels", "using", "a", "mask", "and", "a", "specified", "sub", "-", "grid", "size", ".", "The", "center", "of", "\\", "every", "unmasked", "pixel", "s", "sub", "-", "pixels", "give", "the", "grid", "s", "(", "y", "x", ")", "arc", "-", "second", "coordinates", "." ]
python
valid
48.9375
bitesofcode/projex
projex/security.py
https://github.com/bitesofcode/projex/blob/d31743ec456a41428709968ab11a2cf6c6c76247/projex/security.py#L213-L225
def generateToken(bits=32): """ Generates a random token based on the given parameters. :return <str> """ if bits == 64: hasher = hashlib.sha256 elif bits == 32: hasher = hashlib.md5 else: raise StandardError('Unknown bit level.') return hasher(nstr(random.getrandbits(256))).hexdigest()
[ "def", "generateToken", "(", "bits", "=", "32", ")", ":", "if", "bits", "==", "64", ":", "hasher", "=", "hashlib", ".", "sha256", "elif", "bits", "==", "32", ":", "hasher", "=", "hashlib", ".", "md5", "else", ":", "raise", "StandardError", "(", "'Unknown bit level.'", ")", "return", "hasher", "(", "nstr", "(", "random", ".", "getrandbits", "(", "256", ")", ")", ")", ".", "hexdigest", "(", ")" ]
Generates a random token based on the given parameters. :return <str>
[ "Generates", "a", "random", "token", "based", "on", "the", "given", "parameters", ".", ":", "return", "<str", ">" ]
python
train
26.153846
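A usage sketch. Note the Python 2 flavor of the original: StandardError does not exist on Python 3, so only the 32- and 64-bit branches work there:

token32 = generateToken()          # md5 -> 32 hex characters
token64 = generateToken(bits=64)   # sha256 -> 64 hex characters
print(len(token32), len(token64))  # -> 32 64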
CityOfZion/neo-python
neo/Network/NodeLeader.py
https://github.com/CityOfZion/neo-python/blob/fe90f62e123d720d4281c79af0598d9df9e776fb/neo/Network/NodeLeader.py#L644-L657
def MempoolCheck(self): """ Checks the Mempool and removes any tx found on the Blockchain Implemented to resolve https://github.com/CityOfZion/neo-python/issues/703 """ txs = [] values = self.MemPool.values() for tx in values: txs.append(tx) for tx in txs: res = self.RemoveTransaction(tx) if res: logger.debug("found tx 0x%s on the blockchain ...removed from mempool" % tx.Hash)
[ "def", "MempoolCheck", "(", "self", ")", ":", "txs", "=", "[", "]", "values", "=", "self", ".", "MemPool", ".", "values", "(", ")", "for", "tx", "in", "values", ":", "txs", ".", "append", "(", "tx", ")", "for", "tx", "in", "txs", ":", "res", "=", "self", ".", "RemoveTransaction", "(", "tx", ")", "if", "res", ":", "logger", ".", "debug", "(", "\"found tx 0x%s on the blockchain ...removed from mempool\"", "%", "tx", ".", "Hash", ")" ]
Checks the Mempool and removes any tx found on the Blockchain Implemented to resolve https://github.com/CityOfZion/neo-python/issues/703
[ "Checks", "the", "Mempool", "and", "removes", "any", "tx", "found", "on", "the", "Blockchain", "Implemented", "to", "resolve", "https", ":", "//", "github", ".", "com", "/", "CityOfZion", "/", "neo", "-", "python", "/", "issues", "/", "703" ]
python
train
34.5
trevisanj/a99
a99/textinterface.py
https://github.com/trevisanj/a99/blob/193e6e3c9b3e4f4a0ba7eb3eece846fe7045c539/a99/textinterface.py#L65-L85
def format_h2(s, format="text", indents=0): """ Encloses string in format text Args, Returns: see format_h1() >>> print("\\n".join(format_h2("Header 2", indents=2))) Header 2 -------- >>> print("\\n".join(format_h2("Header 2", "markdown", 2))) ## Header 2 """ _CHAR = "-" if format.startswith("text"): return format_underline(s, _CHAR, indents) elif format.startswith("markdown"): return ["## {}".format(s)] elif format.startswith("rest"): return format_underline(s, _CHAR, 0)
[ "def", "format_h2", "(", "s", ",", "format", "=", "\"text\"", ",", "indents", "=", "0", ")", ":", "_CHAR", "=", "\"-\"", "if", "format", ".", "startswith", "(", "\"text\"", ")", ":", "return", "format_underline", "(", "s", ",", "_CHAR", ",", "indents", ")", "elif", "format", ".", "startswith", "(", "\"markdown\"", ")", ":", "return", "[", "\"## {}\"", ".", "format", "(", "s", ")", "]", "elif", "format", ".", "startswith", "(", "\"rest\"", ")", ":", "return", "format_underline", "(", "s", ",", "_CHAR", ",", "0", ")" ]
Encloses string in format text Args, Returns: see format_h1() >>> print("\\n".join(format_h2("Header 2", indents=2))) Header 2 -------- >>> print("\\n".join(format_h2("Header 2", "markdown", 2))) ## Header 2
[ "Encloses", "string", "in", "format", "text", "Args", "Returns", ":", "see", "format_h1", "()", ">>>", "print", "(", "\\\\", "n", ".", "join", "(", "format_h2", "(", "Header", "2", "indents", "=", "2", ")))", "Header", "2", "--------", ">>>", "print", "(", "\\\\", "n", ".", "join", "(", "format_h2", "(", "Header", "2", "markdown", "2", ")))", "##", "Header", "2" ]
python
train
26.666667
markreidvfx/pyaaf2
aaf2/mobid.py
https://github.com/markreidvfx/pyaaf2/blob/37de8c10d3c3495cc00c705eb6c5048bc4a7e51f/aaf2/mobid.py#L298-L315
def to_dict(self): """ MobID representation as dict """ material = {'Data1': self.Data1, 'Data2': self.Data2, 'Data3': self.Data3, 'Data4': list(self.Data4) } return {'material':material, 'length': self.length, 'instanceHigh': self.instanceHigh, 'instanceMid': self.instanceMid, 'instanceLow': self.instanceLow, 'SMPTELabel': list(self.SMPTELabel) }
[ "def", "to_dict", "(", "self", ")", ":", "material", "=", "{", "'Data1'", ":", "self", ".", "Data1", ",", "'Data2'", ":", "self", ".", "Data2", ",", "'Data3'", ":", "self", ".", "Data3", ",", "'Data4'", ":", "list", "(", "self", ".", "Data4", ")", "}", "return", "{", "'material'", ":", "material", ",", "'length'", ":", "self", ".", "length", ",", "'instanceHigh'", ":", "self", ".", "instanceHigh", ",", "'instanceMid'", ":", "self", ".", "instanceMid", ",", "'instanceLow'", ":", "self", ".", "instanceLow", ",", "'SMPTELabel'", ":", "list", "(", "self", ".", "SMPTELabel", ")", "}" ]
MobID representation as dict
[ "MobID", "representation", "as", "dict" ]
python
train
30.555556
neurodata/ndio
ndio/convert/volume.py
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/convert/volume.py#L20-L41
def from_voxels(voxels):
    """
    Converts a voxel list to an ndarray.

    Arguments:
        voxels (tuple[]): A list of coordinates indicating
            coordinates of populated voxels in an ndarray.

    Returns:
        numpy.ndarray The result of the transformation.
    """
    dimensions = len(voxels[0])
    size = []
    for d in range(dimensions):
        # Array extents are one larger than the largest index used.
        size.append(max([i[d] for i in voxels]) + 1)

    result = numpy.zeros(size)
    for v in voxels:
        result[tuple(v)] = 1
    return result
[ "def", "from_voxels", "(", "voxels", ")", ":", "dimensions", "=", "len", "(", "voxels", "[", "0", "]", ")", "for", "d", "in", "range", "(", "len", "(", "dimensions", ")", ")", ":", "size", ".", "append", "(", "max", "(", "[", "i", "[", "d", "]", "for", "i", "in", "voxels", "]", ")", ")", "result", "=", "numpy", ".", "zeros", "(", "dimensions", ")", "for", "v", "in", "voxels", ":", "result", "[", "v", "]", "=", "1", "return", "result" ]
Converts a voxel list to an ndarray. Arguments: voxels (tuple[]): A list of coordinates indicating coordinates of populated voxels in an ndarray. Returns: numpy.ndarray The result of the transformation.
[ "Converts", "a", "voxel", "list", "to", "an", "ndarray", "." ]
python
test
22
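A quick check of the repaired function above: three populated voxels in a 3-D volume come back as ones in a dense array (numpy is assumed imported as in the module):

import numpy

voxels = [(0, 0, 0), (1, 2, 1), (2, 1, 0)]
volume = from_voxels(voxels)
print(volume.shape)     # -> (3, 3, 2)
print(volume[1, 2, 1])  # -> 1.0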
lobocv/crashreporter
crashreporter/process.py
https://github.com/lobocv/crashreporter/blob/a5bbb3f37977dc64bc865dfedafc365fd5469ef8/crashreporter/process.py#L11-L20
def enable_mp_crash_reporting():
    """
    Monkey-patch the multiprocessing.Process class with our own CrashReportingProcess.
    Any subsequent imports of multiprocessing.Process will reference CrashReportingProcess instead.

    This function must be called before any imports to multiprocessing in order for the monkey-patching
    to work.
    """
    global mp_crash_reporting_enabled
    multiprocessing.Process = multiprocessing.process.Process = CrashReportingProcess
    mp_crash_reporting_enabled = True
[ "def", "enable_mp_crash_reporting", "(", ")", ":", "global", "mp_crash_reporting_enabled", "multiprocessing", ".", "Process", "=", "multiprocessing", ".", "process", ".", "Process", "=", "CrashReportingProcess", "mp_crash_reporting_enabled", "=", "True" ]
Monkey-patch the multiprocessing.Process class with our own CrashReportingProcess.
Any subsequent imports of multiprocessing.Process will reference CrashReportingProcess instead.

This function must be called before any imports to multiprocessing in order for the monkey-patching
to work.
[ "Monkey", "-", "patch", "the", "multiprocessing", ".", "Process", "class", "with", "our", "own", "CrashReportingProcess", ".", "Any", "subsequent", "imports", "of", "multiprocessing", ".", "Process", "will", "reference", "CrashReportingProcess", "instead", "." ]
python
train
50.2
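An ordering sketch: the patch has to land before user code imports multiprocessing, so it goes at the very top of the entry point. The import path follows the module layout shown above:

from crashreporter.process import enable_mp_crash_reporting
enable_mp_crash_reporting()   # patch first...

import multiprocessing        # ...then import; Process is now the patched class

p = multiprocessing.Process(target=print, args=('hello',))
p.start()
p.join()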
baztian/jaydebeapi
jaydebeapi/__init__.py
https://github.com/baztian/jaydebeapi/blob/e99a05d5a84e9aa37ff0bac00bd5591336f54402/jaydebeapi/__init__.py#L636-L645
def _init_converters(types_map): """Prepares the converters for conversion of java types to python objects. types_map: Mapping of java.sql.Types field name to java.sql.Types field constant value""" global _converters _converters = {} for i in _DEFAULT_CONVERTERS: const_val = types_map[i] _converters[const_val] = _DEFAULT_CONVERTERS[i]
[ "def", "_init_converters", "(", "types_map", ")", ":", "global", "_converters", "_converters", "=", "{", "}", "for", "i", "in", "_DEFAULT_CONVERTERS", ":", "const_val", "=", "types_map", "[", "i", "]", "_converters", "[", "const_val", "]", "=", "_DEFAULT_CONVERTERS", "[", "i", "]" ]
Prepares the converters for conversion of java types to python objects. types_map: Mapping of java.sql.Types field name to java.sql.Types field constant value
[ "Prepares", "the", "converters", "for", "conversion", "of", "java", "types", "to", "python", "objects", ".", "types_map", ":", "Mapping", "of", "java", ".", "sql", ".", "Types", "field", "name", "to", "java", ".", "sql", ".", "Types", "field", "constant", "value" ]
python
train
37.1
nicolargo/glances
glances/exports/glances_restful.py
https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/exports/glances_restful.py#L70-L82
def export(self, name, columns, points):
    """Export the stats to the RESTful endpoint."""
    if name == self.plugins_to_export()[0] and self.buffer != {}:
        # One complete loop has been done
        logger.debug("Export stats ({}) to RESTful endpoint ({})".format(listkeys(self.buffer),
                                                                         self.client))
        # Export stats
        post(self.client, json=self.buffer, allow_redirects=True)
        # Reset buffer
        self.buffer = {}

    # Add current stat to the buffer
    self.buffer[name] = dict(zip(columns, points))
[ "def", "export", "(", "self", ",", "name", ",", "columns", ",", "points", ")", ":", "if", "name", "==", "self", ".", "plugins_to_export", "(", ")", "[", "0", "]", "and", "self", ".", "buffer", "!=", "{", "}", ":", "# One complete loop have been done", "logger", ".", "debug", "(", "\"Export stats ({}) to RESTful endpoint ({})\"", ".", "format", "(", "listkeys", "(", "self", ".", "buffer", ")", ",", "self", ".", "client", ")", ")", "# Export stats", "post", "(", "self", ".", "client", ",", "json", "=", "self", ".", "buffer", ",", "allow_redirects", "=", "True", ")", "# Reset buffer", "self", ".", "buffer", "=", "{", "}", "# Add current stat to the buffer", "self", ".", "buffer", "[", "name", "]", "=", "dict", "(", "zip", "(", "columns", ",", "points", ")", ")" ]
Export the stats to the RESTful endpoint.
[ "Export", "the", "stats", "to", "the", "Statsd", "server", "." ]
python
train
49.153846
scanny/python-pptx
pptx/chart/xlsx.py
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/chart/xlsx.py#L211-L218
def x_values_ref(self, series): """ The Excel worksheet reference to the X values for this chart (not including the column label). """ top_row = self.series_table_row_offset(series) + 2 bottom_row = top_row + len(series) - 1 return "Sheet1!$A$%d:$A$%d" % (top_row, bottom_row)
[ "def", "x_values_ref", "(", "self", ",", "series", ")", ":", "top_row", "=", "self", ".", "series_table_row_offset", "(", "series", ")", "+", "2", "bottom_row", "=", "top_row", "+", "len", "(", "series", ")", "-", "1", "return", "\"Sheet1!$A$%d:$A$%d\"", "%", "(", "top_row", ",", "bottom_row", ")" ]
The Excel worksheet reference to the X values for this chart (not including the column label).
[ "The", "Excel", "worksheet", "reference", "to", "the", "X", "values", "for", "this", "chart", "(", "not", "including", "the", "column", "label", ")", "." ]
python
train
40.625
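A worked sketch of the reference arithmetic, assuming a series whose rows start at worksheet offset 0: row 1 holds the column label, so the data starts on row 2 and a 5-point series maps to $A$2:$A$6:

top_row = 0 + 2                # series_table_row_offset(series) + 2
bottom_row = top_row + 5 - 1   # len(series) == 5
print("Sheet1!$A$%d:$A$%d" % (top_row, bottom_row))  # -> Sheet1!$A$2:$A$6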
Azure/azure-event-hubs-python
azure/eventprocessorhost/partition_manager.py
https://github.com/Azure/azure-event-hubs-python/blob/737c5f966557ada2cf10fa0d8f3c19671ae96348/azure/eventprocessorhost/partition_manager.py#L71-L84
async def run_async(self): """ Starts the run loop and manages exceptions and cleanup. """ try: await self.run_loop_async() except Exception as err: # pylint: disable=broad-except _logger.error("Run loop failed %r", err) try: _logger.info("Shutting down all pumps %r", self.host.guid) await self.remove_all_pumps_async("Shutdown") except Exception as err: # pylint: disable=broad-except raise Exception("Failed to remove all pumps {!r}".format(err))
[ "async", "def", "run_async", "(", "self", ")", ":", "try", ":", "await", "self", ".", "run_loop_async", "(", ")", "except", "Exception", "as", "err", ":", "# pylint: disable=broad-except", "_logger", ".", "error", "(", "\"Run loop failed %r\"", ",", "err", ")", "try", ":", "_logger", ".", "info", "(", "\"Shutting down all pumps %r\"", ",", "self", ".", "host", ".", "guid", ")", "await", "self", ".", "remove_all_pumps_async", "(", "\"Shutdown\"", ")", "except", "Exception", "as", "err", ":", "# pylint: disable=broad-except", "raise", "Exception", "(", "\"Failed to remove all pumps {!r}\"", ".", "format", "(", "err", ")", ")" ]
Starts the run loop and manages exceptions and cleanup.
[ "Starts", "the", "run", "loop", "and", "manages", "exceptions", "and", "cleanup", "." ]
python
train
39.642857
sci-bots/pygtkhelpers
pygtkhelpers/ui/dialogs.py
https://github.com/sci-bots/pygtkhelpers/blob/3a6e6d6340221c686229cd1c951d7537dae81b07/pygtkhelpers/ui/dialogs.py#L363-L427
def animation_dialog(images, delay_s=1., loop=True, **kwargs): ''' .. versionadded:: v0.19 Parameters ---------- images : list Filepaths to images or :class:`gtk.Pixbuf` instances. delay_s : float, optional Number of seconds to display each frame. Default: ``1.0``. loop : bool, optional If ``True``, restart animation after last image has been displayed. Default: ``True``. Returns ------- gtk.MessageDialog Message dialog with animation displayed in `gtk.Image` widget when dialog is run. ''' def _as_pixbuf(image): if isinstance(image, types.StringTypes): return gtk.gdk.pixbuf_new_from_file(image) else: return image pixbufs = map(_as_pixbuf, images) # Need this to support background thread execution with GTK. gtk.gdk.threads_init() dialog = gtk.MessageDialog(**kwargs) # Append image to dialog content area. image = gtk.Image() content_area = dialog.get_content_area() content_area.pack_start(image) content_area.show_all() stop_animation = threading.Event() def _stop_animation(*args): stop_animation.set() def _animate(dialog): def __animate(): if loop: frames = it.cycle(pixbufs) else: frames = pixbufs for pixbuf_i in frames: gobject.idle_add(image.set_from_pixbuf, pixbuf_i) if stop_animation.wait(delay_s): break thread = threading.Thread(target=__animate) thread.daemon = True thread.start() dialog.connect('destroy', _stop_animation) dialog.connect('show', _animate) return dialog
[ "def", "animation_dialog", "(", "images", ",", "delay_s", "=", "1.", ",", "loop", "=", "True", ",", "*", "*", "kwargs", ")", ":", "def", "_as_pixbuf", "(", "image", ")", ":", "if", "isinstance", "(", "image", ",", "types", ".", "StringTypes", ")", ":", "return", "gtk", ".", "gdk", ".", "pixbuf_new_from_file", "(", "image", ")", "else", ":", "return", "image", "pixbufs", "=", "map", "(", "_as_pixbuf", ",", "images", ")", "# Need this to support background thread execution with GTK.", "gtk", ".", "gdk", ".", "threads_init", "(", ")", "dialog", "=", "gtk", ".", "MessageDialog", "(", "*", "*", "kwargs", ")", "# Append image to dialog content area.", "image", "=", "gtk", ".", "Image", "(", ")", "content_area", "=", "dialog", ".", "get_content_area", "(", ")", "content_area", ".", "pack_start", "(", "image", ")", "content_area", ".", "show_all", "(", ")", "stop_animation", "=", "threading", ".", "Event", "(", ")", "def", "_stop_animation", "(", "*", "args", ")", ":", "stop_animation", ".", "set", "(", ")", "def", "_animate", "(", "dialog", ")", ":", "def", "__animate", "(", ")", ":", "if", "loop", ":", "frames", "=", "it", ".", "cycle", "(", "pixbufs", ")", "else", ":", "frames", "=", "pixbufs", "for", "pixbuf_i", "in", "frames", ":", "gobject", ".", "idle_add", "(", "image", ".", "set_from_pixbuf", ",", "pixbuf_i", ")", "if", "stop_animation", ".", "wait", "(", "delay_s", ")", ":", "break", "thread", "=", "threading", ".", "Thread", "(", "target", "=", "__animate", ")", "thread", ".", "daemon", "=", "True", "thread", ".", "start", "(", ")", "dialog", ".", "connect", "(", "'destroy'", ",", "_stop_animation", ")", "dialog", ".", "connect", "(", "'show'", ",", "_animate", ")", "return", "dialog" ]
.. versionadded:: v0.19 Parameters ---------- images : list Filepaths to images or :class:`gtk.Pixbuf` instances. delay_s : float, optional Number of seconds to display each frame. Default: ``1.0``. loop : bool, optional If ``True``, restart animation after last image has been displayed. Default: ``True``. Returns ------- gtk.MessageDialog Message dialog with animation displayed in `gtk.Image` widget when dialog is run.
[ "..", "versionadded", "::", "v0", ".", "19" ]
python
train
26.276923
mattharrison/rst2odp
odplib/preso.py
https://github.com/mattharrison/rst2odp/blob/4adbf29b28c8207ec882f792ded07e98b1d3e7d0/odplib/preso.py#L1282-L1293
def parent_of(self, name): """ go to parent of node with name, and set as cur_node. Useful for creating new paragraphs """ if not self._in_tag(name): return node = self.cur_node while node.tag != name: node = node.getparent() self.cur_node = node.getparent()
[ "def", "parent_of", "(", "self", ",", "name", ")", ":", "if", "not", "self", ".", "_in_tag", "(", "name", ")", ":", "return", "node", "=", "self", ".", "cur_node", "while", "node", ".", "tag", "!=", "name", ":", "node", "=", "node", ".", "getparent", "(", ")", "self", ".", "cur_node", "=", "node", ".", "getparent", "(", ")" ]
go to parent of node with name, and set as cur_node. Useful for creating new paragraphs
[ "go", "to", "parent", "of", "node", "with", "name", "and", "set", "as", "cur_node", ".", "Useful", "for", "creating", "new", "paragraphs" ]
python
train
28