Dataset columns:

  repo              string, 7 to 54 chars
  path              string, 4 to 192 chars
  url               string, 87 to 284 chars
  code              string, 78 to 104k chars
  code_tokens       sequence of token strings
  docstring         string, 1 to 46.9k chars
  docstring_tokens  sequence of token strings
  language          1 class (python)
  partition         3 classes (train / valid / test)
django-leonardo/django-leonardo
leonardo/utils/settings.py
https://github.com/django-leonardo/django-leonardo/blob/4b933e1792221a13b4028753d5f1d3499b0816d4/leonardo/utils/settings.py#L121-L141
def _get_correct_module(mod):
    """Returns the imported module.

    Checks whether ``leonardo_module_conf`` is specified and, if so,
    imports and returns it.
    """
    module_location = getattr(
        mod,
        'leonardo_module_conf',
        getattr(mod, "LEONARDO_MODULE_CONF", None))

    if module_location:
        mod = import_module(module_location)
    elif hasattr(mod, 'default_app_config'):
        # use django behavior
        mod_path, _, cls_name = mod.default_app_config.rpartition('.')
        _mod = import_module(mod_path)
        config_class = getattr(_mod, cls_name)
        # check if it is leonardo config compliant
        if _is_leonardo_module(config_class):
            mod = config_class

    return mod
Returns the imported module. Checks whether ``leonardo_module_conf`` is specified and, if so, imports and returns it.
python
train
python-diamond/Diamond
src/collectors/ipmisensor/ipmisensor.py
https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/collectors/ipmisensor/ipmisensor.py#L36-L49
def get_default_config(self):
    """
    Returns the default collector settings
    """
    config = super(IPMISensorCollector, self).get_default_config()
    config.update({
        'bin': '/usr/bin/ipmitool',
        'use_sudo': False,
        'sudo_cmd': '/usr/bin/sudo',
        'path': 'ipmi.sensors',
        'thresholds': False,
        'delimiter': '.'
    })
    return config
Returns the default collector settings
python
train
secdev/scapy
scapy/fields.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/fields.py#L301-L312
def _find_fld_pkt_val(self, pkt, val):
    """Given a Packet instance `pkt` and the value `val` to be set,
    returns the Field subclass to be used, and the updated `val` if necessary.
    """
    fld = self._iterate_fields_cond(pkt, val, True)
    # Default? (in this case, let's make sure it's up-to-date)
    dflts_pkt = pkt.default_fields
    if val == dflts_pkt[self.name] and self.name not in pkt.fields:
        dflts_pkt[self.name] = fld.default
        val = fld.default
    return fld, val
Given a Packet instance `pkt` and the value `val` to be set, returns the Field subclass to be used, and the updated `val` if necessary.
python
train
LonamiWebs/Telethon
telethon/network/mtprotosender.py
https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon/network/mtprotosender.py#L552-L563
async def _handle_pong(self, message):
    """
    Handles pong results, which don't come inside a ``rpc_result``
    but are still sent through a request:

        pong#347773c5 msg_id:long ping_id:long = Pong;
    """
    pong = message.obj
    self._log.debug('Handling pong for message %d', pong.msg_id)
    state = self._pending_state.pop(pong.msg_id, None)
    if state:
        state.future.set_result(pong)
Handles pong results, which don't come inside a ``rpc_result`` but are still sent through a request: pong#347773c5 msg_id:long ping_id:long = Pong;
python
train
HDI-Project/MLBlocks
mlblocks/mlpipeline.py
https://github.com/HDI-Project/MLBlocks/blob/e1ca77bce3c4537c0800a4c1395e1b6bbde5465d/mlblocks/mlpipeline.py#L369-L385
def load(cls, path):
    """Create a new MLPipeline from a JSON specification.

    The JSON file format is the same as the one created by the `to_dict` method.

    Args:
        path (str): Path of the JSON file to load.

    Returns:
        MLPipeline:
            A new MLPipeline instance with the specification found
            in the JSON file.
    """
    with open(path, 'r') as in_file:
        metadata = json.load(in_file)

    return cls.from_dict(metadata)
Create a new MLPipeline from a JSON specification. The JSON file format is the same as the one created by the `to_dict` method. Args: path (str): Path of the JSON file to load. Returns: MLPipeline: A new MLPipeline instance with the specification found in the JSON file.
python
train
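A minimal round-trip sketch for the loader above; the primitive name, the 'primitives' dict key, and the file path are illustrative placeholders, not part of the source:

import json
from mlblocks import MLPipeline

# Build a pipeline from a dict spec, save it in to_dict format,
# then restore it with the classmethod shown above.
pipeline = MLPipeline.from_dict({'primitives': ['sklearn.ensemble.RandomForestClassifier']})
with open('pipeline.json', 'w') as out_file:
    json.dump(pipeline.to_dict(), out_file)

restored = MLPipeline.load('pipeline.json')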
PythonCharmers/python-future
src/future/backports/xmlrpc/server.py
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/xmlrpc/server.py#L311-L340
def system_methodHelp(self, method_name):
    """system.methodHelp('add') => "Adds two integers together"

    Returns a string containing documentation for the specified method."""

    method = None
    if method_name in self.funcs:
        method = self.funcs[method_name]
    elif self.instance is not None:
        # Instance can implement _methodHelp to return help for a method
        if hasattr(self.instance, '_methodHelp'):
            return self.instance._methodHelp(method_name)
        # if the instance has a _dispatch method then we
        # don't have enough information to provide help
        elif not hasattr(self.instance, '_dispatch'):
            try:
                method = resolve_dotted_attribute(
                    self.instance,
                    method_name,
                    self.allow_dotted_names
                )
            except AttributeError:
                pass

    # Note that we aren't checking that the method actually
    # be a callable object of some kind
    if method is None:
        return ""
    else:
        return pydoc.getdoc(method)
system.methodHelp('add') => "Adds two integers together" Returns a string containing documentation for the specified method.
python
train
taxjar/taxjar-python
taxjar/client.py
https://github.com/taxjar/taxjar-python/blob/be9b30d7dc968d24e066c7c133849fee180f8d95/taxjar/client.py#L41-L44
def list_orders(self, params=None):
    """Lists existing order transactions."""
    request = self._get('transactions/orders', params)
    return self.responder(request)
Lists existing order transactions.
python
train
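Usage is a thin wrapper around the client; a hedged sketch, assuming a taxjar.Client constructed from an API key (the key and date filters below are placeholders):

import taxjar

client = taxjar.Client(api_key='YOUR_API_KEY')
# Optional filter params are passed straight through to the
# transactions/orders endpoint shown above.
orders = client.list_orders({'from_transaction_date': '2019-01-01',
                             'to_transaction_date': '2019-01-31'})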
hydraplatform/hydra-base
hydra_base/lib/attributes.py
https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/lib/attributes.py#L791-L806
def get_network_attributegroup_items(network_id, **kwargs):
    """
    Get all the group items in a network
    """
    user_id = kwargs.get('user_id')

    net_i = _get_network(network_id)
    net_i.check_read_permission(user_id)

    group_items_i = db.DBSession.query(AttrGroupItem).filter(
        AttrGroupItem.network_id == network_id).all()

    return group_items_i
Get all the group items in a network
python
train
numenta/nupic
examples/opf/tools/sp_plotter.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/examples/opf/tools/sp_plotter.py#L138-L162
def appendInputWithNSimilarValues(inputs, numNear=10):
    """ Creates a neighboring record for each record in the inputs and adds
    new records at the end of the inputs list
    """
    numInputs = len(inputs)
    skipOne = False
    for i in xrange(numInputs):
        input = inputs[i]
        numChanged = 0
        newInput = copy.deepcopy(input)
        for j in xrange(len(input) - 1):
            if skipOne:
                skipOne = False
                continue
            if input[j] == 1 and input[j + 1] == 0:
                newInput[j] = 0
                newInput[j + 1] = 1
                inputs.append(newInput)
                newInput = copy.deepcopy(newInput)
                numChanged += 1
                skipOne = True
                if numChanged == numNear:
                    break
Creates a neighboring record for each record in the inputs and adds new records at the end of the inputs list
python
valid
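Tracing the function on a small record clarifies the in-place behaviour: every 1 followed by a 0 is shifted one slot right, each shift building on the previous copy. The trace below assumes Python 2 (the function uses xrange) and the module's copy import:

inputs = [[1, 0, 0, 1, 0]]
appendInputWithNSimilarValues(inputs, numNear=10)
# First the leading 1 moves right, then the second 1 moves in that copy:
assert inputs == [[1, 0, 0, 1, 0],
                  [0, 1, 0, 1, 0],
                  [0, 1, 0, 0, 1]]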
quantumlib/Cirq
cirq/google/engine/engine.py
https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/google/engine/engine.py#L585-L600
def results(self) -> List[TrialResult]:
    """Returns the job results, blocking until the job is complete."""
    if not self._results:
        job = self._update_job()
        for _ in range(1000):
            if job['executionStatus']['state'] in TERMINAL_STATES:
                break
            time.sleep(0.5)
            job = self._update_job()
        if job['executionStatus']['state'] != 'SUCCESS':
            raise RuntimeError(
                'Job %s did not succeed. It is in state %s.' % (
                    job['name'], job['executionStatus']['state']))
        self._results = self._engine.get_job_results(
            self.job_resource_name)
    return self._results
Returns the job results, blocking until the job is complete.
python
train
gawel/panoramisk
panoramisk/manager.py
https://github.com/gawel/panoramisk/blob/2ccb5d18be28a8e8f444dc0cd3a3bfb59aa19a8e/panoramisk/manager.py#L184-L195
def connect(self):
    """connect to the server"""
    if self.loop is None:  # pragma: no cover
        self.loop = asyncio.get_event_loop()
    t = asyncio.Task(
        self.loop.create_connection(
            self.config['protocol_factory'],
            self.config['host'], self.config['port'],
            ssl=self.config['ssl']),
        loop=self.loop)
    t.add_done_callback(self.connection_made)
    return t
connect to the server
python
test
beelit94/python-terraform
python_terraform/__init__.py
https://github.com/beelit94/python-terraform/blob/99950cb03c37abadb0d7e136452e43f4f17dd4e1/python_terraform/__init__.py#L366-L388
def read_state_file(self, file_path=None):
    """
    read .tfstate file
    :param file_path: relative path to working dir
    :return: states file in dict type
    """
    working_dir = self.working_dir or ''

    file_path = file_path or self.state or ''

    if not file_path:
        backend_path = os.path.join(file_path, '.terraform', 'terraform.tfstate')

        if os.path.exists(os.path.join(working_dir, backend_path)):
            file_path = backend_path
        else:
            file_path = os.path.join(file_path, 'terraform.tfstate')

    file_path = os.path.join(working_dir, file_path)

    self.tfstate = Tfstate.load_file(file_path)
read .tfstate file :param file_path: relative path to working dir :return: states file in dict type
python
train
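A usage sketch for the reader above, assuming a Terraform wrapper bound to a working directory (the path is a placeholder):

from python_terraform import Terraform

tf = Terraform(working_dir='/path/to/project')
tf.read_state_file()   # falls back to .terraform/terraform.tfstate, then ./terraform.tfstate
state = tf.tfstate     # populated via Tfstate.load_file, as shown above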
madsbk/lrcloud
lrcloud/util.py
https://github.com/madsbk/lrcloud/blob/8d99be3e1abdf941642e9a1c86b7d775dc373c0b/lrcloud/util.py#L56-L75
def apply_changesets(args, changesets, catalog):
    """Apply to the 'catalog' the changesets in the metafile list 'changesets'"""

    tmpdir = tempfile.mkdtemp()
    tmp_patch = join(tmpdir, "tmp.patch")
    tmp_lcat = join(tmpdir, "tmp.lcat")

    for node in changesets:
        remove(tmp_patch)
        copy(node.mfile['changeset']['filename'], tmp_patch)

        logging.info("mv %s %s" % (catalog, tmp_lcat))
        shutil.move(catalog, tmp_lcat)

        cmd = args.patch_cmd.replace("$in1", tmp_lcat)\
                            .replace("$patch", tmp_patch)\
                            .replace("$out", catalog)
        logging.info("Patch: %s" % cmd)
        subprocess.check_call(cmd, shell=True)

    shutil.rmtree(tmpdir, ignore_errors=True)
Apply to the 'catalog' the changesets in the metafile list 'changesets'
python
valid
DataONEorg/d1_python
client_onedrive/src/d1_onedrive/impl/object_tree.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/client_onedrive/src/d1_onedrive/impl/object_tree.py#L72-L81
def get_object_record(self, pid):
    """Get an object that has already been cached in the object tree.

    Caching happens when the object tree is refreshed.
    """
    try:
        return self._cache['records'][pid]
    except KeyError:
        raise d1_onedrive.impl.onedrive_exceptions.ONEDriveException('Unknown PID')
Get an object that has already been cached in the object tree. Caching happens when the object tree is refreshed.
python
train
yaybu/callsign
callsign/scripts/daemon.py
https://github.com/yaybu/callsign/blob/e70e5368bfe4fd3ae3fdd1ed43944b53ffa1e100/callsign/scripts/daemon.py#L13-L22
def spawn(opts, conf):
    """ Acts like twistd """
    if opts.config is not None:
        os.environ["CALLSIGN_CONFIG_FILE"] = opts.config
    sys.argv[1:] = [
        "-noy", sibpath(__file__, "callsign.tac"),
        "--pidfile", conf['pidfile'],
        "--logfile", conf['logfile'],
    ]
    twistd.run()
Acts like twistd
python
train
wummel/dosage
dosagelib/scraper.py
https://github.com/wummel/dosage/blob/a0109c3a46219f280e6e5e77183674e40da0f304/dosagelib/scraper.py#L364-L381
def fetchUrls(cls, url, data, urlSearch):
    """Search all entries for given URL pattern(s) in an HTML page."""
    searchUrls = []
    searches = makeSequence(urlSearch)
    for search in searches:
        for match in search.finditer(data[0]):
            searchUrl = match.group(1)
            if not searchUrl:
                raise ValueError("Pattern %s matched empty URL at %s." %
                                 (search.pattern, url))
            out.debug(u'matched URL %r with pattern %s' %
                      (searchUrl, search.pattern))
            searchUrls.append(normaliseURL(urljoin(data[1], searchUrl)))
        if searchUrls:
            # do not search other links if one pattern matched
            break
    if not searchUrls:
        patterns = [x.pattern for x in searches]
        raise ValueError("Patterns %s not found at URL %s." % (patterns, url))
    return searchUrls
Search all entries for given URL pattern(s) in an HTML page.
python
train
Azure/azure-cli-extensions
src/sqlvm-preview/azext_sqlvm_preview/_format.py
https://github.com/Azure/azure-cli-extensions/blob/3d4854205b0f0d882f688cfa12383d14506c2e35/src/sqlvm-preview/azext_sqlvm_preview/_format.py#L78-L100
def transform_aglistener_output(result):
    '''
    Transforms the result of Availability Group Listener to eliminate unnecessary parameters.
    '''
    from collections import OrderedDict
    from msrestazure.tools import parse_resource_id
    try:
        resource_group = getattr(result, 'resource_group', None) or parse_resource_id(result.id)['resource_group']

        # Create a dictionary with the relevant parameters
        output = OrderedDict([('id', result.id),
                              ('name', result.name),
                              ('provisioningState', result.provisioning_state),
                              ('port', result.port),
                              ('resourceGroup', resource_group)])

        # Note, wsfcDomainCredentials will not display
        if result.load_balancer_configurations is not None:
            output['loadBalancerConfigurations'] = format_load_balancer_configuration_list(result.load_balancer_configurations)

        return output
    except AttributeError:
        # Return the response object if the formatting fails
        return result
Transforms the result of Availability Group Listener to eliminate unnecessary parameters.
python
train
eXamadeus/godaddypy
godaddypy/client.py
https://github.com/eXamadeus/godaddypy/blob/67820604ffe233a67ef9f6b3a59ab85b02653e57/godaddypy/client.py#L205-L249
def update_ip(self, ip, record_type='A', domains=None, subdomains=None):
    """Update the IP address in all records, specified by type, to the value of ip.

    Returns True if no exceptions occurred during the update.  If no domains
    are provided, all domains returned from self.get_domains() will be updated.
    By default, only A records are updated.

    :param record_type: The type of records to update (eg. 'A')
    :param ip: The new IP address (eg. '123.1.2.255')
    :param domains: A list of the domains you want to update (eg. ['123.com','abc.net'])
    :param subdomains: A list of the subdomains you want to update (eg. ['www','dev'])

    :type record_type: str or unicode
    :type ip: str or unicode
    :type domains: str, list of str
    :type subdomains: str, list of str

    :return: True if no exceptions occurred
    """
    if domains is None:
        domains = self.get_domains()
    elif sys.version_info < (3, 0):
        if isinstance(domains, (str, unicode)):
            domains = [domains]
    elif sys.version_info >= (3, 0):
        if isinstance(domains, str):
            domains = [domains]
    else:
        # we have a tuple, set, or something else, try to convert it to a list
        domains = list(domains)

    for domain in domains:
        a_records = self.get_records(domain, record_type=record_type)
        for record in a_records:
            r_name = str(record['name'])
            r_ip = str(record['data'])
            if not r_ip == ip:
                if (subdomains is None or
                        (isinstance(subdomains, (unicode, str)) and r_name == subdomains) or
                        r_name in subdomains):
                    record.update(data=str(ip))
                    self.update_record(domain, record)

    # If we didn't get any exceptions, return True to let the user know
    return True
Update the IP address in all records, specified by type, to the value of ip. Returns True if no exceptions occurred during the update. If no domains are provided, all domains returned from self.get_domains() will be updated. By default, only A records are updated. :param record_type: The type of records to update (eg. 'A') :param ip: The new IP address (eg. '123.1.2.255') :param domains: A list of the domains you want to update (eg. ['123.com','abc.net']) :param subdomains: A list of the subdomains you want to update (eg. ['www','dev']) :type record_type: str or unicode :type ip: str or unicode :type domains: str, list of str :type subdomains: str, list of str :return: True if no exceptions occurred
python
train
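A hedged usage sketch: godaddypy pairs an Account (credentials) with a Client, so updating the 'www' A record of one domain looks roughly like this (credentials and names are placeholders):

from godaddypy import Account, Client

account = Account(api_key='API_KEY', api_secret='API_SECRET')
client = Client(account)
client.update_ip('123.1.2.255', domains=['example.com'], subdomains=['www'])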
singnet/snet-cli
snet_cli/mpe_client_command.py
https://github.com/singnet/snet-cli/blob/1b5ac98cb9a64211c861ead9fcfe6208f2749032/snet_cli/mpe_client_command.py#L179-L193
def _get_channel_state_statelessly(self, grpc_channel, channel_id):
    """
    We get the state of the channel (nonce, amount, unspent_amount).
    We do it by securely combining information from the server and the blockchain:
    https://github.com/singnet/wiki/blob/master/multiPartyEscrowContract/MultiPartyEscrow_stateless_client.md
    """
    server = self._get_channel_state_from_server(grpc_channel, channel_id)
    blockchain = self._get_channel_state_from_blockchain(channel_id)

    if (server["current_nonce"] == blockchain["nonce"]):
        unspent_amount = blockchain["value"] - server["current_signed_amount"]
    else:
        unspent_amount = None  # in this case we cannot securely define unspent_amount yet

    return (server["current_nonce"], server["current_signed_amount"], unspent_amount)
We get the state of the channel (nonce, amount, unspent_amount). We do it by securely combining information from the server and the blockchain: https://github.com/singnet/wiki/blob/master/multiPartyEscrowContract/MultiPartyEscrow_stateless_client.md
python
train
Erotemic/utool
utool/util_arg.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_arg.py#L1130-L1151
def get_cmdline_varargs(argv=None):
    """
    Returns positional args specified directly after the scriptname
    and before any args starting with '-' on the commandline.
    """
    if argv is None:
        argv = sys.argv
    scriptname = argv[0]
    if scriptname == '':
        # python invoked by itself
        pos_start = 0
        pos_end = 0
    else:
        pos_start = pos_end = 1
    for idx in range(pos_start, len(argv)):
        if argv[idx].startswith('-'):
            pos_end = idx
            break
    else:
        pos_end = len(argv)
    cmdline_varargs = argv[pos_start:pos_end]
    return cmdline_varargs
Returns positional args specified directly after the scriptname and before any args starting with '-' on the commandline.
python
train
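Two behaviour checks that follow directly from the slicing logic above:

assert get_cmdline_varargs(['script.py', 'a', 'b', '--flag', 'c']) == ['a', 'b']
assert get_cmdline_varargs(['script.py', '--flag', 'a']) == []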
libtcod/python-tcod
tcod/console.py
https://github.com/libtcod/python-tcod/blob/8ba10c5cfb813eaf3e834de971ba2d6acb7838e4/tcod/console.py#L874-L921
def print(
    self,
    x: int,
    y: int,
    string: str,
    fg: Optional[Tuple[int, int, int]] = None,
    bg: Optional[Tuple[int, int, int]] = None,
    bg_blend: int = tcod.constants.BKGND_SET,
    alignment: int = tcod.constants.LEFT,
) -> None:
    """Print a string on a console with manual line breaks.

    `x` and `y` are the starting tile, with ``0,0`` as the upper-left corner
    of the console.  You can use negative numbers if you want to start
    printing relative to the bottom-right corner, but this behavior may
    change in future versions.

    `string` is a Unicode string which may include color control characters.
    Strings which are too long will be truncated until the next newline
    character ``"\\n"``.

    `fg` and `bg` are the foreground text color and background tile color
    respectively.  This is a 3-item tuple with (r, g, b) color values from
    0 to 255.  These parameters can also be set to `None` to leave the
    colors unchanged.

    `bg_blend` is the blend type used by libtcod.

    `alignment` can be `tcod.LEFT`, `tcod.CENTER`, or `tcod.RIGHT`.

    .. versionadded:: 8.5

    .. versionchanged:: 9.0
        `fg` and `bg` now default to `None` instead of white-on-black.
    """
    x, y = self._pythonic_index(x, y)
    string_ = string.encode("utf-8")  # type: bytes
    lib.console_print(
        self.console_c,
        x,
        y,
        string_,
        len(string_),
        (fg,) if fg is not None else ffi.NULL,
        (bg,) if bg is not None else ffi.NULL,
        bg_blend,
        alignment,
    )
Print a string on a console with manual line breaks. `x` and `y` are the starting tile, with ``0,0`` as the upper-left corner of the console. You can use negative numbers if you want to start printing relative to the bottom-right corner, but this behavior may change in future versions. `string` is a Unicode string which may include color control characters. Strings which are too long will be truncated until the next newline character ``"\\n"``. `fg` and `bg` are the foreground text color and background tile color respectively. This is a 3-item tuple with (r, g, b) color values from 0 to 255. These parameters can also be set to `None` to leave the colors unchanged. `bg_blend` is the blend type used by libtcod. `alignment` can be `tcod.LEFT`, `tcod.CENTER`, or `tcod.RIGHT`. .. versionadded:: 8.5 .. versionchanged:: 9.0 `fg` and `bg` now default to `None` instead of white-on-black.
python
train
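A short usage sketch, assuming the modern python-tcod Console API (the console size is arbitrary):

import tcod

console = tcod.Console(80, 25)
console.print(1, 1, "Hello\nworld", fg=(255, 255, 255))      # manual line break
console.print(-2, -2, "bottom-right", alignment=tcod.RIGHT)  # negative = from bottom-right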
dmlc/gluon-nlp
scripts/bert/staticbert/static_bert.py
https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/bert/staticbert/static_bert.py#L608-L652
def bert_12_768_12(dataset_name=None, vocab=None, pretrained=True, ctx=mx.cpu(),
                   root=os.path.join(get_home_dir(), 'models'), use_pooler=True,
                   use_decoder=True, use_classifier=True, input_size=None,
                   seq_length=None, **kwargs):
    """Static BERT BASE model.

    The number of layers (L) is 12, number of units (H) is 768, and the
    number of self-attention heads (A) is 12.

    Parameters
    ----------
    dataset_name : str or None, default None
        Options include 'book_corpus_wiki_en_cased', 'book_corpus_wiki_en_uncased',
        'wiki_cn_cased', 'wiki_multilingual_uncased' and 'wiki_multilingual_cased'.
    vocab : gluonnlp.vocab.BERTVocab or None, default None
        Vocabulary for the dataset. Must be provided if dataset is not specified.
    pretrained : bool, default True
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '$MXNET_HOME/models'
        Location for keeping the model parameters.
        MXNET_HOME defaults to '~/.mxnet'.
    use_pooler : bool, default True
        Whether to include the pooler which converts the encoded sequence
        tensor of shape (batch_size, seq_length, units) to a tensor of shape
        (batch_size, units) for segment level classification task.
    use_decoder : bool, default True
        Whether to include the decoder for masked language model prediction.
    use_classifier : bool, default True
        Whether to include the classifier for next sentence classification.
    input_size : int, default None
        Represents the embedding size of the input.
    seq_length : int, default None
        Stands for the sequence length of the input.

    Returns
    -------
    StaticBERTModel, gluonnlp.vocab.BERTVocab
    """
    return get_static_bert_model(model_name='bert_12_768_12',
                                 vocab=vocab,
                                 dataset_name=dataset_name,
                                 pretrained=pretrained,
                                 ctx=ctx,
                                 use_pooler=use_pooler,
                                 use_decoder=use_decoder,
                                 use_classifier=use_classifier,
                                 root=root,
                                 input_size=input_size,
                                 seq_length=seq_length,
                                 **kwargs)
Static BERT BASE model. The number of layers (L) is 12, number of units (H) is 768, and the number of self-attention heads (A) is 12. Parameters ---------- dataset_name : str or None, default None Options include 'book_corpus_wiki_en_cased', 'book_corpus_wiki_en_uncased', 'wiki_cn_cased', 'wiki_multilingual_uncased' and 'wiki_multilingual_cased'. vocab : gluonnlp.vocab.BERTVocab or None, default None Vocabulary for the dataset. Must be provided if dataset is not specified. pretrained : bool, default True Whether to load the pretrained weights for model. ctx : Context, default CPU The context in which to load the pretrained weights. root : str, default '$MXNET_HOME/models' Location for keeping the model parameters. MXNET_HOME defaults to '~/.mxnet'. use_pooler : bool, default True Whether to include the pooler which converts the encoded sequence tensor of shape (batch_size, seq_length, units) to a tensor of shape (batch_size, units) for segment level classification task. use_decoder : bool, default True Whether to include the decoder for masked language model prediction. use_classifier : bool, default True Whether to include the classifier for next sentence classification. input_size : int, default None Represents the embedding size of the input. seq_length : int, default None Stands for the sequence length of the input. Returns ------- StaticBERTModel, gluonnlp.vocab.BERTVocab
python
train
lanius/tinyik
tinyik/optimizer.py
https://github.com/lanius/tinyik/blob/dffe5031ee044caf43e51746c4b0a6d45922d50e/tinyik/optimizer.py#L106-L114
def optimize(self, angles0, target):
    """Calculate an optimum argument of an objective function."""
    def new_objective(angles):
        return self.f(angles, target)

    return scipy.optimize.minimize(
        new_objective,
        angles0,
        **self.optimizer_opt).x
Calculate an optimum argument of an objective function.
python
train
dancsalo/TensorBase
tensorbase/data.py
https://github.com/dancsalo/TensorBase/blob/3d42a326452bd03427034916ff2fb90730020204/tensorbase/data.py#L119-L125
def dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = np.arange(num_labels) * num_classes
    labels_one_hot = np.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
Convert class labels from scalars to one-hot vectors.
python
train
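A quick worked example of the one-hot conversion (runnable once dense_to_one_hot is defined as above):

import numpy as np

labels = np.array([0, 2, 1])
one_hot = dense_to_one_hot(labels, num_classes=3)
# array([[1., 0., 0.],
#        [0., 0., 1.],
#        [0., 1., 0.]])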
StackStorm/pybind
pybind/nos/v7_2_0/rbridge_id/maps/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v7_2_0/rbridge_id/maps/__init__.py#L267-L288
def _set_relay(self, v, load=False):
    """
    Setter method for relay, mapped from YANG variable /rbridge_id/maps/relay (list)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_relay is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_relay() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=YANGListType("hostip", relay.relay, yang_name="relay", rest_name="relay", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='hostip', extensions={u'tailf-common': {u'info': u'Configure relay ip mail settings', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'maps_relay_callpoint'}}), is_container='list', yang_name="relay", rest_name="relay", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure relay ip mail settings', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'maps_relay_callpoint'}}, namespace='urn:brocade.com:mgmt:brocade-maps', defining_module='brocade-maps', yang_type='list', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """relay must be of a type compatible with list""",
            'defined-type': "list",
            'generated-type': """YANGDynClass(base=YANGListType("hostip",relay.relay, yang_name="relay", rest_name="relay", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='hostip', extensions={u'tailf-common': {u'info': u'Configure relay ip mail settings', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'maps_relay_callpoint'}}), is_container='list', yang_name="relay", rest_name="relay", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure relay ip mail settings', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'maps_relay_callpoint'}}, namespace='urn:brocade.com:mgmt:brocade-maps', defining_module='brocade-maps', yang_type='list', is_config=True)""",
        })

    self.__relay = t
    if hasattr(self, '_set'):
        self._set()
Setter method for relay, mapped from YANG variable /rbridge_id/maps/relay (list) If this variable is read-only (config: false) in the source YANG file, then _set_relay is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_relay() directly.
python
train
Iotic-Labs/py-IoticAgent
src/IoticAgent/IOT/Point.py
https://github.com/Iotic-Labs/py-IoticAgent/blob/893e8582ad1dacfe32dfc0ee89452bbd6f57d28d/src/IoticAgent/IOT/Point.py#L185-L189
def set_meta_rdf(self, rdf, fmt='n3'):
    """Set the metadata for this Point in rdf fmt"""
    evt = self._client._request_point_meta_set(self._type, self.__lid, self.__pid, rdf, fmt=fmt)
    self._client._wait_and_except_if_failed(evt)
Set the metadata for this Point in rdf fmt
python
train
jbloomlab/phydms
phydmslib/models.py
https://github.com/jbloomlab/phydms/blob/9cdebc10bafbe543c552d79486c7f950780ed3c0/phydmslib/models.py#L668-L674
def _update_phi(self):
    """Update `phi` using current `eta`."""
    etaprod = 1.0
    for w in range(N_NT - 1):
        self.phi[w] = etaprod * (1 - self.eta[w])
        etaprod *= self.eta[w]
    self.phi[N_NT - 1] = etaprod
Update `phi` using current `eta`.
python
train
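This is a stick-breaking transform: phi[w] = (1 - eta[w]) * prod(eta[v] for v < w), with the last phi taking the remaining product, so phi always sums to 1. A stand-alone check with N_NT = 4 and an arbitrary eta:

eta = [0.7, 0.5, 0.4]
phi = []
etaprod = 1.0
for e in eta:
    phi.append(etaprod * (1 - e))
    etaprod *= e
phi.append(etaprod)
# phi is approximately [0.3, 0.35, 0.21, 0.14]
assert abs(sum(phi) - 1.0) < 1e-12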
fedora-infra/fmn.rules
fmn/rules/taskotron.py
https://github.com/fedora-infra/fmn.rules/blob/f9ec790619fcc8b41803077c4dec094e5127fc24/fmn/rules/taskotron.py#L108-L127
def taskotron_release_critical_task(config, message):
    """ Release-critical taskotron tasks

    With this rule, you can limit messages to only those of release-critical
    `taskotron <https://taskotron.fedoraproject.org/>`_ task.

    These are the tasks which are deemed extremely important by the
    distribution, and their failure should be carefully inspected.

    Currently these tasks are ``dist.depcheck`` and ``dist.upgradepath``.
    """
    # We only operate on taskotron messages, first off.
    if not taskotron_result_new(config, message):
        return False

    task = message['msg']['task'].get('name')
    return task in ['dist.depcheck', 'dist.upgradepath']
Release-critical taskotron tasks With this rule, you can limit messages to only those of release-critical `taskotron <https://taskotron.fedoraproject.org/>`_ task. These are the tasks which are deemed extremely important by the distribution, and their failure should be carefully inspected. Currently these tasks are ``dist.depcheck`` and ``dist.upgradepath``.
python
train
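For reference, the minimal message shape this rule inspects looks like the dict below; the topic string is hypothetical, and taskotron_result_new(config, message) must also accept the message:

message = {
    'topic': 'org.fedoraproject.prod.taskotron.result.new',  # hypothetical topic
    'msg': {'task': {'name': 'dist.depcheck'}},
}
# taskotron_release_critical_task(config, message) would return True here.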
google/grr
grr/core/grr_response_core/lib/util/compatibility.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/util/compatibility.py#L80-L104
def GetName(obj):
    """A compatibility wrapper for getting object's name.

    In Python 2 class names are returned as `bytes` (since class names can
    contain only ASCII characters) whereas in Python 3 they are `unicode`
    (since class names can contain arbitrary unicode characters).

    This function makes this behaviour consistent and always returns class
    name as a unicode string.

    Once support for Python 2 is dropped all invocations of this call can be
    replaced with ordinary `__name__` access.

    Args:
      obj: A type or function object to get the name for.

    Returns:
      Name of the specified class as unicode string.
    """
    precondition.AssertType(obj, (type, types.FunctionType))

    if PY2:
        return obj.__name__.decode("ascii")
    else:
        return obj.__name__
A compatibility wrapper for getting object's name. In Python 2 class names are returned as `bytes` (since class names can contain only ASCII characters) whereas in Python 3 they are `unicode` (since class names can contain arbitrary unicode characters). This function makes this behaviour consistent and always returns class name as an unicode string. Once support for Python 2 is dropped all invocations of this call can be replaced with ordinary `__name__` access. Args: obj: A type or function object to get the name for. Returns: Name of the specified class as unicode string.
python
train
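A small usage check (two-space indents to match the grr style above):

class _Example(object):
  pass

name = GetName(_Example)  # u'_Example' on Python 2, '_Example' on Python 3
assert name == '_Example'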
munhitsu/django-dowser
django_dowser/views.py
https://github.com/munhitsu/django-dowser/blob/3030be07cd3cf183adea634b066337bcd07074d6/django_dowser/views.py#L222-L243
def get_repr(self, obj, referent=None):
    """Return an HTML tree block describing the given object."""
    objtype = type(obj)
    typename = str(objtype.__module__) + "." + objtype.__name__
    prettytype = typename.replace("__builtin__.", "")

    name = getattr(obj, "__name__", "")
    if name:
        prettytype = "%s %r" % (prettytype, name)

    key = ""
    if referent:
        key = self.get_refkey(obj, referent)
    url = reverse('dowser_trace_object', args=(typename, id(obj)))
    return ('<a class="objectid" href="%s">%s</a> '
            '<span class="typename">%s</span>%s<br />'
            '<span class="repr">%s</span>'
            % (url, id(obj), prettytype, key, get_repr(obj, 100)))
[ "def", "get_repr", "(", "self", ",", "obj", ",", "referent", "=", "None", ")", ":", "objtype", "=", "type", "(", "obj", ")", "typename", "=", "str", "(", "objtype", ".", "__module__", ")", "+", "\".\"", "+", "objtype", ".", "__name__", "prettytype", "=", "typename", ".", "replace", "(", "\"__builtin__.\"", ",", "\"\"", ")", "name", "=", "getattr", "(", "obj", ",", "\"__name__\"", ",", "\"\"", ")", "if", "name", ":", "prettytype", "=", "\"%s %r\"", "%", "(", "prettytype", ",", "name", ")", "key", "=", "\"\"", "if", "referent", ":", "key", "=", "self", ".", "get_refkey", "(", "obj", ",", "referent", ")", "url", "=", "reverse", "(", "'dowser_trace_object'", ",", "args", "=", "(", "typename", ",", "id", "(", "obj", ")", ")", ")", "return", "(", "'<a class=\"objectid\" href=\"%s\">%s</a> '", "'<span class=\"typename\">%s</span>%s<br />'", "'<span class=\"repr\">%s</span>'", "%", "(", "url", ",", "id", "(", "obj", ")", ",", "prettytype", ",", "key", ",", "get_repr", "(", "obj", ",", "100", ")", ")", ")" ]
Return an HTML tree block describing the given object.
[ "Return", "an", "HTML", "tree", "block", "describing", "the", "given", "object", "." ]
python
valid
abseil/abseil-py
absl/logging/converter.py
https://github.com/abseil/abseil-py/blob/9d73fdaa23a6b6726aa5731390f388c0c6250ee5/absl/logging/converter.py#L172-L200
def standard_to_absl(level): """Converts an integer level from the standard value to the absl value. Args: level: int, a Python standard logging level. Raises: TypeError: Raised when level is not an integer. Returns: The corresponding integer level for use in absl logging. """ if not isinstance(level, int): raise TypeError('Expect an int level, found {}'.format(type(level))) if level < 0: level = 0 if level < STANDARD_DEBUG: # Maps to vlog levels. return STANDARD_DEBUG - level + 1 elif level < STANDARD_INFO: return ABSL_DEBUG elif level < STANDARD_WARNING: return ABSL_INFO elif level < STANDARD_ERROR: return ABSL_WARNING elif level < STANDARD_CRITICAL: return ABSL_ERROR else: return ABSL_FATAL
[ "def", "standard_to_absl", "(", "level", ")", ":", "if", "not", "isinstance", "(", "level", ",", "int", ")", ":", "raise", "TypeError", "(", "'Expect an int level, found {}'", ".", "format", "(", "type", "(", "level", ")", ")", ")", "if", "level", "<", "0", ":", "level", "=", "0", "if", "level", "<", "STANDARD_DEBUG", ":", "# Maps to vlog levels.", "return", "STANDARD_DEBUG", "-", "level", "+", "1", "elif", "level", "<", "STANDARD_INFO", ":", "return", "ABSL_DEBUG", "elif", "level", "<", "STANDARD_WARNING", ":", "return", "ABSL_INFO", "elif", "level", "<", "STANDARD_ERROR", ":", "return", "ABSL_WARNING", "elif", "level", "<", "STANDARD_CRITICAL", ":", "return", "ABSL_ERROR", "else", ":", "return", "ABSL_FATAL" ]
Converts an integer level from the standard value to the absl value. Args: level: int, a Python standard logging level. Raises: TypeError: Raised when level is not an integer. Returns: The corresponding integer level for use in absl logging.
[ "Converts", "an", "integer", "level", "from", "the", "standard", "value", "to", "the", "absl", "value", "." ]
python
train
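converter.standard_to_absl is part of absl-py, so this usage example assumes only that the package is installed.

import logging
from absl.logging import converter

# Standard levels collapse onto absl's smaller severity set.
print(converter.standard_to_absl(logging.DEBUG))    # absl DEBUG
print(converter.standard_to_absl(logging.WARNING))  # absl WARNING
# Levels below standard DEBUG map to verbose (vlog) levels, per the code above.
print(converter.standard_to_absl(5))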
saltstack/salt
salt/fileclient.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileclient.py#L314-L342
def is_cached(self, path, saltenv='base', cachedir=None): ''' Returns the full path to a file if it is cached locally on the minion otherwise returns a blank string ''' if path.startswith('salt://'): path, senv = salt.utils.url.parse(path) if senv: saltenv = senv escaped = True if salt.utils.url.is_escaped(path) else False # also strip escape character '|' localsfilesdest = os.path.join( self.opts['cachedir'], 'localfiles', path.lstrip('|/')) filesdest = os.path.join( self.opts['cachedir'], 'files', saltenv, path.lstrip('|/')) extrndest = self._extrn_path(path, saltenv, cachedir=cachedir) if os.path.exists(filesdest): return salt.utils.url.escape(filesdest) if escaped else filesdest elif os.path.exists(localsfilesdest): return salt.utils.url.escape(localsfilesdest) \ if escaped \ else localsfilesdest elif os.path.exists(extrndest): return extrndest return ''
[ "def", "is_cached", "(", "self", ",", "path", ",", "saltenv", "=", "'base'", ",", "cachedir", "=", "None", ")", ":", "if", "path", ".", "startswith", "(", "'salt://'", ")", ":", "path", ",", "senv", "=", "salt", ".", "utils", ".", "url", ".", "parse", "(", "path", ")", "if", "senv", ":", "saltenv", "=", "senv", "escaped", "=", "True", "if", "salt", ".", "utils", ".", "url", ".", "is_escaped", "(", "path", ")", "else", "False", "# also strip escape character '|'", "localsfilesdest", "=", "os", ".", "path", ".", "join", "(", "self", ".", "opts", "[", "'cachedir'", "]", ",", "'localfiles'", ",", "path", ".", "lstrip", "(", "'|/'", ")", ")", "filesdest", "=", "os", ".", "path", ".", "join", "(", "self", ".", "opts", "[", "'cachedir'", "]", ",", "'files'", ",", "saltenv", ",", "path", ".", "lstrip", "(", "'|/'", ")", ")", "extrndest", "=", "self", ".", "_extrn_path", "(", "path", ",", "saltenv", ",", "cachedir", "=", "cachedir", ")", "if", "os", ".", "path", ".", "exists", "(", "filesdest", ")", ":", "return", "salt", ".", "utils", ".", "url", ".", "escape", "(", "filesdest", ")", "if", "escaped", "else", "filesdest", "elif", "os", ".", "path", ".", "exists", "(", "localsfilesdest", ")", ":", "return", "salt", ".", "utils", ".", "url", ".", "escape", "(", "localsfilesdest", ")", "if", "escaped", "else", "localsfilesdest", "elif", "os", ".", "path", ".", "exists", "(", "extrndest", ")", ":", "return", "extrndest", "return", "''" ]
Returns the full path to a file if it is cached locally on the minion otherwise returns a blank string
[ "Returns", "the", "full", "path", "to", "a", "file", "if", "it", "is", "cached", "locally", "on", "the", "minion", "otherwise", "returns", "a", "blank", "string" ]
python
train
SpamScope/mail-parser
mailparser/mailparser.py
https://github.com/SpamScope/mail-parser/blob/814b56d0b803feab9dea04f054b802ce138097e2/mailparser/mailparser.py#L207-L220
def from_string(cls, s): """ Init a new object from a string. Args: s (string): raw email Returns: Instance of MailParser """ log.debug("Parsing email from string") message = email.message_from_string(s) return cls(message)
[ "def", "from_string", "(", "cls", ",", "s", ")", ":", "log", ".", "debug", "(", "\"Parsing email from string\"", ")", "message", "=", "email", ".", "message_from_string", "(", "s", ")", "return", "cls", "(", "message", ")" ]
Init a new object from a string. Args: s (string): raw email Returns: Instance of MailParser
[ "Init", "a", "new", "object", "from", "a", "string", "." ]
python
train
brycepg/mand
mand/mand.py
https://github.com/brycepg/mand/blob/3a8f9c1cc1bbe217aaca8c805113285ab02ecb7c/mand/mand.py#L66-L78
def replace_sys_args(new_args): """Temporarily replace sys.argv with current arguments Restores sys.argv upon exit of the context manager. """ # Replace sys.argv arguments # for module import old_args = sys.argv sys.argv = new_args try: yield finally: sys.argv = old_args
[ "def", "replace_sys_args", "(", "new_args", ")", ":", "# Replace sys.argv arguments", "# for module import", "old_args", "=", "sys", ".", "argv", "sys", ".", "argv", "=", "new_args", "try", ":", "yield", "finally", ":", "sys", ".", "argv", "=", "old_args" ]
Temporarily replace sys.argv with current arguments Restores sys.argv upon exit of the context manager.
[ "Temporarily", "replace", "sys", ".", "argv", "with", "current", "arguments" ]
python
train
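The yield/try/finally shape implies the function is wrapped with contextlib.contextmanager; here is a self-contained re-sketch under that assumption.

import sys
from contextlib import contextmanager

@contextmanager
def replace_sys_args(new_args):
    # Swap sys.argv for the duration of the with-block, then restore it.
    old_args = sys.argv
    sys.argv = new_args
    try:
        yield
    finally:
        sys.argv = old_args

with replace_sys_args(['prog', '--verbose']):
    print(sys.argv)   # ['prog', '--verbose']
# sys.argv is restored here, even if the block raised.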
csaez/wishlib
wishlib/si/siwrapper.py
https://github.com/csaez/wishlib/blob/c212fa7875006a332a4cefbf69885ced9647bc2f/wishlib/si/siwrapper.py#L69-L79
def auto_update(cls, function): """ This class method could be used as decorator on subclasses, it ensures update method is called after function execution. """ def wrapper(self, *args, **kwargs): f = function(self, *args, **kwargs) self.update() return f return wrapper
[ "def", "auto_update", "(", "cls", ",", "function", ")", ":", "def", "wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "f", "=", "function", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "self", ".", "update", "(", ")", "return", "f", "return", "wrapper" ]
This class method could be used as decorator on subclasses, it ensures update method is called after function execution.
[ "This", "class", "method", "could", "be", "used", "as", "decorator", "on", "subclasses", "it", "ensures", "update", "method", "is", "called", "after", "function", "execution", "." ]
python
train
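A hypothetical subclass showing how the decorator is meant to be applied; the SIWrapper base class name comes from the record's path, while the Slider class and set_value method are invented for illustration.

class Slider(SIWrapper):              # hypothetical subclass
    @SIWrapper.auto_update
    def set_value(self, value):
        # self.update() runs automatically after this method returns.
        self.value = value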
notanumber/xapian-haystack
xapian_backend.py
https://github.com/notanumber/xapian-haystack/blob/2247b23d3cb6322ce477d45f84d52da47a940348/xapian_backend.py#L985-L1006
def _do_multivalued_field_facets(self, results, field_facets): """ Implements a multivalued field facet on the results. This is implemented using brute force - O(N^2) - because Xapian does not have it implemented yet (see http://trac.xapian.org/ticket/199) """ facet_dict = {} for field in field_facets: facet_list = {} if not self._multi_value_field(field): continue for result in results: field_value = getattr(result, field) for item in field_value: # Facet each item in a MultiValueField facet_list[item] = facet_list.get(item, 0) + 1 facet_dict[field] = list(facet_list.items()) return facet_dict
[ "def", "_do_multivalued_field_facets", "(", "self", ",", "results", ",", "field_facets", ")", ":", "facet_dict", "=", "{", "}", "for", "field", "in", "field_facets", ":", "facet_list", "=", "{", "}", "if", "not", "self", ".", "_multi_value_field", "(", "field", ")", ":", "continue", "for", "result", "in", "results", ":", "field_value", "=", "getattr", "(", "result", ",", "field", ")", "for", "item", "in", "field_value", ":", "# Facet each item in a MultiValueField", "facet_list", "[", "item", "]", "=", "facet_list", ".", "get", "(", "item", ",", "0", ")", "+", "1", "facet_dict", "[", "field", "]", "=", "list", "(", "facet_list", ".", "items", "(", ")", ")", "return", "facet_dict" ]
Implements a multivalued field facet on the results. This is implemented using brute force - O(N^2) - because Xapian does not have it implemented yet (see http://trac.xapian.org/ticket/199)
[ "Implements", "a", "multivalued", "field", "facet", "on", "the", "results", "." ]
python
train
OpenKMIP/PyKMIP
kmip/core/messages/payloads/revoke.py
https://github.com/OpenKMIP/PyKMIP/blob/b51c5b044bd05f8c85a1d65d13a583a4d8fc1b0e/kmip/core/messages/payloads/revoke.py#L129-L145
def validate(self): """ Error check the attributes of the ActivateRequestPayload object. """ if self.unique_identifier is not None: if not isinstance(self.unique_identifier, attributes.UniqueIdentifier): msg = "invalid unique identifier" raise TypeError(msg) if self.compromise_occurrence_date is not None: if not isinstance(self.compromise_occurrence_date, primitives.DateTime): msg = "invalid compromise time" raise TypeError(msg) if not isinstance(self.revocation_reason, objects.RevocationReason): msg = "invalid revocation reason" raise TypeError(msg)
[ "def", "validate", "(", "self", ")", ":", "if", "self", ".", "unique_identifier", "is", "not", "None", ":", "if", "not", "isinstance", "(", "self", ".", "unique_identifier", ",", "attributes", ".", "UniqueIdentifier", ")", ":", "msg", "=", "\"invalid unique identifier\"", "raise", "TypeError", "(", "msg", ")", "if", "self", ".", "compromise_occurrence_date", "is", "not", "None", ":", "if", "not", "isinstance", "(", "self", ".", "compromise_occurrence_date", ",", "primitives", ".", "DateTime", ")", ":", "msg", "=", "\"invalid compromise time\"", "raise", "TypeError", "(", "msg", ")", "if", "not", "isinstance", "(", "self", ".", "revocation_reason", ",", "objects", ".", "RevocationReason", ")", ":", "msg", "=", "\"invalid revocation reason\"", "raise", "TypeError", "(", "msg", ")" ]
Error check the attributes of the ActivateRequestPayload object.
[ "Error", "check", "the", "attributes", "of", "the", "ActivateRequestPayload", "object", "." ]
python
test
gtsystem/parallelpipe
parallelpipe.py
https://github.com/gtsystem/parallelpipe/blob/b10eba28de6019cbf34e08ac575d31a4c493b39c/parallelpipe.py#L131-L134
def set_out(self, que_out, num_followers): """Set the queue in output and the number of parallel tasks that follow""" for p in self.processes: p.set_out(que_out, num_followers)
[ "def", "set_out", "(", "self", ",", "que_out", ",", "num_followers", ")", ":", "for", "p", "in", "self", ".", "processes", ":", "p", ".", "set_out", "(", "que_out", ",", "num_followers", ")" ]
Set the queue in output and the number of parallel tasks that follow
[ "Set", "the", "queue", "in", "output", "and", "the", "number", "of", "parallel", "tasks", "that", "follow" ]
python
train
mitsei/dlkit
dlkit/json_/osid/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/osid/objects.py#L1345-L1360
def set_end_date(self, date): """Sets the end date. arg: date (osid.calendaring.DateTime): the new date raise: InvalidArgument - ``date`` is invalid raise: NoAccess - ``Metadata.isReadOnly()`` is ``true`` raise: NullArgument - ``date`` is ``null`` *compliance: mandatory -- This method must be implemented.* """ if self.get_end_date_metadata().is_read_only(): raise errors.NoAccess() if not self._is_valid_date_time(date, self.get_end_date_metadata()): raise errors.InvalidArgument() # self._my_map['endDate'] = self._get_date_map(date) self._my_map['endDate'] = date
[ "def", "set_end_date", "(", "self", ",", "date", ")", ":", "if", "self", ".", "get_end_date_metadata", "(", ")", ".", "is_read_only", "(", ")", ":", "raise", "errors", ".", "NoAccess", "(", ")", "if", "not", "self", ".", "_is_valid_date_time", "(", "date", ",", "self", ".", "get_end_date_metadata", "(", ")", ")", ":", "raise", "errors", ".", "InvalidArgument", "(", ")", "# self._my_map['endDate'] = self._get_date_map(date)", "self", ".", "_my_map", "[", "'endDate'", "]", "=", "date" ]
Sets the end date. arg: date (osid.calendaring.DateTime): the new date raise: InvalidArgument - ``date`` is invalid raise: NoAccess - ``Metadata.isReadOnly()`` is ``true`` raise: NullArgument - ``date`` is ``null`` *compliance: mandatory -- This method must be implemented.*
[ "Sets", "the", "end", "date", "." ]
python
train
DataBiosphere/toil
src/toil/wdl/wdl_functions.py
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/wdl/wdl_functions.py#L31-L45
def glob(glob_pattern, directoryname): ''' Walks through a directory and its subdirectories looking for files matching the glob_pattern and returns a list=[]. :param directoryname: Any accessible folder name on the filesystem. :param glob_pattern: A string like "*.txt", which would find all text files. :return: A list=[] of absolute filepaths matching the glob pattern. ''' matches = [] for root, dirnames, filenames in os.walk(directoryname): for filename in fnmatch.filter(filenames, glob_pattern): absolute_filepath = os.path.join(root, filename) matches.append(absolute_filepath) return matches
[ "def", "glob", "(", "glob_pattern", ",", "directoryname", ")", ":", "matches", "=", "[", "]", "for", "root", ",", "dirnames", ",", "filenames", "in", "os", ".", "walk", "(", "directoryname", ")", ":", "for", "filename", "in", "fnmatch", ".", "filter", "(", "filenames", ",", "glob_pattern", ")", ":", "absolute_filepath", "=", "os", ".", "path", ".", "join", "(", "root", ",", "filename", ")", "matches", ".", "append", "(", "absolute_filepath", ")", "return", "matches" ]
Walks through a directory and its subdirectories looking for files matching the glob_pattern and returns a list=[]. :param directoryname: Any accessible folder name on the filesystem. :param glob_pattern: A string like "*.txt", which would find all text files. :return: A list=[] of absolute filepaths matching the glob pattern.
[ "Walks", "through", "a", "directory", "and", "its", "subdirectories", "looking", "for", "files", "matching", "the", "glob_pattern", "and", "returns", "a", "list", "=", "[]", "." ]
python
train
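A usage sketch; the directory path and pattern are assumptions, but the call itself follows the docstring directly.

# Collect every .txt file under /tmp/workdir, at any depth.
matches = glob('*.txt', '/tmp/workdir')   # assumed directory
for filepath in matches:
    print(filepath)                       # absolute paths, per the docstring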
yaml/pyyaml
lib/yaml/__init__.py
https://github.com/yaml/pyyaml/blob/e471e86bf6dabdad45a1438c20a4a5c033eb9034/lib/yaml/__init__.py#L118-L132
def load_all(stream, Loader=None): """ Parse all YAML documents in a stream and produce corresponding Python objects. """ if Loader is None: load_warning('load_all') Loader = FullLoader loader = Loader(stream) try: while loader.check_data(): yield loader.get_data() finally: loader.dispose()
[ "def", "load_all", "(", "stream", ",", "Loader", "=", "None", ")", ":", "if", "Loader", "is", "None", ":", "load_warning", "(", "'load_all'", ")", "Loader", "=", "FullLoader", "loader", "=", "Loader", "(", "stream", ")", "try", ":", "while", "loader", ".", "check_data", "(", ")", ":", "yield", "loader", ".", "get_data", "(", ")", "finally", ":", "loader", ".", "dispose", "(", ")" ]
Parse all YAML documents in a stream and produce corresponding Python objects.
[ "Parse", "all", "YAML", "documents", "in", "a", "stream", "and", "produce", "corresponding", "Python", "objects", "." ]
python
train
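load_all is public PyYAML API, so this example assumes only that PyYAML is installed; passing a Loader explicitly avoids the load_warning branch above.

import yaml

stream = "a: 1\n---\nb: 2\n"
# Each YAML document in the stream is yielded lazily.
for doc in yaml.load_all(stream, Loader=yaml.SafeLoader):
    print(doc)   # {'a': 1} then {'b': 2}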
getnikola/coil
coil/web.py
https://github.com/getnikola/coil/blob/80ef1827460b0691cf2c98351a14d88e235c9899/coil/web.py#L237-L250
def check_old_password(pwdhash, password): """Check the old password hash from :func:`password_hash`. .. versionadded:: 1.1.0 :param str pwdhash: Hash from :func:`password_hash` to check :param str password: Password in plaintext :return: password match :rtype: bool """ from flask.ext.bcrypt import Bcrypt app.config['BCRYPT_LOG_ROUNDS'] = 12 bcrypt = Bcrypt(app) return bcrypt.check_password_hash(pwdhash, password)
[ "def", "check_old_password", "(", "pwdhash", ",", "password", ")", ":", "from", "flask", ".", "ext", ".", "bcrypt", "import", "Bcrypt", "app", ".", "config", "[", "'BCRYPT_LOG_ROUNDS'", "]", "=", "12", "bcrypt", "=", "Bcrypt", "(", "app", ")", "return", "bcrypt", ".", "check_password_hash", "(", "pwdhash", ",", "password", ")" ]
Check the old password hash from :func:`password_hash`. .. versionadded:: 1.1.0 :param str pwdhash: Hash from :func:`password_hash` to check :param str password: Password in plaintext :return: password match :rtype: bool
[ "Check", "the", "old", "password", "hash", "from", ":", "func", ":", "password_hash", "." ]
python
train
jhermann/rituals
src/rituals/util/antglob.py
https://github.com/jhermann/rituals/blob/1534f50d81e19bbbe799e2eba0acdefbce047c06/src/rituals/util/antglob.py#L69-L73
def compile_glob(spec): """Convert the given glob `spec` to a compiled regex.""" parsed = "".join(parse_glob(spec)) regex = "^{0}$".format(parsed) return re.compile(regex)
[ "def", "compile_glob", "(", "spec", ")", ":", "parsed", "=", "\"\"", ".", "join", "(", "parse_glob", "(", "spec", ")", ")", "regex", "=", "\"^{0}$\"", ".", "format", "(", "parsed", ")", "return", "re", ".", "compile", "(", "regex", ")" ]
Convert the given glob `spec` to a compiled regex.
[ "Convert", "the", "given", "glob", "spec", "to", "a", "compiled", "regex", "." ]
python
valid
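A usage sketch; the concrete pattern semantics live in parse_glob (not shown), so the expected match results are assumptions based on ant-style globbing.

rx = compile_glob('*.txt')                # anchored regex, per the code above
print(bool(rx.match('notes.txt')))        # expected True for a simple suffix glob
print(bool(rx.match('notes.txt.bak')))    # expected False: the regex is ^...$-anchored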
gmr/tredis
tredis/sortedsets.py
https://github.com/gmr/tredis/blob/2e91c6a58a35460be0525c51ac6a98fde3b506ad/tredis/sortedsets.py#L236-L255
def zrem(self, key, *members): """Removes the specified members from the sorted set stored at key. Non existing members are ignored. An error is returned when key exists and does not hold a sorted set. .. note:: **Time complexity**: ``O(M*log(N))`` with ``N`` being the number of elements in the sorted set and ``M`` the number of elements to be removed. :param key: The key of the sorted set :type key: :class:`str`, :class:`bytes` :param members: One or more member values to remove :type members: :class:`str`, :class:`bytes` :rtype: int :raises: :exc:`~tredis.exceptions.RedisError` """ return self._execute([b'ZREM', key] + list(members))
[ "def", "zrem", "(", "self", ",", "key", ",", "*", "members", ")", ":", "return", "self", ".", "_execute", "(", "[", "b'ZREM'", ",", "key", "]", "+", "list", "(", "members", ")", ")" ]
Removes the specified members from the sorted set stored at key. Non existing members are ignored. An error is returned when key exists and does not hold a sorted set. .. note:: **Time complexity**: ``O(M*log(N))`` with ``N`` being the number of elements in the sorted set and ``M`` the number of elements to be removed. :param key: The key of the sorted set :type key: :class:`str`, :class:`bytes` :param members: One or more member values to remove :type members: :class:`str`, :class:`bytes` :rtype: int :raises: :exc:`~tredis.exceptions.RedisError`
[ "Removes", "the", "specified", "members", "from", "the", "sorted", "set", "stored", "at", "key", ".", "Non", "existing", "members", "are", "ignored", "." ]
python
train
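tredis is Tornado-based and _execute returns a future, so a coroutine-style usage sketch follows; the client constructor arguments are assumptions.

from tornado import gen, ioloop
import tredis

@gen.coroutine
def demo():
    client = tredis.Client([{'host': 'localhost', 'port': 6379}])  # assumed connection settings
    removed = yield client.zrem('scores', 'alice', 'bob')
    print(removed)   # count of members actually removed

ioloop.IOLoop.current().run_sync(demo)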
acutesoftware/AIKIF
aikif/cls_log.py
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/cls_log.py#L64-L85
def get_session_id(self): """ get a unique id (shortish string) to allow simple aggregation of log records from multiple sources. This id is used for the life of the running program to allow extraction from all logs. WARNING - this can give duplicate sessions when 2 apps hit it at the same time. """ max_session = '0' try: with open(self.log_folder + os.sep + '_sessions.txt', 'r') as f: for _ in f: txt = f.readline() if txt.strip('\n') != '': max_session = txt except Exception: max_session = '1' this_session = str(int(max_session) + random.randint(9,100)).zfill(9) # not a great way to ensure uniqueness - TODO FIX with open(self.log_folder + os.sep + '_sessions.txt', 'a') as f2: f2.write(this_session + '\n') return this_session
[ "def", "get_session_id", "(", "self", ")", ":", "max_session", "=", "'0'", "try", ":", "with", "open", "(", "self", ".", "log_folder", "+", "os", ".", "sep", "+", "'_sessions.txt'", ",", "'r'", ")", "as", "f", ":", "for", "_", "in", "f", ":", "txt", "=", "f", ".", "readline", "(", ")", "if", "txt", ".", "strip", "(", "'\\n'", ")", "!=", "''", ":", "max_session", "=", "txt", "except", "Exception", ":", "max_session", "=", "'1'", "this_session", "=", "str", "(", "int", "(", "max_session", ")", "+", "random", ".", "randint", "(", "9", ",", "100", ")", ")", ".", "zfill", "(", "9", ")", "# not a great way to ensure uniqueness - TODO FIX ", "with", "open", "(", "self", ".", "log_folder", "+", "os", ".", "sep", "+", "'_sessions.txt'", ",", "'a'", ")", "as", "f2", ":", "f2", ".", "write", "(", "this_session", "+", "'\\n'", ")", "return", "this_session" ]
get a unique id (shortish string) to allow simple aggregation of log records from multiple sources. This id is used for the life of the running program to allow extraction from all logs. WARNING - this can give duplicate sessions when 2 apps hit it at the same time.
[ "get", "a", "unique", "id", "(", "shortish", "string", ")", "to", "allow", "simple", "aggregation", "of", "log", "records", "from", "multiple", "sources", ".", "This", "id", "is", "used", "for", "the", "life", "of", "the", "running", "program", "to", "allow", "extraction", "from", "all", "logs", ".", "WARING", "-", "this", "can", "give", "duplicate", "sessions", "when", "2", "apps", "hit", "it", "at", "the", "same", "time", "." ]
python
train
DLR-RM/RAFCON
source/rafcon/gui/controllers/preferences_window.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/controllers/preferences_window.py#L136-L158
def on_preliminary_config_changed(self, config_m, prop_name, info): """Callback when a preliminary config value has been changed Mainly collects information, delegates handling further to _handle_config_update :param ConfigModel config_m: The config model that has been changed :param str prop_name: Should always be 'preliminary_config' :param dict info: Information e.g. about the changed config key """ self.check_for_preliminary_config() method_name = info['method_name'] # __setitem__, __delitem__, clear, ... if method_name in ['__setitem__', '__delitem__']: config_key = info['args'][0] self._handle_config_update(config_m, config_key) # Probably the preliminary config has been cleared, update corresponding list stores elif config_m is self.core_config_model: self.update_core_config_list_store() self.update_libraries_list_store() else: self.update_gui_config_list_store() self.update_shortcut_settings()
[ "def", "on_preliminary_config_changed", "(", "self", ",", "config_m", ",", "prop_name", ",", "info", ")", ":", "self", ".", "check_for_preliminary_config", "(", ")", "method_name", "=", "info", "[", "'method_name'", "]", "# __setitem__, __delitem__, clear, ...", "if", "method_name", "in", "[", "'__setitem__'", ",", "'__delitem__'", "]", ":", "config_key", "=", "info", "[", "'args'", "]", "[", "0", "]", "self", ".", "_handle_config_update", "(", "config_m", ",", "config_key", ")", "# Probably the preliminary config has been cleared, update corresponding list stores", "elif", "config_m", "is", "self", ".", "core_config_model", ":", "self", ".", "update_core_config_list_store", "(", ")", "self", ".", "update_libraries_list_store", "(", ")", "else", ":", "self", ".", "update_gui_config_list_store", "(", ")", "self", ".", "update_shortcut_settings", "(", ")" ]
Callback when a preliminary config value has been changed Mainly collects information, delegates handling further to _handle_config_update :param ConfigModel config_m: The config model that has been changed :param str prop_name: Should always be 'preliminary_config' :param dict info: Information e.g. about the changed config key
[ "Callback", "when", "a", "preliminary", "config", "value", "has", "been", "changed" ]
python
train
pandas-dev/pandas
pandas/core/arrays/datetimelike.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimelike.py#L1468-L1498
def validate_endpoints(closed): """ Check that the `closed` argument is among [None, "left", "right"] Parameters ---------- closed : {None, "left", "right"} Returns ------- left_closed : bool right_closed : bool Raises ------ ValueError : if argument is not among valid values """ left_closed = False right_closed = False if closed is None: left_closed = True right_closed = True elif closed == "left": left_closed = True elif closed == "right": right_closed = True else: raise ValueError("Closed has to be either 'left', 'right' or None") return left_closed, right_closed
[ "def", "validate_endpoints", "(", "closed", ")", ":", "left_closed", "=", "False", "right_closed", "=", "False", "if", "closed", "is", "None", ":", "left_closed", "=", "True", "right_closed", "=", "True", "elif", "closed", "==", "\"left\"", ":", "left_closed", "=", "True", "elif", "closed", "==", "\"right\"", ":", "right_closed", "=", "True", "else", ":", "raise", "ValueError", "(", "\"Closed has to be either 'left', 'right' or None\"", ")", "return", "left_closed", ",", "right_closed" ]
Check that the `closed` argument is among [None, "left", "right"] Parameters ---------- closed : {None, "left", "right"} Returns ------- left_closed : bool right_closed : bool Raises ------ ValueError : if argument is not among valid values
[ "Check", "that", "the", "closed", "argument", "is", "among", "[", "None", "left", "right", "]" ]
python
train
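Since the function is pure, a quick usage example follows directly from the code above.

left_closed, right_closed = validate_endpoints('left')
print(left_closed, right_closed)    # True False
print(validate_endpoints(None))     # (True, True): both endpoints closed
# validate_endpoints('both') would raise ValueError.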
itamarst/eliot
eliot/_action.py
https://github.com/itamarst/eliot/blob/c03c96520c5492fadfc438b4b0f6336e2785ba2d/eliot/_action.py#L594-L603
def status(self): """ One of C{STARTED_STATUS}, C{SUCCEEDED_STATUS}, C{FAILED_STATUS} or C{None}. """ message = self.end_message if self.end_message else self.start_message if message: return message.contents[ACTION_STATUS_FIELD] else: return None
[ "def", "status", "(", "self", ")", ":", "message", "=", "self", ".", "end_message", "if", "self", ".", "end_message", "else", "self", ".", "start_message", "if", "message", ":", "return", "message", ".", "contents", "[", "ACTION_STATUS_FIELD", "]", "else", ":", "return", "None" ]
One of C{STARTED_STATUS}, C{SUCCEEDED_STATUS}, C{FAILED_STATUS} or C{None}.
[ "One", "of", "C", "{", "STARTED_STATUS", "}", "C", "{", "SUCCEEDED_STATUS", "}", "C", "{", "FAILED_STATUS", "}", "or", "C", "{", "None", "}", "." ]
python
train
square/pylink
pylink/jlink.py
https://github.com/square/pylink/blob/81dda0a191d923a8b2627c52cb778aba24d279d7/pylink/jlink.py#L4338-L4353
def trace_format(self): """Retrieves the current format the trace buffer is using. Args: self (JLink): the ``JLink`` instance. Returns: The current format the trace buffer is using. This is one of the attributes of ``JLinkTraceFormat``. """ cmd = enums.JLinkTraceCommand.GET_FORMAT data = ctypes.c_uint32(0) res = self._dll.JLINKARM_TRACE_Control(cmd, ctypes.byref(data)) if (res == 1): raise errors.JLinkException('Failed to get trace format.') return data.value
[ "def", "trace_format", "(", "self", ")", ":", "cmd", "=", "enums", ".", "JLinkTraceCommand", ".", "GET_FORMAT", "data", "=", "ctypes", ".", "c_uint32", "(", "0", ")", "res", "=", "self", ".", "_dll", ".", "JLINKARM_TRACE_Control", "(", "cmd", ",", "ctypes", ".", "byref", "(", "data", ")", ")", "if", "(", "res", "==", "1", ")", ":", "raise", "errors", ".", "JLinkException", "(", "'Failed to get trace format.'", ")", "return", "data", ".", "value" ]
Retrieves the current format the trace buffer is using. Args: self (JLink): the ``JLink`` instance. Returns: The current format the trace buffer is using. This is one of the attributes of ``JLinkTraceFormat``.
[ "Retrieves", "the", "current", "format", "the", "trace", "buffer", "is", "using", "." ]
python
train
lorien/grab
grab/spider/decorators.py
https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/spider/decorators.py#L6-L36
def integrity(integrity_func, retry_errors=(ResponseNotValid,)): """ Args: :param integrity_func: could be a callable or a string containing the name of the method to call """ def build_decorator(func): @functools.wraps(func) def func_wrapper(self, grab, task): if isinstance(integrity_func, (list, tuple)): int_funcs = integrity_func else: int_funcs = [integrity_func] try: for int_func in int_funcs: if isinstance(int_func, str): getattr(self, int_func)(grab) else: int_func(grab) except retry_errors as ex: yield task.clone(refresh_cache=True) error_code = ex.__class__.__name__.replace('_', '-') self.stat.inc('integrity:%s' % error_code) else: result = func(self, grab, task) if result is not None: for event in result: yield event func_wrapper._original_func = func # pylint: disable=protected-access return func_wrapper return build_decorator
[ "def", "integrity", "(", "integrity_func", ",", "retry_errors", "=", "(", "ResponseNotValid", ",", ")", ")", ":", "def", "build_decorator", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "func_wrapper", "(", "self", ",", "grab", ",", "task", ")", ":", "if", "isinstance", "(", "integrity_func", ",", "(", "list", ",", "tuple", ")", ")", ":", "int_funcs", "=", "integrity_func", "else", ":", "int_funcs", "=", "[", "integrity_func", "]", "try", ":", "for", "int_func", "in", "int_funcs", ":", "if", "isinstance", "(", "int_func", ",", "str", ")", ":", "getattr", "(", "self", ",", "int_func", ")", "(", "grab", ")", "else", ":", "int_func", "(", "grab", ")", "except", "retry_errors", "as", "ex", ":", "yield", "task", ".", "clone", "(", "refresh_cache", "=", "True", ")", "error_code", "=", "ex", ".", "__class__", ".", "__name__", ".", "replace", "(", "'_'", ",", "'-'", ")", "self", ".", "stat", ".", "inc", "(", "'integrity:%s'", "%", "error_code", ")", "else", ":", "result", "=", "func", "(", "self", ",", "grab", ",", "task", ")", "if", "result", "is", "not", "None", ":", "for", "event", "in", "result", ":", "yield", "event", "func_wrapper", ".", "_original_func", "=", "func", "# pylint: disable=protected-access", "return", "func_wrapper", "return", "build_decorator" ]
Args: :param integrity_func: could be a callable or a string containing the name of the method to call
[ "Args", ":", ":", "param", "integrity_func", ":", "couldb", "callable", "or", "string", "contains", "name", "of", "method", "to", "call" ]
python
train
ShopRunner/apparate
apparate/configure.py
https://github.com/ShopRunner/apparate/blob/3ae74798c1c52cf3f69ee67d617c7c875c949577/apparate/configure.py#L72-L102
def configure(): """ Configure information about Databricks account and default behavior. Configuration is stored in a `.apparatecfg` file. A config file must exist before this package can be used, and can be supplied either directly as a text file or generated using this configuration tool. """ config = _load_config(CFG_FILE) _update_value( config, 'host', 'Databricks host (e.g. https://my-organization.cloud.databricks.com)', is_sensitive=False, ) _update_value( config, 'token', 'Databricks API token', is_sensitive=True, ) _update_value( config, 'prod_folder', 'Databricks folder for production libraries', is_sensitive=False, ) with open(CFG_FILE, 'w+') as f: config.write(f)
[ "def", "configure", "(", ")", ":", "config", "=", "_load_config", "(", "CFG_FILE", ")", "_update_value", "(", "config", ",", "'host'", ",", "'Databricks host (e.g. https://my-organization.cloud.databricks.com)'", ",", "is_sensitive", "=", "False", ",", ")", "_update_value", "(", "config", ",", "'token'", ",", "'Databricks API token'", ",", "is_sensitive", "=", "True", ",", ")", "_update_value", "(", "config", ",", "'prod_folder'", ",", "'Databricks folder for production libraries'", ",", "is_sensitive", "=", "False", ",", ")", "with", "open", "(", "CFG_FILE", ",", "'w+'", ")", "as", "f", ":", "config", ".", "write", "(", "f", ")" ]
Configure information about Databricks account and default behavior. Configuration is stored in a `.apparatecfg` file. A config file must exist before this package can be used, and can be supplied either directly as a text file or generated using this configuration tool.
[ "Configure", "information", "about", "Databricks", "account", "and", "default", "behavior", "." ]
python
train
Shapeways/coyote_framework
coyote_framework/webdriver/webdriverwrapper/WebElementWrapper.py
https://github.com/Shapeways/coyote_framework/blob/cb29899b984a21d56bf65d0b1d907073948fe16c/coyote_framework/webdriver/webdriverwrapper/WebElementWrapper.py#L490-L512
def text(self, force_get=False): """ Get the text of the element @rtype: str @return: Text of the element """ def text_element(): """ Wrapper to get text of element """ return self.element.text def force_text_element(): """Get text by javascript""" return self.driver_wrapper.js_executor.execute_template_and_return_result( 'getElementText.js', {}, self.element ) if force_get: return self.execute_and_handle_webelement_exceptions(force_text_element, 'get text by javascript') else: return self.execute_and_handle_webelement_exceptions(text_element, 'get text')
[ "def", "text", "(", "self", ",", "force_get", "=", "False", ")", ":", "def", "text_element", "(", ")", ":", "\"\"\"\n Wrapper to get text of element\n \"\"\"", "return", "self", ".", "element", ".", "text", "def", "force_text_element", "(", ")", ":", "\"\"\"Get text by javascript\"\"\"", "return", "self", ".", "driver_wrapper", ".", "js_executor", ".", "execute_template_and_return_result", "(", "'getElementText.js'", ",", "{", "}", ",", "self", ".", "element", ")", "if", "force_get", ":", "return", "self", ".", "execute_and_handle_webelement_exceptions", "(", "force_text_element", ",", "'get text by javascript'", ")", "else", ":", "return", "self", ".", "execute_and_handle_webelement_exceptions", "(", "text_element", ",", "'get text'", ")" ]
Get the text of the element @rtype: str @return: Text of the element
[ "Get", "the", "text", "of", "the", "element" ]
python
train
CamilleMo/SuperSight
supersight/Main.py
https://github.com/CamilleMo/SuperSight/blob/246ea35f42675801ab54df4ea78e957d592780e0/supersight/Main.py#L102-L120
def __create_lists_for_nav_bar(self): """This method is called by __create_template_vars_index. Once the index is created, self.nav_bar_section_page is available for each page.""" nav_bar_section = [] nav_bar_section_page = [] for key in self.sections: nav_bar_section.append(key) nav_bar_page = [] for page in self.sections[key].pages: nav_bar_page.append(page) nav_bar_section_page.append(nav_bar_page) nav_bar_section_page_len = [len(x) for x in nav_bar_section_page] self.nav_bar_section_page = nav_bar_section_page # create a variable for later # nav_bar_page is a list of lists return nav_bar_section, nav_bar_section_page, nav_bar_section_page_len
[ "def", "__create_lists_for_nav_bar", "(", "self", ")", ":", "nav_bar_section", "=", "[", "]", "nav_bar_section_page", "=", "[", "]", "for", "key", "in", "self", ".", "sections", ":", "nav_bar_section", ".", "append", "(", "key", ")", "nav_bar_page", "=", "[", "]", "for", "page", "in", "self", ".", "sections", "[", "key", "]", ".", "pages", ":", "nav_bar_page", ".", "append", "(", "page", ")", "nav_bar_section_page", ".", "append", "(", "nav_bar_page", ")", "nav_bar_section_page_len", "=", "[", "len", "(", "x", ")", "for", "x", "in", "nav_bar_section_page", "]", "self", ".", "nav_bar_section_page", "=", "nav_bar_section_page", "# create a variable for later", "# nav_bar_page is a list of lists", "return", "nav_bar_section", ",", "nav_bar_section_page", ",", "nav_bar_section_page_len" ]
This method is called by __create_template_vars_index. Once the index is created, self.nav_bar_section_page is available for each page.
[ "This", "method", "is", "called", "by", "__create_template_vars_index", ".", "Once", "the", "index", "is", "created", "self", ".", "nav_bar_section_page", "is", "available", "for", "each", "page", "." ]
python
train
vertexproject/synapse
synapse/cortex.py
https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/cortex.py#L217-L223
async def delTrigger(self, iden): ''' Deletes a trigger from the cortex ''' trig = self.cell.triggers.get(iden) self._trig_auth_check(trig.get('useriden')) self.cell.triggers.delete(iden)
[ "async", "def", "delTrigger", "(", "self", ",", "iden", ")", ":", "trig", "=", "self", ".", "cell", ".", "triggers", ".", "get", "(", "iden", ")", "self", ".", "_trig_auth_check", "(", "trig", ".", "get", "(", "'useriden'", ")", ")", "self", ".", "cell", ".", "triggers", ".", "delete", "(", "iden", ")" ]
Deletes a trigger from the cortex
[ "Deletes", "a", "trigger", "from", "the", "cortex" ]
python
train
OpenHumans/open-humans-api
ohapi/utils_fs.py
https://github.com/OpenHumans/open-humans-api/blob/ca2a28cf5d55cfdae13dd222ba58c25565bdb86e/ohapi/utils_fs.py#L455-L466
def mk_metadata_csv(filedir, outputfilepath, max_bytes=MAX_FILE_DEFAULT): """ Make metadata file for all files in a directory. :param filedir: This field is the filepath of the directory whose csv has to be made. :param outputfilepath: This field is the file path of the output csv. :param max_bytes: This field is the maximum file size to consider. Its default value is 128m. """ with open(outputfilepath, 'w') as filestream: write_metadata_to_filestream(filedir, filestream, max_bytes)
[ "def", "mk_metadata_csv", "(", "filedir", ",", "outputfilepath", ",", "max_bytes", "=", "MAX_FILE_DEFAULT", ")", ":", "with", "open", "(", "outputfilepath", ",", "'w'", ")", "as", "filestream", ":", "write_metadata_to_filestream", "(", "filedir", ",", "filestream", ",", "max_bytes", ")" ]
Make metadata file for all files in a directory. :param filedir: This field is the filepath of the directory whose csv has to be made. :param outputfilepath: This field is the file path of the output csv. :param max_bytes: This field is the maximum file size to consider. Its default value is 128m.
[ "Make", "metadata", "file", "for", "all", "files", "in", "a", "directory", "." ]
python
train
fermiPy/fermipy
fermipy/jobs/scatter_gather.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/jobs/scatter_gather.py#L296-L304
def clear_jobs(self, recursive=True): """Clear the self.jobs dictionary that contains information about jobs associated with this `ScatterGather` If recursive is True this will include jobs from all internal `Link` """ if recursive: self._scatter_link.clear_jobs(recursive) self.jobs.clear()
[ "def", "clear_jobs", "(", "self", ",", "recursive", "=", "True", ")", ":", "if", "recursive", ":", "self", ".", "_scatter_link", ".", "clear_jobs", "(", "recursive", ")", "self", ".", "jobs", ".", "clear", "(", ")" ]
Clear the self.jobs dictionary that contains information about jobs associated with this `ScatterGather` If recursive is True this will include jobs from all internal `Link`
[ "Clear", "the", "self", ".", "jobs", "dictionary", "that", "contains", "information", "about", "jobs", "associated", "with", "this", "ScatterGather" ]
python
train
googleapis/google-cloud-python
kms/google/cloud/kms_v1/gapic/key_management_service_client.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/kms/google/cloud/kms_v1/gapic/key_management_service_client.py#L98-L106
def crypto_key_path_path(cls, project, location, key_ring, crypto_key_path): """Return a fully-qualified crypto_key_path string.""" return google.api_core.path_template.expand( "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key_path=**}", project=project, location=location, key_ring=key_ring, crypto_key_path=crypto_key_path, )
[ "def", "crypto_key_path_path", "(", "cls", ",", "project", ",", "location", ",", "key_ring", ",", "crypto_key_path", ")", ":", "return", "google", ".", "api_core", ".", "path_template", ".", "expand", "(", "\"projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key_path=**}\"", ",", "project", "=", "project", ",", "location", "=", "location", ",", "key_ring", "=", "key_ring", ",", "crypto_key_path", "=", "crypto_key_path", ",", ")" ]
Return a fully-qualified crypto_key_path string.
[ "Return", "a", "fully", "-", "qualified", "crypto_key_path", "string", "." ]
python
train
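A usage sketch for the classmethod; the resource names are placeholders, and the import path assumes the google-cloud-kms package version this record was taken from.

from google.cloud.kms_v1 import KeyManagementServiceClient

path = KeyManagementServiceClient.crypto_key_path_path(
    'my-project', 'us-east1', 'my-ring', 'my-key')   # assumed resource names
print(path)
# projects/my-project/locations/us-east1/keyRings/my-ring/cryptoKeys/my-key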
miguelgrinberg/python-engineio
engineio/asyncio_client.py
https://github.com/miguelgrinberg/python-engineio/blob/261fd67103cb5d9a44369415748e66fdf62de6fb/engineio/asyncio_client.py#L321-L336
async def _receive_packet(self, pkt): """Handle incoming packets from the server.""" packet_name = packet.packet_names[pkt.packet_type] \ if pkt.packet_type < len(packet.packet_names) else 'UNKNOWN' self.logger.info( 'Received packet %s data %s', packet_name, pkt.data if not isinstance(pkt.data, bytes) else '<binary>') if pkt.packet_type == packet.MESSAGE: await self._trigger_event('message', pkt.data, run_async=True) elif pkt.packet_type == packet.PONG: self.pong_received = True elif pkt.packet_type == packet.NOOP: pass else: self.logger.error('Received unexpected packet of type %s', pkt.packet_type)
[ "async", "def", "_receive_packet", "(", "self", ",", "pkt", ")", ":", "packet_name", "=", "packet", ".", "packet_names", "[", "pkt", ".", "packet_type", "]", "if", "pkt", ".", "packet_type", "<", "len", "(", "packet", ".", "packet_names", ")", "else", "'UNKNOWN'", "self", ".", "logger", ".", "info", "(", "'Received packet %s data %s'", ",", "packet_name", ",", "pkt", ".", "data", "if", "not", "isinstance", "(", "pkt", ".", "data", ",", "bytes", ")", "else", "'<binary>'", ")", "if", "pkt", ".", "packet_type", "==", "packet", ".", "MESSAGE", ":", "await", "self", ".", "_trigger_event", "(", "'message'", ",", "pkt", ".", "data", ",", "run_async", "=", "True", ")", "elif", "pkt", ".", "packet_type", "==", "packet", ".", "PONG", ":", "self", ".", "pong_received", "=", "True", "elif", "pkt", ".", "packet_type", "==", "packet", ".", "NOOP", ":", "pass", "else", ":", "self", ".", "logger", ".", "error", "(", "'Received unexpected packet of type %s'", ",", "pkt", ".", "packet_type", ")" ]
Handle incoming packets from the server.
[ "Handle", "incoming", "packets", "from", "the", "server", "." ]
python
train
saltstack/salt
salt/modules/mongodb.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mongodb.py#L91-L110
def db_list(user=None, password=None, host=None, port=None, authdb=None): ''' List all MongoDB databases CLI Example: .. code-block:: bash salt '*' mongodb.db_list <user> <password> <host> <port> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: log.info('Listing databases') return conn.database_names() except pymongo.errors.PyMongoError as err: log.error(err) return six.text_type(err)
[ "def", "db_list", "(", "user", "=", "None", ",", "password", "=", "None", ",", "host", "=", "None", ",", "port", "=", "None", ",", "authdb", "=", "None", ")", ":", "conn", "=", "_connect", "(", "user", ",", "password", ",", "host", ",", "port", ",", "authdb", "=", "authdb", ")", "if", "not", "conn", ":", "return", "'Failed to connect to mongo database'", "try", ":", "log", ".", "info", "(", "'Listing databases'", ")", "return", "conn", ".", "database_names", "(", ")", "except", "pymongo", ".", "errors", ".", "PyMongoError", "as", "err", ":", "log", ".", "error", "(", "err", ")", "return", "six", ".", "text_type", "(", "err", ")" ]
List all MongoDB databases CLI Example: .. code-block:: bash salt '*' mongodb.db_list <user> <password> <host> <port>
[ "List", "all", "MongoDB", "databases" ]
python
train
sijis/sumologic-python
src/sumologic/collectors.py
https://github.com/sijis/sumologic-python/blob/b50200907837f0d452d14ead5e647b8e24e2e9e5/src/sumologic/collectors.py#L93-L105
def info(self, collector_id): """Return a dict of collector. Args: collector_id (int): id of collector (optional) """ cid = self.collector_id if collector_id: cid = collector_id url = '{0}/{1}'.format(self.url, cid) request = requests.get(url, auth=self.auth) return request.json()
[ "def", "info", "(", "self", ",", "collector_id", ")", ":", "cid", "=", "self", ".", "collector_id", "if", "collector_id", ":", "cid", "=", "collector_id", "url", "=", "'{0}/{1}'", ".", "format", "(", "self", ".", "url", ",", "cid", ")", "request", "=", "requests", ".", "get", "(", "url", ",", "auth", "=", "self", ".", "auth", ")", "return", "request", ".", "json", "(", ")" ]
Return a dict of collector. Args: collector_id (int): id of collector (optional)
[ "Return", "a", "dict", "of", "collector", "." ]
python
train
pyvisa/pyvisa
pyvisa/highlevel.py
https://github.com/pyvisa/pyvisa/blob/b8b2d4371e1f00782856aa9176ff1ced6bcb3798/pyvisa/highlevel.py#L214-L224
def uninstall_all_visa_handlers(self, session): """Uninstalls all previously installed handlers for a particular session. :param session: Unique logical identifier to a session. If None, operates on all sessions. """ if session is not None: self.__uninstall_all_handlers_helper(session) else: for session in list(self.handlers): self.__uninstall_all_handlers_helper(session)
[ "def", "uninstall_all_visa_handlers", "(", "self", ",", "session", ")", ":", "if", "session", "is", "not", "None", ":", "self", ".", "__uninstall_all_handlers_helper", "(", "session", ")", "else", ":", "for", "session", "in", "list", "(", "self", ".", "handlers", ")", ":", "self", ".", "__uninstall_all_handlers_helper", "(", "session", ")" ]
Uninstalls all previously installed handlers for a particular session. :param session: Unique logical identifier to a session. If None, operates on all sessions.
[ "Uninstalls", "all", "previously", "installed", "handlers", "for", "a", "particular", "session", "." ]
python
train
adrn/gala
gala/dynamics/actionangle.py
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/dynamics/actionangle.py#L443-L537
def _single_orbit_find_actions(orbit, N_max, toy_potential=None, force_harmonic_oscillator=False): """ Find approximate actions and angles for samples of a phase-space orbit, `w`, at times `t`. Uses toy potentials with known, analytic action-angle transformations to approximate the true coordinates as a Fourier sum. This code is adapted from Jason Sanders' `genfunc <https://github.com/jlsanders/genfunc>`_ .. todo:: Wrong shape for w -- should be (6,n) as usual... Parameters ---------- orbit : `~gala.dynamics.Orbit` N_max : int Maximum integer Fourier mode vector length, |n|. toy_potential : Potential (optional) Fix the toy potential class. force_harmonic_oscillator : bool (optional) Force using the harmonic oscillator potential as the toy potential. """ if orbit.norbits > 1: raise ValueError("must be a single orbit") if toy_potential is None: toy_potential = fit_toy_potential( orbit, force_harmonic_oscillator=force_harmonic_oscillator) else: logger.debug("Using *fixed* toy potential: {}" .format(toy_potential.parameters)) if isinstance(toy_potential, IsochronePotential): orbit_align = orbit.align_circulation_with_z() w = orbit_align.w() dxyz = (1, 2, 2) circ = np.sign(w[0, 0]*w[4, 0]-w[1, 0]*w[3, 0]) sign = np.array([1., circ, 1.]) orbit = orbit_align elif isinstance(toy_potential, HarmonicOscillatorPotential): dxyz = (2, 2, 2) sign = 1. w = orbit.w() else: raise ValueError("Invalid toy potential.") t = orbit.t.value # Now find toy actions and angles aaf = toy_potential.action_angle(orbit) if aaf[0].ndim > 2: aa = np.vstack((aaf[0].value[..., 0], aaf[1].value[..., 0])) else: aa = np.vstack((aaf[0].value, aaf[1].value)) if np.any(np.isnan(aa)): ix = ~np.any(np.isnan(aa), axis=0) aa = aa[:, ix] t = t[ix] warnings.warn("NaN value in toy actions or angles!") if sum(ix) > 1: raise ValueError("Too many NaN value in toy actions or angles!") t1 = time.time() A, b, nvecs = _action_prepare(aa, N_max, dx=dxyz[0], dy=dxyz[1], dz=dxyz[2]) actions = np.array(solve(A,b)) logger.debug("Action solution found for N_max={}, size {} symmetric" " matrix in {} seconds" .format(N_max, len(actions), time.time()-t1)) t1 = time.time() A, b, nvecs = _angle_prepare(aa, t, N_max, dx=dxyz[0], dy=dxyz[1], dz=dxyz[2], sign=sign) angles = np.array(solve(A, b)) logger.debug("Angle solution found for N_max={}, size {} symmetric" " matrix in {} seconds" .format(N_max, len(angles), time.time()-t1)) # Just some checks if len(angles) > len(aa): warnings.warn("More unknowns than equations!") J = actions[:3] # * sign theta = angles[:3] freqs = angles[3:6] # * sign return dict(actions=J*aaf[0].unit, angles=theta*aaf[1].unit, freqs=freqs*aaf[2].unit, Sn=actions[3:], dSn_dJ=angles[6:], nvecs=nvecs)
[ "def", "_single_orbit_find_actions", "(", "orbit", ",", "N_max", ",", "toy_potential", "=", "None", ",", "force_harmonic_oscillator", "=", "False", ")", ":", "if", "orbit", ".", "norbits", ">", "1", ":", "raise", "ValueError", "(", "\"must be a single orbit\"", ")", "if", "toy_potential", "is", "None", ":", "toy_potential", "=", "fit_toy_potential", "(", "orbit", ",", "force_harmonic_oscillator", "=", "force_harmonic_oscillator", ")", "else", ":", "logger", ".", "debug", "(", "\"Using *fixed* toy potential: {}\"", ".", "format", "(", "toy_potential", ".", "parameters", ")", ")", "if", "isinstance", "(", "toy_potential", ",", "IsochronePotential", ")", ":", "orbit_align", "=", "orbit", ".", "align_circulation_with_z", "(", ")", "w", "=", "orbit_align", ".", "w", "(", ")", "dxyz", "=", "(", "1", ",", "2", ",", "2", ")", "circ", "=", "np", ".", "sign", "(", "w", "[", "0", ",", "0", "]", "*", "w", "[", "4", ",", "0", "]", "-", "w", "[", "1", ",", "0", "]", "*", "w", "[", "3", ",", "0", "]", ")", "sign", "=", "np", ".", "array", "(", "[", "1.", ",", "circ", ",", "1.", "]", ")", "orbit", "=", "orbit_align", "elif", "isinstance", "(", "toy_potential", ",", "HarmonicOscillatorPotential", ")", ":", "dxyz", "=", "(", "2", ",", "2", ",", "2", ")", "sign", "=", "1.", "w", "=", "orbit", ".", "w", "(", ")", "else", ":", "raise", "ValueError", "(", "\"Invalid toy potential.\"", ")", "t", "=", "orbit", ".", "t", ".", "value", "# Now find toy actions and angles", "aaf", "=", "toy_potential", ".", "action_angle", "(", "orbit", ")", "if", "aaf", "[", "0", "]", ".", "ndim", ">", "2", ":", "aa", "=", "np", ".", "vstack", "(", "(", "aaf", "[", "0", "]", ".", "value", "[", "...", ",", "0", "]", ",", "aaf", "[", "1", "]", ".", "value", "[", "...", ",", "0", "]", ")", ")", "else", ":", "aa", "=", "np", ".", "vstack", "(", "(", "aaf", "[", "0", "]", ".", "value", ",", "aaf", "[", "1", "]", ".", "value", ")", ")", "if", "np", ".", "any", "(", "np", ".", "isnan", "(", "aa", ")", ")", ":", "ix", "=", "~", "np", ".", "any", "(", "np", ".", "isnan", "(", "aa", ")", ",", "axis", "=", "0", ")", "aa", "=", "aa", "[", ":", ",", "ix", "]", "t", "=", "t", "[", "ix", "]", "warnings", ".", "warn", "(", "\"NaN value in toy actions or angles!\"", ")", "if", "sum", "(", "ix", ")", ">", "1", ":", "raise", "ValueError", "(", "\"Too many NaN value in toy actions or angles!\"", ")", "t1", "=", "time", ".", "time", "(", ")", "A", ",", "b", ",", "nvecs", "=", "_action_prepare", "(", "aa", ",", "N_max", ",", "dx", "=", "dxyz", "[", "0", "]", ",", "dy", "=", "dxyz", "[", "1", "]", ",", "dz", "=", "dxyz", "[", "2", "]", ")", "actions", "=", "np", ".", "array", "(", "solve", "(", "A", ",", "b", ")", ")", "logger", ".", "debug", "(", "\"Action solution found for N_max={}, size {} symmetric\"", "\" matrix in {} seconds\"", ".", "format", "(", "N_max", ",", "len", "(", "actions", ")", ",", "time", ".", "time", "(", ")", "-", "t1", ")", ")", "t1", "=", "time", ".", "time", "(", ")", "A", ",", "b", ",", "nvecs", "=", "_angle_prepare", "(", "aa", ",", "t", ",", "N_max", ",", "dx", "=", "dxyz", "[", "0", "]", ",", "dy", "=", "dxyz", "[", "1", "]", ",", "dz", "=", "dxyz", "[", "2", "]", ",", "sign", "=", "sign", ")", "angles", "=", "np", ".", "array", "(", "solve", "(", "A", ",", "b", ")", ")", "logger", ".", "debug", "(", "\"Angle solution found for N_max={}, size {} symmetric\"", "\" matrix in {} seconds\"", ".", "format", "(", "N_max", ",", "len", "(", "angles", ")", ",", "time", ".", "time", "(", ")", "-", "t1", ")", ")", "# Just some 
checks", "if", "len", "(", "angles", ")", ">", "len", "(", "aa", ")", ":", "warnings", ".", "warn", "(", "\"More unknowns than equations!\"", ")", "J", "=", "actions", "[", ":", "3", "]", "# * sign", "theta", "=", "angles", "[", ":", "3", "]", "freqs", "=", "angles", "[", "3", ":", "6", "]", "# * sign", "return", "dict", "(", "actions", "=", "J", "*", "aaf", "[", "0", "]", ".", "unit", ",", "angles", "=", "theta", "*", "aaf", "[", "1", "]", ".", "unit", ",", "freqs", "=", "freqs", "*", "aaf", "[", "2", "]", ".", "unit", ",", "Sn", "=", "actions", "[", "3", ":", "]", ",", "dSn_dJ", "=", "angles", "[", "6", ":", "]", ",", "nvecs", "=", "nvecs", ")" ]
Find approximate actions and angles for samples of a phase-space orbit, `w`, at times `t`. Uses toy potentials with known, analytic action-angle transformations to approximate the true coordinates as a Fourier sum. This code is adapted from Jason Sanders' `genfunc <https://github.com/jlsanders/genfunc>`_ .. todo:: Wrong shape for w -- should be (6,n) as usual... Parameters ---------- orbit : `~gala.dynamics.Orbit` N_max : int Maximum integer Fourier mode vector length, |n|. toy_potential : Potential (optional) Fix the toy potential class. force_harmonic_oscillator : bool (optional) Force using the harmonic oscillator potential as the toy potential.
[ "Find", "approximate", "actions", "and", "angles", "for", "samples", "of", "a", "phase", "-", "space", "orbit", "w", "at", "times", "t", ".", "Uses", "toy", "potentials", "with", "known", "analytic", "action", "-", "angle", "transformations", "to", "approximate", "the", "true", "coordinates", "as", "a", "Fourier", "sum", "." ]
python
train
lalinsky/python-phoenixdb
phoenixdb/avatica/client.py
https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/avatica/client.py#L286-L314
def connection_sync(self, connection_id, connProps=None): """Synchronizes connection properties with the server. :param connection_id: ID of the current connection. :param connProps: Dictionary with the properties that should be changed. :returns: A ``common_pb2.ConnectionProperties`` object. """ if connProps is None: connProps = {} request = requests_pb2.ConnectionSyncRequest() request.connection_id = connection_id request.conn_props.auto_commit = connProps.get('autoCommit', False) request.conn_props.has_auto_commit = True request.conn_props.read_only = connProps.get('readOnly', False) request.conn_props.has_read_only = True request.conn_props.transaction_isolation = connProps.get('transactionIsolation', 0) request.conn_props.catalog = connProps.get('catalog', '') request.conn_props.schema = connProps.get('schema', '') response_data = self._apply(request) response = responses_pb2.ConnectionSyncResponse() response.ParseFromString(response_data) return response.conn_props
[ "def", "connection_sync", "(", "self", ",", "connection_id", ",", "connProps", "=", "None", ")", ":", "if", "connProps", "is", "None", ":", "connProps", "=", "{", "}", "request", "=", "requests_pb2", ".", "ConnectionSyncRequest", "(", ")", "request", ".", "connection_id", "=", "connection_id", "request", ".", "conn_props", ".", "auto_commit", "=", "connProps", ".", "get", "(", "'autoCommit'", ",", "False", ")", "request", ".", "conn_props", ".", "has_auto_commit", "=", "True", "request", ".", "conn_props", ".", "read_only", "=", "connProps", ".", "get", "(", "'readOnly'", ",", "False", ")", "request", ".", "conn_props", ".", "has_read_only", "=", "True", "request", ".", "conn_props", ".", "transaction_isolation", "=", "connProps", ".", "get", "(", "'transactionIsolation'", ",", "0", ")", "request", ".", "conn_props", ".", "catalog", "=", "connProps", ".", "get", "(", "'catalog'", ",", "''", ")", "request", ".", "conn_props", ".", "schema", "=", "connProps", ".", "get", "(", "'schema'", ",", "''", ")", "response_data", "=", "self", ".", "_apply", "(", "request", ")", "response", "=", "responses_pb2", ".", "ConnectionSyncResponse", "(", ")", "response", ".", "ParseFromString", "(", "response_data", ")", "return", "response", ".", "conn_props" ]
Synchronizes connection properties with the server. :param connection_id: ID of the current connection. :param connProps: Dictionary with the properties that should be changed. :returns: A ``common_pb2.ConnectionProperties`` object.
[ "Synchronizes", "connection", "properties", "with", "the", "server", "." ]
python
train
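A minimal usage sketch for the connection_sync method above; the AvaticaClient construction, server URL, and connection id are assumptions for illustration, not taken from the record:

# Hedged usage sketch -- the constructor call and connection id are assumed.
from phoenixdb.avatica.client import AvaticaClient

client = AvaticaClient('http://localhost:8765')   # assumed constructor signature
client.connect()
props = client.connection_sync(
    'connection-1',                               # id of an already-opened connection
    {'autoCommit': True, 'readOnly': False},      # properties to change
)
print(props.auto_commit, props.read_only)         # common_pb2.ConnectionProperties fields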
python-gitlab/python-gitlab
gitlab/v4/objects.py
https://github.com/python-gitlab/python-gitlab/blob/16de1b03fde3dbbe8f851614dd1d8c09de102fe5/gitlab/v4/objects.py#L2358-L2387
def merge(self, merge_commit_message=None,
          should_remove_source_branch=False,
          merge_when_pipeline_succeeds=False,
          **kwargs):
    """Accept the merge request.

    Args:
        merge_commit_message (str): Commit message
        should_remove_source_branch (bool): If True, removes the source
                                            branch
        merge_when_pipeline_succeeds (bool): Wait for the build to succeed,
                                             then merge
        **kwargs: Extra options to send to the server (e.g. sudo)

    Raises:
        GitlabAuthenticationError: If authentication is not correct
        GitlabMRClosedError: If the merge failed
    """
    path = '%s/%s/merge' % (self.manager.path, self.get_id())
    data = {}
    if merge_commit_message:
        data['merge_commit_message'] = merge_commit_message
    if should_remove_source_branch:
        data['should_remove_source_branch'] = True
    if merge_when_pipeline_succeeds:
        data['merge_when_pipeline_succeeds'] = True

    server_data = self.manager.gitlab.http_put(path, post_data=data,
                                               **kwargs)
    self._update_attrs(server_data)
[ "def", "merge", "(", "self", ",", "merge_commit_message", "=", "None", ",", "should_remove_source_branch", "=", "False", ",", "merge_when_pipeline_succeeds", "=", "False", ",", "*", "*", "kwargs", ")", ":", "path", "=", "'%s/%s/merge'", "%", "(", "self", ".", "manager", ".", "path", ",", "self", ".", "get_id", "(", ")", ")", "data", "=", "{", "}", "if", "merge_commit_message", ":", "data", "[", "'merge_commit_message'", "]", "=", "merge_commit_message", "if", "should_remove_source_branch", ":", "data", "[", "'should_remove_source_branch'", "]", "=", "True", "if", "merge_when_pipeline_succeeds", ":", "data", "[", "'merge_when_pipeline_succeeds'", "]", "=", "True", "server_data", "=", "self", ".", "manager", ".", "gitlab", ".", "http_put", "(", "path", ",", "post_data", "=", "data", ",", "*", "*", "kwargs", ")", "self", ".", "_update_attrs", "(", "server_data", ")" ]
Accept the merge request.

Args:
    merge_commit_message (str): Commit message
    should_remove_source_branch (bool): If True, removes the source
                                        branch
    merge_when_pipeline_succeeds (bool): Wait for the build to succeed,
                                         then merge
    **kwargs: Extra options to send to the server (e.g. sudo)

Raises:
    GitlabAuthenticationError: If authentication is not correct
    GitlabMRClosedError: If the merge failed
[ "Accept", "the", "merge", "request", "." ]
python
train
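A short usage sketch for the merge method above; the server URL, token, project path, and MR iid are illustrative values only:

# Illustrative values -- URL, token, project path and MR iid are assumptions.
import gitlab

gl = gitlab.Gitlab('https://gitlab.example.com', private_token='TOKEN')
project = gl.projects.get('group/project')
mr = project.mergerequests.get(42)
mr.merge(
    merge_commit_message='Merge feature branch',   # commit message is a string
    should_remove_source_branch=True,
)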
EventTeam/beliefs
src/beliefs/cells/posets.py
https://github.com/EventTeam/beliefs/blob/c07d22b61bebeede74a72800030dde770bf64208/src/beliefs/cells/posets.py#L273-L301
def coerce(self, other, is_positive=True):
    """ Only copies a pointer to the new domain's cell """
    if hasattr(other, 'get_domain') and hasattr(other, 'lower') and hasattr(other, 'upper'):
        if self.is_domain_equal(other):
            return other
        else:
            msg = "Cannot merge partial orders with different domains!"
            raise CellConstructionFailure(msg)
    if isinstance(other, LinearOrderedCell):
        # convert other's domain to a chain/dag
        # first ensure domain has same size and elements
        raise NotImplementedError("Please Implement me!")
    domain = self.get_domain()
    if other in domain:
        c = self.__class__()
        if not is_positive:
            # add value to lower (negative example)
            c.lower = set([other])
            c.upper = set()
        else:
            # add value to upper (positive example)
            c.upper = set([other])
            c.lower = set()
        return c
    else:
        raise CellConstructionFailure("Could not coerce value that is"
                                      " outside order's domain. (Other = %s)" % (str(other),))
[ "def", "coerce", "(", "self", ",", "other", ",", "is_positive", "=", "True", ")", ":", "if", "hasattr", "(", "other", ",", "'get_domain'", ")", "and", "hasattr", "(", "other", ",", "'lower'", ")", "and", "hasattr", "(", "other", ",", "'upper'", ")", ":", "if", "self", ".", "is_domain_equal", "(", "other", ")", ":", "return", "other", "else", ":", "msg", "=", "\"Cannot merge partial orders with different domains!\"", "raise", "CellConstructionFailure", "(", "msg", ")", "if", "isinstance", "(", "other", ",", "LinearOrderedCell", ")", ":", "# convert other's domain to a chain/dag", "# first ensure domain has same size and elements", "raise", "NotImplemented", "(", "\"Please Implement me!\"", ")", "domain", "=", "self", ".", "get_domain", "(", ")", "if", "other", "in", "domain", ":", "c", "=", "self", ".", "__class__", "(", ")", "if", "not", "is_positive", ":", "# add value to lower (negative example)", "c", ".", "lower", "=", "set", "(", "[", "other", "]", ")", "c", ".", "upper", "=", "set", "(", ")", "else", ":", "# add value to upper (positive example)", "c", ".", "upper", "=", "set", "(", "[", "other", "]", ")", "c", ".", "lower", "=", "set", "(", ")", "return", "c", "else", ":", "raise", "CellConstructionFailure", "(", "\"Could not coerce value that is\"", "+", "\" outside order's domain . (Other = %s) \"", "%", "(", "str", "(", "other", ")", ",", ")", ")" ]
Only copies a pointer to the new domain's cell
[ "Only", "copies", "a", "pointer", "to", "the", "new", "domain", "s", "cell" ]
python
train
osrg/ryu
ryu/lib/lacplib.py
https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/lib/lacplib.py#L287-L303
def _add_flow_v1_0(self, src, port, timeout, datapath): """enter a flow entry for the packet from the slave i/f with idle_timeout. for OpenFlow ver1.0.""" ofproto = datapath.ofproto parser = datapath.ofproto_parser match = parser.OFPMatch( in_port=port, dl_src=addrconv.mac.text_to_bin(src), dl_type=ether.ETH_TYPE_SLOW) actions = [parser.OFPActionOutput( ofproto.OFPP_CONTROLLER, 65535)] mod = parser.OFPFlowMod( datapath=datapath, match=match, cookie=0, command=ofproto.OFPFC_ADD, idle_timeout=timeout, priority=65535, flags=ofproto.OFPFF_SEND_FLOW_REM, actions=actions) datapath.send_msg(mod)
[ "def", "_add_flow_v1_0", "(", "self", ",", "src", ",", "port", ",", "timeout", ",", "datapath", ")", ":", "ofproto", "=", "datapath", ".", "ofproto", "parser", "=", "datapath", ".", "ofproto_parser", "match", "=", "parser", ".", "OFPMatch", "(", "in_port", "=", "port", ",", "dl_src", "=", "addrconv", ".", "mac", ".", "text_to_bin", "(", "src", ")", ",", "dl_type", "=", "ether", ".", "ETH_TYPE_SLOW", ")", "actions", "=", "[", "parser", ".", "OFPActionOutput", "(", "ofproto", ".", "OFPP_CONTROLLER", ",", "65535", ")", "]", "mod", "=", "parser", ".", "OFPFlowMod", "(", "datapath", "=", "datapath", ",", "match", "=", "match", ",", "cookie", "=", "0", ",", "command", "=", "ofproto", ".", "OFPFC_ADD", ",", "idle_timeout", "=", "timeout", ",", "priority", "=", "65535", ",", "flags", "=", "ofproto", ".", "OFPFF_SEND_FLOW_REM", ",", "actions", "=", "actions", ")", "datapath", ".", "send_msg", "(", "mod", ")" ]
enter a flow entry for the packet from the slave i/f with idle_timeout. for OpenFlow ver1.0.
[ "enter", "a", "flow", "entry", "for", "the", "packet", "from", "the", "slave", "i", "/", "f", "with", "idle_timeout", ".", "for", "OpenFlow", "ver1", ".", "0", "." ]
python
train
iwanbk/nyamuk
nyamuk/mqtt_pkt.py
https://github.com/iwanbk/nyamuk/blob/ac4c6028de288a4c8e0b332ae16eae889deb643d/nyamuk/mqtt_pkt.py#L193-L204
def read_uint16(self):
    """Read 2 bytes."""
    if self.pos + 2 > self.remaining_length:
        # Return a placeholder alongside the error code so callers can
        # always unpack an (error_code, word) pair.
        return NC.ERR_PROTOCOL, None

    msb = self.payload[self.pos]
    self.pos += 1

    lsb = self.payload[self.pos]
    self.pos += 1

    word = (msb << 8) + lsb

    return NC.ERR_SUCCESS, word
[ "def", "read_uint16", "(", "self", ")", ":", "if", "self", ".", "pos", "+", "2", ">", "self", ".", "remaining_length", ":", "return", "NC", ".", "ERR_PROTOCOL", "msb", "=", "self", ".", "payload", "[", "self", ".", "pos", "]", "self", ".", "pos", "+=", "1", "lsb", "=", "self", ".", "payload", "[", "self", ".", "pos", "]", "self", ".", "pos", "+=", "1", "word", "=", "(", "msb", "<<", "8", ")", "+", "lsb", "return", "NC", ".", "ERR_SUCCESS", ",", "word" ]
Read 2 bytes.
[ "Read", "2", "bytes", "." ]
python
train
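The big-endian reconstruction used above, word = (msb << 8) + lsb, can be checked standalone:

# Standalone check of the big-endian 16-bit decode in read_uint16.
payload = bytes([0x01, 0x2C])        # 300 encoded as two bytes, MSB first
msb, lsb = payload[0], payload[1]
assert (msb << 8) + lsb == 300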
IDSIA/sacred
sacred/commands.py
https://github.com/IDSIA/sacred/blob/72633776bed9b5bddf93ae7d215188e61970973a/sacred/commands.py#L119-L137
def print_dependencies(_run): """Print the detected source-files and dependencies.""" print('Dependencies:') for dep in _run.experiment_info['dependencies']: pack, _, version = dep.partition('==') print(' {:<20} == {}'.format(pack, version)) print('\nSources:') for source, digest in _run.experiment_info['sources']: print(' {:<43} {}'.format(source, digest)) if _run.experiment_info['repositories']: repos = _run.experiment_info['repositories'] print('\nVersion Control:') for repo in repos: mod = COLOR_DIRTY + 'M' if repo['dirty'] else ' ' print('{} {:<43} {}'.format(mod, repo['url'], repo['commit']) + ENDC) print('')
[ "def", "print_dependencies", "(", "_run", ")", ":", "print", "(", "'Dependencies:'", ")", "for", "dep", "in", "_run", ".", "experiment_info", "[", "'dependencies'", "]", ":", "pack", ",", "_", ",", "version", "=", "dep", ".", "partition", "(", "'=='", ")", "print", "(", "' {:<20} == {}'", ".", "format", "(", "pack", ",", "version", ")", ")", "print", "(", "'\\nSources:'", ")", "for", "source", ",", "digest", "in", "_run", ".", "experiment_info", "[", "'sources'", "]", ":", "print", "(", "' {:<43} {}'", ".", "format", "(", "source", ",", "digest", ")", ")", "if", "_run", ".", "experiment_info", "[", "'repositories'", "]", ":", "repos", "=", "_run", ".", "experiment_info", "[", "'repositories'", "]", "print", "(", "'\\nVersion Control:'", ")", "for", "repo", "in", "repos", ":", "mod", "=", "COLOR_DIRTY", "+", "'M'", "if", "repo", "[", "'dirty'", "]", "else", "' '", "print", "(", "'{} {:<43} {}'", ".", "format", "(", "mod", ",", "repo", "[", "'url'", "]", ",", "repo", "[", "'commit'", "]", ")", "+", "ENDC", ")", "print", "(", "''", ")" ]
Print the detected source-files and dependencies.
[ "Print", "the", "detected", "source", "-", "files", "and", "dependencies", "." ]
python
train
Ex-Mente/auxi.0
auxi/tools/materialphysicalproperties/core.py
https://github.com/Ex-Mente/auxi.0/blob/2dcdae74154f136f8ca58289fe5b20772f215046/auxi/tools/materialphysicalproperties/core.py#L53-L86
def create_template(material, path, show=False): """ Create a template csv file for a data set. :param material: the name of the material :param path: the path of the directory where the file must be written :param show: a boolean indicating whether the created file should be \ displayed after creation """ file_name = 'dataset-%s.csv' % material.lower() file_path = os.path.join(path, file_name) with open(file_path, 'w', newline='') as csvfile: writer = csv.writer(csvfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL) writer.writerow(['Name', material]) writer.writerow(['Description', '<Add a data set description ' 'here.>']) writer.writerow(['Reference', '<Add a reference to the source of ' 'the data set here.>']) writer.writerow(['Temperature', '<parameter 1 name>', '<parameter 2 name>', '<parameter 3 name>']) writer.writerow(['T', '<parameter 1 display symbol>', '<parameter 2 display symbol>', '<parameter 3 display symbol>']) writer.writerow(['K', '<parameter 1 units>', '<parameter 2 units>', '<parameter 3 units>']) writer.writerow(['T', '<parameter 1 symbol>', '<parameter 2 symbol>', '<parameter 3 symbol>']) for i in range(10): writer.writerow([100.0 + i*50, float(i), 10.0 + i, 100.0 + i]) if show is True: webbrowser.open_new(file_path)
[ "def", "create_template", "(", "material", ",", "path", ",", "show", "=", "False", ")", ":", "file_name", "=", "'dataset-%s.csv'", "%", "material", ".", "lower", "(", ")", "file_path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "file_name", ")", "with", "open", "(", "file_path", ",", "'w'", ",", "newline", "=", "''", ")", "as", "csvfile", ":", "writer", "=", "csv", ".", "writer", "(", "csvfile", ",", "delimiter", "=", "','", ",", "quotechar", "=", "'\"'", ",", "quoting", "=", "csv", ".", "QUOTE_MINIMAL", ")", "writer", ".", "writerow", "(", "[", "'Name'", ",", "material", "]", ")", "writer", ".", "writerow", "(", "[", "'Description'", ",", "'<Add a data set description '", "'here.>'", "]", ")", "writer", ".", "writerow", "(", "[", "'Reference'", ",", "'<Add a reference to the source of '", "'the data set here.>'", "]", ")", "writer", ".", "writerow", "(", "[", "'Temperature'", ",", "'<parameter 1 name>'", ",", "'<parameter 2 name>'", ",", "'<parameter 3 name>'", "]", ")", "writer", ".", "writerow", "(", "[", "'T'", ",", "'<parameter 1 display symbol>'", ",", "'<parameter 2 display symbol>'", ",", "'<parameter 3 display symbol>'", "]", ")", "writer", ".", "writerow", "(", "[", "'K'", ",", "'<parameter 1 units>'", ",", "'<parameter 2 units>'", ",", "'<parameter 3 units>'", "]", ")", "writer", ".", "writerow", "(", "[", "'T'", ",", "'<parameter 1 symbol>'", ",", "'<parameter 2 symbol>'", ",", "'<parameter 3 symbol>'", "]", ")", "for", "i", "in", "range", "(", "10", ")", ":", "writer", ".", "writerow", "(", "[", "100.0", "+", "i", "*", "50", ",", "float", "(", "i", ")", ",", "10.0", "+", "i", ",", "100.0", "+", "i", "]", ")", "if", "show", "is", "True", ":", "webbrowser", ".", "open_new", "(", "file_path", ")" ]
Create a template csv file for a data set. :param material: the name of the material :param path: the path of the directory where the file must be written :param show: a boolean indicating whether the created file should be \ displayed after creation
[ "Create", "a", "template", "csv", "file", "for", "a", "data", "set", "." ]
python
valid
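A usage sketch for create_template above; the material name and output directory are arbitrary choices for illustration:

# Writes /tmp/dataset-argon.csv with header rows and ten dummy data rows.
from auxi.tools.materialphysicalproperties.core import create_template

create_template('argon', '/tmp')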
MisterY/asset-allocation
asset_allocation/assetclass_cli.py
https://github.com/MisterY/asset-allocation/blob/72239aa20762cda67c091f27b86e65d61bf3b613/asset_allocation/assetclass_cli.py#L126-L141
def tree():
    """ Display a tree of asset classes """
    session = AppAggregate().open_session()
    classes = session.query(AssetClass).all()
    # Get the root classes
    root = []
    for ac in classes:
        if ac.parentid is None:
            root.append(ac)
            # logger.debug(ac.parentid)

    # header
    print_row("id", "asset class", "allocation", "level")
    print("-------------------------------")
    for ac in root:
        print_item_with_children(ac, classes, 0)
[ "def", "tree", "(", ")", ":", "session", "=", "AppAggregate", "(", ")", ".", "open_session", "(", ")", "classes", "=", "session", ".", "query", "(", "AssetClass", ")", ".", "all", "(", ")", "# Get the root classes", "root", "=", "[", "]", "for", "ac", "in", "classes", ":", "if", "ac", ".", "parentid", "is", "None", ":", "root", ".", "append", "(", "ac", ")", "# logger.debug(ac.parentid)", "# header", "print_row", "(", "\"id\"", ",", "\"asset class\"", ",", "\"allocation\"", ",", "\"level\"", ")", "print", "(", "f\"-------------------------------\"", ")", "for", "ac", "in", "root", ":", "print_item_with_children", "(", "ac", ",", "classes", ",", "0", ")" ]
Display a tree of asset classes
[ "Display", "a", "tree", "of", "asset", "classes" ]
python
train
google/grumpy
third_party/pypy/binascii.py
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/pypy/binascii.py#L540-L582
def rlecode_hqx(s): """ Run length encoding for binhex4. The CPython implementation does not do run length encoding of \x90 characters. This implementation does. """ if not s: return '' result = [] prev = s[0] count = 1 # Add a dummy character to get the loop to go one extra round. # The dummy must be different from the last character of s. # In the same step we remove the first character, which has # already been stored in prev. if s[-1] == '!': s = s[1:] + '?' else: s = s[1:] + '!' for c in s: if c == prev and count < 255: count += 1 else: if count == 1: if prev != '\x90': result.append(prev) else: result += ['\x90', '\x00'] elif count < 4: if prev != '\x90': result += [prev] * count else: result += ['\x90', '\x00'] * count else: if prev != '\x90': result += [prev, '\x90', chr(count)] else: result += ['\x90', '\x00', '\x90', chr(count)] count = 1 prev = c return ''.join(result)
[ "def", "rlecode_hqx", "(", "s", ")", ":", "if", "not", "s", ":", "return", "''", "result", "=", "[", "]", "prev", "=", "s", "[", "0", "]", "count", "=", "1", "# Add a dummy character to get the loop to go one extra round.", "# The dummy must be different from the last character of s.", "# In the same step we remove the first character, which has", "# already been stored in prev.", "if", "s", "[", "-", "1", "]", "==", "'!'", ":", "s", "=", "s", "[", "1", ":", "]", "+", "'?'", "else", ":", "s", "=", "s", "[", "1", ":", "]", "+", "'!'", "for", "c", "in", "s", ":", "if", "c", "==", "prev", "and", "count", "<", "255", ":", "count", "+=", "1", "else", ":", "if", "count", "==", "1", ":", "if", "prev", "!=", "'\\x90'", ":", "result", ".", "append", "(", "prev", ")", "else", ":", "result", "+=", "[", "'\\x90'", ",", "'\\x00'", "]", "elif", "count", "<", "4", ":", "if", "prev", "!=", "'\\x90'", ":", "result", "+=", "[", "prev", "]", "*", "count", "else", ":", "result", "+=", "[", "'\\x90'", ",", "'\\x00'", "]", "*", "count", "else", ":", "if", "prev", "!=", "'\\x90'", ":", "result", "+=", "[", "prev", ",", "'\\x90'", ",", "chr", "(", "count", ")", "]", "else", ":", "result", "+=", "[", "'\\x90'", ",", "'\\x00'", ",", "'\\x90'", ",", "chr", "(", "count", ")", "]", "count", "=", "1", "prev", "=", "c", "return", "''", ".", "join", "(", "result", ")" ]
Run length encoding for binhex4. The CPython implementation does not do run length encoding of \x90 characters. This implementation does.
[ "Run", "length", "encoding", "for", "binhex4", ".", "The", "CPython", "implementation", "does", "not", "do", "run", "length", "encoding", "of", "\\", "x90", "characters", ".", "This", "implementation", "does", "." ]
python
valid
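Quick behavioral checks of the run-length rules implemented above: runs shorter than 4 stay literal, longer runs become byte + \x90 marker + count, and a literal \x90 is escaped as \x90\x00:

# Behavioral checks against the binhex4 RLE rules in rlecode_hqx.
assert rlecode_hqx('') == ''
assert rlecode_hqx('abc') == 'abc'             # runs below 4 stay literal
assert rlecode_hqx('aaaaaa') == 'a\x90\x06'    # byte, RLE marker, run length
assert rlecode_hqx('\x90') == '\x90\x00'       # literal marker gets escaped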
ibis-project/ibis
ibis/expr/api.py
https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/expr/api.py#L1802-L1818
def geo_point_n(arg, n): """Return the Nth point in a single linestring in the geometry. Negative values are counted backwards from the end of the LineString, so that -1 is the last point. Returns NULL if there is no linestring in the geometry Parameters ---------- arg : geometry n : integer Returns ------- PointN : geometry scalar """ op = ops.GeoPointN(arg, n) return op.to_expr()
[ "def", "geo_point_n", "(", "arg", ",", "n", ")", ":", "op", "=", "ops", ".", "GeoPointN", "(", "arg", ",", "n", ")", "return", "op", ".", "to_expr", "(", ")" ]
Return the Nth point in a single linestring in the geometry. Negative values are counted backwards from the end of the LineString, so that -1 is the last point. Returns NULL if there is no linestring in the geometry Parameters ---------- arg : geometry n : integer Returns ------- PointN : geometry scalar
[ "Return", "the", "Nth", "point", "in", "a", "single", "linestring", "in", "the", "geometry", ".", "Negative", "values", "are", "counted", "backwards", "from", "the", "end", "of", "the", "LineString", "so", "that", "-", "1", "is", "the", "last", "point", ".", "Returns", "NULL", "if", "there", "is", "no", "linestring", "in", "the", "geometry" ]
python
train
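A hedged sketch of calling geo_point_n above on a column expression; the 'linestring' column type name and the unbound-table construction are assumptions about ibis' geospatial dtypes, not taken from the record:

# Hedged sketch -- the schema/type name below is an assumption for illustration.
import ibis

t = ibis.table([('geom', 'linestring')], name='shapes')   # assumed dtype name
second_point = geo_point_n(t.geom, 1)   # negative n counts from the end, e.g. -1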
aouyar/PyMunin
pysysinfo/asterisk.py
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/asterisk.py#L257-L273
def _initModuleList(self):
    """Query Asterisk Manager Interface to initialize internal list 
    of loaded modules.
    
    CLI Command - core show modules

    """
    if self.checkVersion('1.4'):
        cmd = "module show"
    else:
        cmd = "show modules"
    cmdresp = self.executeCommand(cmd)
    self._modules = set()
    for line in cmdresp.splitlines()[1:-1]:
        mobj = re.match(r'\s*(\S+)\s', line)
        if mobj:
            self._modules.add(mobj.group(1).lower())
[ "def", "_initModuleList", "(", "self", ")", ":", "if", "self", ".", "checkVersion", "(", "'1.4'", ")", ":", "cmd", "=", "\"module show\"", "else", ":", "cmd", "=", "\"show modules\"", "cmdresp", "=", "self", ".", "executeCommand", "(", "cmd", ")", "self", ".", "_modules", "=", "set", "(", ")", "for", "line", "in", "cmdresp", ".", "splitlines", "(", ")", "[", "1", ":", "-", "1", "]", ":", "mobj", "=", "re", ".", "match", "(", "'\\s*(\\S+)\\s'", ",", "line", ")", "if", "mobj", ":", "self", ".", "_modules", ".", "add", "(", "mobj", ".", "group", "(", "1", ")", ".", "lower", "(", ")", ")" ]
Query Asterisk Manager Interface to initialize internal list of loaded modules. CLI Command - core show modules
[ "Query", "Asterisk", "Manager", "Interface", "to", "initialize", "internal", "list", "of", "loaded", "modules", ".", "CLI", "Command", "-", "core", "show", "modules" ]
python
train
andrenarchy/krypy
krypy/utils.py
https://github.com/andrenarchy/krypy/blob/4883ec9a61d64ea56489e15c35cc40f0633ab2f1/krypy/utils.py#L775-L814
def hegedus(A, b, x0, M=None, Ml=None, ip_B=None): """Rescale initial guess appropriately (Hegedüs trick). The Hegedüs trick rescales the initial guess to :math:`\\gamma_{\\min} x_0` such that .. math :: \\|r_0\\|_{M^{-1}} = \\| M M_l (b - A \\gamma_{\\min} x_0) \\|_{M^{-1}} = \\min_{\\gamma\\in\\mathbb{C}} \\| M M_l (b - A \\gamma x_0) \\|_{M^{-1}} \\leq \\| M M_l b \\|_{M^{-1}}. This is achieved by :math:`\\gamma_{\\min} = \\frac{\\langle z, M M_l b \\rangle_{M^{-1}}}{\\|z\\|_{M^{-1}}^2}` for :math:`z=M M_l A x_0` because then :math:`r_0=P_{z^\\perp}b`. (Note that the right hand side of formula (5.8.16) in [LieS13]_ has to be complex conjugated.) The parameters are the parameters you want to pass to :py:meth:`~krypy.linsys.gmres`, :py:meth:`~krypy.linsys.minres` or :py:meth:`~krypy.linsys.cg`. :return: the adapted initial guess with the above property. """ N = len(b) shape = (N, N) A = get_linearoperator(shape, A) M = get_linearoperator(shape, M) Ml = get_linearoperator(shape, Ml) MlAx0 = Ml*(A*x0) z = M*MlAx0 znorm2 = inner(z, MlAx0, ip_B=ip_B) if znorm2 <= 1e-15: return numpy.zeros((N, 1)) gamma = inner(z, Ml*b, ip_B=ip_B) / znorm2 return gamma*x0
[ "def", "hegedus", "(", "A", ",", "b", ",", "x0", ",", "M", "=", "None", ",", "Ml", "=", "None", ",", "ip_B", "=", "None", ")", ":", "N", "=", "len", "(", "b", ")", "shape", "=", "(", "N", ",", "N", ")", "A", "=", "get_linearoperator", "(", "shape", ",", "A", ")", "M", "=", "get_linearoperator", "(", "shape", ",", "M", ")", "Ml", "=", "get_linearoperator", "(", "shape", ",", "Ml", ")", "MlAx0", "=", "Ml", "*", "(", "A", "*", "x0", ")", "z", "=", "M", "*", "MlAx0", "znorm2", "=", "inner", "(", "z", ",", "MlAx0", ",", "ip_B", "=", "ip_B", ")", "if", "znorm2", "<=", "1e-15", ":", "return", "numpy", ".", "zeros", "(", "(", "N", ",", "1", ")", ")", "gamma", "=", "inner", "(", "z", ",", "Ml", "*", "b", ",", "ip_B", "=", "ip_B", ")", "/", "znorm2", "return", "gamma", "*", "x0" ]
Rescale initial guess appropriately (Hegedüs trick). The Hegedüs trick rescales the initial guess to :math:`\\gamma_{\\min} x_0` such that .. math :: \\|r_0\\|_{M^{-1}} = \\| M M_l (b - A \\gamma_{\\min} x_0) \\|_{M^{-1}} = \\min_{\\gamma\\in\\mathbb{C}} \\| M M_l (b - A \\gamma x_0) \\|_{M^{-1}} \\leq \\| M M_l b \\|_{M^{-1}}. This is achieved by :math:`\\gamma_{\\min} = \\frac{\\langle z, M M_l b \\rangle_{M^{-1}}}{\\|z\\|_{M^{-1}}^2}` for :math:`z=M M_l A x_0` because then :math:`r_0=P_{z^\\perp}b`. (Note that the right hand side of formula (5.8.16) in [LieS13]_ has to be complex conjugated.) The parameters are the parameters you want to pass to :py:meth:`~krypy.linsys.gmres`, :py:meth:`~krypy.linsys.minres` or :py:meth:`~krypy.linsys.cg`. :return: the adapted initial guess with the above property.
[ "Rescale", "initial", "guess", "appropriately", "(", "Hegedüs", "trick", ")", "." ]
python
train
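A small numerical sketch of the Hegedüs rescaling above: after scaling, the residual is the component of b orthogonal to A x0 (in the default Euclidean inner product), so its norm cannot exceed ||b||. The matrix and vectors below are arbitrary illustrative values:

# Hedged numerical check of the residual bound; A, b, x0 are illustrative.
import numpy
from krypy.utils import hegedus

A = numpy.array([[4.0, 1.0], [1.0, 3.0]])
b = numpy.array([[1.0], [2.0]])        # column vector, as krypy expects
x0 = numpy.array([[2.0], [2.0]])
x0_scaled = hegedus(A, b, x0)
assert numpy.linalg.norm(b - A.dot(x0_scaled)) <= numpy.linalg.norm(b) + 1e-12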
eyeseast/python-frontmatter
frontmatter/__init__.py
https://github.com/eyeseast/python-frontmatter/blob/c318e583c48599eb597e0ad59c5d972258c3febc/frontmatter/__init__.py#L34-L49
def detect_format(text, handlers): """ Figure out which handler to use, based on metadata. Returns a handler instance or None. ``text`` should be unicode text about to be parsed. ``handlers`` is a dictionary where keys are opening delimiters and values are handler instances. """ for pattern, handler in handlers.items(): if pattern.match(text): return handler # nothing matched, give nothing back return None
[ "def", "detect_format", "(", "text", ",", "handlers", ")", ":", "for", "pattern", ",", "handler", "in", "handlers", ".", "items", "(", ")", ":", "if", "pattern", ".", "match", "(", "text", ")", ":", "return", "handler", "# nothing matched, give nothing back", "return", "None" ]
Figure out which handler to use, based on metadata. Returns a handler instance or None. ``text`` should be unicode text about to be parsed. ``handlers`` is a dictionary where keys are opening delimiters and values are handler instances.
[ "Figure", "out", "which", "handler", "to", "use", "based", "on", "metadata", ".", "Returns", "a", "handler", "instance", "or", "None", "." ]
python
test
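A usage sketch for detect_format above; the handler stand-in and the opening-delimiter pattern are illustrative:

# Handlers are keyed by compiled opening-delimiter patterns.
import re

yaml_handler = object()   # stand-in for a real handler instance
handlers = {re.compile(r'^\s*---', re.MULTILINE): yaml_handler}

assert detect_format('---\ntitle: hi\n---\nbody', handlers) is yaml_handler
assert detect_format('plain text, no front matter', handlers) is None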
InfoAgeTech/django-core
django_core/mail/sending.py
https://github.com/InfoAgeTech/django-core/blob/9664a145473b75120bf71e1644e9c8086e7e8955/django_core/mail/sending.py#L9-L39
def send_email_from_template(to_email, from_email, subject, markdown_template=None, text_template=None, html_template=None, fail_silently=False, context=None, **kwargs): """Send an email from a template. :param to_email: the email address to send the email to :param from_email: the email address the email will be from :param subject: the subject of the email :param markdown_template: the markdown syntax template to use for the email. If provided, this will generate both the text and html versions of the email. You must have the "markdown" library installed in order to use this. pip install markdown. :param text_template: the template for the text version of the email. This can be omitted if the markdown_template is provided. :param html_template: the template for the html version of the email. This can be omitted if the markdown_template is provided. :param context: the context for the email templates """ return send_emails_from_template( to_emails=[to_email], from_email=from_email, subject=subject, markdown_template=markdown_template, text_template=text_template, html_template=html_template, fail_silently=fail_silently, context=context, **kwargs )
[ "def", "send_email_from_template", "(", "to_email", ",", "from_email", ",", "subject", ",", "markdown_template", "=", "None", ",", "text_template", "=", "None", ",", "html_template", "=", "None", ",", "fail_silently", "=", "False", ",", "context", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "send_emails_from_template", "(", "to_emails", "=", "[", "to_email", "]", ",", "from_email", "=", "from_email", ",", "subject", "=", "subject", ",", "markdown_template", "=", "markdown_template", ",", "text_template", "=", "text_template", ",", "html_template", "=", "html_template", ",", "fail_silently", "=", "fail_silently", ",", "context", "=", "context", ",", "*", "*", "kwargs", ")" ]
Send an email from a template. :param to_email: the email address to send the email to :param from_email: the email address the email will be from :param subject: the subject of the email :param markdown_template: the markdown syntax template to use for the email. If provided, this will generate both the text and html versions of the email. You must have the "markdown" library installed in order to use this. pip install markdown. :param text_template: the template for the text version of the email. This can be omitted if the markdown_template is provided. :param html_template: the template for the html version of the email. This can be omitted if the markdown_template is provided. :param context: the context for the email templates
[ "Send", "an", "email", "from", "a", "template", "." ]
python
train
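A call sketch using only the parameters documented above; the addresses, template path, and context are illustrative:

# Illustrative values only -- addresses and template path are assumptions.
from django_core.mail.sending import send_email_from_template

send_email_from_template(
    to_email='user@example.com',
    from_email='noreply@example.com',
    subject='Welcome',
    markdown_template='emails/welcome.md',   # renders both text and html parts
    context={'first_name': 'Ada'},
)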
campbellr/smashrun-client
smashrun/client.py
https://github.com/campbellr/smashrun-client/blob/2522cb4d0545cf482a49a9533f12aac94c5aecdc/smashrun/client.py#L109-L113
def get_notables(self, id_num): """Return the notables of the activity with the given id. """ url = self._build_url('my', 'activities', id_num, 'notables') return self._json(url)
[ "def", "get_notables", "(", "self", ",", "id_num", ")", ":", "url", "=", "self", ".", "_build_url", "(", "'my'", ",", "'activities'", ",", "id_num", ",", "'notables'", ")", "return", "self", ".", "_json", "(", "url", ")" ]
Return the notables of the activity with the given id.
[ "Return", "the", "notables", "of", "the", "activity", "with", "the", "given", "id", "." ]
python
train
gem/oq-engine
openquake/engine/engine.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/engine/engine.py#L269-L291
def poll_queue(job_id, pid, poll_time): """ Check the queue of executing/submitted jobs and exit when there is a free slot. """ if config.distribution.serialize_jobs: first_time = True while True: jobs = logs.dbcmd(GET_JOBS) failed = [job.id for job in jobs if not psutil.pid_exists(job.pid)] if failed: logs.dbcmd("UPDATE job SET status='failed', is_running=0 " "WHERE id in (?X)", failed) elif any(job.id < job_id for job in jobs): if first_time: logs.LOG.warn('Waiting for jobs %s', [j.id for j in jobs]) logs.dbcmd('update_job', job_id, {'status': 'submitted', 'pid': pid}) first_time = False time.sleep(poll_time) else: break logs.dbcmd('update_job', job_id, {'status': 'executing', 'pid': _PID})
[ "def", "poll_queue", "(", "job_id", ",", "pid", ",", "poll_time", ")", ":", "if", "config", ".", "distribution", ".", "serialize_jobs", ":", "first_time", "=", "True", "while", "True", ":", "jobs", "=", "logs", ".", "dbcmd", "(", "GET_JOBS", ")", "failed", "=", "[", "job", ".", "id", "for", "job", "in", "jobs", "if", "not", "psutil", ".", "pid_exists", "(", "job", ".", "pid", ")", "]", "if", "failed", ":", "logs", ".", "dbcmd", "(", "\"UPDATE job SET status='failed', is_running=0 \"", "\"WHERE id in (?X)\"", ",", "failed", ")", "elif", "any", "(", "job", ".", "id", "<", "job_id", "for", "job", "in", "jobs", ")", ":", "if", "first_time", ":", "logs", ".", "LOG", ".", "warn", "(", "'Waiting for jobs %s'", ",", "[", "j", ".", "id", "for", "j", "in", "jobs", "]", ")", "logs", ".", "dbcmd", "(", "'update_job'", ",", "job_id", ",", "{", "'status'", ":", "'submitted'", ",", "'pid'", ":", "pid", "}", ")", "first_time", "=", "False", "time", ".", "sleep", "(", "poll_time", ")", "else", ":", "break", "logs", ".", "dbcmd", "(", "'update_job'", ",", "job_id", ",", "{", "'status'", ":", "'executing'", ",", "'pid'", ":", "_PID", "}", ")" ]
Check the queue of executing/submitted jobs and exit when there is a free slot.
[ "Check", "the", "queue", "of", "executing", "/", "submitted", "jobs", "and", "exit", "when", "there", "is", "a", "free", "slot", "." ]
python
train
Erotemic/utool
utool/util_list.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L2322-L2349
def debug_consec_list(list_): """ Returns: tuple of (missing_items, missing_indices, duplicate_items) """ if not issorted(list_): print('warning list is not sorted. indices will not match') sortedlist = sorted(list_) start = sortedlist[0] last = start - 1 missing_vals = [] missing_indices = [] duplicate_items = [] for count, item in enumerate(sortedlist): diff = item - last if diff > 1: missing_indices.append(count) for miss in range(last + 1, last + diff): missing_vals.append(miss) elif diff == 0: duplicate_items.append(item) elif diff == 1: # Expected case pass else: raise AssertionError('We sorted the list. diff can not be negative') last = item return missing_vals, missing_indices, duplicate_items
[ "def", "debug_consec_list", "(", "list_", ")", ":", "if", "not", "issorted", "(", "list_", ")", ":", "print", "(", "'warning list is not sorted. indices will not match'", ")", "sortedlist", "=", "sorted", "(", "list_", ")", "start", "=", "sortedlist", "[", "0", "]", "last", "=", "start", "-", "1", "missing_vals", "=", "[", "]", "missing_indices", "=", "[", "]", "duplicate_items", "=", "[", "]", "for", "count", ",", "item", "in", "enumerate", "(", "sortedlist", ")", ":", "diff", "=", "item", "-", "last", "if", "diff", ">", "1", ":", "missing_indices", ".", "append", "(", "count", ")", "for", "miss", "in", "range", "(", "last", "+", "1", ",", "last", "+", "diff", ")", ":", "missing_vals", ".", "append", "(", "miss", ")", "elif", "diff", "==", "0", ":", "duplicate_items", ".", "append", "(", "item", ")", "elif", "diff", "==", "1", ":", "# Expected case", "pass", "else", ":", "raise", "AssertionError", "(", "'We sorted the list. diff can not be negative'", ")", "last", "=", "item", "return", "missing_vals", ",", "missing_indices", ",", "duplicate_items" ]
Returns: tuple of (missing_items, missing_indices, duplicate_items)
[ "Returns", ":", "tuple", "of", "(", "missing_items", "missing_indices", "duplicate_items", ")" ]
python
train
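A worked example of the three return values from debug_consec_list above: here 4 is missing between 3 and 5, the gap is detected at sorted index 3, and 6 is duplicated:

# Worked example of the (missing_vals, missing_indices, duplicate_items) tuple.
missing_vals, missing_indices, duplicate_items = debug_consec_list([1, 2, 3, 5, 6, 6])
assert missing_vals == [4]          # 4 is absent between 3 and 5
assert missing_indices == [3]       # gap detected at sorted index 3
assert duplicate_items == [6]       # 6 appears twice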
mbakker7/timml
timml/model.py
https://github.com/mbakker7/timml/blob/91e99ad573cb8a9ad8ac1fa041c3ca44520c2390/timml/model.py#L201-L230
def headalongline(self, x, y, layers=None): """Head along line or curve Parameters ---------- x : array x values of line y : array y values of line layers : integer, list or array, optional layers for which grid is returned Returns ------- h : array size `nlayers, nx` """ xg, yg = np.atleast_1d(x), np.atleast_1d(y) if layers is None: Nlayers = self.aq.find_aquifer_data(xg[0], yg[0]).naq else: Nlayers = len(np.atleast_1d(layers)) nx = len(xg) if len(yg) == 1: yg = yg * np.ones(nx) h = np.zeros((Nlayers, nx)) for i in range(nx): h[:, i] = self.head(xg[i], yg[i], layers) return h
[ "def", "headalongline", "(", "self", ",", "x", ",", "y", ",", "layers", "=", "None", ")", ":", "xg", ",", "yg", "=", "np", ".", "atleast_1d", "(", "x", ")", ",", "np", ".", "atleast_1d", "(", "y", ")", "if", "layers", "is", "None", ":", "Nlayers", "=", "self", ".", "aq", ".", "find_aquifer_data", "(", "xg", "[", "0", "]", ",", "yg", "[", "0", "]", ")", ".", "naq", "else", ":", "Nlayers", "=", "len", "(", "np", ".", "atleast_1d", "(", "layers", ")", ")", "nx", "=", "len", "(", "xg", ")", "if", "len", "(", "yg", ")", "==", "1", ":", "yg", "=", "yg", "*", "np", ".", "ones", "(", "nx", ")", "h", "=", "np", ".", "zeros", "(", "(", "Nlayers", ",", "nx", ")", ")", "for", "i", "in", "range", "(", "nx", ")", ":", "h", "[", ":", ",", "i", "]", "=", "self", ".", "head", "(", "xg", "[", "i", "]", ",", "yg", "[", "i", "]", ",", "layers", ")", "return", "h" ]
Head along line or curve Parameters ---------- x : array x values of line y : array y values of line layers : integer, list or array, optional layers for which grid is returned Returns ------- h : array size `nlayers, nx`
[ "Head", "along", "line", "or", "curve", "Parameters", "----------", "x", ":", "array", "x", "values", "of", "line", "y", ":", "array", "y", "values", "of", "line", "layers", ":", "integer", "list", "or", "array", "optional", "layers", "for", "which", "grid", "is", "returned", "Returns", "-------", "h", ":", "array", "size", "nlayers", "nx" ]
python
train
cloudendpoints/endpoints-python
endpoints/apiserving.py
https://github.com/cloudendpoints/endpoints-python/blob/00dd7c7a52a9ee39d5923191c2604b8eafdb3f24/endpoints/apiserving.py#L209-L234
def __register_class(self, parsed_config): """Register the class implementing this config, so we only add it once. Args: parsed_config: The JSON object with the API configuration being added. Raises: ApiConfigurationError: If the class has already been registered. """ methods = parsed_config.get('methods') if not methods: return # Determine the name of the class that implements this configuration. service_classes = set() for method in methods.itervalues(): rosy_method = method.get('rosyMethod') if rosy_method and '.' in rosy_method: method_class = rosy_method.split('.', 1)[0] service_classes.add(method_class) for service_class in service_classes: if service_class in self.__registered_classes: raise api_exceptions.ApiConfigurationError( 'API class %s has already been registered.' % service_class) self.__registered_classes.add(service_class)
[ "def", "__register_class", "(", "self", ",", "parsed_config", ")", ":", "methods", "=", "parsed_config", ".", "get", "(", "'methods'", ")", "if", "not", "methods", ":", "return", "# Determine the name of the class that implements this configuration.", "service_classes", "=", "set", "(", ")", "for", "method", "in", "methods", ".", "itervalues", "(", ")", ":", "rosy_method", "=", "method", ".", "get", "(", "'rosyMethod'", ")", "if", "rosy_method", "and", "'.'", "in", "rosy_method", ":", "method_class", "=", "rosy_method", ".", "split", "(", "'.'", ",", "1", ")", "[", "0", "]", "service_classes", ".", "add", "(", "method_class", ")", "for", "service_class", "in", "service_classes", ":", "if", "service_class", "in", "self", ".", "__registered_classes", ":", "raise", "api_exceptions", ".", "ApiConfigurationError", "(", "'API class %s has already been registered.'", "%", "service_class", ")", "self", ".", "__registered_classes", ".", "add", "(", "service_class", ")" ]
Register the class implementing this config, so we only add it once. Args: parsed_config: The JSON object with the API configuration being added. Raises: ApiConfigurationError: If the class has already been registered.
[ "Register", "the", "class", "implementing", "this", "config", "so", "we", "only", "add", "it", "once", "." ]
python
train
networks-lab/tidyextractors
tidyextractors/base_extractor.py
https://github.com/networks-lab/tidyextractors/blob/658448ed533beecf32adcc188fc64d1068d15ca6/tidyextractors/base_extractor.py#L105-L119
def _drop_collections(self, df): """ Drops columns containing collections (i.e. sets, dicts, lists) from a DataFrame. :param pandas.DataFrame df: Usually self._data. :return: pandas.DataFrame """ all_cols = df.columns keep_cols = [] # Check whether each column contains collections. for c in all_cols: if len(self._col_type_set(c, df).intersection([set, dict, list])) == 0: keep_cols.append(c) return df[keep_cols]
[ "def", "_drop_collections", "(", "self", ",", "df", ")", ":", "all_cols", "=", "df", ".", "columns", "keep_cols", "=", "[", "]", "# Check whether each column contains collections.", "for", "c", "in", "all_cols", ":", "if", "len", "(", "self", ".", "_col_type_set", "(", "c", ",", "df", ")", ".", "intersection", "(", "[", "set", ",", "dict", ",", "list", "]", ")", ")", "==", "0", ":", "keep_cols", ".", "append", "(", "c", ")", "return", "df", "[", "keep_cols", "]" ]
Drops columns containing collections (i.e. sets, dicts, lists) from a DataFrame. :param pandas.DataFrame df: Usually self._data. :return: pandas.DataFrame
[ "Drops", "columns", "containing", "collections", "(", "i", ".", "e", ".", "sets", "dicts", "lists", ")", "from", "a", "DataFrame", "." ]
python
train
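_drop_collections above depends on the extractor's _col_type_set helper; here is a self-contained sketch of the same column-filtering idea, not the extractor method itself:

# Standalone sketch of the same idea: drop DataFrame columns whose values
# are collections (set/dict/list).
import pandas as pd

def drop_collections(df):
    keep = [c for c in df.columns
            if not df[c].map(lambda v: isinstance(v, (set, dict, list))).any()]
    return df[keep]

df = pd.DataFrame({'a': [1, 2], 'b': [[1], [2]]})
assert list(drop_collections(df).columns) == ['a']   # 'b' holds lists, so it is dropped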
bharadwaj-raju/libdesktop
libdesktop/volume.py
https://github.com/bharadwaj-raju/libdesktop/blob/4d6b815755c76660b6ef4d2db6f54beff38c0db7/libdesktop/volume.py#L64-L88
def get_volume(): '''Get the volume. Get the current volume. Returns: int: The current volume (percentage, between 0 and 100). ''' if system.get_name() == 'windows': # TODO: Implement volume for Windows. Looks like WinAPI is the # solution... pass elif system.get_name() == 'mac': volume = system.get_cmd_out( ['osascript', '-e', 'set ovol to output volume of (get volume settings); return the quoted form of ovol']) return int(volume) * 10 else: # Linux/Unix volume = system.get_cmd_out( ('amixer get Master |grep % |awk \'{print $5}\'|' 'sed -e \'s/\[//\' -e \'s/\]//\' | head -n1')) return int(volume.replace('%', ''))
[ "def", "get_volume", "(", ")", ":", "if", "system", ".", "get_name", "(", ")", "==", "'windows'", ":", "# TODO: Implement volume for Windows. Looks like WinAPI is the", "# solution...", "pass", "elif", "system", ".", "get_name", "(", ")", "==", "'mac'", ":", "volume", "=", "system", ".", "get_cmd_out", "(", "[", "'osascript'", ",", "'-e'", ",", "'set ovol to output volume of (get volume settings); return the quoted form of ovol'", "]", ")", "return", "int", "(", "volume", ")", "*", "10", "else", ":", "# Linux/Unix", "volume", "=", "system", ".", "get_cmd_out", "(", "(", "'amixer get Master |grep % |awk \\'{print $5}\\'|'", "'sed -e \\'s/\\[//\\' -e \\'s/\\]//\\' | head -n1'", ")", ")", "return", "int", "(", "volume", ".", "replace", "(", "'%'", ",", "''", ")", ")" ]
Get the volume. Get the current volume. Returns: int: The current volume (percentage, between 0 and 100).
[ "Get", "the", "volume", "." ]
python
train
openego/ding0
ding0/core/__init__.py
https://github.com/openego/ding0/blob/e2d6528f96255e4bb22ba15514a4f1883564ed5d/ding0/core/__init__.py#L1951-L1998
def list_load_areas(self, session, mv_districts): """list load_areas (load areas) peak load from database for a single MV grid_district Parameters ---------- session : sqlalchemy.orm.session.Session Database session mv_districts: List of MV districts """ # threshold: load area peak load, if peak load < threshold => disregard # load area lv_loads_threshold = cfg_ding0.get('mv_routing', 'load_area_threshold') #lv_loads_threshold = 0 gw2kw = 10 ** 6 # load in database is in GW -> scale to kW #filter list for only desired MV districts stations_list = [d.mv_grid._station.id_db for d in mv_districts] # build SQL query lv_load_areas_sqla = session.query( self.orm['orm_lv_load_areas'].id.label('id_db'), (self.orm['orm_lv_load_areas'].sector_peakload_residential * gw2kw).\ label('peak_load_residential'), (self.orm['orm_lv_load_areas'].sector_peakload_retail * gw2kw).\ label('peak_load_retail'), (self.orm['orm_lv_load_areas'].sector_peakload_industrial * gw2kw).\ label('peak_load_industrial'), (self.orm['orm_lv_load_areas'].sector_peakload_agricultural * gw2kw).\ label('peak_load_agricultural'), #self.orm['orm_lv_load_areas'].subst_id ). \ filter(self.orm['orm_lv_load_areas'].subst_id.in_(stations_list)).\ filter(((self.orm['orm_lv_load_areas'].sector_peakload_residential # only pick load areas with peak load > lv_loads_threshold + self.orm['orm_lv_load_areas'].sector_peakload_retail + self.orm['orm_lv_load_areas'].sector_peakload_industrial + self.orm['orm_lv_load_areas'].sector_peakload_agricultural) * gw2kw) > lv_loads_threshold). \ filter(self.orm['version_condition_la']) # read data from db lv_load_areas = pd.read_sql_query(lv_load_areas_sqla.statement, session.bind, index_col='id_db') return lv_load_areas
[ "def", "list_load_areas", "(", "self", ",", "session", ",", "mv_districts", ")", ":", "# threshold: load area peak load, if peak load < threshold => disregard", "# load area", "lv_loads_threshold", "=", "cfg_ding0", ".", "get", "(", "'mv_routing'", ",", "'load_area_threshold'", ")", "#lv_loads_threshold = 0", "gw2kw", "=", "10", "**", "6", "# load in database is in GW -> scale to kW", "#filter list for only desired MV districts", "stations_list", "=", "[", "d", ".", "mv_grid", ".", "_station", ".", "id_db", "for", "d", "in", "mv_districts", "]", "# build SQL query", "lv_load_areas_sqla", "=", "session", ".", "query", "(", "self", ".", "orm", "[", "'orm_lv_load_areas'", "]", ".", "id", ".", "label", "(", "'id_db'", ")", ",", "(", "self", ".", "orm", "[", "'orm_lv_load_areas'", "]", ".", "sector_peakload_residential", "*", "gw2kw", ")", ".", "label", "(", "'peak_load_residential'", ")", ",", "(", "self", ".", "orm", "[", "'orm_lv_load_areas'", "]", ".", "sector_peakload_retail", "*", "gw2kw", ")", ".", "label", "(", "'peak_load_retail'", ")", ",", "(", "self", ".", "orm", "[", "'orm_lv_load_areas'", "]", ".", "sector_peakload_industrial", "*", "gw2kw", ")", ".", "label", "(", "'peak_load_industrial'", ")", ",", "(", "self", ".", "orm", "[", "'orm_lv_load_areas'", "]", ".", "sector_peakload_agricultural", "*", "gw2kw", ")", ".", "label", "(", "'peak_load_agricultural'", ")", ",", "#self.orm['orm_lv_load_areas'].subst_id", ")", ".", "filter", "(", "self", ".", "orm", "[", "'orm_lv_load_areas'", "]", ".", "subst_id", ".", "in_", "(", "stations_list", ")", ")", ".", "filter", "(", "(", "(", "self", ".", "orm", "[", "'orm_lv_load_areas'", "]", ".", "sector_peakload_residential", "# only pick load areas with peak load > lv_loads_threshold", "+", "self", ".", "orm", "[", "'orm_lv_load_areas'", "]", ".", "sector_peakload_retail", "+", "self", ".", "orm", "[", "'orm_lv_load_areas'", "]", ".", "sector_peakload_industrial", "+", "self", ".", "orm", "[", "'orm_lv_load_areas'", "]", ".", "sector_peakload_agricultural", ")", "*", "gw2kw", ")", ">", "lv_loads_threshold", ")", ".", "filter", "(", "self", ".", "orm", "[", "'version_condition_la'", "]", ")", "# read data from db", "lv_load_areas", "=", "pd", ".", "read_sql_query", "(", "lv_load_areas_sqla", ".", "statement", ",", "session", ".", "bind", ",", "index_col", "=", "'id_db'", ")", "return", "lv_load_areas" ]
list load_areas (load areas) peak load from database for a single MV grid_district Parameters ---------- session : sqlalchemy.orm.session.Session Database session mv_districts: List of MV districts
[ "list", "load_areas", "(", "load", "areas", ")", "peak", "load", "from", "database", "for", "a", "single", "MV", "grid_district" ]
python
train
tehmaze/natural
natural/size.py
https://github.com/tehmaze/natural/blob/d7a1fc9de712f9bcf68884a80826a7977df356fb/natural/size.py#L12-L51
def filesize(value, format='decimal', digits=2): ''' Convert a file size into natural readable format. Multiple formats are supported. :param value: size :param format: default ``decimal``, choices ``binary``, ``decimal`` or ``gnu`` :param digits: default ``2`` >>> print(filesize(123)) 123.00 B >>> print(filesize(123456)) 120.56 kB >>> print(filesize(1234567890)) 1.15 GB ''' if format not in FILESIZE_SUFFIX: raise TypeError base = FILESIZE_BASE[format] size = int(value) sign = size < 0 and u'-' or '' size = abs(size) for i, suffix in enumerate(FILESIZE_SUFFIX[format]): unit = base ** (i + 1) if size < unit: result = u''.join([ sign, _format(base * size / float(unit), digits), u' ', suffix, ]) if format == 'gnu': result = result.replace(' ', '') return result raise OverflowError
[ "def", "filesize", "(", "value", ",", "format", "=", "'decimal'", ",", "digits", "=", "2", ")", ":", "if", "format", "not", "in", "FILESIZE_SUFFIX", ":", "raise", "TypeError", "base", "=", "FILESIZE_BASE", "[", "format", "]", "size", "=", "int", "(", "value", ")", "sign", "=", "size", "<", "0", "and", "u'-'", "or", "''", "size", "=", "abs", "(", "size", ")", "for", "i", ",", "suffix", "in", "enumerate", "(", "FILESIZE_SUFFIX", "[", "format", "]", ")", ":", "unit", "=", "base", "**", "(", "i", "+", "1", ")", "if", "size", "<", "unit", ":", "result", "=", "u''", ".", "join", "(", "[", "sign", ",", "_format", "(", "base", "*", "size", "/", "float", "(", "unit", ")", ",", "digits", ")", ",", "u' '", ",", "suffix", ",", "]", ")", "if", "format", "==", "'gnu'", ":", "result", "=", "result", ".", "replace", "(", "' '", ",", "''", ")", "return", "result", "raise", "OverflowError" ]
Convert a file size into natural readable format. Multiple formats are supported. :param value: size :param format: default ``decimal``, choices ``binary``, ``decimal`` or ``gnu`` :param digits: default ``2`` >>> print(filesize(123)) 123.00 B >>> print(filesize(123456)) 120.56 kB >>> print(filesize(1234567890)) 1.15 GB
[ "Convert", "a", "file", "size", "into", "natural", "readable", "format", ".", "Multiple", "formats", "are", "supported", "." ]
python
train
globality-corp/flake8-logging-format
logging_format/visitor.py
https://github.com/globality-corp/flake8-logging-format/blob/3c6ce53d0ff1ec369799cff0ed6d048343252e40/logging_format/visitor.py#L156-L168
def visit_keyword(self, node): """ Process keyword arguments. """ if self.should_check_whitelist(node): if node.arg not in self.whitelist and not node.arg.startswith("debug_"): self.violations.append((self.current_logging_call, WHITELIST_VIOLATION.format(node.arg))) if self.should_check_extra_exception(node): self.check_exception_arg(node.value) super(LoggingVisitor, self).generic_visit(node)
[ "def", "visit_keyword", "(", "self", ",", "node", ")", ":", "if", "self", ".", "should_check_whitelist", "(", "node", ")", ":", "if", "node", ".", "arg", "not", "in", "self", ".", "whitelist", "and", "not", "node", ".", "arg", ".", "startswith", "(", "\"debug_\"", ")", ":", "self", ".", "violations", ".", "append", "(", "(", "self", ".", "current_logging_call", ",", "WHITELIST_VIOLATION", ".", "format", "(", "node", ".", "arg", ")", ")", ")", "if", "self", ".", "should_check_extra_exception", "(", "node", ")", ":", "self", ".", "check_exception_arg", "(", "node", ".", "value", ")", "super", "(", "LoggingVisitor", ",", "self", ")", ".", "generic_visit", "(", "node", ")" ]
Process keyword arguments.
[ "Process", "keyword", "arguments", "." ]
python
test
PixelwarStudio/PyTree
Tree/core.py
https://github.com/PixelwarStudio/PyTree/blob/f14b25ea145da6b00d836e34251d2a4c823766dc/Tree/core.py#L81-L90
def get_node_sum(self, age=None): """Get sum of all branches in the tree. Returns: int: The sum of all nodes grown until the age. """ if age is None: age = self.age return age if self.comp == 1 else int((pow(self.comp, age+1) - 1) / (self.comp - 1))
[ "def", "get_node_sum", "(", "self", ",", "age", "=", "None", ")", ":", "if", "age", "is", "None", ":", "age", "=", "self", ".", "age", "return", "age", "if", "self", ".", "comp", "==", "1", "else", "int", "(", "(", "pow", "(", "self", ".", "comp", ",", "age", "+", "1", ")", "-", "1", ")", "/", "(", "self", ".", "comp", "-", "1", ")", ")" ]
Get sum of all branches in the tree. Returns: int: The sum of all nodes grown until the age.
[ "Get", "sum", "of", "all", "branches", "in", "the", "tree", "." ]
python
train
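The comp != 1 branch above is the closed form of the geometric series 1 + comp + ... + comp^age; a quick check against direct summation follows. (Note the comp == 1 branch returns age, one fewer than the age + 1 terms of that series; whether the root node is meant to be counted is not stated in the record.)

# Closed form vs. direct summation for branching factor 2 and age 5.
comp, age = 2, 5
closed = (comp ** (age + 1) - 1) // (comp - 1)
assert closed == sum(comp ** i for i in range(age + 1))   # both give 63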
benmoran56/esper
esper.py
https://github.com/benmoran56/esper/blob/5b6cd0c51718d5dcfa0e5613f824b5251cf092ac/esper.py#L111-L135
def delete_entity(self, entity: int, immediate=False) -> None:
    """Delete an Entity from the World.

    Delete an Entity and all of its assigned Component instances from
    the world. By default, Entity deletion is delayed until the next call
    to *World.process*. You can request immediate deletion, however, by
    passing the "immediate=True" parameter. This should generally not be
    done during Entity iteration (calls to World.get_component/s).

    Raises a KeyError if the given entity does not exist in the database.
    :param entity: The Entity ID you wish to delete.
    :param immediate: If True, delete the Entity immediately.
    """
    if immediate:
        for component_type in self._entities[entity]:
            self._components[component_type].discard(entity)

            if not self._components[component_type]:
                del self._components[component_type]

        del self._entities[entity]
        self.clear_cache()

    else:
        self._dead_entities.add(entity)
[ "def", "delete_entity", "(", "self", ",", "entity", ":", "int", ",", "immediate", "=", "False", ")", "->", "None", ":", "if", "immediate", ":", "for", "component_type", "in", "self", ".", "_entities", "[", "entity", "]", ":", "self", ".", "_components", "[", "component_type", "]", ".", "discard", "(", "entity", ")", "if", "not", "self", ".", "_components", "[", "component_type", "]", ":", "del", "self", ".", "_components", "[", "component_type", "]", "del", "self", ".", "_entities", "[", "entity", "]", "self", ".", "clear_cache", "(", ")", "else", ":", "self", ".", "_dead_entities", ".", "add", "(", "entity", ")" ]
Delete an Entity from the World.

Delete an Entity and all of its assigned Component instances from
the world. By default, Entity deletion is delayed until the next call
to *World.process*. You can request immediate deletion, however, by
passing the "immediate=True" parameter. This should generally not be
done during Entity iteration (calls to World.get_component/s).

Raises a KeyError if the given entity does not exist in the database.
:param entity: The Entity ID you wish to delete.
:param immediate: If True, delete the Entity immediately.
[ "Delete", "an", "Entity", "from", "the", "World", "." ]
python
train
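A usage sketch of the two deletion modes above; the World and entity setup follow esper's documented API:

# Deferred deletion is applied on the next process(); immediate acts at once.
import esper

world = esper.World()
ent = world.create_entity()
world.delete_entity(ent)                   # deferred until world.process()
world.process()

ent2 = world.create_entity()
world.delete_entity(ent2, immediate=True)  # removed right away; KeyError if unknown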
saltstack/salt
salt/modules/smartos_imgadm.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/smartos_imgadm.py#L478-L510
def source_add(source, source_type='imgapi'):
    '''
    Add a new source

    source : string
        source url to add
    source_type : string (imgapi)
        source type, either imgapi or docker

    .. versionadded:: 2019.2.0

    CLI Example:

    .. code-block:: bash

        salt '*' imgadm.source_add https://updates.joyent.com
        salt '*' imgadm.source_add https://docker.io docker
    '''
    ret = {}

    # NOTE: there are some undocumented deprecated source types
    #       so we just warn instead of error on those
    if source_type not in ['imgapi', 'docker']:
        log.warning('Possible unsupported image source type specified!')

    cmd = 'imgadm sources -a {0} -t {1}'.format(source, source_type)
    res = __salt__['cmd.run_all'](cmd)
    retcode = res['retcode']
    if retcode != 0:
        ret['Error'] = _exit_status(retcode, res['stderr'])
        return ret

    return sources(False)
[ "def", "source_add", "(", "source", ",", "source_type", "=", "'imgapi'", ")", ":", "ret", "=", "{", "}", "# NOTE: there are some undocumented deprecated source types", "# so we just warn instead of error on those", "if", "source_type", "not", "in", "[", "'imgapi'", ",", "'docker'", "]", ":", "log", ".", "warning", "(", "'Possible unsupported imgage source type specified!'", ")", "cmd", "=", "'imgadm sources -a {0} -t {1}'", ".", "format", "(", "source", ",", "source_type", ")", "res", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "cmd", ")", "retcode", "=", "res", "[", "'retcode'", "]", "if", "retcode", "!=", "0", ":", "ret", "[", "'Error'", "]", "=", "_exit_status", "(", "retcode", ",", "res", "[", "'stderr'", "]", ")", "return", "ret", "return", "sources", "(", "False", ")" ]
Add a new source

source : string
    source url to add
source_type : string (imgapi)
    source type, either imgapi or docker

.. versionadded:: 2019.2.0

CLI Example:

.. code-block:: bash

    salt '*' imgadm.source_add https://updates.joyent.com
    salt '*' imgadm.source_add https://docker.io docker
[ "Add", "a", "new", "source" ]
python
train
Clinical-Genomics/scout
scout/adapter/mongo/variant.py
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/adapter/mongo/variant.py#L39-L157
def add_gene_info(self, variant_obj, gene_panels=None):
    """Add extra information about genes from gene panels

    Args:
        variant_obj(dict): A variant from the database
        gene_panels(list(dict)): List of panels from database

    """
    gene_panels = gene_panels or []

    # Add a variable that checks if there are any refseq transcripts
    variant_obj['has_refseq'] = False

    # We need to check if there is any additional information in the gene panels

    # extra_info will hold information from gene panels
    # Collect all extra info from the panels in a dictionary with hgnc_id as keys
    extra_info = {}
    for panel_obj in gene_panels:
        for gene_info in panel_obj['genes']:
            hgnc_id = gene_info['hgnc_id']
            if hgnc_id not in extra_info:
                extra_info[hgnc_id] = []
            extra_info[hgnc_id].append(gene_info)

    # Loop over the genes in the variant object to add information
    # from hgnc_genes and panel genes to the variant object
    for variant_gene in variant_obj.get('genes', []):
        hgnc_id = variant_gene['hgnc_id']
        # Get the hgnc_gene
        hgnc_gene = self.hgnc_gene(hgnc_id)

        if not hgnc_gene:
            continue

        # Create a dictionary with transcripts information
        # Use ensembl transcript id as keys
        transcripts_dict = {}
        # Add transcript information from the hgnc gene
        for transcript in hgnc_gene.get('transcripts', []):
            tx_id = transcript['ensembl_transcript_id']
            transcripts_dict[tx_id] = transcript

        # Add the transcripts to the gene object
        hgnc_gene['transcripts_dict'] = transcripts_dict

        if hgnc_gene.get('incomplete_penetrance'):
            variant_gene['omim_penetrance'] = True

        ############# PANEL SPECIFIC INFORMATION #############
        # Panels can have extra information about genes and transcripts
        panel_info = extra_info.get(hgnc_id, [])

        # Manually annotated disease associated transcripts
        disease_associated = set()
        # We need to strip the version to compare against others
        disease_associated_no_version = set()
        manual_penetrance = False
        mosaicism = False
        manual_inheritance = set()

        # We need to loop since there can be information from multiple panels
        for gene_info in panel_info:
            # Check if there are manually annotated disease transcripts
            for tx in gene_info.get('disease_associated_transcripts', []):
                # We remove the version of transcript at this stage
                stripped = re.sub(r'\.[0-9]', '', tx)
                disease_associated_no_version.add(stripped)
                disease_associated.add(tx)

            if gene_info.get('reduced_penetrance'):
                manual_penetrance = True

            if gene_info.get('mosaicism'):
                mosaicism = True

            manual_inheritance.update(gene_info.get('inheritance_models', []))

        variant_gene['disease_associated_transcripts'] = list(disease_associated)
        variant_gene['manual_penetrance'] = manual_penetrance
        variant_gene['mosaicism'] = mosaicism
        variant_gene['manual_inheritance'] = list(manual_inheritance)

        # Now add the information from hgnc and panels
        # to the transcripts on the variant

        # First loop over the variants transcripts
        for transcript in variant_gene.get('transcripts', []):
            tx_id = transcript['transcript_id']
            if tx_id not in transcripts_dict:
                continue

            # This is the common information about the transcript
            hgnc_transcript = transcripts_dict[tx_id]

            # Check in the common information if it is a primary transcript
            if hgnc_transcript.get('is_primary'):
                transcript['is_primary'] = True

            # If the transcript has a ref seq identifier we add that
            # to the variants transcript
            if not hgnc_transcript.get('refseq_id'):
                continue

            refseq_id = hgnc_transcript['refseq_id']
            transcript['refseq_id'] = refseq_id
            variant_obj['has_refseq'] = True

            # Check if the refseq id is disease associated
            if refseq_id in disease_associated_no_version:
                transcript['is_disease_associated'] = True

            # Since an ensembl transcript can have multiple refseq identifiers we add all of
            # those
            transcript['refseq_identifiers'] = hgnc_transcript.get('refseq_identifiers', [])

        variant_gene['common'] = hgnc_gene

        # Add the associated disease terms
        variant_gene['disease_terms'] = self.disease_terms(hgnc_id)

    return variant_obj
[ "def", "add_gene_info", "(", "self", ",", "variant_obj", ",", "gene_panels", "=", "None", ")", ":", "gene_panels", "=", "gene_panels", "or", "[", "]", "# Add a variable that checks if there are any refseq transcripts", "variant_obj", "[", "'has_refseq'", "]", "=", "False", "# We need to check if there are any additional information in the gene panels", "# extra_info will hold information from gene panels", "# Collect all extra info from the panels in a dictionary with hgnc_id as keys", "extra_info", "=", "{", "}", "for", "panel_obj", "in", "gene_panels", ":", "for", "gene_info", "in", "panel_obj", "[", "'genes'", "]", ":", "hgnc_id", "=", "gene_info", "[", "'hgnc_id'", "]", "if", "hgnc_id", "not", "in", "extra_info", ":", "extra_info", "[", "hgnc_id", "]", "=", "[", "]", "extra_info", "[", "hgnc_id", "]", ".", "append", "(", "gene_info", ")", "# Loop over the genes in the variant object to add information", "# from hgnc_genes and panel genes to the variant object", "for", "variant_gene", "in", "variant_obj", ".", "get", "(", "'genes'", ",", "[", "]", ")", ":", "hgnc_id", "=", "variant_gene", "[", "'hgnc_id'", "]", "# Get the hgnc_gene", "hgnc_gene", "=", "self", ".", "hgnc_gene", "(", "hgnc_id", ")", "if", "not", "hgnc_gene", ":", "continue", "# Create a dictionary with transcripts information", "# Use ensembl transcript id as keys", "transcripts_dict", "=", "{", "}", "# Add transcript information from the hgnc gene", "for", "transcript", "in", "hgnc_gene", ".", "get", "(", "'transcripts'", ",", "[", "]", ")", ":", "tx_id", "=", "transcript", "[", "'ensembl_transcript_id'", "]", "transcripts_dict", "[", "tx_id", "]", "=", "transcript", "# Add the transcripts to the gene object", "hgnc_gene", "[", "'transcripts_dict'", "]", "=", "transcripts_dict", "if", "hgnc_gene", ".", "get", "(", "'incomplete_penetrance'", ")", ":", "variant_gene", "[", "'omim_penetrance'", "]", "=", "True", "############# PANEL SPECIFIC INFORMATION #############", "# Panels can have extra information about genes and transcripts", "panel_info", "=", "extra_info", ".", "get", "(", "hgnc_id", ",", "[", "]", ")", "# Manually annotated disease associated transcripts", "disease_associated", "=", "set", "(", ")", "# We need to strip the version to compare against others", "disease_associated_no_version", "=", "set", "(", ")", "manual_penetrance", "=", "False", "mosaicism", "=", "False", "manual_inheritance", "=", "set", "(", ")", "# We need to loop since there can be information from multiple panels", "for", "gene_info", "in", "panel_info", ":", "# Check if there are manually annotated disease transcripts", "for", "tx", "in", "gene_info", ".", "get", "(", "'disease_associated_transcripts'", ",", "[", "]", ")", ":", "# We remove the version of transcript at this stage", "stripped", "=", "re", ".", "sub", "(", "r'\\.[0-9]'", ",", "''", ",", "tx", ")", "disease_associated_no_version", ".", "add", "(", "stripped", ")", "disease_associated", ".", "add", "(", "tx", ")", "if", "gene_info", ".", "get", "(", "'reduced_penetrance'", ")", ":", "manual_penetrance", "=", "True", "if", "gene_info", ".", "get", "(", "'mosaicism'", ")", ":", "mosaicism", "=", "True", "manual_inheritance", ".", "update", "(", "gene_info", ".", "get", "(", "'inheritance_models'", ",", "[", "]", ")", ")", "variant_gene", "[", "'disease_associated_transcripts'", "]", "=", "list", "(", "disease_associated", ")", "variant_gene", "[", "'manual_penetrance'", "]", "=", "manual_penetrance", "variant_gene", "[", "'mosaicism'", "]", "=", "mosaicism", "variant_gene", 
"[", "'manual_inheritance'", "]", "=", "list", "(", "manual_inheritance", ")", "# Now add the information from hgnc and panels", "# to the transcripts on the variant", "# First loop over the variants transcripts", "for", "transcript", "in", "variant_gene", ".", "get", "(", "'transcripts'", ",", "[", "]", ")", ":", "tx_id", "=", "transcript", "[", "'transcript_id'", "]", "if", "not", "tx_id", "in", "transcripts_dict", ":", "continue", "# This is the common information about the transcript", "hgnc_transcript", "=", "transcripts_dict", "[", "tx_id", "]", "# Check in the common information if it is a primary transcript", "if", "hgnc_transcript", ".", "get", "(", "'is_primary'", ")", ":", "transcript", "[", "'is_primary'", "]", "=", "True", "# If the transcript has a ref seq identifier we add that", "# to the variants transcript", "if", "not", "hgnc_transcript", ".", "get", "(", "'refseq_id'", ")", ":", "continue", "refseq_id", "=", "hgnc_transcript", "[", "'refseq_id'", "]", "transcript", "[", "'refseq_id'", "]", "=", "refseq_id", "variant_obj", "[", "'has_refseq'", "]", "=", "True", "# Check if the refseq id are disease associated", "if", "refseq_id", "in", "disease_associated_no_version", ":", "transcript", "[", "'is_disease_associated'", "]", "=", "True", "# Since a ensemble transcript can have multiple refseq identifiers we add all of", "# those", "transcript", "[", "'refseq_identifiers'", "]", "=", "hgnc_transcript", ".", "get", "(", "'refseq_identifiers'", ",", "[", "]", ")", "variant_gene", "[", "'common'", "]", "=", "hgnc_gene", "# Add the associated disease terms", "variant_gene", "[", "'disease_terms'", "]", "=", "self", ".", "disease_terms", "(", "hgnc_id", ")", "return", "variant_obj" ]
Add extra information about genes from gene panels Args: variant_obj(dict): A variant from the database gene_panels(list(dict)): List of panels from database
[ "Add", "extra", "information", "about", "genes", "from", "gene", "panels" ]
python
test
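Usage sketch (an assumption, not scout's test suite): `adapter` stands for a connected scout MongoAdapter whose hgnc collection can resolve gene 1234, and the panel/variant shapes below only mirror the keys the function reads.

gene_panels = [{'genes': [{'hgnc_id': 1234,
                           'disease_associated_transcripts': ['NM_000000.3'],
                           'reduced_penetrance': True,
                           'mosaicism': False,
                           'inheritance_models': ['AD']}]}]
variant_obj = {'genes': [{'hgnc_id': 1234,
                          'transcripts': [{'transcript_id': 'ENST00000000001'}]}]}
annotated = adapter.add_gene_info(variant_obj, gene_panels=gene_panels)
print(annotated['has_refseq'])
print(annotated['genes'][0].get('manual_inheritance'))  # ['AD'] if gene 1234 resolves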
odlgroup/odl
odl/tomo/geometry/detector.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/tomo/geometry/detector.py#L153-L199
def surface_normal(self, param): """Unit vector perpendicular to the detector surface at ``param``. The orientation is chosen as follows: - In 2D, the system ``(normal, tangent)`` should be right-handed. - In 3D, the system ``(tangent[0], tangent[1], normal)`` should be right-handed. Here, ``tangent`` is the return value of `surface_deriv` at ``param``. Parameters ---------- param : `array-like` or sequence Parameter value(s) at which to evaluate. If ``ndim >= 2``, a sequence of length `ndim` must be provided. Returns ------- normal : `numpy.ndarray` Unit vector(s) perpendicular to the detector surface at ``param``. If ``param`` is a single parameter, an array of shape ``(space_ndim,)`` representing a single vector is returned. Otherwise the shape of the returned array is - ``param.shape + (space_ndim,)`` if `ndim` is 1, - ``param.shape[:-1] + (space_ndim,)`` otherwise. """ # Checking is done by `surface_deriv` if self.ndim == 1 and self.space_ndim == 2: return -perpendicular_vector(self.surface_deriv(param)) elif self.ndim == 2 and self.space_ndim == 3: deriv = self.surface_deriv(param) if deriv.ndim > 2: # Vectorized, need to reshape (N, 2, 3) to (2, N, 3) deriv = moveaxis(deriv, -2, 0) normal = np.cross(*deriv, axis=-1) normal /= np.linalg.norm(normal, axis=-1, keepdims=True) return normal else: raise NotImplementedError( 'no default implementation of `surface_normal` available ' 'for `ndim = {}` and `space_ndim = {}`' ''.format(self.ndim, self.space_ndim))
[ "def", "surface_normal", "(", "self", ",", "param", ")", ":", "# Checking is done by `surface_deriv`", "if", "self", ".", "ndim", "==", "1", "and", "self", ".", "space_ndim", "==", "2", ":", "return", "-", "perpendicular_vector", "(", "self", ".", "surface_deriv", "(", "param", ")", ")", "elif", "self", ".", "ndim", "==", "2", "and", "self", ".", "space_ndim", "==", "3", ":", "deriv", "=", "self", ".", "surface_deriv", "(", "param", ")", "if", "deriv", ".", "ndim", ">", "2", ":", "# Vectorized, need to reshape (N, 2, 3) to (2, N, 3)", "deriv", "=", "moveaxis", "(", "deriv", ",", "-", "2", ",", "0", ")", "normal", "=", "np", ".", "cross", "(", "*", "deriv", ",", "axis", "=", "-", "1", ")", "normal", "/=", "np", ".", "linalg", ".", "norm", "(", "normal", ",", "axis", "=", "-", "1", ",", "keepdims", "=", "True", ")", "return", "normal", "else", ":", "raise", "NotImplementedError", "(", "'no default implementation of `surface_normal` available '", "'for `ndim = {}` and `space_ndim = {}`'", "''", ".", "format", "(", "self", ".", "ndim", ",", "self", ".", "space_ndim", ")", ")" ]
Unit vector perpendicular to the detector surface at ``param``. The orientation is chosen as follows: - In 2D, the system ``(normal, tangent)`` should be right-handed. - In 3D, the system ``(tangent[0], tangent[1], normal)`` should be right-handed. Here, ``tangent`` is the return value of `surface_deriv` at ``param``. Parameters ---------- param : `array-like` or sequence Parameter value(s) at which to evaluate. If ``ndim >= 2``, a sequence of length `ndim` must be provided. Returns ------- normal : `numpy.ndarray` Unit vector(s) perpendicular to the detector surface at ``param``. If ``param`` is a single parameter, an array of shape ``(space_ndim,)`` representing a single vector is returned. Otherwise the shape of the returned array is - ``param.shape + (space_ndim,)`` if `ndim` is 1, - ``param.shape[:-1] + (space_ndim,)`` otherwise.
[ "Unit", "vector", "perpendicular", "to", "the", "detector", "surface", "at", "param", "." ]
python
train
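The 3D branch reduces to a normalized cross product of the two surface tangents; a self-contained numpy check of that right-handedness convention (no odl objects assumed):

import numpy as np

e0 = np.array([1.0, 0.0, 0.0])   # first surface tangent
e1 = np.array([0.0, 1.0, 0.0])   # second surface tangent
n = np.cross(e0, e1)
n /= np.linalg.norm(n)           # unit length, as surface_normal guarantees
assert np.allclose(n, [0.0, 0.0, 1.0])  # (e0, e1, n) is right-handed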
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v20/ardupilotmega.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v20/ardupilotmega.py#L9964-L9977
def fence_point_encode(self, target_system, target_component, idx, count, lat, lng):
    '''
    A fence point. Used to set a point when sent from GCS -> MAV. Also used to return a point from MAV -> GCS

    target_system             : System ID (uint8_t)
    target_component          : Component ID (uint8_t)
    idx                       : point index (first point is 1, 0 is for return point) (uint8_t)
    count                     : total number of points (for sanity checking) (uint8_t)
    lat                       : Latitude of point (float)
    lng                       : Longitude of point (float)

    '''
    return MAVLink_fence_point_message(target_system, target_component, idx, count, lat, lng)
[ "def", "fence_point_encode", "(", "self", ",", "target_system", ",", "target_component", ",", "idx", ",", "count", ",", "lat", ",", "lng", ")", ":", "return", "MAVLink_fence_point_message", "(", "target_system", ",", "target_component", ",", "idx", ",", "count", ",", "lat", ",", "lng", ")" ]
A fence point. Used to set a point when sent from GCS -> MAV. Also used to return a point from MAV -> GCS

                target_system             : System ID (uint8_t)
                target_component          : Component ID (uint8_t)
                idx                       : point index (first point is 1, 0 is for return point) (uint8_t)
                count                     : total number of points (for sanity checking) (uint8_t)
                lat                       : Latitude of point (float)
                lng                       : Longitude of point (float)
[ "A", "fence", "point", ".", "Used", "to", "set", "a", "point", "when", "from", "GCS", "-", ">", "MAV", ".", "Also", "used", "to", "return", "a", "point", "from", "MAV", "-", ">", "GCS" ]
python
train
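A hedged usage sketch: `mav` stands for an initialized generated MAVLink object (e.g. behind a mavutil connection), and the coordinates are made up.

msg = mav.fence_point_encode(target_system=1, target_component=1,
                             idx=1, count=4,        # first point of a 4-point fence
                             lat=47.3977, lng=8.5456)
buf = msg.pack(mav)  # serialize; generated messages expose pack()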
ynop/audiomate
audiomate/utils/units.py
https://github.com/ynop/audiomate/blob/61727920b23a708293c3d526fa3000d4de9c6c21/audiomate/utils/units.py#L117-L124
def frame_to_sample(self, frame_index):
    """
    Return a tuple containing the indices of the first sample
    and the end sample (exclusive) of the frame with the given index.
    """
    start = frame_index * self.hop_size
    end = start + self.frame_size
    return start, end
[ "def", "frame_to_sample", "(", "self", ",", "frame_index", ")", ":", "start", "=", "frame_index", "*", "self", ".", "hop_size", "end", "=", "start", "+", "self", ".", "frame_size", "return", "start", ",", "end" ]
Return a tuple containing the indices of the sample which are the first sample and the end (exclusive) of the frame with the given index.
[ "Return", "a", "tuple", "containing", "the", "indices", "of", "the", "sample", "which", "are", "the", "first", "sample", "and", "the", "end", "(", "exclusive", ")", "of", "the", "frame", "with", "the", "given", "index", "." ]
python
train
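The start/end arithmetic worked through with assumed settings (frame_size=400, hop_size=160):

frame_index, frame_size, hop_size = 2, 400, 160
start = frame_index * hop_size   # 2 * 160 = 320
end = start + frame_size         # 320 + 400 = 720, exclusive
assert (start, end) == (320, 720)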
thiderman/doge
doge/core.py
https://github.com/thiderman/doge/blob/cea077d4f72929f9dcf44d0d16a7d1a6ee0e3e3f/doge/core.py#L365-L386
def get_tty_size(self): """ Get the current terminal size without using a subprocess http://stackoverflow.com/questions/566746 I have no clue what-so-fucking ever over how this works or why it returns the size of the terminal in both cells and pixels. But hey, it does. """ if sys.platform == 'win32': # stdin, stdout, stderr = -10, -11, -12 ret = self._tty_size_windows(-10) ret = ret or self._tty_size_windows(-11) ret = ret or self._tty_size_windows(-12) else: # stdin, stdout, stderr = 0, 1, 2 ret = self._tty_size_linux(0) ret = ret or self._tty_size_linux(1) ret = ret or self._tty_size_linux(2) return ret or (25, 80)
[ "def", "get_tty_size", "(", "self", ")", ":", "if", "sys", ".", "platform", "==", "'win32'", ":", "# stdin, stdout, stderr = -10, -11, -12", "ret", "=", "self", ".", "_tty_size_windows", "(", "-", "10", ")", "ret", "=", "ret", "or", "self", ".", "_tty_size_windows", "(", "-", "11", ")", "ret", "=", "ret", "or", "self", ".", "_tty_size_windows", "(", "-", "12", ")", "else", ":", "# stdin, stdout, stderr = 0, 1, 2", "ret", "=", "self", ".", "_tty_size_linux", "(", "0", ")", "ret", "=", "ret", "or", "self", ".", "_tty_size_linux", "(", "1", ")", "ret", "=", "ret", "or", "self", ".", "_tty_size_linux", "(", "2", ")", "return", "ret", "or", "(", "25", ",", "80", ")" ]
Get the current terminal size without using a subprocess http://stackoverflow.com/questions/566746 I have no clue what-so-fucking ever over how this works or why it returns the size of the terminal in both cells and pixels. But hey, it does.
[ "Get", "the", "current", "terminal", "size", "without", "using", "a", "subprocess" ]
python
train
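For comparison only (this is not what doge itself uses): the standard library exposes a similar probe-with-fallback via shutil.

import shutil

# Falls back to (80, 25) columns/lines when no tty can be queried,
# mirroring the (25, 80) rows/cols default above.
size = shutil.get_terminal_size(fallback=(80, 25))
print(size.lines, size.columns)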
archman/beamline
beamline/models.py
https://github.com/archman/beamline/blob/417bc5dc13e754bc89d246427984590fced64d07/beamline/models.py#L126-L144
def putCtrlConf(self, eleobj, ctrlkey, val, type='raw'):
    """ put the value to the control PV field

    :param eleobj: element object in lattice
    :param ctrlkey: element control property, PV name
    :param val: new value for ctrlkey
    :param type: set in 'raw' or 'real' mode, 'raw' by default
        'raw': set PV with the value of 'val',
        'real': set PV with the value translated from 'val'
    """
    if ctrlkey in eleobj.ctrlkeys:
        if type == 'raw':
            newval = val
        else:
            # val should be translated
            newval = eleobj.unitTrans(val, direction='-')
        epics.caput(eleobj.ctrlinfo[ctrlkey]['pv'], newval)
        return True
    else:
        return False
[ "def", "putCtrlConf", "(", "self", ",", "eleobj", ",", "ctrlkey", ",", "val", ",", "type", "=", "'raw'", ")", ":", "if", "ctrlkey", "in", "eleobj", ".", "ctrlkeys", ":", "if", "type", "==", "'raw'", ":", "newval", "=", "val", "else", ":", "# val should be translated", "newval", "=", "eleobj", ".", "unitTrans", "(", "val", ",", "direction", "=", "'-'", ")", "epics", ".", "caput", "(", "eleobj", ".", "ctrlinfo", "[", "ctrlkey", "]", "[", "'pv'", "]", ",", "newval", ")", "return", "True", "else", ":", "return", "False" ]
put the value to the control PV field

:param eleobj: element object in lattice
:param ctrlkey: element control property, PV name
:param val: new value for ctrlkey
:param type: set in 'raw' or 'real' mode, 'raw' by default
    'raw': set PV with the value of 'val',
    'real': set PV with the value translated from 'val'
[ "put", "the", "value", "to", "control", "PV", "field" ]
python
train
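A hedged sketch: `model` and `elem` are assumed to be a models instance and one of its lattice elements, and 'I' is an illustrative PV key.

ok = model.putCtrlConf(elem, 'I', 120.0, type='real')  # value first run through unitTrans
if not ok:
    print("'I' is not among elem.ctrlkeys; nothing was written")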
google/transitfeed
transitfeed/problems.py
https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/transitfeed/problems.py#L506-L519
def GetOrderKey(self):
  """Return a list that can be used to sort problems into a consistent order.

  Returns:
    A list of values.
  """
  context_attributes = ['_type']
  context_attributes.extend(ExceptionWithContext.CONTEXT_PARTS)
  context_attributes.extend(self._GetExtraOrderAttributes())
  tokens = []
  for context_attribute in context_attributes:
    tokens.append(getattr(self, context_attribute, None))
  return tokens
[ "def", "GetOrderKey", "(", "self", ")", ":", "context_attributes", "=", "[", "'_type'", "]", "context_attributes", ".", "extend", "(", "ExceptionWithContext", ".", "CONTEXT_PARTS", ")", "context_attributes", ".", "extend", "(", "self", ".", "_GetExtraOrderAttributes", "(", ")", ")", "tokens", "=", "[", "]", "for", "context_attribute", "in", "context_attributes", ":", "tokens", ".", "append", "(", "getattr", "(", "self", ",", "context_attribute", ",", "None", ")", ")", "return", "tokens" ]
Return a list that can be used to sort problems into a consistent order.

Returns:
  A list of values.
[ "Return", "a", "tuple", "that", "can", "be", "used", "to", "sort", "problems", "into", "a", "consistent", "order", "." ]
python
train
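Assumed usage: the keys are lists and compare element-wise, so a collection of problems can be ordered deterministically (in this Python 2-era codebase, missing attributes come back as None and still sort consistently).

# `problems` is assumed to be a list of ExceptionWithContext instances.
sorted_problems = sorted(problems, key=lambda p: p.GetOrderKey())
for p in sorted_problems[:10]:
    print(p.FormatProblem() if hasattr(p, 'FormatProblem') else p)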
xtuml/pyxtuml
xtuml/consistency_check.py
https://github.com/xtuml/pyxtuml/blob/7dd9343b9a0191d1db1887ab9288d0a026608d9a/xtuml/consistency_check.py#L133-L149
def check_link_integrity(m, link): ''' Check the model for integrity violations on an association in a particular direction. ''' res = 0 for inst in link.from_metaclass.select_many(): q_set = list(link.navigate(inst)) if(len(q_set) < 1 and not link.conditional) or ( (len(q_set) > 1 and not link.many)): res += 1 logger.warning('integrity violation in ' '%s --(%s)--> %s' % (pretty_from_link(inst, link), link.rel_id, pretty_to_link(inst, link))) return res
[ "def", "check_link_integrity", "(", "m", ",", "link", ")", ":", "res", "=", "0", "for", "inst", "in", "link", ".", "from_metaclass", ".", "select_many", "(", ")", ":", "q_set", "=", "list", "(", "link", ".", "navigate", "(", "inst", ")", ")", "if", "(", "len", "(", "q_set", ")", "<", "1", "and", "not", "link", ".", "conditional", ")", "or", "(", "(", "len", "(", "q_set", ")", ">", "1", "and", "not", "link", ".", "many", ")", ")", ":", "res", "+=", "1", "logger", ".", "warning", "(", "'integrity violation in '", "'%s --(%s)--> %s'", "%", "(", "pretty_from_link", "(", "inst", ",", "link", ")", ",", "link", ".", "rel_id", ",", "pretty_to_link", "(", "inst", ",", "link", ")", ")", ")", "return", "res" ]
Check the model for integrity violations on an association in a particular direction.
[ "Check", "the", "model", "for", "integrity", "violations", "on", "an", "association", "in", "a", "particular", "direction", "." ]
python
test
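A sketch of driving the check over a whole model; load_metamodel is real pyxtuml API, and check_association_integrity (defined alongside this function, and assumed to be exported at package level) applies check_link_integrity to both directions of every association.

import xtuml

m = xtuml.load_metamodel('schema.sql')   # path is illustrative
violations = xtuml.check_association_integrity(m)
print('%d integrity violations' % violations)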
IBMStreams/pypi.streamsx
streamsx/rest_primitives.py
https://github.com/IBMStreams/pypi.streamsx/blob/abd67b4757120f6f805787fba390f53e9df9cdd8/streamsx/rest_primitives.py#L609-L650
def fetch_tuples(self, max_tuples=20, timeout=None): """ Fetch a number of tuples from this view. Fetching of data must have been started with :py:meth:`start_data_fetch` before calling this method. If ``timeout`` is ``None`` then the returned list will contain ``max_tuples`` tuples. Otherwise if the timeout is reached the list may contain less than ``max_tuples`` tuples. Args: max_tuples(int): Maximum number of tuples to fetch. timeout(float): Maximum time to wait for ``max_tuples`` tuples. Returns: list: List of fetched tuples. .. versionadded:: 1.12 """ tuples = list() if timeout is None: while len(tuples) < max_tuples: fetcher = self._data_fetcher if not fetcher: break tuples.append(fetcher.items.get()) return tuples timeout = float(timeout) end = time.time() + timeout while len(tuples) < max_tuples: qto = end - time.time() if qto <= 0: break try: fetcher = self._data_fetcher if not fetcher: break tuples.append(fetcher.items.get(timeout=qto)) except queue.Empty: break return tuples
[ "def", "fetch_tuples", "(", "self", ",", "max_tuples", "=", "20", ",", "timeout", "=", "None", ")", ":", "tuples", "=", "list", "(", ")", "if", "timeout", "is", "None", ":", "while", "len", "(", "tuples", ")", "<", "max_tuples", ":", "fetcher", "=", "self", ".", "_data_fetcher", "if", "not", "fetcher", ":", "break", "tuples", ".", "append", "(", "fetcher", ".", "items", ".", "get", "(", ")", ")", "return", "tuples", "timeout", "=", "float", "(", "timeout", ")", "end", "=", "time", ".", "time", "(", ")", "+", "timeout", "while", "len", "(", "tuples", ")", "<", "max_tuples", ":", "qto", "=", "end", "-", "time", ".", "time", "(", ")", "if", "qto", "<=", "0", ":", "break", "try", ":", "fetcher", "=", "self", ".", "_data_fetcher", "if", "not", "fetcher", ":", "break", "tuples", ".", "append", "(", "fetcher", ".", "items", ".", "get", "(", "timeout", "=", "qto", ")", ")", "except", "queue", ".", "Empty", ":", "break", "return", "tuples" ]
Fetch a number of tuples from this view. Fetching of data must have been started with :py:meth:`start_data_fetch` before calling this method. If ``timeout`` is ``None`` then the returned list will contain ``max_tuples`` tuples. Otherwise if the timeout is reached the list may contain less than ``max_tuples`` tuples. Args: max_tuples(int): Maximum number of tuples to fetch. timeout(float): Maximum time to wait for ``max_tuples`` tuples. Returns: list: List of fetched tuples. .. versionadded:: 1.12
[ "Fetch", "a", "number", "of", "tuples", "from", "this", "view", "." ]
python
train
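The documented start/fetch/stop cycle, assuming `view` is a View obtained from a running job:

view.start_data_fetch()
try:
    tuples = view.fetch_tuples(max_tuples=10, timeout=5.0)
finally:
    view.stop_data_fetch()
print(len(tuples), 'tuples fetched')   # may be fewer than 10 if the timeout hit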
learningequality/ricecooker
ricecooker/classes/nodes.py
https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/classes/nodes.py#L598-L611
def validate(self): """ validate: Makes sure audio is valid Args: None Returns: boolean indicating if audio is valid """ from .files import AudioFile try: assert self.kind == content_kinds.AUDIO, "Assumption Failed: Node should be audio" assert self.questions == [], "Assumption Failed: Audio should not have questions" assert len(self.files) > 0, "Assumption Failed: Audio should have at least one file" assert any(filter(lambda f: isinstance(f, AudioFile), self.files)), "Assumption Failed: Audio should have at least one audio file" return super(AudioNode, self).validate() except AssertionError as ae: raise InvalidNodeException("Invalid node ({}): {} - {}".format(ae.args[0], self.title, self.__dict__))
[ "def", "validate", "(", "self", ")", ":", "from", ".", "files", "import", "AudioFile", "try", ":", "assert", "self", ".", "kind", "==", "content_kinds", ".", "AUDIO", ",", "\"Assumption Failed: Node should be audio\"", "assert", "self", ".", "questions", "==", "[", "]", ",", "\"Assumption Failed: Audio should not have questions\"", "assert", "len", "(", "self", ".", "files", ")", ">", "0", ",", "\"Assumption Failed: Audio should have at least one file\"", "assert", "any", "(", "filter", "(", "lambda", "f", ":", "isinstance", "(", "f", ",", "AudioFile", ")", ",", "self", ".", "files", ")", ")", ",", "\"Assumption Failed: Audio should have at least one audio file\"", "return", "super", "(", "AudioNode", ",", "self", ")", ".", "validate", "(", ")", "except", "AssertionError", "as", "ae", ":", "raise", "InvalidNodeException", "(", "\"Invalid node ({}): {} - {}\"", ".", "format", "(", "ae", ".", "args", "[", "0", "]", ",", "self", ".", "title", ",", "self", ".", "__dict__", ")", ")" ]
validate: Makes sure audio is valid Args: None Returns: boolean indicating if audio is valid
[ "validate", ":", "Makes", "sure", "audio", "is", "valid", "Args", ":", "None", "Returns", ":", "boolean", "indicating", "if", "audio", "is", "valid" ]
python
train
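A hedged sketch of the failure path; the constructor arguments and import paths are illustrative, not the exact ricecooker signatures.

from ricecooker.classes.nodes import AudioNode
from ricecooker.exceptions import InvalidNodeException

node = AudioNode(source_id='demo-audio', title='Demo clip',
                 license='CC BY', files=[])        # no AudioFile attached
try:
    node.validate()
except InvalidNodeException as err:
    print(err)   # "Invalid node (...): Demo clip - ..."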
epfl-lts2/pygsp
pygsp/graphs/randomregular.py
https://github.com/epfl-lts2/pygsp/blob/8ce5bde39206129287375af24fdbcd7edddca8c5/pygsp/graphs/randomregular.py#L107-L136
def is_regular(self):
    r"""
    Troubleshoot a given regular graph.

    """
    warn = False
    msg = 'The given matrix'

    # check symmetry
    if np.abs(self.A - self.A.T).sum() > 0:
        warn = True
        msg = '{} is not symmetric,'.format(msg)

    # check parallel edges
    if self.A.max(axis=None) > 1:
        warn = True
        msg = '{} has parallel edges,'.format(msg)

    # check that d is d-regular
    if np.min(self.d) != np.max(self.d):
        warn = True
        msg = '{} is not d-regular,'.format(msg)

    # check that g doesn't contain any self-loop
    if self.A.diagonal().any():
        warn = True
        msg = '{} has self loop.'.format(msg)

    if warn:
        self.logger.warning('{}.'.format(msg[:-1]))
[ "def", "is_regular", "(", "self", ")", ":", "warn", "=", "False", "msg", "=", "'The given matrix'", "# check symmetry", "if", "np", ".", "abs", "(", "self", ".", "A", "-", "self", ".", "A", ".", "T", ")", ".", "sum", "(", ")", ">", "0", ":", "warn", "=", "True", "msg", "=", "'{} is not symmetric,'", ".", "format", "(", "msg", ")", "# check parallel edged", "if", "self", ".", "A", ".", "max", "(", "axis", "=", "None", ")", ">", "1", ":", "warn", "=", "True", "msg", "=", "'{} has parallel edges,'", ".", "format", "(", "msg", ")", "# check that d is d-regular", "if", "np", ".", "min", "(", "self", ".", "d", ")", "!=", "np", ".", "max", "(", "self", ".", "d", ")", ":", "warn", "=", "True", "msg", "=", "'{} is not d-regular,'", ".", "format", "(", "msg", ")", "# check that g doesn't contain any self-loop", "if", "self", ".", "A", ".", "diagonal", "(", ")", ".", "any", "(", ")", ":", "warn", "=", "True", "msg", "=", "'{} has self loop.'", ".", "format", "(", "msg", ")", "if", "warn", ":", "self", ".", "logger", ".", "warning", "(", "'{}.'", ".", "format", "(", "msg", "[", ":", "-", "1", "]", ")", ")" ]
r""" Troubleshoot a given regular graph.
[ "r", "Troubleshoot", "a", "given", "regular", "graph", "." ]
python
train
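Typical call site, with assumed constructor parameter names; the method returns nothing and only logs a warning listing the violated properties.

import pygsp

G = pygsp.graphs.RandomRegular(N=64, k=6)  # parameter names assumed
G.is_regular()   # silent when the realization really is 6-regular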