Dataset schema (each record below repeats these fields, one per line, in this order):
    repo              string, length 7-55
    path              string, length 4-223
    url               string, length 87-315
    code              string, length 75-104k
    code_tokens       list
    docstring         string, length 1-46.9k
    docstring_tokens  list
    language          string, 1 distinct value
    partition         string, 3 distinct values
    avg_line_len      float64, 7.91-980
OpenTreeOfLife/peyotl
peyotl/amendments/amendments_shard.py
https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/amendments/amendments_shard.py#L128-L137
def get_configuration_dict(self, secret_attrs=False): """Overrides superclass method and renames some properties""" cd = super(TaxonomicAmendmentsShard, self).get_configuration_dict(secret_attrs=secret_attrs) # "rename" some keys in the dict provided cd['number of amendments'] = cd.pop('number of documents') cd['amendments'] = cd.pop('documents') # add keys particular to this shard subclass if self._next_ott_id is not None: cd['_next_ott_id'] = self._next_ott_id, return cd
[ "def", "get_configuration_dict", "(", "self", ",", "secret_attrs", "=", "False", ")", ":", "cd", "=", "super", "(", "TaxonomicAmendmentsShard", ",", "self", ")", ".", "get_configuration_dict", "(", "secret_attrs", "=", "secret_attrs", ")", "# \"rename\" some keys in the dict provided", "cd", "[", "'number of amendments'", "]", "=", "cd", ".", "pop", "(", "'number of documents'", ")", "cd", "[", "'amendments'", "]", "=", "cd", ".", "pop", "(", "'documents'", ")", "# add keys particular to this shard subclass", "if", "self", ".", "_next_ott_id", "is", "not", "None", ":", "cd", "[", "'_next_ott_id'", "]", "=", "self", ".", "_next_ott_id", ",", "return", "cd" ]
Overrides superclass method and renames some properties
[ "Overrides", "superclass", "method", "and", "renames", "some", "properties" ]
python
train
54.4
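The override above is essentially dict-key renaming via pop; note also that the trailing comma in cd['_next_ott_id'] = self._next_ott_id, makes Python store a one-element tuple rather than the bare id. A minimal standalone sketch of the rename step, with no peyotl dependency:

    def rename_keys(cd):
        # Rename the generic document keys to amendment-specific ones,
        # exactly as the shard subclass does.
        cd['number of amendments'] = cd.pop('number of documents')
        cd['amendments'] = cd.pop('documents')
        return cd

    config = {'number of documents': 3, 'documents': ['a1', 'a2', 'a3']}
    print(rename_keys(config))
    # {'number of amendments': 3, 'amendments': ['a1', 'a2', 'a3']}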
codelv/enaml-native
src/enamlnative/android/android_wifi.py
https://github.com/codelv/enaml-native/blob/c33986e9eda468c508806e0a3e73c771401e5718/src/enamlnative/android/android_wifi.py#L232-L262
def disconnect(cls): """ Disconnect from the current network (if connected). Returns -------- result: future A future that resolves to true if the disconnect was successful. Will be set to None if the change network permission is denied. """ app = AndroidApplication.instance() f = app.create_future() def on_permission_result(result): if not result: f.set_result(None) return def on_ready(mgr): mgr.disconnect().then(f.set_result) #: Get the service WifiManager.get().then(on_ready) #: Request permissions WifiManager.request_permission([ WifiManager.PERMISSION_CHANGE_WIFI_STATE ]).then(on_permission_result) return f
[ "def", "disconnect", "(", "cls", ")", ":", "app", "=", "AndroidApplication", ".", "instance", "(", ")", "f", "=", "app", ".", "create_future", "(", ")", "def", "on_permission_result", "(", "result", ")", ":", "if", "not", "result", ":", "f", ".", "set_result", "(", "None", ")", "return", "def", "on_ready", "(", "mgr", ")", ":", "mgr", ".", "disconnect", "(", ")", ".", "then", "(", "f", ".", "set_result", ")", "#: Get the service", "WifiManager", ".", "get", "(", ")", ".", "then", "(", "on_ready", ")", "#: Request permissions", "WifiManager", ".", "request_permission", "(", "[", "WifiManager", ".", "PERMISSION_CHANGE_WIFI_STATE", "]", ")", ".", "then", "(", "on_permission_result", ")", "return", "f" ]
Disconnect from the current network (if connected). Returns -------- result: future A future that resolves to true if the disconnect was successful. Will be set to None if the change network permission is denied.
[ "Disconnect", "from", "the", "current", "network", "(", "if", "connected", ")", ".", "Returns", "--------", "result", ":", "future", "A", "future", "that", "resolves", "to", "true", "if", "the", "disconnect", "was", "successful", ".", "Will", "be", "set", "to", "None", "if", "the", "change", "network", "permission", "is", "denied", "." ]
python
train
28.064516
nutechsoftware/alarmdecoder
alarmdecoder/decoder.py
https://github.com/nutechsoftware/alarmdecoder/blob/b0c014089e24455228cb4402cf30ba98157578cd/alarmdecoder/decoder.py#L388-L397
def _wire_events(self): """ Wires up the internal device events. """ self._device.on_open += self._on_open self._device.on_close += self._on_close self._device.on_read += self._on_read self._device.on_write += self._on_write self._zonetracker.on_fault += self._on_zone_fault self._zonetracker.on_restore += self._on_zone_restore
[ "def", "_wire_events", "(", "self", ")", ":", "self", ".", "_device", ".", "on_open", "+=", "self", ".", "_on_open", "self", ".", "_device", ".", "on_close", "+=", "self", ".", "_on_close", "self", ".", "_device", ".", "on_read", "+=", "self", ".", "_on_read", "self", ".", "_device", ".", "on_write", "+=", "self", ".", "_on_write", "self", ".", "_zonetracker", ".", "on_fault", "+=", "self", ".", "_on_zone_fault", "self", ".", "_zonetracker", ".", "on_restore", "+=", "self", ".", "_on_zone_restore" ]
Wires up the internal device events.
[ "Wires", "up", "the", "internal", "device", "events", "." ]
python
train
39.1
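The += wiring only works because on_open, on_fault, and friends are event objects whose __iadd__ registers a handler; the sketch below is an illustrative stand-in for such an event class, not alarmdecoder's actual implementation:

    class Event(object):
        """Tiny event holder: `+=` subscribes a handler, calling the event fires them all."""
        def __init__(self):
            self._handlers = []

        def __iadd__(self, handler):
            self._handlers.append(handler)
            return self

        def __call__(self, *args, **kwargs):
            for handler in self._handlers:
                handler(*args, **kwargs)

    on_open = Event()
    on_open += lambda device: print("opened", device)
    on_open("/dev/ttyUSB0")  # -> opened /dev/ttyUSB0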
pricingassistant/mongokat
mongokat/document.py
https://github.com/pricingassistant/mongokat/blob/61eaf4bc1c4cc359c6f9592ec97b9a04d9561411/mongokat/document.py#L102-L112
def refetch_fields(self, missing_fields): """ Refetches a list of fields from the DB """ db_fields = self.mongokat_collection.find_one({"_id": self["_id"]}, fields={k: 1 for k in missing_fields}) self._fetched_fields += tuple(missing_fields) if not db_fields: return for k, v in db_fields.items(): self[k] = v
[ "def", "refetch_fields", "(", "self", ",", "missing_fields", ")", ":", "db_fields", "=", "self", ".", "mongokat_collection", ".", "find_one", "(", "{", "\"_id\"", ":", "self", "[", "\"_id\"", "]", "}", ",", "fields", "=", "{", "k", ":", "1", "for", "k", "in", "missing_fields", "}", ")", "self", ".", "_fetched_fields", "+=", "tuple", "(", "missing_fields", ")", "if", "not", "db_fields", ":", "return", "for", "k", ",", "v", "in", "db_fields", ".", "items", "(", ")", ":", "self", "[", "k", "]", "=", "v" ]
Refetches a list of fields from the DB
[ "Refetches", "a", "list", "of", "fields", "from", "the", "DB" ]
python
train
33.272727
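The refetch relies on a field projection passed to find_one; the fields= keyword is mongokat's wrapper argument, while stock pymongo takes the projection as the second positional argument. A rough equivalent with plain pymongo, assuming a reachable MongoDB instance and placeholder names:

    from pymongo import MongoClient

    collection = MongoClient("mongodb://localhost:27017")["mydb"]["mycol"]
    missing_fields = ["email", "score"]
    doc_id = "some-id"  # placeholder _id

    # Ask the server for only the missing fields (_id comes back by default).
    partial = collection.find_one({"_id": doc_id}, {k: 1 for k in missing_fields})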
GoogleCloudPlatform/flask-talisman
flask_talisman/talisman.py
https://github.com/GoogleCloudPlatform/flask-talisman/blob/c45a9a5b2671b9667856e281d2726c8bfd0f0fd7/flask_talisman/talisman.py#L193-L218
def _force_https(self): """Redirect any non-https requests to https. Based largely on flask-sslify. """ if self.session_cookie_secure: if not self.app.debug: self.app.config['SESSION_COOKIE_SECURE'] = True criteria = [ self.app.debug, flask.request.is_secure, flask.request.headers.get('X-Forwarded-Proto', 'http') == 'https', ] local_options = self._get_local_options() if local_options['force_https'] and not any(criteria): if flask.request.url.startswith('http://'): url = flask.request.url.replace('http://', 'https://', 1) code = 302 if self.force_https_permanent: code = 301 r = flask.redirect(url, code=code) return r
[ "def", "_force_https", "(", "self", ")", ":", "if", "self", ".", "session_cookie_secure", ":", "if", "not", "self", ".", "app", ".", "debug", ":", "self", ".", "app", ".", "config", "[", "'SESSION_COOKIE_SECURE'", "]", "=", "True", "criteria", "=", "[", "self", ".", "app", ".", "debug", ",", "flask", ".", "request", ".", "is_secure", ",", "flask", ".", "request", ".", "headers", ".", "get", "(", "'X-Forwarded-Proto'", ",", "'http'", ")", "==", "'https'", ",", "]", "local_options", "=", "self", ".", "_get_local_options", "(", ")", "if", "local_options", "[", "'force_https'", "]", "and", "not", "any", "(", "criteria", ")", ":", "if", "flask", ".", "request", ".", "url", ".", "startswith", "(", "'http://'", ")", ":", "url", "=", "flask", ".", "request", ".", "url", ".", "replace", "(", "'http://'", ",", "'https://'", ",", "1", ")", "code", "=", "302", "if", "self", ".", "force_https_permanent", ":", "code", "=", "301", "r", "=", "flask", ".", "redirect", "(", "url", ",", "code", "=", "code", ")", "return", "r" ]
Redirect any non-https requests to https. Based largely on flask-sslify.
[ "Redirect", "any", "non", "-", "https", "requests", "to", "https", "." ]
python
train
32.461538
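_force_https runs on every request once the extension is initialised; typical application-level usage looks like the sketch below (the force_https and force_https_permanent keywords mirror the attributes read above, but treat the exact defaults as an assumption):

    from flask import Flask
    from flask_talisman import Talisman

    app = Flask(__name__)
    # Redirect plain-HTTP requests with a 302 (or 301 if force_https_permanent=True).
    Talisman(app, force_https=True, force_https_permanent=False)

    @app.route("/")
    def index():
        return "served over https outside debug mode"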
pydata/pandas-gbq
pandas_gbq/gbq.py
https://github.com/pydata/pandas-gbq/blob/e590317b3325939ede7563f49aa6b163bb803b77/pandas_gbq/gbq.py#L1351-L1374
def create(self, dataset_id): """ Create a dataset in Google BigQuery Parameters ---------- dataset : str Name of dataset to be written """ from google.cloud.bigquery import Dataset if self.exists(dataset_id): raise DatasetCreationError( "Dataset {0} already " "exists".format(dataset_id) ) dataset = Dataset(self.client.dataset(dataset_id)) if self.location is not None: dataset.location = self.location try: self.client.create_dataset(dataset) except self.http_error as ex: self.process_http_error(ex)
[ "def", "create", "(", "self", ",", "dataset_id", ")", ":", "from", "google", ".", "cloud", ".", "bigquery", "import", "Dataset", "if", "self", ".", "exists", "(", "dataset_id", ")", ":", "raise", "DatasetCreationError", "(", "\"Dataset {0} already \"", "\"exists\"", ".", "format", "(", "dataset_id", ")", ")", "dataset", "=", "Dataset", "(", "self", ".", "client", ".", "dataset", "(", "dataset_id", ")", ")", "if", "self", ".", "location", "is", "not", "None", ":", "dataset", ".", "location", "=", "self", ".", "location", "try", ":", "self", ".", "client", ".", "create_dataset", "(", "dataset", ")", "except", "self", ".", "http_error", "as", "ex", ":", "self", ".", "process_http_error", "(", "ex", ")" ]
Create a dataset in Google BigQuery Parameters ---------- dataset : str Name of dataset to be written
[ "Create", "a", "dataset", "in", "Google", "BigQuery" ]
python
train
27.541667
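The method is a thin wrapper over google-cloud-bigquery's dataset creation; a direct sketch with that client follows. Application-default credentials are assumed, "my_dataset" is a placeholder id, and client.dataset() has been deprecated in newer library versions in favour of DatasetReference, so take the call names as version-dependent:

    from google.cloud import bigquery

    client = bigquery.Client()  # assumes application-default credentials
    dataset = bigquery.Dataset(client.dataset("my_dataset"))  # placeholder dataset id
    dataset.location = "US"
    client.create_dataset(dataset)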
xtream1101/web-wrapper
web_wrapper/driver_selenium_phantomjs.py
https://github.com/xtream1101/web-wrapper/blob/2bfc63caa7d316564088951f01a490db493ea240/web_wrapper/driver_selenium_phantomjs.py#L92-L100
def _create_session(self): """ Creates a fresh session with no/default headers and proxies """ logger.debug("Create new phantomjs web driver") self.driver = webdriver.PhantomJS(desired_capabilities=self.dcap, **self.driver_args) self.set_cookies(self.current_cookies) self.driver.set_window_size(1920, 1080)
[ "def", "_create_session", "(", "self", ")", ":", "logger", ".", "debug", "(", "\"Create new phantomjs web driver\"", ")", "self", ".", "driver", "=", "webdriver", ".", "PhantomJS", "(", "desired_capabilities", "=", "self", ".", "dcap", ",", "*", "*", "self", ".", "driver_args", ")", "self", ".", "set_cookies", "(", "self", ".", "current_cookies", ")", "self", ".", "driver", ".", "set_window_size", "(", "1920", ",", "1080", ")" ]
Creates a fresh session with no/default headers and proxies
[ "Creates", "a", "fresh", "session", "with", "no", "/", "default", "headers", "and", "proxies" ]
python
train
44.111111
duniter/duniter-python-api
duniterpy/documents/transaction.py
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/documents/transaction.py#L722-L760
def compact(self) -> str: """ Return a transaction in its compact format from the instance :return: """ """TX:VERSION:NB_ISSUERS:NB_INPUTS:NB_UNLOCKS:NB_OUTPUTS:HAS_COMMENT:LOCKTIME PUBLIC_KEY:INDEX ... INDEX:SOURCE:FINGERPRINT:AMOUNT ... PUBLIC_KEY:AMOUNT ... COMMENT """ doc = "TX:{0}:{1}:{2}:{3}:{4}:{5}:{6}\n".format(self.version, len(self.issuers), len(self.inputs), len(self.unlocks), len(self.outputs), '1' if self.comment != "" else '0', self.locktime) if self.version >= 3: doc += "{0}\n".format(self.blockstamp) for pubkey in self.issuers: doc += "{0}\n".format(pubkey) for i in self.inputs: doc += "{0}\n".format(i.inline(self.version)) for u in self.unlocks: doc += "{0}\n".format(u.inline()) for o in self.outputs: doc += "{0}\n".format(o.inline()) if self.comment != "": doc += "{0}\n".format(self.comment) for s in self.signatures: doc += "{0}\n".format(s) return doc
[ "def", "compact", "(", "self", ")", "->", "str", ":", "\"\"\"TX:VERSION:NB_ISSUERS:NB_INPUTS:NB_UNLOCKS:NB_OUTPUTS:HAS_COMMENT:LOCKTIME\nPUBLIC_KEY:INDEX\n...\nINDEX:SOURCE:FINGERPRINT:AMOUNT\n...\nPUBLIC_KEY:AMOUNT\n...\nCOMMENT\n\"\"\"", "doc", "=", "\"TX:{0}:{1}:{2}:{3}:{4}:{5}:{6}\\n\"", ".", "format", "(", "self", ".", "version", ",", "len", "(", "self", ".", "issuers", ")", ",", "len", "(", "self", ".", "inputs", ")", ",", "len", "(", "self", ".", "unlocks", ")", ",", "len", "(", "self", ".", "outputs", ")", ",", "'1'", "if", "self", ".", "comment", "!=", "\"\"", "else", "'0'", ",", "self", ".", "locktime", ")", "if", "self", ".", "version", ">=", "3", ":", "doc", "+=", "\"{0}\\n\"", ".", "format", "(", "self", ".", "blockstamp", ")", "for", "pubkey", "in", "self", ".", "issuers", ":", "doc", "+=", "\"{0}\\n\"", ".", "format", "(", "pubkey", ")", "for", "i", "in", "self", ".", "inputs", ":", "doc", "+=", "\"{0}\\n\"", ".", "format", "(", "i", ".", "inline", "(", "self", ".", "version", ")", ")", "for", "u", "in", "self", ".", "unlocks", ":", "doc", "+=", "\"{0}\\n\"", ".", "format", "(", "u", ".", "inline", "(", ")", ")", "for", "o", "in", "self", ".", "outputs", ":", "doc", "+=", "\"{0}\\n\"", ".", "format", "(", "o", ".", "inline", "(", ")", ")", "if", "self", ".", "comment", "!=", "\"\"", ":", "doc", "+=", "\"{0}\\n\"", ".", "format", "(", "self", ".", "comment", ")", "for", "s", "in", "self", ".", "signatures", ":", "doc", "+=", "\"{0}\\n\"", ".", "format", "(", "s", ")", "return", "doc" ]
Return a transaction in its compact format from the instance :return:
[ "Return", "a", "transaction", "in", "its", "compact", "format", "from", "the", "instance" ]
python
train
35.358974
saltstack/salt
salt/runners/http.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/runners/http.py#L18-L45
def query(url, output=True, **kwargs): ''' Query a resource, and decode the return data Passes through all the parameters described in the :py:func:`utils.http.query function <salt.utils.http.query>`: CLI Example: .. code-block:: bash salt-run http.query http://somelink.com/ salt-run http.query http://somelink.com/ method=POST \ params='key1=val1&key2=val2' salt-run http.query http://somelink.com/ method=POST \ data='<xml>somecontent</xml>' ''' if output is not True: log.warning('Output option has been deprecated. Please use --quiet.') if 'node' not in kwargs: kwargs['node'] = 'master' opts = __opts__.copy() if 'opts' in kwargs: opts.update(kwargs['opts']) del kwargs['opts'] ret = salt.utils.http.query(url=url, opts=opts, **kwargs) return ret
[ "def", "query", "(", "url", ",", "output", "=", "True", ",", "*", "*", "kwargs", ")", ":", "if", "output", "is", "not", "True", ":", "log", ".", "warning", "(", "'Output option has been deprecated. Please use --quiet.'", ")", "if", "'node'", "not", "in", "kwargs", ":", "kwargs", "[", "'node'", "]", "=", "'master'", "opts", "=", "__opts__", ".", "copy", "(", ")", "if", "'opts'", "in", "kwargs", ":", "opts", ".", "update", "(", "kwargs", "[", "'opts'", "]", ")", "del", "kwargs", "[", "'opts'", "]", "ret", "=", "salt", ".", "utils", ".", "http", ".", "query", "(", "url", "=", "url", ",", "opts", "=", "opts", ",", "*", "*", "kwargs", ")", "return", "ret" ]
Query a resource, and decode the return data Passes through all the parameters described in the :py:func:`utils.http.query function <salt.utils.http.query>`: CLI Example: .. code-block:: bash salt-run http.query http://somelink.com/ salt-run http.query http://somelink.com/ method=POST \ params='key1=val1&key2=val2' salt-run http.query http://somelink.com/ method=POST \ data='<xml>somecontent</xml>'
[ "Query", "a", "resource", "and", "decode", "the", "return", "data" ]
python
train
30.785714
hughsie/python-appstream
appstream/component.py
https://github.com/hughsie/python-appstream/blob/f2606380278c5728ee7f8e7d19914c54fca05e76/appstream/component.py#L243-L251
def _parse_tree(self, node): """ Parse a <image> object """ if 'type' in node.attrib: self.kind = node.attrib['type'] if 'width' in node.attrib: self.width = int(node.attrib['width']) if 'height' in node.attrib: self.height = int(node.attrib['height']) self.url = node.text
[ "def", "_parse_tree", "(", "self", ",", "node", ")", ":", "if", "'type'", "in", "node", ".", "attrib", ":", "self", ".", "kind", "=", "node", ".", "attrib", "[", "'type'", "]", "if", "'width'", "in", "node", ".", "attrib", ":", "self", ".", "width", "=", "int", "(", "node", ".", "attrib", "[", "'width'", "]", ")", "if", "'height'", "in", "node", ".", "attrib", ":", "self", ".", "height", "=", "int", "(", "node", ".", "attrib", "[", "'height'", "]", ")", "self", ".", "url", "=", "node", ".", "text" ]
Parse a <image> object
[ "Parse", "a", "<image", ">", "object" ]
python
train
37.888889
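The node argument is an ElementTree-style element for an <image> tag; the same attribute-and-text extraction can be exercised standalone with the standard library:

    import xml.etree.ElementTree as ET

    node = ET.fromstring(
        '<image type="source" width="1280" height="720">'
        'https://example.com/shot.png</image>'
    )

    kind = node.attrib.get('type')
    width = int(node.attrib['width']) if 'width' in node.attrib else None
    height = int(node.attrib['height']) if 'height' in node.attrib else None
    url = node.text
    print(kind, width, height, url)  # source 1280 720 https://example.com/shot.png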
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/visuals/graphs/graph.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/visuals/graphs/graph.py#L177-L226
def set_data(self, adjacency_mat=None, **kwargs): """Set the data Parameters ---------- adjacency_mat : ndarray | None The adjacency matrix. **kwargs : dict Keyword arguments to pass to the arrows. """ if adjacency_mat is not None: if adjacency_mat.shape[0] != adjacency_mat.shape[1]: raise ValueError("Adjacency matrix should be square.") self._adjacency_mat = adjacency_mat for k in self._arrow_attributes: if k in kwargs: translated = (self._arrow_kw_trans[k] if k in self._arrow_kw_trans else k) setattr(self._edges, translated, kwargs.pop(k)) arrow_kwargs = {} for k in self._arrow_kwargs: if k in kwargs: translated = (self._arrow_kw_trans[k] if k in self._arrow_kw_trans else k) arrow_kwargs[translated] = kwargs.pop(k) node_kwargs = {} for k in self._node_kwargs: if k in kwargs: translated = (self._node_kw_trans[k] if k in self._node_kw_trans else k) node_kwargs[translated] = kwargs.pop(k) if len(kwargs) > 0: raise TypeError("%s.set_data() got invalid keyword arguments: %S" % (self.__class__.__name__, list(kwargs.keys()))) # The actual data is set in GraphVisual.animate_layout or # GraphVisual.set_final_layout self._arrow_data = arrow_kwargs self._node_data = node_kwargs if not self._animate: self.set_final_layout()
[ "def", "set_data", "(", "self", ",", "adjacency_mat", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "adjacency_mat", "is", "not", "None", ":", "if", "adjacency_mat", ".", "shape", "[", "0", "]", "!=", "adjacency_mat", ".", "shape", "[", "1", "]", ":", "raise", "ValueError", "(", "\"Adjacency matrix should be square.\"", ")", "self", ".", "_adjacency_mat", "=", "adjacency_mat", "for", "k", "in", "self", ".", "_arrow_attributes", ":", "if", "k", "in", "kwargs", ":", "translated", "=", "(", "self", ".", "_arrow_kw_trans", "[", "k", "]", "if", "k", "in", "self", ".", "_arrow_kw_trans", "else", "k", ")", "setattr", "(", "self", ".", "_edges", ",", "translated", ",", "kwargs", ".", "pop", "(", "k", ")", ")", "arrow_kwargs", "=", "{", "}", "for", "k", "in", "self", ".", "_arrow_kwargs", ":", "if", "k", "in", "kwargs", ":", "translated", "=", "(", "self", ".", "_arrow_kw_trans", "[", "k", "]", "if", "k", "in", "self", ".", "_arrow_kw_trans", "else", "k", ")", "arrow_kwargs", "[", "translated", "]", "=", "kwargs", ".", "pop", "(", "k", ")", "node_kwargs", "=", "{", "}", "for", "k", "in", "self", ".", "_node_kwargs", ":", "if", "k", "in", "kwargs", ":", "translated", "=", "(", "self", ".", "_node_kw_trans", "[", "k", "]", "if", "k", "in", "self", ".", "_node_kw_trans", "else", "k", ")", "node_kwargs", "[", "translated", "]", "=", "kwargs", ".", "pop", "(", "k", ")", "if", "len", "(", "kwargs", ")", ">", "0", ":", "raise", "TypeError", "(", "\"%s.set_data() got invalid keyword arguments: %S\"", "%", "(", "self", ".", "__class__", ".", "__name__", ",", "list", "(", "kwargs", ".", "keys", "(", ")", ")", ")", ")", "# The actual data is set in GraphVisual.animate_layout or", "# GraphVisual.set_final_layout", "self", ".", "_arrow_data", "=", "arrow_kwargs", "self", ".", "_node_data", "=", "node_kwargs", "if", "not", "self", ".", "_animate", ":", "self", ".", "set_final_layout", "(", ")" ]
Set the data Parameters ---------- adjacency_mat : ndarray | None The adjacency matrix. **kwargs : dict Keyword arguments to pass to the arrows.
[ "Set", "the", "data" ]
python
train
33.54
kamikaze/webdav
src/webdav/client.py
https://github.com/kamikaze/webdav/blob/6facff7224023d3e28c8e1592f3c58401c91a0e6/src/webdav/client.py#L254-L262
def free(self): """Returns an amount of free space on remote WebDAV server. More information you can find by link http://webdav.org/specs/rfc4918.html#METHOD_PROPFIND :return: an amount of free space in bytes. """ data = WebDavXmlUtils.create_free_space_request_content() response = self.execute_request(action='free', path='', data=data) return WebDavXmlUtils.parse_free_space_response(response.content, self.webdav.hostname)
[ "def", "free", "(", "self", ")", ":", "data", "=", "WebDavXmlUtils", ".", "create_free_space_request_content", "(", ")", "response", "=", "self", ".", "execute_request", "(", "action", "=", "'free'", ",", "path", "=", "''", ",", "data", "=", "data", ")", "return", "WebDavXmlUtils", ".", "parse_free_space_response", "(", "response", ".", "content", ",", "self", ".", "webdav", ".", "hostname", ")" ]
Returns an amount of free space on remote WebDAV server. More information you can find by link http://webdav.org/specs/rfc4918.html#METHOD_PROPFIND :return: an amount of free space in bytes.
[ "Returns", "an", "amount", "of", "free", "space", "on", "remote", "WebDAV", "server", ".", "More", "information", "you", "can", "find", "by", "link", "http", ":", "//", "webdav", ".", "org", "/", "specs", "/", "rfc4918", ".", "html#METHOD_PROPFIND" ]
python
train
52.777778
deginner/mq-client
mq_client.py
https://github.com/deginner/mq-client/blob/a20ab50ea18870c01e8d142b049233c355858872/mq_client.py#L10-L33
def _on_message(channel, method, header, body): """ Invoked by pika when a message is delivered from RabbitMQ. The channel is passed for your convenience. The basic_deliver object that is passed in carries the exchange, routing key, delivery tag and a redelivered flag for the message. The properties passed in is an instance of BasicProperties with the message properties and the body is the message that was sent. :param pika.channel.Channel channel: The channel object :param pika.Spec.Basic.Deliver method: The Deliver method :param pika.Spec.BasicProperties properties: The client properties :param str|unicode body: The message body """ print "Message:" print "\t%r" % method print "\t%r" % header print "\t%r" % body # Acknowledge message receipt channel.basic_ack(method.delivery_tag) # when ready, stop consuming channel.stop_consuming()
[ "def", "_on_message", "(", "channel", ",", "method", ",", "header", ",", "body", ")", ":", "print", "\"Message:\"", "print", "\"\\t%r\"", "%", "method", "print", "\"\\t%r\"", "%", "header", "print", "\"\\t%r\"", "%", "body", "# Acknowledge message receipt", "channel", ".", "basic_ack", "(", "method", ".", "delivery_tag", ")", "# when ready, stop consuming", "channel", ".", "stop_consuming", "(", ")" ]
Invoked by pika when a message is delivered from RabbitMQ. The channel is passed for your convenience. The basic_deliver object that is passed in carries the exchange, routing key, delivery tag and a redelivered flag for the message. The properties passed in is an instance of BasicProperties with the message properties and the body is the message that was sent. :param pika.channel.Channel channel: The channel object :param pika.Spec.Basic.Deliver method: The Deliver method :param pika.Spec.BasicProperties properties: The client properties :param str|unicode body: The message body
[ "Invoked", "by", "pika", "when", "a", "message", "is", "delivered", "from", "RabbitMQ", ".", "The", "channel", "is", "passed", "for", "your", "convenience", ".", "The", "basic_deliver", "object", "that", "is", "passed", "in", "carries", "the", "exchange", "routing", "key", "delivery", "tag", "and", "a", "redelivered", "flag", "for", "the", "message", ".", "The", "properties", "passed", "in", "is", "an", "instance", "of", "BasicProperties", "with", "the", "message", "properties", "and", "the", "body", "is", "the", "message", "that", "was", "sent", "." ]
python
train
37.75
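The callback has pika's consumer signature, and the print statements mark this record as Python 2 code. Hooking the callback up depends on the pika version; under pika 1.x the wiring looks roughly like the sketch below (queue name and broker address are placeholders, and older pika releases took the callback as the first positional argument of basic_consume instead):

    import pika
    from mq_client import _on_message  # the callback shown in this record

    connection = pika.BlockingConnection(pika.ConnectionParameters("localhost"))
    channel = connection.channel()
    channel.queue_declare(queue="test")
    channel.basic_consume(queue="test", on_message_callback=_on_message)
    channel.start_consuming()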
cmck/pybrowserstack-screenshots
browserstack_screenshots/__init__.py
https://github.com/cmck/pybrowserstack-screenshots/blob/598358fc5b9a41678b3f913f2c082a288011322d/browserstack_screenshots/__init__.py#L79-L87
def generate_screenshots(self): """ Take a config file as input and generate screenshots """ headers = {'content-type': 'application/json', 'Accept': 'application/json'} resp = requests.post(self.api_url, data=json.dumps(self.config), \ headers=headers, auth=self.auth) resp = self._process_response(resp) return resp.json()
[ "def", "generate_screenshots", "(", "self", ")", ":", "headers", "=", "{", "'content-type'", ":", "'application/json'", ",", "'Accept'", ":", "'application/json'", "}", "resp", "=", "requests", ".", "post", "(", "self", ".", "api_url", ",", "data", "=", "json", ".", "dumps", "(", "self", ".", "config", ")", ",", "headers", "=", "headers", ",", "auth", "=", "self", ".", "auth", ")", "resp", "=", "self", ".", "_process_response", "(", "resp", ")", "return", "resp", ".", "json", "(", ")" ]
Take a config file as input and generate screenshots
[ "Take", "a", "config", "file", "as", "input", "and", "generate", "screenshots" ]
python
train
44.555556
lduchesne/python-openstacksdk-hubic
hubic/hubic.py
https://github.com/lduchesne/python-openstacksdk-hubic/blob/25e752f847613bb7e068c05e094a8abadaa7925a/hubic/hubic.py#L94-L117
def get_endpoint(self, session, **kwargs): """Get the HubiC storage endpoint uri. If the current session has not been authenticated, this will trigger a new authentication to the HubiC OAuth service. :param keystoneclient.Session session: The session object to use for queries. :raises keystoneclient.exceptions.AuthorizationFailure: if something goes wrong. :returns: The uri to use for object-storage v1 requests. :rtype: string """ if self.endpoint is None: try: self._refresh_tokens(session) self._fetch_credentials(session) except: raise AuthorizationFailure() return self.endpoint
[ "def", "get_endpoint", "(", "self", ",", "session", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "endpoint", "is", "None", ":", "try", ":", "self", ".", "_refresh_tokens", "(", "session", ")", "self", ".", "_fetch_credentials", "(", "session", ")", "except", ":", "raise", "AuthorizationFailure", "(", ")", "return", "self", ".", "endpoint" ]
Get the HubiC storage endpoint uri. If the current session has not been authenticated, this will trigger a new authentication to the HubiC OAuth service. :param keystoneclient.Session session: The session object to use for queries. :raises keystoneclient.exceptions.AuthorizationFailure: if something goes wrong. :returns: The uri to use for object-storage v1 requests. :rtype: string
[ "Get", "the", "HubiC", "storage", "endpoint", "uri", "." ]
python
train
34.666667
blackecho/Deep-Learning-TensorFlow
yadlt/models/autoencoders/stacked_denoising_autoencoder.py
https://github.com/blackecho/Deep-Learning-TensorFlow/blob/ddeb1f2848da7b7bee166ad2152b4afc46bb2086/yadlt/models/autoencoders/stacked_denoising_autoencoder.py#L210-L237
def _create_variables_no_pretrain(self, n_features): """Create model variables (no previous unsupervised pretraining). :param n_features: number of features :return: self """ self.encoding_w_ = [] self.encoding_b_ = [] for l, layer in enumerate(self.layers): w_name = 'enc-w-{}'.format(l) b_name = 'enc-b-{}'.format(l) if l == 0: w_shape = [n_features, self.layers[l]] else: w_shape = [self.layers[l - 1], self.layers[l]] w_init = tf.truncated_normal(shape=w_shape, stddev=0.1) W = tf.Variable(w_init, name=w_name) tf.summary.histogram(w_name, W) self.encoding_w_.append(W) b_init = tf.constant(0.1, shape=[self.layers[l]]) b = tf.Variable(b_init, name=b_name) tf.summary.histogram(b_name, b) self.encoding_b_.append(b)
[ "def", "_create_variables_no_pretrain", "(", "self", ",", "n_features", ")", ":", "self", ".", "encoding_w_", "=", "[", "]", "self", ".", "encoding_b_", "=", "[", "]", "for", "l", ",", "layer", "in", "enumerate", "(", "self", ".", "layers", ")", ":", "w_name", "=", "'enc-w-{}'", ".", "format", "(", "l", ")", "b_name", "=", "'enc-b-{}'", ".", "format", "(", "l", ")", "if", "l", "==", "0", ":", "w_shape", "=", "[", "n_features", ",", "self", ".", "layers", "[", "l", "]", "]", "else", ":", "w_shape", "=", "[", "self", ".", "layers", "[", "l", "-", "1", "]", ",", "self", ".", "layers", "[", "l", "]", "]", "w_init", "=", "tf", ".", "truncated_normal", "(", "shape", "=", "w_shape", ",", "stddev", "=", "0.1", ")", "W", "=", "tf", ".", "Variable", "(", "w_init", ",", "name", "=", "w_name", ")", "tf", ".", "summary", ".", "histogram", "(", "w_name", ",", "W", ")", "self", ".", "encoding_w_", ".", "append", "(", "W", ")", "b_init", "=", "tf", ".", "constant", "(", "0.1", ",", "shape", "=", "[", "self", ".", "layers", "[", "l", "]", "]", ")", "b", "=", "tf", ".", "Variable", "(", "b_init", ",", "name", "=", "b_name", ")", "tf", ".", "summary", ".", "histogram", "(", "b_name", ",", "b", ")", "self", ".", "encoding_b_", ".", "append", "(", "b", ")" ]
Create model variables (no previous unsupervised pretraining). :param n_features: number of features :return: self
[ "Create", "model", "variables", "(", "no", "previous", "unsupervised", "pretraining", ")", "." ]
python
train
33.214286
robinandeer/puzzle
puzzle/plugins/gemini/mixins/case.py
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/gemini/mixins/case.py#L70-L82
def individual(self, ind_id=None): """Return a individual object Args: ind_id (str): A individual id Returns: individual (puzzle.models.individual) """ for ind_obj in self.individuals: if ind_obj.ind_id == ind_id: return ind_obj return None
[ "def", "individual", "(", "self", ",", "ind_id", "=", "None", ")", ":", "for", "ind_obj", "in", "self", ".", "individuals", ":", "if", "ind_obj", ".", "ind_id", "==", "ind_id", ":", "return", "ind_obj", "return", "None" ]
Return a individual object Args: ind_id (str): A individual id Returns: individual (puzzle.models.individual)
[ "Return", "a", "individual", "object", "Args", ":", "ind_id", "(", "str", ")", ":", "A", "individual", "id", "Returns", ":", "individual", "(", "puzzle", ".", "models", ".", "individual", ")" ]
python
train
28.153846
bachya/pyopenuv
pyopenuv/client.py
https://github.com/bachya/pyopenuv/blob/f7c2f9dd99dd4e3b8b1f9e501ea17ce62a7ace46/pyopenuv/client.py#L27-L59
async def request( self, method: str, endpoint: str, *, headers: dict = None, params: dict = None) -> dict: """Make a request against air-matters.com.""" url = '{0}/{1}'.format(API_URL_SCAFFOLD, endpoint) if not headers: headers = {} headers.update({'x-access-token': self._api_key}) if not params: params = {} params.update({ 'lat': self.latitude, 'lng': self.longitude, 'alt': self.altitude }) async with self._websession.request(method, url, headers=headers, params=params) as resp: try: resp.raise_for_status() return await resp.json(content_type=None) except client_exceptions.ClientError as err: if any(code in str(err) for code in ('401', '403')): raise InvalidApiKeyError('Invalid API key') raise RequestError( 'Error requesting data from {0}: {1}'.format( endpoint, err)) from None
[ "async", "def", "request", "(", "self", ",", "method", ":", "str", ",", "endpoint", ":", "str", ",", "*", ",", "headers", ":", "dict", "=", "None", ",", "params", ":", "dict", "=", "None", ")", "->", "dict", ":", "url", "=", "'{0}/{1}'", ".", "format", "(", "API_URL_SCAFFOLD", ",", "endpoint", ")", "if", "not", "headers", ":", "headers", "=", "{", "}", "headers", ".", "update", "(", "{", "'x-access-token'", ":", "self", ".", "_api_key", "}", ")", "if", "not", "params", ":", "params", "=", "{", "}", "params", ".", "update", "(", "{", "'lat'", ":", "self", ".", "latitude", ",", "'lng'", ":", "self", ".", "longitude", ",", "'alt'", ":", "self", ".", "altitude", "}", ")", "async", "with", "self", ".", "_websession", ".", "request", "(", "method", ",", "url", ",", "headers", "=", "headers", ",", "params", "=", "params", ")", "as", "resp", ":", "try", ":", "resp", ".", "raise_for_status", "(", ")", "return", "await", "resp", ".", "json", "(", "content_type", "=", "None", ")", "except", "client_exceptions", ".", "ClientError", "as", "err", ":", "if", "any", "(", "code", "in", "str", "(", "err", ")", "for", "code", "in", "(", "'401'", ",", "'403'", ")", ")", ":", "raise", "InvalidApiKeyError", "(", "'Invalid API key'", ")", "raise", "RequestError", "(", "'Error requesting data from {0}: {1}'", ".", "format", "(", "endpoint", ",", "err", ")", ")", "from", "None" ]
Make a request against air-matters.com.
[ "Make", "a", "request", "against", "air", "-", "matters", ".", "com", "." ]
python
train
34.939394
boriel/zxbasic
zxbparser.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/zxbparser.py#L2343-L2346
def p_expr_BAND_expr(p): """ expr : expr BAND expr """ p[0] = make_binary(p.lineno(2), 'BAND', p[1], p[3], lambda x, y: x & y)
[ "def", "p_expr_BAND_expr", "(", "p", ")", ":", "p", "[", "0", "]", "=", "make_binary", "(", "p", ".", "lineno", "(", "2", ")", ",", "'BAND'", ",", "p", "[", "1", "]", ",", "p", "[", "3", "]", ",", "lambda", "x", ",", "y", ":", "x", "&", "y", ")" ]
expr : expr BAND expr
[ "expr", ":", "expr", "BAND", "expr" ]
python
train
33.75
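In PLY, the docstring of a p_* handler is the grammar production itself and p[1]/p[3] are the two expr operands around the BAND token; the lambda passed to make_binary is the constant-folding rule, which for literal operands just computes a bitwise AND:

    fold = lambda x, y: x & y
    print(fold(12, 10))  # 8, since 0b1100 & 0b1010 == 0b1000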
anomaly/prestans
prestans/types/model.py
https://github.com/anomaly/prestans/blob/13f5b2467bfd403dcd2d085f15cbf4644044f105/prestans/types/model.py#L294-L318
def attribute_rewrite_map(self): """ Example: long_name -> a_b :return: the rewrite map :rtype: dict """ rewrite_map = dict() token_rewrite_map = self.generate_attribute_token_rewrite_map() for attribute_name, type_instance in self.getmembers(): if isinstance(type_instance, DataType): attribute_tokens = attribute_name.split('_') rewritten_attribute_name = '' for token in attribute_tokens: rewritten_attribute_name += token_rewrite_map[token] + "_" # remove the trailing underscore rewritten_attribute_name = rewritten_attribute_name[:-1] rewrite_map[attribute_name] = rewritten_attribute_name return rewrite_map
[ "def", "attribute_rewrite_map", "(", "self", ")", ":", "rewrite_map", "=", "dict", "(", ")", "token_rewrite_map", "=", "self", ".", "generate_attribute_token_rewrite_map", "(", ")", "for", "attribute_name", ",", "type_instance", "in", "self", ".", "getmembers", "(", ")", ":", "if", "isinstance", "(", "type_instance", ",", "DataType", ")", ":", "attribute_tokens", "=", "attribute_name", ".", "split", "(", "'_'", ")", "rewritten_attribute_name", "=", "''", "for", "token", "in", "attribute_tokens", ":", "rewritten_attribute_name", "+=", "token_rewrite_map", "[", "token", "]", "+", "\"_\"", "# remove the trailing underscore", "rewritten_attribute_name", "=", "rewritten_attribute_name", "[", ":", "-", "1", "]", "rewrite_map", "[", "attribute_name", "]", "=", "rewritten_attribute_name", "return", "rewrite_map" ]
Example: long_name -> a_b :return: the rewrite map :rtype: dict
[ "Example", ":", "long_name", "-", ">", "a_b" ]
python
train
31.88
colab/colab
colab/accounts/views.py
https://github.com/colab/colab/blob/2ad099231e620bec647363b27d38006eca71e13b/colab/accounts/views.py#L151-L178
def delete(self, request, key): """Remove an email address, validated or not.""" request.DELETE = http.QueryDict(request.body) email_addr = request.DELETE.get('email') user_id = request.DELETE.get('user') if not email_addr: return http.HttpResponseBadRequest() try: email = EmailAddressValidation.objects.get(address=email_addr, user_id=user_id) except EmailAddressValidation.DoesNotExist: pass else: email.delete() return http.HttpResponse(status=204) try: email = EmailAddress.objects.get(address=email_addr, user_id=user_id) except EmailAddress.DoesNotExist: raise http.Http404 email.user = None email.save() return http.HttpResponse(status=204)
[ "def", "delete", "(", "self", ",", "request", ",", "key", ")", ":", "request", ".", "DELETE", "=", "http", ".", "QueryDict", "(", "request", ".", "body", ")", "email_addr", "=", "request", ".", "DELETE", ".", "get", "(", "'email'", ")", "user_id", "=", "request", ".", "DELETE", ".", "get", "(", "'user'", ")", "if", "not", "email_addr", ":", "return", "http", ".", "HttpResponseBadRequest", "(", ")", "try", ":", "email", "=", "EmailAddressValidation", ".", "objects", ".", "get", "(", "address", "=", "email_addr", ",", "user_id", "=", "user_id", ")", "except", "EmailAddressValidation", ".", "DoesNotExist", ":", "pass", "else", ":", "email", ".", "delete", "(", ")", "return", "http", ".", "HttpResponse", "(", "status", "=", "204", ")", "try", ":", "email", "=", "EmailAddress", ".", "objects", ".", "get", "(", "address", "=", "email_addr", ",", "user_id", "=", "user_id", ")", "except", "EmailAddress", ".", "DoesNotExist", ":", "raise", "http", ".", "Http404", "email", ".", "user", "=", "None", "email", ".", "save", "(", ")", "return", "http", ".", "HttpResponse", "(", "status", "=", "204", ")" ]
Remove an email address, validated or not.
[ "Remove", "an", "email", "address", "validated", "or", "not", "." ]
python
train
32.642857
matrix-org/matrix-python-sdk
matrix_client/api.py
https://github.com/matrix-org/matrix-python-sdk/blob/e734cce3ccd35f2d355c6a19a7a701033472498a/matrix_client/api.py#L223-L234
def join_room(self, room_id_or_alias): """Performs /join/$room_id Args: room_id_or_alias (str): The room ID or room alias to join. """ if not room_id_or_alias: raise MatrixError("No alias or room ID to join.") path = "/join/%s" % quote(room_id_or_alias) return self._send("POST", path)
[ "def", "join_room", "(", "self", ",", "room_id_or_alias", ")", ":", "if", "not", "room_id_or_alias", ":", "raise", "MatrixError", "(", "\"No alias or room ID to join.\"", ")", "path", "=", "\"/join/%s\"", "%", "quote", "(", "room_id_or_alias", ")", "return", "self", ".", "_send", "(", "\"POST\"", ",", "path", ")" ]
Performs /join/$room_id Args: room_id_or_alias (str): The room ID or room alias to join.
[ "Performs", "/", "join", "/", "$room_id" ]
python
train
29.083333
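join_room sits on the low-level MatrixHttpApi client; typical usage is shown below with a placeholder access token (the constructor keywords follow the SDK's commonly documented pattern, so treat them as an assumption):

    from matrix_client.api import MatrixHttpApi

    api = MatrixHttpApi("https://matrix.org", token="my_access_token")  # placeholder token
    response = api.join_room("#example-room:matrix.org")
    print(response)  # e.g. {"room_id": "!abc123:matrix.org"}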
qiniu/python-sdk
qiniu/auth.py
https://github.com/qiniu/python-sdk/blob/a69fbef4e3e6ea1ebe09f4610a5b18bb2c17de59/qiniu/auth.py#L107-L125
def private_download_url(self, url, expires=3600): """生成私有资源下载链接 Args: url: 私有空间资源的原始URL expires: 下载凭证有效期,默认为3600s Returns: 私有资源的下载链接 """ deadline = int(time.time()) + expires if '?' in url: url += '&' else: url += '?' url = '{0}e={1}'.format(url, str(deadline)) token = self.token(url) return '{0}&token={1}'.format(url, token)
[ "def", "private_download_url", "(", "self", ",", "url", ",", "expires", "=", "3600", ")", ":", "deadline", "=", "int", "(", "time", ".", "time", "(", ")", ")", "+", "expires", "if", "'?'", "in", "url", ":", "url", "+=", "'&'", "else", ":", "url", "+=", "'?'", "url", "=", "'{0}e={1}'", ".", "format", "(", "url", ",", "str", "(", "deadline", ")", ")", "token", "=", "self", ".", "token", "(", "url", ")", "return", "'{0}&token={1}'", ".", "format", "(", "url", ",", "token", ")" ]
Generate a download link for a private resource. Args: url: the original URL of the resource in the private bucket expires: validity period of the download credential, default 3600s Returns: the download link for the private resource
[ "生成私有资源下载链接" ]
python
train
24.052632
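The signing entry point is the SDK's Auth object; the usual flow is sketched below with placeholder credentials and a placeholder bucket domain:

    from qiniu import Auth

    q = Auth("your_access_key", "your_secret_key")  # placeholder credentials
    base_url = "http://your-bucket-domain.example.com/key.jpg"  # placeholder resource URL
    private_url = q.private_download_url(base_url, expires=3600)
    print(private_url)  # original URL with e=<deadline> and token=<signature> appended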
openxc/openxc-python
openxc/sources/trace.py
https://github.com/openxc/openxc-python/blob/4becb4a6310bd658c125195ef6ffea4deaf7d7e7/openxc/sources/trace.py#L54-L70
def read(self): """Read a line of data from the input source at a time.""" line = self.trace_file.readline() if line == '': if self.loop: self._reopen_file() else: self.trace_file.close() self.trace_file = None raise DataSourceError() message = JsonFormatter.deserialize(line) timestamp = message.get('timestamp', None) if self.realtime and timestamp is not None: self._store_timestamp(timestamp) self._wait(self.starting_time, self.first_timestamp, timestamp) return line + "\x00"
[ "def", "read", "(", "self", ")", ":", "line", "=", "self", ".", "trace_file", ".", "readline", "(", ")", "if", "line", "==", "''", ":", "if", "self", ".", "loop", ":", "self", ".", "_reopen_file", "(", ")", "else", ":", "self", ".", "trace_file", ".", "close", "(", ")", "self", ".", "trace_file", "=", "None", "raise", "DataSourceError", "(", ")", "message", "=", "JsonFormatter", ".", "deserialize", "(", "line", ")", "timestamp", "=", "message", ".", "get", "(", "'timestamp'", ",", "None", ")", "if", "self", ".", "realtime", "and", "timestamp", "is", "not", "None", ":", "self", ".", "_store_timestamp", "(", "timestamp", ")", "self", ".", "_wait", "(", "self", ".", "starting_time", ",", "self", ".", "first_timestamp", ",", "timestamp", ")", "return", "line", "+", "\"\\x00\"" ]
Read a line of data from the input source at a time.
[ "Read", "a", "line", "of", "data", "from", "the", "input", "source", "at", "a", "time", "." ]
python
train
37.294118
IdentityPython/fedoidcmsg
src/fedoidcmsg/signing_service.py
https://github.com/IdentityPython/fedoidcmsg/blob/d30107be02521fa6cdfe285da3b6b0cdd153c8cc/src/fedoidcmsg/signing_service.py#L265-L275
def create(self, req, **kwargs): """ Uses POST to send a first metadata statement signing request to a signing service. :param req: The metadata statement that the entity wants signed :return: returns a dictionary with 'sms' and 'loc' as keys. """ response = requests.post(self.url, json=req, **self.req_args()) return self.parse_response(response)
[ "def", "create", "(", "self", ",", "req", ",", "*", "*", "kwargs", ")", ":", "response", "=", "requests", ".", "post", "(", "self", ".", "url", ",", "json", "=", "req", ",", "*", "*", "self", ".", "req_args", "(", ")", ")", "return", "self", ".", "parse_response", "(", "response", ")" ]
Uses POST to send a first metadata statement signing request to a signing service. :param req: The metadata statement that the entity wants signed :return: returns a dictionary with 'sms' and 'loc' as keys.
[ "Uses", "POST", "to", "send", "a", "first", "metadata", "statement", "signing", "request", "to", "a", "signing", "service", "." ]
python
test
36.727273
pteichman/cobe
cobe/brain.py
https://github.com/pteichman/cobe/blob/b0dc2a707035035b9a689105c8f833894fb59eb7/cobe/brain.py#L154-L165
def _to_graph(self, contexts): """This is an iterator that returns each edge of our graph with its two nodes""" prev = None for context in contexts: if prev is None: prev = context continue yield prev[0], context[1], context[0] prev = context
[ "def", "_to_graph", "(", "self", ",", "contexts", ")", ":", "prev", "=", "None", "for", "context", "in", "contexts", ":", "if", "prev", "is", "None", ":", "prev", "=", "context", "continue", "yield", "prev", "[", "0", "]", ",", "context", "[", "1", "]", ",", "context", "[", "0", "]", "prev", "=", "context" ]
This is an iterator that returns each edge of our graph with its two nodes
[ "This", "is", "an", "iterator", "that", "returns", "each", "edge", "of", "our", "graph", "with", "its", "two", "nodes" ]
python
train
27.083333
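The generator slides over consecutive context pairs and yields (left, right, middle) triples; rerunning the same logic standalone on toy data makes the windowing explicit:

    def to_graph(contexts):
        # Same sliding-window logic as cobe's _to_graph, minus the class wrapper.
        prev = None
        for context in contexts:
            if prev is None:
                prev = context
                continue
            yield prev[0], context[1], context[0]
            prev = context

    print(list(to_graph([(1, 2), (2, 3), (3, 4)])))
    # [(1, 3, 2), (2, 4, 3)]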
all-umass/graphs
graphs/base/base.py
https://github.com/all-umass/graphs/blob/4fbeb025dfe33340335f34300f58dd3809228822/graphs/base/base.py#L139-L148
def to_igraph(self, weighted=None): '''Converts this Graph object to an igraph-compatible object. Requires the python-igraph library.''' # Import here to avoid ImportErrors when igraph isn't available. import igraph ig = igraph.Graph(n=self.num_vertices(), edges=self.pairs().tolist(), directed=self.is_directed()) if weighted is not False and self.is_weighted(): ig.es['weight'] = self.edge_weights() return ig
[ "def", "to_igraph", "(", "self", ",", "weighted", "=", "None", ")", ":", "# Import here to avoid ImportErrors when igraph isn't available.", "import", "igraph", "ig", "=", "igraph", ".", "Graph", "(", "n", "=", "self", ".", "num_vertices", "(", ")", ",", "edges", "=", "self", ".", "pairs", "(", ")", ".", "tolist", "(", ")", ",", "directed", "=", "self", ".", "is_directed", "(", ")", ")", "if", "weighted", "is", "not", "False", "and", "self", ".", "is_weighted", "(", ")", ":", "ig", ".", "es", "[", "'weight'", "]", "=", "self", ".", "edge_weights", "(", ")", "return", "ig" ]
Converts this Graph object to an igraph-compatible object. Requires the python-igraph library.
[ "Converts", "this", "Graph", "object", "to", "an", "igraph", "-", "compatible", "object", ".", "Requires", "the", "python", "-", "igraph", "library", "." ]
python
train
45.8
fabiobatalha/crossrefapi
crossref/restful.py
https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L901-L959
def doi(self, doi, only_message=True): """ This method retrieve the DOI metadata related to a given DOI number. args: Crossref DOI id (String) return: JSON Example: >>> from crossref.restful import Works >>> works = Works() >>> works.doi('10.1590/S0004-28032013005000001') {'is-referenced-by-count': 6, 'reference-count': 216, 'DOI': '10.1590/s0004-28032013005000001', 'subtitle': [], 'issued': {'date-parts': [[2013, 4, 19]]}, 'source': 'Crossref', 'short-container-title': ['Arq. Gastroenterol.'], 'references-count': 216, 'short-title': [], 'deposited': {'timestamp': 1495911725000, 'date-time': '2017-05-27T19:02:05Z', 'date-parts': [[2017, 5, 27]]}, 'ISSN': ['0004-2803'], 'type': 'journal-article', 'URL': 'http://dx.doi.org/10.1590/s0004-28032013005000001', 'indexed': {'timestamp': 1496034748592, 'date-time': '2017-05-29T05:12:28Z', 'date-parts': [[2017, 5, 29]]}, 'content-domain': {'crossmark-restriction': False, 'domain': []}, 'created': {'timestamp': 1374613284000, 'date-time': '2013-07-23T21:01:24Z', 'date-parts': [[2013, 7, 23]]}, 'issn-type': [{'value': '0004-2803', 'type': 'electronic'}], 'page': '81-96', 'volume': '50', 'original-title': [], 'subject': ['Gastroenterology'], 'relation': {}, 'container-title': ['Arquivos de Gastroenterologia'], 'member': '530', 'prefix': '10.1590', 'published-print': {'date-parts': [[2013, 4, 19]]}, 'title': ['3rd BRAZILIAN CONSENSUS ON Helicobacter pylori'], 'publisher': 'FapUNIFESP (SciELO)', 'alternative-id': ['S0004-28032013000200081'], 'abstract': '<jats:p>Significant abstract data..... .</jats:p>', 'author': [{'affiliation': [{'name': 'Universidade Federal de Minas Gerais, BRAZIL'}], 'family': 'Coelho', 'given': 'Luiz Gonzaga'}, {'affiliation': [ {'name': 'Universidade Federal do Rio Grande do Sul, Brazil'}], 'family': 'Maguinilk', 'given': 'Ismael'}, {'affiliation': [ {'name': 'Presidente de Honra do Núcleo Brasileiro para Estudo do Helicobacter, Brazil'}], 'family': 'Zaterka', 'given': 'Schlioma'}, {'affiliation': [ {'name': 'Universidade Federal do Piauí, Brasil'}], 'family': 'Parente', 'given': 'José Miguel'}, {'affiliation': [{'name': 'Universidade Federal de Minas Gerais, BRAZIL'}], 'family': 'Passos', 'given': 'Maria do Carmo Friche'}, {'affiliation': [ {'name': 'Universidade de São Paulo, Brasil'}], 'family': 'Moraes-Filho', 'given': 'Joaquim Prado P.'}], 'score': 1.0, 'issue': '2'} """ request_url = build_url_endpoint( '/'.join([self.ENDPOINT, doi]) ) request_params = {} result = self.do_http_request( 'get', request_url, data=request_params, custom_header=str(self.etiquette) ) if result.status_code == 404: return result = result.json() return result['message'] if only_message is True else result
[ "def", "doi", "(", "self", ",", "doi", ",", "only_message", "=", "True", ")", ":", "request_url", "=", "build_url_endpoint", "(", "'/'", ".", "join", "(", "[", "self", ".", "ENDPOINT", ",", "doi", "]", ")", ")", "request_params", "=", "{", "}", "result", "=", "self", ".", "do_http_request", "(", "'get'", ",", "request_url", ",", "data", "=", "request_params", ",", "custom_header", "=", "str", "(", "self", ".", "etiquette", ")", ")", "if", "result", ".", "status_code", "==", "404", ":", "return", "result", "=", "result", ".", "json", "(", ")", "return", "result", "[", "'message'", "]", "if", "only_message", "is", "True", "else", "result" ]
This method retrieve the DOI metadata related to a given DOI number. args: Crossref DOI id (String) return: JSON Example: >>> from crossref.restful import Works >>> works = Works() >>> works.doi('10.1590/S0004-28032013005000001') {'is-referenced-by-count': 6, 'reference-count': 216, 'DOI': '10.1590/s0004-28032013005000001', 'subtitle': [], 'issued': {'date-parts': [[2013, 4, 19]]}, 'source': 'Crossref', 'short-container-title': ['Arq. Gastroenterol.'], 'references-count': 216, 'short-title': [], 'deposited': {'timestamp': 1495911725000, 'date-time': '2017-05-27T19:02:05Z', 'date-parts': [[2017, 5, 27]]}, 'ISSN': ['0004-2803'], 'type': 'journal-article', 'URL': 'http://dx.doi.org/10.1590/s0004-28032013005000001', 'indexed': {'timestamp': 1496034748592, 'date-time': '2017-05-29T05:12:28Z', 'date-parts': [[2017, 5, 29]]}, 'content-domain': {'crossmark-restriction': False, 'domain': []}, 'created': {'timestamp': 1374613284000, 'date-time': '2013-07-23T21:01:24Z', 'date-parts': [[2013, 7, 23]]}, 'issn-type': [{'value': '0004-2803', 'type': 'electronic'}], 'page': '81-96', 'volume': '50', 'original-title': [], 'subject': ['Gastroenterology'], 'relation': {}, 'container-title': ['Arquivos de Gastroenterologia'], 'member': '530', 'prefix': '10.1590', 'published-print': {'date-parts': [[2013, 4, 19]]}, 'title': ['3rd BRAZILIAN CONSENSUS ON Helicobacter pylori'], 'publisher': 'FapUNIFESP (SciELO)', 'alternative-id': ['S0004-28032013000200081'], 'abstract': '<jats:p>Significant abstract data..... .</jats:p>', 'author': [{'affiliation': [{'name': 'Universidade Federal de Minas Gerais, BRAZIL'}], 'family': 'Coelho', 'given': 'Luiz Gonzaga'}, {'affiliation': [ {'name': 'Universidade Federal do Rio Grande do Sul, Brazil'}], 'family': 'Maguinilk', 'given': 'Ismael'}, {'affiliation': [ {'name': 'Presidente de Honra do Núcleo Brasileiro para Estudo do Helicobacter, Brazil'}], 'family': 'Zaterka', 'given': 'Schlioma'}, {'affiliation': [ {'name': 'Universidade Federal do Piauí, Brasil'}], 'family': 'Parente', 'given': 'José Miguel'}, {'affiliation': [{'name': 'Universidade Federal de Minas Gerais, BRAZIL'}], 'family': 'Passos', 'given': 'Maria do Carmo Friche'}, {'affiliation': [ {'name': 'Universidade de São Paulo, Brasil'}], 'family': 'Moraes-Filho', 'given': 'Joaquim Prado P.'}], 'score': 1.0, 'issue': '2'}
[ "This", "method", "retrieve", "the", "DOI", "metadata", "related", "to", "a", "given", "DOI", "number", "." ]
python
train
54.271186
inasafe/inasafe
safe/gis/raster/zonal_statistics.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gis/raster/zonal_statistics.py#L37-L154
def zonal_stats(raster, vector): """Reclassify a continuous raster layer. Issue https://github.com/inasafe/inasafe/issues/3190 The algorithm will take care about projections. We don't want to reproject the raster layer. So if CRS are different, we reproject the vector layer and then we do a lookup from the reprojected layer to the original vector layer. :param raster: The raster layer. :type raster: QgsRasterLayer :param vector: The vector layer. :type vector: QgsVectorLayer :return: The output of the zonal stats. :rtype: QgsVectorLayer .. versionadded:: 4.0 """ output_layer_name = zonal_stats_steps['output_layer_name'] exposure = raster.keywords['exposure'] if raster.crs().authid() != vector.crs().authid(): layer = reproject(vector, raster.crs()) # We prepare the copy output_layer = create_memory_layer( output_layer_name, vector.geometryType(), vector.crs(), vector.fields() ) copy_layer(vector, output_layer) else: layer = create_memory_layer( output_layer_name, vector.geometryType(), vector.crs(), vector.fields() ) copy_layer(vector, layer) input_band = layer.keywords.get('active_band', 1) analysis = QgsZonalStatistics( layer, raster, 'exposure_', input_band, QgsZonalStatistics.Sum) result = analysis.calculateStatistics(None) LOGGER.debug(tr('Zonal stats on %s : %s' % (raster.source(), result))) output_field = exposure_count_field['field_name'] % exposure if raster.crs().authid() != vector.crs().authid(): output_layer.startEditing() field = create_field_from_definition( exposure_count_field, exposure) output_layer.addAttribute(field) new_index = output_layer.fields().lookupField(field.name()) old_index = layer.fields().lookupField('exposure_sum') for feature_input, feature_output in zip( layer.getFeatures(), output_layer.getFeatures()): output_layer.changeAttributeValue( feature_input.id(), new_index, feature_input[old_index]) output_layer.commitChanges() layer = output_layer else: fields_to_rename = { 'exposure_sum': output_field } if qgis_version() >= 21600: rename_fields(layer, fields_to_rename) else: copy_fields(layer, fields_to_rename) remove_fields(layer, list(fields_to_rename.keys())) layer.commitChanges() # The zonal stats is producing some None values. We need to fill these # with 0. See issue : #3778 # We should start a new editing session as previous fields need to be # committed first. layer.startEditing() request = QgsFeatureRequest() expression = '\"%s\" is None' % output_field request.setFilterExpression(expression) request.setFlags(QgsFeatureRequest.NoGeometry) index = layer.fields().lookupField(output_field) for feature in layer.getFeatures(): if feature[output_field] is None: layer.changeAttributeValue(feature.id(), index, 0) layer.commitChanges() layer.keywords = raster.keywords.copy() layer.keywords['inasafe_fields'] = vector.keywords['inasafe_fields'].copy() layer.keywords['inasafe_default_values'] = ( raster.keywords['inasafe_default_values'].copy()) key = exposure_count_field['key'] % raster.keywords['exposure'] # Special case here, one field is the exposure count and the total. 
layer.keywords['inasafe_fields'][key] = output_field layer.keywords['inasafe_fields'][total_field['key']] = output_field layer.keywords['exposure_keywords'] = raster.keywords.copy() layer.keywords['hazard_keywords'] = vector.keywords[ 'hazard_keywords'].copy() layer.keywords['aggregation_keywords'] = ( vector.keywords['aggregation_keywords']) layer.keywords['layer_purpose'] = ( layer_purpose_aggregate_hazard_impacted['key']) layer.keywords['title'] = output_layer_name check_layer(layer) return layer
[ "def", "zonal_stats", "(", "raster", ",", "vector", ")", ":", "output_layer_name", "=", "zonal_stats_steps", "[", "'output_layer_name'", "]", "exposure", "=", "raster", ".", "keywords", "[", "'exposure'", "]", "if", "raster", ".", "crs", "(", ")", ".", "authid", "(", ")", "!=", "vector", ".", "crs", "(", ")", ".", "authid", "(", ")", ":", "layer", "=", "reproject", "(", "vector", ",", "raster", ".", "crs", "(", ")", ")", "# We prepare the copy", "output_layer", "=", "create_memory_layer", "(", "output_layer_name", ",", "vector", ".", "geometryType", "(", ")", ",", "vector", ".", "crs", "(", ")", ",", "vector", ".", "fields", "(", ")", ")", "copy_layer", "(", "vector", ",", "output_layer", ")", "else", ":", "layer", "=", "create_memory_layer", "(", "output_layer_name", ",", "vector", ".", "geometryType", "(", ")", ",", "vector", ".", "crs", "(", ")", ",", "vector", ".", "fields", "(", ")", ")", "copy_layer", "(", "vector", ",", "layer", ")", "input_band", "=", "layer", ".", "keywords", ".", "get", "(", "'active_band'", ",", "1", ")", "analysis", "=", "QgsZonalStatistics", "(", "layer", ",", "raster", ",", "'exposure_'", ",", "input_band", ",", "QgsZonalStatistics", ".", "Sum", ")", "result", "=", "analysis", ".", "calculateStatistics", "(", "None", ")", "LOGGER", ".", "debug", "(", "tr", "(", "'Zonal stats on %s : %s'", "%", "(", "raster", ".", "source", "(", ")", ",", "result", ")", ")", ")", "output_field", "=", "exposure_count_field", "[", "'field_name'", "]", "%", "exposure", "if", "raster", ".", "crs", "(", ")", ".", "authid", "(", ")", "!=", "vector", ".", "crs", "(", ")", ".", "authid", "(", ")", ":", "output_layer", ".", "startEditing", "(", ")", "field", "=", "create_field_from_definition", "(", "exposure_count_field", ",", "exposure", ")", "output_layer", ".", "addAttribute", "(", "field", ")", "new_index", "=", "output_layer", ".", "fields", "(", ")", ".", "lookupField", "(", "field", ".", "name", "(", ")", ")", "old_index", "=", "layer", ".", "fields", "(", ")", ".", "lookupField", "(", "'exposure_sum'", ")", "for", "feature_input", ",", "feature_output", "in", "zip", "(", "layer", ".", "getFeatures", "(", ")", ",", "output_layer", ".", "getFeatures", "(", ")", ")", ":", "output_layer", ".", "changeAttributeValue", "(", "feature_input", ".", "id", "(", ")", ",", "new_index", ",", "feature_input", "[", "old_index", "]", ")", "output_layer", ".", "commitChanges", "(", ")", "layer", "=", "output_layer", "else", ":", "fields_to_rename", "=", "{", "'exposure_sum'", ":", "output_field", "}", "if", "qgis_version", "(", ")", ">=", "21600", ":", "rename_fields", "(", "layer", ",", "fields_to_rename", ")", "else", ":", "copy_fields", "(", "layer", ",", "fields_to_rename", ")", "remove_fields", "(", "layer", ",", "list", "(", "fields_to_rename", ".", "keys", "(", ")", ")", ")", "layer", ".", "commitChanges", "(", ")", "# The zonal stats is producing some None values. We need to fill these", "# with 0. 
See issue : #3778", "# We should start a new editing session as previous fields need to be", "# committed first.", "layer", ".", "startEditing", "(", ")", "request", "=", "QgsFeatureRequest", "(", ")", "expression", "=", "'\\\"%s\\\" is None'", "%", "output_field", "request", ".", "setFilterExpression", "(", "expression", ")", "request", ".", "setFlags", "(", "QgsFeatureRequest", ".", "NoGeometry", ")", "index", "=", "layer", ".", "fields", "(", ")", ".", "lookupField", "(", "output_field", ")", "for", "feature", "in", "layer", ".", "getFeatures", "(", ")", ":", "if", "feature", "[", "output_field", "]", "is", "None", ":", "layer", ".", "changeAttributeValue", "(", "feature", ".", "id", "(", ")", ",", "index", ",", "0", ")", "layer", ".", "commitChanges", "(", ")", "layer", ".", "keywords", "=", "raster", ".", "keywords", ".", "copy", "(", ")", "layer", ".", "keywords", "[", "'inasafe_fields'", "]", "=", "vector", ".", "keywords", "[", "'inasafe_fields'", "]", ".", "copy", "(", ")", "layer", ".", "keywords", "[", "'inasafe_default_values'", "]", "=", "(", "raster", ".", "keywords", "[", "'inasafe_default_values'", "]", ".", "copy", "(", ")", ")", "key", "=", "exposure_count_field", "[", "'key'", "]", "%", "raster", ".", "keywords", "[", "'exposure'", "]", "# Special case here, one field is the exposure count and the total.", "layer", ".", "keywords", "[", "'inasafe_fields'", "]", "[", "key", "]", "=", "output_field", "layer", ".", "keywords", "[", "'inasafe_fields'", "]", "[", "total_field", "[", "'key'", "]", "]", "=", "output_field", "layer", ".", "keywords", "[", "'exposure_keywords'", "]", "=", "raster", ".", "keywords", ".", "copy", "(", ")", "layer", ".", "keywords", "[", "'hazard_keywords'", "]", "=", "vector", ".", "keywords", "[", "'hazard_keywords'", "]", ".", "copy", "(", ")", "layer", ".", "keywords", "[", "'aggregation_keywords'", "]", "=", "(", "vector", ".", "keywords", "[", "'aggregation_keywords'", "]", ")", "layer", ".", "keywords", "[", "'layer_purpose'", "]", "=", "(", "layer_purpose_aggregate_hazard_impacted", "[", "'key'", "]", ")", "layer", ".", "keywords", "[", "'title'", "]", "=", "output_layer_name", "check_layer", "(", "layer", ")", "return", "layer" ]
Reclassify a continuous raster layer. Issue https://github.com/inasafe/inasafe/issues/3190 The algorithm will take care about projections. We don't want to reproject the raster layer. So if CRS are different, we reproject the vector layer and then we do a lookup from the reprojected layer to the original vector layer. :param raster: The raster layer. :type raster: QgsRasterLayer :param vector: The vector layer. :type vector: QgsVectorLayer :return: The output of the zonal stats. :rtype: QgsVectorLayer .. versionadded:: 4.0
[ "Reclassify", "a", "continuous", "raster", "layer", "." ]
python
train
34.974576
senaite/senaite.core
bika/lims/workflow/analysis/guards.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/workflow/analysis/guards.py#L104-L145
def guard_submit(analysis): """Return whether the transition "submit" can be performed or not """ # Cannot submit without a result if not analysis.getResult(): return False # Cannot submit with interims without value for interim in analysis.getInterimFields(): if not interim.get("value", ""): return False # Cannot submit if attachment not set, but is required if not analysis.getAttachment(): if analysis.getAttachmentOption() == 'r': return False # Check if can submit based on the Analysis Request state if IRequestAnalysis.providedBy(analysis): point_of_capture = analysis.getPointOfCapture() # Cannot submit if the Sample has not been received if point_of_capture == "lab" and not analysis.isSampleReceived(): return False # Cannot submit if the Sample has not been sampled if point_of_capture == "field" and not analysis.isSampleSampled(): return False # Check if the current user can submit if is not assigned if not analysis.bika_setup.getAllowToSubmitNotAssigned(): if not user_has_super_roles(): # Cannot submit if unassigned if not analysis.getAnalyst(): return False # Cannot submit if assigned analyst is not the current user if analysis.getAnalyst() != api.get_current_user().getId(): return False # Cannot submit unless all dependencies are submitted or can be submitted for dependency in analysis.getDependencies(): if not is_submitted_or_submittable(dependency): return False return True
[ "def", "guard_submit", "(", "analysis", ")", ":", "# Cannot submit without a result", "if", "not", "analysis", ".", "getResult", "(", ")", ":", "return", "False", "# Cannot submit with interims without value", "for", "interim", "in", "analysis", ".", "getInterimFields", "(", ")", ":", "if", "not", "interim", ".", "get", "(", "\"value\"", ",", "\"\"", ")", ":", "return", "False", "# Cannot submit if attachment not set, but is required", "if", "not", "analysis", ".", "getAttachment", "(", ")", ":", "if", "analysis", ".", "getAttachmentOption", "(", ")", "==", "'r'", ":", "return", "False", "# Check if can submit based on the Analysis Request state", "if", "IRequestAnalysis", ".", "providedBy", "(", "analysis", ")", ":", "point_of_capture", "=", "analysis", ".", "getPointOfCapture", "(", ")", "# Cannot submit if the Sample has not been received", "if", "point_of_capture", "==", "\"lab\"", "and", "not", "analysis", ".", "isSampleReceived", "(", ")", ":", "return", "False", "# Cannot submit if the Sample has not been sampled", "if", "point_of_capture", "==", "\"field\"", "and", "not", "analysis", ".", "isSampleSampled", "(", ")", ":", "return", "False", "# Check if the current user can submit if is not assigned", "if", "not", "analysis", ".", "bika_setup", ".", "getAllowToSubmitNotAssigned", "(", ")", ":", "if", "not", "user_has_super_roles", "(", ")", ":", "# Cannot submit if unassigned", "if", "not", "analysis", ".", "getAnalyst", "(", ")", ":", "return", "False", "# Cannot submit if assigned analyst is not the current user", "if", "analysis", ".", "getAnalyst", "(", ")", "!=", "api", ".", "get_current_user", "(", ")", ".", "getId", "(", ")", ":", "return", "False", "# Cannot submit unless all dependencies are submitted or can be submitted", "for", "dependency", "in", "analysis", ".", "getDependencies", "(", ")", ":", "if", "not", "is_submitted_or_submittable", "(", "dependency", ")", ":", "return", "False", "return", "True" ]
Return whether the transition "submit" can be performed or not
[ "Return", "whether", "the", "transition", "submit", "can", "be", "performed", "or", "not" ]
python
train
39.261905
flo-compbio/genometools
genometools/ncbi/geo/generate_sample_sheet.py
https://github.com/flo-compbio/genometools/blob/dd962bb26d60a0f14ca14d8c9a4dd75768962c7d/genometools/ncbi/geo/generate_sample_sheet.py#L38-L73
def get_argument_parser(): """Create the argument parser for the script. Parameters ---------- Returns ------- `argparse.ArgumentParser` The argument parser. """ desc = 'Generate a sample sheet based on a GEO series matrix.' parser = cli.get_argument_parser(desc=desc) g = parser.add_argument_group('Input and output files') g.add_argument( '-s', '--series-matrix-file', type=cli.str_type, required=True, metavar=cli.file_mv, help='The GEO series matrix file.' ) g.add_argument( '-o', '--output-file', type=cli.str_type, required=True, metavar=cli.file_mv, help='The output file.' ) g.add_argument( '-e', '--encoding', type=cli.str_type, metavar=cli.str_mv, default='UTF-8', help='The encoding of the series matrix file. [UTF-8]' ) cli.add_reporting_args(parser) return parser
[ "def", "get_argument_parser", "(", ")", ":", "desc", "=", "'Generate a sample sheet based on a GEO series matrix.'", "parser", "=", "cli", ".", "get_argument_parser", "(", "desc", "=", "desc", ")", "g", "=", "parser", ".", "add_argument_group", "(", "'Input and output files'", ")", "g", ".", "add_argument", "(", "'-s'", ",", "'--series-matrix-file'", ",", "type", "=", "cli", ".", "str_type", ",", "required", "=", "True", ",", "metavar", "=", "cli", ".", "file_mv", ",", "help", "=", "'The GEO series matrix file.'", ")", "g", ".", "add_argument", "(", "'-o'", ",", "'--output-file'", ",", "type", "=", "cli", ".", "str_type", ",", "required", "=", "True", ",", "metavar", "=", "cli", ".", "file_mv", ",", "help", "=", "'The output file.'", ")", "g", ".", "add_argument", "(", "'-e'", ",", "'--encoding'", ",", "type", "=", "cli", ".", "str_type", ",", "metavar", "=", "cli", ".", "str_mv", ",", "default", "=", "'UTF-8'", ",", "help", "=", "'The encoding of the series matrix file. [UTF-8]'", ")", "cli", ".", "add_reporting_args", "(", "parser", ")", "return", "parser" ]
Create the argument parser for the script. Parameters ---------- Returns ------- `argparse.ArgumentParser` The argument parser.
[ "Create", "the", "argument", "parser", "for", "the", "script", "." ]
python
train
24.805556
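
A quick illustration of the interface this record's parser exposes. The project's own `cli` helpers are not shown here, so the sketch below rebuilds the same three options with plain `argparse`; the sample file names are invented.

import argparse

def build_equivalent_parser():
    # Plain-argparse stand-in for genometools' cli.get_argument_parser()
    parser = argparse.ArgumentParser(
        description='Generate a sample sheet based on a GEO series matrix.')
    g = parser.add_argument_group('Input and output files')
    g.add_argument('-s', '--series-matrix-file', required=True, metavar='FILE',
                   help='The GEO series matrix file.')
    g.add_argument('-o', '--output-file', required=True, metavar='FILE',
                   help='The output file.')
    g.add_argument('-e', '--encoding', default='UTF-8', metavar='STR',
                   help='The encoding of the series matrix file. [UTF-8]')
    return parser

args = build_equivalent_parser().parse_args(
    ['-s', 'GSE00001_series_matrix.txt', '-o', 'sample_sheet.tsv'])
print(args.series_matrix_file, args.output_file, args.encoding)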
cloud9ers/gurumate
environment/lib/python2.7/site-packages/nose/twistedtools.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/nose/twistedtools.py#L86-L172
def deferred(timeout=None): """ By wrapping a test function with this decorator, you can return a twisted Deferred and the test will wait for the deferred to be triggered. The whole test function will run inside the Twisted event loop. The optional timeout parameter specifies the maximum duration of the test. The difference with timed() is that timed() will still wait for the test to end, while deferred() will stop the test when its timeout has expired. The latter is more desireable when dealing with network tests, because the result may actually never arrive. If the callback is triggered, the test has passed. If the errback is triggered or the timeout expires, the test has failed. Example:: @deferred(timeout=5.0) def test_resolve(): return reactor.resolve("www.python.org") Attention! If you combine this decorator with other decorators (like "raises"), deferred() must be called *first*! In other words, this is good:: @raises(DNSLookupError) @deferred() def test_error(): return reactor.resolve("xxxjhjhj.biz") and this is bad:: @deferred() @raises(DNSLookupError) def test_error(): return reactor.resolve("xxxjhjhj.biz") """ reactor, reactor_thread = threaded_reactor() if reactor is None: raise ImportError("twisted is not available or could not be imported") # Check for common syntax mistake # (otherwise, tests can be silently ignored # if one writes "@deferred" instead of "@deferred()") try: timeout is None or timeout + 0 except TypeError: raise TypeError("'timeout' argument must be a number or None") def decorate(func): def wrapper(*args, **kargs): q = Queue() def callback(value): q.put(None) def errback(failure): # Retrieve and save full exception info try: failure.raiseException() except: q.put(sys.exc_info()) def g(): try: d = func(*args, **kargs) try: d.addCallbacks(callback, errback) # Check for a common mistake and display a nice error # message except AttributeError: raise TypeError("you must return a twisted Deferred " "from your test case!") # Catch exceptions raised in the test body (from the # Twisted thread) except: q.put(sys.exc_info()) reactor.callFromThread(g) try: error = q.get(timeout=timeout) except Empty: raise TimeExpired("timeout expired before end of test (%f s.)" % timeout) # Re-raise all exceptions if error is not None: exc_type, exc_value, tb = error raise exc_type, exc_value, tb wrapper = make_decorator(func)(wrapper) return wrapper return decorate
[ "def", "deferred", "(", "timeout", "=", "None", ")", ":", "reactor", ",", "reactor_thread", "=", "threaded_reactor", "(", ")", "if", "reactor", "is", "None", ":", "raise", "ImportError", "(", "\"twisted is not available or could not be imported\"", ")", "# Check for common syntax mistake", "# (otherwise, tests can be silently ignored", "# if one writes \"@deferred\" instead of \"@deferred()\")", "try", ":", "timeout", "is", "None", "or", "timeout", "+", "0", "except", "TypeError", ":", "raise", "TypeError", "(", "\"'timeout' argument must be a number or None\"", ")", "def", "decorate", "(", "func", ")", ":", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kargs", ")", ":", "q", "=", "Queue", "(", ")", "def", "callback", "(", "value", ")", ":", "q", ".", "put", "(", "None", ")", "def", "errback", "(", "failure", ")", ":", "# Retrieve and save full exception info", "try", ":", "failure", ".", "raiseException", "(", ")", "except", ":", "q", ".", "put", "(", "sys", ".", "exc_info", "(", ")", ")", "def", "g", "(", ")", ":", "try", ":", "d", "=", "func", "(", "*", "args", ",", "*", "*", "kargs", ")", "try", ":", "d", ".", "addCallbacks", "(", "callback", ",", "errback", ")", "# Check for a common mistake and display a nice error", "# message", "except", "AttributeError", ":", "raise", "TypeError", "(", "\"you must return a twisted Deferred \"", "\"from your test case!\"", ")", "# Catch exceptions raised in the test body (from the", "# Twisted thread)", "except", ":", "q", ".", "put", "(", "sys", ".", "exc_info", "(", ")", ")", "reactor", ".", "callFromThread", "(", "g", ")", "try", ":", "error", "=", "q", ".", "get", "(", "timeout", "=", "timeout", ")", "except", "Empty", ":", "raise", "TimeExpired", "(", "\"timeout expired before end of test (%f s.)\"", "%", "timeout", ")", "# Re-raise all exceptions", "if", "error", "is", "not", "None", ":", "exc_type", ",", "exc_value", ",", "tb", "=", "error", "raise", "exc_type", ",", "exc_value", ",", "tb", "wrapper", "=", "make_decorator", "(", "func", ")", "(", "wrapper", ")", "return", "wrapper", "return", "decorate" ]
By wrapping a test function with this decorator, you can return a twisted Deferred and the test will wait for the deferred to be triggered. The whole test function will run inside the Twisted event loop. The optional timeout parameter specifies the maximum duration of the test. The difference with timed() is that timed() will still wait for the test to end, while deferred() will stop the test when its timeout has expired. The latter is more desireable when dealing with network tests, because the result may actually never arrive. If the callback is triggered, the test has passed. If the errback is triggered or the timeout expires, the test has failed. Example:: @deferred(timeout=5.0) def test_resolve(): return reactor.resolve("www.python.org") Attention! If you combine this decorator with other decorators (like "raises"), deferred() must be called *first*! In other words, this is good:: @raises(DNSLookupError) @deferred() def test_error(): return reactor.resolve("xxxjhjhj.biz") and this is bad:: @deferred() @raises(DNSLookupError) def test_error(): return reactor.resolve("xxxjhjhj.biz")
[ "By", "wrapping", "a", "test", "function", "with", "this", "decorator", "you", "can", "return", "a", "twisted", "Deferred", "and", "the", "test", "will", "wait", "for", "the", "deferred", "to", "be", "triggered", ".", "The", "whole", "test", "function", "will", "run", "inside", "the", "Twisted", "event", "loop", "." ]
python
test
36.689655
bspaans/python-mingus
mingus/core/intervals.py
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/core/intervals.py#L292-L302
def invert(interval): """Invert an interval. Example: >>> invert(['C', 'E']) ['E', 'C'] """ interval.reverse() res = list(interval) interval.reverse() return res
[ "def", "invert", "(", "interval", ")", ":", "interval", ".", "reverse", "(", ")", "res", "=", "list", "(", "interval", ")", "interval", ".", "reverse", "(", ")", "return", "res" ]
Invert an interval. Example: >>> invert(['C', 'E']) ['E', 'C']
[ "Invert", "an", "interval", "." ]
python
train
17.090909
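
A minimal usage sketch for the record above, assuming the `mingus` package is installed; note that the function returns a reversed copy and leaves the input list untouched.

from mingus.core import intervals

pair = ['C', 'E']
print(intervals.invert(pair))  # ['E', 'C'], as in the docstring example
print(pair)                    # ['C', 'E'] -- the input list is restored afterwards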
PyThaiNLP/pythainlp
pythainlp/tag/__init__.py
https://github.com/PyThaiNLP/pythainlp/blob/e9a300b8a99dfd1a67a955e7c06f62e4afe0fbca/pythainlp/tag/__init__.py#L160-L180
def pos_tag_sents( sentences: List[List[str]], engine: str = "perceptron", corpus: str = "orchid" ) -> List[List[Tuple[str, str]]]: """ Part of Speech tagging Sentence function. :param list sentences: a list of lists of tokenized words :param str engine: * unigram - unigram tagger * perceptron - perceptron tagger (default) * artagger - RDR POS tagger :param str corpus: * orchid - annotated Thai academic articles (default) * orchid_ud - annotated Thai academic articles using Universal Dependencies Tags * pud - Parallel Universal Dependencies (PUD) treebanks :return: returns a list of labels regarding which part of speech it is """ if not sentences: return [] return [pos_tag(sent, engine=engine, corpus=corpus) for sent in sentences]
[ "def", "pos_tag_sents", "(", "sentences", ":", "List", "[", "List", "[", "str", "]", "]", ",", "engine", ":", "str", "=", "\"perceptron\"", ",", "corpus", ":", "str", "=", "\"orchid\"", ")", "->", "List", "[", "List", "[", "Tuple", "[", "str", ",", "str", "]", "]", "]", ":", "if", "not", "sentences", ":", "return", "[", "]", "return", "[", "pos_tag", "(", "sent", ",", "engine", "=", "engine", ",", "corpus", "=", "corpus", ")", "for", "sent", "in", "sentences", "]" ]
Part of Speech tagging Sentence function. :param list sentences: a list of lists of tokenized words :param str engine: * unigram - unigram tagger * perceptron - perceptron tagger (default) * artagger - RDR POS tagger :param str corpus: * orchid - annotated Thai academic articles (default) * orchid_ud - annotated Thai academic articles using Universal Dependencies Tags * pud - Parallel Universal Dependencies (PUD) treebanks :return: returns a list of labels regarding which part of speech it is
[ "Part", "of", "Speech", "tagging", "Sentence", "function", "." ]
python
train
38.952381
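
A usage sketch, assuming `pythainlp` is installed with its bundled tagger data; the exact tag labels depend on the corpus, so none are hard-coded below.

from pythainlp.tag import pos_tag_sents

sentences = [["ผม", "รัก", "คุณ"], ["ภาษา", "ไทย"]]
print(pos_tag_sents(sentences))  # default: perceptron tagger, orchid corpus
print(pos_tag_sents([]))         # [] -- empty input returns an empty list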
ellmetha/django-machina
machina/apps/forum_permission/handler.py
https://github.com/ellmetha/django-machina/blob/89ac083c1eaf1cfdeae6686ee094cc86362e8c69/machina/apps/forum_permission/handler.py#L225-L234
def can_unsubscribe_from_topic(self, topic, user): """ Given a topic, checks whether the user can remove it from their subscription list. """ # A user can unsubscribe from topics if they are authenticated and if they have the # permission to read the related forum. Of course a user can unsubscribe only if they are # already a subscriber of the considered topic. return ( user.is_authenticated and topic.has_subscriber(user) and self._perform_basic_permission_check(topic.forum, user, 'can_read_forum') )
[ "def", "can_unsubscribe_from_topic", "(", "self", ",", "topic", ",", "user", ")", ":", "# A user can unsubscribe from topics if they are authenticated and if they have the", "# permission to read the related forum. Of course a user can unsubscribe only if they are", "# already a subscriber of the considered topic.", "return", "(", "user", ".", "is_authenticated", "and", "topic", ".", "has_subscriber", "(", "user", ")", "and", "self", ".", "_perform_basic_permission_check", "(", "topic", ".", "forum", ",", "user", ",", "'can_read_forum'", ")", ")" ]
Given a topic, checks whether the user can remove it from their subscription list.
[ "Given", "a", "topic", "checks", "whether", "the", "user", "can", "remove", "it", "from", "their", "subscription", "list", "." ]
python
train
58.1
lsst-sqre/documenteer
documenteer/stackdocs/build.py
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/stackdocs/build.py#L159-L198
def discover_setup_packages(): """Summarize packages currently set up by EUPS, listing their set up directories and EUPS version names. Returns ------- packages : `dict` Dictionary with keys that are EUPS package names. Values are dictionaries with fields: - ``'dir'``: absolute directory path of the set up package. - ``'version'``: EUPS version string for package. Notes ----- This function imports the ``eups`` Python package, which is assumed to be available in the build environment. This function is designed to encapsulate all direct EUPS interactions needed by the stack documentation build process. """ logger = logging.getLogger(__name__) # Not a PyPI dependency; assumed to be available in the build environment. import eups eups_client = eups.Eups() products = eups_client.getSetupProducts() packages = {} for package in products: name = package.name info = { 'dir': package.dir, 'version': package.version } packages[name] = info logger.debug('Found setup package: {name} {version} {dir}'.format( name=name, **info)) return packages
[ "def", "discover_setup_packages", "(", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "# Not a PyPI dependency; assumed to be available in the build environment.", "import", "eups", "eups_client", "=", "eups", ".", "Eups", "(", ")", "products", "=", "eups_client", ".", "getSetupProducts", "(", ")", "packages", "=", "{", "}", "for", "package", "in", "products", ":", "name", "=", "package", ".", "name", "info", "=", "{", "'dir'", ":", "package", ".", "dir", ",", "'version'", ":", "package", ".", "version", "}", "packages", "[", "name", "]", "=", "info", "logger", ".", "debug", "(", "'Found setup package: {name} {version} {dir}'", ".", "format", "(", "name", "=", "name", ",", "*", "*", "info", ")", ")", "return", "packages" ]
Summarize packages currently set up by EUPS, listing their set up directories and EUPS version names. Returns ------- packages : `dict` Dictionary with keys that are EUPS package names. Values are dictionaries with fields: - ``'dir'``: absolute directory path of the set up package. - ``'version'``: EUPS version string for package. Notes ----- This function imports the ``eups`` Python package, which is assumed to be available in the build environment. This function is designed to encapsulate all direct EUPS interactions needed by the stack documentation build process.
[ "Summarize", "packages", "currently", "set", "up", "by", "EUPS", "listing", "their", "set", "up", "directories", "and", "EUPS", "version", "names", "." ]
python
train
29.85
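
A consumption sketch for the mapping this function returns. It is only meaningful inside an EUPS software-stack environment (for example an LSST stack shell) where the `eups` module and set-up products exist; outside that environment the call fails at `import eups`.

from documenteer.stackdocs.build import discover_setup_packages

packages = discover_setup_packages()
for name, info in sorted(packages.items()):
    # info carries the two documented fields: 'dir' and 'version'
    print('{name} {version} ({dir})'.format(name=name, **info))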
timgabets/bpc8583
examples/isoClient.py
https://github.com/timgabets/bpc8583/blob/1b8e95d73ad273ad9d11bff40d1af3f06f0f3503/examples/isoClient.py#L301-L314
def show_help(name): """ Show help and basic usage """ print('Usage: python3 {} [OPTIONS]... '.format(name)) print('ISO8583 message client') print(' -v, --verbose\t\tRun transactions verbosely') print(' -p, --port=[PORT]\t\tTCP port to connect to, 1337 by default') print(' -s, --server=[IP]\t\tIP of the ISO host to connect to, 127.0.0.1 by default') print(' -t, --terminal=[ID]\t\tTerminal ID (used in DE 41 ISO field, 10001337 by default)') print(' -m, --merchant=[ID]\t\tMerchant ID (used in DE 42 ISO field, 999999999999001 by default)') print(' -k, --terminal-key=[KEY]\t\tTerminal key (\'DEADBEEF DEADBEEF DEADBEEF DEADBEEF\' by default)') print(' -K, --master-key=[KEY]\t\Master key (\'ABABABAB CDCDCDCD EFEFEFEF AEAEAEAE\' by default)') print(' -f, --file=[file.xml]\t\tUse transaction data from the given XML-file')
[ "def", "show_help", "(", "name", ")", ":", "print", "(", "'Usage: python3 {} [OPTIONS]... '", ".", "format", "(", "name", ")", ")", "print", "(", "'ISO8583 message client'", ")", "print", "(", "' -v, --verbose\\t\\tRun transactions verbosely'", ")", "print", "(", "' -p, --port=[PORT]\\t\\tTCP port to connect to, 1337 by default'", ")", "print", "(", "' -s, --server=[IP]\\t\\tIP of the ISO host to connect to, 127.0.0.1 by default'", ")", "print", "(", "' -t, --terminal=[ID]\\t\\tTerminal ID (used in DE 41 ISO field, 10001337 by default)'", ")", "print", "(", "' -m, --merchant=[ID]\\t\\tMerchant ID (used in DE 42 ISO field, 999999999999001 by default)'", ")", "print", "(", "' -k, --terminal-key=[KEY]\\t\\tTerminal key (\\'DEADBEEF DEADBEEF DEADBEEF DEADBEEF\\' by default)'", ")", "print", "(", "' -K, --master-key=[KEY]\\t\\Master key (\\'ABABABAB CDCDCDCD EFEFEFEF AEAEAEAE\\' by default)'", ")", "print", "(", "' -f, --file=[file.xml]\\t\\tUse transaction data from the given XML-file'", ")" ]
Show help and basic usage
[ "Show", "help", "and", "basic", "usage" ]
python
train
62.285714
bcbio/bcbio-nextgen
bcbio/variation/population.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/population.py#L56-L71
def _back_compatible_gemini(conf_files, data): """Provide old install directory for configuration with GEMINI supplied tidy VCFs. Handles new style (bcbio installed) and old style (GEMINI installed) configuration and data locations. """ if vcfanno.is_human(data, builds=["37"]): for f in conf_files: if f and os.path.basename(f) == "gemini.conf" and os.path.exists(f): with open(f) as in_handle: for line in in_handle: if line.startswith("file"): fname = line.strip().split("=")[-1].replace('"', '').strip() if fname.find(".tidy.") > 0: return install.get_gemini_dir(data) return None
[ "def", "_back_compatible_gemini", "(", "conf_files", ",", "data", ")", ":", "if", "vcfanno", ".", "is_human", "(", "data", ",", "builds", "=", "[", "\"37\"", "]", ")", ":", "for", "f", "in", "conf_files", ":", "if", "f", "and", "os", ".", "path", ".", "basename", "(", "f", ")", "==", "\"gemini.conf\"", "and", "os", ".", "path", ".", "exists", "(", "f", ")", ":", "with", "open", "(", "f", ")", "as", "in_handle", ":", "for", "line", "in", "in_handle", ":", "if", "line", ".", "startswith", "(", "\"file\"", ")", ":", "fname", "=", "line", ".", "strip", "(", ")", ".", "split", "(", "\"=\"", ")", "[", "-", "1", "]", ".", "replace", "(", "'\"'", ",", "''", ")", ".", "strip", "(", ")", "if", "fname", ".", "find", "(", "\".tidy.\"", ")", ">", "0", ":", "return", "install", ".", "get_gemini_dir", "(", "data", ")", "return", "None" ]
Provide old install directory for configuration with GEMINI supplied tidy VCFs. Handles new style (bcbio installed) and old style (GEMINI installed) configuration and data locations.
[ "Provide", "old", "install", "directory", "for", "configuration", "with", "GEMINI", "supplied", "tidy", "VCFs", "." ]
python
train
47.625
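
The conf-file scan above is easy to miss inside the GEMINI/vcfanno plumbing, so here is a standalone sketch of just that parsing step on an in-memory configuration snippet; the file name is invented.

def has_tidy_vcf(conf_text):
    # Mirror of the line scan above: find a file="..." entry whose name
    # contains ".tidy." (the marker for bcbio-installed tidy VCFs).
    for line in conf_text.splitlines():
        if line.startswith("file"):
            fname = line.strip().split("=")[-1].replace('"', '').strip()
            if fname.find(".tidy.") > 0:
                return True
    return False

sample_conf = '[[annotation]]\nfile="GRCh37-clinvar.tidy.vcf.gz"\nfields=["clinvar_sig"]\n'
print(has_tidy_vcf(sample_conf))  # True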
DAI-Lab/Copulas
copulas/univariate/base.py
https://github.com/DAI-Lab/Copulas/blob/821df61c3d36a6b81ef2883935f935c2eaaa862c/copulas/univariate/base.py#L267-L296
def fit(self, X, *args, **kwargs): """Fit scipy model to an array of values. Args: X(`np.ndarray` or `pd.DataFrame`): Datapoints to be estimated from. Must be 1-d Returns: None """ self.constant_value = self._get_constant_value(X) if self.constant_value is None: if self.unfittable_model: self.model = getattr(scipy.stats, self.model_class)(*args, **kwargs) else: self.model = getattr(scipy.stats, self.model_class)(X, *args, **kwargs) for name in self.METHOD_NAMES: attribute = getattr(self.__class__, name) if isinstance(attribute, str): setattr(self, name, getattr(self.model, attribute)) elif attribute is None: setattr(self, name, missing_method_scipy_wrapper(lambda x: x)) else: self._replace_constant_methods() self.fitted = True
[ "def", "fit", "(", "self", ",", "X", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "constant_value", "=", "self", ".", "_get_constant_value", "(", "X", ")", "if", "self", ".", "constant_value", "is", "None", ":", "if", "self", ".", "unfittable_model", ":", "self", ".", "model", "=", "getattr", "(", "scipy", ".", "stats", ",", "self", ".", "model_class", ")", "(", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "self", ".", "model", "=", "getattr", "(", "scipy", ".", "stats", ",", "self", ".", "model_class", ")", "(", "X", ",", "*", "args", ",", "*", "*", "kwargs", ")", "for", "name", "in", "self", ".", "METHOD_NAMES", ":", "attribute", "=", "getattr", "(", "self", ".", "__class__", ",", "name", ")", "if", "isinstance", "(", "attribute", ",", "str", ")", ":", "setattr", "(", "self", ",", "name", ",", "getattr", "(", "self", ".", "model", ",", "attribute", ")", ")", "elif", "attribute", "is", "None", ":", "setattr", "(", "self", ",", "name", ",", "missing_method_scipy_wrapper", "(", "lambda", "x", ":", "x", ")", ")", "else", ":", "self", ".", "_replace_constant_methods", "(", ")", "self", ".", "fitted", "=", "True" ]
Fit scipy model to an array of values. Args: X(`np.ndarray` or `pd.DataFrame`): Datapoints to be estimated from. Must be 1-d Returns: None
[ "Fit", "scipy", "model", "to", "an", "array", "of", "values", "." ]
python
train
32.4
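
The core trick in `fit` is looking the distribution up on `scipy.stats` by name and constructing it straight from the data. Below is a stripped-down sketch of that pattern outside the Copulas class, using `gaussian_kde` as an example of a model that is fitted on construction.

import numpy as np
import scipy.stats

X = np.random.normal(loc=0.0, scale=1.0, size=500)
model_class = 'gaussian_kde'                  # name resolved on scipy.stats
model = getattr(scipy.stats, model_class)(X)  # fitted directly from the data
print(model.evaluate([0.0, 1.0]))             # density estimates at two points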
tcalmant/ipopo
pelix/threadpool.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/threadpool.py#L97-L103
def set(self, data=None): """ Sets the event """ self.__data = data self.__exception = None self.__event.set()
[ "def", "set", "(", "self", ",", "data", "=", "None", ")", ":", "self", ".", "__data", "=", "data", "self", ".", "__exception", "=", "None", "self", ".", "__event", ".", "set", "(", ")" ]
Sets the event
[ "Sets", "the", "event" ]
python
train
21.714286
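
This method belongs to a result-carrying event used by the thread pool. A self-contained sketch of that pattern on top of `threading.Event` (not the pelix class itself) shows what `set` buys the waiting side.

import threading

class ResultEvent(object):
    """Tiny stand-in for pelix's EventData: an event that carries a payload."""

    def __init__(self):
        self._data = None
        self._event = threading.Event()

    def set(self, data=None):
        self._data = data
        self._event.set()

    def wait(self, timeout=None):
        # Returns the payload once set() fired, or None on timeout
        if self._event.wait(timeout):
            return self._data
        return None

evt = ResultEvent()
threading.Timer(0.1, evt.set, args=(42,)).start()
print(evt.wait(timeout=1))  # 42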
rkhleics/wagtailmodeladmin
wagtailmodeladmin/options.py
https://github.com/rkhleics/wagtailmodeladmin/blob/7fddc853bab2ff3868b8c7a03329308c55f16358/wagtailmodeladmin/options.py#L589-L598
def get_permissions_for_registration(self): """ Utilised by Wagtail's 'register_permissions' hook to allow permissions for all models grouped by this class to be assigned to Groups in settings. """ qs = Permission.objects.none() for instance in self.modeladmin_instances: qs = qs | instance.get_permissions_for_registration() return qs
[ "def", "get_permissions_for_registration", "(", "self", ")", ":", "qs", "=", "Permission", ".", "objects", ".", "none", "(", ")", "for", "instance", "in", "self", ".", "modeladmin_instances", ":", "qs", "=", "qs", "|", "instance", ".", "get_permissions_for_registration", "(", ")", "return", "qs" ]
Utilised by Wagtail's 'register_permissions' hook to allow permissions for all models grouped by this class to be assigned to Groups in settings.
[ "Utilised", "by", "Wagtail", "s", "register_permissions", "hook", "to", "allow", "permissions", "for", "a", "all", "models", "grouped", "by", "this", "class", "to", "be", "assigned", "to", "Groups", "in", "settings", "." ]
python
train
40.4
angr/angr
angr/analyses/cfg/cfg_emulated.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/cfg/cfg_emulated.py#L873-L904
def _pre_analysis(self): """ Initialization work. Executed prior to the analysis. :return: None """ # Fill up self._starts for item in self._starts: callstack = None if isinstance(item, tuple): # (addr, jumpkind) ip = item[0] state = self._create_initial_state(item[0], item[1]) elif isinstance(item, SimState): # SimState state = item.copy() # pylint: disable=no-member ip = state.solver.eval_one(state.ip) self._reset_state_mode(state, 'fastpath') else: raise AngrCFGError('Unsupported CFG start type: %s.' % str(type(item))) self._symbolic_function_initial_state[ip] = state path_wrapper = CFGJob(ip, state, self._context_sensitivity_level, None, None, call_stack=callstack) key = path_wrapper.block_id if key not in self._start_keys: self._start_keys.append(key) self._insert_job(path_wrapper) self._register_analysis_job(path_wrapper.func_addr, path_wrapper)
[ "def", "_pre_analysis", "(", "self", ")", ":", "# Fill up self._starts", "for", "item", "in", "self", ".", "_starts", ":", "callstack", "=", "None", "if", "isinstance", "(", "item", ",", "tuple", ")", ":", "# (addr, jumpkind)", "ip", "=", "item", "[", "0", "]", "state", "=", "self", ".", "_create_initial_state", "(", "item", "[", "0", "]", ",", "item", "[", "1", "]", ")", "elif", "isinstance", "(", "item", ",", "SimState", ")", ":", "# SimState", "state", "=", "item", ".", "copy", "(", ")", "# pylint: disable=no-member", "ip", "=", "state", ".", "solver", ".", "eval_one", "(", "state", ".", "ip", ")", "self", ".", "_reset_state_mode", "(", "state", ",", "'fastpath'", ")", "else", ":", "raise", "AngrCFGError", "(", "'Unsupported CFG start type: %s.'", "%", "str", "(", "type", "(", "item", ")", ")", ")", "self", ".", "_symbolic_function_initial_state", "[", "ip", "]", "=", "state", "path_wrapper", "=", "CFGJob", "(", "ip", ",", "state", ",", "self", ".", "_context_sensitivity_level", ",", "None", ",", "None", ",", "call_stack", "=", "callstack", ")", "key", "=", "path_wrapper", ".", "block_id", "if", "key", "not", "in", "self", ".", "_start_keys", ":", "self", ".", "_start_keys", ".", "append", "(", "key", ")", "self", ".", "_insert_job", "(", "path_wrapper", ")", "self", ".", "_register_analysis_job", "(", "path_wrapper", ".", "func_addr", ",", "path_wrapper", ")" ]
Initialization work. Executed prior to the analysis. :return: None
[ "Initialization", "work", ".", "Executed", "prior", "to", "the", "analysis", "." ]
python
train
35.96875
openego/eDisGo
edisgo/grid/components.py
https://github.com/openego/eDisGo/blob/e6245bdaf236f9c49dbda5a18c1c458290f41e2b/edisgo/grid/components.py#L772-L836
def timeseries(self): """ Feed-in time series of generator It returns the actual time series used in power flow analysis. If :attr:`_timeseries` is not :obj:`None`, it is returned. Otherwise, :meth:`timeseries` looks for generation and curtailment time series of the according type of technology (and weather cell) in :class:`~.grid.network.TimeSeries`. Returns ------- :pandas:`pandas.DataFrame<dataframe>` DataFrame containing active power in kW in column 'p' and reactive power in kVA in column 'q'. """ if self._timeseries is None: # get time series for active power depending on if they are # differentiated by weather cell ID or not if isinstance(self.grid.network.timeseries.generation_fluctuating. columns, pd.MultiIndex): if self.weather_cell_id: try: timeseries = self.grid.network.timeseries.\ generation_fluctuating[ self.type, self.weather_cell_id].to_frame('p') except KeyError: logger.exception("No time series for type {} and " "weather cell ID {} given.".format( self.type, self.weather_cell_id)) raise else: logger.exception("No weather cell ID provided for " "fluctuating generator {}.".format( repr(self))) raise KeyError else: try: timeseries = self.grid.network.timeseries.\ generation_fluctuating[self.type].to_frame('p') except KeyError: logger.exception("No time series for type {} " "given.".format(self.type)) raise timeseries = timeseries * self.nominal_capacity # subtract curtailment if self.curtailment is not None: timeseries = timeseries.join( self.curtailment.to_frame('curtailment'), how='left') timeseries.p = timeseries.p - timeseries.curtailment.fillna(0) if self.timeseries_reactive is not None: timeseries['q'] = self.timeseries_reactive else: timeseries['q'] = timeseries['p'] * self.q_sign * tan(acos( self.power_factor)) return timeseries else: return self._timeseries.loc[ self.grid.network.timeseries.timeindex, :]
[ "def", "timeseries", "(", "self", ")", ":", "if", "self", ".", "_timeseries", "is", "None", ":", "# get time series for active power depending on if they are", "# differentiated by weather cell ID or not", "if", "isinstance", "(", "self", ".", "grid", ".", "network", ".", "timeseries", ".", "generation_fluctuating", ".", "columns", ",", "pd", ".", "MultiIndex", ")", ":", "if", "self", ".", "weather_cell_id", ":", "try", ":", "timeseries", "=", "self", ".", "grid", ".", "network", ".", "timeseries", ".", "generation_fluctuating", "[", "self", ".", "type", ",", "self", ".", "weather_cell_id", "]", ".", "to_frame", "(", "'p'", ")", "except", "KeyError", ":", "logger", ".", "exception", "(", "\"No time series for type {} and \"", "\"weather cell ID {} given.\"", ".", "format", "(", "self", ".", "type", ",", "self", ".", "weather_cell_id", ")", ")", "raise", "else", ":", "logger", ".", "exception", "(", "\"No weather cell ID provided for \"", "\"fluctuating generator {}.\"", ".", "format", "(", "repr", "(", "self", ")", ")", ")", "raise", "KeyError", "else", ":", "try", ":", "timeseries", "=", "self", ".", "grid", ".", "network", ".", "timeseries", ".", "generation_fluctuating", "[", "self", ".", "type", "]", ".", "to_frame", "(", "'p'", ")", "except", "KeyError", ":", "logger", ".", "exception", "(", "\"No time series for type {} \"", "\"given.\"", ".", "format", "(", "self", ".", "type", ")", ")", "raise", "timeseries", "=", "timeseries", "*", "self", ".", "nominal_capacity", "# subtract curtailment", "if", "self", ".", "curtailment", "is", "not", "None", ":", "timeseries", "=", "timeseries", ".", "join", "(", "self", ".", "curtailment", ".", "to_frame", "(", "'curtailment'", ")", ",", "how", "=", "'left'", ")", "timeseries", ".", "p", "=", "timeseries", ".", "p", "-", "timeseries", ".", "curtailment", ".", "fillna", "(", "0", ")", "if", "self", ".", "timeseries_reactive", "is", "not", "None", ":", "timeseries", "[", "'q'", "]", "=", "self", ".", "timeseries_reactive", "else", ":", "timeseries", "[", "'q'", "]", "=", "timeseries", "[", "'p'", "]", "*", "self", ".", "q_sign", "*", "tan", "(", "acos", "(", "self", ".", "power_factor", ")", ")", "return", "timeseries", "else", ":", "return", "self", ".", "_timeseries", ".", "loc", "[", "self", ".", "grid", ".", "network", ".", "timeseries", ".", "timeindex", ",", ":", "]" ]
Feed-in time series of generator It returns the actual time series used in power flow analysis. If :attr:`_timeseries` is not :obj:`None`, it is returned. Otherwise, :meth:`timeseries` looks for generation and curtailment time series of the according type of technology (and weather cell) in :class:`~.grid.network.TimeSeries`. Returns ------- :pandas:`pandas.DataFrame<dataframe>` DataFrame containing active power in kW in column 'p' and reactive power in kVA in column 'q'.
[ "Feed", "-", "in", "time", "series", "of", "generator" ]
python
train
42.2
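
The reactive-power branch at the end of this property follows q = p * q_sign * tan(acos(power_factor)). A small numeric sketch of just that rule with pandas, outside the eDisGo object; the power factor, sign convention and numbers are made up for illustration.

from math import acos, tan
import pandas as pd

p = pd.Series([0.0, 120.0, 300.0], name='p')   # active power in kW
power_factor = 0.95
q_sign = -1                                     # assumed capacitive convention
q = (p * q_sign * tan(acos(power_factor))).rename('q')
print(pd.concat([p, q], axis=1))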
Cornices/cornice.ext.swagger
cornice_swagger/swagger.py
https://github.com/Cornices/cornice.ext.swagger/blob/c31a5cc8d5dd112b11dc41ccb6d09b423b537abc/cornice_swagger/swagger.py#L110-L145
def from_schema(self, schema_node): """ Creates a list of Swagger params from a colander request schema. :param schema_node: Request schema to be transformed into Swagger. :param validators: Validators used in colander with the schema. :rtype: list :returns: List of Swagger parameters. """ params = [] for param_schema in schema_node.children: location = param_schema.name if location is 'body': name = param_schema.__class__.__name__ if name == 'body': name = schema_node.__class__.__name__ + 'Body' param = self.parameter_converter(location, param_schema) param['name'] = name if self.ref: param = self._ref(param) params.append(param) elif location in (('path', 'header', 'headers', 'querystring', 'GET')): for node_schema in param_schema.children: param = self.parameter_converter(location, node_schema) if self.ref: param = self._ref(param) params.append(param) return params
[ "def", "from_schema", "(", "self", ",", "schema_node", ")", ":", "params", "=", "[", "]", "for", "param_schema", "in", "schema_node", ".", "children", ":", "location", "=", "param_schema", ".", "name", "if", "location", "is", "'body'", ":", "name", "=", "param_schema", ".", "__class__", ".", "__name__", "if", "name", "==", "'body'", ":", "name", "=", "schema_node", ".", "__class__", ".", "__name__", "+", "'Body'", "param", "=", "self", ".", "parameter_converter", "(", "location", ",", "param_schema", ")", "param", "[", "'name'", "]", "=", "name", "if", "self", ".", "ref", ":", "param", "=", "self", ".", "_ref", "(", "param", ")", "params", ".", "append", "(", "param", ")", "elif", "location", "in", "(", "(", "'path'", ",", "'header'", ",", "'headers'", ",", "'querystring'", ",", "'GET'", ")", ")", ":", "for", "node_schema", "in", "param_schema", ".", "children", ":", "param", "=", "self", ".", "parameter_converter", "(", "location", ",", "node_schema", ")", "if", "self", ".", "ref", ":", "param", "=", "self", ".", "_ref", "(", "param", ")", "params", ".", "append", "(", "param", ")", "return", "params" ]
Creates a list of Swagger params from a colander request schema. :param schema_node: Request schema to be transformed into Swagger. :param validators: Validators used in colander with the schema. :rtype: list :returns: List of Swagger parameters.
[ "Creates", "a", "list", "of", "Swagger", "params", "from", "a", "colander", "request", "schema", "." ]
python
valid
35.472222
kblin/ncbi-genome-download
ncbi_genome_download/core.py
https://github.com/kblin/ncbi-genome-download/blob/dc55382d351c29e1027be8fa3876701762c1d752/ncbi_genome_download/core.py#L213-L235
def select_candidates(config): """Select candidates to download. Parameters ---------- config: NgdConfig Runtime configuration object Returns ------- list of (<candidate entry>, <taxonomic group>) """ download_candidates = [] for group in config.group: summary_file = get_summary(config.section, group, config.uri, config.use_cache) entries = parse_summary(summary_file) for entry in filter_entries(entries, config): download_candidates.append((entry, group)) return download_candidates
[ "def", "select_candidates", "(", "config", ")", ":", "download_candidates", "=", "[", "]", "for", "group", "in", "config", ".", "group", ":", "summary_file", "=", "get_summary", "(", "config", ".", "section", ",", "group", ",", "config", ".", "uri", ",", "config", ".", "use_cache", ")", "entries", "=", "parse_summary", "(", "summary_file", ")", "for", "entry", "in", "filter_entries", "(", "entries", ",", "config", ")", ":", "download_candidates", ".", "append", "(", "(", "entry", ",", "group", ")", ")", "return", "download_candidates" ]
Select candidates to download. Parameters ---------- config: NgdConfig Runtime configuration object Returns ------- list of (<candidate entry>, <taxonomic group>)
[ "Select", "candidates", "to", "download", "." ]
python
train
24.26087
quantopian/zipline
zipline/finance/ledger.py
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/ledger.py#L582-L621
def process_dividends(self, next_session, asset_finder, adjustment_reader): """Process dividends for the next session. This will earn us any dividends whose ex-date is the next session as well as paying out any dividends whose pay-date is the next session """ position_tracker = self.position_tracker # Earn dividends whose ex_date is the next trading day. We need to # check if we own any of these stocks so we know to pay them out when # the pay date comes. held_sids = set(position_tracker.positions) if held_sids: cash_dividends = adjustment_reader.get_dividends_with_ex_date( held_sids, next_session, asset_finder ) stock_dividends = ( adjustment_reader.get_stock_dividends_with_ex_date( held_sids, next_session, asset_finder ) ) # Earning a dividend just marks that we need to get paid out on # the dividend's pay-date. This does not affect our cash yet. position_tracker.earn_dividends( cash_dividends, stock_dividends, ) # Pay out the dividends whose pay-date is the next session. This does # affect out cash. self._cash_flow( position_tracker.pay_dividends( next_session, ), )
[ "def", "process_dividends", "(", "self", ",", "next_session", ",", "asset_finder", ",", "adjustment_reader", ")", ":", "position_tracker", "=", "self", ".", "position_tracker", "# Earn dividends whose ex_date is the next trading day. We need to", "# check if we own any of these stocks so we know to pay them out when", "# the pay date comes.", "held_sids", "=", "set", "(", "position_tracker", ".", "positions", ")", "if", "held_sids", ":", "cash_dividends", "=", "adjustment_reader", ".", "get_dividends_with_ex_date", "(", "held_sids", ",", "next_session", ",", "asset_finder", ")", "stock_dividends", "=", "(", "adjustment_reader", ".", "get_stock_dividends_with_ex_date", "(", "held_sids", ",", "next_session", ",", "asset_finder", ")", ")", "# Earning a dividend just marks that we need to get paid out on", "# the dividend's pay-date. This does not affect our cash yet.", "position_tracker", ".", "earn_dividends", "(", "cash_dividends", ",", "stock_dividends", ",", ")", "# Pay out the dividends whose pay-date is the next session. This does", "# affect out cash.", "self", ".", "_cash_flow", "(", "position_tracker", ".", "pay_dividends", "(", "next_session", ",", ")", ",", ")" ]
Process dividends for the next session. This will earn us any dividends whose ex-date is the next session as well as paying out any dividends whose pay-date is the next session
[ "Process", "dividends", "for", "the", "next", "session", "." ]
python
train
36.75
ray-project/ray
python/ray/tune/ray_trial_executor.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/ray_trial_executor.py#L258-L276
def reset_trial(self, trial, new_config, new_experiment_tag): """Tries to invoke `Trainable.reset_config()` to reset trial. Args: trial (Trial): Trial to be reset. new_config (dict): New configuration for Trial trainable. new_experiment_tag (str): New experiment name for trial. Returns: True if `reset_config` is successful else False. """ trial.experiment_tag = new_experiment_tag trial.config = new_config trainable = trial.runner with warn_if_slow("reset_config"): reset_val = ray.get(trainable.reset_config.remote(new_config)) return reset_val
[ "def", "reset_trial", "(", "self", ",", "trial", ",", "new_config", ",", "new_experiment_tag", ")", ":", "trial", ".", "experiment_tag", "=", "new_experiment_tag", "trial", ".", "config", "=", "new_config", "trainable", "=", "trial", ".", "runner", "with", "warn_if_slow", "(", "\"reset_config\"", ")", ":", "reset_val", "=", "ray", ".", "get", "(", "trainable", ".", "reset_config", ".", "remote", "(", "new_config", ")", ")", "return", "reset_val" ]
Tries to invoke `Trainable.reset_config()` to reset trial. Args: trial (Trial): Trial to be reset. new_config (dict): New configuration for Trial trainable. new_experiment_tag (str): New experiment name for trial. Returns: True if `reset_config` is successful else False.
[ "Tries", "to", "invoke", "Trainable", ".", "reset_config", "()", "to", "reset", "trial", "." ]
python
train
36.631579
quodlibet/mutagen
mutagen/easymp4.py
https://github.com/quodlibet/mutagen/blob/e393df5971ba41ba5a50de9c2c9e7e5484d82c4e/mutagen/easymp4.py#L83-L101
def RegisterTextKey(cls, key, atomid): """Register a text key. If the key you need to register is a simple one-to-one mapping of MP4 atom name to EasyMP4Tags key, then you can use this function:: EasyMP4Tags.RegisterTextKey("artist", "\xa9ART") """ def getter(tags, key): return tags[atomid] def setter(tags, key, value): tags[atomid] = value def deleter(tags, key): del(tags[atomid]) cls.RegisterKey(key, getter, setter, deleter)
[ "def", "RegisterTextKey", "(", "cls", ",", "key", ",", "atomid", ")", ":", "def", "getter", "(", "tags", ",", "key", ")", ":", "return", "tags", "[", "atomid", "]", "def", "setter", "(", "tags", ",", "key", ",", "value", ")", ":", "tags", "[", "atomid", "]", "=", "value", "def", "deleter", "(", "tags", ",", "key", ")", ":", "del", "(", "tags", "[", "atomid", "]", ")", "cls", ".", "RegisterKey", "(", "key", ",", "getter", ",", "setter", ",", "deleter", ")" ]
Register a text key. If the key you need to register is a simple one-to-one mapping of MP4 atom name to EasyMP4Tags key, then you can use this function:: EasyMP4Tags.RegisterTextKey("artist", "\xa9ART")
[ "Register", "a", "text", "key", "." ]
python
train
28.263158
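
Usage sketch, assuming `mutagen` is installed. It simply re-runs the docstring's own example; actually writing tags through the mapping would need a real .m4a file, which is only hinted at in the comment.

from mutagen.easymp4 import EasyMP4Tags

EasyMP4Tags.RegisterTextKey("artist", "\xa9ART")
# After registration, code like EasyMP4("song.m4a")["artist"] = ["Someone"]
# reads and writes the \xa9ART atom through the friendly key name.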
watson-developer-cloud/python-sdk
ibm_watson/visual_recognition_v3.py
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/visual_recognition_v3.py#L722-L731
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'class_name') and self.class_name is not None: _dict['class'] = self.class_name if hasattr(self, 'score') and self.score is not None: _dict['score'] = self.score if hasattr(self, 'type_hierarchy') and self.type_hierarchy is not None: _dict['type_hierarchy'] = self.type_hierarchy return _dict
[ "def", "_to_dict", "(", "self", ")", ":", "_dict", "=", "{", "}", "if", "hasattr", "(", "self", ",", "'class_name'", ")", "and", "self", ".", "class_name", "is", "not", "None", ":", "_dict", "[", "'class'", "]", "=", "self", ".", "class_name", "if", "hasattr", "(", "self", ",", "'score'", ")", "and", "self", ".", "score", "is", "not", "None", ":", "_dict", "[", "'score'", "]", "=", "self", ".", "score", "if", "hasattr", "(", "self", ",", "'type_hierarchy'", ")", "and", "self", ".", "type_hierarchy", "is", "not", "None", ":", "_dict", "[", "'type_hierarchy'", "]", "=", "self", ".", "type_hierarchy", "return", "_dict" ]
Return a json dictionary representing this model.
[ "Return", "a", "json", "dictionary", "representing", "this", "model", "." ]
python
train
47.1
bitshares/uptick
uptick/markets.py
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/markets.py#L56-L64
def ticker(ctx, market): """ Show ticker of a market """ market = Market(market, bitshares_instance=ctx.bitshares) ticker = market.ticker() t = [["key", "value"]] for key in ticker: t.append([key, str(ticker[key])]) print_table(t)
[ "def", "ticker", "(", "ctx", ",", "market", ")", ":", "market", "=", "Market", "(", "market", ",", "bitshares_instance", "=", "ctx", ".", "bitshares", ")", "ticker", "=", "market", ".", "ticker", "(", ")", "t", "=", "[", "[", "\"key\"", ",", "\"value\"", "]", "]", "for", "key", "in", "ticker", ":", "t", ".", "append", "(", "[", "key", ",", "str", "(", "ticker", "[", "key", "]", ")", "]", ")", "print_table", "(", "t", ")" ]
Show ticker of a market
[ "Show", "ticker", "of", "a", "market" ]
python
train
28.666667
pytest-dev/pytest-xdist
xdist/dsession.py
https://github.com/pytest-dev/pytest-xdist/blob/9fcf8fa636bc69ee6cac9348a6ec20c87f2bb5e4/xdist/dsession.py#L210-L236
def worker_collectionfinish(self, node, ids): """worker has finished test collection. This adds the collection for this node to the scheduler. If the scheduler indicates collection is finished (i.e. all initial nodes have submitted their collections), then tells the scheduler to schedule the collected items. When initiating scheduling the first time it logs which scheduler is in use. """ if self.shuttingdown: return self.config.hook.pytest_xdist_node_collection_finished(node=node, ids=ids) # tell session which items were effectively collected otherwise # the master node will finish the session with EXIT_NOTESTSCOLLECTED self._session.testscollected = len(ids) self.sched.add_node_collection(node, ids) if self.terminal: self.trdist.setstatus(node.gateway.spec, "[%d]" % (len(ids))) if self.sched.collection_is_completed: if self.terminal and not self.sched.has_pending: self.trdist.ensure_show_status() self.terminal.write_line("") if self.config.option.verbose > 0: self.terminal.write_line( "scheduling tests via %s" % (self.sched.__class__.__name__) ) self.sched.schedule()
[ "def", "worker_collectionfinish", "(", "self", ",", "node", ",", "ids", ")", ":", "if", "self", ".", "shuttingdown", ":", "return", "self", ".", "config", ".", "hook", ".", "pytest_xdist_node_collection_finished", "(", "node", "=", "node", ",", "ids", "=", "ids", ")", "# tell session which items were effectively collected otherwise", "# the master node will finish the session with EXIT_NOTESTSCOLLECTED", "self", ".", "_session", ".", "testscollected", "=", "len", "(", "ids", ")", "self", ".", "sched", ".", "add_node_collection", "(", "node", ",", "ids", ")", "if", "self", ".", "terminal", ":", "self", ".", "trdist", ".", "setstatus", "(", "node", ".", "gateway", ".", "spec", ",", "\"[%d]\"", "%", "(", "len", "(", "ids", ")", ")", ")", "if", "self", ".", "sched", ".", "collection_is_completed", ":", "if", "self", ".", "terminal", "and", "not", "self", ".", "sched", ".", "has_pending", ":", "self", ".", "trdist", ".", "ensure_show_status", "(", ")", "self", ".", "terminal", ".", "write_line", "(", "\"\"", ")", "if", "self", ".", "config", ".", "option", ".", "verbose", ">", "0", ":", "self", ".", "terminal", ".", "write_line", "(", "\"scheduling tests via %s\"", "%", "(", "self", ".", "sched", ".", "__class__", ".", "__name__", ")", ")", "self", ".", "sched", ".", "schedule", "(", ")" ]
worker has finished test collection. This adds the collection for this node to the scheduler. If the scheduler indicates collection is finished (i.e. all initial nodes have submitted their collections), then tells the scheduler to schedule the collected items. When initiating scheduling the first time it logs which scheduler is in use.
[ "worker", "has", "finished", "test", "collection", "." ]
python
train
49.666667
tensorflow/tensor2tensor
tensor2tensor/utils/metrics.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/metrics.py#L286-L294
def rounding_accuracy(predictions, labels, weights_fn=common_layers.weights_nonzero): """Rounding accuracy for L1/L2 losses: round down the predictions to ints.""" outputs = tf.squeeze(tf.to_int32(predictions)) labels = tf.squeeze(labels) weights = weights_fn(labels) labels = tf.to_int32(labels) return tf.to_float(tf.equal(outputs, labels)), weights
[ "def", "rounding_accuracy", "(", "predictions", ",", "labels", ",", "weights_fn", "=", "common_layers", ".", "weights_nonzero", ")", ":", "outputs", "=", "tf", ".", "squeeze", "(", "tf", ".", "to_int32", "(", "predictions", ")", ")", "labels", "=", "tf", ".", "squeeze", "(", "labels", ")", "weights", "=", "weights_fn", "(", "labels", ")", "labels", "=", "tf", ".", "to_int32", "(", "labels", ")", "return", "tf", ".", "to_float", "(", "tf", ".", "equal", "(", "outputs", ",", "labels", ")", ")", ",", "weights" ]
Rounding accuracy for L1/L2 losses: round down the predictions to ints.
[ "Rounding", "accuracy", "for", "L1", "/", "L2", "losses", ":", "round", "down", "the", "predictions", "to", "ints", "." ]
python
train
44.333333
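
A NumPy restatement of what this metric computes (not the TF1 graph version above): predictions are truncated to integers, and positions where the label is 0 are treated as padding and excluded, per `weights_nonzero`.

import numpy as np

predictions = np.array([1.7, 2.2, 0.4, 3.9])
labels = np.array([1, 2, 1, 0])             # trailing 0 acts as padding
outputs = predictions.astype(np.int32)      # truncation, like tf.to_int32
weights = (labels != 0).astype(np.float64)
accuracy = ((outputs == labels) * weights).sum() / weights.sum()
print(accuracy)                             # 2 of 3 weighted positions -> 0.666...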
wonambi-python/wonambi
wonambi/viz/base.py
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/viz/base.py#L30-L37
def _repr_png_(self): """This is used by ipython to plot inline. """ app.process_events() QApplication.processEvents() img = read_pixels() return bytes(_make_png(img))
[ "def", "_repr_png_", "(", "self", ")", ":", "app", ".", "process_events", "(", ")", "QApplication", ".", "processEvents", "(", ")", "img", "=", "read_pixels", "(", ")", "return", "bytes", "(", "_make_png", "(", "img", ")", ")" ]
This is used by ipython to plot inline.
[ "This", "is", "used", "by", "ipython", "to", "plot", "inline", "." ]
python
train
26.125
chatfirst/chatfirst
chatfirst/client.py
https://github.com/chatfirst/chatfirst/blob/11e023fc372e034dfd3417b61b67759ef8c37ad6/chatfirst/client.py#L29-L36
def bots_create(self, bot): """ Save new bot :param bot: bot object to save :type bot: Bot """ self.client.bots(_method="POST", _json=bot.to_json(), _params=dict(userToken=self.token))
[ "def", "bots_create", "(", "self", ",", "bot", ")", ":", "self", ".", "client", ".", "bots", "(", "_method", "=", "\"POST\"", ",", "_json", "=", "bot", ".", "to_json", "(", ")", ",", "_params", "=", "dict", "(", "userToken", "=", "self", ".", "token", ")", ")" ]
Save new bot :param bot: bot object to save :type bot: Bot
[ "Save", "new", "bot" ]
python
train
28.25
nteract/papermill
papermill/execute.py
https://github.com/nteract/papermill/blob/7423a303f3fa22ec6d03edf5fd9700d659b5a6fa/papermill/execute.py#L143-L181
def raise_for_execution_errors(nb, output_path): """Assigned parameters into the appropriate place in the input notebook Parameters ---------- nb : NotebookNode Executable notebook object output_path : str Path to write executed notebook """ error = None for cell in nb.cells: if cell.get("outputs") is None: continue for output in cell.outputs: if output.output_type == "error": error = PapermillExecutionError( exec_count=cell.execution_count, source=cell.source, ename=output.ename, evalue=output.evalue, traceback=output.traceback, ) break if error: # Write notebook back out with the Error Message at the top of the Notebook. error_msg = ERROR_MESSAGE_TEMPLATE % str(error.exec_count) error_msg_cell = nbformat.v4.new_code_cell( source="%%html\n" + error_msg, outputs=[ nbformat.v4.new_output(output_type="display_data", data={"text/html": error_msg}) ], metadata={"inputHidden": True, "hide_input": True}, ) nb.cells = [error_msg_cell] + nb.cells write_ipynb(nb, output_path) raise error
[ "def", "raise_for_execution_errors", "(", "nb", ",", "output_path", ")", ":", "error", "=", "None", "for", "cell", "in", "nb", ".", "cells", ":", "if", "cell", ".", "get", "(", "\"outputs\"", ")", "is", "None", ":", "continue", "for", "output", "in", "cell", ".", "outputs", ":", "if", "output", ".", "output_type", "==", "\"error\"", ":", "error", "=", "PapermillExecutionError", "(", "exec_count", "=", "cell", ".", "execution_count", ",", "source", "=", "cell", ".", "source", ",", "ename", "=", "output", ".", "ename", ",", "evalue", "=", "output", ".", "evalue", ",", "traceback", "=", "output", ".", "traceback", ",", ")", "break", "if", "error", ":", "# Write notebook back out with the Error Message at the top of the Notebook.", "error_msg", "=", "ERROR_MESSAGE_TEMPLATE", "%", "str", "(", "error", ".", "exec_count", ")", "error_msg_cell", "=", "nbformat", ".", "v4", ".", "new_code_cell", "(", "source", "=", "\"%%html\\n\"", "+", "error_msg", ",", "outputs", "=", "[", "nbformat", ".", "v4", ".", "new_output", "(", "output_type", "=", "\"display_data\"", ",", "data", "=", "{", "\"text/html\"", ":", "error_msg", "}", ")", "]", ",", "metadata", "=", "{", "\"inputHidden\"", ":", "True", ",", "\"hide_input\"", ":", "True", "}", ",", ")", "nb", ".", "cells", "=", "[", "error_msg_cell", "]", "+", "nb", ".", "cells", "write_ipynb", "(", "nb", ",", "output_path", ")", "raise", "error" ]
Assigned parameters into the appropriate place in the input notebook Parameters ---------- nb : NotebookNode Executable notebook object output_path : str Path to write executed notebook
[ "Assigned", "parameters", "into", "the", "appropriate", "place", "in", "the", "input", "notebook" ]
python
train
33.74359
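A short usage sketch: after a notebook has been executed, raise_for_execution_errors scans its cell outputs and, if an error output is found, writes a copy with an error banner cell prepended and raises PapermillExecutionError. The notebook paths below are hypothetical.

import nbformat
from papermill.execute import raise_for_execution_errors

nb = nbformat.read("executed.ipynb", as_version=4)   # previously executed notebook
# Raises PapermillExecutionError (and writes flagged.ipynb with the banner cell)
# if any code cell recorded an "error" output; otherwise it returns quietly.
raise_for_execution_errors(nb, "flagged.ipynb")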
angr/angr
angr/storage/file.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/storage/file.py#L413-L477
def read(self, pos, size, **kwargs): """ Read a packet from the stream. :param int pos: The packet number to read from the sequence of the stream. May be None to append to the stream. :param size: The size to read. May be symbolic. :param short_reads: Whether to replace the size with a symbolic value constrained to less than or equal to the original size. If unspecified, will be chosen based on the state option. :return: A tuple of the data read (a bitvector of the length that is the maximum length of the read) and the actual size of the read. """ short_reads = kwargs.pop('short_reads', None) # sanity check on read/write modes if self.write_mode is None: self.write_mode = False elif self.write_mode is True: raise SimFileError("Cannot read and write to the same SimPackets") # sanity check on packet number and determine if data is already present if pos is None: pos = len(self.content) if pos < 0: raise SimFileError("SimPacket.read(%d): Negative packet number?" % pos) elif pos > len(self.content): raise SimFileError("SimPacket.read(%d): Packet number is past frontier of %d?" % (pos, len(self.content))) elif pos != len(self.content): _, realsize = self.content[pos] self.state.solver.add(size <= realsize) if not self.state.solver.satisfiable(): raise SimFileError("Packet read size constraint made state unsatisfiable???") return self.content[pos] + (pos+1,) # typecheck if type(size) is int: size = self.state.solver.BVV(size, self.state.arch.bits) # The read is on the frontier. let's generate a new packet. orig_size = size max_size = None # if short reads are enabled, replace size with a symbol if short_reads is True or (short_reads is None and sim_options.SHORT_READS in self.state.options): size = self.state.solver.BVS('packetsize_%d_%s' % (len(self.content), self.ident), self.state.arch.bits, key=('file', self.ident, 'packetsize', len(self.content))) self.state.solver.add(size <= orig_size) # figure out the maximum size of the read if not self.state.solver.symbolic(size): max_size = self.state.solver.eval(size) elif self.state.solver.satisfiable(extra_constraints=(size <= self.state.libc.max_packet_size,)): l.info("Constraining symbolic packet size to be less than %d", self.state.libc.max_packet_size) if not self.state.solver.is_true(orig_size <= self.state.libc.max_packet_size): self.state.solver.add(size <= self.state.libc.max_packet_size) if not self.state.solver.symbolic(orig_size): max_size = min(self.state.solver.eval(orig_size), self.state.libc.max_packet_size) else: max_size = self.state.solver.max(size) else: max_size = self.state.solver.min(size) l.warning("Could not constrain symbolic packet size to <= %d; using minimum %d for size", self.state.libc.max_packet_size, max_size) self.state.solver.add(size == max_size) # generate the packet data and return it data = self.state.solver.BVS('packet_%d_%s' % (len(self.content), self.ident), max_size * self.state.arch.byte_width, key=('file', self.ident, 'packet', len(self.content))) packet = (data, size) self.content.append(packet) return packet + (pos+1,)
[ "def", "read", "(", "self", ",", "pos", ",", "size", ",", "*", "*", "kwargs", ")", ":", "short_reads", "=", "kwargs", ".", "pop", "(", "'short_reads'", ",", "None", ")", "# sanity check on read/write modes", "if", "self", ".", "write_mode", "is", "None", ":", "self", ".", "write_mode", "=", "False", "elif", "self", ".", "write_mode", "is", "True", ":", "raise", "SimFileError", "(", "\"Cannot read and write to the same SimPackets\"", ")", "# sanity check on packet number and determine if data is already present", "if", "pos", "is", "None", ":", "pos", "=", "len", "(", "self", ".", "content", ")", "if", "pos", "<", "0", ":", "raise", "SimFileError", "(", "\"SimPacket.read(%d): Negative packet number?\"", "%", "pos", ")", "elif", "pos", ">", "len", "(", "self", ".", "content", ")", ":", "raise", "SimFileError", "(", "\"SimPacket.read(%d): Packet number is past frontier of %d?\"", "%", "(", "pos", ",", "len", "(", "self", ".", "content", ")", ")", ")", "elif", "pos", "!=", "len", "(", "self", ".", "content", ")", ":", "_", ",", "realsize", "=", "self", ".", "content", "[", "pos", "]", "self", ".", "state", ".", "solver", ".", "add", "(", "size", "<=", "realsize", ")", "if", "not", "self", ".", "state", ".", "solver", ".", "satisfiable", "(", ")", ":", "raise", "SimFileError", "(", "\"Packet read size constraint made state unsatisfiable???\"", ")", "return", "self", ".", "content", "[", "pos", "]", "+", "(", "pos", "+", "1", ",", ")", "# typecheck", "if", "type", "(", "size", ")", "is", "int", ":", "size", "=", "self", ".", "state", ".", "solver", ".", "BVV", "(", "size", ",", "self", ".", "state", ".", "arch", ".", "bits", ")", "# The read is on the frontier. let's generate a new packet.", "orig_size", "=", "size", "max_size", "=", "None", "# if short reads are enabled, replace size with a symbol", "if", "short_reads", "is", "True", "or", "(", "short_reads", "is", "None", "and", "sim_options", ".", "SHORT_READS", "in", "self", ".", "state", ".", "options", ")", ":", "size", "=", "self", ".", "state", ".", "solver", ".", "BVS", "(", "'packetsize_%d_%s'", "%", "(", "len", "(", "self", ".", "content", ")", ",", "self", ".", "ident", ")", ",", "self", ".", "state", ".", "arch", ".", "bits", ",", "key", "=", "(", "'file'", ",", "self", ".", "ident", ",", "'packetsize'", ",", "len", "(", "self", ".", "content", ")", ")", ")", "self", ".", "state", ".", "solver", ".", "add", "(", "size", "<=", "orig_size", ")", "# figure out the maximum size of the read", "if", "not", "self", ".", "state", ".", "solver", ".", "symbolic", "(", "size", ")", ":", "max_size", "=", "self", ".", "state", ".", "solver", ".", "eval", "(", "size", ")", "elif", "self", ".", "state", ".", "solver", ".", "satisfiable", "(", "extra_constraints", "=", "(", "size", "<=", "self", ".", "state", ".", "libc", ".", "max_packet_size", ",", ")", ")", ":", "l", ".", "info", "(", "\"Constraining symbolic packet size to be less than %d\"", ",", "self", ".", "state", ".", "libc", ".", "max_packet_size", ")", "if", "not", "self", ".", "state", ".", "solver", ".", "is_true", "(", "orig_size", "<=", "self", ".", "state", ".", "libc", ".", "max_packet_size", ")", ":", "self", ".", "state", ".", "solver", ".", "add", "(", "size", "<=", "self", ".", "state", ".", "libc", ".", "max_packet_size", ")", "if", "not", "self", ".", "state", ".", "solver", ".", "symbolic", "(", "orig_size", ")", ":", "max_size", "=", "min", "(", "self", ".", "state", ".", "solver", ".", "eval", "(", "orig_size", ")", ",", "self", ".", "state", ".", "libc", ".", 
"max_packet_size", ")", "else", ":", "max_size", "=", "self", ".", "state", ".", "solver", ".", "max", "(", "size", ")", "else", ":", "max_size", "=", "self", ".", "state", ".", "solver", ".", "min", "(", "size", ")", "l", ".", "warning", "(", "\"Could not constrain symbolic packet size to <= %d; using minimum %d for size\"", ",", "self", ".", "state", ".", "libc", ".", "max_packet_size", ",", "max_size", ")", "self", ".", "state", ".", "solver", ".", "add", "(", "size", "==", "max_size", ")", "# generate the packet data and return it", "data", "=", "self", ".", "state", ".", "solver", ".", "BVS", "(", "'packet_%d_%s'", "%", "(", "len", "(", "self", ".", "content", ")", ",", "self", ".", "ident", ")", ",", "max_size", "*", "self", ".", "state", ".", "arch", ".", "byte_width", ",", "key", "=", "(", "'file'", ",", "self", ".", "ident", ",", "'packet'", ",", "len", "(", "self", ".", "content", ")", ")", ")", "packet", "=", "(", "data", ",", "size", ")", "self", ".", "content", ".", "append", "(", "packet", ")", "return", "packet", "+", "(", "pos", "+", "1", ",", ")" ]
Read a packet from the stream. :param int pos: The packet number to read from the sequence of the stream. May be None to append to the stream. :param size: The size to read. May be symbolic. :param short_reads: Whether to replace the size with a symbolic value constrained to less than or equal to the original size. If unspecified, will be chosen based on the state option. :return: A tuple of the data read (a bitvector of the length that is the maximum length of the read) and the actual size of the read.
[ "Read", "a", "packet", "from", "the", "stream", "." ]
python
train
55.169231
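A hedged sketch of reading from a SimPackets stream inside an angr state. It assumes that stdin of a default entry state is packet-backed (the usual default in recent angr releases) and uses /bin/true purely as a convenient target binary.

import angr

proj = angr.Project("/bin/true", auto_load_libs=False)
state = proj.factory.entry_state()

packets = state.posix.stdin        # assumed to be a SimPackets-style file here
data, size, new_pos = packets.read(None, 32, short_reads=True)
# data: symbolic bitvector holding the packet, size: (possibly symbolic) number
# of bytes actually read, new_pos: index of the next packet in the stream.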
poppy-project/pypot
pypot/vrep/remoteApiBindings/vrep.py
https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/vrep/remoteApiBindings/vrep.py#L725-L733
def simxGetDistanceHandle(clientID, distanceObjectName, operationMode): ''' Please have a look at the function description/documentation in the V-REP user manual ''' handle = ct.c_int() if (sys.version_info[0] == 3) and (type(distanceObjectName) is str): distanceObjectName=distanceObjectName.encode('utf-8') return c_GetDistanceHandle(clientID, distanceObjectName, ct.byref(handle), operationMode), handle.value
[ "def", "simxGetDistanceHandle", "(", "clientID", ",", "distanceObjectName", ",", "operationMode", ")", ":", "handle", "=", "ct", ".", "c_int", "(", ")", "if", "(", "sys", ".", "version_info", "[", "0", "]", "==", "3", ")", "and", "(", "type", "(", "distanceObjectName", ")", "is", "str", ")", ":", "distanceObjectName", "=", "distanceObjectName", ".", "encode", "(", "'utf-8'", ")", "return", "c_GetDistanceHandle", "(", "clientID", ",", "distanceObjectName", ",", "ct", ".", "byref", "(", "handle", ")", ",", "operationMode", ")", ",", "handle", ".", "value" ]
Please have a look at the function description/documentation in the V-REP user manual
[ "Please", "have", "a", "look", "at", "the", "function", "description", "/", "documentation", "in", "the", "V", "-", "REP", "user", "manual" ]
python
train
48.555556
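A hedged usage sketch for this binding: it assumes a V-REP instance listening on 127.0.0.1:19997 with a distance object named "Distance" in the scene, and that the remote-API constants (simx_opmode_oneshot_wait, simx_return_ok) are re-exported by the bindings module, as in the stock V-REP client.

from pypot.vrep.remoteApiBindings import vrep

client_id = vrep.simxStart("127.0.0.1", 19997, True, True, 5000, 5)
if client_id != -1:
    err, handle = vrep.simxGetDistanceHandle(
        client_id, "Distance", vrep.simx_opmode_oneshot_wait)
    if err == vrep.simx_return_ok:
        print("distance object handle:", handle)
    vrep.simxFinish(client_id)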
xeroc/python-graphenelib
graphenestorage/masterpassword.py
https://github.com/xeroc/python-graphenelib/blob/8bb5396bc79998ee424cf3813af478304173f3a6/graphenestorage/masterpassword.py#L116-L131
def _new_masterpassword(self, password): """ Generate a new random masterkey, encrypt it with the password and store it in the store. :param str password: Password to use for en-/de-cryption """ # make sure to not overwrite an existing key if self.config_key in self.config and self.config[self.config_key]: raise Exception("Storage already has a masterpassword!") self.decrypted_master = hexlify(os.urandom(32)).decode("ascii") # Encrypt and save master self.password = password self._save_encrypted_masterpassword() return self.masterkey
[ "def", "_new_masterpassword", "(", "self", ",", "password", ")", ":", "# make sure to not overwrite an existing key", "if", "self", ".", "config_key", "in", "self", ".", "config", "and", "self", ".", "config", "[", "self", ".", "config_key", "]", ":", "raise", "Exception", "(", "\"Storage already has a masterpassword!\"", ")", "self", ".", "decrypted_master", "=", "hexlify", "(", "os", ".", "urandom", "(", "32", ")", ")", ".", "decode", "(", "\"ascii\"", ")", "# Encrypt and save master", "self", ".", "password", "=", "password", "self", ".", "_save_encrypted_masterpassword", "(", ")", "return", "self", ".", "masterkey" ]
Generate a new random masterkey, encrypt it with the password and store it in the store. :param str password: Password to use for en-/de-cryption
[ "Generate", "a", "new", "random", "masterkey", "encrypt", "it", "with", "the", "password", "and", "store", "it", "in", "the", "store", "." ]
python
valid
39.6875
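The snippet below is a stand-alone mirror of what _new_masterpassword generates, not a call into graphenestorage: a random 32-byte master key, hex-encoded, which the real method then encrypts under the supplied password and persists via _save_encrypted_masterpassword().

import os
from binascii import hexlify

password = "correct horse battery staple"           # would become self.password
decrypted_master = hexlify(os.urandom(32)).decode("ascii")
print(len(decrypted_master))                        # 64 hex characters of master key
# The class then stores this key encrypted with `password`, so losing the
# password means losing access to every key protected by the master key.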
nabla-c0d3/sslyze
sslyze/plugins/utils/certificate_utils.py
https://github.com/nabla-c0d3/sslyze/blob/0fb3ae668453d7ecf616d0755f237ca7be9f62fa/sslyze/plugins/utils/certificate_utils.py#L95-L108
def has_ocsp_must_staple_extension(certificate: cryptography.x509.Certificate) -> bool: """Return True if the certificate has the OCSP Must-Staple extension defined in RFC 6066. """ has_ocsp_must_staple = False try: tls_feature_ext = certificate.extensions.get_extension_for_oid(ExtensionOID.TLS_FEATURE) for feature_type in tls_feature_ext.value: if feature_type == cryptography.x509.TLSFeatureType.status_request: has_ocsp_must_staple = True break except ExtensionNotFound: pass return has_ocsp_must_staple
[ "def", "has_ocsp_must_staple_extension", "(", "certificate", ":", "cryptography", ".", "x509", ".", "Certificate", ")", "->", "bool", ":", "has_ocsp_must_staple", "=", "False", "try", ":", "tls_feature_ext", "=", "certificate", ".", "extensions", ".", "get_extension_for_oid", "(", "ExtensionOID", ".", "TLS_FEATURE", ")", "for", "feature_type", "in", "tls_feature_ext", ".", "value", ":", "if", "feature_type", "==", "cryptography", ".", "x509", ".", "TLSFeatureType", ".", "status_request", ":", "has_ocsp_must_staple", "=", "True", "break", "except", "ExtensionNotFound", ":", "pass", "return", "has_ocsp_must_staple" ]
Return True if the certificate has the OCSP Must-Staple extension defined in RFC 6066.
[ "Return", "True", "if", "the", "certificate", "has", "the", "OCSP", "Must", "-", "Staple", "extension", "defined", "in", "RFC", "6066", "." ]
python
train
45.428571
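A usage sketch, assuming the function is exposed as a static method of CertificateUtils (which is how this sslyze module is organised) and using a hypothetical server.pem on disk.

from cryptography import x509
from cryptography.hazmat.backends import default_backend
from sslyze.plugins.utils.certificate_utils import CertificateUtils

with open("server.pem", "rb") as fh:
    cert = x509.load_pem_x509_certificate(fh.read(), default_backend())

# True only if the certificate carries the TLS Feature extension with the
# status_request value (the "OCSP Must-Staple" marker).
print(CertificateUtils.has_ocsp_must_staple_extension(cert))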
PyCQA/pydocstyle
src/pydocstyle/config.py
https://github.com/PyCQA/pydocstyle/blob/2549847f9efad225789f931e83dfe782418ca13e/src/pydocstyle/config.py#L515-L545
def _fix_set_options(cls, options): """Alter the set options from None/strings to sets in place.""" optional_set_options = ('ignore', 'select') mandatory_set_options = ('add_ignore', 'add_select') def _get_set(value_str): """Split `value_str` by the delimiter `,` and return a set. Removes any occurrences of '' in the set. Also expand error code prefixes, to avoid doing this for every file. """ return cls._expand_error_codes(set(value_str.split(',')) - {''}) for opt in optional_set_options: value = getattr(options, opt) if value is not None: setattr(options, opt, _get_set(value)) for opt in mandatory_set_options: value = getattr(options, opt) if value is None: value = '' if not isinstance(value, Set): value = _get_set(value) setattr(options, opt, value) return options
[ "def", "_fix_set_options", "(", "cls", ",", "options", ")", ":", "optional_set_options", "=", "(", "'ignore'", ",", "'select'", ")", "mandatory_set_options", "=", "(", "'add_ignore'", ",", "'add_select'", ")", "def", "_get_set", "(", "value_str", ")", ":", "\"\"\"Split `value_str` by the delimiter `,` and return a set.\n\n Removes any occurrences of '' in the set.\n Also expand error code prefixes, to avoid doing this for every\n file.\n\n \"\"\"", "return", "cls", ".", "_expand_error_codes", "(", "set", "(", "value_str", ".", "split", "(", "','", ")", ")", "-", "{", "''", "}", ")", "for", "opt", "in", "optional_set_options", ":", "value", "=", "getattr", "(", "options", ",", "opt", ")", "if", "value", "is", "not", "None", ":", "setattr", "(", "options", ",", "opt", ",", "_get_set", "(", "value", ")", ")", "for", "opt", "in", "mandatory_set_options", ":", "value", "=", "getattr", "(", "options", ",", "opt", ")", "if", "value", "is", "None", ":", "value", "=", "''", "if", "not", "isinstance", "(", "value", ",", "Set", ")", ":", "value", "=", "_get_set", "(", "value", ")", "setattr", "(", "options", ",", "opt", ",", "value", ")", "return", "options" ]
Alter the set options from None/strings to sets in place.
[ "Alter", "the", "set", "options", "from", "None", "/", "strings", "to", "sets", "in", "place", "." ]
python
train
32.354839
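A hedged sketch of what this normalisation does to a raw argparse-style namespace; calling the private classmethod directly is for illustration only.

from types import SimpleNamespace
from pydocstyle.config import ConfigurationParser

opts = SimpleNamespace(ignore=None, select="D1,D203,",
                       add_ignore="D105", add_select=None)
opts = ConfigurationParser._fix_set_options(opts)
print(opts.select)       # set of full codes: the "D1" prefix has been expanded
print(opts.add_ignore)   # {'D105'}
print(opts.add_select)   # set() -- mandatory options always end up as sets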
google/apitools
ez_setup.py
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/ez_setup.py#L232-L259
def update_md5(filenames): """Update our built-in md5 registry""" import re for name in filenames: base = os.path.basename(name) f = open(name,'rb') md5_data[base] = md5(f.read()).hexdigest() f.close() data = [" %r: %r,\n" % it for it in md5_data.items()] data.sort() repl = "".join(data) import inspect srcfile = inspect.getsourcefile(sys.modules[__name__]) f = open(srcfile, 'rb'); src = f.read(); f.close() match = re.search("\nmd5_data = {\n([^}]+)}", src) if not match: print >>sys.stderr, "Internal error!" sys.exit(2) src = src[:match.start(1)] + repl + src[match.end(1):] f = open(srcfile,'w') f.write(src) f.close()
[ "def", "update_md5", "(", "filenames", ")", ":", "import", "re", "for", "name", "in", "filenames", ":", "base", "=", "os", ".", "path", ".", "basename", "(", "name", ")", "f", "=", "open", "(", "name", ",", "'rb'", ")", "md5_data", "[", "base", "]", "=", "md5", "(", "f", ".", "read", "(", ")", ")", ".", "hexdigest", "(", ")", "f", ".", "close", "(", ")", "data", "=", "[", "\" %r: %r,\\n\"", "%", "it", "for", "it", "in", "md5_data", ".", "items", "(", ")", "]", "data", ".", "sort", "(", ")", "repl", "=", "\"\"", ".", "join", "(", "data", ")", "import", "inspect", "srcfile", "=", "inspect", ".", "getsourcefile", "(", "sys", ".", "modules", "[", "__name__", "]", ")", "f", "=", "open", "(", "srcfile", ",", "'rb'", ")", "src", "=", "f", ".", "read", "(", ")", "f", ".", "close", "(", ")", "match", "=", "re", ".", "search", "(", "\"\\nmd5_data = {\\n([^}]+)}\"", ",", "src", ")", "if", "not", "match", ":", "print", ">>", "sys", ".", "stderr", ",", "\"Internal error!\"", "sys", ".", "exit", "(", "2", ")", "src", "=", "src", "[", ":", "match", ".", "start", "(", "1", ")", "]", "+", "repl", "+", "src", "[", "match", ".", "end", "(", "1", ")", ":", "]", "f", "=", "open", "(", "srcfile", ",", "'w'", ")", "f", ".", "write", "(", "src", ")", "f", ".", "close", "(", ")" ]
Update our built-in md5 registry
[ "Update", "our", "built", "-", "in", "md5", "registry" ]
python
train
25.5
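A hedged usage sketch (Python 2 only, given the print >> syntax in the module). It assumes ez_setup.py is importable from the current directory; the egg filename is a hypothetical example.

import ez_setup

# Recomputes the md5 of each named egg and rewrites the embedded
# "md5_data = {...}" table inside ez_setup.py itself.
ez_setup.update_md5(['setuptools-0.6c11-py2.7.egg'])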
saltstack/salt
salt/states/pcs.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pcs.py#L360-L444
def auth(name, nodes, pcsuser='hacluster', pcspasswd='hacluster', extra_args=None): ''' Ensure all nodes are authorized to the cluster name Irrelevant, not used (recommended: pcs_auth__auth) nodes a list of nodes which should be authorized to the cluster pcsuser user for communication with pcs (default: hacluster) pcspasswd password for pcsuser (default: hacluster) extra_args list of extra args for the \'pcs cluster auth\' command Example: .. code-block:: yaml pcs_auth__auth: pcs.auth: - nodes: - node1.example.com - node2.example.com - pcsuser: hacluster - pcspasswd: hoonetorg - extra_args: [] ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} auth_required = False authorized = __salt__['pcs.is_auth'](nodes=nodes) log.trace('Output of pcs.is_auth: %s', authorized) authorized_dict = {} for line in authorized['stdout'].splitlines(): node = line.split(':')[0].strip() auth_state = line.split(':')[1].strip() if node in nodes: authorized_dict.update({node: auth_state}) log.trace('authorized_dict: %s', authorized_dict) for node in nodes: if node in authorized_dict and authorized_dict[node] == 'Already authorized': ret['comment'] += 'Node {0} is already authorized\n'.format(node) else: auth_required = True if __opts__['test']: ret['comment'] += 'Node is set to authorize: {0}\n'.format(node) if not auth_required: return ret if __opts__['test']: ret['result'] = None return ret if not isinstance(extra_args, (list, tuple)): extra_args = [] if '--force' not in extra_args: extra_args += ['--force'] authorize = __salt__['pcs.auth'](nodes=nodes, pcsuser=pcsuser, pcspasswd=pcspasswd, extra_args=extra_args) log.trace('Output of pcs.auth: %s', authorize) authorize_dict = {} for line in authorize['stdout'].splitlines(): node = line.split(':')[0].strip() auth_state = line.split(':')[1].strip() if node in nodes: authorize_dict.update({node: auth_state}) log.trace('authorize_dict: %s', authorize_dict) for node in nodes: if node in authorize_dict and authorize_dict[node] == 'Authorized': ret['comment'] += 'Authorized {0}\n'.format(node) ret['changes'].update({node: {'old': '', 'new': 'Authorized'}}) else: ret['result'] = False if node in authorized_dict: ret['comment'] += 'Authorization check for node {0} returned: {1}\n'.format(node, authorized_dict[node]) if node in authorize_dict: ret['comment'] += 'Failed to authorize {0} with error {1}\n'.format(node, authorize_dict[node]) return ret
[ "def", "auth", "(", "name", ",", "nodes", ",", "pcsuser", "=", "'hacluster'", ",", "pcspasswd", "=", "'hacluster'", ",", "extra_args", "=", "None", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "True", ",", "'comment'", ":", "''", ",", "'changes'", ":", "{", "}", "}", "auth_required", "=", "False", "authorized", "=", "__salt__", "[", "'pcs.is_auth'", "]", "(", "nodes", "=", "nodes", ")", "log", ".", "trace", "(", "'Output of pcs.is_auth: %s'", ",", "authorized", ")", "authorized_dict", "=", "{", "}", "for", "line", "in", "authorized", "[", "'stdout'", "]", ".", "splitlines", "(", ")", ":", "node", "=", "line", ".", "split", "(", "':'", ")", "[", "0", "]", ".", "strip", "(", ")", "auth_state", "=", "line", ".", "split", "(", "':'", ")", "[", "1", "]", ".", "strip", "(", ")", "if", "node", "in", "nodes", ":", "authorized_dict", ".", "update", "(", "{", "node", ":", "auth_state", "}", ")", "log", ".", "trace", "(", "'authorized_dict: %s'", ",", "authorized_dict", ")", "for", "node", "in", "nodes", ":", "if", "node", "in", "authorized_dict", "and", "authorized_dict", "[", "node", "]", "==", "'Already authorized'", ":", "ret", "[", "'comment'", "]", "+=", "'Node {0} is already authorized\\n'", ".", "format", "(", "node", ")", "else", ":", "auth_required", "=", "True", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'comment'", "]", "+=", "'Node is set to authorize: {0}\\n'", ".", "format", "(", "node", ")", "if", "not", "auth_required", ":", "return", "ret", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'result'", "]", "=", "None", "return", "ret", "if", "not", "isinstance", "(", "extra_args", ",", "(", "list", ",", "tuple", ")", ")", ":", "extra_args", "=", "[", "]", "if", "'--force'", "not", "in", "extra_args", ":", "extra_args", "+=", "[", "'--force'", "]", "authorize", "=", "__salt__", "[", "'pcs.auth'", "]", "(", "nodes", "=", "nodes", ",", "pcsuser", "=", "pcsuser", ",", "pcspasswd", "=", "pcspasswd", ",", "extra_args", "=", "extra_args", ")", "log", ".", "trace", "(", "'Output of pcs.auth: %s'", ",", "authorize", ")", "authorize_dict", "=", "{", "}", "for", "line", "in", "authorize", "[", "'stdout'", "]", ".", "splitlines", "(", ")", ":", "node", "=", "line", ".", "split", "(", "':'", ")", "[", "0", "]", ".", "strip", "(", ")", "auth_state", "=", "line", ".", "split", "(", "':'", ")", "[", "1", "]", ".", "strip", "(", ")", "if", "node", "in", "nodes", ":", "authorize_dict", ".", "update", "(", "{", "node", ":", "auth_state", "}", ")", "log", ".", "trace", "(", "'authorize_dict: %s'", ",", "authorize_dict", ")", "for", "node", "in", "nodes", ":", "if", "node", "in", "authorize_dict", "and", "authorize_dict", "[", "node", "]", "==", "'Authorized'", ":", "ret", "[", "'comment'", "]", "+=", "'Authorized {0}\\n'", ".", "format", "(", "node", ")", "ret", "[", "'changes'", "]", ".", "update", "(", "{", "node", ":", "{", "'old'", ":", "''", ",", "'new'", ":", "'Authorized'", "}", "}", ")", "else", ":", "ret", "[", "'result'", "]", "=", "False", "if", "node", "in", "authorized_dict", ":", "ret", "[", "'comment'", "]", "+=", "'Authorization check for node {0} returned: {1}\\n'", ".", "format", "(", "node", ",", "authorized_dict", "[", "node", "]", ")", "if", "node", "in", "authorize_dict", ":", "ret", "[", "'comment'", "]", "+=", "'Failed to authorize {0} with error {1}\\n'", ".", "format", "(", "node", ",", "authorize_dict", "[", "node", "]", ")", "return", "ret" ]
Ensure all nodes are authorized to the cluster name Irrelevant, not used (recommended: pcs_auth__auth) nodes a list of nodes which should be authorized to the cluster pcsuser user for communication with pcs (default: hacluster) pcspasswd password for pcsuser (default: hacluster) extra_args list of extra args for the \'pcs cluster auth\' command Example: .. code-block:: yaml pcs_auth__auth: pcs.auth: - nodes: - node1.example.com - node2.example.com - pcsuser: hacluster - pcspasswd: hoonetorg - extra_args: []
[ "Ensure", "all", "nodes", "are", "authorized", "to", "the", "cluster" ]
python
train
34.411765
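Besides the SLS example in the docstring, the state function can be exercised directly for testing if the Salt dunders it expects are stubbed in; a hedged harness covering only the "already authorized" path is sketched below.

from salt.states import pcs

pcs.__opts__ = {"test": False}
pcs.__salt__ = {
    "pcs.is_auth": lambda nodes: {
        "stdout": "\n".join("{0}: Already authorized".format(n) for n in nodes)
    },
}
ret = pcs.auth("pcs_auth__auth",
               nodes=["node1.example.com", "node2.example.com"],
               pcsuser="hacluster", pcspasswd="hoonetorg")
print(ret["result"], ret["comment"])   # True, one "already authorized" line per node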
zhanglab/psamm
psamm/balancecheck.py
https://github.com/zhanglab/psamm/blob/dc427848c4f9d109ca590f0afa024c63b685b3f4/psamm/balancecheck.py#L31-L43
def reaction_charge(reaction, compound_charge): """Calculate the overall charge for the specified reaction. Args: reaction: :class:`psamm.reaction.Reaction`. compound_charge: a map from each compound to charge values. """ charge_sum = 0.0 for compound, value in reaction.compounds: charge = compound_charge.get(compound.name, float('nan')) charge_sum += charge * float(value) return charge_sum
[ "def", "reaction_charge", "(", "reaction", ",", "compound_charge", ")", ":", "charge_sum", "=", "0.0", "for", "compound", ",", "value", "in", "reaction", ".", "compounds", ":", "charge", "=", "compound_charge", ".", "get", "(", "compound", ".", "name", ",", "float", "(", "'nan'", ")", ")", "charge_sum", "+=", "charge", "*", "float", "(", "value", ")", "return", "charge_sum" ]
Calculate the overall charge for the specified reaction. Args: reaction: :class:`psamm.reaction.Reaction`. compound_charge: a map from each compound to charge values.
[ "Calculate", "the", "overall", "charge", "for", "the", "specified", "reaction", "." ]
python
train
33.769231
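A usage sketch relying only on the function's documented contract: reaction.compounds must yield (compound, stoichiometry) pairs whose compounds expose a name, and compound_charge maps those names to charges. Tiny stand-ins are used instead of real psamm Reaction objects to keep the example self-contained.

from collections import namedtuple
from psamm.balancecheck import reaction_charge

Compound = namedtuple("Compound", ["name"])

class FakeReaction:
    # signed stoichiometries: negative for consumed, positive for produced
    compounds = [(Compound("A"), -2), (Compound("B"), -1), (Compound("C"), 1)]

charges = {"A": -1.0, "B": 2.0, "C": 0.0}
print(reaction_charge(FakeReaction(), charges))   # (-2)*(-1) + (-1)*2 + 1*0 = 0.0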
camptocamp/Studio
studio/lib/helpers.py
https://github.com/camptocamp/Studio/blob/43cb7298434fb606b15136801b79b03571a2f27e/studio/lib/helpers.py#L39-L44
def gen_mapname(): """ Generate a unique mapfile pathname. """ filepath = None while (filepath is None) or (os.path.exists(os.path.join(config['mapfiles_dir'], filepath))): filepath = '%s.map' % _gen_string() return filepath
[ "def", "gen_mapname", "(", ")", ":", "filepath", "=", "None", "while", "(", "filepath", "is", "None", ")", "or", "(", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "config", "[", "'mapfiles_dir'", "]", ",", "filepath", ")", ")", ")", ":", "filepath", "=", "'%s.map'", "%", "_gen_string", "(", ")", "return", "filepath" ]
Generate a unique mapfile pathname.
[ "Generate", "a", "unique", "mapfile", "pathname", "." ]
python
train
40.166667
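A hedged sketch: inside a configured Studio (Pylons) application, where config['mapfiles_dir'] points at the mapfile directory, the helper can be called directly to obtain a collision-free name.

from studio.lib.helpers import gen_mapname

name = gen_mapname()           # random stem + ".map", guaranteed not to exist yet
print(name.endswith(".map"))   # True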
RI-imaging/ODTbrain
odtbrain/_alg3d_bpp.py
https://github.com/RI-imaging/ODTbrain/blob/abbab8b790f10c0c7aea8d858d7d60f2fdd7161e/odtbrain/_alg3d_bpp.py#L84-L547
def backpropagate_3d(uSin, angles, res, nm, lD=0, coords=None, weight_angles=True, onlyreal=False, padding=(True, True), padfac=1.75, padval=None, intp_order=2, dtype=None, num_cores=ncores, save_memory=False, copy=True, count=None, max_count=None, verbose=0): r"""3D backpropagation Three-dimensional diffraction tomography reconstruction algorithm for scattering of a plane wave :math:`u_0(\mathbf{r}) = u_0(x,y,z)` by a dielectric object with refractive index :math:`n(x,y,z)`. This method implements the 3D backpropagation algorithm :cite:`Mueller2015arxiv`. .. math:: f(\mathbf{r}) = -\frac{i k_\mathrm{m}}{2\pi} \sum_{j=1}^{N} \! \Delta \phi_0 D_{-\phi_j} \!\! \left \{ \text{FFT}^{-1}_{\mathrm{2D}} \left \{ \left| k_\mathrm{Dx} \right| \frac{\text{FFT}_{\mathrm{2D}} \left \{ u_{\mathrm{B},\phi_j}(x_\mathrm{D}, y_\mathrm{D}) \right \}} {u_0(l_\mathrm{D})} \exp \! \left[i k_\mathrm{m}(M - 1) \cdot (z_{\phi_j}-l_\mathrm{D}) \right] \right \} \right \} with the forward :math:`\text{FFT}_{\mathrm{2D}}` and inverse :math:`\text{FFT}^{-1}_{\mathrm{2D}}` 2D fast Fourier transform, the rotational operator :math:`D_{-\phi_j}`, the angular distance between the projections :math:`\Delta \phi_0`, the ramp filter in Fourier space :math:`|k_\mathrm{Dx}|`, and the propagation distance :math:`(z_{\phi_j}-l_\mathrm{D})`. Parameters ---------- uSin: (A, Ny, Nx) ndarray Three-dimensional sinogram of plane recordings :math:`u_{\mathrm{B}, \phi_j}(x_\mathrm{D}, y_\mathrm{D})` divided by the incident plane wave :math:`u_0(l_\mathrm{D})` measured at the detector. angles: (A,) ndarray Angular positions :math:`\phi_j` of `uSin` in radians. res: float Vacuum wavelength of the light :math:`\lambda` in pixels. nm: float Refractive index of the surrounding medium :math:`n_\mathrm{m}`. lD: float Distance from center of rotation to detector plane :math:`l_\mathrm{D}` in pixels. coords: None [(3, M) ndarray] Only compute the output image at these coordinates. This keyword is reserved for future versions and is not implemented yet. weight_angles: bool If `True`, weights each backpropagated projection with a factor proportional to the angular distance between the neighboring projections. .. math:: \Delta \phi_0 \longmapsto \Delta \phi_j = \frac{\phi_{j+1} - \phi_{j-1}}{2} .. versionadded:: 0.1.1 onlyreal: bool If `True`, only the real part of the reconstructed image will be returned. This saves computation time. padding: tuple of bool Pad the input data to the second next power of 2 before Fourier transforming. This reduces artifacts and speeds up the process for input image sizes that are not powers of 2. The default is padding in x and y: `padding=(True, True)`. For padding only in x-direction (e.g. for cylindrical symmetries), set `padding` to `(True, False)`. To turn off padding, set it to `(False, False)`. padfac: float Increase padding size of the input data. A value greater than one will trigger padding to the second-next power of two. For example, a value of 1.75 will lead to a padded size of 256 for an initial size of 144, whereas it will lead to a padded size of 512 for an initial size of 150. Values geater than 2 are allowed. This parameter may greatly increase memory usage! padval: float The value used for padding. This is important for the Rytov approximation, where an approximat zero in the phase might translate to 2πi due to the unwrapping algorithm. In that case, this value should be a multiple of 2πi. If `padval` is `None`, then the edge values are used for padding (see documentation of :func:`numpy.pad`). 
intp_order: int between 0 and 5 Order of the interpolation for rotation. See :func:`scipy.ndimage.interpolation.rotate` for details. dtype: dtype object or argument for :func:`numpy.dtype` The data type that is used for calculations (float or double). Defaults to `numpy.float_`. num_cores: int The number of cores to use for parallel operations. This value defaults to the number of cores on the system. save_memory: bool Saves memory at the cost of longer computation time. .. versionadded:: 0.1.5 copy: bool Copy input sinogram `uSin` for data processing. If `copy` is set to `False`, then `uSin` will be overridden. .. versionadded:: 0.1.5 count, max_count: multiprocessing.Value or `None` Can be used to monitor the progress of the algorithm. Initially, the value of `max_count.value` is incremented by the total number of steps. At each step, the value of `count.value` is incremented. verbose: int Increment to increase verbosity. Returns ------- f: ndarray of shape (Nx, Ny, Nx), complex if `onlyreal==False` Reconstructed object function :math:`f(\mathbf{r})` as defined by the Helmholtz equation. :math:`f(x,z) = k_m^2 \left(\left(\frac{n(x,z)}{n_m}\right)^2 -1\right)` See Also -------- odt_to_ri: conversion of the object function :math:`f(\mathbf{r})` to refractive index :math:`n(\mathbf{r})` Notes ----- Do not use the parameter `lD` in combination with the Rytov approximation - the propagation is not correctly described. Instead, numerically refocus the sinogram prior to converting it to Rytov data (using e.g. :func:`odtbrain.sinogram_as_rytov`) with a numerical focusing algorithm (available in the Python package :py:mod:`nrefocus`). """ A = angles.size if len(uSin.shape) != 3: raise ValueError("Input data `uSin` must have shape (A,Ny,Nx).") if len(uSin) != A: raise ValueError("`len(angles)` must be equal to `len(uSin)`.") if len(list(padding)) != 2: raise ValueError("`padding` must be boolean tuple of length 2!") if np.array(padding).dtype is not np.dtype(bool): raise ValueError("Parameter `padding` must be boolean tuple.") if coords is not None: raise NotImplementedError("Setting coordinates is not yet supported.") if num_cores > ncores: raise ValueError("`num_cores` must not exceed number " + "of physical cores: {}".format(ncores)) # setup dtype if dtype is None: dtype = np.float_ dtype = np.dtype(dtype) if dtype.name not in ["float32", "float64"]: raise ValueError("dtype must be float32 or float64!") dtype_complex = np.dtype("complex{}".format( 2 * np.int(dtype.name.strip("float")))) # set ctype ct_dt_map = {np.dtype(np.float32): ctypes.c_float, np.dtype(np.float64): ctypes.c_double } # progress if max_count is not None: max_count.value += A + 2 ne.set_num_threads(num_cores) uSin = np.array(uSin, copy=copy) # lengths of the input data lny, lnx = uSin.shape[1], uSin.shape[2] # The z-size of the output array must match the x-size. # The rotation is performed about the y-axis (lny). ln = lnx # We perform zero-padding before performing the Fourier transform. # This gets rid of artifacts due to false periodicity and also # speeds up Fourier transforms of the input image size is not # a power of 2. orderx = np.int(max(64., 2**np.ceil(np.log(lnx * padfac) / np.log(2)))) ordery = np.int(max(64., 2**np.ceil(np.log(lny * padfac) / np.log(2)))) if padding[0]: padx = orderx - lnx else: padx = 0 if padding[1]: pady = ordery - lny else: pady = 0 padyl = np.int(np.ceil(pady / 2)) padyr = pady - padyl padxl = np.int(np.ceil(padx / 2)) padxr = padx - padxl # zero-padded length of sinogram. 
lNx, lNy = lnx + padx, lny + pady lNz = ln if verbose > 0: print("......Image size (x,y): {}x{}, padded: {}x{}".format( lnx, lny, lNx, lNy)) # Perform weighting if weight_angles: weights = util.compute_angle_weights_1d(angles).reshape(-1, 1, 1) uSin *= weights # Cut-Off frequency # km [1/px] km = (2 * np.pi * nm) / res # Here, the notation for # a wave propagating to the right is: # # u0(x) = exp(ikx) # # However, in physics usually we use the other sign convention: # # u0(x) = exp(-ikx) # # In order to be consistent with programs like Meep or our # scattering script for a dielectric cylinder, we want to use the # latter sign convention. # This is not a big problem. We only need to multiply the imaginary # part of the scattered wave by -1. # Ask for the filter. Do not include zero (first element). # # Integrals over ϕ₀ [0,2π]; kx [-kₘ,kₘ] # - double coverage factor 1/2 already included # - unitary angular frequency to unitary ordinary frequency # conversion performed in calculation of UB=FT(uB). # # f(r) = -i kₘ / ((2π)² a₀) (prefactor) # * iiint dϕ₀ dkx dky (prefactor) # * |kx| (prefactor) # * exp(-i kₘ M lD ) (prefactor) # * UBϕ₀(kx) (dependent on ϕ₀) # * exp( i (kx t⊥ + kₘ (M - 1) s₀) r ) (dependent on ϕ₀ and r) # (r and s₀ are vectors. The last term contains a dot-product) # # kₘM = sqrt( kₘ² - kx² - ky² ) # t⊥ = ( cos(ϕ₀), ky/kx, sin(ϕ₀) ) # s₀ = ( -sin(ϕ₀), 0 , cos(ϕ₀) ) # # The filter can be split into two parts # # 1) part without dependence on the z-coordinate # # -i kₘ / ((2π)² a₀) # * iiint dϕ₀ dkx dky # * |kx| # * exp(-i kₘ M lD ) # # 2) part with dependence of the z-coordinate # # exp( i (kx t⊥ + kₘ (M - 1) s₀) r ) # # The filter (1) can be performed using the classical filter process # as in the backprojection algorithm. # # # Corresponding sample frequencies fx = np.fft.fftfreq(lNx) # 1D array fy = np.fft.fftfreq(lNy) # 1D array # kx is a 1D array. kx = 2 * np.pi * fx ky = 2 * np.pi * fy # Differentials for integral dphi0 = 2 * np.pi / A # We will later multiply with phi0. # y, x kx = kx.reshape(1, -1) ky = ky.reshape(-1, 1) # Low-pass filter: # less-than-or-equal would give us zero division error. filter_klp = (kx**2 + ky**2 < km**2) # Filter M so there are no nans from the root M = 1. / km * np.sqrt((km**2 - kx**2 - ky**2) * filter_klp) prefactor = -1j * km / (2 * np.pi) prefactor *= dphi0 # Also filter the prefactor, so nothing outside the required # low-pass contributes to the sum. prefactor *= np.abs(kx) * filter_klp # prefactor *= np.sqrt(((kx**2+ky**2)) * filter_klp ) # new in version 0.1.4: # We multiply by the factor (M-1) instead of just (M) # to take into account that we have a scattered # wave that is normalized by u0. 
prefactor *= np.exp(-1j * km * (M-1) * lD) if count is not None: count.value += 1 # filter (2) must be applied before rotation as well # exp( i (kx t⊥ + kₘ (M - 1) s₀) r ) # # kₘM = sqrt( kₘ² - kx² - ky² ) # t⊥ = ( cos(ϕ₀), ky/kx, sin(ϕ₀) ) # s₀ = ( -sin(ϕ₀), 0 , cos(ϕ₀) ) # # This filter is effectively an inverse Fourier transform # # exp(i kx xD) exp(i ky yD) exp(i kₘ (M - 1) zD ) # # xD = x cos(ϕ₀) + z sin(ϕ₀) # zD = - x sin(ϕ₀) + z cos(ϕ₀) # Everything is in pixels center = lNz / 2.0 z = np.linspace(-center, center, lNz, endpoint=False) zv = z.reshape(-1, 1, 1) # z, y, x Mp = M.reshape(lNy, lNx) # filter2 = np.exp(1j * zv * km * (Mp - 1)) f2_exp_fac = 1j * km * (Mp - 1) if save_memory: # compute filter2 later pass else: # compute filter2 now filter2 = ne.evaluate("exp(factor * zv)", local_dict={"factor": f2_exp_fac, "zv": zv}) # occupies some amount of ram, but yields faster # computation later if count is not None: count.value += 1 # Prepare complex output image if onlyreal: outarr = np.zeros((ln, lny, lnx), dtype=dtype) else: outarr = np.zeros((ln, lny, lnx), dtype=dtype_complex) # Create plan for FFTW # save memory by in-place operations # projection = np.fft.fft2(sino, axes=(-1,-2)) * prefactor # FFTW-flag is "estimate": # specifies that, instead of actual measurements of different # algorithms, a simple heuristic is used to pick a (probably # sub-optimal) plan quickly. With this flag, the input/output # arrays are not overwritten during planning. # Byte-aligned arrays oneslice = pyfftw.empty_aligned((lNy, lNx), dtype_complex) myfftw_plan = pyfftw.FFTW(oneslice, oneslice, threads=num_cores, flags=["FFTW_ESTIMATE"], axes=(0, 1)) # Create plan for IFFTW: inarr = pyfftw.empty_aligned((lNy, lNx), dtype_complex) # inarr[:] = (projection[0]*filter2)[0,:,:] # plan is "patient": # FFTW_PATIENT is like FFTW_MEASURE, but considers a wider range # of algorithms and often produces a “more optimal” plan # (especially for large transforms), but at the expense of # several times longer planning time (especially for large # transforms). # print(inarr.flags) myifftw_plan = pyfftw.FFTW(inarr, inarr, threads=num_cores, axes=(0, 1), direction="FFTW_BACKWARD", flags=["FFTW_MEASURE"]) # Setup a shared array shared_array = mp.RawArray(ct_dt_map[dtype], ln * lny * lnx) arr = np.frombuffer(shared_array, dtype=dtype).reshape(ln, lny, lnx) # Initialize the pool with the shared array pool4loop = mp.Pool(processes=num_cores, initializer=_init_worker, initargs=(shared_array, (ln, lny, lnx), dtype)) # filtered projections in loop filtered_proj = np.zeros((ln, lny, lnx), dtype=dtype_complex) for aa in np.arange(A): if padval is None: oneslice[:] = np.pad(uSin[aa], ((padyl, padyr), (padxl, padxr)), mode="edge") else: oneslice[:] = np.pad(uSin[aa], ((padyl, padyr), (padxl, padxr)), mode="linear_ramp", end_values=(padval,)) myfftw_plan.execute() # normalize to (lNx * lNy) for FFTW and multiply with prefactor oneslice *= prefactor / (lNx * lNy) # 14x Speedup with fftw3 compared to numpy fft and # memory reduction by a factor of 2! 
# ifft will be computed in-place for p in range(len(zv)): if save_memory: # compute filter2 here; # this is comparatively slower than the other case ne.evaluate("exp(factor * zvp) * projectioni", local_dict={"zvp": zv[p], "projectioni": oneslice, "factor": f2_exp_fac}, out=inarr) else: # use universal functions np.multiply(filter2[p], oneslice, out=inarr) myifftw_plan.execute() filtered_proj[p, :, :] = inarr[padyl:lny+padyl, padxl:lnx+padxl] # resize image to original size # The copy is necessary to prevent memory leakage. arr[:] = filtered_proj.real phi0 = np.rad2deg(angles[aa]) if not onlyreal: filtered_proj_imag = filtered_proj.imag _mprotate(phi0, lny, pool4loop, intp_order) outarr.real += arr if not onlyreal: arr[:] = filtered_proj_imag _mprotate(phi0, lny, pool4loop, intp_order) outarr.imag += arr if count is not None: count.value += 1 pool4loop.terminate() pool4loop.join() _cleanup_worker() return outarr
[ "def", "backpropagate_3d", "(", "uSin", ",", "angles", ",", "res", ",", "nm", ",", "lD", "=", "0", ",", "coords", "=", "None", ",", "weight_angles", "=", "True", ",", "onlyreal", "=", "False", ",", "padding", "=", "(", "True", ",", "True", ")", ",", "padfac", "=", "1.75", ",", "padval", "=", "None", ",", "intp_order", "=", "2", ",", "dtype", "=", "None", ",", "num_cores", "=", "ncores", ",", "save_memory", "=", "False", ",", "copy", "=", "True", ",", "count", "=", "None", ",", "max_count", "=", "None", ",", "verbose", "=", "0", ")", ":", "A", "=", "angles", ".", "size", "if", "len", "(", "uSin", ".", "shape", ")", "!=", "3", ":", "raise", "ValueError", "(", "\"Input data `uSin` must have shape (A,Ny,Nx).\"", ")", "if", "len", "(", "uSin", ")", "!=", "A", ":", "raise", "ValueError", "(", "\"`len(angles)` must be equal to `len(uSin)`.\"", ")", "if", "len", "(", "list", "(", "padding", ")", ")", "!=", "2", ":", "raise", "ValueError", "(", "\"`padding` must be boolean tuple of length 2!\"", ")", "if", "np", ".", "array", "(", "padding", ")", ".", "dtype", "is", "not", "np", ".", "dtype", "(", "bool", ")", ":", "raise", "ValueError", "(", "\"Parameter `padding` must be boolean tuple.\"", ")", "if", "coords", "is", "not", "None", ":", "raise", "NotImplementedError", "(", "\"Setting coordinates is not yet supported.\"", ")", "if", "num_cores", ">", "ncores", ":", "raise", "ValueError", "(", "\"`num_cores` must not exceed number \"", "+", "\"of physical cores: {}\"", ".", "format", "(", "ncores", ")", ")", "# setup dtype", "if", "dtype", "is", "None", ":", "dtype", "=", "np", ".", "float_", "dtype", "=", "np", ".", "dtype", "(", "dtype", ")", "if", "dtype", ".", "name", "not", "in", "[", "\"float32\"", ",", "\"float64\"", "]", ":", "raise", "ValueError", "(", "\"dtype must be float32 or float64!\"", ")", "dtype_complex", "=", "np", ".", "dtype", "(", "\"complex{}\"", ".", "format", "(", "2", "*", "np", ".", "int", "(", "dtype", ".", "name", ".", "strip", "(", "\"float\"", ")", ")", ")", ")", "# set ctype", "ct_dt_map", "=", "{", "np", ".", "dtype", "(", "np", ".", "float32", ")", ":", "ctypes", ".", "c_float", ",", "np", ".", "dtype", "(", "np", ".", "float64", ")", ":", "ctypes", ".", "c_double", "}", "# progress", "if", "max_count", "is", "not", "None", ":", "max_count", ".", "value", "+=", "A", "+", "2", "ne", ".", "set_num_threads", "(", "num_cores", ")", "uSin", "=", "np", ".", "array", "(", "uSin", ",", "copy", "=", "copy", ")", "# lengths of the input data", "lny", ",", "lnx", "=", "uSin", ".", "shape", "[", "1", "]", ",", "uSin", ".", "shape", "[", "2", "]", "# The z-size of the output array must match the x-size.", "# The rotation is performed about the y-axis (lny).", "ln", "=", "lnx", "# We perform zero-padding before performing the Fourier transform.", "# This gets rid of artifacts due to false periodicity and also", "# speeds up Fourier transforms of the input image size is not", "# a power of 2.", "orderx", "=", "np", ".", "int", "(", "max", "(", "64.", ",", "2", "**", "np", ".", "ceil", "(", "np", ".", "log", "(", "lnx", "*", "padfac", ")", "/", "np", ".", "log", "(", "2", ")", ")", ")", ")", "ordery", "=", "np", ".", "int", "(", "max", "(", "64.", ",", "2", "**", "np", ".", "ceil", "(", "np", ".", "log", "(", "lny", "*", "padfac", ")", "/", "np", ".", "log", "(", "2", ")", ")", ")", ")", "if", "padding", "[", "0", "]", ":", "padx", "=", "orderx", "-", "lnx", "else", ":", "padx", "=", "0", "if", "padding", "[", "1", "]", ":", "pady", "=", "ordery", "-", "lny", "else", ":", "pady", 
"=", "0", "padyl", "=", "np", ".", "int", "(", "np", ".", "ceil", "(", "pady", "/", "2", ")", ")", "padyr", "=", "pady", "-", "padyl", "padxl", "=", "np", ".", "int", "(", "np", ".", "ceil", "(", "padx", "/", "2", ")", ")", "padxr", "=", "padx", "-", "padxl", "# zero-padded length of sinogram.", "lNx", ",", "lNy", "=", "lnx", "+", "padx", ",", "lny", "+", "pady", "lNz", "=", "ln", "if", "verbose", ">", "0", ":", "print", "(", "\"......Image size (x,y): {}x{}, padded: {}x{}\"", ".", "format", "(", "lnx", ",", "lny", ",", "lNx", ",", "lNy", ")", ")", "# Perform weighting", "if", "weight_angles", ":", "weights", "=", "util", ".", "compute_angle_weights_1d", "(", "angles", ")", ".", "reshape", "(", "-", "1", ",", "1", ",", "1", ")", "uSin", "*=", "weights", "# Cut-Off frequency", "# km [1/px]", "km", "=", "(", "2", "*", "np", ".", "pi", "*", "nm", ")", "/", "res", "# Here, the notation for", "# a wave propagating to the right is:", "#", "# u0(x) = exp(ikx)", "#", "# However, in physics usually we use the other sign convention:", "#", "# u0(x) = exp(-ikx)", "#", "# In order to be consistent with programs like Meep or our", "# scattering script for a dielectric cylinder, we want to use the", "# latter sign convention.", "# This is not a big problem. We only need to multiply the imaginary", "# part of the scattered wave by -1.", "# Ask for the filter. Do not include zero (first element).", "#", "# Integrals over ϕ₀ [0,2π]; kx [-kₘ,kₘ]", "# - double coverage factor 1/2 already included", "# - unitary angular frequency to unitary ordinary frequency", "# conversion performed in calculation of UB=FT(uB).", "#", "# f(r) = -i kₘ / ((2π)² a₀) (prefactor)", "# * iiint dϕ₀ dkx dky (prefactor)", "# * |kx| (prefactor)", "# * exp(-i kₘ M lD ) (prefactor)", "# * UBϕ₀(kx) (dependent on ϕ₀)", "# * exp( i (kx t⊥ + kₘ (M - 1) s₀) r ) (dependent on ϕ₀ and r)", "# (r and s₀ are vectors. 
The last term contains a dot-product)", "#", "# kₘM = sqrt( kₘ² - kx² - ky² )", "# t⊥ = ( cos(ϕ₀), ky/kx, sin(ϕ₀) )", "# s₀ = ( -sin(ϕ₀), 0 , cos(ϕ₀) )", "#", "# The filter can be split into two parts", "#", "# 1) part without dependence on the z-coordinate", "#", "# -i kₘ / ((2π)² a₀)", "# * iiint dϕ₀ dkx dky", "# * |kx|", "# * exp(-i kₘ M lD )", "#", "# 2) part with dependence of the z-coordinate", "#", "# exp( i (kx t⊥ + kₘ (M - 1) s₀) r )", "#", "# The filter (1) can be performed using the classical filter process", "# as in the backprojection algorithm.", "#", "#", "# Corresponding sample frequencies", "fx", "=", "np", ".", "fft", ".", "fftfreq", "(", "lNx", ")", "# 1D array", "fy", "=", "np", ".", "fft", ".", "fftfreq", "(", "lNy", ")", "# 1D array", "# kx is a 1D array.", "kx", "=", "2", "*", "np", ".", "pi", "*", "fx", "ky", "=", "2", "*", "np", ".", "pi", "*", "fy", "# Differentials for integral", "dphi0", "=", "2", "*", "np", ".", "pi", "/", "A", "# We will later multiply with phi0.", "# y, x", "kx", "=", "kx", ".", "reshape", "(", "1", ",", "-", "1", ")", "ky", "=", "ky", ".", "reshape", "(", "-", "1", ",", "1", ")", "# Low-pass filter:", "# less-than-or-equal would give us zero division error.", "filter_klp", "=", "(", "kx", "**", "2", "+", "ky", "**", "2", "<", "km", "**", "2", ")", "# Filter M so there are no nans from the root", "M", "=", "1.", "/", "km", "*", "np", ".", "sqrt", "(", "(", "km", "**", "2", "-", "kx", "**", "2", "-", "ky", "**", "2", ")", "*", "filter_klp", ")", "prefactor", "=", "-", "1j", "*", "km", "/", "(", "2", "*", "np", ".", "pi", ")", "prefactor", "*=", "dphi0", "# Also filter the prefactor, so nothing outside the required", "# low-pass contributes to the sum.", "prefactor", "*=", "np", ".", "abs", "(", "kx", ")", "*", "filter_klp", "# prefactor *= np.sqrt(((kx**2+ky**2)) * filter_klp )", "# new in version 0.1.4:", "# We multiply by the factor (M-1) instead of just (M)", "# to take into account that we have a scattered", "# wave that is normalized by u0.", "prefactor", "*=", "np", ".", "exp", "(", "-", "1j", "*", "km", "*", "(", "M", "-", "1", ")", "*", "lD", ")", "if", "count", "is", "not", "None", ":", "count", ".", "value", "+=", "1", "# filter (2) must be applied before rotation as well", "# exp( i (kx t⊥ + kₘ (M - 1) s₀) r )", "#", "# kₘM = sqrt( kₘ² - kx² - ky² )", "# t⊥ = ( cos(ϕ₀), ky/kx, sin(ϕ₀) )", "# s₀ = ( -sin(ϕ₀), 0 , cos(ϕ₀) )", "#", "# This filter is effectively an inverse Fourier transform", "#", "# exp(i kx xD) exp(i ky yD) exp(i kₘ (M - 1) zD )", "#", "# xD = x cos(ϕ₀) + z sin(ϕ₀)", "# zD = - x sin(ϕ₀) + z cos(ϕ₀)", "# Everything is in pixels", "center", "=", "lNz", "/", "2.0", "z", "=", "np", ".", "linspace", "(", "-", "center", ",", "center", ",", "lNz", ",", "endpoint", "=", "False", ")", "zv", "=", "z", ".", "reshape", "(", "-", "1", ",", "1", ",", "1", ")", "# z, y, x", "Mp", "=", "M", ".", "reshape", "(", "lNy", ",", "lNx", ")", "# filter2 = np.exp(1j * zv * km * (Mp - 1))", "f2_exp_fac", "=", "1j", "*", "km", "*", "(", "Mp", "-", "1", ")", "if", "save_memory", ":", "# compute filter2 later", "pass", "else", ":", "# compute filter2 now", "filter2", "=", "ne", ".", "evaluate", "(", "\"exp(factor * zv)\"", ",", "local_dict", "=", "{", "\"factor\"", ":", "f2_exp_fac", ",", "\"zv\"", ":", "zv", "}", ")", "# occupies some amount of ram, but yields faster", "# computation later", "if", "count", "is", "not", "None", ":", "count", ".", "value", "+=", "1", "# Prepare complex output image", "if", "onlyreal", ":", "outarr", "=", "np", 
".", "zeros", "(", "(", "ln", ",", "lny", ",", "lnx", ")", ",", "dtype", "=", "dtype", ")", "else", ":", "outarr", "=", "np", ".", "zeros", "(", "(", "ln", ",", "lny", ",", "lnx", ")", ",", "dtype", "=", "dtype_complex", ")", "# Create plan for FFTW", "# save memory by in-place operations", "# projection = np.fft.fft2(sino, axes=(-1,-2)) * prefactor", "# FFTW-flag is \"estimate\":", "# specifies that, instead of actual measurements of different", "# algorithms, a simple heuristic is used to pick a (probably", "# sub-optimal) plan quickly. With this flag, the input/output", "# arrays are not overwritten during planning.", "# Byte-aligned arrays", "oneslice", "=", "pyfftw", ".", "empty_aligned", "(", "(", "lNy", ",", "lNx", ")", ",", "dtype_complex", ")", "myfftw_plan", "=", "pyfftw", ".", "FFTW", "(", "oneslice", ",", "oneslice", ",", "threads", "=", "num_cores", ",", "flags", "=", "[", "\"FFTW_ESTIMATE\"", "]", ",", "axes", "=", "(", "0", ",", "1", ")", ")", "# Create plan for IFFTW:", "inarr", "=", "pyfftw", ".", "empty_aligned", "(", "(", "lNy", ",", "lNx", ")", ",", "dtype_complex", ")", "# inarr[:] = (projection[0]*filter2)[0,:,:]", "# plan is \"patient\":", "# FFTW_PATIENT is like FFTW_MEASURE, but considers a wider range", "# of algorithms and often produces a “more optimal” plan", "# (especially for large transforms), but at the expense of", "# several times longer planning time (especially for large", "# transforms).", "# print(inarr.flags)", "myifftw_plan", "=", "pyfftw", ".", "FFTW", "(", "inarr", ",", "inarr", ",", "threads", "=", "num_cores", ",", "axes", "=", "(", "0", ",", "1", ")", ",", "direction", "=", "\"FFTW_BACKWARD\"", ",", "flags", "=", "[", "\"FFTW_MEASURE\"", "]", ")", "# Setup a shared array", "shared_array", "=", "mp", ".", "RawArray", "(", "ct_dt_map", "[", "dtype", "]", ",", "ln", "*", "lny", "*", "lnx", ")", "arr", "=", "np", ".", "frombuffer", "(", "shared_array", ",", "dtype", "=", "dtype", ")", ".", "reshape", "(", "ln", ",", "lny", ",", "lnx", ")", "# Initialize the pool with the shared array", "pool4loop", "=", "mp", ".", "Pool", "(", "processes", "=", "num_cores", ",", "initializer", "=", "_init_worker", ",", "initargs", "=", "(", "shared_array", ",", "(", "ln", ",", "lny", ",", "lnx", ")", ",", "dtype", ")", ")", "# filtered projections in loop", "filtered_proj", "=", "np", ".", "zeros", "(", "(", "ln", ",", "lny", ",", "lnx", ")", ",", "dtype", "=", "dtype_complex", ")", "for", "aa", "in", "np", ".", "arange", "(", "A", ")", ":", "if", "padval", "is", "None", ":", "oneslice", "[", ":", "]", "=", "np", ".", "pad", "(", "uSin", "[", "aa", "]", ",", "(", "(", "padyl", ",", "padyr", ")", ",", "(", "padxl", ",", "padxr", ")", ")", ",", "mode", "=", "\"edge\"", ")", "else", ":", "oneslice", "[", ":", "]", "=", "np", ".", "pad", "(", "uSin", "[", "aa", "]", ",", "(", "(", "padyl", ",", "padyr", ")", ",", "(", "padxl", ",", "padxr", ")", ")", ",", "mode", "=", "\"linear_ramp\"", ",", "end_values", "=", "(", "padval", ",", ")", ")", "myfftw_plan", ".", "execute", "(", ")", "# normalize to (lNx * lNy) for FFTW and multiply with prefactor", "oneslice", "*=", "prefactor", "/", "(", "lNx", "*", "lNy", ")", "# 14x Speedup with fftw3 compared to numpy fft and", "# memory reduction by a factor of 2!", "# ifft will be computed in-place", "for", "p", "in", "range", "(", "len", "(", "zv", ")", ")", ":", "if", "save_memory", ":", "# compute filter2 here;", "# this is comparatively slower than the other case", "ne", ".", "evaluate", "(", "\"exp(factor * zvp) * 
projectioni\"", ",", "local_dict", "=", "{", "\"zvp\"", ":", "zv", "[", "p", "]", ",", "\"projectioni\"", ":", "oneslice", ",", "\"factor\"", ":", "f2_exp_fac", "}", ",", "out", "=", "inarr", ")", "else", ":", "# use universal functions", "np", ".", "multiply", "(", "filter2", "[", "p", "]", ",", "oneslice", ",", "out", "=", "inarr", ")", "myifftw_plan", ".", "execute", "(", ")", "filtered_proj", "[", "p", ",", ":", ",", ":", "]", "=", "inarr", "[", "padyl", ":", "lny", "+", "padyl", ",", "padxl", ":", "lnx", "+", "padxl", "]", "# resize image to original size", "# The copy is necessary to prevent memory leakage.", "arr", "[", ":", "]", "=", "filtered_proj", ".", "real", "phi0", "=", "np", ".", "rad2deg", "(", "angles", "[", "aa", "]", ")", "if", "not", "onlyreal", ":", "filtered_proj_imag", "=", "filtered_proj", ".", "imag", "_mprotate", "(", "phi0", ",", "lny", ",", "pool4loop", ",", "intp_order", ")", "outarr", ".", "real", "+=", "arr", "if", "not", "onlyreal", ":", "arr", "[", ":", "]", "=", "filtered_proj_imag", "_mprotate", "(", "phi0", ",", "lny", ",", "pool4loop", ",", "intp_order", ")", "outarr", ".", "imag", "+=", "arr", "if", "count", "is", "not", "None", ":", "count", ".", "value", "+=", "1", "pool4loop", ".", "terminate", "(", ")", "pool4loop", ".", "join", "(", ")", "_cleanup_worker", "(", ")", "return", "outarr" ]
r"""3D backpropagation Three-dimensional diffraction tomography reconstruction algorithm for scattering of a plane wave :math:`u_0(\mathbf{r}) = u_0(x,y,z)` by a dielectric object with refractive index :math:`n(x,y,z)`. This method implements the 3D backpropagation algorithm :cite:`Mueller2015arxiv`. .. math:: f(\mathbf{r}) = -\frac{i k_\mathrm{m}}{2\pi} \sum_{j=1}^{N} \! \Delta \phi_0 D_{-\phi_j} \!\! \left \{ \text{FFT}^{-1}_{\mathrm{2D}} \left \{ \left| k_\mathrm{Dx} \right| \frac{\text{FFT}_{\mathrm{2D}} \left \{ u_{\mathrm{B},\phi_j}(x_\mathrm{D}, y_\mathrm{D}) \right \}} {u_0(l_\mathrm{D})} \exp \! \left[i k_\mathrm{m}(M - 1) \cdot (z_{\phi_j}-l_\mathrm{D}) \right] \right \} \right \} with the forward :math:`\text{FFT}_{\mathrm{2D}}` and inverse :math:`\text{FFT}^{-1}_{\mathrm{2D}}` 2D fast Fourier transform, the rotational operator :math:`D_{-\phi_j}`, the angular distance between the projections :math:`\Delta \phi_0`, the ramp filter in Fourier space :math:`|k_\mathrm{Dx}|`, and the propagation distance :math:`(z_{\phi_j}-l_\mathrm{D})`. Parameters ---------- uSin: (A, Ny, Nx) ndarray Three-dimensional sinogram of plane recordings :math:`u_{\mathrm{B}, \phi_j}(x_\mathrm{D}, y_\mathrm{D})` divided by the incident plane wave :math:`u_0(l_\mathrm{D})` measured at the detector. angles: (A,) ndarray Angular positions :math:`\phi_j` of `uSin` in radians. res: float Vacuum wavelength of the light :math:`\lambda` in pixels. nm: float Refractive index of the surrounding medium :math:`n_\mathrm{m}`. lD: float Distance from center of rotation to detector plane :math:`l_\mathrm{D}` in pixels. coords: None [(3, M) ndarray] Only compute the output image at these coordinates. This keyword is reserved for future versions and is not implemented yet. weight_angles: bool If `True`, weights each backpropagated projection with a factor proportional to the angular distance between the neighboring projections. .. math:: \Delta \phi_0 \longmapsto \Delta \phi_j = \frac{\phi_{j+1} - \phi_{j-1}}{2} .. versionadded:: 0.1.1 onlyreal: bool If `True`, only the real part of the reconstructed image will be returned. This saves computation time. padding: tuple of bool Pad the input data to the second next power of 2 before Fourier transforming. This reduces artifacts and speeds up the process for input image sizes that are not powers of 2. The default is padding in x and y: `padding=(True, True)`. For padding only in x-direction (e.g. for cylindrical symmetries), set `padding` to `(True, False)`. To turn off padding, set it to `(False, False)`. padfac: float Increase padding size of the input data. A value greater than one will trigger padding to the second-next power of two. For example, a value of 1.75 will lead to a padded size of 256 for an initial size of 144, whereas it will lead to a padded size of 512 for an initial size of 150. Values geater than 2 are allowed. This parameter may greatly increase memory usage! padval: float The value used for padding. This is important for the Rytov approximation, where an approximat zero in the phase might translate to 2πi due to the unwrapping algorithm. In that case, this value should be a multiple of 2πi. If `padval` is `None`, then the edge values are used for padding (see documentation of :func:`numpy.pad`). intp_order: int between 0 and 5 Order of the interpolation for rotation. See :func:`scipy.ndimage.interpolation.rotate` for details. dtype: dtype object or argument for :func:`numpy.dtype` The data type that is used for calculations (float or double). 
Defaults to `numpy.float_`. num_cores: int The number of cores to use for parallel operations. This value defaults to the number of cores on the system. save_memory: bool Saves memory at the cost of longer computation time. .. versionadded:: 0.1.5 copy: bool Copy input sinogram `uSin` for data processing. If `copy` is set to `False`, then `uSin` will be overridden. .. versionadded:: 0.1.5 count, max_count: multiprocessing.Value or `None` Can be used to monitor the progress of the algorithm. Initially, the value of `max_count.value` is incremented by the total number of steps. At each step, the value of `count.value` is incremented. verbose: int Increment to increase verbosity. Returns ------- f: ndarray of shape (Nx, Ny, Nx), complex if `onlyreal==False` Reconstructed object function :math:`f(\mathbf{r})` as defined by the Helmholtz equation. :math:`f(x,z) = k_m^2 \left(\left(\frac{n(x,z)}{n_m}\right)^2 -1\right)` See Also -------- odt_to_ri: conversion of the object function :math:`f(\mathbf{r})` to refractive index :math:`n(\mathbf{r})` Notes ----- Do not use the parameter `lD` in combination with the Rytov approximation - the propagation is not correctly described. Instead, numerically refocus the sinogram prior to converting it to Rytov data (using e.g. :func:`odtbrain.sinogram_as_rytov`) with a numerical focusing algorithm (available in the Python package :py:mod:`nrefocus`).
[ "r", "3D", "backpropagation" ]
python
train
35.724138
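A brief, hedged usage sketch for the 3D backpropagation routine documented in the record above. The package and function name (`odtbrain.backpropagate_3d`) are assumptions inferred from the docstring; only the parameter names and array shapes come from the record itself.

```python
# Hedged sketch: the module/function name (odtbrain.backpropagate_3d) is an
# assumption; the parameter names (uSin, angles, res, nm, lD) and the output
# shape (Nx, Ny, Nx) are taken from the docstring in the record above.
import numpy as np
import odtbrain  # assumed package providing the documented routine

A, Ny, Nx = 60, 64, 64
uSin = np.ones((A, Ny, Nx), dtype=complex)             # placeholder sinogram u_B / u_0
angles = np.linspace(0, 2 * np.pi, A, endpoint=False)  # rotation angles in radians
f = odtbrain.backpropagate_3d(uSin, angles, res=3.5, nm=1.333, lD=0,
                              onlyreal=True, padding=(True, True))
print(f.shape)  # expected (Nx, Ny, Nx) per the docstring
```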
trec-kba/streamcorpus-pipeline
streamcorpus_pipeline/config.py
https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/config.py#L62-L96
def replace_config(config, name): '''Replace the top-level pipeline configurable object. This investigates a number of sources, including `external_stages_path` and `external_stages_modules` configuration and `streamcorpus_pipeline.stages` entry points, and uses these to find the actual :data:`sub_modules` for :mod:`streamcorpus_pipeline`. ''' global static_stages if static_stages is None: static_stages = PipelineStages() stages = static_stages if 'external_stages_path' in config: path = config['external_stages_path'] if not os.path.isabs(path) and config.get('root_path'): path = os.path.join(config['root_path'], path) try: stages.load_external_stages(config['external_stages_path']) except IOError: return streamcorpus_pipeline # let check_config re-raise this if 'external_stages_modules' in config: for mod in config['external_stages_modules']: try: stages.load_module_stages(mod) except ImportError: return streamcorpus_pipeline # let check_config re-raise this else: stages = static_stages new_sub_modules = set(stage for stage in stages.itervalues() if hasattr(stage, 'config_name')) return NewSubModules(streamcorpus_pipeline, new_sub_modules)
[ "def", "replace_config", "(", "config", ",", "name", ")", ":", "global", "static_stages", "if", "static_stages", "is", "None", ":", "static_stages", "=", "PipelineStages", "(", ")", "stages", "=", "static_stages", "if", "'external_stages_path'", "in", "config", ":", "path", "=", "config", "[", "'external_stages_path'", "]", "if", "not", "os", ".", "path", ".", "isabs", "(", "path", ")", "and", "config", ".", "get", "(", "'root_path'", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "config", "[", "'root_path'", "]", ",", "path", ")", "try", ":", "stages", ".", "load_external_stages", "(", "config", "[", "'external_stages_path'", "]", ")", "except", "IOError", ":", "return", "streamcorpus_pipeline", "# let check_config re-raise this", "if", "'external_stages_modules'", "in", "config", ":", "for", "mod", "in", "config", "[", "'external_stages_modules'", "]", ":", "try", ":", "stages", ".", "load_module_stages", "(", "mod", ")", "except", "ImportError", ":", "return", "streamcorpus_pipeline", "# let check_config re-raise this", "else", ":", "stages", "=", "static_stages", "new_sub_modules", "=", "set", "(", "stage", "for", "stage", "in", "stages", ".", "itervalues", "(", ")", "if", "hasattr", "(", "stage", ",", "'config_name'", ")", ")", "return", "NewSubModules", "(", "streamcorpus_pipeline", ",", "new_sub_modules", ")" ]
Replace the top-level pipeline configurable object. This investigates a number of sources, including `external_stages_path` and `external_stages_modules` configuration and `streamcorpus_pipeline.stages` entry points, and uses these to find the actual :data:`sub_modules` for :mod:`streamcorpus_pipeline`.
[ "Replace", "the", "top", "-", "level", "pipeline", "configurable", "object", "." ]
python
test
41.4
boriel/zxbasic
arch/zx48k/backend/__init__.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/arch/zx48k/backend/__init__.py#L1374-L1380
def _ret8(ins): """ Returns from a procedure / function an 8bits value """ output = _8bit_oper(ins.quad[1]) output.append('#pragma opt require a') output.append('jp %s' % str(ins.quad[2])) return output
[ "def", "_ret8", "(", "ins", ")", ":", "output", "=", "_8bit_oper", "(", "ins", ".", "quad", "[", "1", "]", ")", "output", ".", "append", "(", "'#pragma opt require a'", ")", "output", ".", "append", "(", "'jp %s'", "%", "str", "(", "ins", ".", "quad", "[", "2", "]", ")", ")", "return", "output" ]
Returns from a procedure / function an 8bits value
[ "Returns", "from", "a", "procedure", "/", "function", "an", "8bits", "value" ]
python
train
31.428571
bram85/topydo
topydo/lib/TodoList.py
https://github.com/bram85/topydo/blob/b59fcfca5361869a6b78d4c9808c7c6cd0a18b58/topydo/lib/TodoList.py#L240-L247
def children(self, p_todo, p_only_direct=False): """ Returns a list of child todos that the given todo (in)directly depends on. """ children = \ self._depgraph.outgoing_neighbors(hash(p_todo), not p_only_direct) return [self._tododict[child] for child in children]
[ "def", "children", "(", "self", ",", "p_todo", ",", "p_only_direct", "=", "False", ")", ":", "children", "=", "self", ".", "_depgraph", ".", "outgoing_neighbors", "(", "hash", "(", "p_todo", ")", ",", "not", "p_only_direct", ")", "return", "[", "self", ".", "_tododict", "[", "child", "]", "for", "child", "in", "children", "]" ]
Returns a list of child todos that the given todo (in)directly depends on.
[ "Returns", "a", "list", "of", "child", "todos", "that", "the", "given", "todo", "(", "in", ")", "directly", "depends", "on", "." ]
python
train
39.625
pbrisk/businessdate
businessdate/businessdate.py
https://github.com/pbrisk/businessdate/blob/79a0c5a4e557cbacca82a430403b18413404a9bc/businessdate/businessdate.py#L319-L338
def is_businessdate(in_date): """ checks whether the provided date is a date :param BusinessDate, int or float in_date: :return bool: """ # Note: if the data range has been created from pace_xl, then all the dates are bank dates # and here it remains to check the validity. # !!! However, if the data has been read from json string via json.load() function # it does not recognize that this numbers are bankdates, just considers them as integers # therefore, additional check is useful here, first to convert the date if it is integer to BusinessDate, # then check the validity. # (as the parameter to this method should always be a BusinessDate) if not isinstance(in_date, BaseDate): try: # to be removed in_date = BusinessDate(in_date) except: return False y, m, d, = in_date.to_ymd() return is_valid_ymd(y, m, d)
[ "def", "is_businessdate", "(", "in_date", ")", ":", "# Note: if the data range has been created from pace_xl, then all the dates are bank dates", "# and here it remains to check the validity.", "# !!! However, if the data has been read from json string via json.load() function", "# it does not recognize that this numbers are bankdates, just considers them as integers", "# therefore, additional check is useful here, first to convert the date if it is integer to BusinessDate,", "# then check the validity.", "# (as the parameter to this method should always be a BusinessDate)", "if", "not", "isinstance", "(", "in_date", ",", "BaseDate", ")", ":", "try", ":", "# to be removed", "in_date", "=", "BusinessDate", "(", "in_date", ")", "except", ":", "return", "False", "y", ",", "m", ",", "d", ",", "=", "in_date", ".", "to_ymd", "(", ")", "return", "is_valid_ymd", "(", "y", ",", "m", ",", "d", ")" ]
checks whether the provided date is a date :param BusinessDate, int or float in_date: :return bool:
[ "checks", "whether", "the", "provided", "date", "is", "a", "date", ":", "param", "BusinessDate", "int", "or", "float", "in_date", ":", ":", "return", "bool", ":" ]
python
valid
48.65
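A small, hedged sketch of how the validation helper in the preceding record might be exercised. The package-level import and the staticmethod-style access are assumptions about how the function is exposed; only the accepted argument types (BusinessDate, int or float) come from the record.

```python
# Sketch only: `from businessdate import BusinessDate` and staticmethod-style
# access are assumptions about how the record's function is exposed.
from businessdate import BusinessDate

print(BusinessDate.is_businessdate(BusinessDate()))  # expected True: already a date object
print(BusinessDate.is_businessdate(20160229))        # expected True: 2016-02-29 is a valid leap day
print(BusinessDate.is_businessdate(20160230))        # expected False: conversion/validation fails
```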
sony/nnabla
python/src/nnabla/experimental/graph_converters/batch_normalization_linear.py
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/experimental/graph_converters/batch_normalization_linear.py#L27-L53
def convert(self, vroot, entry_variables): """ All functions are replaced with the same `new` function. Args: vroot (:obj:`Variable`): NNabla Variable entry_variables (:obj:`Variable`): Entry variable from which the conversion starts. """ self.graph_info = GraphInfo(vroot) self.entry_variables = entry_variables cnt = 0 with nn.parameter_scope(self.name): # Function loop in the forward order for t, func in enumerate(self.graph_info.funcs): if func.name == "BatchNormalization": bn_func = func # TODO: should deal with both? if bn_func.info.args["batch_stat"] == False: o = self._bn_linear_conversion(bn_func, cnt) cnt += 1 continue # Identity conversion o = self._identity_conversion(func) self.end_variable = o return self.end_variable
[ "def", "convert", "(", "self", ",", "vroot", ",", "entry_variables", ")", ":", "self", ".", "graph_info", "=", "GraphInfo", "(", "vroot", ")", "self", ".", "entry_variables", "=", "entry_variables", "cnt", "=", "0", "with", "nn", ".", "parameter_scope", "(", "self", ".", "name", ")", ":", "# Function loop in the forward order", "for", "t", ",", "func", "in", "enumerate", "(", "self", ".", "graph_info", ".", "funcs", ")", ":", "if", "func", ".", "name", "==", "\"BatchNormalization\"", ":", "bn_func", "=", "func", "# TODO: should deal with both?", "if", "bn_func", ".", "info", ".", "args", "[", "\"batch_stat\"", "]", "==", "False", ":", "o", "=", "self", ".", "_bn_linear_conversion", "(", "bn_func", ",", "cnt", ")", "cnt", "+=", "1", "continue", "# Identity conversion", "o", "=", "self", ".", "_identity_conversion", "(", "func", ")", "self", ".", "end_variable", "=", "o", "return", "self", ".", "end_variable" ]
All functions are replaced with the same `new` function. Args: vroot (:obj:`Variable`): NNabla Variable entry_variables (:obj:`Variable`): Entry variable from which the conversion starts.
[ "All", "functions", "are", "replaced", "with", "the", "same", "new", "function", "." ]
python
train
37.925926
merll/docker-map
dockermap/map/config/client.py
https://github.com/merll/docker-map/blob/e14fe86a6ff5c33d121eb2f9157e9359cb80dd02/dockermap/map/config/client.py#L89-L102
def get_init_kwargs(self): """ Generates keyword arguments for creating a new Docker client instance. :return: Keyword arguments as defined through this configuration. :rtype: dict """ init_kwargs = {} for k in self.init_kwargs: if k in self.core_property_set: init_kwargs[k] = getattr(self, k) elif k in self: init_kwargs[k] = self[k] return init_kwargs
[ "def", "get_init_kwargs", "(", "self", ")", ":", "init_kwargs", "=", "{", "}", "for", "k", "in", "self", ".", "init_kwargs", ":", "if", "k", "in", "self", ".", "core_property_set", ":", "init_kwargs", "[", "k", "]", "=", "getattr", "(", "self", ",", "k", ")", "elif", "k", "in", "self", ":", "init_kwargs", "[", "k", "]", "=", "self", "[", "k", "]", "return", "init_kwargs" ]
Generates keyword arguments for creating a new Docker client instance. :return: Keyword arguments as defined through this configuration. :rtype: dict
[ "Generates", "keyword", "arguments", "for", "creating", "a", "new", "Docker", "client", "instance", "." ]
python
train
33
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/frontend/qt/console/completion_html.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/frontend/qt/console/completion_html.py#L78-L95
def current(self, value): """set current cursor position""" current = min(max(self._min, value), self._max) self._current = current if current > self._stop : self._stop = current self._start = current-self._width elif current < self._start : self._start = current self._stop = current + self._width if abs(self._start - self._min) <= self._sticky_lenght : self._start = self._min if abs(self._stop - self._max) <= self._sticky_lenght : self._stop = self._max
[ "def", "current", "(", "self", ",", "value", ")", ":", "current", "=", "min", "(", "max", "(", "self", ".", "_min", ",", "value", ")", ",", "self", ".", "_max", ")", "self", ".", "_current", "=", "current", "if", "current", ">", "self", ".", "_stop", ":", "self", ".", "_stop", "=", "current", "self", ".", "_start", "=", "current", "-", "self", ".", "_width", "elif", "current", "<", "self", ".", "_start", ":", "self", ".", "_start", "=", "current", "self", ".", "_stop", "=", "current", "+", "self", ".", "_width", "if", "abs", "(", "self", ".", "_start", "-", "self", ".", "_min", ")", "<=", "self", ".", "_sticky_lenght", ":", "self", ".", "_start", "=", "self", ".", "_min", "if", "abs", "(", "self", ".", "_stop", "-", "self", ".", "_max", ")", "<=", "self", ".", "_sticky_lenght", ":", "self", ".", "_stop", "=", "self", ".", "_max" ]
set current cursor position
[ "set", "current", "cursor", "position" ]
python
test
32.388889
codeinn/vcs
vcs/utils/lockfiles.py
https://github.com/codeinn/vcs/blob/e6cd94188e9c36d273411bf3adc0584ac6ab92a0/vcs/utils/lockfiles.py#L55-L72
def _release_lock(self): """Release our lock if we have one""" if not self._has_lock(): return # if someone removed our file beforhand, lets just flag this issue # instead of failing, to make it more usable. lfp = self._lock_file_path() try: # on bloody windows, the file needs write permissions to be removable. # Why ... if os.name == 'nt': os.chmod(lfp, 0777) # END handle win32 os.remove(lfp) except OSError: pass self._owns_lock = False
[ "def", "_release_lock", "(", "self", ")", ":", "if", "not", "self", ".", "_has_lock", "(", ")", ":", "return", "# if someone removed our file beforhand, lets just flag this issue", "# instead of failing, to make it more usable.", "lfp", "=", "self", ".", "_lock_file_path", "(", ")", "try", ":", "# on bloody windows, the file needs write permissions to be removable.", "# Why ...", "if", "os", ".", "name", "==", "'nt'", ":", "os", ".", "chmod", "(", "lfp", ",", "0777", ")", "# END handle win32", "os", ".", "remove", "(", "lfp", ")", "except", "OSError", ":", "pass", "self", ".", "_owns_lock", "=", "False" ]
Release our lock if we have one
[ "Release", "our", "lock", "if", "we", "have", "one" ]
python
train
32.722222
Jarn/jarn.mkrelease
jarn/mkrelease/exit.py
https://github.com/Jarn/jarn.mkrelease/blob/844377f37a3cdc0a154148790a926f991019ec4a/jarn/mkrelease/exit.py#L27-L31
def trace(msg): """Print a trace message to stderr if environment variable is set. """ if os.environ.get('JARN_TRACE') == '1': print('TRACE:', msg, file=sys.stderr)
[ "def", "trace", "(", "msg", ")", ":", "if", "os", ".", "environ", ".", "get", "(", "'JARN_TRACE'", ")", "==", "'1'", ":", "print", "(", "'TRACE:'", ",", "msg", ",", "file", "=", "sys", ".", "stderr", ")" ]
Print a trace message to stderr if environment variable is set.
[ "Print", "a", "trace", "message", "to", "stderr", "if", "environment", "variable", "is", "set", "." ]
python
train
36
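A minimal usage sketch for the `trace` helper in the record above; the import path is inferred from the record's path field (jarn/mkrelease/exit.py) and is therefore an assumption.

```python
import os
from jarn.mkrelease.exit import trace  # import path inferred from the record's path

trace("silent")                # JARN_TRACE unset: nothing is written
os.environ['JARN_TRACE'] = '1'
trace("now visible")           # writes "TRACE: now visible" to stderr
```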
openvax/varcode
varcode/util.py
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/util.py#L29-L92
def random_variants( count, genome_name="GRCh38", deletions=True, insertions=True, random_seed=None): """ Generate a VariantCollection with random variants that overlap at least one complete coding transcript. """ rng = random.Random(random_seed) ensembl = genome_for_reference_name(genome_name) if ensembl in _transcript_ids_cache: transcript_ids = _transcript_ids_cache[ensembl] else: transcript_ids = ensembl.transcript_ids() _transcript_ids_cache[ensembl] = transcript_ids variants = [] # we should finish way before this loop is over but just in case # something is wrong with PyEnsembl we want to avoid an infinite loop for _ in range(count * 100): if len(variants) < count: transcript_id = rng.choice(transcript_ids) transcript = ensembl.transcript_by_id(transcript_id) if not transcript.complete: continue exon = rng.choice(transcript.exons) base1_genomic_position = rng.randint(exon.start, exon.end) transcript_offset = transcript.spliced_offset(base1_genomic_position) seq = transcript.sequence ref = str(seq[transcript_offset]) if transcript.on_backward_strand: ref = reverse_complement(ref) alt_nucleotides = [x for x in STANDARD_NUCLEOTIDES if x != ref] if insertions: nucleotide_pairs = [ x + y for x in STANDARD_NUCLEOTIDES for y in STANDARD_NUCLEOTIDES ] alt_nucleotides.extend(nucleotide_pairs) if deletions: alt_nucleotides.append("") alt = rng.choice(alt_nucleotides) variant = Variant( transcript.contig, base1_genomic_position, ref=ref, alt=alt, ensembl=ensembl) variants.append(variant) else: return VariantCollection(variants) raise ValueError( ("Unable to generate %d random variants, " "there may be a problem with PyEnsembl") % count)
[ "def", "random_variants", "(", "count", ",", "genome_name", "=", "\"GRCh38\"", ",", "deletions", "=", "True", ",", "insertions", "=", "True", ",", "random_seed", "=", "None", ")", ":", "rng", "=", "random", ".", "Random", "(", "random_seed", ")", "ensembl", "=", "genome_for_reference_name", "(", "genome_name", ")", "if", "ensembl", "in", "_transcript_ids_cache", ":", "transcript_ids", "=", "_transcript_ids_cache", "[", "ensembl", "]", "else", ":", "transcript_ids", "=", "ensembl", ".", "transcript_ids", "(", ")", "_transcript_ids_cache", "[", "ensembl", "]", "=", "transcript_ids", "variants", "=", "[", "]", "# we should finish way before this loop is over but just in case", "# something is wrong with PyEnsembl we want to avoid an infinite loop", "for", "_", "in", "range", "(", "count", "*", "100", ")", ":", "if", "len", "(", "variants", ")", "<", "count", ":", "transcript_id", "=", "rng", ".", "choice", "(", "transcript_ids", ")", "transcript", "=", "ensembl", ".", "transcript_by_id", "(", "transcript_id", ")", "if", "not", "transcript", ".", "complete", ":", "continue", "exon", "=", "rng", ".", "choice", "(", "transcript", ".", "exons", ")", "base1_genomic_position", "=", "rng", ".", "randint", "(", "exon", ".", "start", ",", "exon", ".", "end", ")", "transcript_offset", "=", "transcript", ".", "spliced_offset", "(", "base1_genomic_position", ")", "seq", "=", "transcript", ".", "sequence", "ref", "=", "str", "(", "seq", "[", "transcript_offset", "]", ")", "if", "transcript", ".", "on_backward_strand", ":", "ref", "=", "reverse_complement", "(", "ref", ")", "alt_nucleotides", "=", "[", "x", "for", "x", "in", "STANDARD_NUCLEOTIDES", "if", "x", "!=", "ref", "]", "if", "insertions", ":", "nucleotide_pairs", "=", "[", "x", "+", "y", "for", "x", "in", "STANDARD_NUCLEOTIDES", "for", "y", "in", "STANDARD_NUCLEOTIDES", "]", "alt_nucleotides", ".", "extend", "(", "nucleotide_pairs", ")", "if", "deletions", ":", "alt_nucleotides", ".", "append", "(", "\"\"", ")", "alt", "=", "rng", ".", "choice", "(", "alt_nucleotides", ")", "variant", "=", "Variant", "(", "transcript", ".", "contig", ",", "base1_genomic_position", ",", "ref", "=", "ref", ",", "alt", "=", "alt", ",", "ensembl", "=", "ensembl", ")", "variants", ".", "append", "(", "variant", ")", "else", ":", "return", "VariantCollection", "(", "variants", ")", "raise", "ValueError", "(", "(", "\"Unable to generate %d random variants, \"", "\"there may be a problem with PyEnsembl\"", ")", "%", "count", ")" ]
Generate a VariantCollection with random variants that overlap at least one complete coding transcript.
[ "Generate", "a", "VariantCollection", "with", "random", "variants", "that", "overlap", "at", "least", "one", "complete", "coding", "transcript", "." ]
python
train
34.09375
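A hedged sketch of calling `random_variants` from the record above. The import path is inferred from the record's path field, and the call assumes pyensembl reference data for GRCh38 has already been installed locally.

```python
# Assumes the function is importable as below and that pyensembl's GRCh38
# annotation data has already been downloaded (pyensembl install ...).
from varcode.util import random_variants

variants = random_variants(count=5, genome_name="GRCh38",
                           deletions=True, insertions=True, random_seed=0)
for v in variants:
    print(v.contig, v.start, v.ref, v.alt)
```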
aiidalab/aiidalab-widgets-base
aiidalab_widgets_base/crystal_sim_crystal.py
https://github.com/aiidalab/aiidalab-widgets-base/blob/291a9b159eac902aee655862322670ec1b0cd5b1/aiidalab_widgets_base/crystal_sim_crystal.py#L15-L56
def check_crystal_equivalence(crystal_a, crystal_b): """Function that identifies whether two crystals are equivalent""" # getting symmetry datasets for both crystals cryst_a = spglib.get_symmetry_dataset(ase_to_spgcell(crystal_a), symprec=1e-5, angle_tolerance=-1.0, hall_number=0) cryst_b = spglib.get_symmetry_dataset(ase_to_spgcell(crystal_b), symprec=1e-5, angle_tolerance=-1.0, hall_number=0) samecell = np.allclose(cryst_a['std_lattice'], cryst_b['std_lattice'], atol=1e-5) samenatoms = len(cryst_a['std_positions']) == len(cryst_b['std_positions']) samespg = cryst_a['number'] == cryst_b['number'] def test_rotations_translations(cryst_a, cryst_b, repeat): cell = cryst_a['std_lattice'] pristine = crystal('Mg', [(0, 0., 0.)], spacegroup=int(cryst_a['number']), cellpar=[cell[0]/repeat[0], cell[1]/repeat[1], cell[2]/repeat[2]]).repeat(repeat) sym_set_p = spglib.get_symmetry_dataset(ase_to_spgcell(pristine), symprec=1e-5, angle_tolerance=-1.0, hall_number=0) for _,trans in enumerate(zip(sym_set_p['rotations'], sym_set_p['translations'])): pnew=(np.matmul(trans[0],cryst_a['std_positions'].T).T + trans[1]) % 1.0 fulln = np.concatenate([cryst_a['std_types'][:, None], pnew], axis=1) fullb = np.concatenate([cryst_b['std_types'][:, None], cryst_b['std_positions']], axis=1) sorted_n = np.array(sorted([ list(row) for row in list(fulln) ])) sorted_b = np.array(sorted([ list(row) for row in list(fullb) ])) if np.allclose(sorted_n, sorted_b, atol=1e-5): return True return False if samecell and samenatoms and samespg: cell = cryst_a['std_lattice'] # we assume there are no crystals with a lattice parameter smaller than 2 A rng1 = range(1, int(norm(cell[0])/2.)) rng2 = range(1, int(norm(cell[1])/2.)) rng3 = range(1, int(norm(cell[2])/2.)) for repeat in itertools.product(rng1, rng2, rng3): if test_rotations_translations(cryst_a, cryst_b, repeat): return True return False
[ "def", "check_crystal_equivalence", "(", "crystal_a", ",", "crystal_b", ")", ":", "# getting symmetry datasets for both crystals", "cryst_a", "=", "spglib", ".", "get_symmetry_dataset", "(", "ase_to_spgcell", "(", "crystal_a", ")", ",", "symprec", "=", "1e-5", ",", "angle_tolerance", "=", "-", "1.0", ",", "hall_number", "=", "0", ")", "cryst_b", "=", "spglib", ".", "get_symmetry_dataset", "(", "ase_to_spgcell", "(", "crystal_b", ")", ",", "symprec", "=", "1e-5", ",", "angle_tolerance", "=", "-", "1.0", ",", "hall_number", "=", "0", ")", "samecell", "=", "np", ".", "allclose", "(", "cryst_a", "[", "'std_lattice'", "]", ",", "cryst_b", "[", "'std_lattice'", "]", ",", "atol", "=", "1e-5", ")", "samenatoms", "=", "len", "(", "cryst_a", "[", "'std_positions'", "]", ")", "==", "len", "(", "cryst_b", "[", "'std_positions'", "]", ")", "samespg", "=", "cryst_a", "[", "'number'", "]", "==", "cryst_b", "[", "'number'", "]", "def", "test_rotations_translations", "(", "cryst_a", ",", "cryst_b", ",", "repeat", ")", ":", "cell", "=", "cryst_a", "[", "'std_lattice'", "]", "pristine", "=", "crystal", "(", "'Mg'", ",", "[", "(", "0", ",", "0.", ",", "0.", ")", "]", ",", "spacegroup", "=", "int", "(", "cryst_a", "[", "'number'", "]", ")", ",", "cellpar", "=", "[", "cell", "[", "0", "]", "/", "repeat", "[", "0", "]", ",", "cell", "[", "1", "]", "/", "repeat", "[", "1", "]", ",", "cell", "[", "2", "]", "/", "repeat", "[", "2", "]", "]", ")", ".", "repeat", "(", "repeat", ")", "sym_set_p", "=", "spglib", ".", "get_symmetry_dataset", "(", "ase_to_spgcell", "(", "pristine", ")", ",", "symprec", "=", "1e-5", ",", "angle_tolerance", "=", "-", "1.0", ",", "hall_number", "=", "0", ")", "for", "_", ",", "trans", "in", "enumerate", "(", "zip", "(", "sym_set_p", "[", "'rotations'", "]", ",", "sym_set_p", "[", "'translations'", "]", ")", ")", ":", "pnew", "=", "(", "np", ".", "matmul", "(", "trans", "[", "0", "]", ",", "cryst_a", "[", "'std_positions'", "]", ".", "T", ")", ".", "T", "+", "trans", "[", "1", "]", ")", "%", "1.0", "fulln", "=", "np", ".", "concatenate", "(", "[", "cryst_a", "[", "'std_types'", "]", "[", ":", ",", "None", "]", ",", "pnew", "]", ",", "axis", "=", "1", ")", "fullb", "=", "np", ".", "concatenate", "(", "[", "cryst_b", "[", "'std_types'", "]", "[", ":", ",", "None", "]", ",", "cryst_b", "[", "'std_positions'", "]", "]", ",", "axis", "=", "1", ")", "sorted_n", "=", "np", ".", "array", "(", "sorted", "(", "[", "list", "(", "row", ")", "for", "row", "in", "list", "(", "fulln", ")", "]", ")", ")", "sorted_b", "=", "np", ".", "array", "(", "sorted", "(", "[", "list", "(", "row", ")", "for", "row", "in", "list", "(", "fullb", ")", "]", ")", ")", "if", "np", ".", "allclose", "(", "sorted_n", ",", "sorted_b", ",", "atol", "=", "1e-5", ")", ":", "return", "True", "return", "False", "if", "samecell", "and", "samenatoms", "and", "samespg", ":", "cell", "=", "cryst_a", "[", "'std_lattice'", "]", "# we assume there are no crystals with a lattice parameter smaller than 2 A", "rng1", "=", "range", "(", "1", ",", "int", "(", "norm", "(", "cell", "[", "0", "]", ")", "/", "2.", ")", ")", "rng2", "=", "range", "(", "1", ",", "int", "(", "norm", "(", "cell", "[", "1", "]", ")", "/", "2.", ")", ")", "rng3", "=", "range", "(", "1", ",", "int", "(", "norm", "(", "cell", "[", "2", "]", ")", "/", "2.", ")", ")", "for", "repeat", "in", "itertools", ".", "product", "(", "rng1", ",", "rng2", ",", "rng3", ")", ":", "if", "test_rotations_translations", "(", "cryst_a", ",", "cryst_b", ",", "repeat", ")", ":", "return", 
"True", "return", "False" ]
Function that identifies whether two crystals are equivalent
[ "Function", "that", "identifies", "whether", "two", "crystals", "are", "equivalent" ]
python
train
52.380952
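A hedged sketch for `check_crystal_equivalence` from the record above: two identically constructed fcc aluminium cells should be reported as equivalent. The function's import path is inferred from the record's path field; ASE and spglib are required.

```python
from ase.spacegroup import crystal
# Import path inferred from the record's path field (an assumption):
from aiidalab_widgets_base.crystal_sim_crystal import check_crystal_equivalence

al_a = crystal('Al', [(0, 0, 0)], spacegroup=225,
               cellpar=[4.05, 4.05, 4.05, 90, 90, 90])
al_b = crystal('Al', [(0, 0, 0)], spacegroup=225,
               cellpar=[4.05, 4.05, 4.05, 90, 90, 90])
print(check_crystal_equivalence(al_a, al_b))  # expected: True
```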
lambdalisue/maidenhair
src/maidenhair/statistics/__init__.py
https://github.com/lambdalisue/maidenhair/blob/d5095c1087d1f4d71cc57410492151d2803a9f0d/src/maidenhair/statistics/__init__.py#L190-L215
def simple_moving_matrix(x, n=10): """ Create simple moving matrix. Parameters ---------- x : ndarray A numpy array n : integer The number of sample points used to make average Returns ------- ndarray A n x n numpy array which will be useful for calculating confidentail interval of simple moving average """ if x.ndim > 1 and len(x[0]) > 1: x = np.average(x, axis=1) h = n / 2 o = 0 if h * 2 == n else 1 xx = [] for i in range(h, len(x) - h): xx.append(x[i-h:i+h+o]) return np.array(xx)
[ "def", "simple_moving_matrix", "(", "x", ",", "n", "=", "10", ")", ":", "if", "x", ".", "ndim", ">", "1", "and", "len", "(", "x", "[", "0", "]", ")", ">", "1", ":", "x", "=", "np", ".", "average", "(", "x", ",", "axis", "=", "1", ")", "h", "=", "n", "/", "2", "o", "=", "0", "if", "h", "*", "2", "==", "n", "else", "1", "xx", "=", "[", "]", "for", "i", "in", "range", "(", "h", ",", "len", "(", "x", ")", "-", "h", ")", ":", "xx", ".", "append", "(", "x", "[", "i", "-", "h", ":", "i", "+", "h", "+", "o", "]", ")", "return", "np", ".", "array", "(", "xx", ")" ]
Create simple moving matrix. Parameters ---------- x : ndarray A numpy array n : integer The number of sample points used to make average Returns ------- ndarray An n x n numpy array which will be useful for calculating the confidence interval of the simple moving average
[ "Create", "simple", "moving", "matrix", "." ]
python
train
22.192308
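A hedged sketch for `simple_moving_matrix` from the record above. The import path is inferred from the record's path field; note that the function as written relies on Python-2 integer division (`h = n / 2`), so under Python 3 that line would need `n // 2`.

```python
import numpy as np
from maidenhair.statistics import simple_moving_matrix  # path inferred from the record

x = np.linspace(0.0, 1.0, 30)
m = simple_moving_matrix(x, n=10)
print(m.shape)  # expected (20, 10): one window of width n per interior sample point
```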
drj11/pypng
code/texttopng.py
https://github.com/drj11/pypng/blob/b8220ca9f58e4c5bc1d507e713744fcb8c049225/code/texttopng.py#L118-L127
def char(i): """Get image data for the character `i` (a one character string). Returned as a list of rows. Each row is a tuple containing the packed pixels. """ i = ord(i) if i not in font: return [(0,)] * 8 return [(ord(row),) for row in font[i].decode('hex')]
[ "def", "char", "(", "i", ")", ":", "i", "=", "ord", "(", "i", ")", "if", "i", "not", "in", "font", ":", "return", "[", "(", "0", ",", ")", "]", "*", "8", "return", "[", "(", "ord", "(", "row", ")", ",", ")", "for", "row", "in", "font", "[", "i", "]", ".", "decode", "(", "'hex'", ")", "]" ]
Get image data for the character `i` (a one character string). Returned as a list of rows. Each row is a tuple containing the packed pixels.
[ "Get", "image", "data", "for", "the", "character", "i", "(", "a", "one", "character", "string", ")", ".", "Returned", "as", "a", "list", "of", "rows", ".", "Each", "row", "is", "a", "tuple", "containing", "the", "packed", "pixels", "." ]
python
train
29
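A hedged sketch for the `char` helper from the record above. It assumes `texttopng.py` is importable from the working directory, and note that `str.decode('hex')` makes the record's code Python-2-only.

```python
# Python-2-only sketch: font[i].decode('hex') does not exist on Python 3 str.
from texttopng import char  # assumes code/texttopng.py is on the import path

rows = char('A')            # glyph assumed to be present in the font table
print(len(rows))            # 8 rows either way (the fallback is [(0,)] * 8)
print(char('\x01') == [(0,)] * 8)  # True: unknown characters render blank
```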
LuminosoInsight/wordfreq
wordfreq/preprocess.py
https://github.com/LuminosoInsight/wordfreq/blob/170e3c6536854b06dc63da8d873e8cc4f9ef6180/wordfreq/preprocess.py#L211-L218
def casefold_with_i_dots(text): """ Convert capital I's and capital dotted İ's to lowercase in the way that's appropriate for Turkish and related languages, then case-fold the rest of the letters. """ text = unicodedata.normalize('NFC', text).replace('İ', 'i').replace('I', 'ı') return text.casefold()
[ "def", "casefold_with_i_dots", "(", "text", ")", ":", "text", "=", "unicodedata", ".", "normalize", "(", "'NFC'", ",", "text", ")", ".", "replace", "(", "'İ',", " ", "i')", ".", "r", "eplace(", "'", "I',", " ", "ı')", "", "return", "text", ".", "casefold", "(", ")" ]
Convert capital I's and capital dotted İ's to lowercase in the way that's appropriate for Turkish and related languages, then case-fold the rest of the letters.
[ "Convert", "capital", "I", "s", "and", "capital", "dotted", "İ", "s", "to", "lowercase", "in", "the", "way", "that", "s", "appropriate", "for", "Turkish", "and", "related", "languages", "then", "case", "-", "fold", "the", "rest", "of", "the", "letters", "." ]
python
train
40.25
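A minimal sketch for `casefold_with_i_dots` from the record above; the import path is inferred from the record's path field (wordfreq/preprocess.py).

```python
from wordfreq.preprocess import casefold_with_i_dots  # path inferred from the record

print(casefold_with_i_dots('İstanbul'))  # 'istanbul' - dotted capital İ maps to plain i
print(casefold_with_i_dots('DIŞARI'))    # 'dışarı'   - dotless capital I maps to ı
```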
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_0/gallery/gallery_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/gallery/gallery_client.py#L1175-L1186
def query_publishers(self, publisher_query): """QueryPublishers. [Preview API] :param :class:`<PublisherQuery> <azure.devops.v5_0.gallery.models.PublisherQuery>` publisher_query: :rtype: :class:`<PublisherQueryResult> <azure.devops.v5_0.gallery.models.PublisherQueryResult>` """ content = self._serialize.body(publisher_query, 'PublisherQuery') response = self._send(http_method='POST', location_id='2ad6ee0a-b53f-4034-9d1d-d009fda1212e', version='5.0-preview.1', content=content) return self._deserialize('PublisherQueryResult', response)
[ "def", "query_publishers", "(", "self", ",", "publisher_query", ")", ":", "content", "=", "self", ".", "_serialize", ".", "body", "(", "publisher_query", ",", "'PublisherQuery'", ")", "response", "=", "self", ".", "_send", "(", "http_method", "=", "'POST'", ",", "location_id", "=", "'2ad6ee0a-b53f-4034-9d1d-d009fda1212e'", ",", "version", "=", "'5.0-preview.1'", ",", "content", "=", "content", ")", "return", "self", ".", "_deserialize", "(", "'PublisherQueryResult'", ",", "response", ")" ]
QueryPublishers. [Preview API] :param :class:`<PublisherQuery> <azure.devops.v5_0.gallery.models.PublisherQuery>` publisher_query: :rtype: :class:`<PublisherQueryResult> <azure.devops.v5_0.gallery.models.PublisherQueryResult>`
[ "QueryPublishers", ".", "[", "Preview", "API", "]", ":", "param", ":", "class", ":", "<PublisherQuery", ">", "<azure", ".", "devops", ".", "v5_0", ".", "gallery", ".", "models", ".", "PublisherQuery", ">", "publisher_query", ":", ":", "rtype", ":", ":", "class", ":", "<PublisherQueryResult", ">", "<azure", ".", "devops", ".", "v5_0", ".", "gallery", ".", "models", ".", "PublisherQueryResult", ">" ]
python
train
56.833333
tariqdaouda/rabaDB
rabaDB/filters.py
https://github.com/tariqdaouda/rabaDB/blob/42e0d6ee65149ae4f1e4c380cc695a9e7d2d1bbc/rabaDB/filters.py#L76-L128
def addFilter(self, *lstFilters, **dctFilters) : "add a new filter to the query" dstF = {} if len(lstFilters) > 0 : if type(lstFilters[0]) is types.DictType : dstF = lstFilters[0] lstFilters = lstFilters[1:] if len(dctFilters) > 0 : dstF = dict(dstF, **dctFilters) filts = {} for k, v in dstF.iteritems() : sk = k.split(' ') if len(sk) == 2 : operator = sk[-1].strip().upper() if operator not in self.operators : raise ValueError('Unrecognized operator "%s"' % operator) kk = '%s.%s'% (self.rabaClass.__name__, k) elif len(sk) == 1 : operator = "=" kk = '%s.%s ='% (self.rabaClass.__name__, k) else : raise ValueError('Invalid field %s' % k) if isRabaObject(v) : vv = v.getJsonEncoding() else : vv = v if sk[0].find('.') > -1 : kk = self._parseJoint(sk[0], operator) filts[kk] = vv for lt in lstFilters : for l in lt : match = self.fieldPattern.match(l) if match == None : raise ValueError("RabaQuery Error: Invalid filter '%s'" % l) field = match.group(1) operator = match.group(2) value = match.group(4) if field.find('.') > -1 : joink = self._parseJoint(field, operator, value) filts[joink] = value else : filts['%s.%s %s' %(self.rabaClass.__name__, field, operator)] = value self.filters.append(filts)
[ "def", "addFilter", "(", "self", ",", "*", "lstFilters", ",", "*", "*", "dctFilters", ")", ":", "dstF", "=", "{", "}", "if", "len", "(", "lstFilters", ")", ">", "0", ":", "if", "type", "(", "lstFilters", "[", "0", "]", ")", "is", "types", ".", "DictType", ":", "dstF", "=", "lstFilters", "[", "0", "]", "lstFilters", "=", "lstFilters", "[", "1", ":", "]", "if", "len", "(", "dctFilters", ")", ">", "0", ":", "dstF", "=", "dict", "(", "dstF", ",", "*", "*", "dctFilters", ")", "filts", "=", "{", "}", "for", "k", ",", "v", "in", "dstF", ".", "iteritems", "(", ")", ":", "sk", "=", "k", ".", "split", "(", "' '", ")", "if", "len", "(", "sk", ")", "==", "2", ":", "operator", "=", "sk", "[", "-", "1", "]", ".", "strip", "(", ")", ".", "upper", "(", ")", "if", "operator", "not", "in", "self", ".", "operators", ":", "raise", "ValueError", "(", "'Unrecognized operator \"%s\"'", "%", "operator", ")", "kk", "=", "'%s.%s'", "%", "(", "self", ".", "rabaClass", ".", "__name__", ",", "k", ")", "elif", "len", "(", "sk", ")", "==", "1", ":", "operator", "=", "\"=\"", "kk", "=", "'%s.%s ='", "%", "(", "self", ".", "rabaClass", ".", "__name__", ",", "k", ")", "else", ":", "raise", "ValueError", "(", "'Invalid field %s'", "%", "k", ")", "if", "isRabaObject", "(", "v", ")", ":", "vv", "=", "v", ".", "getJsonEncoding", "(", ")", "else", ":", "vv", "=", "v", "if", "sk", "[", "0", "]", ".", "find", "(", "'.'", ")", ">", "-", "1", ":", "kk", "=", "self", ".", "_parseJoint", "(", "sk", "[", "0", "]", ",", "operator", ")", "filts", "[", "kk", "]", "=", "vv", "for", "lt", "in", "lstFilters", ":", "for", "l", "in", "lt", ":", "match", "=", "self", ".", "fieldPattern", ".", "match", "(", "l", ")", "if", "match", "==", "None", ":", "raise", "ValueError", "(", "\"RabaQuery Error: Invalid filter '%s'\"", "%", "l", ")", "field", "=", "match", ".", "group", "(", "1", ")", "operator", "=", "match", ".", "group", "(", "2", ")", "value", "=", "match", ".", "group", "(", "4", ")", "if", "field", ".", "find", "(", "'.'", ")", ">", "-", "1", ":", "joink", "=", "self", ".", "_parseJoint", "(", "field", ",", "operator", ",", "value", ")", "filts", "[", "joink", "]", "=", "value", "else", ":", "filts", "[", "'%s.%s %s'", "%", "(", "self", ".", "rabaClass", ".", "__name__", ",", "field", ",", "operator", ")", "]", "=", "value", "self", ".", "filters", ".", "append", "(", "filts", ")" ]
add a new filter to the query
[ "add", "a", "new", "filter", "to", "the", "query" ]
python
train
24.830189
ultrabug/py3status
py3status/udev_monitor.py
https://github.com/ultrabug/py3status/blob/4c105f1b44f7384ca4f7da5f821a47e468c7dee2/py3status/udev_monitor.py#L74-L84
def trigger_actions(self, subsystem): """ Refresh all modules which subscribed to the given subsystem. """ for py3_module, trigger_action in self.udev_consumers[subsystem]: if trigger_action in ON_TRIGGER_ACTIONS: self.py3_wrapper.log( "%s udev event, refresh consumer %s" % (subsystem, py3_module.module_full_name) ) py3_module.force_update()
[ "def", "trigger_actions", "(", "self", ",", "subsystem", ")", ":", "for", "py3_module", ",", "trigger_action", "in", "self", ".", "udev_consumers", "[", "subsystem", "]", ":", "if", "trigger_action", "in", "ON_TRIGGER_ACTIONS", ":", "self", ".", "py3_wrapper", ".", "log", "(", "\"%s udev event, refresh consumer %s\"", "%", "(", "subsystem", ",", "py3_module", ".", "module_full_name", ")", ")", "py3_module", ".", "force_update", "(", ")" ]
Refresh all modules which subscribed to the given subsystem.
[ "Refresh", "all", "modules", "which", "subscribed", "to", "the", "given", "subsystem", "." ]
python
train
42.272727
neighbordog/deviantart
deviantart/api.py
https://github.com/neighbordog/deviantart/blob/5612f1d5e2139a48c9d793d7fd19cde7e162d7b1/deviantart/api.py#L1156-L1176
def post_status(self, body="", id="", parentid="", stashid=""): """Post a status :param username: The body of the status :param id: The id of the object you wish to share :param parentid: The parentid of the object you wish to share :param stashid: The stashid of the object you wish to add to the status """ if self.standard_grant_type is not "authorization_code": raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.") response = self._req('/user/statuses/post', post_data={ "body":body, "id":id, "parentid":parentid, "stashid":stashid }) return response['statusid']
[ "def", "post_status", "(", "self", ",", "body", "=", "\"\"", ",", "id", "=", "\"\"", ",", "parentid", "=", "\"\"", ",", "stashid", "=", "\"\"", ")", ":", "if", "self", ".", "standard_grant_type", "is", "not", "\"authorization_code\"", ":", "raise", "DeviantartError", "(", "\"Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.\"", ")", "response", "=", "self", ".", "_req", "(", "'/user/statuses/post'", ",", "post_data", "=", "{", "\"body\"", ":", "body", ",", "\"id\"", ":", "id", ",", "\"parentid\"", ":", "parentid", ",", "\"stashid\"", ":", "stashid", "}", ")", "return", "response", "[", "'statusid'", "]" ]
Post a status :param body: The body of the status :param id: The id of the object you wish to share :param parentid: The parentid of the object you wish to share :param stashid: The stashid of the object you wish to add to the status
[ "Post", "a", "status" ]
python
train
36.571429
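A hedged sketch for `post_status` from the record above. The credentials are placeholders, and per the docstring the client must have been authorized with the Authorization Code grant; otherwise the call raises DeviantartError.

```python
import deviantart

# Placeholder credentials; a real client must use the authorization_code grant.
da = deviantart.Api("client_id", "client_secret")
status_id = da.post_status(body="Hello from the API")
print(status_id)
```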
fermiPy/fermipy
fermipy/scripts/cluster_sources.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/scripts/cluster_sources.py#L187-L258
def make_clusters(span_tree, cut_value): """ Find clusters from the spanning tree Parameters ---------- span_tree : a sparse nsrcs x nsrcs array Filled with zeros except for the active edges, which are filled with the edge measures (either distances or sigmas cut_value : float Value used to cluster group. All links with measures above this calue will be cut. returns dict(int:[int,...]) A dictionary of clusters. Each cluster is a source index and the list of other sources in the cluster. """ iv0, iv1 = span_tree.nonzero() # This is the dictionary of all the pairings for each source match_dict = {} for i0, i1 in zip(iv0, iv1): d = span_tree[i0, i1] # Cut on the link distance if d > cut_value: continue imin = int(min(i0, i1)) imax = int(max(i0, i1)) if imin in match_dict: match_dict[imin][imax] = True else: match_dict[imin] = {imax: True} working = True while working: working = False rev_dict = make_rev_dict_unique(match_dict) k_sort = rev_dict.keys() k_sort.sort() for k in k_sort: v = rev_dict[k] # Multiple mappings if len(v) > 1: working = True v_sort = v.keys() v_sort.sort() cluster_idx = v_sort[0] for vv in v_sort[1:]: try: to_merge = match_dict.pop(vv) except: continue try: match_dict[cluster_idx].update(to_merge) match_dict[cluster_idx][vv] = True except: continue # remove self references try: match_dict[cluster_idx].pop(cluster_idx) except: pass # Convert to a int:list dictionary cdict = {} for k, v in match_dict.items(): cdict[k] = v.keys() # make the reverse dictionary rdict = make_reverse_dict(cdict) return cdict, rdict
[ "def", "make_clusters", "(", "span_tree", ",", "cut_value", ")", ":", "iv0", ",", "iv1", "=", "span_tree", ".", "nonzero", "(", ")", "# This is the dictionary of all the pairings for each source", "match_dict", "=", "{", "}", "for", "i0", ",", "i1", "in", "zip", "(", "iv0", ",", "iv1", ")", ":", "d", "=", "span_tree", "[", "i0", ",", "i1", "]", "# Cut on the link distance", "if", "d", ">", "cut_value", ":", "continue", "imin", "=", "int", "(", "min", "(", "i0", ",", "i1", ")", ")", "imax", "=", "int", "(", "max", "(", "i0", ",", "i1", ")", ")", "if", "imin", "in", "match_dict", ":", "match_dict", "[", "imin", "]", "[", "imax", "]", "=", "True", "else", ":", "match_dict", "[", "imin", "]", "=", "{", "imax", ":", "True", "}", "working", "=", "True", "while", "working", ":", "working", "=", "False", "rev_dict", "=", "make_rev_dict_unique", "(", "match_dict", ")", "k_sort", "=", "rev_dict", ".", "keys", "(", ")", "k_sort", ".", "sort", "(", ")", "for", "k", "in", "k_sort", ":", "v", "=", "rev_dict", "[", "k", "]", "# Multiple mappings", "if", "len", "(", "v", ")", ">", "1", ":", "working", "=", "True", "v_sort", "=", "v", ".", "keys", "(", ")", "v_sort", ".", "sort", "(", ")", "cluster_idx", "=", "v_sort", "[", "0", "]", "for", "vv", "in", "v_sort", "[", "1", ":", "]", ":", "try", ":", "to_merge", "=", "match_dict", ".", "pop", "(", "vv", ")", "except", ":", "continue", "try", ":", "match_dict", "[", "cluster_idx", "]", ".", "update", "(", "to_merge", ")", "match_dict", "[", "cluster_idx", "]", "[", "vv", "]", "=", "True", "except", ":", "continue", "# remove self references", "try", ":", "match_dict", "[", "cluster_idx", "]", ".", "pop", "(", "cluster_idx", ")", "except", ":", "pass", "# Convert to a int:list dictionary", "cdict", "=", "{", "}", "for", "k", ",", "v", "in", "match_dict", ".", "items", "(", ")", ":", "cdict", "[", "k", "]", "=", "v", ".", "keys", "(", ")", "# make the reverse dictionary", "rdict", "=", "make_reverse_dict", "(", "cdict", ")", "return", "cdict", ",", "rdict" ]
Find clusters from the spanning tree Parameters ---------- span_tree : a sparse nsrcs x nsrcs array Filled with zeros except for the active edges, which are filled with the edge measures (either distances or sigmas) cut_value : float Value used to cluster group. All links with measures above this value will be cut. returns dict(int:[int,...]) A dictionary of clusters. Each cluster is a source index and the list of other sources in the cluster.
[ "Find", "clusters", "from", "the", "spanning", "tree" ]
python
train
30.111111
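A hedged sketch of the input `make_clusters` expects: a sparse nsrcs x nsrcs matrix whose nonzero entries are edge measures. The import is inferred from the record's path field, the helpers it calls (`make_rev_dict_unique`, `make_reverse_dict`) are assumed to ship in the same module, and the record's code uses Python-2 dict semantics, so it may not run unchanged under Python 3.

```python
from scipy.sparse import lil_matrix
from fermipy.scripts.cluster_sources import make_clusters  # path inferred from the record

nsrcs = 4
span_tree = lil_matrix((nsrcs, nsrcs))
span_tree[0, 1] = 0.5   # edge measures; links above cut_value are discarded
span_tree[1, 2] = 2.0
span_tree[2, 3] = 0.8

cdict, rdict = make_clusters(span_tree, cut_value=1.0)
# Expected: clusters built only from the sub-threshold links (0-1 and 2-3),
# with rdict mapping each source index back to its cluster key.
```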
pydata/xarray
xarray/backends/api.py
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/backends/api.py#L735-L826
def to_netcdf(dataset, path_or_file=None, mode='w', format=None, group=None, engine=None, encoding=None, unlimited_dims=None, compute=True, multifile=False): """This function creates an appropriate datastore for writing a dataset to disk as a netCDF file See `Dataset.to_netcdf` for full API docs. The ``multifile`` argument is only for the private use of save_mfdataset. """ if isinstance(path_or_file, Path): path_or_file = str(path_or_file) if encoding is None: encoding = {} if path_or_file is None: if engine is None: engine = 'scipy' elif engine != 'scipy': raise ValueError('invalid engine for creating bytes with ' 'to_netcdf: %r. Only the default engine ' "or engine='scipy' is supported" % engine) if not compute: raise NotImplementedError( 'to_netcdf() with compute=False is not yet implemented when ' 'returning bytes') elif isinstance(path_or_file, str): if engine is None: engine = _get_default_engine(path_or_file) path_or_file = _normalize_path(path_or_file) else: # file-like object engine = 'scipy' # validate Dataset keys, DataArray names, and attr keys/values _validate_dataset_names(dataset) _validate_attrs(dataset) try: store_open = WRITEABLE_STORES[engine] except KeyError: raise ValueError('unrecognized engine for to_netcdf: %r' % engine) if format is not None: format = format.upper() # handle scheduler specific logic scheduler = _get_scheduler() have_chunks = any(v.chunks for v in dataset.variables.values()) autoclose = have_chunks and scheduler in ['distributed', 'multiprocessing'] if autoclose and engine == 'scipy': raise NotImplementedError("Writing netCDF files with the %s backend " "is not currently supported with dask's %s " "scheduler" % (engine, scheduler)) target = path_or_file if path_or_file is not None else BytesIO() kwargs = dict(autoclose=True) if autoclose else {} store = store_open(target, mode, format, group, **kwargs) if unlimited_dims is None: unlimited_dims = dataset.encoding.get('unlimited_dims', None) if isinstance(unlimited_dims, str): unlimited_dims = [unlimited_dims] writer = ArrayWriter() # TODO: figure out how to refactor this logic (here and in save_mfdataset) # to avoid this mess of conditionals try: # TODO: allow this work (setting up the file for writing array data) # to be parallelized with dask dump_to_store(dataset, store, writer, encoding=encoding, unlimited_dims=unlimited_dims) if autoclose: store.close() if multifile: return writer, store writes = writer.sync(compute=compute) if path_or_file is None: store.sync() return target.getvalue() finally: if not multifile and compute: store.close() if not compute: import dask return dask.delayed(_finalize_store)(writes, store)
[ "def", "to_netcdf", "(", "dataset", ",", "path_or_file", "=", "None", ",", "mode", "=", "'w'", ",", "format", "=", "None", ",", "group", "=", "None", ",", "engine", "=", "None", ",", "encoding", "=", "None", ",", "unlimited_dims", "=", "None", ",", "compute", "=", "True", ",", "multifile", "=", "False", ")", ":", "if", "isinstance", "(", "path_or_file", ",", "Path", ")", ":", "path_or_file", "=", "str", "(", "path_or_file", ")", "if", "encoding", "is", "None", ":", "encoding", "=", "{", "}", "if", "path_or_file", "is", "None", ":", "if", "engine", "is", "None", ":", "engine", "=", "'scipy'", "elif", "engine", "!=", "'scipy'", ":", "raise", "ValueError", "(", "'invalid engine for creating bytes with '", "'to_netcdf: %r. Only the default engine '", "\"or engine='scipy' is supported\"", "%", "engine", ")", "if", "not", "compute", ":", "raise", "NotImplementedError", "(", "'to_netcdf() with compute=False is not yet implemented when '", "'returning bytes'", ")", "elif", "isinstance", "(", "path_or_file", ",", "str", ")", ":", "if", "engine", "is", "None", ":", "engine", "=", "_get_default_engine", "(", "path_or_file", ")", "path_or_file", "=", "_normalize_path", "(", "path_or_file", ")", "else", ":", "# file-like object", "engine", "=", "'scipy'", "# validate Dataset keys, DataArray names, and attr keys/values", "_validate_dataset_names", "(", "dataset", ")", "_validate_attrs", "(", "dataset", ")", "try", ":", "store_open", "=", "WRITEABLE_STORES", "[", "engine", "]", "except", "KeyError", ":", "raise", "ValueError", "(", "'unrecognized engine for to_netcdf: %r'", "%", "engine", ")", "if", "format", "is", "not", "None", ":", "format", "=", "format", ".", "upper", "(", ")", "# handle scheduler specific logic", "scheduler", "=", "_get_scheduler", "(", ")", "have_chunks", "=", "any", "(", "v", ".", "chunks", "for", "v", "in", "dataset", ".", "variables", ".", "values", "(", ")", ")", "autoclose", "=", "have_chunks", "and", "scheduler", "in", "[", "'distributed'", ",", "'multiprocessing'", "]", "if", "autoclose", "and", "engine", "==", "'scipy'", ":", "raise", "NotImplementedError", "(", "\"Writing netCDF files with the %s backend \"", "\"is not currently supported with dask's %s \"", "\"scheduler\"", "%", "(", "engine", ",", "scheduler", ")", ")", "target", "=", "path_or_file", "if", "path_or_file", "is", "not", "None", "else", "BytesIO", "(", ")", "kwargs", "=", "dict", "(", "autoclose", "=", "True", ")", "if", "autoclose", "else", "{", "}", "store", "=", "store_open", "(", "target", ",", "mode", ",", "format", ",", "group", ",", "*", "*", "kwargs", ")", "if", "unlimited_dims", "is", "None", ":", "unlimited_dims", "=", "dataset", ".", "encoding", ".", "get", "(", "'unlimited_dims'", ",", "None", ")", "if", "isinstance", "(", "unlimited_dims", ",", "str", ")", ":", "unlimited_dims", "=", "[", "unlimited_dims", "]", "writer", "=", "ArrayWriter", "(", ")", "# TODO: figure out how to refactor this logic (here and in save_mfdataset)", "# to avoid this mess of conditionals", "try", ":", "# TODO: allow this work (setting up the file for writing array data)", "# to be parallelized with dask", "dump_to_store", "(", "dataset", ",", "store", ",", "writer", ",", "encoding", "=", "encoding", ",", "unlimited_dims", "=", "unlimited_dims", ")", "if", "autoclose", ":", "store", ".", "close", "(", ")", "if", "multifile", ":", "return", "writer", ",", "store", "writes", "=", "writer", ".", "sync", "(", "compute", "=", "compute", ")", "if", "path_or_file", "is", "None", ":", "store", ".", "sync", "(", ")", 
"return", "target", ".", "getvalue", "(", ")", "finally", ":", "if", "not", "multifile", "and", "compute", ":", "store", ".", "close", "(", ")", "if", "not", "compute", ":", "import", "dask", "return", "dask", ".", "delayed", "(", "_finalize_store", ")", "(", "writes", ",", "store", ")" ]
This function creates an appropriate datastore for writing a dataset to disk as a netCDF file See `Dataset.to_netcdf` for full API docs. The ``multifile`` argument is only for the private use of save_mfdataset.
[ "This", "function", "creates", "an", "appropriate", "datastore", "for", "writing", "a", "dataset", "to", "disk", "as", "a", "netCDF", "file" ]
python
train
35.01087
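The `to_netcdf` in the record above is the private helper behind xarray's public `Dataset.to_netcdf`; a minimal round trip through the public entry point (requires xarray and, for the in-memory case, scipy):

```python
import numpy as np
import xarray as xr

ds = xr.Dataset({"temperature": ("x", np.arange(5.0))})
ds.to_netcdf("example.nc")     # path given: writes to disk with an auto-selected engine
raw = ds.to_netcdf()           # no path: returns the serialized file as bytes (scipy engine)
print(isinstance(raw, bytes))  # True
```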
jmbhughes/suvi-trainer
suvitrainer/gui.py
https://github.com/jmbhughes/suvi-trainer/blob/3d89894a4a037286221974c7eb5634d229b4f5d4/suvitrainer/gui.py#L179-L196
def interpret_header(self): """ Read pertinent information from the image headers, especially location and radius of the Sun to calculate the default thematic map :return: setes self.date, self.cy, self.cx, and self.sun_radius_pixel """ # handle special cases since date-obs field changed names if 'DATE_OBS' in self.header: self.date = self.header['DATE_OBS'] elif 'DATE-OBS' in self.header: self.date = self.header['DATE-OBS'] else: raise Exception("Image does not have a DATE_OBS or DATE-OBS field") self.cy, self.cx = self.header['CRPIX1'], self.header['CRPIX2'] sun_radius_angular = sun.solar_semidiameter_angular_size(t=time.parse_time(self.date)).arcsec arcsec_per_pixel = self.header['CDELT1'] self.sun_radius_pixel = (sun_radius_angular / arcsec_per_pixel)
[ "def", "interpret_header", "(", "self", ")", ":", "# handle special cases since date-obs field changed names", "if", "'DATE_OBS'", "in", "self", ".", "header", ":", "self", ".", "date", "=", "self", ".", "header", "[", "'DATE_OBS'", "]", "elif", "'DATE-OBS'", "in", "self", ".", "header", ":", "self", ".", "date", "=", "self", ".", "header", "[", "'DATE-OBS'", "]", "else", ":", "raise", "Exception", "(", "\"Image does not have a DATE_OBS or DATE-OBS field\"", ")", "self", ".", "cy", ",", "self", ".", "cx", "=", "self", ".", "header", "[", "'CRPIX1'", "]", ",", "self", ".", "header", "[", "'CRPIX2'", "]", "sun_radius_angular", "=", "sun", ".", "solar_semidiameter_angular_size", "(", "t", "=", "time", ".", "parse_time", "(", "self", ".", "date", ")", ")", ".", "arcsec", "arcsec_per_pixel", "=", "self", ".", "header", "[", "'CDELT1'", "]", "self", ".", "sun_radius_pixel", "=", "(", "sun_radius_angular", "/", "arcsec_per_pixel", ")" ]
Read pertinent information from the image headers, especially location and radius of the Sun to calculate the default thematic map :return: sets self.date, self.cy, self.cx, and self.sun_radius_pixel
[ "Read", "pertinent", "information", "from", "the", "image", "headers", "especially", "location", "and", "radius", "of", "the", "Sun", "to", "calculate", "the", "default", "thematic", "map", ":", "return", ":", "sets", "self", ".", "date", "self", ".", "cy", "self", ".", "cx", "and", "self", ".", "sun_radius_pixel" ]
python
train
49.444444
esterhui/pypu
pypu/pusher.py
https://github.com/esterhui/pypu/blob/cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec/pypu/pusher.py#L89-L118
def _computeStatus(self, dfile, service): """Computes status for file, basically this means if more than one service handles the file, it will place a 'C' (for complicated) otherwise if status matches between all services, will place that status""" # If only one service requested if service: if not dfile['services'].has_key(service): return self.ST_UNTRACKED else: return dfile['services'][service]['status'] # Otherwise go through all services and compute # a sensible status first_service_key=dfile['services'].keys()[0] # Save off one of the statuses so we can compute # if they are all the same between services. first_status=dfile['services'][first_service_key]['status'] all_status_match=True # Return ST_COMPLICATED "C" if status # differs for service in dfile['services']: if dfile['services'][service]['status']!=first_status: return self.ST_COMPLICATED return first_status
[ "def", "_computeStatus", "(", "self", ",", "dfile", ",", "service", ")", ":", "# If only one service requested", "if", "service", ":", "if", "not", "dfile", "[", "'services'", "]", ".", "has_key", "(", "service", ")", ":", "return", "self", ".", "ST_UNTRACKED", "else", ":", "return", "dfile", "[", "'services'", "]", "[", "service", "]", "[", "'status'", "]", "# Otherwise go through all services and compute", "# a sensible status", "first_service_key", "=", "dfile", "[", "'services'", "]", ".", "keys", "(", ")", "[", "0", "]", "# Save off one of the statuses so we can compute", "# if they are all the same between services.", "first_status", "=", "dfile", "[", "'services'", "]", "[", "first_service_key", "]", "[", "'status'", "]", "all_status_match", "=", "True", "# Return ST_COMPLICATED \"C\" if status", "# differs", "for", "service", "in", "dfile", "[", "'services'", "]", ":", "if", "dfile", "[", "'services'", "]", "[", "service", "]", "[", "'status'", "]", "!=", "first_status", ":", "return", "self", ".", "ST_COMPLICATED", "return", "first_status" ]
Computes status for file, basically this means if more than one service handles the file, it will place a 'C' (for complicated) otherwise if status matches between all services, will place that status
[ "Computes", "status", "for", "file", "basically", "this", "means", "if", "more", "than", "one", "service", "handles", "the", "file", "it", "will", "place", "a", "C", "(", "for", "complicated", ")", "otherwise", "if", "status", "matches", "between", "all", "services", "will", "place", "that", "status" ]
python
train
36.066667
apache/incubator-heron
heron/instance/src/python/instance/st_heron_instance.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/instance/src/python/instance/st_heron_instance.py#L205-L223
def _handle_state_change_msg(self, new_helper): """Called when state change is commanded by stream manager""" assert self.my_pplan_helper is not None assert self.my_instance is not None and self.my_instance.py_class is not None if self.my_pplan_helper.get_topology_state() != new_helper.get_topology_state(): # handle state change # update the pplan_helper self.my_pplan_helper = new_helper if new_helper.is_topology_running(): if not self.is_instance_started: self.start_instance_if_possible() self.my_instance.py_class.invoke_activate() elif new_helper.is_topology_paused(): self.my_instance.py_class.invoke_deactivate() else: raise RuntimeError("Unexpected TopologyState update: %s" % new_helper.get_topology_state()) else: Log.info("Topology state remains the same.")
[ "def", "_handle_state_change_msg", "(", "self", ",", "new_helper", ")", ":", "assert", "self", ".", "my_pplan_helper", "is", "not", "None", "assert", "self", ".", "my_instance", "is", "not", "None", "and", "self", ".", "my_instance", ".", "py_class", "is", "not", "None", "if", "self", ".", "my_pplan_helper", ".", "get_topology_state", "(", ")", "!=", "new_helper", ".", "get_topology_state", "(", ")", ":", "# handle state change", "# update the pplan_helper", "self", ".", "my_pplan_helper", "=", "new_helper", "if", "new_helper", ".", "is_topology_running", "(", ")", ":", "if", "not", "self", ".", "is_instance_started", ":", "self", ".", "start_instance_if_possible", "(", ")", "self", ".", "my_instance", ".", "py_class", ".", "invoke_activate", "(", ")", "elif", "new_helper", ".", "is_topology_paused", "(", ")", ":", "self", ".", "my_instance", ".", "py_class", ".", "invoke_deactivate", "(", ")", "else", ":", "raise", "RuntimeError", "(", "\"Unexpected TopologyState update: %s\"", "%", "new_helper", ".", "get_topology_state", "(", ")", ")", "else", ":", "Log", ".", "info", "(", "\"Topology state remains the same.\"", ")" ]
Called when state change is commanded by stream manager
[ "Called", "when", "state", "change", "is", "commanded", "by", "stream", "manager" ]
python
valid
45.157895
major/supernova
supernova/utils.py
https://github.com/major/supernova/blob/4a217ae53c1c05567014b047c0b6b9dea2d383b3/supernova/utils.py#L87-L102
def is_valid_group(group_name, nova_creds): """ Checks to see if the configuration file contains a SUPERNOVA_GROUP configuration option. """ valid_groups = [] for key, value in nova_creds.items(): supernova_groups = value.get('SUPERNOVA_GROUP', []) if hasattr(supernova_groups, 'startswith'): supernova_groups = [supernova_groups] valid_groups.extend(supernova_groups) valid_groups.append('all') if group_name in valid_groups: return True else: return False
[ "def", "is_valid_group", "(", "group_name", ",", "nova_creds", ")", ":", "valid_groups", "=", "[", "]", "for", "key", ",", "value", "in", "nova_creds", ".", "items", "(", ")", ":", "supernova_groups", "=", "value", ".", "get", "(", "'SUPERNOVA_GROUP'", ",", "[", "]", ")", "if", "hasattr", "(", "supernova_groups", ",", "'startswith'", ")", ":", "supernova_groups", "=", "[", "supernova_groups", "]", "valid_groups", ".", "extend", "(", "supernova_groups", ")", "valid_groups", ".", "append", "(", "'all'", ")", "if", "group_name", "in", "valid_groups", ":", "return", "True", "else", ":", "return", "False" ]
Checks to see if the configuration file contains a SUPERNOVA_GROUP configuration option.
[ "Checks", "to", "see", "if", "the", "configuration", "file", "contains", "a", "SUPERNOVA_GROUP", "configuration", "option", "." ]
python
train
33.125
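A usage sketch for `is_valid_group`, assuming the import path shown in the record; the credentials layout below is a plausible shape (only the SUPERNOVA_GROUP key matters to this check), not a configuration copied from the supernova docs:

    from supernova.utils import is_valid_group

    nova_creds = {
        'prod-east': {'SUPERNOVA_GROUP': 'production'},            # single group as a string
        'prod-west': {'SUPERNOVA_GROUP': ['production', 'west']},  # several groups as a list
        'dev': {},                                                 # no group assigned
    }

    print(is_valid_group('production', nova_creds))  # True
    print(is_valid_group('all', nova_creds))         # True -- 'all' is always accepted
    print(is_valid_group('staging', nova_creds))     # False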
pyQode/pyqode.core
pyqode/core/panels/folding.py
https://github.com/pyQode/pyqode.core/blob/a99ec6cd22d519394f613309412f8329dc4e90cb/pyqode/core/panels/folding.py#L348-L385
def _draw_fold_indicator(self, top, mouse_over, collapsed, painter): """ Draw the fold indicator/trigger (arrow). :param top: Top position :param mouse_over: Whether the mouse is over the indicator :param collapsed: Whether the trigger is collapsed or not. :param painter: QPainter """ rect = QtCore.QRect(0, top, self.sizeHint().width(), self.sizeHint().height()) if self._native: if os.environ['QT_API'].lower() not in PYQT5_API: opt = QtGui.QStyleOptionViewItemV2() else: opt = QtWidgets.QStyleOptionViewItem() opt.rect = rect opt.state = (QtWidgets.QStyle.State_Active | QtWidgets.QStyle.State_Item | QtWidgets.QStyle.State_Children) if not collapsed: opt.state |= QtWidgets.QStyle.State_Open if mouse_over: opt.state |= (QtWidgets.QStyle.State_MouseOver | QtWidgets.QStyle.State_Enabled | QtWidgets.QStyle.State_Selected) opt.palette.setBrush(QtGui.QPalette.Window, self.palette().highlight()) opt.rect.translate(-2, 0) self.style().drawPrimitive(QtWidgets.QStyle.PE_IndicatorBranch, opt, painter, self) else: index = 0 if not collapsed: index = 2 if mouse_over: index += 1 QtGui.QIcon(self._custom_indicators[index]).paint(painter, rect)
[ "def", "_draw_fold_indicator", "(", "self", ",", "top", ",", "mouse_over", ",", "collapsed", ",", "painter", ")", ":", "rect", "=", "QtCore", ".", "QRect", "(", "0", ",", "top", ",", "self", ".", "sizeHint", "(", ")", ".", "width", "(", ")", ",", "self", ".", "sizeHint", "(", ")", ".", "height", "(", ")", ")", "if", "self", ".", "_native", ":", "if", "os", ".", "environ", "[", "'QT_API'", "]", ".", "lower", "(", ")", "not", "in", "PYQT5_API", ":", "opt", "=", "QtGui", ".", "QStyleOptionViewItemV2", "(", ")", "else", ":", "opt", "=", "QtWidgets", ".", "QStyleOptionViewItem", "(", ")", "opt", ".", "rect", "=", "rect", "opt", ".", "state", "=", "(", "QtWidgets", ".", "QStyle", ".", "State_Active", "|", "QtWidgets", ".", "QStyle", ".", "State_Item", "|", "QtWidgets", ".", "QStyle", ".", "State_Children", ")", "if", "not", "collapsed", ":", "opt", ".", "state", "|=", "QtWidgets", ".", "QStyle", ".", "State_Open", "if", "mouse_over", ":", "opt", ".", "state", "|=", "(", "QtWidgets", ".", "QStyle", ".", "State_MouseOver", "|", "QtWidgets", ".", "QStyle", ".", "State_Enabled", "|", "QtWidgets", ".", "QStyle", ".", "State_Selected", ")", "opt", ".", "palette", ".", "setBrush", "(", "QtGui", ".", "QPalette", ".", "Window", ",", "self", ".", "palette", "(", ")", ".", "highlight", "(", ")", ")", "opt", ".", "rect", ".", "translate", "(", "-", "2", ",", "0", ")", "self", ".", "style", "(", ")", ".", "drawPrimitive", "(", "QtWidgets", ".", "QStyle", ".", "PE_IndicatorBranch", ",", "opt", ",", "painter", ",", "self", ")", "else", ":", "index", "=", "0", "if", "not", "collapsed", ":", "index", "=", "2", "if", "mouse_over", ":", "index", "+=", "1", "QtGui", ".", "QIcon", "(", "self", ".", "_custom_indicators", "[", "index", "]", ")", ".", "paint", "(", "painter", ",", "rect", ")" ]
Draw the fold indicator/trigger (arrow). :param top: Top position :param mouse_over: Whether the mouse is over the indicator :param collapsed: Whether the trigger is collapsed or not. :param painter: QPainter
[ "Draw", "the", "fold", "indicator", "/", "trigger", "(", "arrow", ")", "." ]
python
train
43.605263
intel-analytics/BigDL
pyspark/bigdl/util/common.py
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/util/common.py#L520-L541
def get_spark_context(conf=None): """ Get the current active spark context and create one if no active instance :param conf: combining bigdl configs into spark conf :return: SparkContext """ if hasattr(SparkContext, "getOrCreate"): with SparkContext._lock: if SparkContext._active_spark_context is None: spark_conf = create_spark_conf() if conf is None else conf return SparkContext.getOrCreate(spark_conf) else: return SparkContext.getOrCreate() else: # Might have threading issue but we cann't add _lock here # as it's not RLock in spark1.5; if SparkContext._active_spark_context is None: spark_conf = create_spark_conf() if conf is None else conf return SparkContext(conf=spark_conf) else: return SparkContext._active_spark_context
[ "def", "get_spark_context", "(", "conf", "=", "None", ")", ":", "if", "hasattr", "(", "SparkContext", ",", "\"getOrCreate\"", ")", ":", "with", "SparkContext", ".", "_lock", ":", "if", "SparkContext", ".", "_active_spark_context", "is", "None", ":", "spark_conf", "=", "create_spark_conf", "(", ")", "if", "conf", "is", "None", "else", "conf", "return", "SparkContext", ".", "getOrCreate", "(", "spark_conf", ")", "else", ":", "return", "SparkContext", ".", "getOrCreate", "(", ")", "else", ":", "# Might have threading issue but we cann't add _lock here", "# as it's not RLock in spark1.5;", "if", "SparkContext", ".", "_active_spark_context", "is", "None", ":", "spark_conf", "=", "create_spark_conf", "(", ")", "if", "conf", "is", "None", "else", "conf", "return", "SparkContext", "(", "conf", "=", "spark_conf", ")", "else", ":", "return", "SparkContext", ".", "_active_spark_context" ]
Get the current active spark context and create one if no active instance :param conf: combining bigdl configs into spark conf :return: SparkContext
[ "Get", "the", "current", "active", "spark", "context", "and", "create", "one", "if", "no", "active", "instance", ":", "param", "conf", ":", "combining", "bigdl", "configs", "into", "spark", "conf", ":", "return", ":", "SparkContext" ]
python
test
40.454545
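A usage sketch for `get_spark_context`, assuming a working BigDL/PySpark installation; `create_spark_conf` is the same helper the function calls internally, and `setAppName` is the standard pyspark SparkConf method:

    from bigdl.util.common import create_spark_conf, get_spark_context

    conf = create_spark_conf().setAppName("bigdl-demo")
    sc = get_spark_context(conf)              # reuses any active SparkContext, else creates one
    print(sc.parallelize(range(10)).count())  # 10

    # A second call returns the same active context, so library code can call this freely.
    assert get_spark_context() is sc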
astropy/astropy-helpers
astropy_helpers/distutils_helpers.py
https://github.com/astropy/astropy-helpers/blob/f5a27d3f84a98ea0eebb85e0cf3e7214c6bc0d09/astropy_helpers/distutils_helpers.py#L226-L254
def get_distutils_display_options(): """ Returns a set of all the distutils display options in their long and short forms. These are the setup.py arguments such as --name or --version which print the project's metadata and then exit. Returns ------- opts : set The long and short form display option arguments, including the - or -- """ short_display_opts = set('-' + o[1] for o in Distribution.display_options if o[1]) long_display_opts = set('--' + o[0] for o in Distribution.display_options) # Include -h and --help which are not explicitly listed in # Distribution.display_options (as they are handled by optparse) short_display_opts.add('-h') long_display_opts.add('--help') # This isn't the greatest approach to hardcode these commands. # However, there doesn't seem to be a good way to determine # whether build *will be* run as part of the command at this # phase. display_commands = set([ 'clean', 'register', 'setopt', 'saveopts', 'egg_info', 'alias']) return short_display_opts.union(long_display_opts.union(display_commands))
[ "def", "get_distutils_display_options", "(", ")", ":", "short_display_opts", "=", "set", "(", "'-'", "+", "o", "[", "1", "]", "for", "o", "in", "Distribution", ".", "display_options", "if", "o", "[", "1", "]", ")", "long_display_opts", "=", "set", "(", "'--'", "+", "o", "[", "0", "]", "for", "o", "in", "Distribution", ".", "display_options", ")", "# Include -h and --help which are not explicitly listed in", "# Distribution.display_options (as they are handled by optparse)", "short_display_opts", ".", "add", "(", "'-h'", ")", "long_display_opts", ".", "add", "(", "'--help'", ")", "# This isn't the greatest approach to hardcode these commands.", "# However, there doesn't seem to be a good way to determine", "# whether build *will be* run as part of the command at this", "# phase.", "display_commands", "=", "set", "(", "[", "'clean'", ",", "'register'", ",", "'setopt'", ",", "'saveopts'", ",", "'egg_info'", ",", "'alias'", "]", ")", "return", "short_display_opts", ".", "union", "(", "long_display_opts", ".", "union", "(", "display_commands", ")", ")" ]
Returns a set of all the distutils display options in their long and short forms. These are the setup.py arguments such as --name or --version which print the project's metadata and then exit. Returns ------- opts : set The long and short form display option arguments, including the - or --
[ "Returns", "a", "set", "of", "all", "the", "distutils", "display", "options", "in", "their", "long", "and", "short", "forms", ".", "These", "are", "the", "setup", ".", "py", "arguments", "such", "as", "--", "name", "or", "--", "version", "which", "print", "the", "project", "s", "metadata", "and", "then", "exit", "." ]
python
train
39.448276
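One plausible way a caller might use the returned set -- deciding whether a setup.py invocation only asks for metadata so that heavier build configuration can be skipped; the decision logic below is an illustration, not the helper package's own wrapper:

    import sys
    from astropy_helpers.distutils_helpers import get_distutils_display_options

    display_opts = get_distutils_display_options()
    args = sys.argv[1:]
    metadata_only = bool(args) and all(arg in display_opts for arg in args)
    if metadata_only:
        print("display-only invocation (e.g. --name, --version); skipping build setup")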
ContextLab/hypertools
hypertools/tools/cluster.py
https://github.com/ContextLab/hypertools/blob/b76c7ac8061998b560e969ff8e4f4c915088e7a0/hypertools/tools/cluster.py#L28-L100
def cluster(x, cluster='KMeans', n_clusters=3, ndims=None, format_data=True): """ Performs clustering analysis and returns a list of cluster labels Parameters ---------- x : A Numpy array, Pandas Dataframe or list of arrays/dfs The data to be clustered. You can pass a single array/df or a list. If a list is passed, the arrays will be stacked and the clustering will be performed across all lists (i.e. not within each list). cluster : str or dict Model to use to discover clusters. Support algorithms are: KMeans, MiniBatchKMeans, AgglomerativeClustering, Birch, FeatureAgglomeration, SpectralClustering and HDBSCAN (default: KMeans). Can be passed as a string, but for finer control of the model parameters, pass as a dictionary, e.g. reduce={'model' : 'KMeans', 'params' : {'max_iter' : 100}}. See scikit-learn specific model docs for details on parameters supported for each model. n_clusters : int Number of clusters to discover. Not required for HDBSCAN. format_data : bool Whether or not to first call the format_data function (default: True). ndims : None Deprecated argument. Please use new analyze function to perform combinations of transformations Returns ---------- cluster_labels : list An list of cluster labels """ if cluster == None: return x elif (isinstance(cluster, six.string_types) and cluster=='HDBSCAN') or \ (isinstance(cluster, dict) and cluster['model']=='HDBSCAN'): if not _has_hdbscan: raise ImportError('HDBSCAN is not installed. Please install hdbscan>=0.8.11') if ndims != None: warnings.warn('The ndims argument is now deprecated. Ignoring dimensionality reduction step.') if format_data: x = formatter(x, ppca=True) # if reduce is a string, find the corresponding model if isinstance(cluster, six.string_types): model = models[cluster] if cluster != 'HDBSCAN': model_params = { 'n_clusters' : n_clusters } else: model_params = {} # if its a dict, use custom params elif type(cluster) is dict: if isinstance(cluster['model'], six.string_types): model = models[cluster['model']] model_params = cluster['params'] # initialize model model = model(**model_params) # fit the model model.fit(np.vstack(x)) # return the labels return list(model.labels_)
[ "def", "cluster", "(", "x", ",", "cluster", "=", "'KMeans'", ",", "n_clusters", "=", "3", ",", "ndims", "=", "None", ",", "format_data", "=", "True", ")", ":", "if", "cluster", "==", "None", ":", "return", "x", "elif", "(", "isinstance", "(", "cluster", ",", "six", ".", "string_types", ")", "and", "cluster", "==", "'HDBSCAN'", ")", "or", "(", "isinstance", "(", "cluster", ",", "dict", ")", "and", "cluster", "[", "'model'", "]", "==", "'HDBSCAN'", ")", ":", "if", "not", "_has_hdbscan", ":", "raise", "ImportError", "(", "'HDBSCAN is not installed. Please install hdbscan>=0.8.11'", ")", "if", "ndims", "!=", "None", ":", "warnings", ".", "warn", "(", "'The ndims argument is now deprecated. Ignoring dimensionality reduction step.'", ")", "if", "format_data", ":", "x", "=", "formatter", "(", "x", ",", "ppca", "=", "True", ")", "# if reduce is a string, find the corresponding model", "if", "isinstance", "(", "cluster", ",", "six", ".", "string_types", ")", ":", "model", "=", "models", "[", "cluster", "]", "if", "cluster", "!=", "'HDBSCAN'", ":", "model_params", "=", "{", "'n_clusters'", ":", "n_clusters", "}", "else", ":", "model_params", "=", "{", "}", "# if its a dict, use custom params", "elif", "type", "(", "cluster", ")", "is", "dict", ":", "if", "isinstance", "(", "cluster", "[", "'model'", "]", ",", "six", ".", "string_types", ")", ":", "model", "=", "models", "[", "cluster", "[", "'model'", "]", "]", "model_params", "=", "cluster", "[", "'params'", "]", "# initialize model", "model", "=", "model", "(", "*", "*", "model_params", ")", "# fit the model", "model", ".", "fit", "(", "np", ".", "vstack", "(", "x", ")", ")", "# return the labels", "return", "list", "(", "model", ".", "labels_", ")" ]
Performs clustering analysis and returns a list of cluster labels Parameters ---------- x : A Numpy array, Pandas Dataframe or list of arrays/dfs The data to be clustered. You can pass a single array/df or a list. If a list is passed, the arrays will be stacked and the clustering will be performed across all lists (i.e. not within each list). cluster : str or dict Model to use to discover clusters. Support algorithms are: KMeans, MiniBatchKMeans, AgglomerativeClustering, Birch, FeatureAgglomeration, SpectralClustering and HDBSCAN (default: KMeans). Can be passed as a string, but for finer control of the model parameters, pass as a dictionary, e.g. reduce={'model' : 'KMeans', 'params' : {'max_iter' : 100}}. See scikit-learn specific model docs for details on parameters supported for each model. n_clusters : int Number of clusters to discover. Not required for HDBSCAN. format_data : bool Whether or not to first call the format_data function (default: True). ndims : None Deprecated argument. Please use new analyze function to perform combinations of transformations Returns ---------- cluster_labels : list An list of cluster labels
[ "Performs", "clustering", "analysis", "and", "returns", "a", "list", "of", "cluster", "labels" ]
python
train
34.369863
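A minimal sketch for `cluster` on synthetic data, assuming the module path shown in the record (hypertools also exposes its tools at package level); the two well-separated blobs are just a convenient toy input:

    import numpy as np
    from hypertools.tools.cluster import cluster

    rng = np.random.RandomState(0)
    data = np.vstack([rng.randn(50, 3) + 5.0,    # blob 1
                      rng.randn(50, 3) - 5.0])   # blob 2

    labels = cluster(data, cluster='KMeans', n_clusters=2)
    print(len(labels), sorted(set(labels)))      # 100 [0, 1]

    # Finer control goes through the dict form described in the docstring;
    # note that the params dict then carries n_clusters itself.
    labels = cluster(data,
                     cluster={'model': 'KMeans',
                              'params': {'n_clusters': 2, 'max_iter': 100}})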
robinandeer/puzzle
puzzle/plugins/sql/mixins/actions/gemini.py
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/sql/mixins/actions/gemini.py#L10-L17
def gemini_query(self, query_id): """Return a gemini query Args: name (str) """ logger.debug("Looking for query with id {0}".format(query_id)) return self.query(GeminiQuery).filter_by(id=query_id).first()
[ "def", "gemini_query", "(", "self", ",", "query_id", ")", ":", "logger", ".", "debug", "(", "\"Looking for query with id {0}\"", ".", "format", "(", "query_id", ")", ")", "return", "self", ".", "query", "(", "GeminiQuery", ")", ".", "filter_by", "(", "id", "=", "query_id", ")", ".", "first", "(", ")" ]
Return a gemini query Args: name (str)
[ "Return", "a", "gemini", "query" ]
python
train
31.25
SmokinCaterpillar/pypet
pypet/utils/decorators.py
https://github.com/SmokinCaterpillar/pypet/blob/97ad3e80d46dbdea02deeb98ea41f05a19565826/pypet/utils/decorators.py#L48-L72
def deprecated(msg=''): """This is a decorator which can be used to mark functions as deprecated. It will result in a warning being emitted when the function is used. :param msg: Additional message added to the warning. """ def wrapper(func): @functools.wraps(func) def new_func(*args, **kwargs): warning_string = "Call to deprecated function or property `%s`." % func.__name__ warning_string = warning_string + ' ' + msg warnings.warn( warning_string, category=DeprecationWarning, ) return func(*args, **kwargs) return new_func return wrapper
[ "def", "deprecated", "(", "msg", "=", "''", ")", ":", "def", "wrapper", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "new_func", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "warning_string", "=", "\"Call to deprecated function or property `%s`.\"", "%", "func", ".", "__name__", "warning_string", "=", "warning_string", "+", "' '", "+", "msg", "warnings", ".", "warn", "(", "warning_string", ",", "category", "=", "DeprecationWarning", ",", ")", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "new_func", "return", "wrapper" ]
This is a decorator which can be used to mark functions as deprecated. It will result in a warning being emitted when the function is used. :param msg: Additional message added to the warning.
[ "This", "is", "a", "decorator", "which", "can", "be", "used", "to", "mark", "functions", "as", "deprecated", ".", "It", "will", "result", "in", "a", "warning", "being", "emitted", "when", "the", "function", "is", "used", "." ]
python
test
27.08
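A usage sketch for the `deprecated` decorator; the wrapped function and message below are made up for illustration:

    import warnings
    from pypet.utils.decorators import deprecated

    @deprecated(msg='Use compute_mean_v2 instead.')
    def compute_mean(values):
        return sum(values) / float(len(values))

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        result = compute_mean([1, 2, 3])

    print(result)                        # 2.0
    print(caught[0].category.__name__)   # DeprecationWarning
    print(caught[0].message)             # mentions `compute_mean` plus the custom message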
PMEAL/OpenPNM
openpnm/topotools/topotools.py
https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/topotools/topotools.py#L1791-L1814
def template_sphere_shell(outer_radius, inner_radius=0): r""" This method generates an image array of a sphere-shell. It is useful for passing to Cubic networks as a ``template`` to make spherical shaped networks. Parameters ---------- outer_radius : int Number of nodes in the outer radius of the sphere. inner_radius : int Number of nodes in the inner radius of the shell. a value of 0 will result in a solid sphere. Returns ------- A Numpy array containing 1's to demarcate the sphere-shell, and 0's elsewhere. """ img = _template_sphere_disc(dim=3, outer_radius=outer_radius, inner_radius=inner_radius) return img
[ "def", "template_sphere_shell", "(", "outer_radius", ",", "inner_radius", "=", "0", ")", ":", "img", "=", "_template_sphere_disc", "(", "dim", "=", "3", ",", "outer_radius", "=", "outer_radius", ",", "inner_radius", "=", "inner_radius", ")", "return", "img" ]
r""" This method generates an image array of a sphere-shell. It is useful for passing to Cubic networks as a ``template`` to make spherical shaped networks. Parameters ---------- outer_radius : int Number of nodes in the outer radius of the sphere. inner_radius : int Number of nodes in the inner radius of the shell. a value of 0 will result in a solid sphere. Returns ------- A Numpy array containing 1's to demarcate the sphere-shell, and 0's elsewhere.
[ "r", "This", "method", "generates", "an", "image", "array", "of", "a", "sphere", "-", "shell", ".", "It", "is", "useful", "for", "passing", "to", "Cubic", "networks", "as", "a", "template", "to", "make", "spherical", "shaped", "networks", "." ]
python
train
29.791667
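A sketch that builds the shell template and inspects it with NumPy, assuming `template_sphere_shell` is re-exported at the `openpnm.topotools` package level as the record's path suggests; handing the image to a template-based Cubic network is omitted because the exact constructor varies between OpenPNM versions:

    import numpy as np
    from openpnm.topotools import template_sphere_shell

    img = template_sphere_shell(outer_radius=10, inner_radius=6)
    print(img.shape)                      # a cube roughly 2*outer_radius on a side
    print(np.unique(img.astype(int)))     # [0 1] -- 1 marks the shell, 0 the background
    print(int(img.sum()), 'shell voxels')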
ToFuProject/tofu
tofu/pathfile.py
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/pathfile.py#L73-L121
def get_PolyFromPolyFileObj(PolyFileObj, SavePathInp=None, units='m', comments='#', skiprows=0, shape0=2): """ Return a polygon as a np.ndarray, extracted from a txt file or from a ToFu object, with appropriate units Useful for :meth:`tofu.plugins.AUG.Ves._create()` Parameters ---------- PolyFileObj : str / :mod:`tofu.geom` object / np.ndarray The source where the polygon is to be found, either: - str: the name of a file containing the coorindates of a polygon to be loaded with :meth:`numpy.loadtxt()` - A :mod:`tofu.geom` object: with attribute 'Poly' - np.ndarray: an 2-dimensional array containing the 2D cartesian coordinates of a polygon SavePathInp : str / None The absolute path where the input file is stored units : str Flag indicating in which units the polygon coordinates is expressed in the input file / object / array (will be converted to meters) comments : str Parameter to be fed to :meth:`numpy.loadtxt()` if PolyFileObj is a file name skiprows : int Parameter to be fed to :meth:`numpy.loadtxt()` if PolyFileObj is a file name shape0 : int Specifies whether the loaded array is a (2,N) or (3,N) array (transposed it if necessary) Returns ------- Poly : np.ndarray (2,N) np.ndarray containing the 2D cartesian coordinates of the polygon, where N is the number of points addInfo : dict Dictionaryb containing information on the origin of the polygon, for the record (e.g.: the name and absolute path of the file from which it was extracted) """ assert type(PolyFileObj) in [list,str] or hasattr(PolyFileObj,"Poly") or np.asarray(PolyFileObj).ndim==2, "Arg PolyFileObj must be str (PathFileExt), a ToFu object with attribute Poly or an iterable convertible to 2d np.ndarray !" # Load PolyFileObj if file and check shape addInfo = {} if type(PolyFileObj) in [list,str]: PathFileExt = get_FileFromInfos(Path=SavePathInp, Name=PolyFileObj) # Include PathFileExt in ID for tracability addInfo = {'Input':PathFileExt} PolyFileObj = np.loadtxt(PathFileExt, dtype=float, comments=comments, delimiter=None, converters=None, skiprows=skiprows, usecols=None, unpack=False, ndmin=2) elif hasattr(PolyFileObj,"Poly"): addInfo = {'Input':PolyFileObj.Id.SaveName} PolyFileObj = PolyFileObj.Poly Poly = np.asarray(PolyFileObj) assert Poly.ndim==2 and shape0 in Poly.shape and max(Poly.shape)>=3 and not np.any(np.isnan(Poly)), "Arg np.asarray(PolyFileObj) must be a (2,N) or (N,2) np.ndarray with non NaNs !" Poly = Poly if Poly.shape[0]==shape0 else Poly.T Poly = convert_units(Poly, In=units, Out='m') return Poly, addInfo
[ "def", "get_PolyFromPolyFileObj", "(", "PolyFileObj", ",", "SavePathInp", "=", "None", ",", "units", "=", "'m'", ",", "comments", "=", "'#'", ",", "skiprows", "=", "0", ",", "shape0", "=", "2", ")", ":", "assert", "type", "(", "PolyFileObj", ")", "in", "[", "list", ",", "str", "]", "or", "hasattr", "(", "PolyFileObj", ",", "\"Poly\"", ")", "or", "np", ".", "asarray", "(", "PolyFileObj", ")", ".", "ndim", "==", "2", ",", "\"Arg PolyFileObj must be str (PathFileExt), a ToFu object with attribute Poly or an iterable convertible to 2d np.ndarray !\"", "# Load PolyFileObj if file and check shape", "addInfo", "=", "{", "}", "if", "type", "(", "PolyFileObj", ")", "in", "[", "list", ",", "str", "]", ":", "PathFileExt", "=", "get_FileFromInfos", "(", "Path", "=", "SavePathInp", ",", "Name", "=", "PolyFileObj", ")", "# Include PathFileExt in ID for tracability", "addInfo", "=", "{", "'Input'", ":", "PathFileExt", "}", "PolyFileObj", "=", "np", ".", "loadtxt", "(", "PathFileExt", ",", "dtype", "=", "float", ",", "comments", "=", "comments", ",", "delimiter", "=", "None", ",", "converters", "=", "None", ",", "skiprows", "=", "skiprows", ",", "usecols", "=", "None", ",", "unpack", "=", "False", ",", "ndmin", "=", "2", ")", "elif", "hasattr", "(", "PolyFileObj", ",", "\"Poly\"", ")", ":", "addInfo", "=", "{", "'Input'", ":", "PolyFileObj", ".", "Id", ".", "SaveName", "}", "PolyFileObj", "=", "PolyFileObj", ".", "Poly", "Poly", "=", "np", ".", "asarray", "(", "PolyFileObj", ")", "assert", "Poly", ".", "ndim", "==", "2", "and", "shape0", "in", "Poly", ".", "shape", "and", "max", "(", "Poly", ".", "shape", ")", ">=", "3", "and", "not", "np", ".", "any", "(", "np", ".", "isnan", "(", "Poly", ")", ")", ",", "\"Arg np.asarray(PolyFileObj) must be a (2,N) or (N,2) np.ndarray with non NaNs !\"", "Poly", "=", "Poly", "if", "Poly", ".", "shape", "[", "0", "]", "==", "shape0", "else", "Poly", ".", "T", "Poly", "=", "convert_units", "(", "Poly", ",", "In", "=", "units", ",", "Out", "=", "'m'", ")", "return", "Poly", ",", "addInfo" ]
Return a polygon as a np.ndarray, extracted from a txt file or from a ToFu object, with appropriate units Useful for :meth:`tofu.plugins.AUG.Ves._create()` Parameters ---------- PolyFileObj : str / :mod:`tofu.geom` object / np.ndarray The source where the polygon is to be found, either: - str: the name of a file containing the coorindates of a polygon to be loaded with :meth:`numpy.loadtxt()` - A :mod:`tofu.geom` object: with attribute 'Poly' - np.ndarray: an 2-dimensional array containing the 2D cartesian coordinates of a polygon SavePathInp : str / None The absolute path where the input file is stored units : str Flag indicating in which units the polygon coordinates is expressed in the input file / object / array (will be converted to meters) comments : str Parameter to be fed to :meth:`numpy.loadtxt()` if PolyFileObj is a file name skiprows : int Parameter to be fed to :meth:`numpy.loadtxt()` if PolyFileObj is a file name shape0 : int Specifies whether the loaded array is a (2,N) or (3,N) array (transposed it if necessary) Returns ------- Poly : np.ndarray (2,N) np.ndarray containing the 2D cartesian coordinates of the polygon, where N is the number of points addInfo : dict Dictionaryb containing information on the origin of the polygon, for the record (e.g.: the name and absolute path of the file from which it was extracted)
[ "Return", "a", "polygon", "as", "a", "np", ".", "ndarray", "extracted", "from", "a", "txt", "file", "or", "from", "a", "ToFu", "object", "with", "appropriate", "units" ]
python
train
56.77551
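A usage sketch for the in-memory array branch of `get_PolyFromPolyFileObj`; with the default units ('m') no conversion takes place, and the (N,2) input is transposed to the (2,N) convention:

    import numpy as np
    from tofu.pathfile import get_PolyFromPolyFileObj

    square = np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]])   # (4, 2) polygon in metres
    poly, add_info = get_PolyFromPolyFileObj(square)

    print(poly.shape)   # (2, 4) -- transposed to (2, N)
    print(add_info)     # {}     -- no file or ToFu object involved, so nothing to record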
inveniosoftware-attic/invenio-upgrader
invenio_upgrader/engine.py
https://github.com/inveniosoftware-attic/invenio-upgrader/blob/cee4bcb118515463ecf6de1421642007f79a9fcd/invenio_upgrader/engine.py#L341-L353
def get_upgrades(self, remove_applied=True): """Get upgrades (ordered according to their dependencies). :param remove_applied: Set to false to return all upgrades, otherwise already applied upgrades are removed from their graph (incl. all their dependencies. """ if self.upgrades is None: plugins = self._load_upgrades(remove_applied=remove_applied) # List of un-applied upgrades in topological order self.upgrades = self.order_upgrades(plugins, self.history) return self.upgrades
[ "def", "get_upgrades", "(", "self", ",", "remove_applied", "=", "True", ")", ":", "if", "self", ".", "upgrades", "is", "None", ":", "plugins", "=", "self", ".", "_load_upgrades", "(", "remove_applied", "=", "remove_applied", ")", "# List of un-applied upgrades in topological order", "self", ".", "upgrades", "=", "self", ".", "order_upgrades", "(", "plugins", ",", "self", ".", "history", ")", "return", "self", ".", "upgrades" ]
Get upgrades (ordered according to their dependencies). :param remove_applied: Set to false to return all upgrades, otherwise already applied upgrades are removed from their graph (incl. all their dependencies.
[ "Get", "upgrades", "(", "ordered", "according", "to", "their", "dependencies", ")", "." ]
python
train
43.846154
GNS3/gns3-server
gns3server/controller/import_project.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/controller/import_project.py#L191-L200
def _upload_file(compute, project_id, file_path, path): """ Upload a file to a remote project :param file_path: File path on the controller file system :param path: File path on the remote system relative to project directory """ path = "/projects/{}/files/{}".format(project_id, path.replace("\\", "/")) with open(file_path, "rb") as f: yield from compute.http_query("POST", path, f, timeout=None)
[ "def", "_upload_file", "(", "compute", ",", "project_id", ",", "file_path", ",", "path", ")", ":", "path", "=", "\"/projects/{}/files/{}\"", ".", "format", "(", "project_id", ",", "path", ".", "replace", "(", "\"\\\\\"", ",", "\"/\"", ")", ")", "with", "open", "(", "file_path", ",", "\"rb\"", ")", "as", "f", ":", "yield", "from", "compute", ".", "http_query", "(", "\"POST\"", ",", "path", ",", "f", ",", "timeout", "=", "None", ")" ]
Upload a file to a remote project :param file_path: File path on the controller file system :param path: File path on the remote system relative to project directory
[ "Upload", "a", "file", "to", "a", "remote", "project" ]
python
train
42.6
wilson-eft/wilson
wilson/run/wet/rge.py
https://github.com/wilson-eft/wilson/blob/4164f55ff663d4f668c6e2b4575fd41562662cc9/wilson/run/wet/rge.py#L38-L50
def admeig(classname, f, m_u, m_d, m_s, m_c, m_b, m_e, m_mu, m_tau): """Compute the eigenvalues and eigenvectors for a QCD anomalous dimension matrix that is defined in `adm.adm_s_X` where X is the name of the sector. Supports memoization. Output analogous to `np.linalg.eig`.""" args = f, m_u, m_d, m_s, m_c, m_b, m_e, m_mu, m_tau A = getattr(adm, 'adm_s_' + classname)(*args) perm_keys = get_permissible_wcs(classname, f) if perm_keys != 'all': # remove disallowed rows & columns if necessary A = A[perm_keys][:, perm_keys] w, v = np.linalg.eig(A.T) return w, v
[ "def", "admeig", "(", "classname", ",", "f", ",", "m_u", ",", "m_d", ",", "m_s", ",", "m_c", ",", "m_b", ",", "m_e", ",", "m_mu", ",", "m_tau", ")", ":", "args", "=", "f", ",", "m_u", ",", "m_d", ",", "m_s", ",", "m_c", ",", "m_b", ",", "m_e", ",", "m_mu", ",", "m_tau", "A", "=", "getattr", "(", "adm", ",", "'adm_s_'", "+", "classname", ")", "(", "*", "args", ")", "perm_keys", "=", "get_permissible_wcs", "(", "classname", ",", "f", ")", "if", "perm_keys", "!=", "'all'", ":", "# remove disallowed rows & columns if necessary", "A", "=", "A", "[", "perm_keys", "]", "[", ":", ",", "perm_keys", "]", "w", ",", "v", "=", "np", ".", "linalg", ".", "eig", "(", "A", ".", "T", ")", "return", "w", ",", "v" ]
Compute the eigenvalues and eigenvectors for a QCD anomalous dimension matrix that is defined in `adm.adm_s_X` where X is the name of the sector. Supports memoization. Output analogous to `np.linalg.eig`.
[ "Compute", "the", "eigenvalues", "and", "eigenvectors", "for", "a", "QCD", "anomalous", "dimension", "matrix", "that", "is", "defined", "in", "adm", ".", "adm_s_X", "where", "X", "is", "the", "name", "of", "the", "sector", "." ]
python
train
46.461538
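The "output analogous to np.linalg.eig" contract can be sanity-checked on a toy matrix without touching any physical sector (valid sector names and the internal `adm` tables are specific to wilson and are not reproduced here):

    import numpy as np

    # For any square M, np.linalg.eig returns w, v with M @ v == v @ diag(w);
    # admeig applies exactly this to the transposed anomalous dimension matrix A.T.
    M = np.array([[2.0, 1.0],
                  [0.0, 3.0]])
    w, v = np.linalg.eig(M)
    assert np.allclose(M @ v, v @ np.diag(w))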