code | docstring | text
---|---|---|
def get_configuration_dict(self, secret_attrs=False):
"""Overrides superclass method and renames some properties"""
cd = super(TaxonomicAmendmentsShard, self).get_configuration_dict(secret_attrs=secret_attrs)
# "rename" some keys in the dict provided
cd['number of amendments'] = cd.pop('number of documents')
cd['amendments'] = cd.pop('documents')
# add keys particular to this shard subclass
if self._next_ott_id is not None:
cd['_next_ott_id'] = self._next_ott_id
return cd | Overrides superclass method and renames some properties | Below is the instruction that describes the task:
### Input:
Overrides superclass method and renames some properties
### Response:
def get_configuration_dict(self, secret_attrs=False):
"""Overrides superclass method and renames some properties"""
cd = super(TaxonomicAmendmentsShard, self).get_configuration_dict(secret_attrs=secret_attrs)
# "rename" some keys in the dict provided
cd['number of amendments'] = cd.pop('number of documents')
cd['amendments'] = cd.pop('documents')
# add keys particular to this shard subclass
if self._next_ott_id is not None:
cd['_next_ott_id'] = self._next_ott_id
return cd |
def disconnect(cls):
""" Disconnect from the current network (if connected).
Returns
--------
result: future
A future that resolves to true if the disconnect was
successful. Will be set to None if the change network
permission is denied.
"""
app = AndroidApplication.instance()
f = app.create_future()
def on_permission_result(result):
if not result:
f.set_result(None)
return
def on_ready(mgr):
mgr.disconnect().then(f.set_result)
#: Get the service
WifiManager.get().then(on_ready)
#: Request permissions
WifiManager.request_permission([
WifiManager.PERMISSION_CHANGE_WIFI_STATE
]).then(on_permission_result)
return f | Disconnect from the current network (if connected).
Returns
--------
result: future
A future that resolves to true if the disconnect was
successful. Will be set to None if the change network
permission is denied. | Below is the instruction that describes the task:
### Input:
Disconnect from the current network (if connected).
Returns
--------
result: future
A future that resolves to true if the disconnect was
successful. Will be set to None if the change network
permission is denied.
### Response:
def disconnect(cls):
""" Disconnect from the current network (if connected).
Returns
--------
result: future
A future that resolves to true if the disconnect was
successful. Will be set to None if the change network
permission is denied.
"""
app = AndroidApplication.instance()
f = app.create_future()
def on_permission_result(result):
if not result:
f.set_result(None)
return
def on_ready(mgr):
mgr.disconnect().then(f.set_result)
#: Get the service
WifiManager.get().then(on_ready)
#: Request permissions
WifiManager.request_permission([
WifiManager.PERMISSION_CHANGE_WIFI_STATE
]).then(on_permission_result)
return f |
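A hedged usage sketch for the future-based API above; the callback body and printed messages are illustrative, and the import path for `WifiManager` (enaml-native's Android API) is an assumption.

```python
# Hypothetical consumer of WifiManager.disconnect(); the import path is assumed.
# from enamlnative.android.api import WifiManager

def on_disconnected(result):
    if result is None:
        print("CHANGE_WIFI_STATE permission was denied")
    elif result:
        print("Disconnected from the current network")
    else:
        print("Disconnect request failed")

WifiManager.disconnect().then(on_disconnected)
```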
def _wire_events(self):
"""
Wires up the internal device events.
"""
self._device.on_open += self._on_open
self._device.on_close += self._on_close
self._device.on_read += self._on_read
self._device.on_write += self._on_write
self._zonetracker.on_fault += self._on_zone_fault
self._zonetracker.on_restore += self._on_zone_restore | Wires up the internal device events. | Below is the instruction that describes the task:
### Input:
Wires up the internal device events.
### Response:
def _wire_events(self):
"""
Wires up the internal device events.
"""
self._device.on_open += self._on_open
self._device.on_close += self._on_close
self._device.on_read += self._on_read
self._device.on_write += self._on_write
self._zonetracker.on_fault += self._on_zone_fault
self._zonetracker.on_restore += self._on_zone_restore |
def refetch_fields(self, missing_fields):
""" Refetches a list of fields from the DB """
db_fields = self.mongokat_collection.find_one({"_id": self["_id"]}, fields={k: 1 for k in missing_fields})
self._fetched_fields += tuple(missing_fields)
if not db_fields:
return
for k, v in db_fields.items():
self[k] = v | Refetches a list of fields from the DB | Below is the instruction that describes the task:
### Input:
Refetches a list of fields from the DB
### Response:
def refetch_fields(self, missing_fields):
""" Refetches a list of fields from the DB """
db_fields = self.mongokat_collection.find_one({"_id": self["_id"]}, fields={k: 1 for k in missing_fields})
self._fetched_fields += tuple(missing_fields)
if not db_fields:
return
for k, v in db_fields.items():
self[k] = v |
def _force_https(self):
"""Redirect any non-https requests to https.
Based largely on flask-sslify.
"""
if self.session_cookie_secure:
if not self.app.debug:
self.app.config['SESSION_COOKIE_SECURE'] = True
criteria = [
self.app.debug,
flask.request.is_secure,
flask.request.headers.get('X-Forwarded-Proto', 'http') == 'https',
]
local_options = self._get_local_options()
if local_options['force_https'] and not any(criteria):
if flask.request.url.startswith('http://'):
url = flask.request.url.replace('http://', 'https://', 1)
code = 302
if self.force_https_permanent:
code = 301
r = flask.redirect(url, code=code)
return r | Redirect any non-https requests to https.
Based largely on flask-sslify. | Below is the instruction that describes the task:
### Input:
Redirect any non-https requests to https.
Based largely on flask-sslify.
### Response:
def _force_https(self):
"""Redirect any non-https requests to https.
Based largely on flask-sslify.
"""
if self.session_cookie_secure:
if not self.app.debug:
self.app.config['SESSION_COOKIE_SECURE'] = True
criteria = [
self.app.debug,
flask.request.is_secure,
flask.request.headers.get('X-Forwarded-Proto', 'http') == 'https',
]
local_options = self._get_local_options()
if local_options['force_https'] and not any(criteria):
if flask.request.url.startswith('http://'):
url = flask.request.url.replace('http://', 'https://', 1)
code = 302
if self.force_https_permanent:
code = 301
r = flask.redirect(url, code=code)
return r |
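A minimal standalone sketch of the same redirect rule (not the extension's implementation itself); it must run inside a Flask request context, and the function name and parameters are assumptions.

```python
import flask

def https_redirect_or_none(force_https, permanent=False):
    """Return a redirect response when the current request should be upgraded."""
    criteria = [
        flask.current_app.debug,
        flask.request.is_secure,
        flask.request.headers.get('X-Forwarded-Proto', 'http') == 'https',
    ]
    if force_https and not any(criteria) and flask.request.url.startswith('http://'):
        url = flask.request.url.replace('http://', 'https://', 1)
        return flask.redirect(url, code=301 if permanent else 302)
    return None  # request is already secure, or redirecting is disabled
```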
def create(self, dataset_id):
""" Create a dataset in Google BigQuery
Parameters
----------
dataset : str
Name of dataset to be written
"""
from google.cloud.bigquery import Dataset
if self.exists(dataset_id):
raise DatasetCreationError(
"Dataset {0} already " "exists".format(dataset_id)
)
dataset = Dataset(self.client.dataset(dataset_id))
if self.location is not None:
dataset.location = self.location
try:
self.client.create_dataset(dataset)
except self.http_error as ex:
self.process_http_error(ex) | Create a dataset in Google BigQuery
Parameters
----------
dataset : str
Name of dataset to be written | Below is the instruction that describes the task:
### Input:
Create a dataset in Google BigQuery
Parameters
----------
dataset : str
Name of dataset to be written
### Response:
def create(self, dataset_id):
""" Create a dataset in Google BigQuery
Parameters
----------
dataset : str
Name of dataset to be written
"""
from google.cloud.bigquery import Dataset
if self.exists(dataset_id):
raise DatasetCreationError(
"Dataset {0} already " "exists".format(dataset_id)
)
dataset = Dataset(self.client.dataset(dataset_id))
if self.location is not None:
dataset.location = self.location
try:
self.client.create_dataset(dataset)
except self.http_error as ex:
self.process_http_error(ex) |
def _create_session(self):
"""
Creates a fresh session with no/default headers and proxies
"""
logger.debug("Create new phantomjs web driver")
self.driver = webdriver.PhantomJS(desired_capabilities=self.dcap,
**self.driver_args)
self.set_cookies(self.current_cookies)
self.driver.set_window_size(1920, 1080) | Creates a fresh session with no/default headers and proxies | Below is the instruction that describes the task:
### Input:
Creates a fresh session with no/default headers and proxies
### Response:
def _create_session(self):
"""
Creates a fresh session with no/default headers and proxies
"""
logger.debug("Create new phantomjs web driver")
self.driver = webdriver.PhantomJS(desired_capabilities=self.dcap,
**self.driver_args)
self.set_cookies(self.current_cookies)
self.driver.set_window_size(1920, 1080) |
def compact(self) -> str:
"""
Return a transaction in its compact format from the instance
:return:
"""
"""TX:VERSION:NB_ISSUERS:NB_INPUTS:NB_UNLOCKS:NB_OUTPUTS:HAS_COMMENT:LOCKTIME
PUBLIC_KEY:INDEX
...
INDEX:SOURCE:FINGERPRINT:AMOUNT
...
PUBLIC_KEY:AMOUNT
...
COMMENT
"""
doc = "TX:{0}:{1}:{2}:{3}:{4}:{5}:{6}\n".format(self.version,
len(self.issuers),
len(self.inputs),
len(self.unlocks),
len(self.outputs),
'1' if self.comment != "" else '0',
self.locktime)
if self.version >= 3:
doc += "{0}\n".format(self.blockstamp)
for pubkey in self.issuers:
doc += "{0}\n".format(pubkey)
for i in self.inputs:
doc += "{0}\n".format(i.inline(self.version))
for u in self.unlocks:
doc += "{0}\n".format(u.inline())
for o in self.outputs:
doc += "{0}\n".format(o.inline())
if self.comment != "":
doc += "{0}\n".format(self.comment)
for s in self.signatures:
doc += "{0}\n".format(s)
return doc | Return a transaction in its compact format from the instance
:return: | Below is the instruction that describes the task:
### Input:
Return a transaction in its compact format from the instance
:return:
### Response:
def compact(self) -> str:
"""
Return a transaction in its compact format from the instance
:return:
"""
"""TX:VERSION:NB_ISSUERS:NB_INPUTS:NB_UNLOCKS:NB_OUTPUTS:HAS_COMMENT:LOCKTIME
PUBLIC_KEY:INDEX
...
INDEX:SOURCE:FINGERPRINT:AMOUNT
...
PUBLIC_KEY:AMOUNT
...
COMMENT
"""
doc = "TX:{0}:{1}:{2}:{3}:{4}:{5}:{6}\n".format(self.version,
len(self.issuers),
len(self.inputs),
len(self.unlocks),
len(self.outputs),
'1' if self.comment != "" else '0',
self.locktime)
if self.version >= 3:
doc += "{0}\n".format(self.blockstamp)
for pubkey in self.issuers:
doc += "{0}\n".format(pubkey)
for i in self.inputs:
doc += "{0}\n".format(i.inline(self.version))
for u in self.unlocks:
doc += "{0}\n".format(u.inline())
for o in self.outputs:
doc += "{0}\n".format(o.inline())
if self.comment != "":
doc += "{0}\n".format(self.comment)
for s in self.signatures:
doc += "{0}\n".format(s)
return doc |
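A small worked sketch of how the compact header line above is assembled; the field counts are hypothetical, not taken from a real transaction.

```python
# Hypothetical transaction shape: 1 issuer, 2 inputs, 2 unlocks, 2 outputs,
# no comment, locktime 0, document version 10.
version, n_issuers, n_inputs, n_unlocks, n_outputs = 10, 1, 2, 2, 2
comment, locktime = "", 0
header = "TX:{0}:{1}:{2}:{3}:{4}:{5}:{6}\n".format(
    version, n_issuers, n_inputs, n_unlocks, n_outputs,
    '1' if comment != "" else '0', locktime)
print(repr(header))  # 'TX:10:1:2:2:2:0:0\n'
```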
def query(url, output=True, **kwargs):
'''
Query a resource, and decode the return data
Passes through all the parameters described in the
:py:func:`utils.http.query function <salt.utils.http.query>`:
CLI Example:
.. code-block:: bash
salt-run http.query http://somelink.com/
salt-run http.query http://somelink.com/ method=POST \
params='key1=val1&key2=val2'
salt-run http.query http://somelink.com/ method=POST \
data='<xml>somecontent</xml>'
'''
if output is not True:
log.warning('Output option has been deprecated. Please use --quiet.')
if 'node' not in kwargs:
kwargs['node'] = 'master'
opts = __opts__.copy()
if 'opts' in kwargs:
opts.update(kwargs['opts'])
del kwargs['opts']
ret = salt.utils.http.query(url=url, opts=opts, **kwargs)
return ret | Query a resource, and decode the return data
Passes through all the parameters described in the
:py:func:`utils.http.query function <salt.utils.http.query>`:
CLI Example:
.. code-block:: bash
salt-run http.query http://somelink.com/
salt-run http.query http://somelink.com/ method=POST \
params='key1=val1&key2=val2'
salt-run http.query http://somelink.com/ method=POST \
data='<xml>somecontent</xml>' | Below is the instruction that describes the task:
### Input:
Query a resource, and decode the return data
Passes through all the parameters described in the
:py:func:`utils.http.query function <salt.utils.http.query>`:
CLI Example:
.. code-block:: bash
salt-run http.query http://somelink.com/
salt-run http.query http://somelink.com/ method=POST \
params='key1=val1&key2=val2'
salt-run http.query http://somelink.com/ method=POST \
data='<xml>somecontent</xml>'
### Response:
def query(url, output=True, **kwargs):
'''
Query a resource, and decode the return data
Passes through all the parameters described in the
:py:func:`utils.http.query function <salt.utils.http.query>`:
CLI Example:
.. code-block:: bash
salt-run http.query http://somelink.com/
salt-run http.query http://somelink.com/ method=POST \
params='key1=val1&key2=val2'
salt-run http.query http://somelink.com/ method=POST \
data='<xml>somecontent</xml>'
'''
if output is not True:
log.warning('Output option has been deprecated. Please use --quiet.')
if 'node' not in kwargs:
kwargs['node'] = 'master'
opts = __opts__.copy()
if 'opts' in kwargs:
opts.update(kwargs['opts'])
del kwargs['opts']
ret = salt.utils.http.query(url=url, opts=opts, **kwargs)
return ret |
def _parse_tree(self, node):
""" Parse a <image> object """
if 'type' in node.attrib:
self.kind = node.attrib['type']
if 'width' in node.attrib:
self.width = int(node.attrib['width'])
if 'height' in node.attrib:
self.height = int(node.attrib['height'])
self.url = node.text | Parse an <image> object | Below is the instruction that describes the task:
### Input:
Parse an <image> object
### Response:
def _parse_tree(self, node):
""" Parse a <image> object """
if 'type' in node.attrib:
self.kind = node.attrib['type']
if 'width' in node.attrib:
self.width = int(node.attrib['width'])
if 'height' in node.attrib:
self.height = int(node.attrib['height'])
self.url = node.text |
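A self-contained sketch of the same `<image>` parsing logic using only the standard library; the sample XML is made up for illustration.

```python
import xml.etree.ElementTree as ET

node = ET.fromstring(
    '<image type="thumbnail" width="64" height="48">http://example.com/a.png</image>')
kind = node.attrib.get('type')                                             # 'thumbnail'
width = int(node.attrib['width']) if 'width' in node.attrib else None      # 64
height = int(node.attrib['height']) if 'height' in node.attrib else None   # 48
url = node.text                                            # 'http://example.com/a.png'
```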
def set_data(self, adjacency_mat=None, **kwargs):
"""Set the data
Parameters
----------
adjacency_mat : ndarray | None
The adjacency matrix.
**kwargs : dict
Keyword arguments to pass to the arrows.
"""
if adjacency_mat is not None:
if adjacency_mat.shape[0] != adjacency_mat.shape[1]:
raise ValueError("Adjacency matrix should be square.")
self._adjacency_mat = adjacency_mat
for k in self._arrow_attributes:
if k in kwargs:
translated = (self._arrow_kw_trans[k] if k in
self._arrow_kw_trans else k)
setattr(self._edges, translated, kwargs.pop(k))
arrow_kwargs = {}
for k in self._arrow_kwargs:
if k in kwargs:
translated = (self._arrow_kw_trans[k] if k in
self._arrow_kw_trans else k)
arrow_kwargs[translated] = kwargs.pop(k)
node_kwargs = {}
for k in self._node_kwargs:
if k in kwargs:
translated = (self._node_kw_trans[k] if k in
self._node_kw_trans else k)
node_kwargs[translated] = kwargs.pop(k)
if len(kwargs) > 0:
raise TypeError("%s.set_data() got invalid keyword arguments: %S"
% (self.__class__.__name__, list(kwargs.keys())))
# The actual data is set in GraphVisual.animate_layout or
# GraphVisual.set_final_layout
self._arrow_data = arrow_kwargs
self._node_data = node_kwargs
if not self._animate:
self.set_final_layout() | Set the data
Parameters
----------
adjacency_mat : ndarray | None
The adjacency matrix.
**kwargs : dict
Keyword arguments to pass to the arrows. | Below is the instruction that describes the task:
### Input:
Set the data
Parameters
----------
adjacency_mat : ndarray | None
The adjacency matrix.
**kwargs : dict
Keyword arguments to pass to the arrows.
### Response:
def set_data(self, adjacency_mat=None, **kwargs):
"""Set the data
Parameters
----------
adjacency_mat : ndarray | None
The adjacency matrix.
**kwargs : dict
Keyword arguments to pass to the arrows.
"""
if adjacency_mat is not None:
if adjacency_mat.shape[0] != adjacency_mat.shape[1]:
raise ValueError("Adjacency matrix should be square.")
self._adjacency_mat = adjacency_mat
for k in self._arrow_attributes:
if k in kwargs:
translated = (self._arrow_kw_trans[k] if k in
self._arrow_kw_trans else k)
setattr(self._edges, translated, kwargs.pop(k))
arrow_kwargs = {}
for k in self._arrow_kwargs:
if k in kwargs:
translated = (self._arrow_kw_trans[k] if k in
self._arrow_kw_trans else k)
arrow_kwargs[translated] = kwargs.pop(k)
node_kwargs = {}
for k in self._node_kwargs:
if k in kwargs:
translated = (self._node_kw_trans[k] if k in
self._node_kw_trans else k)
node_kwargs[translated] = kwargs.pop(k)
if len(kwargs) > 0:
raise TypeError("%s.set_data() got invalid keyword arguments: %S"
% (self.__class__.__name__, list(kwargs.keys())))
# The actual data is set in GraphVisual.animate_layout or
# GraphVisual.set_final_layout
self._arrow_data = arrow_kwargs
self._node_data = node_kwargs
if not self._animate:
self.set_final_layout() |
def free(self):
"""Returns an amount of free space on remote WebDAV server.
More information can be found at http://webdav.org/specs/rfc4918.html#METHOD_PROPFIND
:return: an amount of free space in bytes.
"""
data = WebDavXmlUtils.create_free_space_request_content()
response = self.execute_request(action='free', path='', data=data)
return WebDavXmlUtils.parse_free_space_response(response.content, self.webdav.hostname) | Returns an amount of free space on remote WebDAV server.
More information can be found at http://webdav.org/specs/rfc4918.html#METHOD_PROPFIND
:return: an amount of free space in bytes. | Below is the instruction that describes the task:
### Input:
Returns an amount of free space on remote WebDAV server.
More information can be found at http://webdav.org/specs/rfc4918.html#METHOD_PROPFIND
:return: an amount of free space in bytes.
### Response:
def free(self):
"""Returns an amount of free space on remote WebDAV server.
More information can be found at http://webdav.org/specs/rfc4918.html#METHOD_PROPFIND
:return: an amount of free space in bytes.
"""
data = WebDavXmlUtils.create_free_space_request_content()
response = self.execute_request(action='free', path='', data=data)
return WebDavXmlUtils.parse_free_space_response(response.content, self.webdav.hostname) |
def _on_message(channel, method, header, body):
"""
Invoked by pika when a message is delivered from RabbitMQ. The
channel is passed for your convenience. The basic_deliver object that
is passed in carries the exchange, routing key, delivery tag and
a redelivered flag for the message. The properties passed in is an
instance of BasicProperties with the message properties and the body
is the message that was sent.
:param pika.channel.Channel channel: The channel object
:param pika.Spec.Basic.Deliver method: The Deliver method
:param pika.Spec.BasicProperties properties: The client properties
:param str|unicode body: The message body
"""
print "Message:"
print "\t%r" % method
print "\t%r" % header
print "\t%r" % body
# Acknowledge message receipt
channel.basic_ack(method.delivery_tag)
# when ready, stop consuming
channel.stop_consuming() | Invoked by pika when a message is delivered from RabbitMQ. The
channel is passed for your convenience. The basic_deliver object that
is passed in carries the exchange, routing key, delivery tag and
a redelivered flag for the message. The properties passed in is an
instance of BasicProperties with the message properties and the body
is the message that was sent.
:param pika.channel.Channel channel: The channel object
:param pika.Spec.Basic.Deliver method: The Deliver method
:param pika.Spec.BasicProperties properties: The client properties
:param str|unicode body: The message body | Below is the instruction that describes the task:
### Input:
Invoked by pika when a message is delivered from RabbitMQ. The
channel is passed for your convenience. The basic_deliver object that
is passed in carries the exchange, routing key, delivery tag and
a redelivered flag for the message. The properties passed in is an
instance of BasicProperties with the message properties and the body
is the message that was sent.
:param pika.channel.Channel channel: The channel object
:param pika.Spec.Basic.Deliver method: The Deliver method
:param pika.Spec.BasicProperties properties: The client properties
:param str|unicode body: The message body
### Response:
def _on_message(channel, method, header, body):
"""
Invoked by pika when a message is delivered from RabbitMQ. The
channel is passed for your convenience. The basic_deliver object that
is passed in carries the exchange, routing key, delivery tag and
a redelivered flag for the message. The properties passed in is an
instance of BasicProperties with the message properties and the body
is the message that was sent.
:param pika.channel.Channel channel: The channel object
:param pika.Spec.Basic.Deliver method: The Deliver method
:param pika.Spec.BasicProperties properties: The client properties
:param str|unicode body: The message body
"""
print "Message:"
print "\t%r" % method
print "\t%r" % header
print "\t%r" % body
# Acknowledge message receipt
channel.basic_ack(method.delivery_tag)
# when ready, stop consuming
channel.stop_consuming() |
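A hedged sketch of how such a callback is typically registered with pika; the queue name is a placeholder, and the keyword form of `basic_consume` shown here is the pika >= 1.0 signature (older releases took the callback positionally).

```python
import pika

connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = connection.channel()
channel.queue_declare(queue='example')
# pika invokes the callback with (channel, method, properties, body).
channel.basic_consume(queue='example', on_message_callback=_on_message)
channel.start_consuming()
```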
def generate_screenshots(self):
"""
Take a config file as input and generate screenshots
"""
headers = {'content-type': 'application/json', 'Accept': 'application/json'}
resp = requests.post(self.api_url, data=json.dumps(self.config), \
headers=headers, auth=self.auth)
resp = self._process_response(resp)
return resp.json() | Take a config file as input and generate screenshots | Below is the instruction that describes the task:
### Input:
Take a config file as input and generate screenshots
### Response:
def generate_screenshots(self):
"""
Take a config file as input and generate screenshots
"""
headers = {'content-type': 'application/json', 'Accept': 'application/json'}
resp = requests.post(self.api_url, data=json.dumps(self.config), \
headers=headers, auth=self.auth)
resp = self._process_response(resp)
return resp.json() |
def get_endpoint(self, session, **kwargs):
"""Get the HubiC storage endpoint uri.
If the current session has not been authenticated, this will trigger a
new authentication to the HubiC OAuth service.
:param keystoneclient.Session session: The session object to use for
queries.
:raises keystoneclient.exceptions.AuthorizationFailure: if something
goes wrong.
:returns: The uri to use for object-storage v1 requests.
:rtype: string
"""
if self.endpoint is None:
try:
self._refresh_tokens(session)
self._fetch_credentials(session)
except:
raise AuthorizationFailure()
return self.endpoint | Get the HubiC storage endpoint uri.
If the current session has not been authenticated, this will trigger a
new authentication to the HubiC OAuth service.
:param keystoneclient.Session session: The session object to use for
queries.
:raises keystoneclient.exceptions.AuthorizationFailure: if something
goes wrong.
:returns: The uri to use for object-storage v1 requests.
:rtype: string | Below is the instruction that describes the task:
### Input:
Get the HubiC storage endpoint uri.
If the current session has not been authenticated, this will trigger a
new authentication to the HubiC OAuth service.
:param keystoneclient.Session session: The session object to use for
queries.
:raises keystoneclient.exceptions.AuthorizationFailure: if something
goes wrong.
:returns: The uri to use for object-storage v1 requests.
:rtype: string
### Response:
def get_endpoint(self, session, **kwargs):
"""Get the HubiC storage endpoint uri.
If the current session has not been authenticated, this will trigger a
new authentication to the HubiC OAuth service.
:param keystoneclient.Session session: The session object to use for
queries.
:raises keystoneclient.exceptions.AuthorizationFailure: if something
goes wrong.
:returns: The uri to use for object-storage v1 requests.
:rtype: string
"""
if self.endpoint is None:
try:
self._refresh_tokens(session)
self._fetch_credentials(session)
except:
raise AuthorizationFailure()
return self.endpoint |
def _create_variables_no_pretrain(self, n_features):
"""Create model variables (no previous unsupervised pretraining).
:param n_features: number of features
:return: self
"""
self.encoding_w_ = []
self.encoding_b_ = []
for l, layer in enumerate(self.layers):
w_name = 'enc-w-{}'.format(l)
b_name = 'enc-b-{}'.format(l)
if l == 0:
w_shape = [n_features, self.layers[l]]
else:
w_shape = [self.layers[l - 1], self.layers[l]]
w_init = tf.truncated_normal(shape=w_shape, stddev=0.1)
W = tf.Variable(w_init, name=w_name)
tf.summary.histogram(w_name, W)
self.encoding_w_.append(W)
b_init = tf.constant(0.1, shape=[self.layers[l]])
b = tf.Variable(b_init, name=b_name)
tf.summary.histogram(b_name, b)
self.encoding_b_.append(b) | Create model variables (no previous unsupervised pretraining).
:param n_features: number of features
:return: self | Below is the instruction that describes the task:
### Input:
Create model variables (no previous unsupervised pretraining).
:param n_features: number of features
:return: self
### Response:
def _create_variables_no_pretrain(self, n_features):
"""Create model variables (no previous unsupervised pretraining).
:param n_features: number of features
:return: self
"""
self.encoding_w_ = []
self.encoding_b_ = []
for l, layer in enumerate(self.layers):
w_name = 'enc-w-{}'.format(l)
b_name = 'enc-b-{}'.format(l)
if l == 0:
w_shape = [n_features, self.layers[l]]
else:
w_shape = [self.layers[l - 1], self.layers[l]]
w_init = tf.truncated_normal(shape=w_shape, stddev=0.1)
W = tf.Variable(w_init, name=w_name)
tf.summary.histogram(w_name, W)
self.encoding_w_.append(W)
b_init = tf.constant(0.1, shape=[self.layers[l]])
b = tf.Variable(b_init, name=b_name)
tf.summary.histogram(b_name, b)
self.encoding_b_.append(b) |
def individual(self, ind_id=None):
"""Return a individual object
Args:
ind_id (str): A individual id
Returns:
individual (puzzle.models.individual)
"""
for ind_obj in self.individuals:
if ind_obj.ind_id == ind_id:
return ind_obj
return None | Return an individual object
Args:
ind_id (str): A individual id
Returns:
individual (puzzle.models.individual) | Below is the instruction that describes the task:
### Input:
Return an individual object
Args:
ind_id (str): A individual id
Returns:
individual (puzzle.models.individual)
### Response:
def individual(self, ind_id=None):
"""Return a individual object
Args:
ind_id (str): A individual id
Returns:
individual (puzzle.models.individual)
"""
for ind_obj in self.individuals:
if ind_obj.ind_id == ind_id:
return ind_obj
return None |
async def request(
self,
method: str,
endpoint: str,
*,
headers: dict = None,
params: dict = None) -> dict:
"""Make a request against air-matters.com."""
url = '{0}/{1}'.format(API_URL_SCAFFOLD, endpoint)
if not headers:
headers = {}
headers.update({'x-access-token': self._api_key})
if not params:
params = {}
params.update({
'lat': self.latitude,
'lng': self.longitude,
'alt': self.altitude
})
async with self._websession.request(method, url, headers=headers,
params=params) as resp:
try:
resp.raise_for_status()
return await resp.json(content_type=None)
except client_exceptions.ClientError as err:
if any(code in str(err) for code in ('401', '403')):
raise InvalidApiKeyError('Invalid API key')
raise RequestError(
'Error requesting data from {0}: {1}'.format(
endpoint, err)) from None | Make a request against air-matters.com. | Below is the instruction that describes the task:
### Input:
Make a request against air-matters.com.
### Response:
async def request(
self,
method: str,
endpoint: str,
*,
headers: dict = None,
params: dict = None) -> dict:
"""Make a request against air-matters.com."""
url = '{0}/{1}'.format(API_URL_SCAFFOLD, endpoint)
if not headers:
headers = {}
headers.update({'x-access-token': self._api_key})
if not params:
params = {}
params.update({
'lat': self.latitude,
'lng': self.longitude,
'alt': self.altitude
})
async with self._websession.request(method, url, headers=headers,
params=params) as resp:
try:
resp.raise_for_status()
return await resp.json(content_type=None)
except client_exceptions.ClientError as err:
if any(code in str(err) for code in ('401', '403')):
raise InvalidApiKeyError('Invalid API key')
raise RequestError(
'Error requesting data from {0}: {1}'.format(
endpoint, err)) from None |
def p_expr_BAND_expr(p):
""" expr : expr BAND expr
"""
p[0] = make_binary(p.lineno(2), 'BAND', p[1], p[3], lambda x, y: x & y) | expr : expr BAND expr | Below is the instruction that describes the task:
### Input:
expr : expr BAND expr
### Response:
def p_expr_BAND_expr(p):
""" expr : expr BAND expr
"""
p[0] = make_binary(p.lineno(2), 'BAND', p[1], p[3], lambda x, y: x & y) |
def attribute_rewrite_map(self):
"""
Example: long_name -> a_b
:return: the rewrite map
:rtype: dict
"""
rewrite_map = dict()
token_rewrite_map = self.generate_attribute_token_rewrite_map()
for attribute_name, type_instance in self.getmembers():
if isinstance(type_instance, DataType):
attribute_tokens = attribute_name.split('_')
rewritten_attribute_name = ''
for token in attribute_tokens:
rewritten_attribute_name += token_rewrite_map[token] + "_"
# remove the trailing underscore
rewritten_attribute_name = rewritten_attribute_name[:-1]
rewrite_map[attribute_name] = rewritten_attribute_name
return rewrite_map | Example: long_name -> a_b
:return: the rewrite map
:rtype: dict | Below is the instruction that describes the task:
### Input:
Example: long_name -> a_b
:return: the rewrite map
:rtype: dict
### Response:
def attribute_rewrite_map(self):
"""
Example: long_name -> a_b
:return: the rewrite map
:rtype: dict
"""
rewrite_map = dict()
token_rewrite_map = self.generate_attribute_token_rewrite_map()
for attribute_name, type_instance in self.getmembers():
if isinstance(type_instance, DataType):
attribute_tokens = attribute_name.split('_')
rewritten_attribute_name = ''
for token in attribute_tokens:
rewritten_attribute_name += token_rewrite_map[token] + "_"
# remove the trailing underscore
rewritten_attribute_name = rewritten_attribute_name[:-1]
rewrite_map[attribute_name] = rewritten_attribute_name
return rewrite_map |
def delete(self, request, key):
"""Remove an email address, validated or not."""
request.DELETE = http.QueryDict(request.body)
email_addr = request.DELETE.get('email')
user_id = request.DELETE.get('user')
if not email_addr:
return http.HttpResponseBadRequest()
try:
email = EmailAddressValidation.objects.get(address=email_addr,
user_id=user_id)
except EmailAddressValidation.DoesNotExist:
pass
else:
email.delete()
return http.HttpResponse(status=204)
try:
email = EmailAddress.objects.get(address=email_addr,
user_id=user_id)
except EmailAddress.DoesNotExist:
raise http.Http404
email.user = None
email.save()
return http.HttpResponse(status=204) | Remove an email address, validated or not. | Below is the instruction that describes the task:
### Input:
Remove an email address, validated or not.
### Response:
def delete(self, request, key):
"""Remove an email address, validated or not."""
request.DELETE = http.QueryDict(request.body)
email_addr = request.DELETE.get('email')
user_id = request.DELETE.get('user')
if not email_addr:
return http.HttpResponseBadRequest()
try:
email = EmailAddressValidation.objects.get(address=email_addr,
user_id=user_id)
except EmailAddressValidation.DoesNotExist:
pass
else:
email.delete()
return http.HttpResponse(status=204)
try:
email = EmailAddress.objects.get(address=email_addr,
user_id=user_id)
except EmailAddress.DoesNotExist:
raise http.Http404
email.user = None
email.save()
return http.HttpResponse(status=204) |
def join_room(self, room_id_or_alias):
"""Performs /join/$room_id
Args:
room_id_or_alias (str): The room ID or room alias to join.
"""
if not room_id_or_alias:
raise MatrixError("No alias or room ID to join.")
path = "/join/%s" % quote(room_id_or_alias)
return self._send("POST", path) | Performs /join/$room_id
Args:
room_id_or_alias (str): The room ID or room alias to join. | Below is the instruction that describes the task:
### Input:
Performs /join/$room_id
Args:
room_id_or_alias (str): The room ID or room alias to join.
### Response:
def join_room(self, room_id_or_alias):
"""Performs /join/$room_id
Args:
room_id_or_alias (str): The room ID or room alias to join.
"""
if not room_id_or_alias:
raise MatrixError("No alias or room ID to join.")
path = "/join/%s" % quote(room_id_or_alias)
return self._send("POST", path) |
def private_download_url(self, url, expires=3600):
"""生成私有资源下载链接
Args:
url: 私有空间资源的原始URL
expires: 下载凭证有效期,默认为3600s
Returns:
私有资源的下载链接
"""
deadline = int(time.time()) + expires
if '?' in url:
url += '&'
else:
url += '?'
url = '{0}e={1}'.format(url, str(deadline))
token = self.token(url)
return '{0}&token={1}'.format(url, token) | Generate a download URL for a private resource
Args:
url: the original URL of the resource in the private bucket
expires: validity period of the download credential, defaults to 3600s
Returns:
the download URL for the private resource | Below is the instruction that describes the task:
### Input:
Generate a download URL for a private resource
Args:
url: the original URL of the resource in the private bucket
expires: validity period of the download credential, defaults to 3600s
Returns:
the download URL for the private resource
### Response:
def private_download_url(self, url, expires=3600):
"""生成私有资源下载链接
Args:
url: 私有空间资源的原始URL
expires: 下载凭证有效期,默认为3600s
Returns:
私有资源的下载链接
"""
deadline = int(time.time()) + expires
if '?' in url:
url += '&'
else:
url += '?'
url = '{0}e={1}'.format(url, str(deadline))
token = self.token(url)
return '{0}&token={1}'.format(url, token) |
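A hedged usage sketch based on the qiniu Python SDK's `Auth` helper; the keys and bucket domain are placeholders.

```python
from qiniu import Auth

q = Auth('<access_key>', '<secret_key>')
base_url = 'http://your-bucket-domain.example.com/path/key.jpg'
signed_url = q.private_download_url(base_url, expires=3600)
# signed_url == base_url + '?e=<deadline>&token=<access_key>:<signature>'
```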
def read(self):
"""Read a line of data from the input source at a time."""
line = self.trace_file.readline()
if line == '':
if self.loop:
self._reopen_file()
else:
self.trace_file.close()
self.trace_file = None
raise DataSourceError()
message = JsonFormatter.deserialize(line)
timestamp = message.get('timestamp', None)
if self.realtime and timestamp is not None:
self._store_timestamp(timestamp)
self._wait(self.starting_time, self.first_timestamp, timestamp)
return line + "\x00" | Read a line of data from the input source at a time. | Below is the the instruction that describes the task:
### Input:
Read a line of data from the input source at a time.
### Response:
def read(self):
"""Read a line of data from the input source at a time."""
line = self.trace_file.readline()
if line == '':
if self.loop:
self._reopen_file()
else:
self.trace_file.close()
self.trace_file = None
raise DataSourceError()
message = JsonFormatter.deserialize(line)
timestamp = message.get('timestamp', None)
if self.realtime and timestamp is not None:
self._store_timestamp(timestamp)
self._wait(self.starting_time, self.first_timestamp, timestamp)
return line + "\x00" |
def create(self, req, **kwargs):
"""
Uses POST to send a first metadata statement signing request to
a signing service.
:param req: The metadata statement that the entity wants signed
:return: returns a dictionary with 'sms' and 'loc' as keys.
"""
response = requests.post(self.url, json=req, **self.req_args())
return self.parse_response(response) | Uses POST to send a first metadata statement signing request to
a signing service.
:param req: The metadata statement that the entity wants signed
:return: returns a dictionary with 'sms' and 'loc' as keys. | Below is the instruction that describes the task:
### Input:
Uses POST to send a first metadata statement signing request to
a signing service.
:param req: The metadata statement that the entity wants signed
:return: returns a dictionary with 'sms' and 'loc' as keys.
### Response:
def create(self, req, **kwargs):
"""
Uses POST to send a first metadata statement signing request to
a signing service.
:param req: The metadata statement that the entity wants signed
:return: returns a dictionary with 'sms' and 'loc' as keys.
"""
response = requests.post(self.url, json=req, **self.req_args())
return self.parse_response(response) |
def _to_graph(self, contexts):
"""This is an iterator that returns each edge of our graph
with its two nodes"""
prev = None
for context in contexts:
if prev is None:
prev = context
continue
yield prev[0], context[1], context[0]
prev = context | This is an iterator that returns each edge of our graph
with its two nodes | Below is the instruction that describes the task:
### Input:
This is an iterator that returns each edge of our graph
with its two nodes
### Response:
def _to_graph(self, contexts):
"""This is an iterator that returns each edge of our graph
with its two nodes"""
prev = None
for context in contexts:
if prev is None:
prev = context
continue
yield prev[0], context[1], context[0]
prev = context |
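A small worked example of the same edge-iteration logic, rewritten as a standalone generator so it can be run in isolation; the context tuples are made up.

```python
def to_graph(contexts):
    prev = None
    for context in contexts:
        if prev is None:
            prev = context
            continue
        # (previous node, current edge label, current node)
        yield prev[0], context[1], context[0]
        prev = context

print(list(to_graph([('A', 'x'), ('B', 'y'), ('C', 'z')])))
# [('A', 'y', 'B'), ('B', 'z', 'C')]
```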
def to_igraph(self, weighted=None):
'''Converts this Graph object to an igraph-compatible object.
Requires the python-igraph library.'''
# Import here to avoid ImportErrors when igraph isn't available.
import igraph
ig = igraph.Graph(n=self.num_vertices(), edges=self.pairs().tolist(),
directed=self.is_directed())
if weighted is not False and self.is_weighted():
ig.es['weight'] = self.edge_weights()
return ig | Converts this Graph object to an igraph-compatible object.
Requires the python-igraph library. | Below is the instruction that describes the task:
### Input:
Converts this Graph object to an igraph-compatible object.
Requires the python-igraph library.
### Response:
def to_igraph(self, weighted=None):
'''Converts this Graph object to an igraph-compatible object.
Requires the python-igraph library.'''
# Import here to avoid ImportErrors when igraph isn't available.
import igraph
ig = igraph.Graph(n=self.num_vertices(), edges=self.pairs().tolist(),
directed=self.is_directed())
if weighted is not False and self.is_weighted():
ig.es['weight'] = self.edge_weights()
return ig |
def doi(self, doi, only_message=True):
"""
This method retrieve the DOI metadata related to a given DOI
number.
args: Crossref DOI id (String)
return: JSON
Example:
>>> from crossref.restful import Works
>>> works = Works()
>>> works.doi('10.1590/S0004-28032013005000001')
{'is-referenced-by-count': 6, 'reference-count': 216, 'DOI': '10.1590/s0004-28032013005000001',
'subtitle': [], 'issued': {'date-parts': [[2013, 4, 19]]}, 'source': 'Crossref',
'short-container-title': ['Arq. Gastroenterol.'], 'references-count': 216, 'short-title': [],
'deposited': {'timestamp': 1495911725000, 'date-time': '2017-05-27T19:02:05Z',
'date-parts': [[2017, 5, 27]]}, 'ISSN': ['0004-2803'], 'type': 'journal-article',
'URL': 'http://dx.doi.org/10.1590/s0004-28032013005000001',
'indexed': {'timestamp': 1496034748592, 'date-time': '2017-05-29T05:12:28Z',
'date-parts': [[2017, 5, 29]]}, 'content-domain': {'crossmark-restriction': False, 'domain': []},
'created': {'timestamp': 1374613284000, 'date-time': '2013-07-23T21:01:24Z',
'date-parts': [[2013, 7, 23]]}, 'issn-type': [{'value': '0004-2803', 'type': 'electronic'}],
'page': '81-96', 'volume': '50', 'original-title': [], 'subject': ['Gastroenterology'],
'relation': {}, 'container-title': ['Arquivos de Gastroenterologia'], 'member': '530',
'prefix': '10.1590', 'published-print': {'date-parts': [[2013, 4, 19]]},
'title': ['3rd BRAZILIAN CONSENSUS ON Helicobacter pylori'],
'publisher': 'FapUNIFESP (SciELO)', 'alternative-id': ['S0004-28032013000200081'],
'abstract': '<jats:p>Significant abstract data..... .</jats:p>',
'author': [{'affiliation': [{'name': 'Universidade Federal de Minas Gerais, BRAZIL'}],
'family': 'Coelho', 'given': 'Luiz Gonzaga'}, {'affiliation': [
{'name': 'Universidade Federal do Rio Grande do Sul, Brazil'}], 'family': 'Maguinilk',
'given': 'Ismael'}, {'affiliation': [
{'name': 'Presidente de Honra do Núcleo Brasileiro para Estudo do Helicobacter, Brazil'}],
'family': 'Zaterka', 'given': 'Schlioma'}, {'affiliation': [
{'name': 'Universidade Federal do Piauí, Brasil'}], 'family': 'Parente', 'given': 'José Miguel'},
{'affiliation': [{'name': 'Universidade Federal de Minas Gerais, BRAZIL'}],
'family': 'Passos', 'given': 'Maria do Carmo Friche'}, {'affiliation': [
{'name': 'Universidade de São Paulo, Brasil'}], 'family': 'Moraes-Filho',
'given': 'Joaquim Prado P.'}], 'score': 1.0, 'issue': '2'}
"""
request_url = build_url_endpoint(
'/'.join([self.ENDPOINT, doi])
)
request_params = {}
result = self.do_http_request(
'get',
request_url,
data=request_params,
custom_header=str(self.etiquette)
)
if result.status_code == 404:
return
result = result.json()
return result['message'] if only_message is True else result | This method retrieves the DOI metadata related to a given DOI
number.
args: Crossref DOI id (String)
return: JSON
Example:
>>> from crossref.restful import Works
>>> works = Works()
>>> works.doi('10.1590/S0004-28032013005000001')
{'is-referenced-by-count': 6, 'reference-count': 216, 'DOI': '10.1590/s0004-28032013005000001',
'subtitle': [], 'issued': {'date-parts': [[2013, 4, 19]]}, 'source': 'Crossref',
'short-container-title': ['Arq. Gastroenterol.'], 'references-count': 216, 'short-title': [],
'deposited': {'timestamp': 1495911725000, 'date-time': '2017-05-27T19:02:05Z',
'date-parts': [[2017, 5, 27]]}, 'ISSN': ['0004-2803'], 'type': 'journal-article',
'URL': 'http://dx.doi.org/10.1590/s0004-28032013005000001',
'indexed': {'timestamp': 1496034748592, 'date-time': '2017-05-29T05:12:28Z',
'date-parts': [[2017, 5, 29]]}, 'content-domain': {'crossmark-restriction': False, 'domain': []},
'created': {'timestamp': 1374613284000, 'date-time': '2013-07-23T21:01:24Z',
'date-parts': [[2013, 7, 23]]}, 'issn-type': [{'value': '0004-2803', 'type': 'electronic'}],
'page': '81-96', 'volume': '50', 'original-title': [], 'subject': ['Gastroenterology'],
'relation': {}, 'container-title': ['Arquivos de Gastroenterologia'], 'member': '530',
'prefix': '10.1590', 'published-print': {'date-parts': [[2013, 4, 19]]},
'title': ['3rd BRAZILIAN CONSENSUS ON Helicobacter pylori'],
'publisher': 'FapUNIFESP (SciELO)', 'alternative-id': ['S0004-28032013000200081'],
'abstract': '<jats:p>Significant abstract data..... .</jats:p>',
'author': [{'affiliation': [{'name': 'Universidade Federal de Minas Gerais, BRAZIL'}],
'family': 'Coelho', 'given': 'Luiz Gonzaga'}, {'affiliation': [
{'name': 'Universidade Federal do Rio Grande do Sul, Brazil'}], 'family': 'Maguinilk',
'given': 'Ismael'}, {'affiliation': [
{'name': 'Presidente de Honra do Núcleo Brasileiro para Estudo do Helicobacter, Brazil'}],
'family': 'Zaterka', 'given': 'Schlioma'}, {'affiliation': [
{'name': 'Universidade Federal do Piauí, Brasil'}], 'family': 'Parente', 'given': 'José Miguel'},
{'affiliation': [{'name': 'Universidade Federal de Minas Gerais, BRAZIL'}],
'family': 'Passos', 'given': 'Maria do Carmo Friche'}, {'affiliation': [
{'name': 'Universidade de São Paulo, Brasil'}], 'family': 'Moraes-Filho',
'given': 'Joaquim Prado P.'}], 'score': 1.0, 'issue': '2'} | Below is the instruction that describes the task:
### Input:
This method retrieves the DOI metadata related to a given DOI
number.
args: Crossref DOI id (String)
return: JSON
Example:
>>> from crossref.restful import Works
>>> works = Works()
>>> works.doi('10.1590/S0004-28032013005000001')
{'is-referenced-by-count': 6, 'reference-count': 216, 'DOI': '10.1590/s0004-28032013005000001',
'subtitle': [], 'issued': {'date-parts': [[2013, 4, 19]]}, 'source': 'Crossref',
'short-container-title': ['Arq. Gastroenterol.'], 'references-count': 216, 'short-title': [],
'deposited': {'timestamp': 1495911725000, 'date-time': '2017-05-27T19:02:05Z',
'date-parts': [[2017, 5, 27]]}, 'ISSN': ['0004-2803'], 'type': 'journal-article',
'URL': 'http://dx.doi.org/10.1590/s0004-28032013005000001',
'indexed': {'timestamp': 1496034748592, 'date-time': '2017-05-29T05:12:28Z',
'date-parts': [[2017, 5, 29]]}, 'content-domain': {'crossmark-restriction': False, 'domain': []},
'created': {'timestamp': 1374613284000, 'date-time': '2013-07-23T21:01:24Z',
'date-parts': [[2013, 7, 23]]}, 'issn-type': [{'value': '0004-2803', 'type': 'electronic'}],
'page': '81-96', 'volume': '50', 'original-title': [], 'subject': ['Gastroenterology'],
'relation': {}, 'container-title': ['Arquivos de Gastroenterologia'], 'member': '530',
'prefix': '10.1590', 'published-print': {'date-parts': [[2013, 4, 19]]},
'title': ['3rd BRAZILIAN CONSENSUS ON Helicobacter pylori'],
'publisher': 'FapUNIFESP (SciELO)', 'alternative-id': ['S0004-28032013000200081'],
'abstract': '<jats:p>Significant abstract data..... .</jats:p>',
'author': [{'affiliation': [{'name': 'Universidade Federal de Minas Gerais, BRAZIL'}],
'family': 'Coelho', 'given': 'Luiz Gonzaga'}, {'affiliation': [
{'name': 'Universidade Federal do Rio Grande do Sul, Brazil'}], 'family': 'Maguinilk',
'given': 'Ismael'}, {'affiliation': [
{'name': 'Presidente de Honra do Núcleo Brasileiro para Estudo do Helicobacter, Brazil'}],
'family': 'Zaterka', 'given': 'Schlioma'}, {'affiliation': [
{'name': 'Universidade Federal do Piauí, Brasil'}], 'family': 'Parente', 'given': 'José Miguel'},
{'affiliation': [{'name': 'Universidade Federal de Minas Gerais, BRAZIL'}],
'family': 'Passos', 'given': 'Maria do Carmo Friche'}, {'affiliation': [
{'name': 'Universidade de São Paulo, Brasil'}], 'family': 'Moraes-Filho',
'given': 'Joaquim Prado P.'}], 'score': 1.0, 'issue': '2'}
### Response:
def doi(self, doi, only_message=True):
"""
This method retrieves the DOI metadata related to a given DOI
number.
args: Crossref DOI id (String)
return: JSON
Example:
>>> from crossref.restful import Works
>>> works = Works()
>>> works.doi('10.1590/S0004-28032013005000001')
{'is-referenced-by-count': 6, 'reference-count': 216, 'DOI': '10.1590/s0004-28032013005000001',
'subtitle': [], 'issued': {'date-parts': [[2013, 4, 19]]}, 'source': 'Crossref',
'short-container-title': ['Arq. Gastroenterol.'], 'references-count': 216, 'short-title': [],
'deposited': {'timestamp': 1495911725000, 'date-time': '2017-05-27T19:02:05Z',
'date-parts': [[2017, 5, 27]]}, 'ISSN': ['0004-2803'], 'type': 'journal-article',
'URL': 'http://dx.doi.org/10.1590/s0004-28032013005000001',
'indexed': {'timestamp': 1496034748592, 'date-time': '2017-05-29T05:12:28Z',
'date-parts': [[2017, 5, 29]]}, 'content-domain': {'crossmark-restriction': False, 'domain': []},
'created': {'timestamp': 1374613284000, 'date-time': '2013-07-23T21:01:24Z',
'date-parts': [[2013, 7, 23]]}, 'issn-type': [{'value': '0004-2803', 'type': 'electronic'}],
'page': '81-96', 'volume': '50', 'original-title': [], 'subject': ['Gastroenterology'],
'relation': {}, 'container-title': ['Arquivos de Gastroenterologia'], 'member': '530',
'prefix': '10.1590', 'published-print': {'date-parts': [[2013, 4, 19]]},
'title': ['3rd BRAZILIAN CONSENSUS ON Helicobacter pylori'],
'publisher': 'FapUNIFESP (SciELO)', 'alternative-id': ['S0004-28032013000200081'],
'abstract': '<jats:p>Significant abstract data..... .</jats:p>',
'author': [{'affiliation': [{'name': 'Universidade Federal de Minas Gerais, BRAZIL'}],
'family': 'Coelho', 'given': 'Luiz Gonzaga'}, {'affiliation': [
{'name': 'Universidade Federal do Rio Grande do Sul, Brazil'}], 'family': 'Maguinilk',
'given': 'Ismael'}, {'affiliation': [
{'name': 'Presidente de Honra do Núcleo Brasileiro para Estudo do Helicobacter, Brazil'}],
'family': 'Zaterka', 'given': 'Schlioma'}, {'affiliation': [
{'name': 'Universidade Federal do Piauí, Brasil'}], 'family': 'Parente', 'given': 'José Miguel'},
{'affiliation': [{'name': 'Universidade Federal de Minas Gerais, BRAZIL'}],
'family': 'Passos', 'given': 'Maria do Carmo Friche'}, {'affiliation': [
{'name': 'Universidade de São Paulo, Brasil'}], 'family': 'Moraes-Filho',
'given': 'Joaquim Prado P.'}], 'score': 1.0, 'issue': '2'}
"""
request_url = build_url_endpoint(
'/'.join([self.ENDPOINT, doi])
)
request_params = {}
result = self.do_http_request(
'get',
request_url,
data=request_params,
custom_header=str(self.etiquette)
)
if result.status_code == 404:
return
result = result.json()
return result['message'] if only_message is True else result |
def zonal_stats(raster, vector):
"""Reclassify a continuous raster layer.
Issue https://github.com/inasafe/inasafe/issues/3190
The algorithm will take care of projections.
We don't want to reproject the raster layer.
So if CRS are different, we reproject the vector layer and then we do a
lookup from the reprojected layer to the original vector layer.
:param raster: The raster layer.
:type raster: QgsRasterLayer
:param vector: The vector layer.
:type vector: QgsVectorLayer
:return: The output of the zonal stats.
:rtype: QgsVectorLayer
.. versionadded:: 4.0
"""
output_layer_name = zonal_stats_steps['output_layer_name']
exposure = raster.keywords['exposure']
if raster.crs().authid() != vector.crs().authid():
layer = reproject(vector, raster.crs())
# We prepare the copy
output_layer = create_memory_layer(
output_layer_name,
vector.geometryType(),
vector.crs(),
vector.fields()
)
copy_layer(vector, output_layer)
else:
layer = create_memory_layer(
output_layer_name,
vector.geometryType(),
vector.crs(),
vector.fields()
)
copy_layer(vector, layer)
input_band = layer.keywords.get('active_band', 1)
analysis = QgsZonalStatistics(
layer,
raster,
'exposure_',
input_band,
QgsZonalStatistics.Sum)
result = analysis.calculateStatistics(None)
LOGGER.debug(tr('Zonal stats on %s : %s' % (raster.source(), result)))
output_field = exposure_count_field['field_name'] % exposure
if raster.crs().authid() != vector.crs().authid():
output_layer.startEditing()
field = create_field_from_definition(
exposure_count_field, exposure)
output_layer.addAttribute(field)
new_index = output_layer.fields().lookupField(field.name())
old_index = layer.fields().lookupField('exposure_sum')
for feature_input, feature_output in zip(
layer.getFeatures(), output_layer.getFeatures()):
output_layer.changeAttributeValue(
feature_input.id(), new_index, feature_input[old_index])
output_layer.commitChanges()
layer = output_layer
else:
fields_to_rename = {
'exposure_sum': output_field
}
if qgis_version() >= 21600:
rename_fields(layer, fields_to_rename)
else:
copy_fields(layer, fields_to_rename)
remove_fields(layer, list(fields_to_rename.keys()))
layer.commitChanges()
# The zonal stats is producing some None values. We need to fill these
# with 0. See issue : #3778
# We should start a new editing session as previous fields need to be
# committed first.
layer.startEditing()
request = QgsFeatureRequest()
expression = '\"%s\" is None' % output_field
request.setFilterExpression(expression)
request.setFlags(QgsFeatureRequest.NoGeometry)
index = layer.fields().lookupField(output_field)
for feature in layer.getFeatures():
if feature[output_field] is None:
layer.changeAttributeValue(feature.id(), index, 0)
layer.commitChanges()
layer.keywords = raster.keywords.copy()
layer.keywords['inasafe_fields'] = vector.keywords['inasafe_fields'].copy()
layer.keywords['inasafe_default_values'] = (
raster.keywords['inasafe_default_values'].copy())
key = exposure_count_field['key'] % raster.keywords['exposure']
# Special case here, one field is the exposure count and the total.
layer.keywords['inasafe_fields'][key] = output_field
layer.keywords['inasafe_fields'][total_field['key']] = output_field
layer.keywords['exposure_keywords'] = raster.keywords.copy()
layer.keywords['hazard_keywords'] = vector.keywords[
'hazard_keywords'].copy()
layer.keywords['aggregation_keywords'] = (
vector.keywords['aggregation_keywords'])
layer.keywords['layer_purpose'] = (
layer_purpose_aggregate_hazard_impacted['key'])
layer.keywords['title'] = output_layer_name
check_layer(layer)
return layer | Reclassify a continuous raster layer.
Issue https://github.com/inasafe/inasafe/issues/3190
The algorithm will take care of projections.
We don't want to reproject the raster layer.
So if CRS are different, we reproject the vector layer and then we do a
lookup from the reprojected layer to the original vector layer.
:param raster: The raster layer.
:type raster: QgsRasterLayer
:param vector: The vector layer.
:type vector: QgsVectorLayer
:return: The output of the zonal stats.
:rtype: QgsVectorLayer
.. versionadded:: 4.0 | Below is the instruction that describes the task:
### Input:
Reclassify a continuous raster layer.
Issue https://github.com/inasafe/inasafe/issues/3190
The algorithm will take care of projections.
We don't want to reproject the raster layer.
So if CRS are different, we reproject the vector layer and then we do a
lookup from the reprojected layer to the original vector layer.
:param raster: The raster layer.
:type raster: QgsRasterLayer
:param vector: The vector layer.
:type vector: QgsVectorLayer
:return: The output of the zonal stats.
:rtype: QgsVectorLayer
.. versionadded:: 4.0
### Response:
def zonal_stats(raster, vector):
"""Reclassify a continuous raster layer.
Issue https://github.com/inasafe/inasafe/issues/3190
The algorithm will take care of projections.
We don't want to reproject the raster layer.
So if CRS are different, we reproject the vector layer and then we do a
lookup from the reprojected layer to the original vector layer.
:param raster: The raster layer.
:type raster: QgsRasterLayer
:param vector: The vector layer.
:type vector: QgsVectorLayer
:return: The output of the zonal stats.
:rtype: QgsVectorLayer
.. versionadded:: 4.0
"""
output_layer_name = zonal_stats_steps['output_layer_name']
exposure = raster.keywords['exposure']
if raster.crs().authid() != vector.crs().authid():
layer = reproject(vector, raster.crs())
# We prepare the copy
output_layer = create_memory_layer(
output_layer_name,
vector.geometryType(),
vector.crs(),
vector.fields()
)
copy_layer(vector, output_layer)
else:
layer = create_memory_layer(
output_layer_name,
vector.geometryType(),
vector.crs(),
vector.fields()
)
copy_layer(vector, layer)
input_band = layer.keywords.get('active_band', 1)
analysis = QgsZonalStatistics(
layer,
raster,
'exposure_',
input_band,
QgsZonalStatistics.Sum)
result = analysis.calculateStatistics(None)
LOGGER.debug(tr('Zonal stats on %s : %s' % (raster.source(), result)))
output_field = exposure_count_field['field_name'] % exposure
if raster.crs().authid() != vector.crs().authid():
output_layer.startEditing()
field = create_field_from_definition(
exposure_count_field, exposure)
output_layer.addAttribute(field)
new_index = output_layer.fields().lookupField(field.name())
old_index = layer.fields().lookupField('exposure_sum')
for feature_input, feature_output in zip(
layer.getFeatures(), output_layer.getFeatures()):
output_layer.changeAttributeValue(
feature_input.id(), new_index, feature_input[old_index])
output_layer.commitChanges()
layer = output_layer
else:
fields_to_rename = {
'exposure_sum': output_field
}
if qgis_version() >= 21600:
rename_fields(layer, fields_to_rename)
else:
copy_fields(layer, fields_to_rename)
remove_fields(layer, list(fields_to_rename.keys()))
layer.commitChanges()
# The zonal stats is producing some None values. We need to fill these
# with 0. See issue : #3778
# We should start a new editing session as previous fields need to be
# committed first.
layer.startEditing()
request = QgsFeatureRequest()
expression = '\"%s\" is None' % output_field
request.setFilterExpression(expression)
request.setFlags(QgsFeatureRequest.NoGeometry)
index = layer.fields().lookupField(output_field)
for feature in layer.getFeatures():
if feature[output_field] is None:
layer.changeAttributeValue(feature.id(), index, 0)
layer.commitChanges()
layer.keywords = raster.keywords.copy()
layer.keywords['inasafe_fields'] = vector.keywords['inasafe_fields'].copy()
layer.keywords['inasafe_default_values'] = (
raster.keywords['inasafe_default_values'].copy())
key = exposure_count_field['key'] % raster.keywords['exposure']
# Special case here, one field is the exposure count and the total.
layer.keywords['inasafe_fields'][key] = output_field
layer.keywords['inasafe_fields'][total_field['key']] = output_field
layer.keywords['exposure_keywords'] = raster.keywords.copy()
layer.keywords['hazard_keywords'] = vector.keywords[
'hazard_keywords'].copy()
layer.keywords['aggregation_keywords'] = (
vector.keywords['aggregation_keywords'])
layer.keywords['layer_purpose'] = (
layer_purpose_aggregate_hazard_impacted['key'])
layer.keywords['title'] = output_layer_name
check_layer(layer)
return layer |
def guard_submit(analysis):
"""Return whether the transition "submit" can be performed or not
"""
# Cannot submit without a result
if not analysis.getResult():
return False
# Cannot submit with interims without value
for interim in analysis.getInterimFields():
if not interim.get("value", ""):
return False
# Cannot submit if attachment not set, but is required
if not analysis.getAttachment():
if analysis.getAttachmentOption() == 'r':
return False
# Check if can submit based on the Analysis Request state
if IRequestAnalysis.providedBy(analysis):
point_of_capture = analysis.getPointOfCapture()
# Cannot submit if the Sample has not been received
if point_of_capture == "lab" and not analysis.isSampleReceived():
return False
# Cannot submit if the Sample has not been sampled
if point_of_capture == "field" and not analysis.isSampleSampled():
return False
# Check if the current user can submit if is not assigned
if not analysis.bika_setup.getAllowToSubmitNotAssigned():
if not user_has_super_roles():
# Cannot submit if unassigned
if not analysis.getAnalyst():
return False
# Cannot submit if assigned analyst is not the current user
if analysis.getAnalyst() != api.get_current_user().getId():
return False
# Cannot submit unless all dependencies are submitted or can be submitted
for dependency in analysis.getDependencies():
if not is_submitted_or_submittable(dependency):
return False
return True | Return whether the transition "submit" can be performed or not | Below is the instruction that describes the task:
### Input:
Return whether the transition "submit" can be performed or not
### Response:
def guard_submit(analysis):
"""Return whether the transition "submit" can be performed or not
"""
# Cannot submit without a result
if not analysis.getResult():
return False
# Cannot submit with interims without value
for interim in analysis.getInterimFields():
if not interim.get("value", ""):
return False
# Cannot submit if attachment not set, but is required
if not analysis.getAttachment():
if analysis.getAttachmentOption() == 'r':
return False
# Check if can submit based on the Analysis Request state
if IRequestAnalysis.providedBy(analysis):
point_of_capture = analysis.getPointOfCapture()
# Cannot submit if the Sample has not been received
if point_of_capture == "lab" and not analysis.isSampleReceived():
return False
# Cannot submit if the Sample has not been sampled
if point_of_capture == "field" and not analysis.isSampleSampled():
return False
# Check if the current user can submit if is not assigned
if not analysis.bika_setup.getAllowToSubmitNotAssigned():
if not user_has_super_roles():
# Cannot submit if unassigned
if not analysis.getAnalyst():
return False
# Cannot submit if assigned analyst is not the current user
if analysis.getAnalyst() != api.get_current_user().getId():
return False
# Cannot submit unless all dependencies are submitted or can be submitted
for dependency in analysis.getDependencies():
if not is_submitted_or_submittable(dependency):
return False
return True |
def get_argument_parser():
"""Create the argument parser for the script.
Parameters
----------
Returns
-------
`argparse.ArgumentParser`
The argument parser.
"""
desc = 'Generate a sample sheet based on a GEO series matrix.'
parser = cli.get_argument_parser(desc=desc)
g = parser.add_argument_group('Input and output files')
g.add_argument(
'-s', '--series-matrix-file', type=cli.str_type, required=True,
metavar=cli.file_mv, help='The GEO series matrix file.'
)
g.add_argument(
'-o', '--output-file', type=cli.str_type,
required=True,
metavar=cli.file_mv, help='The output file.'
)
g.add_argument(
'-e', '--encoding', type=cli.str_type,
metavar=cli.str_mv, default='UTF-8',
help='The encoding of the series matrix file. [UTF-8]'
)
cli.add_reporting_args(parser)
return parser | Create the argument parser for the script.
Parameters
----------
Returns
-------
`argparse.ArgumentParser`
The argument parser. | Below is the instruction that describes the task:
### Input:
Create the argument parser for the script.
Parameters
----------
Returns
-------
`argparse.ArgumentParser`
The argument parser.
### Response:
def get_argument_parser():
"""Create the argument parser for the script.
Parameters
----------
Returns
-------
`argparse.ArgumentParser`
The argument parser.
"""
desc = 'Generate a sample sheet based on a GEO series matrix.'
parser = cli.get_argument_parser(desc=desc)
g = parser.add_argument_group('Input and output files')
g.add_argument(
'-s', '--series-matrix-file', type=cli.str_type, required=True,
metavar=cli.file_mv, help='The GEO series matrix file.'
)
g.add_argument(
'-o', '--output-file', type=cli.str_type,
required=True,
metavar=cli.file_mv, help='The output file.'
)
g.add_argument(
'-e', '--encoding', type=cli.str_type,
metavar=cli.str_mv, default='UTF-8',
help='The encoding of the series matrix file. [UTF-8]'
)
cli.add_reporting_args(parser)
return parser |
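A rough standalone equivalent built with the standard-library argparse module, since the project-specific cli helpers (str_type, file_mv, add_reporting_args) are not shown here; the option names mirror the ones registered above and the file names passed to parse_args are purely illustrative:
import argparse

parser = argparse.ArgumentParser(
    description='Generate a sample sheet based on a GEO series matrix.')
g = parser.add_argument_group('Input and output files')
g.add_argument('-s', '--series-matrix-file', required=True,
               help='The GEO series matrix file.')
g.add_argument('-o', '--output-file', required=True, help='The output file.')
g.add_argument('-e', '--encoding', default='UTF-8',
               help='The encoding of the series matrix file. [UTF-8]')
args = parser.parse_args(['-s', 'GSE0000_series_matrix.txt', '-o', 'samples.tsv'])
print(args.series_matrix_file, args.output_file, args.encoding)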
def deferred(timeout=None):
"""
By wrapping a test function with this decorator, you can return a
twisted Deferred and the test will wait for the deferred to be triggered.
The whole test function will run inside the Twisted event loop.
The optional timeout parameter specifies the maximum duration of the test.
The difference with timed() is that timed() will still wait for the test
to end, while deferred() will stop the test when its timeout has expired.
The latter is more desirable when dealing with network tests, because
the result may actually never arrive.
If the callback is triggered, the test has passed.
If the errback is triggered or the timeout expires, the test has failed.
Example::
@deferred(timeout=5.0)
def test_resolve():
return reactor.resolve("www.python.org")
Attention! If you combine this decorator with other decorators (like
"raises"), deferred() must be called *first*!
In other words, this is good::
@raises(DNSLookupError)
@deferred()
def test_error():
return reactor.resolve("xxxjhjhj.biz")
and this is bad::
@deferred()
@raises(DNSLookupError)
def test_error():
return reactor.resolve("xxxjhjhj.biz")
"""
reactor, reactor_thread = threaded_reactor()
if reactor is None:
raise ImportError("twisted is not available or could not be imported")
# Check for common syntax mistake
# (otherwise, tests can be silently ignored
# if one writes "@deferred" instead of "@deferred()")
try:
timeout is None or timeout + 0
except TypeError:
raise TypeError("'timeout' argument must be a number or None")
def decorate(func):
def wrapper(*args, **kargs):
q = Queue()
def callback(value):
q.put(None)
def errback(failure):
# Retrieve and save full exception info
try:
failure.raiseException()
except:
q.put(sys.exc_info())
def g():
try:
d = func(*args, **kargs)
try:
d.addCallbacks(callback, errback)
# Check for a common mistake and display a nice error
# message
except AttributeError:
raise TypeError("you must return a twisted Deferred "
"from your test case!")
# Catch exceptions raised in the test body (from the
# Twisted thread)
except:
q.put(sys.exc_info())
reactor.callFromThread(g)
try:
error = q.get(timeout=timeout)
except Empty:
raise TimeExpired("timeout expired before end of test (%f s.)"
% timeout)
# Re-raise all exceptions
if error is not None:
exc_type, exc_value, tb = error
raise exc_type, exc_value, tb
wrapper = make_decorator(func)(wrapper)
return wrapper
return decorate | By wrapping a test function with this decorator, you can return a
twisted Deferred and the test will wait for the deferred to be triggered.
The whole test function will run inside the Twisted event loop.
The optional timeout parameter specifies the maximum duration of the test.
The difference with timed() is that timed() will still wait for the test
to end, while deferred() will stop the test when its timeout has expired.
The latter is more desirable when dealing with network tests, because
the result may actually never arrive.
If the callback is triggered, the test has passed.
If the errback is triggered or the timeout expires, the test has failed.
Example::
@deferred(timeout=5.0)
def test_resolve():
return reactor.resolve("www.python.org")
Attention! If you combine this decorator with other decorators (like
"raises"), deferred() must be called *first*!
In other words, this is good::
@raises(DNSLookupError)
@deferred()
def test_error():
return reactor.resolve("xxxjhjhj.biz")
and this is bad::
@deferred()
@raises(DNSLookupError)
def test_error():
return reactor.resolve("xxxjhjhj.biz") | Below is the the instruction that describes the task:
### Input:
By wrapping a test function with this decorator, you can return a
twisted Deferred and the test will wait for the deferred to be triggered.
The whole test function will run inside the Twisted event loop.
The optional timeout parameter specifies the maximum duration of the test.
The difference with timed() is that timed() will still wait for the test
to end, while deferred() will stop the test when its timeout has expired.
The latter is more desirable when dealing with network tests, because
the result may actually never arrive.
If the callback is triggered, the test has passed.
If the errback is triggered or the timeout expires, the test has failed.
Example::
@deferred(timeout=5.0)
def test_resolve():
return reactor.resolve("www.python.org")
Attention! If you combine this decorator with other decorators (like
"raises"), deferred() must be called *first*!
In other words, this is good::
@raises(DNSLookupError)
@deferred()
def test_error():
return reactor.resolve("xxxjhjhj.biz")
and this is bad::
@deferred()
@raises(DNSLookupError)
def test_error():
return reactor.resolve("xxxjhjhj.biz")
### Response:
def deferred(timeout=None):
"""
By wrapping a test function with this decorator, you can return a
twisted Deferred and the test will wait for the deferred to be triggered.
The whole test function will run inside the Twisted event loop.
The optional timeout parameter specifies the maximum duration of the test.
The difference with timed() is that timed() will still wait for the test
to end, while deferred() will stop the test when its timeout has expired.
The latter is more desirable when dealing with network tests, because
the result may actually never arrive.
If the callback is triggered, the test has passed.
If the errback is triggered or the timeout expires, the test has failed.
Example::
@deferred(timeout=5.0)
def test_resolve():
return reactor.resolve("www.python.org")
Attention! If you combine this decorator with other decorators (like
"raises"), deferred() must be called *first*!
In other words, this is good::
@raises(DNSLookupError)
@deferred()
def test_error():
return reactor.resolve("xxxjhjhj.biz")
and this is bad::
@deferred()
@raises(DNSLookupError)
def test_error():
return reactor.resolve("xxxjhjhj.biz")
"""
reactor, reactor_thread = threaded_reactor()
if reactor is None:
raise ImportError("twisted is not available or could not be imported")
# Check for common syntax mistake
# (otherwise, tests can be silently ignored
# if one writes "@deferred" instead of "@deferred()")
try:
timeout is None or timeout + 0
except TypeError:
raise TypeError("'timeout' argument must be a number or None")
def decorate(func):
def wrapper(*args, **kargs):
q = Queue()
def callback(value):
q.put(None)
def errback(failure):
# Retrieve and save full exception info
try:
failure.raiseException()
except:
q.put(sys.exc_info())
def g():
try:
d = func(*args, **kargs)
try:
d.addCallbacks(callback, errback)
# Check for a common mistake and display a nice error
# message
except AttributeError:
raise TypeError("you must return a twisted Deferred "
"from your test case!")
# Catch exceptions raised in the test body (from the
# Twisted thread)
except:
q.put(sys.exc_info())
reactor.callFromThread(g)
try:
error = q.get(timeout=timeout)
except Empty:
raise TimeExpired("timeout expired before end of test (%f s.)"
% timeout)
# Re-raise all exceptions
if error is not None:
exc_type, exc_value, tb = error
raise exc_type, exc_value, tb
wrapper = make_decorator(func)(wrapper)
return wrapper
return decorate |
def invert(interval):
"""Invert an interval.
Example:
>>> invert(['C', 'E'])
['E', 'C']
"""
interval.reverse()
res = list(interval)
interval.reverse()
return res | Invert an interval.
Example:
>>> invert(['C', 'E'])
['E', 'C'] | Below is the the instruction that describes the task:
### Input:
Invert an interval.
Example:
>>> invert(['C', 'E'])
['E', 'C']
### Response:
def invert(interval):
"""Invert an interval.
Example:
>>> invert(['C', 'E'])
['E', 'C']
"""
interval.reverse()
res = list(interval)
interval.reverse()
return res |
def pos_tag_sents(
sentences: List[List[str]], engine: str = "perceptron", corpus: str = "orchid"
) -> List[List[Tuple[str, str]]]:
"""
Part of Speech tagging Sentence function.
:param list sentences: a list of lists of tokenized words
:param str engine:
* unigram - unigram tagger
* perceptron - perceptron tagger (default)
* artagger - RDR POS tagger
:param str corpus:
* orchid - annotated Thai academic articles (default)
* orchid_ud - annotated Thai academic articles using Universal Dependencies Tags
* pud - Parallel Universal Dependencies (PUD) treebanks
:return: returns a list of labels regarding which part of speech it is
"""
if not sentences:
return []
return [pos_tag(sent, engine=engine, corpus=corpus) for sent in sentences] | Part of Speech tagging Sentence function.
:param list sentences: a list of lists of tokenized words
:param str engine:
* unigram - unigram tagger
* perceptron - perceptron tagger (default)
* artagger - RDR POS tagger
:param str corpus:
* orchid - annotated Thai academic articles (default)
* orchid_ud - annotated Thai academic articles using Universal Dependencies Tags
* pud - Parallel Universal Dependencies (PUD) treebanks
:return: returns a list of labels regarding which part of speech it is | Below is the the instruction that describes the task:
### Input:
Part of Speech tagging Sentence function.
:param list sentences: a list of lists of tokenized words
:param str engine:
* unigram - unigram tagger
* perceptron - perceptron tagger (default)
* artagger - RDR POS tagger
:param str corpus:
* orchid - annotated Thai academic articles (default)
* orchid_ud - annotated Thai academic articles using Universal Dependencies Tags
* pud - Parallel Universal Dependencies (PUD) treebanks
:return: returns a list of labels regarding which part of speech it is
### Response:
def pos_tag_sents(
sentences: List[List[str]], engine: str = "perceptron", corpus: str = "orchid"
) -> List[List[Tuple[str, str]]]:
"""
Part of Speech tagging Sentence function.
:param list sentences: a list of lists of tokenized words
:param str engine:
* unigram - unigram tagger
* perceptron - perceptron tagger (default)
* artagger - RDR POS tagger
:param str corpus:
* orchid - annotated Thai academic articles (default)
* orchid_ud - annotated Thai academic articles using Universal Dependencies Tags
* pud - Parallel Universal Dependencies (PUD) treebanks
:return: returns a list of labels regarding which part of speech it is
"""
if not sentences:
return []
return [pos_tag(sent, engine=engine, corpus=corpus) for sent in sentences] |
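A minimal usage sketch, on the assumption that the function above is the pos_tag_sents exposed by pythainlp.tag and that the tagger data is installed; the Thai tokens are only an example:
from pythainlp.tag import pos_tag_sents

sentences = [["ผม", "รัก", "คุณ"], ["ฉัน", "กิน", "ข้าว"]]
for tagged_sent in pos_tag_sents(sentences, engine="perceptron", corpus="orchid"):
    print(tagged_sent)  # a list of (word, POS tag) tuples per input sentence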
def can_unsubscribe_from_topic(self, topic, user):
""" Given a topic, checks whether the user can remove it from their subscription list. """
# A user can unsubscribe from topics if they are authenticated and if they have the
# permission to read the related forum. Of course a user can unsubscribe only if they are
# already a subscriber of the considered topic.
return (
user.is_authenticated and
topic.has_subscriber(user) and
self._perform_basic_permission_check(topic.forum, user, 'can_read_forum')
) | Given a topic, checks whether the user can remove it from their subscription list. | Below is the the instruction that describes the task:
### Input:
Given a topic, checks whether the user can remove it from their subscription list.
### Response:
def can_unsubscribe_from_topic(self, topic, user):
""" Given a topic, checks whether the user can remove it from their subscription list. """
# A user can unsubscribe from topics if they are authenticated and if they have the
# permission to read the related forum. Of course a user can unsubscribe only if they are
# already a subscriber of the considered topic.
return (
user.is_authenticated and
topic.has_subscriber(user) and
self._perform_basic_permission_check(topic.forum, user, 'can_read_forum')
) |
def discover_setup_packages():
"""Summarize packages currently set up by EUPS, listing their
set up directories and EUPS version names.
Returns
-------
packages : `dict`
Dictionary with keys that are EUPS package names. Values are
dictionaries with fields:
- ``'dir'``: absolute directory path of the set up package.
- ``'version'``: EUPS version string for package.
Notes
-----
This function imports the ``eups`` Python package, which is assumed to
be available in the build environmen. This function is designed to
encapsulate all direct EUPS interactions need by the stack documentation
build process.
"""
logger = logging.getLogger(__name__)
# Not a PyPI dependency; assumed to be available in the build environment.
import eups
eups_client = eups.Eups()
products = eups_client.getSetupProducts()
packages = {}
for package in products:
name = package.name
info = {
'dir': package.dir,
'version': package.version
}
packages[name] = info
logger.debug('Found setup package: {name} {version} {dir}'.format(
name=name, **info))
return packages | Summarize packages currently set up by EUPS, listing their
set up directories and EUPS version names.
Returns
-------
packages : `dict`
Dictionary with keys that are EUPS package names. Values are
dictionaries with fields:
- ``'dir'``: absolute directory path of the set up package.
- ``'version'``: EUPS version string for package.
Notes
-----
This function imports the ``eups`` Python package, which is assumed to
be available in the build environment. This function is designed to
encapsulate all direct EUPS interactions needed by the stack documentation
build process. | Below is the the instruction that describes the task:
### Input:
Summarize packages currently set up by EUPS, listing their
set up directories and EUPS version names.
Returns
-------
packages : `dict`
Dictionary with keys that are EUPS package names. Values are
dictionaries with fields:
- ``'dir'``: absolute directory path of the set up package.
- ``'version'``: EUPS version string for package.
Notes
-----
This function imports the ``eups`` Python package, which is assumed to
be available in the build environment. This function is designed to
encapsulate all direct EUPS interactions needed by the stack documentation
build process.
### Response:
def discover_setup_packages():
"""Summarize packages currently set up by EUPS, listing their
set up directories and EUPS version names.
Returns
-------
packages : `dict`
Dictionary with keys that are EUPS package names. Values are
dictionaries with fields:
- ``'dir'``: absolute directory path of the set up package.
- ``'version'``: EUPS version string for package.
Notes
-----
This function imports the ``eups`` Python package, which is assumed to
be available in the build environment. This function is designed to
encapsulate all direct EUPS interactions needed by the stack documentation
build process.
"""
logger = logging.getLogger(__name__)
# Not a PyPI dependency; assumed to be available in the build environment.
import eups
eups_client = eups.Eups()
products = eups_client.getSetupProducts()
packages = {}
for package in products:
name = package.name
info = {
'dir': package.dir,
'version': package.version
}
packages[name] = info
logger.debug('Found setup package: {name} {version} {dir}'.format(
name=name, **info))
return packages |
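A short usage sketch, assuming it is run inside an active EUPS-managed stack environment (otherwise the eups import inside the function fails); it relies only on the documented return shape:
packages = discover_setup_packages()
for name, info in sorted(packages.items()):
    print('{name} {version} {dir}'.format(name=name, **info))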
def show_help(name):
"""
Show help and basic usage
"""
print('Usage: python3 {} [OPTIONS]... '.format(name))
print('ISO8583 message client')
print(' -v, --verbose\t\tRun transactions verbosely')
print(' -p, --port=[PORT]\t\tTCP port to connect to, 1337 by default')
print(' -s, --server=[IP]\t\tIP of the ISO host to connect to, 127.0.0.1 by default')
print(' -t, --terminal=[ID]\t\tTerminal ID (used in DE 41 ISO field, 10001337 by default)')
print(' -m, --merchant=[ID]\t\tMerchant ID (used in DE 42 ISO field, 999999999999001 by default)')
print(' -k, --terminal-key=[KEY]\t\tTerminal key (\'DEADBEEF DEADBEEF DEADBEEF DEADBEEF\' by default)')
print(' -K, --master-key=[KEY]\t\tMaster key (\'ABABABAB CDCDCDCD EFEFEFEF AEAEAEAE\' by default)')
print(' -f, --file=[file.xml]\t\tUse transaction data from the given XML-file') | Show help and basic usage | Below is the the instruction that describes the task:
### Input:
Show help and basic usage
### Response:
def show_help(name):
"""
Show help and basic usage
"""
print('Usage: python3 {} [OPTIONS]... '.format(name))
print('ISO8583 message client')
print(' -v, --verbose\t\tRun transactions verbosely')
print(' -p, --port=[PORT]\t\tTCP port to connect to, 1337 by default')
print(' -s, --server=[IP]\t\tIP of the ISO host to connect to, 127.0.0.1 by default')
print(' -t, --terminal=[ID]\t\tTerminal ID (used in DE 41 ISO field, 10001337 by default)')
print(' -m, --merchant=[ID]\t\tMerchant ID (used in DE 42 ISO field, 999999999999001 by default)')
print(' -k, --terminal-key=[KEY]\t\tTerminal key (\'DEADBEEF DEADBEEF DEADBEEF DEADBEEF\' by default)')
print(' -K, --master-key=[KEY]\t\tMaster key (\'ABABABAB CDCDCDCD EFEFEFEF AEAEAEAE\' by default)')
print(' -f, --file=[file.xml]\t\tUse transaction data from the given XML-file') |
def _back_compatible_gemini(conf_files, data):
"""Provide old install directory for configuration with GEMINI supplied tidy VCFs.
Handles new style (bcbio installed) and old style (GEMINI installed)
configuration and data locations.
"""
if vcfanno.is_human(data, builds=["37"]):
for f in conf_files:
if f and os.path.basename(f) == "gemini.conf" and os.path.exists(f):
with open(f) as in_handle:
for line in in_handle:
if line.startswith("file"):
fname = line.strip().split("=")[-1].replace('"', '').strip()
if fname.find(".tidy.") > 0:
return install.get_gemini_dir(data)
return None | Provide old install directory for configuration with GEMINI supplied tidy VCFs.
Handles new style (bcbio installed) and old style (GEMINI installed)
configuration and data locations. | Below is the the instruction that describes the task:
### Input:
Provide old install directory for configuration with GEMINI supplied tidy VCFs.
Handles new style (bcbio installed) and old style (GEMINI installed)
configuration and data locations.
### Response:
def _back_compatible_gemini(conf_files, data):
"""Provide old install directory for configuration with GEMINI supplied tidy VCFs.
Handles new style (bcbio installed) and old style (GEMINI installed)
configuration and data locations.
"""
if vcfanno.is_human(data, builds=["37"]):
for f in conf_files:
if f and os.path.basename(f) == "gemini.conf" and os.path.exists(f):
with open(f) as in_handle:
for line in in_handle:
if line.startswith("file"):
fname = line.strip().split("=")[-1].replace('"', '').strip()
if fname.find(".tidy.") > 0:
return install.get_gemini_dir(data)
return None |
def fit(self, X, *args, **kwargs):
"""Fit scipy model to an array of values.
Args:
X(`np.ndarray` or `pd.DataFrame`): Datapoints to be estimated from. Must be 1-d
Returns:
None
"""
self.constant_value = self._get_constant_value(X)
if self.constant_value is None:
if self.unfittable_model:
self.model = getattr(scipy.stats, self.model_class)(*args, **kwargs)
else:
self.model = getattr(scipy.stats, self.model_class)(X, *args, **kwargs)
for name in self.METHOD_NAMES:
attribute = getattr(self.__class__, name)
if isinstance(attribute, str):
setattr(self, name, getattr(self.model, attribute))
elif attribute is None:
setattr(self, name, missing_method_scipy_wrapper(lambda x: x))
else:
self._replace_constant_methods()
self.fitted = True | Fit scipy model to an array of values.
Args:
X(`np.ndarray` or `pd.DataFrame`): Datapoints to be estimated from. Must be 1-d
Returns:
None | Below is the the instruction that describes the task:
### Input:
Fit scipy model to an array of values.
Args:
X(`np.ndarray` or `pd.DataFrame`): Datapoints to be estimated from. Must be 1-d
Returns:
None
### Response:
def fit(self, X, *args, **kwargs):
"""Fit scipy model to an array of values.
Args:
X(`np.ndarray` or `pd.DataFrame`): Datapoints to be estimated from. Must be 1-d
Returns:
None
"""
self.constant_value = self._get_constant_value(X)
if self.constant_value is None:
if self.unfittable_model:
self.model = getattr(scipy.stats, self.model_class)(*args, **kwargs)
else:
self.model = getattr(scipy.stats, self.model_class)(X, *args, **kwargs)
for name in self.METHOD_NAMES:
attribute = getattr(self.__class__, name)
if isinstance(attribute, str):
setattr(self, name, getattr(self.model, attribute))
elif attribute is None:
setattr(self, name, missing_method_scipy_wrapper(lambda x: x))
else:
self._replace_constant_methods()
self.fitted = True |
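The wrapper above builds its model dynamically with getattr(scipy.stats, model_class); a minimal sketch of that underlying pattern, where the model name gaussian_kde and the random data are illustrative rather than taken from the original class:
import numpy as np
import scipy.stats

samples = np.random.normal(size=200)
# Model fitted at construction time, the case handled when unfittable_model is False.
kde = getattr(scipy.stats, 'gaussian_kde')(samples)
print(kde.evaluate([0.0]))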
def set(self, data=None):
"""
Sets the event
"""
self.__data = data
self.__exception = None
self.__event.set() | Sets the event | Below is the the instruction that describes the task:
### Input:
Sets the event
### Response:
def set(self, data=None):
"""
Sets the event
"""
self.__data = data
self.__exception = None
self.__event.set() |
def get_permissions_for_registration(self):
"""
Utilised by Wagtail's 'register_permissions' hook to allow permissions
for all models grouped by this class to be assigned to Groups in
settings.
"""
qs = Permission.objects.none()
for instance in self.modeladmin_instances:
qs = qs | instance.get_permissions_for_registration()
return qs | Utilised by Wagtail's 'register_permissions' hook to allow permissions
for all models grouped by this class to be assigned to Groups in
settings. | Below is the the instruction that describes the task:
### Input:
Utilised by Wagtail's 'register_permissions' hook to allow permissions
for all models grouped by this class to be assigned to Groups in
settings.
### Response:
def get_permissions_for_registration(self):
"""
Utilised by Wagtail's 'register_permissions' hook to allow permissions
for all models grouped by this class to be assigned to Groups in
settings.
"""
qs = Permission.objects.none()
for instance in self.modeladmin_instances:
qs = qs | instance.get_permissions_for_registration()
return qs |
def _pre_analysis(self):
"""
Initialization work. Executed prior to the analysis.
:return: None
"""
# Fill up self._starts
for item in self._starts:
callstack = None
if isinstance(item, tuple):
# (addr, jumpkind)
ip = item[0]
state = self._create_initial_state(item[0], item[1])
elif isinstance(item, SimState):
# SimState
state = item.copy() # pylint: disable=no-member
ip = state.solver.eval_one(state.ip)
self._reset_state_mode(state, 'fastpath')
else:
raise AngrCFGError('Unsupported CFG start type: %s.' % str(type(item)))
self._symbolic_function_initial_state[ip] = state
path_wrapper = CFGJob(ip, state, self._context_sensitivity_level, None, None, call_stack=callstack)
key = path_wrapper.block_id
if key not in self._start_keys:
self._start_keys.append(key)
self._insert_job(path_wrapper)
self._register_analysis_job(path_wrapper.func_addr, path_wrapper) | Initialization work. Executed prior to the analysis.
:return: None | Below is the the instruction that describes the task:
### Input:
Initialization work. Executed prior to the analysis.
:return: None
### Response:
def _pre_analysis(self):
"""
Initialization work. Executed prior to the analysis.
:return: None
"""
# Fill up self._starts
for item in self._starts:
callstack = None
if isinstance(item, tuple):
# (addr, jumpkind)
ip = item[0]
state = self._create_initial_state(item[0], item[1])
elif isinstance(item, SimState):
# SimState
state = item.copy() # pylint: disable=no-member
ip = state.solver.eval_one(state.ip)
self._reset_state_mode(state, 'fastpath')
else:
raise AngrCFGError('Unsupported CFG start type: %s.' % str(type(item)))
self._symbolic_function_initial_state[ip] = state
path_wrapper = CFGJob(ip, state, self._context_sensitivity_level, None, None, call_stack=callstack)
key = path_wrapper.block_id
if key not in self._start_keys:
self._start_keys.append(key)
self._insert_job(path_wrapper)
self._register_analysis_job(path_wrapper.func_addr, path_wrapper) |
def timeseries(self):
"""
Feed-in time series of generator
It returns the actual time series used in power flow analysis. If
:attr:`_timeseries` is not :obj:`None`, it is returned. Otherwise,
:meth:`timeseries` looks for generation and curtailment time series
of the corresponding technology type (and weather cell) in
:class:`~.grid.network.TimeSeries`.
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
DataFrame containing active power in kW in column 'p' and
reactive power in kVA in column 'q'.
"""
if self._timeseries is None:
# get time series for active power depending on if they are
# differentiated by weather cell ID or not
if isinstance(self.grid.network.timeseries.generation_fluctuating.
columns, pd.MultiIndex):
if self.weather_cell_id:
try:
timeseries = self.grid.network.timeseries.\
generation_fluctuating[
self.type, self.weather_cell_id].to_frame('p')
except KeyError:
logger.exception("No time series for type {} and "
"weather cell ID {} given.".format(
self.type, self.weather_cell_id))
raise
else:
logger.exception("No weather cell ID provided for "
"fluctuating generator {}.".format(
repr(self)))
raise KeyError
else:
try:
timeseries = self.grid.network.timeseries.\
generation_fluctuating[self.type].to_frame('p')
except KeyError:
logger.exception("No time series for type {} "
"given.".format(self.type))
raise
timeseries = timeseries * self.nominal_capacity
# subtract curtailment
if self.curtailment is not None:
timeseries = timeseries.join(
self.curtailment.to_frame('curtailment'), how='left')
timeseries.p = timeseries.p - timeseries.curtailment.fillna(0)
if self.timeseries_reactive is not None:
timeseries['q'] = self.timeseries_reactive
else:
timeseries['q'] = timeseries['p'] * self.q_sign * tan(acos(
self.power_factor))
return timeseries
else:
return self._timeseries.loc[
self.grid.network.timeseries.timeindex, :] | Feed-in time series of generator
It returns the actual time series used in power flow analysis. If
:attr:`_timeseries` is not :obj:`None`, it is returned. Otherwise,
:meth:`timeseries` looks for generation and curtailment time series
of the corresponding technology type (and weather cell) in
:class:`~.grid.network.TimeSeries`.
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
DataFrame containing active power in kW in column 'p' and
reactive power in kVA in column 'q'. | Below is the the instruction that describes the task:
### Input:
Feed-in time series of generator
It returns the actual time series used in power flow analysis. If
:attr:`_timeseries` is not :obj:`None`, it is returned. Otherwise,
:meth:`timeseries` looks for generation and curtailment time series
of the corresponding technology type (and weather cell) in
:class:`~.grid.network.TimeSeries`.
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
DataFrame containing active power in kW in column 'p' and
reactive power in kVA in column 'q'.
### Response:
def timeseries(self):
"""
Feed-in time series of generator
It returns the actual time series used in power flow analysis. If
:attr:`_timeseries` is not :obj:`None`, it is returned. Otherwise,
:meth:`timeseries` looks for generation and curtailment time series
of the corresponding technology type (and weather cell) in
:class:`~.grid.network.TimeSeries`.
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
DataFrame containing active power in kW in column 'p' and
reactive power in kVA in column 'q'.
"""
if self._timeseries is None:
# get time series for active power depending on if they are
# differentiated by weather cell ID or not
if isinstance(self.grid.network.timeseries.generation_fluctuating.
columns, pd.MultiIndex):
if self.weather_cell_id:
try:
timeseries = self.grid.network.timeseries.\
generation_fluctuating[
self.type, self.weather_cell_id].to_frame('p')
except KeyError:
logger.exception("No time series for type {} and "
"weather cell ID {} given.".format(
self.type, self.weather_cell_id))
raise
else:
logger.exception("No weather cell ID provided for "
"fluctuating generator {}.".format(
repr(self)))
raise KeyError
else:
try:
timeseries = self.grid.network.timeseries.\
generation_fluctuating[self.type].to_frame('p')
except KeyError:
logger.exception("No time series for type {} "
"given.".format(self.type))
raise
timeseries = timeseries * self.nominal_capacity
# subtract curtailment
if self.curtailment is not None:
timeseries = timeseries.join(
self.curtailment.to_frame('curtailment'), how='left')
timeseries.p = timeseries.p - timeseries.curtailment.fillna(0)
if self.timeseries_reactive is not None:
timeseries['q'] = self.timeseries_reactive
else:
timeseries['q'] = timeseries['p'] * self.q_sign * tan(acos(
self.power_factor))
return timeseries
else:
return self._timeseries.loc[
self.grid.network.timeseries.timeindex, :] |
def from_schema(self, schema_node):
"""
Creates a list of Swagger params from a colander request schema.
:param schema_node:
Request schema to be transformed into Swagger.
:param validators:
Validators used in colander with the schema.
:rtype: list
:returns: List of Swagger parameters.
"""
params = []
for param_schema in schema_node.children:
location = param_schema.name
if location == 'body':
name = param_schema.__class__.__name__
if name == 'body':
name = schema_node.__class__.__name__ + 'Body'
param = self.parameter_converter(location,
param_schema)
param['name'] = name
if self.ref:
param = self._ref(param)
params.append(param)
elif location in (('path', 'header', 'headers', 'querystring', 'GET')):
for node_schema in param_schema.children:
param = self.parameter_converter(location, node_schema)
if self.ref:
param = self._ref(param)
params.append(param)
return params | Creates a list of Swagger params from a colander request schema.
:param schema_node:
Request schema to be transformed into Swagger.
:param validators:
Validators used in colander with the schema.
:rtype: list
:returns: List of Swagger parameters. | Below is the the instruction that describes the task:
### Input:
Creates a list of Swagger params from a colander request schema.
:param schema_node:
Request schema to be transformed into Swagger.
:param validators:
Validators used in colander with the schema.
:rtype: list
:returns: List of Swagger parameters.
### Response:
def from_schema(self, schema_node):
"""
Creates a list of Swagger params from a colander request schema.
:param schema_node:
Request schema to be transformed into Swagger.
:param validators:
Validators used in colander with the schema.
:rtype: list
:returns: List of Swagger parameters.
"""
params = []
for param_schema in schema_node.children:
location = param_schema.name
if location == 'body':
name = param_schema.__class__.__name__
if name == 'body':
name = schema_node.__class__.__name__ + 'Body'
param = self.parameter_converter(location,
param_schema)
param['name'] = name
if self.ref:
param = self._ref(param)
params.append(param)
elif location in (('path', 'header', 'headers', 'querystring', 'GET')):
for node_schema in param_schema.children:
param = self.parameter_converter(location, node_schema)
if self.ref:
param = self._ref(param)
params.append(param)
return params |
def select_candidates(config):
"""Select candidates to download.
Parameters
----------
config: NgdConfig
Runtime configuration object
Returns
-------
list of (<candidate entry>, <taxonomic group>)
"""
download_candidates = []
for group in config.group:
summary_file = get_summary(config.section, group, config.uri, config.use_cache)
entries = parse_summary(summary_file)
for entry in filter_entries(entries, config):
download_candidates.append((entry, group))
return download_candidates | Select candidates to download.
Parameters
----------
config: NgdConfig
Runtime configuration object
Returns
-------
list of (<candidate entry>, <taxonomic group>) | Below is the the instruction that describes the task:
### Input:
Select candidates to download.
Parameters
----------
config: NgdConfig
Runtime configuration object
Returns
-------
list of (<candidate entry>, <taxonomic group>)
### Response:
def select_candidates(config):
"""Select candidates to download.
Parameters
----------
config: NgdConfig
Runtime configuration object
Returns
-------
list of (<candidate entry>, <taxonomic group>)
"""
download_candidates = []
for group in config.group:
summary_file = get_summary(config.section, group, config.uri, config.use_cache)
entries = parse_summary(summary_file)
for entry in filter_entries(entries, config):
download_candidates.append((entry, group))
return download_candidates |
def process_dividends(self, next_session, asset_finder, adjustment_reader):
"""Process dividends for the next session.
This will earn us any dividends whose ex-date is the next session as
well as paying out any dividends whose pay-date is the next session
"""
position_tracker = self.position_tracker
# Earn dividends whose ex_date is the next trading day. We need to
# check if we own any of these stocks so we know to pay them out when
# the pay date comes.
held_sids = set(position_tracker.positions)
if held_sids:
cash_dividends = adjustment_reader.get_dividends_with_ex_date(
held_sids,
next_session,
asset_finder
)
stock_dividends = (
adjustment_reader.get_stock_dividends_with_ex_date(
held_sids,
next_session,
asset_finder
)
)
# Earning a dividend just marks that we need to get paid out on
# the dividend's pay-date. This does not affect our cash yet.
position_tracker.earn_dividends(
cash_dividends,
stock_dividends,
)
# Pay out the dividends whose pay-date is the next session. This does
# affect our cash.
self._cash_flow(
position_tracker.pay_dividends(
next_session,
),
) | Process dividends for the next session.
This will earn us any dividends whose ex-date is the next session as
well as paying out any dividends whose pay-date is the next session | Below is the the instruction that describes the task:
### Input:
Process dividends for the next session.
This will earn us any dividends whose ex-date is the next session as
well as paying out any dividends whose pay-date is the next session
### Response:
def process_dividends(self, next_session, asset_finder, adjustment_reader):
"""Process dividends for the next session.
This will earn us any dividends whose ex-date is the next session as
well as paying out any dividends whose pay-date is the next session
"""
position_tracker = self.position_tracker
# Earn dividends whose ex_date is the next trading day. We need to
# check if we own any of these stocks so we know to pay them out when
# the pay date comes.
held_sids = set(position_tracker.positions)
if held_sids:
cash_dividends = adjustment_reader.get_dividends_with_ex_date(
held_sids,
next_session,
asset_finder
)
stock_dividends = (
adjustment_reader.get_stock_dividends_with_ex_date(
held_sids,
next_session,
asset_finder
)
)
# Earning a dividend just marks that we need to get paid out on
# the dividend's pay-date. This does not affect our cash yet.
position_tracker.earn_dividends(
cash_dividends,
stock_dividends,
)
# Pay out the dividends whose pay-date is the next session. This does
# affect out cash.
self._cash_flow(
position_tracker.pay_dividends(
next_session,
),
) |
def reset_trial(self, trial, new_config, new_experiment_tag):
"""Tries to invoke `Trainable.reset_config()` to reset trial.
Args:
trial (Trial): Trial to be reset.
new_config (dict): New configuration for Trial
trainable.
new_experiment_tag (str): New experiment name
for trial.
Returns:
True if `reset_config` is successful else False.
"""
trial.experiment_tag = new_experiment_tag
trial.config = new_config
trainable = trial.runner
with warn_if_slow("reset_config"):
reset_val = ray.get(trainable.reset_config.remote(new_config))
return reset_val | Tries to invoke `Trainable.reset_config()` to reset trial.
Args:
trial (Trial): Trial to be reset.
new_config (dict): New configuration for Trial
trainable.
new_experiment_tag (str): New experiment name
for trial.
Returns:
True if `reset_config` is successful else False. | Below is the the instruction that describes the task:
### Input:
Tries to invoke `Trainable.reset_config()` to reset trial.
Args:
trial (Trial): Trial to be reset.
new_config (dict): New configuration for Trial
trainable.
new_experiment_tag (str): New experiment name
for trial.
Returns:
True if `reset_config` is successful else False.
### Response:
def reset_trial(self, trial, new_config, new_experiment_tag):
"""Tries to invoke `Trainable.reset_config()` to reset trial.
Args:
trial (Trial): Trial to be reset.
new_config (dict): New configuration for Trial
trainable.
new_experiment_tag (str): New experiment name
for trial.
Returns:
True if `reset_config` is successful else False.
"""
trial.experiment_tag = new_experiment_tag
trial.config = new_config
trainable = trial.runner
with warn_if_slow("reset_config"):
reset_val = ray.get(trainable.reset_config.remote(new_config))
return reset_val |
def RegisterTextKey(cls, key, atomid):
"""Register a text key.
If the key you need to register is a simple one-to-one mapping
of MP4 atom name to EasyMP4Tags key, then you can use this
function::
EasyMP4Tags.RegisterTextKey("artist", "\xa9ART")
"""
def getter(tags, key):
return tags[atomid]
def setter(tags, key, value):
tags[atomid] = value
def deleter(tags, key):
del(tags[atomid])
cls.RegisterKey(key, getter, setter, deleter) | Register a text key.
If the key you need to register is a simple one-to-one mapping
of MP4 atom name to EasyMP4Tags key, then you can use this
function::
EasyMP4Tags.RegisterTextKey("artist", "\xa9ART") | Below is the the instruction that describes the task:
### Input:
Register a text key.
If the key you need to register is a simple one-to-one mapping
of MP4 atom name to EasyMP4Tags key, then you can use this
function::
EasyMP4Tags.RegisterTextKey("artist", "\xa9ART")
### Response:
def RegisterTextKey(cls, key, atomid):
"""Register a text key.
If the key you need to register is a simple one-to-one mapping
of MP4 atom name to EasyMP4Tags key, then you can use this
function::
EasyMP4Tags.RegisterTextKey("artist", "\xa9ART")
"""
def getter(tags, key):
return tags[atomid]
def setter(tags, key, value):
tags[atomid] = value
def deleter(tags, key):
del(tags[atomid])
cls.RegisterKey(key, getter, setter, deleter) |
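A hedged usage sketch mirroring the docstring's own example; the file name is hypothetical and '\xa9ART' is the standard iTunes artist atom:
from mutagen.easymp4 import EasyMP4, EasyMP4Tags

EasyMP4Tags.RegisterTextKey('artist', '\xa9ART')
audio = EasyMP4('example.m4a')   # hypothetical path to an existing MP4 file
audio['artist'] = ['Some Artist']
audio.save()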
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'class_name') and self.class_name is not None:
_dict['class'] = self.class_name
if hasattr(self, 'score') and self.score is not None:
_dict['score'] = self.score
if hasattr(self, 'type_hierarchy') and self.type_hierarchy is not None:
_dict['type_hierarchy'] = self.type_hierarchy
return _dict | Return a json dictionary representing this model. | Below is the the instruction that describes the task:
### Input:
Return a json dictionary representing this model.
### Response:
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'class_name') and self.class_name is not None:
_dict['class'] = self.class_name
if hasattr(self, 'score') and self.score is not None:
_dict['score'] = self.score
if hasattr(self, 'type_hierarchy') and self.type_hierarchy is not None:
_dict['type_hierarchy'] = self.type_hierarchy
return _dict |
def ticker(ctx, market):
""" Show ticker of a market
"""
market = Market(market, bitshares_instance=ctx.bitshares)
ticker = market.ticker()
t = [["key", "value"]]
for key in ticker:
t.append([key, str(ticker[key])])
print_table(t) | Show ticker of a market | Below is the the instruction that describes the task:
### Input:
Show ticker of a market
### Response:
def ticker(ctx, market):
""" Show ticker of a market
"""
market = Market(market, bitshares_instance=ctx.bitshares)
ticker = market.ticker()
t = [["key", "value"]]
for key in ticker:
t.append([key, str(ticker[key])])
print_table(t) |
def worker_collectionfinish(self, node, ids):
"""worker has finished test collection.
This adds the collection for this node to the scheduler. If
the scheduler indicates collection is finished (i.e. all
initial nodes have submitted their collections), then tells the
scheduler to schedule the collected items. When initiating
scheduling the first time it logs which scheduler is in use.
"""
if self.shuttingdown:
return
self.config.hook.pytest_xdist_node_collection_finished(node=node, ids=ids)
# tell session which items were effectively collected otherwise
# the master node will finish the session with EXIT_NOTESTSCOLLECTED
self._session.testscollected = len(ids)
self.sched.add_node_collection(node, ids)
if self.terminal:
self.trdist.setstatus(node.gateway.spec, "[%d]" % (len(ids)))
if self.sched.collection_is_completed:
if self.terminal and not self.sched.has_pending:
self.trdist.ensure_show_status()
self.terminal.write_line("")
if self.config.option.verbose > 0:
self.terminal.write_line(
"scheduling tests via %s" % (self.sched.__class__.__name__)
)
self.sched.schedule() | worker has finished test collection.
This adds the collection for this node to the scheduler. If
the scheduler indicates collection is finished (i.e. all
initial nodes have submitted their collections), then tells the
scheduler to schedule the collected items. When initiating
scheduling the first time it logs which scheduler is in use. | Below is the the instruction that describes the task:
### Input:
worker has finished test collection.
This adds the collection for this node to the scheduler. If
the scheduler indicates collection is finished (i.e. all
initial nodes have submitted their collections), then tells the
scheduler to schedule the collected items. When initiating
scheduling the first time it logs which scheduler is in use.
### Response:
def worker_collectionfinish(self, node, ids):
"""worker has finished test collection.
This adds the collection for this node to the scheduler. If
the scheduler indicates collection is finished (i.e. all
initial nodes have submitted their collections), then tells the
scheduler to schedule the collected items. When initiating
scheduling the first time it logs which scheduler is in use.
"""
if self.shuttingdown:
return
self.config.hook.pytest_xdist_node_collection_finished(node=node, ids=ids)
# tell session which items were effectively collected otherwise
# the master node will finish the session with EXIT_NOTESTSCOLLECTED
self._session.testscollected = len(ids)
self.sched.add_node_collection(node, ids)
if self.terminal:
self.trdist.setstatus(node.gateway.spec, "[%d]" % (len(ids)))
if self.sched.collection_is_completed:
if self.terminal and not self.sched.has_pending:
self.trdist.ensure_show_status()
self.terminal.write_line("")
if self.config.option.verbose > 0:
self.terminal.write_line(
"scheduling tests via %s" % (self.sched.__class__.__name__)
)
self.sched.schedule() |
def rounding_accuracy(predictions,
labels,
weights_fn=common_layers.weights_nonzero):
"""Rounding accuracy for L1/L2 losses: round down the predictions to ints."""
outputs = tf.squeeze(tf.to_int32(predictions))
labels = tf.squeeze(labels)
weights = weights_fn(labels)
labels = tf.to_int32(labels)
return tf.to_float(tf.equal(outputs, labels)), weights | Rounding accuracy for L1/L2 losses: round down the predictions to ints. | Below is the the instruction that describes the task:
### Input:
Rounding accuracy for L1/L2 losses: round down the predictions to ints.
### Response:
def rounding_accuracy(predictions,
labels,
weights_fn=common_layers.weights_nonzero):
"""Rounding accuracy for L1/L2 losses: round down the predictions to ints."""
outputs = tf.squeeze(tf.to_int32(predictions))
labels = tf.squeeze(labels)
weights = weights_fn(labels)
labels = tf.to_int32(labels)
return tf.to_float(tf.equal(outputs, labels)), weights |
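A plain NumPy illustration of the same metric outside TensorFlow (truncate predictions to ints, compare with labels, weight by the nonzero-label mask that weights_nonzero produces); the numbers are made up:
import numpy as np

predictions = np.array([1.2, 0.7, 3.9, 0.0])
labels = np.array([1, 1, 3, 2])
weights = (labels != 0).astype(float)
hits = (predictions.astype(np.int32) == labels).astype(float)
print((hits * weights).sum() / weights.sum())  # weighted rounding accuracy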
def _repr_png_(self):
"""This is used by ipython to plot inline.
"""
app.process_events()
QApplication.processEvents()
img = read_pixels()
return bytes(_make_png(img)) | This is used by ipython to plot inline. | Below is the the instruction that describes the task:
### Input:
This is used by ipython to plot inline.
### Response:
def _repr_png_(self):
"""This is used by ipython to plot inline.
"""
app.process_events()
QApplication.processEvents()
img = read_pixels()
return bytes(_make_png(img)) |
def bots_create(self, bot):
"""
Save new bot
:param bot: bot object to save
:type bot: Bot
"""
self.client.bots(_method="POST", _json=bot.to_json(), _params=dict(userToken=self.token)) | Save new bot
:param bot: bot object to save
:type bot: Bot | Below is the the instruction that describes the task:
### Input:
Save new bot
:param bot: bot object to save
:type bot: Bot
### Response:
def bots_create(self, bot):
"""
Save new bot
:param bot: bot object to save
:type bot: Bot
"""
self.client.bots(_method="POST", _json=bot.to_json(), _params=dict(userToken=self.token)) |
def raise_for_execution_errors(nb, output_path):
"""Assigned parameters into the appropriate place in the input notebook
Parameters
----------
nb : NotebookNode
Executable notebook object
output_path : str
Path to write executed notebook
"""
error = None
for cell in nb.cells:
if cell.get("outputs") is None:
continue
for output in cell.outputs:
if output.output_type == "error":
error = PapermillExecutionError(
exec_count=cell.execution_count,
source=cell.source,
ename=output.ename,
evalue=output.evalue,
traceback=output.traceback,
)
break
if error:
# Write notebook back out with the Error Message at the top of the Notebook.
error_msg = ERROR_MESSAGE_TEMPLATE % str(error.exec_count)
error_msg_cell = nbformat.v4.new_code_cell(
source="%%html\n" + error_msg,
outputs=[
nbformat.v4.new_output(output_type="display_data", data={"text/html": error_msg})
],
metadata={"inputHidden": True, "hide_input": True},
)
nb.cells = [error_msg_cell] + nb.cells
write_ipynb(nb, output_path)
raise error | Assigned parameters into the appropriate place in the input notebook
Parameters
----------
nb : NotebookNode
Executable notebook object
output_path : str
Path to write executed notebook | Below is the the instruction that describes the task:
### Input:
Assigned parameters into the appropriate place in the input notebook
Parameters
----------
nb : NotebookNode
Executable notebook object
output_path : str
Path to write executed notebook
### Response:
def raise_for_execution_errors(nb, output_path):
"""Assigned parameters into the appropriate place in the input notebook
Parameters
----------
nb : NotebookNode
Executable notebook object
output_path : str
Path to write executed notebook
"""
error = None
for cell in nb.cells:
if cell.get("outputs") is None:
continue
for output in cell.outputs:
if output.output_type == "error":
error = PapermillExecutionError(
exec_count=cell.execution_count,
source=cell.source,
ename=output.ename,
evalue=output.evalue,
traceback=output.traceback,
)
break
if error:
# Write notebook back out with the Error Message at the top of the Notebook.
error_msg = ERROR_MESSAGE_TEMPLATE % str(error.exec_count)
error_msg_cell = nbformat.v4.new_code_cell(
source="%%html\n" + error_msg,
outputs=[
nbformat.v4.new_output(output_type="display_data", data={"text/html": error_msg})
],
metadata={"inputHidden": True, "hide_input": True},
)
nb.cells = [error_msg_cell] + nb.cells
write_ipynb(nb, output_path)
raise error |
def read(self, pos, size, **kwargs):
"""
Read a packet from the stream.
:param int pos: The packet number to read from the sequence of the stream. May be None to append to the stream.
:param size: The size to read. May be symbolic.
:param short_reads: Whether to replace the size with a symbolic value constrained to less than or equal to the original size. If unspecified, will be chosen based on the state option.
:return: A tuple of the data read (a bitvector of the length that is the maximum length of the read) and the actual size of the read.
"""
short_reads = kwargs.pop('short_reads', None)
# sanity check on read/write modes
if self.write_mode is None:
self.write_mode = False
elif self.write_mode is True:
raise SimFileError("Cannot read and write to the same SimPackets")
# sanity check on packet number and determine if data is already present
if pos is None:
pos = len(self.content)
if pos < 0:
raise SimFileError("SimPacket.read(%d): Negative packet number?" % pos)
elif pos > len(self.content):
raise SimFileError("SimPacket.read(%d): Packet number is past frontier of %d?" % (pos, len(self.content)))
elif pos != len(self.content):
_, realsize = self.content[pos]
self.state.solver.add(size <= realsize)
if not self.state.solver.satisfiable():
raise SimFileError("Packet read size constraint made state unsatisfiable???")
return self.content[pos] + (pos+1,)
# typecheck
if type(size) is int:
size = self.state.solver.BVV(size, self.state.arch.bits)
# The read is on the frontier. let's generate a new packet.
orig_size = size
max_size = None
# if short reads are enabled, replace size with a symbol
if short_reads is True or (short_reads is None and sim_options.SHORT_READS in self.state.options):
size = self.state.solver.BVS('packetsize_%d_%s' % (len(self.content), self.ident), self.state.arch.bits, key=('file', self.ident, 'packetsize', len(self.content)))
self.state.solver.add(size <= orig_size)
# figure out the maximum size of the read
if not self.state.solver.symbolic(size):
max_size = self.state.solver.eval(size)
elif self.state.solver.satisfiable(extra_constraints=(size <= self.state.libc.max_packet_size,)):
l.info("Constraining symbolic packet size to be less than %d", self.state.libc.max_packet_size)
if not self.state.solver.is_true(orig_size <= self.state.libc.max_packet_size):
self.state.solver.add(size <= self.state.libc.max_packet_size)
if not self.state.solver.symbolic(orig_size):
max_size = min(self.state.solver.eval(orig_size), self.state.libc.max_packet_size)
else:
max_size = self.state.solver.max(size)
else:
max_size = self.state.solver.min(size)
l.warning("Could not constrain symbolic packet size to <= %d; using minimum %d for size", self.state.libc.max_packet_size, max_size)
self.state.solver.add(size == max_size)
# generate the packet data and return it
data = self.state.solver.BVS('packet_%d_%s' % (len(self.content), self.ident), max_size * self.state.arch.byte_width, key=('file', self.ident, 'packet', len(self.content)))
packet = (data, size)
self.content.append(packet)
return packet + (pos+1,) | Read a packet from the stream.
:param int pos: The packet number to read from the sequence of the stream. May be None to append to the stream.
:param size: The size to read. May be symbolic.
:param short_reads: Whether to replace the size with a symbolic value constrained to less than or equal to the original size. If unspecified, will be chosen based on the state option.
:return: A tuple of the data read (a bitvector of the length that is the maximum length of the read) and the actual size of the read. | Below is the the instruction that describes the task:
### Input:
Read a packet from the stream.
:param int pos: The packet number to read from the sequence of the stream. May be None to append to the stream.
:param size: The size to read. May be symbolic.
:param short_reads: Whether to replace the size with a symbolic value constrained to less than or equal to the original size. If unspecified, will be chosen based on the state option.
:return: A tuple of the data read (a bitvector of the length that is the maximum length of the read) and the actual size of the read.
### Response:
def read(self, pos, size, **kwargs):
"""
Read a packet from the stream.
:param int pos: The packet number to read from the sequence of the stream. May be None to append to the stream.
:param size: The size to read. May be symbolic.
:param short_reads: Whether to replace the size with a symbolic value constrained to less than or equal to the original size. If unspecified, will be chosen based on the state option.
:return: A tuple of the data read (a bitvector of the length that is the maximum length of the read) and the actual size of the read.
"""
short_reads = kwargs.pop('short_reads', None)
# sanity check on read/write modes
if self.write_mode is None:
self.write_mode = False
elif self.write_mode is True:
raise SimFileError("Cannot read and write to the same SimPackets")
# sanity check on packet number and determine if data is already present
if pos is None:
pos = len(self.content)
if pos < 0:
raise SimFileError("SimPacket.read(%d): Negative packet number?" % pos)
elif pos > len(self.content):
raise SimFileError("SimPacket.read(%d): Packet number is past frontier of %d?" % (pos, len(self.content)))
elif pos != len(self.content):
_, realsize = self.content[pos]
self.state.solver.add(size <= realsize)
if not self.state.solver.satisfiable():
raise SimFileError("Packet read size constraint made state unsatisfiable???")
return self.content[pos] + (pos+1,)
# typecheck
if type(size) is int:
size = self.state.solver.BVV(size, self.state.arch.bits)
# The read is on the frontier. let's generate a new packet.
orig_size = size
max_size = None
# if short reads are enabled, replace size with a symbol
if short_reads is True or (short_reads is None and sim_options.SHORT_READS in self.state.options):
size = self.state.solver.BVS('packetsize_%d_%s' % (len(self.content), self.ident), self.state.arch.bits, key=('file', self.ident, 'packetsize', len(self.content)))
self.state.solver.add(size <= orig_size)
# figure out the maximum size of the read
if not self.state.solver.symbolic(size):
max_size = self.state.solver.eval(size)
elif self.state.solver.satisfiable(extra_constraints=(size <= self.state.libc.max_packet_size,)):
l.info("Constraining symbolic packet size to be less than %d", self.state.libc.max_packet_size)
if not self.state.solver.is_true(orig_size <= self.state.libc.max_packet_size):
self.state.solver.add(size <= self.state.libc.max_packet_size)
if not self.state.solver.symbolic(orig_size):
max_size = min(self.state.solver.eval(orig_size), self.state.libc.max_packet_size)
else:
max_size = self.state.solver.max(size)
else:
max_size = self.state.solver.min(size)
l.warning("Could not constrain symbolic packet size to <= %d; using minimum %d for size", self.state.libc.max_packet_size, max_size)
self.state.solver.add(size == max_size)
# generate the packet data and return it
data = self.state.solver.BVS('packet_%d_%s' % (len(self.content), self.ident), max_size * self.state.arch.byte_width, key=('file', self.ident, 'packet', len(self.content)))
packet = (data, size)
self.content.append(packet)
return packet + (pos+1,) |
def simxGetDistanceHandle(clientID, distanceObjectName, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
handle = ct.c_int()
if (sys.version_info[0] == 3) and (type(distanceObjectName) is str):
distanceObjectName=distanceObjectName.encode('utf-8')
return c_GetDistanceHandle(clientID, distanceObjectName, ct.byref(handle), operationMode), handle.value | Please have a look at the function description/documentation in the V-REP user manual | Below is the the instruction that describes the task:
### Input:
Please have a look at the function description/documentation in the V-REP user manual
### Response:
def simxGetDistanceHandle(clientID, distanceObjectName, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
handle = ct.c_int()
if (sys.version_info[0] == 3) and (type(distanceObjectName) is str):
distanceObjectName=distanceObjectName.encode('utf-8')
return c_GetDistanceHandle(clientID, distanceObjectName, ct.byref(handle), operationMode), handle.value |
def _new_masterpassword(self, password):
""" Generate a new random masterkey, encrypt it with the password and
store it in the store.
:param str password: Password to use for en-/de-cryption
"""
# make sure to not overwrite an existing key
if self.config_key in self.config and self.config[self.config_key]:
raise Exception("Storage already has a masterpassword!")
self.decrypted_master = hexlify(os.urandom(32)).decode("ascii")
# Encrypt and save master
self.password = password
self._save_encrypted_masterpassword()
return self.masterkey | Generate a new random masterkey, encrypt it with the password and
store it in the store.
:param str password: Password to use for en-/de-cryption | Below is the the instruction that describes the task:
### Input:
Generate a new random masterkey, encrypt it with the password and
store it in the store.
:param str password: Password to use for en-/de-cryption
### Response:
def _new_masterpassword(self, password):
""" Generate a new random masterkey, encrypt it with the password and
store it in the store.
:param str password: Password to use for en-/de-cryption
"""
# make sure to not overwrite an existing key
if self.config_key in self.config and self.config[self.config_key]:
raise Exception("Storage already has a masterpassword!")
self.decrypted_master = hexlify(os.urandom(32)).decode("ascii")
# Encrypt and save master
self.password = password
self._save_encrypted_masterpassword()
return self.masterkey |
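The key-generation step in the record above boils down to hex-encoding 32 bytes of OS randomness. A minimal standalone sketch of just that step (no wallet, config store, or encryption layer involved):

import os
from binascii import hexlify

# 32 random bytes -> 64 hex characters, decoded to an ASCII str as above.
decrypted_master = hexlify(os.urandom(32)).decode("ascii")
assert len(decrypted_master) == 64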
def has_ocsp_must_staple_extension(certificate: cryptography.x509.Certificate) -> bool:
"""Return True if the certificate has the OCSP Must-Staple extension defined in RFC 6066.
"""
has_ocsp_must_staple = False
try:
tls_feature_ext = certificate.extensions.get_extension_for_oid(ExtensionOID.TLS_FEATURE)
for feature_type in tls_feature_ext.value:
if feature_type == cryptography.x509.TLSFeatureType.status_request:
has_ocsp_must_staple = True
break
except ExtensionNotFound:
pass
return has_ocsp_must_staple | Return True if the certificate has the OCSP Must-Staple extension defined in RFC 6066. | Below is the the instruction that describes the task:
### Input:
Return True if the certificate has the OCSP Must-Staple extension defined in RFC 6066.
### Response:
def has_ocsp_must_staple_extension(certificate: cryptography.x509.Certificate) -> bool:
"""Return True if the certificate has the OCSP Must-Staple extension defined in RFC 6066.
"""
has_ocsp_must_staple = False
try:
tls_feature_ext = certificate.extensions.get_extension_for_oid(ExtensionOID.TLS_FEATURE)
for feature_type in tls_feature_ext.value:
if feature_type == cryptography.x509.TLSFeatureType.status_request:
has_ocsp_must_staple = True
break
except ExtensionNotFound:
pass
return has_ocsp_must_staple |
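A hedged usage sketch for the Must-Staple check above; it assumes a recent version of the `cryptography` package is installed and that a PEM-encoded certificate exists at the hypothetical path cert.pem.

from cryptography import x509

# Load a certificate from disk (the path is a placeholder) and run the check.
with open("cert.pem", "rb") as handle:
    certificate = x509.load_pem_x509_certificate(handle.read())
print("OCSP Must-Staple:", has_ocsp_must_staple_extension(certificate))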
def _fix_set_options(cls, options):
"""Alter the set options from None/strings to sets in place."""
optional_set_options = ('ignore', 'select')
mandatory_set_options = ('add_ignore', 'add_select')
def _get_set(value_str):
"""Split `value_str` by the delimiter `,` and return a set.
Removes any occurrences of '' in the set.
Also expand error code prefixes, to avoid doing this for every
file.
"""
return cls._expand_error_codes(set(value_str.split(',')) - {''})
for opt in optional_set_options:
value = getattr(options, opt)
if value is not None:
setattr(options, opt, _get_set(value))
for opt in mandatory_set_options:
value = getattr(options, opt)
if value is None:
value = ''
if not isinstance(value, Set):
value = _get_set(value)
setattr(options, opt, value)
return options | Alter the set options from None/strings to sets in place. | Below is the the instruction that describes the task:
### Input:
Alter the set options from None/strings to sets in place.
### Response:
def _fix_set_options(cls, options):
"""Alter the set options from None/strings to sets in place."""
optional_set_options = ('ignore', 'select')
mandatory_set_options = ('add_ignore', 'add_select')
def _get_set(value_str):
"""Split `value_str` by the delimiter `,` and return a set.
Removes any occurrences of '' in the set.
Also expand error code prefixes, to avoid doing this for every
file.
"""
return cls._expand_error_codes(set(value_str.split(',')) - {''})
for opt in optional_set_options:
value = getattr(options, opt)
if value is not None:
setattr(options, opt, _get_set(value))
for opt in mandatory_set_options:
value = getattr(options, opt)
if value is None:
value = ''
if not isinstance(value, Set):
value = _get_set(value)
setattr(options, opt, value)
return options |
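The core idea in the nested _get_set helper above is splitting a comma-separated option string into a set while dropping empty entries. A self-contained sketch of that step, without the class machinery or error-code prefix expansion:

def comma_separated_to_set(value_str):
    # Split on ',' and discard the empty strings left by stray commas.
    return set(value_str.split(',')) - {''}

print(comma_separated_to_set("D100,D101,,D203,"))  # {'D101', 'D100', 'D203'}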
def update_md5(filenames):
"""Update our built-in md5 registry"""
import re
for name in filenames:
base = os.path.basename(name)
f = open(name,'rb')
md5_data[base] = md5(f.read()).hexdigest()
f.close()
data = [" %r: %r,\n" % it for it in md5_data.items()]
data.sort()
repl = "".join(data)
import inspect
srcfile = inspect.getsourcefile(sys.modules[__name__])
f = open(srcfile, 'rb'); src = f.read(); f.close()
match = re.search("\nmd5_data = {\n([^}]+)}", src)
if not match:
print >>sys.stderr, "Internal error!"
sys.exit(2)
src = src[:match.start(1)] + repl + src[match.end(1):]
f = open(srcfile,'w')
f.write(src)
f.close() | Update our built-in md5 registry | Below is the the instruction that describes the task:
### Input:
Update our built-in md5 registry
### Response:
def update_md5(filenames):
"""Update our built-in md5 registry"""
import re
for name in filenames:
base = os.path.basename(name)
f = open(name,'rb')
md5_data[base] = md5(f.read()).hexdigest()
f.close()
data = [" %r: %r,\n" % it for it in md5_data.items()]
data.sort()
repl = "".join(data)
import inspect
srcfile = inspect.getsourcefile(sys.modules[__name__])
f = open(srcfile, 'rb'); src = f.read(); f.close()
match = re.search("\nmd5_data = {\n([^}]+)}", src)
if not match:
print >>sys.stderr, "Internal error!"
sys.exit(2)
src = src[:match.start(1)] + repl + src[match.end(1):]
f = open(srcfile,'w')
f.write(src)
f.close() |
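A simplified, Python 3 sketch of the hashing half of the routine above: it collects base-name -> md5 digest pairs for a list of files, but does not rewrite any source file.

import hashlib
import os

def collect_md5(filenames):
    digests = {}
    for name in filenames:
        with open(name, 'rb') as handle:
            digests[os.path.basename(name)] = hashlib.md5(handle.read()).hexdigest()
    return digests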
def auth(name, nodes, pcsuser='hacluster', pcspasswd='hacluster', extra_args=None):
'''
Ensure all nodes are authorized to the cluster
name
Irrelevant, not used (recommended: pcs_auth__auth)
nodes
a list of nodes which should be authorized to the cluster
pcsuser
user for communication with pcs (default: hacluster)
pcspasswd
password for pcsuser (default: hacluster)
extra_args
list of extra args for the \'pcs cluster auth\' command
Example:
.. code-block:: yaml
pcs_auth__auth:
pcs.auth:
- nodes:
- node1.example.com
- node2.example.com
- pcsuser: hacluster
- pcspasswd: hoonetorg
- extra_args: []
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
auth_required = False
authorized = __salt__['pcs.is_auth'](nodes=nodes)
log.trace('Output of pcs.is_auth: %s', authorized)
authorized_dict = {}
for line in authorized['stdout'].splitlines():
node = line.split(':')[0].strip()
auth_state = line.split(':')[1].strip()
if node in nodes:
authorized_dict.update({node: auth_state})
log.trace('authorized_dict: %s', authorized_dict)
for node in nodes:
if node in authorized_dict and authorized_dict[node] == 'Already authorized':
ret['comment'] += 'Node {0} is already authorized\n'.format(node)
else:
auth_required = True
if __opts__['test']:
ret['comment'] += 'Node is set to authorize: {0}\n'.format(node)
if not auth_required:
return ret
if __opts__['test']:
ret['result'] = None
return ret
if not isinstance(extra_args, (list, tuple)):
extra_args = []
if '--force' not in extra_args:
extra_args += ['--force']
authorize = __salt__['pcs.auth'](nodes=nodes, pcsuser=pcsuser, pcspasswd=pcspasswd, extra_args=extra_args)
log.trace('Output of pcs.auth: %s', authorize)
authorize_dict = {}
for line in authorize['stdout'].splitlines():
node = line.split(':')[0].strip()
auth_state = line.split(':')[1].strip()
if node in nodes:
authorize_dict.update({node: auth_state})
log.trace('authorize_dict: %s', authorize_dict)
for node in nodes:
if node in authorize_dict and authorize_dict[node] == 'Authorized':
ret['comment'] += 'Authorized {0}\n'.format(node)
ret['changes'].update({node: {'old': '', 'new': 'Authorized'}})
else:
ret['result'] = False
if node in authorized_dict:
ret['comment'] += 'Authorization check for node {0} returned: {1}\n'.format(node, authorized_dict[node])
if node in authorize_dict:
ret['comment'] += 'Failed to authorize {0} with error {1}\n'.format(node, authorize_dict[node])
return ret | Ensure all nodes are authorized to the cluster
name
Irrelevant, not used (recommended: pcs_auth__auth)
nodes
a list of nodes which should be authorized to the cluster
pcsuser
user for communication with pcs (default: hacluster)
pcspasswd
password for pcsuser (default: hacluster)
extra_args
list of extra args for the \'pcs cluster auth\' command
Example:
.. code-block:: yaml
pcs_auth__auth:
pcs.auth:
- nodes:
- node1.example.com
- node2.example.com
- pcsuser: hacluster
- pcspasswd: hoonetorg
- extra_args: [] | Below is the the instruction that describes the task:
### Input:
Ensure all nodes are authorized to the cluster
name
Irrelevant, not used (recommended: pcs_auth__auth)
nodes
a list of nodes which should be authorized to the cluster
pcsuser
user for communication with pcs (default: hacluster)
pcspasswd
password for pcsuser (default: hacluster)
extra_args
list of extra args for the \'pcs cluster auth\' command
Example:
.. code-block:: yaml
pcs_auth__auth:
pcs.auth:
- nodes:
- node1.example.com
- node2.example.com
- pcsuser: hacluster
- pcspasswd: hoonetorg
- extra_args: []
### Response:
def auth(name, nodes, pcsuser='hacluster', pcspasswd='hacluster', extra_args=None):
'''
Ensure all nodes are authorized to the cluster
name
Irrelevant, not used (recommended: pcs_auth__auth)
nodes
a list of nodes which should be authorized to the cluster
pcsuser
user for communication with pcs (default: hacluster)
pcspasswd
password for pcsuser (default: hacluster)
extra_args
list of extra args for the \'pcs cluster auth\' command
Example:
.. code-block:: yaml
pcs_auth__auth:
pcs.auth:
- nodes:
- node1.example.com
- node2.example.com
- pcsuser: hacluster
- pcspasswd: hoonetorg
- extra_args: []
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
auth_required = False
authorized = __salt__['pcs.is_auth'](nodes=nodes)
log.trace('Output of pcs.is_auth: %s', authorized)
authorized_dict = {}
for line in authorized['stdout'].splitlines():
node = line.split(':')[0].strip()
auth_state = line.split(':')[1].strip()
if node in nodes:
authorized_dict.update({node: auth_state})
log.trace('authorized_dict: %s', authorized_dict)
for node in nodes:
if node in authorized_dict and authorized_dict[node] == 'Already authorized':
ret['comment'] += 'Node {0} is already authorized\n'.format(node)
else:
auth_required = True
if __opts__['test']:
ret['comment'] += 'Node is set to authorize: {0}\n'.format(node)
if not auth_required:
return ret
if __opts__['test']:
ret['result'] = None
return ret
if not isinstance(extra_args, (list, tuple)):
extra_args = []
if '--force' not in extra_args:
extra_args += ['--force']
authorize = __salt__['pcs.auth'](nodes=nodes, pcsuser=pcsuser, pcspasswd=pcspasswd, extra_args=extra_args)
log.trace('Output of pcs.auth: %s', authorize)
authorize_dict = {}
for line in authorize['stdout'].splitlines():
node = line.split(':')[0].strip()
auth_state = line.split(':')[1].strip()
if node in nodes:
authorize_dict.update({node: auth_state})
log.trace('authorize_dict: %s', authorize_dict)
for node in nodes:
if node in authorize_dict and authorize_dict[node] == 'Authorized':
ret['comment'] += 'Authorized {0}\n'.format(node)
ret['changes'].update({node: {'old': '', 'new': 'Authorized'}})
else:
ret['result'] = False
if node in authorized_dict:
ret['comment'] += 'Authorization check for node {0} returned: {1}\n'.format(node, authorized_dict[node])
if node in authorize_dict:
ret['comment'] += 'Failed to authorize {0} with error {1}\n'.format(node, authorize_dict[node])
return ret |
def reaction_charge(reaction, compound_charge):
"""Calculate the overall charge for the specified reaction.
Args:
reaction: :class:`psamm.reaction.Reaction`.
compound_charge: a map from each compound to charge values.
"""
charge_sum = 0.0
for compound, value in reaction.compounds:
charge = compound_charge.get(compound.name, float('nan'))
charge_sum += charge * float(value)
return charge_sum | Calculate the overall charge for the specified reaction.
Args:
reaction: :class:`psamm.reaction.Reaction`.
compound_charge: a map from each compound to charge values. | Below is the the instruction that describes the task:
### Input:
Calculate the overall charge for the specified reaction.
Args:
reaction: :class:`psamm.reaction.Reaction`.
compound_charge: a map from each compound to charge values.
### Response:
def reaction_charge(reaction, compound_charge):
"""Calculate the overall charge for the specified reaction.
Args:
reaction: :class:`psamm.reaction.Reaction`.
compound_charge: a map from each compound to charge values.
"""
charge_sum = 0.0
for compound, value in reaction.compounds:
charge = compound_charge.get(compound.name, float('nan'))
charge_sum += charge * float(value)
return charge_sum |
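The same charge balance can be illustrated without the psamm Reaction class by reducing a reaction to (compound name, stoichiometric value) pairs; the toy example and negative-means-consumed convention below are assumptions made for illustration only.

def simple_reaction_charge(compounds, compound_charge):
    charge_sum = 0.0
    for name, value in compounds:
        # Unknown compounds contribute NaN, exactly as in the original.
        charge_sum += compound_charge.get(name, float('nan')) * float(value)
    return charge_sum

# Toy reaction 2 H+ + O2- -> H2O: the net charge change is zero.
print(simple_reaction_charge([('h', -2), ('o2', -1), ('h2o', 1)],
                             {'h': 1, 'o2': -2, 'h2o': 0}))  # 0.0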
def gen_mapname():
""" Generate a uniq mapfile pathname. """
filepath = None
while (filepath is None) or (os.path.exists(os.path.join(config['mapfiles_dir'], filepath))):
filepath = '%s.map' % _gen_string()
return filepath | Generate a unique mapfile pathname. | Below is the the instruction that describes the task:
### Input:
Generate a unique mapfile pathname.
### Response:
def gen_mapname():
""" Generate a uniq mapfile pathname. """
filepath = None
while (filepath is None) or (os.path.exists(os.path.join(config['mapfiles_dir'], filepath))):
filepath = '%s.map' % _gen_string()
return filepath |
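A standalone sketch of the retry-until-unused loop above, with uuid4 standing in for the private _gen_string() helper and the target directory passed explicitly (both of these are assumptions, not part of the original module):

import os
import uuid

def gen_unique_mapname(directory):
    filepath = None
    # Keep drawing names until one does not exist in the directory yet.
    while filepath is None or os.path.exists(os.path.join(directory, filepath)):
        filepath = '%s.map' % uuid.uuid4().hex
    return filepath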
def backpropagate_3d(uSin, angles, res, nm, lD=0, coords=None,
weight_angles=True, onlyreal=False,
padding=(True, True), padfac=1.75, padval=None,
intp_order=2, dtype=None,
num_cores=ncores,
save_memory=False,
copy=True,
count=None, max_count=None,
verbose=0):
r"""3D backpropagation
Three-dimensional diffraction tomography reconstruction
algorithm for scattering of a plane wave
:math:`u_0(\mathbf{r}) = u_0(x,y,z)`
by a dielectric object with refractive index
:math:`n(x,y,z)`.
This method implements the 3D backpropagation algorithm
:cite:`Mueller2015arxiv`.
.. math::
f(\mathbf{r}) =
-\frac{i k_\mathrm{m}}{2\pi}
\sum_{j=1}^{N} \! \Delta \phi_0 D_{-\phi_j} \!\!
\left \{
\text{FFT}^{-1}_{\mathrm{2D}}
\left \{
\left| k_\mathrm{Dx} \right|
\frac{\text{FFT}_{\mathrm{2D}} \left \{
u_{\mathrm{B},\phi_j}(x_\mathrm{D}, y_\mathrm{D}) \right \}}
{u_0(l_\mathrm{D})}
\exp \! \left[i k_\mathrm{m}(M - 1) \cdot
(z_{\phi_j}-l_\mathrm{D}) \right]
\right \}
\right \}
with the forward :math:`\text{FFT}_{\mathrm{2D}}` and inverse
:math:`\text{FFT}^{-1}_{\mathrm{2D}}` 2D fast Fourier transform, the
rotational operator :math:`D_{-\phi_j}`, the angular distance between the
projections :math:`\Delta \phi_0`, the ramp filter in Fourier space
:math:`|k_\mathrm{Dx}|`, and the propagation distance
:math:`(z_{\phi_j}-l_\mathrm{D})`.
Parameters
----------
uSin: (A, Ny, Nx) ndarray
Three-dimensional sinogram of plane recordings
:math:`u_{\mathrm{B}, \phi_j}(x_\mathrm{D}, y_\mathrm{D})`
divided by the incident plane wave :math:`u_0(l_\mathrm{D})`
measured at the detector.
angles: (A,) ndarray
Angular positions :math:`\phi_j` of `uSin` in radians.
res: float
Vacuum wavelength of the light :math:`\lambda` in pixels.
nm: float
Refractive index of the surrounding medium :math:`n_\mathrm{m}`.
lD: float
Distance from center of rotation to detector plane
:math:`l_\mathrm{D}` in pixels.
coords: None [(3, M) ndarray]
Only compute the output image at these coordinates. This
keyword is reserved for future versions and is not
implemented yet.
weight_angles: bool
If `True`, weights each backpropagated projection with a factor
proportional to the angular distance between the neighboring
projections.
.. math::
\Delta \phi_0 \longmapsto \Delta \phi_j =
\frac{\phi_{j+1} - \phi_{j-1}}{2}
.. versionadded:: 0.1.1
onlyreal: bool
If `True`, only the real part of the reconstructed image
will be returned. This saves computation time.
padding: tuple of bool
Pad the input data to the second next power of 2 before
Fourier transforming. This reduces artifacts and speeds up
the process for input image sizes that are not powers of 2.
The default is padding in x and y: `padding=(True, True)`.
For padding only in x-direction (e.g. for cylindrical
symmetries), set `padding` to `(True, False)`. To turn off
padding, set it to `(False, False)`.
padfac: float
Increase padding size of the input data. A value greater
than one will trigger padding to the second-next power of
two. For example, a value of 1.75 will lead to a padded
size of 256 for an initial size of 144, whereas it will
lead to a padded size of 512 for an initial size of 150.
Values greater than 2 are allowed. This parameter may
greatly increase memory usage!
padval: float
The value used for padding. This is important for the Rytov
approximation, where an approximate zero in the phase might
translate to 2πi due to the unwrapping algorithm. In that
case, this value should be a multiple of 2πi.
If `padval` is `None`, then the edge values are used for
padding (see documentation of :func:`numpy.pad`).
intp_order: int between 0 and 5
Order of the interpolation for rotation.
See :func:`scipy.ndimage.interpolation.rotate` for details.
dtype: dtype object or argument for :func:`numpy.dtype`
The data type that is used for calculations (float or double).
Defaults to `numpy.float_`.
num_cores: int
The number of cores to use for parallel operations. This value
defaults to the number of cores on the system.
save_memory: bool
Saves memory at the cost of longer computation time.
.. versionadded:: 0.1.5
copy: bool
Copy input sinogram `uSin` for data processing. If `copy`
is set to `False`, then `uSin` will be overridden.
.. versionadded:: 0.1.5
count, max_count: multiprocessing.Value or `None`
Can be used to monitor the progress of the algorithm.
Initially, the value of `max_count.value` is incremented
by the total number of steps. At each step, the value
of `count.value` is incremented.
verbose: int
Increment to increase verbosity.
Returns
-------
f: ndarray of shape (Nx, Ny, Nx), complex if `onlyreal==False`
Reconstructed object function :math:`f(\mathbf{r})` as defined
by the Helmholtz equation.
:math:`f(x,z) =
k_m^2 \left(\left(\frac{n(x,z)}{n_m}\right)^2 -1\right)`
See Also
--------
odt_to_ri: conversion of the object function :math:`f(\mathbf{r})`
to refractive index :math:`n(\mathbf{r})`
Notes
-----
Do not use the parameter `lD` in combination with the Rytov
approximation - the propagation is not correctly described.
Instead, numerically refocus the sinogram prior to converting
it to Rytov data (using e.g. :func:`odtbrain.sinogram_as_rytov`)
with a numerical focusing algorithm (available in the Python
package :py:mod:`nrefocus`).
"""
A = angles.size
if len(uSin.shape) != 3:
raise ValueError("Input data `uSin` must have shape (A,Ny,Nx).")
if len(uSin) != A:
raise ValueError("`len(angles)` must be equal to `len(uSin)`.")
if len(list(padding)) != 2:
raise ValueError("`padding` must be boolean tuple of length 2!")
if np.array(padding).dtype is not np.dtype(bool):
raise ValueError("Parameter `padding` must be boolean tuple.")
if coords is not None:
raise NotImplementedError("Setting coordinates is not yet supported.")
if num_cores > ncores:
raise ValueError("`num_cores` must not exceed number "
+ "of physical cores: {}".format(ncores))
# setup dtype
if dtype is None:
dtype = np.float_
dtype = np.dtype(dtype)
if dtype.name not in ["float32", "float64"]:
raise ValueError("dtype must be float32 or float64!")
dtype_complex = np.dtype("complex{}".format(
2 * np.int(dtype.name.strip("float"))))
# set ctype
ct_dt_map = {np.dtype(np.float32): ctypes.c_float,
np.dtype(np.float64): ctypes.c_double
}
# progress
if max_count is not None:
max_count.value += A + 2
ne.set_num_threads(num_cores)
uSin = np.array(uSin, copy=copy)
# lengths of the input data
lny, lnx = uSin.shape[1], uSin.shape[2]
# The z-size of the output array must match the x-size.
# The rotation is performed about the y-axis (lny).
ln = lnx
# We perform zero-padding before performing the Fourier transform.
# This gets rid of artifacts due to false periodicity and also
# speeds up Fourier transforms if the input image size is not
# a power of 2.
orderx = np.int(max(64., 2**np.ceil(np.log(lnx * padfac) / np.log(2))))
ordery = np.int(max(64., 2**np.ceil(np.log(lny * padfac) / np.log(2))))
if padding[0]:
padx = orderx - lnx
else:
padx = 0
if padding[1]:
pady = ordery - lny
else:
pady = 0
padyl = np.int(np.ceil(pady / 2))
padyr = pady - padyl
padxl = np.int(np.ceil(padx / 2))
padxr = padx - padxl
# zero-padded length of sinogram.
lNx, lNy = lnx + padx, lny + pady
lNz = ln
if verbose > 0:
print("......Image size (x,y): {}x{}, padded: {}x{}".format(
lnx, lny, lNx, lNy))
# Perform weighting
if weight_angles:
weights = util.compute_angle_weights_1d(angles).reshape(-1, 1, 1)
uSin *= weights
# Cut-Off frequency
# km [1/px]
km = (2 * np.pi * nm) / res
# Here, the notation for
# a wave propagating to the right is:
#
# u0(x) = exp(ikx)
#
# However, in physics usually we use the other sign convention:
#
# u0(x) = exp(-ikx)
#
# In order to be consistent with programs like Meep or our
# scattering script for a dielectric cylinder, we want to use the
# latter sign convention.
# This is not a big problem. We only need to multiply the imaginary
# part of the scattered wave by -1.
# Ask for the filter. Do not include zero (first element).
#
# Integrals over ϕ₀ [0,2π]; kx [-kₘ,kₘ]
# - double coverage factor 1/2 already included
# - unitary angular frequency to unitary ordinary frequency
# conversion performed in calculation of UB=FT(uB).
#
# f(r) = -i kₘ / ((2π)² a₀) (prefactor)
# * iiint dϕ₀ dkx dky (prefactor)
# * |kx| (prefactor)
# * exp(-i kₘ M lD ) (prefactor)
# * UBϕ₀(kx) (dependent on ϕ₀)
# * exp( i (kx t⊥ + kₘ (M - 1) s₀) r ) (dependent on ϕ₀ and r)
# (r and s₀ are vectors. The last term contains a dot-product)
#
# kₘM = sqrt( kₘ² - kx² - ky² )
# t⊥ = ( cos(ϕ₀), ky/kx, sin(ϕ₀) )
# s₀ = ( -sin(ϕ₀), 0 , cos(ϕ₀) )
#
# The filter can be split into two parts
#
# 1) part without dependence on the z-coordinate
#
# -i kₘ / ((2π)² a₀)
# * iiint dϕ₀ dkx dky
# * |kx|
# * exp(-i kₘ M lD )
#
# 2) part with dependence of the z-coordinate
#
# exp( i (kx t⊥ + kₘ (M - 1) s₀) r )
#
# The filter (1) can be performed using the classical filter process
# as in the backprojection algorithm.
#
#
# Corresponding sample frequencies
fx = np.fft.fftfreq(lNx) # 1D array
fy = np.fft.fftfreq(lNy) # 1D array
# kx is a 1D array.
kx = 2 * np.pi * fx
ky = 2 * np.pi * fy
# Differentials for integral
dphi0 = 2 * np.pi / A
# We will later multiply with phi0.
# y, x
kx = kx.reshape(1, -1)
ky = ky.reshape(-1, 1)
# Low-pass filter:
# less-than-or-equal would give us zero division error.
filter_klp = (kx**2 + ky**2 < km**2)
# Filter M so there are no nans from the root
M = 1. / km * np.sqrt((km**2 - kx**2 - ky**2) * filter_klp)
prefactor = -1j * km / (2 * np.pi)
prefactor *= dphi0
# Also filter the prefactor, so nothing outside the required
# low-pass contributes to the sum.
prefactor *= np.abs(kx) * filter_klp
# prefactor *= np.sqrt(((kx**2+ky**2)) * filter_klp )
# new in version 0.1.4:
# We multiply by the factor (M-1) instead of just (M)
# to take into account that we have a scattered
# wave that is normalized by u0.
prefactor *= np.exp(-1j * km * (M-1) * lD)
if count is not None:
count.value += 1
# filter (2) must be applied before rotation as well
# exp( i (kx t⊥ + kₘ (M - 1) s₀) r )
#
# kₘM = sqrt( kₘ² - kx² - ky² )
# t⊥ = ( cos(ϕ₀), ky/kx, sin(ϕ₀) )
# s₀ = ( -sin(ϕ₀), 0 , cos(ϕ₀) )
#
# This filter is effectively an inverse Fourier transform
#
# exp(i kx xD) exp(i ky yD) exp(i kₘ (M - 1) zD )
#
# xD = x cos(ϕ₀) + z sin(ϕ₀)
# zD = - x sin(ϕ₀) + z cos(ϕ₀)
# Everything is in pixels
center = lNz / 2.0
z = np.linspace(-center, center, lNz, endpoint=False)
zv = z.reshape(-1, 1, 1)
# z, y, x
Mp = M.reshape(lNy, lNx)
# filter2 = np.exp(1j * zv * km * (Mp - 1))
f2_exp_fac = 1j * km * (Mp - 1)
if save_memory:
# compute filter2 later
pass
else:
# compute filter2 now
filter2 = ne.evaluate("exp(factor * zv)",
local_dict={"factor": f2_exp_fac,
"zv": zv})
# occupies some amount of ram, but yields faster
# computation later
if count is not None:
count.value += 1
# Prepare complex output image
if onlyreal:
outarr = np.zeros((ln, lny, lnx), dtype=dtype)
else:
outarr = np.zeros((ln, lny, lnx), dtype=dtype_complex)
# Create plan for FFTW
# save memory by in-place operations
# projection = np.fft.fft2(sino, axes=(-1,-2)) * prefactor
# FFTW-flag is "estimate":
# specifies that, instead of actual measurements of different
# algorithms, a simple heuristic is used to pick a (probably
# sub-optimal) plan quickly. With this flag, the input/output
# arrays are not overwritten during planning.
# Byte-aligned arrays
oneslice = pyfftw.empty_aligned((lNy, lNx), dtype_complex)
myfftw_plan = pyfftw.FFTW(oneslice, oneslice, threads=num_cores,
flags=["FFTW_ESTIMATE"], axes=(0, 1))
# Create plan for IFFTW:
inarr = pyfftw.empty_aligned((lNy, lNx), dtype_complex)
# inarr[:] = (projection[0]*filter2)[0,:,:]
# plan is "patient":
# FFTW_PATIENT is like FFTW_MEASURE, but considers a wider range
# of algorithms and often produces a “more optimal” plan
# (especially for large transforms), but at the expense of
# several times longer planning time (especially for large
# transforms).
# print(inarr.flags)
myifftw_plan = pyfftw.FFTW(inarr, inarr, threads=num_cores,
axes=(0, 1),
direction="FFTW_BACKWARD",
flags=["FFTW_MEASURE"])
# Setup a shared array
shared_array = mp.RawArray(ct_dt_map[dtype], ln * lny * lnx)
arr = np.frombuffer(shared_array, dtype=dtype).reshape(ln, lny, lnx)
# Initialize the pool with the shared array
pool4loop = mp.Pool(processes=num_cores,
initializer=_init_worker,
initargs=(shared_array, (ln, lny, lnx), dtype))
# filtered projections in loop
filtered_proj = np.zeros((ln, lny, lnx), dtype=dtype_complex)
for aa in np.arange(A):
if padval is None:
oneslice[:] = np.pad(uSin[aa],
((padyl, padyr), (padxl, padxr)),
mode="edge")
else:
oneslice[:] = np.pad(uSin[aa],
((padyl, padyr), (padxl, padxr)),
mode="linear_ramp",
end_values=(padval,))
myfftw_plan.execute()
# normalize to (lNx * lNy) for FFTW and multiply with prefactor
oneslice *= prefactor / (lNx * lNy)
# 14x Speedup with fftw3 compared to numpy fft and
# memory reduction by a factor of 2!
# ifft will be computed in-place
for p in range(len(zv)):
if save_memory:
# compute filter2 here;
# this is comparatively slower than the other case
ne.evaluate("exp(factor * zvp) * projectioni",
local_dict={"zvp": zv[p],
"projectioni": oneslice,
"factor": f2_exp_fac},
out=inarr)
else:
# use universal functions
np.multiply(filter2[p], oneslice, out=inarr)
myifftw_plan.execute()
filtered_proj[p, :, :] = inarr[padyl:lny+padyl, padxl:lnx+padxl]
# resize image to original size
# The copy is necessary to prevent memory leakage.
arr[:] = filtered_proj.real
phi0 = np.rad2deg(angles[aa])
if not onlyreal:
filtered_proj_imag = filtered_proj.imag
_mprotate(phi0, lny, pool4loop, intp_order)
outarr.real += arr
if not onlyreal:
arr[:] = filtered_proj_imag
_mprotate(phi0, lny, pool4loop, intp_order)
outarr.imag += arr
if count is not None:
count.value += 1
pool4loop.terminate()
pool4loop.join()
_cleanup_worker()
return outarr | r"""3D backpropagation
Three-dimensional diffraction tomography reconstruction
algorithm for scattering of a plane wave
:math:`u_0(\mathbf{r}) = u_0(x,y,z)`
by a dielectric object with refractive index
:math:`n(x,y,z)`.
This method implements the 3D backpropagation algorithm
:cite:`Mueller2015arxiv`.
.. math::
f(\mathbf{r}) =
-\frac{i k_\mathrm{m}}{2\pi}
\sum_{j=1}^{N} \! \Delta \phi_0 D_{-\phi_j} \!\!
\left \{
\text{FFT}^{-1}_{\mathrm{2D}}
\left \{
\left| k_\mathrm{Dx} \right|
\frac{\text{FFT}_{\mathrm{2D}} \left \{
u_{\mathrm{B},\phi_j}(x_\mathrm{D}, y_\mathrm{D}) \right \}}
{u_0(l_\mathrm{D})}
\exp \! \left[i k_\mathrm{m}(M - 1) \cdot
(z_{\phi_j}-l_\mathrm{D}) \right]
\right \}
\right \}
with the forward :math:`\text{FFT}_{\mathrm{2D}}` and inverse
:math:`\text{FFT}^{-1}_{\mathrm{2D}}` 2D fast Fourier transform, the
rotational operator :math:`D_{-\phi_j}`, the angular distance between the
projections :math:`\Delta \phi_0`, the ramp filter in Fourier space
:math:`|k_\mathrm{Dx}|`, and the propagation distance
:math:`(z_{\phi_j}-l_\mathrm{D})`.
Parameters
----------
uSin: (A, Ny, Nx) ndarray
Three-dimensional sinogram of plane recordings
:math:`u_{\mathrm{B}, \phi_j}(x_\mathrm{D}, y_\mathrm{D})`
divided by the incident plane wave :math:`u_0(l_\mathrm{D})`
measured at the detector.
angles: (A,) ndarray
Angular positions :math:`\phi_j` of `uSin` in radians.
res: float
Vacuum wavelength of the light :math:`\lambda` in pixels.
nm: float
Refractive index of the surrounding medium :math:`n_\mathrm{m}`.
lD: float
Distance from center of rotation to detector plane
:math:`l_\mathrm{D}` in pixels.
coords: None [(3, M) ndarray]
Only compute the output image at these coordinates. This
keyword is reserved for future versions and is not
implemented yet.
weight_angles: bool
If `True`, weights each backpropagated projection with a factor
proportional to the angular distance between the neighboring
projections.
.. math::
\Delta \phi_0 \longmapsto \Delta \phi_j =
\frac{\phi_{j+1} - \phi_{j-1}}{2}
.. versionadded:: 0.1.1
onlyreal: bool
If `True`, only the real part of the reconstructed image
will be returned. This saves computation time.
padding: tuple of bool
Pad the input data to the second next power of 2 before
Fourier transforming. This reduces artifacts and speeds up
the process for input image sizes that are not powers of 2.
The default is padding in x and y: `padding=(True, True)`.
For padding only in x-direction (e.g. for cylindrical
symmetries), set `padding` to `(True, False)`. To turn off
padding, set it to `(False, False)`.
padfac: float
Increase padding size of the input data. A value greater
than one will trigger padding to the second-next power of
two. For example, a value of 1.75 will lead to a padded
size of 256 for an initial size of 144, whereas it will
lead to a padded size of 512 for an initial size of 150.
Values greater than 2 are allowed. This parameter may
greatly increase memory usage!
padval: float
The value used for padding. This is important for the Rytov
approximation, where an approximate zero in the phase might
translate to 2πi due to the unwrapping algorithm. In that
case, this value should be a multiple of 2πi.
If `padval` is `None`, then the edge values are used for
padding (see documentation of :func:`numpy.pad`).
intp_order: int between 0 and 5
Order of the interpolation for rotation.
See :func:`scipy.ndimage.interpolation.rotate` for details.
dtype: dtype object or argument for :func:`numpy.dtype`
The data type that is used for calculations (float or double).
Defaults to `numpy.float_`.
num_cores: int
The number of cores to use for parallel operations. This value
defaults to the number of cores on the system.
save_memory: bool
Saves memory at the cost of longer computation time.
.. versionadded:: 0.1.5
copy: bool
Copy input sinogram `uSin` for data processing. If `copy`
is set to `False`, then `uSin` will be overridden.
.. versionadded:: 0.1.5
count, max_count: multiprocessing.Value or `None`
Can be used to monitor the progress of the algorithm.
Initially, the value of `max_count.value` is incremented
by the total number of steps. At each step, the value
of `count.value` is incremented.
verbose: int
Increment to increase verbosity.
Returns
-------
f: ndarray of shape (Nx, Ny, Nx), complex if `onlyreal==False`
Reconstructed object function :math:`f(\mathbf{r})` as defined
by the Helmholtz equation.
:math:`f(x,z) =
k_m^2 \left(\left(\frac{n(x,z)}{n_m}\right)^2 -1\right)`
See Also
--------
odt_to_ri: conversion of the object function :math:`f(\mathbf{r})`
to refractive index :math:`n(\mathbf{r})`
Notes
-----
Do not use the parameter `lD` in combination with the Rytov
approximation - the propagation is not correctly described.
Instead, numerically refocus the sinogram prior to converting
it to Rytov data (using e.g. :func:`odtbrain.sinogram_as_rytov`)
with a numerical focusing algorithm (available in the Python
package :py:mod:`nrefocus`). | Below is the the instruction that describes the task:
### Input:
r"""3D backpropagation
Three-dimensional diffraction tomography reconstruction
algorithm for scattering of a plane wave
:math:`u_0(\mathbf{r}) = u_0(x,y,z)`
by a dielectric object with refractive index
:math:`n(x,y,z)`.
This method implements the 3D backpropagation algorithm
:cite:`Mueller2015arxiv`.
.. math::
f(\mathbf{r}) =
-\frac{i k_\mathrm{m}}{2\pi}
\sum_{j=1}^{N} \! \Delta \phi_0 D_{-\phi_j} \!\!
\left \{
\text{FFT}^{-1}_{\mathrm{2D}}
\left \{
\left| k_\mathrm{Dx} \right|
\frac{\text{FFT}_{\mathrm{2D}} \left \{
u_{\mathrm{B},\phi_j}(x_\mathrm{D}, y_\mathrm{D}) \right \}}
{u_0(l_\mathrm{D})}
\exp \! \left[i k_\mathrm{m}(M - 1) \cdot
(z_{\phi_j}-l_\mathrm{D}) \right]
\right \}
\right \}
with the forward :math:`\text{FFT}_{\mathrm{2D}}` and inverse
:math:`\text{FFT}^{-1}_{\mathrm{2D}}` 2D fast Fourier transform, the
rotational operator :math:`D_{-\phi_j}`, the angular distance between the
projections :math:`\Delta \phi_0`, the ramp filter in Fourier space
:math:`|k_\mathrm{Dx}|`, and the propagation distance
:math:`(z_{\phi_j}-l_\mathrm{D})`.
Parameters
----------
uSin: (A, Ny, Nx) ndarray
Three-dimensional sinogram of plane recordings
:math:`u_{\mathrm{B}, \phi_j}(x_\mathrm{D}, y_\mathrm{D})`
divided by the incident plane wave :math:`u_0(l_\mathrm{D})`
measured at the detector.
angles: (A,) ndarray
Angular positions :math:`\phi_j` of `uSin` in radians.
res: float
Vacuum wavelength of the light :math:`\lambda` in pixels.
nm: float
Refractive index of the surrounding medium :math:`n_\mathrm{m}`.
lD: float
Distance from center of rotation to detector plane
:math:`l_\mathrm{D}` in pixels.
coords: None [(3, M) ndarray]
Only compute the output image at these coordinates. This
keyword is reserved for future versions and is not
implemented yet.
weight_angles: bool
If `True`, weights each backpropagated projection with a factor
proportional to the angular distance between the neighboring
projections.
.. math::
\Delta \phi_0 \longmapsto \Delta \phi_j =
\frac{\phi_{j+1} - \phi_{j-1}}{2}
.. versionadded:: 0.1.1
onlyreal: bool
If `True`, only the real part of the reconstructed image
will be returned. This saves computation time.
padding: tuple of bool
Pad the input data to the second next power of 2 before
Fourier transforming. This reduces artifacts and speeds up
the process for input image sizes that are not powers of 2.
The default is padding in x and y: `padding=(True, True)`.
For padding only in x-direction (e.g. for cylindrical
symmetries), set `padding` to `(True, False)`. To turn off
padding, set it to `(False, False)`.
padfac: float
Increase padding size of the input data. A value greater
than one will trigger padding to the second-next power of
two. For example, a value of 1.75 will lead to a padded
size of 256 for an initial size of 144, whereas it will
lead to a padded size of 512 for an initial size of 150.
Values greater than 2 are allowed. This parameter may
greatly increase memory usage!
padval: float
The value used for padding. This is important for the Rytov
approximation, where an approximate zero in the phase might
translate to 2πi due to the unwrapping algorithm. In that
case, this value should be a multiple of 2πi.
If `padval` is `None`, then the edge values are used for
padding (see documentation of :func:`numpy.pad`).
intp_order: int between 0 and 5
Order of the interpolation for rotation.
See :func:`scipy.ndimage.interpolation.rotate` for details.
dtype: dtype object or argument for :func:`numpy.dtype`
The data type that is used for calculations (float or double).
Defaults to `numpy.float_`.
num_cores: int
The number of cores to use for parallel operations. This value
defaults to the number of cores on the system.
save_memory: bool
Saves memory at the cost of longer computation time.
.. versionadded:: 0.1.5
copy: bool
Copy input sinogram `uSin` for data processing. If `copy`
is set to `False`, then `uSin` will be overridden.
.. versionadded:: 0.1.5
count, max_count: multiprocessing.Value or `None`
Can be used to monitor the progress of the algorithm.
Initially, the value of `max_count.value` is incremented
by the total number of steps. At each step, the value
of `count.value` is incremented.
verbose: int
Increment to increase verbosity.
Returns
-------
f: ndarray of shape (Nx, Ny, Nx), complex if `onlyreal==False`
Reconstructed object function :math:`f(\mathbf{r})` as defined
by the Helmholtz equation.
:math:`f(x,z) =
k_m^2 \left(\left(\frac{n(x,z)}{n_m}\right)^2 -1\right)`
See Also
--------
odt_to_ri: conversion of the object function :math:`f(\mathbf{r})`
to refractive index :math:`n(\mathbf{r})`
Notes
-----
Do not use the parameter `lD` in combination with the Rytov
approximation - the propagation is not correctly described.
Instead, numerically refocus the sinogram prior to converting
it to Rytov data (using e.g. :func:`odtbrain.sinogram_as_rytov`)
with a numerical focusing algorithm (available in the Python
package :py:mod:`nrefocus`).
### Response:
def backpropagate_3d(uSin, angles, res, nm, lD=0, coords=None,
weight_angles=True, onlyreal=False,
padding=(True, True), padfac=1.75, padval=None,
intp_order=2, dtype=None,
num_cores=ncores,
save_memory=False,
copy=True,
count=None, max_count=None,
verbose=0):
r"""3D backpropagation
Three-dimensional diffraction tomography reconstruction
algorithm for scattering of a plane wave
:math:`u_0(\mathbf{r}) = u_0(x,y,z)`
by a dielectric object with refractive index
:math:`n(x,y,z)`.
This method implements the 3D backpropagation algorithm
:cite:`Mueller2015arxiv`.
.. math::
f(\mathbf{r}) =
-\frac{i k_\mathrm{m}}{2\pi}
\sum_{j=1}^{N} \! \Delta \phi_0 D_{-\phi_j} \!\!
\left \{
\text{FFT}^{-1}_{\mathrm{2D}}
\left \{
\left| k_\mathrm{Dx} \right|
\frac{\text{FFT}_{\mathrm{2D}} \left \{
u_{\mathrm{B},\phi_j}(x_\mathrm{D}, y_\mathrm{D}) \right \}}
{u_0(l_\mathrm{D})}
\exp \! \left[i k_\mathrm{m}(M - 1) \cdot
(z_{\phi_j}-l_\mathrm{D}) \right]
\right \}
\right \}
with the forward :math:`\text{FFT}_{\mathrm{2D}}` and inverse
:math:`\text{FFT}^{-1}_{\mathrm{2D}}` 2D fast Fourier transform, the
rotational operator :math:`D_{-\phi_j}`, the angular distance between the
projections :math:`\Delta \phi_0`, the ramp filter in Fourier space
:math:`|k_\mathrm{Dx}|`, and the propagation distance
:math:`(z_{\phi_j}-l_\mathrm{D})`.
Parameters
----------
uSin: (A, Ny, Nx) ndarray
Three-dimensional sinogram of plane recordings
:math:`u_{\mathrm{B}, \phi_j}(x_\mathrm{D}, y_\mathrm{D})`
divided by the incident plane wave :math:`u_0(l_\mathrm{D})`
measured at the detector.
angles: (A,) ndarray
Angular positions :math:`\phi_j` of `uSin` in radians.
res: float
Vacuum wavelength of the light :math:`\lambda` in pixels.
nm: float
Refractive index of the surrounding medium :math:`n_\mathrm{m}`.
lD: float
Distance from center of rotation to detector plane
:math:`l_\mathrm{D}` in pixels.
coords: None [(3, M) ndarray]
Only compute the output image at these coordinates. This
keyword is reserved for future versions and is not
implemented yet.
weight_angles: bool
If `True`, weights each backpropagated projection with a factor
proportional to the angular distance between the neighboring
projections.
.. math::
\Delta \phi_0 \longmapsto \Delta \phi_j =
\frac{\phi_{j+1} - \phi_{j-1}}{2}
.. versionadded:: 0.1.1
onlyreal: bool
If `True`, only the real part of the reconstructed image
will be returned. This saves computation time.
padding: tuple of bool
Pad the input data to the second next power of 2 before
Fourier transforming. This reduces artifacts and speeds up
the process for input image sizes that are not powers of 2.
The default is padding in x and y: `padding=(True, True)`.
For padding only in x-direction (e.g. for cylindrical
symmetries), set `padding` to `(True, False)`. To turn off
padding, set it to `(False, False)`.
padfac: float
Increase padding size of the input data. A value greater
than one will trigger padding to the second-next power of
two. For example, a value of 1.75 will lead to a padded
size of 256 for an initial size of 144, whereas it will
lead to a padded size of 512 for an initial size of 150.
Values greater than 2 are allowed. This parameter may
greatly increase memory usage!
padval: float
The value used for padding. This is important for the Rytov
approximation, where an approximate zero in the phase might
translate to 2πi due to the unwrapping algorithm. In that
case, this value should be a multiple of 2πi.
If `padval` is `None`, then the edge values are used for
padding (see documentation of :func:`numpy.pad`).
intp_order: int between 0 and 5
Order of the interpolation for rotation.
See :func:`scipy.ndimage.interpolation.rotate` for details.
dtype: dtype object or argument for :func:`numpy.dtype`
The data type that is used for calculations (float or double).
Defaults to `numpy.float_`.
num_cores: int
The number of cores to use for parallel operations. This value
defaults to the number of cores on the system.
save_memory: bool
Saves memory at the cost of longer computation time.
.. versionadded:: 0.1.5
copy: bool
Copy input sinogram `uSin` for data processing. If `copy`
is set to `False`, then `uSin` will be overridden.
.. versionadded:: 0.1.5
count, max_count: multiprocessing.Value or `None`
Can be used to monitor the progress of the algorithm.
Initially, the value of `max_count.value` is incremented
by the total number of steps. At each step, the value
of `count.value` is incremented.
verbose: int
Increment to increase verbosity.
Returns
-------
f: ndarray of shape (Nx, Ny, Nx), complex if `onlyreal==False`
Reconstructed object function :math:`f(\mathbf{r})` as defined
by the Helmholtz equation.
:math:`f(x,z) =
k_m^2 \left(\left(\frac{n(x,z)}{n_m}\right)^2 -1\right)`
See Also
--------
odt_to_ri: conversion of the object function :math:`f(\mathbf{r})`
to refractive index :math:`n(\mathbf{r})`
Notes
-----
Do not use the parameter `lD` in combination with the Rytov
approximation - the propagation is not correctly described.
Instead, numerically refocus the sinogram prior to converting
it to Rytov data (using e.g. :func:`odtbrain.sinogram_as_rytov`)
with a numerical focusing algorithm (available in the Python
package :py:mod:`nrefocus`).
"""
A = angles.size
if len(uSin.shape) != 3:
raise ValueError("Input data `uSin` must have shape (A,Ny,Nx).")
if len(uSin) != A:
raise ValueError("`len(angles)` must be equal to `len(uSin)`.")
if len(list(padding)) != 2:
raise ValueError("`padding` must be boolean tuple of length 2!")
if np.array(padding).dtype is not np.dtype(bool):
raise ValueError("Parameter `padding` must be boolean tuple.")
if coords is not None:
raise NotImplementedError("Setting coordinates is not yet supported.")
if num_cores > ncores:
raise ValueError("`num_cores` must not exceed number "
+ "of physical cores: {}".format(ncores))
# setup dtype
if dtype is None:
dtype = np.float_
dtype = np.dtype(dtype)
if dtype.name not in ["float32", "float64"]:
raise ValueError("dtype must be float32 or float64!")
dtype_complex = np.dtype("complex{}".format(
2 * np.int(dtype.name.strip("float"))))
# set ctype
ct_dt_map = {np.dtype(np.float32): ctypes.c_float,
np.dtype(np.float64): ctypes.c_double
}
# progress
if max_count is not None:
max_count.value += A + 2
ne.set_num_threads(num_cores)
uSin = np.array(uSin, copy=copy)
# lengths of the input data
lny, lnx = uSin.shape[1], uSin.shape[2]
# The z-size of the output array must match the x-size.
# The rotation is performed about the y-axis (lny).
ln = lnx
# We perform zero-padding before performing the Fourier transform.
# This gets rid of artifacts due to false periodicity and also
# speeds up Fourier transforms if the input image size is not
# a power of 2.
orderx = np.int(max(64., 2**np.ceil(np.log(lnx * padfac) / np.log(2))))
ordery = np.int(max(64., 2**np.ceil(np.log(lny * padfac) / np.log(2))))
if padding[0]:
padx = orderx - lnx
else:
padx = 0
if padding[1]:
pady = ordery - lny
else:
pady = 0
padyl = np.int(np.ceil(pady / 2))
padyr = pady - padyl
padxl = np.int(np.ceil(padx / 2))
padxr = padx - padxl
# zero-padded length of sinogram.
lNx, lNy = lnx + padx, lny + pady
lNz = ln
if verbose > 0:
print("......Image size (x,y): {}x{}, padded: {}x{}".format(
lnx, lny, lNx, lNy))
# Perform weighting
if weight_angles:
weights = util.compute_angle_weights_1d(angles).reshape(-1, 1, 1)
uSin *= weights
# Cut-Off frequency
# km [1/px]
km = (2 * np.pi * nm) / res
# Here, the notation for
# a wave propagating to the right is:
#
# u0(x) = exp(ikx)
#
# However, in physics usually we use the other sign convention:
#
# u0(x) = exp(-ikx)
#
# In order to be consistent with programs like Meep or our
# scattering script for a dielectric cylinder, we want to use the
# latter sign convention.
# This is not a big problem. We only need to multiply the imaginary
# part of the scattered wave by -1.
# Ask for the filter. Do not include zero (first element).
#
# Integrals over ϕ₀ [0,2π]; kx [-kₘ,kₘ]
# - double coverage factor 1/2 already included
# - unitary angular frequency to unitary ordinary frequency
# conversion performed in calculation of UB=FT(uB).
#
# f(r) = -i kₘ / ((2π)² a₀) (prefactor)
# * iiint dϕ₀ dkx dky (prefactor)
# * |kx| (prefactor)
# * exp(-i kₘ M lD ) (prefactor)
# * UBϕ₀(kx) (dependent on ϕ₀)
# * exp( i (kx t⊥ + kₘ (M - 1) s₀) r ) (dependent on ϕ₀ and r)
# (r and s₀ are vectors. The last term contains a dot-product)
#
# kₘM = sqrt( kₘ² - kx² - ky² )
# t⊥ = ( cos(ϕ₀), ky/kx, sin(ϕ₀) )
# s₀ = ( -sin(ϕ₀), 0 , cos(ϕ₀) )
#
# The filter can be split into two parts
#
# 1) part without dependence on the z-coordinate
#
# -i kₘ / ((2π)² a₀)
# * iiint dϕ₀ dkx dky
# * |kx|
# * exp(-i kₘ M lD )
#
# 2) part with dependence of the z-coordinate
#
# exp( i (kx t⊥ + kₘ (M - 1) s₀) r )
#
# The filter (1) can be performed using the classical filter process
# as in the backprojection algorithm.
#
#
# Corresponding sample frequencies
fx = np.fft.fftfreq(lNx) # 1D array
fy = np.fft.fftfreq(lNy) # 1D array
# kx is a 1D array.
kx = 2 * np.pi * fx
ky = 2 * np.pi * fy
# Differentials for integral
dphi0 = 2 * np.pi / A
# We will later multiply with phi0.
# y, x
kx = kx.reshape(1, -1)
ky = ky.reshape(-1, 1)
# Low-pass filter:
# less-than-or-equal would give us zero division error.
filter_klp = (kx**2 + ky**2 < km**2)
# Filter M so there are no nans from the root
M = 1. / km * np.sqrt((km**2 - kx**2 - ky**2) * filter_klp)
prefactor = -1j * km / (2 * np.pi)
prefactor *= dphi0
# Also filter the prefactor, so nothing outside the required
# low-pass contributes to the sum.
prefactor *= np.abs(kx) * filter_klp
# prefactor *= np.sqrt(((kx**2+ky**2)) * filter_klp )
# new in version 0.1.4:
# We multiply by the factor (M-1) instead of just (M)
# to take into account that we have a scattered
# wave that is normalized by u0.
prefactor *= np.exp(-1j * km * (M-1) * lD)
if count is not None:
count.value += 1
# filter (2) must be applied before rotation as well
# exp( i (kx t⊥ + kₘ (M - 1) s₀) r )
#
# kₘM = sqrt( kₘ² - kx² - ky² )
# t⊥ = ( cos(ϕ₀), ky/kx, sin(ϕ₀) )
# s₀ = ( -sin(ϕ₀), 0 , cos(ϕ₀) )
#
# This filter is effectively an inverse Fourier transform
#
# exp(i kx xD) exp(i ky yD) exp(i kₘ (M - 1) zD )
#
# xD = x cos(ϕ₀) + z sin(ϕ₀)
# zD = - x sin(ϕ₀) + z cos(ϕ₀)
# Everything is in pixels
center = lNz / 2.0
z = np.linspace(-center, center, lNz, endpoint=False)
zv = z.reshape(-1, 1, 1)
# z, y, x
Mp = M.reshape(lNy, lNx)
# filter2 = np.exp(1j * zv * km * (Mp - 1))
f2_exp_fac = 1j * km * (Mp - 1)
if save_memory:
# compute filter2 later
pass
else:
# compute filter2 now
filter2 = ne.evaluate("exp(factor * zv)",
local_dict={"factor": f2_exp_fac,
"zv": zv})
# occupies some amount of ram, but yields faster
# computation later
if count is not None:
count.value += 1
# Prepare complex output image
if onlyreal:
outarr = np.zeros((ln, lny, lnx), dtype=dtype)
else:
outarr = np.zeros((ln, lny, lnx), dtype=dtype_complex)
# Create plan for FFTW
# save memory by in-place operations
# projection = np.fft.fft2(sino, axes=(-1,-2)) * prefactor
# FFTW-flag is "estimate":
# specifies that, instead of actual measurements of different
# algorithms, a simple heuristic is used to pick a (probably
# sub-optimal) plan quickly. With this flag, the input/output
# arrays are not overwritten during planning.
# Byte-aligned arrays
oneslice = pyfftw.empty_aligned((lNy, lNx), dtype_complex)
myfftw_plan = pyfftw.FFTW(oneslice, oneslice, threads=num_cores,
flags=["FFTW_ESTIMATE"], axes=(0, 1))
# Create plan for IFFTW:
inarr = pyfftw.empty_aligned((lNy, lNx), dtype_complex)
# inarr[:] = (projection[0]*filter2)[0,:,:]
# plan is "patient":
# FFTW_PATIENT is like FFTW_MEASURE, but considers a wider range
# of algorithms and often produces a “more optimal” plan
# (especially for large transforms), but at the expense of
# several times longer planning time (especially for large
# transforms).
# print(inarr.flags)
myifftw_plan = pyfftw.FFTW(inarr, inarr, threads=num_cores,
axes=(0, 1),
direction="FFTW_BACKWARD",
flags=["FFTW_MEASURE"])
# Setup a shared array
shared_array = mp.RawArray(ct_dt_map[dtype], ln * lny * lnx)
arr = np.frombuffer(shared_array, dtype=dtype).reshape(ln, lny, lnx)
# Initialize the pool with the shared array
pool4loop = mp.Pool(processes=num_cores,
initializer=_init_worker,
initargs=(shared_array, (ln, lny, lnx), dtype))
# filtered projections in loop
filtered_proj = np.zeros((ln, lny, lnx), dtype=dtype_complex)
for aa in np.arange(A):
if padval is None:
oneslice[:] = np.pad(uSin[aa],
((padyl, padyr), (padxl, padxr)),
mode="edge")
else:
oneslice[:] = np.pad(uSin[aa],
((padyl, padyr), (padxl, padxr)),
mode="linear_ramp",
end_values=(padval,))
myfftw_plan.execute()
# normalize to (lNx * lNy) for FFTW and multiply with prefactor
oneslice *= prefactor / (lNx * lNy)
# 14x Speedup with fftw3 compared to numpy fft and
# memory reduction by a factor of 2!
# ifft will be computed in-place
for p in range(len(zv)):
if save_memory:
# compute filter2 here;
# this is comparatively slower than the other case
ne.evaluate("exp(factor * zvp) * projectioni",
local_dict={"zvp": zv[p],
"projectioni": oneslice,
"factor": f2_exp_fac},
out=inarr)
else:
# use universal functions
np.multiply(filter2[p], oneslice, out=inarr)
myifftw_plan.execute()
filtered_proj[p, :, :] = inarr[padyl:lny+padyl, padxl:lnx+padxl]
# resize image to original size
# The copy is necessary to prevent memory leakage.
arr[:] = filtered_proj.real
phi0 = np.rad2deg(angles[aa])
if not onlyreal:
filtered_proj_imag = filtered_proj.imag
_mprotate(phi0, lny, pool4loop, intp_order)
outarr.real += arr
if not onlyreal:
arr[:] = filtered_proj_imag
_mprotate(phi0, lny, pool4loop, intp_order)
outarr.imag += arr
if count is not None:
count.value += 1
pool4loop.terminate()
pool4loop.join()
_cleanup_worker()
return outarr |
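A call-signature sketch only, not a meaningful reconstruction: it assumes the odtbrain package is installed, feeds a placeholder complex sinogram through backpropagate_3d, and converts the result with odt_to_ri as suggested in the See Also section (the wavelength and medium values are made-up but plausible).

import numpy as np
import odtbrain

angles = np.linspace(0, 2 * np.pi, 180, endpoint=False)
sinogram = np.ones((180, 64, 64), dtype=complex)  # placeholder u_B / u_0 data
f = odtbrain.backpropagate_3d(uSin=sinogram, angles=angles, res=3.38, nm=1.334)
ri = odtbrain.odt_to_ri(f, res=3.38, nm=1.334)    # object function -> refractive index
print(ri.shape)  # (64, 64, 64)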
def replace_config(config, name):
'''Replace the top-level pipeline configurable object.
This investigates a number of sources, including
`external_stages_path` and `external_stages_modules` configuration
and `streamcorpus_pipeline.stages` entry points, and uses these to
find the actual :data:`sub_modules` for
:mod:`streamcorpus_pipeline`.
'''
global static_stages
if static_stages is None:
static_stages = PipelineStages()
stages = static_stages
if 'external_stages_path' in config:
path = config['external_stages_path']
if not os.path.isabs(path) and config.get('root_path'):
path = os.path.join(config['root_path'], path)
try:
stages.load_external_stages(config['external_stages_path'])
except IOError:
return streamcorpus_pipeline # let check_config re-raise this
if 'external_stages_modules' in config:
for mod in config['external_stages_modules']:
try:
stages.load_module_stages(mod)
except ImportError:
return streamcorpus_pipeline # let check_config re-raise this
else:
stages = static_stages
new_sub_modules = set(stage
for stage in stages.itervalues()
if hasattr(stage, 'config_name'))
return NewSubModules(streamcorpus_pipeline, new_sub_modules) | Replace the top-level pipeline configurable object.
This investigates a number of sources, including
`external_stages_path` and `external_stages_modules` configuration
and `streamcorpus_pipeline.stages` entry points, and uses these to
find the actual :data:`sub_modules` for
:mod:`streamcorpus_pipeline`. | Below is the the instruction that describes the task:
### Input:
Replace the top-level pipeline configurable object.
This investigates a number of sources, including
`external_stages_path` and `external_stages_modules` configuration
and `streamcorpus_pipeline.stages` entry points, and uses these to
find the actual :data:`sub_modules` for
:mod:`streamcorpus_pipeline`.
### Response:
def replace_config(config, name):
'''Replace the top-level pipeline configurable object.
This investigates a number of sources, including
`external_stages_path` and `external_stages_modules` configuration
and `streamcorpus_pipeline.stages` entry points, and uses these to
find the actual :data:`sub_modules` for
:mod:`streamcorpus_pipeline`.
'''
global static_stages
if static_stages is None:
static_stages = PipelineStages()
stages = static_stages
if 'external_stages_path' in config:
path = config['external_stages_path']
if not os.path.isabs(path) and config.get('root_path'):
path = os.path.join(config['root_path'], path)
try:
stages.load_external_stages(config['external_stages_path'])
except IOError:
return streamcorpus_pipeline # let check_config re-raise this
if 'external_stages_modules' in config:
for mod in config['external_stages_modules']:
try:
stages.load_module_stages(mod)
except ImportError:
return streamcorpus_pipeline # let check_config re-raise this
else:
stages = static_stages
new_sub_modules = set(stage
for stage in stages.itervalues()
if hasattr(stage, 'config_name'))
return NewSubModules(streamcorpus_pipeline, new_sub_modules) |
def _ret8(ins):
""" Returns from a procedure / function an 8bits value
"""
output = _8bit_oper(ins.quad[1])
output.append('#pragma opt require a')
output.append('jp %s' % str(ins.quad[2]))
return output | Returns from a procedure / function an 8bits value | Below is the the instruction that describes the task:
### Input:
Returns from a procedure / function an 8bits value
### Response:
def _ret8(ins):
""" Returns from a procedure / function an 8bits value
"""
output = _8bit_oper(ins.quad[1])
output.append('#pragma opt require a')
output.append('jp %s' % str(ins.quad[2]))
return output |
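The emitter above simply accumulates Z80 assembly text lines. A toy, framework-free illustration of that pattern (the operand lines and label below are made up for the example):

def emit_ret8(operand_lines, return_label):
    output = list(operand_lines)             # code leaving the 8-bit value in A
    output.append('#pragma opt require a')
    output.append('jp %s' % return_label)
    return output

print(emit_ret8(['ld a, 42'], '__EXIT_FUNCTION'))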
def children(self, p_todo, p_only_direct=False):
"""
Returns a list of child todos that the given todo (in)directly depends
on.
"""
children = \
self._depgraph.outgoing_neighbors(hash(p_todo), not p_only_direct)
return [self._tododict[child] for child in children] | Returns a list of child todos that the given todo (in)directly depends
on. | Below is the the instruction that describes the task:
### Input:
Returns a list of child todos that the given todo (in)directly depends
on.
### Response:
def children(self, p_todo, p_only_direct=False):
"""
Returns a list of child todos that the given todo (in)directly depends
on.
"""
children = \
self._depgraph.outgoing_neighbors(hash(p_todo), not p_only_direct)
return [self._tododict[child] for child in children] |
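The direct/indirect distinction above can be shown with a plain adjacency dict instead of the todo dependency graph; this sketch returns either the direct neighbors or the full transitive closure, depending on the flag.

def outgoing_neighbors(graph, node, only_direct=False):
    direct = set(graph.get(node, ()))
    if only_direct:
        return direct
    seen, stack = set(), list(direct)
    while stack:
        current = stack.pop()
        if current not in seen:
            seen.add(current)
            stack.extend(graph.get(current, ()))
    return seen

graph = {'a': ['b'], 'b': ['c'], 'c': []}
print(outgoing_neighbors(graph, 'a'))                    # {'b', 'c'}
print(outgoing_neighbors(graph, 'a', only_direct=True))  # {'b'}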
def is_businessdate(in_date):
"""
checks whether the provided date is a date
:param BusinessDate, int or float in_date:
:return bool:
"""
# Note: if the data range has been created from pace_xl, then all the dates are bank dates
# and here it remains to check the validity.
# !!! However, if the data has been read from json string via json.load() function
# it does not recognize that this numbers are bankdates, just considers them as integers
# therefore, additional check is useful here, first to convert the date if it is integer to BusinessDate,
# then check the validity.
# (as the parameter to this method should always be a BusinessDate)
if not isinstance(in_date, BaseDate):
try: # to be removed
in_date = BusinessDate(in_date)
except:
return False
y, m, d, = in_date.to_ymd()
return is_valid_ymd(y, m, d) | checks whether the provided date is a date
:param BusinessDate, int or float in_date:
:return bool: | Below is the the instruction that describes the task:
### Input:
checks whether the provided date is a date
:param BusinessDate, int or float in_date:
:return bool:
### Response:
def is_businessdate(in_date):
"""
checks whether the provided date is a date
:param BusinessDate, int or float in_date:
:return bool:
"""
# Note: if the data range has been created from pace_xl, then all the dates are bank dates
# and here it remains to check the validity.
# !!! However, if the data has been read from json string via json.load() function
# it does not recognize that this numbers are bankdates, just considers them as integers
# therefore, additional check is useful here, first to convert the date if it is integer to BusinessDate,
# then check the validity.
# (as the parameter to this method should always be a BusinessDate)
if not isinstance(in_date, BaseDate):
try: # to be removed
in_date = BusinessDate(in_date)
except:
return False
y, m, d, = in_date.to_ymd()
return is_valid_ymd(y, m, d) |
def convert(self, vroot, entry_variables):
"""
All functions are replaced with the same `new` function.
Args:
vroot (:obj:`Variable`): NNabla Variable
entry_variables (:obj:`Variable`): Entry variable from which the conversion starts.
"""
self.graph_info = GraphInfo(vroot)
self.entry_variables = entry_variables
cnt = 0
with nn.parameter_scope(self.name):
# Function loop in the forward order
for t, func in enumerate(self.graph_info.funcs):
if func.name == "BatchNormalization":
bn_func = func
# TODO: should deal with both?
if bn_func.info.args["batch_stat"] == False:
o = self._bn_linear_conversion(bn_func, cnt)
cnt += 1
continue
# Identity conversion
o = self._identity_conversion(func)
self.end_variable = o
return self.end_variable | All functions are replaced with the same `new` function.
Args:
vroot (:obj:`Variable`): NNabla Variable
entry_variables (:obj:`Variable`): Entry variable from which the conversion starts. | Below is the the instruction that describes the task:
### Input:
All functions are replaced with the same `new` function.
Args:
vroot (:obj:`Variable`): NNabla Variable
entry_variables (:obj:`Variable`): Entry variable from which the conversion starts.
### Response:
def convert(self, vroot, entry_variables):
"""
All functions are replaced with the same `new` function.
Args:
vroot (:obj:`Variable`): NNabla Variable
entry_variables (:obj:`Variable`): Entry variable from which the conversion starts.
"""
self.graph_info = GraphInfo(vroot)
self.entry_variables = entry_variables
cnt = 0
with nn.parameter_scope(self.name):
# Function loop in the forward order
for t, func in enumerate(self.graph_info.funcs):
if func.name == "BatchNormalization":
bn_func = func
# TODO: should deal with both?
if bn_func.info.args["batch_stat"] == False:
o = self._bn_linear_conversion(bn_func, cnt)
cnt += 1
continue
# Identity conversion
o = self._identity_conversion(func)
self.end_variable = o
return self.end_variable |
def get_init_kwargs(self):
"""
Generates keyword arguments for creating a new Docker client instance.
:return: Keyword arguments as defined through this configuration.
:rtype: dict
"""
init_kwargs = {}
for k in self.init_kwargs:
if k in self.core_property_set:
init_kwargs[k] = getattr(self, k)
elif k in self:
init_kwargs[k] = self[k]
return init_kwargs | Generates keyword arguments for creating a new Docker client instance.
:return: Keyword arguments as defined through this configuration.
:rtype: dict | Below is the the instruction that describes the task:
### Input:
Generates keyword arguments for creating a new Docker client instance.
:return: Keyword arguments as defined through this configuration.
:rtype: dict
### Response:
def get_init_kwargs(self):
"""
Generates keyword arguments for creating a new Docker client instance.
:return: Keyword arguments as defined through this configuration.
:rtype: dict
"""
init_kwargs = {}
for k in self.init_kwargs:
if k in self.core_property_set:
init_kwargs[k] = getattr(self, k)
elif k in self:
init_kwargs[k] = self[k]
return init_kwargs |
def current(self, value):
"""set current cursor position"""
current = min(max(self._min, value), self._max)
self._current = current
if current > self._stop :
self._stop = current
self._start = current-self._width
elif current < self._start :
self._start = current
self._stop = current + self._width
if abs(self._start - self._min) <= self._sticky_lenght :
self._start = self._min
if abs(self._stop - self._max) <= self._sticky_lenght :
self._stop = self._max | set current cursor position | Below is the the instruction that describes the task:
### Input:
set current cursor position
### Response:
def current(self, value):
"""set current cursor position"""
current = min(max(self._min, value), self._max)
self._current = current
if current > self._stop :
self._stop = current
self._start = current-self._width
elif current < self._start :
self._start = current
self._stop = current + self._width
if abs(self._start - self._min) <= self._sticky_lenght :
self._start = self._min
if abs(self._stop - self._max) <= self._sticky_lenght :
self._stop = self._max |
def _release_lock(self):
"""Release our lock if we have one"""
if not self._has_lock():
return
# if someone removed our file beforehand, let's just flag this issue
# instead of failing, to make it more usable.
lfp = self._lock_file_path()
try:
# on bloody windows, the file needs write permissions to be removable.
# Why ...
if os.name == 'nt':
os.chmod(lfp, 0777)
# END handle win32
os.remove(lfp)
except OSError:
pass
self._owns_lock = False | Release our lock if we have one | Below is the the instruction that describes the task:
### Input:
Release our lock if we have one
### Response:
def _release_lock(self):
"""Release our lock if we have one"""
if not self._has_lock():
return
# if someone removed our file beforehand, let's just flag this issue
# instead of failing, to make it more usable.
lfp = self._lock_file_path()
try:
# on bloody windows, the file needs write permissions to be removable.
# Why ...
if os.name == 'nt':
os.chmod(lfp, 0777)
# END handle win32
os.remove(lfp)
except OSError:
pass
self._owns_lock = False |
def trace(msg):
"""Print a trace message to stderr if environment variable is set.
"""
if os.environ.get('JARN_TRACE') == '1':
print('TRACE:', msg, file=sys.stderr) | Print a trace message to stderr if environment variable is set. | Below is the the instruction that describes the task:
### Input:
Print a trace message to stderr if environment variable is set.
### Response:
def trace(msg):
"""Print a trace message to stderr if environment variable is set.
"""
if os.environ.get('JARN_TRACE') == '1':
print('TRACE:', msg, file=sys.stderr) |
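A minimal standalone sketch of this environment-gated tracing pattern (standard library only; the messages are made up):

import os
import sys

def trace(msg):
    # Emit a trace line to stderr only while JARN_TRACE=1 is set.
    if os.environ.get('JARN_TRACE') == '1':
        print('TRACE:', msg, file=sys.stderr)

os.environ['JARN_TRACE'] = '1'
trace('loading configuration')       # -> "TRACE: loading configuration" on stderr
os.environ.pop('JARN_TRACE')
trace('this message is suppressed')  # no output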
def random_variants(
count,
genome_name="GRCh38",
deletions=True,
insertions=True,
random_seed=None):
"""
Generate a VariantCollection with random variants that overlap
at least one complete coding transcript.
"""
rng = random.Random(random_seed)
ensembl = genome_for_reference_name(genome_name)
if ensembl in _transcript_ids_cache:
transcript_ids = _transcript_ids_cache[ensembl]
else:
transcript_ids = ensembl.transcript_ids()
_transcript_ids_cache[ensembl] = transcript_ids
variants = []
# we should finish way before this loop is over but just in case
# something is wrong with PyEnsembl we want to avoid an infinite loop
for _ in range(count * 100):
if len(variants) < count:
transcript_id = rng.choice(transcript_ids)
transcript = ensembl.transcript_by_id(transcript_id)
if not transcript.complete:
continue
exon = rng.choice(transcript.exons)
base1_genomic_position = rng.randint(exon.start, exon.end)
transcript_offset = transcript.spliced_offset(base1_genomic_position)
seq = transcript.sequence
ref = str(seq[transcript_offset])
if transcript.on_backward_strand:
ref = reverse_complement(ref)
alt_nucleotides = [x for x in STANDARD_NUCLEOTIDES if x != ref]
if insertions:
nucleotide_pairs = [
x + y
for x in STANDARD_NUCLEOTIDES
for y in STANDARD_NUCLEOTIDES
]
alt_nucleotides.extend(nucleotide_pairs)
if deletions:
alt_nucleotides.append("")
alt = rng.choice(alt_nucleotides)
variant = Variant(
transcript.contig,
base1_genomic_position,
ref=ref,
alt=alt,
ensembl=ensembl)
variants.append(variant)
else:
return VariantCollection(variants)
raise ValueError(
("Unable to generate %d random variants, "
"there may be a problem with PyEnsembl") % count) | Generate a VariantCollection with random variants that overlap
at least one complete coding transcript. | Below is the the instruction that describes the task:
### Input:
Generate a VariantCollection with random variants that overlap
at least one complete coding transcript.
### Response:
def random_variants(
count,
genome_name="GRCh38",
deletions=True,
insertions=True,
random_seed=None):
"""
Generate a VariantCollection with random variants that overlap
at least one complete coding transcript.
"""
rng = random.Random(random_seed)
ensembl = genome_for_reference_name(genome_name)
if ensembl in _transcript_ids_cache:
transcript_ids = _transcript_ids_cache[ensembl]
else:
transcript_ids = ensembl.transcript_ids()
_transcript_ids_cache[ensembl] = transcript_ids
variants = []
# we should finish way before this loop is over but just in case
# something is wrong with PyEnsembl we want to avoid an infinite loop
for _ in range(count * 100):
if len(variants) < count:
transcript_id = rng.choice(transcript_ids)
transcript = ensembl.transcript_by_id(transcript_id)
if not transcript.complete:
continue
exon = rng.choice(transcript.exons)
base1_genomic_position = rng.randint(exon.start, exon.end)
transcript_offset = transcript.spliced_offset(base1_genomic_position)
seq = transcript.sequence
ref = str(seq[transcript_offset])
if transcript.on_backward_strand:
ref = reverse_complement(ref)
alt_nucleotides = [x for x in STANDARD_NUCLEOTIDES if x != ref]
if insertions:
nucleotide_pairs = [
x + y
for x in STANDARD_NUCLEOTIDES
for y in STANDARD_NUCLEOTIDES
]
alt_nucleotides.extend(nucleotide_pairs)
if deletions:
alt_nucleotides.append("")
alt = rng.choice(alt_nucleotides)
variant = Variant(
transcript.contig,
base1_genomic_position,
ref=ref,
alt=alt,
ensembl=ensembl)
variants.append(variant)
else:
return VariantCollection(variants)
raise ValueError(
("Unable to generate %d random variants, "
"there may be a problem with PyEnsembl") % count) |
def check_crystal_equivalence(crystal_a, crystal_b):
"""Function that identifies whether two crystals are equivalent"""
# getting symmetry datasets for both crystals
cryst_a = spglib.get_symmetry_dataset(ase_to_spgcell(crystal_a), symprec=1e-5, angle_tolerance=-1.0, hall_number=0)
cryst_b = spglib.get_symmetry_dataset(ase_to_spgcell(crystal_b), symprec=1e-5, angle_tolerance=-1.0, hall_number=0)
samecell = np.allclose(cryst_a['std_lattice'], cryst_b['std_lattice'], atol=1e-5)
samenatoms = len(cryst_a['std_positions']) == len(cryst_b['std_positions'])
samespg = cryst_a['number'] == cryst_b['number']
def test_rotations_translations(cryst_a, cryst_b, repeat):
cell = cryst_a['std_lattice']
pristine = crystal('Mg', [(0, 0., 0.)],
spacegroup=int(cryst_a['number']),
cellpar=[cell[0]/repeat[0], cell[1]/repeat[1], cell[2]/repeat[2]]).repeat(repeat)
sym_set_p = spglib.get_symmetry_dataset(ase_to_spgcell(pristine), symprec=1e-5,
angle_tolerance=-1.0, hall_number=0)
for _,trans in enumerate(zip(sym_set_p['rotations'], sym_set_p['translations'])):
pnew=(np.matmul(trans[0],cryst_a['std_positions'].T).T + trans[1]) % 1.0
fulln = np.concatenate([cryst_a['std_types'][:, None], pnew], axis=1)
fullb = np.concatenate([cryst_b['std_types'][:, None], cryst_b['std_positions']], axis=1)
sorted_n = np.array(sorted([ list(row) for row in list(fulln) ]))
sorted_b = np.array(sorted([ list(row) for row in list(fullb) ]))
if np.allclose(sorted_n, sorted_b, atol=1e-5):
return True
return False
if samecell and samenatoms and samespg:
cell = cryst_a['std_lattice']
# we assume there are no crystals with a lattice parameter smaller than 2 A
rng1 = range(1, int(norm(cell[0])/2.))
rng2 = range(1, int(norm(cell[1])/2.))
rng3 = range(1, int(norm(cell[2])/2.))
for repeat in itertools.product(rng1, rng2, rng3):
if test_rotations_translations(cryst_a, cryst_b, repeat):
return True
return False | Function that identifies whether two crystals are equivalent | Below is the the instruction that describes the task:
### Input:
Function that identifies whether two crystals are equivalent
### Response:
def check_crystal_equivalence(crystal_a, crystal_b):
"""Function that identifies whether two crystals are equivalent"""
# getting symmetry datasets for both crystals
cryst_a = spglib.get_symmetry_dataset(ase_to_spgcell(crystal_a), symprec=1e-5, angle_tolerance=-1.0, hall_number=0)
cryst_b = spglib.get_symmetry_dataset(ase_to_spgcell(crystal_b), symprec=1e-5, angle_tolerance=-1.0, hall_number=0)
samecell = np.allclose(cryst_a['std_lattice'], cryst_b['std_lattice'], atol=1e-5)
samenatoms = len(cryst_a['std_positions']) == len(cryst_b['std_positions'])
samespg = cryst_a['number'] == cryst_b['number']
def test_rotations_translations(cryst_a, cryst_b, repeat):
cell = cryst_a['std_lattice']
pristine = crystal('Mg', [(0, 0., 0.)],
spacegroup=int(cryst_a['number']),
cellpar=[cell[0]/repeat[0], cell[1]/repeat[1], cell[2]/repeat[2]]).repeat(repeat)
sym_set_p = spglib.get_symmetry_dataset(ase_to_spgcell(pristine), symprec=1e-5,
angle_tolerance=-1.0, hall_number=0)
for _,trans in enumerate(zip(sym_set_p['rotations'], sym_set_p['translations'])):
pnew=(np.matmul(trans[0],cryst_a['std_positions'].T).T + trans[1]) % 1.0
fulln = np.concatenate([cryst_a['std_types'][:, None], pnew], axis=1)
fullb = np.concatenate([cryst_b['std_types'][:, None], cryst_b['std_positions']], axis=1)
sorted_n = np.array(sorted([ list(row) for row in list(fulln) ]))
sorted_b = np.array(sorted([ list(row) for row in list(fullb) ]))
if np.allclose(sorted_n, sorted_b, atol=1e-5):
return True
return False
if samecell and samenatoms and samespg:
cell = cryst_a['std_lattice']
# we assume there are no crystals with a lattice parameter smaller than 2 A
rng1 = range(1, int(norm(cell[0])/2.))
rng2 = range(1, int(norm(cell[1])/2.))
rng3 = range(1, int(norm(cell[2])/2.))
for repeat in itertools.product(rng1, rng2, rng3):
if test_rotations_translations(cryst_a, cryst_b, repeat):
return True
return False |
def simple_moving_matrix(x, n=10):
"""
Create simple moving matrix.
Parameters
----------
x : ndarray
A numpy array
n : integer
The number of sample points used to make average
Returns
-------
ndarray
An n x n numpy array which will be useful for calculating confidential
interval of simple moving average
"""
if x.ndim > 1 and len(x[0]) > 1:
x = np.average(x, axis=1)
h = n / 2
o = 0 if h * 2 == n else 1
xx = []
for i in range(h, len(x) - h):
xx.append(x[i-h:i+h+o])
return np.array(xx) | Create simple moving matrix.
Parameters
----------
x : ndarray
A numpy array
n : integer
The number of sample points used to make average
Returns
-------
ndarray
An n x n numpy array which will be useful for calculating confidential
interval of simple moving average | Below is the the instruction that describes the task:
### Input:
Create simple moving matrix.
Parameters
----------
x : ndarray
A numpy array
n : integer
The number of sample points used to make average
Returns
-------
ndarray
An n x n numpy array which will be useful for calculating confidential
interval of simple moving average
### Response:
def simple_moving_matrix(x, n=10):
"""
Create simple moving matrix.
Parameters
----------
x : ndarray
A numpy array
n : integer
The number of sample points used to make average
Returns
-------
ndarray
An n x n numpy array which will be useful for calculating confidential
interval of simple moving average
"""
if x.ndim > 1 and len(x[0]) > 1:
x = np.average(x, axis=1)
h = n / 2
o = 0 if h * 2 == n else 1
xx = []
for i in range(h, len(x) - h):
xx.append(x[i-h:i+h+o])
return np.array(xx) |
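The same windowing idea as a standalone NumPy sketch; integer division (//) is used because the helper above relies on Python 2 division semantics:

import numpy as np

def moving_window_matrix(x, n=10):
    # One row per centre point, each row an n-wide slice of x.
    h = n // 2
    o = 0 if h * 2 == n else 1
    return np.array([x[i - h:i + h + o] for i in range(h, len(x) - h)])

x = np.arange(20, dtype=float)
w = moving_window_matrix(x, n=4)
print(w.shape)             # (16, 4)
print(w.mean(axis=1)[:3])  # the first few simple moving averages: [1.5 2.5 3.5]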
def char(i):
"""Get image data for the character `i` (a one character string).
Returned as a list of rows. Each row is a tuple containing the
packed pixels.
"""
i = ord(i)
if i not in font:
return [(0,)] * 8
return [(ord(row),) for row in font[i].decode('hex')] | Get image data for the character `i` (a one character string).
Returned as a list of rows. Each row is a tuple containing the
packed pixels. | Below is the the instruction that describes the task:
### Input:
Get image data for the character `i` (a one character string).
Returned as a list of rows. Each row is a tuple containing the
packed pixels.
### Response:
def char(i):
"""Get image data for the character `i` (a one character string).
Returned as a list of rows. Each row is a tuple containing the
packed pixels.
"""
i = ord(i)
if i not in font:
return [(0,)] * 8
return [(ord(row),) for row in font[i].decode('hex')] |
def casefold_with_i_dots(text):
"""
Convert capital I's and capital dotted İ's to lowercase in the way
that's appropriate for Turkish and related languages, then case-fold
the rest of the letters.
"""
text = unicodedata.normalize('NFC', text).replace('İ', 'i').replace('I', 'ı')
return text.casefold() | Convert capital I's and capital dotted İ's to lowercase in the way
that's appropriate for Turkish and related languages, then case-fold
the rest of the letters. | Below is the the instruction that describes the task:
### Input:
Convert capital I's and capital dotted İ's to lowercase in the way
that's appropriate for Turkish and related languages, then case-fold
the rest of the letters.
### Response:
def casefold_with_i_dots(text):
"""
Convert capital I's and capital dotted İ's to lowercase in the way
that's appropriate for Turkish and related languages, then case-fold
the rest of the letters.
"""
text = unicodedata.normalize('NFC', text).replace('İ', 'i').replace('I', 'ı')
return text.casefold() |
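A short standalone illustration of the dotted/dotless I handling (standard library only; the sample word is arbitrary):

import unicodedata

def casefold_turkish(text):
    # Map İ -> i and I -> ı before the generic casefold, as above.
    text = unicodedata.normalize('NFC', text).replace('İ', 'i').replace('I', 'ı')
    return text.casefold()

print(casefold_turkish('DİYARBAKIR'))  # diyarbakır
print('DİYARBAKIR'.casefold())         # di̇yarbakir -- plain casefold gets both I's wrong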
def query_publishers(self, publisher_query):
"""QueryPublishers.
[Preview API]
:param :class:`<PublisherQuery> <azure.devops.v5_0.gallery.models.PublisherQuery>` publisher_query:
:rtype: :class:`<PublisherQueryResult> <azure.devops.v5_0.gallery.models.PublisherQueryResult>`
"""
content = self._serialize.body(publisher_query, 'PublisherQuery')
response = self._send(http_method='POST',
location_id='2ad6ee0a-b53f-4034-9d1d-d009fda1212e',
version='5.0-preview.1',
content=content)
return self._deserialize('PublisherQueryResult', response) | QueryPublishers.
[Preview API]
:param :class:`<PublisherQuery> <azure.devops.v5_0.gallery.models.PublisherQuery>` publisher_query:
:rtype: :class:`<PublisherQueryResult> <azure.devops.v5_0.gallery.models.PublisherQueryResult>` | Below is the the instruction that describes the task:
### Input:
QueryPublishers.
[Preview API]
:param :class:`<PublisherQuery> <azure.devops.v5_0.gallery.models.PublisherQuery>` publisher_query:
:rtype: :class:`<PublisherQueryResult> <azure.devops.v5_0.gallery.models.PublisherQueryResult>`
### Response:
def query_publishers(self, publisher_query):
"""QueryPublishers.
[Preview API]
:param :class:`<PublisherQuery> <azure.devops.v5_0.gallery.models.PublisherQuery>` publisher_query:
:rtype: :class:`<PublisherQueryResult> <azure.devops.v5_0.gallery.models.PublisherQueryResult>`
"""
content = self._serialize.body(publisher_query, 'PublisherQuery')
response = self._send(http_method='POST',
location_id='2ad6ee0a-b53f-4034-9d1d-d009fda1212e',
version='5.0-preview.1',
content=content)
return self._deserialize('PublisherQueryResult', response) |
def addFilter(self, *lstFilters, **dctFilters) :
"add a new filter to the query"
dstF = {}
if len(lstFilters) > 0 :
if type(lstFilters[0]) is types.DictType :
dstF = lstFilters[0]
lstFilters = lstFilters[1:]
if len(dctFilters) > 0 :
dstF = dict(dstF, **dctFilters)
filts = {}
for k, v in dstF.iteritems() :
sk = k.split(' ')
if len(sk) == 2 :
operator = sk[-1].strip().upper()
if operator not in self.operators :
raise ValueError('Unrecognized operator "%s"' % operator)
kk = '%s.%s'% (self.rabaClass.__name__, k)
elif len(sk) == 1 :
operator = "="
kk = '%s.%s ='% (self.rabaClass.__name__, k)
else :
raise ValueError('Invalid field %s' % k)
if isRabaObject(v) :
vv = v.getJsonEncoding()
else :
vv = v
if sk[0].find('.') > -1 :
kk = self._parseJoint(sk[0], operator)
filts[kk] = vv
for lt in lstFilters :
for l in lt :
match = self.fieldPattern.match(l)
if match == None :
raise ValueError("RabaQuery Error: Invalid filter '%s'" % l)
field = match.group(1)
operator = match.group(2)
value = match.group(4)
if field.find('.') > -1 :
joink = self._parseJoint(field, operator, value)
filts[joink] = value
else :
filts['%s.%s %s' %(self.rabaClass.__name__, field, operator)] = value
self.filters.append(filts) | add a new filter to the query | Below is the the instruction that describes the task:
### Input:
add a new filter to the query
### Response:
def addFilter(self, *lstFilters, **dctFilters) :
"add a new filter to the query"
dstF = {}
if len(lstFilters) > 0 :
if type(lstFilters[0]) is types.DictType :
dstF = lstFilters[0]
lstFilters = lstFilters[1:]
if len(dctFilters) > 0 :
dstF = dict(dstF, **dctFilters)
filts = {}
for k, v in dstF.iteritems() :
sk = k.split(' ')
if len(sk) == 2 :
operator = sk[-1].strip().upper()
if operator not in self.operators :
raise ValueError('Unrecognized operator "%s"' % operator)
kk = '%s.%s'% (self.rabaClass.__name__, k)
elif len(sk) == 1 :
operator = "="
kk = '%s.%s ='% (self.rabaClass.__name__, k)
else :
raise ValueError('Invalid field %s' % k)
if isRabaObject(v) :
vv = v.getJsonEncoding()
else :
vv = v
if sk[0].find('.') > -1 :
kk = self._parseJoint(sk[0], operator)
filts[kk] = vv
for lt in lstFilters :
for l in lt :
match = self.fieldPattern.match(l)
if match == None :
raise ValueError("RabaQuery Error: Invalid filter '%s'" % l)
field = match.group(1)
operator = match.group(2)
value = match.group(4)
if field.find('.') > -1 :
joink = self._parseJoint(field, operator, value)
filts[joink] = value
else :
filts['%s.%s %s' %(self.rabaClass.__name__, field, operator)] = value
self.filters.append(filts) |
def trigger_actions(self, subsystem):
"""
Refresh all modules which subscribed to the given subsystem.
"""
for py3_module, trigger_action in self.udev_consumers[subsystem]:
if trigger_action in ON_TRIGGER_ACTIONS:
self.py3_wrapper.log(
"%s udev event, refresh consumer %s"
% (subsystem, py3_module.module_full_name)
)
py3_module.force_update() | Refresh all modules which subscribed to the given subsystem. | Below is the the instruction that describes the task:
### Input:
Refresh all modules which subscribed to the given subsystem.
### Response:
def trigger_actions(self, subsystem):
"""
Refresh all modules which subscribed to the given subsystem.
"""
for py3_module, trigger_action in self.udev_consumers[subsystem]:
if trigger_action in ON_TRIGGER_ACTIONS:
self.py3_wrapper.log(
"%s udev event, refresh consumer %s"
% (subsystem, py3_module.module_full_name)
)
py3_module.force_update() |
def post_status(self, body="", id="", parentid="", stashid=""):
"""Post a status
:param body: The body of the status
:param id: The id of the object you wish to share
:param parentid: The parentid of the object you wish to share
:param stashid: The stashid of the object you wish to add to the status
"""
if self.standard_grant_type is not "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req('/user/statuses/post', post_data={
"body":body,
"id":id,
"parentid":parentid,
"stashid":stashid
})
return response['statusid'] | Post a status
:param body: The body of the status
:param id: The id of the object you wish to share
:param parentid: The parentid of the object you wish to share
:param stashid: The stashid of the object you wish to add to the status | Below is the the instruction that describes the task:
### Input:
Post a status
:param body: The body of the status
:param id: The id of the object you wish to share
:param parentid: The parentid of the object you wish to share
:param stashid: The stashid of the object you wish to add to the status
### Response:
def post_status(self, body="", id="", parentid="", stashid=""):
"""Post a status
:param body: The body of the status
:param id: The id of the object you wish to share
:param parentid: The parentid of the object you wish to share
:param stashid: The stashid of the object you wish to add to the status
"""
if self.standard_grant_type is not "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req('/user/statuses/post', post_data={
"body":body,
"id":id,
"parentid":parentid,
"stashid":stashid
})
return response['statusid'] |
def make_clusters(span_tree, cut_value):
""" Find clusters from the spanning tree
Parameters
----------
span_tree : a sparse nsrcs x nsrcs array
Filled with zeros except for the active edges, which are filled with the
edge measures (either distances or sigmas)
cut_value : float
Value used to cluster group. All links with measures above this value will be cut.
returns dict(int:[int,...])
A dictionary of clusters. Each cluster is a source index and the list of other sources in the cluster.
"""
iv0, iv1 = span_tree.nonzero()
# This is the dictionary of all the pairings for each source
match_dict = {}
for i0, i1 in zip(iv0, iv1):
d = span_tree[i0, i1]
# Cut on the link distance
if d > cut_value:
continue
imin = int(min(i0, i1))
imax = int(max(i0, i1))
if imin in match_dict:
match_dict[imin][imax] = True
else:
match_dict[imin] = {imax: True}
working = True
while working:
working = False
rev_dict = make_rev_dict_unique(match_dict)
k_sort = rev_dict.keys()
k_sort.sort()
for k in k_sort:
v = rev_dict[k]
# Multiple mappings
if len(v) > 1:
working = True
v_sort = v.keys()
v_sort.sort()
cluster_idx = v_sort[0]
for vv in v_sort[1:]:
try:
to_merge = match_dict.pop(vv)
except:
continue
try:
match_dict[cluster_idx].update(to_merge)
match_dict[cluster_idx][vv] = True
except:
continue
# remove self references
try:
match_dict[cluster_idx].pop(cluster_idx)
except:
pass
# Convert to a int:list dictionary
cdict = {}
for k, v in match_dict.items():
cdict[k] = v.keys()
# make the reverse dictionary
rdict = make_reverse_dict(cdict)
return cdict, rdict | Find clusters from the spanning tree
Parameters
----------
span_tree : a sparse nsrcs x nsrcs array
Filled with zeros except for the active edges, which are filled with the
edge measures (either distances or sigmas)
cut_value : float
Value used to cluster group. All links with measures above this value will be cut.
returns dict(int:[int,...])
A dictionary of clusters. Each cluster is a source index and the list of other sources in the cluster. | Below is the the instruction that describes the task:
### Input:
Find clusters from the spanning tree
Parameters
----------
span_tree : a sparse nsrcs x nsrcs array
Filled with zeros except for the active edges, which are filled with the
edge measures (either distances or sigmas)
cut_value : float
Value used to cluster group. All links with measures above this value will be cut.
returns dict(int:[int,...])
A dictionary of clusters. Each cluster is a source index and the list of other sources in the cluster.
### Response:
def make_clusters(span_tree, cut_value):
""" Find clusters from the spanning tree
Parameters
----------
span_tree : a sparse nsrcs x nsrcs array
Filled with zeros except for the active edges, which are filled with the
edge measures (either distances or sigmas)
cut_value : float
Value used to cluster group. All links with measures above this value will be cut.
returns dict(int:[int,...])
A dictionary of clusters. Each cluster is a source index and the list of other sources in the cluster.
"""
iv0, iv1 = span_tree.nonzero()
# This is the dictionary of all the pairings for each source
match_dict = {}
for i0, i1 in zip(iv0, iv1):
d = span_tree[i0, i1]
# Cut on the link distance
if d > cut_value:
continue
imin = int(min(i0, i1))
imax = int(max(i0, i1))
if imin in match_dict:
match_dict[imin][imax] = True
else:
match_dict[imin] = {imax: True}
working = True
while working:
working = False
rev_dict = make_rev_dict_unique(match_dict)
k_sort = rev_dict.keys()
k_sort.sort()
for k in k_sort:
v = rev_dict[k]
# Multiple mappings
if len(v) > 1:
working = True
v_sort = v.keys()
v_sort.sort()
cluster_idx = v_sort[0]
for vv in v_sort[1:]:
try:
to_merge = match_dict.pop(vv)
except:
continue
try:
match_dict[cluster_idx].update(to_merge)
match_dict[cluster_idx][vv] = True
except:
continue
# remove self references
try:
match_dict[cluster_idx].pop(cluster_idx)
except:
pass
# Convert to a int:list dictionary
cdict = {}
for k, v in match_dict.items():
cdict[k] = v.keys()
# make the reverse dictionary
rdict = make_reverse_dict(cdict)
return cdict, rdict |
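To make the spanning-tree cut concrete, a small self-contained SciPy sketch (the distances are invented for illustration):

import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import minimum_spanning_tree

# Four sources: 0-1 and 2-3 are close, the two pairs are far apart.
dist = np.array([[0., 1., 5., 6.],
                 [1., 0., 6., 5.],
                 [5., 6., 0., 1.],
                 [6., 5., 1., 0.]])
span_tree = minimum_spanning_tree(csr_matrix(dist))
cut_value = 2.0
iv0, iv1 = span_tree.nonzero()
# Keep only edges whose measure survives the cut.
kept = [(int(i), int(j)) for i, j in zip(iv0, iv1) if span_tree[i, j] <= cut_value]
print(kept)  # [(0, 1), (2, 3)] -> two clusters of two sources each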
def to_netcdf(dataset, path_or_file=None, mode='w', format=None, group=None,
engine=None, encoding=None, unlimited_dims=None, compute=True,
multifile=False):
"""This function creates an appropriate datastore for writing a dataset to
disk as a netCDF file
See `Dataset.to_netcdf` for full API docs.
The ``multifile`` argument is only for the private use of save_mfdataset.
"""
if isinstance(path_or_file, Path):
path_or_file = str(path_or_file)
if encoding is None:
encoding = {}
if path_or_file is None:
if engine is None:
engine = 'scipy'
elif engine != 'scipy':
raise ValueError('invalid engine for creating bytes with '
'to_netcdf: %r. Only the default engine '
"or engine='scipy' is supported" % engine)
if not compute:
raise NotImplementedError(
'to_netcdf() with compute=False is not yet implemented when '
'returning bytes')
elif isinstance(path_or_file, str):
if engine is None:
engine = _get_default_engine(path_or_file)
path_or_file = _normalize_path(path_or_file)
else: # file-like object
engine = 'scipy'
# validate Dataset keys, DataArray names, and attr keys/values
_validate_dataset_names(dataset)
_validate_attrs(dataset)
try:
store_open = WRITEABLE_STORES[engine]
except KeyError:
raise ValueError('unrecognized engine for to_netcdf: %r' % engine)
if format is not None:
format = format.upper()
# handle scheduler specific logic
scheduler = _get_scheduler()
have_chunks = any(v.chunks for v in dataset.variables.values())
autoclose = have_chunks and scheduler in ['distributed', 'multiprocessing']
if autoclose and engine == 'scipy':
raise NotImplementedError("Writing netCDF files with the %s backend "
"is not currently supported with dask's %s "
"scheduler" % (engine, scheduler))
target = path_or_file if path_or_file is not None else BytesIO()
kwargs = dict(autoclose=True) if autoclose else {}
store = store_open(target, mode, format, group, **kwargs)
if unlimited_dims is None:
unlimited_dims = dataset.encoding.get('unlimited_dims', None)
if isinstance(unlimited_dims, str):
unlimited_dims = [unlimited_dims]
writer = ArrayWriter()
# TODO: figure out how to refactor this logic (here and in save_mfdataset)
# to avoid this mess of conditionals
try:
# TODO: allow this work (setting up the file for writing array data)
# to be parallelized with dask
dump_to_store(dataset, store, writer, encoding=encoding,
unlimited_dims=unlimited_dims)
if autoclose:
store.close()
if multifile:
return writer, store
writes = writer.sync(compute=compute)
if path_or_file is None:
store.sync()
return target.getvalue()
finally:
if not multifile and compute:
store.close()
if not compute:
import dask
return dask.delayed(_finalize_store)(writes, store) | This function creates an appropriate datastore for writing a dataset to
disk as a netCDF file
See `Dataset.to_netcdf` for full API docs.
The ``multifile`` argument is only for the private use of save_mfdataset. | Below is the the instruction that describes the task:
### Input:
This function creates an appropriate datastore for writing a dataset to
disk as a netCDF file
See `Dataset.to_netcdf` for full API docs.
The ``multifile`` argument is only for the private use of save_mfdataset.
### Response:
def to_netcdf(dataset, path_or_file=None, mode='w', format=None, group=None,
engine=None, encoding=None, unlimited_dims=None, compute=True,
multifile=False):
"""This function creates an appropriate datastore for writing a dataset to
disk as a netCDF file
See `Dataset.to_netcdf` for full API docs.
The ``multifile`` argument is only for the private use of save_mfdataset.
"""
if isinstance(path_or_file, Path):
path_or_file = str(path_or_file)
if encoding is None:
encoding = {}
if path_or_file is None:
if engine is None:
engine = 'scipy'
elif engine != 'scipy':
raise ValueError('invalid engine for creating bytes with '
'to_netcdf: %r. Only the default engine '
"or engine='scipy' is supported" % engine)
if not compute:
raise NotImplementedError(
'to_netcdf() with compute=False is not yet implemented when '
'returning bytes')
elif isinstance(path_or_file, str):
if engine is None:
engine = _get_default_engine(path_or_file)
path_or_file = _normalize_path(path_or_file)
else: # file-like object
engine = 'scipy'
# validate Dataset keys, DataArray names, and attr keys/values
_validate_dataset_names(dataset)
_validate_attrs(dataset)
try:
store_open = WRITEABLE_STORES[engine]
except KeyError:
raise ValueError('unrecognized engine for to_netcdf: %r' % engine)
if format is not None:
format = format.upper()
# handle scheduler specific logic
scheduler = _get_scheduler()
have_chunks = any(v.chunks for v in dataset.variables.values())
autoclose = have_chunks and scheduler in ['distributed', 'multiprocessing']
if autoclose and engine == 'scipy':
raise NotImplementedError("Writing netCDF files with the %s backend "
"is not currently supported with dask's %s "
"scheduler" % (engine, scheduler))
target = path_or_file if path_or_file is not None else BytesIO()
kwargs = dict(autoclose=True) if autoclose else {}
store = store_open(target, mode, format, group, **kwargs)
if unlimited_dims is None:
unlimited_dims = dataset.encoding.get('unlimited_dims', None)
if isinstance(unlimited_dims, str):
unlimited_dims = [unlimited_dims]
writer = ArrayWriter()
# TODO: figure out how to refactor this logic (here and in save_mfdataset)
# to avoid this mess of conditionals
try:
# TODO: allow this work (setting up the file for writing array data)
# to be parallelized with dask
dump_to_store(dataset, store, writer, encoding=encoding,
unlimited_dims=unlimited_dims)
if autoclose:
store.close()
if multifile:
return writer, store
writes = writer.sync(compute=compute)
if path_or_file is None:
store.sync()
return target.getvalue()
finally:
if not multifile and compute:
store.close()
if not compute:
import dask
return dask.delayed(_finalize_store)(writes, store) |
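For context, the public entry point wrapping this is xarray's Dataset.to_netcdf; a hedged usage sketch (requires xarray plus a netCDF backend such as scipy; the file name is arbitrary):

import numpy as np
import xarray as xr

ds = xr.Dataset({'t': (('x',), np.arange(5.0))}, coords={'x': np.arange(5)})
ds.to_netcdf('example.nc')  # engine chosen automatically from what is installed
raw = ds.to_netcdf()        # no path: the scipy engine returns the file as bytes
print(type(raw), len(raw) > 0)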
def interpret_header(self):
"""
Read pertinent information from the image headers,
especially location and radius of the Sun to calculate the default thematic map
:return: sets self.date, self.cy, self.cx, and self.sun_radius_pixel
"""
# handle special cases since date-obs field changed names
if 'DATE_OBS' in self.header:
self.date = self.header['DATE_OBS']
elif 'DATE-OBS' in self.header:
self.date = self.header['DATE-OBS']
else:
raise Exception("Image does not have a DATE_OBS or DATE-OBS field")
self.cy, self.cx = self.header['CRPIX1'], self.header['CRPIX2']
sun_radius_angular = sun.solar_semidiameter_angular_size(t=time.parse_time(self.date)).arcsec
arcsec_per_pixel = self.header['CDELT1']
self.sun_radius_pixel = (sun_radius_angular / arcsec_per_pixel) | Read pertinent information from the image headers,
especially location and radius of the Sun to calculate the default thematic map
:return: sets self.date, self.cy, self.cx, and self.sun_radius_pixel | Below is the the instruction that describes the task:
### Input:
Read pertinent information from the image headers,
especially location and radius of the Sun to calculate the default thematic map
:return: sets self.date, self.cy, self.cx, and self.sun_radius_pixel
### Response:
def interpret_header(self):
"""
Read pertinent information from the image headers,
especially location and radius of the Sun to calculate the default thematic map
:return: sets self.date, self.cy, self.cx, and self.sun_radius_pixel
"""
# handle special cases since date-obs field changed names
if 'DATE_OBS' in self.header:
self.date = self.header['DATE_OBS']
elif 'DATE-OBS' in self.header:
self.date = self.header['DATE-OBS']
else:
raise Exception("Image does not have a DATE_OBS or DATE-OBS field")
self.cy, self.cx = self.header['CRPIX1'], self.header['CRPIX2']
sun_radius_angular = sun.solar_semidiameter_angular_size(t=time.parse_time(self.date)).arcsec
arcsec_per_pixel = self.header['CDELT1']
self.sun_radius_pixel = (sun_radius_angular / arcsec_per_pixel) |
def _computeStatus(self, dfile, service):
"""Computes status for file, basically this means if
more than one service handles the file, it will place
a 'C' (for complicated) otherwise if status matches
between all services, will place that status"""
# If only one service requested
if service:
if not dfile['services'].has_key(service):
return self.ST_UNTRACKED
else:
return dfile['services'][service]['status']
# Otherwise go through all services and compute
# a sensible status
first_service_key=dfile['services'].keys()[0]
# Save off one of the statuses so we can compute
# if they are all the same between services.
first_status=dfile['services'][first_service_key]['status']
all_status_match=True
# Return ST_COMPLICATED "C" if status
# differs
for service in dfile['services']:
if dfile['services'][service]['status']!=first_status:
return self.ST_COMPLICATED
return first_status | Computes status for file, basically this means if
more than one service handles the file, it will place
a 'C' (for complicated) otherwise if status matches
between all services, will place that status | Below is the the instruction that describes the task:
### Input:
Computes status for file, basically this means if
more than one service handles the file, it will place
a 'C' (for complicated) otherwise if status matches
between all services, will place that status
### Response:
def _computeStatus(self, dfile, service):
"""Computes status for file, basically this means if
more than one service handles the file, it will place
a 'C' (for complicated) otherwise if status matches
between all services, will place that status"""
# If only one service requested
if service:
if not dfile['services'].has_key(service):
return self.ST_UNTRACKED
else:
return dfile['services'][service]['status']
# Otherwise go through all services and compute
# a sensible status
first_service_key=dfile['services'].keys()[0]
# Save off one of the statuses so we can compute
# if they are all the same between services.
first_status=dfile['services'][first_service_key]['status']
all_status_match=True
# Return ST_COMPLICATED "C" if status
# differs
for service in dfile['services']:
if dfile['services'][service]['status']!=first_status:
return self.ST_COMPLICATED
return first_status |
def _handle_state_change_msg(self, new_helper):
"""Called when state change is commanded by stream manager"""
assert self.my_pplan_helper is not None
assert self.my_instance is not None and self.my_instance.py_class is not None
if self.my_pplan_helper.get_topology_state() != new_helper.get_topology_state():
# handle state change
# update the pplan_helper
self.my_pplan_helper = new_helper
if new_helper.is_topology_running():
if not self.is_instance_started:
self.start_instance_if_possible()
self.my_instance.py_class.invoke_activate()
elif new_helper.is_topology_paused():
self.my_instance.py_class.invoke_deactivate()
else:
raise RuntimeError("Unexpected TopologyState update: %s" % new_helper.get_topology_state())
else:
Log.info("Topology state remains the same.") | Called when state change is commanded by stream manager | Below is the the instruction that describes the task:
### Input:
Called when state change is commanded by stream manager
### Response:
def _handle_state_change_msg(self, new_helper):
"""Called when state change is commanded by stream manager"""
assert self.my_pplan_helper is not None
assert self.my_instance is not None and self.my_instance.py_class is not None
if self.my_pplan_helper.get_topology_state() != new_helper.get_topology_state():
# handle state change
# update the pplan_helper
self.my_pplan_helper = new_helper
if new_helper.is_topology_running():
if not self.is_instance_started:
self.start_instance_if_possible()
self.my_instance.py_class.invoke_activate()
elif new_helper.is_topology_paused():
self.my_instance.py_class.invoke_deactivate()
else:
raise RuntimeError("Unexpected TopologyState update: %s" % new_helper.get_topology_state())
else:
Log.info("Topology state remains the same.") |
def is_valid_group(group_name, nova_creds):
"""
Checks to see if the configuration file contains a SUPERNOVA_GROUP
configuration option.
"""
valid_groups = []
for key, value in nova_creds.items():
supernova_groups = value.get('SUPERNOVA_GROUP', [])
if hasattr(supernova_groups, 'startswith'):
supernova_groups = [supernova_groups]
valid_groups.extend(supernova_groups)
valid_groups.append('all')
if group_name in valid_groups:
return True
else:
return False | Checks to see if the configuration file contains a SUPERNOVA_GROUP
configuration option. | Below is the the instruction that describes the task:
### Input:
Checks to see if the configuration file contains a SUPERNOVA_GROUP
configuration option.
### Response:
def is_valid_group(group_name, nova_creds):
"""
Checks to see if the configuration file contains a SUPERNOVA_GROUP
configuration option.
"""
valid_groups = []
for key, value in nova_creds.items():
supernova_groups = value.get('SUPERNOVA_GROUP', [])
if hasattr(supernova_groups, 'startswith'):
supernova_groups = [supernova_groups]
valid_groups.extend(supernova_groups)
valid_groups.append('all')
if group_name in valid_groups:
return True
else:
return False |
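A quick sketch of the behaviour, assuming the function above is in scope; the credentials mapping is hypothetical:

nova_creds = {
    'prod': {'OS_USERNAME': 'alice', 'SUPERNOVA_GROUP': 'rackspace'},
    'staging': {'OS_USERNAME': 'alice', 'SUPERNOVA_GROUP': ['rackspace', 'test']},
    'personal': {'OS_USERNAME': 'bob'},  # no group configured
}
print(is_valid_group('rackspace', nova_creds))  # True
print(is_valid_group('all', nova_creds))        # True ('all' is always accepted)
print(is_valid_group('missing', nova_creds))    # False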
def _draw_fold_indicator(self, top, mouse_over, collapsed, painter):
"""
Draw the fold indicator/trigger (arrow).
:param top: Top position
:param mouse_over: Whether the mouse is over the indicator
:param collapsed: Whether the trigger is collapsed or not.
:param painter: QPainter
"""
rect = QtCore.QRect(0, top, self.sizeHint().width(),
self.sizeHint().height())
if self._native:
if os.environ['QT_API'].lower() not in PYQT5_API:
opt = QtGui.QStyleOptionViewItemV2()
else:
opt = QtWidgets.QStyleOptionViewItem()
opt.rect = rect
opt.state = (QtWidgets.QStyle.State_Active |
QtWidgets.QStyle.State_Item |
QtWidgets.QStyle.State_Children)
if not collapsed:
opt.state |= QtWidgets.QStyle.State_Open
if mouse_over:
opt.state |= (QtWidgets.QStyle.State_MouseOver |
QtWidgets.QStyle.State_Enabled |
QtWidgets.QStyle.State_Selected)
opt.palette.setBrush(QtGui.QPalette.Window,
self.palette().highlight())
opt.rect.translate(-2, 0)
self.style().drawPrimitive(QtWidgets.QStyle.PE_IndicatorBranch,
opt, painter, self)
else:
index = 0
if not collapsed:
index = 2
if mouse_over:
index += 1
QtGui.QIcon(self._custom_indicators[index]).paint(painter, rect) | Draw the fold indicator/trigger (arrow).
:param top: Top position
:param mouse_over: Whether the mouse is over the indicator
:param collapsed: Whether the trigger is collapsed or not.
:param painter: QPainter | Below is the the instruction that describes the task:
### Input:
Draw the fold indicator/trigger (arrow).
:param top: Top position
:param mouse_over: Whether the mouse is over the indicator
:param collapsed: Whether the trigger is collapsed or not.
:param painter: QPainter
### Response:
def _draw_fold_indicator(self, top, mouse_over, collapsed, painter):
"""
Draw the fold indicator/trigger (arrow).
:param top: Top position
:param mouse_over: Whether the mouse is over the indicator
:param collapsed: Whether the trigger is collapsed or not.
:param painter: QPainter
"""
rect = QtCore.QRect(0, top, self.sizeHint().width(),
self.sizeHint().height())
if self._native:
if os.environ['QT_API'].lower() not in PYQT5_API:
opt = QtGui.QStyleOptionViewItemV2()
else:
opt = QtWidgets.QStyleOptionViewItem()
opt.rect = rect
opt.state = (QtWidgets.QStyle.State_Active |
QtWidgets.QStyle.State_Item |
QtWidgets.QStyle.State_Children)
if not collapsed:
opt.state |= QtWidgets.QStyle.State_Open
if mouse_over:
opt.state |= (QtWidgets.QStyle.State_MouseOver |
QtWidgets.QStyle.State_Enabled |
QtWidgets.QStyle.State_Selected)
opt.palette.setBrush(QtGui.QPalette.Window,
self.palette().highlight())
opt.rect.translate(-2, 0)
self.style().drawPrimitive(QtWidgets.QStyle.PE_IndicatorBranch,
opt, painter, self)
else:
index = 0
if not collapsed:
index = 2
if mouse_over:
index += 1
QtGui.QIcon(self._custom_indicators[index]).paint(painter, rect) |
def get_spark_context(conf=None):
"""
Get the current active spark context and create one if no active instance
:param conf: combining bigdl configs into spark conf
:return: SparkContext
"""
if hasattr(SparkContext, "getOrCreate"):
with SparkContext._lock:
if SparkContext._active_spark_context is None:
spark_conf = create_spark_conf() if conf is None else conf
return SparkContext.getOrCreate(spark_conf)
else:
return SparkContext.getOrCreate()
else:
# Might have a threading issue but we can't add _lock here
# as it's not RLock in spark1.5;
if SparkContext._active_spark_context is None:
spark_conf = create_spark_conf() if conf is None else conf
return SparkContext(conf=spark_conf)
else:
return SparkContext._active_spark_context | Get the current active spark context and create one if no active instance
:param conf: combining bigdl configs into spark conf
:return: SparkContext | Below is the the instruction that describes the task:
### Input:
Get the current active spark context and create one if no active instance
:param conf: combining bigdl configs into spark conf
:return: SparkContext
### Response:
def get_spark_context(conf=None):
"""
Get the current active spark context and create one if no active instance
:param conf: combining bigdl configs into spark conf
:return: SparkContext
"""
if hasattr(SparkContext, "getOrCreate"):
with SparkContext._lock:
if SparkContext._active_spark_context is None:
spark_conf = create_spark_conf() if conf is None else conf
return SparkContext.getOrCreate(spark_conf)
else:
return SparkContext.getOrCreate()
else:
# Might have a threading issue but we can't add _lock here
# as it's not RLock in spark1.5;
if SparkContext._active_spark_context is None:
spark_conf = create_spark_conf() if conf is None else conf
return SparkContext(conf=spark_conf)
else:
return SparkContext._active_spark_context |
def get_distutils_display_options():
""" Returns a set of all the distutils display options in their long and
short forms. These are the setup.py arguments such as --name or --version
which print the project's metadata and then exit.
Returns
-------
opts : set
The long and short form display option arguments, including the - or --
"""
short_display_opts = set('-' + o[1] for o in Distribution.display_options
if o[1])
long_display_opts = set('--' + o[0] for o in Distribution.display_options)
# Include -h and --help which are not explicitly listed in
# Distribution.display_options (as they are handled by optparse)
short_display_opts.add('-h')
long_display_opts.add('--help')
# This isn't the greatest approach to hardcode these commands.
# However, there doesn't seem to be a good way to determine
# whether build *will be* run as part of the command at this
# phase.
display_commands = set([
'clean', 'register', 'setopt', 'saveopts', 'egg_info',
'alias'])
return short_display_opts.union(long_display_opts.union(display_commands)) | Returns a set of all the distutils display options in their long and
short forms. These are the setup.py arguments such as --name or --version
which print the project's metadata and then exit.
Returns
-------
opts : set
The long and short form display option arguments, including the - or -- | Below is the the instruction that describes the task:
### Input:
Returns a set of all the distutils display options in their long and
short forms. These are the setup.py arguments such as --name or --version
which print the project's metadata and then exit.
Returns
-------
opts : set
The long and short form display option arguments, including the - or --
### Response:
def get_distutils_display_options():
""" Returns a set of all the distutils display options in their long and
short forms. These are the setup.py arguments such as --name or --version
which print the project's metadata and then exit.
Returns
-------
opts : set
The long and short form display option arguments, including the - or --
"""
short_display_opts = set('-' + o[1] for o in Distribution.display_options
if o[1])
long_display_opts = set('--' + o[0] for o in Distribution.display_options)
# Include -h and --help which are not explicitly listed in
# Distribution.display_options (as they are handled by optparse)
short_display_opts.add('-h')
long_display_opts.add('--help')
# This isn't the greatest approach to hardcode these commands.
# However, there doesn't seem to be a good way to determine
# whether build *will be* run as part of the command at this
# phase.
display_commands = set([
'clean', 'register', 'setopt', 'saveopts', 'egg_info',
'alias'])
return short_display_opts.union(long_display_opts.union(display_commands)) |
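One way such a set is typically used, assuming the function above is in scope, is to decide whether setup.py was run only to print metadata:

import sys

display_opts = get_distutils_display_options()
args = set(sys.argv[1:]) or {'--version'}   # fall back to an example argument
display_only = args.issubset(display_opts)
print(sorted(args), '->', 'skip heavy build steps' if display_only else 'full build')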
def cluster(x, cluster='KMeans', n_clusters=3, ndims=None, format_data=True):
"""
Performs clustering analysis and returns a list of cluster labels
Parameters
----------
x : A Numpy array, Pandas Dataframe or list of arrays/dfs
The data to be clustered. You can pass a single array/df or a list.
If a list is passed, the arrays will be stacked and the clustering
will be performed across all lists (i.e. not within each list).
cluster : str or dict
Model to use to discover clusters. Supported algorithms are: KMeans,
MiniBatchKMeans, AgglomerativeClustering, Birch, FeatureAgglomeration,
SpectralClustering and HDBSCAN (default: KMeans). Can be passed as a
string, but for finer control of the model parameters, pass as a
dictionary, e.g. cluster={'model' : 'KMeans', 'params' : {'max_iter' : 100}}.
See scikit-learn specific model docs for details on parameters supported for
each model.
n_clusters : int
Number of clusters to discover. Not required for HDBSCAN.
format_data : bool
Whether or not to first call the format_data function (default: True).
ndims : None
Deprecated argument. Please use new analyze function to perform
combinations of transformations
Returns
----------
cluster_labels : list
A list of cluster labels
"""
if cluster == None:
return x
elif (isinstance(cluster, six.string_types) and cluster=='HDBSCAN') or \
(isinstance(cluster, dict) and cluster['model']=='HDBSCAN'):
if not _has_hdbscan:
raise ImportError('HDBSCAN is not installed. Please install hdbscan>=0.8.11')
if ndims != None:
warnings.warn('The ndims argument is now deprecated. Ignoring dimensionality reduction step.')
if format_data:
x = formatter(x, ppca=True)
# if reduce is a string, find the corresponding model
if isinstance(cluster, six.string_types):
model = models[cluster]
if cluster != 'HDBSCAN':
model_params = {
'n_clusters' : n_clusters
}
else:
model_params = {}
# if its a dict, use custom params
elif type(cluster) is dict:
if isinstance(cluster['model'], six.string_types):
model = models[cluster['model']]
model_params = cluster['params']
# initialize model
model = model(**model_params)
# fit the model
model.fit(np.vstack(x))
# return the labels
return list(model.labels_) | Performs clustering analysis and returns a list of cluster labels
Parameters
----------
x : A Numpy array, Pandas Dataframe or list of arrays/dfs
The data to be clustered. You can pass a single array/df or a list.
If a list is passed, the arrays will be stacked and the clustering
will be performed across all lists (i.e. not within each list).
cluster : str or dict
Model to use to discover clusters. Supported algorithms are: KMeans,
MiniBatchKMeans, AgglomerativeClustering, Birch, FeatureAgglomeration,
SpectralClustering and HDBSCAN (default: KMeans). Can be passed as a
string, but for finer control of the model parameters, pass as a
dictionary, e.g. cluster={'model' : 'KMeans', 'params' : {'max_iter' : 100}}.
See scikit-learn specific model docs for details on parameters supported for
each model.
n_clusters : int
Number of clusters to discover. Not required for HDBSCAN.
format_data : bool
Whether or not to first call the format_data function (default: True).
ndims : None
Deprecated argument. Please use new analyze function to perform
combinations of transformations
Returns
----------
cluster_labels : list
A list of cluster labels | Below is the the instruction that describes the task:
### Input:
Performs clustering analysis and returns a list of cluster labels
Parameters
----------
x : A Numpy array, Pandas Dataframe or list of arrays/dfs
The data to be clustered. You can pass a single array/df or a list.
If a list is passed, the arrays will be stacked and the clustering
will be performed across all lists (i.e. not within each list).
cluster : str or dict
Model to use to discover clusters. Supported algorithms are: KMeans,
MiniBatchKMeans, AgglomerativeClustering, Birch, FeatureAgglomeration,
SpectralClustering and HDBSCAN (default: KMeans). Can be passed as a
string, but for finer control of the model parameters, pass as a
dictionary, e.g. cluster={'model' : 'KMeans', 'params' : {'max_iter' : 100}}.
See scikit-learn specific model docs for details on parameters supported for
each model.
n_clusters : int
Number of clusters to discover. Not required for HDBSCAN.
format_data : bool
Whether or not to first call the format_data function (default: True).
ndims : None
Deprecated argument. Please use new analyze function to perform
combinations of transformations
Returns
----------
cluster_labels : list
A list of cluster labels
### Response:
def cluster(x, cluster='KMeans', n_clusters=3, ndims=None, format_data=True):
"""
Performs clustering analysis and returns a list of cluster labels
Parameters
----------
x : A Numpy array, Pandas Dataframe or list of arrays/dfs
The data to be clustered. You can pass a single array/df or a list.
If a list is passed, the arrays will be stacked and the clustering
will be performed across all lists (i.e. not within each list).
cluster : str or dict
Model to use to discover clusters. Supported algorithms are: KMeans,
MiniBatchKMeans, AgglomerativeClustering, Birch, FeatureAgglomeration,
SpectralClustering and HDBSCAN (default: KMeans). Can be passed as a
string, but for finer control of the model parameters, pass as a
dictionary, e.g. cluster={'model' : 'KMeans', 'params' : {'max_iter' : 100}}.
See scikit-learn specific model docs for details on parameters supported for
each model.
n_clusters : int
Number of clusters to discover. Not required for HDBSCAN.
format_data : bool
Whether or not to first call the format_data function (default: True).
ndims : None
Deprecated argument. Please use new analyze function to perform
combinations of transformations
Returns
----------
cluster_labels : list
A list of cluster labels
"""
if cluster == None:
return x
elif (isinstance(cluster, six.string_types) and cluster=='HDBSCAN') or \
(isinstance(cluster, dict) and cluster['model']=='HDBSCAN'):
if not _has_hdbscan:
raise ImportError('HDBSCAN is not installed. Please install hdbscan>=0.8.11')
if ndims != None:
warnings.warn('The ndims argument is now deprecated. Ignoring dimensionality reduction step.')
if format_data:
x = formatter(x, ppca=True)
# if reduce is a string, find the corresponding model
if isinstance(cluster, six.string_types):
model = models[cluster]
if cluster != 'HDBSCAN':
model_params = {
'n_clusters' : n_clusters
}
else:
model_params = {}
# if its a dict, use custom params
elif type(cluster) is dict:
if isinstance(cluster['model'], six.string_types):
model = models[cluster['model']]
model_params = cluster['params']
# initialize model
model = model(**model_params)
# fit the model
model.fit(np.vstack(x))
# return the labels
return list(model.labels_) |
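The labelling itself comes straight from scikit-learn, which the wrapper above delegates to; a standalone sketch with synthetic data:

import numpy as np
from sklearn.cluster import KMeans

rng = np.random.RandomState(0)
# Two well-separated 2-D blobs of 50 points each.
data = np.vstack([rng.normal(0, 0.3, (50, 2)), rng.normal(5, 0.3, (50, 2))])
labels = KMeans(n_clusters=2, n_init=10, random_state=0).fit(data).labels_
print(list(labels[:3]), list(labels[-3:]))  # the two blobs receive different labels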
def gemini_query(self, query_id):
"""Return a gemini query
Args:
query_id (str)
"""
logger.debug("Looking for query with id {0}".format(query_id))
return self.query(GeminiQuery).filter_by(id=query_id).first() | Return a gemini query
Args:
query_id (str) | Below is the the instruction that describes the task:
### Input:
Return a gemini query
Args:
query_id (str)
### Response:
def gemini_query(self, query_id):
"""Return a gemini query
Args:
query_id (str)
"""
logger.debug("Looking for query with id {0}".format(query_id))
return self.query(GeminiQuery).filter_by(id=query_id).first() |
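The method is a thin wrapper around SQLAlchemy's filter_by(...).first(); a self-contained sketch of that lookup pattern with an in-memory SQLite database and a stand-in GeminiQuery model whose columns are assumptions, not the real schema (SQLAlchemy 1.4+):

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()

class GeminiQuery(Base):  # illustrative stand-in only
    __tablename__ = 'gemini_query'
    id = Column(Integer, primary_key=True)
    name = Column(String)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add(GeminiQuery(id=1, name='exonic candidates'))
session.commit()

# The same lookup gemini_query() performs: filter on id, return the first hit or None.
query = session.query(GeminiQuery).filter_by(id=1).first()
print(query.name if query else None)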
def deprecated(msg=''):
"""This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used.
:param msg:
Additional message added to the warning.
"""
def wrapper(func):
@functools.wraps(func)
def new_func(*args, **kwargs):
warning_string = "Call to deprecated function or property `%s`." % func.__name__
warning_string = warning_string + ' ' + msg
warnings.warn(
warning_string,
category=DeprecationWarning,
)
return func(*args, **kwargs)
return new_func
return wrapper | This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used.
:param msg:
Additional message added to the warning. | Below is the instruction that describes the task:
### Input:
This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used.
:param msg:
Additional message added to the warning.
### Response:
def deprecated(msg=''):
"""This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used.
:param msg:
Additional message added to the warning.
"""
def wrapper(func):
@functools.wraps(func)
def new_func(*args, **kwargs):
warning_string = "Call to deprecated function or property `%s`." % func.__name__
warning_string = warning_string + ' ' + msg
warnings.warn(
warning_string,
category=DeprecationWarning,
)
return func(*args, **kwargs)
return new_func
return wrapper |
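A short usage sketch, assuming the deprecated decorator above is already in scope; old_parser is a made-up function:

import warnings

@deprecated(msg='Use new_parser() instead.')
def old_parser(text):
    return text.split()

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    result = old_parser('a b c')

# The wrapped call still returns normally, but a DeprecationWarning carrying the extra message was emitted.
print(result)
print(caught[0].category.__name__, '-', caught[0].message)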
def template_sphere_shell(outer_radius, inner_radius=0):
r"""
This method generates an image array of a sphere-shell. It is useful for
passing to Cubic networks as a ``template`` to make spherical shaped
networks.
Parameters
----------
outer_radius : int
Number of nodes in the outer radius of the sphere.
inner_radius : int
Number of nodes in the inner radius of the shell. A value of 0 will
result in a solid sphere.
Returns
-------
A Numpy array containing 1's to demarcate the sphere-shell, and 0's
elsewhere.
"""
img = _template_sphere_disc(dim=3, outer_radius=outer_radius,
inner_radius=inner_radius)
return img | r"""
This method generates an image array of a sphere-shell. It is useful for
passing to Cubic networks as a ``template`` to make spherical shaped
networks.
Parameters
----------
outer_radius : int
Number of nodes in the outer radius of the sphere.
inner_radius : int
Number of nodes in the inner radius of the shell. A value of 0 will
result in a solid sphere.
Returns
-------
A Numpy array containing 1's to demarcate the sphere-shell, and 0's
elsewhere. | Below is the instruction that describes the task:
### Input:
r"""
This method generates an image array of a sphere-shell. It is useful for
passing to Cubic networks as a ``template`` to make spherical shaped
networks.
Parameters
----------
outer_radius : int
Number of nodes in the outer radius of the sphere.
inner_radius : int
Number of nodes in the inner radius of the shell. A value of 0 will
result in a solid sphere.
Returns
-------
A Numpy array containing 1's to demarcate the sphere-shell, and 0's
elsewhere.
### Response:
def template_sphere_shell(outer_radius, inner_radius=0):
r"""
This method generates an image array of a sphere-shell. It is useful for
passing to Cubic networks as a ``template`` to make spherical shaped
networks.
Parameters
----------
outer_radius : int
Number of nodes in the outer radius of the sphere.
inner_radius : int
Number of nodes in the inner radius of the shell. A value of 0 will
result in a solid sphere.
Returns
-------
A Numpy array containing 1's to demarcate the sphere-shell, and 0's
elsewhere.
"""
img = _template_sphere_disc(dim=3, outer_radius=outer_radius,
inner_radius=inner_radius)
return img |
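The helper _template_sphere_disc is not reproduced above; a self-contained NumPy sketch of the same idea (keep the nodes whose distance from the centre falls between the two radii) might look like this:

import numpy as np

def sphere_shell_template(outer_radius, inner_radius=0):
    # Cubic grid of node coordinates, centred on the middle of the array.
    n = 2 * outer_radius
    grid = np.indices((n, n, n)) - (n - 1) / 2.0
    dist = np.sqrt((grid ** 2).sum(axis=0))
    # 1 marks the shell, 0 everything else.
    return ((dist < outer_radius) & (dist >= inner_radius)).astype(int)

img = sphere_shell_template(outer_radius=10, inner_radius=5)
print(img.shape, img.sum())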
def get_PolyFromPolyFileObj(PolyFileObj, SavePathInp=None, units='m', comments='#', skiprows=0, shape0=2):
""" Return a polygon as a np.ndarray, extracted from a txt file or from a ToFu object, with appropriate units
Useful for :meth:`tofu.plugins.AUG.Ves._create()`
Parameters
----------
PolyFileObj : str / :mod:`tofu.geom` object / np.ndarray
The source where the polygon is to be found, either:
- str: the name of a file containing the coordinates of a polygon to be loaded with :meth:`numpy.loadtxt()`
- A :mod:`tofu.geom` object: with attribute 'Poly'
- np.ndarray: a 2-dimensional array containing the 2D cartesian coordinates of a polygon
SavePathInp : str / None
The absolute path where the input file is stored
units : str
Flag indicating in which units the polygon coordinates are expressed in the input file / object / array (will be converted to meters)
comments : str
Parameter to be fed to :meth:`numpy.loadtxt()` if PolyFileObj is a file name
skiprows : int
Parameter to be fed to :meth:`numpy.loadtxt()` if PolyFileObj is a file name
shape0 : int
Specifies whether the loaded array is a (2,N) or (3,N) array (it is transposed if necessary)
Returns
-------
Poly : np.ndarray
(2,N) np.ndarray containing the 2D cartesian coordinates of the polygon, where N is the number of points
addInfo : dict
Dictionary containing information on the origin of the polygon, for the record (e.g.: the name and absolute path of the file from which it was extracted)
"""
assert type(PolyFileObj) in [list,str] or hasattr(PolyFileObj,"Poly") or np.asarray(PolyFileObj).ndim==2, "Arg PolyFileObj must be str (PathFileExt), a ToFu object with attribute Poly or an iterable convertible to 2d np.ndarray !"
# Load PolyFileObj if file and check shape
addInfo = {}
if type(PolyFileObj) in [list,str]:
PathFileExt = get_FileFromInfos(Path=SavePathInp, Name=PolyFileObj)
# Include PathFileExt in ID for traceability
addInfo = {'Input':PathFileExt}
PolyFileObj = np.loadtxt(PathFileExt, dtype=float, comments=comments, delimiter=None, converters=None, skiprows=skiprows, usecols=None, unpack=False, ndmin=2)
elif hasattr(PolyFileObj,"Poly"):
addInfo = {'Input':PolyFileObj.Id.SaveName}
PolyFileObj = PolyFileObj.Poly
Poly = np.asarray(PolyFileObj)
assert Poly.ndim==2 and shape0 in Poly.shape and max(Poly.shape)>=3 and not np.any(np.isnan(Poly)), "Arg np.asarray(PolyFileObj) must be a (2,N) or (N,2) np.ndarray with non NaNs !"
Poly = Poly if Poly.shape[0]==shape0 else Poly.T
Poly = convert_units(Poly, In=units, Out='m')
return Poly, addInfo | Return a polygon as a np.ndarray, extracted from a txt file or from a ToFu object, with appropriate units
Useful for :meth:`tofu.plugins.AUG.Ves._create()`
Parameters
----------
PolyFileObj : str / :mod:`tofu.geom` object / np.ndarray
The source where the polygon is to be found, either:
- str: the name of a file containing the coordinates of a polygon to be loaded with :meth:`numpy.loadtxt()`
- A :mod:`tofu.geom` object: with attribute 'Poly'
- np.ndarray: a 2-dimensional array containing the 2D cartesian coordinates of a polygon
SavePathInp : str / None
The absolute path where the input file is stored
units : str
Flag indicating in which units the polygon coordinates are expressed in the input file / object / array (will be converted to meters)
comments : str
Parameter to be fed to :meth:`numpy.loadtxt()` if PolyFileObj is a file name
skiprows : int
Parameter to be fed to :meth:`numpy.loadtxt()` if PolyFileObj is a file name
shape0 : int
Specifies whether the loaded array is a (2,N) or (3,N) array (it is transposed if necessary)
Returns
-------
Poly : np.ndarray
(2,N) np.ndarray containing the 2D cartesian coordinates of the polygon, where N is the number of points
addInfo : dict
Dictionary containing information on the origin of the polygon, for the record (e.g.: the name and absolute path of the file from which it was extracted) | Below is the instruction that describes the task:
### Input:
Return a polygon as a np.ndarray, extracted from a txt file or from a ToFu object, with appropriate units
Useful for :meth:`tofu.plugins.AUG.Ves._create()`
Parameters
----------
PolyFileObj : str / :mod:`tofu.geom` object / np.ndarray
The source where the polygon is to be found, either:
- str: the name of a file containing the coordinates of a polygon to be loaded with :meth:`numpy.loadtxt()`
- A :mod:`tofu.geom` object: with attribute 'Poly'
- np.ndarray: a 2-dimensional array containing the 2D cartesian coordinates of a polygon
SavePathInp : str / None
The absolute path where the input file is stored
units : str
Flag indicating in which units the polygon coordinates are expressed in the input file / object / array (will be converted to meters)
comments : str
Parameter to be fed to :meth:`numpy.loadtxt()` if PolyFileObj is a file name
skiprows : int
Parameter to be fed to :meth:`numpy.loadtxt()` if PolyFileObj is a file name
shape0 : int
Specifies whether the loaded array is a (2,N) or (3,N) array (it is transposed if necessary)
Returns
-------
Poly : np.ndarray
(2,N) np.ndarray containing the 2D cartesian coordinates of the polygon, where N is the number of points
addInfo : dict
Dictionary containing information on the origin of the polygon, for the record (e.g.: the name and absolute path of the file from which it was extracted)
### Response:
def get_PolyFromPolyFileObj(PolyFileObj, SavePathInp=None, units='m', comments='#', skiprows=0, shape0=2):
""" Return a polygon as a np.ndarray, extracted from a txt file or from a ToFu object, with appropriate units
Useful for :meth:`tofu.plugins.AUG.Ves._create()`
Parameters
----------
PolyFileObj : str / :mod:`tofu.geom` object / np.ndarray
The source where the polygon is to be found, either:
- str: the name of a file containing the coordinates of a polygon to be loaded with :meth:`numpy.loadtxt()`
- A :mod:`tofu.geom` object: with attribute 'Poly'
- np.ndarray: a 2-dimensional array containing the 2D cartesian coordinates of a polygon
SavePathInp : str / None
The absolute path where the input file is stored
units : str
Flag indicating in which units the polygon coordinates are expressed in the input file / object / array (will be converted to meters)
comments : str
Parameter to be fed to :meth:`numpy.loadtxt()` if PolyFileObj is a file name
skiprows : int
Parameter to be fed to :meth:`numpy.loadtxt()` if PolyFileObj is a file name
shape0 : int
Specifies whether the loaded array is a (2,N) or (3,N) array (it is transposed if necessary)
Returns
-------
Poly : np.ndarray
(2,N) np.ndarray containing the 2D cartesian coordinates of the polygon, where N is the number of points
addInfo : dict
Dictionary containing information on the origin of the polygon, for the record (e.g.: the name and absolute path of the file from which it was extracted)
"""
assert type(PolyFileObj) in [list,str] or hasattr(PolyFileObj,"Poly") or np.asarray(PolyFileObj).ndim==2, "Arg PolyFileObj must be str (PathFileExt), a ToFu object with attribute Poly or an iterable convertible to 2d np.ndarray !"
# Load PolyFileObj if file and check shape
addInfo = {}
if type(PolyFileObj) in [list,str]:
PathFileExt = get_FileFromInfos(Path=SavePathInp, Name=PolyFileObj)
# Include PathFileExt in ID for traceability
addInfo = {'Input':PathFileExt}
PolyFileObj = np.loadtxt(PathFileExt, dtype=float, comments=comments, delimiter=None, converters=None, skiprows=skiprows, usecols=None, unpack=False, ndmin=2)
elif hasattr(PolyFileObj,"Poly"):
addInfo = {'Input':PolyFileObj.Id.SaveName}
PolyFileObj = PolyFileObj.Poly
Poly = np.asarray(PolyFileObj)
assert Poly.ndim==2 and shape0 in Poly.shape and max(Poly.shape)>=3 and not np.any(np.isnan(Poly)), "Arg np.asarray(PolyFileObj) must be a (2,N) or (N,2) np.ndarray with non NaNs !"
Poly = Poly if Poly.shape[0]==shape0 else Poly.T
Poly = convert_units(Poly, In=units, Out='m')
return Poly, addInfo |
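A minimal sketch of the file-loading branch only, with the tofu helpers (get_FileFromInfos, convert_units) replaced by plain NumPy calls and a hard-coded mm-to-m factor; the temporary file is created just for the demo:

import os
import tempfile
import numpy as np

# Write a small (N,2) polygon, in millimetres, to a temporary text file.
poly_mm = np.array([[0., 0.], [1000., 0.], [1000., 500.], [0., 500.]])
path = os.path.join(tempfile.mkdtemp(), 'poly.txt')
np.savetxt(path, poly_mm)

# Load it, force the (2,N) orientation and convert the units, as the helper above does.
poly = np.loadtxt(path, ndmin=2)
poly = poly if poly.shape[0] == 2 else poly.T
poly_m = poly * 1e-3
add_info = {'Input': path}
print(poly_m.shape, add_info)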
def get_upgrades(self, remove_applied=True):
"""Get upgrades (ordered according to their dependencies).
:param remove_applied: Set to false to return all upgrades, otherwise
already applied upgrades are removed from their graph (incl. all
their dependencies.
"""
if self.upgrades is None:
plugins = self._load_upgrades(remove_applied=remove_applied)
# List of un-applied upgrades in topological order
self.upgrades = self.order_upgrades(plugins, self.history)
return self.upgrades | Get upgrades (ordered according to their dependencies).
:param remove_applied: Set to false to return all upgrades, otherwise
already applied upgrades are removed from their graph (incl. all
their dependencies). | Below is the instruction that describes the task:
### Input:
Get upgrades (ordered according to their dependencies).
:param remove_applied: Set to false to return all upgrades, otherwise
already applied upgrades are removed from their graph (incl. all
their dependencies).
### Response:
def get_upgrades(self, remove_applied=True):
"""Get upgrades (ordered according to their dependencies).
:param remove_applied: Set to false to return all upgrades, otherwise
already applied upgrades are removed from their graph (incl. all
their dependencies).
"""
if self.upgrades is None:
plugins = self._load_upgrades(remove_applied=remove_applied)
# List of un-applied upgrades in topological order
self.upgrades = self.order_upgrades(plugins, self.history)
return self.upgrades |
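The "topological order" step can be sketched with the standard-library graphlib module (Python 3.9+); the upgrade names and dependency edges below are invented:

from graphlib import TopologicalSorter

# Each upgrade maps to the set of upgrades it depends on.
dependencies = {
    'add_users_table': set(),
    'add_email_column': {'add_users_table'},
    'backfill_emails': {'add_email_column'},
}
applied = {'add_users_table'}  # upgrades already recorded in the history

# Drop the already-applied upgrades, then emit the rest in dependency order.
pending = {k: v - applied for k, v in dependencies.items() if k not in applied}
print(list(TopologicalSorter(pending).static_order()))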
def _upload_file(compute, project_id, file_path, path):
"""
Upload a file to a remote project
:param file_path: File path on the controller file system
:param path: File path on the remote system relative to project directory
"""
path = "/projects/{}/files/{}".format(project_id, path.replace("\\", "/"))
with open(file_path, "rb") as f:
yield from compute.http_query("POST", path, f, timeout=None) | Upload a file to a remote project
:param file_path: File path on the controller file system
:param path: File path on the remote system relative to project directory | Below is the instruction that describes the task:
### Input:
Upload a file to a remote project
:param file_path: File path on the controller file system
:param path: File path on the remote system relative to project directory
### Response:
def _upload_file(compute, project_id, file_path, path):
"""
Upload a file to a remote project
:param file_path: File path on the controller file system
:param path: File path on the remote system relative to project directory
"""
path = "/projects/{}/files/{}".format(project_id, path.replace("\\", "/"))
with open(file_path, "rb") as f:
yield from compute.http_query("POST", path, f, timeout=None) |
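A stand-alone equivalent using aiohttp instead of the controller's compute.http_query; the host, port and ids are placeholders, and the final call is left commented out because it needs a running server:

import asyncio
import aiohttp

async def upload_file(file_path, project_id, path):
    url = 'http://localhost:3080/v2/projects/{}/files/{}'.format(
        project_id, path.replace('\\', '/'))
    async with aiohttp.ClientSession() as session:
        with open(file_path, 'rb') as f:
            async with session.post(url, data=f) as resp:
                return resp.status

# asyncio.run(upload_file('notes.txt', 'a1b2c3', 'project-files/notes.txt'))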
def admeig(classname, f, m_u, m_d, m_s, m_c, m_b, m_e, m_mu, m_tau):
"""Compute the eigenvalues and eigenvectors for a QCD anomalous dimension
matrix that is defined in `adm.adm_s_X` where X is the name of the sector.
Supports memoization. Output analogous to `np.linalg.eig`."""
args = f, m_u, m_d, m_s, m_c, m_b, m_e, m_mu, m_tau
A = getattr(adm, 'adm_s_' + classname)(*args)
perm_keys = get_permissible_wcs(classname, f)
if perm_keys != 'all':
# remove disallowed rows & columns if necessary
A = A[perm_keys][:, perm_keys]
w, v = np.linalg.eig(A.T)
return w, v | Compute the eigenvalues and eigenvectors for a QCD anomalous dimension
matrix that is defined in `adm.adm_s_X` where X is the name of the sector.
Supports memoization. Output analogous to `np.linalg.eig`. | Below is the instruction that describes the task:
### Input:
Compute the eigenvalues and eigenvectors for a QCD anomalous dimension
matrix that is defined in `adm.adm_s_X` where X is the name of the sector.
Supports memoization. Output analogous to `np.linalg.eig`.
### Response:
def admeig(classname, f, m_u, m_d, m_s, m_c, m_b, m_e, m_mu, m_tau):
"""Compute the eigenvalues and eigenvectors for a QCD anomalous dimension
matrix that is defined in `adm.adm_s_X` where X is the name of the sector.
Supports memoization. Output analogous to `np.linalg.eig`."""
args = f, m_u, m_d, m_s, m_c, m_b, m_e, m_mu, m_tau
A = getattr(adm, 'adm_s_' + classname)(*args)
perm_keys = get_permissible_wcs(classname, f)
if perm_keys != 'all':
# remove disallowed rows & columns if necessary
A = A[perm_keys][:, perm_keys]
w, v = np.linalg.eig(A.T)
return w, v |
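Stripped of the model-specific machinery, the routine filters rows/columns of the anomalous-dimension matrix and diagonalises its transpose; a NumPy-only sketch with an invented 3x3 matrix:

import numpy as np

A = np.array([[4.0, 1.0, 0.0],
              [0.0, 3.0, 2.0],
              [0.0, 0.0, 1.0]])
perm_keys = [0, 1]  # pretend the third operator is not permitted in this sector

# Keep only the permissible rows and columns, then take the eigenvalues and
# eigenvectors of the transpose, as admeig() does before returning (w, v).
A_red = A[perm_keys][:, perm_keys]
w, v = np.linalg.eig(A_red.T)
print(w)
print(v)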