def validate_request():
"""
Validates the incoming request
The following are invalid
1. The Request data is not json serializable
2. Query Parameters are sent to the endpoint
3. The Request Content-Type is not application/json
4. 'X-Amz-Log-Type' header is not 'None'
5. 'X-Amz-Invocation-Type' header is not 'RequestResponse'
Returns
-------
flask.Response
If the request is not valid a flask Response is returned
None:
If the request passes all validation
"""
flask_request = request
request_data = flask_request.get_data()
if not request_data:
request_data = b'{}'
request_data = request_data.decode('utf-8')
try:
json.loads(request_data)
except ValueError as json_error:
LOG.debug("Request body was not json. Exception: %s", str(json_error))
return LambdaErrorResponses.invalid_request_content(
"Could not parse request body into json: No JSON object could be decoded")
if flask_request.args:
LOG.debug("Query parameters are in the request but not supported")
return LambdaErrorResponses.invalid_request_content("Query Parameters are not supported")
request_headers = CaseInsensitiveDict(flask_request.headers)
log_type = request_headers.get('X-Amz-Log-Type', 'None')
if log_type != 'None':
LOG.debug("log-type: %s is not supported. None is only supported.", log_type)
return LambdaErrorResponses.not_implemented_locally(
"log-type: {} is not supported. None is only supported.".format(log_type))
invocation_type = request_headers.get('X-Amz-Invocation-Type', 'RequestResponse')
if invocation_type != 'RequestResponse':
LOG.warning("invocation-type: %s is not supported. RequestResponse is only supported.", invocation_type)
return LambdaErrorResponses.not_implemented_locally(
"invocation-type: {} is not supported. RequestResponse is only supported.".format(invocation_type)) | Validates the incoming request
The following are invalid
1. The Request data is not json serializable
2. Query Parameters are sent to the endpoint
3. The Request Content-Type is not application/json
4. 'X-Amz-Log-Type' header is not 'None'
5. 'X-Amz-Invocation-Type' header is not 'RequestResponse'
Returns
-------
flask.Response
If the request is not valid a flask Response is returned
None:
If the request passes all validation |
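The header checks above rely on case-insensitive lookups with defaults. A minimal standalone sketch of that behaviour, assuming CaseInsensitiveDict is the one from requests.structures (the module's real import is not shown in this snippet):

from requests.structures import CaseInsensitiveDict

# Simulated Flask request headers; lookups must ignore header casing.
headers = CaseInsensitiveDict({'x-amz-log-type': 'Tail'})

log_type = headers.get('X-Amz-Log-Type', 'None')                            # 'Tail' -> would be rejected
invocation_type = headers.get('X-Amz-Invocation-Type', 'RequestResponse')   # falls back to the default
print(log_type, invocation_type)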
def _get_rate(self, mag):
"""
Calculate and return the annual occurrence rate for a specific bin.
:param mag:
Magnitude value corresponding to the center of the bin of interest.
:returns:
Float number, the annual occurrence rate for the :param mag value.
"""
mag_lo = mag - self.bin_width / 2.0
mag_hi = mag + self.bin_width / 2.0
if mag >= self.min_mag and mag < self.char_mag - DELTA_CHAR / 2:
# return rate according to exponential distribution
return (10 ** (self.a_val - self.b_val * mag_lo)
- 10 ** (self.a_val - self.b_val * mag_hi))
else:
# return characteristic rate (distributed over the characteristic
# range) for the given bin width
return (self.char_rate / DELTA_CHAR) * self.bin_width
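For the exponential branch, the bin rate is the difference of the cumulative Gutenberg-Richter rates at the two bin edges. A standalone numeric sketch with made-up a/b values and bin width:

a_val, b_val, bin_width = 4.0, 1.0, 0.1
mag = 5.0                          # centre of the bin of interest
mag_lo = mag - bin_width / 2.0
mag_hi = mag + bin_width / 2.0

# Incremental annual rate for the bin: N(>= mag_lo) - N(>= mag_hi)
rate = 10 ** (a_val - b_val * mag_lo) - 10 ** (a_val - b_val * mag_hi)
print(round(rate, 4))              # ~0.0231 events per year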
def delete_refund_transaction_by_id(cls, refund_transaction_id, **kwargs):
"""Delete RefundTransaction
Delete an instance of RefundTransaction by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_refund_transaction_by_id(refund_transaction_id, async=True)
>>> result = thread.get()
:param async bool
:param str refund_transaction_id: ID of refundTransaction to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._delete_refund_transaction_by_id_with_http_info(refund_transaction_id, **kwargs)
else:
(data) = cls._delete_refund_transaction_by_id_with_http_info(refund_transaction_id, **kwargs)
return data
def cores_orthogonalization_step(coresX, dim, left_to_right=True):
"""TT-Tensor X orthogonalization step.
The function can change the shape of some cores.
"""
cc = coresX[dim]
r1, n, r2 = cc.shape
if left_to_right:
# Left to right orthogonalization step.
assert(0 <= dim < len(coresX) - 1)
cc, rr = np.linalg.qr(reshape(cc, (-1, r2)))
r2 = cc.shape[1]
coresX[dim] = reshape(cc, (r1, n, r2))
coresX[dim+1] = np.tensordot(rr, coresX[dim+1], 1)
else:
# Right to left orthogonalization step.
assert(0 < dim < len(coresX))
cc, rr = np.linalg.qr(reshape(cc, (r1, -1)).T)
r1 = cc.shape[1]
coresX[dim] = reshape(cc.T, (r1, n, r2))
coresX[dim-1] = np.tensordot(coresX[dim-1], rr.T, 1)
return coresX
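A small standalone sketch of the left-to-right step on random cores, using numpy's own reshape in place of the module-level reshape helper the function above assumes:

import numpy as np

r1, n, r2 = 3, 4, 5
core = np.random.rand(r1, n, r2)
next_core = np.random.rand(r2, 4, 2)

# QR-factorise the unfolded core and push the triangular factor into the next core.
q, rr = np.linalg.qr(core.reshape(-1, r2))
new_r2 = q.shape[1]
core = q.reshape(r1, n, new_r2)
next_core = np.tensordot(rr, next_core, 1)

# The unfolded core now has orthonormal columns.
print(np.allclose(q.T @ q, np.eye(new_r2)))   # True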
def add_object_to_scope(self, obj):
"""Add an object to the appropriate scope block.
Args:
obj: JSSObject to add to scope. Accepted subclasses are:
Computer
ComputerGroup
Building
Department
Raises:
TypeError if invalid obj type is provided.
"""
if isinstance(obj, Computer):
self.add_object_to_path(obj, "scope/computers")
elif isinstance(obj, ComputerGroup):
self.add_object_to_path(obj, "scope/computer_groups")
elif isinstance(obj, Building):
self.add_object_to_path(obj, "scope/buildings")
elif isinstance(obj, Department):
self.add_object_to_path(obj, "scope/departments")
else:
raise TypeError
def remove_breakpoint(self, py_db, filename, breakpoint_type, breakpoint_id):
'''
:param str filename:
Note: must be already translated for the server.
:param str breakpoint_type:
One of: 'python-line', 'django-line', 'jinja2-line'.
:param int breakpoint_id:
'''
file_to_id_to_breakpoint = None
if breakpoint_type == 'python-line':
breakpoints = py_db.breakpoints
file_to_id_to_breakpoint = py_db.file_to_id_to_line_breakpoint
elif py_db.plugin is not None:
result = py_db.plugin.get_breakpoints(py_db, breakpoint_type)
if result is not None:
file_to_id_to_breakpoint = py_db.file_to_id_to_plugin_breakpoint
breakpoints = result
if file_to_id_to_breakpoint is None:
pydev_log.critical('Error removing breakpoint. Cannot handle breakpoint of type %s', breakpoint_type)
else:
try:
id_to_pybreakpoint = file_to_id_to_breakpoint.get(filename, {})
if DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS > 0:
existing = id_to_pybreakpoint[breakpoint_id]
pydev_log.info('Removed breakpoint:%s - line:%s - func_name:%s (id: %s)\n' % (
filename, existing.line, existing.func_name.encode('utf-8'), breakpoint_id))
del id_to_pybreakpoint[breakpoint_id]
py_db.consolidate_breakpoints(filename, id_to_pybreakpoint, breakpoints)
if py_db.plugin is not None:
py_db.has_plugin_line_breaks = py_db.plugin.has_line_breaks()
except KeyError:
pydev_log.info("Error removing breakpoint: Breakpoint id not found: %s id: %s. Available ids: %s\n",
filename, breakpoint_id, dict_keys(id_to_pybreakpoint))
py_db.on_breakpoints_changed(removed=True)
def move_distance(self, distance_x_m, distance_y_m, distance_z_m,
velocity=VELOCITY):
"""
Move in a straight line.
positive X is forward
positive Y is left
positive Z is up
:param distance_x_m: The distance to travel along the X-axis (meters)
:param distance_y_m: The distance to travel along the Y-axis (meters)
:param distance_z_m: The distance to travel along the Z-axis (meters)
:param velocity: the velocity of the motion (meters/second)
:return:
"""
distance = math.sqrt(distance_x_m * distance_x_m +
distance_y_m * distance_y_m +
distance_z_m * distance_z_m)
flight_time = distance / velocity
velocity_x = velocity * distance_x_m / distance
velocity_y = velocity * distance_y_m / distance
velocity_z = velocity * distance_z_m / distance
self.start_linear_motion(velocity_x, velocity_y, velocity_z)
time.sleep(flight_time)
self.stop()
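The motion is decomposed into per-axis velocities whose combined magnitude equals the requested speed; a quick standalone check of that arithmetic (no Crazyflie connection involved, the numbers are made up):

import math

distance_x_m, distance_y_m, distance_z_m, velocity = 1.0, 1.0, 0.0, 0.2

distance = math.sqrt(distance_x_m ** 2 + distance_y_m ** 2 + distance_z_m ** 2)
flight_time = distance / velocity                    # ~7.07 s
velocity_x = velocity * distance_x_m / distance      # ~0.141 m/s
velocity_y = velocity * distance_y_m / distance
velocity_z = velocity * distance_z_m / distance

speed = math.sqrt(velocity_x ** 2 + velocity_y ** 2 + velocity_z ** 2)
print(math.isclose(speed, velocity))                 # True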
def load_env(print_vars=False):
"""Load environment variables from a .env file, if present.
If an .env file is found in the working directory, and the listed
environment variables are not already set, they will be set according to
the values listed in the file.
"""
env_file = os.environ.get('ENV_FILE', '.env')
try:
variables = open(env_file).read().splitlines()
for v in variables:
if '=' in v:
key, value = v.split('=', 1)
if key.startswith('#'):
continue
if key not in os.environ:
if value.startswith('"') and value.endswith('"') or \
value.startswith("'") and value.endswith("'"):
os.environ[key] = ast.literal_eval(value)
else:
os.environ[key] = value
if print_vars:
print(key, os.environ[key])
except IOError:
pass
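Quoted values are unwrapped with ast.literal_eval rather than by slicing off characters; a minimal standalone sketch of that branch, with a made-up variable name:

import ast

line = 'SECRET="s3cr3t value"'
key, value = line.split('=', 1)
if value.startswith('"') and value.endswith('"'):
    value = ast.literal_eval(value)    # safely strips the surrounding quotes
print(key, value)                      # SECRET s3cr3t value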
def put(self, resource, **params):
"""
Generic TeleSign REST API PUT handler.
:param resource: The partial resource URI to perform the request against, as a string.
:param params: Body params to perform the PUT request with, as a dictionary.
:return: The RestClient Response object.
"""
return self._execute(self.session.put, 'PUT', resource, **params)
def update_ontology(ont_url, rdf_path):
"""Load an ontology formatted like Eidos' from github."""
yaml_root = load_yaml_from_url(ont_url)
G = rdf_graph_from_yaml(yaml_root)
save_hierarchy(G, rdf_path)
def from_localhost(self) -> bool:
"""True if :attr:`.peername` is a connection from a ``localhost``
address.
"""
sock_family = self.socket.family
if sock_family == _socket.AF_UNIX:
return True
elif sock_family not in (_socket.AF_INET, _socket.AF_INET6):
return False
sock_address, *_ = self.peername
ip = ipaddress.ip_address(sock_address)
if ip.version == 6 and ip.ipv4_mapped is not None:
ip = ipaddress.ip_address(ip.ipv4_mapped)
return ip.is_loopback
def create_explicit(bounds):
"""Creates a new instance of distribution with explicit buckets.
bounds is an iterable of ordered floats that define the explicit buckets
Args:
bounds (iterable[float]): initializes the bounds
Return:
:class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`
Raises:
ValueError: if the args are invalid for creating an instance
"""
safe_bounds = sorted(float(x) for x in bounds)
if len(safe_bounds) != len(set(safe_bounds)):
raise ValueError(u'Detected two elements of bounds that are the same')
return sc_messages.Distribution(
bucketCounts=[0] * (len(safe_bounds) + 1),
explicitBuckets=sc_messages.ExplicitBuckets(bounds=safe_bounds))
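With N distinct bounds the distribution gets N + 1 buckets (underflow, the interior ranges, and overflow). A sketch of the sorting, de-duplication check and sizing without the protobuf message, using made-up bounds:

bounds = [5.0, 1.0, 10.0]
safe_bounds = sorted(float(x) for x in bounds)      # [1.0, 5.0, 10.0]
if len(safe_bounds) != len(set(safe_bounds)):
    raise ValueError('Detected two elements of bounds that are the same')
bucket_counts = [0] * (len(safe_bounds) + 1)        # 4 buckets for 3 bounds
print(safe_bounds, bucket_counts)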
def fit1d(samples, e, remove_zeros = False, **kw):
"""Fits a 1D distribution with splines.
Input:
samples: Array
Array of samples from a probability distribution
e: Array
Edges that define the events in the probability
distribution. For example, e[0] < x <= e[1] is
the range of values that are associated with the
first event.
**kw: Arguments that are passed on to spline_bse1d.
Returns:
distribution: Array
An array that gives an estimate of probability for
events defined by e.
knots: Array
Sequence of knots that were used for the spline basis
"""
samples = samples[~np.isnan(samples)]
length = len(e)-1
hist,_ = np.histogramdd(samples, (e,))
hist = hist/sum(hist)
basis, knots = spline_base1d(length, marginal = hist, **kw)
non_zero = hist>0
model = linear_model.BayesianRidge()
if remove_zeros:
model.fit(basis[non_zero, :], hist[:,np.newaxis][non_zero,:])
else:
hist[~non_zero] = np.finfo(float).eps
model.fit(basis, hist[:,np.newaxis])
return model.predict(basis), hist, knots
def association_pivot(self, association_resource):
"""Pivot point on association for this resource.
This method will return all *resources* (group, indicators, task, victims, etc) for this
resource that are associated with the provided resource.
**Example Endpoint URIs**
+---------+--------------------------------------------------------------------------------+
| METHOD | API Endpoint URI's |
+=========+================================================================================+
| GET | /v2/groups/{pivot resourceType}/{pivot uniqueId}/{resourceType} |
+---------+--------------------------------------------------------------------------------+
| GET | /v2/groups/{pivot resourceType}/{pivot uniqueId}/{resourceType}/{uniqueId} |
+---------+--------------------------------------------------------------------------------+
| POST | /v2/groups/{pivot resourceType}/{pivot uniqueId}/{resourceType}/{uniqueId} |
+---------+--------------------------------------------------------------------------------+
| GET | /v2/indicators/{pivot resourceType}/{pivot uniqueId}/{resourceType} |
+---------+--------------------------------------------------------------------------------+
| GET | /v2/indicators/{pivot resourceType}/{pivot uniqueId}/{resourceType}/{uniqueId} |
+---------+--------------------------------------------------------------------------------+
| POST | /v2/indicator/{pivot resourceType}/{pivot uniqueId}/{resourceType}/{uniqueId} |
+---------+--------------------------------------------------------------------------------+
Args:
association_resource (Resource): The association resource providing the pivot API branch, including its resource id.
"""
resource = self.copy()
resource._request_uri = '{}/{}'.format(
association_resource.request_uri, resource._request_uri
)
return resource
def needs_manager_helps():
"""Help message for Batch Dialog.
.. versionadded:: 3.2.1
:returns: A message object containing helpful information.
:rtype: messaging.message.Message
"""
message = m.Message()
message.add(m.Brand())
message.add(heading())
message.add(content())
return message
def key_absent(name, region=None, key=None, keyid=None, profile=None):
'''
Deletes a key pair
'''
ret = {'name': name,
'result': True,
'comment': '',
'changes': {}
}
exists = __salt__['boto_ec2.get_key'](name, region, key, keyid, profile)
if exists:
if __opts__['test']:
ret['comment'] = 'The key {0} is set to be deleted.'.format(name)
ret['result'] = None
return ret
deleted = __salt__['boto_ec2.delete_key'](name, region,
key, keyid,
profile)
log.debug('exists is %s', deleted)
if deleted:
ret['result'] = True
ret['comment'] = 'The key {0} is deleted.'.format(name)
ret['changes']['old'] = name
else:
ret['result'] = False
ret['comment'] = 'Could not delete key {0} '.format(name)
else:
ret['result'] = True
ret['comment'] = 'The key name {0} does not exist'.format(name)
return ret
def titlecase(text, callback=None, small_first_last=True):
"""
Titlecases input text
This filter changes all words to Title Caps, and attempts to be clever
about *un*capitalizing SMALL words like a/an/the in the input.
The list of "SMALL words" which are not capped comes from
the New York Times Manual of Style, plus 'vs' and 'v'.
"""
lines = re.split('[\r\n]+', text)
processed = []
for line in lines:
all_caps = line.upper() == line
words = re.split('[\t ]', line)
tc_line = []
for word in words:
if callback:
new_word = callback(word, all_caps=all_caps)
if new_word:
# Address #22: If a callback has done something
# specific, leave this string alone from now on
tc_line.append(_mark_immutable(new_word))
continue
if all_caps:
if UC_INITIALS.match(word):
tc_line.append(word)
continue
if APOS_SECOND.match(word):
if len(word[0]) == 1 and word[0] not in 'aeiouAEIOU':
word = word[0].lower() + word[1] + word[2].upper() + word[3:]
else:
word = word[0].upper() + word[1] + word[2].upper() + word[3:]
tc_line.append(word)
continue
match = MAC_MC.match(word)
if match:
tc_line.append("%s%s" % (match.group(1).capitalize(),
titlecase(match.group(2),callback,small_first_last)))
continue
if INLINE_PERIOD.search(word) or (not all_caps and UC_ELSEWHERE.match(word)):
tc_line.append(word)
continue
if SMALL_WORDS.match(word):
tc_line.append(word.lower())
continue
if "/" in word and "//" not in word:
slashed = map(
lambda t: titlecase(t,callback,False),
word.split('/')
)
tc_line.append("/".join(slashed))
continue
if '-' in word:
hyphenated = map(
lambda t: titlecase(t,callback,small_first_last),
word.split('-')
)
tc_line.append("-".join(hyphenated))
continue
if all_caps:
word = word.lower()
# Just a normal word that needs to be capitalized
tc_line.append(CAPFIRST.sub(lambda m: m.group(0).upper(), word))
if small_first_last and tc_line:
if not isinstance(tc_line[0], Immutable):
tc_line[0] = SMALL_FIRST.sub(lambda m: '%s%s' % (
m.group(1),
m.group(2).capitalize()
), tc_line[0])
if not isinstance(tc_line[-1], Immutable):
tc_line[-1] = SMALL_LAST.sub(
lambda m: m.group(0).capitalize(), tc_line[-1]
)
result = " ".join(tc_line)
result = SUBPHRASE.sub(lambda m: '%s%s' % (
m.group(1),
m.group(2).capitalize()
), result)
processed.append(result)
return "\n".join(processed) | Titlecases input text
This filter changes all words to Title Caps, and attempts to be clever
about *un*capitalizing SMALL words like a/an/the in the input.
The list of "SMALL words" which are not capped comes from
the New York Times Manual of Style, plus 'vs' and 'v'. |
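Hedged usage sketch, assuming this is the top-level function of the titlecase package; the example strings and the callback are illustrative:

print(titlecase("a tale of two cities"))        # expected: "A Tale of Two Cities"

def abbreviations(word, **kwargs):
    # Callback: return a replacement to pin a word, or None to fall back to the default rules.
    return word.upper() if word.lower() in ('tcp', 'ip') else None

print(titlecase("the tcp/ip stack", callback=abbreviations))   # expected: "The TCP/IP Stack"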
def p_namelist(self,t):
"namelist : namelist ',' NAME \n | NAME"
if len(t)==2: t[0] = [t[1]]
elif len(t)==4: t[0] = t[1] + [t[3]]
else: raise NotImplementedError('unk_len',len(t)) # pragma: no cover
def upgrade(refresh=True, **kwargs):
'''
Upgrade outdated, unpinned brews.
refresh
Fetch the newest version of Homebrew and all formulae from GitHub before installing.
Returns a dictionary containing the changes:
.. code-block:: python
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
CLI Example:
.. code-block:: bash
salt '*' pkg.upgrade
'''
ret = {'changes': {},
'result': True,
'comment': '',
}
old = list_pkgs()
if salt.utils.data.is_true(refresh):
refresh_db()
result = _call_brew('upgrade', failhard=False)
__context__.pop('pkg.list_pkgs', None)
new = list_pkgs()
ret = salt.utils.data.compare_dicts(old, new)
if result['retcode'] != 0:
raise CommandExecutionError(
'Problem encountered upgrading packages',
info={'changes': ret, 'result': result}
)
return ret
def create_tomodir(self, directory):
"""Create a tomodir subdirectory structure in the given directory
"""
pwd = os.getcwd()
if not os.path.isdir(directory):
os.makedirs(directory)
os.chdir(directory)
directories = (
'config',
'exe',
'grid',
'mod',
'mod/pot',
'mod/sens',
'rho',
)
for directory in directories:
if not os.path.isdir(directory):
os.makedirs(directory)
os.chdir(pwd)
def _GetMountpoints(only_physical=True):
"""Fetches a list of mountpoints.
Args:
only_physical: Determines whether only mountpoints for physical devices
(e.g. hard disks) should be listed. If false, mountpoints for things such
as memory partitions or `/dev/shm` will be returned as well.
Returns:
A set of mountpoints.
"""
partitions = psutil.disk_partitions(all=not only_physical)
return set(partition.mountpoint for partition in partitions)
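psutil does the actual work here; a hedged standalone equivalent of the physical-only call:

import psutil

# all=False limits the listing to physical devices, mirroring only_physical=True.
mountpoints = {p.mountpoint for p in psutil.disk_partitions(all=False)}
print(mountpoints)    # e.g. {'/', '/boot'} on a typical Linux host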
def getaddress(self):
"""Parse the next address."""
self.commentlist = []
self.gotonext()
oldpos = self.pos
oldcl = self.commentlist
plist = self.getphraselist()
self.gotonext()
returnlist = []
if self.pos >= len(self.field):
# Bad email address technically, no domain.
if plist:
returnlist = [(SPACE.join(self.commentlist), plist[0])]
elif self.field[self.pos] in '.@':
# email address is just an addrspec
# this isn't very efficient since we start over
self.pos = oldpos
self.commentlist = oldcl
addrspec = self.getaddrspec()
returnlist = [(SPACE.join(self.commentlist), addrspec)]
elif self.field[self.pos] == ':':
# address is a group
returnlist = []
fieldlen = len(self.field)
self.pos += 1
while self.pos < len(self.field):
self.gotonext()
if self.pos < fieldlen and self.field[self.pos] == ';':
self.pos += 1
break
returnlist = returnlist + self.getaddress()
elif self.field[self.pos] == '<':
# Address is a phrase then a route addr
routeaddr = self.getrouteaddr()
if self.commentlist:
returnlist = [(SPACE.join(plist) + ' (' +
' '.join(self.commentlist) + ')', routeaddr)]
else:
returnlist = [(SPACE.join(plist), routeaddr)]
else:
if plist:
returnlist = [(SPACE.join(self.commentlist), plist[0])]
elif self.field[self.pos] in self.specials:
self.pos += 1
self.gotonext()
if self.pos < len(self.field) and self.field[self.pos] == ',':
self.pos += 1
return returnlist
def count_lines_in_file(self, fname=''):
""" you wont believe what this method does """
i = 0
if fname == '':
fname = self.fullname
try:
#with open(fname, encoding="utf8") as f:
with codecs.open(fname, "r",encoding='utf8', errors='ignore') as f:
for i, _ in enumerate(f):
pass
return i + 1
except Exception as ex:
print('cannot count lines in file "', fname, '":', str(ex))
return 0
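A quick standalone check of the counting idiom used above (enumerating the open file object), on a throwaway file:

import codecs
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as tmp:
    tmp.write('one\ntwo\nthree\n')
    path = tmp.name

count = 0
with codecs.open(path, 'r', encoding='utf8', errors='ignore') as f:
    for count, _ in enumerate(f, start=1):
        pass
print(count)    # 3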
def write_default_config(self, filename):
"""Write the default config file.
"""
try:
with open(filename, 'wt') as file:
file.write(DEFAULT_CONFIG)
return True
except (IOError, OSError) as e:
print('Error writing %s: %s' % (filename, e.strerror or e), file=sys.stderr)
return False
def report(self, score_map, type="valid", epoch=-1, new_best=False):
"""
Report the scores and record them in the log.
"""
type_str = type
if len(type_str) < 5:
type_str += " " * (5 - len(type_str))
info = " ".join("%s=%.2f" % el for el in score_map.items())
current_epoch = epoch if epoch > 0 else self.current_epoch()
epoch_str = "epoch={}".format(current_epoch + 1)
if epoch < 0:
epoch_str = "dryrun"
sys.stdout.write("\r")
sys.stdout.flush()
marker = " *" if new_best else ""
message = "{} ({}) {}{}".format(type_str, epoch_str, info, marker)
self.network.train_logger.record(message)
logging.info(message)
def resize(self, width, height, **kwargs):
"""Resizes the image to the supplied width/height. Returns the
instance. Supports the following optional keyword arguments:
mode - The resizing mode to use, see Image.MODES
filter - The filter to use: see Image.FILTERS
background - The hexadecimal background fill color, RGB or ARGB
position - The position used to crop: see Image.POSITIONS for
pre-defined positions or a custom position ratio
retain - The minimum percentage of the original image to retain
when cropping
"""
opts = Image._normalize_options(kwargs)
size = self._get_size(width, height)
if opts["mode"] == "adapt":
self._adapt(size, opts)
elif opts["mode"] == "clip":
self._clip(size, opts)
elif opts["mode"] == "fill":
self._fill(size, opts)
elif opts["mode"] == "scale":
self._scale(size, opts)
else:
self._crop(size, opts)
return self
def MobileDeviceApplication(self, data=None, subset=None):
"""{dynamic_docstring}"""
return self.factory.get_object(jssobjects.MobileDeviceApplication,
data, subset)
def get_string_resources(self, package_name, locale='\x00\x00'):
"""
Get the XML (as string) of all resources of type 'string'.
Read more about string resources:
https://developer.android.com/guide/topics/resources/string-resource.html
:param package_name: the package name to get the resources for
:param locale: the locale to get the resources for (default: '\x00\x00')
"""
self._analyse()
buff = '<?xml version="1.0" encoding="utf-8"?>\n'
buff += '<resources>\n'
try:
for i in self.values[package_name][locale]["string"]:
if any(map(i[1].__contains__, '<&>')):
value = '<![CDATA[%s]]>' % i[1]
else:
value = i[1]
buff += '<string name="{}">{}</string>\n'.format(i[0], value)
except KeyError:
pass
buff += '</resources>\n'
return buff.encode('utf-8')
def _get_data_by_field(self, field_number):
"""Return a data field by field number.
This is a useful method to get the values for fields that Ladybug
currently doesn't import by default. You can find list of fields by typing
EPWFields.fields
Args:
field_number: a value between 0 to 34 for different available epw fields.
Returns:
An annual Ladybug list
"""
if not self.is_data_loaded:
self._import_data()
# check input data
if not 0 <= field_number < self._num_of_fields:
raise ValueError("Field number should be between 0-%d" % self._num_of_fields)
return self._data[field_number]
def check_messages(*messages: str) -> Callable:
"""decorator to store messages that are handled by a checker method"""
def store_messages(func):
func.checks_msgs = messages
return func
return store_messages
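Usage sketch: the decorator only attaches the message ids to the function for later introspection by the framework; the checker class and message ids below are made up:

class FakeChecker:
    @check_messages('missing-docstring', 'empty-docstring')
    def visit_functiondef(self, node):
        pass

print(FakeChecker.visit_functiondef.checks_msgs)
# ('missing-docstring', 'empty-docstring')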
def create(self):
"""Deploy a cluster on Amazon's EKS Service configured
for Jupyterhub Deployments.
"""
steps = [
(self.create_role, (), {}),
(self.create_vpc, (), {}),
(self.create_cluster, (), {}),
(self.create_node_group, (), {}),
(self.create_spot_nodes, (), {}),
(self.create_utilities, (), {}),
]
# Execute creation.
for step in tqdm.tqdm(steps, ncols=70):
method, args, kwargs = step
method(*args, **kwargs)
def _submit_metrics(self, metrics, metric_name_and_type_by_property):
"""
Resolve metric names and types and submit it.
"""
for metric in metrics:
if (
metric.name not in metric_name_and_type_by_property
and metric.name.lower() not in metric_name_and_type_by_property
):
# Only report the metrics that were specified in the configuration
# Ignore added properties like 'Timestamp_Sys100NS', `Frequency_Sys100NS`, etc ...
continue
if metric_name_and_type_by_property.get(metric.name):
metric_name, metric_type = metric_name_and_type_by_property[metric.name]
elif metric_name_and_type_by_property.get(metric.name.lower()):
metric_name, metric_type = metric_name_and_type_by_property[metric.name.lower()]
else:
continue
try:
func = getattr(self, metric_type.lower())
except AttributeError:
raise Exception(u"Invalid metric type: {0}".format(metric_type))
func(metric_name, metric.value, metric.tags)
def netconf_config_change_changed_by_server_or_user_server_server(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
netconf_config_change = ET.SubElement(config, "netconf-config-change", xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-notifications")
changed_by = ET.SubElement(netconf_config_change, "changed-by")
server_or_user = ET.SubElement(changed_by, "server-or-user")
server = ET.SubElement(server_or_user, "server")
server = ET.SubElement(server, "server")
callback = kwargs.pop('callback', self._callback)
return callback(config)
def hr_dp996(self, value=None):
""" Corresponds to IDD Field `hr_dp996`
humidity ratio, calculated at standard atmospheric pressure
at elevation of station, corresponding to
Dew-point temperature corresponding to 99.6% annual cumulative
frequency of occurrence (cold conditions)
Args:
value (float): value for IDD Field `hr_dp996`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} needs to be of type float '
'for field `hr_dp996`'.format(value))
self._hr_dp996 = value
def items(self):
"""
On Python 2.7+:
D.items() -> a set-like object providing a view on D's items
On Python 2.6:
D.items() -> an iterator over D's items
"""
if ver == (2, 7):
return self.viewitems()
elif ver == (2, 6):
return self.iteritems()
elif ver >= (3, 0):
return self.items()
def _read_image_slice(self, arg):
"""
workhorse to read a slice
"""
if 'ndims' not in self._info:
raise ValueError("Attempt to slice empty extension")
if isinstance(arg, slice):
# one-dimensional, e.g. 2:20
return self._read_image_slice((arg,))
if not isinstance(arg, tuple):
raise ValueError("arguments must be slices, one for each "
"dimension, e.g. [2:5] or [2:5,8:25] etc.")
# should be a tuple of slices, one for each dimension
# e.g. [2:3, 8:100]
nd = len(arg)
if nd != self._info['ndims']:
raise ValueError("Got slice dimensions %d, "
"expected %d" % (nd, self._info['ndims']))
targ = arg
arg = []
for a in targ:
if isinstance(a, slice):
arg.append(a)
elif isinstance(a, int):
arg.append(slice(a, a+1, 1))
else:
raise ValueError("arguments must be slices, e.g. 2:12")
dims = self._info['dims']
arrdims = []
first = []
last = []
steps = []
# check the args and reverse dimensions since
# fits is backwards from numpy
dim = 0
for slc in arg:
start = slc.start
stop = slc.stop
step = slc.step
if start is None:
start = 0
if stop is None:
stop = dims[dim]
if step is None:
step = 1
if step < 1:
raise ValueError("slice steps must be >= 1")
if start < 0:
start = dims[dim] + start
if start < 0:
raise IndexError("Index out of bounds")
if stop < 0:
stop = dims[dim] + start + 1
# move to 1-offset
start = start + 1
if stop < start:
raise ValueError("python slices but include at least one "
"element, got %s" % slc)
if stop > dims[dim]:
stop = dims[dim]
first.append(start)
last.append(stop)
steps.append(step)
arrdims.append(stop-start+1)
dim += 1
first.reverse()
last.reverse()
steps.reverse()
first = numpy.array(first, dtype='i8')
last = numpy.array(last, dtype='i8')
steps = numpy.array(steps, dtype='i8')
npy_dtype = self._get_image_numpy_dtype()
array = numpy.zeros(arrdims, dtype=npy_dtype)
self._FITS.read_image_slice(self._ext+1, first, last, steps, array)
return array
def _push_condition(predicate):
"""As we enter new conditions, this pushes them on the predicate stack."""
global _depth
_check_under_condition()
_depth += 1
if predicate is not otherwise and len(predicate) > 1:
raise PyrtlError('all predicates for conditional assignments must wirevectors of len 1')
_conditions_list_stack[-1].append(predicate)
_conditions_list_stack.append([])
def validate(self, ticket=None):
"""
Validates all receipts matching this queryset.
Note that, due to how AFIP implements its numbering, this method is not
thread-safe, or even multiprocess-safe.
Because of this, it is possible that not all instances matching this
queryset are validated properly. Obviously, only successfully validated
receipts will be updated.
Returns a list of errors as returned from AFIP's webservices. An
exception is not raised because partial failures are possible.
Receipts that successfully validate will have a
:class:`~.ReceiptValidation` object attached to them with a validation
date and CAE information.
Already-validated receipts are ignored.
Attempting to validate an empty queryset will simply return an empty
list.
"""
# Skip any already-validated ones:
qs = self.filter(validation__isnull=True).check_groupable()
if qs.count() == 0:
return []
qs.order_by('issued_date', 'id')._assign_numbers()
return qs._validate(ticket)
def ssh_check_mic(self, mic_token, session_id, username=None):
"""
Verify the MIC token for a SSH2 message.
:param str mic_token: The MIC token received from the client
:param str session_id: The SSH session ID
:param str username: The name of the user who attempts to login
:return: None if the MIC check was successful
:raises: ``sspi.error`` -- if the MIC check failed
"""
self._session_id = session_id
self._username = username
if username is not None:
# server mode
mic_field = self._ssh_build_mic(
self._session_id,
self._username,
self._service,
self._auth_method,
)
# Verifies data and its signature. If verification fails, an
# sspi.error will be raised.
self._gss_srv_ctxt.verify(mic_field, mic_token)
else:
# for key exchange with gssapi-keyex
# client mode
# Verifies data and its signature. If verification fails, an
# sspi.error will be raised.
self._gss_ctxt.verify(self._session_id, mic_token)
def check_roles(self, account, aws_policies, aws_roles):
"""Iterate through the roles of a specific account and create or update the roles if they're missing or
do not match the roles from Git.
Args:
account (:obj:`Account`): The account to check roles on
aws_policies (:obj:`dict` of `str`: `dict`): A dictionary containing all the policies for the specific
account
aws_roles (:obj:`dict` of `str`: `dict`): A dictionary containing all the roles for the specific account
Returns:
`None`
"""
self.log.debug('Checking roles for {}'.format(account.account_name))
max_session_duration = self.dbconfig.get('role_timeout_in_hours', self.ns, 8) * 60 * 60
sess = get_aws_session(account)
iam = sess.client('iam')
# Build a list of default role policies and extra account specific role policies
account_roles = copy.deepcopy(self.cfg_roles)
if account.account_name in self.git_policies:
for role in self.git_policies[account.account_name]:
if role in account_roles:
account_roles[role]['policies'] += list(self.git_policies[account.account_name][role].keys())
for role_name, data in list(account_roles.items()):
if role_name not in aws_roles:
iam.create_role(
Path='/',
RoleName=role_name,
AssumeRolePolicyDocument=json.dumps(data['trust'], indent=4),
MaxSessionDuration=max_session_duration
)
self.log.info('Created role {}/{}'.format(account.account_name, role_name))
else:
try:
if aws_roles[role_name]['MaxSessionDuration'] != max_session_duration:
iam.update_role(
RoleName=aws_roles[role_name]['RoleName'],
MaxSessionDuration=max_session_duration
)
self.log.info('Adjusted MaxSessionDuration for role {} in account {} to {} seconds'.format(
role_name,
account.account_name,
max_session_duration
))
except ClientError:
self.log.exception('Unable to adjust MaxSessionDuration for role {} in account {}'.format(
role_name,
account.account_name
))
aws_role_policies = [x['PolicyName'] for x in iam.list_attached_role_policies(
RoleName=role_name)['AttachedPolicies']
]
aws_role_inline_policies = iam.list_role_policies(RoleName=role_name)['PolicyNames']
cfg_role_policies = data['policies']
missing_policies = list(set(cfg_role_policies) - set(aws_role_policies))
extra_policies = list(set(aws_role_policies) - set(cfg_role_policies))
if aws_role_inline_policies:
self.log.info('IAM Role {} on {} has the following inline policies: {}'.format(
role_name,
account.account_name,
', '.join(aws_role_inline_policies)
))
if self.dbconfig.get('delete_inline_policies', self.ns, False) and self.manage_roles:
for policy in aws_role_inline_policies:
iam.delete_role_policy(RoleName=role_name, PolicyName=policy)
auditlog(
event='iam.check_roles.delete_inline_role_policy',
actor=self.ns,
data={
'account': account.account_name,
'roleName': role_name,
'policy': policy
}
)
if missing_policies:
self.log.info('IAM Role {} on {} is missing the following policies: {}'.format(
role_name,
account.account_name,
', '.join(missing_policies)
))
if self.manage_roles:
for policy in missing_policies:
iam.attach_role_policy(RoleName=role_name, PolicyArn=aws_policies[policy]['Arn'])
auditlog(
event='iam.check_roles.attach_role_policy',
actor=self.ns,
data={
'account': account.account_name,
'roleName': role_name,
'policyArn': aws_policies[policy]['Arn']
}
)
if extra_policies:
self.log.info('IAM Role {} on {} has the following extra policies applied: {}'.format(
role_name,
account.account_name,
', '.join(extra_policies)
))
for policy in extra_policies:
if policy in aws_policies:
polArn = aws_policies[policy]['Arn']
elif policy in self.aws_managed_policies:
polArn = self.aws_managed_policies[policy]['Arn']
else:
polArn = None
self.log.info('IAM Role {} on {} has an unknown policy attached: {}'.format(
role_name,
account.account_name,
policy
))
if self.manage_roles and polArn:
iam.detach_role_policy(RoleName=role_name, PolicyArn=polArn)
auditlog(
event='iam.check_roles.detach_role_policy',
actor=self.ns,
data={
'account': account.account_name,
'roleName': role_name,
'policyArn': polArn
}
)
def updateCurrentValue(self, value):
"""
Disables snapping during the current value update to ensure a smooth
transition for node animations. Since this can only be called via
code, we don't need to worry about snapping to the grid for a user.
"""
xsnap = None
ysnap = None
if value != self.endValue():
xsnap = self.targetObject().isXSnappedToGrid()
ysnap = self.targetObject().isYSnappedToGrid()
self.targetObject().setXSnapToGrid(False)
self.targetObject().setYSnapToGrid(False)
super(XNodeAnimation, self).updateCurrentValue(value)
if value != self.endValue():
self.targetObject().setXSnapToGrid(xsnap)
self.targetObject().setYSnapToGrid(ysnap)
def min_rank(series, ascending=True):
"""
Equivalent to `series.rank(method='min', ascending=ascending)`.
Args:
series: column to rank.
Kwargs:
ascending (bool): whether to rank in ascending order (default is `True`).
"""
ranks = series.rank(method='min', ascending=ascending)
return ranks
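Hedged usage sketch with a small pandas Series; under method='min', tied values share the lowest of their ranks:

import pandas as pd

s = pd.Series([3, 1, 2, 1])
print(min_rank(s).tolist())                    # [4.0, 1.0, 3.0, 1.0]
print(min_rank(s, ascending=False).tolist())   # [1.0, 3.0, 2.0, 3.0]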
def parse_sidebar(self, user_page):
"""Parses the DOM and returns user attributes in the sidebar.
:type user_page: :class:`bs4.BeautifulSoup`
:param user_page: MAL user page's DOM
:rtype: dict
:return: User attributes
:raises: :class:`.InvalidUserError`, :class:`.MalformedUserPageError`
"""
user_info = {}
# if MAL says the user doesn't exist, raise an InvalidUserError.
error_tag = user_page.find(u'div', {u'class': u'badresult'})
if error_tag:
raise InvalidUserError(self.username)
try:
username_tag = user_page.find(u'div', {u'id': u'contentWrapper'}).find(u'h1')
if not username_tag.find(u'div'):
# otherwise, raise a MalformedUserPageError.
raise MalformedUserPageError(self.username, user_page, message=u"Could not find title div")
except:
if not self.session.suppress_parse_exceptions:
raise
info_panel_first = user_page.find(u'div', {u'id': u'content'}).find(u'table').find(u'td')
try:
picture_tag = info_panel_first.find(u'img')
user_info[u'picture'] = picture_tag.get(u'src').decode('utf-8')
except:
if not self.session.suppress_parse_exceptions:
raise
try:
# the user ID is always present in the blogfeed link.
all_comments_link = info_panel_first.find(u'a', text=u'Blog Feed')
user_info[u'id'] = int(all_comments_link.get(u'href').split(u'&id=')[1])
except:
if not self.session.suppress_parse_exceptions:
raise
infobar_headers = info_panel_first.find_all(u'div', {u'class': u'normal_header'})
if infobar_headers:
try:
favorite_anime_header = infobar_headers[0]
if u'Favorite Anime' in favorite_anime_header.text:
user_info[u'favorite_anime'] = []
favorite_anime_table = favorite_anime_header.nextSibling.nextSibling
if favorite_anime_table.name == u'table':
for row in favorite_anime_table.find_all(u'tr'):
cols = row.find_all(u'td')
anime_link = cols[1].find(u'a')
link_parts = anime_link.get(u'href').split(u'/')
# of the form /anime/467/Ghost_in_the_Shell:_Stand_Alone_Complex
user_info[u'favorite_anime'].append(self.session.anime(int(link_parts[2])).set({u'title': anime_link.text}))
except:
if not self.session.suppress_parse_exceptions:
raise
try:
favorite_manga_header = infobar_headers[1]
if u'Favorite Manga' in favorite_manga_header.text:
user_info[u'favorite_manga'] = []
favorite_manga_table = favorite_manga_header.nextSibling.nextSibling
if favorite_manga_table.name == u'table':
for row in favorite_manga_table.find_all(u'tr'):
cols = row.find_all(u'td')
manga_link = cols[1].find(u'a')
link_parts = manga_link.get(u'href').split(u'/')
# of the form /manga/467/Ghost_in_the_Shell:_Stand_Alone_Complex
user_info[u'favorite_manga'].append(self.session.manga(int(link_parts[2])).set({u'title': manga_link.text}))
except:
if not self.session.suppress_parse_exceptions:
raise
try:
favorite_character_header = infobar_headers[2]
if u'Favorite Characters' in favorite_character_header.text:
user_info[u'favorite_characters'] = {}
favorite_character_table = favorite_character_header.nextSibling.nextSibling
if favorite_character_table.name == u'table':
for row in favorite_character_table.find_all(u'tr'):
cols = row.find_all(u'td')
character_link = cols[1].find(u'a')
link_parts = character_link.get(u'href').split(u'/')
# of the form /character/467/Ghost_in_the_Shell:_Stand_Alone_Complex
character = self.session.character(int(link_parts[2])).set({u'title': character_link.text})
media_link = cols[1].find(u'div').find(u'a')
link_parts = media_link.get(u'href').split(u'/')
# of the form /anime|manga/467
anime = getattr(self.session, link_parts[1])(int(link_parts[2])).set({u'title': media_link.text})
user_info[u'favorite_characters'][character] = anime
except:
if not self.session.suppress_parse_exceptions:
raise
try:
favorite_people_header = infobar_headers[3]
if u'Favorite People' in favorite_people_header.text:
user_info[u'favorite_people'] = []
favorite_person_table = favorite_people_header.nextSibling.nextSibling
if favorite_person_table.name == u'table':
for row in favorite_person_table.find_all(u'tr'):
cols = row.find_all(u'td')
person_link = cols[1].find(u'a')
link_parts = person_link.get(u'href').split(u'/')
# of the form /person/467/Ghost_in_the_Shell:_Stand_Alone_Complex
user_info[u'favorite_people'].append(self.session.person(int(link_parts[2])).set({u'title': person_link.text}))
except:
if not self.session.suppress_parse_exceptions:
raise
return user_info
def process_request(self, request, client_address):
"""
Call finish_request.
"""
self.finish_request(request, client_address)
self.shutdown_request(request)
def get_start_and_end_time(self, ref=None):
"""Specific function to get start time and end time for MonthWeekDayDaterange
:param ref: time in seconds
:type ref: int | None
:return: tuple with start and end time
:rtype: tuple
"""
now = time.localtime(ref)
if self.syear == 0:
self.syear = now.tm_year
day_start = find_day_by_weekday_offset(self.syear, self.smon, self.swday, self.swday_offset)
start_time = get_start_of_day(self.syear, self.smon, day_start)
if self.eyear == 0:
self.eyear = now.tm_year
day_end = find_day_by_weekday_offset(self.eyear, self.emon, self.ewday, self.ewday_offset)
end_time = get_end_of_day(self.eyear, self.emon, day_end)
now_epoch = time.mktime(now)
if start_time > end_time: # the period is between years
if now_epoch > end_time: # check for next year
day_end = find_day_by_weekday_offset(self.eyear + 1,
self.emon, self.ewday, self.ewday_offset)
end_time = get_end_of_day(self.eyear + 1, self.emon, day_end)
else:
# it s just that the start was the last year
day_start = find_day_by_weekday_offset(self.syear - 1,
self.smon, self.swday, self.swday_offset)
start_time = get_start_of_day(self.syear - 1, self.smon, day_start)
else:
if now_epoch > end_time:
# just have to check for next year if necessary
day_start = find_day_by_weekday_offset(self.syear + 1,
self.smon, self.swday, self.swday_offset)
start_time = get_start_of_day(self.syear + 1, self.smon, day_start)
day_end = find_day_by_weekday_offset(self.eyear + 1,
self.emon, self.ewday, self.ewday_offset)
end_time = get_end_of_day(self.eyear + 1, self.emon, day_end)
return (start_time, end_time)
def cmd(send, msg, args):
"""Clears the verified admin list
Syntax: {command}
"""
args['db'].query(Permissions).update({"registered": False})
args['handler'].get_admins()
send("Verified admins reset.") | Clears the verified admin list
Syntax: {command} |
def get_context(self, url, expiration):
"""
Build template context with formatted feed content
"""
self._feed = self.get(url, expiration)
return {
self.feed_context_name: self.format_feed_content(self._feed),
}
def gdaldem_mem_ma(ma, ds=None, res=None, extent=None, srs=None, processing='hillshade', returnma=False, computeEdges=False):
"""
Wrapper to allow gdaldem calculations for arbitrary NumPy masked array input
Untested, work in progress placeholder
Should only need to specify res; can calculate local gt, cartesian srs
"""
if ds is None:
ds = mem_ds(res, extent, srs=None, dtype=gdal.GDT_Float32)
else:
ds = mem_ds_copy(ds)
b = ds.GetRasterBand(1)
b.WriteArray(ma)
out = gdaldem_mem_ds(ds, processing=processing, returnma=returnma)
return out
def check_file(self, filename):
# type: (str) -> bool
"""
Overrides :py:meth:`.Config.check_file`
"""
can_read = super(SecuredConfig, self).check_file(filename)
if not can_read:
return False
mode = get_stat(filename).st_mode
if (mode & stat.S_IRGRP) or (mode & stat.S_IROTH):
msg = "File %r is not secure enough. Change it's mode to 600"
self._log.warning(msg, filename)
return False
return True
def get_subnets_count(context, filters=None):
"""Return the number of subnets.
The result depends on the identity of the user making the request
(as indicated by the context) as well as any filters.
: param context: neutron api request context
: param filters: a dictionary with keys that are valid keys for
a network as listed in the RESOURCE_ATTRIBUTE_MAP object
in neutron/api/v2/attributes.py. Values in this dictionary
are an iterable containing values that will be used for an exact
match comparison for that value. Each result returned by this
function will have matched one of the values for each key in
filters.
NOTE: this method is optional, as it was not part of the originally
defined plugin API.
"""
LOG.info("get_subnets_count for tenant %s with filters %s" %
(context.tenant_id, filters))
return db_api.subnet_count_all(context, **filters)
def fix_repeat_dt(dt_list, offset_s=0.001):
"""Add some small offset to remove duplicate times
Needed for xarray interp, which expects monotonically increasing times
"""
idx = (np.diff(dt_list) == timedelta(0))
while np.any(idx):
dt_list[idx.nonzero()[0] + 1] += timedelta(seconds=offset_s)
idx = (np.diff(dt_list) == timedelta(0))
return dt_list
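A standalone check with a duplicated timestamp, assuming the function above is in scope; it nudges the second occurrence by the offset so the diffs become strictly positive:

import numpy as np
from datetime import datetime, timedelta

dt_list = np.array([datetime(2020, 1, 1), datetime(2020, 1, 1), datetime(2020, 1, 1, 0, 1)])
fixed = fix_repeat_dt(dt_list)
print(np.any(np.diff(fixed) == timedelta(0)))   # False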
def run_timeit(self, stmt, setup):
""" Create the function call statement as a string used for timeit. """
_timer = timeit.Timer(stmt=stmt, setup=setup)
trials = _timer.repeat(self.timeit_repeat, self.timeit_number)
self.time_average_seconds = sum(trials) / len(trials) / self.timeit_number
# Convert into reasonable time units
time_avg = convert_time_units(self.time_average_seconds)
return time_avg
def password_get(username=None):
"""
Retrieves a password from the keychain based on the environment and
configuration parameter pair.
If no matching credential is found, a LookupError is raised.
"""
password = keyring.get_password('supernova', username)
if password is None:
split_username = tuple(username.split(':'))
msg = ("Couldn't find a credential for {0}:{1}. You need to set one "
"with: supernova-keyring -s {0} {1}").format(*split_username)
raise LookupError(msg)
else:
return password.encode('ascii')
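Hedged usage sketch; the 'supernova' service name and the environment:parameter key convention come from the code above, while the credential value is made up:

import keyring

keyring.set_password('supernova', 'prod:OS_PASSWORD', 'hunter2')
print(password_get('prod:OS_PASSWORD'))   # b'hunter2'
# An unknown key raises LookupError with a hint to run: supernova-keyring -s prod OS_PASSWORD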
def status_line(self):
"""
Returns a status line for an item.
Only really interesting when called for a draft
item as it can tell you if the draft is the same as
another version.
"""
date = self.date_published
status = self.state.title()
if self.state == self.DRAFT:
# Check if this item has changed since
# our last publish
status = "Draft saved"
date = self.last_save
if date and self.last_save == self.last_scheduled:
# We need to figure out if the item it is based on
# is either live now or will be live at some point.
# If last_scheduled is less than or equal to
# v_last_save this item is or will go live
# at some point. Otherwise it won't
# so we'll leave state as draft.
if self.v_last_save:
if self.last_scheduled >= self.v_last_save:
status = self.PUBLISHED.title()
# The date this was scheduled is greater than
# what is currently live, this will go live at
# some point
if self.last_scheduled > self.v_last_save:
status = "Publish Scheduled"
else:
status = "Publish Scheduled"
date = self.date_published
if date:
status = "%s: %s" % (status, formats.date_format(date, "SHORT_DATE_FORMAT"))
return status | Returns a status line for an item.
Only really interesting when called for a draft
item as it can tell you if the draft is the same as
another version. |
def autoparal_run(self):
"""
Find an optimal set of parameters for the execution of the task
This method can change the ABINIT input variables and/or the
submission parameters e.g. the number of CPUs for MPI and OpenMp.
Set:
self.pconfs where pconfs is a :class:`ParalHints` object with the configuration reported by
autoparal and optimal is the optimal configuration selected.
Returns 0 if success
"""
policy = self.manager.policy
if policy.autoparal == 0: # or policy.max_ncpus in [None, 1]:
logger.info("Nothing to do in autoparal, returning (None, None)")
return 0
if policy.autoparal != 1:
raise NotImplementedError("autoparal != 1")
############################################################################
# Run ABINIT in sequential to get the possible configurations with max_ncpus
############################################################################
# Set the variables for automatic parallelization
# Will get all the possible configurations up to max_ncpus
# Return immediately if max_ncpus == 1
max_ncpus = self.manager.max_cores
if max_ncpus == 1: return 0
autoparal_vars = dict(autoparal=policy.autoparal, max_ncpus=max_ncpus, mem_test=0)
self.set_vars(autoparal_vars)
# Run the job in a shell subprocess with mpi_procs = 1
# we don't want to make a request to the queue manager for this simple job!
# Return code is always != 0
process = self.manager.to_shell_manager(mpi_procs=1).launch(self)
self.history.pop()
retcode = process.wait()
# To avoid: ResourceWarning: unclosed file <_io.BufferedReader name=87> in py3k
process.stderr.close()
#process.stdout.close()
# Remove the variables added for the automatic parallelization
self.input.remove_vars(list(autoparal_vars.keys()))
##############################################################
# Parse the autoparal configurations from the main output file
##############################################################
parser = ParalHintsParser()
try:
pconfs = parser.parse(self.output_file.path)
except parser.Error:
# In principle Abinit should have written a complete log file
# because we called .wait() but sometimes the Yaml doc is incomplete and
# the parser raises. Let's wait 5 secs and then try again.
time.sleep(5)
try:
pconfs = parser.parse(self.output_file.path)
except parser.Error:
logger.critical("Error while parsing Autoparal section:\n%s" % straceback())
return 2
######################################################
# Select the optimal configuration according to policy
######################################################
optconf = self.find_optconf(pconfs)
####################################################
# Change the input file and/or the submission script
####################################################
self.set_vars(optconf.vars)
# Write autoparal configurations to JSON file.
d = pconfs.as_dict()
d["optimal_conf"] = optconf
json_pretty_dump(d, os.path.join(self.workdir, "autoparal.json"))
##############
# Finalization
##############
# Reset the status, remove garbage files ...
self.set_status(self.S_INIT, msg='finished autoparal run')
# Remove the output file since Abinit likes to create new files
# with extension .outA, .outB if the file already exists.
os.remove(self.output_file.path)
os.remove(self.log_file.path)
os.remove(self.stderr_file.path)
return 0 | Find an optimal set of parameters for the execution of the task
This method can change the ABINIT input variables and/or the
submission parameters e.g. the number of CPUs for MPI and OpenMp.
Set:
self.pconfs where pconfs is a :class:`ParalHints` object with the configuration reported by
autoparal and optimal is the optimal configuration selected.
Returns 0 if success |
def from_pubkey(cls: Type[CRCPubkeyType], pubkey: str) -> CRCPubkeyType:
"""
Return CRCPubkey instance from public key string
:param pubkey: Public key
:return:
"""
hash_root = hashlib.sha256()
hash_root.update(base58.b58decode(pubkey))
hash_squared = hashlib.sha256()
hash_squared.update(hash_root.digest())
b58_checksum = ensure_str(base58.b58encode(hash_squared.digest()))
crc = b58_checksum[:3]
return cls(pubkey, crc) | Return CRCPubkey instance from public key string
:param pubkey: Public key
:return: |
def get_output(src):
"""
parse lines looking for commands
"""
output = ''
lines = open(src.path, 'rU').readlines()
for line in lines:
m = re.match(config.import_regex,line)
if m:
include_path = os.path.abspath(src.dir + '/' + m.group('script'));
if include_path not in config.sources:
script = Script(include_path)
script.parents.append(src)
config.sources[script.path] = script
include_file = config.sources[include_path]
# 'require' statements don't include the file if it has already been included
if include_file not in config.stack or m.group('command') == 'import':
config.stack.append(include_file)
output += get_output(include_file)
else:
output += line
return output | parse lines looking for commands |
def do_transition_for(brain_or_object, transition):
"""Performs a workflow transition for the passed in object.
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:returns: The object where the transition was performed
"""
if not isinstance(transition, basestring):
fail("Transition type needs to be string, got '%s'" % type(transition))
obj = get_object(brain_or_object)
ploneapi.content.transition(obj, transition)
return obj | Performs a workflow transition for the passed in object.
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:returns: The object where the transition was performed
def as_string(value):
"""Convert a value to a Unicode object for matching with a query.
None becomes the empty string. Bytestrings are silently decoded.
"""
if six.PY2:
buffer_types = buffer, memoryview # noqa: F821
else:
buffer_types = memoryview
if value is None:
return u''
elif isinstance(value, buffer_types):
return bytes(value).decode('utf8', 'ignore')
elif isinstance(value, bytes):
return value.decode('utf8', 'ignore')
else:
return six.text_type(value) | Convert a value to a Unicode object for matching with a query.
None becomes the empty string. Bytestrings are silently decoded. |
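Illustrative calls, assuming six is available as the function requires:
>>> as_string(None)      # -> u'' (the empty string)
>>> as_string(b'hi')     # bytes are decoded as UTF-8 -> u'hi'
>>> as_string(42)        # anything else goes through six.text_type -> u'42'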
def expand_cause_repertoire(self, new_purview=None):
"""See |Subsystem.expand_repertoire()|."""
return self.subsystem.expand_cause_repertoire(
self.cause.repertoire, new_purview) | See |Subsystem.expand_repertoire()|. |
def visibleCount(self):
"""
Returns the number of visible items in this list.
:return <int>
"""
return sum(int(not self.item(i).isHidden()) for i in range(self.count())) | Returns the number of visible items in this list.
:return <int> |
def fit_predict(self, y, exogenous=None, n_periods=10, **fit_args):
"""Fit an ARIMA to a vector, ``y``, of observations with an
optional matrix of ``exogenous`` variables, and then generate
predictions.
Parameters
----------
y : array-like or iterable, shape=(n_samples,)
The time-series to which to fit the ``ARIMA`` estimator. This may
either be a Pandas ``Series`` object (statsmodels can internally
use the dates in the index), or a numpy array. This should be a
one-dimensional array of floats, and should not contain any
``np.nan`` or ``np.inf`` values.
exogenous : array-like, shape=[n_obs, n_vars], optional (default=None)
An optional 2-d array of exogenous variables. If provided, these
variables are used as additional features in the regression
operation. This should not include a constant or trend. Note that
if an ``ARIMA`` is fit on exogenous features, it must be provided
exogenous features for making predictions.
n_periods : int, optional (default=10)
The number of periods in the future to forecast.
fit_args : dict or kwargs, optional (default=None)
Any keyword args to pass to the fit method.
"""
self.fit(y, exogenous, **fit_args)
return self.predict(n_periods=n_periods, exogenous=exogenous) | Fit an ARIMA to a vector, ``y``, of observations with an
optional matrix of ``exogenous`` variables, and then generate
predictions.
Parameters
----------
y : array-like or iterable, shape=(n_samples,)
The time-series to which to fit the ``ARIMA`` estimator. This may
either be a Pandas ``Series`` object (statsmodels can internally
use the dates in the index), or a numpy array. This should be a
one-dimensional array of floats, and should not contain any
``np.nan`` or ``np.inf`` values.
exogenous : array-like, shape=[n_obs, n_vars], optional (default=None)
An optional 2-d array of exogenous variables. If provided, these
variables are used as additional features in the regression
operation. This should not include a constant or trend. Note that
if an ``ARIMA`` is fit on exogenous features, it must be provided
exogenous features for making predictions.
n_periods : int, optional (default=10)
The number of periods in the future to forecast.
fit_args : dict or kwargs, optional (default=None)
Any keyword args to pass to the fit method. |
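A hedged sketch of a typical call; the import path and ARIMA order below are assumptions for illustration, not taken from the snippet above:
>>> import numpy as np
>>> from pmdarima.arima import ARIMA   # assumed import path
>>> y = np.random.RandomState(0).normal(size=100).cumsum()
>>> model = ARIMA(order=(1, 1, 1))
>>> forecast = model.fit_predict(y, n_periods=5)   # fit on y, then forecast 5 periods ahead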
def REV(self, params):
"""
REV Ra, Rb
Reverse the byte order in register Rb and store the result in Ra
"""
Ra, Rb = self.get_two_parameters(self.TWO_PARAMETER_COMMA_SEPARATED, params)
self.check_arguments(low_registers=(Ra, Rb))
def REV_func():
self.register[Ra] = ((self.register[Rb] & 0xFF000000) >> 24) | \
((self.register[Rb] & 0x00FF0000) >> 8) | \
((self.register[Rb] & 0x0000FF00) << 8) | \
((self.register[Rb] & 0x000000FF) << 24)
return REV_func | REV Ra, Rb
Reverse the byte order in register Rb and store the result in Ra |
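A worked example of the byte swap the generated REV_func performs, written in plain Python independent of the assembler class:
>>> value = 0x12345678
>>> swapped = ((value & 0xFF000000) >> 24) | ((value & 0x00FF0000) >> 8) | ((value & 0x0000FF00) << 8) | ((value & 0x000000FF) << 24)
>>> hex(swapped)
'0x78563412'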
def get_schema_input_format(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_schema = ET.Element("get_schema")
config = get_schema
input = ET.SubElement(get_schema, "input")
format = ET.SubElement(input, "format")
format.text = kwargs.pop('format')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
def execute(self, points, *args, **kwargs):
# TODO array of Points, (x, y) pairs of shape (N, 2)
"""
Parameters
----------
points: dict
Returns
-------
Prediction array
Variance array
"""
if isinstance(self.model, OrdinaryKriging) or \
isinstance(self.model, OrdinaryKriging3D):
prediction, variance = \
self.model.execute('points',
n_closest_points=self.n_closest_points,
backend='loop',
**points)
else:
print('n_closest_points will be ignored for UniversalKriging')
prediction, variance = \
self.model.execute('points', backend='loop', **points)
return prediction, variance | Parameters
----------
points: dict
Returns
-------
Prediction array
Variance array |
def call_ck(i):
"""
Input: {
Input for CK
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
(stdout) - stdout, if available
(stderr) - stderr, if available
(std) - stdout+stderr
}
"""
import subprocess
import re
# Check action
action=i.get('action','')
if action=='':
return {'return':1, 'error':'action is not defined'}
# Check that no special characters, otherwise can run any command from CMD
if not re.match('^[A-Za-z0-9-_]*$', action):
return {'return':1, 'error':'action contains illegal characters'}
# Generate tmp file
fd, fn=tempfile.mkstemp(suffix='.tmp', prefix='ck-') # suffix is important - CK will delete such file!
os.close(fd)
dc=i.get('detach_console','')
if dc=='yes': i['out']='con' # If detach, output as console
# Prepare dummy output
rr={'return':0}
rr['stdout']=''
rr['stderr']=''
# Save json to temporary file
rx=ck.save_json_to_file({'json_file':fn, 'dict':i})
if rx['return']>0: return rx
# Prepare command line
cmd='ck '+action+' @'+fn
if dc=='yes':
# Check platform
rx=ck.get_os_ck({})
if rx['return']>0: return rx
plat=rx['platform']
dci=ck.cfg.get('detached_console',{}).get(plat,{})
dcmd=dci.get('cmd','')
if dcmd=='':
return {'return':1, 'error':'detached console is requested but cmd is not defined in kernel configuration'}
dcmd=dcmd.replace('$#cmd#$', cmd)
if dci.get('use_create_new_console_flag','')=='yes':
process=subprocess.Popen(dcmd, stdin=None, stdout=None, stderr=None, shell=True, close_fds=True, creationflags=subprocess.CREATE_NEW_CONSOLE)
else:
# Will need to do the forking
try:
pid=os.fork()
except OSError as e:
return {'return':1, 'error':'forking detached console failed ('+format(e)+')'}
if pid==0:
os.setsid()
pid=os.fork()
if pid!=0: os._exit(0)
try:
maxfd=os.sysconf("SC_OPEN_MAX")
except (AttributeError, ValueError):
maxfd=1024
for fd in range(maxfd):
try:
os.close(fd)
except OSError:
pass
os.open('/dev/null', os.O_RDWR)
os.dup2(0, 1)
os.dup2(0, 2)
# Normally child process
process=os.system(dcmd)
os._exit(0)
stdout=ck.cfg.get('detached_console_html', 'Console was detached ...')
stderr=''
else:
process=subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
stdout,stderr=process.communicate()
try: stdout=stdout.decode('utf8')
except Exception as e: pass
try: stderr=stderr.decode('utf8')
except Exception as e: pass
rr['std']=stdout+stderr
rr['stdout']=stdout
rr['stderr']=stderr
return rr | Input: {
Input for CK
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
(stdout) - stdout, if available
(stderr) - stderr, if available
(std) - stdout+stderr
} |
def instanceStarted(self, *args, **kwargs):
"""
Report an instance starting
An instance will report in by giving its instance id as well
as its security token. The token is given and checked to ensure
that it matches a real token that exists to ensure that random
machines do not check in. We could generate a different token
but that seems like overkill
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["instanceStarted"], *args, **kwargs) | Report an instance starting
An instance will report in by giving its instance id as well
as its security token. The token is given and checked to ensure
that it matches a real token that exists to ensure that random
machines do not check in. We could generate a different token
but that seems like overkill
This method is ``stable`` |
def instancelist(obj_list, check=False, shared_attrs=None):
"""
Executes methods and attribute calls on a list of objects of the same type
Bundles a list of object of the same type into a single object.
The new object contains the same functions as each original object
but applies them to each element of the list independently when called.
CommandLine:
python -m utool.util_dev instancelist
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dev import * # NOQA
>>> import utool as ut
>>> obj_list = ['hi', 'bye', 'foo']
>>> self = ut.instancelist(obj_list, check=False)
>>> print(self)
>>> print(self.upper())
>>> print(self.isalpha())
"""
class InstanceList_(object):
def __init__(self, obj_list, shared_attrs=None):
self._obj_list = []
self._shared_public_attrs = []
self._example_type = None
if len(obj_list) > 0:
import utool as ut
self._obj_list = obj_list
example_obj = obj_list[0]
example_type = type(example_obj)
self._example_type = example_type
if shared_attrs is None:
if check:
attrsgen = [set(dir(obj)) for obj in obj_list]
shared_attrs = list(reduce(set.intersection, attrsgen))
else:
shared_attrs = dir(example_obj)
#allowed = ['__getitem__'] # TODO, put in metaclass
allowed = []
self._shared_public_attrs = [
a for a in shared_attrs
if a in allowed or not a.startswith('_')
]
for attrname in self._shared_public_attrs:
attrtype = getattr(example_type, attrname, None)
if attrtype is not None and isinstance(attrtype, property):
# need to do this as metaclass
setattr(InstanceList_, attrname,
property(self._define_prop(attrname)))
else:
func = self._define_func(attrname)
ut.inject_func_as_method(self, func, attrname)
def __nice__(self):
if self._example_type is None:
typename = 'object'
else:
typename = self._example_type.__name__
return 'of %d %s(s)' % (len(self._obj_list), typename)
def __repr__(self):
classname = self.__class__.__name__
devnice = self.__nice__()
return '<%s(%s) at %s>' % (classname, devnice, hex(id(self)))
def __str__(self):
classname = self.__class__.__name__
devnice = self.__nice__()
return '<%s(%s)>' % (classname, devnice)
def __getitem__(self, key):
# TODO, put in metaclass
return self._map_method('__getitem__', key)
def _define_func(self, attrname):
import utool as ut
def _wrapper(self, *args, **kwargs):
return self._map_method(attrname, *args, **kwargs)
ut.set_funcname(_wrapper, attrname)
return _wrapper
def _map_method(self, attrname, *args, **kwargs):
mapped_vals = [getattr(obj, attrname)(*args, **kwargs)
for obj in self._obj_list]
return mapped_vals
def _define_prop(self, attrname):
import utool as ut
def _getter(self):
return self._map_property(attrname)
ut.set_funcname(_getter, 'get_' + attrname)
return _getter
def _map_property(self, attrname):
mapped_vals = [getattr(obj, attrname) for obj in self._obj_list]
return mapped_vals
return InstanceList_(obj_list, shared_attrs) | Executes methods and attribute calls on a list of objects of the same type
Bundles a list of object of the same type into a single object.
The new object contains the same functions as each original object
but applies them to each element of the list independently when called.
CommandLine:
python -m utool.util_dev instancelist
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dev import * # NOQA
>>> import utool as ut
>>> obj_list = ['hi', 'bye', 'foo']
>>> self = ut.instancelist(obj_list, check=False)
>>> print(self)
>>> print(self.upper())
>>> print(self.isalpha()) |
def _head(self, uri):
"""
Handles the communication with the API when performing a HEAD request
on a specific resource managed by this class. Returns the headers
contained in the response.
"""
resp, resp_body = self.api.method_head(uri)
return resp | Handles the communication with the API when performing a HEAD request
on a specific resource managed by this class. Returns the headers
contained in the response. |
def _get_and_assert_slice_param(url_dict, param_name, default_int):
"""Return ``param_str`` converted to an int.
If str cannot be converted to int or int is not zero or positive, raise
InvalidRequest.
"""
param_str = url_dict['query'].get(param_name, default_int)
try:
n = int(param_str)
except ValueError:
raise d1_common.types.exceptions.InvalidRequest(
0,
'Slice parameter is not a valid integer. {}="{}"'.format(
param_name, param_str
),
)
if n < 0:
raise d1_common.types.exceptions.InvalidRequest(
0,
'Slice parameter cannot be a negative number. {}="{}"'.format(
param_name, param_str
),
)
return n | Return ``param_str`` converted to an int.
If str cannot be converted to int or int is not zero or positive, raise
InvalidRequest. |
def inputs(ctx, client, revision, paths):
r"""Show inputs files in the repository.
<PATHS> Files to show. If no files are given all input files are shown.
"""
from renku.models.provenance import ProcessRun
graph = Graph(client)
paths = set(paths)
nodes = graph.build(revision=revision)
commits = {node.commit for node in nodes}
candidates = {(node.commit, node.path)
for node in nodes if not paths or node.path in paths}
input_paths = set()
for commit in commits:
activity = graph.activities[commit]
if isinstance(activity, ProcessRun):
for usage in activity.qualified_usage:
for entity in usage.entity.entities:
path = str((usage.client.path / entity.path).relative_to(
client.path
))
usage_key = (entity.commit, entity.path)
if path not in input_paths and usage_key in candidates:
input_paths.add(path)
click.echo('\n'.join(graph._format_path(path) for path in input_paths))
ctx.exit(0 if not paths or len(input_paths) == len(paths) else 1) | r"""Show input files in the repository.
<PATHS> Files to show. If no files are given all input files are shown. |
def _expon_solve_lam_from_mu(mu, b):
"""
For the expon_uptrunc, given mu and b, return lam.
Similar to geom_uptrunc
"""
def lam_eq(lam, mu, b):
# Small offset added to denominator to avoid 0/0 errors
lam, mu, b = Decimal(lam), Decimal(mu), Decimal(b)
return ( (1 - (lam*b + 1) * np.exp(-lam*b)) /
(lam - lam * np.exp(-lam*b) + Decimal(1e-32)) - mu )
return optim.brentq(lam_eq, -100, 100, args=(mu, b), disp=True) | For the expon_uptrunc, given mu and b, return lam.
Similar to geom_uptrunc |
def GaussianLogDensity(x, mu, log_var, name='GaussianLogDensity', EPSILON = 1e-6):
'''GaussianLogDensity loss calculation for layer wise loss
'''
c = mx.sym.ones_like(log_var)*2.0 * 3.1416
c = mx.symbol.log(c)
var = mx.sym.exp(log_var)
x_mu2 = mx.symbol.square(x - mu) # [Issue] not sure the dim works or not?
x_mu2_over_var = mx.symbol.broadcast_div(x_mu2, var + EPSILON)
log_prob = -0.5 * (c + log_var + x_mu2_over_var)
log_prob = mx.symbol.sum(log_prob, axis=1, name=name) # keep_dims=True,
return log_prob | GaussianLogDensity loss calculation for layer wise loss |
def size(self):
"""The size of the schema. If the underlying data source changes, it may be outdated.
"""
if self._size is None:
self._size = 0
for csv_file in self.files:
self._size += sum(1 if line else 0 for line in _util.open_local_or_gcs(csv_file, 'r'))
return self._size | The size of the schema. If the underlying data source changes, it may be outdated. |
def reflectance(self, band):
"""
:param band: An optical band, i.e. 1-5, 7
:return: At satellite reflectance, [-]
"""
if band == 6:
raise ValueError('LT5 reflectance must be other than band 6')
rad = self.radiance(band)
esun = self.ex_atm_irrad[band - 1]
toa_reflect = (pi * rad * self.earth_sun_dist ** 2) / (esun * cos(self.solar_zenith_rad))
return toa_reflect | :param band: An optical band, i.e. 1-5, 7
:return: At satellite reflectance, [-] |
def _scrollView( self, value ):
"""
Updates the gantt view scrolling to the input value.
:param value | <int>
"""
if self._scrolling:
return
view_bar = self.uiGanttVIEW.verticalScrollBar()
self._scrolling = True
view_bar.setValue(value)
self._scrolling = False | Updates the gantt view scrolling to the inputed value.
:param value | <int> |
def _init_db(self):
"""Creates the database tables."""
with self._get_db() as db:
with open(self.schemapath) as f:
db.cursor().executescript(f.read())
db.commit() | Creates the database tables. |
def timestampFormat(self, timestampFormat):
"""
Setter to _timestampFormat. Formatting string for conversion of timestamps to QtCore.QDateTime
Raises:
TypeError: if timestampFormat is not of type unicode.
Args:
timestampFormat (unicode): assign timestampFormat to _timestampFormat.
Formatting string for conversion of timestamps to QtCore.QDateTime. Used in data method.
"""
if not isinstance(timestampFormat, str):
raise TypeError('not of type unicode')
#assert isinstance(timestampFormat, unicode) or timestampFormat.__class__.__name__ == "DateFormat", "not of type unicode"
self._timestampFormat = timestampFormat | Setter to _timestampFormat. Formatting string for conversion of timestamps to QtCore.QDateTime
Raises:
AssertionError: if timestampFormat is not of type unicode.
Args:
timestampFormat (unicode): assign timestampFormat to _timestampFormat.
Formatting string for conversion of timestamps to QtCore.QDateTime. Used in data method. |
def WriteSignedBinaryBlobs(binary_urn,
blobs,
token = None):
"""Saves signed blobs to the datastore.
If a signed binary with the given URN already exists, its contents will get
overwritten.
Args:
binary_urn: RDFURN that should serve as a unique identifier for the binary.
blobs: An Iterable of signed blobs to write to the datastore.
token: ACL token to use with the legacy (non-relational) datastore.
"""
if _ShouldUseLegacyDatastore():
aff4.FACTORY.Delete(binary_urn, token=token)
with data_store.DB.GetMutationPool() as mutation_pool:
with aff4.FACTORY.Create(
binary_urn,
collects.GRRSignedBlob,
mode="w",
mutation_pool=mutation_pool,
token=token) as fd:
for blob in blobs:
fd.Add(blob, mutation_pool=mutation_pool)
if data_store.RelationalDBEnabled():
blob_references = rdf_objects.BlobReferences()
current_offset = 0
for blob in blobs:
blob_id = data_store.BLOBS.WriteBlobWithUnknownHash(
blob.SerializeToString())
blob_references.items.Append(
rdf_objects.BlobReference(
offset=current_offset, size=len(blob.data), blob_id=blob_id))
current_offset += len(blob.data)
data_store.REL_DB.WriteSignedBinaryReferences(
_SignedBinaryIDFromURN(binary_urn), blob_references) | Saves signed blobs to the datastore.
If a signed binary with the given URN already exists, its contents will get
overwritten.
Args:
binary_urn: RDFURN that should serve as a unique identifier for the binary.
blobs: An Iterable of signed blobs to write to the datastore.
token: ACL token to use with the legacy (non-relational) datastore. |
def list_to_string(input, delimiter):
"""converts list to string recursively so that nested lists are supported
:param input: a list of strings and lists of strings (and so on recursive)
:type input: list
:param delimiter: the delimiter to use when joining the items
:type delimiter: str
:returns: the recursively joined list
:rtype: str
"""
if isinstance(input, list):
return delimiter.join(
list_to_string(item, delimiter) for item in input)
return input | converts list to string recursively so that nested lists are supported
:param input: a list of strings and lists of strings (and so on recursive)
:type input: list
:param delimiter: the deimiter to use when joining the items
:type delimiter: str
:returns: the recursively joined list
:rtype: str |
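Illustrative calls:
>>> list_to_string(['a', ['b', 'c'], 'd'], ', ')
'a, b, c, d'
>>> list_to_string('already a string', ', ')
'already a string'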
def get_axes(process_or_domain):
"""Returns a dictionary of all Axis in a domain or dictionary of domains.
:param process_or_domain: a process or a domain object
:type process_or_domain: :class:`~climlab.process.process.Process` or
:class:`~climlab.domain.domain._Domain`
:raises: :exc:`TypeError` if the input is not a domain and does not have one
:returns: dictionary of input's Axis
:rtype: dict
:Example:
::
>>> import climlab
>>> from climlab.process.process import get_axes
>>> model = climlab.EBM()
>>> get_axes(model)
{'lat': <climlab.domain.axis.Axis object at 0x7ff13b9dd2d0>,
'depth': <climlab.domain.axis.Axis object at 0x7ff13b9dd310>}
"""
if isinstance(process_or_domain, Process):
dom = process_or_domain.domains
else:
dom = process_or_domain
if isinstance(dom, _Domain):
return dom.axes
elif isinstance(dom, dict):
axes = {}
for thisdom in list(dom.values()):
assert isinstance(thisdom, _Domain)
axes.update(thisdom.axes)
return axes
else:
raise TypeError('dom must be a domain or dictionary of domains.') | Returns a dictionary of all Axis in a domain or dictionary of domains.
:param process_or_domain: a process or a domain object
:type process_or_domain: :class:`~climlab.process.process.Process` or
:class:`~climlab.domain.domain._Domain`
:raises: :exc:`TypeError` if the input is not a domain and does not have one
:returns: dictionary of input's Axis
:rtype: dict
:Example:
::
>>> import climlab
>>> from climlab.process.process import get_axes
>>> model = climlab.EBM()
>>> get_axes(model)
{'lat': <climlab.domain.axis.Axis object at 0x7ff13b9dd2d0>,
'depth': <climlab.domain.axis.Axis object at 0x7ff13b9dd310>} |
def findall(obj, prs, forced_type=None,
cls=anyconfig.models.processor.Processor):
"""
:param obj:
a file path, file, file-like object, pathlib.Path object or an
'anyconfig.globals.IOInfo` (namedtuple) object
:param prs: A list of :class:`anyconfig.models.processor.Processor` classes
:param forced_type:
Forced processor type of the data to process or ID of the processor
class or None
:param cls: A class object to compare with 'forced_type' later
:return: A list of instances of processor classes to process 'obj' data
:raises: ValueError, UnknownProcessorTypeError, UnknownFileTypeError
"""
if (obj is None or not obj) and forced_type is None:
raise ValueError("The first argument 'obj' or the second argument "
"'forced_type' must be something other than "
"None or False.")
if forced_type is None:
pclss = find_by_maybe_file(obj, prs) # :: [Processor], never []
else:
pclss = find_by_type_or_id(forced_type, prs) # Do.
return pclss | :param obj:
a file path, file, file-like object, pathlib.Path object or an
'anyconfig.globals.IOInfo` (namedtuple) object
:param prs: A list of :class:`anyconfig.models.processor.Processor` classes
:param forced_type:
Forced processor type of the data to process or ID of the processor
class or None
:param cls: A class object to compare with 'forced_type' later
:return: A list of instances of processor classes to process 'obj' data
:raises: ValueError, UnknownProcessorTypeError, UnknownFileTypeError |
def find_file(folder, filename):
"""
Find a file given a folder and filename. If the filename can be
resolved directly, it is returned; otherwise the supplied folder is walked.
"""
matches = []
if os.path.isabs(filename) and os.path.isfile(filename):
return filename
for root, _, filenames in os.walk(folder):
for fn in fnmatch.filter(filenames, filename):
matches.append(os.path.join(root, fn))
if not matches:
raise IOError('File %s could not be found' % filename)
return matches[-1] | Find a file given folder and filename. If the filename can be
resolved directly returns otherwise walks the supplied folder. |
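A hedged usage sketch; the paths are hypothetical and results depend on the local filesystem:
>>> find_file('/etc', 'hosts')         # walks /etc and returns the last match, e.g. '/etc/hosts'
>>> find_file('/srv', '/etc/hosts')    # an existing absolute path is returned unchanged
>>> # find_file('/etc', 'no_such_file') would raise IOError because nothing matches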
def define_snowflake_config():
'''Snowflake configuration.
See the Snowflake documentation for reference:
https://docs.snowflake.net/manuals/user-guide/python-connector-api.html
'''
account = Field(
String,
description='Your Snowflake account name. For more details, see https://bit.ly/2FBL320.',
is_optional=True,
)
user = Field(String, description='User login name.', is_optional=False)
password = Field(String, description='User password.', is_optional=False)
database = Field(
String,
description='''Name of the default database to use. After login, you can use USE DATABASE
to change the database.''',
is_optional=True,
)
schema = Field(
String,
description='''Name of the default schema to use. After login, you can use USE SCHEMA to
change the schema.''',
is_optional=True,
)
role = Field(
String,
description='''Name of the default role to use. After login, you can use USE ROLE to change
the role.''',
is_optional=True,
)
warehouse = Field(
String,
description='''Name of the default warehouse to use. After login, you can use USE WAREHOUSE
to change the warehouse.''',
is_optional=True,
)
autocommit = Field(
Bool,
description='''None by default, which honors the Snowflake parameter AUTOCOMMIT. Set to True
or False to enable or disable autocommit mode in the session, respectively.''',
is_optional=True,
)
client_prefetch_threads = Field(
Int,
description='''Number of threads used to download the results sets (4 by default).
Increasing the value improves fetch performance but requires more memory.''',
is_optional=True,
)
client_session_keep_alive = Field(
String,
description='''False by default. Set this to True to keep the session active indefinitely,
even if there is no activity from the user. Make certain to call the close method to
terminate the thread properly or the process may hang.''',
is_optional=True,
)
login_timeout = Field(
Int,
description='''Timeout in seconds for login. By default, 60 seconds. The login request gives
up after the timeout length if the HTTP response is "success".''',
is_optional=True,
)
network_timeout = Field(
Int,
description='''Timeout in seconds for all other operations. By default, none/infinite. A
general request gives up after the timeout length if the HTTP response is not "success"''',
is_optional=True,
)
ocsp_response_cache_filename = Field(
Path,
description='''URI for the OCSP response cache file.
By default, the OCSP response cache file is created in the cache directory.''',
is_optional=True,
)
validate_default_parameters = Field(
Bool,
description='''False by default. Raise an exception if either one of specified database,
schema or warehouse doesn't exist if True.''',
is_optional=True,
)
paramstyle = Field(
# TODO should validate only against permissible values for this
String,
description='''pyformat by default for client side binding. Specify qmark or numeric to
change bind variable formats for server side binding.''',
is_optional=True,
)
timezone = Field(
String,
description='''None by default, which honors the Snowflake parameter TIMEZONE. Set to a
valid time zone (e.g. America/Los_Angeles) to set the session time zone.''',
is_optional=True,
)
return Field(
Dict(
fields={
'account': account,
'user': user,
'password': password,
'database': database,
'schema': schema,
'role': role,
'warehouse': warehouse,
'autocommit': autocommit,
'client_prefetch_threads': client_prefetch_threads,
'client_session_keep_alive': client_session_keep_alive,
'login_timeout': login_timeout,
'network_timeout': network_timeout,
'ocsp_response_cache_filename': ocsp_response_cache_filename,
'validate_default_parameters': validate_default_parameters,
'paramstyle': paramstyle,
'timezone': timezone,
}
),
description='Snowflake configuration',
) | Snowflake configuration.
See the Snowflake documentation for reference:
https://docs.snowflake.net/manuals/user-guide/python-connector-api.html |
def stack_decoders(self, *layers):
"""
Stack decoding layers.
"""
self.stack(*layers)
self.decoding_layers.extend(layers) | Stack decoding layers. |
def parse(text: str) -> Docstring:
"""
Parse the Google-style docstring into its components.
:returns: parsed docstring
"""
ret = Docstring()
if not text:
return ret
# Clean according to PEP-0257
text = inspect.cleandoc(text)
# Find first title and split on its position
match = _titles_re.search(text)
if match:
desc_chunk = text[: match.start()]
meta_chunk = text[match.start() :]
else:
desc_chunk = text
meta_chunk = ""
# Break description into short and long parts
parts = desc_chunk.split("\n", 1)
ret.short_description = parts[0] or None
if len(parts) > 1:
long_desc_chunk = parts[1] or ""
ret.blank_after_short_description = long_desc_chunk.startswith("\n")
ret.blank_after_long_description = long_desc_chunk.endswith("\n\n")
ret.long_description = long_desc_chunk.strip() or None
# Split by sections determined by titles
matches = list(_titles_re.finditer(meta_chunk))
if not matches:
return ret
splits = []
for j in range(len(matches) - 1):
splits.append((matches[j].end(), matches[j + 1].start()))
splits.append((matches[-1].end(), len(meta_chunk)))
chunks = {}
for j, (start, end) in enumerate(splits):
title = matches[j].group(1)
if title not in _valid:
continue
chunks[title] = meta_chunk[start:end].strip("\n")
if not chunks:
return ret
# Add elements from each chunk
for title, chunk in chunks.items():
# Determine indent
indent_match = re.search(r"^\s+", chunk)
if not indent_match:
raise ParseError(f'Can\'t infer indent from "{chunk}"')
indent = indent_match.group()
# Check for returns/yields (only one element)
if _sections[title] in ("returns", "yields"):
part = inspect.cleandoc(chunk)
ret.meta.append(_build_meta(part, title))
continue
# Split based on lines which have exactly that indent
_re = "^" + indent + r"(?=\S)"
c_matches = list(re.finditer(_re, chunk, flags=re.M))
if not c_matches:
raise ParseError(f'No specification for "{title}": "{chunk}"')
c_splits = []
for j in range(len(c_matches) - 1):
c_splits.append((c_matches[j].end(), c_matches[j + 1].start()))
c_splits.append((c_matches[-1].end(), len(chunk)))
for j, (start, end) in enumerate(c_splits):
part = chunk[start:end].strip("\n")
ret.meta.append(_build_meta(part, title))
return ret | Parse the Google-style docstring into its components.
:returns: parsed docstring |
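A sketch of calling the parser on a small Google-style docstring. Which section titles are recognized depends on the module-level _titles_re/_sections tables not shown here, and the attributes read below are assumptions about the surrounding Docstring model:
>>> text = 'Summary line.\n\nArgs:\n    x: the first argument.\n\nReturns:\n    The answer.\n'
>>> doc = parse(text)
>>> doc.short_description   # 'Summary line.'
>>> len(doc.meta)           # one entry for x plus one for the return value, if both titles are recognized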
def from_pandas(df, value='value', x='x', y='y', cellx=None, celly=None, xmin=None, ymax=None,
geot=None, nodata_value=None, projection=None, datatype=None):
"""
Creates a GeoRaster from a Pandas DataFrame. Useful to plot or export data to rasters.
Usage:
raster = from_pandas(df, value='value', x='x', y='y', cellx= cellx, celly=celly,
xmin=xmin, ymax=ymax, geot=geot, nodata_value=ndv,
projection=projection, datatype=datatype)
Although it does not require all the inputs, it is highly recommended to include
the geographical information, so that the GeoRaster is properly constructed. As usual,
the information can be added afterwards directly to the GeoRaster.
"""
if not cellx:
cellx = (df.sort_values(x)[x]-df.sort_values(x).shift(1)[x]).max()
if not celly:
celly = (df.sort_values(y, ascending=False)[y]-df.sort_values(y, ascending=False).shift(1)[y]).drop_duplicates().replace(0).max()
if not xmin:
xmin = df[x].min()
if not ymax:
ymax = df[y].max()
row, col = map_pixel(df[x], df[y], cellx, celly, xmin, ymax)
dfout = pd.DataFrame(np.array([row, col, df[value]]).T, columns=['row', 'col', 'value'])
dfout = dfout.set_index(["row","col"]).unstack().value.reindex(index=np.arange(0,np.max(row)+1)).T.reindex(index=np.arange(0,np.max(col)+1)).T
if nodata_value:
dfout[np.isnan(dfout)] = nodata_value
if not nodata_value:
nodata_value = np.nan
if not geot:
geot = (xmin, cellx, 0, ymax, 0, celly)
return GeoRaster(dfout, geot, nodata_value=nodata_value, projection=projection, datatype=datatype) | Creates a GeoRaster from a Pandas DataFrame. Useful to plot or export data to rasters.
Usage:
raster = from_pandas(df, value='value', x='x', y='y', cellx= cellx, celly=celly,
xmin=xmin, ymax=ymax, geot=geot, nodata_value=ndv,
projection=projection, datatype=datatype)
Although it does not require all the inputs, it is highly recommended to include
the geographical information, so that the GeoRaster is properly constructed. As usual,
the information can be added afterwards directly to the GeoRaster. |
def rget(d, key):
"""Recursively get keys from dict, for example:
'a.b.c' --> d['a']['b']['c']; returns None if the key does not exist.
"""
if not isinstance(d, dict):
return None
assert isinstance(key, str) or isinstance(key, list)
keys = key.split('.') if isinstance(key, str) else key
cdrs = cdr(keys)
cars = car(keys)
return rget(d.get(cars), cdrs) if cdrs else d.get(cars) | Recursively get keys from dict, for example:
'a.b.c' --> d['a']['b']['c']; returns None if the key does not exist.
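Illustrative calls, assuming the car/cdr helpers referenced above are defined in the same module:
>>> d = {'a': {'b': {'c': 1}}}
>>> rget(d, 'a.b.c')     # -> 1
>>> rget(d, 'a.x.c')     # -> None, the path does not exist
>>> rget(d, ['a', 'b'])  # a pre-split key list also works -> {'c': 1}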
def make_app(config=None):
"""
Factory function that creates a new `CoolmagicApplication`
object. Optional WSGI middlewares should be applied here.
"""
config = config or {}
app = CoolMagicApplication(config)
# static stuff
app = SharedDataMiddleware(
app, {"/public": path.join(path.dirname(__file__), "public")}
)
# clean up locals
app = local_manager.make_middleware(app)
return app | Factory function that creates a new `CoolmagicApplication`
object. Optional WSGI middlewares should be applied here. |
def verify_tree_consistency(self, old_tree_size: int, new_tree_size: int,
old_root: bytes, new_root: bytes,
proof: Sequence[bytes]):
"""Verify the consistency between two root hashes.
old_tree_size must be <= new_tree_size.
Args:
old_tree_size: size of the older tree.
new_tree_size: size of the newer_tree.
old_root: the root hash of the older tree.
new_root: the root hash of the newer tree.
proof: the consistency proof.
Returns:
True. The return value is enforced by a decorator and need not be
checked by the caller.
Raises:
ConsistencyError: the proof indicates an inconsistency
(this is usually really serious!).
ProofError: the proof is invalid.
ValueError: supplied tree sizes are invalid.
"""
old_size = old_tree_size
new_size = new_tree_size
if old_size < 0 or new_size < 0:
raise ValueError("Negative tree size")
if old_size > new_size:
raise ValueError("Older tree has bigger size (%d vs %d), did "
"you supply inputs in the wrong order?" %
(old_size, new_size))
if old_size == new_size:
if old_root == new_root:
if proof:
logging.debug("Trees are identical, ignoring proof")
return True
else:
raise error.ConsistencyError("Inconsistency: different root "
"hashes for the same tree size")
if old_size == 0:
if proof:
# A consistency proof with an empty tree is an empty proof.
# Anything is consistent with an empty tree, so ignore whatever
# bogus proof was supplied. Note we do not verify here that the
# root hash is a valid hash for an empty tree.
logging.debug("Ignoring non-empty consistency proof for "
"empty tree.")
return True
# Now 0 < old_size < new_size
# A consistency proof is essentially an audit proof for the node with
# index old_size - 1 in the newer tree. The sole difference is that
# the path is already hashed together into a single hash up until the
# first audit node that occurs in the newer tree only.
node = old_size - 1
last_node = new_size - 1
# While we are the right child, everything is in both trees,
# so move one level up.
while node % 2:
node //= 2
last_node //= 2
p = iter(proof)
try:
if node:
# Compute the two root hashes in parallel.
new_hash = old_hash = next(p)
else:
# The old tree was balanced (2**k nodes), so we already have
# the first root hash.
new_hash = old_hash = old_root
while node:
if node % 2:
# node is a right child: left sibling exists in both trees.
next_node = next(p)
old_hash = self.hasher.hash_children(next_node, old_hash)
new_hash = self.hasher.hash_children(next_node, new_hash)
elif node < last_node:
# node is a left child: right sibling only exists in the
# newer tree.
new_hash = self.hasher.hash_children(new_hash, next(p))
# else node == last_node: node is a left child with no sibling
# in either tree.
node //= 2
last_node //= 2
# Now old_hash is the hash of the first subtree. If the two trees
# have different height, continue the path until the new root.
while last_node:
n = next(p)
new_hash = self.hasher.hash_children(new_hash, n)
last_node //= 2
# If the second hash does not match, the proof is invalid for the
# given pair. If, on the other hand, the newer hash matches but the
# older one doesn't, then the proof (together with the signatures
# on the hashes) is proof of inconsistency.
# Continue to find out.
if new_hash != new_root:
raise error.ProofError("Bad Merkle proof: second root hash "
"does not match. Expected hash: %s "
", computed hash: %s" %
(hexlify(new_root).strip(),
hexlify(new_hash).strip()))
elif old_hash != old_root:
raise error.ConsistencyError("Inconsistency: first root hash "
"does not match. Expected hash: "
"%s, computed hash: %s" %
(hexlify(old_root).strip(),
hexlify(old_hash).strip())
)
except StopIteration:
raise error.ProofError("Merkle proof is too short")
# We've already verified consistency, so accept the proof even if
# there's garbage left over (but log a warning).
try:
next(p)
except StopIteration:
pass
else:
logging.debug("Proof has extra nodes")
return True | Verify the consistency between two root hashes.
old_tree_size must be <= new_tree_size.
Args:
old_tree_size: size of the older tree.
new_tree_size: size of the newer_tree.
old_root: the root hash of the older tree.
new_root: the root hash of the newer tree.
proof: the consistency proof.
Returns:
True. The return value is enforced by a decorator and need not be
checked by the caller.
Raises:
ConsistencyError: the proof indicates an inconsistency
(this is usually really serious!).
ProofError: the proof is invalid.
ValueError: supplied tree sizes are invalid. |
def get_firewall_rules(self, server):
"""
Return all FirewallRule objects based on a server instance or uuid.
"""
server_uuid, server_instance = uuid_and_instance(server)
url = '/server/{0}/firewall_rule'.format(server_uuid)
res = self.get_request(url)
return [
FirewallRule(server=server_instance, **firewall_rule)
for firewall_rule in res['firewall_rules']['firewall_rule']
] | Return all FirewallRule objects based on a server instance or uuid. |
def registerPolling(self, fd, options = POLLING_IN|POLLING_OUT, daemon = False):
'''
register a polling file descriptor
:param fd: file descriptor or socket object
:param options: bit mask flags. Polling object should ignore the incompatible flag.
'''
self.polling.register(fd, options, daemon) | register a polling file descriptor
:param fd: file descriptor or socket object
:param options: bit mask flags. Polling object should ignore the incompatible flag. |
def _add_docstring(format_dict):
"""
Format a doc-string on the fly.
@arg format_dict: A dictionary to format the doc-strings
Example:
@add_docstring({'context': __doc_string_context})
def predict(x):
'''
{context}
>> model.predict(data)
'''
return x
"""
def add_docstring_context(func):
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
wrapper.__doc__ = func.__doc__.format(**format_dict)
return wrapper
return add_docstring_context | Format a doc-string on the fly.
@arg format_dict: A dictionary to format the doc-strings
Example:
@add_docstring({'context': __doc_string_context})
def predict(x):
'''
{context}
>> model.predict(data)
'''
return x |
def _execute(job, f, o=None):
"""
Executes a librsync "job" by reading bytes from `f` and writing results to
`o` if provided. If `o` is omitted, the output is ignored.
"""
# Re-use the same buffer for output, we will read from it after each
# iteration.
out = ctypes.create_string_buffer(RS_JOB_BLOCKSIZE)
while True:
block = f.read(RS_JOB_BLOCKSIZE)
buff = Buffer()
# provide the data block via input buffer.
buff.next_in = ctypes.c_char_p(block)
buff.avail_in = ctypes.c_size_t(len(block))
buff.eof_in = ctypes.c_int(not block)
# Set up our buffer for output.
buff.next_out = ctypes.cast(out, ctypes.c_char_p)
buff.avail_out = ctypes.c_size_t(RS_JOB_BLOCKSIZE)
r = _librsync.rs_job_iter(job, ctypes.byref(buff))
if o:
o.write(out.raw[:RS_JOB_BLOCKSIZE - buff.avail_out])
if r == RS_DONE:
break
elif r != RS_BLOCKED:
raise LibrsyncError(r)
if buff.avail_in > 0:
# There is data left in the input buffer, librsync did not consume
# all of it. Rewind the file a bit so we include that data in our
# next read. It would be better to simply tack data to the end of
# this buffer, but that is very difficult in Python.
f.seek(f.tell() - buff.avail_in)
if o and callable(getattr(o, 'seek', None)):
# As a matter of convenience, rewind the output file.
o.seek(0)
return o | Executes a librsync "job" by reading bytes from `f` and writing results to
`o` if provided. If `o` is omitted, the output is ignored. |
def _complete_exit(self, cmd, args, text):
"""Find candidates for the 'exit' command."""
if args:
return
return [ x for x in { 'root', 'all', } \
if x.startswith(text) ] | Find candidates for the 'exit' command. |
def pointwise_free_energies(self, therm_state=None):
r"""
Computes the pointwise free energies :math:`-\log(\mu^k(x))` for all points x.
:math:`\mu^k(x)` is the optimal estimate of the Boltzmann distribution
of the k'th ensemble defined on the set of all samples.
Parameters
----------
therm_state : int or None, default=None
Selects the thermodynamic state k for which to compute the
pointwise free energies.
None selects the "unbiased" state which is defined by having
zero bias energy.
Returns
-------
mu_k : list of numpy.ndarray(X_i, dtype=numpy.float64)
list of the same layout as dtrajs (or ttrajs). mu_k[i][t]
contains the pointwise free energy of the frame seen in
trajectory i and time step t.
Frames that are not in the connected sets get assigned an
infinite pointwise free energy.
"""
assert self.therm_energies is not None, \
'MEMM has to be estimate()\'d before pointwise free energies can be calculated.'
if therm_state is not None:
assert therm_state<=self.nthermo
mu = [_np.zeros(d.shape[0], dtype=_np.float64) for d in self.dtrajs+self.equilibrium_dtrajs]
if self.equilibrium is None:
_tram.get_pointwise_unbiased_free_energies(
therm_state,
self.log_lagrangian_mult, self.biased_conf_energies,
self.therm_energies, self.count_matrices,
self.btrajs, self.dtrajs,
self.state_counts, None, None, mu)
else:
_trammbar.get_pointwise_unbiased_free_energies(
therm_state,
self.log_lagrangian_mult, self.biased_conf_energies,
self.therm_energies, self.count_matrices,
self.btrajs+self.equilibrium_btrajs, self.dtrajs+self.equilibrium_dtrajs,
self.state_counts, None, None, mu,
equilibrium_therm_state_counts=self.equilibrium_state_counts.sum(axis=1).astype(_np.intc),
overcounting_factor=1.0/self.lag)
return mu | r"""
Computes the pointwise free energies :math:`-\log(\mu^k(x))` for all points x.
:math:`\mu^k(x)` is the optimal estimate of the Boltzmann distribution
of the k'th ensemble defined on the set of all samples.
Parameters
----------
therm_state : int or None, default=None
Selects the thermodynamic state k for which to compute the
pointwise free energies.
None selects the "unbiased" state which is defined by having
zero bias energy.
Returns
-------
mu_k : list of numpy.ndarray(X_i, dtype=numpy.float64)
list of the same layout as dtrajs (or ttrajs). mu_k[i][t]
contains the pointwise free energy of the frame seen in
trajectory i and time step t.
Frames that are not in the connected sets get assigned an
infinite pointwise free energy. |
def warning(f, *args, **kwargs):
"""Automatically log progress on function entry and exit. Default logging
value: warning.
*Logging with values contained in the parameters of the decorated function*
Message (args[0]) may be a string to be formatted with parameters passed to
the decorated function. Each '{varname}' will be replaced by the value of
the parameter of the same name.
*Keyword parameters*
- log :: integer
- Specifies a custom level of logging to pass to the active logger.
- Default: WARNING
*Exceptions:*
- IndexError and ValueError
- will be returned if *args contains a string that does not correspond to
a parameter name of the decorated function, or if there are more '{}'s
than there are *args.
"""
kwargs.update({'log': logging.WARNING})
return _stump(f, *args, **kwargs) | Automatically log progress on function entry and exit. Default logging
value: warning.
*Logging with values contained in the parameters of the decorated function*
Message (args[0]) may be a string to be formatted with parameters passed to
the decorated function. Each '{varname}' will be replaced by the value of
the parameter of the same name.
*Keyword parameters*
- log :: integer
- Specifies a custom level of logging to pass to the active logger.
- Default: WARNING
*Exceptions:*
- IndexError and ValueError
- will be returned if *args contains a string that does not correspond to
a parameter name of the decorated function, or if there are more '{}'s
than there are *args. |
def get_mcu_definition(self, project_file):
""" Parse project file to get mcu definition """
project_file = join(getcwd(), project_file)
coproj_dic = xmltodict.parse(file(project_file), dict_constructor=dict)
mcu = MCU_TEMPLATE
IROM1_index = self._coproj_find_option(coproj_dic['Project']['Target']['BuildOption']['Link']['MemoryAreas']['Memory'], '@name', 'IROM1')
IROM2_index = self._coproj_find_option(coproj_dic['Project']['Target']['BuildOption']['Link']['MemoryAreas']['Memory'], '@name', 'IROM2')
IRAM1_index = self._coproj_find_option(coproj_dic['Project']['Target']['BuildOption']['Link']['MemoryAreas']['Memory'], '@name', 'IRAM1')
IRAM2_index = self._coproj_find_option(coproj_dic['Project']['Target']['BuildOption']['Link']['MemoryAreas']['Memory'], '@name', 'IRAM2')
defaultAlgorithm_index = self._coproj_find_option(coproj_dic['Project']['Target']['DebugOption']['Option'], '@name', 'org.coocox.codebugger.gdbjtag.core.defaultAlgorithm')
mcu['tool_specific'] = {
'coide' : {
'Device' : {
'manufacturerId' : [coproj_dic['Project']['Target']['Device']['@manufacturerId']],
'manufacturerName': [coproj_dic['Project']['Target']['Device']['@manufacturerName']],
'chipId': [coproj_dic['Project']['Target']['Device']['@chipId']],
'chipName': [coproj_dic['Project']['Target']['Device']['@chipName']],
},
'DebugOption': {
'defaultAlgorithm': [coproj_dic['Project']['Target']['DebugOption']['Option'][defaultAlgorithm_index]['@value']],
},
'MemoryAreas': {
'IROM1': {
'name': [coproj_dic['Project']['Target']['BuildOption']['Link']['MemoryAreas']['Memory'][IROM1_index]['@name']],
'size': [coproj_dic['Project']['Target']['BuildOption']['Link']['MemoryAreas']['Memory'][IROM1_index]['@size']],
'startValue': [coproj_dic['Project']['Target']['BuildOption']['Link']['MemoryAreas']['Memory'][IROM1_index]['@startValue']],
'type': [coproj_dic['Project']['Target']['BuildOption']['Link']['MemoryAreas']['Memory'][IROM1_index]['@type']],
},
'IRAM1': {
'name': [coproj_dic['Project']['Target']['BuildOption']['Link']['MemoryAreas']['Memory'][IRAM1_index]['@name']],
'size': [coproj_dic['Project']['Target']['BuildOption']['Link']['MemoryAreas']['Memory'][IRAM1_index]['@size']],
'startValue': [coproj_dic['Project']['Target']['BuildOption']['Link']['MemoryAreas']['Memory'][IRAM1_index]['@startValue']],
'type': [coproj_dic['Project']['Target']['BuildOption']['Link']['MemoryAreas']['Memory'][IRAM1_index]['@type']],
},
'IROM2': {
'name': [coproj_dic['Project']['Target']['BuildOption']['Link']['MemoryAreas']['Memory'][IROM2_index]['@name']],
'size': [coproj_dic['Project']['Target']['BuildOption']['Link']['MemoryAreas']['Memory'][IROM2_index]['@size']],
'startValue': [coproj_dic['Project']['Target']['BuildOption']['Link']['MemoryAreas']['Memory'][IROM2_index]['@startValue']],
'type': [coproj_dic['Project']['Target']['BuildOption']['Link']['MemoryAreas']['Memory'][IROM2_index]['@type']],
},
'IRAM2': {
'name': [coproj_dic['Project']['Target']['BuildOption']['Link']['MemoryAreas']['Memory'][IRAM2_index]['@name']],
'size': [coproj_dic['Project']['Target']['BuildOption']['Link']['MemoryAreas']['Memory'][IRAM2_index]['@size']],
'startValue': [coproj_dic['Project']['Target']['BuildOption']['Link']['MemoryAreas']['Memory'][IRAM2_index]['@startValue']],
'type': [coproj_dic['Project']['Target']['BuildOption']['Link']['MemoryAreas']['Memory'][IRAM2_index]['@type']],
}
}
}
}
return mcu | Parse project file to get mcu definition |
def close (self, force=True): # File-like object.
"""This closes the connection with the child application. Note that
calling close() more than once is valid. This emulates standard Python
behavior with files. Set force to True if you want to make sure that
the child is terminated (SIGKILL is sent if the child ignores SIGHUP
and SIGINT). """
if not self.closed:
self.flush()
os.close (self.child_fd)
time.sleep(self.delayafterclose) # Give kernel time to update process status.
if self.isalive():
if not self.terminate(force):
raise ExceptionPexpect ('close() could not terminate the child using terminate()')
self.child_fd = -1
self.closed = True | This closes the connection with the child application. Note that
calling close() more than once is valid. This emulates standard Python
behavior with files. Set force to True if you want to make sure that
the child is terminated (SIGKILL is sent if the child ignores SIGHUP
and SIGINT). |
def warn_with_traceback(message, category, filename, lineno, file=None, line=None):
"""Get full tracebacks when warning is raised by setting
warnings.showwarning = warn_with_traceback
See also
--------
http://stackoverflow.com/questions/22373927/get-traceback-of-warnings
"""
import traceback
traceback.print_stack()
log = file if hasattr(file, 'write') else sys.stderr
settings.write(warnings.formatwarning(message, category, filename, lineno, line)) | Get full tracebacks when warning is raised by setting
warnings.showwarning = warn_with_traceback
See also
--------
http://stackoverflow.com/questions/22373927/get-traceback-of-warnings |
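A usage sketch following the docstring's suggestion; note that the function writes through a module-level settings object, so outside its home module the call is illustrative only:
>>> import warnings
>>> warnings.showwarning = warn_with_traceback   # install the hook
>>> warnings.warn('something looks off')         # now also prints the full stack trace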