text (string, lengths 78 to 104k) | score (float64, 0 to 0.18) |
---|---|
def fa(a, b, alpha=2):
"""Returns the factor of 'alpha' (2 or 5 normally)
"""
return np.sum((a > b / alpha) & (a < b * alpha), dtype=float) / len(a) * 100
| 0.012048 |
def _trace_summary(self):
"""
Summarizes the trace of values used to update the DynamicArgs
and the arguments subsequently returned. May be used to
implement the summary method.
"""
for (i, (val, args)) in enumerate(self.trace):
if args is StopIteration:
info = "Terminated"
else:
pprint = ','.join('{' + ','.join('%s=%r' % (k,v)
for (k,v) in arg.items()) + '}' for arg in args)
info = ("exploring arguments [%s]" % pprint )
if i == 0: print("Step %d: Initially %s." % (i, info))
else: print("Step %d: %s after receiving input(s) %s." % (i, info.capitalize(), val))
| 0.013514 |
def idle_task(self):
'''called in idle time'''
try:
data = self.port.recv(1024) # Attempt to read up to 1024 bytes.
except socket.error as e:
if e.errno in [ errno.EAGAIN, errno.EWOULDBLOCK ]:
return
raise
try:
self.send_rtcm_msg(data)
except Exception as e:
print("DGPS: GPS Inject Failed:", e)
| 0.012136 |
def new_from_files(self, basepath, basename, repo, \
bohrs=False, \
software=_E_SW.ORCA, \
repo_clobber=False, **kwargs):
""" Initialize with data from files.
"""
# Imports
import os
from os import path as osp
from ..xyz import OpanXYZ as OX
from ..grad import OrcaEngrad as OE
from ..hess import OrcaHess as OH
from .repo import OpanAnharmRepo as OR
from ..const import EnumDispDirection as E_DDir, EnumFileType as E_FT
from ..const import EnumSoftware as E_SW
from ..const import DEF
from ..error import AnharmError as ANHErr
## # Store working directory for restore?
## prev_dir = os.getcwd()
# Complain if anything is already bound
if not self.w_xyz == None:
raise ANHErr(ANHErr.STATUS,
"XYZ object is already bound",
"")
## end if
if not self.w_grad == None:
raise ANHErr(ANHErr.STATUS,
"GRAD object is already bound",
"")
## end if
if not self.w_hess == None:
raise ANHErr(ANHErr.STATUS,
"HESS object is already bound",
"")
## end if
if not self.repo == None:
raise ANHErr(ANHErr.STATUS,
"Repository object is already bound",
"")
## end if
# RESUME: vpt2--factor for loading from different software pkgs
# Load the three data files
self.w_xyz = OX( osp.join(basepath, \
basename + osp.extsep + xyz_ext) )
self.w_grad = OE( osp.join(basepath, \
basename + osp.extsep + engrad_ext), \
0, E_DDir.NO_DISP, 0.0 )
self.w_hess = OH( osp.join(basepath, \
basename + osp.extsep + hess_ext), \
0, E_DDir.NO_DISP, 0.0 )
# Only accept new repos for now
if not isinstance(repo, str):
raise TypeError("Must create new repository when loading " +
"a new dataset.")
## end if
# Repo is string, treat as filename and try to load
# Check if it's a complete path
# If it's a relative path, prepend the basepath
        if len(osp.split(repo)[0]) > 0 and not osp.isabs(repo):
repo = osp.join(basepath, repo)
## end if
# Complain if it's a directory
if osp.isdir(repo):
raise IOError("Cannot bind repository -- specified " +
"location is a directory")
## end if
# If file exists ...
if osp.isfile(repo):
# Depending on clobber, either delete existing or raise error
if repo_clobber:
# Clobber old repo
os.remove(repo)
else:
# Raise error
raise IOError("Target repository file exists and " +
"clobber is disabled.")
## end if
## end if
# Should be good to create the repo
self.repo = OR(repo)
| 0.016089 |
def render_formset_errors(formset, **kwargs):
"""
Render formset errors to a Bootstrap layout
"""
renderer_cls = get_formset_renderer(**kwargs)
return renderer_cls(formset, **kwargs).render_errors()
| 0.004587 |
def get_checks(self, target_type, tags=None, skips=None):
"""
Get all checks for given type/tags.
:param skips: list of str
:param target_type: TargetType class
:param tags: list of str
:return: list of check instances
"""
skips = skips or []
result = []
for check_struct in self.ruleset_struct.checks:
if check_struct.name in skips:
continue
logger.debug("Processing check_struct {}.".format(check_struct))
usable_targets = check_struct.usable_targets
if target_type and usable_targets \
and target_type.get_compatible_check_class().check_type not in usable_targets:
logger.info("Skipping... Target type does not match.")
continue
if check_struct.import_name:
check_class = self.check_loader.import_class(check_struct.import_name)
else:
try:
check_class = self.check_loader.mapping[check_struct.name]
except KeyError:
logger.error("Check %s was not found -- it can't be loaded", check_struct.name)
raise ColinRulesetException(
"Check {} can't be loaded, we couldn't find it.".format(check_struct.name))
check_instance = check_class()
if check_struct.tags:
logger.info("Overriding check's tags %s with the one defined in ruleset: %s",
check_instance.tags, check_struct.tags)
check_instance.tags = check_struct.tags[:]
if check_struct.additional_tags:
logger.info("Adding additional tags: %s", check_struct.additional_tags)
check_instance.tags += check_struct.additional_tags
if not is_compatible(target_type=target_type, check_instance=check_instance):
logger.error(
"Check '{}' not compatible with the target type: {}".format(
check_instance.name, target_type.get_compatible_check_class().check_type))
raise ColinRulesetException(
"Check {} can't be used for target type {}".format(
check_instance, target_type.get_compatible_check_class().check_type))
if tags:
if not set(tags) < set(check_instance.tags):
logger.debug(
"Check '{}' not passed the tag control: {}".format(check_instance.name,
tags))
continue
# and finally, attach attributes from ruleset to the check instance
for k, v in check_struct.other_attributes.items():
            # yes, this overrides things; yes, users may easily and severely break their setup
setattr(check_instance, k, v)
result.append(check_instance)
logger.debug("Check instance {} added.".format(check_instance.name))
return result
| 0.00511 |
def _connect(self):
"""Connect to the serial port."""
try:
while True:
_LOGGER.info('Trying to connect to %s', self.port)
try:
yield from serial_asyncio.create_serial_connection(
self.loop, lambda: self.protocol, self.port, self.baud)
return
except serial.SerialException:
_LOGGER.error('Unable to connect to %s', self.port)
_LOGGER.info(
'Waiting %s secs before trying to connect again',
self.reconnect_timeout)
yield from asyncio.sleep(
self.reconnect_timeout, loop=self.loop)
except asyncio.CancelledError:
_LOGGER.debug('Connect attempt to %s cancelled', self.port)
| 0.002323 |
def wait(self, method, *args):
"""
Call a method on the zombie.js Browser instance and wait on a callback.
:param method: the method to call, e.g., html()
        :param args: one or more arguments for the method
"""
methodargs = encode_args(args, extra=True)
js = """
%s(%s wait_callback);
""" % (method, methodargs)
self._send(js)
| 0.004938 |
def items(self, view=None, prefetch=None, cache=True):
""" Get list of download items.
@param view: Name of the view.
            @param prefetch: Optional list of field names to fetch initially.
@param cache: Cache items for the given view?
"""
# TODO: Cache should be by hash.
# Then get the initial data when cache is empty,
# else get a list of hashes from the view, make a diff
# to what's in the cache, fetch the rest. Getting the
# fields for one hash might be done by a special view
# (filter: $d.hash == hashvalue)
if view is None:
view = engine.TorrentView(self, "default")
elif isinstance(view, basestring):
view = engine.TorrentView(self, self._resolve_viewname(view))
else:
view.viewname = self._resolve_viewname(view.viewname)
if not cache or view.viewname not in self._item_cache:
# Map pyroscope names to rTorrent ones
if prefetch:
prefetch = self.CORE_FIELDS | set((self.PYRO2RT_MAPPING.get(i, i) for i in prefetch))
else:
prefetch = self.PREFETCH_FIELDS
# Fetch items
items = []
try:
# Prepare multi-call arguments
args = ["d.%s%s" % ("" if field.startswith("is_") else "get_", field)
for field in prefetch
]
infohash = view._check_hash_view()
if infohash:
multi_call = self.open().system.multicall
args = [dict(methodName=field.rsplit('=', 1)[0],
params=[infohash] + (field.rsplit('=', 1)[1].split(',') if '=' in field else []))
for field in args]
raw_items = [[i[0] for i in multi_call(args)]]
else:
multi_call = self.open().d.multicall
args = [view.viewname] + [field if '=' in field else field + '=' for field in args]
if view.matcher and int(config.fast_query):
pre_filter = matching.unquote_pre_filter(view.matcher.pre_filter())
self.LOG.info("!!! pre-filter: {}".format(pre_filter or 'N/A'))
if pre_filter:
multi_call = self.open().d.multicall.filtered
args.insert(1, pre_filter)
raw_items = multi_call(*tuple(args))
##self.LOG.debug("multicall %r" % (args,))
##import pprint; self.LOG.debug(pprint.pformat(raw_items))
self.LOG.debug("Got %d items with %d attributes from %r [%s]" % (
len(raw_items), len(prefetch), self.engine_id, multi_call))
for item in raw_items:
items.append(RtorrentItem(self, zip(
[self.RT2PYRO_MAPPING.get(i, i) for i in prefetch], item
)))
yield items[-1]
except xmlrpc.ERRORS as exc:
raise error.EngineError("While getting download items from %r: %s" % (self, exc))
# Everything yielded, store for next iteration
if cache:
self._item_cache[view.viewname] = items
else:
# Yield prefetched results
for item in self._item_cache[view.viewname]:
yield item
| 0.004286 |
def GetCachedPattern(self, patternId: int, cache: bool):
"""
        Get a pattern by patternId.
        patternId: int, a value in class `PatternId`.
        cache: bool, if True, store the pattern for later use; if False, get a new pattern by `self.GetPattern`.
        Return the pattern if this element supports it, else None.
"""
if cache:
pattern = self._supportedPatterns.get(patternId, None)
if pattern:
return pattern
else:
pattern = self.GetPattern(patternId)
if pattern:
self._supportedPatterns[patternId] = pattern
return pattern
else:
pattern = self.GetPattern(patternId)
if pattern:
self._supportedPatterns[patternId] = pattern
return pattern
| 0.003472 |
def unparse(self, dn, record):
"""Write an entry or change record to the output file.
:type dn: string
:param dn: distinguished name
:type record: Union[Dict[string, List[string]], List[Tuple]]
:param record: Either a dictionary holding an entry or a list of
additions (2-tuple) or modifications (3-tuple).
"""
self._unparse_attr('dn', dn)
if isinstance(record, dict):
self._unparse_entry_record(record)
elif isinstance(record, list):
self._unparse_change_record(record)
else:
raise ValueError("Argument record must be dictionary or list")
self._output_file.write(self._line_sep)
self.records_written += 1
| 0.00266 |
def get_wxa_code(self,
path,
width=430,
auto_color=False,
line_color={"r": "0", "g": "0", "b": "0"},
is_hyaline=False):
"""
创建小程序码(接口A: 适用于需要的码数量较少的业务场景)
详情请参考
https://mp.weixin.qq.com/debug/wxadoc/dev/api/qrcode.html
"""
return self._post(
'wxa/getwxacode',
data={
'path': path,
'width': width,
'auto_color': auto_color,
'line_color': line_color,
'is_hyaline': is_hyaline,
}
)
| 0.010703 |
def _ip_assigned(self):
"""Check if IP prefix is assigned to loopback interface.
Returns:
True if IP prefix found assigned otherwise False.
"""
output = []
cmd = [
'/sbin/ip',
'address',
'show',
'dev',
self.config['interface'],
'to',
self.ip_with_prefixlen,
]
if self.ip_check_disabled:
self.log.info("checking for IP assignment on interface %s is "
"disabled", self.config['interface'])
return True
self.log.debug("running %s", ' '.join(cmd))
try:
output = subprocess.check_output(
cmd,
universal_newlines=True,
timeout=1)
except subprocess.CalledProcessError as error:
self.log.error("error checking IP-PREFIX %s: %s",
cmd, error.output)
# Because it is unlikely to ever get an error we return True
return True
except subprocess.TimeoutExpired:
self.log.error("timeout running %s", ' '.join(cmd))
# Because it is unlikely to ever get a timeout we return True
return True
except ValueError as error:
# We have been getting intermittent ValueErrors, see here
# gist.github.com/unixsurfer/67db620d87f667423f6f6e3a04e0bff5
# It has happened ~5 times and this code is executed from multiple
# threads and every ~10secs on several (~40) production servers for
# more than 18months.
# It could be a bug in Python or system returns corrupted data.
            # As a consequence of the raised exception the thread dies and the
            # service isn't monitored anymore! So, we now catch the exception.
            # While checking if an IP is assigned, we get an unrelated error
            # that prevents us from knowing whether it's assigned. We simply
            # don't know. Retry logic could be a more proper solution.
self.log.error("running %s raised ValueError exception:%s",
' '.join(cmd), error)
return True
else:
if self.ip_with_prefixlen in output: # pylint: disable=E1135,R1705
msg = "{i} assigned to loopback interface".format(
i=self.ip_with_prefixlen)
self.log.debug(msg)
return True
else:
msg = ("{i} isn't assigned to {d} interface"
.format(i=self.ip_with_prefixlen,
d=self.config['interface']))
self.log.warning(msg)
return False
self.log.debug("I shouldn't land here!, it is a BUG")
return False
| 0.000699 |
def twosided_2_centerdc(data):
"""Convert a two-sided PSD to a center-dc PSD"""
N = len(data)
    # could use int() or // in Python 3
newpsd = np.concatenate((cshift(data[N//2:], 1), data[0:N//2]))
newpsd[0] = data[-1]
return newpsd
| 0.003984 |
def set_input(self):
""" Set inputs from .uwg input file if not already defined, the check if all
the required input parameters are there.
"""
# If a uwgParamFileName is set, then read inputs from .uwg file.
# User-defined class properties will override the inputs from the .uwg file.
if self.uwgParamFileName is not None:
print("\nReading uwg file input.")
self.read_input()
else:
print("\nNo .uwg file input.")
self.check_required_inputs()
# Modify zone to be used as python index
self.zone = int(self.zone)-1
| 0.006192 |
async def read(self, n=None):
"""Read all content
"""
if self._streamed:
return b''
buffer = []
async for body in self:
buffer.append(body)
return b''.join(buffer)
| 0.008511 |
def balance_ss_model(F,L,Qc,H,Pinf,P0,dF=None,dQc=None,dPinf=None,dP0=None):
"""
Balances State-Space model for more numerical stability
This is based on the following:
dx/dt = F x + L w
y = H x
Let T z = x, which gives
dz/dt = inv(T) F T z + inv(T) L w
y = H T z
"""
bF,T,T_inv = balance_matrix(F)
bL = np.dot( T_inv, L)
bQc = Qc # not affected
bH = np.dot(H, T)
bPinf = np.dot(T_inv, np.dot(Pinf, T_inv.T))
#import pdb; pdb.set_trace()
# LL,islower = linalg.cho_factor(Pinf)
# inds = np.triu_indices(Pinf.shape[0],k=1)
# LL[inds] = 0.0
# bLL = np.dot(T_inv, LL)
# bPinf = np.dot( bLL, bLL.T)
bP0 = np.dot(T_inv, np.dot(P0, T_inv.T))
if dF is not None:
bdF = np.zeros(dF.shape)
for i in range(dF.shape[2]):
bdF[:,:,i] = np.dot( T_inv, np.dot( dF[:,:,i], T))
else:
bdF = None
if dPinf is not None:
bdPinf = np.zeros(dPinf.shape)
for i in range(dPinf.shape[2]):
bdPinf[:,:,i] = np.dot( T_inv, np.dot( dPinf[:,:,i], T_inv.T))
# LL,islower = linalg.cho_factor(dPinf[:,:,i])
# inds = np.triu_indices(dPinf[:,:,i].shape[0],k=1)
# LL[inds] = 0.0
# bLL = np.dot(T_inv, LL)
# bdPinf[:,:,i] = np.dot( bLL, bLL.T)
else:
bdPinf = None
if dP0 is not None:
bdP0 = np.zeros(dP0.shape)
for i in range(dP0.shape[2]):
bdP0[:,:,i] = np.dot( T_inv, np.dot( dP0[:,:,i], T_inv.T))
else:
bdP0 = None
bdQc = dQc # not affected
# (F,L,Qc,H,Pinf,P0,dF,dQc,dPinf,dP0)
return bF, bL, bQc, bH, bPinf, bP0, bdF, bdQc, bdPinf, bdP0
| 0.021016 |
def recalculate_extents(self):
"""Adjust x, y, cx, and cy to incorporate all contained shapes.
This would typically be called when a contained shape is added,
removed, or its position or size updated.
This method is recursive "upwards" since a change in a group shape
can change the position and size of its containing group.
"""
if not self.tag == qn('p:grpSp'):
return
x, y, cx, cy = self._child_extents
self.chOff.x = self.x = x
self.chOff.y = self.y = y
self.chExt.cx = self.cx = cx
self.chExt.cy = self.cy = cy
self.getparent().recalculate_extents()
| 0.002967 |
def update_trigger(self, trigger):
"""
Updates on the Alert API the trigger record having the ID of the specified Trigger object: the remote record is
updated with data from the local Trigger object.
:param trigger: the Trigger with updated data
:type trigger: `pyowm.alertapi30.trigger.Trigger`
:return: ``None`` if update is successful, an error otherwise
"""
assert trigger is not None
assert isinstance(trigger.id, str), "Value must be a string"
the_time_period = {
"start": {
"expression": "after",
"amount": trigger.start_after_millis
},
"end": {
"expression": "after",
"amount": trigger.end_after_millis
}
}
the_conditions = [dict(name=c.weather_param, expression=c.operator, amount=c.amount) for c in trigger.conditions]
the_area = [a.as_dict() for a in trigger.area]
status, _ = self.http_client.put(
NAMED_TRIGGER_URI % trigger.id,
params={'appid': self.API_key},
data=dict(time_period=the_time_period, conditions=the_conditions, area=the_area),
headers={'Content-Type': 'application/json'})
| 0.003922 |
def call(self, identifier, *args, **kwargs):
""" Call the named function with provided arguments
You can pass a custom JSON encoder by passing it in the encoder
keyword only argument.
"""
encoder = kwargs.get('encoder', None)
timeout = kwargs.get('timeout', 0)
max_memory = kwargs.get('max_memory', 0)
json_args = json.dumps(args, separators=(',', ':'), cls=encoder)
js = "{identifier}.apply(this, {json_args})"
return self.eval(js.format(identifier=identifier, json_args=json_args), timeout, max_memory)
| 0.005119 |
def quantity_spec_string(name, quantity_dict):
'''
Returns a quantity specification for docstrings.
Example
-------
>>> quantity_spec_string('Tv')
>>> 'Tv : float or ndarray\n Data for virtual temperature.'
'''
if name not in quantity_dict.keys():
raise ValueError('{0} not present in quantity_dict'.format(name))
s = '{0} : float or ndarray\n'.format(name)
s += doc_paragraph('Data for {0}.'.format(
quantity_string(name, quantity_dict)), indent=4)
return s
| 0.001988 |
def initial_state(self, batch_size, dtype=tf.float32, trainable=False,
trainable_initializers=None, trainable_regularizers=None,
name=None):
"""Builds the default start state tensor of zeros.
Args:
batch_size: An int, float or scalar Tensor representing the batch size.
dtype: The data type to use for the state.
trainable: Boolean that indicates whether to learn the initial state.
trainable_initializers: An optional pair of initializers for the
initial hidden state and cell state.
trainable_regularizers: Optional regularizer function or nested structure
of functions with the same structure as the `state_size` property of the
core, to be used as regularizers of the initial state variable. A
regularizer should be a function that takes a single `Tensor` as an
input and returns a scalar `Tensor` output, e.g. the L1 and L2
regularizers in `tf.contrib.layers`.
name: Optional string used to prefix the initial state variable names, in
the case of a trainable initial state. If not provided, defaults to
the name of the module.
Returns:
A tensor tuple `([batch_size, state_size], [batch_size, state_size], ?)`
filled with zeros, with the third entry present when batch norm is enabled
      with `max_unique_stats > 1`, with value `0` (representing the time step).
"""
if self._max_unique_stats == 1:
return super(BatchNormLSTM, self).initial_state(
batch_size, dtype=dtype, trainable=trainable,
trainable_initializers=trainable_initializers,
trainable_regularizers=trainable_regularizers, name=name)
else:
with tf.name_scope(self._initial_state_scope(name)):
if not trainable:
state = self.zero_state(batch_size, dtype)
else:
# We have to manually create the state ourselves so we don't create a
# variable that never gets used for the third entry.
state = rnn_core.trainable_initial_state(
batch_size,
(tf.TensorShape([self._hidden_size]),
tf.TensorShape([self._hidden_size])),
dtype=dtype,
initializers=trainable_initializers,
regularizers=trainable_regularizers,
name=self._initial_state_scope(name))
return (state[0], state[1], tf.constant(0, dtype=tf.int32))
| 0.004477 |
def AgregarReceptor(self, cod_caracter, **kwargs):
"Agrego los datos del receptor a la liq."
d = {'codCaracter': cod_caracter}
self.solicitud['receptor'].update(d)
return True
| 0.009662 |
def _handle_actiongeturl(self, _):
"""Handle the ActionGetURL action."""
obj = _make_object("ActionGetURL")
obj.UrlString = self._get_struct_string()
obj.TargetString = self._get_struct_string()
yield obj
| 0.008197 |
def get_source(self, spec, row):
""" Sources can be specified as plain strings or as a reference to a column. """
value = self.get_value({'column': spec.get('source_url_column')}, row)
if value is not None:
return value
return spec.get('source_url')
| 0.010239 |
def _combine_sets(self, sets, final_set):
"""
        Given a list of sets, combine them to create the final set that will be
        used to make the final redis call.
        If we have at least one sorted set, use zinterstore instead of sunionstore
"""
if self._has_sortedsets:
self.cls.get_connection().zinterstore(final_set, list(sets))
else:
final_set = super(ExtendedCollectionManager, self)._combine_sets(sets, final_set)
return final_set
| 0.005941 |
def _find_form_xobject_images(pdf, container, contentsinfo):
"""Find any images that are in Form XObjects in the container
The container may be a page, or a parent Form XObject.
"""
if '/Resources' not in container:
return
resources = container['/Resources']
if '/XObject' not in resources:
return
xobjs = resources['/XObject'].as_dict()
for xobj in xobjs:
candidate = xobjs[xobj]
if candidate['/Subtype'] != '/Form':
continue
form_xobject = candidate
for settings in contentsinfo.xobject_settings:
if settings.name != xobj:
continue
# Find images once for each time this Form XObject is drawn.
# This could be optimized to cache the multiple drawing events
# but in practice both Form XObjects and multiple drawing of the
# same object are both very rare.
ctm_shorthand = settings.shorthand
yield from _process_content_streams(
pdf=pdf, container=form_xobject, shorthand=ctm_shorthand
)
| 0.0009 |
def _build_service_livestate(self, host_name, service_name, livestate):
# pylint: disable=no-self-use, too-many-locals
"""Build and notify the external command for a service livestate
PROCESS_SERVICE_CHECK_RESULT;<host_name>;<service_description>;<return_code>;<plugin_output>
Create and post a logcheckresult to the backend for the livestate
:param host_name: the concerned host name
:param service_name: the concerned service name
:param livestate: livestate dictionary
:return: external command line
"""
state = livestate.get('state', 'OK').upper()
output = livestate.get('output', '')
long_output = livestate.get('long_output', '')
perf_data = livestate.get('perf_data', '')
try:
timestamp = int(livestate.get('timestamp', 'ABC'))
except ValueError:
timestamp = None
service_state_to_id = {
"OK": 0,
"WARNING": 1,
"CRITICAL": 2,
"UNKNOWN": 3,
"UNREACHABLE": 4
}
parameters = '%s;%s' % (service_state_to_id.get(state, 3), output)
if long_output and perf_data:
parameters = '%s|%s\n%s' % (parameters, perf_data, long_output)
elif long_output:
parameters = '%s\n%s' % (parameters, long_output)
elif perf_data:
parameters = '%s|%s' % (parameters, perf_data)
command_line = 'PROCESS_SERVICE_CHECK_RESULT;%s;%s;%s' % \
(host_name, service_name, parameters)
if timestamp is not None:
command_line = '[%d] %s' % (timestamp, command_line)
else:
command_line = '[%d] %s' % (int(time.time()), command_line)
return command_line
| 0.001674 |
def run(
command,
timeout=None,
debug=False,
stdin=None,
print_stdout=True,
print_stderr=True,
callback_stdout=None,
callback_stderr=None,
environment=None,
environment_override=None,
win32resolve=True,
working_directory=None,
):
"""
Run an external process.
File system path objects (PEP-519) are accepted in the command, environment,
and working directory arguments.
:param array command: Command line to be run, specified as array.
:param timeout: Terminate program execution after this many seconds.
:param boolean debug: Enable further debug messages.
:param stdin: Optional string that is passed to command stdin.
:param boolean print_stdout: Pass stdout through to sys.stdout.
:param boolean print_stderr: Pass stderr through to sys.stderr.
:param callback_stdout: Optional function which is called for each
stdout line.
:param callback_stderr: Optional function which is called for each
stderr line.
:param dict environment: The full execution environment for the command.
:param dict environment_override: Change environment variables from the
current values for command execution.
:param boolean win32resolve: If on Windows, find the appropriate executable
first. This allows running of .bat, .cmd, etc.
files without explicitly specifying their
extension.
:param string working_directory: If specified, run the executable from
within this working directory.
:return: A ReturnObject() containing the executed command, stdout and stderr
(both as bytestrings), and the exitcode. Further values such as
process runtime can be accessed as dictionary values.
"""
time_start = time.strftime("%Y-%m-%d %H:%M:%S GMT", time.gmtime())
logger.debug("Starting external process: %s", command)
if stdin is None:
stdin_pipe = None
else:
assert sys.platform != "win32", "stdin argument not supported on Windows"
stdin_pipe = subprocess.PIPE
start_time = timeit.default_timer()
if timeout is not None:
max_time = start_time + timeout
if environment is not None:
env = {key: _path_resolve(environment[key]) for key in environment}
else:
env = os.environ
if environment_override:
env = copy.copy(env)
env.update(
{
key: str(_path_resolve(environment_override[key]))
for key in environment_override
}
)
command = tuple(_path_resolve(part) for part in command)
if win32resolve and sys.platform == "win32":
command = _windows_resolve(command)
p = subprocess.Popen(
command,
shell=False,
cwd=_path_resolve(working_directory),
env=env,
stdin=stdin_pipe,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
thread_pipe_pool = []
notifyee, notifier = Pipe(False)
thread_pipe_pool.append(notifyee)
stdout = _NonBlockingStreamReader(
p.stdout,
output=print_stdout,
debug=debug,
notify=notifier.close,
callback=callback_stdout,
)
notifyee, notifier = Pipe(False)
thread_pipe_pool.append(notifyee)
stderr = _NonBlockingStreamReader(
p.stderr,
output=print_stderr,
debug=debug,
notify=notifier.close,
callback=callback_stderr,
)
if stdin is not None:
notifyee, notifier = Pipe(False)
thread_pipe_pool.append(notifyee)
stdin = _NonBlockingStreamWriter(
p.stdin, data=stdin, debug=debug, notify=notifier.close
)
timeout_encountered = False
while (p.returncode is None) and (
(timeout is None) or (timeit.default_timer() < max_time)
):
if debug and timeout is not None:
logger.debug("still running (T%.2fs)" % (timeit.default_timer() - max_time))
# wait for some time or until a stream is closed
try:
if thread_pipe_pool:
# Wait for up to 0.5 seconds or for a signal on a remaining stream,
# which could indicate that the process has terminated.
try:
event = thread_pipe_pool[0].poll(0.5)
except IOError as e:
# on Windows this raises "IOError: [Errno 109] The pipe has been ended"
# which is for all intents and purposes equivalent to a True return value.
if e.errno != 109:
raise
event = True
if event:
# One-shot, so remove stream and watch remaining streams
thread_pipe_pool.pop(0)
if debug:
logger.debug("Event received from stream thread")
else:
time.sleep(0.5)
except KeyboardInterrupt:
p.kill() # if user pressed Ctrl+C we won't be able to produce a proper report anyway
# but at least make sure the child process dies with us
raise
# check if process is still running
p.poll()
if p.returncode is None:
# timeout condition
timeout_encountered = True
if debug:
logger.debug("timeout (T%.2fs)" % (timeit.default_timer() - max_time))
# send terminate signal and wait some time for buffers to be read
p.terminate()
if thread_pipe_pool:
thread_pipe_pool[0].poll(0.5)
if not stdout.has_finished() or not stderr.has_finished():
time.sleep(2)
p.poll()
if p.returncode is None:
# thread still alive
# send kill signal and wait some more time for buffers to be read
p.kill()
if thread_pipe_pool:
thread_pipe_pool[0].poll(0.5)
if not stdout.has_finished() or not stderr.has_finished():
time.sleep(5)
p.poll()
if p.returncode is None:
raise RuntimeError("Process won't terminate")
runtime = timeit.default_timer() - start_time
if timeout is not None:
logger.debug(
"Process ended after %.1f seconds with exit code %d (T%.2fs)"
% (runtime, p.returncode, timeit.default_timer() - max_time)
)
else:
logger.debug(
"Process ended after %.1f seconds with exit code %d"
% (runtime, p.returncode)
)
stdout = stdout.get_output()
stderr = stderr.get_output()
time_end = time.strftime("%Y-%m-%d %H:%M:%S GMT", time.gmtime())
result = ReturnObject(
{
"exitcode": p.returncode,
"command": command,
"stdout": stdout,
"stderr": stderr,
"timeout": timeout_encountered,
"runtime": runtime,
"time_start": time_start,
"time_end": time_end,
}
)
if stdin is not None:
result.update(
{
"stdin_bytes_sent": stdin.bytes_sent(),
"stdin_bytes_remain": stdin.bytes_remaining(),
}
)
return result
| 0.001354 |
def gf_lehmann(eig_e, eig_states, d_dag, beta, omega, d=None):
"""Outputs the lehmann representation of the greens function
omega has to be given, as matsubara or real frequencies"""
ew = np.exp(-beta*eig_e)
zet = ew.sum()
G = np.zeros_like(omega)
basis_create = np.dot(eig_states.T, d_dag.dot(eig_states))
if d is None:
tmat = np.square(basis_create)
else:
tmat = np.dot(eig_states.T, d.T.dot(eig_states))*basis_create
tmat *= np.add.outer(ew, ew)
gap = np.add.outer(-eig_e, eig_e)
N = eig_e.size
for i, j in product(range(N), range(N)):
G += tmat[i, j] / (omega + gap[i, j])
return G / zet
| 0.001486 |
def urlread(url, encoding='utf8'):
"""
Read the content of an URL.
Parameters
----------
url : str
Returns
-------
content : str
"""
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
response = urlopen(url)
content = response.read()
content = content.decode(encoding)
return content
| 0.002513 |
def set_priority(self, priority):
""" Set Screen Priority Class """
if priority in ["hidden", "background", "info", "foreground", "alert", "input"]:
self.priority = priority
self.server.request("screen_set %s priority %s" % (self.ref, self.priority))
| 0.013746 |
def InitializeNoPrompt(config=None,
external_hostname = None,
admin_password = None,
mysql_hostname = None,
mysql_port = None,
mysql_username = None,
mysql_password = None,
mysql_db = None,
mysql_client_key_path = None,
mysql_client_cert_path = None,
mysql_ca_cert_path = None,
redownload_templates = False,
repack_templates = True,
token = None):
"""Initialize GRR with no prompts.
Args:
config: config object
external_hostname: A hostname.
admin_password: A password used for the admin user.
mysql_hostname: A hostname used for establishing connection to MySQL.
mysql_port: A port used for establishing connection to MySQL.
mysql_username: A username used for establishing connection to MySQL.
mysql_password: A password used for establishing connection to MySQL.
mysql_db: Name of the MySQL database to use.
mysql_client_key_path: The path name of the client private key file.
mysql_client_cert_path: The path name of the client public key certificate.
mysql_ca_cert_path: The path name of the CA certificate file.
redownload_templates: Indicates whether templates should be re-downloaded.
repack_templates: Indicates whether templates should be re-packed.
token: auth token
Raises:
ValueError: if required flags are not provided, or if the config has
already been initialized.
IOError: if config is not writeable
ConfigInitError: if GRR is unable to connect to a running MySQL instance.
This method does the minimum work necessary to configure GRR without any user
prompting, relying heavily on config default values. User must supply the
external hostname, admin password, and MySQL password; everything else is set
automatically.
"""
if config["Server.initialized"]:
raise ValueError("Config has already been initialized.")
if not external_hostname:
raise ValueError(
"--noprompt set, but --external_hostname was not provided.")
if not admin_password:
raise ValueError("--noprompt set, but --admin_password was not provided.")
if mysql_password is None:
raise ValueError("--noprompt set, but --mysql_password was not provided.")
print("Checking write access on config %s" % config.parser)
if not os.access(config.parser.filename, os.W_OK):
raise IOError("Config not writeable (need sudo?)")
config_dict = {}
config_dict["Datastore.implementation"] = "MySQLAdvancedDataStore"
config_dict["Mysql.host"] = mysql_hostname or config["Mysql.host"]
config_dict["Mysql.port"] = mysql_port or config["Mysql.port"]
config_dict["Mysql.database_name"] = mysql_db or config["Mysql.database_name"]
config_dict["Mysql.database_username"] = (
mysql_username or config["Mysql.database_username"])
config_dict["Client.server_urls"] = [
"http://%s:%s/" % (external_hostname, config["Frontend.bind_port"])
]
config_dict["AdminUI.url"] = "http://%s:%s" % (external_hostname,
config["AdminUI.port"])
config_dict["Logging.domain"] = external_hostname
config_dict["Monitoring.alert_email"] = ("grr-monitoring@%s" %
external_hostname)
config_dict["Monitoring.emergency_access_email"] = ("grr-emergency@%s" %
external_hostname)
# Print all configuration options, except for the MySQL password.
print("Setting configuration as:\n\n%s" % config_dict)
config_dict["Mysql.database_password"] = mysql_password
if mysql_client_key_path is not None:
config_dict["Mysql.client_key_path"] = mysql_client_key_path
config_dict["Mysql.client_cert_path"] = mysql_client_cert_path
config_dict["Mysql.ca_cert_path"] = mysql_ca_cert_path
if CheckMySQLConnection(config_dict):
print("Successfully connected to MySQL with the given configuration.")
else:
print("Error: Could not connect to MySQL with the given configuration.")
raise ConfigInitError()
for key, value in iteritems(config_dict):
config.Set(key, value)
config_updater_keys_util.GenerateKeys(config)
FinalizeConfigInit(
config,
token,
admin_password=admin_password,
redownload_templates=redownload_templates,
repack_templates=repack_templates,
prompt=False)
| 0.012003 |
def find_packages_requirements_dists(pkg_names, working_set=None):
"""
Return the entire list of dependency requirements, reversed from the
bottom.
"""
working_set = working_set or default_working_set
requirements = [
r for r in (Requirement.parse(req) for req in pkg_names)
if working_set.find(r)
]
return list(reversed(working_set.resolve(requirements)))
| 0.002469 |
def persist_database(metamodel, path, mode='w'):
'''
Persist all instances, class definitions and association definitions in a
*metamodel* by serializing them and saving to a *path* on disk.
'''
with open(path, mode) as f:
for kind in sorted(metamodel.metaclasses.keys()):
metaclass = metamodel.metaclasses[kind]
s = serialize_class(metaclass.clazz)
f.write(s)
for index_name, attribute_names in metaclass.indices.items():
attribute_names = ', '.join(attribute_names)
s = 'CREATE UNIQUE INDEX %s ON %s (%s);\n' % (index_name,
metaclass.kind,
attribute_names)
f.write(s)
for ass in sorted(metamodel.associations, key=lambda x: x.rel_id):
s = serialize_association(ass)
f.write(s)
for inst in metamodel.instances:
s = serialize_instance(inst)
f.write(s)
| 0.002742 |
def catch_end_signal(self, signum, frame):
"""
When a SIGINT/SIGTERM signal is caught, this method is called, asking
for the worker to terminate as soon as possible.
"""
if self.end_signal_caught:
self.log('Previous signal caught, will end soon')
return
signal_name = dict((getattr(signal, n), n) for n in dir(signal)
if n.startswith('SIG') and '_' not in n).get(signum, signum)
if self.status == 'running':
            self.log('Caught %s signal: stopping after current job' % signal_name,
level='critical')
else:
delay = self.timeout if self.status == 'waiting' else self.fetch_priorities_delay
            self.log('Caught %s signal: stopping in max %d seconds' % (
signal_name, delay), level='critical')
self.end_signal_caught = self.end_forced = True
| 0.006424 |
def make_redirect_url(self, path_info, query_args=None, domain_part=None):
"""Creates a redirect URL.
:internal:
"""
suffix = ''
if query_args:
suffix = '?' + self.encode_query_args(query_args)
return str('%s://%s/%s%s' % (
self.url_scheme,
self.get_host(domain_part),
posixpath.join(self.script_name[:-1].lstrip('/'),
url_quote(path_info.lstrip('/'), self.map.charset,
safe='/:|+')),
suffix
))
| 0.003478 |
def merge_split_alignments(data):
"""Merge split BAM inputs generated by common workflow language runs.
"""
data = utils.to_single_data(data)
data = _merge_align_bams(data)
data = _merge_hla_fastq_inputs(data)
return [[data]]
| 0.004016 |
def update(self, portfolio, date, perfs=None):
'''
        Updates the portfolio universe with the algo state
'''
# Make the manager aware of current simulation
self.portfolio = portfolio
self.perfs = perfs
self.date = date
| 0.007299 |
def td_waveform_to_fd_waveform(waveform, out=None, length=None,
buffer_length=100):
""" Convert a time domain into a frequency domain waveform by FFT.
As a waveform is assumed to "wrap" in the time domain one must be
careful to ensure the waveform goes to 0 at both "boundaries". To
ensure this is done correctly the waveform must have the epoch set such
the merger time is at t=0 and the length of the waveform should be
shorter than the desired length of the FrequencySeries (times 2 - 1)
so that zeroes can be suitably pre- and post-pended before FFTing.
If given, out is a memory array to be used as the output of the FFT.
If not given memory is allocated internally.
If present the length of the returned FrequencySeries is determined
from the length out. If out is not given the length can be provided
    explicitly, or it will be chosen as the nearest power of 2. If choosing
length explicitly the waveform length + buffer_length is used when
choosing the nearest binary number so that some zero padding is always
added.
"""
# Figure out lengths and set out if needed
if out is None:
if length is None:
N = pnutils.nearest_larger_binary_number(len(waveform) + \
buffer_length)
n = int(N//2) + 1
else:
n = length
N = (n-1)*2
out = zeros(n, dtype=complex_same_precision_as(waveform))
else:
n = len(out)
N = (n-1)*2
delta_f = 1. / (N * waveform.delta_t)
# total duration of the waveform
tmplt_length = len(waveform) * waveform.delta_t
if len(waveform) > N:
err_msg = "The time domain template is longer than the intended "
err_msg += "duration in the frequency domain. This situation is "
err_msg += "not supported in this function. Please shorten the "
err_msg += "waveform appropriately before calling this function or "
err_msg += "increase the allowed waveform length. "
err_msg += "Waveform length (in samples): {}".format(len(waveform))
err_msg += ". Intended length: {}.".format(N)
raise ValueError(err_msg)
# for IMR templates the zero of time is at max amplitude (merger)
# thus the start time is minus the duration of the template from
# lower frequency cutoff to merger, i.e. minus the 'chirp time'
tChirp = - float( waveform.start_time ) # conversion from LIGOTimeGPS
waveform.resize(N)
k_zero = int(waveform.start_time / waveform.delta_t)
waveform.roll(k_zero)
htilde = FrequencySeries(out, delta_f=delta_f, copy=False)
fft(waveform.astype(real_same_precision_as(htilde)), htilde)
htilde.length_in_time = tmplt_length
htilde.chirp_length = tChirp
return htilde
| 0.001719 |
def hardware_info():
"""
Returns basic hardware information about the computer.
Gives actual number of CPU's in the machine, even when hyperthreading is
turned on.
Returns
-------
info : dict
Dictionary containing cpu and memory information.
"""
try:
if sys.platform == 'darwin':
out = _mac_hardware_info()
elif sys.platform == 'win32':
out = _win_hardware_info()
elif sys.platform in ['linux', 'linux2']:
out = _linux_hardware_info()
else:
out = {}
except:
return {}
else:
return out
| 0.00315 |
def _check_env_var(envvar: str) -> bool:
"""Check Environment Variable to verify that it is set and not empty.
:param envvar: Environment Variable to Check.
:returns: True if Environment Variable is set and not empty.
:raises: KeyError if Environment Variable is not set or is empty.
.. versionadded:: 0.0.12
"""
if os.getenv(envvar) is None:
raise KeyError(
"Required ENVVAR: {0} is not set".format(envvar))
if not os.getenv(envvar): # test if env var is empty
raise KeyError(
"Required ENVVAR: {0} is empty".format(envvar))
return True
| 0.001618 |
def chunk(self, size="150", axis=None, padding=None):
"""
Chunks records of a distributed array.
        Chunking breaks arrays into subarrays, using a specified
size of chunks along each value dimension. Can alternatively
specify an average chunk byte size (in kilobytes) and the size of
chunks (as ints) will be computed automatically.
Parameters
----------
size : tuple, int, or str, optional, default = "150"
A string giving the size in kilobytes, or a tuple with the size
of chunks along each dimension.
axis : int or tuple, optional, default = None
            One or more axes to chunk the array along; if None,
            all axes will be used.
padding: tuple or int, default = None
Number of elements per dimension that will overlap with the adjacent chunk.
If a tuple, specifies padding along each chunked dimension; if a int, same
padding will be applied to all chunked dimensions.
Returns
-------
ChunkedArray
"""
if type(size) is not str:
size = tupleize((size))
axis = tupleize((axis))
padding = tupleize((padding))
from bolt.spark.chunk import ChunkedArray
chnk = ChunkedArray(rdd=self._rdd, shape=self._shape, split=self._split, dtype=self._dtype)
return chnk._chunk(size, axis, padding)
| 0.003484 |
def _make_interpolation_node(self, tag_type, tag_key, leading_whitespace):
"""
Create and return a non-section node for the parse tree.
"""
# TODO: switch to using a dictionary instead of a bunch of ifs and elifs.
if tag_type == '!':
return _CommentNode()
if tag_type == '=':
delimiters = tag_key.split()
self._change_delimiters(delimiters)
return _ChangeNode(delimiters)
if tag_type == '':
return _EscapeNode(tag_key)
if tag_type == '&':
return _LiteralNode(tag_key)
if tag_type == '>':
return _PartialNode(tag_key, leading_whitespace)
raise Exception("Invalid symbol for interpolation tag: %s" % repr(tag_type))
| 0.005109 |
def is_running(self, family: str) -> bool:
"""Check if an analysis is currently running/pending for a family."""
latest_analysis = self.analyses(family=family).first()
return latest_analysis and latest_analysis.status in TEMP_STATUSES
| 0.007752 |
def _send_event_to_project(self, project_id, action, event):
"""
        Send an event to all the clients listening for notifications for
        this project
        :param project_id: ID of the project where we need to send the event
:param action: Action name
:param event: Event to send
"""
try:
project_listeners = self._listeners[project_id]
except KeyError:
return
for listener in project_listeners:
listener.put_nowait((action, event, {}))
| 0.003802 |
def headerlist(self):
""" WSGI conform list of (header, value) tuples. """
if 'Content-Type' not in self.headers:
self.headers.add_header('Content-Type', self.default_content_type)
if self._cookies:
for c in self._cookies.values():
self.headers.add_header('Set-Cookie', c.OutputString())
return self.headers.items()
| 0.005168 |
def partition_loci(self,verbose=False):
""" break the locus up into unconnected loci
:return: list of loci
:rtype: TranscriptLoci[]
"""
self.g.merge_cycles()
#sys.stderr.write(self.g.get_report()+"\n")
gs = self.g.partition_graph(verbose=verbose)
    tls = []  # make a list of transcript loci
for g in gs:
tl = TranscriptLoci()
tl.merge_rules = self.merge_rules
ns = g.get_nodes()
for n in [x.payload for x in ns]:
for tx in n:
tl.add_transcript(tx)
if len(tl.g.get_nodes()) > 0:
tls.append(tl)
#print '-----------------------'
#names = []
#for tl in tls:
# for tx in tl.get_transcripts():
# names.append(tx.get_gene_name())
#for name in sorted(names):
# print name
#print '--------------------------'
return tls
| 0.019002 |
def traverse(self):
"""Traverse proposal kernel"""
if self.verbose > 1:
print_('\t' + self._id + ' Running Traverse proposal kernel')
# Mask for values to move
phi = self.phi
theta = self.traverse_theta
# Calculate beta
if (random() < (theta - 1) / (2 * theta)):
beta = exp(1 / (theta + 1) * log(random()))
else:
beta = exp(1 / (1 - theta) * log(random()))
if self._prime:
xp, x = self.values
else:
x, xp = self.values
if self.verbose > 1:
print_('\t' + 'Current value = ' + str(x))
x = (xp + beta * (xp - x)) * phi + x * (phi == False)
if self.verbose > 1:
print_('\t' + 'Proposed value = ' + str(x))
self.stochastic.value = x
# Set proposal adjustment factor
self.hastings_factor = (sum(phi) - 2) * log(beta)
| 0.003219 |
def order_by(self, *field_names):
"""
Change translatable field names in an ``order_by`` argument
to translation fields for the current language.
"""
if not self._rewrite:
return super(MultilingualQuerySet, self).order_by(*field_names)
new_args = []
for key in field_names:
new_args.append(rewrite_order_lookup_key(self.model, key))
return super(MultilingualQuerySet, self).order_by(*new_args)
| 0.004158 |
def get_ipv4_or_ipv6(self, ip):
"""
Get a Ipv4 or Ipv6 by IP
:param ip: IPv4 or Ipv6. 'xxx.xxx.xxx.xxx or xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx'
:return: Dictionary with the following structure:
::
{'ips': [{'oct4': < oct4 >, 'oct2': < oct2 >, 'oct3': < oct3 >,
'oct1': < oct1 >, 'version': < version >,
'networkipv4': < networkipv4 >, 'id': < id >, 'descricao': < descricao >}, ... ] }.
or
{'ips': [ {'block1': < block1 >, 'block2': < block2 >, 'block3': < block3 >, 'block4': < block4 >, 'block5': < block5 >, 'block6': < block6 >, 'block7': < block7 >, 'block8': < block8 >,
'version': < version >, 'networkipv6': < networkipv6 >, 'id': < id >, 'descricao': < descricao >}, ... ] }.
:raise IpNaoExisteError: Ipv4 or Ipv6 not found.
:raise UserNotAuthorizedError: User dont have permission to perform operation.
:raise InvalidParameterError: Ip string is none or invalid.
:raise XMLError: Networkapi failed to generate the XML response.
:raise DataBaseError: Networkapi failed to access the database.
"""
ip_map = dict()
ip_map['ip'] = ip
url = "ip/getbyoctblock/"
code, xml = self.submit({'ip_map': ip_map}, 'POST', url)
return self.response(code, xml)
| 0.005124 |
def set_energy(self, spins, target_energy):
"""Set the energy of Theta with spins fixed to target_energy.
Args:
spins (dict): Spin values for a subset of the variables in Theta.
target_energy (float): The desired energy for Theta with spins fixed.
Notes:
Add equality constraint to assertions.
"""
spin_energy = self.energy(spins)
self.assertions.add(Equals(spin_energy, limitReal(target_energy)))
| 0.006198 |
def find_next_character(code, position, char):
"""Find next char and return its first and last positions"""
end = LineCol(code, *position)
while not end.eof and end.char() in WHITESPACE:
end.inc()
if not end.eof and end.char() == char:
return end.tuple(), inc_tuple(end.tuple())
return None, None
| 0.003003 |
def from_dict(config):
'''
Instantiate a new ProxyConfig from a dictionary that represents a
client configuration, as described in `the documentation`_.
.. _the documentation:
https://docs.docker.com/network/proxy/#configure-the-docker-client
'''
return ProxyConfig(
http=config.get('httpProxy'),
https=config.get('httpsProxy'),
ftp=config.get('ftpProxy'),
no_proxy=config.get('noProxy'),
)
| 0.003945 |
def transform_request(self, orig_request, params, method_config):
"""Transforms orig_request to apiserving request.
This method uses orig_request to determine the currently-pending request
and returns a new transformed request ready to send to the backend. This
method accepts a rest-style or RPC-style request.
Args:
orig_request: An ApiRequest, the original request from the user.
params: A dictionary containing path parameters for rest requests, or
None for an RPC request.
method_config: A dict, the API config of the method to be called.
Returns:
An ApiRequest that's a copy of the current request, modified so it can
be sent to the backend. The path is updated and parts of the body or
other properties may also be changed.
"""
method_params = method_config.get('request', {}).get('parameters', {})
request = self.transform_rest_request(orig_request, params, method_params)
request.path = method_config.get('rosyMethod', '')
return request
| 0.000962 |
def get_issues_by_resource(resource_id, table):
"""Get all issues for a specific job."""
v1_utils.verify_existence_and_get(resource_id, table)
# When retrieving the issues for a job, we actually retrieve
    # the issues attached to the job itself + the issues attached to
# the components the job has been run with.
if table.name == 'jobs':
JJI = models.JOIN_JOBS_ISSUES
JJC = models.JOIN_JOBS_COMPONENTS
JCI = models.JOIN_COMPONENTS_ISSUES
        # Get all the issues attached to all the components attached to a job
j1 = sql.join(
_TABLE,
sql.join(
JCI,
JJC,
sql.and_(
JCI.c.component_id == JJC.c.component_id,
JJC.c.job_id == resource_id,
),
),
_TABLE.c.id == JCI.c.issue_id,
)
query = sql.select([_TABLE]).select_from(j1)
rows = flask.g.db_conn.execute(query)
rows = [dict(row) for row in rows]
        # Get all the issues attached to a job
j2 = sql.join(
_TABLE,
JJI,
sql.and_(
_TABLE.c.id == JJI.c.issue_id,
JJI.c.job_id == resource_id
)
)
query2 = sql.select([_TABLE]).select_from(j2)
rows2 = flask.g.db_conn.execute(query2)
rows += [dict(row) for row in rows2]
# When retrieving the issues for a component, we only retrieve the
# issues attached to the specified component.
else:
JCI = models.JOIN_COMPONENTS_ISSUES
query = (sql.select([_TABLE])
.select_from(JCI.join(_TABLE))
.where(JCI.c.component_id == resource_id))
rows = flask.g.db_conn.execute(query)
rows = [dict(row) for row in rows]
for row in rows:
if row['tracker'] == 'github':
l_tracker = github.Github(row['url'])
elif row['tracker'] == 'bugzilla':
l_tracker = bugzilla.Bugzilla(row['url'])
row.update(l_tracker.dump())
return flask.jsonify({'issues': rows,
'_meta': {'count': len(rows)}})
| 0.000458 |
def _resolve_indirect_inner(maybe_idict):
"""Resolve the contents an indirect dictionary (containing promises) to produce
a dictionary actual values, including merging multiple sources into a
single input.
"""
if isinstance(maybe_idict, IndirectDict):
result = {}
for key, value in list(maybe_idict.items()):
if isinstance(value, (MergeInputs, DefaultWithSource)):
result[key] = value.resolve()
else:
result[key] = value[1].get(value[0])
return result
return maybe_idict
| 0.003478 |
async def execute_insert(
self, sql: str, parameters: Iterable[Any] = None
) -> Optional[sqlite3.Row]:
"""Helper to insert and get the last_insert_rowid."""
if parameters is None:
parameters = []
return await self._execute(self._execute_insert, sql, parameters)
| 0.009709 |
def export(self, nidm_version, export_dir):
"""
Create prov entities and activities.
"""
if self.masked_median is None:
grand_mean_file = self.file.path
grand_mean_img = nib.load(grand_mean_file)
grand_mean_data = grand_mean_img.get_data()
grand_mean_data = np.ndarray.flatten(grand_mean_data)
mask_img = nib.load(self.mask_file)
mask_data = mask_img.get_data()
mask_data = np.ndarray.flatten(mask_data)
grand_mean_data_in_mask = grand_mean_data[mask_data > 0]
self.masked_median = np.median(
np.array(grand_mean_data_in_mask, dtype=float))
self.add_attributes((
(PROV['type'], self.type),
(PROV['label'], self.label),
(NIDM_MASKED_MEDIAN, self.masked_median),
(NIDM_IN_COORDINATE_SPACE, self.coord_space.id))
)
| 0.002139 |
def clean_stale_refs(self):
'''
Remove stale refs so that they are no longer seen as fileserver envs
'''
cleaned = []
cmd_str = 'git remote prune origin'
# Attempt to force all output to plain ascii english, which is what some parsing code
# may expect.
# According to stackoverflow (http://goo.gl/l74GC8), we are setting LANGUAGE as well
# just to be sure.
env = os.environ.copy()
env[b"LANGUAGE"] = b"C"
env[b"LC_ALL"] = b"C"
cmd = subprocess.Popen(
shlex.split(cmd_str),
close_fds=not salt.utils.platform.is_windows(),
cwd=os.path.dirname(self.gitdir),
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
output = cmd.communicate()[0]
if six.PY3:
output = output.decode(__salt_system_encoding__)
if cmd.returncode != 0:
log.warning(
'Failed to prune stale branches for %s remote \'%s\'. '
'Output from \'%s\' follows:\n%s',
self.role, self.id, cmd_str, output
)
else:
marker = ' * [pruned] '
for line in salt.utils.itertools.split(output, '\n'):
if line.startswith(marker):
cleaned.append(line[len(marker):].strip())
if cleaned:
log.debug(
'%s pruned the following stale refs: %s',
self.role, ', '.join(cleaned)
)
return cleaned
| 0.002525 |
def error_info():
"""Return information about failed tasks."""
worker = global_worker
worker.check_connected()
return (global_state.error_messages(driver_id=worker.task_driver_id) +
global_state.error_messages(driver_id=DriverID.nil()))
| 0.003788 |
def default_security_rule_get(name, security_group, resource_group, **kwargs):
'''
.. versionadded:: 2019.2.0
Get details about a default security rule within a security group.
:param name: The name of the security rule to query.
:param security_group: The network security group containing the
security rule.
:param resource_group: The resource group name assigned to the
network security group.
CLI Example:
.. code-block:: bash
salt-call azurearm_network.default_security_rule_get DenyAllOutBound testnsg testgroup
'''
result = {}
default_rules = default_security_rules_list(
security_group=security_group,
resource_group=resource_group,
**kwargs
)
if isinstance(default_rules, dict) and 'error' in default_rules:
return default_rules
try:
for default_rule in default_rules:
if default_rule['name'] == name:
result = default_rule
if not result:
result = {
'error': 'Unable to find {0} in {1}!'.format(name, security_group)
}
except KeyError as exc:
log.error('Unable to find %s in %s!', name, security_group)
result = {'error': str(exc)}
return result
| 0.002333 |
def _parse_names_dict(feature_names):
"""Helping function of `_parse_feature_names` that parses a dictionary of feature names."""
feature_collection = OrderedDict()
for feature_name, new_feature_name in feature_names.items():
if isinstance(feature_name, str) and (isinstance(new_feature_name, str) or
new_feature_name is ...):
feature_collection[feature_name] = new_feature_name
else:
if not isinstance(feature_name, str):
raise ValueError('Failed to parse {}, expected string'.format(feature_name))
else:
raise ValueError('Failed to parse {}, expected string or Ellipsis'.format(new_feature_name))
return feature_collection
| 0.007229 |
def fit_shifts(xy, uv):
""" Performs a simple fit for the shift only between
matched lists of positions 'xy' and 'uv'.
Output: (same as for fit_arrays)
=================================
DEVELOPMENT NOTE:
Checks need to be put in place to verify that
enough objects are available for a fit.
=================================
"""
diff_pts = xy - uv
Pcoeffs = np.array([1.0,0.0,diff_pts[:,0].mean(dtype=np.float64)])
Qcoeffs = np.array([0.0,1.0,diff_pts[:,1].mean(dtype=np.float64)])
fit = build_fit(Pcoeffs, Qcoeffs, 'shift')
resids = diff_pts - fit['offset']
fit['resids'] = resids
fit['rms'] = resids.std(axis=0)
fit['rmse'] = float(np.sqrt(np.mean(2 * resids**2)))
fit['mae'] = float(np.mean(np.linalg.norm(resids, axis=1)))
return fit
| 0.008226 |
def index(self, value):
"""Return the 0-based position of integer `value` in
the sequence this range represents."""
try:
diff = value - self._start
except TypeError:
raise ValueError('%r is not in range' % value)
quotient, remainder = divmod(diff, self._step)
if remainder == 0 and 0 <= quotient < self._len:
return abs(quotient)
raise ValueError('%r is not in range' % value)
| 0.004274 |
def state_push(self):
"""
Push the state of all generators
"""
super(Composite,self).state_push()
for gen in self.generators:
gen.state_push()
| 0.015464 |
def nsname(self, uri: Union[str, URIRef]) -> str:
"""
Return the 'ns:name' format of URI
:param uri: URI to transform
:return: nsname format of URI or straight URI if no mapping
"""
uri = str(uri)
nsuri = ""
prefix = None
for pfx, ns in self:
nss = str(ns)
if uri.startswith(nss) and len(nss) > len(nsuri):
nsuri = nss
prefix = pfx
return (prefix.lower() + ':' + uri[len(nsuri):]) if prefix is not None else uri
| 0.005474 |
def _get_instance(self):
"""Retrieve instance matching instance_id."""
try:
instance = self.compute_driver.ex_get_node(
self.running_instance_id,
zone=self.region
)
except ResourceNotFoundError as e:
raise GCECloudException(
'Instance with id: {id} cannot be found: {error}'.format(
id=self.running_instance_id, error=e
)
)
return instance
| 0.003968 |
def imagetransformer_sep_channels_16l_16h_imgnet_lrg_loc():
"""separate rgb embeddings."""
hparams = imagetransformer_sep_channels_12l_16h_imagenet_large()
hparams.num_hidden_layers = 16
hparams.local_attention = True
hparams.batch_size = 1
hparams.block_length = 256
return hparams
| 0.027027 |
def parse_options(arguments=None):
""" Reads command-line arguments
>>> parse_options('--indent-comments')
"""
if arguments is None:
arguments = sys.argv[1:]
if isinstance(arguments, str):
arguments = arguments.split()
if isinstance(arguments, argparse.Namespace):
return arguments
parser = create_args_parser()
args = parser.parse_args(arguments)
# pprint(args.__dict__)
args.dialect = args.dialect.lower()
if args.dialect not in ['lisp', 'newlisp', 'clojure', 'scheme', 'all', '']:
parser.error("`{0}' is not a recognized dialect".format(args.dialect))
args.backup_dir = os.path.expanduser(args.backup_dir)
if not os.path.exists(args.backup_dir):
parser.error("Directory `{0}' does not exist".format(args.backup_dir))
if len(args.files) > 1 and args.output_file:
parser.error('Cannot use the -o flag when more than one file is specified')
if not args.files:
# Indentation from standard input
if args.modify and not args.output_file:
args.modify = False
args.backup = False
args.warning = False
if args.output_diff:
# If someone requests a diff we assume he/she doesn't want the file to be
# modified
args.modify = False
return args
| 0.002266 |
def get_username(self):
"""Get a username formatted for a specific token version."""
_from = self.auth_context['from']
if self.token_version == 1:
return '{0}'.format(_from)
elif self.token_version == 2:
_user_type = self.auth_context['user_type']
return '{0}/{1}/{2}'.format(
self.token_version,
_user_type,
_from
)
| 0.004505 |
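An illustrative rendering of the two username formats above, with made-up auth_context values.

# Illustrative only: the two username formats produced above, with made-up
# auth_context values ('alice' as the caller, 'service' as the user type).
token_version = 2
auth_context = {'from': 'alice', 'user_type': 'service'}
if token_version == 1:
    username = '{0}'.format(auth_context['from'])             # -> 'alice'
else:
    username = '{0}/{1}/{2}'.format(token_version,
                                    auth_context['user_type'],
                                    auth_context['from'])     # -> '2/service/alice'
print(username)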
def allocation_explain(self, body=None, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-allocation-explain.html>`_
:arg body: The index, shard, and primary flag to explain. Empty means
'explain the first unassigned shard'
:arg include_disk_info: Return information about disk usage and shard
sizes (default: false)
:arg include_yes_decisions: Return 'YES' decisions in explanation
(default: false)
"""
return self.transport.perform_request('GET',
'/_cluster/allocation/explain', params=params, body=body)
| 0.004601 |
def _reset(self):
"""
Initializes the :class:`Harvester` object. Once you reset the
:class:`Harvester` object, all allocated resources, including buffers
and remote device, will be released.
:return: None.
"""
#
for ia in self._ias:
ia._destroy()
self._ias.clear()
#
self._logger.info('Started resetting the Harvester object.')
self.remove_cti_files()
self._release_gentl_producers()
if self._profiler:
self._profiler.print_diff()
#
self._logger.info('Completed resetting the Harvester object.')
| 0.003067 |
def main():
"Boots up the command line tool"
logging.captureWarnings(True)
args = build_parser().parse_args()
# Configure logging
args.setup_logging(args)
# Dispatch into the appropriate subcommand function.
try:
return args.func(args)
except SystemExit:
raise
except:
logging.exception('Problem when running command. Sorry!')
sys.exit(1)
| 0.004926 |
def scopusParser(scopusFile):
"""Parses a scopus file, _scopusFile_, to extract the individual lines as [ScopusRecords](../classes/ScopusRecord.html#metaknowledge.scopus.ScopusRecord).
A Scopus file is a csv (Comma-separated values) with a complete header, see [`scopus.scopusHeader`](#metaknowledge.scopus) for the entries, and each line after it containing a record's entry. The string valued entries are quoted with double quotes which means double quotes inside them can cause issues, see [scopusRecordParser()](#metaknowledge.scopus.recordScopus.scopusRecordParser) for more information.
# Parameters
_scopusFile_ : `str`
> A path to a valid scopus file, use [isScopusFile()](#metaknowledge.scopus.scopusHandlers.isScopusFile) to verify
# Returns
`set[ScopusRecord]`
> Records for each of the entries
"""
#assumes the file is Scopus
recSet = set()
error = None
lineNum = 0
try:
with open(scopusFile, 'r', encoding = 'utf-8') as openfile:
#Get rid of the BOM
openfile.read(1)
header = openfile.readline()[:-1].split(',')
if len(set(header) ^ set(scopusHeader)) == 0:
header = None
lineNum = 0
try:
for line, row in enumerate(openfile, start = 2):
lineNum = line
recSet.add(ScopusRecord(row, header = header, sFile = scopusFile, sLine = line))
except BadScopusFile as e:
if error is None:
error = BadScopusFile("The file '{}' becomes unparsable after line: {}, due to the error: {} ".format(scopusFile, lineNum, e))
except (csv.Error, UnicodeDecodeError):
if error is None:
error = BadScopusFile("The file '{}' has parts of it that are unparsable starting at line: {}.".format(scopusFile, lineNum))
return recSet, error
| 0.009932 |
def find(self, pattern):
"""
:param pattern: REGULAR EXPRESSION TO MATCH NAME (NOT INCLUDING PATH)
:return: LIST OF File OBJECTS THAT HAVE MATCHING NAME
"""
output = []
def _find(dir):
if re.match(pattern, dir._filename.split("/")[-1]):
output.append(dir)
if dir.is_directory():
for c in dir.children:
_find(c)
_find(self)
return output
| 0.004193 |
def find_or_build(self, constructor, props):
"""Looks for a model that matches the given dictionary constraints. If it is not found, a new
model of the given type is constructed and returned.
"""
model = self.find_model(constructor, props)
return model or constructor(**props)
| 0.006757 |
def recruit(self, n=1):
"""Generate experiemnt URLs and print them to the console."""
logger.info("Recruiting {} CLI participants".format(n))
urls = []
template = "{}/ad?recruiter={}&assignmentId={}&hitId={}&workerId={}&mode={}"
for i in range(n):
ad_url = template.format(
get_base_url(),
self.nickname,
generate_random_id(),
generate_random_id(),
generate_random_id(),
self._get_mode(),
)
logger.info("{} {}".format(NEW_RECRUIT_LOG_PREFIX, ad_url))
urls.append(ad_url)
return urls
| 0.004444 |
def cli(ctx, group_id, users=None):
"""Update the group's admins
Output:
dictionary of group information
"""
return ctx.gi.groups.update_group_admin(group_id, users=users)
| 0.005291 |
def getPinProperties(cardConnection, featureList=None, controlCode=None):
""" return the PIN_PROPERTIES structure
@param cardConnection: L{CardConnection} object
@param featureList: feature list as returned by L{getFeatureRequest()}
@param controlCode: control code for L{FEATURE_IFD_PIN_PROPERTIES}
@rtype: dict
@return: a dict """
if controlCode is None:
if featureList is None:
featureList = getFeatureRequest(cardConnection)
controlCode = hasFeature(featureList, FEATURE_IFD_PIN_PROPERTIES)
if controlCode is None:
return {'raw': []}
response = cardConnection.control(controlCode, [])
d = {
'raw': response,
'LcdLayoutX': response[0],
'LcdLayoutY': response[1],
'EntryValidationCondition': response[2],
'TimeOut2': response[3]}
return d
| 0.00113 |
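A worked unpacking of the 4-byte control response above, using made-up bytes rather than output from a real reader.

# Worked example of how the 4-byte control response is unpacked above,
# with hypothetical response bytes.
response = [0x00, 0x00, 0x07, 0x0F]
d = {
    'raw': response,
    'LcdLayoutX': response[0],                # 0x00
    'LcdLayoutY': response[1],                # 0x00
    'EntryValidationCondition': response[2],  # 0x07
    'TimeOut2': response[3],                  # 0x0F
}
print(d)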
def restart(self):
"""Restart the debugger after source code changes."""
_module_finder.reset()
linecache.checkcache()
for module_bpts in self.breakpoints.values():
module_bpts.reset()
| 0.008772 |
def json_numpy_obj_hook(dct):
"""Decodes a previously encoded numpy ndarray with proper shape and dtype.
And decompresses the data with blosc
:param dct: (dict) json encoded ndarray
:return: (ndarray) if input was an encoded ndarray
"""
if isinstance(dct, dict) and '__ndarray__' in dct:
array = dct['__ndarray__']
# http://stackoverflow.com/questions/24369666/typeerror-b1-is-not-json-serializable
if sys.version_info >= (3, 0):
array = array.encode('utf-8')
data = base64.b64decode(array)
if has_blosc:
data = blosc.decompress(data)
try:
dtype = np.dtype(ast.literal_eval(dct['dtype']))
except ValueError: # If the array is not a recarray
dtype = dct['dtype']
return np.frombuffer(data, dtype).reshape(dct['shape'])
return dct
| 0.001145 |
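A round-trip sketch for the hook above, assuming blosc is not installed (has_blosc is False); the encoder shown is illustrative rather than the project's own serializer.

# Round-trip sketch: encode an ndarray into the expected dict form, then decode
# it with json.loads(..., object_hook=json_numpy_obj_hook). Assumes no blosc.
import base64
import json
import numpy as np

arr = np.arange(6, dtype=np.float64).reshape(2, 3)
encoded = json.dumps({'__ndarray__': base64.b64encode(arr.tobytes()).decode('ascii'),
                      'dtype': str(arr.dtype),
                      'shape': arr.shape})
decoded = json.loads(encoded, object_hook=json_numpy_obj_hook)
assert np.array_equal(decoded, arr)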
def generate_megaman_data(sampling=2):
"""Generate 2D point data of the megaman image"""
data = get_megaman_image()
x = np.arange(sampling * data.shape[1]) / float(sampling)
y = np.arange(sampling * data.shape[0]) / float(sampling)
X, Y = map(np.ravel, np.meshgrid(x, y))
C = data[np.floor(Y.max() - Y).astype(int),
np.floor(X).astype(int)]
return np.vstack([X, Y]).T, C
| 0.002433 |
def reroute(self, body=None, params=None):
"""
Explicitly execute a cluster reroute allocation command including specific commands.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-reroute.html>`_
:arg body: The definition of `commands` to perform (`move`, `cancel`,
`allocate`)
:arg dry_run: Simulate the operation only and return the resulting state
:arg explain: Return an explanation of why the commands can or cannot be
executed
:arg master_timeout: Explicit operation timeout for connection to master
node
:arg metric: Limit the information returned to the specified metrics.
Defaults to all but metadata, valid choices are: '_all', 'blocks',
'metadata', 'nodes', 'routing_table', 'master_node', 'version'
:arg retry_failed: Retries allocation of shards that are blocked due to
too many subsequent allocation failures
:arg timeout: Explicit operation timeout
"""
return self.transport.perform_request('POST', '/_cluster/reroute',
params=params, body=body)
| 0.006003 |
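A sketch of a reroute request body; the index and node names are placeholders, and the client call is left commented out since it assumes an elasticsearch-py client created elsewhere.

# Example reroute request with a single `move` command (placeholder names).
body = {
    "commands": [
        {"move": {"index": "test-index", "shard": 0,
                  "from_node": "node-1", "to_node": "node-2"}}
    ]
}
# es.cluster.reroute(body=body, dry_run=True)   # `es` assumed to exist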
def render_archive(entries):
"""Creates the archive page"""
context = GLOBAL_TEMPLATE_CONTEXT.copy()
context['entries'] = entries
_render(context, 'archive_index.html',
            os.path.join(CONFIG['output_to'], 'archive/index.html'))
| 0.003937 |
def _respawn(self):
"""
Pick a random location for the star making sure it does
not overwrite an existing piece of text.
"""
self._cycle = randint(0, len(self._star_chars))
(height, width) = self._screen.dimensions
while True:
self._x = randint(0, width - 1)
self._y = self._screen.start_line + randint(0, height - 1)
if self._screen.get_from(self._x, self._y)[0] == 32:
break
self._old_char = " "
| 0.003899 |
def install_logger(logger=None, module=None):
"""
Installs given logger in given module or default logger in caller introspected module.
:param logger: Logger to install.
:type logger: Logger
:param module: Module.
:type module: ModuleType
:return: Logger.
:rtype: Logger
"""
logger = logging.getLogger(Constants.logger) if logger is None else logger
if module is None:
# Note: inspect.getmodule() can return the wrong module if it has been imported with different relatives paths.
module = sys.modules.get(inspect.currentframe().f_back.f_globals["__name__"])
setattr(module, "LOGGER", logger)
foundations.trace.register_module(module)
return logger
| 0.005517 |
def d2logpdf_dlink2(self, link_f, y, Y_metadata=None):
"""
Hessian at y, given link_f, w.r.t link_f.
i.e. second derivative logpdf at y given link(f_i) link(f_j) w.r.t link(f_i) and link(f_j)
The hessian will be 0 unless i == j
.. math::
\\frac{d^{2} \\ln p(y_{i}|\\lambda(f_{i}))}{d^{2}f} = -\\frac{1}{\\sigma^{2}}
:param link_f: latent variables link(f)
:type link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata not used in gaussian
:returns: Diagonal of log hessian matrix (second derivative of log likelihood evaluated at points link(f))
:rtype: Nx1 array
.. Note::
Will return diagonal of hessian, since every where else it is 0, as the likelihood factorizes over cases
(the distribution for y_i depends only on link(f_i) not on link(f_(j!=i))
"""
N = y.shape[0]
D = link_f.shape[1]
hess = -(1.0/self.variance)*np.ones((N, D))
return hess
| 0.006616 |
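A quick numeric check of the constant Hessian claimed above.

# For a Gaussian likelihood the diagonal of the Hessian is the constant
# -1/variance, independent of y and link_f.
import numpy as np

variance = 0.5
y = np.random.randn(4, 1)
link_f = np.random.randn(4, 1)
hess = -(1.0 / variance) * np.ones((y.shape[0], link_f.shape[1]))
assert np.allclose(hess, -2.0)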
def _execute(self, endpoint, database, query, default_timeout, properties=None):
"""Executes given query against this client"""
request_payload = {"db": database, "csl": query}
if properties:
request_payload["properties"] = properties.to_json()
request_headers = {
"Accept": "application/json",
"Accept-Encoding": "gzip,deflate",
"Content-Type": "application/json; charset=utf-8",
"x-ms-client-version": "Kusto.Python.Client:" + VERSION,
"x-ms-client-request-id": "KPC.execute;" + str(uuid.uuid4()),
}
if self._auth_provider:
request_headers["Authorization"] = self._auth_provider.acquire_authorization_header()
timeout = self._get_timeout(properties, default_timeout)
response = self._session.post(endpoint, headers=request_headers, json=request_payload, timeout=timeout.seconds)
if response.status_code == 200:
if endpoint.endswith("v2/rest/query"):
return KustoResponseDataSetV2(response.json())
return KustoResponseDataSetV1(response.json())
raise KustoServiceError([response.json()], response)
| 0.004052 |
def adjust_mod_previous(self, holidays_obj=None):
"""
        Adjusts to Business Day Convention "Modified Preceding" (not in 2006 ISDA Definitions).
"""
month = self.month
new = BusinessDate.adjust_previous(self, holidays_obj)
if month != new.month:
new = BusinessDate.adjust_follow(self, holidays_obj)
self = new
return self
| 0.007653 |
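A standalone sketch of the modified-preceding convention with a weekends-only business-day rule; it is illustrative only, not equivalent to the library's holiday-aware BusinessDate logic.

# Roll back to the previous business day, unless that leaves the month,
# in which case roll forward instead (weekends-only rule, no holidays).
import datetime as dt

def _prev_business_day(d):
    while d.weekday() >= 5:          # Saturday or Sunday
        d -= dt.timedelta(days=1)
    return d

def _next_business_day(d):
    while d.weekday() >= 5:
        d += dt.timedelta(days=1)
    return d

def modified_preceding(d):
    adjusted = _prev_business_day(d)
    if adjusted.month != d.month:    # rolled into the previous month: roll forward instead
        adjusted = _next_business_day(d)
    return adjusted

print(modified_preceding(dt.date(2023, 4, 1)))   # Saturday 2023-04-01 -> Monday 2023-04-03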
def to_json(self):
"""Short cut for JSON response service data.
Returns:
Dict that implements JSON interface.
"""
web_resp = collections.OrderedDict()
web_resp['status_code'] = self.status_code
web_resp['status_text'] = dict(HTTP_CODES).get(self.status_code)
web_resp['data'] = self.data if self.data is not None else {}
web_resp['errors'] = self.errors or []
return web_resp
| 0.00432 |
def _setup(self, action, line):
        ''' Strip the leading ACTION keyword from the line, log the incoming
            content for debugging, and split it into non-empty components.
        '''
bot.debug('[in] %s' % line)
# Replace ACTION at beginning
line = re.sub('^%s' % action, '', line)
# Handle continuation lines without ACTION by padding with leading space
line = " " + line
# Split into components
return [x for x in self._split_line(line) if x not in ['', None]]
| 0.006 |
def log_metric(self, name, value, unit=None, global_step=None, extras=None):
"""Log the benchmark metric information to local file.
Currently the logging is done in a synchronized way. This should be updated
to log asynchronously.
Args:
name: string, the name of the metric to log.
value: number, the value of the metric. The value will not be logged if it
is not a number type.
unit: string, the unit of the metric, E.g "image per second".
global_step: int, the global_step when the metric is logged.
extras: map of string:string, the extra information about the metric.
"""
if not isinstance(value, numbers.Number):
tf.logging.warning(
"Metric value to log should be a number. Got %s", type(value))
return
if extras:
extras = [{"name": k, "value": v} for k, v in sorted(extras.items())]
else:
extras = []
with tf.gfile.GFile(
os.path.join(self._logging_dir, METRIC_LOG_FILE_NAME), "a") as f:
metric = {
"name": name,
"value": float(value),
"unit": unit,
"global_step": global_step,
"timestamp": datetime.datetime.now().strftime(
_DATE_TIME_FORMAT_PATTERN),
"extras": extras}
try:
json.dump(metric, f)
f.write("\n")
except (TypeError, ValueError) as e:
tf.logging.warning("Failed to dump metric to log file: "
"name %s, value %s, error %s", name, value, e)
| 0.006588 |
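A hypothetical call sketching the JSON line the method appends; the logger instance and its logging directory are assumed to be set up elsewhere.

# Appends roughly this JSON line to the metric log file:
#   {"name": "accuracy", "value": 0.93, "unit": "fraction", "global_step": 1000,
#    "timestamp": "...", "extras": [{"name": "split", "value": "validation"}]}
benchmark_logger.log_metric("accuracy", 0.93, unit="fraction",
                            global_step=1000, extras={"split": "validation"})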
def poisson(x,
layer_fn=tf.compat.v1.layers.dense,
log_rate_fn=lambda x: x,
name=None):
"""Constructs a trainable `tfd.Poisson` distribution.
This function creates a Poisson distribution parameterized by log rate.
Using default args, this function is mathematically equivalent to:
```none
Y = Poisson(log_rate=matmul(W, x) + b)
where,
W in R^[d, n]
b in R^d
```
#### Examples
This can be used as a [Poisson regression](
https://en.wikipedia.org/wiki/Poisson_regression) loss.
```python
# This example fits a poisson regression loss.
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
# Create fictitious training data.
dtype = np.float32
n = 3000 # number of samples
x_size = 4 # size of single x
def make_training_data():
np.random.seed(142)
x = np.random.randn(n, x_size).astype(dtype)
w = np.random.randn(x_size).astype(dtype)
b = np.random.randn(1).astype(dtype)
true_log_rate = np.tensordot(x, w, axes=[[-1], [-1]]) + b
y = np.random.poisson(lam=np.exp(true_log_rate)).astype(dtype)
return y, x
y, x = make_training_data()
# Build TF graph for fitting Poisson maximum likelihood estimator.
poisson = tfp.trainable_distributions.poisson(x)
loss = -tf.reduce_mean(poisson.log_prob(y))
train_op = tf.train.AdamOptimizer(learning_rate=2.**-5).minimize(loss)
mse = tf.reduce_mean(tf.squared_difference(y, poisson.mean()))
init_op = tf.global_variables_initializer()
# Run graph 1000 times.
num_steps = 1000
loss_ = np.zeros(num_steps) # Style: `_` to indicate sess.run result.
mse_ = np.zeros(num_steps)
with tf.Session() as sess:
sess.run(init_op)
for it in xrange(loss_.size):
_, loss_[it], mse_[it] = sess.run([train_op, loss, mse])
if it % 200 == 0 or it == loss_.size - 1:
print("iteration:{} loss:{} mse:{}".format(it, loss_[it], mse_[it]))
# ==> iteration:0 loss:37.0814208984 mse:6359.41259766
# iteration:200 loss:1.42010736465 mse:40.7654914856
# iteration:400 loss:1.39027583599 mse:8.77660560608
# iteration:600 loss:1.3902695179 mse:8.78443241119
# iteration:800 loss:1.39026939869 mse:8.78443622589
# iteration:999 loss:1.39026939869 mse:8.78444766998
```
Args:
x: `Tensor` with floating type. Must have statically defined rank and
statically known right-most dimension.
layer_fn: Python `callable` which takes input `x` and `int` scalar `d` and
returns a transformation of `x` with shape
`tf.concat([tf.shape(x)[:-1], [1]], axis=0)`.
Default value: `tf.layers.dense`.
log_rate_fn: Python `callable` which transforms the `log_rate` parameter.
Takes a (batch of) length-`dims` vectors and returns a `Tensor` of same
shape and `dtype`.
Default value: `lambda x: x`.
name: A `name_scope` name for operations created by this function.
Default value: `None` (i.e., "poisson").
Returns:
poisson: An instance of `tfd.Poisson`.
"""
with tf.compat.v1.name_scope(name, 'poisson', [x]):
x = tf.convert_to_tensor(value=x, name='x')
log_rate = log_rate_fn(tf.squeeze(layer_fn(x, 1), axis=-1))
return tfd.Poisson(log_rate=log_rate)
| 0.000914 |
def _get_import_name(importnode, modname):
"""Get a prepared module name from the given import node
In the case of relative imports, this will return the
absolute qualified module name, which might be useful
for debugging. Otherwise, the initial module name
is returned unchanged.
"""
if isinstance(importnode, astroid.ImportFrom):
if importnode.level:
root = importnode.root()
if isinstance(root, astroid.Module):
modname = root.relative_to_absolute_name(
modname, level=importnode.level
)
return modname
| 0.001608 |
def _set_ignored_version(version):
"""
Private helper function that writes the most updated
API version that was ignored by a user in the app
:param version: Most recent ignored API update
"""
data = {'version': version}
with open(filepath, 'w') as data_file:
json.dump(data, data_file)
| 0.003106 |
def counts_to_dicts(df, column):
"""
    Convert (values, counts) pairs, as returned by aggregate.aggregate_counts(), to dicts.
    This makes expand_counts much faster.
"""
# index where there are counts and they aren't null
d = df[column].apply(lambda c: pd.notnull(c) and len(c[0]) > 0)
return df.loc[d, column].apply(lambda c: {k: v for k, v in zip(*c)})
| 0.00551 |
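The core zip-into-dict conversion above, shown on a single made-up (values, counts) pair.

# One (values, counts) pair zipped into a dict, shown with plain lists
# (NumPy arrays from the aggregation step work the same way).
values, counts = ['red', 'blue'], [3, 1]
as_dict = {k: v for k, v in zip(values, counts)}
print(as_dict)   # -> {'red': 3, 'blue': 1}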
def get(key, **kwargs):
'''
Gets details for a single, interpreted occurrence
:param key: [int] A GBIF occurrence key
:return: A dictionary, of results
Usage::
from pygbif import occurrences
occurrences.get(key = 1258202889)
occurrences.get(key = 1227768771)
occurrences.get(key = 1227769518)
'''
url = gbif_baseurl + 'occurrence/' + str(key)
out = gbif_GET(url, {}, **kwargs)
return out
| 0.002179 |
def output(self, result):
"""
Adapts the result of a function based on the returns definition.
"""
if self.returns:
errors = None
try:
return self._adapt_result(result)
except AdaptErrors as e:
errors = e.errors
except AdaptError as e:
errors = [e]
raise AnticipateErrors(
message='Return value %r does not match anticipated type %r'
% (type(result), self.returns),
errors=errors)
elif self.strict:
if result is not None:
raise AnticipateErrors(
message='Return value %r does not match anticipated value '
'of None' % type(result),
errors=None)
return None
else:
return result
| 0.003344 |
def dump(file_name, predictions=None, algo=None, verbose=0):
"""A basic wrapper around Pickle to serialize a list of prediction and/or
an algorithm on drive.
What is dumped is a dictionary with keys ``'predictions'`` and ``'algo'``.
Args:
file_name(str): The name (with full path) specifying where to dump the
predictions.
predictions(list of :obj:`Prediction\
<surprise.prediction_algorithms.predictions.Prediction>`): The
predictions to dump.
algo(:class:`Algorithm\
<surprise.prediction_algorithms.algo_base.AlgoBase>`, optional):
The algorithm to dump.
verbose(int): Level of verbosity. If ``1``, then a message indicates
that the dumping went successfully. Default is ``0``.
"""
dump_obj = {'predictions': predictions,
'algo': algo
}
pickle.dump(dump_obj, open(file_name, 'wb'),
protocol=pickle.HIGHEST_PROTOCOL)
if verbose:
print('The dump has been saved as file', file_name)
| 0.000929 |
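A round-trip sketch that writes with dump() and reads the pickle back directly; the file name is a throwaway example.

# Write an (empty) prediction list, then load the raw pickle to inspect it.
import pickle

dump('./example_dump.pkl', predictions=[], algo=None, verbose=1)
with open('./example_dump.pkl', 'rb') as f:
    dump_obj = pickle.load(f)
print(sorted(dump_obj.keys()))   # -> ['algo', 'predictions']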