code (string, 59–3.37k chars) | docstring (string, 8–15.5k chars)
---|---
def user(self, username=None):
if username is None:
username = self.__getUsername()
parsedUsername = urlparse.quote(username)
url = self.root + "/%s" % parsedUsername
return User(url=url,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port,
initialize=False)
|
A user resource that represents a registered user in the portal.
|
def _post_data(options=None, xml=None):
params = {'token': options['token'].strip(), 'cmd': 'submitcheck', 'XMLDATA': xml}
res = salt.utils.http.query(
url=options['url'],
method='POST',
params=params,
data='',
decode=True,
status=True,
header_dict={},
opts=__opts__,
)
if res.get('status', None) == salt.ext.six.moves.http_client.OK:
if res.get('dict', None) and isinstance(res['dict'], list):
_content = res['dict'][0]
if _content.get('status', None):
return True
else:
return False
else:
log.error('No content returned from Nagios NRDP.')
return False
else:
log.error(
'Error returned from Nagios NRDP. Status code: %s.',
            res.get('status')
)
return False
|
Post data to Nagios NRDP
|
async def skip(source, n):
source = transform.enumerate.raw(source)
async with streamcontext(source) as streamer:
async for i, item in streamer:
if i >= n:
yield item
|
Forward an asynchronous sequence, skipping the first ``n`` elements.
If ``n`` is negative, no elements are skipped.
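Example (a minimal usage sketch; the ``numbers`` generator is illustrative):
    import asyncio

    async def numbers():
        for i in range(5):
            yield i

    async def demo():
        # Skip the first two items of the stream.
        async for item in skip(numbers(), 2):
            print(item)  # prints 2, 3, 4

    asyncio.run(demo())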
|
def show_correlation_matrix(self, correlation_matrix):
cr_plot.create_correlation_matrix_plot(
correlation_matrix, self.title, self.headers_to_test
)
pyplot.show()
|
Shows the given correlation matrix as image
:param correlation_matrix: Correlation matrix of features
|
def _lookup_hashes(self, full_hashes):
full_hashes = list(full_hashes)
cues = [fh[0:4] for fh in full_hashes]
result = []
matching_prefixes = {}
matching_full_hashes = set()
is_potential_threat = False
for (hash_prefix, negative_cache_expired) in self.storage.lookup_hash_prefix(cues):
for full_hash in full_hashes:
if full_hash.startswith(hash_prefix):
is_potential_threat = True
matching_prefixes[hash_prefix] = matching_prefixes.get(hash_prefix, False) or negative_cache_expired
matching_full_hashes.add(full_hash)
if not is_potential_threat:
return []
matching_expired_threat_lists = set()
for threat_list, has_expired in self.storage.lookup_full_hashes(matching_full_hashes):
if has_expired:
matching_expired_threat_lists.add(threat_list)
else:
result.append(threat_list)
if result:
return result
if len(matching_expired_threat_lists) == 0 and sum(map(int, matching_prefixes.values())) == 0:
log.info('Negative cache hit.')
return []
self._sync_full_hashes(matching_prefixes.keys())
for threat_list, has_expired in self.storage.lookup_full_hashes(matching_full_hashes):
if not has_expired:
result.append(threat_list)
return result
|
Lookup URL hash in blacklists
Returns names of lists it was found in.
|
def release_plugin(self, name):
plugin = self._active_plugins[name]
if id(plugin) in self._provided_by_preset:
self._provided_by_preset.remove(id(plugin))
del self._active_plugins[name]
delattr(self, name)
|
Deactivate and remove the plugin with name ``name``.
|
def SetCredentials(api_username, api_passwd):
global V2_API_USERNAME
global V2_API_PASSWD
global _V2_ENABLED
_V2_ENABLED = True
V2_API_USERNAME = api_username
V2_API_PASSWD = api_passwd
|
Establish API username and password associated with APIv2 commands.
|
def chimera_anticluster(m, n=None, t=4, multiplier=3.0,
cls=BinaryQuadraticModel, subgraph=None, seed=None):
if seed is None:
        seed = np.random.randint(2**32, dtype=np.uint32)
    r = np.random.RandomState(seed)
m = int(m)
if n is None:
n = m
else:
n = int(n)
t = int(t)
ldata = np.zeros(m*n*t*2)
if m and n and t:
inrow, incol = zip(*_iter_chimera_tile_edges(m, n, t))
if m > 1 or n > 1:
outrow, outcol = zip(*_iter_chimera_intertile_edges(m, n, t))
else:
outrow = outcol = tuple()
qdata = r.choice((-1., 1.), size=len(inrow)+len(outrow))
qdata[len(inrow):] *= multiplier
irow = inrow + outrow
icol = incol + outcol
else:
irow = icol = qdata = tuple()
bqm = cls.from_numpy_vectors(ldata, (irow, icol, qdata), 0.0, SPIN)
if subgraph is not None:
nodes, edges = subgraph
subbqm = cls.empty(SPIN)
try:
subbqm.add_variables_from((v, bqm.linear[v]) for v in nodes)
except KeyError:
msg = "given 'subgraph' contains nodes not in Chimera({}, {}, {})".format(m, n, t)
raise ValueError(msg)
try:
subbqm.add_interactions_from((u, v, bqm.adj[u][v]) for u, v in edges)
except KeyError:
msg = "given 'subgraph' contains edges not in Chimera({}, {}, {})".format(m, n, t)
raise ValueError(msg)
bqm = subbqm
return bqm
|
Generate an anticluster problem on a Chimera lattice.
An anticluster problem has weak interactions within a tile and strong
interactions between tiles.
Args:
m (int):
Number of rows in the Chimera lattice.
n (int, optional, default=m):
Number of columns in the Chimera lattice.
t (int, optional, default=4):
Size of the shore within each Chimera tile.
multiplier (number, optional, default=3.0):
Strength of the intertile edges.
cls (class, optional, default=:class:`.BinaryQuadraticModel`):
Binary quadratic model class to build from.
subgraph (int/tuple[nodes, edges]/:obj:`~networkx.Graph`):
A subgraph of a Chimera(m, n, t) graph to build the anticluster
problem on.
seed (int, optional, default=None):
Random seed.
Returns:
:obj:`.BinaryQuadraticModel`: spin-valued binary quadratic model.
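Example (a usage sketch, assuming the dimod ``BinaryQuadraticModel`` and ``SPIN`` vartype used above):
    bqm = chimera_anticluster(2, t=4, multiplier=3.0, seed=42)
    len(bqm)  # 2*2*4*2 = 32 spin variables
    # intertile couplers are scaled by ``multiplier``:
    set(abs(j) for j in bqm.quadratic.values())  # {1.0, 3.0}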
|
def _deserialize(s, proto):
if not isinstance(s, bytes):
raise ValueError('Parameter s must be bytes, but got type: {}'.format(type(s)))
if not (hasattr(proto, 'ParseFromString') and callable(proto.ParseFromString)):
raise ValueError('No ParseFromString method is detected. '
'\ntype is {}'.format(type(proto)))
decoded = cast(Optional[int], proto.ParseFromString(s))
if decoded is not None and decoded != len(s):
raise google.protobuf.message.DecodeError(
"Protobuf decoding consumed too few bytes: {} out of {}".format(
decoded, len(s)))
return proto
|
Parse bytes into an in-memory proto
@params
s is bytes containing serialized proto
proto is an in-memory proto object
@return
The proto instance filled in by s
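Example (a round-trip sketch using a protobuf well-known type as a stand-in message class):
    from google.protobuf.struct_pb2 import Struct

    original = Struct()
    original.update({'answer': 42})
    restored = _deserialize(original.SerializeToString(), Struct())
    restored['answer']  # 42.0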
|
def parse_number(self):
value = self.current_token.value
suffix = value[-1].lower()
try:
if suffix in NUMBER_SUFFIXES:
return NUMBER_SUFFIXES[suffix](value[:-1])
return Double(value) if '.' in value else Int(value)
except (OutOfRange, ValueError):
return String(value)
|
Parse a number from the token stream.
|
def localCheckpoint(self, eager=True):
jdf = self._jdf.localCheckpoint(eager)
return DataFrame(jdf, self.sql_ctx)
|
Returns a locally checkpointed version of this Dataset. Checkpointing can be used to
truncate the logical plan of this DataFrame, which is especially useful in iterative
algorithms where the plan may grow exponentially. Local checkpoints are stored in the
executors using the caching subsystem and therefore they are not reliable.
:param eager: Whether to checkpoint this DataFrame immediately
.. note:: Experimental
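Example (a usage sketch; assumes an active ``SparkSession`` named ``spark``):
    df = spark.range(1000)
    df = df.localCheckpoint(eager=True)  # lineage truncated, data cached on executors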
|
def discretize_path(entities, vertices, path, scale=1.0):
vertices = np.asanyarray(vertices)
path_len = len(path)
if path_len == 0:
raise ValueError('Cannot discretize empty path!')
if path_len == 1:
discrete = np.asanyarray(entities[path[0]].discrete(
vertices,
scale=scale))
else:
discrete = []
for i, entity_id in enumerate(path):
current = entities[entity_id].discrete(vertices, scale=scale)
if i >= (path_len - 1):
discrete.append(current)
else:
discrete.append(current[:-1])
discrete = np.vstack(discrete)
if vertices.shape[1] == 2 and not is_ccw(discrete):
discrete = np.ascontiguousarray(discrete[::-1])
return discrete
|
Turn a list of entity indices into a path of connected points.
Parameters
-----------
entities : (j,) entity objects
Objects like 'Line', 'Arc', etc.
vertices: (n, dimension) float
Vertex points in space.
path : (m,) int
Indexes of entities
scale : float
Overall scale of drawing used for
numeric tolerances in certain cases
Returns
-----------
discrete : (p, dimension) float
Connected points in space that lie on the
path and can be connected with line segments.
|
def get_network(self, name, batch_size=None, callback=None):
network_proto = nnabla_pb2.Network()
network_proto.CopyFrom(self.network_dict[name])
return NnpNetwork(network_proto, self._params, batch_size, callback=callback)
|
Create a variable graph for the network given by name
Returns: NnpNetwork
|
def display(self, display):
if display is None:
raise ValueError("Invalid value for `display`, must not be `None`")
allowed_values = ["BANNER", "TOASTER"]
if display not in allowed_values:
raise ValueError(
"Invalid value for `display` ({0}), must be one of {1}"
.format(display, allowed_values)
)
self._display = display
|
Sets the display of this Message.
The form of display for this message # noqa: E501
:param display: The display of this Message. # noqa: E501
:type: str
|
def run():
logfilename = os.path.join(current_app.config['CFG_LOGDIR'],
'invenio_upgrader.log')
upgrader = InvenioUpgrader()
logger = upgrader.get_logger(logfilename=logfilename)
try:
upgrades = upgrader.get_upgrades()
if not upgrades:
logger.info("All upgrades have been applied.")
return
logger.info("Following upgrade(s) will be applied:")
for u in upgrades:
logger.info(" * %s (%s)" % (u.name, u.info))
logger.info("Running pre-upgrade checks...")
upgrader.pre_upgrade_checks(upgrades)
logger.info("Calculating estimated upgrade time...")
estimate = upgrader.human_estimate(upgrades)
click.confirm(
"You are going to upgrade your installation "
"(estimated time: {0})!".format(estimate), abort=True)
for u in upgrades:
logger.info("Applying %s (%s)" % (u.name, u.info))
upgrader.apply_upgrade(u)
logger.info("Running post-upgrade checks...")
upgrader.post_upgrade_checks(upgrades)
if upgrader.has_warnings():
logger.warning("Upgrade completed with %s warnings - please check "
"log-file for further information:\nless %s"
% (upgrader.get_warnings_count(), logfilename))
else:
logger.info("Upgrade completed successfully.")
except RuntimeError as e:
for msg in e.args:
logger.error(unicode(msg))
logger.info("Please check log file for further information:\n"
"less %s" % logfilename)
        raise click.Abort()
|
Command for applying upgrades.
|
def add_properties(props, mol):
if not props:
return
for _, atom in mol.atoms_iter():
atom.charge = 0
atom.multi = 1
atom.mass = None
for prop in props.get("CHG", []):
mol.atom(prop[0]).charge = prop[1]
for prop in props.get("RAD", []):
mol.atom(prop[0]).multi = prop[1]
for prop in props.get("ISO", []):
mol.atom(prop[0]).mass = prop[1]
|
apply properties to the molecule object
Returns:
None (alter molecule object directly)
|
def get_tensors_by_names(names):
ret = []
G = tfv1.get_default_graph()
for n in names:
opn, varn = get_op_tensor_name(n)
ret.append(G.get_tensor_by_name(varn))
return ret
|
Get a list of tensors in the default graph by a list of names.
Args:
names (list):
|
def overall_MCC_calc(classes, table, TOP, P):
try:
cov_x_y = 0
cov_x_x = 0
cov_y_y = 0
matrix_sum = sum(list(TOP.values()))
for i in classes:
cov_x_x += TOP[i] * (matrix_sum - TOP[i])
cov_y_y += P[i] * (matrix_sum - P[i])
cov_x_y += (table[i][i] * matrix_sum - P[i] * TOP[i])
return cov_x_y / (math.sqrt(cov_y_y * cov_x_x))
except Exception:
return "None"
|
Calculate Overall_MCC.
:param classes: classes
:type classes : list
:param table: input matrix
:type table : dict
:param TOP: test outcome positive
:type TOP : dict
:param P: condition positive
:type P : dict
:return: Overall_MCC as float
|
def parse_dge(
dge_path: str,
entrez_id_header: str,
log2_fold_change_header: str,
adj_p_header: str,
entrez_delimiter: str,
base_mean_header: Optional[str] = None
) -> List[Gene]:
if dge_path.endswith('.xlsx'):
return parsers.parse_excel(
dge_path,
entrez_id_header=entrez_id_header,
log_fold_change_header=log2_fold_change_header,
adjusted_p_value_header=adj_p_header,
entrez_delimiter=entrez_delimiter,
base_mean_header=base_mean_header,
)
if dge_path.endswith('.csv'):
return parsers.parse_csv(
dge_path,
entrez_id_header=entrez_id_header,
log_fold_change_header=log2_fold_change_header,
adjusted_p_value_header=adj_p_header,
entrez_delimiter=entrez_delimiter,
base_mean_header=base_mean_header,
)
if dge_path.endswith('.tsv'):
return parsers.parse_csv(
dge_path,
entrez_id_header=entrez_id_header,
log_fold_change_header=log2_fold_change_header,
adjusted_p_value_header=adj_p_header,
entrez_delimiter=entrez_delimiter,
base_mean_header=base_mean_header,
sep="\t"
)
raise ValueError(f'Unsupported extension: {dge_path}')
|
Parse a differential expression file.
:param dge_path: Path to the file.
:param entrez_id_header: Header for the Entrez identifier column
:param log2_fold_change_header: Header for the log2 fold change column
:param adj_p_header: Header for the adjusted p-value column
:param entrez_delimiter: Delimiter between Entrez ids.
:param base_mean_header: Header for the base mean column.
:return: A list of genes.
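Example (a usage sketch; the path and column headers are illustrative):
    genes = parse_dge(
        dge_path='degs.csv',
        entrez_id_header='entrez_id',
        log2_fold_change_header='log2fc',
        adj_p_header='padj',
        entrez_delimiter=';',
        base_mean_header='base_mean',
    )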
|
def Convert(self, metadata, grr_message, token=None):
return self.BatchConvert([(metadata, grr_message)], token=token)
|
Converts GrrMessage into a set of RDFValues.
Args:
metadata: ExportedMetadata to be used for conversion.
grr_message: GrrMessage to be converted.
token: Security token.
Returns:
List or generator with resulting RDFValues.
|
def delete_files_in_folder(fldr):
fl = glob.glob(fldr + os.sep + '*.*')
for f in fl:
delete_file(f, True)
|
delete all files in folder 'fldr'
|
def append(self, elem):
new_left_list, new_right_list, new_length = self._append(self._left_list, self._right_list, elem)
return PDeque(new_left_list, new_right_list, new_length, self._maxlen)
|
Return new deque with elem as the rightmost element.
>>> pdeque([1, 2]).append(3)
pdeque([1, 2, 3])
|
def update(self):
node = self._fritz.get_device_element(self.ain)
self._update_from_node(node)
|
Update the device values.
|
def _spectrogram_scipy_fourier(data, fs, nt, nch, fmin=None,
window=('tukey', 0.25), deg=False,
nperseg=None, noverlap=None,
detrend='linear', stft=False,
boundary='constant', padded=True, warn=True):
if nperseg is None and fmin is None:
fmin = _fmin_coef*(fs/nt)
if warn:
msg = "nperseg and fmin were not provided\n"
msg += " => fmin automatically set to 10.*fs/nt:\n"
msg += " fmin = 10.*{0} / {1} = {2} Hz".format(fs,nt,fmin)
warnings.warn(msg)
if nperseg is None:
assert fmin > fs/nt
nperseg = int(np.ceil(fs/fmin))
if nperseg%2==1:
nperseg = nperseg + 1
if noverlap is None:
noverlap = nperseg - 1
n = int(np.ceil(np.log(nperseg)/np.log(2)))
nfft = 2**n
if stft:
f, tf, ssx = scpsig.stft(data, fs=fs,
window=window, nperseg=nperseg,
noverlap=noverlap, nfft=nfft, detrend=detrend,
return_onesided=True, boundary=boundary,
padded=padded, axis=0)
else:
f, tf, ssx = scpsig.spectrogram(data, fs=fs,
window=window, nperseg=nperseg,
noverlap=noverlap, nfft=nfft,
detrend=detrend, return_onesided=True,
scaling='density', axis=0,
mode='complex')
lssx = np.split(ssx, np.arange(1,nch), axis=1)
lssx = [ss.squeeze().T for ss in lssx]
lpsd = [np.abs(ss)**2 for ss in lssx]
lang = [np.angle(ss, deg=deg) for ss in lssx]
return f, tf, lpsd, lang
|
Return a spectrogram for each channel, and a common frequency vector
The min frequency of interest fmin fixes the nb. of pt. per seg. (if None)
The number of overlapping points is set to nperseg-1 if None
The choice of the window type is a trade-off between:
Spectral resolution between similar frequencies/amplitudes:
=>
Dynamic range (lots of != frequencies of != amplitudes):
=>
Compromise:
=> 'hann'
|
def get_assessment_offered_bank_assignment_session(self, proxy):
if not self.supports_assessment_offered_bank_assignment():
raise errors.Unimplemented()
return sessions.AssessmentOfferedBankAssignmentSession(proxy=proxy, runtime=self._runtime)
|
Gets the session for assigning offered assessments to bank mappings.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.assessment.AssessmentOfferedBankAssignmentSession)
- an ``AssessmentOfferedBankAssignmentSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented -
``supports_assessment_offered_bank_assignment()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_assessment_offered_bank_assignment()`` is ``true``.*
|
def ack(self, items):
for item in items:
time_to_ack = item.time_to_ack
if time_to_ack is not None:
self._manager.ack_histogram.add(time_to_ack)
ack_ids = [item.ack_id for item in items]
request = types.StreamingPullRequest(ack_ids=ack_ids)
self._manager.send(request)
self.drop(items)
|
Acknowledge the given messages.
Args:
items(Sequence[AckRequest]): The items to acknowledge.
|
def jump_to(self, *, iterator_type, sequence_number=None):
self.iterator_id = self.session.get_shard_iterator(
stream_arn=self.stream_arn,
shard_id=self.shard_id,
iterator_type=iterator_type,
sequence_number=sequence_number)
self.iterator_type = iterator_type
self.sequence_number = sequence_number
self.empty_responses = 0
|
Move to a new position in the shard using the standard parameters to GetShardIterator.
:param str iterator_type: "trim_horizon", "at_sequence", "after_sequence", "latest"
:param str sequence_number: *(Optional)* Sequence number to use with at/after sequence. Default is None.
|
def set_camera(self,
angles=None,
distance=None,
center=None,
resolution=None,
fov=None):
if fov is None:
fov = np.array([60, 45])
if len(self.geometry) == 0:
return
if angles is None:
angles = np.zeros(3)
rotation = transformations.euler_matrix(*angles)
transform = cameras.look_at(self.bounds_corners,
fov=fov,
rotation=rotation,
distance=distance,
center=center)
if hasattr(self, '_camera') and self._camera is not None:
self.camera.fov = fov
self.camera._scene = self
self.camera.transform = transform
else:
self.camera = cameras.Camera(fov=fov,
scene=self,
transform=transform)
return self.camera
|
Create a camera object for self.camera, and add
a transform to self.graph for it.
If arguments are not passed sane defaults will be figured
out which show the mesh roughly centered.
Parameters
-----------
angles : (3,) float
Initial euler angles in radians
distance : float
Distance from centroid
center : (3,) float
  Point the camera should be centered on
resolution : (2,) int
  Resolution of the camera in pixels
fov : (2,) float
  Field of view in degrees
|
def polar_histogram(xdata, ydata, radial_bins="numpy", phi_bins=16,
transformed=False, *args, **kwargs):
dropna = kwargs.pop("dropna", True)
data = np.concatenate([xdata[:, np.newaxis], ydata[:, np.newaxis]], axis=1)
data = _prepare_data(data, transformed=transformed, klass=PolarHistogram, dropna=dropna)
if isinstance(phi_bins, int):
phi_range = (0, 2 * np.pi)
if "phi_range" in "kwargs":
phi_range = kwargs["phi_range"]
elif "range" in "kwargs":
phi_range = kwargs["range"][1]
phi_range = list(phi_range) + [phi_bins + 1]
phi_bins = np.linspace(*phi_range)
bin_schemas = binnings.calculate_bins_nd(data, [radial_bins, phi_bins], *args,
check_nan=not dropna, **kwargs)
weights = kwargs.pop("weights", None)
frequencies, errors2, missed = histogram_nd.calculate_frequencies(data, ndim=2,
binnings=bin_schemas,
weights=weights)
return PolarHistogram(binnings=bin_schemas, frequencies=frequencies, errors2=errors2, missed=missed)
|
Facade construction function for the PolarHistogram.
Parameters
----------
transformed : bool
phi_range : Optional[tuple]
range
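Example (a usage sketch; assumes numpy arrays of Cartesian coordinates):
    import numpy as np
    x = np.random.normal(size=1000)
    y = np.random.normal(size=1000)
    hist = polar_histogram(x, y, phi_bins=16)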
|
def remove(self, id):
v = VRF.get(int(id))
v.remove()
redirect(url(controller='vrf', action='list'))
|
Removes a VRF.
|
def token(self):
if self._token is None:
token_type = os.getenv(TOKEN_TYPE_KEY, '')
token_body = os.getenv(TOKEN_BODY_KEY, '')
self._token = _Token(token_type, token_body)
return self._token
|
Returns authorization token provided by Cocaine.
The real meaning of the token is determined by its type. For example OAUTH2 token will
have "bearer" type.
:return: A tuple of token type and body.
|
def get_single_stack(self):
single = None
while self.single_stack:
single = self.single_stack.pop()
return single
|
Get the correct single stack item to use.
|
def get_chembl_id(nlm_mesh):
mesh_id = get_mesh_id(nlm_mesh)
pcid = get_pcid(mesh_id)
url_mesh2pcid = 'https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/' + \
'cid/%s/synonyms/JSON' % pcid
r = requests.get(url_mesh2pcid)
res = r.json()
synonyms = res['InformationList']['Information'][0]['Synonym']
chembl_id = [syn for syn in synonyms
if 'CHEMBL' in syn and 'SCHEMBL' not in syn][0]
return chembl_id
|
Get ChEMBL ID from NLM MESH
Parameters
----------
nlm_mesh : str
Returns
-------
chembl_id : str
|
def extract_values(query):
if isinstance(query, subqueries.UpdateQuery):
row = query.values
return extract_values_inner(row, query)
if isinstance(query, subqueries.InsertQuery):
ret = []
for row in query.objs:
ret.append(extract_values_inner(row, query))
return ret
raise NotSupportedError
|
Extract values from insert or update query.
Supports bulk_create
|
def clear_inputhook(self, app=None):
pyos_inputhook_ptr = self.get_pyos_inputhook()
original = self.get_pyos_inputhook_as_func()
pyos_inputhook_ptr.value = ctypes.c_void_p(None).value
allow_CTRL_C()
self._reset()
return original
|
Set PyOS_InputHook to NULL and return the previous one.
Parameters
----------
app : optional, ignored
This parameter is allowed only so that clear_inputhook() can be
called with a similar interface as all the ``enable_*`` methods. But
the actual value of the parameter is ignored. This uniform interface
makes it easier to have user-level entry points in the main IPython
app like :meth:`enable_gui`.
|
def tile(self, map_id, x, y, z, retina=False,
file_format="png", style_id=None, timestamp=None):
if x is None or y is None or z is None:
raise ValidationError(
"x, y, and z must be not be None"
)
x = self._validate_x(x, z)
y = self._validate_y(y, z)
z = self._validate_z(z)
retina = self._validate_retina(retina)
file_format = self._validate_file_format(file_format)
path_values = dict(
map_id=map_id,
x=str(x),
y=str(y),
z=str(z)
)
path_part = "/{map_id}/{z}/{x}/{y}"
uri = URITemplate(self.base_uri + path_part).expand(**path_values)
path_part = "{}.{}".format(retina, file_format)
uri += path_part
query_parameters = dict()
if style_id is not None and timestamp is not None:
timestamp = self._validate_timestamp(timestamp)
style = "{}@{}".format(style_id, timestamp)
query_parameters["style"] = style
response = self.session.get(uri, params=query_parameters)
self.handle_http_error(response)
return response
|
Returns an image tile, vector tile, or UTFGrid
in the specified file format.
Parameters
----------
map_id : str
The tile's unique identifier in the format username.id.
x : int
The tile's column, where 0 is the minimum value
and ((2**z) - 1) is the maximum value.
y : int
The tile's row, where 0 is the minimum value
and ((2**z) - 1) is the maximum value.
z : int
The tile's zoom level, where 0 is the minimum value
and 20 is the maximum value.
retina : bool, optional
The tile's scale, where True indicates Retina scale
(double scale) and False indicates regular scale.
The default value is false.
file_format : str, optional
The tile's file format.
The default value is png.
style_id : str, optional
The tile's style id.
style_id must be used together with timestamp.
timestamp : str, optional
The style id's ISO-formatted timestamp, found by
accessing the "modified" property of a style object.
timestamp must be used together with style_id.
Returns
-------
request.Response
The response object with a tile in the specified format.
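Example (a usage sketch; assumes ``maps`` is an instance of this service class and the map id is illustrative):
    response = maps.tile('mapbox.streets', x=0, y=0, z=0, retina=True)
    with open('tile@2x.png', 'wb') as fh:
        fh.write(response.content)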
|
def flush(self):
for key in self.grouping_info.keys():
if self._should_flush(key):
self._write_current_buffer_for_group_key(key)
|
Ensure all remaining buffers are written.
|
def _collect_cpu_info(run_info):
cpu_info = {}
cpu_info["num_cores"] = multiprocessing.cpu_count()
import cpuinfo
info = cpuinfo.get_cpu_info()
cpu_info["cpu_info"] = info["brand"]
cpu_info["mhz_per_cpu"] = info["hz_advertised_raw"][0] / 1.0e6
run_info["machine_config"]["cpu_info"] = cpu_info
|
Collect the CPU information for the local environment.
|
def get_objective_banks(self):
catalogs = self._get_provider_session('objective_bank_lookup_session').get_objective_banks()
cat_list = []
for cat in catalogs:
cat_list.append(ObjectiveBank(self._provider_manager, cat, self._runtime, self._proxy))
return ObjectiveBankList(cat_list)
|
Pass through to provider ObjectiveBankLookupSession.get_objective_banks
|
def compile_classpath(self, classpath_product_key, target, extra_cp_entries=None):
classpath_entries = list(
entry.path
for entry in self.compile_classpath_entries(classpath_product_key, target, extra_cp_entries)
)
for entry in classpath_entries:
assert entry.startswith(get_buildroot()), \
"Classpath entry does not start with buildroot: {}".format(entry)
return classpath_entries
|
Compute the compile classpath for the given target.
|
def rereference(self):
selectedItems = self.idx_l0.selectedItems()
chan_to_plot = []
for selected in selectedItems:
chan_to_plot.append(selected.text())
self.highlight_channels(self.idx_l1, chan_to_plot)
|
Automatically highlight channels to use as reference, based on
selected channels.
|
def _set_desc(self):
if self.docs['in']['desc']:
self.docs['out']['desc'] = self.docs['in']['desc']
else:
self.docs['out']['desc'] = ''
|
Sets the global description if any
|
def merge_split_adjustments_with_overwrites(
self,
pre,
post,
overwrites,
requested_split_adjusted_columns
):
for column_name in requested_split_adjusted_columns:
if pre:
for ts in pre[column_name]:
add_new_adjustments(
overwrites,
pre[column_name][ts],
column_name,
ts
)
if post:
for ts in post[column_name]:
add_new_adjustments(
overwrites,
post[column_name][ts],
column_name,
ts
)
|
Merge split adjustments with the dict containing overwrites.
Parameters
----------
pre : dict[str -> dict[int -> list]]
The adjustments that occur before the split-adjusted-asof-date.
post : dict[str -> dict[int -> list]]
The adjustments that occur after the split-adjusted-asof-date.
overwrites : dict[str -> dict[int -> list]]
The overwrites across all time. Adjustments will be merged into
this dictionary.
requested_split_adjusted_columns : list of str
List of names of split adjusted columns that are being requested.
|
def _process_file(self, obj, fobj, field):
from uliweb import settings
paths = []
upload_to = self.upload_to or self._get_upload_path(field, 'upload_to', obj)
if upload_to:
self.fileserving.to_path = upload_to
upload_to_sub = self.upload_to_sub or self._get_upload_path(field, 'upload_to_sub', obj)
if upload_to_sub:
paths.append(upload_to_sub)
paths.append(fobj['filename'])
return self.fileserving.save_file(os.path.join(*paths),
fobj['file'], replace=self.file_replace,
convert=self.file_convert)
|
obj is record object
fobj is data
field is FileField instance
|
def specbits(self):
bits = []
for opt in sorted(self.__options):
m = re.match(r'^! (.*)', opt)
if m:
bits.extend(['!', "--%s" % m.group(1)])
else:
bits.append("--%s" % opt)
optval = self.__options[opt]
if isinstance(optval, list):
bits.extend(optval)
else:
bits.append(optval)
return bits
|
Returns the array of arguments that would be given to
iptables for the current Extension.
|
def split(path):
if path == '/':
return ('/', None)
parent, child = path.rsplit('/', 1)
if parent == '':
parent = '/'
return (parent, child)
|
splits path into parent, child
|
async def on_raw_join(self, message):
nick, metadata = self._parse_user(message.source)
self._sync_user(nick, metadata)
channels = message.params[0].split(',')
if self.is_same_nick(self.nickname, nick):
for channel in channels:
if not self.in_channel(channel):
self._create_channel(channel)
await self.rawmsg('MODE', channel)
else:
for channel in channels:
if self.in_channel(channel):
self.channels[channel]['users'].add(nick)
for channel in channels:
await self.on_join(channel, nick)
|
JOIN command.
|
def set_units_property(self, *, unit_ids=None, property_name, values):
if unit_ids is None:
unit_ids = self.get_unit_ids()
for i, unit in enumerate(unit_ids):
self.set_unit_property(unit_id=unit, property_name=property_name, value=values[i])
|
Sets unit property data for a list of units
Parameters
----------
unit_ids: list
The list of unit ids for which the property will be set
Defaults to get_unit_ids()
property_name: str
The name of the property
value: list
The list of values to be set
|
def _fallback_cleanups(self):
multiprocessing.active_children()
if not salt.utils.platform.is_windows():
return
for thread in self.win_proc:
if not thread.is_alive():
thread.join()
try:
self.win_proc.remove(thread)
del thread
except (ValueError, NameError):
pass
|
Fallback cleanup routines, attempting to fix leaked processes, threads, etc.
|
def get_blockcypher_walletname_from_mpub(mpub, subchain_indices=[]):
mpub = mpub.encode('utf-8')
if subchain_indices:
mpub += ','.join([str(x) for x in subchain_indices]).encode('utf-8')
return 'X%s' % sha256(mpub).hexdigest()[:24]
|
Blockcypher limits wallet names to 25 chars.
Hash the master pubkey (with subchain indices) and use 'X' plus the first 24 hex chars of the digest.
Hacky deterministic method for naming.
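Example (a sketch of the result shape; the extended public key is truncated/illustrative):
    name = get_blockcypher_walletname_from_mpub('xpub6CUGRUo...', subchain_indices=[0, 1])
    len(name)              # 25: 'X' + first 24 hex chars of the sha256 digest
    name.startswith('X')   # True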
|
def pfunc_multi(self, strands, permutation=None, temp=37.0, pseudo=False,
material=None, dangles='some', sodium=1.0, magnesium=0.0):
material = self._set_material(strands, material, multi=True)
cmd_args = self._prep_cmd_args(temp, dangles, material, pseudo, sodium,
magnesium, multi=True)
if permutation is None:
permutation = range(1, len(strands) + 1)
lines = self._multi_lines(strands, permutation)
stdout = self._run('pfunc', cmd_args, lines).split('\n')
return (float(stdout[-3]), float(stdout[-2]))
|
Compute the partition function for an ordered complex of strands.
Runs the \'pfunc\' command.
:param strands: List of strands to use as inputs to pfunc -multi.
:type strands: list
:param permutation: The circular permutation of strands to test in
complex. e.g. to test in the order that was input
for 4 strands, the permutation would be [1,2,3,4].
If set to None, defaults to the order of the
input strands.
:type permutation: list
:param temp: Temperature setting for the computation. Negative values
are not allowed.
:type temp: float
:param pseudo: Enable pseudoknots.
:type pseudo: bool
:param material: The material setting to use in the computation. If set
to None (the default), the material type is inferred
from the strands. Other settings available: 'dna' for
DNA parameters, 'rna' for RNA (1995) parameters, and
'rna1999' for the RNA 1999 parameters.
:type material: str
:param dangles: How to treat dangles in the computation. From the
user guide: For \'none\': Dangle energies are ignored.
For \'some\': \'A dangle energy is incorporated for
each unpaired base flanking a duplex\'. For 'all': all
dangle energy is considered.
:type dangles: str
:param sodium: Sodium concentration in solution (molar), only applies
to DNA.
:type sodium: float
:param magnesium: Magnesium concentration in solution (molar), only
applies to DNA.
:type magnesium: float
:returns: A 2-tuple of the free energy of the ordered complex
(float) and the partition function (float).
:rtype: tuple
|
def patch(patch_inspect=True):
PATCHED['collections.abc.Generator'] = _collections_abc.Generator = Generator
PATCHED['collections.abc.Coroutine'] = _collections_abc.Coroutine = Coroutine
PATCHED['collections.abc.Awaitable'] = _collections_abc.Awaitable = Awaitable
if patch_inspect:
import inspect
PATCHED['inspect.isawaitable'] = inspect.isawaitable = isawaitable
|
Main entry point for patching the ``collections.abc`` and ``inspect``
standard library modules.
|
def childgroup(self, field):
cols = getattr(self, "cols", self.default_cols)
width = self.num_cols / cols
for child in field.children:
child.width = width
res = list(grouper(field.children, cols, fillvalue=None))
return res
|
Return children grouped regarding the grid description
|
def __validInterval(self, start, finish):
url = self.__getURL(1,
start.strftime("%Y-%m-%d"),
finish.strftime("%Y-%m-%d"))
data = self.__readAPI(url)
if data["total_count"] >= 1000:
middle = start + (finish - start)/2
self.__validInterval(start, middle)
self.__validInterval(middle, finish)
else:
self.__intervals.append([start.strftime("%Y-%m-%d"),
finish.strftime("%Y-%m-%d")])
self.__logger.info("New valid interval: " +
start.strftime("%Y-%m-%d") +
" to " +
finish.strftime("%Y-%m-%d"))
|
Check if the interval is correct.
An interval is correct if it has less than 1001
users. If the interval is correct, it will be added
to the '_intervals' attribute. Otherwise, the interval will be
split into two new intervals and these intervals
will be checked.
:param start: start date of the interval.
:type start: datetime.date.
:param finish: finish date of the interval.
:type finish: datetime.date.
|
def plot_discrete(self, show=False):
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
axis = fig.add_subplot(111, projection='3d')
for discrete in self.discrete:
axis.plot(*discrete.T)
if show:
plt.show()
|
Plot closed curves
Parameters
------------
show : bool
If False will not execute matplotlib.pyplot.show
|
def batch_tensor_dicts(tensor_dicts: List[Dict[str, torch.Tensor]],
remove_trailing_dimension: bool = False) -> Dict[str, torch.Tensor]:
key_to_tensors: Dict[str, List[torch.Tensor]] = defaultdict(list)
for tensor_dict in tensor_dicts:
for key, tensor in tensor_dict.items():
key_to_tensors[key].append(tensor)
batched_tensors = {}
for key, tensor_list in key_to_tensors.items():
batched_tensor = torch.stack(tensor_list)
if remove_trailing_dimension and all(tensor.size(-1) == 1 for tensor in tensor_list):
batched_tensor = batched_tensor.squeeze(-1)
batched_tensors[key] = batched_tensor
return batched_tensors
|
Takes a list of tensor dictionaries, where each dictionary is assumed to have matching keys,
and returns a single dictionary with all tensors with the same key batched together.
Parameters
----------
tensor_dicts : ``List[Dict[str, torch.Tensor]]``
The list of tensor dictionaries to batch.
remove_trailing_dimension : ``bool``
If ``True``, we will check for a trailing dimension of size 1 on the tensors that are being
batched, and remove it if we find it.
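Example (a small sketch of the batching behaviour):
    import torch
    dicts = [{'ids': torch.tensor([1, 2])}, {'ids': torch.tensor([3, 4])}]
    batched = batch_tensor_dicts(dicts)
    batched['ids'].shape  # torch.Size([2, 2])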
|
def lookup_host(self, name):
res = self.lookup_by_host(name=name)
try:
return dict(ip=res["ip-address"], mac=res["hardware-address"], hostname=res["name"].decode('utf-8'))
except KeyError:
raise OmapiErrorAttributeNotFound()
|
Look for a host object with the given name and return its
name, mac, and ip address.
@type name: str
@rtype: dict or None
@raises ValueError:
@raises OmapiError:
@raises OmapiErrorNotFound: if no host object with the given name could be found
@raises OmapiErrorAttributeNotFound: if lease could be found, but objects lacks ip, mac or name
@raises socket.error:
|
def image(self):
if self._image is None:
self._image = Image(self.fname)
return self._image
|
Read the loaded DICOM image data
|
def call(self, cmd, **kwargs):
if isinstance(cmd, basestring):
cmd = cmd.split()
self.log.info('Running %s', cmd)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, **kwargs)
out, err = p.communicate()
if out:
self.log.info(out)
if err:
if p.returncode == 0:
self.log.info(err)
else:
self.log.error(err)
if p.returncode != 0:
self.log.error('returncode = %d' % p.returncode)
raise Exception
return out, err, p.returncode
|
A simple subprocess wrapper
|
def validate_string(option, value):
if isinstance(value, string_type):
return value
raise TypeError("Wrong type for %s, value must be "
"an instance of %s" % (option, string_type.__name__))
|
Validates that 'value' is an instance of `basestring` for Python 2
or `str` for Python 3.
|
def pick(self, filenames: Iterable[str]) -> str:
filenames = sorted(filenames, reverse=True)
for priority in sorted(self.rules.keys(), reverse=True):
patterns = self.rules[priority]
for pattern in patterns:
for filename in filenames:
if pattern.search(filename):
return filename
return filenames[0]
|
Pick one filename based on priority rules.
|
def create_group(cls, prefix: str, name: str) -> ErrorGroup:
group = cls.ErrorGroup(prefix, name)
cls.groups.append(group)
return group
|
Create a new error group and return it.
|
def noargs(self):
"Returns True if the callable takes no arguments"
noargs = inspect.ArgSpec(args=[], varargs=None, keywords=None, defaults=None)
return self.argspec == noargs
|
Returns True if the callable takes no arguments
|
def _searchservices(device, name=None, uuid=None, uuidbad=None):
if not isinstance(device, _IOBluetooth.IOBluetoothDevice):
raise ValueError("device must be IOBluetoothDevice, was %s" % \
type(device))
services = []
allservices = device.getServices()
if uuid:
gooduuids = (uuid, )
else:
gooduuids = ()
if uuidbad:
baduuids = (uuidbad, )
else:
baduuids = ()
if allservices is not None:
for s in allservices:
if gooduuids and not s.hasServiceFromArray_(gooduuids):
continue
if baduuids and s.hasServiceFromArray_(baduuids):
continue
if name is None or s.getServiceName() == name:
services.append(s)
return services
|
Searches the given IOBluetoothDevice using the specified parameters.
Returns an empty list if the device has no services.
uuid should be IOBluetoothSDPUUID object.
|
def unassign_assessment_taken_from_bank(self, assessment_taken_id, bank_id):
mgr = self._get_provider_manager('ASSESSMENT', local=True)
lookup_session = mgr.get_bank_lookup_session(proxy=self._proxy)
lookup_session.get_bank(bank_id)
self._unassign_object_from_catalog(assessment_taken_id, bank_id)
|
Removes an ``AssessmentTaken`` from a ``Bank``.
arg: assessment_taken_id (osid.id.Id): the ``Id`` of the
``AssessmentTaken``
arg: bank_id (osid.id.Id): the ``Id`` of the ``Bank``
raise: NotFound - ``assessment_taken_id`` or ``bank_id`` not
found or ``assessment_taken_id`` not assigned to
``bank_id``
raise: NullArgument - ``assessment_taken_id`` or ``bank_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
|
async def _handle_job_queue_update(self, message: BackendGetQueue):
self._logger.debug("Received job queue update")
self._queue_update_last_attempt = 0
self._queue_cache = message
new_job_queue_cache = {}
for (job_id, is_local, _, _2, _3, _4, max_end) in message.jobs_running:
if is_local:
new_job_queue_cache[job_id] = (-1, max_end - time.time())
wait_time = 0
nb_tasks = 0
for (job_id, is_local, _, _2, timeout) in message.jobs_waiting:
if timeout > 0:
wait_time += timeout
if is_local:
new_job_queue_cache[job_id] = (nb_tasks, wait_time)
nb_tasks += 1
self._queue_job_cache = new_job_queue_cache
|
Handles a BackendGetQueue containing a snapshot of the job queue
|
def get(path):
file_path = __get_docker_file_path(path)
if file_path is None:
return __standardize_result(False,
'Path {} is not present'.format(path),
None, None)
salt_result = __read_docker_compose_file(file_path)
if not salt_result['status']:
return salt_result
project = __load_project(path)
if isinstance(project, dict):
salt_result['return']['valid'] = False
else:
salt_result['return']['valid'] = True
return salt_result
|
Get the content of the docker-compose file into a directory
path
Path where the docker-compose file is stored on the server
CLI Example:
.. code-block:: bash
salt myminion dockercompose.get /path/where/docker-compose/stored
|
def install_npm(path=None, build_dir=None, source_dir=None, build_cmd='build', force=False, npm=None):
class NPM(BaseCommand):
description = 'install package.json dependencies using npm'
def run(self):
if skip_npm:
log.info('Skipping npm-installation')
return
node_package = path or HERE
node_modules = pjoin(node_package, 'node_modules')
is_yarn = os.path.exists(pjoin(node_package, 'yarn.lock'))
npm_cmd = npm
if npm is None:
if is_yarn:
npm_cmd = ['yarn']
else:
npm_cmd = ['npm']
if not which(npm_cmd[0]):
log.error("`{0}` unavailable. If you're running this command "
"using sudo, make sure `{0}` is availble to sudo"
.format(npm_cmd[0]))
return
if force or is_stale(node_modules, pjoin(node_package, 'package.json')):
log.info('Installing build dependencies with npm. This may '
'take a while...')
run(npm_cmd + ['install'], cwd=node_package)
if build_dir and source_dir and not force:
should_build = is_stale(build_dir, source_dir)
else:
should_build = True
if should_build:
run(npm_cmd + ['run', build_cmd], cwd=node_package)
return NPM
|
Return a Command for managing an npm installation.
Note: The command is skipped if the `--skip-npm` flag is used.
Parameters
----------
path: str, optional
The base path of the node package. Defaults to the repo root.
build_dir: str, optional
The target build directory. If this and source_dir are given,
the JavaScript will only be built if necessary.
source_dir: str, optional
The source code directory.
build_cmd: str, optional
The npm command to build assets to the build_dir.
npm: str or list, optional.
The npm executable name, or a tuple of ['node', executable].
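Example (a usage sketch for a setup.py ``cmdclass``; paths are illustrative):
    from setuptools import setup

    setup(
        name='mypackage',
        cmdclass={'jsdeps': install_npm(path='js', build_dir='js/dist',
                                        source_dir='js/src', build_cmd='build')},
    )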
|
def get_load(jid):
options = _get_options(ret=None)
_response = _request("GET", options['url'] + options['db'] + '/' + jid)
if 'error' in _response:
log.error('Unable to get JID "%s" : "%s"', jid, _response)
return {}
return {_response['id']: _response}
|
Included for API consistency
|
def _setup_events(plugin):
events = plugin.events
if events and isinstance(events, (list, tuple)):
for event in [e for e in events if e in _EVENT_VALS]:
register('event', event, plugin)
|
Handles setup or teardown of event hook registration for the provided
plugin.
`plugin`
``Plugin`` class.
|
def idfn(fixture_params: Iterable[Any]) -> str:
return ":".join((str(item) for item in fixture_params))
|
Function for pytest to produce uniform names for fixtures.
|
def fetch_exemplars(keyword, outfile, n=50):
list_urls = fetch_lists(keyword, n)
print('found %d lists for %s' % (len(list_urls), keyword))
counts = Counter()
for list_url in list_urls:
counts.update(fetch_list_members(list_url))
outf = io.open(outfile, 'wt')
for handle in sorted(counts):
outf.write('%s\t%d\n' % (handle, counts[handle]))
outf.close()
print('saved exemplars to', outfile)
|
Fetch top lists matching this keyword, then return Twitter screen
names along with the number of different lists on which each appears.
|
def get_segments(self, addr, size):
address_wrappers = self.normalize_address(addr, is_write=False)
aw = address_wrappers[0]
region_id = aw.region
if region_id in self.regions:
region = self.regions[region_id]
alocs = region.get_abstract_locations(aw.address, size)
segments = [ ]
for aloc in alocs:
segments.extend(aloc.segments)
segments = sorted(segments, key=lambda x: x.offset)
processed_segments = [ ]
last_seg = None
for seg in segments:
if last_seg is None:
last_seg = seg
processed_segments.append(seg)
else:
if seg.offset >= last_seg.offset and seg.offset <= last_seg.offset + size:
continue
processed_segments.append(seg)
sizes = [ ]
next_pos = aw.address
for seg in processed_segments:
if seg.offset > next_pos:
gap = seg.offset - next_pos
assert gap > 0
sizes.append(gap)
next_pos += gap
if seg.size + next_pos > aw.address + size:
sizes.append(aw.address + size - next_pos)
next_pos += aw.address + size - next_pos
else:
sizes.append(seg.size)
next_pos += seg.size
if not sizes:
return [ size ]
return sizes
else:
return [ size ]
|
Get a segmented memory region based on AbstractLocation information available from VSA.
Here are some assumptions to make this method fast:
- The entire memory region [addr, addr + size] is located within the same MemoryRegion
- The address 'addr' has only one concrete value. It cannot be concretized to multiple values.
:param addr: An address
:param size: Size of the memory area in bytes
:return: An ordered list of sizes each segment in the requested memory region
|
def add_tag(self, tag, value):
index = bisect_left(self.tags, (tag, value))
contains = False
if index < len(self.tags):
contains = self.tags[index] == (tag, value)
if not contains:
self.tags.insert(index, (tag, value))
|
As tags are kept in sorted order, bisection is the fastest way to identify the correct position
for a new tag to be added. An additional check is required to make sure we don't add duplicates.
|
def create_image(self, instance_id, name,
description=None, no_reboot=False):
params = {'InstanceId' : instance_id,
'Name' : name}
if description:
params['Description'] = description
if no_reboot:
params['NoReboot'] = 'true'
img = self.get_object('CreateImage', params, Image, verb='POST')
return img.id
|
Will create an AMI from the instance in the running or stopped
state.
:type instance_id: string
:param instance_id: the ID of the instance to image.
:type name: string
:param name: The name of the new image
:type description: string
:param description: An optional human-readable string describing
the contents and purpose of the AMI.
:type no_reboot: bool
:param no_reboot: An optional flag indicating that the bundling process
should not attempt to shutdown the instance before
bundling. If this flag is True, the responsibility
of maintaining file system integrity is left to the
owner of the instance.
:rtype: string
:return: The new image id
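Example (a usage sketch; assumes ``conn`` is a boto EC2 connection and the instance id is illustrative):
    image_id = conn.create_image('i-0123456789abcdef0', 'nightly-backup',
                                 description='Nightly AMI', no_reboot=True)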
|
def receive(uuid, source):
ret = {}
if not os.path.isdir(source):
ret['Error'] = 'Source must be a directory or host'
return ret
if not os.path.exists(os.path.join(source, '{0}.vmdata'.format(uuid))):
        ret['Error'] = 'Unknown vm with uuid in {0}'.format(source)
return ret
cmd = 'vmadm receive < {source}'.format(
source=os.path.join(source, '{0}.vmdata'.format(uuid))
)
res = __salt__['cmd.run_all'](cmd, python_shell=True)
retcode = res['retcode']
if retcode != 0 and not res['stderr'].endswith('datasets'):
ret['Error'] = res['stderr'] if 'stderr' in res else _exit_status(retcode)
return ret
vmobj = get(uuid)
if 'datasets' not in vmobj:
return True
log.warning('one or more datasets detected, this is not supported!')
log.warning('trying to restore datasets, mountpoints will need to be set again...')
for dataset in vmobj['datasets']:
name = dataset.split('/')
name = name[-1]
cmd = 'zfs receive {dataset} < {source}'.format(
dataset=dataset,
source=os.path.join(source, '{0}-{1}.zfsds'.format(uuid, name))
)
res = __salt__['cmd.run_all'](cmd, python_shell=True)
retcode = res['retcode']
if retcode != 0:
ret['Error'] = res['stderr'] if 'stderr' in res else _exit_status(retcode)
return ret
cmd = 'vmadm install {0}'.format(uuid)
res = __salt__['cmd.run_all'](cmd, python_shell=True)
retcode = res['retcode']
if retcode != 0 and not res['stderr'].endswith('datasets'):
ret['Error'] = res['stderr'] if 'stderr' in res else _exit_status(retcode)
return ret
return True
|
Receive a vm from a directory
uuid : string
uuid of vm to be received
source : string
source directory
CLI Example:
.. code-block:: bash
salt '*' vmadm.receive 186da9ab-7392-4f55-91a5-b8f1fe770543 /opt/backups
|
def get(cls, card_id, custom_headers=None):
if custom_headers is None:
custom_headers = {}
api_client = client.ApiClient(cls._get_api_context())
endpoint_url = cls._ENDPOINT_URL_READ.format(cls._determine_user_id(),
card_id)
response_raw = api_client.get(endpoint_url, {}, custom_headers)
return BunqResponseCard.cast_from_bunq_response(
cls._from_json(response_raw, cls._OBJECT_TYPE_GET)
)
|
Return the details of a specific card.
:type api_context: context.ApiContext
:type user_id: int
:type card_id: int
:type custom_headers: dict[str, str]|None
:rtype: BunqResponseCard
|
def sim_givenAdj(self, Adj: np.array, model='line'):
examples = [{'func' : 'sawtooth', 'gdist' : 'uniform',
'sigma_glob' : 1.8, 'sigma_noise' : 0.1}]
n_samples = 100
sigma_glob = 1.8
sigma_noise = 0.4
func = self.funcs[model]
sourcedist = 'uniform'
dim = Adj.shape[0]
X = np.zeros((n_samples,dim))
nrpar = 0
children = list(range(dim))
parents = []
for gp in range(dim):
if Adj[gp,:].sum() == nrpar:
if sourcedist == 'gaussian':
X[:,gp] = np.random.normal(0,sigma_glob,n_samples)
if sourcedist == 'uniform':
X[:,gp] = np.random.uniform(-sigma_glob,sigma_glob,n_samples)
parents.append(gp)
children.remove(gp)
children_sorted = []
nrchildren_par = np.zeros(dim)
nrchildren_par[0] = len(parents)
for nrpar in range(1,dim):
for gp in children:
if Adj[gp,:].sum() == nrpar:
children_sorted.append(gp)
nrchildren_par[nrpar] += 1
if nrchildren_par[1] > 1:
if Adj[children_sorted[0],parents[0]] == 0:
help = children_sorted[0]
children_sorted[0] = children_sorted[1]
children_sorted[1] = help
for gp in children_sorted:
for g in range(dim):
if Adj[gp,g] > 0:
X[:,gp] += 1./Adj[gp,:].sum()*func(X[:,g])
X[:,gp] += np.random.normal(0,sigma_noise,n_samples)
return X
|
Simulate data given only an adjacency matrix and a model.
The model is a bivariate functional dependence. The adjacency matrix
needs to be acyclic.
Parameters
----------
Adj
adjacency matrix of shape (dim, dim).
Returns
-------
Data array of shape (n_samples,dim).
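Example (a usage sketch; assumes ``sim`` is an instance of the surrounding class, which supplies ``self.funcs``):
    import numpy as np
    Adj = np.array([[0, 0],
                    [1, 0]])   # node 1 depends on node 0
    X = sim.sim_givenAdj(Adj, model='line')
    X.shape  # (100, 2)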
|
def add_crs(op, element, **kwargs):
return element.map(lambda x: convert_to_geotype(x, kwargs.get('crs')), Element)
|
Converts any elements in the input to their equivalent geotypes
if given a coordinate reference system.
|
def fields_to_dict(obj, skip_fields=None):
data = {}
obj = api.get_object(obj)
for field_name, field in api.get_fields(obj).items():
if skip_fields and field_name in skip_fields:
continue
if field.type == "computed":
continue
data[field_name] = field.get(obj)
return data
|
Generates a dictionary with the field values of the object passed in, where
keys are the field names. Skips computed fields
|
def get_all_conda_bins():
bcbio_bin = get_bcbio_bin()
conda_dir = os.path.dirname(bcbio_bin)
if os.path.join("anaconda", "envs") in conda_dir:
conda_dir = os.path.join(conda_dir[:conda_dir.rfind(os.path.join("anaconda", "envs"))], "anaconda")
return [bcbio_bin] + list(glob.glob(os.path.join(conda_dir, "envs", "*", "bin")))
|
Retrieve all possible conda bin directories, including environments.
|
def text_bounding_box(self, size_pt, text):
if size_pt == 12:
mult = {"h": 9, "w_digit": 5, "w_space": 2}
elif size_pt == 18:
mult = {"h": 14, "w_digit": 9, "w_space": 2}
num_chars = len(text)
return (num_chars * mult["w_digit"] + (num_chars - 1) * mult["w_space"] + 1, mult["h"])
|
Return the bounding box of the given text
at the given font size.
:param int size_pt: the font size in points
:param string text: the text
:rtype: tuple (width, height)
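Example (a worked instance of the formula above; assumes ``canvas`` is an instance of the surrounding class):
    canvas.text_bounding_box(12, 'HELLO')  # 5*5 + 4*2 + 1 = 34 wide, 9 high -> (34, 9)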
|
def _is_retryable_exception(e):
if isinstance(e, urllib3.exceptions.ProtocolError):
e = e.args[1]
if isinstance(e, (socket.gaierror, socket.herror)):
return True
if isinstance(e, socket.error) and e.errno in _RETRYABLE_SOCKET_ERRORS:
return True
if isinstance(e, urllib3.exceptions.NewConnectionError):
return True
return False
|
Returns True if the exception is always safe to retry.
This is True if the client was never able to establish a connection
to the server (for example, name resolution failed or the connection
could otherwise not be initialized).
Conservatively, if we can't tell whether a network connection could
have been established, we return False.
|
def sg_concat(tensor, opt):
assert opt.target is not None, 'target is mandatory.'
opt += tf.sg_opt(axis=tensor.get_shape().ndims-1)
target = opt.target if isinstance(opt.target, (tuple, list)) else [opt.target]
return tf.concat([tensor] + target, opt.axis, name=opt.name)
|
r"""Concatenates tensors along a axis.
See `tf.concat()` in tensorflow.
Args:
tensor: A `Tensor` (automatically given by chain).
opt:
target: A `Tensor`. Must have the same rank as `tensor`, and
all dimensions except `opt.dim` must be equal.
axis : Target axis. Default is the last one.
name: If provided, replace current tensor's name.
Returns:
A `Tensor`.
|
def percentage_of_reoccurring_datapoints_to_all_datapoints(x):
if len(x) == 0:
return np.nan
unique, counts = np.unique(x, return_counts=True)
if counts.shape[0] == 0:
return 0
return np.sum(counts > 1) / float(counts.shape[0])
|
Returns the percentage of unique values, that are present in the time series
more than once.
len(different values occurring more than once) / len(different values)
This means the percentage is normalized to the number of unique values,
in contrast to the percentage_of_reoccurring_values_to_all_values.
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:return: the value of this feature
:return type: float
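Example (a worked instance):
    import numpy as np
    x = np.array([1, 1, 2, 3])
    # unique values are {1, 2, 3}; only 1 occurs more than once -> 1/3
    percentage_of_reoccurring_datapoints_to_all_datapoints(x)  # 0.333...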
|
def get_condition_name_by_address(self, address):
if self.lock_reward_condition.address == address:
return 'lockReward'
elif self.access_secret_store_condition.address == address:
return 'accessSecretStore'
elif self.escrow_reward_condition.address == address:
return 'escrowReward'
else:
logging.error(f'The current address {address} is not a condition address')
|
Return the condition name for a given address.
|
def fetch_pcr(*args, **kwargs):
kwargs['token'] = os.getenv("PCR_AUTH_TOKEN", "public")
return fetch(DOMAIN, *args, **kwargs)['result']
|
Wrapper for fetch to automatically parse results from the PCR API.
|
def make_4gaussians_image(noise=True):
table = Table()
table['amplitude'] = [50, 70, 150, 210]
table['x_mean'] = [160, 25, 150, 90]
table['y_mean'] = [70, 40, 25, 60]
table['x_stddev'] = [15.2, 5.1, 3., 8.1]
table['y_stddev'] = [2.6, 2.5, 3., 4.7]
table['theta'] = np.array([145., 20., 0., 60.]) * np.pi / 180.
shape = (100, 200)
data = make_gaussian_sources_image(shape, table) + 5.
if noise:
data += make_noise_image(shape, type='gaussian', mean=0.,
stddev=5., random_state=12345)
return data
|
Make an example image containing four 2D Gaussians plus a constant
background.
The background has a mean of 5.
If ``noise`` is `True`, then Gaussian noise with a mean of 0 and a
standard deviation of 5 is added to the output image.
Parameters
----------
noise : bool, optional
Whether to include noise in the output image (default is
`True`).
Returns
-------
image : 2D `~numpy.ndarray`
Image containing four 2D Gaussian sources.
See Also
--------
make_100gaussians_image
Examples
--------
.. plot::
:include-source:
from photutils import datasets
image = datasets.make_4gaussians_image()
plt.imshow(image, origin='lower', interpolation='nearest')
|
def _extract_info_from_useragent(user_agent):
parsed_string = user_agent_parser.Parse(user_agent)
return {
'os': parsed_string.get('os', {}).get('family'),
'browser': parsed_string.get('user_agent', {}).get('family'),
'browser_version': parsed_string.get('user_agent', {}).get('major'),
'device': parsed_string.get('device', {}).get('family'),
}
|
Extract extra information from the user agent.
|
def create_table(self, model):
self.orm[model._meta.table_name] = model
model._meta.database = self.database
self.ops.append(model.create_table)
return model
|
Create model and table in database.
>> migrator.create_table(model)
|
def as_dataframe(self, time_index=False, absolute_time=False):
import pandas as pd
dataframe_dict = OrderedDict()
for key, value in self.objects.items():
if value.has_data:
index = value.time_track(absolute_time) if time_index else None
dataframe_dict[key] = pd.Series(data=value.data, index=index)
return pd.DataFrame.from_dict(dataframe_dict)
|
Converts the TDMS file to a DataFrame
:param time_index: Whether to include a time index for the dataframe.
:param absolute_time: If time_index is true, whether the time index
values are absolute times or relative to the start time.
:return: The full TDMS file data.
:rtype: pandas.DataFrame
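Example (a usage sketch; assumes the npTDMS package, where this method lives on ``TdmsFile``; the path is illustrative):
    from nptdms import TdmsFile
    tdms_file = TdmsFile('measurements.tdms')
    df = tdms_file.as_dataframe(time_index=True, absolute_time=False)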
|
def analyze(output_dir, dataset, cloud=False, project_id=None):
job = analyze_async(
output_dir=output_dir,
dataset=dataset,
cloud=cloud,
project_id=project_id)
job.wait()
print('Analyze: ' + str(job.state))
|
Blocking version of analyze_async. See documentation of analyze_async.
|
async def set_windows_kms_host(cls, host: typing.Optional[str]):
await cls.set_config("windows_kms_host", "" if host is None else host)
|
See `get_windows_kms_host`.
|
def get_dfdx_callback(self):
dfdx_exprs = self.get_dfdx()
if dfdx_exprs is False:
return None
return self._callback_factory(dfdx_exprs)
|
Generate a callback for evaluating derivative of ``self.exprs``
|
def to_paginated_list(self, result, _ns, _operation, **kwargs):
items, context = self.parse_result(result)
headers = dict()
paginated_list = PaginatedList(
items=items,
_page=self,
_ns=_ns,
_operation=_operation,
_context=context,
)
return paginated_list, headers
|
Convert a controller result to a paginated list.
The result format is assumed to meet the contract of this page class's `parse_result` function.
|
def pattern_for_view(self, view, action):
if getattr(view, 'derive_url_pattern', None):
return view.derive_url_pattern(self.path, action)
else:
return r'^%s/%s/$' % (self.path, action)
|
Returns the URL pattern for the passed in action.
|
def _task_to_dict(task):
payload = task.payload
if payload and task.content_type == 'application/json':
payload = json.loads(payload)
return dict(
task_id=task.task_id,
queue_name=task.queue_name,
eta=_datetime_to_epoch_seconds(task.eta),
source=task.source,
created=_datetime_to_epoch_seconds(task.created),
lease_attempts=task.lease_attempts,
last_lease=_datetime_to_epoch_seconds(task.last_lease),
payload=payload,
content_type=task.content_type)
|
Converts a WorkQueue to a JSON-able dictionary.
|
def _get_feed_data(self, file_paths):
rv = {}
for i in file_paths:
_ = i.split('/')
category = _[-2]
name = _[-1].split('.')[0]
page_config, md = self._get_config_and_content(i)
parsed_md = tools.parse_markdown(md, self.site_config)
rv.setdefault(category, {})
rv[category].update(
{
i: {
'title': page_config.get('title', ''),
'name': name.decode('utf-8'),
'content': parsed_md,
'date': page_config.get('date', '')
}
}
)
return rv
|
get data to display in feed file
|
def _on_enter(self, *args):
self.config(foreground=self._hover_color, cursor=self._cursor)
|
Set the text color to the hover color.
|
def step(self):
logy = self.loglike - rexponential(1)
L = self.stochastic.value - runiform(0, self.w)
R = L + self.w
if self.doubling:
K = self.m
while (K and (logy < self.fll(L) or logy < self.fll(R))):
if random() < 0.5:
L -= R - L
else:
R += R - L
K -= 1
else:
J = np.floor(runiform(0, self.m))
K = (self.m - 1) - J
while(J > 0 and logy < self.fll(L)):
L -= self.w
J -= 1
while(K > 0 and logy < self.fll(R)):
R += self.w
K -= 1
self.stochastic.value = runiform(L, R)
try:
logy_new = self.loglike
except ZeroProbability:
logy_new = -np.infty
while(logy_new < logy):
if (self.stochastic.value < self.stochastic.last_value):
L = float(self.stochastic.value)
else:
R = float(self.stochastic.value)
self.stochastic.revert()
self.stochastic.value = runiform(L, R)
try:
logy_new = self.loglike
except ZeroProbability:
logy_new = -np.infty
|
Slice step method
From Neal 2003 (doi:10.1214/aos/1056562461)
|