code | docstring
---|---
def _next_datetime_with_utc_hour(table_name, utc_hour):
today = datetime.date.today()
start_date_time = datetime.datetime(
year=today.year,
month=today.month,
day=today.day,
hour=utc_hour,
minute=_get_deterministic_value_for_table_name(table_name, 60),
second=_get_deterministic_value_for_table_name(table_name, 60)
)
if start_date_time < datetime.datetime.utcnow():
one_day = datetime.timedelta(days=1)
start_date_time += one_day
return start_date_time
|
The Datapipeline API is throttling us, as all the pipelines are started at the same time.
We would like to uniformly distribute the startTime over a 60-minute window.
Return the next future UTC datetime where
hour == utc_hour
minute == a value between 0-59 (depending on table name)
second == a value between 0-59 (depending on table name)
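The helper `_get_deterministic_value_for_table_name` is not shown here; a minimal sketch of one plausible implementation (an assumption, not necessarily the original) hashes the table name so each table gets a stable offset that spreads roughly uniformly over the window:

import hashlib

def _get_deterministic_value_for_table_name(table_name, modulo):
    # Hash the table name so the same table always maps to the same offset,
    # while different tables spread roughly uniformly over [0, modulo).
    digest = hashlib.md5(table_name.encode("utf-8")).hexdigest()
    return int(digest, 16) % modulo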
|
def set_rest_notification(self, hit_type, url, event_types=None):
return self._set_notification(hit_type, 'REST', url, event_types)
|
Performs a SetHITTypeNotification operation to set REST notification
for a specified HIT type
|
def fetcher_with_object(cls, parent_object, relationship="child"):
fetcher = cls()
fetcher.parent_object = parent_object
fetcher.relationship = relationship
rest_name = cls.managed_object_rest_name()
parent_object.register_fetcher(fetcher, rest_name)
return fetcher
|
Register the fetcher for a served object.
This method will fill the fetcher with `managed_class` instances.
Args:
parent_object: the instance of the parent object to serve
relationship: the relationship of the fetched children to the parent (defaults to "child")
Returns:
the fetcher instance.
|
def pad_to(data, alignment, pad_character=b'\xFF'):
pad_mod = len(data) % alignment
if pad_mod != 0:
data += pad_character * (alignment - pad_mod)
return data
|
Pad to the next alignment boundary
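A short usage sketch (byte values are illustrative, not from the source):
>>> pad_to(b'\x01\x02\x03', 4)
b'\x01\x02\x03\xff'
>>> pad_to(b'\x01\x02\x03\x04', 4)   # already aligned, returned unchanged
b'\x01\x02\x03\x04'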
|
def is_moderated(self, curr_time, pipe):
value = pipe.get(self.moderate_key)
if value is None:
value = 0.0
else:
value = float(value)
if (curr_time - value) < self.moderation:
return True
return False
|
Tests whether the moderation limit would be exceeded
@return: True if the moderation limit is exceeded (the call should be moderated), False otherwise
|
def _find_combo_text(widget, value):
i = widget.findText(value)
if i == -1:
raise ValueError("%s not found in combo box" % value)
else:
return i
|
Returns the index in a combo box where text == value
Raises a ValueError if the value is not found
|
def make_compound_word(self, start_index, how_many):
if not self.quiet:
compound_word = ""
for word in self.unit_list[start_index:start_index + how_many]:
compound_word += " " + word.text
print(compound_word.strip(), "-->", "_".join(compound_word.split()))
for other_unit in range(1, how_many):
self.unit_list[start_index].original_text.append(self.unit_list[start_index + other_unit].text)
self.unit_list[start_index].text += "_" + self.unit_list[start_index + other_unit].text
self.unit_list[start_index].end_time = self.unit_list[start_index + how_many - 1].end_time
self.unit_list = self.unit_list[:start_index + 1] + self.unit_list[start_index + how_many:]
|
Combines consecutive Units in self.unit_list to make a compound word token.
:param int start_index: Index of the first Unit in self.unit_list to be combined
:param int how_many: Number of Units in self.unit_list to be combined.
Modifies:
- self.unit_list: Modifies the Unit corresponding to the first word
in the compound word. Changes the .text property to include .text
properties from subsequent Units, separated by underscores. Modifies
the .original_text property to record each component word separately.
Modifies the .end_time property to be the .end_time of the final unit
in the compound word. Finally, after extracting the text and timing
information, it removes all units in the compound word except for the
first.
.. note: This method is only used with semantic processing, so we don't need to worry
about the phonetic representation of Units.
|
def iterate_with_name(cls):
for attr_name, field in cls.iterate_over_fields():
structure_name = field.structue_name(attr_name)
yield attr_name, structure_name, field
|
Iterate over fields, but also give `structure_name`.
Format is `(attribute_name, structue_name, field_instance)`.
Structure name is name under which value is seen in structure and
schema (in primitives) and only there.
|
def rpush(self, key, *args):
redis_list = self._get_list(key, 'RPUSH', create=True)
redis_list.extend(map(self._encode, args))
return len(redis_list)
|
Emulate rpush.
|
def _validate_certificate_url(self, cert_url):
parsed_url = urlparse(cert_url)
protocol = parsed_url.scheme
if protocol.lower() != CERT_CHAIN_URL_PROTOCOL.lower():
raise VerificationException(
"Signature Certificate URL has invalid protocol: {}. "
"Expecting {}".format(protocol, CERT_CHAIN_URL_PROTOCOL))
hostname = parsed_url.hostname
if (hostname is None or
hostname.lower() != CERT_CHAIN_URL_HOSTNAME.lower()):
raise VerificationException(
"Signature Certificate URL has invalid hostname: {}. "
"Expecting {}".format(hostname, CERT_CHAIN_URL_HOSTNAME))
normalized_path = os.path.normpath(parsed_url.path)
if not normalized_path.startswith(CERT_CHAIN_URL_STARTPATH):
raise VerificationException(
"Signature Certificate URL has invalid path: {}. "
"Expecting the path to start with {}".format(
normalized_path, CERT_CHAIN_URL_STARTPATH))
port = parsed_url.port
if port is not None and port != CERT_CHAIN_URL_PORT:
raise VerificationException(
"Signature Certificate URL has invalid port: {}. "
"Expecting {}".format(str(port), str(CERT_CHAIN_URL_PORT)))
|
Validate the URL containing the certificate chain.
This method validates if the URL provided adheres to the format
mentioned here :
https://developer.amazon.com/docs/custom-skills/host-a-custom-skill-as-a-web-service.html#cert-verify-signature-certificate-url
:param cert_url: URL for retrieving certificate chain
:type cert_url: str
:raises: :py:class:`VerificationException` if the URL is invalid
|
def shell(self):
click.echo(click.style("NOTICE!", fg="yellow", bold=True) + " This is a " + click.style("local", fg="green", bold=True) + " shell, inside a " + click.style("Zappa", bold=True) + " object!")
self.zappa.shell()
return
|
Spawn a debug shell.
|
def atomic_open_for_write(target, binary=False, newline=None, encoding=None):
mode = "w+b" if binary else "w"
f = NamedTemporaryFile(
dir=os.path.dirname(target),
prefix=".__atomic-write",
mode=mode,
encoding=encoding,
newline=newline,
delete=False,
)
os.chmod(f.name, stat.S_IWUSR | stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
try:
yield f
except BaseException:
f.close()
try:
os.remove(f.name)
except OSError:
pass
raise
else:
f.close()
try:
os.remove(target)
except OSError:
pass
os.rename(f.name, target)
|
Atomically open `target` for writing.
This is based on Lektor's `atomic_open()` utility, but simplified a lot
to handle only writing, and skip many multi-process/thread edge cases
handled by Werkzeug.
:param str target: Target filename to write
:param bool binary: Whether to open in binary mode, default False
:param str newline: The newline character to use when writing, determined from system if not supplied
:param str encoding: The encoding to use when writing, defaults to system encoding
How this works:
* Create a temp file (in the same directory of the actual target), and
yield for surrounding code to write to it.
* If something goes wrong, try to remove the temp file. The actual target
is not touched whatsoever.
* If everything goes well, close the temp file, and replace the actual
target with this new file.
.. code:: python
>>> fn = "test_file.txt"
>>> def read_test_file(filename=fn):
with open(filename, 'r') as fh:
print(fh.read().strip())
>>> with open(fn, "w") as fh:
fh.write("this is some test text")
>>> read_test_file()
this is some test text
>>> def raise_exception_while_writing(filename):
with open(filename, "w") as fh:
fh.write("writing some new text")
raise RuntimeError("Uh oh, hope your file didn't get overwritten")
>>> raise_exception_while_writing(fn)
Traceback (most recent call last):
...
RuntimeError: Uh oh, hope your file didn't get overwritten
>>> read_test_file()
writing some new text
# Now try with vistir
>>> def raise_exception_while_writing(filename):
with vistir.contextmanagers.atomic_open_for_write(filename) as fh:
fh.write("Overwriting all the text from before with even newer text")
raise RuntimeError("But did it get overwritten now?")
>>> raise_exception_while_writing(fn)
Traceback (most recent call last):
...
RuntimeError: But did it get overwritten now?
>>> read_test_file()
writing some new text
|
def compute_symm_block_tridiag_covariances(H_diag, H_upper_diag):
T, D, _ = H_diag.shape
assert H_diag.ndim == 3 and H_diag.shape[2] == D
assert H_upper_diag.shape == (T - 1, D, D)
J_init = J_11 = J_22 = np.zeros((D, D))
h_init = h_1 = h_2 = np.zeros((D,))
J_21 = np.swapaxes(H_upper_diag, -1, -2)
J_node = H_diag
h_node = np.zeros((T, D))
_, _, sigmas, E_xt_xtp1 = \
info_E_step(J_init, h_init, 0,
J_11, J_21, J_22, h_1, h_2, np.zeros((T-1)),
J_node, h_node, np.zeros(T))
return sigmas, E_xt_xtp1
|
use the info smoother to solve a symmetric block tridiagonal system
|
def daily_returns(ts, **kwargs):
relative = kwargs.get('relative', 0)
return returns(ts, delta=BDay(), relative=relative)
|
Compute returns of ts on a daily (business-day) basis.
|
def _rand_sparse(m, n, density, format='csr'):
nnz = max(min(int(m*n*density), m*n), 0)
row = np.random.randint(low=0, high=m-1, size=nnz)
col = np.random.randint(low=0, high=n-1, size=nnz)
data = np.ones(nnz, dtype=float)
return sp.sparse.csr_matrix((data, (row, col)), shape=(m, n))
|
Construct base function for sprand, sprandn.
|
def _try_to_squeeze(obj, raise_=False):
if isinstance(obj, pd.Series):
return obj
elif isinstance(obj, pd.DataFrame) and obj.shape[-1] == 1:
return obj.squeeze()
else:
if raise_:
raise ValueError("Input cannot be squeezed.")
return obj
|
Attempt to squeeze to 1d Series.
Parameters
----------
obj : {pd.Series, pd.DataFrame}
raise_ : bool, default False
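A small usage sketch (data values are illustrative, not from the source):
>>> import pandas as pd
>>> single = pd.DataFrame({"a": [1, 2, 3]})
>>> type(_try_to_squeeze(single))
<class 'pandas.core.series.Series'>
>>> wide = pd.DataFrame({"a": [1], "b": [2]})
>>> _try_to_squeeze(wide) is wide   # more than one column: returned unchanged
True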
|
def submit_and_render():
data = request.files.file
template = env.get_template("results.html")
if not data:
pass
results = analyse_pcap(data.file, data.filename)
results.update(base)
return template.render(results)
|
Blocking POST handler for file submission.
Runs snort on supplied file and returns results as rendered html.
|
def saveAsTextFiles(self, prefix, suffix=None):
def saveAsTextFile(t, rdd):
path = rddToFileName(prefix, suffix, t)
try:
rdd.saveAsTextFile(path)
except Py4JJavaError as e:
if 'FileAlreadyExistsException' not in str(e):
raise
return self.foreachRDD(saveAsTextFile)
|
Save each RDD in this DStream as a text file, using the string
representation of elements.
|
async def forget_ticket(self, request):
session = await get_session(request)
session.pop(self.cookie_name, '')
|
Called to forget the ticket data associated with a request
Args:
request: aiohttp Request object.
|
def flush_to_index(self):
assert self._smref is not None
assert not isinstance(self._file_or_files, BytesIO)
sm = self._smref()
if sm is not None:
index = self._index
if index is None:
index = sm.repo.index
index.add([sm.k_modules_file], write=self._auto_write)
sm._clear_cache()
|
Flush changes in our configuration file to the index
|
def database_caller_creator(self, host, port, name=None):
name = name or 0
client = redis.StrictRedis(host=host, port=port, db=name)
pipe = client.pipeline(transaction=False)
return client, pipe
|
Creates a redis connection object
which will later be used to modify the db
|
def shell(ctx, package, working_dir, sudo):
ctx.mode = CanariMode.LocalShellDebug
from canari.commands.shell import shell
shell(package, working_dir, sudo)
|
Runs a Canari interactive python shell
|
def coerce_types(**kwargs):
def _coerce(types):
return coerce(*types)
return preprocess(**valmap(_coerce, kwargs))
|
Preprocessing decorator that applies type coercions.
Parameters
----------
**kwargs : dict[str -> (type, callable)]
Keyword arguments mapping function parameter names to pairs of
(from_type, to_type).
Examples
--------
>>> @coerce_types(x=(float, int), y=(int, str))
... def func(x, y):
... return (x, y)
...
>>> func(1.0, 3)
(1, '3')
|
def _get_digest(self, info):
result = None
for algo in ('sha256', 'md5'):
key = '%s_digest' % algo
if key in info:
result = (algo, info[key])
break
return result
|
Get a digest from a dictionary by looking at keys of the form
'algo_digest'.
Returns a 2-tuple (algo, digest) if found, else None. Currently
looks only for SHA256, then MD5.
|
def close(self):
self.logger.info("Closing Rest Service")
self.closed = True
self._close_thread(self._redis_thread, "Redis setup")
self._close_thread(self._heartbeat_thread, "Heartbeat")
self._close_thread(self._kafka_thread, "Kafka setup")
self._close_thread(self._consumer_thread, "Consumer")
if self.consumer is not None:
self.logger.debug("Closing kafka consumer")
self.consumer.close()
if self.producer is not None:
self.logger.debug("Closing kafka producer")
self.producer.close(timeout=10)
|
Cleans up anything from the process
|
def hmetis(hdf5_file_name, N_clusters_max, w = None):
if w is None:
file_name = wgraph(hdf5_file_name, None, 2)
else:
file_name = wgraph(hdf5_file_name, w, 3)
labels = sgraph(N_clusters_max, file_name)
labels = one_to_max(labels)
subprocess.call(['rm', file_name])
return labels
|
Gives cluster labels ranging from 1 to N_clusters_max for
hypergraph partitioning required for HGPA.
Parameters
----------
hdf5_file_name : file handle or string
N_clusters_max : int
w : array, optional (default = None)
Returns
-------
labels : array of shape (n_samples,)
A vector of labels denoting the cluster to which each sample has been assigned
as a result of the HGPA approximation algorithm for consensus clustering.
Reference
---------
G. Karypis, R. Aggarwal, V. Kumar and S. Shekhar, "Multilevel hypergraph
partitioning: applications in VLSI domain"
In: IEEE Transactions on Very Large Scale Integration (VLSI) Systems,
Vol. 7, No. 1, pp. 69-79, 1999.
|
def exec_action(module, action, module_parameter=None, action_parameter=None, state_only=False):
out = __salt__['cmd.run'](
'eselect --brief --colour=no {0} {1} {2} {3}'.format(
module, module_parameter or '', action, action_parameter or ''),
python_shell=False
)
out = out.strip().split('\n')
if out[0].startswith('!!! Error'):
return False
if state_only:
return True
if not out:
return False
if len(out) == 1 and not out[0].strip():
return False
return out
|
Execute an arbitrary action on a module.
module
name of the module to be executed
action
name of the module's action to be run
module_parameter
additional params passed to the defined module
action_parameter
additional params passed to the defined action
state_only
don't return any output but only the success/failure of the operation
CLI Example (updating the ``php`` implementation used for ``apache2``):
.. code-block:: bash
salt '*' eselect.exec_action php update action_parameter='apache2'
|
def nextSunset(jd, lat, lon):
return swe.sweNextTransit(const.SUN, jd, lat, lon, 'SET')
|
Returns the JD of the next sunset.
|
def _wrap_universe(self, func):
@wraps(func)
def wrapper(graph, *args, **kwargs):
if self.universe is None:
raise MissingUniverseError(
'Can not run universe function [{}] - No universe is set'.format(func.__name__))
return func(self.universe, graph, *args, **kwargs)
return wrapper
|
Take a function that needs a universe graph as its first argument and return a wrapped one.
|
def cache_key(model, pk):
"Generates a cache key for a model instance."
app = model._meta.app_label
name = model._meta.module_name
return 'api:{0}:{1}:{2}'.format(app, name, pk)
|
Generates a cache key for a model instance.
|
def isempty(self, tables=None):
tables = tables or self.tables
for table in tables:
if self.num_rows(table) > 0:
return False
return True
|
Return whether a table or the entire database is empty.
A database is empty if it has no tables. A table is empty
if it has no rows.
Arguments:
tables (sequence of str, optional): If provided, check
that the named tables are empty. If not provided, check
that all tables are empty.
Returns:
bool: True if tables are empty, else False.
Raises:
sql.OperationalError: If one or more of the tables do not
exist.
|
def put(f, s3_path, multipart_chunk_size_mb=500, logger=None):
if not logger:
logger = log.get_logger('s3')
fname = os.path.basename(f)
target = os.path.join(s3_path, fname)
s3cmd_cline = 's3cmd put {} {} --multipart-chunk-size-mb {}'.format(f,
target,
multipart_chunk_size_mb)
print_put_info(fname, target, logger)
s3cmd = sp.Popen(s3cmd_cline,
stdout=sp.PIPE,
stderr=sp.PIPE,
shell=True)
stdout, stderr = s3cmd.communicate()
|
Uploads a single file to S3, using s3cmd.
Args:
f (str): Path to a single file.
s3_path (str): The S3 path, with the filename omitted. The S3 filename
will be the basename of ``f``. For example::
put(f='/path/to/myfile.tar.gz', s3_path='s3://my_bucket/path/to/')
will result in an uploaded S3 path of ``s3://my_bucket/path/to/myfile.tar.gz``
|
def opt_to_ri(f, res, nm):
ri = nm + f / (2 * np.pi) * res
return ri
|
r"""Convert the OPT object function to refractive index
In :abbr:`OPT (Optical Projection Tomography)`, the object function
is computed from the raw phase data. This method converts phase data
to refractive index data.
.. math::
n(\mathbf{r}) = n_\mathrm{m} +
\frac{f(\mathbf{r}) \cdot \lambda}{2 \pi}
Parameters
----------
f: n-dimensional ndarray
The reconstructed object function :math:`f(\mathbf{r})`.
res: float
The size of the vacuum wave length :math:`\lambda` in pixels.
nm: float
The refractive index of the medium :math:`n_\mathrm{m}` that
surrounds the object in :math:`f(\mathbf{r})`.
Returns
-------
ri: n-dimensional ndarray
The complex refractive index :math:`n(\mathbf{r})`.
Notes
-----
This function is not meant to be used with diffraction tomography
data. For ODT, use :py:func:`odt_to_ri` instead.
|
def update(self, data=None, priority=None, ttl=None, comment=None):
return self.manager.update_record(self.domain_id, self, data=data,
priority=priority, ttl=ttl, comment=comment)
|
Modifies this record.
|
def multiplicative_jitter(x, epsilon=1e-2):
if epsilon == 0:
return x
return x * mtf.random_uniform(
x.mesh, x.shape, minval=1.0 - epsilon, maxval=1.0+epsilon, dtype=x.dtype)
|
Multiply values by a random number between 1-epsilon and 1+epsilon.
Makes models more resilient to rounding errors introduced by bfloat16.
This seems particularly important for logits.
Args:
x: a mtf.Tensor
epsilon: a floating point value
Returns:
a mtf.Tensor with the same type and shape as x.
|
def update_values(self):
Q, R, A, B, N, C = self.Q, self.R, self.A, self.B, self.N, self.C
P, d = self.P, self.d
S1 = Q + self.beta * dot(B.T, dot(P, B))
S2 = self.beta * dot(B.T, dot(P, A)) + N
S3 = self.beta * dot(A.T, dot(P, A))
self.F = solve(S1, S2)
new_P = R - dot(S2.T, self.F) + S3
new_d = self.beta * (d + np.trace(dot(P, dot(C, C.T))))
self.P, self.d = new_P, new_d
|
This method is for updating in the finite horizon case. It
shifts the current value function
.. math::
V_t(x) = x' P_t x + d_t
and the optimal policy :math:`F_t` one step *back* in time,
replacing the pair :math:`P_t` and :math:`d_t` with
:math:`P_{t-1}` and :math:`d_{t-1}`, and :math:`F_t` with
:math:`F_{t-1}`
|
def coderelpath(coderoot, relpath):
from os import chdir, getcwd, path
cd = getcwd()
chdir(coderoot)
result = path.abspath(relpath)
chdir(cd)
return result
|
Returns the absolute path of the 'relpath' relative to the specified code directory.
|
def product(self, factorset, inplace=True):
factor_set = self if inplace else self.copy()
factor_set1 = factorset.copy()
factor_set.add_factors(*factor_set1.factors)
if not inplace:
return factor_set
|
r"""
Return the factor sets product with the given factor sets
Suppose :math:`\vec\phi_1` and :math:`\vec\phi_2` are two factor sets, then their product is another factor
set :math:`\vec\phi_3 = \vec\phi_1 \cup \vec\phi_2`.
Parameters
----------
factorsets: FactorSet1, FactorSet2, ..., FactorSetn
FactorSets to be multiplied
inplace: A boolean (Default value True)
If inplace = True , then it will modify the FactorSet object, if False, it will
return a new FactorSet object.
Returns
--------
If inplace = False, will return a new FactorSet object, which is the product of the two factor sets
Examples
--------
>>> from pgmpy.factors import FactorSet
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
>>> factor_set1 = FactorSet(phi1, phi2)
>>> phi3 = DiscreteFactor(['x5', 'x6', 'x7'], [2, 2, 2], range(8))
>>> phi4 = DiscreteFactor(['x5', 'x7', 'x8'], [2, 2, 2], range(8))
>>> factor_set2 = FactorSet(phi3, phi4)
>>> print(factor_set2)
set([<DiscreteFactor representing phi(x5:2, x6:2, x7:2) at 0x7f8e32b5b050>,
<DiscreteFactor representing phi(x5:2, x7:2, x8:2) at 0x7f8e32b5b690>])
>>> factor_set2.product(factor_set1)
>>> print(factor_set2)
set([<DiscreteFactor representing phi(x1:2, x2:3, x3:2) at 0x7f8e32b4c910>,
<DiscreteFactor representing phi(x3:2, x4:2, x1:2) at 0x7f8e32b4cc50>,
<DiscreteFactor representing phi(x5:2, x6:2, x7:2) at 0x7f8e32b5b050>,
<DiscreteFactor representing phi(x5:2, x7:2, x8:2) at 0x7f8e32b5b690>])
>>> factor_set2 = FactorSet(phi3, phi4)
>>> factor_set3 = factor_set2.product(factor_set1, inplace=False)
>>> print(factor_set2)
set([<DiscreteFactor representing phi(x5:2, x6:2, x7:2) at 0x7f8e32b5b060>,
<DiscreteFactor representing phi(x5:2, x7:2, x8:2) at 0x7f8e32b5b790>])
|
def to_css(self):
if self.a == 1.0:
return "rgb(%d, %d, %d)" % (self.r, self.g, self.b)
else:
return "rgba(%d, %d, %d, %s)" % (self.r, self.g, self.b, self.a)
|
Generate the CSS representation of this RGB color.
Returns:
str, ``"rgb(...)"`` or ``"rgba(...)"``
|
def get_connection(self, name):
return self._api_get('/api/connections/{0}'.format(
urllib.parse.quote_plus(name)
))
|
Get an individual connection.
:param name: The connection name
:type name: str
|
def init(self, value):
value = self.value_or_default(value)
if value is None: return None
if is_hashed(value):
return value
return make_password(value)
|
hash passwords given in the constructor
|
def add_particles_ascii(self, s):
for l in s.split("\n"):
r = l.split()
if len(r):
try:
r = [float(x) for x in r]
p = Particle(simulation=self, m=r[0], r=r[1], x=r[2], y=r[3], z=r[4], vx=r[5], vy=r[6], vz=r[7])
self.add(p)
except:
raise AttributeError("Each line requires 8 floats corresponding to mass, radius, position (x,y,z) and velocity (x,y,z).")
|
Adds particles from an ASCII string.
Parameters
----------
s : string
One particle per line. Each line should include particle's mass, radius, position and velocity.
|
def from_json(cls, json_moc):
intervals = np.array([])
for order, pix_l in json_moc.items():
if len(pix_l) == 0:
continue
pix = np.array(pix_l)
p1 = pix
p2 = pix + 1
shift = 2 * (AbstractMOC.HPY_MAX_NORDER - int(order))
itv = np.vstack((p1 << shift, p2 << shift)).T
if intervals.size == 0:
intervals = itv
else:
intervals = np.vstack((intervals, itv))
return cls(IntervalSet(intervals))
|
Creates a MOC from a dictionary of HEALPix cell arrays indexed by their depth.
Parameters
----------
json_moc : dict(str : [int])
A dictionary of HEALPix cell arrays indexed by their depth.
Returns
-------
moc : `~mocpy.moc.MOC` or `~mocpy.tmoc.TimeMOC`
the MOC.
|
def is_file(value, **kwargs):
try:
value = validators.file_exists(value, **kwargs)
except SyntaxError as error:
raise error
except Exception:
return False
return True
|
Indicate whether ``value`` is a file that exists on the local filesystem.
:param value: The value to evaluate.
:returns: ``True`` if ``value`` is valid, ``False`` if it is not.
:rtype: :class:`bool <python:bool>`
:raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates
keyword parameters passed to the underlying validator
|
def requires_target(self):
if self.has_combo and PlayReq.REQ_TARGET_FOR_COMBO in self.requirements:
if self.controller.combo:
return True
if PlayReq.REQ_TARGET_IF_AVAILABLE in self.requirements:
return bool(self.play_targets)
if PlayReq.REQ_TARGET_IF_AVAILABLE_AND_DRAGON_IN_HAND in self.requirements:
if self.controller.hand.filter(race=Race.DRAGON):
return bool(self.play_targets)
req = self.requirements.get(PlayReq.REQ_TARGET_IF_AVAILABLE_AND_MINIMUM_FRIENDLY_MINIONS)
if req is not None:
if len(self.controller.field) >= req:
return bool(self.play_targets)
req = self.requirements.get(PlayReq.REQ_TARGET_IF_AVAILABLE_AND_MINIMUM_FRIENDLY_SECRETS)
if req is not None:
if len(self.controller.secrets) >= req:
return bool(self.play_targets)
return PlayReq.REQ_TARGET_TO_PLAY in self.requirements
|
True if the card currently requires a target
|
def DEBUG_ON_RESPONSE(self, statusCode, responseHeader, data):
if self.DEBUG_FLAG:
self._frameBuffer[self._frameCount][1:4] = [statusCode, responseHeader, data]
responseHeader[self.DEBUG_HEADER_KEY] = self._frameCount
|
Update current frame with response
Current frame index will be attached to responseHeader
|
def live_migrate_move(self, userid, destination, parms):
rd = ('migratevm %(uid)s move --destination %(dest)s ' %
{'uid': userid, 'dest': destination})
if 'maxtotal' in parms:
rd += ('--maxtotal ' + str(parms['maxtotal']))
if 'maxquiesce' in parms:
rd += ('--maxquiesce ' + str(parms['maxquiesce']))
if 'immediate' in parms:
rd += " --immediate"
if 'forcearch' in parms:
rd += " --forcearch"
if 'forcedomain' in parms:
rd += " --forcedomain"
if 'forcestorage' in parms:
rd += " --forcestorage"
action = "move userid '%s' to SSI '%s'" % (userid, destination)
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
msg = ''
if action is not None:
msg = "Failed to %s. " % action
msg += "SMT error: %s" % err.format_message()
LOG.error(msg)
raise exception.SDKSMTRequestFailed(err.results, msg)
|
moves the specified virtual machine, while it continues to run,
to the specified system within the SSI cluster.
|
def get_file_descriptor(self):
return self._subscription.connection and self._subscription.connection._sock.fileno()
|
Returns the file descriptor used for passing to the select call when listening
on the message queue.
|
def fetch_json_by_name(name):
result = fetch_meta_by_name(name)
if result.href:
result = fetch_json_by_href(result.href)
return result
|
Fetch json based on the element name
First gets the href based on a search by name, then makes a
second query to obtain the element json
:method: GET
:param str name: element name
:return: :py:class:`smc.api.web.SMCResult`
|
def clone_from(cls, url, to_path, progress=None, env=None, **kwargs):
git = Git(os.getcwd())
if env is not None:
git.update_environment(**env)
return cls._clone(git, url, to_path, GitCmdObjectDB, progress, **kwargs)
|
Create a clone from the given URL
:param url: valid git url, see http://www.kernel.org/pub/software/scm/git/docs/git-clone.html#URLS
:param to_path: Path to which the repository should be cloned to
:param progress: See 'git.remote.Remote.push'.
:param env: Optional dictionary containing the desired environment variables.
:param kwargs: see the ``clone`` method
:return: Repo instance pointing to the cloned directory
|
def abort(self, err):
if _debug: IOGroup._debug("abort %r", err)
self.ioState = ABORTED
self.ioError = err
for iocb in self.ioMembers:
iocb.abort(err)
self.trigger()
|
Called by a client to abort all of the member transactions.
When the last pending member is aborted the group callback
function will be called.
|
def _retrieve(self, map):
self._conn.request('GET', "cache_object://%s/%s" % (self._host, map),
None, self._httpHeaders)
rp = self._conn.getresponse()
if rp.status == 200:
data = rp.read()
return data
else:
raise Exception("Retrieval of stats from Squid Proxy Server"
"on host %s and port %s failed.\n"
"HTTP - Status: %s Reason: %s"
% (self._host, self._port, rp.status, rp.reason))
|
Query Squid Proxy Server Manager Interface for stats.
@param map: Statistics map name.
@return: Raw response data containing the query results.
|
def write_output_file(self,
path: str,
per_identity_data: 'RDD',
spark_session: Optional['SparkSession'] = None) -> None:
_spark_session_ = get_spark_session(spark_session)
if not self._window_bts:
per_identity_data.flatMap(
lambda x: [json.dumps(data, cls=BlurrJSONEncoder) for data in x[1][0].items()]
).saveAsTextFile(path)
else:
_spark_session_.createDataFrame(per_identity_data.flatMap(lambda x: x[1][1])).write.csv(
path, header=True)
|
Basic helper function to persist data to disk.
If a window BTS was provided then the window BTS output is written in CSV format; otherwise,
the streaming BTS output is written in JSON format to the `path` provided
:param path: Path where the output should be written.
:param per_identity_data: Output of the `execute()` call.
:param spark_session: `SparkSession` to use for execution. If None is provided then a basic
`SparkSession` is created.
:return:
|
def month_interval(year, month, milliseconds=False, return_string=False):
if milliseconds:
delta = timedelta(milliseconds=1)
else:
delta = timedelta(seconds=1)
if month == 12:
start = datetime(year, month, 1)
end = datetime(year + 1, 1, 1) - delta
else:
start = datetime(year, month, 1)
end = datetime(year, month + 1, 1) - delta
if not return_string:
return start, end
else:
return str(start), str(end)
|
Return a start datetime and end datetime of a month.
:param milliseconds: Minimum time resolution.
:param return_string: If you want string instead of datetime, set True
Usage Example::
>>> start, end = rolex.month_interval(2000, 2)
>>> start
datetime(2000, 2, 1, 0, 0, 0)
>>> end
datetime(2000, 2, 29, 23, 59, 59)
|
def _encode_dbref(name, value, check_keys, opts):
buf = bytearray(b"\x03" + name + b"\x00\x00\x00\x00")
begin = len(buf) - 4
buf += _name_value_to_bson(b"$ref\x00",
value.collection, check_keys, opts)
buf += _name_value_to_bson(b"$id\x00",
value.id, check_keys, opts)
if value.database is not None:
buf += _name_value_to_bson(
b"$db\x00", value.database, check_keys, opts)
for key, val in iteritems(value._DBRef__kwargs):
buf += _element_to_bson(key, val, check_keys, opts)
buf += b"\x00"
buf[begin:begin + 4] = _PACK_INT(len(buf) - begin)
return bytes(buf)
|
Encode bson.dbref.DBRef.
|
def normalized(vector):
length = numpy.sum(vector * vector, axis=-1)
length = numpy.sqrt(length.reshape(length.shape + (1, )))
return vector / length
|
Get unit vector for a given one.
:param vector:
Numpy vector as coordinates in Cartesian space, or an array of such.
:returns:
Numpy array of the same shape and structure where all vectors are
normalized. That is, each coordinate component is divided by its
vector's length.
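A short usage sketch (values are illustrative; exact array formatting may vary by numpy version):
>>> import numpy
>>> normalized(numpy.array([3.0, 4.0]))
array([0.6, 0.8])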
|
def remove_config_lock(name):
ret = _default_ret(name)
ret.update({
'changes': __salt__['panos.remove_config_lock'](),
'result': True
})
return ret
|
Release config lock previously held.
name: The name of the module function to execute.
SLS Example:
.. code-block:: yaml
panos/takelock:
panos.remove_config_lock
|
def OIDC_UNAUTHENTICATED_SESSION_MANAGEMENT_KEY(self):
if not self._unauthenticated_session_management_key:
self._unauthenticated_session_management_key = ''.join(
random.choice(string.ascii_uppercase + string.digits) for _ in range(100))
return self._unauthenticated_session_management_key
|
OPTIONAL. Supply a fixed string to use as browser-state key for unauthenticated clients.
|
def copy(self):
o = Option(
name=self.name,
default=self.default,
doc=self.doc,
from_string_converter=self.from_string_converter,
to_string_converter=self.to_string_converter,
value=self.value,
short_form=self.short_form,
exclude_from_print_conf=self.exclude_from_print_conf,
exclude_from_dump_conf=self.exclude_from_dump_conf,
is_argument=self.is_argument,
likely_to_be_changed=self.likely_to_be_changed,
not_for_definition=self.not_for_definition,
reference_value_from=self.reference_value_from,
secret=self.secret,
has_changed=self.has_changed,
foreign_data=self.foreign_data,
)
return o
|
return a copy
|
def init_not_msvc(self):
paths = os.environ.get('LD_LIBRARY_PATH', '').split(':')
for gomp in ('libgomp.so', 'libgomp.dylib'):
if cxx is None:
continue
cmd = [cxx, '-print-file-name=' + gomp]
try:
path = os.path.dirname(check_output(cmd).strip())
if path:
paths.append(path)
except OSError:
pass
libgomp_path = find_library("gomp")
for path in paths:
if libgomp_path:
break
path = path.strip()
if os.path.isdir(path):
libgomp_path = find_library(os.path.join(str(path), "libgomp"))
if not libgomp_path:
raise ImportError("I can't find a shared library for libgomp,"
" you may need to install it or adjust the "
"LD_LIBRARY_PATH environment variable.")
else:
self.libomp = ctypes.CDLL(libgomp_path)
self.version = 45
|
Find the OpenMP library and try to load it using the ctypes interface.
|
def _logoutclient(self, useruuid, clientuuid):
self.log("Cleaning up client of logged in user.", lvl=debug)
try:
self._users[useruuid].clients.remove(clientuuid)
if len(self._users[useruuid].clients) == 0:
self.log("Last client of user disconnected.", lvl=verbose)
self.fireEvent(userlogout(useruuid, clientuuid))
del self._users[useruuid]
self._clients[clientuuid].useruuid = None
except Exception as e:
self.log("Error during client logout: ", e, type(e),
clientuuid, useruuid, lvl=error,
exc=True)
|
Log out a client and possibly associated user
|
def download(self, file_to_be_downloaded, perform_download=True, download_to_path=None):
response = self.get(
'/path/data/', file_to_be_downloaded, raw=False)
if not perform_download:
return response
if not download_to_path:
download_to_path = file_to_be_downloaded.split("/")[-1]
o = open(download_to_path, 'wb')
return shutil.copyfileobj(response.raw, o)
|
file_to_be_downloaded is a file-like object that has already
been uploaded; you cannot download folders
|
async def _wait(self, entity_type, entity_id, action, predicate=None):
q = asyncio.Queue(loop=self._connector.loop)
async def callback(delta, old, new, model):
await q.put(delta.get_id())
self.add_observer(callback, entity_type, action, entity_id, predicate)
entity_id = await q.get()
return self.state._live_entity_map(entity_type).get(entity_id)
|
Block the calling routine until a given action has happened to the
given entity
:param entity_type: The entity's type.
:param entity_id: The entity's id.
:param action: the type of action (e.g., 'add', 'change', or 'remove')
:param predicate: optional callable that must take as an
argument a delta, and must return a boolean, indicating
whether the delta contains the specific action we're looking
for. For example, you might check to see whether a 'change'
has a 'completed' status. See the _Observer class for details.
|
def send_status_message(self, object_id, status):
try:
body = json.dumps({
'id': object_id,
'status': status
})
self.status_queue.send_message(
MessageBody=body,
MessageGroupId='job_status',
MessageDeduplicationId=get_hash((object_id, status))
)
return True
except Exception as ex:
print(ex)
return False
|
Send a message to the `status_queue` to update a job's status.
Returns `True` if the message was sent, else `False`
Args:
object_id (`str`): ID of the job that was executed
status (:obj:`SchedulerStatus`): Status of the job
Returns:
`bool`
|
def plotprofMulti(self, ini, end, delta, what_specie, xlim1, xlim2,
ylim1, ylim2, symbol=None):
plotType=self._classTest()
if plotType=='se':
for i in range(ini,end+1,delta):
step = int(i)
if symbol==None:
symbol_dummy = '-'
for j in range(len(what_specie)):
self.plot_prof_1(step,what_specie[j],xlim1,xlim2,ylim1,ylim2,symbol_dummy)
else:
for j in range(len(what_specie)):
symbol_dummy = symbol[j]
self.plot_prof_1(step,what_specie[j],xlim1,xlim2,ylim1,ylim2,symbol_dummy)
filename = str('%03d' % step)+'_test.png'
pl.savefig(filename, dpi=400)
print('wrote file ', filename)
pl.clf()
else:
print('This method is not supported for '+str(self.__class__))
return
|
create a movie with mass fractions vs mass coordinate between
xlim1 and xlim2, ylim1 and ylim2. Only works with instances of
se.
Parameters
----------
ini : integer
Initial model i.e. cycle.
end : integer
Final model i.e. cycle.
delta : integer
Sparsity factor of the frames.
what_specie : list
Array with species in the plot.
xlim1, xlim2 : integer or float
Mass coordinate range.
ylim1, ylim2 : integer or float
Mass fraction coordinate range.
symbol : list, optional
Array indicating which symbol you want to use. Must be of
the same len of what_specie array. The default is None.
|
def nb_fit(data, P_init=None, R_init=None, epsilon=1e-8, max_iters=100):
means = data.mean(1)
variances = data.var(1)
if (means > variances).any():
raise ValueError("For NB fit, means must be less than variances")
genes, cells = data.shape
P = 1.0 - means/variances
R = means*(1-P)/P
for i in range(genes):
result = minimize(nb_ll_row, [P[i], R[i]], args=(data[i,:],),
bounds = [(0, 1), (epsilon, None)])
params = result.x
P[i] = params[0]
R[i] = params[1]
return P,R
|
Fits the NB distribution to data using method of moments.
Args:
data (array): genes x cells
P_init (array, optional): NB success prob param - genes x 1
R_init (array, optional): NB stopping param - genes x 1
Returns:
P, R - fit to data
|
def all_fields(self):
return [field
for container in FieldsContainer.class_container.values()
for field in getattr(self, container)]
|
A list with all the fields contained in this object.
|
def _multiplyThroughputs(self):
index = 0
for component in self.components:
if component.throughput != None:
break
index += 1
return BaseObservationMode._multiplyThroughputs(self, index)
|
Overrides base class in order to deal with opaque components.
|
def _validate_response(self, response, message, exclude_code=None):
if 'code' in response and response['code'] >= 2000:
if exclude_code is not None and response['code'] == exclude_code:
return
raise Exception("{0}: {1} ({2})".format(
message, response['msg'], response['code']))
|
Validate an API server response
:param dict response: server response to check
:param str message: error message to raise
:param int exclude_code: error code to exclude from error handling
:return:
:raises Exception: on error
|
def get_all_suppliers(self, params=None):
if not params:
params = {}
return self._iterate_through_pages(
get_function=self.get_suppliers_per_page,
resource=SUPPLIERS,
**{'params': params}
)
|
Get all suppliers
This will iterate over all pages until it gets all elements.
So if the rate limit is exceeded it will throw an Exception and you will get nothing
:param params: search params
:return: list
|
def receiver(self):
try:
return current_webhooks.receivers[self.receiver_id]
except KeyError:
raise ReceiverDoesNotExist(self.receiver_id)
|
Return registered receiver.
|
def paranoidclass(cls):
for methname in dir(cls):
meth = getattr(cls, methname)
if U.has_fun_prop(meth, "argtypes"):
argtypes = U.get_fun_prop(meth, "argtypes")
if "self" in argtypes and isinstance(argtypes["self"], T.Self):
argtypes["self"] = T.Generic(cls)
U.set_fun_prop(meth, "argtypes", argtypes)
if U.has_fun_prop(meth, "returntype"):
if isinstance(U.get_fun_prop(meth, "returntype"), T.Self):
U.set_fun_prop(meth, "returntype", T.Generic(cls))
return cls
|
A class decorator to specify that class methods contain paranoid decorators.
Example usage:
| @paranoidclass
| class Point:
| def __init__(self, x, y):
| ...
| @returns(Number)
| def distance_from_zero():
| ...
|
def bivrandom (x0, y0, sx, sy, cxy, size=None):
from numpy.random import multivariate_normal as mvn
p0 = np.asarray ([x0, y0])
cov = np.asarray ([[sx**2, cxy],
[cxy, sy**2]])
return mvn (p0, cov, size)
|
Compute random values distributed according to the specified bivariate
distribution.
Inputs:
* x0: the center of the x distribution (i.e. its intended mean)
* y0: the center of the y distribution
* sx: standard deviation (not variance) of x var
* sy: standard deviation (not variance) of y var
* cxy: covariance (not correlation coefficient) of x and y
* size (optional): the number of values to compute
Returns: array of shape (size, 2); or just (2, ), if size was not
specified.
The bivariate parameters of the generated data are approximately
recoverable by calling 'databiv(retval)'.
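A short usage sketch (parameter values are illustrative):
>>> import numpy as np
>>> pts = bivrandom(0.0, 0.0, 1.0, 2.0, 0.5, size=1000)
>>> pts.shape
(1000, 2)
>>> bivrandom(0.0, 0.0, 1.0, 2.0, 0.5).shape   # no size given
(2,)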
|
def add_key(self, ref, mode="shared"):
if ref not in self.keys:
response = self.request("client_add_key %s -%s" % (ref, mode))
if "success" not in response:
return None
self.keys.append(ref)
return ref
|
Add a key.
(ref)
Return key name or None on error
|
def reg_to_lex(conditions, wildcards):
aliases = defaultdict(set)
n_conds = []
for i, _ in enumerate(conditions):
n_cond = []
for char in conditions[i]:
if char in wildcards:
alias = '%s_%s' % (char, len(aliases[char]))
aliases[char].add(alias)
n_cond.append(make_token(alias, reg=wildcards[char]))
else:
n_cond.append(~Literal(char))
n_cond.append(Eos())
n_conds.append(reduce(operator.and_, n_cond) > make_dict)
return tuple(n_conds), aliases
|
Transform a regular expression into a LEPL object.
Replace the wildcards in the conditions by LEPL elements,
like xM will be replaced by Any() & 'M'.
In case of multiple same wildcards (like xMx), aliases
are created to allow the regexp to compile, like
Any() > 'x_0' & 'M' & Any() > 'x_1', and we check that the matched values
for all aliases like x_0, x_1 are the same.
|
def commands(cls):
cmds = [cmd[4:] for cmd in dir(cls) if cmd.startswith('cmd_')]
return cmds
|
Returns a list of all methods that start with ``cmd_``.
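A small usage sketch (the class and command names are hypothetical):
>>> class MyTool:
...     def cmd_build(self): pass
...     def cmd_test(self): pass
>>> commands(MyTool)   # calling the helper directly with the class
['build', 'test']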
|
def make_value(self, value):
value = self.unicode_escape_sequence_fix(value)
if value and value[0] in ['"', "'"]:
return self.remove_quotes(value)
try:
return int(value)
except ValueError:
pass
try:
return float(value)
except ValueError:
pass
if value.lower() == "true":
return True
if value.lower() == "false":
return False
if value.lower() == "none":
return None
return value
|
Converts to actual value, or remains as string.
|
def get_focus_widget(self):
current_widget = QApplication.focusWidget()
if current_widget is None:
return False
if current_widget.objectName() == "Script_Editor_Output_plainTextEdit" or \
isinstance(current_widget, Editor):
return current_widget
|
Returns the Widget with focus.
:return: Widget with focus.
:rtype: QWidget
|
def stdchannel_redirected(stdchannel):
try:
s = io.StringIO()
old = getattr(sys, stdchannel)
setattr(sys, stdchannel, s)
yield s
finally:
setattr(sys, stdchannel, old)
|
Redirects stdout or stderr to a StringIO object. As of python 3.4, there is a
standard library contextmanager for this, but backwards compatibility!
|
def split_locale(loc):
def split(st, char):
split_st = st.split(char, 1)
if len(split_st) == 1:
split_st.append('')
return split_st
comps = {}
work_st, comps['charmap'] = split(loc, ' ')
work_st, comps['modifier'] = split(work_st, '@')
work_st, comps['codeset'] = split(work_st, '.')
comps['language'], comps['territory'] = split(work_st, '_')
return comps
|
Split a locale specifier. The general format is
language[_territory][.codeset][@modifier] [charmap]
For example:
ca_ES.UTF-8@valencia UTF-8
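For example, splitting the specifier shown above:
>>> comps = split_locale('ca_ES.UTF-8@valencia UTF-8')
>>> comps['language'], comps['territory'], comps['codeset'], comps['modifier'], comps['charmap']
('ca', 'ES', 'UTF-8', 'valencia', 'UTF-8')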
|
def __GetElementTree(protocol, server, port, path, sslContext):
if protocol == "https":
kwargs = {"context": sslContext} if sslContext else {}
conn = http_client.HTTPSConnection(server, port=port, **kwargs)
elif protocol == "http":
conn = http_client.HTTPConnection(server, port=port)
else:
raise Exception("Protocol " + protocol + " not supported.")
conn.request("GET", path)
response = conn.getresponse()
if response.status == 200:
try:
tree = ElementTree.fromstring(response.read())
return tree
except ExpatError:
pass
return None
|
Private method that returns a root from ElementTree for a remote XML document.
@param protocol: What protocol to use for the connection (e.g. https or http).
@type protocol: string
@param server: Which server to connect to.
@type server: string
@param port: Port
@type port: int
@param path: Path
@type path: string
@param sslContext: SSL Context describing the various SSL options. It is only
supported in Python 2.7.9 or higher.
@type sslContext: SSL.Context
|
def _repr_html_(self, **kwargs):
from jinja2 import Template
from markdown import markdown as convert_markdown
extensions = [
'markdown.extensions.extra',
'markdown.extensions.admonition'
]
return convert_markdown(self.markdown, extensions)
|
Produce HTML for Jupyter Notebook
|
def get_hosting_device_configuration(self, context, id):
admin_context = context.is_admin and context or context.elevated()
agents = self._dmplugin.get_cfg_agents_for_hosting_devices(
admin_context, [id], admin_state_up=True, schedule=True)
if agents:
cctxt = self.client.prepare(server=agents[0].host)
return cctxt.call(context, 'get_hosting_device_configuration',
payload={'hosting_device_id': id})
|
Fetch configuration of hosting device with id.
The configuration agent should respond with the running config of
the hosting device.
|
def _read_pug_fixed_grid(projection, distance_multiplier=1.0):
a = projection.semi_major_axis
h = projection.perspective_point_height
b = projection.semi_minor_axis
lon_0 = projection.longitude_of_projection_origin
sweep_axis = projection.sweep_angle_axis[0]
proj_dict = {'a': float(a) * distance_multiplier,
'b': float(b) * distance_multiplier,
'lon_0': float(lon_0),
'h': float(h) * distance_multiplier,
'proj': 'geos',
'units': 'm',
'sweep': sweep_axis}
return proj_dict
|
Read from recent PUG format, where axes are in meters
|
def make_module_class(name):
source = sys.modules[name]
members = vars(source)
is_descriptor = lambda x: not isinstance(x, type) and hasattr(x, '__get__')
descriptors = {k: v for (k, v) in members.items() if is_descriptor(v)}
members = {k: v for (k, v) in members.items() if k not in descriptors}
descriptors['__source'] = source
target = type(name, (types.ModuleType,), descriptors)(name)
target.__dict__.update(members)
sys.modules[name] = target
|
Takes the module referenced by name and makes it a full class.
|
def rollback(name, database=None, directory=None, verbose=None):
router = get_router(directory, database, verbose)
router.rollback(name)
|
Rollback a migration with given name.
|
def new(self, boot_system_id):
if self._initialized:
raise Exception('Boot Record already initialized')
self.boot_system_identifier = boot_system_id.ljust(32, b'\x00')
self.boot_identifier = b'\x00' * 32
self.boot_system_use = b'\x00' * 197
self._initialized = True
|
A method to create a new Boot Record.
Parameters:
boot_system_id - The system identifier to associate with this Boot
Record.
Returns:
Nothing.
|
def upload_sequence_fileobj(file_obj, file_name, fields, retry_fields, session, samples_resource):
try:
_direct_upload(file_obj, file_name, fields, session, samples_resource)
sample_id = fields["sample_id"]
except RetryableUploadException:
logging.error("{}: Connectivity issue, trying direct upload...".format(file_name))
file_obj.seek(0)
try:
retry_fields = samples_resource.init_multipart_upload(retry_fields)
except requests.exceptions.HTTPError as e:
raise_api_error(e.response, state="init")
except requests.exceptions.ConnectionError:
raise_connectivity_error(file_name)
s3_upload = _s3_intermediate_upload(
file_obj,
file_name,
retry_fields,
session,
samples_resource._client._root_url + retry_fields["callback_url"],
)
sample_id = s3_upload.get("sample_id", "<UUID not yet assigned>")
logging.info("{}: finished as sample {}".format(file_name, sample_id))
return sample_id
|
Uploads a single file-like object to the One Codex server via either fastx-proxy or directly
to S3.
Parameters
----------
file_obj : `FASTXInterleave`, `FilePassthru`, or a file-like object
A wrapper around a pair of fastx files (`FASTXInterleave`) or a single fastx file. In the
case of paired files, they will be interleaved and uploaded uncompressed. In the case of a
single file, it will simply be passed through (`FilePassthru`) to One Codex, compressed
or otherwise. If a file-like object is given, its mime-type will be sent as 'text/plain'.
file_name : `string`
The file_name you wish to associate this fastx file with at One Codex.
fields : `dict`
Additional data fields to include as JSON in the POST. Must include 'sample_id' and
'upload_url' at a minimum.
retry_fields : `dict`
Metadata sent to `init_multipart_upload` in the case that the upload via fastx-proxy fails.
session : `requests.Session`
Connection to One Codex API.
samples_resource : `onecodex.models.Samples`
Wrapped potion-client object exposing `init_upload` and `confirm_upload` routes to mainline.
Raises
------
UploadException
In the case of a fatal exception during an upload.
Returns
-------
`string` containing sample ID of newly uploaded file.
|
def is_empty(self):
while self.pq:
if self.pq[0][1] != self.INVALID:
return False
else:
_, _, element = heapq.heappop(self.pq)
if element in self.element_finder:
del self.element_finder[element]
return True
|
Determines if the priority queue has any elements.
Performs removal of any elements that were "marked-as-invalid".
:returns: true iff the priority queue has no elements.
|
def help_version(self):
if (len(self.args) == 1 and self.args[0] in ["-h", "--help"] and
self.args[1:] == []):
options()
elif (len(self.args) == 1 and self.args[0] in ["-v", "--version"] and
self.args[1:] == []):
prog_version()
else:
usage("")
|
Help and version info
|
def todate(val):
if not val:
raise ValueError("Value not provided")
if isinstance(val, datetime):
return val.date()
elif isinstance(val, date):
return val
else:
try:
ival = int(val)
sval = str(ival)
if len(sval) == 8:
return yyyymmdd2date(val)
elif len(sval) == 5:
return juldate2date(val)
else:
raise ValueError
except Exception:
try:
return date_from_string(val).date()
except Exception:
raise ValueError("Could not convert %s to date" % val)
|
Convert val to a datetime.date instance by trying several
conversion algorithms.
If all of them fail it raises a ValueError exception.
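A short usage sketch (the date values are illustrative; integer and string inputs rely on the
yyyymmdd2date, juldate2date and date_from_string helpers not shown here):
>>> from datetime import date, datetime
>>> todate(datetime(2024, 1, 15, 12, 30))
datetime.date(2024, 1, 15)
>>> todate(date(2024, 1, 15))
datetime.date(2024, 1, 15)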
|
def log(self, *lines):
if getattr(self, "debug", False):
print(dt.datetime.now().time(), end=' ')
for line in lines:
print(line, end=' ')
print()
|
Will print out the lines in the console if debug is enabled for the
specific sprite
|
def search_get_class_names(cls):
if hasattr(cls, '_class_key'):
class_names = []
for n in cls._class_key():
class_names.append(n)
return class_names
else:
return [cls.__name__]
|
Returns class names for use in document indexing.
|
def check_platform_variables(self, ds):
platform_names = getattr(ds, 'platform', '').split(' ')
val = all(platform_name in ds.variables for platform_name in platform_names)
msgs = []
if not val:
msgs = [('The value of "platform" global attribute should be set to another variable '
'which contains the details of the platform. If multiple platforms are '
'involved, a variable should be defined for each platform and referenced '
'from the geophysical variable in a space separated string.')]
return [Result(BaseCheck.HIGH, val, 'platform variables', msgs)]
|
The value of platform attribute should be set to another variable which
contains the details of the platform. There can be multiple platforms
involved depending on if all the instances of the featureType in the
collection share the same platform or not. If multiple platforms are
involved, a variable should be defined for each platform and referenced
from the geophysical variable in a space separated string.
:param netCDF4.Dataset ds: An open netCDF dataset
|
def _check_bios_resource(self, properties=[]):
system = self._get_host_details()
if ('links' in system['Oem']['Hp'] and
'BIOS' in system['Oem']['Hp']['links']):
bios_uri = system['Oem']['Hp']['links']['BIOS']['href']
status, headers, bios_settings = self._rest_get(bios_uri)
if status >= 300:
msg = self._get_extended_error(bios_settings)
raise exception.IloError(msg)
for property in properties:
if property not in bios_settings:
msg = ('BIOS Property "' + property + '" is not'
' supported on this system.')
raise exception.IloCommandNotSupportedError(msg)
return headers, bios_uri, bios_settings
else:
msg = ('"links/BIOS" section in ComputerSystem/Oem/Hp'
' does not exist')
raise exception.IloCommandNotSupportedError(msg)
|
Check if the bios resource exists.
|
def _add_arg_datasets(datasets, args):
for dataset in args:
if not isinstance(dataset, (tuple, GentyArgs)):
dataset = (dataset,)
if isinstance(dataset, GentyArgs):
dataset_strings = dataset
else:
dataset_strings = [format_arg(data) for data in dataset]
test_method_suffix = ", ".join(dataset_strings)
datasets[test_method_suffix] = dataset
|
Add data sets of the given args.
:param datasets:
The dict where to accumulate data sets.
:type datasets:
`dict`
:param args:
Tuple of unnamed data sets.
:type args:
`tuple` of varies
|
def validate_config_has_one_of(config, one_of_keys):
intersection = set(config).intersection(one_of_keys)
if len(intersection) > 1:
raise Exception('Only one of the values in "%s" is needed' % ', '.join(intersection))
if len(intersection) == 0:
raise Exception('One of the values in "%s" is needed' % ', '.join(one_of_keys))
|
Validate a config dictionary to make sure it has one and only one
key in one_of_keys.
Args:
config: the config to validate.
one_of_keys: the list of possible keys that config can have one and only one.
Raises:
Exception if the config does not have any of them, or multiple of them.
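A short usage sketch (keys are hypothetical):
>>> validate_config_has_one_of({'project': 'p1', 'extra': 1}, ['project', 'dataset'])
>>> validate_config_has_one_of({'extra': 1}, ['project', 'dataset'])
Traceback (most recent call last):
...
Exception: One of the values in "project, dataset" is needed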
|
def Proxy(f):
def Wrapped(self, *args):
return getattr(self, f)(*args)
return Wrapped
|
A helper to create a proxy method in a class.
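A small usage sketch (class and method names are hypothetical):
>>> class Greeter:
...     def _hello(self, name):
...         return "hello " + name
...     hi = Proxy('_hello')   # 'hi' forwards to self._hello
>>> Greeter().hi("world")
'hello world'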
|
def _combine(self, other, conn='and'):
f = F()
self_filters = copy.deepcopy(self.filters)
other_filters = copy.deepcopy(other.filters)
if not self.filters:
f.filters = other_filters
elif not other.filters:
f.filters = self_filters
elif conn in self.filters[0]:
f.filters = self_filters
f.filters[0][conn].extend(other_filters)
elif conn in other.filters[0]:
f.filters = other_filters
f.filters[0][conn].extend(self_filters)
else:
f.filters = [{conn: self_filters + other_filters}]
return f
|
OR and AND will create a new F, with the filters from both F
objects combined with the connector `conn`.
|
def clean_weight_files(cls):
deleted = []
for f in cls._files:
try:
os.remove(f)
deleted.append(f)
except FileNotFoundError:
pass
print('Deleted %d weight files' % len(deleted))
cls._files = []
|
Cleans existing weight files.
|