Unnamed: 0 (int64, 0-389k) | code (string, length 26-79.6k) | docstring (string, length 1-46.9k)
---|---|---|
17,000 | def search_value(self, xpath, default=None, single_value=True):
matches = [match.value for match in parse(xpath).find(self.retval)]
if len(matches) == 0:
return default
return matches[0] if single_value is True else matches | Try to find a value in the result
:param str xpath: an XPath filter; see https://github.com/kennknowles/python-jsonpath-rw#jsonpath-syntax
:param any default: default value if not found
:param bool single_value: if True, return only the first match; otherwise return all matches
:return: the value found, or the default |
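A short usage sketch of the lookup this helper wraps, using jsonpath-rw directly; the `retval` payload below is an illustrative assumption.

```python
from jsonpath_rw import parse

retval = {"order": {"items": [{"sku": "A1"}, {"sku": "B2"}]}}
matches = [m.value for m in parse("order.items[*].sku").find(retval)]
print(matches[0])  # 'A1'          -- what single_value=True would return
print(matches)     # ['A1', 'B2']  -- what single_value=False would return
```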
17,001 | def visit_Call(self, node):
node = self.generic_visit(node)
if isinstance(node.func, ast.Attribute):
if node.func.attr in methods:
obj = lhs = node.func.value
while isinstance(obj, ast.Attribute):
obj = obj.value
is_not_module = (not isinstance(obj, ast.Name) or
obj.id not in self.imports)
if is_not_module:
self.update = True
node.args.insert(0, lhs)
mod = methods[node.func.attr][0]
self.to_import.add(mangle(mod[0]))
node.func = reduce(
lambda v, o: ast.Attribute(v, o, ast.Load()),
mod[1:] + (node.func.attr,),
ast.Name(mangle(mod[0]), ast.Load(), None)
)
if node.func.attr in methods or node.func.attr in functions:
def rec(path, cur_module):
err = "Function path is chained attributes and name"
assert isinstance(path, (ast.Name, ast.Attribute)), err
if isinstance(path, ast.Attribute):
new_node, cur_module = rec(path.value, cur_module)
new_id, mname = self.renamer(path.attr, cur_module)
return (ast.Attribute(new_node, new_id, ast.Load()),
cur_module[mname])
else:
new_id, mname = self.renamer(path.id, cur_module)
if mname not in cur_module:
raise PythranSyntaxError(
"Unbound identifier ".format(mname), node)
return (ast.Name(new_id, ast.Load(), None),
cur_module[mname])
node.func.value, _ = rec(node.func.value, MODULES)
self.update = True
return node | Transform call site to have normal function call.
Examples
--------
For methods:
>> a = [1, 2, 3]
>> a.append(1)
Becomes
>> __list__.append(a, 1)
For functions:
>> __builtin__.dict.fromkeys([1, 2, 3])
Becomes
>> __builtin__.__dict__.fromkeys([1, 2, 3]) |
17,002 | def removeXmlElement(name, directory, file_pattern, logger=None):
for path, dirs, files in os.walk(os.path.abspath(directory)):
for filename in fnmatch.filter(files, file_pattern):
filepath = os.path.join(path, filename)
remove_xml_element_file(name, filepath) | Recursively walk a directory and remove XML elements |
17,003 | def get_exchange_group_info(self, symprec=1e-2, angle_tolerance=5.0):
structure = self.get_structure_with_spin()
return structure.get_space_group_info(
symprec=symprec, angle_tolerance=angle_tolerance
) | Returns the information on the symmetry of the Hamiltonian
describing the exchange energy of the system, taking into
account relative direction of magnetic moments but not their
absolute direction.
This is not strictly accurate (e.g. some/many atoms will
have zero magnetic moments), but defining symmetry this
way is a useful way of keeping track of distinct magnetic
orderings within pymatgen.
:param symprec: same as SpacegroupAnalyzer
:param angle_tolerance: same as SpacegroupAnalyzer
:return: spacegroup_symbol, international_number |
17,004 | def _validate_slices_form_uniform_grid(slice_datasets):
invariant_properties = [
,
,
,
,
,
,
,
,
,
,
]
for property_name in invariant_properties:
_slice_attribute_equal(slice_datasets, property_name)
_validate_image_orientation(slice_datasets[0].ImageOrientationPatient)
_slice_ndarray_attribute_almost_equal(slice_datasets, , 1e-5)
slice_positions = _slice_positions(slice_datasets)
_check_for_missing_slices(slice_positions) | Perform various data checks to ensure that the list of slices forms an
evenly-spaced grid of data.
Some of these checks are probably not required if the data follows the
DICOM specification, however it seems pertinent to check anyway. |
17,005 | def CWDE(cpu):
bit = Operators.EXTRACT(cpu.AX, 15, 1)
cpu.EAX = Operators.SEXTEND(cpu.AX, 16, 32)
cpu.EDX = Operators.SEXTEND(bit, 1, 32) | Converts word to doubleword.
::
EAX = sign-extend of AX.
:param cpu: current CPU. |
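A plain-Python illustration of the sign extension CWDE performs (this is a sketch, not Manticore's `Operators` API): the 16-bit value in AX is widened to 32 bits by replicating its sign bit.

```python
def sextend(value, from_bits, to_bits):
    # Interpret `value` as a signed from_bits integer, then re-wrap it to to_bits.
    value &= (1 << from_bits) - 1
    if value & (1 << (from_bits - 1)):
        value -= 1 << from_bits
    return value & ((1 << to_bits) - 1)

assert sextend(0x8000, 16, 32) == 0xFFFF8000  # negative AX -> upper half all ones
assert sextend(0x1234, 16, 32) == 0x00001234  # positive AX -> zero-extended
```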
17,006 | def make_multisig_segwit_wallet( m, n ):
pks = []
for i in xrange(0, n):
pk = BitcoinPrivateKey(compressed=True).to_wif()
pks.append(pk)
return make_multisig_segwit_info(m, pks) | Create a bundle of information
that can be used to generate an
m-of-n multisig witness script. |
17,007 | def diff(candidate, running, *models):
*
if isinstance(models, tuple) and isinstance(models[0], list):
models = models[0]
first = _get_root_object(models)
first.load_dict(candidate)
second = _get_root_object(models)
second.load_dict(running)
return napalm_yang.utils.diff(first, second) | Returns the difference between two configuration entities structured
according to the YANG model.
.. note::
This function is recommended to be used mostly as a state helper.
candidate
First model to compare.
running
Second model to compare.
models
A list of models to be used when comparing.
CLI Example:
.. code-block:: bash
salt '*' napalm_yang.diff {} {} models.openconfig_interfaces
Output Example:
.. code-block:: python
{
"interfaces": {
"interface": {
"both": {
"Port-Channel1": {
"config": {
"mtu": {
"first": "0",
"second": "9000"
}
}
}
},
"first_only": [
"Loopback0"
],
"second_only": [
"Loopback1"
]
}
}
} |
17,008 | def read_stb(library, session):
status = ViUInt16()
ret = library.viReadSTB(session, byref(status))
return status.value, ret | Reads a status byte of the service request.
Corresponds to viReadSTB function of the VISA library.
:param library: the visa library wrapped by ctypes.
:param session: Unique logical identifier to a session.
:return: Service request status byte, return value of the library call.
:rtype: int, :class:`pyvisa.constants.StatusCode` |
17,009 | def transform(self, X):
msg = " should be a 1-dimensional array with length ."
if not dask.is_dask_collection(X):
return super(HashingVectorizer, self).transform(X)
if isinstance(X, db.Bag):
bag2 = X.map_partitions(_transform, estimator=self)
objs = bag2.to_delayed()
arrs = [
da.from_delayed(obj, (np.nan, self.n_features), self.dtype)
for obj in objs
]
result = da.concatenate(arrs, axis=0)
elif isinstance(X, dd.Series):
result = X.map_partitions(_transform, self)
elif isinstance(X, da.Array):
chunks = ((np.nan,) * X.numblocks[0], (self.n_features,))
if X.ndim == 1:
result = X.map_blocks(
_transform, estimator=self, dtype="f8", chunks=chunks, new_axis=1
)
else:
raise ValueError(msg)
else:
raise ValueError(msg)
return result | Transform a sequence of documents to a document-term matrix.
Transformation is done in parallel, and correctly handles dask
collections.
Parameters
----------
X : dask.Bag of raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
Returns
-------
X : dask.array.Array, shape = (n_samples, self.n_features)
Document-term matrix. Each block of the array is a scipy sparse
matrix.
Notes
-----
The returned dask Array is composed of scipy sparse matrices. If you need
to compute on the result immediately, you may need to convert the individual
blocks to ndarrays or pydata/sparse matrices.
>>> import sparse
>>> X.map_blocks(sparse.COO.from_scipy_sparse, dtype=X.dtype) # doctest: +SKIP
See the :doc:`examples/text-vectorization` for more. |
17,010 | def get_runs_by_id(self, config_id):
d = self.data[config_id]
runs = []
for b in d.results.keys():
try:
err_logs = d.exceptions.get(b, None)
if d.results[b] is None:
r = Run(config_id, b, None, None , d.time_stamps[b], err_logs)
else:
r = Run(config_id, b, d.results[b][], d.results[b][] , d.time_stamps[b], err_logs)
runs.append(r)
except:
raise
runs.sort(key=lambda r: r.budget)
return(runs) | returns a list of runs for a given config id
The runs are sorted by ascending budget, so '-1' will give
the longest run for this config. |
17,011 | def target_query(plugin, port, location):
return ((r.row[PLUGIN_NAME_KEY] == plugin) &
(r.row[PORT_FIELD] == port) &
(r.row[LOCATION_FIELD] == location)) | prepared ReQL for target |
17,012 | def _verify_signed_jwt_with_certs(
jwt, time_now, cache,
cert_uri=_DEFAULT_CERT_URI):
segments = jwt.split()
if len(segments) != 3:
raise _AppIdentityError(
)
signed = % (segments[0], segments[1])
signature = _urlsafe_b64decode(segments[2])
lsignature = long(signature.encode(), 16)
header_body = _urlsafe_b64decode(segments[0])
try:
header = json.loads(header_body)
except:
raise _AppIdentityError("CanalgRS256Unexpected encryption algorithm: %ralgs not safe to do that without first checking the signature.
certs = _get_cached_certs(cert_uri, cache)
if certs is None:
raise _AppIdentityError(
)
if not _CRYPTO_LOADED:
raise _AppIdentityError(t verify id_token signature. See http://www.pycrypto.org for more information on pycrypto.signedkeyvaluesmodulusexponent%064xsignedsignaturesignedSignature verification error: %s; continuing with the next cert.Invalid token signaturet parse token body")
iat = parsed.get()
if iat is None:
raise _AppIdentityError()
earliest = iat - _CLOCK_SKEW_SECS
exp = parsed.get()
if exp is None:
raise _AppIdentityError()
if exp >= time_now + _MAX_TOKEN_LIFETIME_SECS:
raise _AppIdentityError()
latest = exp + _CLOCK_SKEW_SECS
if time_now < earliest:
raise _AppIdentityError( %
(time_now, earliest))
if time_now > latest:
raise _AppIdentityError( %
(time_now, latest))
return parsed | Verify a JWT against public certs.
See http://self-issued.info/docs/draft-jones-json-web-token.html.
The PyCrypto library included with Google App Engine is severely limited and
so you have to use it very carefully to verify JWT signatures. The first
issue is that the library can't read X.509 files, so we make a call to a
special URI that has the public cert in modulus/exponent form in JSON.
The second issue is that the RSA.verify method doesn't work, at least for
how the JWT tokens are signed, so we have to manually verify the signature
of the JWT, which means hashing the signed part of the JWT and comparing
that to the signature that's been encrypted with the public key.
Args:
jwt: string, A JWT.
time_now: The current time, as a long (eg. long(time.time())).
cache: Cache to use (eg. the memcache module).
cert_uri: string, URI to get cert modulus and exponent in JSON format.
Returns:
dict, The deserialized JSON payload in the JWT.
Raises:
_AppIdentityError: if any checks are failed. |
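A small worked sketch of the iat/exp window check at the end of the function, with made-up timestamps; the 300-second skew allowance is an assumed value for illustration.

```python
_CLOCK_SKEW_SECS = 300                      # assumed value for illustration
iat, exp = 1700000000, 1700003600           # issued-at / expiry claims
time_now = 1700000100

earliest = iat - _CLOCK_SKEW_SECS
latest = exp + _CLOCK_SKEW_SECS
assert earliest <= time_now <= latest       # token accepted inside this window
```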
17,013 | def dump(destination, xs, model=None, properties=False, indent=True, **kwargs):
text = dumps(
xs, model=model, properties=properties, indent=indent, **kwargs
)
if hasattr(destination, ):
print(text, file=destination)
else:
with open(destination, ) as fh:
print(text, file=fh) | Serialize Xmrs (or subclass) objects to PENMAN and write to a file.
Args:
destination: filename or file object
xs: iterator of :class:`~delphin.mrs.xmrs.Xmrs` objects to
serialize
model: Xmrs subclass used to get triples
properties: if `True`, encode variable properties
indent: if `True`, adaptively indent; if `False` or `None`,
don't indent; if a non-negative integer N, indent N spaces
per level |
17,014 | def addChild(self,item):
if not isinstance(item,Node):
item = Node(item)
if item in self.children:
return item
self.children.append(item)
item.parents.add(self)
return item | When you add a child to a Node, you are adding yourself as a parent to the child
You cannot have the same node as a child more than once.
If you add a Node, it is used directly. If you add a non-Node item, a new child Node wrapping it is created. Thus you cannot
add a child whose *item* is itself a Node. (You can, however, construct such a node yourself and add it as a child.) |
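A hedged usage sketch, assuming `Node(item)` can be constructed directly (as the method itself does) and that `children`/`parents` start empty.

```python
root = Node("root")
leaf = root.addChild("leaf")    # non-Node item: a new child Node is created
again = root.addChild(leaf)     # adding the same Node again returns it unchanged
assert again is leaf and len(root.children) == 1
assert root in leaf.parents     # the parent link was added for us
```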
17,015 | def length(self):
return sum([shot.length for shot in self.shots if not shot.is_splay]) | Total surveyed cave length, not including splays. |
17,016 | def iter(self, match="*", count=1000):
replace_this = self.key_prefix+":"
for key in self._client.scan_iter(
match="{}:{}".format(self.key_prefix, match), count=count):
yield self._decode(key).replace(replace_this, "", 1) | Iterates the set of keys in :prop:key_prefix in :prop:_client
@match: #str pattern to match after the :prop:key_prefix
@count: a hint for the amount of work to do on each scan call when
retrieving elements from the collection
-> yields redis keys within this instance |
17,017 | def _prepare_data_payload(data):
if not data: return None
res = {}
for key, value in viewitems(data):
if value is None: continue
if isinstance(value, list):
value = stringify_list(value)
elif isinstance(value, dict):
if "__meta" in value and value["__meta"]["schema_name"].endswith("KeyV3"):
value = value["name"]
else:
value = stringify_dict(value)
else:
value = str(value)
res[key] = value
return res | Make a copy of the `data` object, preparing it to be sent to the server.
The data will be sent via x-www-form-urlencoded or multipart/form-data mechanisms. Both of them work with
plain lists of key/value pairs, so this method converts the data into such a format. |
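A simplified sketch of the flattening this performs before form encoding; the real `stringify_list`/`stringify_dict` helpers may format values differently, so the joined output shown is an assumption.

```python
data = {"alpha": 0.5, "ignored": None, "cols": ["a", "b"], "flag": True}
payload = {}
for key, value in data.items():
    if value is None:
        continue                                  # dropped, as in the helper above
    if isinstance(value, list):
        value = ",".join(str(v) for v in value)   # lists become comma-joined strings
    payload[key] = str(value)
print(payload)  # {'alpha': '0.5', 'cols': 'a,b', 'flag': 'True'}
```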
17,018 | def get_banks_by_assessment_taken(self, assessment_taken_id):
mgr = self._get_provider_manager(, local=True)
lookup_session = mgr.get_bank_lookup_session(proxy=self._proxy)
return lookup_session.get_banks_by_ids(
self.get_bank_ids_by_assessment_taken(assessment_taken_id)) | Gets the list of ``Banks`` mapped to an ``AssessmentTaken``.
arg: assessment_taken_id (osid.id.Id): ``Id`` of an
``AssessmentTaken``
return: (osid.assessment.BankList) - list of banks
raise: NotFound - ``assessment_taken_id`` is not found
raise: NullArgument - ``assessment_taken_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.* |
17,019 | def collect_phrases (sent, ranks, spacy_nlp):
tail = 0
last_idx = sent[0].idx - 1
phrase = []
while tail < len(sent):
w = sent[tail]
if (w.word_id > 0) and (w.root in ranks) and ((w.idx - last_idx) == 1):
rl = RankedLexeme(text=w.raw.lower(), rank=ranks[w.root], ids=w.word_id, pos=w.pos.lower(), count=1)
phrase.append(rl)
else:
for text, p in enumerate_chunks(phrase, spacy_nlp):
if p:
id_list = [rl.ids for rl in p]
rank_list = [rl.rank for rl in p]
np_rl = RankedLexeme(text=text, rank=rank_list, ids=id_list, pos="np", count=1)
if DEBUG:
print(np_rl)
yield np_rl
phrase = []
last_idx = w.idx
tail += 1 | iterator for collecting the noun phrases |
17,020 | def mem(args, opts):
dbfile, read1file = args[:2]
readtype = opts.readtype
pl = readtype or "illumina"
pf = op.basename(read1file).split(".")[0]
rg = opts.rg or r"@RG\tID:{0}\tSM:sm\tLB:lb\tPL:{1}".format(pf, pl)
dbfile = check_index(dbfile)
args[0] = dbfile
samfile, _, unmapped = get_samfile(read1file, dbfile,
bam=opts.bam, unmapped=opts.unmapped)
if not need_update(read1file, samfile):
logging.error("`{0}` exists. `bwa mem` already run.".format(samfile))
return "", samfile
cmd = "{} mem".format(opts.bwa)
cmd += " -M -t {0}".format(opts.cpus)
cmd += .format(rg)
if readtype:
cmd += " -x {0}".format(readtype)
cmd += " " + opts.extra
cmd += " ".join(args)
return cmd, samfile | %prog mem database.fasta read1.fq [read2.fq]
Wrapper for `bwa mem`. Output will be read1.sam. |
17,021 | def setmode(mode):
if hasattr(mode, ):
set_custom_pin_mappings(mode)
mode = CUSTOM
assert mode in [BCM, BOARD, SUNXI, CUSTOM]
global _mode
_mode = mode | You must call this method prior to using all other calls.
:param mode: the mode, one of :py:attr:`GPIO.BOARD`, :py:attr:`GPIO.BCM`,
:py:attr:`GPIO.SUNXI`, or a `dict` or `object` representing a custom
pin mapping. |
17,022 | def placeholders(cls,dic):
keys = [str(x) for x in dic]
entete = ",".join(keys)
placeholders = ",".join(cls.named_style.format(x) for x in keys)
entete = f"({entete})"
placeholders = f"({placeholders})"
return entete, placeholders | Placeholders for fields names and value binds |
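A worked example of the two strings this builds, assuming `named_style` is a `':{}'`-style format (the exact style string is an assumption).

```python
dic = {"name": "ada", "age": 36}
keys = [str(k) for k in dic]
entete = "({})".format(",".join(keys))                                  # '(name,age)'
placeholders = "({})".format(",".join(":{}".format(k) for k in keys))   # '(:name,:age)'
print("INSERT INTO users {} VALUES {}".format(entete, placeholders))
```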
17,023 | def _function(self):
start_time = datetime.datetime.now()
if self.settings[] == :
stop_time = start_time + datetime.timedelta(seconds= self.settings[])
elif self.settings[] == :
if self.last_execution is None:
stop_time = start_time
else:
loop_time = start_time - self.last_execution
wait_time = datetime.timedelta(seconds= self.settings[])
if wait_time.total_seconds() <0:
stop_time = start_time
else:
stop_time = start_time + wait_time
else:
raise TypeError()
current_time = start_time
while current_time<stop_time:
if self._abort:
break
current_time = datetime.datetime.now()
time.sleep(1)
self.progress = 100.*(current_time- start_time).total_seconds() / (stop_time - start_time).total_seconds()
self.updateProgress.emit(int(self.progress))
if self.settings[] == :
self.last_execution = None
else:
self.last_execution = start_time | Waits until stopped to keep script live. Gui must handle calling of Toggle_NV function on mouse click. |
17,024 | def requires_lock(function):
def new_lock_requiring_function(self, filename, *args, **kwargs):
if self.owns_lock(filename):
return function(self, filename, *args, **kwargs)
else:
raise RequiresLockException()
return new_lock_requiring_function | Decorator to check if the user owns the required lock.
The first argument must be the filename. |
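A hedged usage sketch of the decorator defined above; the `Repo` class and its `owns_lock` implementation are hypothetical stand-ins, and `requires_lock`/`RequiresLockException` are the objects from the snippet.

```python
class Repo:
    def __init__(self):
        self._locks = set()

    def owns_lock(self, filename):
        return filename in self._locks

    @requires_lock
    def write(self, filename, data):
        return "wrote {} bytes to {}".format(len(data), filename)

repo = Repo()
repo._locks.add("notes.txt")
print(repo.write("notes.txt", "hello"))   # allowed: the lock is held
# repo.write("other.txt", "hi")           # would raise RequiresLockException
```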
17,025 | def set_window_pos_callback(window, cbfun):
window_addr = ctypes.cast(ctypes.pointer(window),
ctypes.POINTER(ctypes.c_long)).contents.value
if window_addr in _window_pos_callback_repository:
previous_callback = _window_pos_callback_repository[window_addr]
else:
previous_callback = None
if cbfun is None:
cbfun = 0
c_cbfun = _GLFWwindowposfun(cbfun)
_window_pos_callback_repository[window_addr] = (cbfun, c_cbfun)
cbfun = c_cbfun
_glfw.glfwSetWindowPosCallback(window, cbfun)
if previous_callback is not None and previous_callback[0] != 0:
return previous_callback[0] | Sets the position callback for the specified window.
Wrapper for:
GLFWwindowposfun glfwSetWindowPosCallback(GLFWwindow* window, GLFWwindowposfun cbfun); |
17,026 | def compute_gaussian_krnl(M):
g = signal.gaussian(M, M // 3., sym=True)
G = np.dot(g.reshape(-1, 1), g.reshape(1, -1))
G[M // 2:, :M // 2] = -G[M // 2:, :M // 2]
G[:M // 2, M // 2:] = -G[:M // 2, M // 2:]
return G | Creates a gaussian kernel following Foote's paper. |
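A quick check of the checkerboard structure the code builds (Foote's novelty kernel): same-quadrant blocks stay positive, cross-quadrant blocks are flipped negative. The sketch uses `scipy.signal.windows.gaussian`, which is where the `signal.gaussian` window called above now lives in recent SciPy releases; `np.outer` is equivalent to the reshape/dot in the snippet.

```python
import numpy as np
from scipy.signal import windows

M = 8
g = windows.gaussian(M, M / 3., sym=True)
G = np.outer(g, g)
G[M // 2:, :M // 2] *= -1   # lower-left quadrant negative
G[:M // 2, M // 2:] *= -1   # upper-right quadrant negative
assert G[0, 0] > 0 and G[-1, -1] > 0 and G[0, -1] < 0 and G[-1, 0] < 0
```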
17,027 | def getsockopt(self, level, optname, *args, **kwargs):
return self._sock.getsockopt(level, optname, *args, **kwargs) | get the value of a given socket option
the values for ``level`` and ``optname`` will usually come from
constants in the standard library ``socket`` module. consult the unix
manpage ``getsockopt(2)`` for more information.
:param level: the level of the requested socket option
:type level: int
:param optname: the specific socket option requested
:type optname: int
:param buflen:
the length of the buffer to use to collect the raw value of the
socket option. if provided, the buffer is returned as a string and
it is not parsed.
:type buflen: int
:returns: a string of the socket option's value |
17,028 | def essl(lw):
w = np.exp(lw - lw.max())
return (w.sum())**2 / np.sum(w**2) | ESS (Effective sample size) computed from log-weights.
Parameters
----------
lw: (N,) ndarray
log-weights
Returns
-------
float
the ESS of weights w = exp(lw), i.e. the quantity
(sum(w))**2 / sum(w**2)
Note
----
The ESS is a popular criterion to determine how *uneven* are the weights.
Its value is in the range [1, N], it equals N when weights are constant,
and 1 if all weights but one are zero. |
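A quick numerical sanity check of the formula (restating the function above): uniform weights give N, one dominant weight gives roughly 1.

```python
import numpy as np

def essl(lw):
    w = np.exp(lw - lw.max())
    return (w.sum()) ** 2 / np.sum(w ** 2)

print(essl(np.zeros(10)))                        # 10.0 -- uniform weights
print(essl(np.array([0., -1e9, -1e9, -1e9])))    # ~1.0 -- one dominant weight
```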
17,029 | def dskmi2(vrtces, plates, finscl, corscl, worksz, voxpsz, voxlsz, makvtl, spxisz):
nv = ctypes.c_int(len(vrtces))
vrtces = stypes.toDoubleMatrix(vrtces)
np = ctypes.c_int(len(plates))
plates = stypes.toIntMatrix(plates)
finscl = ctypes.c_double(finscl)
corscl = ctypes.c_int(corscl)
worksz = ctypes.c_int(worksz)
voxpsz = ctypes.c_int(voxpsz)
voxlsz = ctypes.c_int(voxlsz)
makvtl = ctypes.c_int(makvtl)
spxisz = ctypes.c_int(spxisz)
work = stypes.emptyIntMatrix(2, worksz)
spaixd = stypes.emptyDoubleVector(10)
spaixi = stypes.emptyIntVector(spxisz)
libspice.dskmi2_c(nv, vrtces, np, plates, finscl, corscl, worksz, voxpsz, voxlsz, makvtl, spxisz, work, spaixd, spaixi)
return stypes.cVectorToPython(spaixd), stypes.cVectorToPython(spaixi) | Make spatial index for a DSK type 2 segment. The index is returned
as a pair of arrays, one of type int and one of type
float. These arrays are suitable for use with the DSK type 2
writer dskw02.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dskmi2_c.html
:param vrtces: Vertices
:type vrtces: NxM-Element Array of floats
:param plates: Plates
:type plates: NxM-Element Array of ints
:param finscl: Fine voxel scale
:type finscl: float
:param corscl: Coarse voxel scale
:type corscl: int
:param worksz: Workspace size
:type worksz: int
:param voxpsz: Voxel plate pointer array size
:type voxpsz: int
:param voxlsz: Voxel plate list array size
:type voxlsz: int
:param makvtl: Vertex plate list flag
:type makvtl: bool
:param spxisz: Spatial index integer component size
:type spxisz: int
:return: double precision and integer components of the spatial index of the segment.
:rtype: tuple |
17,030 | def main():
listname = sys.argv[2]
hostname = sys.argv[1]
msg = email.message_from_file(f, Message.Message)
h = HyperArch.HyperArchive(mlist)
sequence = h.sequence
h.processUnixMailbox(f)
f.close()
archive = h.archive
msgno = % sequence
filename = msgno +
filepath = os.path.join(h.basedir, archive, filename)
h.close()
url = % (mlist.GetBaseArchiveURL(), archive, filename)
ext_process(listname, hostname, url, filepath, msg) | This is the mainline.
It first invokes the pipermail archiver to add the message to the archive,
then calls the function above to do whatever with the archived message
after its URL and path are known. |
17,031 | def folderitem(self, obj, item, index):
cat = obj.getCategoryTitle()
cat_order = self.an_cats_order.get(cat)
if self.do_cats:
item["category"] = cat
if (cat, cat_order) not in self.categories:
self.categories.append((cat, cat_order))
category = obj.getCategory()
if category:
title = category.Title()
url = category.absolute_url()
item["Category"] = title
item["replace"]["Category"] = get_link(url, value=title)
calculation = obj.getCalculation()
if calculation:
title = calculation.Title()
url = calculation.absolute_url()
item["Calculation"] = title
item["replace"]["Calculation"] = get_link(url, value=title)
methods = obj.getMethods()
if methods:
links = map(
lambda m: get_link(
m.absolute_url(), value=m.Title(), css_class="link"),
methods)
item["replace"]["Methods"] = ", ".join(links)
maxtime = obj.MaxTimeAllowed
if maxtime:
item["MaxTimeAllowed"] = self.format_maxtime(maxtime)
item["Price"] = self.format_price(obj.Price)
dup_variation = obj.DuplicateVariation
if dup_variation:
item["DuplicateVariation"] = self.format_duplication_variation(
dup_variation)
after_icons = ""
if obj.getAccredited():
after_icons += get_image(
"accredited.png", title=_("Accredited"))
if obj.getAttachmentOption() == "r":
after_icons += get_image(
"attach_reqd.png", title=_("Attachment required"))
if obj.getAttachmentOption() == "n":
after_icons += get_image(
"attach_no.png", title=_("Attachment not permitted"))
if after_icons:
item["after"]["Title"] = after_icons
return item | Service triggered each time an item is iterated in folderitems.
The use of this service prevents the extra-loops in child objects.
:obj: the instance of the class to be foldered
:item: dict containing the properties of the object to be used by
the template
:index: current index of the item |
17,032 | def update_ff(self, ff, mol2=False, force_ff_assign=False):
aff = False
if force_ff_assign:
aff = True
elif not in self.tags:
aff = True
elif not self.tags[]:
aff = True
if aff:
self.assign_force_field(ff, mol2=mol2)
return | Manages assigning the force field parameters.
The aim of this method is to avoid unnecessary assignment of the
force field.
Parameters
----------
ff: BuffForceField
The force field to be used for scoring.
mol2: bool, optional
If true, mol2 style labels will also be used.
force_ff_assign: bool, optional
If true, the force field will be completely reassigned, ignoring the
cached parameters. |
17,033 | def from_json_and_lambdas(cls, file: str, lambdas):
with open(file, "r") as f:
data = json.load(f)
return cls.from_dict(data, lambdas) | Builds a GrFN from a JSON object.
Args:
cls: The class variable for object creation.
file: Filename of a GrFN JSON file.
Returns:
type: A GroundedFunctionNetwork object. |
17,034 | def get_ec_names(ecfile, fasta_names):
df = pd.read_table(ecfile, header=None, names=["ec", "transcripts"])
transcript_groups = [x.split(",") for x in df["transcripts"]]
transcripts = []
for group in transcript_groups:
transcripts.append(":".join([fasta_names[int(x)] for x in group]))
return transcripts | convert equivalence classes to their set of transcripts |
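A tiny worked example of the equivalence-class expansion, with made-up transcript names and an in-memory frame standing in for the `ecfile`.

```python
import pandas as pd

fasta_names = {0: "ENST01", 1: "ENST02", 2: "ENST03"}
df = pd.DataFrame({"ec": [0, 1], "transcripts": ["0,2", "1"]})
groups = [x.split(",") for x in df["transcripts"]]
print([":".join(fasta_names[int(i)] for i in g) for g in groups])
# ['ENST01:ENST03', 'ENST02']
```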
17,035 | def pprint(self, imports=None, prefix="\n ",unknown_value=,
qualify=False, separator=""):
r = Parameterized.pprint(self,imports,prefix,
unknown_value=unknown_value,
qualify=qualify,separator=separator)
classname=self.__class__.__name__
return r.replace(".%s("%classname,".%s.instance("%classname) | Same as Parameterized.pprint, except that X.classname(Y
is replaced with X.classname.instance(Y |
17,036 | def assemble(experiments,
backend=None,
qobj_id=None, qobj_header=None,
shots=1024, memory=False, max_credits=None, seed_simulator=None,
default_qubit_los=None, default_meas_los=None,
schedule_los=None, meas_level=2, meas_return=,
memory_slots=None, memory_slot_size=100, rep_time=None, parameter_binds=None,
config=None, seed=None,
**run_config):
if config:
warnings.warn(
, DeprecationWarning)
run_config = run_config or config
if seed:
warnings.warn(, DeprecationWarning)
seed_simulator = seed_simulator or seed
experiments = experiments if isinstance(experiments, list) else [experiments]
qobj_id, qobj_header, run_config = _parse_run_args(backend, qobj_id, qobj_header,
shots, memory, max_credits, seed_simulator,
default_qubit_los, default_meas_los,
schedule_los, meas_level, meas_return,
memory_slots, memory_slot_size, rep_time,
parameter_binds, **run_config)
if all(isinstance(exp, QuantumCircuit) for exp in experiments):
bound_experiments, run_config = _expand_parameters(circuits=experiments,
run_config=run_config)
return assemble_circuits(circuits=bound_experiments, qobj_id=qobj_id,
qobj_header=qobj_header, run_config=run_config)
elif all(isinstance(exp, Schedule) for exp in experiments):
return assemble_schedules(schedules=experiments, qobj_id=qobj_id,
qobj_header=qobj_header, run_config=run_config)
else:
raise QiskitError("bad input to assemble() function; "
"must be either circuits or schedules") | Assemble a list of circuits or pulse schedules into a Qobj.
This function serializes the payloads, which could be either circuits or schedules,
to create Qobj "experiments". It further annotates the experiment payload with
header and configurations.
Args:
experiments (QuantumCircuit or list[QuantumCircuit] or Schedule or list[Schedule]):
Circuit(s) or pulse schedule(s) to execute
backend (BaseBackend):
If set, some runtime options are automatically grabbed from
backend.configuration() and backend.defaults().
If any other option is explicitly set (e.g. rep_rate), it
will override the backend's.
If any other options is set in the run_config, it will
also override the backend's.
qobj_id (str):
String identifier to annotate the Qobj
qobj_header (QobjHeader or dict):
User input that will be inserted in Qobj header, and will also be
copied to the corresponding Result header. Headers do not affect the run.
shots (int):
Number of repetitions of each circuit, for sampling. Default: 1024
memory (bool):
If True, per-shot measurement bitstrings are returned as well
(provided the backend supports it). For OpenPulse jobs, only
measurement level 2 supports this option. Default: False
max_credits (int):
Maximum credits to spend on job. Default: 10
seed_simulator (int):
Random seed to control sampling, for when backend is a simulator
default_qubit_los (list):
List of default qubit lo frequencies
default_meas_los (list):
List of default meas lo frequencies
schedule_los (None or list[Union[Dict[PulseChannel, float], LoConfig]] or
Union[Dict[PulseChannel, float], LoConfig]):
Experiment LO configurations
meas_level (int):
Set the appropriate level of the measurement output for pulse experiments.
meas_return (str):
Level of measurement data for the backend to return
For `meas_level` 0 and 1:
"single" returns information from every shot.
"avg" returns average measurement output (averaged over number of shots).
memory_slots (int):
Number of classical memory slots used in this job.
memory_slot_size (int):
Size of each memory slot if the output is Level 0.
rep_time (int): repetition time of the experiment in μs.
The delay between experiments will be rep_time.
Must be from the list provided by the device.
parameter_binds (list[dict{Parameter: Value}]):
List of Parameter bindings over which the set of experiments will be
executed. Each list element (bind) should be of the form
{Parameter1: value1, Parameter2: value2, ...}. All binds will be
executed across all experiments, e.g. if parameter_binds is a
length-n list, and there are m experiments, a total of m x n
experiments will be run (one for each experiment/bind pair).
seed (int):
DEPRECATED in 0.8: use ``seed_simulator`` kwarg instead
config (dict):
DEPRECATED in 0.8: use run_config instead
run_config (dict):
extra arguments used to configure the run (e.g. for Aer configurable backends)
Refer to the backend documentation for details on these arguments
Returns:
Qobj: a qobj which can be run on a backend. Depending on the type of input,
this will be either a QasmQobj or a PulseQobj.
Raises:
QiskitError: if the input cannot be interpreted as either circuits or schedules |
17,037 | def format_subpages(self, page, subpages):
if not subpages:
return None
try:
template = self.get_template()
except IOError:
return None
ret = etree.XML(template.render({: page,
: subpages}))
assets = ret.xpath()
for asset in assets:
self.__lookup_asset(asset, self.extension.project, page)
return ret | Banana banana |
17,038 | def Q(self):
return np.array(list(self.center_frequencies)) \
/ np.array(list(self.bandwidths)) | The quality factor of the scale, or, the ratio of center frequencies
to bandwidths |
17,039 | def recursive_build_tree(self, intervals):
center = int(round(len(intervals) / 2))
left = intervals[:center]
right = intervals[center + 1:]
node = intervals[center]
if len(left) > 1:
left = self.recursive_build_tree(left)
elif len(left) == 1:
left = [left[0],[-1,-1,-1,[]],[-1,-1,-1,[]],[]]
else:
left = [-1,-1,-1,[]]
if len(right) > 1:
right = self.recursive_build_tree(right)
elif len(right) == 1:
right = [right[0],[-1,-1,-1,[]],[-1,-1,-1,[]],[]]
else:
right = [-1,-1,-1,[]]
return [node, left, right, []] | recursively builds a BST based on the elementary intervals.
each node is an array: [interval value, left descendent nodes, right descendent nodes, [ids]].
nodes with no descendents have a -1 value in left/right descendent positions.
for example, a node with two empty descendents:
[500, interval value
[-1,-1,-1,['id5','id6']], left descendent
[-1,-1,-1,['id4']], right descendent
['id1',id2',id3']] data values |
17,040 | def destroy(self, force=False):
try:
if not force:
self.join()
finally:
self._dbg(2, )
self.workqueue.destroy()
self.account_manager.reset()
self.completed = 0
self.total = 0
self.failed = 0
self.status_bar_length = 0
self._dbg(2, )
self._del_status_bar() | Like shutdown(), but also removes all accounts, hosts, etc., and
does not restart the queue. In other words, the queue can no longer
be used after calling this method.
:type force: bool
:param force: If True, do not wait until all running jobs have completed. |
17,041 | def lease(self, lease_time, num_tasks, group_by_tag=False, tag=None, client=None):
client = self._require_client(client)
if group_by_tag:
query_params = {"leaseSecs": lease_time, "numTasks": num_tasks, "groupByTag": group_by_tag, "tag": tag}
else:
query_params = {"leaseSecs": lease_time, "numTasks": num_tasks}
response = client.connection.api_request(method=, path=self.path + "/tasks/lease",
query_params=query_params)
for item in response.get(, []):
id = item.get()
task = Task(id, taskqueue=self)
task._set_properties(item)
yield task | Acquires a lease on the topmost N unowned tasks in the specified queue.
:type lease_time: int
:param lease_time: How long to lease this task, in seconds.
:type num_tasks: int
:param num_tasks: The number of tasks to lease.
:type group_by_tag: bool
:param group_by_tag: Optional. When True, returns tasks of the same tag. Specify which tag by using the
tag parameter. If tag is not specified, returns tasks of the same tag as the oldest task in the queue.
:type tag: string
:param tag: Optional. Only specify tag if groupByTag is true. If groupByTag is true and tag is not specified,
the tag is assumed to be that of the oldest task by ETA. I.e., the first available tag.
:type client: :class:`gcloud.taskqueue.client.Client` or ``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the task's taskqueue.
:rtype: :class:`_TaskIterator`.
:returns: An iterator of tasks. |
17,042 | def compare(args):
data = json.load(sys.stdin)
m = RiverManager(args.hosts)
if m.compare(args.name, data):
sys.exit(0)
else:
sys.exit(1) | Compare the extant river with the given name to the passed JSON. The
command will exit with a return code of 0 if the named river is configured
as specified, and 1 otherwise. |
17,043 | def adjustSizeConstraint(self):
widget = self.currentWidget()
if not widget:
return
offw = 4
offh = 4
minw = min(widget.minimumWidth() + offw, MAX_INT)
minh = min(widget.minimumHeight() + offh, MAX_INT)
maxw = min(widget.maximumWidth() + offw, MAX_INT)
maxh = min(widget.maximumHeight() + offh, MAX_INT)
self.setMinimumSize(minw, minh)
self.setMaximumSize(maxw, maxh)
self.setSizePolicy(widget.sizePolicy()) | Adjusts the min/max size based on the current tab. |
17,044 | def delete_last_line(self, file_path=, date=str(datetime.date.today())):
deleted_line = False
if os.path.isfile(file_path):
with open(file_path, ) as file:
reader = csv.reader(file, delimiter=)
for row in reader:
if date == row[0]:
file.seek(0, os.SEEK_END)
pos = file.tell() - 1
while pos > 0 and file.read(1) != "\n":
pos -= 1
file.seek(pos, os.SEEK_SET)
if pos > 0:
file.seek(pos, os.SEEK_SET)
file.truncate()
deleted_line = True
break
if deleted_line: file.write()
file.close() | The following code was modified from
http://stackoverflow.com/a/10289740 &
http://stackoverflow.com/a/17309010
It essentially will check if the total for the current date already
exists in total.csv. If it does, it just removes the last line.
This is so the script could be run more than once a day and not
create many entries in the total.csv file for the same date. |
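A simplified standalone sketch of the same "drop the final line in place" idea, using a single read plus truncate instead of the byte-by-byte backwards seek; edge-case behaviour may differ slightly from the original.

```python
def drop_last_line(path):
    # Truncate the file just after the newline that precedes its last line.
    with open(path, "rb+") as fh:
        data = fh.read()
        cut = data.rstrip(b"\n").rfind(b"\n")
        fh.truncate(cut + 1 if cut != -1 else 0)
```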
17,045 | def get(data_label=None, destination_dir="."):
try:
os.mkdir(destination_dir)
except:
pass
if data_label is None:
data_label=data_urls.keys()
if type(data_label) == str:
data_label = [data_label]
for label in data_label:
data_url = data_urls[label]
if type(data_url) == str:
data_url = [data_url]
data_url.extend([None, None])
data_url = data_url[:3]
url, expected_hash, hash_path = data_url
if hash_path is None:
hash_path = label
try:
computed_hash = checksum(os.path.join(destination_dir, hash_path))
except:
logger.warning("problem with sample_data.checksum()")
computed_hash = None
logger.info("dataset ")
logger.info("expected hash: ")
logger.info("computed hash: ")
if (computed_hash is not None) and (expected_hash == computed_hash):
logger.info("match ok - no download needed")
else:
logger.info("downloading")
downzip(url, destination=destination_dir)
logger.info("finished")
downloaded_hash = checksum(os.path.join(destination_dir, hash_path))
logger.info("downloaded hash: ")
if downloaded_hash != expected_hash:
logger.warning("downloaded hash is different from expected hash\n" + \
"expected hash: \n" + \
"downloaded hash: \n") | Download sample data by data label. Labels can be listed by sample_data.data_urls.keys()
:param data_label: label of data. If it is set to None, all data are downloaded
:param destination_dir: output dir for data
:return: |
17,046 | def are_domains_equal(domain1, domain2):
domain1 = domain1.encode("idna")
domain2 = domain2.encode("idna")
return domain1.lower() == domain2.lower() | Compare two International Domain Names.
:Parameters:
- `domain1`: domains name to compare
- `domain2`: domains name to compare
:Types:
- `domain1`: `unicode`
- `domain2`: `unicode`
:return: True if `domain1` and `domain2` are equal as domain names. |
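A quick illustration of the function above: because both sides are IDNA-encoded before the case-insensitive compare, a Unicode domain and its punycode form compare equal.

```python
print(are_domains_equal(u"bücher.Example", u"xn--bcher-kva.example"))  # True
```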
17,047 | def authorized(resp, remote):
if resp and in resp:
if resp[] == :
return redirect(url_for(,
remote_app=))
elif resp[] in [,
]:
raise OAuthResponseError(
, remote, resp
)
return authorized_signup_handler(resp, remote) | Authorized callback handler for GitHub.
:param resp: The response.
:param remote: The remote application. |
17,048 | def formatday(self, day, weekday):
super(MiniEventCalendar, self).formatday(day, weekday)
now = get_now()
self.day = day
if day == 0:
return
elif now.month == self.mo and now.year == self.yr and day == now.day:
if day in self.count:
self.popover_helper()
return self.wkday_today + self.anch + self.cal_event + self.end
else:
return self.wkday_today + self.anch + self.end
elif day in self.count:
self.popover_helper()
return self.wkday_not_today + self.anch + self.cal_event + self.end
else:
return self.wkday_not_today + self.anch + self.end | Return a day as a table cell. |
17,049 | def load(self):
self._validate()
self._logger.logging_load()
self._csv_reader = csv.reader(
six.StringIO(self.source.strip()),
delimiter=self.delimiter,
quotechar=self.quotechar,
strict=True,
skipinitialspace=True,
)
formatter = CsvTableFormatter(self._to_data_matrix())
formatter.accept(self)
return formatter.to_table_data() | Extract tabular data as |TableData| instances from a CSV text object.
|load_source_desc_text|
:return:
Loaded table data.
|load_table_name_desc|
=================== ========================================
Format specifier Value after the replacement
=================== ========================================
``%(filename)s`` ``""``
``%(format_name)s`` ``"csv"``
``%(format_id)s`` |format_id_desc|
``%(global_id)s`` |global_id|
=================== ========================================
:rtype: |TableData| iterator
:raises pytablereader.DataError:
If the CSV data is invalid.
.. seealso::
:py:func:`csv.reader` |
17,050 | def objects_reachable_from(obj):
found = ObjectGraph.vertex_set()
to_process = [obj]
while to_process:
obj = to_process.pop()
found.add(obj)
for referent in gc.get_referents(obj):
if referent not in found:
to_process.append(referent)
return ObjectGraph(found) | Return graph of objects reachable from *obj* via ``gc.get_referents``.
Returns an :class:`~refcycle.object_graph.ObjectGraph` object holding all
objects reachable from the given one by following the output of
``gc.get_referents``. Note that unlike the
:func:`~refcycle.creators.snapshot` function, the output graph may
include non-gc-tracked objects. |
17,051 | def buy_open_order_quantity(self):
return sum(order.unfilled_quantity for order in self.open_orders if
order.side == SIDE.BUY and order.position_effect == POSITION_EFFECT.OPEN) | [int] total unfilled quantity of open orders on the buy side |
17,052 | def emailclients(self, tag=None, fromdate=None, todate=None):
return self.call("GET", "/stats/outbound/opens/emailclients", tag=tag, fromdate=fromdate, todate=todate) | Gets an overview of the email clients used to open your emails.
This is only recorded when open tracking is enabled for that email. |
17,053 | def start(self, **kwargs):
return self.client.api.start(self.id, **kwargs) | Start this container. Similar to the ``docker start`` command, but
doesn't support attach options.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. |
17,054 | def get_hacr_channels(db=None, gps=None, connection=None,
**conectkwargs):
if connection is None:
if gps is None:
gps = from_gps()
if db is None:
db = get_database_names(gps, gps)[0]
connection = connect(db=db, **conectkwargs)
out = query("select channel from job where monitorName = ")
return [r[0] for r in out] | Return the names of all channels present in the given HACR database |
17,055 | def get(self, cls, id_field, id_val):
cache_key, flag_key = self.get_keys(cls, id_field, id_val)
result = self.get_cached_or_set_flag(keys=(cache_key, flag_key))
if len(result) == 1:
result.append(None)
previous_flag, cached_data = result
if cached_data is not None:
deserialized = self.deserialize(cls, cached_data)
if self.verify(cls, id_field, id_val, deserialized):
return deserialized
else:
if not previous_flag:
obj_serialized = self.serialize(obj)
self.cache(keys=(cache_key, flag_key), args=(obj_serialized,))
return obj | Retrieve an object which `id_field` matches `id_val`. If it exists in
the cache, it will be fetched from Redis. If not, it will be fetched
via the `fetch` method and cached in Redis (unless the cache flag got
invalidated in the meantime). |
17,056 | def disconnect_container_from_network(container, network_id):
log.debug(
%s\%s\,
container, network_id
)
response = _client_wrapper(,
container,
network_id)
log.debug(
%s\%s\,
container, network_id
)
_clear_context()
return True | .. versionadded:: 2015.8.3
Disconnect container from network
container
Container name or ID
network_id
Network name or ID
CLI Examples:
.. code-block:: bash
salt myminion docker.disconnect_container_from_network web-1 mynet
salt myminion docker.disconnect_container_from_network web-1 1f9d2454d0872b68dd9e8744c6e7a4c66b86f10abaccc21e14f7f014f729b2bc |
17,057 | def format_property(name, value):
result = b
utf8_name = utf8_bytes_string(name)
result = b + utf8_name
if value is not None:
utf8_value = utf8_bytes_string(value)
result += b + ( % len(utf8_value)).encode() + b + utf8_value
return result | Format the name and value (both unicode) of a property as a string. |
17,058 | def get_astrom(official=,provisional=):
sql= "SELECT m.* FROM measure m "
sql+="LEFT JOIN object o ON m.provisional LIKE o.provisional "
if not official:
sql+="WHERE o.official IS NULL"
else:
sql+="WHERE o.official LIKE " % ( official, )
sql+=" AND m.provisional LIKE " % ( provisional, )
cfeps.execute(sql)
return mk_dict(cfeps.fetchall(), cfeps.description) | Query the measure table for all measurements of a particular object.
Default is to return all the astrometry in the measure table,
sorted by mjdate |
17,059 | def get_realms_and_credentials(self, uri, http_method=, body=None,
headers=None):
request = self._create_request(uri, http_method=http_method, body=body,
headers=headers)
if not self.request_validator.verify_request_token(
request.resource_owner_key, request):
raise errors.InvalidClientError()
realms = self.request_validator.get_realms(
request.resource_owner_key, request)
return realms, {: request.resource_owner_key} | Fetch realms and credentials for the presented request token.
:param uri: The full URI of the token request.
:param http_method: A valid HTTP verb, i.e. GET, POST, PUT, HEAD, etc.
:param body: The request body as a string.
:param headers: The request headers as a dict.
:returns: A tuple of 2 elements.
1. A list of request realms.
2. A dict of credentials which may be useful in creating the
authorization form. |
17,060 | def _removeSegment(self, segment, preserveCurve, **kwargs):
segment = self.segments[segment]
for point in segment.points:
self.removePoint(point, preserveCurve) | segment will be a valid segment index.
preserveCurve will be a boolean.
Subclasses may override this method. |
17,061 | def try_utf8_decode(value):
if not value or not is_string(value):
return value
elif PYTHON3 and not isinstance(value, bytes):
return value
elif not PYTHON3 and not isinstance(value, unicode):
return value
try:
return value.decode()
except UnicodeDecodeError:
pass
return value | Try to decode an object.
:param value:
:return: |
17,062 | def read_bitpacked_deprecated(file_obj, byte_count, count, width, debug_logging):
raw_bytes = array.array(ARRAY_BYTE_STR, file_obj.read(byte_count)).tolist()
mask = _mask_for_bits(width)
index = 0
res = []
word = 0
bits_in_word = 0
while len(res) < count and index <= len(raw_bytes):
if debug_logging:
logger.debug("index = %d", index)
logger.debug("bits in word = %d", bits_in_word)
logger.debug("word = %s", bin(word))
if bits_in_word >= width:
offset = (bits_in_word - width)
value = (word & (mask << offset)) >> offset
if debug_logging:
logger.debug("offset = %d", offset)
logger.debug("value = %d (%s)", value, bin(value))
res.append(value)
bits_in_word -= width
else:
word = (word << 8) | raw_bytes[index]
index += 1
bits_in_word += 8
return res | Read `count` values from `fo` using the deprecated bitpacking encoding. |
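A worked example of the MSB-first unpacking loop above, inlined on a single byte: 0b10111001 holds the 3-bit values 5 (0b101) and 6 (0b110), with two bits left over.

```python
raw_bytes, width, count = [0b10111001], 3, 2
mask = (1 << width) - 1
word = bits_in_word = index = 0
res = []
while len(res) < count and index <= len(raw_bytes):
    if bits_in_word >= width:
        offset = bits_in_word - width            # take `width` bits from the top
        res.append((word & (mask << offset)) >> offset)
        bits_in_word -= width
    else:
        word = (word << 8) | raw_bytes[index]    # pull in the next byte
        index += 1
        bits_in_word += 8
print(res)  # [5, 6]
```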
17,063 | def do_cspvarica(self, varfit=, random_state=None):
if self.data_ is None:
raise RuntimeError("CSPVARICA requires data to be set")
try:
sorted(self.cl_)
for c in self.cl_:
assert(c is not None)
except (TypeError, AssertionError):
raise RuntimeError("CSPVARICA requires orderable and hashable class labels that are not None")
result = cspvarica(x=self.data_, var=self.var_, cl=self.cl_,
reducedim=self.reducedim_, backend=self.backend_,
varfit=varfit, random_state=random_state)
self.mixing_ = result.mixing
self.unmixing_ = result.unmixing
self.var_ = result.b
self.connectivity_ = Connectivity(self.var_.coef, self.var_.rescov, self.nfft_)
self.activations_ = dot_special(self.unmixing_.T, self.data_)
self.mixmaps_ = []
self.unmixmaps_ = []
return self | Perform CSPVARICA
Perform CSPVARICA source decomposition and VAR model fitting.
Parameters
----------
varfit : string
Determines how to calculate the residuals for source decomposition.
'ensemble' (default) fits one model to the whole data set,
'class' fits a different model for each class, and
'trial' fits a different model for each individual trial.
Returns
-------
self : Workspace
The Workspace object.
Raises
------
RuntimeError
If the :class:`Workspace` instance does not contain data.
See Also
--------
:func:`cspvarica` : CSPVARICA implementation |
17,064 | def timezone(self):
if not self._timezone_group and not self._timezone_location:
return None
if self._timezone_location != "":
return "%s/%s" % (self._timezone_group, self._timezone_location)
else:
return self._timezone_group | The name of the time zone for the location.
A list of time zone names can be obtained from pytz. For example.
>>> from pytz import all_timezones
>>> for timezone in all_timezones:
... print(timezone) |
17,065 | def ancestral_states(self, n):
anc = np.empty(n, dtype=np.intc)
_weighted_choices(self.state_indices, self.freqs, anc)
return anc | Generate ancestral sequence states from the equilibrium frequencies |
17,066 | def asset_view_prj(self, ):
if not self.cur_asset:
return
prj = self.cur_asset.project
self.view_prj(prj) | View the project of the current asset
:returns: None
:rtype: None
:raises: None |
17,067 | def image_plane_pix_grid_from_regular_grid(self, regular_grid):
pixel_scale = regular_grid.mask.pixel_scale
pixel_scales = ((regular_grid.masked_shape_arcsec[0] + pixel_scale) / (self.shape[0]),
(regular_grid.masked_shape_arcsec[1] + pixel_scale) / (self.shape[1]))
return grids.SparseToRegularGrid(unmasked_sparse_grid_shape=self.shape, pixel_scales=pixel_scales,
regular_grid=regular_grid, origin=regular_grid.mask.centre) | Calculate the image-plane pixelization from a regular-grid of coordinates (and its mask).
See *grid_stacks.SparseToRegularGrid* for details on how this grid is calculated.
Parameters
-----------
regular_grid : grids.RegularGrid
The grid of (y,x) arc-second coordinates at the centre of every image value (e.g. image-pixels). |
17,068 | def accept_ws(buf, pos):
match = re_ws.match(buf, pos)
if not match:
return None, pos
return buf[match.start(0):match.end(0)], match.end(0) | Skip whitespace at the current buffer position. |
17,069 | def should_recover(self):
return (self.checkpoint_freq > 0
and (self.num_failures < self.max_failures
or self.max_failures < 0)) | Returns whether the trial qualifies for restoring.
This is if a checkpoint frequency is set and has not failed more than
max_failures. This may return true even when there may not yet
be a checkpoint. |
17,070 | def use_pickle():
from . import serialize
serialize.pickle = serialize._stdlib_pickle
can_map[FunctionType] = _original_can_map[FunctionType] | Revert to using stdlib pickle.
Reverts custom serialization enabled by use_dill|cloudpickle. |
17,071 | def wind_series(self):
return [(timestamp, \
self._station_history.get_measurements()[timestamp][]) \
for timestamp in self._station_history.get_measurements()] | Returns the wind speed time series relative to the
meteostation, in the form of a list of tuples, each one containing the
couple timestamp-value
:returns: a list of tuples |
17,072 | def fromdict(dict):
seed = hb_decode(dict[])
index = dict[]
return Challenge(seed, index) | Takes a dictionary as an argument and returns a new Challenge
object from the dictionary.
:param dict: the dictionary to convert |
17,073 | def session_to_hour(timestamp):
t = datetime.strptime(timestamp, SYNERGY_SESSION_PATTERN)
return t.strftime(SYNERGY_HOURLY_PATTERN) | :param timestamp: as string in YYYYMMDDHHmmSS format
:return string in YYYYMMDDHH format |
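A quick example of the trimming, assuming the module's patterns match the formats named in the docstring (`'%Y%m%d%H%M%S'` and `'%Y%m%d%H'`).

```python
from datetime import datetime

t = datetime.strptime("20240131143015", "%Y%m%d%H%M%S")
print(t.strftime("%Y%m%d%H"))  # '2024013114'
```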
17,074 | def _http_headers(self):
if not self.usertag:
return {}
creds = u.format(
self.usertag,
self.password or
)
token = base64.b64encode(creds.encode())
return {
: .format(token.decode())
} | Return dictionary of http headers necessary for making an http
connection to the endpoint of this Connection.
:return: Dictionary of headers |
17,075 | def CheckAccess(self, token):
namespace, _ = self.urn.Split(2)
if namespace != "ACL":
raise access_control.UnauthorizedAccess(
"Approval object has invalid urn %s." % self.urn,
subject=self.urn,
requested_access=token.requested_access)
user, subject_urn = self.InferUserAndSubjectFromUrn()
if user != token.username:
raise access_control.UnauthorizedAccess(
"Approval object is not for user %s." % token.username,
subject=self.urn,
requested_access=token.requested_access)
now = rdfvalue.RDFDatetime.Now()
break_glass = self.Get(self.Schema.BREAK_GLASS)
if break_glass and now < break_glass:
token.is_emergency = True
return True
approvers = self.GetNonExpiredApprovers()
approvers_required = config.CONFIG["ACL.approvers_required"]
if len(approvers) < approvers_required:
missing = approvers_required - len(approvers)
msg = ("Need at least %d additional approver%s for access." %
(missing, "s" if missing > 1 else ""))
raise access_control.UnauthorizedAccess(
msg, subject=subject_urn, requested_access=token.requested_access)
if self.checked_approvers_label:
approvers_with_label = []
for approver in approvers:
try:
user = aff4.FACTORY.Open(
"aff4:/users/%s" % approver,
aff4_type=aff4_users.GRRUser,
token=token.SetUID())
if self.checked_approvers_label in user.GetLabelsNames():
approvers_with_label.append(approver)
except IOError:
pass
if len(approvers_with_label) < self.min_approvers_with_label:
missing = self.min_approvers_with_label - len(approvers_with_label)
raise access_control.UnauthorizedAccess(
"Need at least 1 admin approver for access.",
subject=subject_urn,
requested_access=token.requested_access)
return True | Enforce a dual approver policy for access. |
17,076 | def combine_sample_regions(*samples):
samples = utils.unpack_worlds(samples)
samples = cwlutils.unpack_tarballs(samples, samples[0])
global_analysis_file = os.path.join(samples[0]["dirs"]["work"], "analysis_blocks.bed")
if utils.file_exists(global_analysis_file) and not _needs_region_update(global_analysis_file, samples):
global_no_analysis_file = os.path.join(os.path.dirname(global_analysis_file), "noanalysis_blocks.bed")
else:
global_analysis_file = None
out = []
analysis_files = []
batches = []
with shared.bedtools_tmpdir(samples[0]):
for batch, items in vmulti.group_by_batch(samples, require_bam=False).items():
batches.append(items)
if global_analysis_file:
analysis_file, no_analysis_file = global_analysis_file, global_no_analysis_file
else:
analysis_file, no_analysis_file = _combine_sample_regions_batch(batch, items)
for data in items:
vr_file = dd.get_variant_regions(data)
if analysis_file:
analysis_files.append(analysis_file)
data["config"]["algorithm"]["callable_regions"] = analysis_file
data["config"]["algorithm"]["non_callable_regions"] = no_analysis_file
data["config"]["algorithm"]["callable_count"] = pybedtools.BedTool(analysis_file).count()
elif vr_file:
data["config"]["algorithm"]["callable_count"] = pybedtools.BedTool(vr_file).count()
if not data.get("work_bam"):
for x in items:
if x.get("work_bam"):
data["work_bam_callable"] = x["work_bam"]
out.append([data])
assert len(out) == len(samples)
sample_indexes = {dd.get_sample_name(d): i for i, d in enumerate(samples)}
def by_input_index(xs):
return sample_indexes[dd.get_sample_name(xs[0])]
out.sort(key=by_input_index)
if len(analysis_files) > 0:
final_regions = pybedtools.BedTool(analysis_files[0])
_analysis_block_stats(final_regions, batches[0])
return out | Create batch-level sets of callable regions for multi-sample calling.
Intersects all non-callable (nblock) regions from all samples in a batch,
producing a global set of callable regions. |
17,077 | def power_off(env, identifier, hard):
virtual_guest = env.client[]
vsi = SoftLayer.VSManager(env.client)
vs_id = helpers.resolve_id(vsi.resolve_ids, identifier, )
if not (env.skip_confirmations or
formatting.confirm(
% vs_id)):
raise exceptions.CLIAbort()
if hard:
virtual_guest.powerOff(id=vs_id)
else:
virtual_guest.powerOffSoft(id=vs_id) | Power off an active virtual server. |
17,078 | def _convert(cls, record):
if not record:
return {}
converted_dict = {}
for field in cls.conversion:
key = field[0]
if len(field) >= 2 and field[1]:
converted_key = field[1]
else:
converted_key = key
if len(field) >= 3 and field[2]:
conversion_method = field[2]
else:
conversion_method = cls.default_conversion_method
if len(field) >= 4:
converter = field[3]
else:
converter = None
try:
value = conversion_method(record[key])
except KeyError:
continue
if converter:
value = converter._convert_internal(value)
if converted_key is APPEND:
if isinstance(value, list):
for v in value:
converted_dict.update(v)
else:
converted_dict.update(value)
else:
converted_dict[converted_key] = value
return converted_dict | Core method of the converter. Converts a single dictionary into another dictionary. |
17,079 | def on_redraw(self):
super(WidgetLayer,self).on_redraw()
if not self._initialized:
self.initialize()
self._initialized = True | Called when the Layer should be redrawn.
If a subclass uses the :py:meth:`initialize()` Method, it is very important to also call the Super Class Method to prevent crashes. |
17,080 | def Wp(self):
Wp = trapz_loglog(self._Ep * self._J, self._Ep) * u.GeV
return Wp.to("erg") | Total energy in protons |
17,081 | def skip(self, num_bytes):
if num_bytes is None:
self._offset = len(self._data)
else:
self._offset += num_bytes | Jump ahead the specified number of bytes in the buffer. |
17,082 | def _add_child(self, child, logical_block_size, allow_duplicate, check_overflow):
if not self.isdir:
raise pycdlibexception.PyCdlibInvalidInput()
index = bisect.bisect_left(self.children, child)
if index != len(self.children) and self.children[index].file_ident == child.file_ident:
if not self.children[index].is_associated_file() and not child.is_associated_file():
if not (self.rock_ridge is not None and self.file_identifier() == b):
if not allow_duplicate:
raise pycdlibexception.PyCdlibInvalidInput()
else:
self.children[index].data_continuation = child
index += 1
self.children.insert(index, child)
if child.rock_ridge is not None and not child.is_dot() and not child.is_dotdot():
lo = 0
hi = len(self.rr_children)
while lo < hi:
mid = (lo + hi) // 2
rr = self.rr_children[mid].rock_ridge
if rr is not None:
if rr.name() < child.rock_ridge.name():
lo = mid + 1
else:
hi = mid
else:
raise pycdlibexception.PyCdlibInternalError()
rr_index = lo
self.rr_children.insert(rr_index, child)
num_extents, offset_unused = self._recalculate_extents_and_offsets(index,
logical_block_size)
overflowed = False
if check_overflow and (num_extents * logical_block_size > self.data_length):
overflowed = True
self.data_length += logical_block_size
self.children[0].data_length = self.data_length
if self.parent is None:
self.children[1].data_length = self.data_length
for c in self.children:
if not c.is_dir():
continue
if len(c.children) > 1:
c.children[1].data_length = self.data_length
return overflowed | An internal method to add a child to this object. Note that this is called both
during parsing and when adding a new object to the system, so it
it shouldn't have any functionality that is not appropriate for both.
Parameters:
child - The child directory record object to add.
logical_block_size - The size of a logical block for this volume descriptor.
allow_duplicate - Whether to allow duplicate names, as there are situations where duplicate children are allowed.
check_overflow - Whether to check for overflow; if we are parsing, we don't want to do this.
Returns:
True if adding this child caused the directory to overflow into another
extent, False otherwise. |
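A standalone sketch of the sorted-insert pattern used in _add_child above: bisect_left finds the insertion point and an equality check at that index detects duplicate names (the list and values here are illustrative):

import bisect

def insert_sorted(items, value, allow_duplicate=False):
    index = bisect.bisect_left(items, value)
    if index != len(items) and items[index] == value and not allow_duplicate:
        raise ValueError('duplicate value: %r' % (value,))
    items.insert(index, value)
    return index

names = ['BAR', 'FOO']
insert_sorted(names, 'BAZ')  # names -> ['BAR', 'BAZ', 'FOO']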
17,083 | def prepare_inputseries(self, ramflag: bool = True) -> None:
for element in printtools.progressbar(self):
element.prepare_inputseries(ramflag) | Call method |Element.prepare_inputseries| of all handled
|Element| objects. |
17,084 | def lvdisplay(lvname='', quiet=False):
ret = {}
cmd = ['lvdisplay', '-c']
if lvname:
cmd.append(lvname)
cmd_ret = __salt__['cmd.run_all'](cmd, python_shell=False,
                                  ignore_retcode=quiet)
if cmd_ret['retcode'] != 0:
return {}
out = cmd_ret['stdout'].splitlines()
for line in out:
comps = line.strip().split(':')
ret[comps[0]] = {
'Logical Volume Name': comps[0],
'Volume Group Name': comps[1],
'Logical Volume Access': comps[2],
'Logical Volume Status': comps[3],
'Internal Logical Volume Number': comps[4],
'Open Logical Volumes': comps[5],
'Logical Volume Size': comps[6],
'Current Logical Extents Associated': comps[7],
'Allocated Logical Extents': comps[8],
'Allocation Policy': comps[9],
'Read Ahead Sectors': comps[10],
'Major Device Number': comps[11],
'Minor Device Number': comps[12],
}
return ret | Return information about the logical volume(s)
lvname
logical device name
quiet
if the logical volume is not present, do not show any error
CLI Examples:
.. code-block:: bash
salt '*' lvm.lvdisplay
salt '*' lvm.lvdisplay /dev/vg_myserver/root |
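For reference, lvdisplay -c emits one colon-separated record per logical volume; the sample line below is made up, but it shows how the split(':') above maps fields into the returned dict:

sample = '/dev/vg_myserver/root:vg_myserver:3:1:-1:1:41943040:5120:-1:0:-1:253:0'
comps = sample.strip().split(':')
comps[0]    # '/dev/vg_myserver/root' -> key of the returned dict
comps[6]    # '41943040'              -> Logical Volume Size field
len(comps)  # 13 fields feed comps[0] .. comps[12]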
17,085 | def _process_field_queries(field_dictionary):
def field_item(field):
return {
"match": {
field: field_dictionary[field]
}
}
return [field_item(field) for field in field_dictionary] | We have a field_dictionary - we want to match the values for an elasticsearch "match" query
This is only potentially useful when trying to tune certain search operations |
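Example of the transformation performed by _process_field_queries (the field names are arbitrary):

field_dictionary = {'course': 'edX/DemoX/Demo', 'org': 'edX'}
_process_field_queries(field_dictionary)
# -> [{'match': {'course': 'edX/DemoX/Demo'}}, {'match': {'org': 'edX'}}]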
17,086 | def assemble(asmcode, pc=0, fork=DEFAULT_FORK):
return b''.join(x.bytes for x in assemble_all(asmcode, pc=pc, fork=fork)) | Assemble an EVM program
:param asmcode: an evm assembler program
:type asmcode: str
:param pc: program counter of the first instruction(optional)
:type pc: int
:param fork: fork name (optional)
:type fork: str
:return: the assembled bytecode
:rtype: bytes
Example use::
>>> assemble('''PUSH1 0x60\n \
BLOCKHASH\n \
MSTORE\n \
PUSH1 0x2\n \
PUSH2 0x100\n \
''')
...
b"\x60\x60\x60\x40\x52\x60\x02\x61\x01\x00" |
17,087 | def read_mail(window):
mail = imaplib.IMAP4_SSL(IMAP_SERVER)
(retcode, capabilities) = mail.login(LOGIN_EMAIL, LOGIN_PASSWORD)
mail.list()
typ, data = mail.select()
n = 0
now = datetime.now()
# The IMAP search string, element keys and header parsing below are reconstructed;
# the original string literals were lost. Assumes the standard email module is available.
search_string = '(SENTSINCE {:02d}-{}-{})'.format(now.day, calendar.month_abbr[now.month], now.year)
(retcode, messages) = mail.search(None, search_string)
if retcode == 'OK':
msg_list = messages[0].split()
msg_list.sort(reverse=True)
for n, message in enumerate(msg_list):
if n >= MAX_EMAILS:
break
typ, data = mail.fetch(message, '(RFC822)')
msg = email.message_from_bytes(data[0][1])
from_elem = window.FindElement('{}from'.format(n))
date_elem = window.FindElement('{}date'.format(n))
from_elem.Update(msg['From'])
date_elem.Update(msg['Date'])
window.Refresh() | Reads late emails from IMAP server and displays them in the Window
:param window: window to display emails in
:return: |
17,088 | def download_unzip(names=None, normalize_filenames=False, verbose=True):
names = [names] if isinstance(names, (str, basestring)) else names
file_paths = {}
for name in names:
created = create_big_url(name)
name = (created or name).lower().strip()
if name in BIG_URLS:
filepath = download_name(name, verbose=verbose)
if not filepath:
continue
file_paths[name] = normalize_ext_rename(filepath)
logger.debug('downloaded {} to {}'.format(name, file_paths[name]))  # log message reconstructed
fplower = file_paths[name].lower()
if fplower.endswith('.tar.gz'):
logger.info('Extracting {}'.format(file_paths[name]))  # log message reconstructed
file_paths[name] = untar(file_paths[name], verbose=verbose)
logger.debug('file_paths=' + str(file_paths))  # log message reconstructed
elif file_paths[name].lower().endswith('.zip'):
file_paths[name] = unzip(file_paths[name], verbose=verbose)
logger.debug('file_paths=' + str(file_paths))  # log message reconstructed
else:
df = pd.read_html(DATA_INFO['url'][name], **DATA_INFO['read_html_kwargs'][name])[-1]  # DATA_INFO keys reconstructed as placeholders
df.columns = clean_columns(df.columns)
file_paths[name] = os.path.join(DATA_PATH, name + '.csv')
df.to_csv(file_paths[name])
file_paths[name] = normalize_ext_rename(file_paths[name])
return file_paths | r""" Download CSV or HTML tables listed in `names`, unzip and to DATA_PATH/`names`.csv .txt etc
TODO: move to web or data_utils or futils
Also normalizes file name extensions (.bin.gz -> .w2v.bin.gz).
Uses table in data_info.csv (internal DATA_INFO) to determine URL or file path from dataset name.
Also looks up each name in BIG_URLS to find its download URL.
If names or [names] is a valid URL then download it and create a name
from the url in BIG_URLS (not yet pushed to data_info.csv) |
17,089 | def convert_to_string(data, headers, **_):
return (([utils.to_string(v) for v in row] for row in data),
[utils.to_string(h) for h in headers]) | Convert all *data* and *headers* to strings.
Binary data that cannot be decoded is converted to a hexadecimal
representation via :func:`binascii.hexlify`.
:param iterable data: An :term:`iterable` (e.g. list) of rows.
:param iterable headers: The column headers.
:return: The processed data and headers.
:rtype: tuple |
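A usage sketch; utils.to_string is assumed (from the surrounding package) to stringify values and hexlify undecodable binary data:

rows, headers = convert_to_string([[1, b'\xff\x00'], [2, None]], ['id', 'payload'])
list(rows)  # each row becomes a list of strings, e.g. ['1', 'ff00'], depending on utils.to_string
headers     # ['id', 'payload'] as strings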
17,090 | def add_actors(self, doc:Document, event: str, cameo_code: int) -> List[Document]:
actor1_cdr = {
"ActorName": doc.cdr_document[self.attribute("Actor1Name")],
"ActorCountryCode": doc.cdr_document[self.attribute("Actor1CountryCode")],
"ActorKnownGroupCode": doc.cdr_document[self.attribute("Actor1KnownGroupCode")],
"ActorEthnicCode": doc.cdr_document[self.attribute("Actor1EthnicCode")],
"ActorReligion1Code": doc.cdr_document[self.attribute("Actor1Religion1Code")],
"ActorReligion2Code": doc.cdr_document[self.attribute("Actor1Religion2Code")],
"ActorType1Code": doc.cdr_document[self.attribute("Actor1Type1Code")],
"ActorType2Code": doc.cdr_document[self.attribute("Actor1Type2Code")],
"ActorType3Code": doc.cdr_document[self.attribute("Actor1Type3Code")],
"ActorGeo_Type": doc.cdr_document[self.attribute("Actor1Geo_Type")],
"ActorGeo_FullName": doc.cdr_document[self.attribute("Actor1Geo_FullName")],
"ActorGeo_CountryCode": doc.cdr_document[self.attribute("Actor1Geo_CountryCode")],
"ActorGeo_ADM1Code": doc.cdr_document[self.attribute("Actor1Geo_ADM1Code")],
"ActorGeo_Lat": doc.cdr_document[self.attribute("Actor1Geo_Lat")],
"ActorGeo_Long": doc.cdr_document[self.attribute("Actor1Geo_Long")],
"ActorGeo_FeatureID": doc.cdr_document[self.attribute("Actor1Geo_FeatureID")],
"dataset": "gdelt-actor"
}
actor1 = etk.create_document(actor1_cdr)
actor1.doc_id = doc.doc_id + "-actor1"
actor_field = "participant"
actor_prop = self.mapping.actor_property(event, "actor1", cameo_code)
if actor_prop and self.actor_role.get(actor_prop):
actor_field = self.actor_role.get(actor_prop)
doc.kg.add_value(actor_field, actor1.doc_id)
actor2_cdr = {
"ActorName": doc.cdr_document[self.attribute("Actor2Name")],
"ActorCountryCode": doc.cdr_document[self.attribute("Actor2CountryCode")],
"ActorKnownGroupCode": doc.cdr_document[self.attribute("Actor2KnownGroupCode")],
"ActorEthnicCode": doc.cdr_document[self.attribute("Actor2EthnicCode")],
"ActorReligion1Code": doc.cdr_document[self.attribute("Actor2Religion1Code")],
"ActorReligion2Code": doc.cdr_document[self.attribute("Actor2Religion2Code")],
"ActorType1Code": doc.cdr_document[self.attribute("Actor2Type1Code")],
"ActorType2Code": doc.cdr_document[self.attribute("Actor2Type2Code")],
"ActorType3Code": doc.cdr_document[self.attribute("Actor2Type3Code")],
"ActorGeo_Type": doc.cdr_document[self.attribute("Actor2Geo_Type")],
"ActorGeo_FullName": doc.cdr_document[self.attribute("Actor2Geo_FullName")],
"ActorGeo_CountryCode": doc.cdr_document[self.attribute("Actor2Geo_CountryCode")],
"ActorGeo_ADM1Code": doc.cdr_document[self.attribute("Actor2Geo_ADM1Code")],
"ActorGeo_Lat": doc.cdr_document[self.attribute("Actor2Geo_Lat")],
"ActorGeo_Long": doc.cdr_document[self.attribute("Actor2Geo_Long")],
"ActorGeo_FeatureID": doc.cdr_document[self.attribute("Actor2Geo_FeatureID")],
"dataset": "gdelt-actor"
}
actor2 = etk.create_document(actor2_cdr)
actor2.doc_id = doc.doc_id + "-actor2"
actor_field = "participant"
actor_prop = self.mapping.actor_property(event, "actor2", cameo_code)
if actor_prop and self.actor_role.get(actor_prop):
actor_field = self.actor_role.get(actor_prop)
doc.kg.add_value(actor_field, actor2.doc_id)
return [actor1, actor2] | Each event has two actors. The relationship of the event to the actors depends
on the cameo code and is defined by the mapping.
Args:
doc: the document containing the event
event: one of "event1", "event2", or "event3"
cameo_code: the CAMEO code of the event, used to look up the actor roles
Returns: the documents created for each actor |
17,091 | def createNotification(self, ulOverlayHandle, ulUserValue, type_, pchText, style):
fn = self.function_table.createNotification
pImage = NotificationBitmap_t()
pNotificationId = VRNotificationId()
result = fn(ulOverlayHandle, ulUserValue, type_, pchText, style, byref(pImage), byref(pNotificationId))
return result, pImage, pNotificationId | Create a notification and enqueue it to be shown to the user.
An overlay handle is required to create a notification, as otherwise it would be impossible for a user to act on it.
To create a two-line notification, use a line break ('\n') to split the text into two lines.
The pImage argument may be NULL, in which case the specified overlay's icon will be used instead. |
17,092 | def is_ancestor_of_repository(self, id_, repository_id):
if self._catalog_session is not None:
return self._catalog_session.is_ancestor_of_catalog(id_=id_, catalog_id=repository_id)
return self._hierarchy_session.is_ancestor(id_=id_, ancestor_id=repository_id) | Tests if an ``Id`` is an ancestor of a repository.
arg: id (osid.id.Id): an ``Id``
arg: repository_id (osid.id.Id): the Id of a repository
return: (boolean) - ``true`` if this ``id`` is an ancestor of
``repository_id,`` ``false`` otherwise
raise: NotFound - ``repository_id`` not found
raise: NullArgument - ``repository_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` not found return ``false``. |
17,093 | def parse_personalities(personalities_line):
tokens = personalities_line.split()
assert tokens.pop(0) == "Personalities"
assert tokens.pop(0) == ":"
personalities = []
for token in tokens:
assert token.startswith('[') and token.endswith(']')
personalities.append(token.strip('[]'))
return personalities | Parse the "personalities" line of ``/proc/mdstat``.
Lines are expected to be like:
Personalities : [linear] [raid0] [raid1] [raid5] [raid4] [raid6]
If they do not have this format, an error will be raised since it
would be considered an unexpected parsing error.
Parameters
----------
personalities_line : str
A single "Personalities" line from an ``/proc/mdstat`` files.
Returns
-------
A list of raid "personalities" listed on the line. |
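Usage example with a typical mdstat "Personalities" line:

line = 'Personalities : [linear] [raid0] [raid1] [raid5] [raid4] [raid6]'
parse_personalities(line)
# -> ['linear', 'raid0', 'raid1', 'raid5', 'raid4', 'raid6']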
17,094 | def noise_despike(sig, win=3, nlim=24., maxiter=4):
if win % 2 != 1:
win += 1
kernel = np.ones(win) / win
over = np.ones(len(sig), dtype=bool)
npad = int((win - 1) / 2)
over[:npad] = False
over[-npad:] = False
nloops = 0
while any(over) and (nloops < maxiter):
rmean = np.convolve(sig, kernel, 'same')
rstd = rmean**0.5
over[npad:-npad] = (sig[npad:-npad] > rmean + nlim * rstd)
if any(over):
sig[npad:-npad][over[npad:-npad]] = rmean[over[npad:-npad]]
nloops += 1
return sig | Apply standard deviation filter to remove anomalous values.
Parameters
----------
sig : array_like
The signal to despike (modified in place).
win : int
The window used to calculate rolling statistics.
nlim : float
The number of standard deviations above the rolling
mean above which data are considered outliers.
maxiter : int
The maximum number of despiking passes to apply.
Returns
-------
sig : np.ndarray
The despiked signal. |
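A quick usage sketch (assumes numpy is imported as np, as the code above already does):

sig = np.full(100, 10.0)
sig[50] = 1000.0                 # inject a single spike
clean = noise_despike(sig.copy(), win=3, nlim=24., maxiter=4)
clean[50]                        # damped toward the local rolling mean (~340 here)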
17,095 | def from_dict(cls, serialized):
def field(name):
return serialized.get(name) or serialized.get(name.lower())
return Error(
code=field('Code'),
message=field('Message'),
info=ErrorInfo.from_dict(field('Info')),
version=bakery.LATEST_VERSION,
) | Create an error from a JSON-deserialized object
@param serialized the object holding the serialized error {dict} |
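A usage sketch, assuming the enclosing class is the Error named in the body above; because field() falls back to the lowercased key, both calls are equivalent:

Error.from_dict({'Code': 'interaction required', 'Message': 'discharge required'})
Error.from_dict({'code': 'interaction required', 'message': 'discharge required'})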
17,096 | def store_memory_object(self, mo, overwrite=True):
for p in self._containing_pages_mo(mo):
self._apply_object_to_page(p, mo, overwrite=overwrite)
self._update_range_mappings(mo.base, mo.object, mo.length) | This function optimizes a large store by storing a single reference to the :class:`SimMemoryObject` instead of
one for each byte.
:param mo: the memory object to store |
17,097 | def remove_shard(self, shard, drop_buffered_records=False):
try:
self.roots.remove(shard)
except ValueError:
pass
else:
self.active.extend(shard.children)
if drop_buffered_records:
heap = self.buffer.heap
to_remove = [x for x in heap if x[2] is shard]
for x in to_remove:
heap.remove(x) | Remove a Shard from the Coordinator. Drops all buffered records from the Shard.
If the Shard is active or a root, it is removed and any children promoted to those roles.
:param shard: The shard to remove
:type shard: :class:`~bloop.stream.shard.Shard`
:param bool drop_buffered_records:
Whether records from this shard should be removed.
Default is False. |
17,098 | def _sigfigs(n, sigfigs=3):
n = float(n)
if n == 0 or math.isnan(n):
return n
return round(n, -int(math.floor(math.log10(abs(n))) - sigfigs + 1)) | helper function to round a number to significant figures |
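Examples of rounding to significant figures with the helper above:

_sigfigs(123456)      # 123000.0
_sigfigs(0.00123456)  # 0.00123
_sigfigs(9.876, 2)    # 9.9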
17,099 | def p_referenceInitializer(p):
if p[1][0] == '$':  # alias identifiers in MOF start with '$'
try:
p[0] = p.parser.aliases[p[1]]
except KeyError:
ce = CIMError(
CIM_ERR_FAILED,
_format("Unknown alias: {0!A}", p[1]))
ce.file_line = (p.parser.file, p.lexer.lineno)
raise ce
else:
p[0] = p[1] | referenceInitializer : objectHandle
| aliasIdentifier |