Unnamed: 0 | code | docstring
---|---|---|
375,300 | def _parse_chemical_equation(value):
arrow = _pp.oneOf('-> <-').setResultsName('arrow')
species = _pp.Word(_pp.printables).setResultsName('species')
coefficient = (_pp.Optional(_pp.Word(_pp.nums), default=1)
.setParseAction(_pp.tokenMap(int))
.setResultsName('coefficient'))
group_ = _pp.Group(coefficient + _pp.Optional(_pp.Suppress('*')) + species)
reactants = ((group_ + _pp.ZeroOrMore(_pp.Suppress('+') + group_))
.setResultsName('reactants'))
products = ((group_ + _pp.ZeroOrMore(_pp.Suppress('+') + group_))
.setResultsName('products'))
grammar = reactants + arrow + products
parsed = grammar.parseString(value).asDict()
if parsed['arrow'] == '<-':
parsed['reactants'], parsed['products'] \
= parsed['products'], parsed['reactants']
parsed['arrow'] = '->'
return parsed | Parse the chemical equation mini-language.
See the docstring of `ChemicalEquation` for more.
Parameters
----------
value : `str`
A string in chemical equation mini-language.
Returns
-------
mapping
A mapping in the format specified by the mini-language (see notes on
`ChemicalEquation`).
Examples
--------
>>> from pyrrole.core import _parse_chemical_equation
>>> parsed = _parse_chemical_equation('4 A + 3 B <- 2 C + D')
>>> parsed['arrow']
'->'
>>> parsed['products'][1]['species']
'B'
>>> parsed['reactants'][0]['coefficient']
2 |
375,301 | def create(cls, name, input_speed=None, learn_dns_automatically=True,
output_speed=None, provider_name=None, probe_address=None,
standby_mode_period=3600, standby_mode_timeout=30,
active_mode_period=5, active_mode_timeout=1, comment=None):
json = {'name': name,
'input_speed': input_speed,
'output_speed': output_speed,
'probe_address': probe_address,
'provider_name': provider_name,
'comment': comment,
'standby_mode_period': standby_mode_period,
'standby_mode_timeout': standby_mode_timeout,
'active_mode_period': active_mode_period,
'active_mode_timeout': active_mode_timeout,
'learn_dns_automatically': learn_dns_automatically}
return ElementCreator(cls, json) | Create a Dynamic Netlink.
:param str name: name of netlink Element
:param int input_speed: input speed in Kbps, used for ratio-based
load-balancing
:param int output_speed: output speed in Kbps, used for ratio-based
load-balancing
:param bool learn_dns_automatically: whether to obtain DNS automatically
from the DHCP interface
:param str provider_name: optional name to identify provider for this
netlink
:param list probe_address: list of IP addresses to use as probing
addresses to validate connectivity
:type probe_address: list(str)
:param int standby_mode_period: Specifies the probe period when
standby mode is used (in seconds)
:param int standby_mode_timeout: probe timeout in seconds
:param int active_mode_period: Specifies the probe period when active
mode is used (in seconds)
:param int active_mode_timeout: probe timeout in seconds
:raises CreateElementFailed: failure to create netlink with reason
:rtype: DynamicNetlink
.. note:: To monitor the status of the network links, you must define
at least one probe IP address. |
375,302 | def getpath(element):
if not hasattr(element, '__name__'):
raise AttributeError(
'element {0} must have the attribute __name__'.format(element)
)
result = element.__name__ if ismodule(element) else \
'{0}.{1}'.format(element.__module__, element.__name__)
return result | Get the full path of a given element, i.e. the opposite of the
resolve_path behaviour.
:param element: must be directly defined in a module or a package and have
the attribute '__name__'.
:return: element absolute path.
:rtype: str
:raises AttributeError: if element has not the attribute __name__.
:Example:
>>> getpath(getpath)
b3j0f.utils.path.getpath |
375,303 | def _parse_repo_file(filename):
parsed = configparser.ConfigParser()
config = {}
try:
parsed.read(filename)
except configparser.MissingSectionHeaderError as err:
log.error(
'Failed to parse file %s, error: %s',
filename, err.message
)
return ('', {})
for section in parsed._sections:
section_dict = dict(parsed._sections[section])
section_dict.pop('__name__', None)
config[section] = section_dict
headers = ''
section = None
with salt.utils.files.fopen(filename, 'r') as repofile:
for line in repofile:
line = salt.utils.stringutils.to_unicode(line)
line = line.strip()
if line.startswith('#'):
if section is None:
headers += line + '\n'
else:
try:
comments = config[section].setdefault('comments', [])
comments.append(line[1:].lstrip())
except KeyError:
log.debug(
'Found comment in %s which does not belong to any repo section: %s', filename, line
)
elif line.startswith('[') and line.endswith(']'):
section = line[1:-1]
return (headers, salt.utils.data.decode(config)) | Turn a single repo file into a dict |
375,304 | def process_save(X, y, tokenizer, proc_data_path, max_len=400, train=False, ngrams=None, limit_top_tokens=None):
if train and limit_top_tokens is not None:
tokenizer.apply_encoding_options(limit_top_tokens=limit_top_tokens)
X_encoded = tokenizer.encode_texts(X)
if ngrams is not None:
X_encoded = tokenizer.add_ngrams(X_encoded, n=ngrams, train=train)
X_padded = tokenizer.pad_sequences(
X_encoded, fixed_token_seq_length=max_len)
if train:
ds = Dataset(X_padded,
y, tokenizer=tokenizer)
else:
ds = Dataset(X_padded, y)
ds.save(proc_data_path) | Process text and save as Dataset |
375,305 | def get_mesh_hcurves(oqparam):
imtls = oqparam.imtls
lon_lats = set()
data = AccumDict()
ncols = len(imtls) + 1
csvfile = oqparam.inputs[]
for line, row in enumerate(csv.reader(csvfile), 1):
try:
if len(row) != ncols:
raise ValueError( %
ncols, len(row))
x, y = row[0].split()
lon_lat = valid.longitude(x), valid.latitude(y)
if lon_lat in lon_lats:
raise DuplicatedPoint(lon_lat)
lon_lats.add(lon_lat)
for i, imt_ in enumerate(imtls, 1):
values = valid.decreasing_probabilities(row[i])
if len(values) != len(imtls[imt_]):
raise ValueError( %
(len(values), len(imtls([imt_]))))
data += {imt_: [numpy.array(values)]}
except (ValueError, DuplicatedPoint) as err:
raise err.__class__( % (err, csvfile, line))
lons, lats = zip(*sorted(lon_lats))
mesh = geo.Mesh(numpy.array(lons), numpy.array(lats))
return mesh, {imt: numpy.array(lst) for imt, lst in data.items()} | Read CSV data in the format `lon lat, v1-vN, w1-wN, ...`.
:param oqparam:
an :class:`openquake.commonlib.oqvalidation.OqParam` instance
:returns:
the mesh of points and the data as a dictionary
imt -> array of curves for each site |
375,306 | def _formatVals(self, val_list):
vals = []
for (name, val) in val_list:
if val is not None:
if isinstance(val, float):
vals.append("%s.value %f" % (name, val))
else:
vals.append("%s.value %s" % (name, val))
else:
vals.append("%s.value U" % (name,))
return "\n".join(vals) | Formats value list from Munin Graph and returns multi-line value
entries for the plugin fetch cycle.
@param val_list: List of name-value pairs.
@return: Multi-line text. |
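A hedged illustration of the Munin multi-line format this produces; `plugin` stands in for any instance of the class defining _formatVals:
>>> plugin._formatVals([('load', 0.35), ('users', 3), ('swap', None)])
'load.value 0.350000\nusers.value 3\nswap.value U'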
375,307 | def _create_optObject(self, **kwargs):
optimizer = kwargs.get('optimizer',
self.config['optimizer']['optimizer'])
if optimizer.upper() == 'MINUIT':
optObject = pyLike.Minuit(self.like.logLike)
elif optimizer.upper() == 'NEWMINUIT':
optObject = pyLike.NewMinuit(self.like.logLike)
else:
optFactory = pyLike.OptimizerFactory_instance()
optObject = optFactory.create(str(optimizer), self.like.logLike)
return optObject | Make MINUIT or NewMinuit type optimizer object |
375,308 | def restore(source, offset):
backup_location = os.path.join(
os.path.dirname(os.path.abspath(source)), source + '.bytes_backup')
click.echo('Restoring from {location}'.format(location=backup_location))
if not os.path.isfile(backup_location):
click.echo('No backup found for {source}'.format(source=source))
return
with open(backup_location, 'rb') as b:
data = b.read()
click.echo('Writing {c} bytes at offset {o}'.format(c=len(data), o=offset))
with open(source, 'r+b') as f:
f.seek(offset)
f.write(data)
f.flush()
click.echo('Done') | Restore a smudged file from .bytes_backup |
375,309 | def commit_operation( self, input_op_data, accepted_nameop, current_block_number ):
if self.disposition != DISPOSITION_RW:
log.error("FATAL: borrowing violation: not a read-write connection")
traceback.print_stack()
os.abort()
cur = self.db.cursor()
canonical_op = None
op_type_str = None
opcode = accepted_nameop.get('opcode', None)
try:
assert opcode is not None, "Undefined op '%s'" % accepted_nameop['op']
except Exception, e:
log.exception(e)
log.error("FATAL: unrecognized op '%s'" % accepted_nameop['op'])
os.abort()
if opcode in OPCODE_PREORDER_OPS:
canonical_op = self.commit_state_preorder( accepted_nameop, current_block_number )
op_type_str = "state_preorder"
elif opcode in OPCODE_CREATION_OPS:
canonical_op = self.commit_state_create( accepted_nameop, current_block_number )
op_type_str = "state_create"
elif opcode in OPCODE_TRANSITION_OPS:
canonical_op = self.commit_state_transition( accepted_nameop, current_block_number )
op_type_str = "state_transition"
elif opcode in OPCODE_TOKEN_OPS:
canonical_op = self.commit_token_operation(accepted_nameop, current_block_number)
op_type_str = "token_operation"
else:
raise Exception("Unknown operation {}".format(opcode))
if canonical_op is None:
log.error("FATAL: no canonical op generated (for {})".format(op_type_str))
os.abort()
log.debug("Extract consensus fields for {} in {}, as part of a {}".format(opcode, current_block_number, op_type_str))
consensus_op = self.extract_consensus_op(opcode, input_op_data, canonical_op, current_block_number)
return consensus_op | Commit an operation, thereby carrying out a state transition.
Returns a dict with the new db record fields |
375,310 | def put(self, session):
if self._sessions.full():
raise queue.Full
txn = session._transaction
if txn is None or txn.committed() or txn._rolled_back:
session.transaction()
self._pending_sessions.put(session)
else:
super(TransactionPingingPool, self).put(session) | Return a session to the pool.
Never blocks: if the pool is full, raises.
:type session: :class:`~google.cloud.spanner_v1.session.Session`
:param session: the session being returned.
:raises: :exc:`six.moves.queue.Full` if the queue is full. |
375,311 | def _numeric_param_check_range(variable_name, variable_value, range_bottom, range_top):
err_msg = "%s must be between %i and %i"
if variable_value < range_bottom or variable_value > range_top:
raise ToolkitError(err_msg % (variable_name, range_bottom, range_top)) | Checks if numeric parameter is within given range |
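A quick sketch of how the guard behaves (ToolkitError comes from the surrounding toolkit; the parameter name and values are made up):
>>> _numeric_param_check_range('num_topics', 25, 1, 100)    # in range, no exception
>>> _numeric_param_check_range('num_topics', 0, 1, 100)
Traceback (most recent call last):
    ...
ToolkitError: num_topics must be between 1 and 100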
375,312 | def env_problem(env_problem_name, **kwargs):
ep_cls = Registries.env_problems[env_problem_name]
ep = ep_cls()
ep.initialize(**kwargs)
return ep | Get and initialize the `EnvProblem` with the given name and batch size.
Args:
env_problem_name: string name of the registered env problem.
**kwargs: forwarded to env problem's initialize method.
Returns:
an initialized EnvProblem with the given batch size. |
375,313 | def BT(cpu, dest, src):
if dest.type == 'register':
cpu.CF = ((dest.read() >> (src.read() % dest.size)) & 1) != 0
elif dest.type == 'memory':
addr, pos = cpu._getMemoryBit(dest, src)
base, size, ty = cpu.get_descriptor(cpu.DS)
value = cpu.read_int(addr + base, 8)
cpu.CF = Operators.EXTRACT(value, pos, 1) == 1
else:
raise NotImplementedError(f"Unknown operand for BT: {dest.type}") | Bit Test.
Selects the bit in a bit string (specified with the first operand, called the bit base) at the
bit-position designated by the bit offset (specified by the second operand) and stores the value
of the bit in the CF flag. The bit base operand can be a register or a memory location; the bit
offset operand can be a register or an immediate value:
- If the bit base operand specifies a register, the instruction takes the modulo 16, 32, or 64
of the bit offset operand (modulo size depends on the mode and register size; 64-bit operands
are available only in 64-bit mode).
- If the bit base operand specifies a memory location, the operand represents the address of the
byte in memory that contains the bit base (bit 0 of the specified byte) of the bit string. The
range of the bit position that can be referenced by the offset operand depends on the operand size.
:param cpu: current CPU.
:param dest: bit base.
:param src: bit offset. |
375,314 | def transplant(exif_src, image, new_file=None):
if exif_src[0:2] == b"\xff\xd8":
src_data = exif_src
else:
with open(exif_src, 'rb') as f:
src_data = f.read()
segments = split_into_segments(src_data)
exif = get_exif_seg(segments)
if exif is None:
raise ValueError("not found exif in input")
output_file = False
if image[0:2] == b"\xff\xd8":
image_data = image
else:
with open(image, 'rb') as f:
image_data = f.read()
output_file = True
segments = split_into_segments(image_data)
new_data = merge_segments(segments, exif)
if isinstance(new_file, io.BytesIO):
new_file.write(new_data)
new_file.seek(0)
elif new_file:
with open(new_file, "wb+") as f:
f.write(new_data)
elif output_file:
with open(image, "wb+") as f:
f.write(new_data)
else:
raise ValueError("Give a 3rd argument to to output file") | py:function:: piexif.transplant(filename1, filename2)
Transplant exif from filename1 to filename2.
:param str filename1: JPEG
:param str filename2: JPEG |
375,315 | def add_property(attribute, type):
def decorator(cls):
private = "_" + attribute
def getAttr(self):
if getattr(self, private) is None:
setattr(self, private, type())
return getattr(self, private)
def setAttr(self, value):
setattr(self, private, value)
setattr(cls, attribute, property(getAttr, setAttr))
setattr(cls, private, None)
return cls
return decorator | Add a property to a class |
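A minimal usage sketch of the decorator; the class and attribute names are made up for illustration:
>>> @add_property('items', list)
... class Basket(object):
...     pass
>>> b = Basket()
>>> b.items.append('apple')    # the backing list is created lazily on first access
>>> b.items
['apple']
>>> b._items is b.items
True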
375,316 | def status(self, job_ids):
all_states = []
status = self.client.describe_instances(InstanceIds=job_ids)
for r in status['Reservations']:
for i in r['Instances']:
instance_id = i['InstanceId']
instance_state = translate_table.get(i['State']['Name'], 'UNKNOWN')
self.resources[instance_id]['status'] = instance_state
all_states.extend([instance_state])
return all_states | Get the status of a list of jobs identified by their ids.
Parameters
----------
job_ids : list of str
Identifiers for the jobs.
Returns
-------
list of int
The status codes of the requested jobs. |
375,317 | def sample(self, batch_size):
idxes = [random.randint(0, len(self._storage) - 1) for _ in range(batch_size)]
return self._encode_sample(idxes) | Sample a batch of experiences.
Parameters
----------
batch_size: int
How many transitions to sample.
Returns
-------
obs_batch: np.array
batch of observations
act_batch: np.array
batch of actions executed given obs_batch
rew_batch: np.array
rewards received as results of executing act_batch
next_obs_batch: np.array
next set of observations seen after executing act_batch
done_mask: np.array
done_mask[i] = 1 if executing act_batch[i] resulted in
the end of an episode and 0 otherwise. |
375,318 | def min_distance_single(self,
mesh,
transform=None,
return_name=False,
return_data=False):
if transform is None:
transform = np.eye(4)
b = self._get_BVH(mesh)
t = fcl.Transform(transform[:3, :3], transform[:3, 3])
o = fcl.CollisionObject(b, t)
if cg == ddata.result.o2:
names = reversed(names)
data = DistanceData(names, ddata.result)
if return_name and return_data:
return distance, name, data
elif return_name:
return distance, name
elif return_data:
return distance, data
else:
return distance | Get the minimum distance between a single object and any
object in the manager.
Parameters
---------------
mesh : Trimesh object
The geometry of the collision object
transform : (4,4) float
Homogenous transform matrix for the object
return_names : bool
If true, return name of the closest object
return_data : bool
If true, a DistanceData object is returned as well
Returns
-------------
distance : float
Min distance between mesh and any object in the manager
name : str
The name of the object in the manager that was closest
data : DistanceData
Extra data about the distance query |
375,319 | def clustering_coef_wd(W):
A = np.logical_not(W == 0).astype(float)
S = cuberoot(W) + cuberoot(W.T)
K = np.sum(A + A.T, axis=1)
cyc3 = np.diag(np.dot(S, np.dot(S, S))) / 2
K[np.where(cyc3 == 0)] = np.inf
CYC3 = K * (K - 1) - 2 * np.diag(np.dot(A, A))
C = cyc3 / CYC3
return C | The weighted clustering coefficient is the average "intensity" of
triangles around a node.
Parameters
----------
W : NxN np.ndarray
weighted directed connection matrix
Returns
-------
C : Nx1 np.ndarray
clustering coefficient vector
Notes
-----
Methodological note (also see clustering_coef_bd)
The weighted modification is as follows:
- The numerator: adjacency matrix is replaced with weights matrix ^ 1/3
- The denominator: no changes from the binary version
The above reduces to symmetric and/or binary versions of the clustering
coefficient for respective graphs. |
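A small, hedged usage sketch on a toy weighted directed matrix (exact values depend on this module's cuberoot helper):
>>> import numpy as np
>>> W = np.array([[0., 0.5, 1.0],
...               [0.5, 0., 0.5],
...               [0., 1.0, 0.]])
>>> clustering_coef_wd(W).shape    # one coefficient per node
(3,)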
375,320 | def inverse(self):
inv_rot = np.linalg.inv(self.rotation)
inv_scale = 1.0 / self.scale
inv_trans = -inv_scale * inv_rot.dot(self.translation)
return SimilarityTransform(inv_rot, inv_trans, inv_scale,
from_frame=self._to_frame,
to_frame=self._from_frame) | Take the inverse of the similarity transform.
Returns
-------
:obj:`SimilarityTransform`
The inverse of this SimilarityTransform. |
375,321 | def plotChIds(self, maptype=None, modout=False):
if maptype is None:
maptype = self.defaultMap
polyList = self.getAllChannelsAsPolygons(maptype)
for p in polyList:
p.identifyModule(modout=modout) | Print the channel numbers on the plotting display
Note:
---------
This method will behave poorly if you are plotting in
mixed projections. Because the channel vertex polygons
are already projected using self.defaultMap, applying
this function when plotting in a different reference frame
may cause trouble. |
375,322 | def edit(self, changelist=0):
command = 'reopen' if self.action in ('add', 'edit') else 'edit'
if int(changelist):
self._connection.run([command, '-c', str(changelist.change), self.depotFile])
else:
self._connection.run([command, self.depotFile])
self.query() | Checks out the file
:param changelist: Optional changelist to checkout the file into
:type changelist: :class:`.Changelist` |
375,323 | async def send_text_message_to_all_interfaces(self, *args, **kwargs):
logger.debug('send_text_message_to_all_interfaces')
tasks = [interface.send_text_message(*args, **kwargs)
for _, interface in self.interfaces.items()]
logger.debug('tasks:')
logger.debug(tasks)
res = [body for body in await asyncio.gather(*tasks)]
logger.debug('results:')
logger.debug(res)
return res | TODO:
we should know from where user has come and use right interface
as well right interface can be chosen
:param args:
:param kwargs:
:return: |
375,324 | def merge_settings(settings, new_metadata_settings):
for d in (settings, new_metadata_settings):
if not isinstance(d, dict):
raise TypeError('Both arguments must be dictionaries.')
result_settings = deepcopy(settings)
dict_deep_merge(result_settings, new_metadata_settings)
return result_settings | Will update the settings with the provided new settings data extracted from the IdP metadata
:param settings: Current settings dict data
:type settings: dict
:param new_metadata_settings: Settings to be merged (extracted from IdP metadata after parsing)
:type new_metadata_settings: dict
:returns: merged settings
:rtype: dict |
375,325 | def service_restarted_since(self, sentry_unit, mtime, service,
pgrep_full=None, sleep_time=20,
retry_count=30, retry_sleep_time=10):
unit_name = sentry_unit.info['unit_name']
self.log.debug('Checking that %s service restarted since %s on %s' % (service, mtime, unit_name))
time.sleep(sleep_time)
proc_start_time = None
tries = 0
while tries <= retry_count and not proc_start_time:
try:
proc_start_time = self._get_proc_start_time(sentry_unit,
service,
pgrep_full)
self.log.debug('Attempt {} to get {} proc start time on {} OK'.format(tries, service, unit_name))
except IOError as e:
self.log.debug('Attempt {} to get {} proc start time on {} failed: {}'.format(tries, service,
unit_name, e))
time.sleep(retry_sleep_time)
tries += 1
if not proc_start_time:
self.log.warn('No proc start time found, assuming service did not start')
return False
if proc_start_time >= mtime:
self.log.debug('Proc start time is newer than provided mtime (%s >= %s) on %s (OK)' % (proc_start_time,
mtime, unit_name))
return True
else:
self.log.warn('Proc start time (%s) is older than provided mtime (%s) on %s, service did not restart' % (proc_start_time, mtime, unit_name))
return False | Check whether the service was restarted after a given time.
Args:
sentry_unit (sentry): The sentry unit to check for the service on
mtime (float): The epoch time to check against
service (string): service name to look for in process table
pgrep_full: [Deprecated] Use full command line search mode with pgrep
sleep_time (int): Initial sleep time (s) before looking for file
retry_sleep_time (int): Time (s) to sleep between retries
retry_count (int): If file is not found, how many times to retry
Returns:
bool: True if service found and its start time it newer than mtime,
False if service is older than mtime or if service was
not found. |
375,326 | def getent(refresh=False, root=None):
if 'user.getent' in __context__ and not refresh:
return __context__['user.getent']
ret = []
if root is not None and __grains__['kernel'] != 'AIX':
getpwall = functools.partial(_getpwall, root=root)
else:
getpwall = functools.partial(pwd.getpwall)
for data in getpwall():
ret.append(_format_info(data))
__context__['user.getent'] = ret
return ret | Return the list of all info for all users
refresh
Force a refresh of user information
root
Directory to chroot into
CLI Example:
.. code-block:: bash
salt '*' user.getent |
375,327 | def mapTrace(trace, net, delta, verbose=False):
result = []
paths = {}
if verbose:
print("mapping trace with %s points" % len(trace))
for pos in trace:
newPaths = {}
candidates = net.getNeighboringEdges(pos[0], pos[1], delta)
if len(candidates) == 0 and verbose:
print("Found no candidate edges for %s,%s" % pos)
for edge, d in candidates:
if paths:
minDist = 1e400
minPath = None
for path, dist in paths.iteritems():
if dist < minDist:
if edge == path[-1]:
minPath = path
minDist = dist
elif edge in path[-1].getOutgoing():
minPath = path + (edge,)
minDist = dist
else:
minPath = path + (edge,)
minDist = dist + euclidean(
path[-1].getToNode().getCoord(),
edge.getFromNode().getCoord())
if minPath:
newPaths[minPath] = minDist + d * d
else:
newPaths[(edge,)] = d * d
if not newPaths:
if paths:
result += [e.getID() for e in _getMinPath(paths)]
paths = newPaths
if paths:
return result + [e.getID() for e in _getMinPath(paths)]
return result | matching a list of 2D positions to consecutive edges in a network |
375,328 | def process_for_rdns(self):
for item in self.input_stream:
for endpoint in [, ]:
if endpoint not in item[]:
item[][endpoint+self.domain_postfix] = None
continue
ip_address = net_utils.inet_to_str(item[][endpoint])
if self.ip_lookup_cache.get(ip_address):
domain = self.ip_lookup_cache.get(ip_address)
elif net_utils.is_internal(ip_address):
domain =
elif net_utils.is_special(ip_address):
domain = net_utils.is_special(ip_address)
else:
domain = self._reverse_dns_lookup(ip_address)
item[][endpoint+self.domain_postfix] = domain
self.ip_lookup_cache.set(ip_address, domain)
yield item | Look through my input stream for the fields in ip_field_list and
try to do a reverse dns lookup on those fields. |
375,329 | def _get_json(self,
path,
params=None,
base=JIRA_BASE_URL,
):
url = self._get_url(path, base)
r = self._session.get(url, params=params)
try:
r_json = json_loads(r)
except ValueError as e:
logging.error("%s\n%s" % (e, r.text))
raise e
return r_json | Get the json for a given path and params.
:param path: The subpath required
:type path: str
:param params: Parameters to filter the json query.
:type params: Optional[Dict[str, Any]]
:param base: The Base JIRA URL, defaults to the instance base.
:type base: Optional[str]
:rtype: Union[Dict[str, Any], List[Dict[str, str]]] |
375,330 | def embed_ising(source_linear, source_quadratic, embedding, target_adjacency, chain_strength=1.0):
target_linear = {v: 0. for v in target_adjacency}
for v, bias in iteritems(source_linear):
try:
chain_variables = embedding[v]
except KeyError:
try:
embedding[v] = {unused.pop()}
except KeyError:
raise ValueError(.format(v))
chain_variables = embedding[v]
b = bias / len(chain_variables)
for s in chain_variables:
try:
target_linear[s] += b
except KeyError:
raise ValueError(.format(s))
target_quadratic = {}
for (u, v), bias in iteritems(source_quadratic):
edges = set()
if u not in embedding:
raise ValueError(.format(u))
if v not in embedding:
raise ValueError(.format(v))
for s in embedding[u]:
for t in embedding[v]:
try:
if s in target_adjacency[t] and (t, s) not in edges:
edges.add((s, t))
except KeyError:
raise ValueError(.format(s))
if not edges:
raise ValueError("no edges in target graph between source variables {}, {}".format(u, v))
b = bias / len(edges)
for s, t in edges:
if (s, t) in target_quadratic:
target_quadratic[(s, t)] += b
elif (t, s) in target_quadratic:
target_quadratic[(t, s)] += b
else:
target_quadratic[(s, t)] = b
chain_quadratic = {}
for chain in itervalues(embedding):
chain_quadratic.update(chain_to_quadratic(chain, target_adjacency, chain_strength))
return target_linear, target_quadratic, chain_quadratic | Embeds a logical Ising model onto another graph via an embedding.
Args:
source_linear (dict): The linear biases to be embedded. Should be a dict of
the form {v: bias, ...} where v is a variable in the source model
and bias is the linear bias associated with v.
source_quadratic (dict): The quadratic biases to be embedded. Should be a dict
of the form {(u, v): bias, ...} where u, v are variables in the
source model and bias is the quadratic bias associated with (u, v).
embedding (dict): The mapping from the source graph to the target graph.
Should be of the form {v: {s, ...}, ...} where v is a variable in the
source model and s is a variable in the target model.
target_adjacency (dict/:class:`networkx.Graph`): The adjacency dict of the target
graph. Should be a dict of the form {s: Ns, ...} where s is a variable
in the target graph and Ns is the set of neighbours of s.
chain_strength (float, optional): The quadratic bias that should be used
to create chains.
Returns:
(dict, dict, dict): A 3-tuple containing:
dict: The linear biases of the target problem. In the form {s: bias, ...}
where s is a node in the target graph and bias is the associated linear bias.
dict: The quadratic biases of the target problem. A dict of the form
{(s, t): bias, ...} where (s, t) is an edge in the target graph and bias is
the associated quadratic bias.
dict: The quadratic biases that induce the variables in the target problem to
act as one. A dict of the form {(s, t): -chain_strength, ...} which
is the quadratic biases associated with the chains.
Examples:
>>> source_linear = {'a': 1, 'b': 1}
>>> source_quadratic = {('a', 'b'): -1}
>>> embedding = {'a': [0, 1], 'b': [2]}
>>> target_adjacency = {0: {1, 2}, 1: {0, 2}, 2: {0, 1}}
>>> target_linear, target_quadratic, chain_quadratic = embed_ising(
... source_linear, source_quadratic, embedding, target_adjacency)
>>> target_linear
{0: 0.5, 1: 0.5, 2: 1.0}
>>> target_quadratic
{(0, 2): -0.5, (1, 2): -0.5}
>>> chain_quadratic
{(0, 1): -1.0} |
375,331 | def clear_lock(clear_func, role, remote=None, lock_type='update'):
msg = 'Clearing {0} lock for {1}'.format(lock_type, role)
if remote:
msg += ' remote {0}'.format(remote)
log.debug(msg)
return clear_func(remote=remote, lock_type=lock_type) | Function to allow non-fileserver functions to clear update locks
clear_func
A function reference. This function will be run (with the ``remote``
param as an argument) to clear the lock, and must return a 2-tuple of
lists, one containing messages describing successfully cleared locks,
and one containing messages describing errors encountered.
role
What type of lock is being cleared (gitfs, git_pillar, etc.). Used
solely for logging purposes.
remote
Optional string which should be used in ``func`` to pattern match so
that a subset of remotes can be targeted.
lock_type : update
Which type of lock to clear
Returns the return data from ``clear_func``. |
375,332 | def previous_track(self):
if self._input_func in self._netaudio_func_list:
body = {"cmd0": "PutNetAudioCommand/CurUp",
"cmd1": "aspMainZone_WebUpdateStatus/",
"ZoneName": "MAIN ZONE"}
try:
return bool(self.send_post_command(
self._urls.command_netaudio_post, body))
except requests.exceptions.RequestException:
_LOGGER.error(
"Connection error: previous track command not sent.")
return False | Send previous track command to receiver command via HTTP post. |
375,333 | def _parse_account_information(self, rows):
acc_info = {}
if not rows:
return
for row in rows:
cols_raw = row.find_all('td')
cols = [ele.text.strip() for ele in cols_raw]
field, value = cols
field = field.replace("\xa0", "_").replace(" ", "_").replace(":", "").lower()
value = value.replace("\xa0", " ")
acc_info[field] = value
created = parse_tibia_datetime(acc_info["created"])
loyalty_title = None if acc_info["loyalty_title"] == "(no title)" else acc_info["loyalty_title"]
position = acc_info.get("position")
self.account_information = AccountInformation(created, loyalty_title, position) | Parses the character's account information
Parameters
----------
rows: :class:`list` of :class:`bs4.Tag`, optional
A list of all rows contained in the table. |
375,334 | def _toComparableValue(self, value):
if hasattr(self.currentSortColumn, 'toComparableValue'):
return self.currentSortColumn.toComparableValue(value)
classDef = self.currentSortColumn.__class__
filename = inspect.getsourcefile(classDef)
lineno = inspect.findsource(classDef)[1]
warnings.warn_explicit(
"IColumn implementor " + qual(self.currentSortColumn.__class__) + " "
"does not implement method toComparableValue. This is required since "
"Mantissa 0.6.6.",
DeprecationWarning, filename, lineno)
return value | Trivial wrapper which takes into account the possibility that our sort
column might not have defined the C{toComparableValue} method.
This can probably serve as a good generic template for some
infrastructure to deal with arbitrarily-potentially-missing methods
from certain versions of interfaces, but we didn't take it any further
than it needed to go for this system's fairly meagre requirements.
*Please* feel free to refactor upwards as necessary. |
375,335 | def tracebacks_from_file(fileobj, reverse=False):
if reverse:
lines = deque()
for line in BackwardsReader(fileobj):
lines.appendleft(line)
if tb_head in line:
yield next(tracebacks_from_lines(lines))
lines.clear()
else:
for traceback in tracebacks_from_lines(fileobj):
yield traceback | Generator that yields tracebacks found in a file object
With reverse=True, searches backwards from the end of the file. |
375,336 | def change_vlan_id(self, vlan_id):
first, second = self.nicid.split('-')
firstintf = first.split('.')[0]
secondintf = second.split('.')[0]
newvlan = str(vlan_id).split('-')
self.update(nicid='{}.{}-{}.{}'.format(
firstintf, newvlan[0], secondintf, newvlan[-1])) | Change a VLAN id for an inline interface.
:param str vlan_id: New VLAN id. Can be in format '1-2' or
a single numerical value. If in '1-2' format, this specifies
the vlan ID for the first inline interface and the rightmost
for the second.
:return: None |
375,337 | def parse_tibia_date(date_str) -> Optional[datetime.date]:
try:
t = datetime.datetime.strptime(date_str.strip(), "%b %d %Y")
return t.date()
except (ValueError, AttributeError):
return None | Parses a date from the format used in Tibia.com
Accepted format:
- ``MMM DD YYYY``, e.g. ``Jul 23 2015``
Parameters
-----------
date_str: :class:`str`
The date as represented in Tibia.com
Returns
-----------
:class:`datetime.date`, optional
The represented date. |
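For example, given the accepted format above:
>>> parse_tibia_date("Jul 23 2015")
datetime.date(2015, 7, 23)
>>> parse_tibia_date("23/07/2015") is None
True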
375,338 | def BinToTri(self, a, b):
if (b >= 0):
y = a - b / np.sqrt(3)
z = b * 2 / np.sqrt(3)
x = 100 - (a + b / np.sqrt(3))
return (x, y, z)
else:
y = a + b / np.sqrt(3)
z = b * 2 / np.sqrt(3)
x = 100 - (a - b / np.sqrt(3))
return (x, y, z) | Turn an a-b coord to an x-y-z triangular coord .
if z is negative, calc with its abs then return (a, -b).
:param a,b: the numbers of the a-b coord
:type a,b: float or double are both OK, just numbers
:return: the corresponding x-y-z triangular coord
:rtype: a tuple consist of x,y,z |
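A worked example for the b >= 0 branch, with `diagram` standing in for an instance of the plotting class; note that x + y + z stays 100:
>>> import numpy as np
>>> x, y, z = diagram.BinToTri(40, 10 * np.sqrt(3))    # b / sqrt(3) == 10
>>> round(x), round(y), round(z)
(50, 30, 20)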
375,339 | def run_and_measure(self, quil_program: Program, qubits: List[int] = None, trials: int = 1,
memory_map: Any = None) -> np.ndarray:
if qubits is None:
qubits = sorted(quil_program.get_qubits(indices=True))
if memory_map is not None:
quil_program = self.augment_program_with_memory_values(quil_program, memory_map)
return self.connection._run_and_measure(quil_program=quil_program, qubits=qubits,
trials=trials,
random_seed=self.random_seed) | Run a Quil program once to determine the final wavefunction, and measure multiple times.
Alternatively, consider using ``wavefunction`` and calling ``sample_bitstrings`` on the
resulting object.
For a large wavefunction and a low-medium number of trials, use this function.
On the other hand, if you're sampling a small system many times you might want to
use ``Wavefunction.sample_bitstrings``.
.. note:: If your program contains measurements or noisy gates, this method may not do what
you want. If the execution of ``quil_program`` is **non-deterministic** then the
final wavefunction from which the returned bitstrings are sampled itself only
represents a stochastically generated sample and the outcomes sampled from
*different* ``run_and_measure`` calls *generally sample different bitstring
distributions*.
:param quil_program: The program to run and measure
:param qubits: An optional list of qubits to measure. The order of this list is
respected in the returned bitstrings. If not provided, all qubits used in
the program will be measured and returned in their sorted order.
:param int trials: Number of times to sample from the prepared wavefunction.
:param memory_map: An assignment of classical registers to values, representing an initial
state for the QAM's classical memory.
This is expected to be of type Dict[str, List[Union[int, float]]],
where the keys are memory region names and the values are arrays of
initialization data.
For now, we also support input of type Dict[MemoryReference, Any],
but this is deprecated and will be removed in a future release.
:return: An array of measurement results (0 or 1) of shape (trials, len(qubits)) |
375,340 | def a_neg(self):
na = Point(self.center)
if self.xAxisIsMajor:
na.x -= self.majorRadius
else:
na.y -= self.majorRadius
return na | Negative antipodal point on the major axis, Point class. |
375,341 | def encrypt(key_id, plaintext, encryption_context=None, grant_tokens=None,
region=None, key=None, keyid=None, profile=None):
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = {}
try:
ciphertext = conn.encrypt(
key_id,
plaintext,
encryption_context=encryption_context,
grant_tokens=grant_tokens
)
r['ciphertext'] = ciphertext['CiphertextBlob']
except boto.exception.BotoServerError as e:
r['error'] = __utils__['boto.get_error'](e)
return r | Encrypt plaintext into cipher text using specified key.
CLI example::
salt myminion boto_kms.encrypt 'alias/mykey' 'myplaindata' '{"aws:username":"myuser"}' |
375,342 | def sync_camera_gyro_manual(image_sequence, image_timestamps, gyro_data, gyro_timestamps, full_output=False):
flow = tracking.optical_flow_magnitude(image_sequence)
flow_timestamps = image_timestamps[:-2]
(frame_pair, gyro_idx) = manual_sync_pick(flow, gyro_timestamps, gyro_data)
gyro_abs_max = np.max(np.abs(gyro_data), axis=0)
gyro_normalized = (gyro_abs_max / np.max(gyro_abs_max)).flatten()
flow_normalized = (flow / np.max(flow)).flatten()
rate = lambda ts: len(ts) / (ts[-1] - ts[0])
freq_gyro = rate(gyro_timestamps)
freq_image = rate(flow_timestamps)
logger.debug("Gyro sampling frequency: %.2f Hz, Image sampling frequency: %.2f Hz", freq_gyro, freq_image)
gyro_part = gyro_normalized[gyro_idx[0]:gyro_idx[1]+1]
flow_part = flow_normalized[frame_pair[0]:frame_pair[1]+1]
N = flow_part.size * freq_gyro / freq_image
flow_part_resampled = ssig.resample(flow_part, N).flatten()
corr = ssig.correlate(gyro_part, flow_part_resampled, 'full')
i = np.argmax(corr)
t_0_f = flow_timestamps[frame_pair[0]]
t_1_f = flow_timestamps[frame_pair[1]]
t_off_g = gyro_timestamps[gyro_idx[0] + i]
t_off_f = t_1_f
time_offset = t_off_g - t_off_f
if full_output:
return time_offset, flow, frame_pair
else:
return time_offset | Get time offset that aligns image timestamps with gyro timestamps.
Given an image sequence, and gyroscope data, with their respective timestamps,
calculate the offset that aligns the image data with the gyro data.
The timestamps must only differ by an offset, not a scale factor.
This function finds an approximation of the offset *d* that makes this transformation
t_gyro = t_camera + d
i.e. your new image timestamps should be
image_timestamps_aligned = image_timestamps + d
The offset is calculated using correlation. The parts of the signals to use are
chosen by the user by picking points in a plot window.
The offset is accurate up to about +/- 2 frames, so you should run
*refine_time_offset* if you need better accuracy.
Parameters
---------------
image_sequence : sequence of image data
This must be either a list or generator that provides a stream of
images that are used for optical flow calculations.
image_timestamps : ndarray
Timestamps of the images in image_sequence
gyro_data : (3, N) ndarray
Gyroscope measurements (angular velocity)
gyro_timestamps : ndarray
Timestamps of data in gyro_data
full_output : bool
If False, only return the offset, otherwise return extra data
Returns
--------------
time_offset : float
The time offset to add to image_timestamps to align the image data
with the gyroscope data
flow : ndarray
(Only if full_output=True)
The calculated optical flow magnitude
frame_pair : (int, int)
The frame pair that was picked for synchronization |
375,343 | def is_kibana_cache_incomplete(self, es_cache, k_cache):
es_set = set(es_cache)
k_set = set(k_cache)
return len(es_set - k_set.intersection(es_set)) > 0 | Test if k_cache is incomplete
Assume k_cache is always correct, but could be missing new
fields that es_cache has |
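The check is a plain set difference; a hedged sketch with made-up field caches, where `checker` is an instance of the defining class:
>>> checker.is_kibana_cache_incomplete(['host', 'port', 'new_field'], ['host', 'port'])
True
>>> checker.is_kibana_cache_incomplete(['host'], ['host', 'extra'])
False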
375,344 | def clean_markup(self, orig_str):
for val in self.get_markup_vars():
orig_str = orig_str.replace(val, '')
return orig_str | clean markup from string |
375,345 | def clean(self):
if self.lookup == '?':
return
else:
lookups = self.lookup.split(LOOKUP_SEP)
opts = self.model_def.model_class()._meta
valid = True
while len(lookups):
lookup = lookups.pop(0)
try:
field = opts.get_field(lookup)
except FieldDoesNotExist:
valid = False
else:
if isinstance(field, models.ForeignKey):
opts = get_remote_field_model(field)._meta
elif len(lookups):
valid = False
finally:
if not valid:
msg = _("This field doesnlookup': [msg]}) | Make sure the lookup makes sense |
375,346 | def up_to_date(self):
VersionInfo = self.get_latest_version()
CurrentVersion = VersionInfo.get()
LatestVersion = VersionInfo.get()
if CurrentVersion == LatestVersion:
log.info()
log.debug(.format(LatestVersion, LatestVersion))
return True
else:
log.warning()
log.debug(.format(LatestVersion, LatestVersion))
return False | Check if Team Password Manager is up to date. |
375,347 | def tasks(self, pattern=None, negate=False, state=None, limit=None, reverse=True,
params=None, success=False, error=True):
request = clearly_pb2.FilterTasksRequest(
tasks_filter=clearly_pb2.PatternFilter(pattern=pattern or '.',
negate=negate),
state_pattern=state or '.', limit=limit, reverse=reverse
)
for task in about_time(ClearlyClient._fetched_callback, self._stub.filter_tasks(request)):
ClearlyClient._display_task(task, params, success, error) | Filters stored tasks and displays their current statuses.
Note that, to be able to list the tasks sorted chronologically, celery retrieves
tasks from the LRU event heap instead of the dict storage, so the total number
of tasks fetched may be different than the server `max_tasks` setting. For
instance, the `limit` field refers to max events searched, not max tasks.
Args:
Filter args:
pattern (Optional[str]): a pattern to filter tasks
ex.: '^dispatch|^email' to filter names starting with that
or 'dispatch.*123456' to filter that exact name and number
or even '123456' to filter that exact number anywhere.
negate (bool): if True, finds tasks that do not match criteria
state (Optional[str]): a celery task state to filter
limit (int): the maximum number of events to fetch
if None or 0, fetches all.
reverse (bool): if True (default), shows the most recent first
Display args:
params (Optional[bool]): if True shows args and kwargs in the first and
last seen states, if False never shows, and if None follows the
success and error arguments.
default is None
success (bool): if True shows successful tasks' results
default is False
error (bool): if True shows failed and retried tasks' tracebacks.
default is True, as you're monitoring to find errors, right? |
375,348 | def rows_above_layout(self):
if self._in_alternate_screen:
return 0
elif self._min_available_height > 0:
total_rows = self.output.get_size().rows
last_screen_height = self._last_screen.height if self._last_screen else 0
return total_rows - max(self._min_available_height, last_screen_height)
else:
raise HeightIsUnknownError() | Return the number of rows visible in the terminal above the layout. |
375,349 | def players(timeout=timeout):
rc = requests.get(.format(card_info_url, ), timeout=timeout).json()
players = {}
for i in rc[] + rc[]:
players[i[]] = {: i[],
: i[],
: i[],
: i.get(),
: i[]}
return players | Return all players in dict {id: c, f, l, n, r}.
id, rank, nationality(?), first name, last name. |
375,350 | def run_mnist_DistilledSGLD(num_training=50000, gpu_id=None):
X, Y, X_test, Y_test = load_mnist(num_training)
minibatch_size = 100
if num_training >= 10000:
num_hidden = 800
total_iter_num = 1000000
teacher_learning_rate = 1E-6
student_learning_rate = 0.0001
teacher_prior = 1
student_prior = 0.1
perturb_deviation = 0.1
else:
num_hidden = 400
total_iter_num = 20000
teacher_learning_rate = 4E-5
student_learning_rate = 0.0001
teacher_prior = 1
student_prior = 0.1
perturb_deviation = 0.001
teacher_net = get_mnist_sym(num_hidden=num_hidden)
logsoftmax = LogSoftmax()
student_net = get_mnist_sym(output_op=logsoftmax, num_hidden=num_hidden)
data_shape = (minibatch_size,) + X.shape[1::]
teacher_data_inputs = {'data': nd.zeros(data_shape, ctx=dev(gpu_id)),
'softmax_label': nd.zeros((minibatch_size,), ctx=dev(gpu_id))}
student_data_inputs = {'data': nd.zeros(data_shape, ctx=dev(gpu_id)),
'softmax_label': nd.zeros((minibatch_size, 10), ctx=dev(gpu_id))}
teacher_initializer = BiasXavier(factor_type="in", magnitude=1)
student_initializer = BiasXavier(factor_type="in", magnitude=1)
student_exe, student_params, _ = \
DistilledSGLD(teacher_sym=teacher_net, student_sym=student_net,
teacher_data_inputs=teacher_data_inputs,
student_data_inputs=student_data_inputs,
X=X, Y=Y, X_test=X_test, Y_test=Y_test, total_iter_num=total_iter_num,
student_initializer=student_initializer,
teacher_initializer=teacher_initializer,
student_optimizing_algorithm="adam",
teacher_learning_rate=teacher_learning_rate,
student_learning_rate=student_learning_rate,
teacher_prior_precision=teacher_prior, student_prior_precision=student_prior,
perturb_deviation=perturb_deviation, minibatch_size=100, dev=dev(gpu_id)) | Run DistilledSGLD on mnist dataset |
375,351 | def dumpRule(serviceCls, rule, prefix):
rulePath = prefix + rule.rule
rulePath = re.sub(, , rulePath)
cor = ConvertedRule(
rulePath=rulePath,
operationId=rule.endpoint
)
for meth in sorted(rule.methods or []):
cor.methods.append(meth)
origEP = cor.operationId
if origEP.endswith('_branch'):
origEP = origEP[:-7]
cor.branch = True
cor.operationId = '%s.%s' % (serviceCls.__name__, origEP)
meth = getattr(serviceCls, origEP)
if hasattr(meth, '_subKleinQname'):
cor.subKlein = meth._subKleinQname
cor.doco = OpenAPIExtendedDocumentation.fromObject(meth, decode=True)
return cor | Create an in-between representation of the rule, so we can eventually convert it to OpenAPIPathItem with OpenAPIOperation(s) |
375,352 | def _parse_linear_expression(expression, expanded=False, **kwargs):
offset = 0
constant = None
if expression.is_Add:
coefficients = expression.as_coefficients_dict()
elif expression.is_Mul:
coefficients = {expression.args[1]: expression.args[0]}
elif expression.is_Symbol:
coefficients = {expression: 1}
elif expression.is_Number:
coefficients = {}
else:
raise ValueError("Expression {} seems to be invalid".format(expression))
for var in coefficients:
if not (var.is_Symbol):
if var == one:
constant = var
offset = float(coefficients[var])
elif expanded:
raise ValueError("Expression {} seems to be invalid".format(expression))
else:
coefficients = _parse_linear_expression(expression, expanded=True, **kwargs)
if constant is not None:
del coefficients[constant]
return offset, coefficients | Parse the coefficients of a linear expression (linearity is assumed).
Returns a dictionary of variable: coefficient pairs. |
375,353 | def output_ip(gandi, ip, datacenters, vms, ifaces, output_keys, justify=11):
output_generic(gandi, ip, output_keys, justify)
if in output_keys:
iface = ifaces.get(ip[])
type_ = if iface.get() else
output_line(gandi, , type_, justify)
if type_ == :
output_line(gandi, , iface[][], justify)
if in output_keys:
iface = ifaces.get(ip[])
vm_id = iface.get()
if vm_id:
vm_name = vms.get(vm_id, {}).get()
if vm_name:
output_line(gandi, , vm_name, justify)
if in output_keys:
for dc in datacenters:
if dc[] == ip.get(,
ip.get(, {}).get()):
dc_name = dc.get(, dc.get(, ))
break
output_line(gandi, , dc_name, justify) | Helper to output an ip information. |
375,354 | def velocity_confidence_transition(data, vkey='velocity', scale=10, copy=False):
adata = data.copy() if copy else data
if vkey not in adata.layers.keys():
raise ValueError('You need to run `tl.velocity` first.')
idx = np.array(adata.var[vkey + '_genes'].values, dtype=bool)
T = transition_matrix(adata, vkey=vkey, scale=scale)
dX = T.dot(adata.layers['Ms'][:, idx]) - adata.layers['Ms'][:, idx]
dX -= dX.mean(1)[:, None]
V = adata.layers[vkey][:, idx].copy()
V -= V.mean(1)[:, None]
adata.obs[vkey + '_confidence_transition'] = prod_sum_var(dX, V) / (norm(dX) * norm(V))
logg.hint('added \'' + vkey + '_confidence_transition\' (adata.obs)')
return adata if copy else None | Computes confidences of velocity transitions.
Arguments
---------
data: :class:`~anndata.AnnData`
Annotated data matrix.
vkey: `str` (default: `'velocity'`)
Name of velocity estimates to be used.
scale: `float` (default: 10)
Scale parameter of gaussian kernel.
copy: `bool` (default: `False`)
Return a copy instead of writing to adata.
Returns
-------
Returns or updates `adata` with the attributes
velocity_confidence_transition: `.obs`
Confidence of transition for each cell |
375,355 | def iter_coords(obj):
if isinstance(obj, (tuple, list)):
coords = obj
elif 'features' in obj:
coords = [geom['geometry']['coordinates'] for geom in obj['features']]
elif 'geometry' in obj:
coords = obj['geometry']['coordinates']
else:
coords = obj.get('coordinates', obj)
for coord in coords:
if isinstance(coord, (float, int)):
yield tuple(coords)
break
else:
for f in iter_coords(coord):
yield f | Returns all the coordinate tuples from a geometry or feature. |
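For instance, on a small GeoJSON-like dict (using the keys assumed in the code above):
>>> feature = {'geometry': {'coordinates': [[(0.0, 0.0), (1.0, 2.0)]]}}
>>> list(iter_coords(feature))
[(0.0, 0.0), (1.0, 2.0)]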
375,356 | def get_all_slots(class_):
all_slots = []
parent_param_classes = [c for c in classlist(class_)[1::]]
for c in parent_param_classes:
if hasattr(c, '__slots__'):
all_slots+=c.__slots__
return all_slots | Return a list of slot names for slots defined in class_ and its
superclasses. |
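A throwaway sketch of what the helper collects; the exact order depends on how this module's classlist walks the hierarchy:
>>> class A(object):
...     __slots__ = ['x']
...
>>> class B(A):
...     __slots__ = ['y', 'z']
...
>>> get_all_slots(B)
['x', 'y', 'z']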
375,357 | def declare_set(self, name, sep=os.pathsep):
self._declare_special(name, sep, SetVariable) | Declare an environment variable as a set-like special variable.
This can be used even if the environment variable is not
present.
:param name: The name of the environment variable that should
be considered set-like.
:param sep: The separator to be used. Defaults to the value
of ``os.pathsep``. |
375,358 | def string_repr(s):
if compat.is_bytes(s):
res = "{!r}: ".format(s)
for b in s:
if type(b) is str:
b = ord(b)
res += "%02x " % b
return res
return "{}".format(s) | Return a string as hex dump. |
375,359 | def find_output_without_tag(self, tag):
tag = tag.upper()
return FileList([i for i in self if tag not in i.tags]) | Find all files who do not have tag in self.tags |
375,360 | def save_species_fitness(self, delimiter=' ', null_value='NA', filename='species_fitness.csv'):
with open(filename, 'w') as f:
w = csv.writer(f, delimiter=delimiter)
for s in self.get_species_fitness(null_value):
w.writerow(s) | Log species' average fitness throughout evolution. |
375,361 | def setInstitutionLogo(self, pathList: tuple):
for p in pathList:
url = None
if type(p) in (list, tuple):
p, url = p
logo = QtSvg.QSvgWidget(p)
s = logo.sizeHint()
aR = s.height() / s.width()
h = 150
w = h / aR
logo.setFixedSize(int(w), int(h))
self.layout().addWidget(logo)
if url:
logo.mousePressEvent = lambda evt, u=url: self._openUrl(evt, u) | takes one or more [logo].svg paths
if logo should be clickable, set
pathList = (
(my_path1.svg,www.something1.html),
(my_path2.svg,www.something2.html),
...) |
375,362 | def lrange(self, name, start, end):
return self.execute_command('LRANGE', name, start, end) | Return a slice of the list ``name`` between
position ``start`` and ``end``
``start`` and ``end`` can be negative numbers just like
Python slicing notation |
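A hedged usage sketch, assuming `r` is a connected redis client exposing this method (standard LRANGE semantics, both ends inclusive):
>>> r.rpush('queue', 'a', 'b', 'c', 'd')
4
>>> r.lrange('queue', 0, 2)
[b'a', b'b', b'c']
>>> r.lrange('queue', -2, -1)    # negative indexes count from the tail
[b'c', b'd']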
375,363 | def delete_record(self, identifier=None, rtype=None, name=None, content=None, **kwargs):
if not rtype and kwargs.get('type'):
warnings.warn('Parameter "type" is deprecated, use "rtype" instead.',
DeprecationWarning)
rtype = kwargs.get('type')
return self._delete_record(identifier=identifier, rtype=rtype, name=name, content=content) | Delete an existing record.
If record does not exist, do nothing.
If an identifier is specified, use it, otherwise do a lookup using type, name and content. |
375,364 | def write_content(self, content, destination):
directory = os.path.dirname(destination)
if directory and not os.path.exists(directory):
os.makedirs(directory)
with io.open(destination, 'w', encoding='utf-8') as f:
f.write(content)
return destination | Write given content to destination path.
It will create needed directory structure first if it contain some
directories that does not allready exists.
Args:
content (str): Content to write to target file.
destination (str): Destination path for target file.
Returns:
str: Path where target file has been written. |
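A minimal usage sketch; `builder` and the paths are made up, and intermediate directories are created on demand:
>>> builder.write_content(u'body { color: black; }', '/tmp/build/css/app.css')
'/tmp/build/css/app.css'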
375,365 | def draw(self, startpoint=(0, 0), mode=, showfig=False):
p0 = startpoint
angle = 0.0
patchlist = []
anotelist = []
xmin0, xmax0, ymin0, ymax0 = 0, 0, 0, 0
xmin, xmax, ymin, ymax = 0, 0, 0, 0
for ele in self._lattice_eleobjlist:
ele.setDraw(p0=p0, angle=angle, mode=mode)
angle += ele.next_inc_angle
patchlist.extend(ele._patches)
if hasattr(ele, ):
anotelist.append(ele._anote)
try:
p0 = ele.next_p0
xyrange = ele._patches[0].get_path().get_extents()
xmin, xmax = xyrange.xmin, xyrange.xmax
ymin, ymax = xyrange.ymin, xyrange.ymax
except:
pass
xmin0 = min(xmin, xmin0)
xmax0 = max(xmax, xmax0)
ymin0 = min(ymin, ymin0)
ymax0 = max(ymax, ymax0)
if showfig:
fig = plt.figure()
ax = fig.add_subplot(111, aspect=)
[ax.add_patch(i) for i in patchlist]
[ax.annotate(s=i[],
xy=i[],
xytext=i[],
arrowprops=dict(arrowstyle=),
rotation=-90,
fontsize=)
for i in anotelist]
ax.set_xlim([xmin0 * 2, xmax0 * 2])
ax.set_ylim([ymin0 * 2, ymax0 * 2])
plt.show()
return patchlist, anotelist, (xmin0, xmax0), (ymin0, ymax0) | lattice visualization
:param startpoint: start drawing point coords, default: (0, 0)
:param showfig: show figure or not, default: False
:param mode: artist mode, 'plain' or 'fancy', 'plain' by default
:return: patchlist, anotelist, (xmin0, xmax0), (ymin0, ymax0)
patchlist: list of element patches
anotelist: list of annotations
(xmin0, xmax0) and (ymin0, ymax0) are ploting range |
375,366 | def get_or_add_dPt_for_point(self, idx):
matches = self.xpath('c:dPt[c:idx[@val="%d"]]' % idx)
if matches:
return matches[0]
dPt = self._add_dPt()
dPt.idx.val = idx
return dPt | Return the `c:dPt` child representing the visual properties of the
data point at index *idx*. |
375,367 | def get_module_item_sequence(self, course_id, asset_id=None, asset_type=None):
path = {}
data = {}
params = {}
path["course_id"] = course_id
if asset_type is not None:
self._validate_enum(asset_type, ["ModuleItem", "File", "Page", "Discussion", "Assignment", "Quiz", "ExternalTool"])
params["asset_type"] = asset_type
if asset_id is not None:
params["asset_id"] = asset_id
self.logger.debug("GET /api/v1/courses/{course_id}/module_item_sequence with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/courses/{course_id}/module_item_sequence".format(**path), data=data, params=params, single_item=True) | Get module item sequence.
Given an asset in a course, find the ModuleItem it belongs to, and also the previous and next Module Items
in the course sequence. |
375,368 | def get_widget(self, index=None, path=None, tabs=None):
if (index and tabs) or (path and tabs):
return tabs.widget(index)
elif self.plugin:
return self.get_plugin_tabwidget(self.plugin).currentWidget()
else:
return self.plugins_tabs[0][0].currentWidget() | Get widget by index.
If no tabs and index specified the current active widget is returned. |
375,369 | def get(self, spike_ids, channels=None):
if isinstance(spike_ids, slice):
spike_ids = _range_from_slice(spike_ids,
start=0,
stop=self.n_spikes,
)
if not hasattr(spike_ids, '__len__'):
spike_ids = [spike_ids]
if channels is None:
channels = slice(None, None, None)
nc = self.n_channels
else:
channels = np.asarray(channels, dtype=np.int32)
assert np.all(channels < self.n_channels)
nc = len(channels)
spike_ids = _as_array(spike_ids)
n_spikes = len(spike_ids)
shape = (n_spikes, nc, self._n_samples_extract)
waveforms = np.zeros(shape, dtype=np.float32)
if self.n_samples_trace == 0:
return np.transpose(waveforms, (0, 2, 1))
for i, spike_id in enumerate(spike_ids):
assert 0 <= spike_id < self.n_spikes
time = self._spike_samples[spike_id]
try:
w = self._load_at(time, channels)
except ValueError as e:
logger.warn("Error while loading waveform: %s", str(e))
continue
assert w.shape == (self._n_samples_extract, nc)
waveforms[i, :, :] = w.T
waveforms_f = waveforms.reshape((-1, self._n_samples_extract))
unmasked = waveforms_f.max(axis=1) != 0
waveforms_f[unmasked] = self._filter(waveforms_f[unmasked], axis=1)
waveforms_f = waveforms_f.reshape((n_spikes, nc,
self._n_samples_extract))
margin_before, margin_after = self._filter_margin
if margin_after > 0:
assert margin_before >= 0
waveforms_f = waveforms_f[:, :, margin_before:-margin_after]
assert waveforms_f.shape == (n_spikes,
nc,
self.n_samples_waveforms,
)
return np.transpose(waveforms_f, (0, 2, 1)) | Load the waveforms of the specified spikes. |
375,370 | def batchcancel_openOrders(self, acc_id, symbol=None, side=None, size=None, _async=False):
params = {}
path =
params[] = acc_id
if symbol:
params[] = symbol
if side:
assert side in [, ]
params[] = side
if size:
params[] = size
return api_key_get(params, path, _async=_async) | Batch-cancel open (unfilled) orders.
:param acc_id: account ID
:param symbol: trading pair
:param side: order side ('buy' or 'sell')
:param size:
:param _async:
:return: |
375,371 | def libvlc_video_set_spu_delay(p_mi, i_delay):
f = _Cfunctions.get('libvlc_video_set_spu_delay', None) or \
_Cfunction('libvlc_video_set_spu_delay', ((1,), (1,),), None,
ctypes.c_int, MediaPlayer, ctypes.c_int64)
return f(p_mi, i_delay) | Set the subtitle delay. This affects the timing of when the subtitle will
be displayed. Positive values result in subtitles being displayed later,
while negative values will result in subtitles being displayed earlier.
The subtitle delay will be reset to zero each time the media changes.
@param p_mi: media player.
@param i_delay: time (in microseconds) the display of subtitles should be delayed.
@return: 0 on success, -1 on error.
@version: LibVLC 2.0.0 or later. |
375,372 | def _local(self, args):
args.pop(0)
command = args[0]
if command == 'install':
self._local_install(args)
elif command == 'files':
self._local_list_files(args)
elif command == 'info':
self._local_info(args)
else:
raise SPMInvocationError('Invalid local command \'{0}\''.format(command)) | Process local commands |
375,373 | def parse_default_arguments(self, default_args_sample):
parsed_arguments_dict = {}
default_arguments = list(self._arguments.default_arguments.values())
expected_length = len(default_arguments)
real_length = len(default_args_sample)
default_args_count = len([item for item in default_arguments if item.default is not None])
if not self._arguments.has_optional_default_argument and (default_args_sample is None or expected_length != real_length):
raise ArgumentException("Command require {} positional argument(s), found {}".format(
expected_length,
real_length
))
elif self._arguments.has_optional_default_argument and default_args_sample is not None and real_length < expected_length - default_args_count:
raise ArgumentException("Command require {} or {} positional argument(s), found {}".format(
expected_length,
expected_length - default_args_count,
real_length
))
for index in range(0, expected_length):
arg_meta = default_arguments[index]
try:
arg = default_args_sample[index]
except IndexError:
arg = arg_meta.default
try:
if arg_meta.value_type is not None:
arg = self.__convert_value_to_type(arg, arg_meta.value_type)
parsed_arguments_dict[arg_meta.name] = arg
except (TypeError, ValueError):
raise ArgumentException("Invalid argument type - expected {}, got {}".format(arg_meta.value_type.__name__, type(arg).__name__))
return parsed_arguments_dict | :type default_args_sample list
:rtype dict |
375,374 | def detect_compiler(libpath):
from os import waitpid, path
from subprocess import Popen, PIPE
command = "nm {0}".format(path.abspath(libpath))
child = Popen(command, shell=True, executable="/bin/bash", stdout=PIPE)
waitpid(child.pid, 0)
contents = child.stdout.readlines()
i = 0
found = False
while i < len(contents) and found == False:
if "_MOD_" in contents[i]:
found = "gfortran"
elif "_mp_" in contents[i]:
found = "ifort"
i += 1
return found | Determines the compiler used to compile the specified shared library by
using the system utilities.
:arg libpath: the full path to the shared library *.so file. |
375,375 | def eventize(self, granularity):
email = {}
email[Email.EMAIL_ID] = []
email[Email.EMAIL_EVENT] = []
email[Email.EMAIL_DATE] = []
email[Email.EMAIL_OWNER] = []
email[Email.EMAIL_SUBJECT] = []
email[Email.EMAIL_BODY] = []
email[Email.EMAIL_ORIGIN] = []
events = pandas.DataFrame()
for item in self.items:
origin = item["origin"]
email_data = item["data"]
if granularity == 1:
email[Email.EMAIL_ID].append(email_data["Message-ID"])
email[Email.EMAIL_EVENT].append(Email.EVENT_OPEN)
try:
email[Email.EMAIL_DATE].append(str_to_datetime(email_data["Date"], ignoretz=True))
except KeyError:
email[Email.EMAIL_DATE].append(str_to_datetime("1970-01-01"))
email[Email.EMAIL_OWNER].append(email_data["From"])
email[Email.EMAIL_SUBJECT].append(email_data["Subject"])
try:
email[Email.EMAIL_BODY].append(email_data["body"]["plain"])
except KeyError:
email[Email.EMAIL_BODY].append("None")
email[Email.EMAIL_ORIGIN].append(origin)
if granularity == 2:
pass
if granularity == 3:
pass
events[Email.EMAIL_ID] = email[Email.EMAIL_ID]
events[Email.EMAIL_EVENT] = email[Email.EMAIL_EVENT]
events[Email.EMAIL_DATE] = email[Email.EMAIL_DATE]
events[Email.EMAIL_OWNER] = email[Email.EMAIL_OWNER]
events[Email.EMAIL_SUBJECT] = email[Email.EMAIL_SUBJECT]
events[Email.EMAIL_BODY] = email[Email.EMAIL_BODY]
events[Email.EMAIL_ORIGIN] = email[Email.EMAIL_ORIGIN]
return events | This splits the JSON information found at self.events into the
several events. For this there are three different levels of time
consuming actions: 1-soft, 2-medium and 3-hard.
Level 1 provides events about emails
Level 2 not implemented
Level 3 not implemented
:param granularity: Levels of time consuming actions to calculate events
:type granularity: integer
:returns: Pandas dataframe with splitted events.
:rtype: pandas.DataFrame |
375,376 | def proc_exit(self, details):
log = self._params.get('log', self._discard)
pid = self._key
exit_code = details
why = statusfmt(exit_code)
proc = None
for p in self._parent._proc_state:
if pid == p.pid:
proc = p
if proc is None:
log.error("Legion reported exit of unknown pid %s for task %r which %s", pid, self._name, why)
return
now = time.time()
proc.pid = None
proc.exit_code = exit_code
proc.exited = now
proc.pending_sig = None
proc.next_sig = None
self._parent._last_status = exit_code
extant = len(self._parent.get_pids())
if extant == 0:
self._parent._started = None
self._parent._stopping = None
self._parent._stopped = now
self._parent.onexit()
else:
log.debug("Task still has %d process%s running", self._name, extant, ses(extant, ))
if exit_code and not self._parent._terminated:
log.warning("Task pid %d %s -- unexpected error exit", self._name, pid, why)
else:
log.info("Task pid %d %s", self._name, pid, why) | Handle the event when one of the task processes exits. |
375,377 | def sort_objs_by_attr(objs, key, reverse=False):
if len(objs) == 0:
return []
if not hasattr(objs[0], key):
raise AttributeError('{0} object has no attribute {1}'.format(type(objs[0]), key))
result = sorted(objs, key=attrgetter(key), reverse=reverse)
return result | Sort objects that do not natively support comparison by one of their attributes.
:param:
* objs: (list) the objects to sort
* key: (string) the object attribute to sort by
* reverse: (bool) whether to reverse the sort order; defaults to False (no reversal)
:return:
* result: (list) the sorted list of objects
Example::
print('--- sorted_objs_by_attr demo---')
class User(object):
def __init__(self, user_id):
self.user_id = user_id
users = [User(23), User(3), User(99)]
result = sorted_objs_by_attr(users, key='user_id')
reverse_result = sorted_objs_by_attr(users, key='user_id', reverse=True)
print([item.user_id for item in result])
print([item.user_id for item in reverse_result])
print('---')
Output::
--- sorted_objs_by_attr demo---
[3, 23, 99]
[99, 23, 3]
--- |
375,378 | def parse_terminal_token(cls, parser, text):
token_regex = cls.token_regex
if parser._parsing_texkey_expression:
token_regex = cls.texkey_token_regex
parser._parsing_texkey_expression = False
match = token_regex.match(text)
if match:
matched_token = match.group(0)
remaining_text = text[len(matched_token):]
if cls.starts_with_colon.match(remaining_text):
return text, \
SyntaxError("parsing a keyword (token followed by \":\"): \"" + repr(matched_token) + "\"")
if not parser._parsing_parenthesized_simple_values_expression \
and matched_token in INSPIRE_KEYWORDS_SET:
return text, SyntaxError("parsing a keyword (non shortened INSPIRE keyword)")
result = remaining_text, matched_token
else:
result = text, SyntaxError("expecting match on " + repr(cls.token_regex.pattern))
return result | Parses a terminal token that doesn't contain parentheses nor colon symbol.
Note:
Handles a special case of tokens where a ':' is needed (for `texkey` queries).
If we're parsing text not in parentheses, then some DSL keywords (e.g. And, Or, Not, defined above) should
not be recognized as terminals, thus we check if they are in the Keywords table (namespace like structure
handled by PyPeg).
This is done only when we are not parsing a parenthesized SimpleValue.
Also, helps in supporting more implicit-and queries cases (last two checks). |
375,379 | def build_library(tile, libname, chip):
dirs = chip.build_dirs()
output_name = '%s_%s.a' % (libname, chip.arch_name())
if os.path.exists('firmware'):
VariantDir(dirs['build'], os.path.join('firmware', 'src'), duplicate=0)
else:
VariantDir(dirs['build'], 'src', duplicate=0)
library_env = setup_environment(chip)
library_env['OUTPUT'] = output_name
library_env['OUTPUT_PATH'] = os.path.join(dirs['build'], output_name)
library_env['BUILD_DIR'] = dirs['build']
tilebus_defs = setup_dependencies(tile, library_env)
tilebus_defs += tile.find_products('tilebus_definitions')
compile_tilebus(tilebus_defs, library_env, header_only=True)
SConscript(os.path.join(dirs['build'], 'SConscript'), exports='library_env')
library_env.InstallAs(os.path.join(dirs['output'], output_name), os.path.join(dirs['build'], output_name))
for src, dst in chip.property('copy_files', []):
srcpath = os.path.join(*src)
destpath = os.path.join(dirs['output'], dst)
library_env.InstallAs(destpath, srcpath)
return os.path.join(dirs['output'], output_name) | Build a static ARM cortex library |
375,380 | def raster_reclassify(srcfile, v_dict, dstfile, gdaltype=GDT_Float32):
src_r = RasterUtilClass.read_raster(srcfile)
src_data = src_r.data
dst_data = numpy.copy(src_data)
if gdaltype == GDT_Float32 and src_r.dataType != GDT_Float32:
gdaltype = src_r.dataType
no_data = src_r.noDataValue
new_no_data = DEFAULT_NODATA
if gdaltype in [GDT_Unknown, GDT_Byte, GDT_UInt16, GDT_UInt32]:
new_no_data = 0
if not MathClass.floatequal(new_no_data, src_r.noDataValue):
if src_r.noDataValue not in v_dict:
v_dict[src_r.noDataValue] = new_no_data
no_data = new_no_data
for (k, v) in iteritems(v_dict):
dst_data[src_data == k] = v
RasterUtilClass.write_gtiff_file(dstfile, src_r.nRows, src_r.nCols, dst_data,
src_r.geotrans, src_r.srs, no_data, gdaltype) | Reclassify raster by given classifier dict.
Args:
srcfile: source raster file.
v_dict: classifier dict.
dstfile: destination file path.
gdaltype (:obj:`pygeoc.raster.GDALDataType`): GDT_Float32 as default. |
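A hedged call sketch, assuming the function is exposed as a static method of pygeoc's RasterUtilClass as the body above suggests; the file paths and the classifier mapping are placeholders.
from pygeoc.raster import RasterUtilClass

# Collapse land-use codes 1-3 into two broader classes; paths are placeholders.
reclass_map = {1: 10, 2: 10, 3: 20}
RasterUtilClass.raster_reclassify('landuse.tif', reclass_map, 'landuse_reclass.tif')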
375,381 | def unregister_node_path(self, node):
path = node.file if hasattr(node, "file") else node.path
path = foundations.strings.to_string(path)
return self.unregister_file(path) | Unregisters given Node path from the **file_system_events_manager**.
:param node: Node.
:type node: FileNode or DirectoryNode or ProjectNode
:return: Method success.
:rtype: bool |
375,382 | def delete(filething):
t = OggOpus(filething)
filething.fileobj.seek(0)
t.delete(filething) | delete(filething)
Arguments:
filething (filething)
Raises:
mutagen.MutagenError
Remove tags from a file. |
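A hedged usage sketch with mutagen's module-level helper; the file name is a placeholder.
from mutagen.oggopus import OggOpus, delete

delete('track.opus')                # strip all tags; placeholder file name
print(OggOpus('track.opus').tags)   # the remaining comment block is now empty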
375,383 | def ScreenGenerator(nfft, r0, nx, ny):
while 1:
layers = GenerateTwoScreens(nfft, r0)
for iLayer in range(2):
for iy in range(int(nfft/ny)):
for ix in range(int(nfft/nx)):
yield layers[iLayer][iy*ny:iy*ny+ny, ix*nx:ix*nx+nx] | Generate an infinite series of rectangular phase screens
Uses an FFT screen generator to make a large screen and then
returns non-overlapping subsections of it |
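A usage sketch that assumes ScreenGenerator (and its GenerateTwoScreens helper) is importable from the surrounding module; the parameter values are illustrative.
from itertools import islice

# Pull three 32x32 phase-screen patches from the infinite generator,
# using a 256-point FFT screen with a Fried parameter r0 of 16 pixels.
gen = ScreenGenerator(nfft=256, r0=16.0, nx=32, ny=32)
for patch in islice(gen, 3):
    print(patch.shape)  # (32, 32)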
375,384 | def hacking_has_only_comments(physical_line, filename, lines, line_number):
if line_number == 1 and all(map(EMPTY_LINE_RE.match, lines)):
return (0, "H104: File contains nothing but comments") | Check for empty files with only comments
H104 empty file with only comments |
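A quick check of the rule against a synthetic file body; the EMPTY_LINE_RE pattern below is an assumption standing in for the one defined elsewhere in the hacking plugin.
import re

# Assumed pattern: a line that is blank or contains only a comment.
EMPTY_LINE_RE = re.compile(r'^\s*(#.*)?$')

lines = ['# placeholder module\n', '# TODO: add code\n', '\n']
if all(map(EMPTY_LINE_RE.match, lines)):
    print('H104: File contains nothing but comments')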
375,385 | def is_entry_safe(self, entry):
normalized = os.path.normpath(entry)
if normalized.startswith(os.sep) or normalized.startswith(".." + os.sep):
self.log([u"Entry is not safe", entry])
return False
self.log([u"Entry is safe", entry])
return True | Return ``True`` if ``entry`` can be safely extracted,
that is, if it does start with ``/`` or ``../``
after path normalization, ``False`` otherwise.
:rtype: bool |
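The normalization check can be demonstrated standalone; this sketch mirrors the logic without the class plumbing and assumes POSIX-style paths.
import os

def entry_is_safe(entry):
    # Reject absolute paths and paths that escape upward after normalization.
    normalized = os.path.normpath(entry)
    return not (normalized.startswith(os.sep) or normalized.startswith('..' + os.sep))

print(entry_is_safe('docs/readme.txt'))    # True
print(entry_is_safe('../../etc/passwd'))   # False
print(entry_is_safe('/etc/passwd'))        # False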
375,386 | def setupFog(self):
fogcfg = self.cfg["graphics.fogSettings"]
if not fogcfg["enable"]:
return
glEnable(GL_FOG)
if fogcfg["color"] is None:
fogcfg["color"] = self.cfg["graphics.clearColor"]
glFogfv(GL_FOG_COLOR, (GLfloat * 4)(*fogcfg["color"]))
glHint(GL_FOG_HINT, GL_DONT_CARE)
glFogi(GL_FOG_MODE, GL_LINEAR)
glFogf(GL_FOG_START, fogcfg["start"])
glFogf(GL_FOG_END, fogcfg["end"]) | Sets the fog system up.
The specific options available are documented under :confval:`graphics.fogSettings`\ . |
375,387 | def _send_container_healthcheck_sc(self, containers_by_id):
for container in containers_by_id.itervalues():
healthcheck_tags = self._get_tags(container, HEALTHCHECK)
match = False
for tag in healthcheck_tags:
for rule in self.whitelist_patterns:
if re.match(rule, tag):
match = True
self._submit_healthcheck_sc(container)
break
if match:
break | Send health service checks for containers. |
375,388 | def t_HEXCONSTANT(self, t):
r'0x[0-9A-Fa-f]+'
t.value = int(t.value, 16)
t.type = 'INTCONSTANT'
return t | r'0x[0-9A-Fa-f]+' |
375,389 | def processing_blocks(self):
sbi_ids = Subarray(self.get_name()).sbi_ids
pbs = []
for sbi_id in sbi_ids:
sbi = SchedulingBlockInstance(sbi_id)
pbs.append(sbi.processing_block_ids)
return 'processing_blocks', pbs | Return list of PBs associated with the subarray.
<http://www.esrf.eu/computing/cs/tango/pytango/v920/server_api/server.html#PyTango.server.pipe> |
375,390 | def next_power_of_2(n):
n -= 1
shift = 1
while (n + 1) & n:
n |= n >> shift
shift *= 2
return max(4, n + 1) | Return next power of 2 greater than or equal to n |
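A few worked values, assuming the function above is in scope; note the floor of 4 enforced by the final max().
for n in (1, 4, 5, 17, 64, 100):
    print(n, next_power_of_2(n))
# Prints: 1 4, 4 4, 5 8, 17 32, 64 64, 100 128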
375,391 | def _get_app_path(url):
app_path = urlparse(url).path.rstrip("/")
if not app_path.startswith("/"):
app_path = "/" + app_path
return app_path | Extract the app path from a Bokeh server URL
Args:
url (str) :
Returns:
str |
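Worked examples of the extraction, assuming the helper above is defined in a module that imports urlparse from urllib.parse.
print(_get_app_path('http://localhost:5006/myapp/'))  # '/myapp'
print(_get_app_path('https://example.com'))           # '/'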
375,392 | def _ioctl_cast(n):
if sys.version_info < (2, 5):
n, = struct.unpack('i', struct.pack('I', n))
return n | Linux ioctl() request parameter is unsigned, whereas on BSD/Darwin it is
signed. Until 2.5 Python exclusively implemented the BSD behaviour,
preventing use of large unsigned int requests like the TTY layer uses
below. So on 2.4, we cast our unsigned to look like signed for Python. |
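The cast itself can be shown directly: reinterpret a large unsigned 32-bit request as the signed value older ioctl() signatures expected; the constant below is only an example.
import struct

request = 0x80087467  # an example "large" unsigned request number
signed, = struct.unpack('i', struct.pack('I', request))
print(signed)  # -2146929561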
375,393 | def import_cvxpy():
global _CVXPY_ERROR_LOGGED
try:
import cvxpy
except ImportError:
cvxpy = None
if not _CVXPY_ERROR_LOGGED:
_log.error("Could not import cvxpy. Tomography tools will not function.")
_CVXPY_ERROR_LOGGED = True
return cvxpy | Try importing the cvxpy module, log an error if unsuccessful.
:return: The cvxpy module if successful or None
:rtype: Optional[module] |
375,394 | def ppo_opt_step(i,
opt_state,
ppo_opt_update,
policy_net_apply,
old_policy_params,
value_net_apply,
value_net_params,
padded_observations,
padded_actions,
padded_rewards,
reward_mask,
gamma=0.99,
lambda_=0.95,
epsilon=0.1):
new_policy_params = trax_opt.get_params(opt_state)
g = grad(
ppo_loss, argnums=1)(
policy_net_apply,
new_policy_params,
old_policy_params,
value_net_apply,
value_net_params,
padded_observations,
padded_actions,
padded_rewards,
reward_mask,
gamma=gamma,
lambda_=lambda_,
epsilon=epsilon)
return ppo_opt_update(i, g, opt_state) | PPO optimizer step. |
375,395 | def main():
if sys.version_info[0] < 3:
sys.stdout = codecs.getwriter("utf-8")(sys.stdout)
options = docopt.docopt(__doc__,
help=True,
version= % __VERSION__)
print(template_remover.clean(io.open(options[]).read()))
return 0 | Entry point for remove_template. |
375,396 | def loadkml(self, filename):
nodes = self.readkmz(filename)
self.snap_points = []
for n in nodes:
point = self.readObject(n)
if self.mpstate.map is not None and point[0] == 'Polygon':
self.snap_points.extend(point[2])
newcolour = (random.randint(0, 255), 0, random.randint(0, 255))
curpoly = mp_slipmap.SlipPolygon(point[1], point[2],
layer=2, linewidth=2, colour=newcolour)
self.mpstate.map.add_object(curpoly)
self.allayers.append(curpoly)
self.curlayers.append(point[1])
if self.mpstate.map is not None and point[0] == 'Point':
icon = self.mpstate.map.icon('barrell.png')
curpoint = mp_slipmap.SlipIcon(point[1], latlon = (point[2][0][0], point[2][0][1]), layer=3, img=icon, rotation=0, follow=False)
curtext = mp_slipmap.SlipLabel(point[1], point = (point[2][0][0], point[2][0][1]), layer=4, label=point[1], colour=(0,255,255))
self.mpstate.map.add_object(curpoint)
self.mpstate.map.add_object(curtext)
self.allayers.append(curpoint)
self.alltextlayers.append(curtext)
self.curlayers.append(point[1])
self.curtextlayers.append(point[1])
self.menu_needs_refreshing = True | Load a kml from file and put it on the map |
375,397 | def refresh(self):
new_device_json = self._device_request()
_LOGGER.debug("Device Refresh Response: %s", new_device_json)
new_avatar_json = self._avatar_request()
_LOGGER.debug("Avatar Refresh Response: %s", new_avatar_json)
new_info_json = self._info_request()
_LOGGER.debug("Device Info Refresh Response: %s", new_info_json)
new_settings_json = self._settings_request()
_LOGGER.debug("Device Settings Refresh Response: %s",
new_settings_json)
self.update(new_device_json, new_info_json, new_settings_json,
new_avatar_json)
self._update_activities() | Refresh the devices json object data. |
375,398 | def _proxy(self):
if self._context is None:
self._context = EventContext(
self._version,
workspace_sid=self._solution['workspace_sid'],
sid=self._solution['sid'],
)
return self._context | Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: EventContext for this EventInstance
:rtype: twilio.rest.taskrouter.v1.workspace.event.EventContext |
375,399 | def hook(name=None, *args, **kwargs):
def decorator(f):
if not hasattr(f, "hooks"):
f.hooks = []
f.hooks.append((name or f.__name__, args, kwargs))
return f
return decorator | Decorator to register the function as a hook |
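A usage sketch assuming the decorator above is in scope; the hook names and arguments are made up.
@hook('startup', priority=10)
@hook()  # falls back to the function's own name
def configure(app):
    pass

print(configure.hooks)
# [('configure', (), {}), ('startup', (), {'priority': 10})]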