Unnamed: 0 (int64, 0–389k) | code (string, 26–79.6k chars) | docstring (string, 1–46.9k chars)
---|---|---|
24,600 |
def _set_original_fields(instance):
original_fields = {}
def _set_original_field(instance, field):
if instance.pk is None:
original_fields[field] = None
else:
if isinstance(instance._meta.get_field(field), ForeignKey):
|
Save field values, only for non-m2m fields.
|
24,601 |
def drag(self, point):
point = np.array(point, dtype=np.float32)
dx, dy = point - self._pdown
mindim = 0.3 * np.min(self._size)
target = self._target
x_axis = self._pose[:3, 0].flatten()
y_axis = self._pose[:3, 1].flatten()
z_axis = self._pose[:3, 2].flatten()
eye = self._pose[:3, 3].flatten()
if self._state == Trackball.STATE_ROTATE:
x_angle = -dx / mindim
x_rot_mat = transformations.rotation_matrix(
x_angle, y_axis, target
)
y_angle = dy / mindim
y_rot_mat = transformations.rotation_matrix(
y_angle, x_axis, target
)
self._n_pose = y_rot_mat.dot(x_rot_mat.dot(self._pose))
elif self._state == Trackball.STATE_ROLL:
center = self._size / 2.0
v_init = self._pdown - center
v_curr = point - center
v_init = v_init / np.linalg.norm(v_init)
v_curr = v_curr / np.linalg.norm(v_curr)
theta = (-np.arctan2(v_curr[1], v_curr[0]) +
np.arctan2(v_init[1], v_init[0]))
rot_mat = transformations.rotation_matrix(theta, z_axis, target)
self._n_pose = rot_mat.dot(self._pose)
elif self._state == Trackball.STATE_PAN:
dx = -dx / (5.0 * mindim) * self._scale
dy = -dy / (5.0 * mindim) * self._scale
translation = dx * x_axis + dy * y_axis
self._n_target = self._target + translation
t_tf = np.eye(4)
t_tf[:3, 3] = translation
self._n_pose = t_tf.dot(self._pose)
elif self._state == Trackball.STATE_ZOOM:
radius = np.linalg.norm(eye - target)
ratio = 0.0
if dy > 0:
ratio = np.exp(abs(dy) / (0.5 * self._size[1])) - 1.0
elif dy < 0:
ratio = 1.0 - np.exp(dy / (0.5 * (self._size[1])))
translation = -np.sign(dy) * ratio * radius * z_axis
t_tf = np.eye(4)
t_tf[:3, 3] = translation
self._n_pose = t_tf.dot(self._pose)
|
Update the trackball during a drag.
Parameters
----------
point : (2,) int
The current x and y pixel coordinates of the mouse during a drag.
This will compute a movement for the trackball with the relative
motion between this point and the one marked by down().
|
24,602 |
def announcement_posted_hook(request, obj):
logger.debug("Announcement posted")
if obj.notify_post:
logger.debug("Announcement notify on")
announcement_posted_twitter(request, obj)
try:
notify_all = obj.notify_email_all
except AttributeError:
notify_all = False
try:
if notify_all:
announcement_posted_email(request, obj, True)
else:
announcement_posted_email(request, obj)
except Exception as e:
logger.error("Exception when emailing announcement: {}".format(e))
messages.error(request, "Exception when emailing announcement: {}".format(e))
raise e
else:
logger.debug("Announcement notify off")
|
Runs whenever a new announcement is created, or a request is approved and posted.
obj: The Announcement object
|
24,603 |
def subset(self, selector):
if isinstance(selector, (list, tuple)):
return map(int, selector)
selector = SELECTORS.get(selector, selector)
mdtop = MDTrajTopology.from_openmm(self.handler.topology)
return mdtop.select(selector)
|
Returns a list of atom indices corresponding to an MDTraj DSL
query. A list of numbers is also accepted; it will be coerced
to int and returned.
|
24,604 |
def get_options(config_options, local_options, cli_options):
options = DEFAULT_OPTIONS.copy()
if config_options is not None:
options.update(config_options)
if local_options is not None:
options.update(local_options)
if cli_options is not None:
options.update(cli_options)
return options
|
Figure out what options to use based on the four places it can come from.
Order of precedence:
* cli_options specified by the user at the command line
* local_options specified in the config file for the metric
* config_options specified in the config file at the base
* DEFAULT_OPTIONS hard coded defaults
|
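A minimal sketch of the precedence (option names hypothetical; assumes DEFAULT_OPTIONS is the module-level dict shown):

>>> DEFAULT_OPTIONS = {'threshold': 1.0, 'enabled': False}
>>> get_options({'threshold': 2.0}, None, {'enabled': True})
{'threshold': 2.0, 'enabled': True}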
24,605 |
def _grads(self, x):
try:
self.optimizer_array = x
self.obj_grads = self._transform_gradients(self.objective_function_gradients())
self._fail_count = 0
except (LinAlgError, ZeroDivisionError, ValueError):
if self._fail_count >= self._allowed_failures:
raise
self._fail_count += 1
self.obj_grads = np.clip(self._transform_gradients(self.objective_function_gradients()), -1e100, 1e100)
return self.obj_grads
|
Gets the gradients from the likelihood and the priors.
Failures are handled robustly. The algorithm will try several times to
return the gradients, and will raise the original exception if
the objective cannot be computed.
:param x: the parameters of the model.
:type x: np.array
|
24,606 |
def create(cls, service=None, endpoint=None, data=None, *args, **kwargs):
    cls.validate(data)
    if service is None and endpoint is None:
        raise InvalidArguments(service, endpoint)
    if endpoint is None:
        sid = service['id'] if isinstance(service, Entity) else service
        endpoint = 'services/{0}/integrations'.format(sid)
    return getattr(Entity, 'create').__func__(cls, endpoint=endpoint,
                                              data=data, *args, **kwargs)
|
Create an integration within the scope of a service.
Callers may pass either a service (an Entity or an id) or an
endpoint that already identifies the service.
|
24,607 |
def run(self):
while not self._abort:
hashes = self._GetHashes(self._hash_queue, self.hashes_per_batch)
if hashes:
time_before_analysis = time.time()
hash_analyses = self.Analyze(hashes)
current_time = time.time()
self.seconds_spent_analyzing += current_time - time_before_analysis
self.analyses_performed += 1
for hash_analysis in hash_analyses:
self._hash_analysis_queue.put(hash_analysis)
self._hash_queue.task_done()
time.sleep(self.wait_after_analysis)
else:
time.sleep(self.EMPTY_QUEUE_WAIT_TIME)
|
The method called by the threading library to start the thread.
|
24,608 |
def execute_function(self, func, *nargs, **kwargs):
import types
fn = types.FunctionType(func.func_code,
func.func_globals.copy(),
name=func.func_name,
argdefs=func.func_defaults,
closure=func.func_closure)
fn.func_globals.update(self.globals)
error_class = Exception if config.catch_rex_errors else None
try:
return fn(*nargs, **kwargs)
except RexError:
raise
except error_class as e:
from inspect import getfile
stack = traceback.format_exc()
filename = getfile(func)
raise RexError("Failed to exec %s:\n\n%s" % (filename, stack))
|
Execute a function object within the execution context.
@returns The result of the function call.
|
24,609 |
def eval_table(tbl, expression, vm='python', blen=None, storage=None,
               create='array', vm_kwargs=None, **kwargs):
    storage = _util.get_storage(storage)
    names, columns = _util.check_table_like(tbl)
    length = len(columns[0])
    if vm_kwargs is None:
        vm_kwargs = dict()
    if vm == 'numexpr':
        import numexpr
        evaluate = numexpr.evaluate
    elif vm == 'python':
        def evaluate(expr, local_dict=None, **kw):
            return eval(expr, dict(), local_dict)
    else:
        raise ValueError('expected vm either "numexpr" or "python"')
variables = _get_expression_variables(expression, vm)
required_columns = {v: columns[names.index(v)] for v in variables}
blen = _util.get_blen_table(required_columns, blen=blen)
out = None
for i in range(0, length, blen):
j = min(i+blen, length)
blocals = {v: c[i:j] for v, c in required_columns.items()}
res = evaluate(expression, local_dict=blocals, **vm_kwargs)
if out is None:
out = getattr(storage, create)(res, expectedlen=length, **kwargs)
else:
out.append(res)
return out
|
Evaluate `expression` against columns of a table.
|
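For illustration, the same blockwise pattern in plain NumPy (a sketch without the chunked-storage backend assumed above; names here are hypothetical):

import numpy as np

def eval_blockwise(columns, expression, blen):
    # evaluate `expression` over blen-sized slices of the named columns
    length = len(next(iter(columns.values())))
    out = []
    for i in range(0, length, blen):
        blocals = {name: col[i:i + blen] for name, col in columns.items()}
        out.append(eval(expression, {}, blocals))
    return np.concatenate(out)

tbl = {'a': np.arange(10), 'b': np.ones(10)}
print(eval_blockwise(tbl, 'a * 2 + b', blen=4))  # a*2+b blockwise: 1, 3, 5, ..., 19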
24,610 |
def delete_statement(cls, prop_nr):
    return cls(value='', snak_type='value', data_type='', is_reference=False, is_qualifier=False, references=[],
               qualifiers=[], rank='normal', prop_nr=prop_nr, check_qualifier_equality=True)
|
This serves as an alternative constructor for WDBaseDataType whose only purpose is to hold a WD property
number and an empty string value, indicating that the whole statement with this property number
should be deleted from the WD item.
:param prop_nr: A WD property number as string
:return: An instance of WDBaseDataType
|
24,611 |
def slicenet_middle(inputs_encoded, targets, target_space_emb, mask, hparams):
def norm_fn(x, name):
with tf.variable_scope(name, default_name="norm"):
return common_layers.apply_norm(x, hparams.norm_type, hparams.hidden_size,
hparams.norm_epsilon)
targets_flat = tf.expand_dims(common_layers.flatten4d3d(targets), axis=2)
target_space_emb = tf.tile(target_space_emb,
[tf.shape(targets_flat)[0], 1, 1, 1])
targets_shifted = common_layers.shift_right(
targets_flat, pad_value=target_space_emb)
if hparams.attention_type == "none":
targets_with_attention = tf.zeros_like(targets_shifted)
else:
inputs_padding_bias = (1.0 - mask) * -1e9
targets_with_attention = attention(
targets_shifted,
inputs_encoded,
norm_fn,
hparams,
bias=inputs_padding_bias)
kernel = (hparams.kernel_height, hparams.kernel_width)
targets_merged = common_layers.subseparable_conv_block(
tf.concat([targets_with_attention, targets_shifted], axis=3),
hparams.hidden_size, [((1, 1), kernel)],
normalizer_fn=norm_fn,
padding="LEFT",
separability=4,
name="targets_merge")
return targets_merged, 0.0
|
Middle part of slicenet, connecting encoder and decoder.
|
24,612 |
def url(value):
if not url_regex.search(value):
message = u"{0} is not a valid URL".format(value)
    if url_regex.search('http://' + value):
message += u". Did you mean: http://{0}".format(value)
raise ValueError(message)
return value
|
Validate a URL.
:param string value: The URL to validate
:returns: The URL if valid.
:raises: ValueError
|
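Assuming a url_regex that requires an explicit scheme, usage would look like:

>>> url('http://example.com')
'http://example.com'
>>> url('example.com')
Traceback (most recent call last):
    ...
ValueError: example.com is not a valid URL. Did you mean: http://example.com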
24,613 |
def _netid_subscription_url(netid, subscription_codes):
return "{0}/{1}/subscription/{2}".format(
url_base(), netid,
        (','.join([str(n) for n in subscription_codes])
if isinstance(subscription_codes, (list, tuple))
else subscription_codes))
|
Return UWNetId resource for provided netid and subscription
code or code list
|
24,614 |
def _set_rc(self):
base_str = self._get_rc_strings()
pattern_base = map(lambda s: s.replace(, ), base_str)
pattern = % .join(self._get_formatoptions())
self._rc = rcParams.find_and_replace(base_str, pattern=pattern,
pattern_base=pattern_base)
user_rc = SubDict(rcParams[], base_str, pattern=pattern,
pattern_base=pattern_base)
self._rc.update(user_rc.data)
self._defaultParams = SubDict(rcParams.defaultParams, base_str,
pattern=pattern,
pattern_base=pattern_base)
|
Method to set the rcparams and defaultParams for this plotter
|
24,615 |
def chfullname(name, fullname):
    pre_info = info(name)
    if not pre_info:
        raise CommandExecutionError('User \'{0}\' does not exist'.format(name))
    if fullname == pre_info['fullname']:
        return True
    # use a 'create' command rather than a 'change' command: if the current
    # fullname is an empty string, 'change' will fail
    _dscl(
        ['/Users/{0}'.format(name), 'RealName', fullname],
        ctype='create'
    )
    current = info(name).get('fullname')
    return current == fullname
|
Change the user's Full Name
CLI Example:
.. code-block:: bash
salt '*' user.chfullname foo 'Foo Bar'
|
24,616 |
def make_innermost_setter(setter):
@functools.wraps(setter)
def _new_setter(kernel_results, *args, **kwargs):
results_stack = []
        while hasattr(kernel_results, 'inner_results'):
results_stack.append(kernel_results)
kernel_results = kernel_results.inner_results
new_kernel_results = setter(kernel_results, *args, **kwargs)
for outer_results in reversed(results_stack):
new_kernel_results = outer_results._replace(
inner_results=new_kernel_results)
return new_kernel_results
return _new_setter
|
Wraps a setter so it applies to the inner-most results in `kernel_results`.
The wrapped setter unwraps `kernel_results` and applies `setter` to the first
results without an `inner_results` attribute.
Args:
setter: A callable that takes the kernel results as well as some `*args` and
`**kwargs` and returns a modified copy of those kernel results.
Returns:
new_setter: A wrapped `setter`.
|
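A small sketch with namedtuples standing in for TFP's nested kernel-result structures (the type names here are hypothetical):

import collections

Inner = collections.namedtuple('Inner', ['step_size'])
Outer = collections.namedtuple('Outer', ['inner_results'])

@make_innermost_setter
def set_step_size(kernel_results, step_size):
    return kernel_results._replace(step_size=step_size)

nested = Outer(inner_results=Inner(step_size=0.1))
print(set_step_size(nested, 0.5))  # Outer(inner_results=Inner(step_size=0.5))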
24,617 |
def _set_keepalive(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=keepalive.keepalive, is_container=, presence=False, yang_name="keepalive", rest_name="keepalive", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: None, u: None, u: None}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "container",
: ,
})
self.__keepalive = t
if hasattr(self, ):
self._set()
|
Setter method for keepalive, mapped from YANG variable /interface/tunnel/keepalive (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_keepalive is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_keepalive() directly.
|
24,618 |
def getExtentAddress(self, zoom, extent=None, contained=False):
if extent:
bbox = extent
else:
bbox = self.extent
minX = bbox[0]
maxX = bbox[2]
    if self.originCorner == 'bottom-left':
        minY = bbox[3]
        maxY = bbox[1]
    elif self.originCorner == 'top-left':
        minY = bbox[1]
        maxY = bbox[3]
[minCol, minRow] = self.tileAddress(zoom, [minX, maxY])
[maxCol, maxRow] = self.tileAddress(zoom, [maxX, minY])
    if contained and (minCol != maxCol or minRow != maxRow):
        parentBoundsMin = self.tileBounds(zoom, minCol, minRow)
        if self.originCorner == 'bottom-left':
            if parentBoundsMin[2] == maxX:
                maxCol -= 1
            if parentBoundsMin[3] == minY:
                maxRow -= 1
        elif self.originCorner == 'top-left':
            if parentBoundsMin[2] == maxX:
                maxCol -= 1
            if parentBoundsMin[1] == minY:
                maxRow -= 1
return [minRow, minCol, maxRow, maxCol]
|
Return the bounding addresses ([minRow, minCol, maxRow, maxCol] based
on the instance's extent or a user defined extent. Generic method
that works with regular and irregular pyramids.
Parameters:
zoom -- the zoom for which we want the bounding addresses
extent (optional) -- the extent ([minX, minY, maxX, maxY])
defaults to the instance extent
contained (optional) -- get only tile addresses that contain
a coordinate of the extent. For instance if
the extent only intersects a tile border,
if this option is set to True, this tile
will be ignored. defaults to False
|
24,619 |
def read_namespaced_resource_quota(self, name, namespace, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
return self.read_namespaced_resource_quota_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_resource_quota_with_http_info(name, namespace, **kwargs)
return data
|
read the specified ResourceQuota
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_resource_quota(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ResourceQuota (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.
:param bool export: Should this value be exported. Export strips fields that a user can not specify. Deprecated. Planned for removal in 1.18.
:return: V1ResourceQuota
If the method is called asynchronously,
returns the request thread.
|
24,620 |
def create_pattern(cls, userdata):
empty = cls.create_empty(None)
userdata_dict = cls.normalize(empty, userdata)
return Userdata(userdata_dict)
|
Create a user data instance with all values the same.
|
24,621 |
def _stage(self, accepted, count=0):
new5 = self._combine_rest_push()
new1 = self._combine_push_pop()
new2 = self._combine_push_rest()
new3 = self._combine_pop_rest()
new4 = self._combine_rest_rest()
new = new1 + new2 + new3 + new4 + new5
del new1
del new2
del new3
del new4
del new5
if len(new) == 0:
return None
self.statediag = self.statediag + new
del new
newstates = []
for key in self.statediag:
if len(key.trans) == 0 or key.trans == {}:
pass
else:
newstates.append(key)
del self.statediag
self.statediag = newstates
self.quickresponse = {}
self.quickresponse_types = {}
self.quickresponse_types[0] = []
self.quickresponse_types[1] = []
self.quickresponse_types[2] = []
self.quickresponse_types[3] = []
self.quickresponse_types[4] = []
for state in self.statediag:
if state.id not in self.quickresponse:
self.quickresponse[state.id] = [state]
else:
self.quickresponse[state.id].append(state)
self.quickresponse_types[state.type].append(state)
exists = self._check(accepted)
if exists == -1:
return self._stage(accepted, count + 1)
else:
print exists
return exists
|
This is a repeated stage in the state-removal algorithm
|
24,622 |
def grid(self, **kw):
ttk.Scrollbar.grid(self, **kw)
    self._layout = 'grid'
|
Position a widget in the parent widget in a grid.
:param column: use cell identified with given column (starting with 0)
:type column: int
:param columnspan: this widget will span several columns
:type columnspan: int
:param in\_: widget to use as container
:type in\_: widget
:param ipadx: add internal padding in x direction
:type ipadx: int
:param ipady: add internal padding in y direction
:type ipady: int
:param padx: add padding in x direction
:type padx: int
:param pady: add padding in y direction
:type pady: int
:param row: use cell identified with given row (starting with 0)
:type row: int
:param rowspan: this widget will span several rows
:type rowspan: int
:param sticky: "n", "s", "e", "w" or combinations: if cell is
larger on which sides will this widget stick to
the cell boundary
:type sticky: str
|
24,623 |
def create_packet(header, data):
packet = IncomingPacket()
packet.header = header
packet.data = data
if len(header) == HeronProtocol.HEADER_SIZE:
packet.is_header_read = True
if len(data) == packet.get_datasize():
packet.is_complete = True
return packet
|
Creates an IncomingPacket object from header and data
This method is for testing purposes
|
24,624 |
def get(self):
ret_list = []
if hasattr(self, "font"):
ret_list.append(self.font)
if hasattr(self, "size"):
ret_list.append(self.size)
if hasattr(self, "text"):
ret_list.append(self.text)
return ret_list
|
method to fetch all contents as a list
:return: list
|
24,625 |
def seed_response(self, command, response):
if command not in self.responses:
self.responses[command] = deque()
self.responses[command].append(response)
return self
|
Sets the response that the adapter will return for the specified
command.
You can seed multiple responses per command; the adapter will
put them into a FIFO queue. When a request comes in, the
adapter will pop the corresponding response off of the queue.
Example:
.. code-block:: python
adapter.seed_response('sayHello', {'message': 'Hi!'})
adapter.seed_response('sayHello', {'message': 'Hello!'})
adapter.send_request({'command': 'sayHello'})
# {'message': 'Hi!'}
adapter.send_request({'command': 'sayHello'})
# {'message': 'Hello!'}
|
24,626 |
def getEntityType(self, found=None):
    for character in self.substitutionValues.keys():
        for value in self.substitutionValues[character]:
            if value in found:
                return "i3visio.text"
    return self.name
|
Method to recover the entity type in cases where it may vary.
:param found: The expression to be analysed.
:return: The entity type returned will be 'i3visio.email' for foo@bar.com and 'i3visio.text' for foo[at]bar[dot]com.
|
24,627 |
def find_log_files(self, sp_key, filecontents=True, filehandles=False):
    path_filters = getattr(self, 'mod_cust_config', {}).get('path_filters')
    path_filters_exclude = getattr(self, 'mod_cust_config', {}).get('path_filters_exclude')
    if isinstance(sp_key, dict):
        # Old-style search pattern given directly: run the search now
        report.files[self.name] = list()
        for sf in report.searchfiles:
            if report.search_file(sp_key, {'fn': sf[0], 'root': sf[1]}):
                report.files[self.name].append({'fn': sf[0], 'root': sf[1]})
        sp_key = self.name
        logwarn = "Deprecation Warning: {} - Please use new style for find_log_files()".format(self.name)
        if len(report.files[self.name]) > 0:
            logger.warn(logwarn)
        else:
            logger.debug(logwarn)
    elif not isinstance(sp_key, str):
        logger.warn("Did not understand find_log_files() search key")
        return
    for f in report.files[sp_key]:
        # Make a note of the filename so we can report it if something crashes
        report.last_found_file = os.path.join(f['root'], f['fn'])
        # Filter out files based on exclusion patterns
        if path_filters_exclude and len(path_filters_exclude) > 0:
            exlusion_hits = (fnmatch.fnmatch(report.last_found_file, pfe) for pfe in path_filters_exclude)
            if any(exlusion_hits):
                logger.debug("{} - Skipping '{}' as it matched the path_filters_exclude for '{}'".format(sp_key, f['fn'], self.name))
                continue
        # Filter out files based on inclusion patterns
        if path_filters and len(path_filters) > 0:
            inclusion_hits = (fnmatch.fnmatch(report.last_found_file, pf) for pf in path_filters)
            if not any(inclusion_hits):
                logger.debug("{} - Skipping '{}' as it didn't match the path_filters for '{}'".format(sp_key, f['fn'], self.name))
                continue
        # Make a sample name from the cleaned file name
        f['s_name'] = self.clean_s_name(f['fn'], f['root'])
        if filehandles or filecontents:
            try:
                # Image files get a binary file handle, everything else is text
                (ftype, encoding) = mimetypes.guess_type(os.path.join(f['root'], f['fn']))
                if ftype is not None and ftype.startswith('image'):
                    with io.open(os.path.join(f['root'], f['fn']), 'rb') as fh:
                        f['f'] = fh
                        yield f
                else:
                    with io.open(os.path.join(f['root'], f['fn']), 'r', encoding='utf-8') as fh:
                        if filehandles:
                            f['f'] = fh
                            yield f
                        elif filecontents:
                            f['f'] = fh.read()
                            yield f
            except (IOError, OSError, ValueError, UnicodeDecodeError) as e:
                logger.debug("Couldn't open filehandle when returning file: {}\n{}".format(f['fn'], e))
                f['f'] = None
        else:
            yield f
|
Return matching log files of interest.
:param sp_key: Search pattern key specified in config
:param filehandles: Set to true to return a file handle instead of slurped file contents
:return: Yields a dict with filename (fn), root directory (root), cleaned sample name
generated from the filename (s_name) and either the file contents or file handle
for the current matched file (f).
As yield is used, the results can be iterated over without loading all files at once
|
24,628 |
def select_ip_version(host, port):
    if ':' in host and hasattr(socket, 'AF_INET6'):
        return socket.AF_INET6
    return socket.AF_INET
|
Returns AF_INET or AF_INET6 depending on where to connect to.
|
24,629 |
def length_prefix(length, offset):
if length < 56:
return chr(offset + length)
else:
length_string = int_to_big_endian(length)
return chr(offset + 56 - 1 + len(length_string)) + length_string
|
Construct the prefix to lists or strings denoting their length.
:param length: the length of the item in bytes
:param offset: ``0x80`` when encoding raw bytes, ``0xc0`` when encoding a
list
|
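For example (Python 2, as in the surrounding rlp code, where int_to_big_endian(1024) is '\x04\x00'):

>>> length_prefix(3, 0x80)     # short item: single prefix byte 0x80 + 3
'\x83'
>>> length_prefix(1024, 0x80)  # long item: 0xb9 followed by the 2-byte length
'\xb9\x04\x00'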
24,630 |
def louvain(adjacency_matrix):
adjacency_matrix = nx.from_scipy_sparse_matrix(adjacency_matrix, create_using=nx.Graph())
tree = community.generate_dendogram(adjacency_matrix, part_init=None)
row = list()
col = list()
append_row = row.append
append_col = col.append
community_counter = 0
for i in range(len(tree)):
partition = community.partition_at_level(tree, i)
for n, c in partition.items():
append_row(n)
append_col(community_counter + c)
community_counter += max(partition.values()) + 1
row = np.array(row)
col = np.array(col)
data = np.ones(row.size, dtype=np.float64)
louvain_features = sparse.coo_matrix((data, (row, col)), shape=(len(partition.keys()), community_counter),
dtype=np.float64)
return louvain_features
|
Performs community embedding using the LOUVAIN method.
Introduced in: Blondel, V. D., Guillaume, J. L., Lambiotte, R., & Lefebvre, E. (2008).
Fast unfolding of communities in large networks.
Journal of Statistical Mechanics: Theory and Experiment, 2008(10), P10008.
Inputs: - A in R^(nxn): Adjacency matrix of an undirected network represented as a SciPy Sparse COOrdinate matrix.
Outputs: - X in R^(nxC_n): The latent space embedding represented as a SciPy Sparse COOrdinate matrix.
|
24,631 |
def str_if_nested_or_str(s):
if isinstance(s, ALL_STRING_TYPES):
return str(s)
if isinstance(s, (list, tuple)):
return type(s)(map(str_if_nested_or_str, s))
if isinstance(s, (dict, )):
return stringify_dict_contents(s)
return s
|
Turn input into a native string if possible.
|
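For example, assuming ALL_STRING_TYPES covers str and unicode variants:

>>> str_if_nested_or_str([u'a', (u'b', u'c')])
['a', ('b', 'c')]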
24,632 |
def quantile_normalize(matrix, inplace=False, target=None):
assert isinstance(matrix, ExpMatrix)
assert isinstance(inplace, bool)
if target is not None:
assert isinstance(target, np.ndarray) and \
np.issubdtype(target.dtype, np.float)
if not inplace:
matrix = matrix.copy()
X = matrix.X
_, n = X.shape
nan = []
for j in range(n):
nan.append(np.nonzero(np.isnan(X[:, j]))[0])
if nan[j].size > 0:
q = np.arange(1, nan[j].size + 1, dtype=np.float64) / \
(nan[j].size + 1.0)
fill = np.nanpercentile(X[:, j], 100 * q)
X[nan[j], j] = fill
    A = np.argsort(X, axis=0, kind='mergesort')
for j in range(n):
matrix.iloc[:, j] = matrix.X[A[:, j], j]
if target is None:
target = np.mean(matrix.X, axis=1)
else:
target = np.sort(target)
    A = np.argsort(A, axis=0, kind='mergesort')
for j in range(n):
matrix.iloc[:, j] = target[A[:, j]]
for j in range(n):
if nan[j].size > 0:
matrix.iloc[nan[j], j] = np.nan
return matrix
|
Quantile normalization, allowing for missing values (NaN).
In case of nan values, this implementation will calculate evenly
distributed quantiles and fill in the missing data with those values.
Quantile normalization is then performed on the filled-in matrix,
and the nan values are restored afterwards.
Parameters
----------
matrix: `ExpMatrix`
The expression matrix (rows = genes, columns = samples).
inplace: bool
Whether or not to perform the operation in-place. [False]
target: `numpy.ndarray`
Target distribution to use. needs to be a vector whose first
dimension matches that of the expression matrix. If ``None``,
the target distribution is calculated based on the matrix
itself. [None]
Returns
-------
numpy.ndarray (ndim = 2)
The normalized matrix.
|
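The core idea on a plain NumPy array, without the NaN handling (a sketch; X is genes x samples):

import numpy as np

X = np.array([[5., 2.],
              [2., 4.],
              [3., 3.]])
ranks = np.argsort(np.argsort(X, axis=0), axis=0)  # per-column ranks
target = np.mean(np.sort(X, axis=0), axis=1)       # mean of each sorted row
print(target[ranks])  # [[4.5 2. ] [2.  4.5] [3.  3. ]]; columns now share the target distribution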
24,633 |
def on_message(self, message):
if self.ws.closed:
return None
try:
        safe_call(self.logger.debug, '%s: %s', self, message)
for data in self.ddp_frames_from_message(message):
self.process_ddp(data)
signals.request_finished.send(sender=self.__class__)
except geventwebsocket.WebSocketError:
self.ws.close()
|
Process a message received from remote.
|
24,634 |
def validate(self):
if not (isinstance(self.target_class, set) and
all(isinstance(x, six.string_types) for x in self.target_class)):
        raise TypeError(u'Expected set of strings for target_class, got: {} {}'.format(
            type(self.target_class).__name__, self.target_class))
for cls in self.target_class:
validate_safe_string(cls)
|
Ensure that the CoerceType block is valid.
|
24,635 |
def rm_regions(a, b, a_start_ind, a_stop_ind):
import numpy
for i in range(len(a_stop_ind)):
next_a_start = numpy.argmax(a[a_stop_ind[i]:])
next_b_start = numpy.argmax(b[a_stop_ind[i]:])
if next_b_start > next_a_start:
a[a_start_ind[i]:a_stop_ind[i]] = False
return a
|
Remove contiguous regions in `a` before region `b`
Boolean arrays `a` and `b` should have alternating occurrences of regions of
`True` values. This routine removes additional contiguous regions in `a`
that occur before a complementary region in `b` has occurred.
Args
----
a: ndarray
Boolean array with regions of contiguous `True` values
b: ndarray
Boolean array with regions of contiguous `True` values
a_start_ind: ndarray
indices of start of `a` regions
a_stop_ind: ndarray
indices of stop of `a` regions
Returns
-------
a: ndarray
Boolean array with the regions that began before a complementary
region in `b` occurred removed
|
24,636 |
def _nextNonSpaceColumn(block, column):
textAfter = block.text()[column:]
if textAfter.strip():
spaceLen = len(textAfter) - len(textAfter.lstrip())
return column + spaceLen
else:
return -1
|
Returns the column of the first non-whitespace character,
starting at the given cursor position and searching forwards; -1 if there is none.
|
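For example, with a minimal stand-in for the text block object (hypothetical):

>>> class Block:
...     def __init__(self, s): self._s = s
...     def text(self): return self._s
>>> _nextNonSpaceColumn(Block('if x:   return'), 5)
8
>>> _nextNonSpaceColumn(Block('if x:   '), 5)
-1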
24,637 |
def cli(env, package_keyname, required):
client = env.client
manager = ordering.OrderingManager(client)
table = formatting.Table(COLUMNS)
categories = manager.list_categories(package_keyname)
    if required:
        categories = [cat for cat in categories if cat['isRequired']]
    for cat in categories:
        table.add_row([
            cat['itemCategory']['name'],
            cat['itemCategory']['categoryCode'],
            'Y' if cat['isRequired'] else 'N'
        ])
env.fout(table)
|
List the categories of a package.
::
# List the categories of Bare Metal servers
slcli order category-list BARE_METAL_SERVER
# List the required categories for Bare Metal servers
slcli order category-list BARE_METAL_SERVER --required
|
24,638 |
def ParseFileObject(self, parser_mediator, file_object):
display_name = parser_mediator.GetDisplayName()
self.ParseFileLNKFile(parser_mediator, file_object, display_name)
|
Parses a Windows Shortcut (LNK) file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
|
24,639 |
def _fix_channels(self, op, attrs, inputs):
if op not in [mx.sym.Convolution, mx.sym.Deconvolution, mx.sym.FullyConnected]:
return attrs
weight_name = self._renames[inputs[1]]
if not weight_name in self._params:
raise ValueError("Unable to get channels/units attr from onnx graph.")
else:
wshape = self._params[weight_name].shape
assert len(wshape) >= 2, "Weights shape is invalid: {}".format(wshape)
        if op in [mx.sym.FullyConnected]:
            attrs['num_hidden'] = wshape[0]
        else:
            if op == mx.sym.Convolution:
                attrs['num_filter'] = wshape[0]
            elif op == mx.sym.Deconvolution:
                attrs['num_filter'] = wshape[1]
return attrs
|
A workaround for getting 'channels' or 'units' since ONNX doesn't provide
these attributes. We check the shape of the weights provided to get the number.
|
24,640 |
def create_binary_annotation(key, value, annotation_type, host):
return zipkin_core.BinaryAnnotation(
key=key,
value=value,
annotation_type=annotation_type,
host=host,
)
|
Create a zipkin binary annotation object
:param key: name of the annotation, such as 'http.uri'
:param value: value of the annotation, such as a URI
:param annotation_type: type of annotation, such as AnnotationType.I32
:param host: zipkin endpoint object
:returns: zipkin binary annotation object
|
24,641 |
def _set_below(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=below.below, is_container=, presence=False, yang_name="below", rest_name="below", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: None}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "container",
: ,
})
self.__below = t
if hasattr(self, ):
self._set()
|
Setter method for below, mapped from YANG variable /rbridge_id/threshold_monitor/interface/policy/area/alert/below (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_below is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_below() directly.
|
24,642 |
def print_usage(self, file=None):
optparse.OptionParser.print_usage(self, file)
file.flush()
|
Outputs usage information to the file if specified, or to the
io_manager's stdout if available, or to sys.stdout.
|
24,643 |
def _compute_delta_beta(self, X, T, E, weights, index=None):
score_residuals = self._compute_score(X, T, E, weights, index=index)
d = X.shape[1]
scaled_variance_matrix = self.variance_matrix_ * np.tile(self._norm_std.values, (d, 1)).T
delta_betas = score_residuals.dot(scaled_variance_matrix)
delta_betas.columns = self.hazards_.index
return delta_betas
|
approximate change in betas as a result of excluding ith row. Good for finding outliers / specific
subjects that influence the model disproportionately. Good advice: don't drop these outliers, model them.
|
24,644 |
def check_many(self, domains):
return dict((item.domain, item.status) for item in self.check_domain_request(domains))
|
Check availability for a number of domains. Returns a dictionary
mapping the domain names to their statuses as a string
("active"/"free").
|
24,645 |
def analytic_kl_builder(posterior, prior, sample):
del sample
return tf.reduce_sum(tfp.distributions.kl_divergence(posterior, prior))
|
A pre-canned builder for the analytic kl divergence.
|
24,646 |
def serialize(self):
import yaml
output = {"metadata": self.metadata, "sources": {},
"name": self.name}
for key, entry in self.items():
output["sources"][key] = entry._captured_init_kwargs
return yaml.dump(output)
|
Produce YAML version of this catalog.
Note that this is not the same as ``.yaml()``, which produces a YAML
block referring to this catalog.
|
24,647 |
def switch_bucket(self, bucket_key, data_shapes, label_shapes=None):
    assert self.binded, 'call bind before switching bucket'
if not bucket_key in self._buckets:
symbol, data_names, label_names = self._call_sym_gen(bucket_key)
module = Module(symbol, data_names, label_names,
logger=self.logger, context=self._context,
work_load_list=self._work_load_list,
fixed_param_names=self._fixed_param_names,
state_names=self._state_names,
group2ctxs=self._group2ctxs,
compression_params=self._compression_params)
module.bind(data_shapes, label_shapes, self._curr_module.for_training,
self._curr_module.inputs_need_grad,
force_rebind=False, shared_module=self._buckets[self._default_bucket_key],
grad_req=self._grad_req)
if self._monitor is not None:
module.install_monitor(self._monitor)
self._buckets[bucket_key] = module
self._curr_module = self._buckets[bucket_key]
self._curr_bucket_key = bucket_key
|
Switches to a different bucket. This will change ``self.curr_module``.
Parameters
----------
bucket_key : str (or any python object)
The key of the target bucket.
data_shapes : list of (str, tuple)
Typically ``data_batch.provide_data``.
label_shapes : list of (str, tuple)
Typically ``data_batch.provide_label``.
|
24,648 |
def find_block_end(row, line_list, sentinal, direction=1):
import re
row_ = row
line_ = line_list[row_]
flag1 = row_ == 0 or row_ == len(line_list) - 1
flag2 = re.match(sentinal, line_)
if not (flag1 or flag2):
while True:
if (row_ == 0 or row_ == len(line_list) - 1):
break
line_ = line_list[row_]
if re.match(sentinal, line_):
break
row_ += direction
return row_
|
Searches up and down until it finds the endpoints of a block.
Rectify with find_paragraph_end in pyvim_funcs.
|
24,649 |
def choicebox(message='', title='', choices=[]):
return psidialogs.choice(message=message, title=title, choices=choices)
|
Original doc: Present the user with a list of choices.
return the choice that he selects.
return None if he cancels the selection.
|
24,650 |
def extract_parameters(pil, keys=None):
out_dict = {}
if keys is None:
keys = pil.keys()
for key in keys:
try:
out_dict[key] = pil[key]
except ValueError:
out_dict[key] = None
return out_dict
|
Extract and return parameter names and values from a pil object
Parameters
----------
pil : `Pil` object
keys : list
List of parameter names; if None, extract all parameters
Returns
-------
out_dict : dict
Dictionary with parameter name, value pairs
|
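For example, with a plain dict standing in for the Pil object (note a dict raises KeyError rather than ValueError, so only present keys are shown):

>>> pil = {'emin': 100.0, 'emax': 10000.0}
>>> extract_parameters(pil, keys=['emin'])
{'emin': 100.0}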
24,651 |
def _serialize_to_many(self, key, vals, rlink):
    rel = {
        key: {
            'data': [],
            'links': {
                'related': rlink + '/' + key
            }
        }
    }
    try:
        for val in vals:
            rel[key]['data'].append({
                'id': val['rid'],
                'type': val['rtype'],
            })
    except TypeError:
        del rel[key]['data']
    return rel
|
Make a to_many JSON API compliant
:spec:
jsonapi.org/format/#document-resource-object-relationships
:param key:
the string name of the relationship field
:param vals:
array of dict's containing `rid` & `rtype` keys for the
to_many, empty array if no values, & None if the to_manys
values are unknown
:return:
dict as documented in the spec link
|
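With the key names assumed in the reconstruction above, the output would look like:

>>> s._serialize_to_many('tags', [{'rid': '1', 'rtype': 'tags'}], '/posts/9')
{'tags': {'data': [{'id': '1', 'type': 'tags'}], 'links': {'related': '/posts/9/tags'}}}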
24,652 |
def all_terms(self):
for s_name, s in self.sections.items():
    if s.name != 'Root':
yield s
for rterm in s:
yield rterm
for d in rterm.descendents:
yield d
|
Iterate over all of the terms. The self.terms property has only root level terms. This iterator
iterates over all terms
|
24,653 |
def send_last_message(self, msg, connection_id=None):
zmq_identity = None
if connection_id is not None and self._connections is not None:
if connection_id in self._connections:
connection_info = self._connections.get(connection_id)
if connection_info.connection_type == \
ConnectionType.ZMQ_IDENTITY:
zmq_identity = connection_info.connection
del self._connections[connection_id]
else:
LOGGER.debug("Can't send to %s, not in self._connections",
connection_id)
return
self._ready.wait()
try:
asyncio.run_coroutine_threadsafe(
self._send_last_message(zmq_identity, msg),
self._event_loop)
except RuntimeError:
pass
|
Should be used instead of send_message, when you want to close the
connection once the message is sent.
:param msg: protobuf validator_pb2.Message
|
24,654 |
def is_element_in_database(element='', database='ENDF_VII'):
    if element == '':
        return False
list_entry_from_database = get_list_element_from_database(database=database)
if element in list_entry_from_database:
return True
return False
|
will try to find the element in the folder (database) specified
Parameters:
==========
element: string. Name of the element. Not case sensitive
database: string (default is 'ENDF_VII'). Name of folder that has the list of elements
Returns:
=======
bool: True if element was found in the database
False if element could not be found
|
24,655 |
def type_search(self, basetype, symbolstr, origin):
symbols = symbolstr.split("%")
base, basemod = self.tree_find(basetype, origin, "types")
i = 1
while isinstance(base, CustomType) and i < len(symbols):
if symbols[i] in base.members:
base = base.members[symbols[i]]
if base.is_custom:
base, basemod = self.tree_find(base.kind, origin, "types")
elif symbols[i] in base.executables:
base = base.executables[symbols[i]]
i += 1
return base
|
Recursively traverses the module trees looking for the final
code element in a sequence of %-separated symbols.
:arg basetype: the type name of the first element in the symbol string.
:arg symbolstr: a %-separated list of symbols, e.g. this%sym%sym2%go.
:arg origin: an instance of the Module class that started the request.
|
24,656 |
def _find_glob_matches(in_files, metadata):
reg_files = copy.deepcopy(in_files)
glob_files = []
for glob_search in [x for x in metadata.keys() if "*" in x]:
cur = []
for fname in in_files:
if fnmatch.fnmatch(fname, "*/%s" % glob_search):
cur.append(fname)
reg_files.remove(fname)
assert cur, "Did not find file matches for %s" % glob_search
glob_files.append(cur)
return reg_files, glob_files
|
Group files that match by globs for merging, rather than by explicit pairs.
|
24,657 |
def is_molecular_function(self, go_term):
mf_root = "GO:0003674"
if go_term == mf_root:
return True
ancestors = self.get_isa_closure(go_term)
if mf_root in ancestors:
return True
else:
return False
|
Returns True if go_term has an is_a / part_of ancestor of molecular function GO:0003674
|
24,658 |
def fix_axon_peri_v2(hobj):
for i,sec in enumerate(hobj.axon):
if i < 2:
sec.L = 30
sec.diam = 1
else:
sec.L = 1e-6
sec.diam = 1
h.define_shape()
|
Replace reconstructed axon with a stub
:param hobj: hoc object
|
24,659 |
def _minimal_common_integer(si_0, si_1):
si_0_splitted = si_0._ssplit()
si_1_splitted = si_1._ssplit()
len_0, len_1 = len(si_0_splitted), len(si_1_splitted)
if len_0 == 1 and len_1 == 2:
si_0_splitted, si_1_splitted = si_1_splitted, si_0_splitted
len_0, len_1 = len_1, len_0
if len_0 == 1 and len_1 == 1:
return StridedInterval._minimal_common_integer_splitted(si_0, si_1)
if len_0 == 2 and len_1 == 1:
int_0 = StridedInterval._minimal_common_integer_splitted(si_0_splitted[0], si_1_splitted[0])
int_1 = StridedInterval._minimal_common_integer_splitted(si_0_splitted[1], si_1_splitted[0])
else:
int_0 = StridedInterval._minimal_common_integer_splitted(si_0_splitted[0], si_1_splitted[0])
int_1 = StridedInterval._minimal_common_integer_splitted(si_0_splitted[1], si_1_splitted[1])
if int_0 is None:
return int_1
elif int_1 is None:
return int_0
    else:
        return min(int_0, int_1)
|
Calculates the minimal integer that appears in both StridedIntervals.
As a wrapper method of _minimal_common_integer_splitted(), this method takes arbitrary StridedIntervals.
For more information, please refer to the comment of _minimal_common_integer_splitted().
:param si_0: the first StridedInterval
:type si_0: StridedInterval
:param si_1: the second StridedInterval
:type si_1: StridedInterval
:return: the minimal common integer, or None if there is no common integer
|
24,660 |
def _parse_response(response, clazz, is_list=False, resource_name=None):
target = response.json()[
resource_name] if resource_name else response.json()
if is_list:
return [clazz.from_json(resource) for resource in target]
else:
return clazz.from_json(target)
|
Parse a Marathon response into an object or list of objects.
|
24,661 |
def search_for_port(port_glob, req, expected_res):
if usb.core.find(idVendor=0x0403, idProduct=0x6001) is None:
return None
ports = glob.glob(port_glob)
if len(ports) == 0:
return None
for port in ports:
with r12_serial_port(port) as ser:
if not ser.isOpen():
ser.open()
if sys.version_info[0] == 2:
ser.write(str(req).encode())
else:
                ser.write(bytes(req, 'utf-8'))
time.sleep(0.1)
return None
|
Find the serial port the arm is connected to.
|
24,662 |
def expand_multirow_data(data):
num_cols = len(data[0])
row_heights = []
for mlrow in data:
row_height = 0
for j, cell in enumerate(mlrow):
row_height = max(row_height, 1 if not isinstance(cell, (list, tuple)) else len(cell))
row_heights.append(row_height)
num_lines = sum(row_heights)
new_data = [[""]*num_cols for i in range(num_lines)]
i0 = 0
for row_height, mlrow in zip(row_heights, data):
for j, cell in enumerate(mlrow):
if not isinstance(cell, (list, tuple)):
cell = [cell]
for incr, x in enumerate(cell):
new_data[i0+incr][j] = x
i0 += row_height
return new_data, row_heights
|
Converts multirow cells to a list of lists and informs the number of lines of each row.
Returns:
tuple: new_data, row_heights
|
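For example:

>>> data = [['a', ['b1', 'b2']], ['c', 'd']]
>>> new_data, row_heights = expand_multirow_data(data)
>>> new_data
[['a', 'b1'], ['', 'b2'], ['c', 'd']]
>>> row_heights
[2, 1]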
24,663 |
def get_pltpat(self, plt_ext="svg"):
if self.ntplt.desc == "":
return ".".join(["{BASE}", plt_ext])
return "".join(["{BASE}_", self.ntplt.desc, ".", plt_ext])
|
Return plot file pattern: {BASE}.svg {BASE}_pruned.svg {BASE}_upper_pruned.svg
|
24,664 |
async def _handle_bad_server_salt(self, message):
bad_salt = message.obj
    self._log.debug('Handling bad salt for message %d', bad_salt.bad_msg_id)
self._state.salt = bad_salt.new_server_salt
states = self._pop_states(bad_salt.bad_msg_id)
self._send_queue.extend(states)
    self._log.debug('%d message(s) will be resent', len(states))
|
Corrects the currently used server salt to use the right value
before enqueuing the rejected message to be re-sent:
bad_server_salt#edab447b bad_msg_id:long bad_msg_seqno:int
error_code:int new_server_salt:long = BadMsgNotification;
|
24,665 |
def _split_column_and_labels(self, column_or_label):
column = None if column_or_label is None else self._get_column(column_or_label)
labels = [label for i, label in enumerate(self.labels) if column_or_label not in (i, label)]
return column, labels
|
Return the specified column and labels of other columns.
|
24,666 |
def status_server(self, port):
    if not self.status_server_started:
self.status_server_started = True
try:
self.status_service = binwalk.core.statuserver.StatusServer(port, self)
except Exception as e:
binwalk.core.common.warning("Failed to start status server on port %d: %s" % (port, str(e)))
|
Starts the progress bar TCP service on the specified port.
This service will only be started once per instance, regardless of the
number of times this method is invoked.
Failure to start the status service is considered non-critical; that is,
a warning will be displayed to the user, but normal operation will proceed.
|
24,667 |
def Jacobian_re_im(self, pars):
partials = []
partials.append(self.dre_drho0(pars)[:, np.newaxis])
partials.append(self.dre_dm(pars))
partials.append(self.dre_dtau(pars))
partials.append(self.dre_dc(pars))
partials.append(self.dim_drho0(pars)[:, np.newaxis])
partials.append(self.dim_dm(pars))
partials.append(self.dim_dtau(pars))
partials.append(self.dim_dc(pars))
print()
for x in partials:
print(x.shape)
J = np.concatenate(partials, axis=1)
return J
|
r"""
:math:`J`
>>> import sip_models.res.cc as cc
>>> import numpy as np
>>> f = np.logspace(-3, 3, 20)
>>> pars = [100, 0.1, 0.04, 0.8]
>>> obj = cc.cc(f)
>>> J = obj.Jacobian_re_im(pars)
|
24,668 |
def cmdloop(self):
while True:
cmdline = input(self.prompt)
tokens = shlex.split(cmdline)
if not tokens:
if self.last_cmd:
tokens = self.last_cmd
else:
print()
continue
if tokens[0] not in self.commands:
print()
continue
command = self.commands[tokens[0]]
self.last_cmd = tokens
try:
if command(self.state, tokens):
break
except CmdExit:
continue
except Exception as e:
if e not in self.safe_exceptions:
logger.exception()
|
Start CLI REPL.
|
24,669 |
def get_url(self, *paths, **params):
path_stack = self._attribute_stack[:]
if paths:
path_stack.extend(paths)
u = self._stack_collapser(path_stack)
url = self._url_template % {
"domain": self._api_url,
"generated_url" : u,
}
if self._params or params:
internal_params = self._params.copy()
internal_params.update(params)
url += self._generate_params(internal_params)
return url
|
Returns the URL for this request.
:param paths: Additional URL path parts to add to the request
:param params: Additional query parameters to add to the request
|
24,670 |
def displayName( self ):
if ( not self._displayName ):
return projex.text.pretty(self.objectName())
return self._displayName
|
Return the user-friendly name for this node. If the display name
is not explicitly set, then the prettified words of the object name
will be used.
:return <str>
|
24,671 |
def get_psf_sky(self, ra, dec):
if self.data is None:
beam = self.wcshelper.get_beam(ra, dec)
return beam.a, beam.b, beam.pa
x, y = self.sky2pix([ra, dec])
x = int(np.clip(x, 0, self.data.shape[1] - 1))
y = int(np.clip(y, 0, self.data.shape[2] - 1))
psf_sky = self.data[:, x, y]
return psf_sky
|
Determine the local psf at a given sky location.
The psf is returned in degrees.
Parameters
----------
ra, dec : float
The sky position (degrees).
Returns
-------
a, b, pa : float
The psf semi-major axis, semi-minor axis, and position angle in (degrees).
If a psf is defined then it is the psf that is returned, otherwise the image
restoring beam is returned.
|
24,672 |
def Registry(address=, **kwargs):
registry = None
try:
try:
registry = V1(address, **kwargs)
registry.ping()
except RegistryException:
registry = V2(address, **kwargs)
registry.ping()
except OSError:
logger.warning(
.format(address)
)
return registry
|
:return:
|
24,673 |
def import_vmesh(file):
imported_elements = []
if os.path.isfile(file):
imported_elements.append(exch.import_vol_mesh(file))
elif os.path.isdir(file):
files = sorted([os.path.join(file, f) for f in os.listdir(file)])
for f in files:
imported_elements.append(exch.import_vol_mesh(f))
else:
raise exch.GeomdlException("Input is not a file or a directory")
return imported_elements
|
Imports NURBS volume(s) from volume mesh (vmesh) file(s).
:param file: path to a directory containing mesh files or a single mesh file
:type file: str
:return: list of NURBS volumes
:rtype: list
:raises GeomdlException: an error occurred reading the file
|
24,674 |
def write_static_networks(gtfs, output_dir, fmt=None):
if fmt is None:
fmt = "edg"
single_layer_networks = stop_to_stop_networks_by_type(gtfs)
util.makedirs(output_dir)
for route_type, net in single_layer_networks.items():
tag = route_types.ROUTE_TYPE_TO_LOWERCASE_TAG[route_type]
file_name = os.path.join(output_dir, "network_" + tag + "." + fmt)
if len(net.edges()) > 0:
_write_stop_to_stop_network_edges(net, file_name, fmt=fmt)
|
Parameters
----------
gtfs: gtfspy.GTFS
output_dir: (str, unicode)
a path where to write
fmt: None, optional
defaulting to "edg" and writing results as ".edg" files
If "csv" csv files are produced instead
|
24,675 |
def get_groups_of_user(config, fas, username):
if not _cache.is_configured:
_cache.configure(**config[])
key = cache_key_generator(get_groups_of_user, username)
def creator():
if not fas:
return []
results = []
        for group in fas.person_by_username(username).get('memberships', []):
            if group['group_type'] == 'pkgdb':
                results.append(group.name)
return results
return _cache.get_or_create(key, creator)
|
Return the list of (pkgdb) groups to which the user belongs.
:arg config: a dict containing the fedmsg config
:arg fas: a fedora.client.fas2.AccountSystem object instantiated and logged
into FAS.
:arg username: the name of a user for which we want to retrieve groups
:return: a list of FAS groups to which the user belongs.
|
24,676 |
def check_data(cls, name, dims, is_unstructured):
if isinstance(name, six.string_types):
name = [name]
dims = [dims]
is_unstructured = [is_unstructured]
N = len(name)
    if len(dims) != N or len(is_unstructured) != N:
        return [False] * N, [
            'Number of provided names (%i) and dimensions (%i) or '
            'unstructured information (%i) are not the same' % (
                N, len(dims), len(is_unstructured))] * N
    return [True] * N, [''] * N
|
A validation method for the data shape
The default method does nothing and should be subclassed to validate
the results. If the plotter accepts a :class:`InteractiveList`, it
should accept a list for name and dims
Parameters
----------
name: str or list of str
The variable name(s) of the data
dims: list of str or list of lists of str
The dimension name(s) of the data
is_unstructured: bool or list of bool
True if the corresponding array is unstructured
Returns
-------
list of bool or None
True, if everything is okay, False in case of a serious error,
None if it is intermediate. Each object in this list corresponds to
one in the given `name`
list of str
The message giving more information on the reason. Each object in
this list corresponds to one in the given `name`
|
24,677 |
def _inject_format_spec(self, value, format_spec):
t = type(value)
    return value[:-1] + t(u':') + format_spec + t(u'}')
|
value: '{x}', format_spec: 'f' -> '{x:f}'
|
24,678 |
def cminus(a, b):
spa = sps.issparse(a)
spb = sps.issparse(b)
if not spa: a = np.asarray(a)
if not spb: b = np.asarray(b)
if spa: b = np.reshape(b, (1,1)) if len(np.shape(b)) == 0 else b
elif spb: a = np.reshape(a, (1,1)) if len(np.shape(a)) == 0 else a
return a - b
|
cminus(a, b) returns the difference a - b as a numpy array object. Like numpy's subtract
function or a - b syntax, minus will thread over the latest dimension possible.
|
24,679 |
def fetch(self):
    params = values.of({})
    payload = self._version.fetch(
        'GET',
        self._uri,
        params=params,
    )
    return AvailableAddOnExtensionInstance(
        self._version,
        payload,
        available_add_on_sid=self._solution['available_add_on_sid'],
        sid=self._solution['sid'],
    )
|
Fetch a AvailableAddOnExtensionInstance
:returns: Fetched AvailableAddOnExtensionInstance
:rtype: twilio.rest.preview.marketplace.available_add_on.available_add_on_extension.AvailableAddOnExtensionInstance
|
24,680 |
def dump_guest_stack(self, cpu_id):
if not isinstance(cpu_id, baseinteger):
raise TypeError("cpu_id can only be an instance of type baseinteger")
stack = self._call("dumpGuestStack",
in_p=[cpu_id])
return stack
|
Produce a simple stack dump using the current guest state.
This feature is not implemented in the 4.0.0 release but may show up
in a dot release.
in cpu_id of type int
The identifier of the Virtual CPU.
return stack of type str
String containing the formatted stack dump.
|
24,681 |
def open(self):
if self.comm is None:
state, buffer_paths, buffers = _remove_buffers(self.get_state())
        args = dict(target_name='jupyter.widget',
                    data={'state': state, 'buffer_paths': buffer_paths},
                    buffers=buffers,
                    metadata={'version': __protocol_version__}
                    )
        if self._model_id is not None:
            args['comm_id'] = self._model_id
self.comm = Comm(**args)
|
Open a comm to the frontend if one isn't already open.
|
24,682 |
def _update_new_ordered_reqs_count(self):
    last_num_ordered = self._last_performance_check_data.get('num_ordered')
    num_ordered = sum(num for num, _ in self.monitor.numOrderedRequests.values())
    if num_ordered != last_num_ordered:
        self._last_performance_check_data['num_ordered'] = num_ordered
return True
else:
return False
|
Checks if any requests have been ordered since last performance check
and updates the performance check data store if needed.
:return: True if new ordered requests, False otherwise
|
24,683 |
def fulfill(self, method, *args, **kwargs):
return getattr(self.session, method)(*args, **kwargs)
|
Fulfill an HTTP request to Keen's API.
|
24,684 |
def _copy_artifact(self, tgt, jar, version, typename, suffix='', extension='jar',
                   artifact_ext='', override_name=None):
genmap = self.context.products.get(typename)
product_mapping = genmap.get(tgt)
if product_mapping is None:
raise ValueError("No product mapping in {} for {}. "
"You may need to run some other task first".format(typename, tgt))
for basedir, jars in product_mapping.items():
for artifact in jars:
path = self.artifact_path(jar, version, name=override_name, suffix=suffix,
extension=extension, artifact_ext=artifact_ext)
safe_mkdir(os.path.dirname(path))
shutil.copy(os.path.join(basedir, artifact), path)
|
Copy the products for a target into the artifact path for the jar/version
|
24,685 |
def initialize(self):
if self.croniter is None:
self.time = time.time()
self.datetime = datetime.now(self.tz)
self.loop_time = self.loop.time()
self.croniter = croniter(self.spec, start_time=self.datetime)
|
Initialize croniter and related times
|
24,686 |
def _get_struct_gradientbevelfilter(self):
obj = _make_object("GradientBevelFilter")
obj.NumColors = num_colors = unpack_ui8(self._src)
obj.GradientColors = [self._get_struct_rgba()
for _ in range(num_colors)]
obj.GradientRatio = [unpack_ui8(self._src)
for _ in range(num_colors)]
obj.BlurX = unpack_fixed16(self._src)
obj.BlurY = unpack_fixed16(self._src)
obj.Angle = unpack_fixed16(self._src)
obj.Distance = unpack_fixed16(self._src)
obj.Strength = unpack_fixed8(self._src)
bc = BitConsumer(self._src)
obj.InnerShadow = bc.u_get(1)
obj.Knockout = bc.u_get(1)
obj.CompositeSource = bc.u_get(1)
obj.OnTop = bc.u_get(1)
obj.Passes = bc.u_get(4)
return obj
|
Get the values for the GRADIENTBEVELFILTER record.
|
24,687 |
def _width(self):
layout = self._instruction.get(GRID_LAYOUT)
if layout is not None:
width = layout.get(WIDTH)
if width is not None:
return width
return self._instruction.number_of_consumed_meshes
|
For ``self.width``.
|
24,688 |
def chained_get(container, path, default=None):
for key in path:
try:
container = container[key]
except (AttributeError, KeyError, TypeError):
return default
return container
|
Helper function to perform a series of .get() methods on a dictionary
and return a default object type in the end.
Parameters
----------
container : dict
The dictionary on which the .get() methods should be performed.
path : list or tuple
The list of keys that should be searched for.
default : any (optional, default=None)
The object type that should be returned if the search yields
no result.
|
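For example:

>>> info = {'author': {'affiliation': {'name': 'MIT'}}}
>>> chained_get(info, ['author', 'affiliation', 'name'])
'MIT'
>>> chained_get(info, ['author', 'email'], default='n/a')
'n/a'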
24,689 |
def Barati_high(Re):
Cd = (8E-6*((Re/6530.)**2 + tanh(Re) - 8*log(Re)/log(10.))
- 0.4119*exp(-2.08E43/(Re+Re**2)**4)
- 2.1344*exp(-((log(Re**2 + 10.7563)/log(10))**2 + 9.9867)/Re)
+ 0.1357*exp(-((Re/1620.)**2 + 10370.)/Re)
- 8.5E-3*(2*log(tanh(tanh(Re)))/log(10) - 2825.7162)/Re + 2.4795)
return Cd
|
r'''Calculates drag coefficient of a smooth sphere using the method in
[1]_.
.. math::
C_D = 8\times 10^{-6}\left[(Re/6530)^2 + \tanh(Re) - 8\ln(Re)/\ln(10)\right]
- 0.4119\exp(-2.08\times10^{43}/[Re + Re^2]^4)
-2.1344\exp(-\{[\ln(Re^2 + 10.7563)/\ln(10)]^2 + 9.9867\}/Re)
+0.1357\exp(-[(Re/1620)^2 + 10370]/Re)
- 8.5\times 10^{-3}\{2\ln[\tanh(\tanh(Re))]/\ln(10) - 2825.7162\}/Re
+ 2.4795
Parameters
----------
Re : float
Reynolds number of the sphere, [-]
Returns
-------
Cd : float
Drag coefficient [-]
Notes
-----
Range is Re <= 1E6
This model is the wider-range model the authors developed.
At sufficiently low diameters or Re values, drag is no longer a phenomena.
Examples
--------
Matching example in [1]_, in a table of calculated values.
>>> Barati_high(200.)
0.7730544082789523
References
----------
.. [1] Barati, Reza, Seyed Ali Akbar Salehi Neyshabouri, and Goodarz
Ahmadi. "Development of Empirical Models with High Accuracy for
Estimation of Drag Coefficient of Flow around a Smooth Sphere: An
Evolutionary Approach." Powder Technology 257 (May 2014): 11-19.
doi:10.1016/j.powtec.2014.02.045.
|
24,690 |
def open_data(self, url, data=None):
    if not isinstance(url, str):
        raise URLError('data error: proxy support for data protocol currently not implemented')
    # ignore POSTed data
    try:
        [type, data] = url.split(',', 1)
    except ValueError:
        raise IOError('data error', 'bad data URL')
    if not type:
        type = 'text/plain;charset=US-ASCII'
    semi = type.rfind(';')
    if semi >= 0 and '=' not in type[semi:]:
        encoding = type[semi+1:]
        type = type[:semi]
    else:
        encoding = ''
    msg = []
    msg.append('Date: %s' % time.strftime('%a, %d %b %Y %H:%M:%S GMT',
                                          time.gmtime(time.time())))
    msg.append('Content-type: %s' % type)
    if encoding == 'base64':
        data = base64.decodebytes(data.encode('ascii')).decode('latin-1')
    else:
        data = unquote(data)
    msg.append('Content-Length: %d' % len(data))
    msg.append('')
    msg.append(data)
    msg = '\n'.join(msg)
    headers = email.message_from_string(msg)
    f = io.StringIO(msg)
    return addinfourl(f, headers, url)
|
Use "data" URL.
|
24,691 |
def _insert_vars(self, path: str, data: dict) -> str:
data = data.copy()
while True:
match = re.search(self.VAR_REPLACE_REGEX, path)
if not match:
return path
replace_from = match.group(0)
replace_with = str(data.get(match.group(1)))
path = path.replace(replace_from, replace_with)
|
Inserts variables into the ESI URL path.
Args:
path: raw ESI URL path
data: data to insert into the URL
Returns:
path with variables filled
|
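Assuming VAR_REPLACE_REGEX matches '{name}'-style placeholders (e.g. re.compile(r'\{(\w+)\}')), usage would look like:

>>> esi._insert_vars('/characters/{character_id}/', {'character_id': 581})
'/characters/581/'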
24,692 |
def independent_get_coefficients(coef, rhouv, s, i, j, k, u, v,
unfolding, matrix_form):
if matrix_form:
coef = -coef
Mu = unfolding.Mu
mu = Mu(s, i, j)
rhouv_isconjugated = False
if s == 1:
coef_list = [[mu, None, -im(coef), matrix_form, rhouv_isconjugated]]
elif s == -1:
coef_list = [[mu, None, re(coef), matrix_form, rhouv_isconjugated]]
else:
coef_list = [[mu, None, coef, matrix_form, rhouv_isconjugated]]
return coef_list
|
r"""Get the indices mu, nu, and term coefficients for linear terms.
>>> from fast.symbolic import define_density_matrix
>>> Ne = 2
>>> coef = 1+2j
>>> rhouv = define_density_matrix(Ne)[1, 1]
>>> s, i, j, k, u, v = (1, 1, 0, 1, 1, 1)
>>> unfolding = Unfolding(Ne, real=True, normalized=True)
>>> independent_get_coefficients(coef, rhouv, s, i, j, k, u, v,
... unfolding, False)
[[1, None, -2.00000000000000, False, False]]
|
24,693 |
def summary(self):
print("Type: %s" % self.__class__.__name__)
print("Batch Name: %r" % self.batch_name)
if self.tag:
print("Tag: %s" % self.tag)
print("Root directory: %r" % self.get_root_directory())
print("Maximum concurrency: %s" % self.max_concurrency)
if self.description:
print("Description: %s" % self.description)
|
A succinct summary of the Launcher configuration. Unlike the
repr, a summary does not have to be complete but must supply
key information relevant to the user.
|
24,694 |
def get_label_map(opts):
result = {}
try:
for entry in os.scandir(diskdir):
            if entry.name.startswith('.'):
continue
if islink(entry.path):
target = os.readlink(entry.path)
else:
target = entry.path
result[target] = entry.name
if opts.debug:
            print('get_label_map:', result)
except FileNotFoundError:
pass
return result
|
Find volume labels from filesystem and return in dict format.
|
24,695 |
def set_action_name(self, name):
if self._open and name is not None:
self._open[-1].name = name
self.notify()
|
Set the name of the top group, if present.
|
24,696 |
def hook_up(self, router: UrlDispatcher):
router.add_get(self.webhook_path, self.check_hook)
router.add_post(self.webhook_path, self.receive_events)
|
Dynamically hooks the right webhook paths
|
24,697 |
def run(cli_args):
from .core import Core
c = Core(
source_file=cli_args["--data-file"],
schema_files=cli_args["--schema-file"],
        extensions=cli_args["--extension"],
        strict_rule_validation=cli_args["--strict-rule-validation"],
        fix_ruby_style_regex=cli_args["--fix-ruby-style-regex"],
        allow_assertions=cli_args["--allow-assertions"],
        file_encoding=cli_args["--encoding"],
)
c.validate()
return c
|
Split the functionality into 2 methods.
One for parsing the cli and one that runs the application.
|
24,698 |
def _run_morfologik(self, words):
p = subprocess.Popen(
        ['java', '-jar', self.jar_path, 'plstem',
         '--input-encoding', 'utf-8',
         '--output-encoding', 'utf-8'],
bufsize=-1,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, _ = p.communicate(input=bytes("\n".join(words), "utf-8"))
    return decode(out, 'utf-8')
|
Runs morfologik java jar and assumes that input and output is
UTF-8 encoded.
|
24,699 |
def get_events(self):
    to_send = {'limit': 50}
response = self._send_data(, , , to_send)
    output = {'events': ""}
    for event in response['events']:
desc = "Source IP: {ip}\n"
desc += "Datetime: {time}\n"
desc += "Indicator: {match}\n"
desc += "Method: {method}\n"
desc += "URL: {url}\n"
desc += "Request Type: {type}\n"
desc += "User-Agent: {userAgent}\n"
desc += "Contact: {contact}\n"
desc += "\n"
        output['events'] += desc.format(**event)
return output
|
Get events from the cloud node.
|