Unnamed: 0 | code | docstring
---|---|---
388,100 | def which_users_can(self, name):
_roles = self.which_roles_can(name)
result = [self.get_role_members(i.get('name')) for i in _roles]  # 'name' key restored by inference; the original literal was stripped
return result | Which role can SendMail? |
388,101 | def __verify_minion_publish(self, clear_load):
    # String literals below were stripped during extraction; they are restored
    # from the matching Salt master source and should be read as a best guess.
    if 'peer' not in self.opts:
        return False
    if not isinstance(self.opts['peer'], dict):
        return False
    if any(key not in clear_load for key in ('fun', 'arg', 'tgt', 'ret', 'tok', 'id')):
        return False
    # the identity guard below was dropped by the extraction; without it the
    # warning and return would be unconditional
    if not self.__verify_minion(clear_load['id'], clear_load['tok']):
        log.warning(
            'Minion id %s is not who it says it is!', clear_load['id']
        )
        return False
    clear_load.pop('tok')
    perms = []
    for match in self.opts['peer']:
        if re.match(match, clear_load['id']):
            if isinstance(self.opts['peer'][match], list):
                perms.extend(self.opts['peer'][match])
    if ',' in clear_load['fun']:
        clear_load['fun'] = clear_load['fun'].split(',')
        arg_ = []
        for arg in clear_load['arg']:
            arg_.append(arg.split())
        clear_load['arg'] = arg_
    return self.ckminions.auth_check(
        perms,
        clear_load['fun'],
        clear_load['arg'],
        clear_load['tgt'],
        clear_load.get('tgt_type', 'glob'),
        publish_validate=True) | Verify that the passed information authorized a minion to execute
:param dict clear_load: A publication load from a minion
:rtype: bool
:return: A boolean indicating if the minion is allowed to publish the command in the load |
388,102 | def boolify(value, nullable=False, return_string=False):
if isinstance(value, BOOL_COERCEABLE_TYPES):
return bool(value)
val = text_type(value).strip().lower().replace('.', '', 1)
if val.isnumeric():
return bool(float(val))
elif val in BOOLISH_TRUE:
return True
elif nullable and val in NULL_STRINGS:
return None
elif val in BOOLISH_FALSE:
return False
else:
try:
return bool(complex(val))
except ValueError:
if isinstance(value, string_types) and return_string:
return value
raise TypeCoercionError(value, "The value %r cannot be boolified." % value) | Convert a number, string, or sequence type into a pure boolean.
Args:
value (number, string, sequence): pretty much anything
Returns:
bool: boolean representation of the given value
Examples:
>>> [boolify(x) for x in ('yes', 'no')]
[True, False]
>>> [boolify(x) for x in (0.1, 0+0j, True, '0', '0.0', '0.1', '2')]
[True, False, True, False, False, True, True]
>>> [boolify(x) for x in ("true", "yes", "on", "y")]
[True, True, True, True]
>>> [boolify(x) for x in ("no", "non", "none", "off", "")]
[False, False, False, False, False]
>>> [boolify(x) for x in ([], set(), dict(), tuple())]
[False, False, False, False]
>>> [boolify(x) for x in ([1], set([False]), dict({'a': 1}), tuple([2]))]
[True, True, True, True] |
388,103 | def _ReadStringDataTypeDefinition(
self, definitions_registry, definition_values, definition_name,
is_member=False):
if is_member:
supported_definition_values = (
self._SUPPORTED_DEFINITION_VALUES_STRING_MEMBER)
else:
supported_definition_values = self._SUPPORTED_DEFINITION_VALUES_STRING
definition_object = self._ReadElementSequenceDataTypeDefinition(
definitions_registry, definition_values, data_types.StringDefinition,
definition_name, supported_definition_values)
encoding = definition_values.get('encoding', None)
if not encoding:
error_message = 'missing encoding'
raise errors.DefinitionReaderError(definition_name, error_message)
definition_object.encoding = encoding
return definition_object | Reads a string data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
definition_name (str): name of the definition.
is_member (Optional[bool]): True if the data type definition is a member
data type definition.
Returns:
StringDefinition: string data type definition.
Raises:
DefinitionReaderError: if the definitions values are missing or if
the format is incorrect. |
388,104 | def generate(self, chars):
if not self._cache:
self.load()
body = self.create_wave_body(chars)
return patch_wave_header(body) | Generate audio CAPTCHA data. The return data is a bytearray.
:param chars: text to be generated. |
388,105 | def snake_to_pascal(name, singularize=False):
parts = name.split("_")
if singularize:
return "".join(p.upper() if p in _ALL_CAPS else to_singular(p.title()) for p in parts)
else:
return "".join(p.upper() if p in _ALL_CAPS else p.title() for p in parts) | Converts snake_case to PascalCase. If singularize is True, an attempt is made at singularizing
each part of the resulting name. |
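A quick usage sketch of snake_to_pascal. The _ALL_CAPS set and the to_singular helper below are illustrative stand-ins for the module's real definitions, which are not shown in this row:

_ALL_CAPS = {"id", "api", "url"}          # assumed contents, for illustration only

def to_singular(word):
    # naive singularizer; the real helper is presumably smarter
    return word[:-1] if word.endswith("s") else word

def snake_to_pascal(name, singularize=False):
    parts = name.split("_")
    if singularize:
        return "".join(p.upper() if p in _ALL_CAPS else to_singular(p.title()) for p in parts)
    return "".join(p.upper() if p in _ALL_CAPS else p.title() for p in parts)

print(snake_to_pascal("api_keys"))                    # APIKeys
print(snake_to_pascal("api_keys", singularize=True))  # APIKey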
388,106 | def app_state(self, app):
if not self.available or not self.screen_on:
return STATE_OFF
if self.current_app["package"] == app:
return STATE_ON
return STATE_OFF | Informs if application is running. |
388,107 | def models(cls, api_version=DEFAULT_API_VERSION):
if api_version == '2015-08-01':
from .v2015_08_01 import models
return models
elif api_version == '2017-04-01':
from .v2017_04_01 import models
return models
elif api_version == '2018-01-01-preview':
from .v2018_01_01_preview import models
return models
raise NotImplementedError("APIVersion {} is not available".format(api_version)) | Module depends on the API version:
* 2015-08-01: :mod:`v2015_08_01.models<azure.mgmt.eventhub.v2015_08_01.models>`
* 2017-04-01: :mod:`v2017_04_01.models<azure.mgmt.eventhub.v2017_04_01.models>`
* 2018-01-01-preview: :mod:`v2018_01_01_preview.models<azure.mgmt.eventhub.v2018_01_01_preview.models>` |
388,108 | def OnCellText(self, event):
row, col, _ = self.grid.actions.cursor
self.grid.GetTable().SetValue(row, col, event.code)
event.Skip() | Text entry event handler |
388,109 | def loads(content):
lines = _group_lines(line for line in content.split('\n'))
lines = [
(i, _parse_envfile_line(line))
for i, line in lines if line.strip()
]
errors = []
duplicates = _find_duplicates(((i, line[0]) for i, line in lines))
for i, variable, j in duplicates:
errors.append('\n'.join([
'line %d: duplicate definition of variable %r',  # message text reconstructed; original literals were stripped
'(previously defined on line %d)',
]) % (i + 1, variable, j + 1))
if errors:
raise ValueError(errors)
return {k: v for _, (k, v) in lines} | Loads variable definitions from a string. |
388,110 | def create(self, _attributes=None, _joining=None, _touch=True, **attributes):
if _attributes is not None:
attributes.update(_attributes)
instance = self._related.new_instance(attributes)
instance.save({"touch": False})
self.attach(instance.get_key(), _joining, _touch)
return instance | Create a new instance of the related model.
:param attributes: The attributes
:type attributes: dict
:rtype: orator.orm.Model |
388,111 | def _apply_dvs_infrastructure_traffic_resources(infra_traffic_resources,
resource_dicts):
for res_dict in resource_dicts:
filtered_traffic_resources = \
[r for r in infra_traffic_resources if r.key == res_dict['key']]  # dict keys restored from the matching Salt source
if filtered_traffic_resources:
traffic_res = filtered_traffic_resources[0]
else:
traffic_res = vim.DvsHostInfrastructureTrafficResource()
traffic_res.key = res_dict['key']
traffic_res.allocationInfo = \
vim.DvsHostInfrastructureTrafficResourceAllocation()
infra_traffic_resources.append(traffic_res)
if res_dict.get('limit'):
traffic_res.allocationInfo.limit = res_dict['limit']
if res_dict.get('reservation'):
traffic_res.allocationInfo.reservation = res_dict['reservation']
if res_dict.get('num_shares') or res_dict.get('share_level'):
if not traffic_res.allocationInfo.shares:
traffic_res.allocationInfo.shares = vim.SharesInfo()
if res_dict.get('share_level'):
traffic_res.allocationInfo.shares.level = \
vim.SharesLevel(res_dict['share_level'])
if res_dict.get('num_shares'):
traffic_res.allocationInfo.shares.shares = res_dict['num_shares'] | Applies the values of the resource dictionaries to infra traffic resources,
creating the infra traffic resource if required
(vim.DistributedVirtualSwitchProductSpec) |
388,112 | def pacific_atlantic(matrix):
n = len(matrix)
if not n: return []
m = len(matrix[0])
if not m: return []
res = []
atlantic = [[False for _ in range(m)] for _ in range(n)]  # n rows by m columns; the original had the dimensions swapped
pacific = [[False for _ in range(m)] for _ in range(n)]
for i in range(n):
dfs(pacific, matrix, float("-inf"), i, 0)
dfs(atlantic, matrix, float("-inf"), i, m-1)
for i in range(m):
dfs(pacific, matrix, float("-inf"), 0, i)
dfs(atlantic, matrix, float("-inf"), n-1, i)
for i in range(n):
for j in range(m):
if pacific[i][j] and atlantic[i][j]:
res.append([i, j])
return res | :type matrix: List[List[int]]
:rtype: List[List[int]] |
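The function above calls a module-level dfs helper that this row does not include. A plausible sketch (an assumption, not the repository's exact code) flood-fills every cell reachable from an ocean border while heights are non-decreasing:

def dfs(visited, matrix, height, i, j):
    n, m = len(matrix), len(matrix[0])
    # stop at the grid edge, at visited cells, or where water could not
    # flow back (current cell lower than the cell we came from)
    if i < 0 or i >= n or j < 0 or j >= m:
        return
    if visited[i][j] or matrix[i][j] < height:
        return
    visited[i][j] = True
    for di, dj in ((1, 0), (-1, 0), (0, 1), (0, -1)):
        dfs(visited, matrix, matrix[i][j], i + di, j + dj)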
388,113 | async def dump_message(obj, msg, field_archiver=None):
mtype = msg.__class__
fields = mtype.f_specs()
obj = collections.OrderedDict() if obj is None else get_elem(obj)
for field in fields:
await dump_message_field(obj, msg=msg, field=field, field_archiver=field_archiver)
return obj | Dumps message to the object.
Returns message popo representation.
:param obj:
:param msg:
:param field_archiver:
:return: |
388,114 | def create_record(awsclient, name_prefix, instance_reference, type="A", host_zone_name=None):
if host_zone_name is None:
host_zone_name = _retrieve_stack_host_zone_name(awsclient)
if not (type == "A" or type == "CNAME"):
raise Exception("Record set type is not supported!")
name_of_record = name_prefix.replace('-', '').replace('.', '').title() + "HostRecord"  # separators stripped so the CloudFormation logical name stays alphanumeric; exact literals inferred
if isinstance(instance_reference, Instance):
resource_record = troposphere.GetAtt(
instance_reference,
"PrivateIp"
)
else:
resource_record = instance_reference
return RecordSetType(
name_of_record,
HostedZoneName=host_zone_name,
Name=troposphere.Join("", [
name_prefix + ".",
host_zone_name,
]),
Type=type,
TTL=TTL_DEFAULT,
ResourceRecords=[
resource_record
],
) | Builds route53 record entries enabling DNS names for services
Note: gcdt.route53 create_record(awsclient, ...)
is used in dataplatform cloudformation.py templates!
:param name_prefix: The sub domain prefix to use
:param instance_reference: The EC2 troposphere reference whose private IP should be linked to
:param type: The type of the record A or CNAME (default: A)
:param host_zone_name: The host zone name to use (like preprod.ds.glomex.cloud. - DO NOT FORGET THE DOT!)
:return: RecordSetType |
388,115 | def use_comparative_grade_system_view(self):
self._object_views['grade_system'] = COMPARATIVE  # view key inferred from the session name
for session in self._get_provider_sessions():
try:
session.use_comparative_grade_system_view()
except AttributeError:
pass | Pass through to provider GradeSystemLookupSession.use_comparative_grade_system_view |
388,116 | def _translate_div(self, oprnd1, oprnd2, oprnd3):
assert oprnd1.size and oprnd2.size and oprnd3.size
assert oprnd1.size == oprnd2.size
op1_var = self._translate_src_oprnd(oprnd1)
op2_var = self._translate_src_oprnd(oprnd2)
op3_var, op3_var_constrs = self._translate_dst_oprnd(oprnd3)
if oprnd3.size > oprnd1.size:
op1_var_zx = smtfunction.zero_extend(op1_var, oprnd3.size)
op2_var_zx = smtfunction.zero_extend(op2_var, oprnd3.size)
result = op1_var_zx.udiv(op2_var_zx)
elif oprnd3.size < oprnd1.size:
result = smtfunction.extract(op1_var.udiv(op2_var), 0, oprnd3.size)
else:
result = op1_var.udiv(op2_var)
return [op3_var == result] + op3_var_constrs | Return a formula representation of a DIV instruction. |
388,117 | def _get_powercfg_minute_values(scheme, guid, subguid, safe_name):
if scheme is None:
scheme = _get_current_scheme()
if __grains__['osrelease'] == '7':  # string literals restored from the matching Salt win_powercfg source
cmd = 'powercfg /q {0} {1}'.format(scheme, guid)
else:
cmd = 'powercfg /q {0} {1} {2}'.format(scheme, guid, subguid)
out = __salt__['cmd.run'](cmd, python_shell=False)
split = out.split('\r\n\r\n')
if len(split) > 1:
for s in split:
if safe_name in s or subguid in s:
out = s
break
else:
out = split[0]
raw_settings = re.findall(r'Power Setting Index: ([0-9a-fx]+)', out)
return {'ac': int(raw_settings[0], 0) / 60,
'dc': int(raw_settings[1], 0) / 60} | Returns the AC/DC values in a dict for a guid and subguid for the given
scheme |
388,118 | def synthesize_property(property_name,
default = None,
contract = None,
read_only = False,
private_member_name = None):
return SyntheticDecoratorFactory().syntheticMemberDecorator(memberName = property_name,
defaultValue = default,
contract = contract,
readOnly = read_only,
privateMemberName = private_member_name,
memberDelegate = PropertyDelegate()) | When applied to a class, this decorator adds a property to it and overrides the constructor in order to set
the default value of the property.
:IMPORTANT: In order for this to work on python 2, you must use new-style classes, that is to say the class must inherit from object.
By default, the private attribute containing the property's value will be named ``property_name`` with '_' prepended to it.
Naming convention can be overridden with a custom one using :meth:`naming_convention <naming_convention>` decorator.
:param property_name: Name of the property to synthesize.
:type property_name: str
:param default: Property's default value.
:type default: *
:param contract: Type constraint. See `PyContracts <http://andreacensi.github.com/contracts/>`_
:type contract: *
:param read_only: If set to ``True``, the property will not have a setter.
:type read_only: bool
:param private_member_name: Custom name for the private attribute that contains the property's value.
:type private_member_name: str|None
:raises: :class:`DuplicateMemberNameError` when two synthetic members have the same name.
:raises: :class:`InvalidPropertyOverrideError` when there's already a member with that name and which is not a property. |
388,119 | def get_auth(self):
return (self._cfgparse.get(self._section, 'username'), self._cfgparse.get(self._section, 'password'))  # option names inferred | Returns the username and password from the configfile. |
388,120 | def get_call_signature(fn: FunctionType,
args: ArgsType,
kwargs: KwargsType,
debug_cache: bool = False) -> str:
try:
call_sig = json_encode((fn.__qualname__, args, kwargs))
except TypeError:
log.critical(
"\nTo decorate using @django_cache_function without specifying "
"cache_key, the decorated function's owning class and its "
"parameters must be JSON-serializable (see jsonfunc.py, "
"django_cache_fn.py).\n")
raise
if debug_cache:
log.debug("Making call signature {!r}", call_sig)
return call_sig | Takes a function and its args/kwargs, and produces a string description
of the function call (the call signature) suitable for use indirectly as a
cache key. The string is a JSON representation. See ``make_cache_key`` for
a more suitable actual cache key. |
388,121 | def gumbel_softmax(x,
z_size,
mode,
softmax_k=0,
temperature_warmup_steps=150000,
summary=True,
name=None):
with tf.variable_scope(name, default_name="gumbel_softmax"):
m = tf.layers.dense(x, 2**z_size, name="mask")
if softmax_k > 0:
m, kl = top_k_softmax(m, softmax_k)
return m, m, 1.0 - tf.reduce_mean(kl)
logsm = tf.nn.log_softmax(m)
gumbel_samples = gumbel_sample(common_layers.shape_list(m))
steps = temperature_warmup_steps
gumbel_samples *= common_layers.inverse_exp_decay(steps // 5) * 0.5
temperature = 1.2 - common_layers.inverse_lin_decay(steps)
temperature = tf.cond(
tf.less(tf.random_uniform([]), 0.9), lambda: temperature,
lambda: tf.random_uniform([], minval=0.5, maxval=1.0))
s = tf.nn.softmax((logsm + gumbel_samples) / temperature)
m = tf.nn.softmax(m)
kl = -tf.reduce_max(logsm, axis=-1)
if summary:
tf.summary.histogram("max-log", tf.reshape(kl, [-1]))
maxvec = tf.reshape(tf.argmax(m, axis=-1), [-1])
maxvhot = tf.stop_gradient(tf.one_hot(maxvec, 2**z_size))
distrib = tf.reshape(logsm, [-1, 2**z_size]) * maxvhot
d_mean = tf.reduce_mean(distrib, axis=[0], keep_dims=True)
d_variance = tf.reduce_mean(
tf.squared_difference(distrib, d_mean), axis=[0])
d_dev = -tf.reduce_mean(d_variance)
ret = s
if mode != tf.estimator.ModeKeys.TRAIN:
ret = tf.reshape(maxvhot, common_layers.shape_list(s))
return m, ret, d_dev * 5.0 + tf.reduce_mean(kl) * 0.002 | Gumbel softmax discretization bottleneck.
Args:
x: Input to the discretization bottleneck.
z_size: Number of bits, where discrete codes range from 1 to 2**z_size.
mode: tf.estimator.ModeKeys.
softmax_k: If > 0 then do top-k softmax.
temperature_warmup_steps: Number of steps it takes to decay temperature to
0.
summary: Whether to write summaries.
name: Name for the bottleneck scope.
Returns:
Embedding function, discrete code, and loss. |
388,122 | def _health_check_thread(self):
while self._run_health_checker:
response = self._health_check(Health_pb2.HealthCheckRequest(service=''))
logging.debug('Health check response: ' + str(response))  # log text reconstructed
time.sleep(30)
return | Health checker thread that pings the service every 30 seconds
:return: None |
388,123 | def request(self, apdu):
if _debug: ServerSSM._debug("request %r", apdu)
apdu.pduSource = self.pdu_address
apdu.pduDestination = None
self.ssmSAP.sap_request(apdu) | This function is called by transaction functions to send
to the application. |
388,124 | def _is_inventory_group(key, value):
if (
key.startswith('_')
or not isinstance(value, (list, tuple, GeneratorType))
):
return False
if isinstance(value, tuple):
value = value[0]
if isinstance(value, GeneratorType):
value = list(value)
return all(
isinstance(item, ALLOWED_HOST_TYPES)
for item in value
) | Verify that a module-level variable (key = value) is a valid inventory group. |
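A minimal behavior check, run alongside the definition above; ALLOWED_HOST_TYPES is assumed to be a tuple of host-name types such as (str,) (the real constant is not shown in this row):

from types import GeneratorType

ALLOWED_HOST_TYPES = (str,)   # assumed definition, for illustration only

print(_is_inventory_group("web_servers", ["a.example.com", "b.example.com"]))  # True
print(_is_inventory_group("_private", ["a.example.com"]))                      # False: leading underscore
print(_is_inventory_group("web_servers", "not-a-list"))                        # False: not list/tuple/generator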
388,125 | def is_canonical_address(address: Any) -> bool:
if not is_bytes(address) or len(address) != 20:
return False
return address == to_canonical_address(address) | Returns `True` if the `value` is an address in its canonical form. |
388,126 | def squash_layouts(self, layouts):
top_layout = layouts[0]
json_data = {}
layout = Layout(top_layout.name(), json_data, layouts)
for mlayout in reversed(layouts):
self.dict_merge(layout.json(), mlayout.json())
return layout | Returns a squashed layout
The first element takes precedence (i.e. left to right).
Dictionaries are recursively merged, overwrites only occur on non-dictionary entries.
[0,1]
0:
test: 'my data'
1:
test: 'stuff'
Result:
test: 'my data'
@param layouts: List of layouts to merge together
@return: New layout with list of layouts squash merged |
388,127 | def add_template_events_to_ifo(self, ifo, columns, vectors):
self.template_events = self.template_event_dict[ifo]
self.add_template_events(columns, vectors)
self.template_event_dict[ifo] = self.template_events
self.template_events = None | Add a vector of template events indexed to a single ifo. |
388,128 | def add_component_type(self, component_type):
name = component_type.name
if '-' in name:  # separator literal inferred; the original was stripped
name = name.replace('-', '_')
component_type.name = name
self.component_types[name] = component_type | Adds a component type to the model.
@param component_type: Component type to be added.
@type component_type: lems.model.fundamental.ComponentType |
388,129 | def _FlushInput(self):
self.ser.flush()
flushed = 0
while True:
ready_r, ready_w, ready_x = select.select([self.ser], [],
[self.ser], 0)
if len(ready_x) > 0:
logging.error("Exception from serial port.")
return None
elif len(ready_r) > 0:
flushed += 1
self.ser.read(1)
self.ser.flush()
else:
break | Flush all read data until no more available. |
388,130 | def calc_J(self):
del self.J
self.J = np.zeros([self.param_vals.size, self.data.size])
dp = np.zeros_like(self.param_vals)
f0 = self.model.copy()
for a in range(self.param_vals.size):
dp *= 0
dp[a] = self.dl[a]
f1 = self.func(self.param_vals + dp, *self.func_args, **self.func_kwargs)
grad_func = (f1 - f0) / dp[a]
self.J[a] = -grad_func | Updates self.J, returns nothing |
388,131 | def parse(self):
if not self.survey_str:
return None
lines = self.survey_str.splitlines()
if len(lines) < 10:
raise ParseException("Expected at least 10 lines in a Compass Survey, only found %d!\nlines=%s" % (len(lines), lines))
first_line = lines.pop(0).strip()
if first_line.startswith('SURVEY NAME:'):  # header literals reconstructed approximately
cave_name = ''
name = first_line.split(':', 1)[1].strip()
else:
cave_name = first_line
name = lines.pop(0).split(':', 1)[1].strip()
date_comment_toks = lines.pop(0).split(':', 1)[1].split('COMMENT:')
date = CompassSurveyParser._parse_date(date_comment_toks[0])
comment = date_comment_toks[1].strip() if len(date_comment_toks) > 1 else ''
lines.pop(0)
team = [member.strip() for member in lines.pop(0).split(',')]
# shot parsing and construction of `survey` are elided in this excerpt
return survey | Parse our string and return a Survey object, None, or raise :exc:`ParseException` |
388,132 | def formula(self, atom_sequence):
labels = {}
types = []
y = 0
for k, atomi in enumerate(atom_sequence):
lbl = re.sub("[0-9]+", "", atomi).capitalize()
if lbl not in labels:
labels[lbl] = y
types.append([k+1])
y += 1
else:
types[ labels[lbl] ].append(k+1)
atoms = list(labels.keys())
atoms = [x for x in self.formula_sequence if x in atoms] + [x for x in atoms if x not in self.formula_sequence]
formula = ''
for atom in atoms:
n = len(types[labels[atom]])
if n == 1: n = ''  # a count of 1 is omitted in chemical formulas
else: n = str(n)
formula += atom + n
return formula | Constructs standardized chemical formula
NB: this is the PUBLIC method
@returns formula_str |
388,133 | def list_reshape(list_, new_shape, trail=False):
if not trail:
total = reduce(operator.mul, new_shape)
assert total == len(list_)
newlist = list_
for dim in reversed(new_shape):
slice_ = (newlist[i::dim] for i in range(dim))
newlist = list(map(list, zip(*slice_)))
if not trail:
newlist = newlist[0]
return newlist | Reshapes `list_`, leaving trailing dimensions in front if prod(new_shape) != len(list_)
Args:
list_ (list):
new_shape (tuple):
Returns:
list: list_
CommandLine:
python -m utool.util_list --exec-list_reshape --show
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_list import * # NOQA
>>> import utool as ut
>>> import numpy as np
>>> list_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
>>> new_shape = (2, 2, 3)
>>> newlist = list_reshape(list_, new_shape)
>>> depth = ut.depth_profile(newlist)
>>> result = ('list_ = %s' % (ut.repr2(newlist, nl=1),))
>>> print('depth = %r' % (depth,))
>>> print(result)
>>> newlist2 = np.reshape(list_, depth).tolist()
>>> ut.assert_eq(newlist, newlist2) |
388,134 | def get_grid(grid_id):
assert_is_type(grid_id, str)
grid_json = api("GET /99/Grids/%s" % grid_id)
models = [get_model(key["name"]) for key in grid_json["model_ids"]]
first_model_json = api("GET /3/Models/%s" % grid_json["model_ids"][0]["name"])["models"][0]
gs = H2OGridSearch(None, {}, grid_id)
gs._resolve_grid(grid_id, grid_json, first_model_json)
gs.models = models
hyper_params = {param: set() for param in gs.hyper_names}
for param in gs.hyper_names:
for model in models:
if isinstance(model.full_parameters[param]["actual_value"], list):
hyper_params[param].add(model.full_parameters[param]["actual_value"][0])
else:
hyper_params[param].add(model.full_parameters[param]["actual_value"])
hyper_params = {str(param): list(vals) for param, vals in hyper_params.items()}
gs.hyper_params = hyper_params
gs.model = model.__class__()
return gs | Return the specified grid.
:param grid_id: The grid identification in h2o
:returns: an :class:`H2OGridSearch` instance. |
388,135 | def split_heads(self, x):
with tf.name_scope("split_heads"):
batch_size = tf.shape(x)[0]
length = tf.shape(x)[1]
depth = (self.hidden_size // self.num_heads)
x = tf.reshape(x, [batch_size, length, self.num_heads, depth])
return tf.transpose(x, [0, 2, 1, 3]) | Split x into different heads, and transpose the resulting value.
The tensor is transposed to ensure the inner dimensions hold the correct
values during the matrix multiplication.
Args:
x: A tensor with shape [batch_size, length, hidden_size]
Returns:
A tensor with shape [batch_size, num_heads, length, hidden_size/num_heads] |
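A numpy analogue of the same reshape-and-transpose, just to make the resulting shape concrete (not part of the original module):

import numpy as np

batch, length, hidden, heads = 2, 5, 8, 4
x = np.zeros((batch, length, hidden))
depth = hidden // heads
split = x.reshape(batch, length, heads, depth).transpose(0, 2, 1, 3)
print(split.shape)   # (2, 4, 5, 2) == [batch_size, num_heads, length, hidden_size/num_heads]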
388,136 | def _R2deriv(self,R,z,phi=0.,t=0.):
r= nu.sqrt(R*R+z*z)
return 4.*nu.pi*r**(-2.-self.alpha)*nu.exp(-(r/self.rc)**2.)*R**2.\
+self._mass(r)/r**5.*(z**2.-2.*R**2.) | NAME:
_R2deriv
PURPOSE:
evaluate the second radial derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the second radial derivative
HISTORY:
2013-06-28 - Written - Bovy (IAS) |
388,137 | def _reset(self, framer):
if id(framer) == self._framer_id:
return
self._other = {}
framer.init_state(self)
self._framer_id = id(framer) | Reset the state for the framer. It is safe to call this
method multiple times with the same framer; the ID of the
framer object will be saved and the state only reset if the
IDs are different. After resetting the state, the framer's
``init_state()`` method will be called. |
388,138 | def remove_service(service, zone=None, permanent=True):
if zone:
cmd = '--zone={0} --remove-service={1}'.format(zone, service)  # option literals restored from the matching Salt firewalld source
else:
cmd = '--remove-service={0}'.format(service)
if permanent:
cmd += ' --permanent'
return __firewall_cmd(cmd) | Remove a service from zone. This option can be specified multiple times.
If zone is omitted, default zone will be used.
CLI Example:
.. code-block:: bash
salt '*' firewalld.remove_service ssh
To remove a service from a specific zone
.. code-block:: bash
salt '*' firewalld.remove_service ssh dmz |
388,139 | def put_task_info(self, task_name, key, value):
params = OrderedDict([('info', ''), ('taskname', task_name)])  # parameter names inferred
headers = {'Content-Type': 'application/xml'}
body = self.TaskInfo(key=key, value=value).serialize()
self._client.put(self.resource(), params=params, headers=headers, data=body) | Put information into a task.
:param task_name: name of the task
:param key: key of the information item
:param value: value of the information item |
388,140 | def set_random_seed():
tf.set_random_seed(FLAGS.random_seed)
random.seed(FLAGS.random_seed)
np.random.seed(FLAGS.random_seed) | Set the random seed from flag everywhere. |
388,141 | def plot(self, key=None, invert=None, plotmethod='imshow',
cmap=plt.cm.gray, ms=4, Max=None,
fs=None, dmargin=None, wintit=None,
draw=True, connect=True):
dax, KH = _plot.Data_plot(self, key=key, invert=invert, Max=Max,
plotmethod=plotmethod, cmap=cmap, ms=ms,
fs=fs, dmargin=dmargin, wintit=wintit,
draw=draw, connect=connect)
return dax, KH | Plot the data content in a predefined figure |
388,142 | def execute(self, program: Program):
for dg in program.defined_gates:
if dg.parameters is not None:
raise NotImplementedError("PyQVM does not support parameterized DEFGATEs")
self.defined_gates[dg.name] = dg.matrix
self.program = program
self.program_counter = 0
halted = len(program) == 0
while not halted:
halted = self.transition()
return self | Execute a program on the QVM.
Note that the QAM is stateful. Subsequent calls to :py:func:`execute` will not
automatically reset the wavefunction or the classical RAM. If this is desired,
consider starting your program with ``RESET``.
:return: ``self`` to support method chaining. |
388,143 | def crypto_box_keypair():
pk = ffi.new("unsigned char[]", crypto_box_PUBLICKEYBYTES)
sk = ffi.new("unsigned char[]", crypto_box_SECRETKEYBYTES)
rc = lib.crypto_box_keypair(pk, sk)
ensure(rc == 0,
'Unexpected library error',  # message text reconstructed
raising=exc.RuntimeError)
return (
ffi.buffer(pk, crypto_box_PUBLICKEYBYTES)[:],
ffi.buffer(sk, crypto_box_SECRETKEYBYTES)[:],
) | Returns a randomly generated public and secret key.
:rtype: (bytes(public_key), bytes(secret_key)) |
388,144 | def show_xyzs(self, xs, ys, zs, imgsize:int=4, figsize:Optional[Tuple[int,int]]=None, **kwargs):
"Show `xs` (inputs), `ys` (targets) and `zs` (predictions) on a figure of `figsize`."
title = 'Ground truth / Predictions'
axs = subplots(len(xs), 3, imgsize=imgsize, figsize=figsize, title=title, weight='bold', size=14)
for i,(x,y,z) in enumerate(zip(xs,ys,zs)):
x.show(ax=axs[i,0], **kwargs)
y.show(ax=axs[i,2], **kwargs)
z.show(ax=axs[i,1], **kwargs) | Show `xs` (inputs), `ys` (targets) and `zs` (predictions) on a figure of `figsize`. |
388,145 | def user_picklist(i_info, command):
valid_entry = False
awsc.get_all_aminames(i_info)
list_instances(i_info, "", True)
msg_txt = ("Enter {0}
" [{2}0 aborts{1}]: ".format(C_WARN, C_NORM, C_TI,
command, len(i_info)))
while not valid_entry:
entry_raw = obtain_input(msg_txt)
try:
entry_int = int(entry_raw)
except ValueError:
entry_int = 999
(tar_idx, valid_entry) = user_entry(entry_int, len(i_info), command)
return tar_idx | Display list of instances matching args and ask user to select target.
Instance list displayed and user asked to enter the number corresponding
to the desired target instance, or '0' to abort.
Args:
i_info (dict): information on instances and details.
command (str): command specified on the command line.
Returns:
tar_idx (int): the dictionary index number of the targeted instance. |
388,146 | def do_levmarq_all_particle_groups(s, region_size=40, max_iter=2, damping=1.0,
decrease_damp_factor=10., run_length=4, collect_stats=False, **kwargs):
lp = LMParticleGroupCollection(s, region_size=region_size, damping=damping,
run_length=run_length, decrease_damp_factor=decrease_damp_factor,
get_cos=collect_stats, max_iter=max_iter, **kwargs)
lp.do_run_2()
if collect_stats:
return lp.stats | Levenberg-Marquardt optimization for every particle in the state.
Convenience wrapper for LMParticleGroupCollection. Same keyword args,
but I've set the defaults to what I've found to be useful values for
optimizing particles. See LMParticleGroupCollection for documentation.
See Also
--------
do_levmarq_particles : Levenberg-Marquardt optimization of a
specified set of particles.
do_levmarq : Levenberg-Marquardt optimization of the entire state;
useful for optimizing global parameters.
LMParticleGroupCollection : The workhorse of do_levmarq.
LMEngine : Engine superclass for all the optimizers. |
388,147 | def plot_mixing_lines(self, p=None, rv=None, **kwargs):
for artist in self._mixing_lines:
artist.remove()
self._mixing_lines = []
if rv is None:
rv = np.array([
0.1e-3, 0.2e-3, 0.5e-3, 1e-3, 1.5e-3, 2e-3, 3e-3, 4e-3, 6e-3,
8e-3, 10e-3, 12e-3, 15e-3, 20e-3, 30e-3, 40e-3,
50e-3]).reshape(-1, 1)
else:
rv = np.asarray(rv).reshape(-1, 1)
if p is None:
p = np.linspace(min(self.get_ylim()), max(self.get_ylim()))
else:
p = np.asarray(p)
Td = calculate(
'Td', p=p, rv=rv, p_units='hPa', rv_units='kg/kg',
Td_units='degC')  # quantity name and units inferred from context
Td_label = calculate('Td', p=550, p_units='hPa', rv=rv,
Td_units='degC')
linedata = [np.vstack((t, p)).T for t in Td]
kwargs.setdefault('clip_on', True)
kwargs.setdefault('color', 'g')  # keyword names inferred; docstring says dashed green lines
kwargs.setdefault('linestyle', '--')
kwargs.setdefault('linewidth', 1)
kwargs.setdefault('alpha', 0.5)
kwargs.setdefault('zorder', 1.1)
collection = LineCollection(linedata, **kwargs)
self._mixing_lines.append(collection)
self.add_collection(collection)
rv = rv.flatten() * 1000
for i in range(len(rv)):
if rv[i] < 1:
format_string = '{:.1f}'
else:
format_string = '{:.0f}'
t = self.text(Td_label[i], 550, format_string.format(rv[i]),
fontsize=8, ha='center', va='center', rotation=60,
color='g', bbox={
'facecolor': 'w', 'edgecolor': 'w', 'pad': 0,
}, zorder=1.2)
t.set_clip_on(True)
self._mixing_lines.append(t) | r'''Plot lines of constant mixing ratio.
Adds lines of constant mixing ratio (isohumes) to the
plot. The default style of these lines is dashed green lines with an
alpha value of 0.8. These can be overridden using keyword arguments.
Parameters
----------
rv : array_like, optional
1-dimensional array of unitless mixing ratio values to plot. If
none are given, default values are used.
p : array_like, optional
1-dimensional array of pressure values to be included in the
isohumes. If not specified, they will be linearly distributed
across the current plotted pressure range.
kwargs
Other keyword arguments to pass to
`matplotlib.collections.LineCollection`
See Also
--------
`matplotlib.collections.LineCollection` |
388,148 | def join_channel(self, channel):
self.s.send(('JOIN #%s\r\n' % channel).encode())
if self.verbose:
print('Joined channel #%s' % channel)  # log text reconstructed
Note, this function returns immediately, but the switch might
take a moment
:param channel: name of the channel (without #) |
388,149 | def form_node(cls):
assert issubclass(cls, FormNode)
res = attrs(init=False, slots=True)(cls)
res._args = []
res._required_args = 0
res._rest_arg = None
state = _FormArgMode.REQUIRED
for field in fields(res):
if 'form_arg_mode' in field.metadata:  # metadata key and error messages are inferred placeholders
if state is _FormArgMode.REST:
raise RuntimeError('no argument may follow a rest argument')
if field.metadata['form_arg_mode'] is _FormArgMode.REQUIRED:
if state is _FormArgMode.OPTIONAL:
raise RuntimeError('a required argument may not follow an optional argument')
res._args.append(field)
res._required_args += 1
elif field.metadata['form_arg_mode'] is _FormArgMode.OPTIONAL:
state = _FormArgMode.OPTIONAL
res._args.append(field)
elif field.metadata['form_arg_mode'] is _FormArgMode.REST:
state = _FormArgMode.REST
res._rest_arg = field
else:
assert 0
return res | A class decorator to finalize fully derived FormNode subclasses. |
388,150 | def index(ubifs, lnum, offset, inodes={}, bad_blocks=[]):
    # Log/error message literals below were stripped in extraction and are
    # reconstructed approximately; the inode dict keys ('ino', 'data', 'dent')
    # follow the docstring.
    try:
        if len(bad_blocks):
            if lnum in bad_blocks:
                return
        ubifs.file.seek((ubifs.leb_size * lnum) + offset)
        buf = ubifs.file.read(UBIFS_COMMON_HDR_SZ)
        chdr = nodes.common_hdr(buf)
        log(index, '%s file addr: %s' % (chdr, ubifs.file.last_read_addr()))
        verbose_display(chdr)
        node_buf = ubifs.file.read(chdr.len - UBIFS_COMMON_HDR_SZ)
        file_offset = ubifs.file.last_read_addr()
    except Exception as e:
        if str(e) == 'Bad Read Offset Request' and settings.warn_only_block_read_errors:
            bad_blocks.append(lnum)
            return
        else:
            error(index, 'Fatal', 'LEB: %s, offset: %s, error: %s' % (lnum, ((ubifs.leb_size * lnum) + offset), e))
    if chdr.node_type == UBIFS_IDX_NODE:
        try:
            idxn = nodes.idx_node(node_buf)
        except Exception as e:
            if settings.warn_only_block_read_errors:
                error(index, 'Warn', 'Problem at file address: %s, error: %s' % (file_offset, e))
                return
            else:
                error(index, 'Fatal', 'Problem at file address: %s, error: %s' % (file_offset, e))
        log(index, '%s file addr: %s' % (idxn, file_offset))
        verbose_display(idxn)
        branch_idx = 0
        for branch in idxn.branches:
            verbose_log(index, '-' * 26)
            log(index, '%s file addr: %s' % (branch, file_offset + UBIFS_IDX_NODE_SZ + (branch_idx * UBIFS_BRANCH_SZ)))
            verbose_display(branch)
            index(ubifs, branch.lnum, branch.offs, inodes, bad_blocks)
            branch_idx += 1
    elif chdr.node_type == UBIFS_INO_NODE:
        try:
            inon = nodes.ino_node(node_buf)
        except Exception as e:
            if settings.warn_only_block_read_errors:
                error(index, 'Warn', 'Problem at file address: %s, error: %s' % (file_offset, e))
                return
            else:
                error(index, 'Fatal', 'Problem at file address: %s, error: %s' % (file_offset, e))
        ino_num = inon.key['ino_num']
        log(index, '%s file addr: %s, ino num: %s' % (inon, file_offset, ino_num))
        verbose_display(inon)
        if not ino_num in inodes:
            inodes[ino_num] = {}
        inodes[ino_num]['ino'] = inon
    elif chdr.node_type == UBIFS_DATA_NODE:
        try:
            datn = nodes.data_node(node_buf, (ubifs.leb_size * lnum) + UBIFS_COMMON_HDR_SZ + offset + UBIFS_DATA_NODE_SZ)
        except Exception as e:
            if settings.warn_only_block_read_errors:
                error(index, 'Warn', 'Problem at file address: %s, error: %s' % (file_offset, e))
                return
            else:
                error(index, 'Fatal', 'Problem at file address: %s, error: %s' % (file_offset, e))
        ino_num = datn.key['ino_num']
        log(index, '%s file addr: %s, ino num: %s' % (datn, file_offset, ino_num))
        verbose_display(datn)
        if not ino_num in inodes:
            inodes[ino_num] = {}
        if not 'data' in inodes[ino_num]:
            inodes[ino_num]['data'] = []
        inodes[ino_num]['data'].append(datn)
    elif chdr.node_type == UBIFS_DENT_NODE:
        try:
            dn = nodes.dent_node(node_buf)
        except Exception as e:
            if settings.warn_only_block_read_errors:
                error(index, 'Warn', 'Problem at file address: %s, error: %s' % (file_offset, e))
                return
            else:
                error(index, 'Fatal', 'Problem at file address: %s, error: %s' % (file_offset, e))
        ino_num = dn.key['ino_num']
        log(index, '%s file addr: %s, ino num: %s' % (dn, file_offset, ino_num))
        verbose_display(dn)
        if not ino_num in inodes:
            inodes[ino_num] = {}
        if not 'dent' in inodes[ino_num]:
            inodes[ino_num]['dent'] = []
        inodes[ino_num]['dent'].append(dn) | Walk the index gathering Inode, Dir Entry, and File nodes.
Arguments:
Obj:ubifs -- UBIFS object.
Int:lnum -- Logical erase block number.
Int:offset -- Offset in logical erase block.
Dict:inodes -- Dict of ino/dent/file nodes keyed to inode number.
Returns:
Dict:inodes -- Dict of ino/dent/file nodes keyed to inode number.
'ino' -- Inode node.
'data' -- List of data nodes if present.
'dent' -- List of directory entry nodes if present. |
388,151 | def added(self):
def _added(diffs, prefix):  # 'old'/'new' keys restored from the matching Salt dictdiffer source
keys = []
for key in diffs.keys():
if isinstance(diffs[key], dict) and 'old' not in diffs[key]:
keys.extend(_added(diffs[key],
prefix='{0}{1}.'.format(prefix, key)))
elif diffs[key]['old'] == self.NONE_VALUE:
if isinstance(diffs[key]['new'], dict):
keys.extend(
_added(diffs[key]['new'],
prefix='{0}{1}.'.format(prefix, key)))
else:
keys.append('{0}{1}'.format(prefix, key))
return keys
return sorted(_added(self._diffs, prefix='')) | Returns all keys that have been added.
If the keys are in child dictionaries they will be represented with
. notation |
388,152 | def cursor_up(self, count=None):
top, _bottom = self.margins or Margins(0, self.lines - 1)
self.cursor.y = max(self.cursor.y - (count or 1), top) | Move cursor up the indicated # of lines in same column.
Cursor stops at top margin.
:param int count: number of lines to skip. |
388,153 | def printstartfinish(verb, inp=None, kcount=None):
if inp:
if verb > 1:
ttxt = str(timedelta(seconds=default_timer() - inp))
ktxt = ' '
if kcount:
ktxt += str(kcount) + ' kernel call(s)'
print('\n:: empymod END; runtime = ' + ttxt + ' :: ' + ktxt + '\n')
else:
t0 = default_timer()
if verb > 2:
print("\n:: empymod START ::\n")
return t0 | r"""Print start and finish with time measure and kernel count. |
388,154 | def _init_img_params(param):
if param is not None:
param = np.atleast_1d(param)
if len(param) == 1:
param = np.repeat(param, 2)
return param | Initialize 2D image-type parameters that can accept either a
single or two values. |
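A short behavior sketch, run alongside the definition above:

import numpy as np

print(_init_img_params(3))        # [3 3]  -- a single value is repeated
print(_init_img_params([2, 5]))   # [2 5]  -- two values are kept as-is
print(_init_img_params(None))     # None   -- passed through unchanged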
388,155 | def free_memory(self):
self.main.free_memory()
QTimer.singleShot(self.INITIAL_FREE_MEMORY_TIME_TRIGGER,
lambda: self.main.free_memory())
QTimer.singleShot(self.SECONDARY_FREE_MEMORY_TIME_TRIGGER,
lambda: self.main.free_memory()) | Free memory signal. |
388,156 | def full_keywords(soup):
"author keywords list including inline tags, such as italic"
if not raw_parser.author_keywords(soup):
return []
return list(map(node_contents_str, raw_parser.author_keywords(soup))) | author keywords list including inline tags, such as italic |
388,157 | def _connect():
jenkins_url = __salt__['config.get']('jenkins.url') or \
__salt__['config.get']('jenkins:url') or \
__salt__['pillar.get']('jenkins.url')
jenkins_user = __salt__['config.get']('jenkins.user') or \
__salt__['config.get']('jenkins:user') or \
__salt__['pillar.get']('jenkins.user')
jenkins_password = __salt__['config.get']('jenkins.password') or \
__salt__['config.get']('jenkins:password') or \
__salt__['pillar.get']('jenkins.password')
if not jenkins_url:
raise SaltInvocationError('No Jenkins URL found.')
return jenkins.Jenkins(jenkins_url,
username=jenkins_user,
password=jenkins_password) | Return server object used to interact with Jenkins.
:return: server object used to interact with Jenkins |
388,158 | def iter_filths():
for filth_cls in iter_filth_clss():
if issubclass(filth_cls, RegexFilth):
m = next(re.finditer(r"\s+", "fake pattern string"))
yield filth_cls(m)
else:
yield filth_cls() | Iterate over all instances of filth |
388,159 | def classes(self):
return [o for o in self.objects if isinstance(o.node, astroid.ClassDef)] | return all class nodes in the diagram |
388,160 | def retrieve_file_handles_of_same_dataset(self, **args):
mandatory_args = ['drs_id', 'version_number', 'data_node']  # argument names inferred
esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args)
LOGGER.debug('Retrieving file handles for dataset %s, version %s.',
args['drs_id'], str(args['version_number']))
if self.__switched_on:
return self.__retrieve_file_handles_of_same_dataset(**args)
else:
msg = 'Cannot retrieve file handles: solr access is switched off.'  # message inferred
raise esgfpid.exceptions.SolrSwitchedOff(msg) | :return: List of handles, or empty list. Should never return None.
:raise: SolrSwitchedOff
:raise SolrError: If both strategies to find file handles failed. |
388,161 | def attach_zone(geoid, organization_id_or_slug):
organization = Organization.objects.get_by_id_or_slug(
organization_id_or_slug)
if not organization:
log.error('No organization found for %s', organization_id_or_slug)
geozone = GeoZone.objects.get(id=geoid)
if not geozone:
log.error('No geozone found for %s', geoid)
log.info('Attaching {organization} with {geozone}'.format(
organization=organization, geozone=geozone))
organization.zone = geozone.id
organization.save()
log.info('Done') | Attach a zone <geoid> restricted to level for a given <organization>. |
388,162 | def get_group(self, group_id):
def process_result(result):
return Group(self, result)
return Command('get', [ROOT_GROUPS, group_id],
process_result=process_result) | Return specified group.
Returns a Command. |
388,163 | def open(cls, pkg_file):
pkg_reader = PackageReader.from_file(pkg_file)
package = cls()
Unmarshaller.unmarshal(pkg_reader, package, PartFactory)
return package | Return an |OpcPackage| instance loaded with the contents of
*pkg_file*. |
388,164 | def status(self, remote=False):
if remote:
components = urlparse.urlparse(self.endpoint)
try:
result = self.session.get(components[0] + "://" + components[1] + "/status", timeout=self.timeout)
except Exception as e:
if self.logger: self.logger.debug("Failed to connect to server for status: %s", e)
result = None
if result and result.status_code == 200:
self.server_status = result.json()
self.server_status["endpoint"] = self.endpoint
elif result:
if self.logger: self.logger.debug("Server status response not understandable: Status: %d, Body: %s",
result.status_code, result.text)
self.server_status = {"endpoint": self.endpoint,
"status": ("Unexpected HTTP status " + str(result.status_code) + " at: " +
strftime("%d %b %Y %H:%M:%S +0000", gmtime()))}
else:
self.server_status = {"endpoint": self.endpoint,
"status": "Unreachable at: " + strftime("%d %b %Y %H:%M:%S +0000", gmtime())}
return self.local_status, self.server_status | Return the connection status, both locally and remotely.
The local connection status is a dictionary that gives:
* the count of multiple queries sent to the server.
* the count of single queries sent to the server.
* the count of actions sent to the server.
* the count of actions executed successfully by the server.
* the count of actions queued to go to the server.
The remote connection status includes whether the server is live,
as well as data about version and build. The server data is
cached, unless the remote flag is specified.
:param remote: whether to query the server for its latest status
:return: tuple of status dicts: (local, server). |
388,165 | def _default(cls, opts):
level = getattr(logging, opts.log_level, logging.DEBUG)
logger = logging.getLogger()
logger.setLevel(level)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(level)
formatter = logging.Formatter('%(asctime)s %(levelname)s %(name)s: %(message)s')  # format string inferred; the original literal was stripped
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
return True | Setup default logger |
388,166 | def GetHostMemPhysFreeMB(self):
counter = c_uint()
ret = vmGuestLib.VMGuestLib_GetHostMemPhysFreeMB(self.handle.value, byref(counter))
if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret)
return counter.value | Undocumented. |
388,167 | def Type_string(self, text, interval = 0, dl = 0):
self.Delay(dl)
self.keyboard.type_string(text, interval) | Type a string with the keyboard; interval is the delay between characters, in seconds. |
388,168 | def footprint(self,nside):
import healpy
import ugali.utils.projector
if nside > 2**9: raise Exception("Overflow error: nside must be <=2**9")
pix = np.arange(healpy.nside2npix(nside), dtype=int)
footprint = np.zeros(healpy.nside2npix(nside), dtype=bool)
ra, dec = ugali.utils.projector.pixToAng(nside, pix)
table_name = 'footprint_nside%i' % nside  # table name format inferred
self.upload(np.array([pix, ra, dec]).T, ['pix', 'ra', 'dec'], name=table_name)
radius = healpy.nside2resol(nside_superpix, arcmin=True)  # note: nside_superpix is not defined in this excerpt
# the SQL query literal (parameterized by radius and table_name) was lost in
# extraction; query construction and the download are elided here | Download the survey footprint for HEALpix pixels. |
388,169 | def mod_watch(name, **kwargs):
if kwargs['sfun'] in ('wait', 'run', 'watch'):  # sfun/stateful literals restored from the matching Salt cmd state source
if kwargs.get('stateful'):
kwargs.pop('stateful')
return _reinterpreted_state(run(name, **kwargs))
return run(name, **kwargs)
elif kwargs['sfun'] == 'wait_script' or kwargs['sfun'] == 'script':
if kwargs.get('stateful'):
kwargs.pop('stateful')
return _reinterpreted_state(script(name, **kwargs))
return script(name, **kwargs)
elif kwargs['sfun'] == 'wait_call' or kwargs['sfun'] == 'call':
if kwargs.get('func'):
func = kwargs.pop('func')
return call(name, func, **kwargs)
else:
return {'name': name,
'changes': {},
'comment': ('cmd.{0[sfun]} needs a named parameter func'
).format(kwargs),
'result': False}
return {'name': name,
'changes': {},
'comment': ('cmd.{0[sfun]} does not work with the watch requisite, '
'please use cmd.wait or cmd.wait_script').format(kwargs),
'result': False} | Execute a cmd function based on a watch call
.. note::
This state exists to support special handling of the ``watch``
:ref:`requisite <requisites>`. It should not be called directly.
Parameters for this function should be set by the state being triggered. |
388,170 | def get_id(self, request_data, parameter_name='id'):
if parameter_name not in request_data:
raise ParseError("`{}` parameter is required".format(parameter_name))
id_parameter = request_data.get(parameter_name, None)
if not isinstance(id_parameter, int):
raise ParseError("`{}` parameter not an integer".format(parameter_name))
return id_parameter | Extract an integer from request data. |
388,171 | def __add_bgedge(self, bgedge, merge=True):
if bgedge.vertex1 in self.bg and bgedge.vertex2 in self.bg[bgedge.vertex1] and merge:
key = min(self.bg[bgedge.vertex1][bgedge.vertex2].keys())
self.bg[bgedge.vertex1][bgedge.vertex2][key]["attr_dict"]["multicolor"] += bgedge.multicolor
self.bg[bgedge.vertex1][bgedge.vertex2][key]["attr_dict"]["data"] = {}
else:
self.bg.add_edge(bgedge.vertex1, bgedge.vertex2, attr_dict={"multicolor": deepcopy(bgedge.multicolor),
"data": bgedge.data})
self.cache_valid["overall_set_of_colors"] = False | Adds supplied :class:`bg.edge.BGEdge` object to current instance of :class:`BreakpointGraph`.
Checks that vertices in supplied :class:`bg.edge.BGEdge` instance actually are present in current :class:`BreakpointGraph` if **merge** option of provided. Otherwise a new edge is added to the current :class:`BreakpointGraph`.
:param bgedge: instance of :class:`bg.edge.BGEdge` infromation form which is to be added to current :class:`BreakpointGraph`
:type bgedge: :class:`bg.edge.BGEdge`
:param merge: a flag to merge supplied information from multi-color perspective into a first existing edge between two supplied vertices
:type merge: ``Boolean``
:return: ``None``, performs inplace changes |
388,172 | def is_source_code_missing_open_brackets(source_code):
open_brackets = "[{("
close_brackets = "]})"
last_close_bracket = [-1]
counters = [0] * len(open_brackets)
for t_type, t_content in reversed(list(parse_py_statements(source_code))):
if t_type != "op":
continue
if t_content in open_brackets:
idx = open_brackets.index(t_content)
if last_close_bracket[-1] == idx:
counters[idx] -= 1
del last_close_bracket[-1]
elif t_content in close_brackets:
idx = close_brackets.index(t_content)
counters[idx] += 1
last_close_bracket += [idx]
return not all([c == 0 for c in counters]) | :param str source_code:
:return: whether this source code snippet (e.g. one line) is complete/even w.r.t. opening/closing brackets
:rtype: bool |
388,173 | def build_all(self,
verbose=False,
hide_base_schemas=True,
hide_implicit_types=True,
hide_implicit_preds=True):
if verbose:
printDebug("Scanning entities...", "green")
printDebug("----------", "comment")
self.build_ontologies()
if verbose:
printDebug("Ontologies.........: %d" % len(self.all_ontologies), "comment")
self.build_classes(hide_base_schemas, hide_implicit_types)
if verbose:
printDebug("Classes............: %d" % len(self.all_classes), "comment")
self.build_properties(hide_implicit_preds)
if verbose:
printDebug("Properties.........: %d" % len(self.all_properties), "comment")
if verbose:
printDebug("..annotation.......: %d" % len(self.all_properties_annotation), "comment")
if verbose:
printDebug("..datatype.........: %d" % len(self.all_properties_datatype), "comment")
if verbose:
printDebug("..object...........: %d" % len(self.all_properties_object), "comment")
self.build_skos_concepts()
if verbose:
printDebug("Concepts (SKOS)....: %d" % len(self.all_skos_concepts), "comment")
self.build_shapes()
if verbose:
printDebug("Shapes (SHACL).....: %d" % len(self.all_shapes), "comment")
self.__computeInferredProperties()
if verbose:
printDebug("----------", "comment") | Extract all ontology entities from an RDF graph and construct Python representations of them. |
388,174 | def parse_line(self, line):
prefix = ""
if line.startswith(","):
line, prefix = line[1:], ","
j = json.loads(line)
yield j
self.io.write_line(prefix + json.dumps(j)) | Parse a single line of JSON and write modified JSON back. |
388,175 | def svd_solve(U, s, V, b, s_tol=1e-15):
n = U.shape[0]
assert(b.shape[0] == n)
m = b.shape[1] if np.ndim(b) > 1 else 1
sclamp = np.maximum(s, s_tol)
ss = 1. / np.sqrt(sclamp)
U2 = U * ss[np.newaxis, :]
V2 = ss[:, np.newaxis] * V
if m < n:
X = U2.dot(V2.dot(b))
else:
X = U2.dot(V2).dot(b)
return X | Solve the system :math:`A X = b` for :math:`X`.
Here :math:`A` is a positive semi-definite matrix using the singular value
decomposition. This truncates the SVD so only dimensions corresponding to
non-negative and sufficiently large singular values are used.
Parameters
----------
U: ndarray
The :code:`U` factor of :code:`U, s, V = svd(A)` positive
semi-definite matrix.
s: ndarray
The :code:`s` factor of :code:`U, s, V = svd(A)` positive
semi-definite matrix.
V: ndarray
The :code:`V` factor of :code:`U, s, V = svd(A)` positive
semi-definite matrix.
b: ndarray
An array or matrix
s_tol: float
Cutoff for small singular values. Singular values smaller than
:code:`s_tol` are clamped to :code:`s_tol`.
Returns
-------
X: ndarray
The result of :math:`X = A^-1 b`
okind: ndarray
The indices of :code:`s` that are kept in the factorisation |
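A small usage sketch: for a symmetric positive-definite A, feeding numpy's SVD factors into svd_solve reproduces np.linalg.solve(A, b):

import numpy as np

A = np.array([[4., 1.],
              [1., 3.]])           # symmetric positive definite
b = np.array([[1.],
              [2.]])
U, s, V = np.linalg.svd(A)         # np.linalg.svd returns V already transposed (Vh)
X = svd_solve(U, s, V, b)
print(np.allclose(X, np.linalg.solve(A, b)))   # True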
388,176 | def G(self, y, t):
if self._independent_noise:
res = np.zeros((self.dimension, self.nnoises))
offset = 0
for j, m in enumerate(self.submodels):
slicej = slice(self._si[j], self._si[j+1])
ix = (slicej, slice(offset, offset + self._nsubnoises[j]))
res[ix] = m.G(y[slicej], t)
offset += self._nsubnoises[j]
else:
res = np.empty((self.dimension, self.nnoises))
for j, m in enumerate(self.submodels):
slicej = slice(self._si[j], self._si[j+1])
ix = (slicej, slice(None))
res[ix] = m.G(y[slicej], t)
return res | Noise coefficient matrix G of the complete network system
dy = f(y, t)dt + G(y, t).dot(dW)
(for an ODE network system without noise this function is not used)
Args:
y (array of shape (d,)): where d is the dimension of the overall
state space of the complete network system.
Returns:
G (array of shape (d, m)): where m is the number of independent
Wiener processes driving the complete network system. The noise
coefficient matrix G defines the stochastic term of the system. |
388,177 | def predict(self, text:str, n_words:int=1, no_unk:bool=True, temperature:float=1., min_p:float=None, sep:str=' ',
decoder=decode_spec_tokens):
"Return the `n_words` that come after `text`."
ds = self.data.single_dl.dataset
self.model.reset()
xb,yb = self.data.one_item(text)
new_idx = []
for _ in range(n_words):
res = self.pred_batch(batch=(xb,yb))[0][-1]
if no_unk: res[self.data.vocab.stoi[UNK]] = 0.
if min_p is not None:
if (res >= min_p).float().sum() == 0:
warn(f"There is no item with probability >= {min_p}, try a lower value.")
else: res[res < min_p] = 0.
if temperature != 1.: res.pow_(1 / temperature)
idx = torch.multinomial(res, 1).item()
new_idx.append(idx)
xb = xb.new_tensor([idx])[None]
return text + sep + sep.join(decoder(self.data.vocab.textify(new_idx, sep=None))) | Return the `n_words` that come after `text`. |
388,178 | def get_bromo_fnames_da(d_em_kHz, d_bg_kHz, a_em_kHz, a_bg_kHz,
ID='all', t_tot='480', num_p='30', pM='64',  # default strings inferred; originals stripped
t_step=0.5e-6, D=1.2e-11, dir_=''):
clk_p = t_step/32.
E_sim = 1.*a_em_kHz/(a_em_kHz + d_em_kHz)
FRET_val = 100.*E_sim
print("Simulated FRET value: %.1f%%" % FRET_val)
d_em_kHz_str = "%04d" % d_em_kHz
a_em_kHz_str = "%04d" % a_em_kHz
d_bg_kHz_str = "%04.1f" % d_bg_kHz
a_bg_kHz_str = "%04.1f" % a_bg_kHz
print("D: EM %s BG %s " % (d_em_kHz_str, d_bg_kHz_str))
print("A: EM %s BG %s " % (a_em_kHz_str, a_bg_kHz_str))
fname_d = (
'ph_times_{t_tot}s_{pM}pM_{np}P_{D}D_{ts_us}us_ID{ID}_'
'EM{em}kHz_BG{bg}kHz.npy'  # filename template reconstructed; the original literal was lost
).format(
em=d_em_kHz_str, bg=d_bg_kHz_str, t_tot=t_tot, pM=pM,
np=num_p, ID=ID, ts_us=t_step*1e6, D=D)
fname_a = (
'ph_times_{t_tot}s_{pM}pM_{np}P_{D}D_{ts_us}us_ID{ID}_'
'EM{em}kHz_BG{bg}kHz.npy'
).format(
em=a_em_kHz_str, bg=a_bg_kHz_str, t_tot=t_tot, pM=pM,
np=num_p, ID=ID, ts_us=t_step*1e6, D=D)
print(fname_d)
print(fname_a)
name = (
'E{:.1f} BGD{:.1f}k BGA{:.1f}k EMD{:.0f}k'  # label template reconstructed
).format(FRET_val, d_bg_kHz, a_bg_kHz, d_em_kHz)
return dir_+fname_d, dir_+fname_a, name, clk_p, E_sim | Get filenames for donor and acceptor timestamps for the given parameters |
388,179 | def is_active(self, timperiods):
now = int(time.time())
timperiod = timperiods[self.modulation_period]
if not timperiod or timperiod.is_time_valid(now):
return True
return False | Know if this result modulation is active now
:return: True is we are in the period, otherwise False
:rtype: bool |
388,180 | def mtf_resnet_base():
hparams = common_hparams.basic_params1()
hparams.no_data_parallelism = True
hparams.use_fixed_batch_size = True
hparams.batch_size = 32
hparams.max_length = 3072
hparams.hidden_size = 256
hparams.label_smoothing = 0.0
hparams.add_hparam("mesh_shape", "batch:8")
hparams.add_hparam("layout", "batch:batch")
hparams.add_hparam("filter_size", 1024)
hparams.add_hparam("num_layers", 6)
hparams.shared_embedding = True
hparams.shared_embedding_and_softmax_weights = True
hparams.optimizer = "Adafactor"
hparams.learning_rate_schedule = "rsqrt_decay"
hparams.learning_rate_warmup_steps = 10000
hparams.add_hparam("d_kv", 32)
hparams.add_hparam("img_len", 32)
hparams.add_hparam("num_channels", 3)
hparams.add_hparam("row_blocks", 1)
hparams.add_hparam("col_blocks", 1)
hparams.add_hparam("rows_size", 32)
hparams.add_hparam("cols_size", 32)
hparams.add_hparam("layer_sizes", [3, 4, 6, 3])
hparams.add_hparam("filter_sizes", [64, 64, 128, 256, 512])
hparams.add_hparam("is_cifar", False)
hparams.initializer = "normal_unit_scaling"
hparams.initializer_gain = 2.
hparams.learning_rate = 0.1
return hparams | Set of hyperparameters. |
388,181 | def analyte_2_massname(s):
el = re.match('.*?([A-z]{1,3})', s).groups()[0]  # patterns inferred from the docstring formats
m = re.match('.*?([0-9]{1,3})', s).groups()[0]
return m + el | Converts analytes in format 'Al27' to '27Al'.
Parameters
----------
s : str
of format [0-9]{1,3}[A-z]{1,3}
Returns
-------
str
Name in format [A-z]{1,3}[0-9]{1,3} |
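A quick check of the conversion, run alongside the definition above (and assuming the regex reconstruction in the code cell):

print(analyte_2_massname('Al27'))   # 27Al
print(analyte_2_massname('Ca43'))   # 43Ca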
388,182 | def coordination_geometry_symmetry_measures(self, coordination_geometry,
tested_permutations=False,
points_perfect=None,
optimization=None):
if tested_permutations:
tested_permutations = set()
if self.permutations_safe_override:
raise ValueError('tested_permutations is not allowed when permutations_safe_override is set')  # message inferred
csms = []
permutations = []
algos = []
local2perfect_maps = []
perfect2local_maps = []
for algo in coordination_geometry.algorithms:
if algo.algorithm_type == EXPLICIT_PERMUTATIONS:
return self.coordination_geometry_symmetry_measures_standard(
coordination_geometry, algo,
points_perfect=points_perfect,
optimization=optimization)
if algo.algorithm_type == SEPARATION_PLANE:
cgsm = self.coordination_geometry_symmetry_measures_separation_plane(
coordination_geometry,
algo,
tested_permutations=tested_permutations,
points_perfect=points_perfect)
csm, perm, algo, local2perfect_map, perfect2local_map = cgsm
csms.extend(csm)
permutations.extend(perm)
algos.extend(algo)
local2perfect_maps.extend(local2perfect_map)
perfect2local_maps.extend(perfect2local_map)
return csms, permutations, algos, local2perfect_maps, perfect2local_maps | Returns the symmetry measures of a given coordination_geometry for a set of permutations depending on
the permutation setup. Depending on the parameters of the LocalGeometryFinder and on the coordination
geometry, different methods are called.
:param coordination_geometry: Coordination geometry for which the symmetry measures are looked for
:return: the symmetry measures of a given coordination_geometry for a set of permutations
:raise: NotImplementedError if the permutation_setup does not exists |
388,183 | def alignment_display(self):
self.dirty.update(range(self.lines))
for y in range(self.lines):
for x in range(self.columns):
self.buffer[y][x] = self.buffer[y][x]._replace(data="E") | Fills screen with uppercase E's for screen focus and alignment. |
388,184 | def _check_retcode(cmd):
return salt.modules.cmdmod.retcode(cmd, output_loglevel='quiet', ignore_retcode=True) == 0 | Simple internal wrapper for cmdmod.retcode |
388,185 | def pkg_desc(self):
options = [
"-p",
"--desc"
]
flag = ["--color="]
colors = [
"red",
"green",
"yellow",
"cyan",
"grey"
]
tag = ""
for arg in self.args:
if arg.startswith(flag[0]):
tag = arg[len(flag[0]):]
self.args.remove(arg)
break
if tag and tag not in colors:
print("\nslpkg: Error: Available colors {0}\n".format(
colors))
raise SystemExit()
if (len(self.args) == 3 and self.args[0] in options and
self.args[1] in self.meta.repositories and tag in colors):
PkgDesc(self.args[2], self.args[1], tag).view()
elif (len(self.args) == 3 and self.args[0] in options and
self.args[1] in self.meta.repositories):
PkgDesc(self.args[2], self.args[1], paint="").view()
elif (len(self.args) > 1 and self.args[0] in options and
self.args[1] not in self.meta.repositories):
usage(self.args[1])
else:
usage("") | Print slack-desc by repository |
388,186 | def remove_qc_reports(portal):
logger.info("Removing Reports > Quality Control ...")
ti = portal.reports.getTypeInfo()
actions = map(lambda action: action.id, ti._actions)
for index, action in enumerate(actions, start=0):
if action == 'qualitycontrol':  # action id inferred from the docstring
ti.deleteActions([index])
break
logger.info("Removing Reports > Quality Control [DONE]") | Removes the action Quality Control from Reports |
388,187 | def remove_isoforms(ids):
key = lambda x: x.rsplit(".", 1)[0]
iso_number = lambda x: get_number(x.split(".")[-1])
ids = sorted(ids, key=key)
newids = []
for k, ii in groupby(ids, key=key):
min_i = min(list(ii), key=iso_number)
newids.append(min_i)
return newids | This is more or less a hack to remove the GMAP multiple mappings. Multiple
GMAP mappings can be seen given the names .mrna1, .mrna2, etc. |
388,188 | def read_reg(self, addr):
if byte(data, 0) != 0:
raise FatalError.WithResult("Failed to read register address %08x" % addr, data)
return val | Read memory address in target |
388,189 | def _cnvkit_metrics(cnns, target_bed, antitarget_bed, cov_interval, items):
if cov_interval == "genome":
return cnns
target_cnn = [x["file"] for x in cnns if x["cnntype"] == "target"][0]
background_file = "%s-flatbackground.cnn" % utils.splitext_plus(target_cnn)[0]
background_file = cnvkit_background([], background_file, items, target_bed, antitarget_bed)
cnr_file, data = _cnvkit_fix_base(cnns, background_file, items, "-flatbackground")
cns_file = _cnvkit_segment(cnr_file, cov_interval, data)
metrics_file = "%s-metrics.txt" % utils.splitext_plus(target_cnn)[0]
if not utils.file_exists(metrics_file):
with file_transaction(data, metrics_file) as tx_metrics_file:
cmd = [_get_cmd(), "metrics", "-o", tx_metrics_file, "-s", cns_file, "--", cnr_file]
do.run(_prep_cmd(cmd, tx_metrics_file), "CNVkit metrics")
metrics = _read_metrics_file(metrics_file)
out = []
for cnn in cnns:
cnn["metrics"] = metrics
out.append(cnn)
return out | Estimate noise of a sample using a flat background.
Only used for panel/targeted data due to memory issues with whole genome
samples. |
388,190 | def _QueryHashes(self, digests):
url_parameters = {'apikey': self._api_key, 'resource': ', '.join(digests)}
try:
json_response = self.MakeRequestAndDecodeJSON(
self._VIRUSTOTAL_API_REPORT_URL, 'GET', params=url_parameters)
except errors.ConnectionError as exception:
json_response = None
logger.error('Unable to query VirusTotal with error: {0!s}.'.format(
exception))
return json_response | Queries VirusTotal for specific hashes.
Args:
digests (list[str]): hashes to look up.
Returns:
dict[str, object]: JSON response or None on error. |
388,191 | def add_flow_exception(exc):
global _flow_exceptions
if not isinstance(exc, type) or not issubclass(exc, Exception):
raise TypeError('Expected an Exception subclass, got %r' % (exc,))  # message text inferred
as_set = set(_flow_exceptions)
as_set.add(exc)
_flow_exceptions = tuple(as_set) | Add an exception that should not be logged.
The argument must be a subclass of Exception. |
388,192 | def _nfw_func(self, x):
c = 0.000001
if isinstance(x, np.ndarray):
x[np.where(x<c)] = c
nfwvals = np.ones_like(x)
inds1 = np.where(x < 1)
inds2 = np.where(x > 1)
nfwvals[inds1] = (1 - x[inds1] ** 2) ** -.5 * np.arctanh((1 - x[inds1] ** 2) ** .5)
nfwvals[inds2] = (x[inds2] ** 2 - 1) ** -.5 * np.arctan((x[inds2] ** 2 - 1) ** .5)
return nfwvals
elif isinstance(x, float) or isinstance(x, int):
x = max(x, c)
if x == 1:
return 1
if x < 1:
return (1 - x ** 2) ** -.5 * np.arctanh((1 - x ** 2) ** .5)
else:
return (x ** 2 - 1) ** -.5 * np.arctan((x ** 2 - 1) ** .5) | Classic NFW function in terms of arctanh and arctan
:param x: r/Rs
:return: |
388,193 | def is_dir(path):
try:
return path.expanduser().absolute().is_dir()
except AttributeError:
return os.path.isdir(os.path.abspath(os.path.expanduser(str(path)))) | Determine if a Path or string is a directory on the file system. |
388,194 | def minify_print(
ast,
obfuscate=False,
obfuscate_globals=False,
shadow_funcname=False,
drop_semi=False):
return ''.join(chunk.text for chunk in minify_printer(
obfuscate, obfuscate_globals, shadow_funcname, drop_semi)(ast)) | Simple minify print function; returns a string rendering of an input
AST of an ES5 program
Arguments
ast
The AST to minify print
obfuscate
If True, obfuscate identifiers nested in each scope with a
shortened identifier name to further reduce output size.
Defaults to False.
obfuscate_globals
Also do the same to identifiers nested on the global scope; do
not enable unless the renaming of global variables in a not
fully deterministic manner into something else is guaranteed to
not cause problems with the generated code and other code that
in the same environment that it will be executed in.
Defaults to False for the reason above.
drop_semi
Drop semicolons whenever possible (e.g. the final semicolons of
a given block). |
388,195 | def dropEvent(self, event):
data = event.mimeData()
if data.hasFormat('application/x-orb-table') and \
data.hasFormat('application/x-orb-query'):  # MIME format names inferred
tableName = self.tableTypeName()
if nativestring(data.data('application/x-orb-table')) == tableName:
data = nativestring(data.data('application/x-orb-query'))
query = Q.fromXmlString(data)
self.setQuery(query)
return
super(XOrbTreeWidget, self).dropEvent(event) | Listens for query's being dragged and dropped onto this tree.
:param event | <QDropEvent> |
388,196 | def _prompt(letters=, default=None):
while True:
try:
input_text = sys.stdin.readline().strip()
except KeyboardInterrupt:
sys.exit(0)
if input_text and input_text in letters:
return input_text
if default is not None and input_text == :
return default
print() | Wait for the user to type a character (and hit Enter). If the user enters
one of the characters in `letters`, return that character. If the user
hits Enter without entering a character, and `default` is specified,
returns `default`. Otherwise, asks the user to enter a character again. |
388,197 | def from_config(cls, cp, section, variable_args):
return bounded.bounded_from_config(cls, cp, section, variable_args,
bounds_required=False) | Returns a distribution based on a configuration file.
The parameters
for the distribution are retrieved from the section titled
"[`section`-`variable_args`]" in the config file.
The file to construct the distribution from must be provided by setting
`filename`. Boundary arguments can be provided in the same way as
described in `get_param_bounds_from_config`.
.. code-block:: ini
[{section}-{tag}]
name = fromfile
filename = ra_prior.hdf
min-ra = 0
max-ra = 6.28
Parameters
----------
cp : pycbc.workflow.WorkflowConfigParser
A parsed configuration file that contains the distribution
options.
section : str
Name of the section in the configuration file.
variable_args : str
The names of the parameters for this distribution, separated by
`prior.VARARGS_DELIM`. These must appear in the "tag" part
of the section header.
Returns
-------
BoundedDist
A distribution instance from the pycbc.inference.prior module. |
388,198 | def goodnode(self, nodelist):
l = len(nodelist)
for n in range(self.current_node(l), l):
self.msg.message("Trying node " + str(n) + ": " + nodelist[n])
try:
req = urllib.request.Request(url=nodelist[n])
urllib.request.urlopen(req)
except HTTPError as e:
self.msg.error_message(e)
self.currentnode = int(self.currentnode) + 1
else:
self.msg.message("Using " + nodelist[n])
return nodelist[n] | Goes through the provided list
and returns the first server node
that does not return an error. |
388,199 | def remnant_mass(eta, ns_g_mass, ns_sequence, chi, incl, shift):
if not (eta>0. and eta<=0.25 and abs(chi)<=1):
print('The BH spin magnitude must be <=1 and eta must be between 0 and 0.25!')  # diagnostic text reconstructed approximately
print('This function was launched with ns_mass={0}, eta={1}, chi={2}, incl={3}'.format(ns_g_mass, eta, chi, incl))
raise Exception('Unphysical parameters!')
q = (1+math.sqrt(1-4*eta)-2*eta)/eta*0.5
ns_compactness = ns_g_mass_to_ns_compactness(ns_g_mass, ns_sequence)
ns_b_mass = ns_g_mass_to_ns_b_mass(ns_g_mass, ns_sequence)
if not (ns_compactness>0 and q>=1):
print('A positive NS compactness and a mass ratio q>=1 are required!')
print('This function was launched with ns_b_mass={0}, eta={1}, chi={2}, incl={3}'.format(ns_b_mass, eta, chi, incl))
print('and obtained ns_compactness={0}, q={1}'.format(ns_compactness, q))
print('Aborting!')
raise Exception('Unphysical parameters!')
kappa = q*ns_compactness
chi_eff = bh_effective_spin(chi, incl)
if not abs(chi_eff)<=1:
print('The effective BH spin magnitude must be <=1!')
print('This function was launched with ns_b_mass={0}, eta={1}, chi={2}, incl={3}'.format(ns_b_mass, eta, chi, incl))
print('and obtained chi_eff={0}'.format(chi_eff))
print('Aborting!')
raise Exception('Unphysical parameters!')
xi = scipy.optimize.fsolve(xi_eq, 100., args=(kappa,chi_eff,q), full_output=1)[0]
alpha = 0.296
beta = 0.171
remnant_mass = alpha*xi*(1-2*ns_compactness)-beta*kappa*PG_ISSO_solver(chi_eff,0)
remnant_mass = remnant_mass*ns_b_mass - shift
return remnant_mass | Function that determines the remnant disk mass of
an NS-BH system using the fit to numerical-relativity
results discussed in Foucart PRD 86, 124007 (2012).
Parameters
-----------
eta: float
the symmetric mass ratio of the binary
ns_g_mass: float
NS gravitational mass (in solar masses)
ns_sequence: 3D-array
contains the sequence data in the form NS gravitational
mass (in solar masses), NS baryonic mass (in solar
masses), NS compactness (dimensionless)
chi: float
the BH dimensionless spin parameter
incl: float
the inclination angle between the BH spin and the orbital
angular momentum in radians
shift: float
an amount to be subtracted to the remnant mass predicted
by the model (in solar masses)
Returns
----------
remnant_mass: float
The remnant mass in solar masses |