Unnamed: 0 (int64, 0–389k) | code (string, length 26–79.6k) | docstring (string, length 1–46.9k)
---|---|---|
17,400 | def get_request_setting(self, service_id, version_number, name):
content = self._fetch("/service/%s/version/%d/request_settings/%s" % (service_id, version_number, name))
return FastlyRequestSetting(self, content) | Gets the specified Request Settings object. |
17,401 | def _Operation(self,operation):
try:
return(clc.v2.Requests(
clc.v2.API.Call(
,
% (self.alias,operation), % self.id,
session=self.session),
alias=self.alias,
session=self.session))
except clc.APIFailedResponse as e:
return(clc.v2.Requests(e.response_json,alias=self.alias,session=self.session)) | Execute the specified operation task against one or more servers.
Returns a clc.v2.Requests object. If an error occurs because the server(s) are already in
the requested state, it is not raised as an error at this level.
>>> clc.v2.Server(alias='BTDI',id='WA1BTDIKRT02').PowerOn().WaitUntilComplete()
0 |
17,402 | def update_subports(self, port):
trunk_details = port.get()
subports = trunk_details[]
host_id = port.get(bc.dns.DNSNAME)
context = bc.get_context()
el_context = context.elevated()
for subport in subports:
bc.get_plugin().update_port(el_context, subport[],
{:
{bc.portbindings.HOST_ID: host_id,
: bc.trunk_consts.TRUNK_SUBPORT_OWNER}})
trunk_obj = bc.trunk_objects.Trunk.get_object(
el_context, id=trunk_details[])
trunk_obj.update(status=bc.trunk_consts.ACTIVE_STATUS) | Set port attributes for trunk subports.
For baremetal deployments only, set the neutron port attributes
during the bind_port event. |
17,403 | def is_secret_known(
end_state: NettingChannelEndState,
secrethash: SecretHash,
) -> bool:
return (
secrethash in end_state.secrethashes_to_unlockedlocks or
secrethash in end_state.secrethashes_to_onchain_unlockedlocks
) | True if the `secrethash` is for a lock with a known secret. |
17,404 | def createEditor(self, parent, option, index):
editor = QtGui.QDoubleSpinBox(parent)
try:
editor.setMinimum(self.minimum)
editor.setMaximum(self.maximum)
editor.setSingleStep(self.singleStep)
editor.setDecimals(self.decimals)
except TypeError as err:
pass
return editor | Returns the widget used to edit the item specified by index for editing. The parent widget and style option are used to control how the editor widget appears.
Args:
parent (QWidget): parent widget.
option (QStyleOptionViewItem): controls how editor widget appears.
index (QModelIndex): model data index. |
17,405 | def _build_predict(self, Xnew, full_cov=False):
y = self.Y - self.mean_function(self.X)
Kmn = self.kern.K(self.X, Xnew)
Kmm_sigma = self.kern.K(self.X) + tf.eye(tf.shape(self.X)[0], dtype=settings.float_type) * self.likelihood.variance
Knn = self.kern.K(Xnew) if full_cov else self.kern.Kdiag(Xnew)
f_mean, f_var = base_conditional(Kmn, Kmm_sigma, Knn, y, full_cov=full_cov, white=False)
return f_mean + self.mean_function(Xnew), f_var | Xnew is a data matrix, the points at which we want to predict.
This method computes
p(F* | Y)
where F* are points on the GP at Xnew, Y are noisy observations at X. |
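The conditional above is the standard Gaussian-process regression posterior. As a rough, self-contained NumPy sketch of the same math (not GPflow's `base_conditional`; the function name and shapes here are illustrative only), the posterior mean and marginal variance are:

```python
import numpy as np

def gp_posterior(K_xx, K_xs, K_ss_diag, y, noise_var):
    # Posterior of a GP regression model with Gaussian noise:
    #   mean = K(X, X*)^T (K(X, X) + s^2 I)^-1 y
    #   var  = diag(K(X*, X*)) - column sums of (L^-1 K(X, X*))^2
    n = K_xx.shape[0]
    L = np.linalg.cholesky(K_xx + noise_var * np.eye(n))
    alpha = np.linalg.solve(L.T, np.linalg.solve(L, y))
    f_mean = K_xs.T @ alpha
    v = np.linalg.solve(L, K_xs)
    f_var = K_ss_diag - np.sum(v * v, axis=0)
    return f_mean, f_var
```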
17,406 | def _collect_capacity_curves(data, direction="charge"):
minimum_v_value = np.Inf
maximum_v_value = -np.Inf
charge_list = []
cycles = data.get_cycle_numbers()
for cycle in cycles:
try:
if direction == "charge":
q, v = data.get_ccap(cycle)
else:
q, v = data.get_dcap(cycle)
except NullData as e:
logging.warning(e)
break
else:
d = pd.DataFrame({"q": q, "v": v})
d.name = cycle
charge_list.append(d)
v_min = v.min()
v_max = v.max()
if v_min < minimum_v_value:
minimum_v_value = v_min
if v_max > maximum_v_value:
maximum_v_value = v_max
return charge_list, cycles, minimum_v_value, maximum_v_value | Create a list of pandas.DataFrames, one for each charge step.
The DataFrames are named by their cycle number.
Input: CellpyData
Returns: list of pandas.DataFrames
minimum voltage value,
maximum voltage value |
17,407 | def load_sub_plugins_from_str(cls, plugins_str):
plugin_classes = {}
if plugins_str:
for plugin_name in plugins_str.split(":"):
pc = load_plugin(plugin_name, MONITOR_DEFAULT_PLUGIN_MODULE)
plugin_classes[plugin_name] = pc
return plugin_classes | Load plugin classes based on a colon-separated list of plugin names.
Returns dict with plugin name as key and class as value. |
17,408 | def _get_content_length(self,msg):
m = re.search(r,msg,re.S)
return (m and int(m.group())) or 0 | Parse the Content-Length from the message. |
17,409 | def create_branches(self, branches):
if not isinstance(branches, TreeBuffer):
branches = TreeBuffer(branches)
self.set_buffer(branches, create_branches=True) | Create branches from a TreeBuffer or dict mapping names to type names
Parameters
----------
branches : TreeBuffer or dict |
17,410 | def read_config(cls, configparser):
config = dict()
section = cls.__name__
option = "prefixes"
if configparser.has_option(section, option):
value = configparser.get(section, option)
names = [x.strip().lower() for x in value.split(",")]
else:
names = []
config[option] = names
return config | Read configuration file options. |
17,411 | def get_group(value):
group = Group()
token, value = get_display_name(value)
if not value or value[0] != :
raise errors.HeaderParseError("expected at end of group "
"display name but found ".format(value))
group.append(token)
group.append(ValueTerminal(, ))
value = value[1:]
if value and value[0] == :
group.append(ValueTerminal(, ))
return group, value[1:]
token, value = get_group_list(value)
group.append(token)
if not value:
group.defects.append(errors.InvalidHeaderDefect(
"end of header in group"))
if value[0] != :
raise errors.HeaderParseError(
"expected at end of group but found {}".format(value))
group.append(ValueTerminal(, ))
value = value[1:]
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
group.append(token)
return group, value | group = display-name ":" [group-list] ";" [CFWS] |
17,412 | def get_client(service, service_type=, **conn_args):
client_details = choose_client(service)
user_agent = get_user_agent(**conn_args)
if client_details:
if client_details[] == :
client = get_gcp_client(
mod_name=client_details[],
pkg_name=conn_args.get(, ),
key_file=conn_args.get(, None),
project=conn_args[], user_agent=user_agent)
else:
client = get_google_client(
mod_name=client_details[],
key_file=conn_args.get(, None),
user_agent=user_agent, api_version=conn_args.get(, ))
else:
try:
client = get_google_client(
mod_name=service, key_file=conn_args.get(, None),
user_agent=user_agent, api_version=conn_args.get(, ))
except Exception as e:
raise e
return client_details, client | User function to get the correct client.
Based on the GOOGLE_CLIENT_MAP dictionary, the return will be a cloud or general
client that can interact with the desired service.
:param service: GCP service to connect to. E.g. 'gce', 'iam'
:type service: ``str``
:param conn_args: Dictionary of connection arguments. 'project' is required.
'user_agent' can be specified and will be set in the client
returned.
:type conn_args: ``dict``
:return: client_details, client
:rtype: ``tuple`` of ``dict``, ``object`` |
17,413 | def _rm_units_from_var_name_single(var):
m = re.match(re_var_w_units, var)
if m:
try:
var = m.group(1).strip().lower()
except Exception:
pass
return var | NOTE: USE THIS FOR SINGLE CELLS ONLY
When parsing sheets, all variable names must be exact matches when cross-referencing the metadata and data sections.
However, sometimes people like to put "age (years BP)" in one section, and "age" in the other. This causes problems.
We're using this regex to match all variableName cells and remove the "(years BP)" where applicable.
:param str var: Variable name
:return str: Variable name |
17,414 | def cost_matrix(self, set_a, set_b, time_a, time_b):
costs = np.zeros((len(set_a), len(set_b)))
for a, item_a in enumerate(set_a):
for b, item_b in enumerate(set_b):
costs[a, b] = self.total_cost_function(item_a, item_b, time_a, time_b)
return costs | Calculates the costs (distances) between the items in set a and set b at the specified times.
Args:
set_a: List of STObjects
set_b: List of STObjects
time_a: time at which objects in set_a are evaluated
time_b: time at which objects in set_b are evaluated
Returns:
A numpy array with shape [len(set_a), len(set_b)] containing the cost matrix between the items in set a
and the items in set b. |
17,415 | def serialize(v, known_modules=[]):
tname = name(v, known_modules=known_modules)
func = serializer(tname)
return func(v), tname | Get a text representation of an object. |
17,416 | def _compile_docker_commands(app_name, assembled_specs, port_spec):
app_spec = assembled_specs[][app_name]
commands = []
commands += _lib_install_commands_for_app(app_name, assembled_specs)
if app_spec[]:
commands.append("cd {}".format(container_code_path(app_spec)))
commands.append("export PATH=$PATH:{}".format(container_code_path(app_spec)))
commands += _copy_assets_commands_for_app(app_spec, assembled_specs)
commands += _get_once_commands(app_spec, port_spec)
commands += _get_always_commands(app_spec)
return commands | This is used to compile the command that will be run when the docker container starts
up. This command has to install any libs that the app uses, run the `always` command, and
run the `once` command if the container is being launched for the first time |
17,417 | def scale_down(self, workers, pods=None):
pods = pods or self._cleanup_terminated_pods(self.pods())
ips = set(urlparse(worker).hostname for worker in workers)
to_delete = [p for p in pods if p.status.pod_ip in ips]
if not to_delete:
return
self._delete_pods(to_delete) | Remove the pods for the requested list of workers
When scale_down is called by the _adapt async loop, the workers are
assumed to have been cleanly closed first and in-memory data has been
migrated to the remaining workers.
Note that when the worker process exits, Kubernetes leaves the pods in
a 'Succeeded' state that we collect here.
If some workers have not been closed, we just delete the pods with
matching ip addresses.
Parameters
----------
workers: List[str] List of addresses of workers to close |
17,418 | def ng_call_ctrl_function(self, element, func, params=, return_out=False):
if isinstance(params, string_types):
param_str = params
elif isinstance(params, (tuple, list)):
param_str = self.__serialize_params(params)
else:
raise ValueError()
exec_str = % (func, param_str)
if return_out:
return self.__type2python(
self.browser.execute_script(.format(exec_str), element))
else:
self.browser.execute_script(exec_str, element) | :Description: Will execute controller function with provided parameters.
:Warning: This will only work for angular.js 1.x.
:Warning: Requires angular debugging to be enabled.
:param element: Element for browser instance to target.
:param func: Function to execute from angular element controller.
:type func: string
:param params: String (naked) args, or list of parameters to pass to target function.
:type params: string, tuple, list
:param return_out: Return output of function call otherwise None
:type return_out: bool |
17,419 | def run_sql_script(conn, scriptname=):
script_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), ))
script_str = open(os.path.join(script_dir, scriptname)).read()
conn.execution_options(autocommit=True).execute(script_str)
return | This function runs .sql scripts in the folder 'sql_scripts' |
17,420 | def find(self, title):
if title not in self._titles:
raise KeyError(title)
return self._titles[title][0] | Return the first worksheet with the given title.
Args:
title(str): title/name of the worksheet to return
Returns:
WorkSheet: contained worksheet object
Raises:
KeyError: if the spreadsheet has no worksheet with the given ``title`` |
17,421 | def get_app_perms(model_or_app_label):
from django.contrib.auth.models import Permission
if isinstance(model_or_app_label, string_types):
app_label = model_or_app_label
else:
app_label = model_or_app_label._meta.app_label
qs = Permission.objects.filter(content_type__app_label=app_label)
perms = ( % (app_label, p.codename) for p in qs.iterator())
return set(perms) | Get permission-string list of the specified django application.
Parameters
----------
model_or_app_label : model class or string
A model class or app_label string to specify the particular django
application.
Returns
-------
set
A set of perms of the specified django application.
Examples
--------
>>> perms1 = get_app_perms('auth')
>>> perms2 = get_app_perms(Permission)
>>> perms1 == perms2
True |
17,422 | def _extract_stars(data, catalog, size=(11, 11), use_xy=True):
colnames = catalog.colnames
if ( not in colnames or not in colnames) or not use_xy:
xcenters, ycenters = skycoord_to_pixel(catalog[], data.wcs,
origin=0, mode=)
else:
xcenters = catalog[].data.astype(np.float)
ycenters = catalog[].data.astype(np.float)
if in colnames:
ids = catalog[]
else:
ids = np.arange(len(catalog), dtype=np.int) + 1
if data.uncertainty is None:
weights = np.ones_like(data.data)
else:
if data.uncertainty.uncertainty_type == :
weights = np.asanyarray(data.uncertainty.array, dtype=np.float)
else:
warnings.warn(
,
AstropyUserWarning)
weights = np.ones_like(data.data)
if data.mask is not None:
weights[data.mask] = 0.
stars = []
for xcenter, ycenter, obj_id in zip(xcenters, ycenters, ids):
try:
large_slc, small_slc = overlap_slices(data.data.shape, size,
(ycenter, xcenter),
mode=)
data_cutout = data.data[large_slc]
weights_cutout = weights[large_slc]
except (PartialOverlapError, NoOverlapError):
stars.append(None)
continue
origin = (large_slc[1].start, large_slc[0].start)
cutout_center = (xcenter - origin[0], ycenter - origin[1])
star = EPSFStar(data_cutout, weights_cutout,
cutout_center=cutout_center, origin=origin,
wcs_large=data.wcs, id_label=obj_id)
stars.append(star)
return stars | Extract cutout images from a single image centered on stars defined
in the single input catalog.
Parameters
----------
data : `~astropy.nddata.NDData`
A `~astropy.nddata.NDData` object containing the 2D image from
which to extract the stars. If the input ``catalog`` contains
only the sky coordinates (i.e. not the pixel coordinates) of the
stars then the `~astropy.nddata.NDData` object must have a valid
``wcs`` attribute.
catalogs : `~astropy.table.Table`
A single catalog of sources to be extracted from the input
``data``. The center of each source can be defined either in
pixel coordinates (in ``x`` and ``y`` columns) or sky
coordinates (in a ``skycoord`` column containing a
`~astropy.coordinates.SkyCoord` object). If both are specified,
then the value of the ``use_xy`` keyword determines which
coordinates will be used.
size : int or array_like (int), optional
The extraction box size along each axis. If ``size`` is a
scalar then a square box of size ``size`` will be used. If
``size`` has two elements, they should be in ``(ny, nx)`` order.
The size must be greater than or equal to 3 pixel for both axes.
use_xy : bool, optional
Whether to use the ``x`` and ``y`` pixel positions when both
pixel and sky coordinates are present in the input catalog
table. If `False` then sky coordinates are used instead of
pixel coordinates (e.g. for linked stars). The default is
`True`.
Returns
-------
stars : list of `EPSFStar` objects
A list of `EPSFStar` instances containing the extracted stars. |
17,423 | def start(self, *args):
if self.volumes:
volumes = " --bind " + " --bind ".join(self.volumes)
else:
volumes = ""
self._print("Instantiating container [{0:s}]. Timeout set to {1:d}. The container ID is printed below.".format(self.name, self.time_out))
utils.xrun("singularity instance.start",
list(args) + [volumes,
self.image, self.name])
self.status = "created"
return 0 | Create a singularity container instance |
17,424 | def reset(cls):
cls.input_el.value = ""
cls.subconspect_el.html = ""
cls.show_error(False) | Reset the conspect elements to initial state. |
17,425 | def make_fileitem_peinfo_detectedentrypointsignature_name(entrypoint_name, condition=, negate=False,
preserve_case=False):
document =
search =
content_type =
content = entrypoint_name
ii_node = ioc_api.make_indicatoritem_node(condition, document, search, content_type, content,
negate=negate, preserve_case=preserve_case)
return ii_node | Create a node for FileItem/PEInfo/DetectedEntryPointSignature/Name
:return: A IndicatorItem represented as an Element node |
17,426 | def _insert_code(code_to_modify, code_to_insert, before_line):
linestarts = dict(dis.findlinestarts(code_to_modify))
if not linestarts:
return False, code_to_modify
if code_to_modify.co_name == :
if before_line == min(linestarts.values()):
return False, code_to_modify
if before_line not in linestarts.values():
return False, code_to_modify
offset = None
for off, line_no in linestarts.items():
if line_no == before_line:
offset = off
break
code_to_insert_list = add_jump_instruction(offset, code_to_insert)
try:
code_to_insert_list, new_names = \
_add_attr_values_from_insert_to_original(code_to_modify, code_to_insert, code_to_insert_list, ,
dis.hasname)
code_to_insert_list, new_consts = \
_add_attr_values_from_insert_to_original(code_to_modify, code_to_insert, code_to_insert_list, ,
[opmap[]])
code_to_insert_list, new_vars = \
_add_attr_values_from_insert_to_original(code_to_modify, code_to_insert, code_to_insert_list, ,
dis.haslocal)
new_bytes, all_inserted_code = _update_label_offsets(code_to_modify.co_code, offset, list(code_to_insert_list))
new_lnotab = _modify_new_lines(code_to_modify, offset, code_to_insert_list)
if new_lnotab is None:
return False, code_to_modify
except ValueError:
pydev_log.exception()
return False, code_to_modify
new_code = CodeType(
code_to_modify.co_argcount,
code_to_modify.co_kwonlyargcount,
len(new_vars),
code_to_modify.co_stacksize,
code_to_modify.co_flags,
new_bytes,
new_consts,
new_names,
new_vars,
code_to_modify.co_filename,
code_to_modify.co_name,
code_to_modify.co_firstlineno,
new_lnotab,
code_to_modify.co_freevars,
code_to_modify.co_cellvars
)
return True, new_code | Insert piece of code `code_to_insert` to `code_to_modify` right inside the line `before_line` before the
instruction on this line by modifying original bytecode
:param code_to_modify: Code to modify
:param code_to_insert: Code to insert
:param before_line: Number of line for code insertion
:return: boolean flag whether insertion was successful, modified code |
17,427 | def upload_model(self, path: str, meta: dict, force: bool) -> str:
raise NotImplementedError | Put the given file to the remote storage.
:param path: Path to the model file.
:param meta: Metadata of the model.
:param force: Overwrite an existing model.
:return: URL of the uploaded model.
:raises BackendRequiredError: If supplied bucket is unusable.
:raises ModelAlreadyExistsError: If model already exists and no forcing. |
17,428 | def get_blink_cookie(self, name):
value = self.get_cookie(name)
if value != None:
self.clear_cookie(name)
return escape.url_unescape(value) | Gets a blink cookie value |
17,429 | def _decode(s, encoding=None, errors=None):
if encoding is None:
encoding = ENCODING
if errors is None:
errors = ENCODING_ERRORS
return s if isinstance(s, unicode) else s.decode(encoding, errors) | Decodes *s*. |
17,430 | def _handle_unknown_method(self, method, remainder, request=None):
if request is None:
self._raise_method_deprecation_warning(self._handle_unknown_method)
controller = self._find_controller( % method, method)
if controller:
return controller, remainder
if remainder:
if self._find_controller(remainder[0]):
abort(405)
sub_controller = self._lookup_child(remainder[0])
if sub_controller:
return lookup_controller(sub_controller, remainder[1:],
request)
abort(405) | Routes undefined actions (like RESET) to the appropriate controller. |
17,431 | def _override_cfg(container, yamlkeys, value):
key = yamlkeys[0]
rest = yamlkeys[1:]
if len(rest) == 0:
container[key] = value
elif key in container:
_override_cfg(container[key], rest, value)
else:
subtree = {}
_override_cfg(subtree, rest, value)
container[key] = subtree | Override a hierarchical key in the config, setting it to the value.
Note that yamlkeys should be a non-empty list of strings. |
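A small usage sketch of the override above; the dictionary contents are made up for illustration:

```python
config = {"db": {"host": "localhost", "port": 5432}}

_override_cfg(config, ["db", "port"], 5433)   # override an existing nested key
_override_cfg(config, ["cache", "ttl"], 60)   # create a missing subtree on the fly

assert config == {"db": {"host": "localhost", "port": 5433}, "cache": {"ttl": 60}}
```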
17,432 | def process_data_config_section(config, data_config):
if in data_config:
for connector in data_config[]:
config.data[][
connector[]] = get_config_from_package(
connector[])
if in data_config:
if data_config[]:
for source in data_config[]:
config.data[][source[]] = source
del config.data[][source[]][] | Processes the data configuration section from the configuration
data dict.
:param config: The config reference of the object that will hold the
configuration data from the config_data.
:param data_config: Data configuration section from a config data dict. |
17,433 | def _release_waiter(self) -> None:
if not self._waiters:
return
queues = list(self._waiters.keys())
random.shuffle(queues)
for key in queues:
if self._available_connections(key) < 1:
continue
waiters = self._waiters[key]
while waiters:
waiter = waiters.popleft()
if not waiter.done():
waiter.set_result(None)
return | Iterates over all waiters until one is found that is not finished and
belongs to a host that has available connections.
17,434 | def _startJobWithRetries(self, jobID):
with ConnectionFactory.get() as conn:
query = \
\
\
\
\
% (self.jobsTableName,)
sqlParams = [self.STATUS_RUNNING, self._connectionID,
jobID, self.STATUS_NOTSTARTED]
numRowsUpdated = conn.cursor.execute(query, sqlParams)
if numRowsUpdated != 1:
self._logger.warn(
, numRowsUpdated)
return | Place the given job in STATUS_RUNNING mode; the job is expected to be
STATUS_NOTSTARTED.
NOTE: this function was factored out of jobStartNext because it's also
needed for testing (e.g., test_client_jobs_dao.py) |
17,435 | def _get_seal_key_ntlm1(negotiate_flags, exported_session_key):
if negotiate_flags & NegotiateFlags.NTLMSSP_NEGOTIATE_56:
seal_key = exported_session_key[:7] + b"\xa0"
else:
seal_key = exported_session_key[:5] + b"\xe5\x38\xb0"
return seal_key | 3.4.5.3 SEALKEY
Calculates the seal_key used to seal (encrypt) messages. This is for
authentication where NTLMSSP_NEGOTIATE_EXTENDED_SESSIONSECURITY has not
been negotiated. The key is weakened: a 56-bit key is used if
NTLMSSP_NEGOTIATE_56 is negotiated, otherwise it defaults to the 40-bit key.
:param negotiate_flags: The negotiate_flags structure sent by the server
:param exported_session_key: A 128-bit session key used to derive signing
and sealing keys
:return seal_key: Key used to seal messages |
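A standalone check of the key-weakening branch described above (the session key bytes are made up; the flag constant is an assumption for illustration, taken from MS-NLMP where NTLMSSP_NEGOTIATE_56 is 0x80000000):

```python
NTLMSSP_NEGOTIATE_56 = 0x80000000  # assumed flag value, per MS-NLMP

def seal_key_ntlm1(negotiate_flags, exported_session_key):
    # Mirrors the branch above: 7 key bytes + 0xA0 for the 56-bit key,
    # otherwise 5 key bytes + 0xE5 0x38 0xB0 for the 40-bit key.
    if negotiate_flags & NTLMSSP_NEGOTIATE_56:
        return exported_session_key[:7] + b"\xa0"
    return exported_session_key[:5] + b"\xe5\x38\xb0"

session_key = bytes(range(16))
assert seal_key_ntlm1(NTLMSSP_NEGOTIATE_56, session_key) == bytes(range(7)) + b"\xa0"
assert seal_key_ntlm1(0, session_key) == bytes(range(5)) + b"\xe5\x38\xb0"
```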
17,436 | def request(self, batch, attempt=0):
try:
q = self.api.new_queue()
for msg in batch:
q.add(msg[], msg[], source=msg[])
q.submit()
except:
if attempt > self.retries:
raise
self.request(batch, attempt+1) | Attempt to upload the batch and retry before raising an error |
17,437 | def _GetFormatErrorLocation(
self, yaml_definition, last_definition_object):
name = yaml_definition.get(, None)
if name:
error_location = .format(name or )
elif last_definition_object:
error_location = .format(last_definition_object.name)
else:
error_location =
return error_location | Retrieves a format error location.
Args:
yaml_definition (dict[str, object]): current YAML definition.
last_definition_object (DataTypeDefinition): previous data type
definition.
Returns:
str: format error location. |
17,438 | def encode(secret: Union[str, bytes], payload: dict = None,
alg: str = default_alg, header: dict = None) -> str:
secret = util.to_bytes(secret)
payload = payload or {}
header = header or {}
header_json = util.to_bytes(json.dumps(header))
header_b64 = util.b64_encode(header_json)
payload_json = util.to_bytes(json.dumps(payload))
payload_b64 = util.b64_encode(payload_json)
pre_signature = util.join(header_b64, payload_b64)
signature = _hash(secret, pre_signature, alg)
signature_b64 = util.b64_encode(signature)
token = util.join(pre_signature, signature_b64)
return util.from_bytes(token) | :param secret: The secret used to encode the token.
:type secret: Union[str, bytes]
:param payload: The payload to be encoded in the token.
:type payload: dict
:param alg: The algorithm used to hash the token.
:type alg: str
:param header: The header to be encoded in the token.
:type header: dict
:return: A new token
:rtype: str |
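The token produced above has the familiar `header_b64.payload_b64.signature_b64` shape. A minimal standalone sketch of the same scheme (this is not the library's `util` module; it assumes a '.' separator and HMAC-SHA256 for the hash step):

```python
import base64
import hashlib
import hmac
import json

def b64url(data: bytes) -> bytes:
    return base64.urlsafe_b64encode(data).rstrip(b"=")

def encode_token(secret: bytes, payload: dict, header: dict) -> str:
    header_b64 = b64url(json.dumps(header).encode())
    payload_b64 = b64url(json.dumps(payload).encode())
    pre_signature = header_b64 + b"." + payload_b64
    signature = hmac.new(secret, pre_signature, hashlib.sha256).digest()
    return (pre_signature + b"." + b64url(signature)).decode()

token = encode_token(b"secret", {"sub": "alice"}, {"alg": "HS256"})
print(token)  # three dot-separated base64url segments
```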
17,439 | def get_policy(policy_name,
region=None, key=None, keyid=None, profile=None):
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
ret = conn.get_policy(_get_policy_arn(policy_name,
region=region, key=key, keyid=keyid, profile=profile))
return ret.get(, {}).get(, {})
except boto.exception.BotoServerError:
return None | Check to see if policy exists.
CLI Example:
.. code-block:: bash
salt myminion boto_iam.instance_profile_exists myiprofile |
17,440 | def CollectData(self):
while 1:
_bytes = self._ReadPacket()
if not _bytes:
return None
if len(_bytes) < 4 + 8 + 1 or _bytes[0] < 0x20 or _bytes[0] > 0x2F:
logging.warning("Wanted data, dropped type=0x%02x, len=%d",
_bytes[0], len(_bytes))
continue
seq, _type, x, y = struct.unpack("BBBB", _bytes[:4])
data = [
struct.unpack(">hhhh", _bytes[x:x + 8])
for x in range(4,
len(_bytes) - 8, 8)
]
if self._last_seq and seq & 0xF != (self._last_seq + 1) & 0xF:
logging.warning("Data sequence skipped, lost packet?")
self._last_seq = seq
if _type == 0:
if not self._coarse_scale or not self._fine_scale:
logging.warning(
"Waiting for calibration, dropped data packet.")
continue
out = []
for main, usb, aux, voltage in data:
if main & 1:
coarse = ((main & ~1) - self._coarse_zero)
out.append(coarse * self._coarse_scale)
else:
out.append((main - self._fine_zero) * self._fine_scale)
return out
elif _type == 1:
self._fine_zero = data[0][0]
self._coarse_zero = data[1][0]
elif _type == 2:
self._fine_ref = data[0][0]
self._coarse_ref = data[1][0]
else:
logging.warning("Discarding data packet type=0x%02x", _type)
continue
if self._coarse_ref != self._coarse_zero:
self._coarse_scale = 2.88 / (
self._coarse_ref - self._coarse_zero)
if self._fine_ref != self._fine_zero:
self._fine_scale = 0.0332 / (self._fine_ref - self._fine_zero) | Return some current samples. Call StartDataCollection() first. |
17,441 | def __gzip(filename):
zipname = filename +
file_pointer = open(filename,)
zip_pointer = gzip.open(zipname,)
zip_pointer.writelines(file_pointer)
file_pointer.close()
zip_pointer.close()
return zipname | Compress a file returning the new filename (.gz) |
17,442 | def clear_lock(self, abspath=True):
cmd_list = [, , ]
return self._call_and_parse(cmd_list, abspath=abspath) | Clean any conda lock in the system. |
17,443 | def slice(self, *slice_args, **kwargs):
sorted_by = kwargs.pop(, )
if kwargs:
raise TypeError(
.format(kwargs.popitem()[0]))
if slice_args:
selector = slice(*slice_args)
else:
selector = slice(None)
if sorted_by is None:
record = self.record[selector]
else:
sort_indices = np.argsort(self.record[sorted_by])
record = self.record[sort_indices[selector]]
return type(self)(record, self.variables, copy.deepcopy(self.info),
self.vartype) | Create a new SampleSet with rows sliced according to standard Python
slicing syntax.
Args:
start (int, optional, default=None):
Start index for `slice`.
stop (int):
Stop index for `slice`.
step (int, optional, default=None):
Step value for `slice`.
sorted_by (str/None, optional, default='energy'):
Selects the record field used to sort the samples before
slicing. Note that `sorted_by` determines the sample order in
the returned SampleSet.
Returns:
:obj:`.SampleSet`
Examples:
>>> import numpy as np
...
>>> sampleset = dimod.SampleSet.from_samples(np.diag(range(1, 11)), dimod.BINARY, energy=range(10))
>>> print(sampleset)
0 1 2 3 4 5 6 7 8 9 energy num_oc.
0 1 0 0 0 0 0 0 0 0 0 0 1
1 0 1 0 0 0 0 0 0 0 0 1 1
2 0 0 1 0 0 0 0 0 0 0 2 1
3 0 0 0 1 0 0 0 0 0 0 3 1
4 0 0 0 0 1 0 0 0 0 0 4 1
5 0 0 0 0 0 1 0 0 0 0 5 1
6 0 0 0 0 0 0 1 0 0 0 6 1
7 0 0 0 0 0 0 0 1 0 0 7 1
8 0 0 0 0 0 0 0 0 1 0 8 1
9 0 0 0 0 0 0 0 0 0 1 9 1
['BINARY', 10 rows, 10 samples, 10 variables]
>>> # the first 3 samples by energy == truncate(3)
>>> print(sampleset.slice(3))
0 1 2 3 4 5 6 7 8 9 energy num_oc.
0 1 0 0 0 0 0 0 0 0 0 0 1
1 0 1 0 0 0 0 0 0 0 0 1 1
2 0 0 1 0 0 0 0 0 0 0 2 1
['BINARY', 3 rows, 3 samples, 10 variables]
>>> # the last 3 samples by energy
>>> print(sampleset.slice(-3, None))
0 1 2 3 4 5 6 7 8 9 energy num_oc.
0 0 0 0 0 0 0 0 1 0 0 7 1
1 0 0 0 0 0 0 0 0 1 0 8 1
2 0 0 0 0 0 0 0 0 0 1 9 1
['BINARY', 3 rows, 3 samples, 10 variables]
>>> # every second sample in between (skip the top and the bottom 3)
>>> print(sampleset.slice(3, -3, 2))
0 1 2 3 4 5 6 7 8 9 energy num_oc.
0 0 0 0 1 0 0 0 0 0 0 3 1
1 0 0 0 0 0 1 0 0 0 0 5 1
['BINARY', 2 rows, 2 samples, 10 variables] |
17,444 | def call_git_branch():
try:
with open(devnull, "w") as fnull:
arguments = [GIT_COMMAND, , , ]
return check_output(arguments, cwd=CURRENT_DIRECTORY,
stderr=fnull).decode("ascii").strip()
except (OSError, CalledProcessError):
return None | Return the string output of the git command used to get the current branch, or None on failure. |
17,445 | def geom_find_rotsymm(g, atwts, ax, improp, \
nmax=_DEF.SYMM_MATCH_NMAX, \
tol=_DEF.SYMM_MATCH_TOL):
import numpy as np
g = make_nd_vec(g, nd=None, t=np.float64, norm=False)
ax = make_nd_vec(ax, nd=3, t=np.float64, norm=True)
nval = nmax + 1
nfac = 1.0
while nfac > tol and nval > 0:
nval = nval - 1
try:
nfac = geom_symm_match(g, atwts, ax, \
2*np.pi/nval, improp)
except ZeroDivisionError as zde:
if nval > 0:
raise zde
return nval, nfac | Identify highest-order symmetry for a geometry on a given axis.
Regular and improper axes possible.
.. todo:: Complete geom_find_rotsymm docstring |
17,446 | def mod2pi(ts):
return np.pi - np.mod(np.pi - ts, 2*np.pi) | For a timeseries where all variables represent phases (in radians),
return an equivalent timeseries where all values are in the range (-pi, pi] |
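A quick sanity check of the wrapping formula above (values chosen for illustration):

```python
import numpy as np

vals = np.array([0.0, 3 * np.pi, -np.pi, -0.5])
# Wrapped into (-pi, pi]: 3*pi -> pi, -pi -> pi (half-open at -pi), -0.5 unchanged.
print(np.pi - np.mod(np.pi - vals, 2 * np.pi))  # [0.0, pi, pi, -0.5]
```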
17,447 | def get_nearest_points_dirty(self, center_point, radius, unit=):
if unit == :
radius = utils.mi_to_km(radius)
grid_size = GEO_HASH_GRID_SIZE[self.precision]
if radius > grid_size / 2:
suggested_precision = 0
for precision, max_size in GEO_HASH_GRID_SIZE.items():
if radius > max_size / 2:
suggested_precision = precision - 1
break
raise ValueError(
.format(suggested_precision)
)
me_and_neighbors = geohash.expand(self.get_point_hash(center_point))
return chain(*(self.data.get(key, []) for key in me_and_neighbors)) | Return an approximate list of points from the circle with the given center and radius.
It uses geohash and returns with some error (see GEO_HASH_ERRORS).
:param center_point: center of search circle
:param radius: radius of search circle
:return: list of GeoPoints from given area |
17,448 | def convert_areaSource(self, node):
geom = node.areaGeometry
coords = split_coords_2d(~geom.Polygon.exterior.LinearRing.posList)
polygon = geo.Polygon([geo.Point(*xy) for xy in coords])
msr = valid.SCALEREL[~node.magScaleRel]()
area_discretization = geom.attrib.get(
, self.area_source_discretization)
if area_discretization is None:
raise ValueError(
% node[])
return source.AreaSource(
source_id=node[],
name=node[],
tectonic_region_type=node.attrib.get(),
mfd=self.convert_mfdist(node),
rupture_mesh_spacing=self.rupture_mesh_spacing,
magnitude_scaling_relationship=msr,
rupture_aspect_ratio=~node.ruptAspectRatio,
upper_seismogenic_depth=~geom.upperSeismoDepth,
lower_seismogenic_depth=~geom.lowerSeismoDepth,
nodal_plane_distribution=self.convert_npdist(node),
hypocenter_distribution=self.convert_hpdist(node),
polygon=polygon,
area_discretization=area_discretization,
temporal_occurrence_model=self.get_tom(node)) | Convert the given node into an area source object.
:param node: a node with tag areaGeometry
:returns: a :class:`openquake.hazardlib.source.AreaSource` instance |
17,449 | def make_movie(workdir, pf, dpi=120, fps=1, format="pdf", engine="ffmpeg"):
os.chdir(workdir)
if format != "png":
cmd = "parallel convert -density {}".format(dpi)
cmd += " {} {.}.png ::: " + "*.{}".format(format)
sh(cmd)
assert engine in ("ffmpeg", "gifsicle"), \
"Only ffmpeg or gifsicle is currently supported"
if engine == "ffmpeg":
cmd = "ffmpeg -framerate {} -pattern_type glob -i {}.mp4"\
.format(fps, pf)
elif engine == "gifsicle":
cmd = "convert *.png gif:- |"
cmd += " gifsicle --delay {} --loop --optimize=3".format(100 / fps)
cmd += " --colors=256 --multifile - > {}.gif".format(pf)
sh(cmd) | Make the movie using either ffmpeg or gifsicle. |
17,450 | def _run_process(self, start_path, stop_path, process_num=0):
self.producer.initialize_worker(process_num)
self.consumer.initialize_worker(process_num)
for path in range(start_path, stop_path):
self._run_path(path)
self.consumer.finalize_worker(process_num) | The function calls _run_path for a given set of paths. |
17,451 | def _get_lowstate(self):
if not self.request.body:
return
data = self.deserialize(self.request.body)
self.request_payload = copy(data)
if data and in data and not isinstance(data[], list):
data[] = [data[]]
if not isinstance(data, list):
lowstate = [data]
else:
lowstate = data
return lowstate | Format the incoming data into a lowstate object |
17,452 | def normalize_variables(cls, variables):
if variables.get(, True) in (, False, , None):
del variables[]
return super(PackageResource, cls).normalize_variables(variables) | Make sure version is treated consistently |
17,453 | def check_and_get_data(input_list,**pars):
empty_list = []
retrieve_list = []
candidate_list = []
ipppssoot_list = []
total_input_list = []
for input_item in input_list:
print(, input_item)
indx = input_item.find()
if indx != -1:
lc_input_item = input_item.lower()
suffix = lc_input_item[indx+1:indx+4]
print(, lc_input_item)
if suffix == :
try:
asntab = Table.read(input_item, format=)
except FileNotFoundError:
log.error(.format(input_item))
return(empty_list)
for row in asntab:
if row[].startswith():
continue
memname = row[].lower().strip()
if memname.find() != -1:
candidate_list.append(memname)
else:
candidate_list.append(memname + )
elif suffix == or suffix == :
if lc_input_item not in candidate_list:
candidate_list.append(lc_input_item)
else:
log.error(.format(suffix))
return(empty_list)
elif len(input_item) == 9:
try:
if input_item not in ipppssoot_list:
retrieve_list = aqutils.retrieve_observation(input_item,**pars)
if retrieve_list:
total_input_list += retrieve_list
ipppssoot_list.append(input_item)
else:
log.error(.format(input_item))
return(empty_list)
except Exception:
exc_type, exc_value, exc_tb = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_tb, file=sys.stdout)
for file in candidate_list:
if glob.glob(file):
total_input_list.append(file)
continue
else:
log.error(.format(file))
return(empty_list)
log.info("TOTAL INPUT LIST: {}".format(total_input_list))
return(total_input_list) | Verify that all specified files are present. If not, retrieve them from MAST.
Parameters
----------
input_list : list
List of one or more calibrated fits images that will be used for catalog generation.
Returns
=======
total_input_list: list
list of full filenames |
17,454 | def forward_backward(self, x):
with mx.autograd.record():
(ls, next_sentence_label, classified, masked_id, decoded, \
masked_weight, ls1, ls2, valid_length) = forward(x, self._model, self._mlm_loss,
self._nsp_loss, self._vocab_size,
args.dtype)
ls = ls / self._rescale_factor
if args.dtype == :
self._trainer.backward(ls)
else:
ls.backward()
return ls, next_sentence_label, classified, masked_id, decoded, \
masked_weight, ls1, ls2, valid_length | forward backward implementation |
17,455 | def save_itemgetter(self, obj):
class Dummy:
def __getitem__(self, item):
return item
items = obj(Dummy())
if not isinstance(items, tuple):
items = (items,)
return self.save_reduce(operator.itemgetter, items) | itemgetter serializer (needed for namedtuple support) |
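The `Dummy` trick above recovers which indices an `operator.itemgetter` was built with: indexing the dummy simply echoes each key back, so the tuple of keys can rebuild an equivalent getter. A standalone demonstration:

```python
import operator

class Dummy:
    def __getitem__(self, item):
        return item

getter = operator.itemgetter(2, 0, 5)
items = getter(Dummy())                          # -> (2, 0, 5)
rebuilt = operator.itemgetter(*items)
print(rebuilt(["a", "b", "c", "d", "e", "f"]))   # ('c', 'a', 'f')
```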
17,456 | def __validate_dates(start_date, end_date):
try:
start_date = datetime.datetime.strptime(start_date, )
end_date = datetime.datetime.strptime(end_date, )
except ValueError:
raise ValueError("Incorrect data format, should be yyyy-mm-dd")
if (end_date - start_date).days > 366:
raise ValueError("The difference between start and end date " +
"should be less than or equal to 366 days.")
if (end_date - start_date).days < 0:
raise ValueError("End date cannot be before start date.") | Validate if a date string.
Validate if a string is a date on yyyy-mm-dd format and it the
period between them is less than a year. |
17,457 | def decrypt_with_ad(self, ad: bytes, ciphertext: bytes) -> bytes:
if self.n == MAX_NONCE:
raise NoiseMaxNonceError()
if not self.has_key():
return ciphertext
plaintext = self.cipher.decrypt(self.k, self.n, ad, ciphertext)
self.n = self.n + 1
return plaintext | If k is non-empty returns DECRYPT(k, n++, ad, ciphertext). Otherwise returns ciphertext. If an authentication
failure occurs in DECRYPT() then n is not incremented and an error is signaled to the caller.
:param ad: bytes sequence
:param ciphertext: bytes sequence
:return: plaintext bytes sequence |
17,458 | def validate_input(self):
if self.vert[1] <= self.vert[0]:
raise ValueError(u.format(self.vert[1], self.vert[0])) | Raise appropriate exception if gate was defined incorrectly. |
17,459 | def run(self):
self.graphite.start()
while True:
log.debug(, self.period)
time.sleep(self.period)
log.debug()
try:
self.push()
log.debug()
except:
log.exception()
raise | Loop forever, pushing out stats. |
17,460 | def streams(self):
from pyspark.sql.streaming import StreamingQueryManager
return StreamingQueryManager(self._ssql_ctx.streams()) | Returns a :class:`StreamingQueryManager` that allows managing all the
:class:`StreamingQuery` StreamingQueries active on `this` context.
.. note:: Evolving. |
17,461 | def _init_map(self):
TextAnswerFormRecord._init_map(self)
FilesAnswerFormRecord._init_map(self)
super(AnswerTextAndFilesMixin, self)._init_map() | stub |
17,462 | def broadcast(self, data_dict):
if self.vis_socket:
self.queued_messages.append(data_dict)
self.send_all_updates() | Send to the visualizer (if there is one) or enqueue for later |
17,463 | def get_iso3_country_code(cls, country, use_live=True, exception=None):
countriesdata = cls.countriesdata(use_live=use_live)
countryupper = country.upper()
len_countryupper = len(countryupper)
if len_countryupper == 3:
if countryupper in countriesdata[]:
return countryupper
elif len_countryupper == 2:
iso3 = countriesdata[].get(countryupper)
if iso3 is not None:
return iso3
iso3 = countriesdata[].get(countryupper)
if iso3 is not None:
return iso3
for candidate in cls.expand_countryname_abbrevs(countryupper):
iso3 = countriesdata[].get(candidate)
if iso3 is not None:
return iso3
if exception is not None:
raise exception
return None | Get the ISO3 code for the given country. Only exact matches or None are returned.
Args:
country (str): Country for which to get ISO3 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: ISO3 country code or None |
17,464 | def get_locations():
arequest = requests.get(LOCATIONS_URL, headers=HEADERS)
status_code = str(arequest.status_code)
if status_code == :
_LOGGER.error("Token expired.")
return False
return arequest.json() | Pull the accounts locations. |
17,465 | def main():
cmd_args = TagCubeCLI.parse_args()
try:
tagcube_cli = TagCubeCLI.from_cmd_args(cmd_args)
except ValueError, ve:
print % ve
sys.exit(1)
try:
sys.exit(tagcube_cli.run())
except ValueError, ve:
print % ve
sys.exit(2) | Project's main method which will parse the command line arguments, run a
scan using the TagCubeClient and exit. |
17,466 | def new(self, data):
temp = copy(self)
temp.__init__(data)
return temp | Create a new stock_block from the given data.
Arguments:
data {[type]} -- [description]
Returns:
[type] -- [description] |
17,467 | def generate_orbital_path(self, factor=3., n_points=20, viewup=None, z_shift=None):
if viewup is None:
viewup = rcParams[][]
center = list(self.center)
bnds = list(self.bounds)
if z_shift is None:
z_shift = (bnds[5] - bnds[4]) * factor
center[2] = center[2] + z_shift
radius = (bnds[1] - bnds[0]) * factor
y = (bnds[3] - bnds[2]) * factor
if y > radius:
radius = y
return vtki.Polygon(center=center, radius=radius, normal=viewup, n_sides=n_points) | Generates an orbital path around the data scene.
Parameters
----------
factor : float
A scaling factor when building the orbital extent
n_points : int
number of points on the orbital path
viewup : list(float)
the normal to the orbital plane
z_shift : float, optional
shift the plane up/down from the center of the scene by this amount |
17,468 | def multiply(traj, result_list):
z=traj.x*traj.y
result_list[traj.v_idx] = z | Example of a sophisticated simulation that involves multiplying two values.
This time we will store the value in a shared list and only add the result at the end.
:param traj:
Trajectory containing
the parameters in a particular combination,
it also serves as a container for results. |
17,469 | def add(self, pattern, function, method=None, type_cast=None):
if not type_cast:
type_cast = {}
with self._lock:
self._data_store.append({
: pattern,
: function,
: method,
: type_cast,
}) | Function for registering a path pattern.
Args:
pattern (str): Regex pattern to match a certain path.
function (function): Function to associate with this path.
method (str, optional): Usually used to define one of GET, POST,
PUT, DELETE. You may use whatever fits your situation though.
Defaults to None.
type_cast (dict, optional): Mapping between the param name and
one of `int`, `float` or `bool`. The value reflected by the
provided param name will then be cast to the given type.
Defaults to None. |
17,470 | def OnActivateReader(self, event):
item = event.GetItem()
if item:
itemdata = self.readertreepanel.readertreectrl.GetItemPyData(item)
if isinstance(itemdata, smartcard.Card.Card):
self.ActivateCard(itemdata)
elif isinstance(itemdata, smartcard.reader.Reader.Reader):
self.dialogpanel.OnActivateReader(itemdata)
event.Skip() | Called when the user activates a reader in the tree. |
17,471 | def on_demand_annotation(twitter_app_key, twitter_app_secret, user_twitter_id):
twitter = login(twitter_app_key, twitter_app_secret)
twitter_lists_list = twitter.get_list_memberships(user_id=user_twitter_id, count=1000)
for twitter_list in twitter_lists_list:
print(twitter_list)
return twitter_lists_list | A service that leverages twitter lists for on-demand annotation of popular users.
TODO: Do this. |
17,472 | def notification_selected_sm_changed(self, model, prop_name, info):
selected_state_machine_id = self.model.selected_state_machine_id
if selected_state_machine_id is None:
return
page_id = self.get_page_num(selected_state_machine_id)
number_of_pages = self.view["notebook"].get_n_pages()
old_label_colors = list(range(number_of_pages))
for p in range(number_of_pages):
page = self.view["notebook"].get_nth_page(p)
label = self.view["notebook"].get_tab_label(page).get_child().get_children()[0]
old_label_colors[p] = label.get_style_context().get_color(Gtk.StateType.NORMAL)
if not self.view.notebook.get_current_page() == page_id:
self.view.notebook.set_current_page(page_id)
for p in range(number_of_pages):
page = self.view["notebook"].get_nth_page(p)
label = self.view["notebook"].get_tab_label(page).get_child().get_children()[0]
style = label.get_style_context() | If a new state machine is selected, make sure the tab is open |
17,473 | def next_population(self, population, fitnesses):
return [self._next_solution() for _ in range(self._population_size)] | Make a new population after each optimization iteration.
Args:
population: The population current population of solutions.
fitnesses: The fitness associated with each solution in the population
Returns:
list; a list of solutions. |
17,474 | def finalize(self):
super(StringWriterConsumer, self).finalize()
self.result = self.decoder(self.result) | finalize simulation for consumer |
17,475 | def fen(self, *, shredder: bool = False, en_passant: str = "legal", promoted: Optional[bool] = None) -> str:
return " ".join([
self.epd(shredder=shredder, en_passant=en_passant, promoted=promoted),
str(self.halfmove_clock),
str(self.fullmove_number)
]) | Gets a FEN representation of the position.
A FEN string (e.g.,
``rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1``) consists
of the position part :func:`~chess.Board.board_fen()`, the
:data:`~chess.Board.turn`, the castling part
(:data:`~chess.Board.castling_rights`),
the en passant square (:data:`~chess.Board.ep_square`),
the :data:`~chess.Board.halfmove_clock`
and the :data:`~chess.Board.fullmove_number`.
:param shredder: Use :func:`~chess.Board.castling_shredder_fen()`
and encode castling rights by the file of the rook
(like ``HAha``) instead of the default
:func:`~chess.Board.castling_xfen()` (like ``KQkq``).
:param en_passant: By default, only fully legal en passant squares
are included (:func:`~chess.Board.has_legal_en_passant()`).
Pass ``fen`` to strictly follow the FEN specification
(always include the en passant square after a two-step pawn move)
or ``xfen`` to follow the X-FEN specification
(:func:`~chess.Board.has_pseudo_legal_en_passant()`).
:param promoted: Mark promoted pieces like ``Q~``. By default, this is
only enabled in chess variants where this is relevant. |
17,476 | def day(self):
self.magnification = 86400
self._update(self.baseNumber, self.magnification)
return self | set unit to day |
17,477 | def compareBIMfiles(beforeFileName, afterFileName, outputFileName):
options = Dummy()
options.before = beforeFileName
options.after = afterFileName
options.out = outputFileName
CompareBIM.checkArgs(options)
beforeBIM = CompareBIM.readBIM(options.before)
afterBIM = CompareBIM.readBIM(options.after)
CompareBIM.compareSNPs(beforeBIM, afterBIM, options.out)
return beforeBIM - afterBIM | Compare two BIM files for differences.
:param beforeFileName: the name of the file before modification.
:param afterFileName: the name of the file after modification.
:param outputFileName: the name of the output file (containing the
differences between the ``before`` and the ``after``
files.
:type beforeFileName: str
:type afterFileName: str
:type outputFileName: str
:returns: the number of differences between the two files.
The ``bim`` files contain the list of markers in a given dataset. The
``before`` file should have more markers than the ``after`` file. The
``after`` file should be a subset of the markers in the ``before`` file. |
17,478 | def split_on(word: str, section: str) -> Tuple[str, str]:
return word[:word.index(section)] + section, word[word.index(section) + len(section):] | Given a string, split on a section, and return the two sections as a tuple.
:param word:
:param section:
:return:
>>> split_on('hamrye', 'ham')
('ham', 'rye') |
17,479 | def canvasReleaseEvent(self, e):
_ = e
self.is_emitting_point = False
self.rectangle_created.emit() | Handle the canvas release event once capturing of e has finished.
:param e: A Qt event object.
:type: QEvent |
17,480 | def ctrl_srfc_pt_send(self, target, bitfieldPt, force_mavlink1=False):
return self.send(self.ctrl_srfc_pt_encode(target, bitfieldPt), force_mavlink1=force_mavlink1) | This message sets the control surfaces for selective passthrough mode.
target : The system setting the commands (uint8_t)
bitfieldPt : Bitfield containing the passthrough configuration, see CONTROL_SURFACE_FLAG ENUM. (uint16_t) |
17,481 | def progression_sinusoidal(week, start_weight, final_weight, start_week,
end_week,
periods=2, scale=0.025, offset=0):
linear = progression_linear(week, start_weight, final_weight,
start_week, end_week)
time_period = end_week - start_week
sine_argument = ((week - offset - start_week) * (math.pi * 2) /
(time_period / periods))
linear_with_sinusoidal = linear * (1 + scale * math.sin(sine_argument))
return linear_with_sinusoidal | A sinusoidal progression function going through the points
('start_week', 'start_weight') and ('end_week', 'final_weight'), evaluated
in 'week'. This function calls a linear progression function
and multiplies it by a sinusoid.
Parameters
----------
week
The week to evaluate the linear function at.
start_weight
The weight at 'start_week'.
final_weight
The weight at 'end_week'.
start_week
The number of the first week, typically 1.
end_week
The number of the final week, e.g. 8.
periods
Number of sinusoidal periods in the time range.
scale
The scale (amplitude) of the sinusoidal term.
offset
The offset (shift) of the sinusoid.
Returns
-------
weight
The weight at 'week'.
Examples
-------
>>> progression_sinusoidal(1, 100, 120, 1, 8)
100.0
>>> progression_sinusoidal(8, 100, 120, 1, 8)
120.0
>>> progression_sinusoidal(4, 100, 120, 1, 8)
106.44931454758678 |
17,482 | def mask(args):
p = OptionParser(mask.__doc__)
opts, args = p.parse_args(args)
if len(args) not in (2, 4):
sys.exit(not p.print_help())
if len(args) == 4:
databin, sampleids, strids, metafile = args
df, m, samples, loci = read_binfile(databin, sampleids, strids)
mode = "STRs"
elif len(args) == 2:
databin, metafile = args
df = pd.read_csv(databin, sep="\t", index_col=0)
m = df.as_matrix()
samples = df.index
loci = list(df.columns)
mode = "TREDs"
pf = "{}_{}_SEARCH".format(mode, timestamp())
final_columns, percentiles = read_meta(metafile)
maskfile = pf + ".mask.tsv"
run_args = []
for i, locus in enumerate(loci):
a = m[:, i]
percentile = percentiles[locus]
run_args.append((i, a, percentile))
if mode == "TREDs" or need_update(databin, maskfile):
cpus = min(8, len(run_args))
write_mask(cpus, samples, final_columns, run_args, filename=maskfile)
logging.debug("File `{}` written.".format(maskfile)) | %prog mask data.bin samples.ids STR.ids meta.tsv
OR
%prog mask data.tsv meta.tsv
Compute P-values based on meta and data. The `data.bin` should be the matrix
containing filtered loci and the output mask.tsv will have the same
dimension. |
17,483 | def saveCustomParams(self, data):
LOGGER.info()
message = { : data }
self.send(message) | Send custom dictionary to Polyglot to save and be retrieved on startup.
:param data: Dictionary of key value pairs to store in Polyglot database. |
17,484 | def delete(args):
nodes = [ClusterNode.from_uri(n) for n in args.nodes]
cluster = Cluster.from_node(ClusterNode.from_uri(args.cluster))
echo("Deleting...")
for node in nodes:
cluster.delete_node(node)
cluster.wait() | Delete nodes from the cluster |
17,485 | def bedtools_merge(data, sample):
LOGGER.info("Entering bedtools_merge: %s", sample.name)
mappedreads = os.path.join(data.dirs.refmapping,
sample.name+"-mapped-sorted.bam")
cmd1 = [ipyrad.bins.bedtools, "bamtobed", "-i", mappedreads]
cmd2 = [ipyrad.bins.bedtools, "merge", "-i", "-"]
if in data.paramsdict["datatype"]:
check_insert_size(data, sample)
cmd2.insert(2, str(data._hackersonly["max_inner_mate_distance"]))
cmd2.insert(2, "-d")
else:
cmd2.insert(2, str(-1 * data._hackersonly["min_SE_refmap_overlap"]))
cmd2.insert(2, "-d")
LOGGER.info("stdv: bedtools merge cmds: %s %s", cmd1, cmd2)
proc1 = sps.Popen(cmd1, stderr=sps.STDOUT, stdout=sps.PIPE)
proc2 = sps.Popen(cmd2, stderr=sps.STDOUT, stdout=sps.PIPE, stdin=proc1.stdout)
result = proc2.communicate()[0]
proc1.stdout.close()
if proc2.returncode:
raise IPyradWarningExit("error in %s: %s", cmd2, result)
nregions = len(result.strip().split("\n"))
LOGGER.info("bedtools_merge: Got
return result | Get all contiguous genomic regions with one or more overlapping
reads. This is the shell command we'll eventually run
bedtools bamtobed -i 1A_0.sorted.bam | bedtools merge [-d 100]
-i <input_bam> : specifies the input file to bed'ize
-d <int> : For PE set max distance between reads |
17,486 | def log_response(self, response):
if self.access_log:
extra = {"status": getattr(response, "status", 0)}
if isinstance(response, HTTPResponse):
extra["byte"] = len(response.body)
else:
extra["byte"] = -1
extra["host"] = "UNKNOWN"
if self.request is not None:
if self.request.ip:
extra["host"] = "{0}:{1}".format(
self.request.ip, self.request.port
)
extra["request"] = "{0} {1}".format(
self.request.method, self.request.url
)
else:
extra["request"] = "nil"
access_logger.info("", extra=extra) | Helper method provided to enable the logging of responses in case if
the :attr:`HttpProtocol.access_log` is enabled.
:param response: Response generated for the current request
:type response: :class:`sanic.response.HTTPResponse` or
:class:`sanic.response.StreamingHTTPResponse`
:return: None |
17,487 | def pdf(self, x, e=0., w=1., a=0.):
t = (x-e) / w
return 2. / w * stats.norm.pdf(t) * stats.norm.cdf(a*t) | probability density function
see: https://en.wikipedia.org/wiki/Skew_normal_distribution
:param x: input value
:param e:
:param w:
:param a:
:return: |
17,488 | def _extended_gcd(self, a, b):
s, old_s = 0, 1
t, old_t = 1, 0
r, old_r = b, a
while r:
quotient = old_r // r
old_r, r = r, old_r - quotient * r
old_s, s = s, old_s - quotient * s
old_t, t = t, old_t - quotient * t
return old_r, old_s, old_t | Extended Euclidean algorithm to solve Bezout's identity:
a*x + b*y = gcd(a, b)
Finds one particular solution for x, y: s, t
Returns: gcd, s, t |
17,489 | def determine_result(self, returncode, returnsignal, output, isTimeout):
if not output:
return
last = output[-1]
if isTimeout:
return
if returncode != 0:
return
if last is None:
return
elif in last:
return result.RESULT_TRUE_PROP
elif in last:
return result.RESULT_FALSE_REACH
else:
return result.RESULT_UNKNOWN | Parse the output of the tool and extract the verification result.
This method always needs to be overridden.
If the tool gave a result, this method needs to return one of the
benchexec.result.RESULT_* strings.
Otherwise an arbitrary string can be returned that will be shown to the user
and should give some indication of the failure reason
(e.g., "CRASH", "OUT_OF_MEMORY", etc.). |
17,490 | def send_batches(self, batch_list):
if isinstance(batch_list, BaseMessage):
batch_list = batch_list.SerializeToString()
return self._post(, batch_list) | Sends a list of batches to the validator.
Args:
batch_list (:obj:`BatchList`): the list of batches
Returns:
dict: the json result data, as a dict |
17,491 | def OnTextColor(self, event):
color = event.GetValue().GetRGB()
post_command_event(self, self.TextColorMsg, color=color) | Text color choice event handler |
17,492 | def get_form_value(self, form_key, object_brain_uid, default=None):
if form_key not in self.request.form:
return default
uid = object_brain_uid
if not api.is_uid(uid):
uid = api.get_uid(object_brain_uid)
values = self.request.form.get(form_key)
if isinstance(values, list):
if len(values) == 0:
return default
if len(values) > 1:
logger.warn("Multiple set of values for {}".format(form_key))
values = values[0]
return values.get(uid, default) | Returns a value from the request's form for the given uid, if any |
17,493 | def _remove_add_key(self, key):
if not hasattr(self, ):
return
if key in self._queue:
self._queue.remove(key)
self._queue.append(key)
if self.maxsize == 0:
return
while len(self._queue) > self.maxsize:
del self[self._queue[0]] | Move a key to the end of the linked list and discard old entries. |
17,494 | def reynolds(target, u0, b, temperature=):
value = u0*sp.exp(b*target[temperature])
return value | Uses exponential model by Reynolds [1] for the temperature dependence of
shear viscosity
Parameters
----------
target : OpenPNM Object
The object for which these values are being calculated. This
controls the length of the calculated array, and also provides
access to other necessary thermofluid properties.
u0, b : float, array_like
Coefficients of the viscosity exponential model (mu = u0*Exp(-b*T)
where T is the temperature in Kelvin
temperature : string
The dictionary key containing the temperature values (K). Can be
either a pore or throat array.
[1] Reynolds O. (1886). Phil Trans Royal Soc London, v. 177, p.157. |
17,495 | def address_by_interface(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915,
struct.pack(, bytes(ifname[:15], ))
)[20:24]) | Returns the IP address of the given interface name, e.g. 'eth0'
Parameters
----------
ifname : str
Name of the interface whose address is to be returned. Required.
Taken from this Stack Overflow answer: https://stackoverflow.com/questions/24196932/how-can-i-get-the-ip-address-of-eth0-in-python#24196955 |
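For reference, 0x8915 is the Linux SIOCGIFADDR ioctl request code. A Linux-only usage sketch (the interface name is an example; substitute one that exists on the host):

```python
# Requires a Linux host with the named interface up.
print(address_by_interface("eth0"))  # e.g. '192.168.1.10'
```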
17,496 | def status(name, sig=None):
*
if sig:
return __salt__[](sig)
contains_globbing = bool(re.search(r, name))
if contains_globbing:
services = fnmatch.filter(get_all(), name)
else:
services = [name]
results = {}
for service in services:
results[service] = __salt__[](service)
if contains_globbing:
return results
return results[name] | Return the status for a service.
If the name contains globbing, a dict mapping service name to PID or empty
string is returned.
.. versionchanged:: 2018.3.0
The service name can now be a glob (e.g. ``salt*``)
Args:
name (str): The name of the service to check
sig (str): Signature to use to find the service via ps
Returns:
string: PID if running, empty otherwise
dict: Maps service name to PID if running, empty string otherwise
CLI Example:
.. code-block:: bash
salt '*' service.status <service name> [service signature] |
17,497 | def decode(self, covertext):
if not isinstance(covertext, str):
raise InvalidInputException()
insufficient = (len(covertext) < self._fixed_slice)
if insufficient:
raise DecodeFailureError(
"Covertext is shorter than self._fixed_slice, can't decode.")
msg_len_header = self._encrypter.decryptOneBlock(
X[:DfaEncoderObject._COVERTEXT_HEADER_LEN_CIPHERTTEXT])
msg_len_header = msg_len_header[8:16]
msg_len = fte.bit_ops.bytes_to_long(
msg_len_header[:DfaEncoderObject._COVERTEXT_HEADER_LEN_PLAINTEXT])
retval = X[16:16 + msg_len]
retval += covertext[self._fixed_slice:]
ctxt_len = self._encrypter.getCiphertextLen(retval)
remaining_buffer = retval[ctxt_len:]
retval = retval[:ctxt_len]
retval = self._encrypter.decrypt(retval)
return retval, remaining_buffer | Given an input string ``unrank(X[:n]) || X[n:]`` returns ``X``. |
17,498 | def add_progress(self, count, symbol=,
color=None, on_color=None, attrs=None):
self._progress.add_progress(count, symbol, color, on_color, attrs) | Add a section of progress to the progressbar.
The progress is captured by "count" and displayed as a fraction
of the statusbar width proportional to this count over the total
progress displayed. The progress will be displayed using the "symbol"
character and the foreground and background colours and display style
determined by the "fg", "bg" and "style" parameters. For these,
use the colorama package to set up the formatting. |
17,499 | def adapt(self, d, x):
self.x_mem[:,1:] = self.x_mem[:,:-1]
self.x_mem[:,0] = x
self.d_mem[1:] = self.d_mem[:-1]
self.d_mem[0] = d
self.y_mem = np.dot(self.x_mem.T, self.w)
self.e_mem = self.d_mem - self.y_mem
dw_part1 = np.dot(self.x_mem.T, self.x_mem) + self.ide_eps
dw_part2 = np.linalg.solve(dw_part1, self.ide)
dw = np.dot(self.x_mem, np.dot(dw_part2, self.e_mem))
self.w += self.mu * dw | Adapt weights according to one desired value and its input.
**Args:**
* `d` : desired value (float)
* `x` : input array (1-dimensional array) |
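The update above is an affine-projection style step over a short memory of recent inputs, dw = X (XᵀX + εI)⁻¹ e. A minimal self-contained sketch of that computation (dimensions and constants are illustrative, not the class's actual configuration):

```python
import numpy as np

n_taps, mem, mu, eps = 4, 3, 0.5, 1e-3
rng = np.random.default_rng(0)

w = np.zeros(n_taps)
X = rng.normal(size=(n_taps, mem))   # columns: the last `mem` input vectors
d = rng.normal(size=mem)             # desired values for those inputs

y = X.T @ w                          # filter outputs for the stored inputs
e = d - y                            # errors
G = X.T @ X + eps * np.eye(mem)      # regularized Gram matrix
w += mu * (X @ np.linalg.solve(G, e))
print(w)
```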