 | code | docstring |
---|---|---|
374,900 | def includeme(config):
# NOTE: several string literals were lost in extraction; the prefixes below are
# reconstructed from the slice lengths (key[8:], key[16:]); the log messages and
# the tween dotted path are assumptions.
log.info('Configuring pyramid_htmlmin')
for key, val in config.registry.settings.items():
if key.startswith('htmlmin.'):
log.debug('%s = %s' % (key, val))
htmlmin_opts[key[8:]] = asbool(val)
if key.startswith('pyramid_htmlmin.'):
log.debug('%s = %s' % (key, val))
opts[key[16:]] = asbool(val)
config.add_tween('pyramid_htmlmin.tween_factory', under=INGRESS) | Add pyramid_htmlmin in your pyramid include list. |
374,901 | def delete_cache_security_group(name, region=None, key=None, keyid=None, profile=None, **args):
return _delete_resource(name, name_param='CacheSecurityGroupName',
desc='cache security group', res_type='cache_security_group',
region=region, key=key, keyid=keyid, profile=profile, **args) | Delete a cache security group.
Example:
.. code-block:: bash
salt myminion boto3_elasticache.delete_cache_security_group myelasticachesg |
374,902 | def Right(self, n = 1, dl = 0):
self.Delay(dl)
self.keyboard.tap_key(self.keyboard.right_key, n) | Press the right arrow key n times. |
374,903 | def flush(self):
chunks = []
chunks.append(self._compress(b'', lib.BROTLI_OPERATION_FLUSH))
while lib.BrotliEncoderHasMoreOutput(self._encoder) == lib.BROTLI_TRUE:
chunks.append(self._compress(b'', lib.BROTLI_OPERATION_FLUSH))
return b''.join(chunks) | Flush the compressor. This will emit the remaining output data, but
will not destroy the compressor. It can be used, for example, to ensure
that given chunks of content will decompress immediately. |
374,904 | def sphlat(r, colat, lons):
r = ctypes.c_double(r)
colat = ctypes.c_double(colat)
lons = ctypes.c_double(lons)
radius = ctypes.c_double()
lon = ctypes.c_double()
lat = ctypes.c_double()
libspice.sphlat_c(r, colat, lons, ctypes.byref(radius), ctypes.byref(lon),
ctypes.byref(lat))
return radius.value, lon.value, lat.value | Convert from spherical coordinates to latitudinal coordinates.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/sphlat_c.html
:param r: Distance of the point from the origin.
:type r: float
:param colat: Angle of the point from positive z axis (radians).
:type colat: float
:param lons: Angle of the point from the XZ plane (radians).
:type lons: float
:return:
Distance of a point from the origin,
Angle of the point from the XZ plane in radians,
Angle of the point from the XY plane in radians.
:rtype: tuple |
374,905 | def _create_storage(storage_service, trajectory=None, **kwargs):
kwargs_copy = kwargs.copy()
kwargs_copy['trajectory'] = trajectory
matching_kwargs = get_matching_kwargs(storage_service, kwargs_copy)
storage_service = storage_service(**matching_kwargs)
unused_kwargs = set(kwargs.keys()) - set(matching_kwargs.keys())
return storage_service, unused_kwargs | Creates a service from a constructor and checks which kwargs are not used |
374,906 | def lmean (inlist):
sum = 0
for item in inlist:
sum = sum + item
return sum/float(len(inlist)) | Returns the arithmetic mean of the values in the passed list.
Assumes a '1D' list, but will function on the 1st dim of an array(!).
Usage: lmean(inlist) |
374,907 | def node_link_graph(data, directed=False, attrs=_attrs):
directed = data.get('directed', directed)
graph = dn.DynGraph()
if directed:
graph = graph.to_directed()
id_ = attrs['id']
mapping = []
graph.graph = data.get('graph', {})
c = count()
for d in data['nodes']:
node = d.get(id_, next(c))
mapping.append(node)
nodedata = dict((make_str(k), v) for k, v in d.items() if k != id_)
graph.add_node(node, **nodedata)
for d in data['links']:
graph.add_interaction(d['source'], d["target"], d['time'])  # 'time' key assumed; original literal lost
return graph | Return graph from node-link data format.
Parameters
----------
data : dict
node-link formatted graph data
directed : bool
If True, and direction not specified in data, return a directed graph.
attrs : dict
A dictionary that contains three keys 'id', 'source', 'target'.
The corresponding values provide the attribute names for storing
Dynetx-internal graph data. Default value:
:samp:`dict(id='id', source='source', target='target')`.
Returns
-------
G : DyNetx graph
A DyNetx graph object
Examples
--------
>>> from dynetx.readwrite import json_graph
>>> G = dn.DynGraph([(1,2)])
>>> data = json_graph.node_link_data(G)
>>> H = json_graph.node_link_graph(data)
See Also
--------
node_link_data |
374,908 | def add_missing_row(
df: pd.DataFrame,
id_cols: List[str],
reference_col: str,
complete_index: Union[Dict[str, str], List[str]] = None,
method: str = None,
cols_to_keep: List[str] = None
) -> pd.DataFrame:
if cols_to_keep is None:
cols_for_index = [reference_col]
else:
cols_for_index = [reference_col] + cols_to_keep
check_params_columns_duplicate(id_cols + cols_for_index)
# NOTE: the helper column names below are assumptions; the original string literals were lost.
if method == 'between' or method == 'between_and_after':
df['_ref_min'] = df.groupby(id_cols)[reference_col].transform(min)
id_cols += ['_ref_min']
if method == 'between' or method == 'between_and_before':
df['_ref_max'] = df.groupby(id_cols)[reference_col].transform(max)
id_cols += ['_ref_max']
names = id_cols + cols_for_index
new_df = df.set_index(names)
index_values = df.groupby(id_cols).sum().index.values
if complete_index is None:
complete_index = df.groupby(cols_for_index).sum().index.values
elif isinstance(complete_index, dict):
if complete_index['type'] == 'date':
freq = complete_index['freq']
date_format = complete_index['format']
start = complete_index['start']
end = complete_index['end']
if isinstance(freq, dict):
freq = pd.DateOffset(**{k: int(v) for k, v in freq.items()})
complete_index = pd.date_range(start=start, end=end, freq=freq)
complete_index = complete_index.strftime(date_format)
else:
raise ParamsValueError(f'Unknown complete_index type: '
f'{complete_index["type"]}')  # error message assumed; original literal lost
if not isinstance(index_values[0], tuple):
index_values = [(x,) for x in index_values]
if not isinstance(complete_index[0], tuple):
complete_index = [(x,) for x in complete_index]
new_tuples_index = [x + y for x in index_values for y in complete_index]
new_index = pd.MultiIndex.from_tuples(new_tuples_index, names=names)
new_df = new_df.reindex(new_index).reset_index()
if method == 'between' or method == 'between_and_after':
new_df = new_df[new_df[reference_col] >= new_df['_ref_min']]  # uses the assumed helper column names from above
del new_df['_ref_min']
if method == 'between' or method == 'between_and_before':
new_df = new_df[new_df[reference_col] <= new_df['_ref_max']]
del new_df['_ref_max']
return new_df | Add missing row to a df base on a reference column
---
### Parameters
*mandatory :*
- `id_cols` (*list of str*): names of the columns used to create each group
- `reference_col` (*str*): name of the column used to identify missing rows
*optional :*
- `complete_index` (*list* or *dict*): [A, B, C] a list of values used to add missing rows.
It can also be a dict to declare a date range.
By default, use all values of reference_col.
- `method` (*str*): by default all missing rows are added. The possible values are :
- `"between"` : add missing rows having their value between min and max values for each group,
- `"between_and_after"` : add missing rows having their value bigger than min value for each group.
- `"between_and_before"` : add missing rows having their value smaller than max values for each group.
- `cols_to_keep` (*list of str*): name of other columns to keep, linked to the reference_col.
---
### Example
**Input**
YEAR | MONTH | NAME
:---:|:---:|:--:
2017|1|A
2017|2|A
2017|3|A
2017|1|B
2017|3|B
```cson
add_missing_row:
id_cols: ['NAME']
reference_col: 'MONTH'
```
**Output**
YEAR | MONTH | NAME
:---:|:---:|:--:
2017|1|A
2017|2|A
2017|3|A
2017|1|B
2017|2|B
2017|3|B |
374,909 | def watch(self, *keys):
if self.explicit_transaction:
raise RedisError("Cannot issue a WATCH after a MULTI")
self.watching = True
for key in keys:
self._watched_keys[key] = deepcopy(self.mock_redis.redis.get(self.mock_redis._encode(key))) | Put the pipeline into immediate execution mode.
Does not actually watch any keys. |
374,910 | def _parse_options(self, argv, location):
observed = []
while argv:
if argv[0].startswith('--'):
name = argv.pop(0)[2:]
if not name:
break
if name not in self.options:
raise InvalidOption(name)
option = self.options[name]
if not option.recurring:
if option in observed:
raise OptionRecurrenceError(name)
observed.append(option)
option.parse(argv, name, location)
elif argv[0].startswith('-'):
if argv[0] == '-':
break
block = argv.pop(0)[1:]
for abbreviation in block[:-1]:
if self.abbreviations[abbreviation].nargs != 0:
raise BadAbbreviationBlock(abbreviation, block, "options that require value arguments must be last in abbreviation blocks")
for abbreviation in block:
option = self.abbreviations[abbreviation]
if not option.recurring:
if option in observed:
raise OptionRecurrenceError(option.name)
observed.append(option)
option.parse(argv, '-' + abbreviation, location)
else:
break | Parse the options part of an argument list.
IN:
lsArgs <list str>:
List of arguments. Will be altered.
location <str>:
A user friendly string describing where this data came from. |
374,911 | def expand_effect_repertoire(self, new_purview=None):
return self.subsystem.expand_effect_repertoire(
self.effect.repertoire, new_purview) | See |Subsystem.expand_repertoire()|. |
374,912 | def eval_model(model, test, add_eval_metrics={}):
logger.info("Evaluate...")
model_metrics_values = model.evaluate(test[0], test[1], verbose=0,
batch_size=test[1].shape[0])
model_metrics = dict(zip(_listify(model.metrics_names),
_listify(model_metrics_values)))
y_true = test[1]
y_pred = model.predict(test[0], verbose=0)
eval_metrics = {k: v(y_true, y_pred) for k, v in add_eval_metrics.items()}
intersected_keys = set(model_metrics).intersection(set(eval_metrics))
if len(intersected_keys) > 0:
logger.warning("Some metric names intersect: {0}. Ignoring the add_eval_metrics ones".
format(intersected_keys))
eval_metrics = _delete_keys(eval_metrics, intersected_keys)
return merge_dicts(model_metrics, eval_metrics) | Evaluate model's performance on the test-set.
# Arguments
model: Keras model
test: test-dataset. Tuple of inputs `x` and target `y` - `(x, y)`.
add_eval_metrics: Additional evaluation metrics to use. Can be a dictionary or a list of functions
accepting arguments: `y_true`, `y_predicted`. Alternatively, you can provide names of functions from
the `concise.eval_metrics` module.
# Returns
dictionary with evaluation metrics |
374,913 | def _gaussian(x, amp, loc, std):
return amp * np.exp(-((x - loc)*(x - loc))/(2.0*std*std)) | This is a simple gaussian.
Parameters
----------
x : np.array
The items at which the Gaussian is evaluated.
amp : float
The amplitude of the Gaussian.
loc : float
The central value of the Gaussian.
std : float
The standard deviation of the Gaussian.
Returns
-------
np.array
Returns the Gaussian evaluated at the items in `x`, using the provided
parameters of `amp`, `loc`, and `std`. |
374,914 | def remove_stream_handlers(logger=None):
if not isinstance(logger, logging.Logger):
logger = logging.getLogger(logger)
new_handlers = []
for handler in logger.handlers:
if (isinstance(handler, logging.FileHandler) or
isinstance(handler, logging.NullHandler) or
(isinstance(handler, logging.Handler) and not
isinstance(handler, logging.StreamHandler))):
new_handlers.append(handler)
logger.handlers = new_handlers | Remove only stream handlers from the specified logger
:param logger: logging name or object to modify, defaults to root logger |
374,915 | def aggregate_detail(slug_list, with_data_table=False):
r = get_r()
metrics_data = []
granularities = r._granularities()
keys = [, , , , , , ]
key_mapping = {gran: key for gran, key in zip(GRANULARITIES, keys)}
keys = [key_mapping[gran] for gran in granularities]
} | Template Tag to display multiple metrics.
* ``slug_list`` -- A list of slugs to display
* ``with_data_table`` -- if True, prints the raw data in a table. |
374,916 | def find_last_true(sorted_list, true_criterion):
if not true_criterion(sorted_list[0]):
raise ValueError
if true_criterion(sorted_list[-1]):
return sorted_list[-1]
lower, upper = 0, len(sorted_list) - 1
index = int((lower + upper) / 2.0)
while 1:
if true_criterion(sorted_list[index]):
if true_criterion(sorted_list[index + 1]):
lower = index
index = int((index + upper) / 2.0)
else:
return index
else:
upper = index
index = int((lower + index) / 2.0) | Suppose we have a list of items [item1, item2, ..., itemN].
:type array: list
:param array: an iterable object that supports indexing
:param x: a comparable value
If we do a mapping::
>>> def true_criterion(item):
... return item <= 6
>>> [true_criterion(item) for item in sorted_list]
[True, True, ... True(last true), False, False, ... False]
this function returns the index of the last true item.
We could do the mapping for all items and run a binary search to find the
index, but sometimes the mapping function is expensive. This method avoids
running the mapping function for all items.
Example::
array = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
index = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
criterion = def true_criterion(x): return x <= 6
boolean = [1, 1, 1, 1, 1, 1, 1, 0, 0, 0]
Solution::
# first, we check index = int((0 + 9)/2.0) = 4, it's True.
# Then check array[4 + 1], it's still True.
# Then we jump to int((4 + 9)/2.0) = 6, it's True.
# Then check array[6 + 1], it's False. So array[6] is the one we need.
>>> find_last_true([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], true_criterion)
6
**Chinese documentation (translated)**
Purpose: given a sorted sequence in which every element up to some point satisfies
a condition and every element after it does not, this function returns the last
element that satisfies the condition. When evaluating the condition is expensive,
this saves a great deal of computation. For example, to find the first page among
page1 to page999 that starts returning a 404 error: if it is page 400, testing the
pages one by one takes 400 checks, while a binary search over 0 - 999 needs only
about 9 checks (2 ** 9 = 512).
Algorithm:
Check the middle element. If it is False, check the middle of the unchecked elements
on the left; if it is True, check the middle of the unchecked elements on the right.
Repeat until the checked element is True and the next one is False, which means we
have found it.
Example::
sorted array [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
index        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
condition    less than or equal to 6
truth table  [1, 1, 1, 1, 1, 1, 1, 0, 0, 0]
Solution::
First check ``index = int((0+9)/2.0) = 4``: True.
Check array[4+1]: also True, so jump to ``int((4+9)/2.0) = 6``: True.
Then check array[6+1]: False; clearly, we have found it. |
374,917 | def goto(reference_beats,
estimated_beats,
goto_threshold=0.35,
goto_mu=0.2,
goto_sigma=0.2):
validate(reference_beats, estimated_beats)
if estimated_beats.size == 0 or reference_beats.size == 0:
return 0.
beat_error = np.ones(reference_beats.shape[0])
paired = np.zeros(reference_beats.shape[0])
goto_criteria = 0
for n in range(1, reference_beats.shape[0]-1):
previous_interval = 0.5*(reference_beats[n] - reference_beats[n-1])
window_min = reference_beats[n] - previous_interval
next_interval = 0.5*(reference_beats[n+1] - reference_beats[n])
window_max = reference_beats[n] + next_interval
beats_in_window = np.logical_and((estimated_beats >= window_min),
(estimated_beats < window_max))
if beats_in_window.sum() == 0 or beats_in_window.sum() > 1:
paired[n] = 0
beat_error[n] = 1
else:
paired[n] = 1
offset = estimated_beats[beats_in_window] - reference_beats[n]
if offset < 0:
beat_error[n] = offset/previous_interval
else:
beat_error[n] = offset/next_interval
incorrect_beats = np.flatnonzero(np.abs(beat_error) > goto_threshold)
if incorrect_beats.shape[0] < 3:
track = beat_error[incorrect_beats[0] + 1:incorrect_beats[-1] - 1]
goto_criteria = 1
else:
track_len = np.max(np.diff(incorrect_beats))
track_start = np.flatnonzero(np.diff(incorrect_beats) == track_len)[0]
if track_len - 1 > .25*(reference_beats.shape[0] - 2):
goto_criteria = 1
start_beat = incorrect_beats[track_start]
end_beat = incorrect_beats[track_start + 1]
track = beat_error[start_beat:end_beat + 1]
if goto_criteria:
if np.mean(np.abs(track)) < goto_mu \
and np.std(track, ddof=1) < goto_sigma:
goto_criteria = 3
return 1.0*(goto_criteria == 3) | Calculate Goto's score, a binary 1 or 0 depending on some specific
heuristic criteria
Examples
--------
>>> reference_beats = mir_eval.io.load_events('reference.txt')
>>> reference_beats = mir_eval.beat.trim_beats(reference_beats)
>>> estimated_beats = mir_eval.io.load_events('estimated.txt')
>>> estimated_beats = mir_eval.beat.trim_beats(estimated_beats)
>>> goto_score = mir_eval.beat.goto(reference_beats, estimated_beats)
Parameters
----------
reference_beats : np.ndarray
reference beat times, in seconds
estimated_beats : np.ndarray
query beat times, in seconds
goto_threshold : float
Threshold of beat error for a beat to be "correct"
(Default value = 0.35)
goto_mu : float
The mean of the beat errors in the continuously correct
track must be less than this
(Default value = 0.2)
goto_sigma : float
The std of the beat errors in the continuously correct track must
be less than this
(Default value = 0.2)
Returns
-------
goto_score : float
Either 1.0 or 0.0 if some specific criteria are met |
374,918 | def h2i(self, pkt, seconds):
if seconds is None:
seconds = 0
tmp_short = (seconds >> 32) & 0xFFFF
tmp_int = seconds & 0xFFFFFFFF
return struct.pack("!HI", tmp_short, tmp_int) | Convert the number of seconds since 1-Jan-70 UTC to the packed
representation. |
374,919 | def minimize(self, time, variables, **kwargs):
deltas = self.step(time=time, variables=variables, **kwargs)
with tf.control_dependencies(control_inputs=deltas):
return tf.no_op() | Performs an optimization step.
Args:
time: Time tensor.
variables: List of variables to optimize.
**kwargs: Additional optimizer-specific arguments. The following arguments are used
by some optimizers:
- arguments: Dict of arguments for callables, like fn_loss.
- fn_loss: A callable returning the loss of the current model.
- fn_reference: A callable returning the reference values, in case of a comparative
loss.
- fn_kl_divergence: A callable returning the KL-divergence relative to the
current model.
- sampled_loss: A sampled loss (integer).
- return_estimated_improvement: Returns the estimated improvement resulting from
the natural gradient calculation if true.
- source_variables: List of source variables to synchronize with.
- global_variables: List of global variables to apply the proposed optimization
step to.
Returns:
The optimization operation. |
374,920 | def backup(self, backup_name, folder_key=None, folder_name=None):
folder = self._find_or_create_folder(folder_key, folder_name)
drive_service = self.drive_service
try:
source_rsrc = drive_service.files().get(fileId=self.document_key).execute()
except Exception, e:
logger.exception("Google API error. %s", e)
raise e
backup = self._create_new_or_copy(source_doc=source_rsrc,
target_name=backup_name,
folder=folder,
sheet_description="backup")
backup_key = backup['id']
return backup_key | Copies the google spreadsheet to the backup_name and folder specified.
Args:
backup_name (str): The name of the backup document to create.
folder_key (Optional) (str): The key of a folder that the new copy will
be moved to.
folder_name (Optional) (str): Like folder_key, references the folder to move a
backup to. If the folder can't be found, sheetsync will create it. |
374,921 | def _database_create(self, engine, database):
logger.info('Creating database "%s" in %s', database, engine)  # log message assumed; original literal lost
database_operation(engine, 'create', database)
url = copy(engine.url)
url.database = database
return str(url) | Create a new database and return a new url representing
a connection to the new database |
374,922 | def get_job(self, job_id):
try:
return RawMantaClient.get_job(self, job_id)
except errors.MantaAPIError as ex:
if ex.res.status != 404:
raise
mpath = "/%s/jobs/%s/job.json" % (self.account, job_id)
content = self.get_object(mpath, accept='application/json')
try:
return json.loads(content)
except ValueError:
raise errors.MantaError( % content) | GetJob
https://apidocs.joyent.com/manta/api.html#GetJob
with the added sugar that it will retrieve the archived job if it has
been archived, per:
https://apidocs.joyent.com/manta/jobs-reference.html#job-completion-and-archival |
374,923 | def get_device_by_name(self, device_name):
found_device = None
for device in self.get_devices():
if device.name == device_name:
found_device = device
break
if found_device is None:
logger.debug(.format(device_name))
return found_device | Search the list of connected devices by name.
device_name param is the string name of the device |
374,924 | def add_stock(self, product_id, sku_info, quantity):
return self._post(
'merchant/stock/add',
data={
"product_id": product_id,
"sku_info": sku_info,
"quantity": quantity
}
) | Increase stock.
:param product_id: product ID
:param sku_info: SKU info, in the format "id1:vid1;id2:vid2"; if the product has a single uniform spec, pass an empty string here
:param quantity: the stock quantity to add
:return: the returned JSON data |
374,925 | def calc_tc_v1(self):
con = self.parameters.control.fastaccess
inp = self.sequences.inputs.fastaccess
flu = self.sequences.fluxes.fastaccess
for k in range(con.nmbzones):
flu.tc[k] = inp.t-con.tcalt[k]*(con.zonez[k]-con.zrelt) | Adjust the measured air temperature to the altitude of the
individual zones.
Required control parameters:
|NmbZones|
|TCAlt|
|ZoneZ|
|ZRelT|
Required input sequence:
|hland_inputs.T|
Calculated flux sequences:
|TC|
Basic equation:
:math:`TC = T - TCAlt \\cdot (ZoneZ-ZRelT)`
Examples:
Prepare two zones, the first one lying at the reference
height and the second one 200 meters above:
>>> from hydpy.models.hland import *
>>> parameterstep('1d')
>>> nmbzones(2)
>>> zrelt(2.0)
>>> zonez(2.0, 4.0)
Applying the usual temperature lapse rate of 0.6°C/100m does
not affect the temperature of the first zone but reduces the
temperature of the second zone by 1.2°C:
>>> tcalt(0.6)
>>> inputs.t = 5.0
>>> model.calc_tc_v1()
>>> fluxes.tc
tc(5.0, 3.8) |
374,926 | def _send_consumer_aware_request(self, group, payloads, encoder_fn, decoder_fn):
original_ordering = [(p.topic, p.partition) for p in payloads]
broker = self._get_coordinator_for_group(group)
responses = {}
request_id = self._next_id()
log.debug(, request_id, broker, payloads)
request = encoder_fn(client_id=self.client_id,
correlation_id=request_id, payloads=payloads)
try:
host, port, afi = get_ip_port_afi(broker.host)
conn = self._get_conn(host, broker.port, afi)
except KafkaConnectionError as e:
log.warning(
, request_id, broker, e)
for payload in payloads:
topic_partition = (payload.topic, payload.partition)
responses[topic_partition] = FailedPayloadsError(payload)
else:
future = conn.send(request_id, request)
while not future.is_done:
for r, f in conn.recv():
f.success(r)
if decoder_fn is None:
log.debug(
, request_id)
for payload in payloads:
topic_partition = (payload.topic, payload.partition)
responses[topic_partition] = None
return []
if future.failed():
log.warning(
,
request_id, broker, future.exception)
for payload in payloads:
topic_partition = (payload.topic, payload.partition)
responses[topic_partition] = FailedPayloadsError(payload)
else:
response = future.value
_resps = []
for payload_response in decoder_fn(response):
topic_partition = (payload_response.topic,
payload_response.partition)
responses[topic_partition] = payload_response
_resps.append(payload_response)
log.debug(, request_id, _resps)
return [responses[tp] for tp in original_ordering] | Send a list of requests to the consumer coordinator for the group
specified using the supplied encode/decode functions. As the payloads
that use consumer-aware requests do not contain the group (e.g.
OffsetFetchRequest), all payloads must be for a single group.
Arguments:
group: the name of the consumer group (str) the payloads are for
payloads: list of object-like entities with topic (str) and
partition (int) attributes; payloads with duplicate
topic+partition are not supported.
encode_fn: a method to encode the list of payloads to a request body,
must accept client_id, correlation_id, and payloads as
keyword arguments
decode_fn: a method to decode a response body into response objects.
The response objects must be object-like and have topic
and partition attributes
Returns:
List of response objects in the same order as the supplied payloads |
374,927 | def _process_execute_error(self, msg):
content = msg['content']
if content['ename'] == 'SystemExit':
keepkernel = content['evalue'] == '-k' or content['evalue'] == 'True'  # compared literals assumed; originals lost
self._keep_kernel_on_exit = keepkernel
self.exit_requested.emit(self)
else:
traceback = '\n'.join(content['traceback'])
self._append_plain_text(traceback) | Process a reply for an execution request that resulted in an error. |
374,928 | def _fit_stage(self, i, X, y, y_pred, sample_weight, sample_mask,
random_state, scale, X_idx_sorted, X_csc=None, X_csr=None):
assert sample_mask.dtype == numpy.bool
loss = self.loss_
do_dropout = self.dropout_rate > 0. and 0 < i < len(scale) - 1
for k in range(loss.K):
residual = loss.negative_gradient(y, y_pred, k=k,
sample_weight=sample_weight)
tree = DecisionTreeRegressor(
criterion=self.criterion,
splitter='best',
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
min_weight_fraction_leaf=self.min_weight_fraction_leaf,
min_impurity_split=self.min_impurity_split,
min_impurity_decrease=self.min_impurity_decrease,
max_features=self.max_features,
max_leaf_nodes=self.max_leaf_nodes,
random_state=random_state,
presort=self.presort)
if self.subsample < 1.0:
sample_weight = sample_weight * sample_mask.astype(numpy.float64)
X = X_csr if X_csr is not None else X
tree.fit(X, residual, sample_weight=sample_weight,
check_input=False, X_idx_sorted=X_idx_sorted)
self.estimators_[i, k] = tree
if do_dropout:
drop_model, n_dropped = _sample_binomial_plus_one(self.dropout_rate, i + 1, random_state)
scale[i + 1] = 1. / (n_dropped + 1.)
y_pred[:, k] = 0
for m in range(i + 1):
if drop_model[m] == 1:
scale[m] *= n_dropped / (n_dropped + 1.)
else:
y_pred[:, k] += self.learning_rate * scale[m] * self.estimators_[m, k].predict(X).ravel()
else:
loss.update_terminal_regions(tree.tree_, X, y, residual, y_pred,
sample_weight, sample_mask,
self.learning_rate, k=k)
return y_pred | Fit another stage of ``n_classes_`` trees to the boosting model. |
374,929 | def server(self):
conn = self.connection_class(self)
with self.mutex:
self.connections.append(conn)
return conn | Creates and returns a ServerConnection object. |
374,930 | def _goto(self, pose, duration, wait, accurate):
kwargs = {}
if not accurate:
kwargs['max_iter'] = 3
q0 = self.convert_to_ik_angles(self.joints_position)
q = self.inverse_kinematics(pose, initial_position=q0, **kwargs)
joints = self.convert_from_ik_angles(q)
last = self.motors[-1]
for m, pos in list(zip(self.motors, joints)):
m.goto_position(pos, duration,
wait=False if m != last else wait) | Goes to a given cartesian pose.
:param matrix pose: homogeneous matrix representing the target position
:param float duration: move duration
:param bool wait: whether to wait for the end of the move
:param bool accurate: trade-off between accurate solution and computation time. By default, use the not so accurate but fast version. |
374,931 | def obfn_g0var(self):
return self.var_y0() if self.opt['AuxVarObj'] else \
self.cnst_A0(None, self.Xf) - self.cnst_c0() | Variable to be evaluated in computing
:meth:`.ADMMTwoBlockCnstrnt.obfn_g0`, depending on the ``AuxVarObj``
option value. |
374,932 | def post_cleanup(self):
targetNode = self.article.top_node
node = self.add_siblings(targetNode)
for e in self.parser.getChildren(node):
e_tag = self.parser.getTag(e)
if e_tag != 'p':
if self.is_highlink_density(e) \
or self.is_table_and_no_para_exist(e) \
or not self.is_nodescore_threshold_met(node, e):
self.parser.remove(e)
return node | \
remove any divs that looks like non-content,
clusters of links, or paras with no gusto |
374,933 | def _truncate_to_field(model, field_name, value):
field = model._meta.get_field(field_name)
if len(value) > field.max_length:
midpoint = field.max_length // 2
len_after_midpoint = field.max_length - midpoint
first = value[:midpoint]
sep = '...'  # separator literal assumed; original lost in extraction
last = value[len(value) - len_after_midpoint + len(sep):]
value = sep.join([first, last])
return value | Shorten data to fit in the specified model field.
If the data were too big for the field, it would cause a failure to
insert, so we shorten it, truncating in the middle (because
valuable information often shows up at the end). |
374,934 | def list(self, service_rec=None, host_rec=None, hostfilter=None):
return self.send.service_list(service_rec, host_rec, hostfilter) | List a specific service or all services
:param service_rec: t_services.id
:param host_rec: t_hosts.id
:param hostfilter: Valid hostfilter or None
:return: [(svc.t_services.id, svc.t_services.f_hosts_id, svc.t_hosts.f_ipaddr,
svc.t_hosts.f_hostname, svc.t_services.f_proto,
svc.t_services.f_number, svc.t_services.f_status, svc.t_services.f_name,
svc.t_services.f_banner), ...] |
374,935 | def _get_headers(self):
# NOTE: header names come from the docstring below; the user-agent separators and
# the authorization scheme are assumptions (the original string literals were lost).
user_agent = __api_lib_name__ + '/' + __version__ + ' ' + \
PYTHON_VERSION
headers = {'User-agent': user_agent,
'Content-Type': 'application/json'}
if self.key:
headers['Authorization'] = 'Bearer ' + self.key
return headers | Get all the headers we're going to need:
1. Authorization
2. Content-Type
3. User-agent
Note that the User-agent string contains the library name, the
libary version, and the python version. This will help us track
what people are using, and where we should concentrate our
development efforts. |
374,936 | def _set_mldVlan(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=mldVlan.mldVlan, is_container=, presence=False, yang_name="mldVlan", rest_name="mld", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: u, u: None, u: u}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "container",
: ,
})
self.__mldVlan = t
if hasattr(self, ):
self._set() | Setter method for mldVlan, mapped from YANG variable /interface_vlan/interface/vlan/ipv6/mldVlan (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_mldVlan is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mldVlan() directly. |
374,937 | def collection(data, bins=10, *args, **kwargs):
from physt.histogram_collection import HistogramCollection
if hasattr(data, "columns"):
data = {column: data[column] for column in data.columns}
return HistogramCollection.multi_h1(data, bins, **kwargs) | Create histogram collection with shared binnning. |
374,938 | def exec_func_src3(func, globals_, sentinal=None, verbose=False,
start=None, stop=None):
import utool as ut
sourcecode = ut.get_func_sourcecode(func, stripdef=True, stripret=True)
if sentinal is not None:
sourcecode = ut.replace_between_tags(sourcecode, , sentinal)
if start is not None or stop is not None:
sourcecode = '\n'.join(sourcecode.splitlines()[slice(start, stop)])
if verbose:
print(ut.color_text(sourcecode, ))
six.exec_(sourcecode, globals_) | execs a func and returns requested local vars.
Does not modify globals unless update=True (or in IPython)
SeeAlso:
ut.execstr_funckw |
374,939 | def next_conkey(self, conkey):
if conkey in self.conditions:
return conkey
conkeys = self.sorted_conkeys(prefix=conkey)
if not conkeys:
return conkey
for candidate in conkeys:
if self.conditions[candidate] is None:
return candidate
i = self.cond_int(candidate)
return re.sub(r'\d+$', str(i + 1), candidate)
string. Example, if 'startcond3' and 'startcond5' exist, this
will return 'startcond6' if 'startcond5' value is not None,
else startcond5 is returned.
It is assumed conkey is a valid condition key.
.. warning::
Under construction. There is work to do. This function in
combination with the pack.add_condition. But now it's time for
bed. |
374,940 | def make_measurement(name,
channels,
lumi=1.0, lumi_rel_error=0.1,
output_prefix=,
POI=None,
const_params=None,
verbose=False):
if verbose:
llog = log[]
llog.info("creating measurement {0}".format(name))
if not isinstance(channels, (list, tuple)):
channels = [channels]
meas = Measurement(.format(name), )
meas.SetOutputFilePrefix(output_prefix)
if POI is not None:
if isinstance(POI, string_types):
if verbose:
llog.info("setting POI {0}".format(POI))
meas.SetPOI(POI)
else:
if verbose:
llog.info("adding POIs {0}".format(.join(POI)))
for p in POI:
meas.AddPOI(p)
if verbose:
llog.info("setting lumi={0:f} +/- {1:f}".format(lumi, lumi_rel_error))
meas.lumi = lumi
meas.lumi_rel_error = lumi_rel_error
for channel in channels:
if verbose:
llog.info("adding channel {0}".format(channel.GetName()))
meas.AddChannel(channel)
if const_params is not None:
if verbose:
llog.info("adding constant parameters {0}".format(
.join(const_params)))
for param in const_params:
meas.AddConstantParam(param)
return meas | Create a Measurement from a list of Channels |
374,941 | def set_value(self, control, value=None):
func = getattr(_xinput, 'Set' + control)  # attribute prefix assumed; original literal lost
if 'Axis' in control:
target_type = c_short
if self.percent:
target_value = int(32767 * value)
else:
target_value = value
elif 'Btn' in control:
target_type = c_bool
target_value = bool(value)
elif 'Trigger' in control:
target_type = c_byte
if self.percent:
target_value = int(255 * value)
else:
target_value = value
elif 'Dpad' in control:
target_type = c_int
target_value = int(value)
func(c_uint(self.id), target_type(target_value)) | Set a value on the controller
If percent is True all controls will accept a value between -1.0 and 1.0
If not then:
Triggers are 0 to 255
Axis are -32768 to 32767
Control List:
AxisLx , Left Stick X-Axis
AxisLy , Left Stick Y-Axis
AxisRx , Right Stick X-Axis
AxisRy , Right Stick Y-Axis
BtnBack , Menu/Back Button
BtnStart , Start Button
BtnA , A Button
BtnB , B Button
BtnX , X Button
BtnY , Y Button
BtnThumbL , Left Thumbstick Click
BtnThumbR , Right Thumbstick Click
BtnShoulderL , Left Shoulder Button
BtnShoulderR , Right Shoulder Button
Dpad , Set Dpad Value (0 = Off, Use DPAD_### Constants)
TriggerL , Left Trigger
TriggerR , Right Trigger |
374,942 | def geom_find_group(g, atwts, pr_ax, mom, tt, \
nmax=_DEF.SYMM_MATCH_NMAX, \
tol=_DEF.SYMM_MATCH_TOL, \
dig=_DEF.SYMM_ATWT_ROUND_DIGITS,
avmax=_DEF.SYMM_AVG_MAX):
g = make_nd_vec(g, nd=None, t=np.float64, norm=False)
atwts = make_nd_vec(atwts, nd=None, t=np.float64, norm=False)
g_coord = g.reshape((g.shape[0] // 3, 3))
if tt == ETT.SPHERICAL:
ax_midpts = []
for atwt in np.unique(atwts):
g_atwt = g_subset(g, atwts, atwt, dig)
if g_atwt.shape[0] > 3:
g_atwt = g_atwt.reshape((g_atwt.shape[0] // 3, 3))
for tup in nCr(range(g_atwt.shape[0]), 2):
ax_midpts.append(np.add(*g_atwt[tup,:]))
ax_midpts = np.array(ax_midpts)
order = i = 0
while order < 2 and i < g_coord.shape[0]:
ax = g_coord[i,:]
if spla.norm(ax) > PRM.ZERO_VEC_TOL:
order, refl = geom_check_axis(g, atwts, ax, nmax, \
tol)
i += 1
if order >= 2:
ref_Axis = Axis(vector=ax, order=order, refl=refl)
else:
i = 0
while order < 2 and i < len(ax_midpts):
ax = ax_midpts[i,:]
if spla.norm(ax) > PRM.ZERO_VEC_TOL:
order, refl = geom_check_axis(g, atwts, ax, \
nmax, tol)
i += 1
if order < 2:
raise SymmError(SymmError.NOTFOUND,
"Cubic point group not found in spherical top " +
"molecule.", "geom_find_group()")
ref_Axis = Axis(vector=ax, order=order, refl=refl)
return ref_Axis
return prop_list | [Find all(?) proper rotation axes (n > 1) and reflection planes.]
.. todo:: Complete geom_find_axes docstring INCLUDING NEW HEADER LINE
DEPENDS on principal axes and moments being sorted such that:
I_A <= I_B <= I_C
Logic flow developed using:
1) http://symmetry.otterbein.edu/common/images/flowchart.pdf
Accessed 6 Mar 2015 (flow chart)
2) Largent et al. J Comp Chem 22: 1637-1642 (2012).
doi: 10.1002/jcc.22995
Helpful examples and descriptions of point groups from:
1) Wilson, Decius & Cross. "Molecular Vibrations." New York:
Dover (1980), pp 82-85.
2) "Molecular Structures of Organic Compounds -- Symmetry of
Molecules." Website of Prof. Dr. Stefan Immel, TU Darmstadt.
http://http://csi.chemie.tu-darmstadt.de/ak/immel/script/
redirect.cgi?filename=http://csi.chemie.tu-darmstadt.de/ak/
immel/tutorials/symmetry/index7.html. Accessed 6 Mar 2015.
Rotational symmetry numbers defined per:
Irikura, K. K. "Thermochemistry: Appendix B: Essential Statistical
Thermodynamics." Table II. NIST Computational Chemistry Comparison
& Benchmark Database. Online resource: http://cccbdb.nist.gov/
thermo.asp. Accessed 6 Mar 2015. |
374,943 | def fragment_fromstring(html, create_parent=False, base_url=None,
parser=None, **kw):
if parser is None:
parser = html_parser
accept_leading_text = bool(create_parent)
elements = fragments_fromstring(
html, parser=parser, no_leading_text=not accept_leading_text,
base_url=base_url, **kw)
if create_parent:
if not isinstance(create_parent, basestring):
create_parent = 'div'
new_root = Element(create_parent)
if elements:
if isinstance(elements[0], basestring):
new_root.text = elements[0]
del elements[0]
new_root.extend(elements)
return new_root
if not elements:
raise etree.ParserError('Document is empty')
if len(elements) > 1:
raise etree.ParserError(
"Multiple elements found (%s)"
% ', '.join([_element_name(e) for e in elements]))
el = elements[0]
if el.tail and el.tail.strip():
raise etree.ParserError(
"Element followed by text: %r" % el.tail)
el.tail = None
return el | Parses a single HTML element; it is an error if there is more than
one element, or if anything but whitespace precedes or follows the
element.
If ``create_parent`` is true (or is a tag name) then a parent node
will be created to encapsulate the HTML in a single element. In this
case, leading or trailing text is also allowed, as are multiple elements
as result of the parsing.
Passing a ``base_url`` will set the document's ``base_url`` attribute
(and the tree's docinfo.URL). |
374,944 | def finalize(self):
super(StatisticsConsumer, self).finalize()
self.result = zip(self.grid, map(self.statistics, self.result)) | finalize for StatisticsConsumer |
374,945 | def _reverse_rounding_method(method):
if method is RoundingMethods.ROUND_UP:
return RoundingMethods.ROUND_DOWN
if method is RoundingMethods.ROUND_DOWN:
return RoundingMethods.ROUND_UP
if method is RoundingMethods.ROUND_HALF_UP:
return RoundingMethods.ROUND_HALF_DOWN
if method is RoundingMethods.ROUND_HALF_DOWN:
return RoundingMethods.ROUND_HALF_UP
if method in \
(RoundingMethods.ROUND_TO_ZERO, RoundingMethods.ROUND_HALF_ZERO):
return method
raise BasesAssertError() | Reverse meaning of ``method`` between positive and negative. |
374,946 | def preserve_shape(func):
@wraps(func)
def wrapped_function(img, *args, **kwargs):
shape = img.shape
result = func(img, *args, **kwargs)
result = result.reshape(shape)
return result
return wrapped_function | Preserve shape of the image. |
374,947 | def _function_add_node(self, cfg_node, function_addr):
snippet = self._to_snippet(cfg_node=cfg_node)
self.kb.functions._add_node(function_addr, snippet) | Adds node to function manager, converting address to CodeNode if
possible
:param CFGNode cfg_node: A CFGNode instance.
:param int function_addr: Address of the current function.
:return: None |
374,948 | def move(self, key, folder):
path, host, flags = self._exists(key)
self._invalidate_cache()
newpath = joinpath(
folder.base,
folder.get_name(),
"cur",
basename(path)
)
self.filesystem.rename(path, newpath)
folder._invalidate_cache() | Move the specified key to folder.
folder must be an MdFolder instance. MdFolders can be obtained
through the 'folders' method call. |
374,949 | def root_mean_square(X):
segment_width = X.shape[1]
return np.sqrt(np.sum(X * X, axis=1) / segment_width) | root mean square for each variable in the segmented time series |
374,950 | def add_url_rule(
self,
path: str,
endpoint: Optional[str]=None,
view_func: Optional[Callable]=None,
methods: Optional[Iterable[str]]=None,
defaults: Optional[dict]=None,
host: Optional[str]=None,
subdomain: Optional[str]=None,
*,
provide_automatic_options: Optional[bool]=None,
is_websocket: bool=False,
strict_slashes: bool=True,
) -> None:
endpoint = endpoint or _endpoint_from_view_func(view_func)
handler = ensure_coroutine(view_func)
if methods is None:
methods = getattr(view_func, 'methods', [])
methods = cast(Set[str], set(methods))
required_methods = set(getattr(view_func, 'required_methods', set()))
if provide_automatic_options is None:
automatic_options = getattr(view_func, 'provide_automatic_options', None)
if automatic_options is None:
automatic_options = 'OPTIONS' not in methods
else:
automatic_options = provide_automatic_options
if automatic_options:
required_methods.add('OPTIONS')
methods.update(required_methods)
if not self.url_map.host_matching and (host is not None or subdomain is not None):
raise RuntimeError()
if host is not None and subdomain is not None:
raise ValueError()
if subdomain is not None:
if self.config['SERVER_NAME'] is None:
raise RuntimeError()
host = f"{subdomain}.{self.config['SERVER_NAME']}"
elif host is None and self.url_map.host_matching:
host = self.config['SERVER_NAME']
if host is None:
raise RuntimeError(
,
)
self.url_map.add(
self.url_rule_class(
path, methods, endpoint, host=host, provide_automatic_options=automatic_options,
defaults=defaults, is_websocket=is_websocket, strict_slashes=strict_slashes,
),
)
if handler is not None:
old_handler = self.view_functions.get(endpoint)
if getattr(old_handler, , False):
old_handler = old_handler.__wrapped__
if old_handler is not None and old_handler != view_func:
raise AssertionError(f"Handler is overwriting existing for endpoint {endpoint}")
self.view_functions[endpoint] = handler | Add a route/url rule to the application.
This is designed to be used on the application directly. An
example usage,
.. code-block:: python
def route():
...
app.add_url_rule('/', route)
Arguments:
path: The path to route on, should start with a ``/``.
func: Callable that returns a response.
methods: List of HTTP verbs the function routes.
endpoint: Optional endpoint name, if not present the
function name is used.
defaults: A dictionary of variables to provide automatically, use
to provide a simpler default path for a route, e.g. to allow
for ``/book`` rather than ``/book/0``,
.. code-block:: python
@app.route('/book', defaults={'page': 0})
@app.route('/book/<int:page>')
def book(page):
...
host: The full host name for this route (should include subdomain
if needed) - cannot be used with subdomain.
subdomain: A subdomain for this specific route.
provide_automatic_options: Optionally False to prevent
OPTION handling.
strict_slashes: Strictly match the trailing slash present in the
path. Will redirect a leaf (no slash) to a branch (with slash). |
374,951 | def send_text(self, text):
return self.client.api.send_message(self.room_id, text) | Send a plain text message to the room. |
374,952 | def uniform_discr_frompartition(partition, dtype=None, impl='numpy', **kwargs):
if not isinstance(partition, RectPartition):
raise TypeError(
.format(partition))
if not partition.is_uniform:
raise ValueError()
if dtype is not None:
dtype = np.dtype(dtype)
fspace = FunctionSpace(partition.set, out_dtype=dtype)
ds_type = tspace_type(fspace, impl, dtype)
if dtype is None:
dtype = ds_type.default_dtype()
weighting = kwargs.pop('weighting', None)
exponent = kwargs.pop('exponent', 2.0)
if weighting is None and is_numeric_dtype(dtype):
if exponent == float('inf') or partition.ndim == 0:
weighting = 1.0
else:
weighting = partition.cell_volume
tspace = ds_type(partition.shape, dtype, exponent=exponent,
weighting=weighting)
return DiscreteLp(fspace, partition, tspace, **kwargs) | Return a uniformly discretized L^p function space.
Parameters
----------
partition : `RectPartition`
Uniform partition to be used for discretization.
It defines the domain and the functions and the grid for
discretization.
dtype : optional
Data type for the discretized space, must be understood by the
`numpy.dtype` constructor. The default for ``None`` depends on the
``impl`` backend, usually it is ``'float64'`` or ``'float32'``.
impl : string, optional
Implementation of the data storage arrays
kwargs :
Additional keyword parameters, see `uniform_discr` for details.
Returns
-------
discr : `DiscreteLp`
The uniformly discretized function space.
Examples
--------
>>> part = odl.uniform_partition(0, 1, 10)
>>> uniform_discr_frompartition(part)
uniform_discr(0.0, 1.0, 10)
See Also
--------
uniform_discr : implicit uniform Lp discretization
uniform_discr_fromspace : uniform Lp discretization from an existing
function space
odl.discr.partition.uniform_partition :
partition of the function domain |
374,953 | def _determine_rotated_logfile(self):
rotated_filename = self._check_rotated_filename_candidates()
if rotated_filename and exists(rotated_filename):
if stat(rotated_filename).st_ino == self._offset_file_inode:
return rotated_filename
if stat(self.filename).st_ino == self._offset_file_inode:
if self.copytruncate:
return rotated_filename
else:
sys.stderr.write(
"[pygtail] [WARN] file size of %s shrank, and copytruncate support is "
"disabled (expected at least %d bytes, was %d bytes).\n" %
(self.filename, self._offset, stat(self.filename).st_size))
return None | We suspect the logfile has been rotated, so try to guess what the
rotated filename is, and return it. |
374,954 | def fieldAlphaHistogram(
self, name, q=, fq=None, nbins=10, includequeries=True
):
oldpersist = self.persistent
self.persistent = True
bins = []
qbin = []
fvals = []
try:
fvals = self.fieldValues(name, q, fq, maxvalues=-1)
nvalues = len(fvals[name]) / 2
if nvalues < nbins:
nbins = nvalues
if nvalues == nbins:
for i in range(0, nbins):
bin = [fvals[name][i * 2], fvals[name][i * 2], 0]
binq = % (name, self.prepareQueryTerm(name, bin[0]))
qbin.append(binq)
bins.append(bin)
else:
delta = nvalues / nbins
if delta == 1:
for i in range(0, nbins - 2):
bin = [fvals[name][i * 2], fvals[name][i * 2], 0]
binq = % (name, self.prepareQueryTerm(name, bin[0]))
qbin.append(binq)
bins.append(bin)
term = fvals[name][(nbins - 1) * 2]
bin = [term, fvals[name][((nvalues - 1) * 2)], 0]
binq = % (name, self.prepareQueryTerm(name, term))
qbin.append(binq)
bins.append(bin)
else:
coffset = 0.0
delta = float(nvalues) / float(nbins)
for i in range(0, nbins):
idxl = int(coffset) * 2
idxu = (int(coffset + delta) * 2) - 2
bin = [fvals[name][idxl], fvals[name][idxu], 0]
binq =
try:
if i == 0:
binq = % (
name,
self.prepareQueryTerm(name, bin[1]),
)
elif i == nbins - 1:
binq = % (
name,
self.prepareQueryTerm(name, bin[0]),
)
else:
binq = % (
name,
self.prepareQueryTerm(name, bin[0]),
self.prepareQueryTerm(name, bin[1]),
)
except Exception:
self.logger.exception()
qbin.append(binq)
bins.append(bin)
coffset = coffset + delta
params = {
: q,
: ,
: ,
: name,
: ,
: 1,
: ,
}
request = urllib.parse.urlencode(params, doseq=True)
for sq in qbin:
try:
request = request + % urllib.parse.urlencode(
{: self.encoder(sq)[0]}
)
except Exception:
self.logger.exception()
rsp = self.doPost(self.solrBase + , request, self.formheaders)
data = eval(rsp.read())
for i in range(0, len(bins)):
v = data[][][qbin[i]]
bins[i][2] = v
if includequeries:
bins[i].append(qbin[i])
finally:
self.persistent = oldpersist
if not self.persistent:
self.conn.close()
return bins | Generates a histogram of values from a string field. Output is:
[[low, high, count, query], ... ] Bin edges are determined by equal division
of the fields |
374,955 | def get_managed_policy_document(policy_arn, policy_metadata=None, client=None, **kwargs):
if not policy_metadata:
policy_metadata = client.get_policy(PolicyArn=policy_arn)
policy_document = client.get_policy_version(PolicyArn=policy_arn,
VersionId=policy_metadata['Policy']['DefaultVersionId'])
return policy_document['PolicyVersion']['Document'] | Retrieve the currently active (i.e. 'default') policy version document for a policy.
:param policy_arn:
:param policy_metadata: This is a previously fetched managed policy response from boto/cloudaux.
This is used to prevent unnecessary API calls to get the initial policy default version id.
:param client:
:param kwargs:
:return: |
374,956 | def unit_overlap(evaluated_model, reference_model):
if not (isinstance(evaluated_model, TfModel) and isinstance(reference_model, TfModel)):
raise ValueError(
"Arguments have to be instances of TfModel.")
terms1 = frozenset(evaluated_model.terms)
terms2 = frozenset(reference_model.terms)
if not terms1 and not terms2:
raise ValueError(
"Documents can't be empty. Please pass the valid documents.")
common_terms_count = len(terms1 & terms2)
return common_terms_count / (len(terms1) + len(terms2) - common_terms_count) | Computes unit overlap of two text documents. Documents
has to be represented as TF models of non-empty document.
:returns float:
0 <= overlap <= 1, where 0 means no match and 1 means
exactly the same. |
374,957 | def register_model(cls, model):
rest_name = model.rest_name
resource_name = model.resource_name
if rest_name not in cls._model_rest_name_registry:
cls._model_rest_name_registry[rest_name] = [model]
cls._model_resource_name_registry[resource_name] = [model]
elif model not in cls._model_rest_name_registry[rest_name]:
cls._model_rest_name_registry[rest_name].append(model)
cls._model_resource_name_registry[resource_name].append(model) | Register a model class according to its remote name
Args:
model: the model to register |
374,958 | def write_dltime (self, url_data):
self.writeln(u"<tr><td>"+self.part("dltime")+u"</td><td>"+
(_("%.3f seconds") % url_data.dltime)+
u"</td></tr>") | Write url_data.dltime. |
374,959 | def knob_end(self):
side_chain_atoms = self.knob_residue.side_chain
if not side_chain_atoms:
return self.knob_residue['CA']
distances = [distance(self.knob_residue['CB'], x) for x in side_chain_atoms]
max_d = max(distances)
knob_end_atoms = [atom for atom, d in zip(side_chain_atoms, distances) if d == max_d]
if len(knob_end_atoms) == 1:
return knob_end_atoms[0]._vector
else:
return numpy.mean([x._vector for x in knob_end_atoms], axis=0) | Coordinates of the end of the knob residue (atom in side-chain furthest from CB atom.
Returns CA coordinates for GLY. |
374,960 | def directional_hamming_distance(reference_intervals, estimated_intervals):
util.validate_intervals(estimated_intervals)
util.validate_intervals(reference_intervals)
if len(reference_intervals) > 1 and (reference_intervals[:-1, 1] >
reference_intervals[1:, 0]).any():
raise ValueError()
est_ts = np.unique(estimated_intervals.flatten())
seg = 0.
for start, end in reference_intervals:
dur = end - start
between_start_end = est_ts[(est_ts >= start) & (est_ts < end)]
seg_ts = np.hstack([start, between_start_end, end])
seg += dur - np.diff(seg_ts).max()
return seg / (reference_intervals[-1, 1] - reference_intervals[0, 0]) | Compute the directional hamming distance between reference and
estimated intervals as defined by [#harte2010towards]_ and used for MIREX
'OverSeg', 'UnderSeg' and 'MeanSeg' measures.
Examples
--------
>>> (ref_intervals,
... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> overseg = 1 - mir_eval.chord.directional_hamming_distance(
... ref_intervals, est_intervals)
>>> underseg = 1 - mir_eval.chord.directional_hamming_distance(
... est_intervals, ref_intervals)
>>> seg = min(overseg, underseg)
Parameters
----------
reference_intervals : np.ndarray, shape=(n, 2), dtype=float
Reference chord intervals to score against.
estimated_intervals : np.ndarray, shape=(m, 2), dtype=float
Estimated chord intervals to score against.
Returns
-------
directional hamming distance : float
directional hamming distance between reference intervals and
estimated intervals. |
374,961 | def _fix_repo_url(repo_url):
parsed = urlparse.urlparse(repo_url)
if parsed.scheme not in ('http', 'https'):
return repo_url
username = parsed.username or ""
password = parsed.password or ""
port = ":" + parsed.port if parsed.port else ""
netloc = "".join((username, ":", password, "@", parsed.hostname, port))
part_list = list(parsed)
part_list[1] = netloc
return urlparse.urlunparse(part_list) | Add empty credentials to a repo URL if not set, but only for HTTP/HTTPS.
This is to make git not hang while trying to read the username and
password from standard input. |
374,962 | def get_rendition_size(self, spec, output_scale, crop):
if crop:
_, _, width, height = crop
else:
width = self._record.width
height = self._record.height
mode = spec.get('resize', 'fit')  # spec key and default assumed; original literals lost
if mode == 'fit':
return self.get_rendition_fit_size(spec, width, height, output_scale)
if mode == 'fill':
return self.get_rendition_fill_size(spec, width, height, output_scale)
if mode == 'stretch':
return self.get_rendition_stretch_size(spec, width, height, output_scale)
raise ValueError("Unknown resize mode {}".format(mode)) | Wrapper to determine the overall rendition size and cropping box
Returns tuple of (size,box) |
374,963 | def _check_operators(self):
if not isinstance(self._operators, (list, tuple, np.ndarray)):
raise TypeError((
).format(type(self._operators)))
for op in self._operators:
if not hasattr(op, 'cost'):
raise ValueError()
op.cost = check_callable(op.cost) | Check Operators
This method checks if the input operators have a "cost" method
Raises
------
ValueError
For invalid operators type
ValueError
For operators without "cost" method |
374,964 | def decode(data):
if riemann.network.CASHADDR_PREFIX is None:
raise ValueError(
.format(riemann.get_current_network_name()))
if data.find(riemann.network.CASHADDR_PREFIX) != 0:
raise ValueError(
.format(riemann.network.CASHADDR_PREFIX))
prefix, data = data.split(':')
decoded = b32decode(data)
if not verify_checksum(prefix, decoded):
raise ValueError()
converted = convertbits(decoded, 5, 8)
return bytes(converted[:-6]) | str -> bytes |
374,965 | def _make_fake_message(self, user_id, page_id, payload):
event = {
'sender': {
'id': user_id,
},
'recipient': {
'id': page_id,
},
'postback': {
'payload': ujson.dumps(payload),
},
}
return FacebookMessage(event, self, False) | Creates a fake message for the given user_id. It contains a postback
with the given payload. |
374,966 | def _main(self):
probes = self.config.get('probes', None)
if not probes:
raise ValueError('no probes specified')  # error message assumed; original literal lost
for probe_config in self.config['probes']:
probe = plugin.get_probe(probe_config, self.plugin_context)
if 'output' not in probe_config:
raise ValueError("no output specified")
for output_name in probe_config['output']:
output = plugin.get_output(output_name, self.plugin_context)
if not output.started:
output.start()
self.joins.append(output)
probe._emit.append(output)
probe.start()
self.joins.append(probe)
vaping.io.joinall(self.joins)
return 0 | process |
374,967 | def validate_rc():
transactions = rc.read()
if not transactions:
print()
return False
transactions = sort(unique(transactions))
return validate_setup(transactions) | Before we execute any actions, let's validate our .vacationrc. |
374,968 | def parse(data):
sections = re.compile("^
headings = re.findall("^
sections.pop(0)
parsed = []
def func(h, s):
p = parse_heading(h)
p["content"] = s
parsed.append(p)
list(map(func, headings, sections))
return parsed | Parse the given ChangeLog data into a list of Hashes.
@param [String] data File data from the ChangeLog.md
@return [Array<Hash>] Parsed data, e.g. [{ 'version' => ..., 'url' => ..., 'date' => ..., 'content' => ...}, ...] |
374,969 | def get_file_str(path, saltenv='base'):
fn_ = cache_file(path, saltenv)
if isinstance(fn_, six.string_types):
try:
with salt.utils.files.fopen(fn_, ) as fp_:
return fp_.read()
except IOError:
return False
return fn_ | Download a file from a URL to the Minion cache directory and return the
contents of that file
Returns ``False`` if Salt was unable to cache a file from a URL.
CLI Example:
.. code-block:: bash
salt '*' cp.get_file_str salt://my/file |
374,970 | def to_wire(self, file, compress=None, origin=None, **kw):
return super(RRset, self).to_wire(self.name, file, compress, origin,
self.deleting, **kw) | Convert the RRset to wire format. |
374,971 | def set_lacp_fallback(self, name, mode=None):
if mode not in ['disabled', 'static', 'individual']:
return False
disable = True if mode == 'disabled' else False
commands = ['interface %s' % name]
commands.append(self.command_builder('port-channel lacp fallback',
value=mode, disable=disable))
return self.configure(commands) | Configures the Port-Channel lacp_fallback
Args:
name(str): The Port-Channel interface name
mode(str): The Port-Channel LACP fallback setting
Valid values are 'disabled', 'static', 'individual':
* static - Fallback to static LAG mode
* individual - Fallback to individual ports
* disabled - Disable LACP fallback
Returns:
True if the operation succeeds otherwise False is returned |
374,972 | def vote_choice_address(self) -> List[str]:
if self.vote_id is None:
raise Exception("vote_id is required")
addresses = []
vote_init_txid = unhexlify(self.vote_id)
for choice in self.choices:
vote_cast_privkey = sha256(vote_init_txid + bytes(
list(self.choices).index(choice))
).hexdigest()
addresses.append(Kutil(network=self.deck.network,
privkey=bytearray.fromhex(vote_cast_privkey)).address)
return addresses | calculate the addresses on which the vote is casted. |
374,973 | def _get_elements(complex_type, root):
found_elements = []
element = findall(root, '{%s}complexType' % XS_NAMESPACE,
attribute_name='name', attribute_value=complex_type)[0]
found_elements = findall(element, '{%s}element' % XS_NAMESPACE)
return found_elements | Get attribute elements |
374,974 | def on_menu_exit(self, event):
try:
self.help_window.Destroy()
except:
pass
if in sys.argv:
self.Destroy()
try:
sys.exit()
except Exception as ex:
if isinstance(ex, TypeError):
pass
else:
raise ex | Exit the GUI |
374,975 | def convert_op(self, op):
if op == :
return
elif op == or op == :
return
elif op == :
return
elif op == :
return
elif op == :
return
elif op == :
return
elif op == :
return
elif op == :
return
elif op == :
return
elif op == :
return
else:
return op | Converts NeuroML arithmetic/logical operators to python equivalents.
@param op: NeuroML operator
@type op: string
@return: Python operator
@rtype: string |
374,976 | def get_rendered_fields(self, ctx=None):
if ctx is None:
ctx = RenderContext()
ctx.push(self)
current = self._fields[self._field_idx]
res = current.get_rendered_fields(ctx)
ctx.pop()
return res | :param ctx: rendering context in which the method was called
:return: ordered list of the fields that will be rendered |
374,977 | def _get_shaperecords(self, num_fill_bits,
num_line_bits, shape_number):
shape_records = []
bc = BitConsumer(self._src)
while True:
type_flag = bc.u_get(1)
if type_flag:
straight_flag = bc.u_get(1)
num_bits = bc.u_get(4)
if straight_flag:
record = _make_object("StraightEdgeRecord")  # record name per the SWF spec; original literal lost
record.TypeFlag = 1
record.StraightFlag = 1
record.NumBits = num_bits
record.GeneralLineFlag = general_line_flag = bc.u_get(1)
if general_line_flag:
record.DeltaX = bc.s_get(num_bits + 2)
record.DeltaY = bc.s_get(num_bits + 2)
else:
record.VertLineFlag = vert_line_flag = bc.s_get(1)
if vert_line_flag:
record.DeltaY = bc.s_get(num_bits + 2)
else:
record.DeltaX = bc.s_get(num_bits + 2)
else:
record = _make_object("CurvedEdgeRecord")  # record name per the SWF spec; original literal lost
record.TypeFlag = 1
record.StraightFlag = 0
record.NumBits = num_bits
record.ControlDeltaX = bc.s_get(num_bits + 2)
record.ControlDeltaY = bc.s_get(num_bits + 2)
record.AnchorDeltaX = bc.s_get(num_bits + 2)
record.AnchorDeltaY = bc.s_get(num_bits + 2)
else:
record = _make_object("StyleChangeRecord")  # record name per the SWF spec; original literal lost
record.TypeFlag = 0
five_bits = [bc.u_get(1) for _ in range(5)]
if not any(five_bits):
break
if shape_number > 2:
record.NumFillBits = num_fill_bits = bc.u_get(4)
record.NumLineBits = num_line_bits = bc.u_get(4)
else:
record.NumFillBits = bc.u_get(4)
record.NumLineBits = bc.u_get(4)
bc = BitConsumer(self._src)
shape_records.append(record)
return shape_records | Return an array of SHAPERECORDS. |
374,978 | def get_scale_fac(fig, fiducial_width=8, fiducial_height=7):
width, height = fig.get_size_inches()
return (width*height/(fiducial_width*fiducial_height))**0.5 | Gets a factor to scale fonts by for the given figure. The scale
factor is relative to a figure with dimensions
(`fiducial_width`, `fiducial_height`). |
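Worked example of the scaling: a 16x14 inch figure has four times the area of the 8x7 inch fiducial, so fonts scale by sqrt(4) = 2.
width, height = 16.0, 14.0
fiducial_width, fiducial_height = 8, 7
scale_fac = (width * height / (fiducial_width * fiducial_height)) ** 0.5
print(scale_fac)  # 2.0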
374,979 | def fetch(version=):
# NOTE: the Dataverse DOI strings were lost in extraction; the keys are
# inferred from the docstring below.
doi = {
'bayestar2015': '...',
'bayestar2017': '...'
}
try:
doi = doi[version]
except KeyError as err:
# error message reconstructed; the original literals were lost in extraction
raise ValueError('Version "{}" does not exist. Valid versions are: {}'.format(
version,
', '.join(['"{}"'.format(k) for k in doi.keys()])
))
requirements = {
: {: },
: {: }
}[version]
# path components reconstructed; the original subdirectory/extension literals were lost
local_fname = os.path.join(data_dir(), 'bayestar', '{}.h5'.format(version))
fetch_utils.dataverse_download_doi(
doi,
local_fname,
file_requirements=requirements) | Downloads the specified version of the Bayestar dust map.
Args:
version (Optional[:obj:`str`]): The map version to download. Valid versions are
:obj:`'bayestar2017'` (Green, Schlafly, Finkbeiner et al. 2018) and
:obj:`'bayestar2015'` (Green, Schlafly, Finkbeiner et al. 2015). Defaults
to :obj:`'bayestar2017'`.
Raises:
:obj:`ValueError`: The requested version of the map does not exist.
:obj:`DownloadError`: Either no matching file was found under the given DOI, or
the MD5 sum of the file was not as expected.
:obj:`requests.exceptions.HTTPError`: The given DOI does not exist, or there
was a problem connecting to the Dataverse. |
374,980 | def timeinfo(self):
if self.istep not in self.sdat.tseries.index:
return None
return self.sdat.tseries.loc[self.istep] | Time series data of the time step.
Set to None if no time series data is available for this time step. |
374,981 | def latitude(self, latitude):
if not (-90 <= latitude <= 90):
# error message reconstructed; the original literal was lost in extraction
raise ValueError(
'latitude must be in the range [-90, 90], got {}'.format(latitude))
self._latitude = latitude | Setter for latitude. |
374,982 | def resource_to_url(resource, request=None, quote=False):
if request is None:
request = get_current_request()
reg = get_current_registry()
cnv = reg.getAdapter(request, IResourceUrlConverter)
return cnv.resource_to_url(resource, quote=quote) | Converts the given resource to a URL.
:param request: Request object (required for the host name part of the
URL). If this is not given, the current request is used.
:param bool quote: If set, the URL returned will be quoted. |
374,983 | def spades(args):
from jcvi.formats.fastq import readlen
p = OptionParser(spades.__doc__)
opts, args = p.parse_args(args)
if len(args) == 0:
sys.exit(not p.print_help())
folder, = args
for p, pf in iter_project(folder):
rl = readlen([p[0], "--silent"])
kmers = None
if rl >= 250:
kmers = "21,33,55,77,99,127"
elif rl >= 150:
kmers = "21,33,55,77"
cmd = "spades.py"
if kmers:
cmd += " -k {0}".format(kmers)
cmd += " --careful"
cmd += " --pe1-1 {0} --pe1-2 {1}".format(*p)
cmd += " -o {0}_spades".format(pf)
print(cmd) | %prog spades folder
Run automated SPADES. |
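The k-mer list grows with read length; with the branches ordered largest-first (as corrected above), the selection behaves like this:
for rl in (100, 160, 260):
    if rl >= 250:
        kmers = "21,33,55,77,99,127"
    elif rl >= 150:
        kmers = "21,33,55,77"
    else:
        kmers = None
    print(rl, kmers)
# 100 None
# 160 21,33,55,77
# 260 21,33,55,77,99,127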
374,984 | def _fetch(self, params, required, defaults):
defaults.update(params)
pp_params = self._check_and_update_params(required, defaults)
pp_string = self.signature + urlencode(pp_params)
response = self._request(pp_string)
response_params = self._parse_response(response)
# log message literals reconstructed; the originals were lost in extraction
log.debug('PayPal NVP request params: %s', pprint.pformat(defaults))
log.debug('PayPal NVP response params: %s', pprint.pformat(response_params))
nvp_params = {}
tmpd = defaults.copy()
tmpd.update(response_params)
for k, v in tmpd.items():
if k in self.NVP_FIELDS:
nvp_params[str(k)] = v
if 'timestamp' in nvp_params:
nvp_params['timestamp'] = paypaltime2datetime(nvp_params['timestamp'])
nvp_obj = PayPalNVP(**nvp_params)
nvp_obj.init(self.request, params, response_params)
nvp_obj.save()
return nvp_obj | Make the NVP request and store the response. |
374,985 | def _check_response(response, expected):
response_code = response.status_code
if expected == response_code:
return
if response_code < 400:
raise ex.UnexpectedResponseCodeException(response.text)
elif response_code == 401:
raise ex.UnauthorizedException(response.text)
elif response_code == 400:
raise ex.BadRequestException(response.text)
elif response_code == 403:
raise ex.ForbiddenException(response.text)
elif response_code == 404:
raise ex.NotFoundException(response.text)
elif response_code == 429:
raise ex.RateLimitedException(response.text)
else:
raise ex.InternalServerErrorException(response.text) | Checks if the expected response code matches the actual response code.
If they're not equal, raises the appropriate exception
Args:
response: (int) Actual status code
expected: (int) Expected status code |
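A minimal check with a stand-in response object (the real argument would be a requests-style response); the exception classes live in the module's ex namespace and are not redefined here.
class FakeResponse:
    def __init__(self, status_code, text=''):
        self.status_code = status_code
        self.text = text

_check_response(FakeResponse(200), expected=200)     # returns silently
# _check_response(FakeResponse(404, 'gone'), 200)    # would raise ex.NotFoundException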
374,986 | def class_in_progress(stack=None):
if stack is None:
stack = inspect.stack()
for frame in stack:
statement_list = frame[4]
if statement_list is None:
continue
if statement_list[0].strip().startswith('class '):
return True
return False | True if currently inside a class definition, else False. |
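Quick demonstration when run as a script: while the class body executes, the module frame's current statement is the class statement itself, so the stack check succeeds there and fails at plain module level.
class Widget:
    inside = class_in_progress()   # True: a 'class ...' statement is on the stack

print(Widget.inside)               # True
print(class_in_progress())         # False outside any class body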
374,987 | async def delete(self, request, resource=None, **kwargs):
if resource is None:
raise RESTNotFound(reason='Resource not found')  # message reconstructed; original literal lost
self.collection.remove(resource) | Delete a resource. |
374,988 | def data_files(self):
# shard pattern reconstructed ('%s-*' matches files like train-00000-of-01024); original literal lost
tf_record_pattern = os.path.join(FLAGS.data_dir, '%s-*' % self.subset)
data_files = tf.gfile.Glob(tf_record_pattern)
if not data_files:
# message literal reconstructed; the original was lost in extraction
print('No files found for dataset %s/%s at %s' % (self.name,
self.subset,
FLAGS.data_dir))
self.download_message()
exit(-1)
return data_files | Returns a python list of all (sharded) data subset files.
Returns:
python list of all (sharded) data set files.
Raises:
ValueError: if there are not data_files matching the subset. |
374,989 | def populate(self):
if self.exists:
raise CacheAlreadyExistsException('Cache already exists at %s' % self.cache_uri)  # message reconstructed
self._populate_setup()
with closing(self.graph):
with self._download_metadata_archive() as metadata_archive:
for fact in self._iter_metadata_triples(metadata_archive):
self._add_to_graph(fact) | Populates a new cache. |
374,990 | def time_col_turbulent(EnergyDis, ConcAl, ConcClay, coag, material,
DiamTarget, DIM_FRACTAL):
return((1/6) * (6/np.pi)**(1/9) * EnergyDis**(-1/3) * DiamTarget**(2/3)
* frac_vol_floc_initial(ConcAl, ConcClay, coag, material)**(-8/9)
* (DiamTarget / material.Diameter)**((8*(DIM_FRACTAL-3)) / 9)
) | Calculate single collision time for turbulent flow mediated collisions.
Calculated as a function of floc size. |
374,991 | def registerFilter(self, column, patterns, is_regex=False,
ignore_case=False):
if isinstance(patterns, basestring):
patt_list = (patterns,)
elif isinstance(patterns, (tuple, list)):
patt_list = list(patterns)
else:
raise ValueError("The patterns parameter must either be as string "
"or a tuple / list of strings.")
if is_regex:
if ignore_case:
flags = re.IGNORECASE
else:
flags = 0
patt_exprs = [re.compile(pattern, flags) for pattern in patt_list]
else:
if ignore_case:
patt_exprs = [pattern.lower() for pattern in patt_list]
else:
patt_exprs = patt_list
self._filters[column] = (patt_exprs, is_regex, ignore_case) | Register filter on a column of table.
@param column: The column name.
@param patterns: A single pattern or a list of patterns used for
matching column values.
@param is_regex: The patterns will be treated as regex if True, the
column values will be tested for equality with the
patterns otherwise.
@param ignore_case: Case insensitive matching will be used if True. |
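A small sketch of what the method stores for a case-insensitive, non-regex filter, mirroring the normalization above; the column name and pattern are made up.
patterns = 'Alice'
patt_list = (patterns,) if isinstance(patterns, str) else list(patterns)
patt_exprs = [p.lower() for p in patt_list]
print(patt_exprs)   # ['alice'] -- kept alongside is_regex=False, ignore_case=True for the 'user' column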
374,992 | def derenzo_sources(space, min_pt=None, max_pt=None):
if space.ndim == 2:
return ellipsoid_phantom(space, _derenzo_sources_2d(), min_pt, max_pt)
if space.ndim == 3:
return ellipsoid_phantom(
space, cylinders_from_ellipses(_derenzo_sources_2d()),
min_pt, max_pt)
else:
# error message reconstructed; the original literal was lost in extraction
raise ValueError('phantom only defined in 2 or 3 dimensions') | Create the PET/SPECT Derenzo sources phantom.
The Derenzo phantom contains a series of circles of decreasing size.
In 3d the phantom is simply the 2d phantom extended in the z direction as
cylinders.
Parameters
----------
space : `DiscreteLp`
Space in which the phantom should be created, must be 2- or
3-dimensional. If ``space.shape`` is 1 in an axis, a corresponding
slice of the phantom is created (instead of squashing the whole
phantom into the slice).
min_pt, max_pt : array-like, optional
If provided, use these vectors to determine the bounding box of the
phantom instead of ``space.min_pt`` and ``space.max_pt``.
It is currently required that ``min_pt >= space.min_pt`` and
``max_pt <= space.max_pt``, i.e., shifting or scaling outside the
original space is not allowed.
Providing one of them results in a shift, e.g., for ``min_pt``::
new_min_pt = min_pt
new_max_pt = space.max_pt + (min_pt - space.min_pt)
Providing both results in a scaled version of the phantom.
Returns
-------
phantom : ``space`` element
The Derenzo source phantom in the given space. |
374,993 | def add_ones(a):
arr = N.ones((a.shape[0],a.shape[1]+1))
arr[:,:-1] = a
return arr | Adds a column of 1s at the end of the array |
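Tiny usage example (N is the module's numpy alias): a 2x2 array gains a trailing column of ones, the usual trick for folding an intercept term into a design matrix.
import numpy as N

a = N.array([[1., 2.],
             [3., 4.]])
print(add_ones(a))
# [[1. 2. 1.]
#  [3. 4. 1.]]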
374,994 | def call_pre_hook(awsclient, cloudformation):
if not hasattr(cloudformation, 'pre_hook'):
return
hook_func = getattr(cloudformation, 'pre_hook')
if not hook_func.func_code.co_argcount:
hook_func()
else:
# error message reconstructed; the original literals were lost in extraction
log.error('pre_hook must not take any arguments') | Invoke the pre_hook BEFORE the config is read.
:param awsclient:
:param cloudformation: |
374,995 | def get_daemon_stats(self, details=False):
logger.debug("Get daemon statistics for %s, %s %s", self.name, self.alive, self.reachable)
# endpoint string reconstructed; the original literals were lost in extraction
return self.con.get('stats%s' % ('?details=1' if details else '')) | Send a HTTP request to the satellite (GET /get_daemon_stats)
:return: Daemon statistics
:rtype: dict |
374,996 | def list_items(path_to_directory, pattern, wanted):
if not path_to_directory:
return set()
needed = make_needed(pattern, path_to_directory, wanted)
return [os.path.join(path_to_directory, name)
for name in _names_in_directory(path_to_directory)
if needed(name)] | All items in the given path which match the given glob and are wanted |
374,997 | def get_sidecar(fname, allowedfileformats=):
if allowedfileformats == :
allowedfileformats = [, ]
for f in allowedfileformats:
fname = fname.split(f)[0]
fname += '.json'
if os.path.exists(fname):
with open(fname) as fs:
sidecar = json.load(fs)
else:
sidecar = {}
if not in sidecar:
sidecar[] = {}
sidecar[][] = False
sidecar[][] = []
return sidecar | Loads sidecar or creates one |
374,998 | def increase(self, infile):
gf = infile[31:]
index = gf.index(random.choice(gf))
index_len = len(gf[index])
large_size_index = random.choice([gf.index(g) for g in gf if len(g) > index_len])
gf[index], gf[large_size_index] = gf[large_size_index], gf[index]
return infile[:31] + gf | Increase: swap a byte sequence at an arbitrary position with an arbitrary byte sequence of larger size. |
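Stand-alone sketch of the mutation step on the field list that follows the 31-byte header: one chunk is exchanged with a strictly larger one (here the swapped chunk is picked by hand so a larger partner always exists).
import random

gf = [b'aa', b'bbbb', b'c', b'dddddd']
index = 2                                   # chunk b'c'
larger = random.choice([i for i, g in enumerate(gf) if len(g) > len(gf[index])])
gf[index], gf[larger] = gf[larger], gf[index]
print(gf)   # b'c' has moved; a larger chunk now sits at position 2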
374,999 | def getReferenceSetByName(self, name):
if name not in self._referenceSetNameMap:
raise exceptions.ReferenceSetNameNotFoundException(name)
return self._referenceSetNameMap[name] | Returns the reference set with the specified name. |