Unnamed: 0 (int64, 0–389k) | code (string, lengths 26–79.6k) | docstring (string, lengths 1–46.9k)
---|---|---|
22,400 | def check_periodrec_alias(actualperiod,
recoveredperiod,
tolerance=1.0e-3):
if not (np.isfinite(actualperiod) and np.isfinite(recoveredperiod)):
LOGERROR("can't compare nan values for actual/recovered periods")
return 'unknown' | This determines what kind of aliasing (if any) exists between
`recoveredperiod` and `actualperiod`.
Parameters
----------
actualperiod : float
The actual period of the object.
recoveredperiod : float
The recovered period of the object.
tolerance : float
The absolute difference required between the input periods to mark the
recovered period as close to the actual period.
Returns
-------
str
The type of alias determined for the input combination of periods. This
will be CSV string with values taken from the following list, based on
the types of alias found::
['actual',
'twice',
'half',
'ratio_over_1plus',
'ratio_over_1minus',
'ratio_over_1plus_twice',
'ratio_over_1minus_twice',
'ratio_over_1plus_thrice',
'ratio_over_1minus_thrice',
'ratio_over_minus1',
'ratio_over_twice_minus1'] |
22,401 | def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
dvs_name = get_managed_object_name(dvs_ref)
log.trace("Setting network resource management enabled to %s on "
"dvs '%s'", enabled, dvs_name)
try:
dvs_ref.EnableNetworkResourceManagement(enable=enabled)
except vim.fault.NoPermission as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(
'Not enough permissions. Required privilege: '
'{0}'.format(exc.privilegeId))
except vim.fault.VimFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(exc.msg)
except vmodl.RuntimeFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareRuntimeError(exc.msg) | Sets whether NIOC is enabled on a DVS.
dvs_ref
The DVS reference.
enabled
Flag specifying whether NIOC is enabled. |
22,402 | def receive(self, max_batch_size=None, timeout=None):
if self.error:
raise self.error
if not self.running:
raise ValueError("Unable to receive until client has been started.")
data_batch = []
try:
timeout_ms = 1000 * timeout if timeout else 0
message_batch = self._handler.receive_message_batch(
max_batch_size=max_batch_size,
timeout=timeout_ms)
for message in message_batch:
event_data = EventData(message=message)
self.offset = event_data.offset
data_batch.append(event_data)
return data_batch
except (errors.TokenExpired, errors.AuthenticationException):
log.info("Receiver disconnected due to token error. Attempting reconnect.")
self.reconnect()
return data_batch
except (errors.LinkDetach, errors.ConnectionClose) as shutdown:
if shutdown.action.retry and self.auto_reconnect:
log.info("Receiver detached. Attempting reconnect.")
self.reconnect()
return data_batch
log.info("Receiver detached. Shutting down.")
error = EventHubError(str(shutdown), shutdown)
self.close(exception=error)
raise error
except errors.MessageHandlerError as shutdown:
if self.auto_reconnect:
log.info("Receiver detached. Attempting reconnect.")
self.reconnect()
return data_batch
log.info("Receiver detached. Shutting down.")
error = EventHubError(str(shutdown), shutdown)
self.close(exception=error)
raise error
except Exception as e:
log.info("Unexpected error occurred (%r). Shutting down.", e)
error = EventHubError("Receive failed: {}".format(e))
self.close(exception=error)
raise error | Receive events from the EventHub.
:param max_batch_size: Receive a batch of events. Batch size will
be up to the maximum specified, but will return as soon as service
returns no new events. If combined with a timeout and no events are
retrieved before the timeout, the result will be empty. If no batch
size is supplied, the prefetch size will be the maximum.
:type max_batch_size: int
:rtype: list[~azure.eventhub.common.EventData] |
22,403 | def _integrateLinearOrbit(vxvv,pot,t,method,dt):
if '_c' in method:
if not ext_loaded or not _check_c(pot):
if ('leapfrog' in method or 'symplec' in method):
method= 'leapfrog'
else:
method= 'odeint'
if not ext_loaded:
warnings.warn("Cannot use C integration because C extension not loaded (using %s instead)" % (method), galpyWarning)
else:
warnings.warn("Cannot use C integration because some of the potentials are not implemented in C (using %s instead)" % (method), galpyWarning)
if method.lower() == 'leapfrog':
return symplecticode.leapfrog(lambda x,t=t: _evaluatelinearForces(pot,x,
t=t),
nu.array(vxvv),
t,rtol=10.**-8)
elif method.lower() == 'dop853':
return dop853(func=_linearEOM, x=vxvv, t=t, args=(pot,))
elif ext_loaded and \
(method.lower() == 'leapfrog_c' or method.lower() == 'rk4_c' \
or method.lower() == 'rk6_c' or method.lower() == 'symplec4_c' \
or method.lower() == 'symplec6_c' or method.lower() == 'dopr54_c' \
or method.lower() == 'dop853_c'):
warnings.warn("Using C implementation to integrate orbits",
galpyWarningVerbose)
out, msg= integrateLinearOrbit_c(pot,nu.array(vxvv),t,method,dt=dt)
return out
elif method.lower() == 'odeint' or not ext_loaded:
return integrate.odeint(_linearEOM,vxvv,t,args=(pot,),rtol=10.**-8.) | NAME:
integrateLinearOrbit
PURPOSE:
integrate a one-dimensional orbit
INPUT:
vxvv - initial condition [x,vx]
pot - linearPotential or list of linearPotentials
t - list of times at which to output (0 has to be in this!)
method - 'odeint' or 'leapfrog'
OUTPUT:
[:,2] array of [x,vx] at each t
HISTORY:
2010-07-13- Written - Bovy (NYU)
2018-10-05- Added support for C integration - Bovy (UofT) |
22,404 | def weighted(weights, sample_size, with_replacement=False):
assert sample_size <= len(weights), "The sample size must be smaller \
than or equal to the number of weights it's taken from."
weights = [float(w) for w in weights]
weight_indexes = list(range(0, len(weights)))
samples = []
while len(samples) < sample_size:
sample = OneOf.weighted(weights)
samples.append(weight_indexes[sample])
if not with_replacement:
del weights[sample]
del weight_indexes[sample]
return samples | Return a set of random integers 0 <= N <= len(weights) - 1, where the
weights determine the probability of each possible integer in the set. |
22,405 | def safe_date(self, x):
t = x[self.col_name]
if np.isnan(t):
return t
elif np.isposinf(t):
t = sys.maxsize
elif np.isneginf(t):
t = -sys.maxsize
tmp = time.localtime(float(t) / 1e9)
return time.strftime(self.date_format, tmp) | Transform x[self.col_name] into a date string.
Args:
x(dict like / pandas.Series): Row containing data to cast safely.
Returns:
str |
22,406 | def printInput(self, x):
print "Input"
for c in xrange(self.numberOfCols):
print int(x[c]),
print | TODO: document
:param x:
:return: |
22,407 | def from_jsonf(cls, fpath: str, encoding: str=,
force_snake_case=True, force_cast: bool=False, restrict: bool=False) -> T:
return cls.from_dict(util.load_jsonf(fpath, encoding),
force_snake_case=force_snake_case,
force_cast=force_cast,
restrict=restrict) | From json file path to instance
:param fpath: Json file path
:param encoding: Json file encoding
:param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: Instance |
22,408 | def publishTemplate(self, templateMessage):
desc = {
"name": "CodeSuccessReslut",
"desc": " http 成功返回结果",
"fields": [{
"name": "code",
"type": "Integer",
"desc": "返回码,200 为正常。"
}, {
"name": "errorMessage",
"type": "String",
"desc": "错误信息。"
}]
}
r = self.call_api(
method=(, , ),
action=,
params=templateMessage)
return Response(r, desc) | 发送单聊模板消息方法(一个用户向多个用户发送不同消息内容,单条消息最大 128k。每分钟最多发送 6000 条信息,每次发送用户上限为 1000 人。) 方法
@param templateMessage:单聊模版消息。
@return code:返回码,200 为正常。
@return errorMessage:错误信息。 |
22,409 | def set_object_info(self):
self.attrs.pandas_type = str(self.pandas_kind)
self.attrs.pandas_version = str(_version)
self.set_version() | set my pandas type & version |
22,410 | def openWith(self, accel = True, gyro = True, temp = True, cycle = False, cycleFreq = 0x00):
val_pwr_2 = 0x00
if accel == False:
val_pwr_2 = val_pwr_2 | self.VAL_PWR_MGMT_2_STBY_XA
val_pwr_2 = val_pwr_2 | self.VAL_PWR_MGMT_2_STBY_YA
val_pwr_2 = val_pwr_2 | self.VAL_PWR_MGMT_2_STBY_ZA
if gyro == False:
val_pwr_2 = val_pwr_2 | self.VAL_PWR_MGMT_2_STBY_XG
val_pwr_2 = val_pwr_2 | self.VAL_PWR_MGMT_2_STBY_YG
val_pwr_2 = val_pwr_2 | self.VAL_PWR_MGMT_2_STBY_ZG
val_pwr_1 = 0x00
if temp == False:
val_pwr_1 = val_pwr_1 | self.VAL_PWR_MGMT_1_OFF_TEMP
if cycle == True:
val_pwr_1 = val_pwr_1 | self.VAL_PWR_MGMT_1_ON_CYCLE
val_pwr_2 = val_pwr_2 | cycleFreq
self._sendCmd( self.REG_PWR_MGMT_2, val_pwr_2 )
self._sendCmd( self.REG_PWR_MGMT_1, val_pwr_1 ) | !
Turn on the device with configurable sensors in wake-up mode
@param accel: True - Enable accelerometer
@param gyro: True - Enable gyroscope
@param temp: True - Enable Thermometer
@param cycle: True - Enable cycle wake-up mode
@param cycleFreq: Cycle wake-up frequency, this value can be chosen:
@see VAL_PWR_MGMT_2_LP_WAKE_CTRL_1_25HZ is default
@see VAL_PWR_MGMT_2_LP_WAKE_CTRL_5HZ
@see VAL_PWR_MGMT_2_LP_WAKE_CTRL_20HZ
@see VAL_PWR_MGMT_2_LP_WAKE_CTRL_40HZ |
22,411 | def utf8(value):
if isinstance(value, _UTF8_TYPES):
return value
if not isinstance(value, unicode_type):
raise TypeError(
"Expected bytes, unicode, or None; got %r" % type(value)
)
return value.encode("utf-8") | Converts a string argument to a byte string.
If the argument is already a byte string or None, it is returned unchanged.
Otherwise it must be a unicode string and is encoded as utf8. |
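A minimal usage sketch of the utf8() helper above; the module-level names it relies on are assumed here to follow Tornado's Python 3 definitions.
# Assumed Python 3 aliases for the names referenced by utf8() above.
_UTF8_TYPES = (bytes, type(None))
unicode_type = str

print(utf8("caf\u00e9"))   # b'caf\xc3\xa9' -- str input is encoded as UTF-8
print(utf8(b"cafe"))       # b'cafe' -- bytes pass through unchanged
print(utf8(None))          # None -- None is in _UTF8_TYPES, so it is returned as-is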
22,412 | def logWrite(self, string):
logFile = open(self.logFile, 'a')
logFile.write(string + '\n')
logFile.close() | Only write text to the log file, do not print |
22,413 | def detect_vec(df, max_anoms=0.10, direction='pos',
alpha=0.05, period=None, only_last=False,
threshold=None, e_value=False, longterm_period=None,
plot=False, y_log=False, xlabel='timestamp', ylabel='count',
title=None, verbose=False):
if (isinstance(df, DataFrame) and
len(df.columns) == 1 and
df.iloc[:,0].applymap(np.isreal).all(1)):
d = {
'timestamp': range(len(df.iloc[:,0])),
'value': df.iloc[:,0]
}
df = DataFrame(d, index=d['timestamp'])
elif isinstance(df, Series):
d = {
'timestamp': range(len(df)),
'value': df
}
df = DataFrame(d, index=d['timestamp'])
else:
raise ValueError(("data must be a single data frame, "
"list, or vector that holds numeric values."))
if max_anoms > 0.49:
length = len(df.value)
raise ValueError(
("max_anoms must be less than 50% of "
"the data points (max_anoms =%f data_points =%s).")
% (round(max_anoms * length, 0), length))
if not direction in ['pos', 'neg', 'both']:
raise ValueError("direction options are: pos | neg | both.")
if not (0.01 <= alpha or alpha <= 0.1):
if verbose:
import warnings
warnings.warn(("alpha is the statistical signifigance, "
"and is usually between 0.01 and 0.1"))
if not period:
raise ValueError(("Period must be set to the number "
"of data points in a single period"))
if not isinstance(only_last, bool):
raise ValueError("only_last must be a boolean")
if not threshold in [None, 'med_max', 'p95', 'p99']:
raise ValueError("threshold options are: None | med_max | p95 | p99")
if not isinstance(e_value, bool):
raise ValueError("e_value must be a boolean")
if not isinstance(plot, bool):
raise ValueError("plot must be a boolean")
if not isinstance(y_log, bool):
raise ValueError("y_log must be a boolean")
if not isinstance(xlabel, string_types):
raise ValueError("xlabel must be a string")
if not isinstance(ylabel, string_types):
raise ValueError("ylabel must be a string")
if title and not isinstance(title, string_types):
raise ValueError("title must be a string")
if not title:
title = ''
else:
title = title + " : "
num_obs = len(df.value)
clamp = (1 / float(num_obs))
if max_anoms < clamp:
max_anoms = clamp
if longterm_period:
all_data = []
for j in range(0, len(df.timestamp), longterm_period):
start_index = df.timestamp.iloc[j]
end_index = min((start_index + longterm_period), num_obs)
if (end_index - start_index) == longterm_period:
sub_df = df[(df.timestamp >= start_index)
& (df.timestamp <= end_index)]
else:
sub_df = df[(df.timestamp >= (num_obs - longterm_period)) &
(df.timestamp <= num_obs)]
all_data.append(sub_df)
else:
all_data = [df]
all_anoms = DataFrame(columns=['timestamp', 'value'])
seasonal_plus_trend = DataFrame(columns=['timestamp', 'value'])
for i in range(len(all_data)):
directions = {
'pos': Direction(True, True),
'neg': Direction(True, False),
'both': Direction(False, True)
}
anomaly_direction = directions[direction]
s_h_esd_timestamps = detect_anoms(all_data[i], k=max_anoms,
alpha=alpha,
num_obs_per_period=period,
use_decomp=True,
one_tail=anomaly_direction.one_tail,
upper_tail=anomaly_direction.upper_tail,
verbose=verbose)
data_decomp = s_h_esd_timestamps['stl']
s_h_esd_timestamps = s_h_esd_timestamps['anoms']
if s_h_esd_timestamps:
anoms = all_data[i][all_data[i].timestamp.isin(s_h_esd_timestamps)]
else:
anoms = DataFrame(columns=['timestamp', 'value'])
if threshold:
if isinstance(all_data[i].index[0], Timestamp):
group = all_data[i].timestamp.map(Timestamp.date)
else:
group = all_data[i].timestamp.map(lambda t: int(t / period))
periodic_maxes = df.groupby(group).aggregate(np.max).value
if threshold == 'med_max':
thresh = periodic_maxes.median()
elif threshold == 'p95':
thresh = periodic_maxes.quantile(.95)
elif threshold == 'p99':
thresh = periodic_maxes.quantile(.99)
anoms = anoms[anoms.value >= thresh]
all_anoms = all_anoms.append(anoms)
seasonal_plus_trend = seasonal_plus_trend.append(data_decomp)
try:
all_anoms.drop_duplicates(subset=['timestamp'])
seasonal_plus_trend.drop_duplicates(subset=['timestamp'])
except TypeError:
all_anoms.drop_duplicates(cols=['timestamp'])
seasonal_plus_trend.drop_duplicates(cols=['timestamp'])
if only_last:
d = {
'timestamp': df.timestamp.iloc[-period:],
'value': df.value.iloc[-period:]
}
x_subset_single_period = DataFrame(d, index = d['timestamp'])
past_obs = period * 7
if num_obs < past_obs:
past_obs = num_obs - period
d = {
'timestamp': df.timestamp.iloc[-past_obs:-period],
'value': df.value.iloc[-past_obs:-period]
}
x_subset_previous = DataFrame(d, index=d['timestamp'])
all_anoms = all_anoms[all_anoms.timestamp
>= x_subset_single_period.timestamp.iloc[0]]
num_obs = len(x_subset_single_period.value)
anom_pct = (len(df.value) / float(num_obs)) * 100
if anom_pct == 0:
return {
"anoms": None,
"plot": None
}
all_anoms.index = all_anoms.timestamp
if e_value:
d = {
'timestamp': all_anoms.timestamp,
'anoms': all_anoms.value,
'expected_value': seasonal_plus_trend[
seasonal_plus_trend.timestamp.isin(
all_anoms.timestamp)].value
}
else:
d = {
'timestamp': all_anoms.timestamp,
'anoms': all_anoms.value
}
anoms = DataFrame(d, index=d['timestamp'].index)
return {
'anoms': anoms,
'plot': None
} | Anomaly Detection Using Seasonal Hybrid ESD Test
A technique for detecting anomalies in seasonal univariate time series where the input is a
series of observations.
Args:
x: Time series as a column data frame, list, or vector, where the column consists of
the observations.
max_anoms: Maximum number of anomalies that S-H-ESD will detect as a percentage of the
data.
direction: Directionality of the anomalies to be detected. Options are: ('pos' | 'neg' | 'both').
alpha: The level of statistical significance with which to accept or reject anomalies.
period: Defines the number of observations in a single period, and used during seasonal
decomposition.
only_last: Find and report anomalies only within the last period in the time series.
threshold: Only report positive going anoms above the threshold specified. Options are: ('None' | 'med_max' | 'p95' | 'p99').
e_value: Add an additional column to the anoms output containing the expected value.
longterm_period: Defines the number of observations for which the trend can be considered
flat. The value should be an integer multiple of the number of observations in a single period.
This increases anom detection efficacy for time series that are greater than a month.
plot: (Currently unsupported) A flag indicating if a plot with both the time series and the estimated anoms,
indicated by circles, should also be returned.
y_log: Apply log scaling to the y-axis. This helps with viewing plots that have extremely
large positive anomalies relative to the rest of the data.
xlabel: X-axis label to be added to the output plot.
ylabel: Y-axis label to be added to the output plot.
Details
'longterm_period' This option should be set when the input time series is longer than a month.
The option enables the approach described in Vallis, Hochenbaum, and Kejariwal (2014).
'threshold' Filter all negative anomalies and those anomalies whose magnitude is smaller
than one of the specified thresholds which include: the median
of the daily max values (med_max), the 95th percentile of the daily max values (p95), and the
99th percentile of the daily max values (p99).
'title' Title for the output plot.
'verbose' Enable debug messages
The returned value is a dictionary with the following components:
anoms: Data frame containing index, values, and optionally expected values.
plot: A graphical object if plotting was requested by the user. The plot contains
the estimated anomalies annotated on the input time series. |
22,414 | def array_remove(col, element):
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_remove(_to_java_column(col), element)) | Collection function: Remove all elements that equal to element from the given array.
:param col: name of column containing array
:param element: element to be removed from the array
>>> df = spark.createDataFrame([([1, 2, 3, 1, 1],), ([],)], ['data'])
>>> df.select(array_remove(df.data, 1)).collect()
[Row(array_remove(data, 1)=[2, 3]), Row(array_remove(data, 1)=[])] |
22,415 | def logo_path(instance, filename):
extension = os.path.splitext(filename)[1].lower()
instance_id = str(instance.id)
fullname = os.path.join("enterprise/branding/", instance_id, instance_id + "_logo" + extension)
if default_storage.exists(fullname):
default_storage.delete(fullname)
return fullname | Delete the file if it already exist and returns the enterprise customer logo image path.
Arguments:
instance (:class:`.EnterpriseCustomerBrandingConfiguration`): EnterpriseCustomerBrandingConfiguration object
filename (str): file to upload
Returns:
path: path of image file e.g. enterprise/branding/<model.id>/<model_id>_logo.<ext>.lower() |
22,416 | def script_repr(self,imports=[],prefix=" "):
return self.pprint(imports,prefix,unknown_value=,qualify=True,
separator="\n") | Same as Parameterized.script_repr, except that X.classname(Y
is replaced with X.classname.instance(Y |
22,417 | def remove_date(self, file_path=, date=str(datetime.date.today())):
languages_exists = os.path.isfile(file_path)
if languages_exists:
with open(file_path, 'r') as inp, open('temp.csv', 'w') as out:
writer = csv.writer(out)
for row in csv.reader(inp):
if row[0] != date:
writer.writerow(row)
inp.close()
out.close()
os.remove(file_path)
os.rename("temp.csv",file_path) | Removes all rows of the associated date from the given csv file.
Defaults to today. |
22,418 | def cell_ends_with_code(lines):
if not lines:
return False
if not lines[-1].strip():
return False
if lines[-1].startswith('#'):
return False
return True | Is the last line of the cell a line with code? |
22,419 | def delete(self):
r = self._client._request('DELETE', self._client._build_url('property', property_id=self.id))
if r.status_code != requests.codes.no_content:
raise APIError("Could not delete property: {} with id {}".format(self.name, self.id)) | Delete this property.
:return: None
:raises APIError: if delete was not successful |
22,420 | def collect_and_zip_files(dir_list, output_dir, zip_file_name, file_extension_list=None, file_name_list=None):
temp_list = list()
if isinstance(dir_list, list):
for dir_name in dir_list:
if not __os.path.isdir(dir_name):
error = .format(dir_name)
LOGGER.critical(error)
raise Exception(error)
else:
error = .format(type(dir_list))
LOGGER.critical(error)
raise TypeError(error)
if not file_extension_list and not file_name_list:
for dir_name in dir_list:
temp_files_list = list_files_in_directory(dir_name)
for file_name in temp_files_list:
temp_list.append(__os.path.join(dir_name, file_name))
if file_extension_list:
if isinstance(file_extension_list, list):
for dir_name in dir_list:
temp_files_list = list_files_in_directory(dir_name)
for file_name in temp_files_list:
garbage, extension = file_name.split('.')
if extension in file_extension_list:
temp_list.append(__os.path.join(dir_name, file_name))
else:
error = \
.format(type(file_extension_list))
LOGGER.critical(error)
raise TypeError(error)
if file_name_list:
if isinstance(file_name_list, list):
for dir_name in dir_list:
temp_files_list = list_files_in_directory(dir_name)
for file_name in temp_files_list:
if file_name in file_name_list:
temp_list.append(__os.path.join(dir_name, file_name))
else:
error = \
.format(type(file_name_list))
LOGGER.critical(error)
raise TypeError(error)
if len(zip_file_name.split('.')) == 2:
name, ext = zip_file_name.split('.')
if ext != 'zip':
LOGGER.warning(.format(zip_file_name))
zip_file_name = '{}.{}'.format(name, 'zip')
else:
error = \
.format(zip_file_name)
LOGGER.critical(error)
raise NameError(error)
with __zipfile.ZipFile(__os.path.join(output_dir, zip_file_name), 'w') as the_zip_file:
for file in temp_list:
the_zip_file.write(file)
the_zip_file.close() | Function to collect files and make a zip file
:param dir_list: A list of directories
:param output_dir: The output directory
:param zip_file_name: Zip file name
:param file_extension_list: A list of extensions of files to find
:param file_name_list: A list of file names to find
:return:
Outputs a zip file
Note: If no file_extension_list and file_name_list are provided it will zip the entire directory. |
22,421 | def get_all_regions_with_tiles(self):
for key in self.get_all_keys():
(layer, rx, ry) = struct.unpack(, key)
if layer == 1:
yield (rx, ry) | Generator which yields a set of (rx, ry) tuples which describe
all regions for which the world has tile data |
22,422 | def gen_mu(K, delta, c):
S = c * log(K/delta) * sqrt(K)
tau = gen_tau(S, K, delta)
rho = gen_rho(K)
normalizer = sum(rho) + sum(tau)
return [(rho[d] + tau[d])/normalizer for d in range(K)] | The Robust Soliton Distribution on the degree of
transmitted blocks |
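gen_mu above relies on gen_tau and gen_rho, which are not included in this row; a sketch of the standard Ideal Soliton rho and spike tau used in the Robust Soliton construction (an assumption about those helpers, following the usual LT-code formulation):
from math import floor, log

def gen_rho(K):
    # Ideal Soliton distribution: rho(1) = 1/K, rho(d) = 1/(d*(d-1)) for d = 2..K.
    # Returned 0-indexed, so rho[d-1] holds the mass at degree d, matching gen_mu above.
    return [1.0 / K] + [1.0 / (d * (d - 1)) for d in range(2, K + 1)]

def gen_tau(S, K, delta):
    # Extra "spike" term of the Robust Soliton distribution with its pivot at K/S.
    pivot = int(floor(K / S))
    return [S / K / d for d in range(1, pivot)] \
         + [S / K * log(S / delta)] \
         + [0.0] * (K - pivot)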
22,423 | def __calculate_bu_dfs(dfs_data):
u = dfs_data[][0]
b = {}
b[u] = D(u, dfs_data)
__calculate_bu_dfs_recursively(u, b, dfs_data)
return b | Calculates the b(u) lookup table. |
22,424 | def rootChild_resetPassword(self, req, webViewer):
from xmantissa.signup import PasswordResetResource
return PasswordResetResource(self.store) | Return a page which will allow the user to re-set their password. |
22,425 | def set_target_ref(self, value):
if value is None or not isinstance(value, str):
raise TypeError("TargetRef is required and must be set to a String")
else:
self.__target_ref = value | Setter for 'target_ref' field.
:param value - a new value of 'target_ref' field. Required field. Must be a String type. |
22,426 | def _build_pcollection(self, pipeline, filepaths, language):
beam = tfds.core.lazy_imports.apache_beam
def _extract_content(filepath):
logging.info("generating examples from = %s", filepath)
with tf.io.gfile.GFile(filepath) as f:
for _, elem in etree.iterparse(f, events=("end",)):
if not elem.tag.endswith("page"):
continue
namespace = elem.tag[:-4]
title = elem.find("./{0}title".format(namespace)).text
ns = elem.find("./{0}ns".format(namespace)).text
if ns != "0":
continue
raw_content = elem.find(
"./{0}revision/{0}text".format(namespace)).text
elem.clear()
if raw_content is None or raw_content.lower().startswith("
beam.metrics.Metrics.counter(language, "filtered-redirects").inc()
continue
beam.metrics.Metrics.counter(language, "extracted-examples").inc()
yield (title, raw_content)
def _clean_content(inputs):
title, raw_content = inputs
try:
text = _parse_and_clean_wikicode(raw_content)
except (
tfds.core.lazy_imports.mwparserfromhell.parser.ParserError) as e:
beam.metrics.Metrics.counter(language, "parser-error").inc()
logging.error("mwparserfromhell ParseError: %s", e)
return
beam.metrics.Metrics.counter(language, "cleaned-examples").inc()
yield {
"title": title,
"text": text
}
return (
pipeline
| beam.Create(filepaths)
| beam.FlatMap(_extract_content)
| beam.FlatMap(_clean_content)
) | Build PCollection of examples in the raw (text) form. |
22,427 | def slamdunkGeneralStatsTable(self):
headers = OrderedDict()
headers[] = {
: .format(config.read_count_prefix),
: UTRs ({})shared_keyread_countminformat{:,.2f}scaleYlGnmodifyretainedtitle{} Retaineddescription
}
self.general_stats_addcols(self.slamdunk_data, headers) | Take the parsed summary stats from Slamdunk and add it to the
basic stats table at the top of the report |
22,428 | def apply_update(self, value, index):
_log.debug('Recording {} measurement, index={}, value={}'.format(type(value).__name__, index, value.value))
builder = asiodnp3.UpdateBuilder()
builder.Update(value, index)
update = builder.Build()
OutstationApplication.get_outstation().Apply(update) | Record an opendnp3 data value (Analog, Binary, etc.) in the outstation's database.
The data value gets sent to the Master as a side-effect.
:param value: An instance of Analog, Binary, or another opendnp3 data value.
:param index: (integer) Index of the data definition in the opendnp3 database. |
22,429 | def syncdb(self, site=None, all=0, database=None, ignore_errors=1):
r = self.local_renderer
ignore_errors = int(ignore_errors)
post_south = self.version_tuple >= (1, 7, 0)
use_run_syncdb = self.version_tuple >= (1, 9, 0)
r.env.db_syncdb_all_flag = '--all' if int(all) else ''
r.env.db_syncdb_database = ''
if database:
r.env.db_syncdb_database = ' --database=%s' % database
if self.is_local:
r.env.project_dir = r.env.local_project_dir
site = site or self.genv.SITE
for _site, site_data in r.iter_unique_databases(site=site):
r.env.SITE = _site
with self.settings(warn_only=ignore_errors):
if post_south:
if use_run_syncdb:
r.run_or_local(
)
else:
r.run_or_local(
)
else:
r.run_or_local(
) | Runs the standard Django syncdb command for one or more sites. |
22,430 | def datasets(self):
if self._datasets is None:
self._datasets = self._fetch_datasets()
return self._datasets | List of datasets in this mart. |
22,431 | def softmask(X, X_ref, power=1, split_zeros=False):
if X.shape != X_ref.shape:
raise ParameterError('Shape mismatch: {}!={}'.format(X.shape,
X_ref.shape))
if np.any(X < 0) or np.any(X_ref < 0):
raise ParameterError('X and X_ref must be non-negative')
if power <= 0:
raise ParameterError('power must be strictly positive')
dtype = X.dtype
if not np.issubdtype(dtype, np.floating):
dtype = np.float32
Z = np.maximum(X, X_ref).astype(dtype)
bad_idx = (Z < np.finfo(dtype).tiny)
Z[bad_idx] = 1
if np.isfinite(power):
mask = (X / Z)**power
ref_mask = (X_ref / Z)**power
good_idx = ~bad_idx
mask[good_idx] /= mask[good_idx] + ref_mask[good_idx]
if split_zeros:
mask[bad_idx] = 0.5
else:
mask[bad_idx] = 0.0
else:
mask = X > X_ref
return mask | Robustly compute a softmask operation.
`M = X**power / (X**power + X_ref**power)`
Parameters
----------
X : np.ndarray
The (non-negative) input array corresponding to the positive mask elements
X_ref : np.ndarray
The (non-negative) array of reference or background elements.
Must have the same shape as `X`.
power : number > 0 or np.inf
If finite, returns the soft mask computed in a numerically stable way
If infinite, returns a hard (binary) mask equivalent to `X > X_ref`.
Note: for hard masks, ties are always broken in favor of `X_ref` (`mask=0`).
split_zeros : bool
If `True`, entries where `X` and X`_ref` are both small (close to 0)
will receive mask values of 0.5.
Otherwise, the mask is set to 0 for these entries.
Returns
-------
mask : np.ndarray, shape=`X.shape`
The output mask array
Raises
------
ParameterError
If `X` and `X_ref` have different shapes.
If `X` or `X_ref` are negative anywhere
If `power <= 0`
Examples
--------
>>> X = 2 * np.ones((3, 3))
>>> X_ref = np.vander(np.arange(3.0))
>>> X
array([[ 2., 2., 2.],
[ 2., 2., 2.],
[ 2., 2., 2.]])
>>> X_ref
array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]])
>>> librosa.util.softmask(X, X_ref, power=1)
array([[ 1. , 1. , 0.667],
[ 0.667, 0.667, 0.667],
[ 0.333, 0.5 , 0.667]])
>>> librosa.util.softmask(X_ref, X, power=1)
array([[ 0. , 0. , 0.333],
[ 0.333, 0.333, 0.333],
[ 0.667, 0.5 , 0.333]])
>>> librosa.util.softmask(X, X_ref, power=2)
array([[ 1. , 1. , 0.8],
[ 0.8, 0.8, 0.8],
[ 0.2, 0.5, 0.8]])
>>> librosa.util.softmask(X, X_ref, power=4)
array([[ 1. , 1. , 0.941],
[ 0.941, 0.941, 0.941],
[ 0.059, 0.5 , 0.941]])
>>> librosa.util.softmask(X, X_ref, power=100)
array([[ 1.000e+00, 1.000e+00, 1.000e+00],
[ 1.000e+00, 1.000e+00, 1.000e+00],
[ 7.889e-31, 5.000e-01, 1.000e+00]])
>>> librosa.util.softmask(X, X_ref, power=np.inf)
array([[ True, True, True],
[ True, True, True],
[False, False, True]], dtype=bool) |
22,432 | def init(self):
"Initialize the message-digest and set all fields to zero."
self.length = 0
self.input = []
self.H0 = 0x67452301
self.H1 = 0xEFCDAB89
self.H2 = 0x98BADCFE
self.H3 = 0x10325476
self.H4 = 0xC3D2E1F0 | Initialize the message-digest and set all fields to zero. |
22,433 | def build_type_dict(converters):
more_types = {}
for converter in converters:
assert callable(converter)
more_types[converter.name] = converter
return more_types | Builds type dictionary for user-defined type converters,
used by :mod:`parse` module.
This requires that each type converter has a "name" attribute.
:param converters: List of type converters (parse_types)
:return: Type converter dictionary |
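A hedged usage sketch with the parse library (the Number converter below is a made-up example; build_type_dict only requires that each converter carries a .name attribute):
import parse

@parse.with_pattern(r"\d+")
def parse_number(text):
    return int(text)
parse_number.name = "Number"          # attribute read by build_type_dict()

extra_types = build_type_dict([parse_number])
result = parse.parse("Buy {amount:Number} apples", "Buy 3 apples", extra_types)
print(result["amount"])               # -> 3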
22,434 | def density_water(temp):
ut.check_range([temp, ">0", "Temperature in Kelvin"])
rhointerpolated = interpolate.CubicSpline(WATER_DENSITY_TABLE[0],
WATER_DENSITY_TABLE[1])
return np.asscalar(rhointerpolated(temp)) | Return the density of water at a given temperature.
If given units, the function will automatically convert to Kelvin.
If not given units, the function will assume Kelvin. |
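A usage sketch; the exact value depends on WATER_DENSITY_TABLE (not shown in this row), but physically the result should be close to the handbook density of water:
rho = density_water(298.15)   # 25 deg C expressed in Kelvin
print(rho)                    # expected to be roughly 997 kg/m^3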
22,435 | def handle(self, *args, **options):
for index in options.pop("indexes"):
data = {}
try:
data = self.do_index_command(index, **options)
except TransportError as ex:
logger.warning("ElasticSearch threw an error: %s", ex)
data = {"index": index, "status": ex.status_code, "reason": ex.error}
finally:
logger.info(data) | Run do_index_command on each specified index and log the output. |
22,436 | def ppiece(self, content):
# Unpack the piece header ("!ii"), check the piece hash against the torrent's
# info pieces, write matching pieces out to the target file(s) ("writing piece {}.
# Length is {}" / "Download complete"), and discard pieces whose hash does not match.
self.piece = self.init_piece()
self.request_all() | Process a piece that we've received from a peer, writing it out to
one or more files |
22,437 | def xirr(values, dates, guess=0):
if isinstance(values, Range):
values = values.values
if isinstance(dates, Range):
dates = dates.values
if guess is not None and guess != 0:
raise ValueError( % guess)
else:
try:
return scipy.optimize.newton(lambda r: xnpv(r, values, dates, lim_rate=False), 0.0)
except RuntimeError:
return scipy.optimize.brentq(lambda r: xnpv(r, values, dates, lim_rate=False), -1.0, 1e10) | Function to calculate the internal rate of return (IRR) using payments and non-periodic dates. It resembles the
excel function XIRR().
Excel reference: https://support.office.com/en-ie/article/xirr-function-de1242ec-6477-445b-b11b-a303ad9adc9d
:param values: the payments of which at least one has to be negative.
:param dates: the dates as excel dates (e.g. 43571 for 16/04/2019).
:param guess: an initial guess which is required by Excel but isn't used by this function.
:return: a float being the IRR. |
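A hypothetical call, assuming the module's xnpv() helper and scipy are importable; the dates are Excel serial numbers exactly one year apart, so the IRR works out to about 10%:
values = [-100.0, 110.0]      # one outflow followed by one inflow
dates = [43466, 43831]        # Excel serials for 2019-01-01 and 2020-01-01
print(xirr(values, dates))    # ~0.10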
22,438 | def print_dictionary(self, d, h, n, nl=False):
if d in h:
return "{}..."
h.append(d)
s = []
if nl:
s.append("\n")
s.append(self.indent(n))
s.append("{")
for item in d.items():
s.append("\n")
s.append(self.indent(n+1))
if isinstance(item[1], (list,tuple)):
s.append(tostr(item[0]))
s.append("[]")
else:
s.append(tostr(item[0]))
s.append(" = ")
s.append(self.process(item[1], h, n, True))
s.append("\n")
s.append(self.indent(n))
s.append("}")
h.pop()
return "".join(s) | Print complex using the specified indent (n) and newline (nl). |
22,439 | def interpolate_colors(array: numpy.ndarray, x: int) -> numpy.ndarray:
out_array = []
for i in range(x):
if i % (x / (len(array) - 1)) == 0:
index = i / (x / (len(array) - 1))
out_array.append(array[int(index)])
else:
start_marker = array[math.floor(i / (x / (len(array) - 1)))]
stop_marker = array[math.ceil(i / (x / (len(array) - 1)))]
interp_amount = i % (x / (len(array) - 1)) / (x / (len(array) - 1))
interp_color = numpy.rint(start_marker + ((stop_marker - start_marker) * interp_amount))
out_array.append(interp_color)
out_array[-1] = array[-1]
return numpy.array(out_array).astype(numpy.uint8) | Creates a color map for values in array
:param array: color map to interpolate
:param x: number of colors
:return: interpolated color map |
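A usage sketch of the ramp function above (it assumes numpy and math are imported in the defining module):
import numpy

colors = numpy.array([[0, 0, 0], [255, 255, 255]], dtype=float)
print(interpolate_colors(colors, 5))
# [[  0   0   0]
#  [ 51  51  51]
#  [102 102 102]
#  [153 153 153]
#  [255 255 255]]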
22,440 | def get_key(key_name, region=None, key=None, keyid=None, profile=None):
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
key = conn.get_key_pair(key_name)
log.debug("the key to return is : %s", key)
if key is None:
return False
return key.name, key.fingerprint
except boto.exception.BotoServerError as e:
log.debug(e)
return False | Check to see if a key exists. Returns fingerprint and name if
it does and False if it doesn't
CLI Example:
.. code-block:: bash
salt myminion boto_ec2.get_key mykey |
22,441 | def country(anon, obj, field, val):
return anon.faker.country(field=field) | Returns a randomly selected country. |
22,442 | def set_top_bar_color(self, index):
dr = QtCore.Qt.ForegroundRole
c = index.model().index(index.row(), 8, index.parent()).data(dr)
if not c:
c = self.upper_fr_default_bg_color
self.upper_fr.setStyleSheet("* { background-color: rgb(%s, %s, %s); }" % (c.red(), c.green(), c.blue()))
:param index: the index
:type index: :class:`QtGui.QModelIndex`
:returns: None
:rtype: None
:raises: None |
22,443 | def create_ssh_pub_key(self, name, key):
self.nova.keypairs.create(name, key) | Installs the public SSH key under the name :attr:`name`.
Once installed, the key can be referenced when creating new server
instances. |
22,444 | async def end_takeout(self, success):
try:
async with _TakeoutClient(True, self, None) as takeout:
takeout.success = success
except ValueError:
return False
return True | Finishes a takeout, with specified result sent back to Telegram.
Returns:
``True`` if the operation was successful, ``False`` otherwise. |
22,445 | def handle_job_exception(self, exception, variables=None):
try:
error_str, traceback_str = six.text_type(exception), traceback.format_exc()
except Exception:
self.metrics.counter().increment()
error_str, traceback_str = , traceback.format_exc()
self.logger.exception(exception)
if not isinstance(traceback_str, six.text_type):
try:
traceback_str = traceback_str.decode('utf-8')
except UnicodeDecodeError:
traceback_str =
error_dict = {
'code': ERROR_CODE_SERVER_ERROR,
'message': 'Internal server error: %s' % error_str,
'traceback': traceback_str,
}
if variables is not None:
try:
error_dict['variables'] = {key: repr(value) for key, value in variables.items()}
except Exception:
self.metrics.counter().increment()
error_dict['variables'] = 'Error formatting variables'
return JobResponse(errors=[error_dict]) | Makes and returns a last-ditch error response.
:param exception: The exception that happened
:type exception: Exception
:param variables: A dictionary of context-relevant variables to include in the error response
:type variables: dict
:return: A `JobResponse` object
:rtype: JobResponse |
22,446 | def update_image(self, ami_id, instance_id):
log = logging.getLogger(self.cls_logger + )
if not isinstance(ami_id, basestring):
msg = .format(t=ami_id.__class__.__name__)
raise ImageUtilError(msg)
if not isinstance(instance_id, basestring):
msg = .format(t=instance_id.__class__.__name__)
raise ImageUtilError(msg)
if ami_id is None or instance_id is None:
raise ImageUtilError()
log.info(.format(
a=ami_id, i=instance_id))
try:
ami_info = self.ec2.describe_images(DryRun=False, ImageIds=[ami_id], Owners=[self.owner_id])
except ClientError:
_, ex, trace = sys.exc_info()
msg = .format(n=ex.__class__.__name__, a=ami_id, e=str(ex))
raise AWSAPIError, msg, trace
log.debug(.format(a=ami_info))
cons3rt_uuid = None
try:
image_tags = ami_info['Images'][0]['Tags']
for image_tag in image_tags:
if image_tag[] == :
cons3rt_uuid = image_tag[]
except KeyError:
_, ex, trace = sys.exc_info()
msg = .format(
a=ami_id, n=ex.__class__.__name__, e=str(ex))
raise ImageUtilError, msg, trace
if cons3rt_uuid is None:
raise ImageUtilError(.format(a=ami_id))
log.info(.format(u=cons3rt_uuid))
log.debug(.format(t=image_tags))
try:
snapshot_id = ami_info['Images'][0]['BlockDeviceMappings'][0]['Ebs']['SnapshotId']
except KeyError:
_, ex, trace = sys.exc_info()
raise ImageUtilError(.format(
n=ex.__class__.__name__, a=ami_id, e=str(ex)))
log.info(.format(s=snapshot_id))
try:
image_name = ami_info['Images'][0]['Name']
except KeyError:
_, ex, trace = sys.exc_info()
raise ImageUtilError(.format(
n=ex.__class__.__name__, a=ami_id, e=str(ex)))
log.info(.format(n=image_name))
try:
image_description = ami_info['Images'][0]['Description']
except KeyError:
_, ex, trace = sys.exc_info()
log.warn(.format(
n=ex.__class__.__name__, a=ami_id, e=str(ex)))
image_description = ''
log.info(.format(d=image_description))
log.debug(.format(a=ami_id))
try:
self.ec2.deregister_image(DryRun=False, ImageId=ami_id)
except ClientError:
_, ex, trace = sys.exc_info()
msg = .format(n=ex.__class__.__name__, a=ami_id, e=str(ex))
raise ImageUtilError, msg, trace
log.info(.format(a=ami_id))
log.info()
time.sleep(20)
log.info(.format(i=instance_id))
try:
create_res = self.ec2.create_image(
DryRun=False,
InstanceId=instance_id,
Name=image_name,
Description=image_description,
NoReboot=False
)
except ClientError:
_, ex, trace = sys.exc_info()
msg = .format(
n=ex.__class__.__name__, m=image_name, i=instance_id, e=str(ex))
raise ImageUtilError, msg, trace
try:
new_ami_id = create_res['ImageId']
except KeyError:
_, ex, trace = sys.exc_info()
msg = .format(
n=ex.__class__.__name__, i=instance_id, e=str(ex))
raise ImageUtilError, msg, trace
log.info(.format(w=new_ami_id))
log.info(.format(w=new_ami_id))
time.sleep(20)
try:
self.ec2.create_tags(DryRun=False, Resources=[new_ami_id], Tags=image_tags)
except ClientError:
_, ex, trace = sys.exc_info()
msg = .format(
n=ex.__class__.__name__, i=new_ami_id, t=image_tags, e=str(ex))
raise ImageUtilError, msg, trace
log.info(.format(w=new_ami_id, t=image_tags))
log.debug(.format(s=snapshot_id))
try:
self.ec2.delete_snapshot(DryRun=False, SnapshotId=snapshot_id)
except ClientError:
_, ex, trace = sys.exc_info()
msg = .format(
n=ex.__class__.__name__, s=snapshot_id, e=str(ex))
raise ImageUtilError, msg, trace | Replaces an existing AMI ID with an image created from the provided
instance ID
:param ami_id: (str) ID of the AMI to delete and replace
:param instance_id: (str) ID of the instance ID to create an image from
:return: None |
22,447 | def pdchAssignmentCommand(ChannelDescription_presence=0,
CellChannelDescription_presence=0,
MobileAllocation_presence=0,
StartingTime_presence=0, FrequencyList_presence=0,
ChannelDescription_presence1=0,
FrequencyChannelSequence_presence=0,
MobileAllocation_presence1=0,
PacketChannelDescription_presence=0,
DedicatedModeOrTBF_presence=0):
a = TpPd(pd=0x6)
b = MessageType(mesType=0x23)
c = ChannelDescription()
packet = a / b / c
if ChannelDescription_presence is 1:
d = ChannelDescriptionHdr(ieiCD=0x62, eightBitCD=0x0)
packet = packet / d
if CellChannelDescription_presence is 1:
e = CellChannelDescriptionHdr(ieiCCD=0x05, eightBitCCD=0x0)
packet = packet / e
if MobileAllocation_presence is 1:
f = MobileAllocationHdr(ieiMA=0x72, eightBitMA=0x0)
packet = packet / f
if StartingTime_presence is 1:
g = StartingTimeHdr(ieiST=0x7C, eightBitST=0x0)
packet = packet / g
if FrequencyList_presence is 1:
h = FrequencyListHdr(ieiFL=0x19, eightBitFL=0x0)
packet = packet / h
if ChannelDescription_presence1 is 1:
i = ChannelDescriptionHdr(ieiCD=0x1C, eightBitCD=0x0)
packet = packet / i
if FrequencyChannelSequence_presence is 1:
j = FrequencyChannelSequenceHdr(ieiFCS=0x1E, eightBitFCS=0x0)
packet = packet / j
if MobileAllocation_presence1 is 1:
k = MobileAllocationHdr(ieiMA=0x21, eightBitMA=0x0)
packet = packet / k
if PacketChannelDescription_presence is 1:
l = PacketChannelDescription(ieiPCD=0x22)
packet = packet / l
if DedicatedModeOrTBF_presence is 1:
m = DedicatedModeOrTBFHdr(ieiDMOT=0x23, eightBitDMOT=0x0)
packet = packet / m
return packet | PDCH ASSIGNMENT COMMAND Section 9.1.13a |
22,448 | def _handle_pyout(self, msg):
self.log.debug("pyout: %s", msg.get(, ))
if not self._hidden and self._is_from_this_session(msg):
text = msg['content']['data']
self._append_plain_text(text + '\n', before_prompt=True) | Handle display hook output.
22,449 | def format_table(table,
align='<',
format=,
colwidth=None,
maxwidth=None,
spacing=2,
truncate=0,
suffix="..."
):
table = list(deepcopy(table))
if not isinstance(align, list):
align = [align]
if not isinstance(format, list):
format = [format]
if not isinstance(format[0], list):
format = [format]
num_cols = len(table[0])
if len(set([len(row) for row in table]))>1:
raise ValueError("All rows must have the same number of columns")
for i in range(len(table)):
table[i] = list(table[i])
colformat = format[min(i,len(format)-1)]
for j, cell in enumerate(table[i]):
f = colformat[min(j,len(colformat)-1)]
if isinstance(f, str):
fun = lambda x: f.format(x)
else:
fun = f
try:
table[i][j] = fun(cell)
except:
table[i][j] = str(cell)
if colwidth==None:
cellwidth = [[len(cell) for cell in row] for row in table]
colwidth = list(map(max, zip(*cellwidth)))
elif not isinstance(colwidth, list):
colwidth = [colwidth]
colwidth.extend([colwidth[-1]]*(num_cols-len(colwidth)))
if maxwidth==None:
maxwidth = get_terminal_size().columns-1
width = sum(colwidth)+spacing*(num_cols-1)
if width>maxwidth:
colwidth[truncate] -= (width-maxwidth)
for j, cw in enumerate(colwidth):
if cw<1:
raise RuntimeError("Column {} in format_table() has width {}. "
"Make sure all columns have width >0. "
"Read docstring for further details."
.format(j,cw)
)
s = ''
for i, row in enumerate(table):
if i != 0: s += "\n"
colalign = align[min(i,len(align)-1)]
colformat = format[min(i,len(format)-1)]
for j, col in enumerate(row):
a = colalign[min(j,len(colalign)-1)]
f = colformat[min(j,len(colformat)-1)]
w = colwidth[j]
if j!=0: s+= ' '*spacing
s += format_fit(format_time(col), w, a, suffix)
return s | Formats a table represented as an iterable of iterable into a nice big string
suitable for printing.
Parameters:
-----------
align : string or list of strings
Alignment of cell contents. Each character in a string specifies
the alignment of one column.
* ``<`` - Left aligned (default)
* ``^`` - Centered
* ``>`` - Right aligned
The last alignment is repeated for unspecified columns.
If it's a list of strings, each string specifies the alignment of
one row. The last string is used repeatedly for unspecified rows.
format : string/function, or (nested) list of string/function
Formats the contents of the cells using the specified function(s)
or format string(s).
If it's a list of strings/functions each entry specifies formatting
for one column, the last entry being used repeatedly for
unspecified columns.
If it's a list of lists, each sub-list specifies one row, the last
sub-list being used repeatedly for unspecified rows.
colwidth : int, list of ints or None
The width of each column. The last width is used repeatedly for
unspecified columns. If ``None`` the width is fitted to the
contents.
maxwidth : int or None
The maximum width of the table. Defaults to terminal width minus
1 if ``None``. If the table would be wider than ``maxwidth`` one
of the columns is truncated.
spacing : int
The spacing between columns
truncate : int
Which column to truncate if table width would exceed ``maxwidth``.
Beware that no columns can have zero or negative width. If for instance
'maxwidth' is 80 and 'colwidth' is [10, 30, 30, 30] with spacing 2 the total
width will initially be 10+2+30+2+30+2+30=106. That's 26 characters too
much, so a width of 26 will be removed from the truncated column. If
'truncate' is 0, column 0 will have a width of -16 which is not permitted. |
22,450 | def gmm_cause(points, k=4, p1=2, p2=2):
g = GMM(k, covariance_type="spherical")
g.fit(np.random.randn(300, 1))
g.means_ = p1 * np.random.randn(k, 1)
g.covars_ = np.power(abs(p2 * np.random.randn(k, 1) + 1), 2)
g.weights_ = abs(np.random.rand(k))
g.weights_ = g.weights_ / sum(g.weights_)
return g.sample(points)[0].reshape(-1) | Init a root cause with a Gaussian Mixture Model w/ a spherical covariance type. |
22,451 | def _zoom_rows(self, zoom):
self.grid.SetDefaultRowSize(self.grid.std_row_size * zoom,
resizeExistingRows=True)
self.grid.SetRowLabelSize(self.grid.row_label_size * zoom)
for row, tab in self.code_array.row_heights:
if tab == self.grid.current_table and \
row < self.grid.code_array.shape[0]:
base_row_width = self.code_array.row_heights[(row, tab)]
if base_row_width is None:
base_row_width = self.grid.GetDefaultRowSize()
zoomed_row_size = base_row_width * zoom
self.grid.SetRowSize(row, zoomed_row_size) | Zooms grid rows |
22,452 | def get_cached_item(cache_key, func, *func_args, **func_kwargs):
key = get_cache_key(cache_key, func, *func_args, **func_kwargs)
return cache.get(key) | Not a decorator, but a helper function to retrieve the cached
item for a key created via get_cache_key.
Args:
- cache_key: if there was a specific cache key used to cache the
function, it should be provided here. If not this should be None
- func: the function which was cached
- *func_args: arguments of the function
- **func_kwargs: keyword arguments of this function |
22,453 | def field(*, default=MISSING, default_factory=MISSING, init=True, repr=True,
hash=None, compare=True, metadata=None):
if default is not MISSING and default_factory is not MISSING:
raise ValueError('cannot specify both default and default_factory')
return Field(default, default_factory, init, repr, hash, compare,
metadata) | Return an object to identify dataclass fields.
default is the default value of the field. default_factory is a
0-argument function called to initialize a field's value. If init
is True, the field will be a parameter to the class's __init__()
function. If repr is True, the field will be included in the
object's repr(). If hash is True, the field will be included in
the object's hash(). If compare is True, the field will be used
in comparison functions. metadata, if specified, must be a
mapping which is stored but not otherwise examined by dataclass.
It is an error to specify both default and default_factory. |
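This mirrors the standard-library dataclasses.field; a short usage example of the default/default_factory distinction:
from dataclasses import dataclass, field

@dataclass
class Bag:
    items: list = field(default_factory=list)    # a fresh list per instance
    label: str = field(default="unlabelled", repr=False)

b = Bag()
b.items.append("apple")
print(b)    # Bag(items=['apple']) -- label is hidden because repr=False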
22,454 | def save_yaml_file(file, val):
opened = False
if not hasattr(file, "write"):
file = io.open(file, "w", encoding="utf-8")
opened = True
try:
yaml.dump(val, file)
finally:
if opened:
file.close() | Save data to yaml file
:param file: Writable object or path to file
:type file: FileIO | str | unicode
:param val: Value or struct to save
:type val: None | int | float | str | unicode | list | dict |
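A usage sketch of the helper above (file names are illustrative):
import io

# Pass a path and let the helper open/close the file itself...
save_yaml_file("config.yml", {"name": "demo", "items": [1, 2, 3]})

# ...or pass an already-open stream, which the helper leaves open.
with io.open("config.yml", "w", encoding="utf-8") as f:
    save_yaml_file(f, {"name": "demo"})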
22,455 | def submatrix(matrix,i1,i2,j1,j2):
new = []
for i in range(i1,i2+1):
new.append(matrix[i][j1:j2+1])
return _n.array(new) | returns the submatrix defined by the index bounds i1-i2 and j1-j2
Endpoints included! |
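A quick worked example (the snippet above refers to numpy as _n, so that import is assumed in its module):
m = [[1, 2, 3],
     [4, 5, 6],
     [7, 8, 9]]
print(submatrix(m, 0, 1, 1, 2))
# [[2 3]
#  [5 6]]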
22,456 | def _unassembled_reads2_out_file_name(self):
if self.Parameters['-2'].isOn():
unassembled_reads2 = self._absolute(
str(self.Parameters['-2'].Value))
else:
raise ValueError("No reads2 (flag -2) output path specified")
return unassembled_reads2 | Checks if file name is set for reads2 output.
Returns absolute path. |
22,457 | def add_legend(self):
cuts = [tag for tag in self.tags if tag is not self._new_cut]
self.cuts_plot.ax.legend(cuts, loc=,
shadow=True, fancybox=True,
prop={'size': 8}, labelspacing=0.2) | Add or update Cuts plot legend.
22,458 | def build_url(self, data):
query_part_one = []
query_part_two = []
keys_to_be_removed = []
for key, value in data.items():
if key not in [, , ]:
if key == :
query_part_one.append(.join(str(val) for val in value))
keys_to_be_removed.append(key)
elif key == :
query_part_one.append(value)
keys_to_be_removed.append(key)
else:
if isinstance(value, list):
value = .join(str(val) for val in value)
query_part_two.append(.format(key, value))
keys_to_be_removed.append(key)
for k in keys_to_be_removed:
del data[k]
data[] = .format(.join(query_part_one),
.join(query_part_two))
return data | This method occurs after dumping the data into the class.
Args:
data (dict): dictionary of all the query values
Returns:
data (dict): ordered dict of all the values |
22,459 | def get_content_size(self, path):
"Return size of files/dirs contents excluding parent node."
node = self._get_node(path)
return self._get_content_size(node) - node.size | Return size of files/dirs contents excluding parent node. |
22,460 | def list_sub_commmands(self, cmd_name, cmd):
ret = {}
if isinstance(cmd, click.core.Group):
for sub_cmd_name in cmd.commands:
sub_cmd = cmd.commands[sub_cmd_name]
sub = self.list_sub_commmands(sub_cmd_name, sub_cmd)
if sub:
if isinstance(sub, dict):
for n, c in sub.items():
ret['%s %s' % (cmd_name, n)] = c
else:
ret['%s %s' % (cmd_name, sub[0])] = sub[1]
elif isinstance(cmd, click.core.Command):
return (cmd.name, cmd)
return ret | Return all commands for a group |
22,461 | def detect_emg_activations(emg_signal, sample_rate, smooth_level=20, threshold_level=10,
time_units=False, volts=False, resolution=None, device="biosignalsplux",
plot_result=False):
if volts is True:
if resolution is not None:
emg_signal = raw_to_phy("EMG", device, emg_signal, resolution, option="mV")
units = "mV"
else:
raise RuntimeError(
"For converting raw units to mV is mandatory the specification of acquisition "
"resolution.")
else:
units = "Input Units"
if time_units is True:
time_units_str = "Time (s)"
time = numpy.linspace(0, len(emg_signal) / sample_rate, len(emg_signal))
else:
time = numpy.linspace(0, len(emg_signal) - 1, len(emg_signal))
time_units_str = "Sample Number"
pre_pro_signal = numpy.array(emg_signal) - numpy.average(emg_signal)
low_cutoff = 10
high_cutoff = 300
pre_pro_signal = _butter_bandpass_filter(pre_pro_signal, low_cutoff, high_cutoff, sample_rate)
tkeo = []
for i, signal_sample in enumerate(pre_pro_signal):
if i in (0, len(pre_pro_signal) - 1):
tkeo.append(signal_sample)
else:
tkeo.append(numpy.power(signal_sample, 2) - (pre_pro_signal[i + 1] *
pre_pro_signal[i - 1]))
smoothing_level = int((smooth_level / 100) * sample_rate)
rect_signal = numpy.absolute(tkeo)
rect_signal = _moving_average(rect_signal, sample_rate / 10)
smooth_signal = []
for i in range(0, len(rect_signal)):
if smoothing_level < i < len(rect_signal) - smoothing_level:
smooth_signal.append(numpy.mean(rect_signal[i - smoothing_level:i + smoothing_level]))
else:
smooth_signal.append(0)
avg_pre_pro_signal = numpy.average(pre_pro_signal)
std_pre_pro_signal = numpy.std(pre_pro_signal)
threshold_level = avg_pre_pro_signal + _thres_norm_reg(threshold_level, smooth_signal,
pre_pro_signal) * std_pre_pro_signal
binary_signal = []
for i in range(0, len(time)):
if smooth_signal[i] >= threshold_level:
binary_signal.append(1)
else:
binary_signal.append(0)
diff_signal = numpy.diff(binary_signal)
act_begin = numpy.where(diff_signal == 1)[0]
act_end = numpy.where(diff_signal == -1)[0]
if time_units is True:
time_begin = numpy.array(time)[act_begin]
time_end = numpy.array(time)[act_end]
else:
time_begin = act_begin
time_end = act_end
if plot_result is True:
plot([list(time), list(time)], [list(emg_signal), list(numpy.array(binary_signal) *
numpy.max(emg_signal))],
yAxisLabel=["Data Samples (" + units + ")"] * 2,
x_axis_label=time_units_str, legend=["EMG Signal", "Activation Signal"])
return time_begin, time_end, smooth_signal, threshold_level | -----
Brief
-----
Python implementation of Burst detection algorithm using Teager Kaiser Energy Operator.
-----------
Description
-----------
Activation events in EMG readings correspond to an increase of muscular activity, namely, from inaction to action.
These events are characterised by an increase in electric potential that returns to the initial values when the
muscle returns to a state of inaction.
This function detects activation events using the Teager Kaiser Energy Operator.
----------
Parameters
----------
emg_signal : list
List of EMG acquired samples.
sample_rate : int
Sampling frequency.
smooth_level : number
Defines a percentage proportional to the smoothing level, i.e. the bigger this value is,
the more smoothed is the signal.
threshold_level : number
Specification of the single threshold position, used for distinguishing between activation
(above) and inactivation samples (below).
time_units : boolean
If True this function will return the Burst begin and end positions in seconds.
volts : boolean
If True, then the conversion of raw units to mV will be done. Resolution need to be
specified.
resolution : int
Selected resolution for data acquisition.
device : str
Specification of the device category.
plot_result : boolean
If True it will be presented a graphical representation of the detected burst in the EMG
signal.
Returns
-------
out : bursts begin (ndarray), bursts end (ndarray)
Begin and end of bursts (sample number or time instant in seconds).
smooth_signal: list
It is returned the smoothed EMG signal (after the processing steps intended to simplify the
signal).
threshold_level: float
The value of the detection threshold used to locate the begin and end of each muscular
activation period. |
22,462 | def _get_private_room(self, invitees: List[User]):
return self._client.create_room(
None,
invitees=[user.user_id for user in invitees],
is_public=False,
) | Create an anonymous, private room and invite peers |
22,463 | def import_object(name):
parts = name.split('.')
if len(parts) < 2:
raise ValueError("Invalid name " % name)
module_name = ".".join(parts[:-1])
obj_name = parts[-1]
module = importlib.import_module(module_name)
return getattr(module, obj_name) | Import module and return object from it. *name* is :class:`str` in
format ``module.path.ObjectClass``.
::
>>> import_object('module.path.ObjectClass')
<class 'module.path.ObjectClass'> |
22,464 | def get_p2o_params_from_url(cls, url):
return params | Get the p2o params given a URL for the data source |
22,465 | def on_ok(self, sender):
logger.debug("in on_ok with sender %s" % sender)
if sender == self.ion_task and not self.transfer_done:
ion_structure = self.ion_task.get_final_structure()
self.ioncell_task._change_structure(ion_structure)
self.transfer_done = True
self.ioncell_task.unlock(source_node=self)
elif sender == self.ioncell_task and self.target_dilatmx:
actual_dilatmx = self.ioncell_task.get_inpvar('dilatmx', 1.)
if self.target_dilatmx < actual_dilatmx:
self.ioncell_task.reduce_dilatmx(target=self.target_dilatmx)
self.history.info(
.format(actual_dilatmx, self.ioncell_task.get_inpvar('dilatmx')))
self.ioncell_task.reset_from_scratch()
return super().on_ok(sender) | This callback is called when one task reaches status S_OK.
If sender == self.ion_task, we update the initial structure
used by self.ioncell_task and we unlock it so that the job can be submitted. |
22,466 | def process_tag(self, tag):
try:
if not self._is_function(tag):
self._tag_type_processor[tag.data_type](tag)
except KeyError as ex:
raise Exception(
.format(
tag.data_type,
tag.name),
ex) | Processes tag and detects which function to use |
22,467 | def _datetime_to_stata_elapsed_vec(dates, fmt):
index = dates.index
NS_PER_DAY = 24 * 3600 * 1000 * 1000 * 1000
US_PER_DAY = NS_PER_DAY / 1000
def parse_dates_safe(dates, delta=False, year=False, days=False):
d = {}
if is_datetime64_dtype(dates.values):
if delta:
delta = dates - stata_epoch
d['delta'] = delta.values.astype(
np.int64) // 1000
if days or year:
dates = DatetimeIndex(dates)
d['year'], d['month'] = dates.year, dates.month
if days:
days = (dates.astype(np.int64) -
to_datetime(d['year'], format='%Y').astype(np.int64))
d['days'] = days // NS_PER_DAY
elif infer_dtype(dates, skipna=False) == 'datetime.datetime':
if delta:
delta = dates.values - stata_epoch
f = lambda x: \
US_PER_DAY * x.days + 1000000 * x.seconds + x.microseconds
v = np.vectorize(f)
d['delta'] = v(delta)
if year:
year_month = dates.apply(lambda x: 100 * x.year + x.month)
d['year'] = year_month.values // 100
d['month'] = (year_month.values - d['year'] * 100)
if days:
f = lambda x: (x - datetime.datetime(x.year, 1, 1)).days
v = np.vectorize(f)
d['days'] = v(dates)
else:
raise ValueError('Columns containing dates must contain '
'either datetime64, datetime.datetime or null values.')
return DataFrame(d, index=index)
bad_loc = isna(dates)
index = dates.index
if bad_loc.any():
dates = Series(dates)
if is_datetime64_dtype(dates):
dates[bad_loc] = to_datetime(stata_epoch)
else:
dates[bad_loc] = stata_epoch
if fmt in ["%tc", "tc"]:
d = parse_dates_safe(dates, delta=True)
conv_dates = d.delta / 1000
elif fmt in ["%tC", "tC"]:
warnings.warn("Stata Internal Format tC not supported.")
conv_dates = dates
elif fmt in ["%td", "td"]:
d = parse_dates_safe(dates, delta=True)
conv_dates = d.delta // US_PER_DAY
elif fmt in ["%tw", "tw"]:
d = parse_dates_safe(dates, year=True, days=True)
conv_dates = (52 * (d.year - stata_epoch.year) + d.days // 7)
elif fmt in ["%tm", "tm"]:
d = parse_dates_safe(dates, year=True)
conv_dates = (12 * (d.year - stata_epoch.year) + d.month - 1)
elif fmt in ["%tq", "tq"]:
d = parse_dates_safe(dates, year=True)
conv_dates = 4 * (d.year - stata_epoch.year) + (d.month - 1) // 3
elif fmt in ["%th", "th"]:
d = parse_dates_safe(dates, year=True)
conv_dates = (2 * (d.year - stata_epoch.year) +
(d.month > 6).astype(np.int))
elif fmt in ["%ty", "ty"]:
d = parse_dates_safe(dates, year=True)
conv_dates = d.year
else:
raise ValueError(
"Format {fmt} is not a known Stata date format".format(fmt=fmt))
conv_dates = Series(conv_dates, dtype=np.float64)
missing_value = struct.unpack('<f', b'\x00\x00\x00\x7f')[0]
conv_dates[bad_loc] = missing_value
return Series(conv_dates, index=index) | Convert from datetime to SIF. http://www.stata.com/help.cgi?datetime
Parameters
----------
dates : Series
Series or array containing datetime.datetime or datetime64[ns] to
convert to the Stata Internal Format given by fmt
fmt : str
The format to convert to. Can be tc, td, tw, tm, tq, th, or ty. |
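The %td branch above reduces to simple day arithmetic against the Stata epoch (1960-01-01). A minimal worked example, independent of the function above and using only the standard library:

import datetime

stata_epoch = datetime.datetime(1960, 1, 1)

# %td stores a date as whole days elapsed since 1960-01-01.
date = datetime.datetime(2001, 9, 11)
td_value = (date - stata_epoch).days
print(td_value)  # 15229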
22,468 | def run(self, *args):
params = self.parser.parse_args(args)
code = self.unify(params.matching, params.sources,
params.fast_matching, params.no_strict,
params.interactive, params.recovery)
return code | Merge unique identities using a matching algorithm. |
22,469 | def add_affiliation(self, value, curated_relation=None, record=None):
if value:
affiliation = {
'value': value
}
if record:
affiliation['record'] = record
if curated_relation is not None:
affiliation['curated_relation'] = curated_relation
self._ensure_list_field('affiliations', affiliation) | Add an affiliation.
Args:
value (string): affiliation value
curated_relation (bool): is relation curated
record (dict): affiliation JSON reference |
22,470 | def step(self, state, clamping):
ns = state.copy()
for var in state:
if clamping.has_variable(var):
ns[var] = int(clamping.bool(var))
else:
or_value = 0
for clause, _ in self.in_edges_iter(var):
or_value = or_value or clause.bool(state)
if or_value:
break
ns[var] = int(or_value)
return ns | Performs a simulation step from the given state and with respect to the given clamping
Parameters
----------
state : dict
The key-value mapping describing the current state of the logical network
clamping : caspo.core.clamping.Clamping
A clamping over variables in the logical network
Returns
-------
dict
The key-value mapping describing the next state of the logical network |
22,471 | def export_default_instruments(target_folder, source_folder = None, raise_errors = False, verbose=True):
print()
instruments_to_load = get_classes_in_folder(source_folder, Instrument, verbose = True)
print()
print(instruments_to_load)
if verbose:
print((.format(len(instruments_to_load))))
loaded_instruments, failed = Instrument.load_and_append(instruments_to_load, raise_errors = raise_errors)
print()
print(loaded_instruments, failed)
for name, value in loaded_instruments.items():
filename = os.path.join(target_folder, '{:s}.b26'.format(name))
value.save_b26(filename)
if verbose:
print()
print()
print((.format(len(loaded_instruments), len(failed))))
if failed != {}:
for error_name, error in failed.items():
print((, error_name, error)) | tries to instantiate all the instruments that are imported in /instruments/__init__.py
and saves instruments that could be instantiated into a .b26 file in the folder path
Args:
target_folder: target path for .b26 files |
22,472 | def overview(name, server):
print( % name)
print(" Interop namespace: %s" % server.interop_ns)
print(" Brand: %s" % server.brand)
print(" Version: %s" % server.version)
print(" Namespaces: %s" % ", ".join(server.namespaces))
print( % server.namespace_classname)
if VERBOSE:
print( % server.cimom_inst.tomof())
print( % server)
print( % server)
try:
insts = server.conn.EnumerateInstances(,
namespace=server.interop_ns)
print( % name)
for inst in insts:
print( % inst)
except pywbem.Error as er:
print( % (name, er))
try:
insts = server.conn.EnumerateInstances(,
namespace=server.interop_ns)
print( % name)
for inst in insts:
print( % inst)
except pywbem.Error as er:
print( % (name, er)) | Overview of the server as seen through the properties of the server
class. |
22,473 | def edge_label(self, edge):
return self.get_edge_properties( edge ).setdefault( self.LABEL_ATTRIBUTE_NAME, self.DEFAULT_LABEL ) | Get the label of an edge.
@type edge: edge
@param edge: One edge.
@rtype: string
@return: Edge label |
22,474 | def secret_loader(self, callback):
if not callback or not callable(callback):
raise Exception("Please pass in a callable that loads secret keys")
self.secret_loader_callback = callback
return callback | Decorate a method that receives a key id and returns a secret key |
22,475 | def close(self):
if self.closed:
return
super().close()
if not self.upload_on_close:
return
local_loc = os.path.join(self.local_base, self.log_relative_path)
remote_loc = os.path.join(self.remote_base, self.log_relative_path)
if os.path.exists(local_loc):
with open(local_loc, 'r') as logfile:
log = logfile.read()
self.wasb_write(log, remote_loc, append=True)
if self.delete_local_copy:
shutil.rmtree(os.path.dirname(local_loc))
self.closed = True | Close and upload local log file to remote storage Wasb. |
22,476 | def discard_logcat_logs(self):
if self.driver_wrapper.is_android_test():
try:
self.driver_wrapper.driver.get_log('logcat')
except Exception:
pass | Discard previous logcat logs |
22,477 | def weighted_n(self):
if not self.is_weighted:
return float(self.unweighted_n)
return float(sum(self._cube_dict["result"]["measures"]["count"]["data"])) | float count of returned rows adjusted for weighting. |
22,478 | def is_stop_here(self, frame, event, arg):
lineno = frame.f_lineno
filename = frame.f_code.co_filename
if self.different_line and event == 'line':
if self.last_lineno == lineno and self.last_filename == filename:
return False
pass
self.last_lineno = lineno
self.last_filename = filename
if self.stop_level is not None:
if frame != self.last_frame:
self.last_level = Mstack.count_frames(frame)
self.last_frame = frame
pass
if self.last_level > self.stop_level:
return False
elif self.last_level == self.stop_level and \
self.stop_on_finish and event in ['return', 'c_return']:
self.stop_level = None
self.stop_reason = "in return for command"
return True
pass
if self._is_step_next_stop(event):
self.stop_reason = 'in step'
return True
return False | Does the magic to determine if we stop here and run a
command processor or not. If so, return True and set
self.stop_reason; if not, return False.
Determining factors can be whether a breakpoint was
encountered, whether we are stepping, next'ing, finish'ing,
and, if so, whether there is an ignore counter. |
22,479 | def add_user_to_allow(self, name, user):
if not self.remove_user_from_acl(name, user):
return False
if name not in self._acl:
return False
self._acl[name]['allow'].append(user)
return True | Add a user to the given acl allow block. |
22,480 | def stream(self, amt=2**16, decode_content=None):
while not is_fp_closed(self._fp):
data = self.read(amt=amt, decode_content=decode_content)
if data:
yield data | A generator wrapper for the read() method. A call will block until
``amt`` bytes have been read from the connection or until the
connection is closed.
:param amt:
How much of the content to read. The generator will return up to
this much data per iteration, but may return less. This is particularly
likely when using compressed data. However, the empty string will
never be returned.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header. |
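A hedged usage sketch of the generator above, assuming a urllib3-style response obtained with preload_content=False so the body is actually read lazily; the URL and output filename are placeholders:

import urllib3

http = urllib3.PoolManager()
resp = http.request('GET', 'https://example.org/large-file', preload_content=False)
with open('large-file', 'wb') as out:
    for chunk in resp.stream(amt=2**16, decode_content=True):  # up to 64 KiB per iteration
        out.write(chunk)
resp.release_conn()  # return the connection to the pool when done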
22,481 | def setting(self, name_hyphen):
if name_hyphen in self._instance_settings:
value = self._instance_settings[name_hyphen][1]
else:
msg = "No setting named " % name_hyphen
raise UserFeedback(msg)
if hasattr(value, 'startswith') and value.startswith("$"):
env_var = value.lstrip("$")
if env_var in os.environ:
return os.getenv(env_var)
else:
msg = " is not defined in your environment" % env_var
raise UserFeedback(msg)
elif hasattr(value, 'startswith') and value.startswith("\$"):
return value.replace("\$", "$")
else:
return value | Retrieves the setting value whose name is indicated by name_hyphen.
Values starting with $ are assumed to reference environment variables,
and the value stored in environment variables is retrieved. It's an
error if the corresponding environment variable is not set. |
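The environment-variable indirection described above can be illustrated with plain standard-library code; this is a sketch of the same lookup logic, not the class itself:

import os

value = '$HOME'  # a setting value that references an environment variable
if value.startswith('$'):
    resolved = os.getenv(value.lstrip('$'))   # e.g. '/home/user' if HOME is set
elif value.startswith('\\$'):
    resolved = value.replace('\\$', '$')      # escaped form keeps a literal '$'
else:
    resolved = value
print(resolved)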
22,482 | def delete_ip4(self, id_ip):
if not is_valid_int_param(id_ip):
raise InvalidParameterError(u)
url = + str(id_ip) + "/"
code, xml = self.submit(None, , url)
return self.response(code, xml) | Delete an IP4
:param id_ip: Ipv4 identifier. Integer value and greater than zero.
:return: None
:raise IpNotFoundError: IP is not registered.
:raise DataBaseError: Networkapi failed to access the database. |
22,483 | def rel_curve_to(self, dx1, dy1, dx2, dy2, dx3, dy3):
cairo.cairo_rel_curve_to(self._pointer, dx1, dy1, dx2, dy2, dx3, dy3)
self._check_status() | Relative-coordinate version of :meth:`curve_to`.
All offsets are relative to the current point.
Adds a cubic Bézier spline to the path from the current point
to a point offset from the current point by ``(dx3, dy3)``,
using points offset by ``(dx1, dy1)`` and ``(dx2, dy2)``
as the control points.
After this call the current point will be offset by ``(dx3, dy3)``.
Given a current point of ``(x, y)``,
``context.rel_curve_to(dx1, dy1, dx2, dy2, dx3, dy3)``
is logically equivalent to
``context.curve_to(x+dx1, y+dy1, x+dx2, y+dy2, x+dx3, y+dy3)``.
:param dx1: The X offset to the first control point.
:param dy1: The Y offset to the first control point.
:param dx2: The X offset to the second control point.
:param dy2: The Y offset to the second control point.
:param dx3: The X offset to the end of the curve.
:param dy3: The Y offset to the end of the curve.
:type dx1: float
:type dy1: float
:type dx2: float
:type dy2: float
:type dx3: float
:type dy3: float
:raises:
:exc:`CairoError` if there is no current point.
Doing so will leave the context in an error state. |
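A hedged sketch of the call in context, assuming the cairocffi package; the coordinates and output filename are arbitrary:

import cairocffi as cairo

surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 100, 100)
ctx = cairo.Context(surface)
ctx.move_to(10, 80)                         # a current point is required first
ctx.rel_curve_to(20, -60, 40, -60, 60, 0)   # cubic Bezier ending at (70, 80)
ctx.set_line_width(2)
ctx.stroke()
surface.write_to_png('curve.png')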
22,484 | def interpolate(self, year):
df = self.pivot_table(index=IAMC_IDX, columns=['year'],
values='value', aggfunc=np.sum)
if year in df.columns:
df = df[np.isnan(df[year])]
fill_values = df.apply(fill_series,
raw=False, axis=1, year=year)
fill_values = fill_values.dropna().reset_index()
fill_values = fill_values.rename(columns={0: "value"})
fill_values['year'] = year
self.data = self.data.append(fill_values, ignore_index=True) | Interpolate missing values in timeseries (linear interpolation)
Parameters
----------
year: int
year to be interpolated |
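A hedged usage sketch, assuming the pyam package (whose IamDataFrame appears to define the method above); the data values are made up:

import pandas as pd
import pyam

df = pyam.IamDataFrame(pd.DataFrame(
    [['model_a', 'scen_a', 'World', 'Primary Energy', 'EJ/yr', 2005, 10.0],
     ['model_a', 'scen_a', 'World', 'Primary Energy', 'EJ/yr', 2015, 20.0]],
    columns=['model', 'scenario', 'region', 'variable', 'unit', 'year', 'value']))
df.interpolate(2010)   # adds a 2010 row with value 15.0 (linear interpolation)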
22,485 | def serialize(self):
def _serialize(value):
if hasattr(value, 'serialize'):
return value.serialize()
elif hasattr(value, 'to_dict'):
return value.to_dict()
else:
return value
return list(map(_serialize, self.items)) | Get the collection of items as a serialized object (ready to be json encoded).
:rtype: dict or list |
22,486 | def add_note(self, body):
from highton.models.note import Note
created_id = self._post_request(
endpoint=self.ENDPOINT + '/' + str(self.id) + '/' + Note.ENDPOINT,
data=self.element_to_string(
Note(body=body).encode()
)
).headers.get('Location').replace('.xml', '').split('/')[-1]
return Note.get(created_id) | Create a Note to current object
:param body: the body of the note
:type body: str
:return: newly created Note
:rtype: Note |
22,487 | def process(self):
for filename in self.hairball_files(self.paths, self.extensions):
if not self.options.quiet:
print(filename)
try:
if self.cache:
scratch = self.cache.load(filename)
else:
scratch = kurt.Project.load(filename)
except Exception:
traceback.print_exc()
continue
for plugin in self.plugins:
plugin._process(scratch, filename=filename) | Run the analysis across all files found in the given paths.
Each file is loaded once and all plugins are run against it before
loading the next file. |
22,488 | def define_trajectory(self, trajectory_id, offset, n_pieces):
self._send_packet(struct.pack(,
self.COMMAND_DEFINE_TRAJECTORY,
trajectory_id,
self.TRAJECTORY_LOCATION_MEM,
self.TRAJECTORY_TYPE_POLY4D,
offset,
n_pieces)) | Define a trajectory that has previously been uploaded to memory.
:param trajectory_id: The id of the trajectory
:param offset: offset in uploaded memory
:param n_pieces: Nr of pieces in the trajectory
:return: |
22,489 | def format(self, pattern='{head}{padding}{tail} [{ranges}]'):
    data = {}
    data['head'] = self.head
    data['tail'] = self.tail
    if self.padding:
        data['padding'] = '%0{0}d'.format(self.padding)
    else:
        data['padding'] = '%d'
    if '{holes}' in pattern:
        data['holes'] = self.holes().format('{ranges}')
    if '{range}' in pattern or '{ranges}' in pattern:
        indexes = list(self.indexes)
        if not indexes:
            data['range'] = ''
        elif len(indexes) == 1:
            data['range'] = '{0}'.format(indexes[0])
        else:
            data['range'] = '{0}-{1}'.format(indexes[0], indexes[-1])
    if '{ranges}' in pattern:
        separated = self.separate()
        if len(separated) > 1:
            ranges = [collection.format('{range}') for collection in separated]
        else:
            ranges = [data['range']]
        data['ranges'] = ', '.join(ranges)
    return pattern.format(**data) | Return string representation as specified by *pattern*.
return pattern.format(**data) | Return string representation as specified by *pattern*.
Pattern can be any format accepted by Python's standard format function
and will receive the following keyword arguments as context:
* *head* - Common leading part of the collection.
* *tail* - Common trailing part of the collection.
* *padding* - Padding value in ``%0d`` format.
* *range* - Total range in the form ``start-end``
* *ranges* - Comma separated ranges of indexes.
* *holes* - Comma separated ranges of missing indexes. |
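A hedged usage sketch, assuming the clique package that this collection class appears to come from; the filenames are placeholders:

import clique

collections, remainder = clique.assemble(['file.001.jpg', 'file.002.jpg', 'file.004.jpg'])
for collection in collections:
    print(collection.format())                        # e.g. 'file.%03d.jpg [1-2, 4]'
    print(collection.format('{head}{ranges}{tail}'))  # e.g. 'file.1-2, 4.jpg'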
22,490 | def basic_auth_handler(url, method, timeout, headers, data, username=None, password=None):
def handle():
if username is not None and password is not None:
auth_value = '{0}:{1}'.format(username, password).encode()
auth_token = base64.b64encode(auth_value)
auth_header = b'Basic ' + auth_token
headers.append(['Authorization', auth_header])
default_handler(url, method, timeout, headers, data)()
return handle | Handler that implements HTTP/HTTPS connections with Basic Auth.
Sets auth headers using supplied 'username' and 'password', if set.
Used by the push_to_gateway functions. Can be re-used by other handlers. |
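A hedged usage sketch following the prometheus_client pattern the docstring refers to; the gateway address, job name, metric name, and credentials are placeholders:

from prometheus_client import CollectorRegistry, Gauge, push_to_gateway
from prometheus_client.exposition import basic_auth_handler

def auth_handler(url, method, timeout, headers, data):
    return basic_auth_handler(url, method, timeout, headers, data, 'user', 'secret')

registry = CollectorRegistry()
g = Gauge('job_last_success_unixtime', 'Last time the batch job succeeded', registry=registry)
g.set_to_current_time()
push_to_gateway('gateway.example.org:9091', job='batch', registry=registry, handler=auth_handler)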
22,491 | def frequency_cutoff_from_name(name, m1, m2, s1z, s2z):
params = {"mass1":m1, "mass2":m2, "spin1z":s1z, "spin2z":s2z}
return named_frequency_cutoffs[name](params) | Returns the result of evaluating the frequency cutoff function
specified by 'name' on a template with given parameters.
Parameters
----------
name : string
Name of the cutoff function
m1 : float or numpy.array
First component mass in solar masses
m2 : float or numpy.array
Second component mass in solar masses
s1z : float or numpy.array
First component dimensionless spin S_1/m_1^2 projected onto L
s2z : float or numpy.array
Second component dimensionless spin S_2/m_2^2 projected onto L
Returns
-------
f : float or numpy.array
Frequency in Hz |
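A hedged usage sketch, assuming this function lives in PyCBC's pnutils module and that 'SchwarzISCO' is one of the registered cutoff names (both assumptions):

from pycbc.pnutils import frequency_cutoff_from_name

# ISCO frequency for a 1.4 + 1.4 solar-mass, non-spinning binary
f_cut = frequency_cutoff_from_name('SchwarzISCO', 1.4, 1.4, 0.0, 0.0)
print(f_cut)   # frequency in Hz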
22,492 | def commitreturn(self,qstring,vals=()):
"commit and return result. This is intended for sql UPDATE ... RETURNING"
with self.withcur() as cur:
cur.execute(qstring,vals)
return cur.fetchone() | commit and return result. This is intended for sql UPDATE ... RETURNING |
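The pattern behind commitreturn can be sketched with a plain DB-API driver; the connection string, table, and column names here are hypothetical:

import psycopg2

conn = psycopg2.connect('dbname=test')      # connection string is a placeholder
with conn, conn.cursor() as cur:            # the outer context manager commits on success
    cur.execute(
        'UPDATE items SET qty = qty - %s WHERE id = %s RETURNING qty',
        (1, 42),
    )
    remaining = cur.fetchone()[0]           # value produced by RETURNING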
22,493 | def reserve_ipblock(self, ipblock):
properties = {
"name": ipblock.name
}
if ipblock.location:
properties['location'] = ipblock.location
if ipblock.size:
properties['size'] = str(ipblock.size)
raw = {
"properties": properties,
}
response = self._perform_request(
url='/ipblocks', method='POST', data=json.dumps(raw))
return response | Reserves an IP block within your account. |
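A hedged usage sketch assuming the ProfitBricks Python SDK that this client method appears to belong to; the credentials, block name, and location are placeholders:

from profitbricks.client import ProfitBricksService, IPBlock

client = ProfitBricksService(username='user', password='secret')
block = IPBlock(name='public-block', size=2, location='de/fra')
response = client.reserve_ipblock(block)
print(response)   # inspect the reserved block details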
22,494 | def from_json(cls, data):
role = cls()
role.role_id = data['role_id']
role.name = data['name']
role.color = data['color']
return role | Return object based on JSON / dict input
Args:
data (dict): Dictionary containing a serialized Role object
Returns:
:obj:`Role`: Role object representing the data |
22,495 | def delete_beacon(self, name):
if name in self._get_beacons(include_opts=False):
comment = 'Cannot delete beacon item {0}, it is configured in pillar.'.format(name)
complete = False
else:
if name in self.opts['beacons']:
del self.opts['beacons'][name]
comment = 'Deleting beacon item: {0}'.format(name)
else:
comment = 'Beacon item {0} not found.'.format(name)
complete = True
evt = salt.utils.event.get_event('minion', opts=self.opts)
evt.fire_event({'complete': complete, 'comment': comment,
'beacons': self.opts['beacons']},
tag='/salt/minion/minion_beacon_delete_complete')
return True | Delete a beacon item |
22,496 | def str_rate(self):
if not self._eta.started or self._eta.stalled or not self.rate:
return ''
unit_rate, unit = UnitByte(self._eta.rate_overall if self.done else self.rate).auto
if unit_rate >= 100:
formatter = '%d'
elif unit_rate >= 10:
formatter = '%.1f'
else:
formatter = '%.2f'
return '{0} {1}/s'.format(locale.format(formatter, unit_rate, grouping=False), unit) | Returns the rate with formatting. If done, returns the overall rate instead. |
22,497 | def search_videos_by_tag(self, tag, category=None,
period='week',
orderby='relevance',
page=1, count=20):
url = 'https://openapi.youku.com/v2/searches/video/by_tag.json'
params = {
'client_id': self.client_id,
'tag': tag,
'period': period,
'orderby': orderby,
'page': page,
'count': count
}
if category:
params['category'] = category
r = requests.get(url, params=params)
check_error(r)
return r.json() | doc: http://open.youku.com/docs/doc?id=80 |
22,498 | def alias_get(indices=None, aliases=None, hosts=None, profile=None):
es = _get_instance(hosts, profile)
try:
return es.indices.get_alias(index=indices, name=aliases)
except elasticsearch.exceptions.NotFoundError:
return None
except elasticsearch.TransportError as e:
raise CommandExecutionError("Cannot get alias {0} in index {1}, server returned code {2} with message {3}".format(aliases, indices, e.status_code, e.error)) | Check for the existence of an alias and if it exists, return it
indices
Single or multiple indices separated by comma, use _all to perform the operation on all indices.
aliases
Alias names separated by comma
CLI example::
salt myminion elasticsearch.alias_get testindex |
22,499 | def mdf_path(self):
try:
return self._mdf_path
except AttributeError:
path = self.outdir.has_abiext("MDF.nc")
if path: self._mdf_path = path
return path | Absolute path of the MDF file. Empty string if file is not present. |