Unnamed: 0 (int64, 0 to 389k) | code (string, lengths 26 to 79.6k) | docstring (string, lengths 1 to 46.9k) |
---|---|---|
17,800 | def get_container_instance_logs(access_token, subscription_id, resource_group, container_group_name,
container_name=None):
if container_name is None:
container_name = container_group_name
endpoint = .join([get_rm_endpoint(),
, subscription_id,
, resource_group,
,
container_group_name,
, container_name, , CONTAINER_API])
return do_get(endpoint, access_token) | Get the container logs for containers in a container group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
container_group_name (str): Name of container instance group.
container_name (str): Optional name of a container in the group.
Returns:
HTTP response. Container logs. |
17,801 | def construct_ingest_query(self, static_path, columns):
num_shards = self.num_shards
target_partition_size = self.target_partition_size
if self.target_partition_size == -1:
if self.num_shards == -1:
target_partition_size = DEFAULT_TARGET_PARTITION_SIZE
else:
num_shards = -1
metric_names = [m[] for m in self.metric_spec if m[] != ]
dimensions = [c for c in columns if c not in metric_names and c != self.ts_dim]
ingest_query_dict = {
"type": "index_hadoop",
"spec": {
"dataSchema": {
"metricsSpec": self.metric_spec,
"granularitySpec": {
"queryGranularity": self.query_granularity,
"intervals": self.intervals,
"type": "uniform",
"segmentGranularity": self.segment_granularity,
},
"parser": {
"type": "string",
"parseSpec": {
"columns": columns,
"dimensionsSpec": {
"dimensionExclusions": [],
"dimensions": dimensions,
"spatialDimensions": []
},
"timestampSpec": {
"column": self.ts_dim,
"format": "auto"
},
"format": "tsv"
}
},
"dataSource": self.druid_datasource
},
"tuningConfig": {
"type": "hadoop",
"jobProperties": {
"mapreduce.job.user.classpath.first": "false",
"mapreduce.map.output.compress": "false",
"mapreduce.output.fileoutputformat.compress": "false",
},
"partitionsSpec": {
"type": "hashed",
"targetPartitionSize": target_partition_size,
"numShards": num_shards,
},
},
"ioConfig": {
"inputSpec": {
"paths": static_path,
"type": "static"
},
"type": "hadoop"
}
}
}
if self.job_properties:
ingest_query_dict[][][] \
.update(self.job_properties)
if self.hadoop_dependency_coordinates:
ingest_query_dict[] \
= self.hadoop_dependency_coordinates
return ingest_query_dict | Builds an ingest query for an HDFS TSV load.
:param static_path: The path on hdfs where the data is
:type static_path: str
:param columns: List of all the columns that are available
:type columns: list |
17,802 | def pstdev(data):
n = len(data)
if n < 2:
raise ValueError()
ss = _ss(data)
pvar = ss/n
return pvar**0.5 | Calculates the population standard deviation. |
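A self-contained sketch of the calculation above, assuming the helper `_ss(data)` (not shown in this row) returns the sum of squared deviations from the mean:

```python
def _ss(data):
    # assumed behaviour of the helper: sum of squared deviations from the mean
    mean = sum(data) / len(data)
    return sum((x - mean) ** 2 for x in data)

def pstdev(data):
    n = len(data)
    if n < 2:
        raise ValueError("pstdev requires at least two data points")
    pvar = _ss(data) / n     # population variance
    return pvar ** 0.5       # population standard deviation

print(pstdev([2, 4, 4, 4, 5, 5, 7, 9]))  # 2.0
```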
17,803 | def start(self):
super(ProxyMinion, self).start()
try:
    if check_user(self.config['user']):
        self.action_log_info('The Proxy Minion is starting up')
        self.verify_hash_type()
        self.minion.tune_in()
        if self.minion.restart:
            raise SaltClientError('Proxy Minion could not connect to Master')
except (KeyboardInterrupt, SaltSystemExit) as exc:
    self.action_log_info('Proxy Minion Stopping')
    if isinstance(exc, KeyboardInterrupt):
        log.warning('Exiting on Ctrl-c')
        self.shutdown()
    else:
        log.error(exc)
self.shutdown(exc.code) | Start the actual proxy minion.
If sub-classed, don't **ever** forget to run:
super(YourSubClass, self).start()
NOTE: Run any required code before calling `super()`. |
17,804 | def _sync(self):
if (self._opcount > self.checkpoint_operations or
datetime.now() > self._last_sync + self.checkpoint_timeout):
self.log.debug("Synchronizing queue metadata.")
self.queue_metadata.sync()
self._last_sync = datetime.now()
self._opcount = 0
else:
self.log.debug("NOT synchronizing queue metadata.") | Synchronize the cached data with the underlyind database.
Uses an internal transaction counter and compares to the checkpoint_operations
and checkpoint_timeout paramters to determine whether to persist the memory store.
In this implementation, this method wraps calls to C{shelve.Shelf#sync}. |
17,805 | def memory_objects_for_hash(self, n):
return set([self[i] for i in self.addrs_for_hash(n)]) | Returns the set of :class:`SimMemoryObjects` whose expressions contain a variable with the hash
`n`. |
17,806 | def readme(filename, encoding=):
with io.open(filename, encoding=encoding) as source:
return source.read() | Read the contents of a file |
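A runnable sketch of the helper above; the stripped default encoding is assumed to be 'utf-8', and the call site below is hypothetical:

```python
import io

def readme(filename, encoding="utf-8"):  # default encoding is an assumption
    with io.open(filename, encoding=encoding) as source:
        return source.read()

# hypothetical setup.py usage
# long_description = readme("README.rst")
```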
17,807 | def get_content(self, obj):
election_day = ElectionDay.objects.get(
date=self.context[])
division = obj
if obj.level.name == DivisionLevel.DISTRICT:
division = obj.parent
special = True if self.context.get() else False
return PageContent.objects.division_content(
election_day,
division,
special
) | All content for a state's page on an election day. |
17,808 | def convert_avgpool(params, w_name, scope_name, inputs, layers, weights, names):
print()
if names == :
tf_name = + random_string(7)
elif names == :
tf_name = w_name
else:
tf_name = w_name + str(random.random())
if in params:
height, width = params[]
else:
height, width = params[]
if in params:
stride_height, stride_width = params[]
else:
stride_height, stride_width = params[]
if in params:
padding_h, padding_w, _, _ = params[]
else:
padding_h, padding_w = params[]
input_name = inputs[0]
pad =
if height % 2 == 1 and width % 2 == 1 and \
height // 2 == padding_h and width // 2 == padding_w and \
stride_height == 1 and stride_width == 1:
pad =
else:
padding_name = tf_name +
padding_layer = keras.layers.ZeroPadding2D(
padding=(padding_h, padding_w),
name=padding_name
)
layers[padding_name] = padding_layer(layers[inputs[0]])
input_name = padding_name
pooling = keras.layers.AveragePooling2D(
pool_size=(height, width),
strides=(stride_height, stride_width),
padding=pad,
name=tf_name,
data_format=
)
layers[scope_name] = pooling(layers[input_name]) | Convert Average pooling.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers |
17,809 | def simBirth(self,which_agents):
AggShockConsumerType.simBirth(self,which_agents)
if hasattr(self,):
self.pLvlErrNow[which_agents] = 1.0
else:
self.pLvlErrNow = np.ones(self.AgentCount) | Makes new consumers for the given indices. Slightly extends base method by also setting
pLvlErrNow = 1.0 for new agents, indicating that they correctly perceive their productivity.
Parameters
----------
which_agents : np.array(Bool)
Boolean array of size self.AgentCount indicating which agents should be "born".
Returns
-------
None |
17,810 | def _request(self, request_method, endpoint=, url=, data=None, params=None, use_api_key=False, omit_api_version=False):
if not data:
data = {}
if not params:
params = {}
if endpoint and omit_api_version and not url:
url = % (self.base_url, endpoint)
if endpoint and not url:
url = % (self.base_url, settings.API_VERSION, endpoint)
if use_api_key:
headers = {
: self.auth.get_api_key(),
: self.user_agent,
}
else:
headers = {
: self.auth.get_authorization(),
: self.user_agent,
}
response = requests.__getattribute__(request_method)(
url=url,
hooks=settings.REQUEST_HOOK,
headers=headers,
json=data,
params=params
)
if ((response.status_code != 200) and (response.status_code != 202)):
try:
response.raise_for_status()
except requests.exceptions.HTTPError as e:
raise PyCronofyRequestError(
request=e.request,
response=e.response,
)
return response | Perform a http request via the specified method to an API endpoint.
:param string request_method: Request method.
:param string endpoint: Target endpoint. (Optional).
:param string url: Override the endpoint and provide the full url (eg for pagination). (Optional).
:param dict params: Provide parameters to pass to the request. (Optional).
:param dict data: Data to pass to the post. (Optional).
:return: Response
:rtype: ``Response`` |
17,811 | def update(self, filter_, document, multi=False, **kwargs):
self._valide_update_document(document)
if multi:
return self.__collect.update_many(filter_, document, **kwargs)
else:
return self.__collect.update_one(filter_, document, **kwargs) | Update one document matching ``filter_``, or all matching documents when ``multi`` is True. |
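An illustrative call pattern for the wrapper above, which delegates to pymongo's update_one/update_many; the `store` instance name and the documents are hypothetical:

```python
# update a single matching document
store.update({"name": "ada"}, {"$set": {"active": True}})

# update every matching document
store.update({"active": False}, {"$set": {"archived": True}}, multi=True)
```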
17,812 | def add(self, key):
if key not in self.map:
self.map[key] = len(self.items)
self.items.append(key)
return self.map[key] | Add `key` as an item to this OrderedSet, then return its index.
If `key` is already in the OrderedSet, return the index it already
had. |
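A minimal, self-contained sketch reproducing the behaviour described above (an OrderedSet backed by a `map` dict and an `items` list, as in the snippet):

```python
class OrderedSet:
    def __init__(self):
        self.map = {}    # key -> index
        self.items = []  # keys in insertion order

    def add(self, key):
        if key not in self.map:
            self.map[key] = len(self.items)
            self.items.append(key)
        return self.map[key]

s = OrderedSet()
print(s.add("a"))  # 0
print(s.add("b"))  # 1
print(s.add("a"))  # 0 -- already present, so the existing index is returned
```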
17,813 | def overplot_lines(ax,
catlines_all_wave,
list_valid_islitlets,
rectwv_coeff,
global_integer_offset_x_pix, global_integer_offset_y_pix,
ds9_file, debugplot):
for islitlet in list_valid_islitlets:
crval1_linear = rectwv_coeff.contents[islitlet - 1][]
cdelt1_linear = rectwv_coeff.contents[islitlet - 1][]
crvaln_linear = crval1_linear + (EMIR_NAXIS1 - 1) * cdelt1_linear
bb_ns1_orig = rectwv_coeff.contents[islitlet - 1][]
ttd_order = rectwv_coeff.contents[islitlet - 1][]
aij = rectwv_coeff.contents[islitlet - 1][]
bij = rectwv_coeff.contents[islitlet - 1][]
min_row_rectified = float(
rectwv_coeff.contents[islitlet - 1][]
)
max_row_rectified = float(
rectwv_coeff.contents[islitlet - 1][]
)
mean_row_rectified = (min_row_rectified + max_row_rectified) / 2
wpoly_coeff = rectwv_coeff.contents[islitlet - 1][]
x0 = []
y0 = []
x1 = []
y1 = []
x2 = []
y2 = []
for line in catlines_all_wave:
if crval1_linear <= line <= crvaln_linear:
tmp_coeff = np.copy(wpoly_coeff)
tmp_coeff[0] -= line
tmp_xroots = np.polynomial.Polynomial(tmp_coeff).roots()
for dum in tmp_xroots:
if np.isreal(dum):
dum = dum.real
if 1 <= dum <= EMIR_NAXIS1:
x0.append(dum)
y0.append(mean_row_rectified)
x1.append(dum)
y1.append(min_row_rectified)
x2.append(dum)
y2.append(max_row_rectified)
xx0, yy0 = fmap(ttd_order, aij, bij, np.array(x0), np.array(y0))
xx0 -= global_integer_offset_x_pix
yy0 += bb_ns1_orig
yy0 -= global_integer_offset_y_pix
xx1, yy1 = fmap(ttd_order, aij, bij, np.array(x1), np.array(y1))
xx1 -= global_integer_offset_x_pix
yy1 += bb_ns1_orig
yy1 -= global_integer_offset_y_pix
xx2, yy2 = fmap(ttd_order, aij, bij, np.array(x2), np.array(y2))
xx2 -= global_integer_offset_x_pix
yy2 += bb_ns1_orig
yy2 -= global_integer_offset_y_pix
if abs(debugplot) % 10 != 0:
if abs(debugplot) == 22:
for xx1_, xx2_, yy1_, yy2_ in zip(xx1, xx2, yy1, yy2):
ax.plot([xx1_, xx2_], [yy1_, yy2_], , linewidth=2.0)
else:
ax.plot(xx0, yy0, )
if ds9_file is not None:
ds9_file.write(
.format(islitlet)
)
for xx0_, yy0_ in zip(xx0, yy0):
ds9_file.write(
.format(
xx0_, yy0_)
) | Overplot lines (arc/OH).
Parameters
----------
ax : matplotlib axes
Current plot axes.
catlines_all_wave : numpy array
Array with wavelengths of the lines to be overplotted.
list_valid_islitlets : list of integers
List with numbers of valid slitlets.
rectwv_coeff : RectWaveCoeff instance
Rectification and wavelength calibration coefficients for the
particular CSU configuration.
global_integer_offset_x_pix : int
Global offset in the X direction to be applied after computing
the expected location.
global_integer_offset_y_pix : int
Global offset in the Y direction to be applied after computing
the expected location.
ds9_file : file handler or None
File handler to ds9 region file where the location of lines
must be saved.
debugplot : int
Debugging level for messages and plots. For details see
'numina.array.display.pause_debugplot.py'. |
17,814 | def isObservableElement(self, elementName):
if not(isinstance(elementName, str)):
raise TypeError(
"Element name should be a string ." +
"I receive this {0}"
.format(elementName))
return (True if (elementName == "*")
else self._evaluateString(elementName)) | Indicate whether an element is an observable element.
:param str ElementName: the element name to evaluate
:return: true if is an observable element, otherwise false.
:rtype: bool |
17,815 | def is_admin_user(self):
user = api.user.get_current()
roles = user.getRoles()
return "LabManager" in roles or "Manager" in roles | Checks if the user is the admin or a SiteAdmin user.
:return: Boolean |
17,816 | def _finish_add(self, num_bytes_to_add, num_partition_bytes_to_add):
for pvd in self.pvds:
pvd.add_to_space_size(num_bytes_to_add + num_partition_bytes_to_add)
if self.joliet_vd is not None:
self.joliet_vd.add_to_space_size(num_bytes_to_add + num_partition_bytes_to_add)
if self.enhanced_vd is not None:
self.enhanced_vd.copy_sizes(self.pvd)
if self.udf_root is not None:
num_extents_to_add = utils.ceiling_div(num_partition_bytes_to_add,
self.pvd.logical_block_size())
self.udf_main_descs.partition.part_length += num_extents_to_add
self.udf_reserve_descs.partition.part_length += num_extents_to_add
self.udf_logical_volume_integrity.size_table += num_extents_to_add
if self._always_consistent:
self._reshuffle_extents()
else:
self._needs_reshuffle = True | An internal method to do all of the accounting needed whenever
something is added to the ISO. This method should only be called by
public API implementations.
Parameters:
num_bytes_to_add - The number of additional bytes to add to all
descriptors.
num_partition_bytes_to_add - The number of additional bytes to add to
the partition if this is a UDF file.
Returns:
Nothing. |
17,817 | def convert_H2OFrame_2_DMatrix(self, predictors, yresp, h2oXGBoostModel):
import xgboost as xgb
import pandas as pd
import numpy as np
from scipy.sparse import csr_matrix
assert isinstance(predictors, list) or isinstance(predictors, tuple)
assert h2oXGBoostModel._model_json[] == , \
"convert_H2OFrame_2_DMatrix is used for H2OXGBoost model only."
tempFrame = self[predictors].cbind(self[yresp])
colnames = tempFrame.names
if type(predictors[0])==type(1):
temp = []
for colInd in predictors:
temp.append(colnames[colInd])
predictors = temp
if (type(yresp) == type(1)):
tempy = colnames[yresp]
yresp = tempy
enumCols = []
enumColsIndices = []
typeDict = self.types
for predName in predictors:
if str(typeDict[predName])==:
enumCols.append(predName)
enumColsIndices.append(colnames.index(predName))
pandaFtrain = tempFrame.as_data_frame(use_pandas=True, header=True)
nrows = tempFrame.nrow
if len(enumCols) > 0:
allDomain = tempFrame.levels()
domainLen = []
for enumIndex in enumColsIndices:
if len(allDomain[enumIndex])>0:
domainLen.append(len(allDomain[enumIndex])*-1)
incLevel = np.argsort(domainLen)
c2 = tempFrame[enumCols[incLevel[0]]]
tempFrame = tempFrame.drop(enumCols[incLevel[0]])
for index in range(1, len(incLevel)):
c2 = c2.cbind(tempFrame[enumCols[incLevel[index]]])
tempFrame = tempFrame.drop(enumCols[incLevel[index]])
enumCols = c2.names
tempFrame = c2.cbind(tempFrame)
pandaFtrain = tempFrame.as_data_frame(use_pandas=True, header=True)
pandaTrainPart = generatePandaEnumCols(pandaFtrain, enumCols[0], nrows, tempFrame[enumCols[0]].categories())
pandaFtrain.drop([enumCols[0]], axis=1, inplace=True)
for colInd in range(1, len(enumCols)):
cname=enumCols[colInd]
ctemp = generatePandaEnumCols(pandaFtrain, cname, nrows, tempFrame[enumCols[colInd]].categories())
pandaTrainPart=pd.concat([pandaTrainPart, ctemp], axis=1)
pandaFtrain.drop([cname], axis=1, inplace=True)
pandaFtrain = pd.concat([pandaTrainPart, pandaFtrain], axis=1)
c0= tempFrame[yresp].asnumeric().as_data_frame(use_pandas=True, header=True)
pandaFtrain.drop([yresp], axis=1, inplace=True)
pandaF = pd.concat([c0, pandaFtrain], axis=1)
pandaF.rename(columns={c0.columns[0]:yresp}, inplace=True)
newX = list(pandaFtrain.columns.values)
data = pandaF.as_matrix(newX)
label = pandaF.as_matrix([yresp])
return xgb.DMatrix(data=csr_matrix(data), label=label) \
if h2oXGBoostModel._model_json[][] else xgb.DMatrix(data=data, label=label) | This method requires that you import the following toolboxes: xgboost, pandas, numpy and scipy.sparse.
This method will convert an H2OFrame to a DMatrix that can be used by native XGBoost. The H2OFrame contains
numerical and enum columns alone. Note that H2O one-hot-encoding introduces a missing(NA)
column. There can be NAs in any columns.
Follow the steps below to compare H2OXGBoost and native XGBoost:
1. Train the H2OXGBoost model with H2OFrame trainFile and generate a prediction:
h2oModelD = H2OXGBoostEstimator(**h2oParamsD) # parameters specified as a dict()
h2oModelD.train(x=myX, y=y, training_frame=trainFile) # train with H2OFrame trainFile
h2oPredict = h2oPredictD = h2oModelD.predict(trainFile)
2. Derive the DMatrix from H2OFrame:
nativeDMatrix = trainFile.convert_H2OFrame_2_DMatrix(myX, y, h2oModelD)
3. Derive the parameters for native XGBoost:
nativeParams = h2oModelD.convert_H2OXGBoostParams_2_XGBoostParams()
4. Train your native XGBoost model and generate a prediction:
nativeModel = xgb.train(params=nativeParams[0], dtrain=nativeDMatrix, num_boost_round=nativeParams[1])
nativePredict = nativeModel.predict(data=nativeDMatrix, ntree_limit=nativeParams[1].
5. Compare the predictions h2oPredict from H2OXGBoost, nativePredict from native XGBoost.
:param h2oFrame: H2OFrame to be converted to DMatrix for native XGBoost
:param predictors: List of predictor columns, can be column names or indices
:param yresp: response column, can be column index or name
:param h2oXGBoostModel: H2OXGBoost model that was built with the same H2OFrame as input earlier
:return: DMatrix that can be an input to a native XGBoost model |
17,818 | def list_pending_work_units(self, work_spec_name, start=0, limit=None):
return self.registry.filter(WORK_UNITS_ + work_spec_name,
priority_min=time.time(),
start=start, limit=limit) | Get a dictionary of in-progress work units for some work spec.
The dictionary maps work unit name to work unit definition.
Units listed here should be worked on by some worker. |
17,819 | def complete_sum(self):
node = self.node.complete_sum()
if node is self.node:
return self
else:
return _expr(node) | Return an equivalent DNF expression that includes all prime
implicants. |
17,820 | def _generate_destination_for_source(self, src_ase):
for sa, cont, name, dpath in self._get_destination_paths():
if self._spec.options.rename:
name = str(pathlib.Path(name))
if name == :
raise RuntimeError(
)
else:
name = str(pathlib.Path(name) / src_ase.name)
dst_mode = self._translate_src_mode_to_dst_mode(src_ase.mode)
dst_ase = self._check_for_existing_remote(sa, cont, name, dst_mode)
if dst_ase is None:
dst_ase = blobxfer.models.azure.StorageEntity(cont, ed=None)
dst_ase.populate_from_local(
sa, cont, name, dst_mode, src_ase.cache_control)
dst_ase.size = src_ase.size
if (dst_mode == blobxfer.models.azure.StorageModes.Block and
self._spec.options.access_tier is not None):
dst_ase.access_tier = self._spec.options.access_tier
action = self._check_copy_conditions(src_ase, dst_ase)
if action == SynccopyAction.Copy:
yield dst_ase
elif action == SynccopyAction.Skip:
if self._spec.options.delete_extraneous_destination:
uid = (
blobxfer.operations.synccopy.SyncCopy.
create_deletion_id(
dst_ase._client, dst_ase.container, dst_ase.name)
)
self._delete_exclude.add(uid)
if self._general_options.dry_run:
logger.info(.format(
src_ase.path, dst_ase.path)) | Generate entities for source path
:param SyncCopy self: this
:param blobxfer.models.azure.StorageEntity src_ase: source ase
:rtype: blobxfer.models.azure.StorageEntity
:return: destination storage entity |
17,821 | def isModified(self):
return self.info_modified or \
self._content and self._content_digest() != self.digest | Check if either the datastream content or profile fields have changed
and should be saved to Fedora.
:rtype: boolean |
17,822 | def set(self, id_, lineno, value=, fname=None, args=None):
if fname is None:
if CURRENT_FILE:
fname = CURRENT_FILE[-1]
else:
fname = sys.argv[0]
self.table[id_] = ID(id_, args, value, lineno, fname) | Like the above, but issues no warning on duplicate macro
definitions. |
17,823 | def write_training_metrics(self):
with open(self.path, ) as file:
writer = csv.writer(file)
writer.writerow(FIELD_NAMES)
for row in self.rows:
writer.writerow(row) | Write Training Metrics to CSV |
17,824 | def load_config(path):
args = []
with open(path, ) as fp:
for line in fp.readlines():
if line.strip() and not line.startswith("#"):
args.append(line.replace("\n", ""))
return args | Load device configuration from file path and return list with parsed lines.
:param path: Location of configuration file.
:type path: str
:rtype: list |
17,825 | def decompile_pyc(bin_pyc, output=sys.stdout):
from turicreate.meta.asttools import python_source
bin = bin_pyc.read()
code = marshal.loads(bin[8:])
mod_ast = make_module(code)
python_source(mod_ast, file=output) | Decompile a Python pyc or pyo binary file.
:param bin_pyc: input file object
:param output: output file object |
17,826 | def memory_usage(self, index=True, deep=False):
result = Series([c.memory_usage(index=False, deep=deep)
for col, c in self.iteritems()], index=self.columns)
if index:
result = Series(self.index.memory_usage(deep=deep),
index=[]).append(result)
return result | Return the memory usage of each column in bytes.
The memory usage can optionally include the contribution of
the index and elements of `object` dtype.
This value is displayed in `DataFrame.info` by default. This can be
suppressed by setting ``pandas.options.display.memory_usage`` to False.
Parameters
----------
index : bool, default True
Specifies whether to include the memory usage of the DataFrame's
index in returned Series. If ``index=True``, the memory usage of
the index is the first item in the output.
deep : bool, default False
If True, introspect the data deeply by interrogating
`object` dtypes for system-level memory consumption, and include
it in the returned values.
Returns
-------
Series
A Series whose index is the original column names and whose values
is the memory usage of each column in bytes.
See Also
--------
numpy.ndarray.nbytes : Total bytes consumed by the elements of an
ndarray.
Series.memory_usage : Bytes consumed by a Series.
Categorical : Memory-efficient array for string values with
many repeated values.
DataFrame.info : Concise summary of a DataFrame.
Examples
--------
>>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool']
>>> data = dict([(t, np.ones(shape=5000).astype(t))
... for t in dtypes])
>>> df = pd.DataFrame(data)
>>> df.head()
int64 float64 complex128 object bool
0 1 1.0 1.0+0.0j 1 True
1 1 1.0 1.0+0.0j 1 True
2 1 1.0 1.0+0.0j 1 True
3 1 1.0 1.0+0.0j 1 True
4 1 1.0 1.0+0.0j 1 True
>>> df.memory_usage()
Index 80
int64 40000
float64 40000
complex128 80000
object 40000
bool 5000
dtype: int64
>>> df.memory_usage(index=False)
int64 40000
float64 40000
complex128 80000
object 40000
bool 5000
dtype: int64
The memory footprint of `object` dtype columns is ignored by default:
>>> df.memory_usage(deep=True)
Index 80
int64 40000
float64 40000
complex128 80000
object 160000
bool 5000
dtype: int64
Use a Categorical for efficient storage of an object-dtype column with
many repeated values.
>>> df['object'].astype('category').memory_usage(deep=True)
5168 |
17,827 | def check(dependency=None, timeout=60):
def decorator(check):
_check_names.append(check.__name__)
check._check_dependency = dependency
@functools.wraps(check)
def wrapper(checks_root, dependency_state):
result = CheckResult.from_check(check)
state = None
try:
internal.run_dir = checks_root / check.__name__
src_dir = checks_root / (dependency.__name__ if dependency else "-")
shutil.copytree(src_dir, internal.run_dir)
os.chdir(internal.run_dir)
with internal.register, _timeout(seconds=timeout):
args = (dependency_state,) if inspect.getfullargspec(check).args else ()
state = check(*args)
except Failure as e:
result.passed = False
result.cause = e.payload
except BaseException as e:
result.passed = None
result.cause = {"rationale": _("check50 ran into an error while running checks!")}
log(repr(e))
for line in traceback.format_tb(e.__traceback__):
log(line.rstrip())
log(_("Contact [email protected] with the URL of this check!"))
else:
result.passed = True
finally:
result.log = _log
result.data = _data
return result, state
return wrapper
return decorator | Mark function as a check.
:param dependency: the check that this check depends on
:type dependency: function
:param timeout: maximum number of seconds the check can run
:type timeout: int
When a check depends on another, the former will only run if the latter passes.
Additionally, the dependent check will inherit the filesystem of its dependency.
This is particularly useful when writing e.g., a ``compiles`` check that compiles a
student's program (and checks that it compiled successfully). Any checks that run the
student's program will logically depend on this check, and since they inherit the
resulting filesystem of the check, they will immediately have access to the compiled
program without needing to recompile.
Example usage::
@check50.check() # Mark 'exists' as a check
def exists():
\"""hello.c exists\"""
check50.exists("hello.c")
@check50.check(exists) # Mark 'compiles' as a check that depends on 'exists'
def compiles():
\"""hello.c compiles\"""
check50.c.compile("hello.c")
@check50.check(compiles)
def prints_hello():
\"""prints "Hello, world!\\\\n\"""
# Since 'prints_hello', depends on 'compiles' it inherits the compiled binary
check50.run("./hello").stdout("[Hh]ello, world!?\\n", "hello, world\\n").exit() |
17,828 | def add_params(endpoint, params):
p = PreparedRequest()
p.prepare(url=endpoint, params=params)
if PY2:
return unicode(p.url)
else:
return p.url | Combine query endpoint and params.
Example::
>>> add_params("https://www.google.com/search", {"q": "iphone"})
https://www.google.com/search?q=iphone |
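The docstring example can be reproduced directly with requests' PreparedRequest (Python 3 shown, so the PY2 branch is omitted):

```python
from requests.models import PreparedRequest

def add_params(endpoint, params):
    p = PreparedRequest()
    p.prepare(url=endpoint, params=params)  # merges params into the query string
    return p.url

print(add_params("https://www.google.com/search", {"q": "iphone"}))
# https://www.google.com/search?q=iphone
```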
17,829 | def ping(self):
ret, data = self.sendmess(MSG_NOP, bytes())
if data or ret > 0:
raise ProtocolError()
if ret < 0:
raise OwnetError(-ret, self.errmess[-ret]) | Sends a NOP packet and waits for the response; returns None. |
17,830 | def get_git_isolation():
ctx = click.get_current_context(silent=True)
if ctx and GIT_ISOLATION in ctx.meta:
return ctx.meta[GIT_ISOLATION] | Get Git isolation from the current context. |
17,831 | def getPDFstr(s):
if not bool(s):
return "()"
def make_utf16be(s):
r = hexlify(bytearray([254, 255]) + bytearray(s, "UTF-16BE"))
t = r if fitz_py2 else r.decode()
return "<" + t + ">"
r = ""
for c in s:
oc = ord(c)
if oc > 255:
return make_utf16be(s)
if oc > 31 and oc < 127:
if c in ("(", ")", "\\"):
r += "\\"
r += c
continue
if oc > 127:
r += "\\" + oct(oc)[-3:]
continue
if oc < 8 or oc > 13 or oc == 11 or oc == 127:
r += "\\267"
continue
if oc == 8:
r += "\\b"
elif oc == 9:
r += "\\t"
elif oc == 10:
r += "\\n"
elif oc == 12:
r += "\\f"
elif oc == 13:
r += "\\r"
return "(" + r + ")" | Return a PDF string depending on its coding.
Notes:
If only ascii then "(original)" is returned, else if only 8 bit chars
then "(original)" with interspersed octal strings \nnn is returned,
else a string "<FEFF[hexstring]>" is returned, where [hexstring] is the
UTF-16BE encoding of the original. |
17,832 | def rm_file_or_dir(path, ignore_errors=True):
if os.path.exists(path):
if os.path.isdir(path):
if os.path.islink(path):
os.unlink(path)
else:
shutil.rmtree(path, ignore_errors=ignore_errors)
else:
if os.path.islink(path):
os.unlink(path)
else:
os.remove(path) | Helper function to remove the file or directory at the given path.
Parameters
----------
path : str
    File or directory path to remove.
Returns
-------
None |
17,833 | def check_policies(self, account, account_policies, aws_policies):
self.log.debug(.format(account.account_name))
sess = get_aws_session(account)
iam = sess.client()
added = {}
for policyName, account_policy in account_policies.items():
if isinstance(account_policy, bytes):
account_policy = account_policy.decode()
gitpol = json.loads(
re.sub(
r,
account.ad_group_base or account.account_name,
account_policy
)
)
if policyName in aws_policies:
pol = aws_policies[policyName]
awspol = iam.get_policy_version(
PolicyArn=pol[],
VersionId=pol[]
)[][]
if awspol != gitpol:
self.log.warn(.format(
policyName,
account.account_name
))
self.create_policy(account, iam, json.dumps(gitpol, indent=4), policyName, arn=pol[])
else:
self.log.debug(.format(
policyName,
account.account_name
))
else:
self.log.warn(.format(policyName, account.account_name))
response = self.create_policy(account, iam, json.dumps(gitpol), policyName)
added[policyName] = response[]
return added | Iterate through the policies of a specific account and create or update the policy if it is missing or
does not match the policy documents from Git. Returns a dict of all the policies added to the account
(does not include updated policies)
Args:
account (:obj:`Account`): Account to check policies for
account_policies (`dict` of `str`: `dict`): A dictionary containing all the policies for the specific
account
aws_policies (`dict` of `str`: `dict`): A dictionary containing the non-AWS managed policies on the account
Returns:
:obj:`dict` of `str`: `str` |
17,834 | def pypi_search(self):
spec = self.pkg_spec
search_arg = self.options.pypi_search
spec.insert(0, search_arg.strip())
(spec, operator) = self.parse_search_spec(spec)
if not spec:
return 1
for pkg in self.pypi.search(spec, operator):
if pkg[]:
summary = pkg[].encode()
else:
summary = ""
print( % (pkg[].encode(), pkg["version"],
summary))
return 0 | Search PyPI by metadata keyword
e.g. yolk -S name=yolk AND license=GPL
@param spec: Cheese Shop search spec
@type spec: list of strings
spec examples:
["name=yolk"]
["license=GPL"]
["name=yolk", "AND", "license=GPL"]
@returns: 0 on success or 1 if mal-formed search spec |
17,835 | def AddWarning(self, warning):
self._RaiseIfNotWritable()
self._storage_file.AddWarning(warning)
self.number_of_warnings += 1 | Adds a warning.
Args:
warning (ExtractionWarning): an extraction warning.
Raises:
IOError: when the storage writer is closed.
OSError: when the storage writer is closed. |
17,836 | def get_user_groups(self, user_name):
res = self._make_ocs_request(
,
self.OCS_SERVICE_CLOUD,
+ user_name + ,
)
if res.status_code == 200:
tree = ET.fromstring(res.content)
self._check_ocs_status(tree, [100])
return [group.text for group in tree.find()]
raise HTTPResponseError(res) | Get a list of groups associated to a user.
:param user_name: name of user to list groups
:returns: list of groups
:raises: HTTPResponseError in case an HTTP error status was returned |
17,837 | def match(self, s=):
match = s.lower()
res = {}
for k in sorted(self):
s = str(k) + \
if match in s.lower():
res[k] = self[k]
return CMAOptions(res, unchecked=True) | return all options whose name or description matches
string `s`; case is disregarded.
Example: ``cma.CMAOptions().match('verb')`` returns the verbosity
options. |
17,838 | def for_entity(obj, check_support_attachments=False):
if check_support_attachments and not supports_attachments(obj):
return []
return getattr(obj, ATTRIBUTE) | Return attachments on an entity. |
17,839 | def create(conversion_finder, parsed_att: Any, attribute_type: Type[Any], errors: Dict[Type, Exception] = None):
if conversion_finder is None:
msg = "No conversion finder provided to find a converter between parsed attribute of type " \
" and expected type .".format(patt=str(parsed_att),
typ=get_pretty_type_str(type(parsed_att)),
expt=get_pretty_type_str(attribute_type))
else:
msg = "No conversion chain found between parsed attribute of type and expected type " \
" using conversion finder {conv}.".format(patt=parsed_att,
typ=get_pretty_type_str(type(parsed_att)),
expt=get_pretty_type_str(attribute_type),
conv=conversion_finder)
if errors is not None:
msg = msg + + str(errors)
return NoConverterFoundForObjectType(msg) | Helper method provided because we can't put this in the constructor: it creates a bug in Nose tests
https://github.com/nose-devs/nose/issues/725
:param parsed_att:
:param attribute_type:
:param conversion_finder:
:return: |
17,840 | def get_vcsrc(self):
try:
vimrc = create_module(, settings.VCSRC_PATH)
except IOError:
self.stderr.write("No module or package at %s\n"
% settings.VCSRC_PATH)
vimrc = None
return vimrc | Returns an in-memory created module pointing at the user's configuration
and extra code/commands. By default, it tries to create the module from
:setting:`VCSRC_PATH`. |
17,841 | def timings(reps,func,*args,**kw):
return timings_out(reps,func,*args,**kw)[0:2] | timings(reps,func,*args,**kw) -> (t_total,t_per_call)
Execute a function reps times, return a tuple with the elapsed total CPU
time in seconds and the time per call. These are just the first two values
in timings_out(). |
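`timings_out` is not shown in this row; a minimal sketch consistent with the docstring (total CPU time, time per call, plus the function's output) might look like this:

```python
import time

def timings_out(reps, func, *args, **kw):
    start = time.process_time()
    out = None
    for _ in range(reps):
        out = func(*args, **kw)
    tot_time = time.process_time() - start
    return tot_time, tot_time / reps, out

def timings(reps, func, *args, **kw):
    return timings_out(reps, func, *args, **kw)[0:2]

t_total, t_per_call = timings(1000, sum, range(100))
```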
17,842 | def _local_list_files(self, args):
if len(args) < 2:
raise SPMInvocationError()
pkg_file = args[1]
if not os.path.exists(pkg_file):
raise SPMPackageError(.format(pkg_file))
formula_tar = tarfile.open(pkg_file, )
pkg_files = formula_tar.getmembers()
for member in pkg_files:
self.ui.status(member.name) | List files for a package file |
17,843 | def create(self, metadata, publisher_account,
service_descriptors=None, providers=None,
use_secret_store=True):
assert isinstance(metadata, dict), f
if not metadata or not Metadata.validate(metadata):
raise OceanInvalidMetadata(
)
if did in self._get_aquarius().list_assets():
raise OceanDIDAlreadyExist(
f)
ddo = DDO(did)
ddo.add_public_key(did, publisher_account.address)
ddo.add_authentication(did, PUBLIC_KEY_TYPE_RSA)
assert metadata_copy[][
],
assert Metadata.validate(metadata),
logger.debug()
brizo = BrizoProvider.get_brizo()
if not use_secret_store:
encrypt_endpoint = brizo.get_encrypt_endpoint(self._config)
files_encrypted = brizo.encrypt_files_dict(
metadata_copy[][],
encrypt_endpoint,
ddo.asset_id,
publisher_account.address,
self._keeper.sign_hash(ddo.asset_id, publisher_account)
)
else:
files_encrypted = self._get_secret_store(publisher_account) \
.encrypt_document(
did_to_id(did),
json.dumps(metadata_copy[][]),
)
metadata_copy[][] = ddo.generate_checksum(did, metadata)
ddo.add_proof(metadata_copy[][], publisher_account, self._keeper)
if files_encrypted:
logger.info(f)
index = 0
for file in metadata_copy[][]:
file[] = index
index = index + 1
del file[]
metadata_copy[][] = files_encrypted
else:
raise AssertionError(
)
ddo_service_endpoint = self._get_aquarius().get_service_endpoint(did)
metadata_service_desc = ServiceDescriptor.metadata_service_descriptor(metadata_copy,
ddo_service_endpoint)
if not service_descriptors:
service_descriptors = [ServiceDescriptor.authorization_service_descriptor(
self._config.secret_store_url)]
brizo = BrizoProvider.get_brizo()
service_descriptors += [ServiceDescriptor.access_service_descriptor(
metadata[MetadataBase.KEY][],
brizo.get_consume_endpoint(self._config),
brizo.get_service_endpoint(self._config),
3600,
self._keeper.escrow_access_secretstore_template.address
)]
else:
service_types = set(map(lambda x: x[0], service_descriptors))
if ServiceTypes.AUTHORIZATION not in service_types:
service_descriptors += [ServiceDescriptor.authorization_service_descriptor(
self._config.secret_store_url)]
else:
brizo = BrizoProvider.get_brizo()
service_descriptors += [ServiceDescriptor.access_service_descriptor(
metadata[MetadataBase.KEY][],
brizo.get_consume_endpoint(self._config),
brizo.get_service_endpoint(self._config),
3600,
self._keeper.escrow_access_secretstore_template.address
)]
service_descriptors = service_descriptors + [metadata_service_desc]
for service in ServiceFactory.build_services(did, service_descriptors):
ddo.add_service(service)
logger.debug(
f
f
f)
response = None
registered_on_chain = self._keeper.did_registry.register(
did,
checksum=Web3Provider.get_web3().sha3(text=metadata_copy[][]),
url=ddo_service_endpoint,
account=publisher_account,
providers=providers
)
if registered_on_chain is False:
logger.warning(f)
return None
logger.info(f)
try:
response = self._get_aquarius().publish_asset_ddo(ddo)
logger.debug()
except ValueError as ve:
raise ValueError(f)
except Exception as e:
logger.error(f)
if not response:
return None
return ddo | Register an asset in both the keeper's DIDRegistry (on-chain) and in the Metadata store (
Aquarius).
:param metadata: dict conforming to the Metadata accepted by Ocean Protocol.
:param publisher_account: Account of the publisher registering this asset
:param service_descriptors: list of ServiceDescriptor tuples of length 2.
The first item must be one of ServiceTypes and the second
item is a dict of parameters and values required by the service
:param providers: list of addresses of providers of this asset (a provider is
an ethereum account that is authorized to provide asset services)
:return: DDO instance |
17,844 | def main():
args = sys.argv
if in args:
print(main.__doc__)
sys.exit()
dataframe = extractor.command_line_dataframe([ [, False, ], [, False, ],
[, False, ], [, False, ],
[, False, ], [, False, ],
[, False, ], [, False, ],
[, False, ], [, False, False],
[, False, ], [, False, ],
[, False, ], [, False, True],
[, False, True], [, False, True],
[, False, True], [, False, True],
[, False, 0],
[, False, ],
[, False, ], [, False, ],
[, False, False], [, False, 3]])
checked_args = extractor.extract_and_check_args(args, dataframe)
meas_file, sum_file, wig_file, samp_file, age_file, spc_file, res_file, fmt, meth, norm, depth, timescale, dir_path, pltLine, pltSus, pltDec, pltInc, pltMag, logit, depth_scale, symbol, input_dir, save, data_model_num = extractor.get_vars(
[, , , , , , , , , , , , , , , , , , , , , , , ], checked_args)
try:
sym, size = symbol.split()
size = int(size)
except:
print()
print()
sym, size = , 5
if res_file:
try:
res_file, res_sym, res_size = res_file.split()
except:
print()
print(
.format(res_file))
res_file, res_sym, res_size = , , 0
else:
res_file, res_sym, res_size = , , 0
if spc_file:
try:
spc_file, spc_sym, spc_size = spc_file.split()
except:
print()
print(
.format(spc_file))
spc_file, spc_sym, spc_size = , , 0
else:
spc_file, spc_sym, spc_size = , , 0
try:
dmin, dmax = depth.split()
except:
print()
print(.format(depth))
dmin, dmax = -1, -1
if timescale:
try:
timescale, amin, amax = timescale.split()
pltTime = True
except:
print(
)
print(
.format(timescale))
timescale, amin, amax = None, -1, -1
pltTime = False
else:
timescale, amin, amax = None, -1, -1
pltTime = False
if norm and not isinstance(norm, bool):
wt_file = norm
norm = True
else:
norm = False
wt_file =
try:
method, step = meth.split()
except:
print(
)
print()
method, step = , 0
fig, figname = ipmag.core_depthplot(input_dir, meas_file, spc_file, samp_file, age_file, sum_file, wt_file, depth_scale, dmin, dmax, sym, size,
spc_sym, spc_size, method, step, fmt, pltDec, pltInc, pltMag, pltLine, pltSus, logit, pltTime, timescale, amin, amax, norm, data_model_num)
if not pmagplotlib.isServer:
figname = figname.replace(, )
if fig and save:
print(.format(figname))
plt.savefig(figname)
return
app = wx.App(redirect=False)
if not fig:
pw.simple_warning(
)
return False
dpi = fig.get_dpi()
pixel_width = dpi * fig.get_figwidth()
pixel_height = dpi * fig.get_figheight()
figname = os.path.join(dir_path, figname)
plot_frame = pmag_menu_dialogs.PlotFrame((int(pixel_width), int(pixel_height + 50)),
fig, figname, standalone=True)
app.MainLoop() | NAME
core_depthplot.py
DESCRIPTION
plots various measurements versus core_depth or age. plots data flagged as 'FS-SS-C' as discrete samples.
SYNTAX
core_depthplot.py [command line options]
# or, for Anaconda users:
core_depthplot_anaconda [command line options]
OPTIONS
-h prints help message and quits
-f FILE: specify input measurements format file
-fsum FILE: specify input LIMS database (IODP) core summary csv file
-fwig FILE: specify input depth,wiggle to plot, in magic format with sample_core_depth key for depth
-fsa FILE: specify input er_samples format file from magic for depth
-fa FILE: specify input ages format file from magic for age
NB: must have either -fsa OR -fa (not both)
-fsp FILE sym size: specify input zeq_specimen format file from magic, sym and size
NB: PCAs will have specified color, while fisher means will be white with specified color as the edgecolor
-fres FILE specify input pmag_results file from magic, sym and size
-LP [AF,T,ARM,IRM, X] step [in mT,C,mT,mT, mass/vol] to plot
-S do not plot blanket treatment data (if this is set, you don't need the -LP)
-sym SYM SIZE, symbol, size for continuous points (e.g., ro 5, bs 10, g^ 10 for red dot, blue square, green triangle), default is blue dot at 5 pt
-D do not plot declination
-M do not plot magnetization
-log plot magnetization on a log scale
-L do not connect dots with a line
-I do not plot inclination
-d min max [in m] depth range to plot
-n normalize by weight in er_specimen table
-Iex: plot the expected inc at lat - only available for results with lat info in file
-ts TS amin amax: plot the GPTS for the time interval between amin and amax (numbers in Ma)
TS: [ck95, gts04, gts12]
-ds [mbsf,mcd] specify depth scale, mbsf default
-fmt [svg, eps, pdf, png] specify output format for plot (default: svg)
-sav save plot silently
DEFAULTS:
Measurements file: measurements.txt
Samples file: samples.txt
NRM step
Summary file: none |
17,845 | def load_ns_sequence(eos_name):
ns_sequence = []
if eos_name == :
ns_sequence_path = os.path.join(pycbc.tmpltbank.NS_SEQUENCE_FILE_DIRECTORY, )
ns_sequence = np.loadtxt(ns_sequence_path)
else:
print()
print()
print()
raise Exception()
max_ns_g_mass = max(ns_sequence[:,0])
return (ns_sequence, max_ns_g_mass) | Load the data of an NS non-rotating equilibrium sequence
generated using the equation of state (EOS) chosen by the
user. [Only the 2H 2-piecewise polytropic EOS is currently
supported. This yields NSs with large radii (15-16 km).]
Parameters
-----------
eos_name: string
NS equation of state label ('2H' is the only supported
choice at the moment)
Returns
----------
ns_sequence: 3D-array
contains the sequence data in the form NS gravitational
mass (in solar masses), NS baryonic mass (in solar
masses), NS compactness (dimensionless)
max_ns_g_mass: float
the maximum NS gravitational mass (in solar masses) in
the sequence (this is the mass of the most massive stable
NS) |
17,846 | def _router_address(self, data):
args = data.split()[1:]
try:
self._relay_attrs[].extend(args)
except KeyError:
self._relay_attrs[] = list(args) | only for IPv6 addresses |
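The try/except above is the usual "extend or create" pattern on a dict of lists; the attribute key was stripped from this row, so the one used below is only an assumption. dict.setdefault expresses the same idea:

```python
relay_attrs = {}
args = ["[2001:db8::1]:9001", "[2001:db8::2]:9001"]

# key name is an assumption, not recovered from the row above
relay_attrs.setdefault("or-address", []).extend(args)
print(relay_attrs)
```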
17,847 | def check_managed_changes(
name,
source,
source_hash,
source_hash_name,
user,
group,
mode,
attrs,
template,
context,
defaults,
saltenv,
contents=None,
skip_verify=False,
keep_mode=False,
seuser=None,
serole=None,
setype=None,
serange=None,
**kwargs):
source, source_hash = source_list(source,
source_hash,
saltenv)
sfn =
source_sum = None
if contents is None:
sfn, source_sum, comments = get_managed(
name,
template,
source,
source_hash,
source_hash_name,
user,
group,
mode,
attrs,
saltenv,
context,
defaults,
skip_verify,
**kwargs)
if source_sum and ( in source_sum):
source_sum[] = source_sum[].lower()
if comments:
__clean_tmp(sfn)
raise CommandExecutionError(comments)
if sfn and source and keep_mode:
if _urlparse(source).scheme in (, ) \
or source.startswith():
try:
mode = __salt__[](source, saltenv=saltenv, octal=True)
except Exception as exc:
log.warning(, sfn, exc)
changes = check_file_meta(name, sfn, source, source_sum, user,
group, mode, attrs, saltenv, contents,
seuser=seuser, serole=serole, setype=setype, serange=serange)
__clean_tmp(sfn)
return changes | Return a dictionary of what changes need to be made for a file
.. versionchanged:: Neon
selinux attributes added
CLI Example:
.. code-block:: bash
salt '*' file.check_managed_changes /etc/httpd/conf.d/httpd.conf salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' root, root, '755' jinja True None None base |
17,848 | def beatExtraction(st_features, win_len, PLOT=False):
toWatch = [0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]
max_beat_time = int(round(2.0 / win_len))
hist_all = numpy.zeros((max_beat_time,))
for ii, i in enumerate(toWatch):
DifThres = 2.0 * (numpy.abs(st_features[i, 0:-1] - st_features[i, 1::])).mean()
if DifThres<=0:
DifThres = 0.0000000000000001
[pos1, _] = utilities.peakdet(st_features[i, :], DifThres)
posDifs = []
for j in range(len(pos1)-1):
posDifs.append(pos1[j+1]-pos1[j])
[hist_times, HistEdges] = numpy.histogram(posDifs, numpy.arange(0.5, max_beat_time + 1.5))
hist_centers = (HistEdges[0:-1] + HistEdges[1::]) / 2.0
hist_times = hist_times.astype(float) / st_features.shape[1]
hist_all += hist_times
if PLOT:
plt.subplot(9, 2, ii + 1)
plt.plot(st_features[i, :], )
for k in pos1:
plt.plot(k, st_features[i, k], )
f1 = plt.gca()
f1.axes.get_xaxis().set_ticks([])
f1.axes.get_yaxis().set_ticks([])
if PLOT:
plt.show(block=False)
plt.figure()
I = numpy.argmax(hist_all)
bpms = 60 / (hist_centers * win_len)
BPM = bpms[I]
Ratio = hist_all[I] / hist_all.sum()
if PLOT:
hist_all = hist_all[bpms < 500]
bpms = bpms[bpms < 500]
plt.plot(bpms, hist_all, )
plt.xlabel()
plt.ylabel()
plt.show(block=True)
return BPM, Ratio | This function extracts an estimate of the beat rate for a musical signal.
ARGUMENTS:
- st_features: a numpy array (n_feats x numOfShortTermWindows)
- win_len: window size in seconds
RETURNS:
- BPM: estimates of beats per minute
- Ratio: a confidence measure |
17,849 | def _get_code_dir(self, code_path):
decompressed_dir = None
try:
if os.path.isfile(code_path) and code_path.endswith(self.SUPPORTED_ARCHIVE_EXTENSIONS):
decompressed_dir = _unzip_file(code_path)
yield decompressed_dir
else:
LOG.debug("Code %s is not a zip/jar file", code_path)
yield code_path
finally:
if decompressed_dir:
shutil.rmtree(decompressed_dir) | Method to get a path to a directory where the Lambda function code is available. This directory will
be mounted directly inside the Docker container.
This method handles a few different cases for ``code_path``:
- ``code_path`` is an existing zip/jar file: unzip it in a temp directory and return the temp directory
- ``code_path`` is an existing directory: return it immediately
- ``code_path`` is a file/dir that does not exist: return it as is; this method may not be clever enough to
detect the existence of the path
:param string code_path: Path to the code. This could be pointing at a file or folder either on a local
disk or in some network file system
:return string: Directory containing Lambda function code. It can be mounted directly in container |
17,850 | def p_variable(self, p):
p[0] = [_Segment(_BINDING, p[2])]
if len(p) > 4:
p[0].extend(p[4])
else:
p[0].append(_Segment(_TERMINAL, ))
self.segment_count += 1
p[0].append(_Segment(_END_BINDING, )) | variable : LEFT_BRACE LITERAL EQUALS unbound_segments RIGHT_BRACE
| LEFT_BRACE LITERAL RIGHT_BRACE |
17,851 | def _get_blob(self):
if not self.__blob:
self.__blob = self.repo.get_object(self.id)
return self.__blob | read blob on access only because get_object is slow |
17,852 | def _kl_independent(a, b, name="kl_independent"):
p = a.distribution
q = b.distribution
if (tensorshape_util.is_fully_defined(a.event_shape) and
tensorshape_util.is_fully_defined(b.event_shape)):
if a.event_shape == b.event_shape:
if p.event_shape == q.event_shape:
num_reduce_dims = (tensorshape_util.rank(a.event_shape) -
tensorshape_util.rank(p.event_shape))
reduce_dims = [-i - 1 for i in range(0, num_reduce_dims)]
return tf.reduce_sum(
input_tensor=kullback_leibler.kl_divergence(p, q, name=name),
axis=reduce_dims)
else:
raise NotImplementedError("KL between Independents with different "
"event shapes not supported.")
else:
raise ValueError("Event shapes do not match.")
else:
with tf.control_dependencies(
[
assert_util.assert_equal(a.event_shape_tensor(),
b.event_shape_tensor()),
assert_util.assert_equal(p.event_shape_tensor(),
q.event_shape_tensor())
]):
num_reduce_dims = (
prefer_static.rank_from_shape(
a.event_shape_tensor, a.event_shape) -
prefer_static.rank_from_shape(
p.event_shape_tensor, a.event_shape))
reduce_dims = prefer_static.range(-num_reduce_dims - 1, -1, 1)
return tf.reduce_sum(
input_tensor=kullback_leibler.kl_divergence(p, q, name=name),
axis=reduce_dims) | Batched KL divergence `KL(a || b)` for Independent distributions.
We can leverage the fact that
```
KL(Independent(a) || Independent(b)) = sum(KL(a || b))
```
where the sum is over the `reinterpreted_batch_ndims`.
Args:
a: Instance of `Independent`.
b: Instance of `Independent`.
name: (optional) name to use for created ops. Default "kl_independent".
Returns:
Batchwise `KL(a || b)`.
Raises:
ValueError: If the event space for `a` and `b`, or their underlying
distributions don't match. |
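The identity quoted in the docstring can be checked numerically with TensorFlow Probability (assuming tensorflow and tensorflow_probability are available):

```python
import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions

a = tfd.Independent(tfd.Normal(loc=tf.zeros(3), scale=1.0),
                    reinterpreted_batch_ndims=1)
b = tfd.Independent(tfd.Normal(loc=tf.ones(3), scale=1.0),
                    reinterpreted_batch_ndims=1)

# KL of the Independent wrappers equals the sum of the underlying KLs.
kl_wrapped = tfd.kl_divergence(a, b)                                        # scalar
kl_summed = tf.reduce_sum(tfd.kl_divergence(a.distribution, b.distribution))
print(float(kl_wrapped), float(kl_summed))  # both 1.5 here (3 * 0.5)
```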
17,853 | def proxies(self, url):
netloc = urllib.parse.urlparse(url).netloc
proxies = {}
if settings.PROXIES and settings.PROXIES.get(netloc):
proxies["http"] = settings.PROXIES[netloc]
proxies["https"] = settings.PROXIES[netloc]
elif settings.PROXY_URL:
proxies["http"] = settings.PROXY_URL
proxies["https"] = settings.PROXY_URL
return proxies | Get the transport proxy configuration
:param url: string
:return: Proxy configuration dictionary
:rtype: Dictionary |
17,854 | def partition_dumps(self):
manifest = self.manifest_class()
manifest_size = 0
manifest_files = 0
for resource in self.resources:
manifest.add(resource)
manifest_size += resource.length
manifest_files += 1
if (manifest_size >= self.max_size or
manifest_files >= self.max_files):
yield(manifest)
manifest = self.manifest_class()
manifest_size = 0
manifest_files = 0
if (manifest_files > 0):
yield(manifest) | Yield a set of manifest objects that partition the dumps.
Simply adds resources/files to a manifest until there are either
the correct number of files or the size limit is exceeded, then yields
that manifest. |
17,855 | def model_post_save(sender, instance, created=False, **kwargs):
if sender._meta.app_label == :
return
def notify():
table = sender._meta.db_table
if created:
notify_observers(table, ORM_NOTIFY_KIND_CREATE, instance.pk)
else:
notify_observers(table, ORM_NOTIFY_KIND_UPDATE, instance.pk)
transaction.on_commit(notify) | Signal emitted after any model is saved via Django ORM.
:param sender: Model class that was saved
:param instance: The actual instance that was saved
:param created: True if a new row was created |
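A sketch of how a handler like the one above is typically wired up in Django; the app name and the module holding the handler are assumptions, not taken from this row:

```python
from django.apps import AppConfig
from django.db.models.signals import post_save

class ObserversConfig(AppConfig):
    name = "observers"  # hypothetical app name

    def ready(self):
        # hypothetical import path for the handler shown above
        from observers.signals import model_post_save
        # dispatched for every model saved through the ORM
        post_save.connect(model_post_save, dispatch_uid="observers.model_post_save")
```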
17,856 | async def take_control(self, password):
cmd = "takecontrol %s" % password
return await asyncio.wait_for(
self._protocol.send_command(cmd), timeout=self._timeout
) | Take control of QTM.
:param password: Password as entered in QTM. |
17,857 | def findGlyph(self, glyphName):
glyphName = normalizers.normalizeGlyphName(glyphName)
groupNames = self._findGlyph(glyphName)
groupNames = [self.keyNormalizer.__func__(
groupName) for groupName in groupNames]
return groupNames | Returns a ``list`` of the group or groups associated with
**glyphName**.
**glyphName** will be an :ref:`type-string`. If no group is found
to contain **glyphName** an empty ``list`` will be returned. ::
>>> font.groups.findGlyph("A")
["A_accented"] |
17,858 | def xy_spectrail_arc_intersections(self, slitlet2d=None):
if self.list_arc_lines is None:
raise ValueError("Arc lines not sought")
number_spectrum_trails = len(self.list_spectrails)
if number_spectrum_trails == 0:
raise ValueError("Number of available spectrum trails is 0")
number_arc_lines = len(self.list_arc_lines)
if number_arc_lines == 0:
raise ValueError("Number of available arc lines is 0")
self.x_inter_rect = np.array([])
self.y_inter_rect = np.array([])
for arcline in self.list_arc_lines:
spectrail = self.list_spectrails[self.i_middle_spectrail]
xroot, yroot = intersection_spectrail_arcline(
spectrail=spectrail, arcline=arcline
)
arcline.x_rectified = xroot
self.x_inter_rect = np.append(
self.x_inter_rect, [xroot] * number_spectrum_trails
)
for spectrail in self.list_spectrails:
y_expected = self.corr_yrect_a + self.corr_yrect_b * \
spectrail.y_rectified
self.y_inter_rect = np.append(self.y_inter_rect, y_expected)
if abs(self.debugplot) >= 10:
print(,
self.y0_frontier_lower_expected)
print(,
self.y0_frontier_upper_expected)
print(,
self.corr_yrect_a +
self.corr_yrect_b * self.y0_frontier_lower)
print(,
self.corr_yrect_a +
self.corr_yrect_b * self.y0_frontier_upper)
self.x_inter_orig = np.array([])
self.y_inter_orig = np.array([])
for arcline in self.list_arc_lines:
for spectrail in self.list_spectrails:
xroot, yroot = intersection_spectrail_arcline(
spectrail=spectrail, arcline=arcline
)
self.x_inter_orig = np.append(self.x_inter_orig, xroot)
self.y_inter_orig = np.append(self.y_inter_orig, yroot)
if abs(self.debugplot % 10) != 0 and slitlet2d is not None:
title = "Slitlet
" (xy_spectrail_arc_intersections)"
ax = ximshow(slitlet2d, title=title,
first_pixel=(self.bb_nc1_orig, self.bb_ns1_orig),
show=False)
for spectrail in self.list_spectrails:
xdum, ydum = spectrail.linspace_pix(start=self.bb_nc1_orig,
stop=self.bb_nc2_orig)
ax.plot(xdum, ydum, )
for arcline in self.list_arc_lines:
xdum, ydum = arcline.linspace_pix(start=self.bb_ns1_orig,
stop=self.bb_ns2_orig)
ax.plot(xdum, ydum, )
ax.plot(self.x_inter_orig, self.y_inter_orig, )
ax.plot(self.x_inter_rect, self.y_inter_rect, )
pause_debugplot(self.debugplot, pltshow=True) | Compute intersection points of spectrum trails with arc lines.
The member list_arc_lines is updated with new keyword:keyval
values for each arc line.
Parameters
----------
slitlet2d : numpy array
Slitlet image to be displayed with the computed boundaries
and intersecting points overplotted. This argument is
optional. |
17,859 | def css_property(self) -> str:
prop = self.random.choice(list(CSS_PROPERTIES.keys()))
val = CSS_PROPERTIES[prop]
if isinstance(val, list):
val = self.random.choice(val)
elif val == :
val = self.__text.hex_color()
elif val == :
val = .format(self.random.randint(1, 99),
self.random.choice(CSS_SIZE_UNITS))
return .format(prop, val) | Generate a random snippet of CSS that assigns value to a property.
:return: CSS property.
:Examples:
'background-color: #f4d3a1' |
17,860 | def __driver_stub(self, text, state):
origline = readline.get_line_buffer()
line = origline.lstrip()
if line and line[-1] == :
self.__driver_helper(line)
else:
toks = shlex.split(line)
return self.__driver_completer(toks, text, state) | Display help messages or invoke the proper completer.
The interface of helper methods and completer methods are documented in
the helper() decorator method and the completer() decorator method,
respectively.
Arguments:
text: A string, that is the current completion scope.
state: An integer.
Returns:
A string used to replace the given text, if any.
None if no completion candidates are found.
Raises:
This method is called via the readline callback. If this method
raises an error, it is silently ignored by the readline library.
This behavior makes debugging very difficult. For this reason,
non-driver methods are run within try-except blocks. When an error
occurs, the stack trace is printed to self.stderr. |
17,861 | def get_field_value_from_context(field_name, context_list):
field_path = field_name.split()
if field_path[0] == :
context_index = 0
field_path.pop(0)
else:
context_index = -1
while field_path[0] == :
context_index -= 1
field_path.pop(0)
try:
field_value = context_list[context_index]
while len(field_path):
field = field_path.pop(0)
if isinstance(field_value, (list, tuple, ListModel)):
if field.isdigit():
field = int(field)
field_value = field_value[field]
elif isinstance(field_value, dict):
try:
field_value = field_value[field]
except KeyError:
if field.isdigit():
field = int(field)
field_value = field_value[field]
else:
field_value = None
else:
field_value = getattr(field_value, field)
return field_value
except (IndexError, AttributeError, KeyError, TypeError):
return None | Helper to get a field value from a string path.
String '<context>' is used to go up the context stack. It can only
be used at the beginning of the path: <context>.<context>.field_name_1
On the other hand, '<root>' is used to start lookup from first item on context. |
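Illustrative calls for the helper above; the stripped path separator is assumed to be '.' and the stripped markers are assumed to be the '<root>'/'<context>' strings named in the docstring:

```python
context_list = [{"user": {"name": "ada"}}, {"items": [10, 20, 30]}]

# lookup starts at the last context by default
print(get_field_value_from_context("items.1", context_list))              # 20

# '<context>' steps up the stack before the dotted lookup begins
print(get_field_value_from_context("<context>.user.name", context_list))  # 'ada'
```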
17,862 | def fillna_value(self, df, left, **concat_args):
value = pd.Series(
0, index=[c for c in df.columns
if c.endswith() and c.find() == -1])
return value | This method gives subclasses the opportunity to define how
join() fills missing values. Return value must be compatible with
DataFrame.fillna() value argument. Examples:
- return 0: replace missing values with zero
- return df.mean(): replace missing values with column mean
This default implementation fills counts with zero.
TODO: identify counts more robustly instead of relying on column name
Typically fill other fields with mean but can't do that during the join
because that would leak information across a train/test split |
17,863 | async def get_connection(self, container):
if self._connpool:
conn = self._connpool.pop()
return RedisClientBase(conn, self)
else:
conn = self._create_client(container)
await RedisClientBase._get_connection(self, container, conn)
await self._protocol.send_command(conn, container, , str(self.db))
return RedisClientBase(conn, self) | Get an exclusive connection, useful for blocked commands and transactions.
You must call release or shutdown (not recommanded) to return the connection after use.
:param container: routine container
:returns: RedisClientBase object, with some commands same as RedisClient like execute_command,
batch_execute, register_script etc. |
17,864 | def func_on_enter(func):
def function_after_enter_pressed(ev):
ev.stopPropagation()
if ev.keyCode == 13:
func(ev)
return function_after_enter_pressed | Register the `func` as a callback reacting only to ENTER.
Note:
This function doesn't bind the key to the element, just creates sort of
filter, which ignores all other events. |
17,865 | def remove_entry(self, offset, length):
for index, entry in enumerate(self._entries):
if entry.offset == offset and entry.length == length:
del self._entries[index]
break
else:
raise pycdlibexception.PyCdlibInternalError() | Given an offset and length, find and remove the entry in this block
that corresponds.
Parameters:
offset - The offset of the entry to look for.
length - The length of the entry to look for.
Returns:
Nothing. |
17,866 | def check(self, dsm, independence_factor=5, **kwargs):
least_common_mechanism = False
message = ''
data = dsm.data
categories = dsm.categories
dsm_size = dsm.size[0]
if not categories:
categories = [] * dsm_size
dependent_module_number = []
for j in range(0, dsm_size):
dependent_module_number.append(0)
for i in range(0, dsm_size):
if (categories[i] != and
categories[j] != and
data[i][j] > 0):
dependent_module_number[j] += 1
for index, item in enumerate(dsm.categories):
if item == or item == :
dependent_module_number[index] = 0
if max(dependent_module_number) <= dsm_size / independence_factor:
least_common_mechanism = True
else:
maximum = max(dependent_module_number)
message = (
% (
dsm.entities[dependent_module_number.index(maximum)],
maximum, dsm_size, independence_factor,
dsm_size / independence_factor))
return least_common_mechanism, message | Check least common mechanism.
Args:
dsm (:class:`DesignStructureMatrix`): the DSM to check.
independence_factor (int): if the maximum dependencies for one
module is inferior or equal to the DSM size divided by the
independence factor, then this criterion is verified.
Returns:
bool: True if least common mechanism, else False |
17,867 | def modifyModlist(
old_entry: dict, new_entry: dict, ignore_attr_types: Optional[List[str]] = None,
ignore_oldexistent: bool = False) -> Dict[str, Tuple[str, List[bytes]]]:
ignore_attr_types = _list_dict(map(str.lower, (ignore_attr_types or [])))
modlist: Dict[str, Tuple[str, List[bytes]]] = {}
attrtype_lower_map = {}
for a in old_entry.keys():
attrtype_lower_map[a.lower()] = a
for attrtype in new_entry.keys():
attrtype_lower = attrtype.lower()
if attrtype_lower in ignore_attr_types:
continue
new_value = list(filter(lambda x: x is not None, new_entry[attrtype]))
if attrtype_lower in attrtype_lower_map:
old_value = old_entry.get(attrtype_lower_map[attrtype_lower], [])
old_value = list(filter(lambda x: x is not None, old_value))
del attrtype_lower_map[attrtype_lower]
else:
old_value = []
if not old_value and new_value:
modlist[attrtype] = (ldap3.MODIFY_ADD, escape_list(new_value))
elif old_value and new_value:
old_value_dict = _list_dict(old_value)
new_value_dict = _list_dict(new_value)
delete_values = []
for v in old_value:
if v not in new_value_dict:
delete_values.append(v)
add_values = []
for v in new_value:
if v not in old_value_dict:
add_values.append(v)
if len(delete_values) > 0 or len(add_values) > 0:
modlist[attrtype] = (
ldap3.MODIFY_REPLACE, escape_list(new_value))
elif old_value and not new_value:
modlist[attrtype] = (ldap3.MODIFY_DELETE, [])
if not ignore_oldexistent:
for a in attrtype_lower_map.keys():
if a in ignore_attr_types:
continue
attrtype = attrtype_lower_map[a]
modlist[attrtype] = (ldap3.MODIFY_DELETE, [])
return modlist | Build differential modify list for calling LDAPObject.modify()/modify_s()
:param old_entry:
Dictionary holding the old entry
:param new_entry:
Dictionary holding what the new entry should be
:param ignore_attr_types:
List of attribute type names to be ignored completely
:param ignore_oldexistent:
If true attribute type names which are in old_entry
but are not found in new_entry at all are not deleted.
This is handy for situations where your application
sets attribute value to '' for deleting an attribute.
In most cases leave zero.
:return: List of tuples suitable for
:py:meth:`ldap:ldap.LDAPObject.modify`.
This function is the same as :py:func:`ldap:ldap.modlist.modifyModlist`
except for the following changes:
* MOD_DELETE/MOD_DELETE used in preference to MOD_REPLACE when updating
an existing value. |
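An illustrative (hypothetical) before/after pair showing the kind of modlist the function above produces; entry values are lists of bytes, as the compare/escape logic expects, and the attributes are made up.

old_entry = {
    'cn': [b'Alice Example'],
    'mail': [b'alice@old.example.com'],
    'description': [b'temporary'],
}
new_entry = {
    'cn': [b'Alice Example'],             # unchanged -> no operation emitted
    'mail': [b'alice@example.com'],       # changed   -> MODIFY_REPLACE
    'telephoneNumber': [b'+1 555 0100'],  # new       -> MODIFY_ADD
    # 'description' omitted              -> MODIFY_DELETE (ignore_oldexistent=False)
}

for attr, (op, values) in modifyModlist(old_entry, new_entry).items():
    print(attr, op, values)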
17,868 | def get_urlhash(self, url, fmt):
with self.open(os.path.basename(url)) as f:
return {: fmt(url), : filehash(f, )} | Returns the hash of the file of an internal url |
17,869 | def app_authorize(self, account=None, flush=True, bailout=False):
with self._get_account(account) as account:
user = account.get_name()
password = account.get_authorization_password()
if password is None:
password = account.get_password()
self._dbg(1, "Attempting to app-authorize %s." % user)
self._app_authenticate(account, password, flush, bailout)
self.app_authorized = True | Like app_authenticate(), but uses the authorization password
of the account.
For the difference between authentication and authorization
please google for AAA.
:type account: Account
:param account: An account object, like login().
:type flush: bool
:param flush: Whether to flush the last prompt from the buffer.
:type bailout: bool
:param bailout: Whether to wait for a prompt after sending the password. |
17,870 | def add_months_to_date(months, date):
month = date.month
new_month = month + months
years = 0
while new_month < 1:
new_month += 12
years -= 1
while new_month > 12:
new_month -= 12
years += 1
year = date.year + years
try:
return datetime.date(year, new_month, date.day)
except ValueError:
if months > 0:
new_month += 1
if new_month > 12:
new_month -= 12
year += 1
return datetime.date(year, new_month, 1)
else:
new_day = calendar.monthrange(year, new_month)[1]
return datetime.date(year, new_month, new_day) | Add a number of months to a date |
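A few worked calls showing the end-of-month handling above (assuming the function is importable; the dates are arbitrary).

import datetime

print(add_months_to_date(1, datetime.date(2021, 1, 31)))    # Feb has no day 31 -> rolls forward to Mar 1
print(add_months_to_date(-1, datetime.date(2021, 3, 31)))   # negative offset -> clamps to Feb 28
print(add_months_to_date(14, datetime.date(2020, 11, 15)))  # crosses the year boundary -> 2022-01-15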
17,871 | def url_to_filename(url: str, etag: str = None) -> str:
url_bytes = url.encode()
url_hash = sha256(url_bytes)
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode()
etag_hash = sha256(etag_bytes)
filename += '.' + etag_hash.hexdigest()
return filename | Convert `url` into a hashed filename in a repeatable way.
If `etag` is specified, append its hash to the url's, delimited
by a period. |
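A quick check of the naming scheme described above: the filename is the sha256 of the URL, optionally followed by a period and the sha256 of the ETag.

from hashlib import sha256

name = url_to_filename('https://example.com/model.tar.gz', etag='"abc123"')
url_part, _, etag_part = name.partition('.')
assert url_part == sha256(b'https://example.com/model.tar.gz').hexdigest()
assert etag_part == sha256(b'"abc123"').hexdigest()
print(name)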
17,872 | def deploy(self, driver, location_id=config.DEFAULT_LOCATION_ID,
size=config.DEFAULT_SIZE):
logger.debug( % (self.name, driver))
args = {: self.name}
if hasattr(config, ):
args[] = config.SSH_KEY_NAME
if hasattr(config, ):
args[] = config.EX_USERDATA
args[] = driver.list_locations()[location_id]
logger.debug( % args[])
args[] = size_from_name(size, driver.list_sizes())
logger.debug( % args[])
logger.debug( % config.IMAGE_NAMES[self.image_name])
args[] = image_from_name(
config.IMAGE_NAMES[self.image_name], driver.list_images())
logger.debug( % args[])
logger.debug( % args)
node = driver.create_node(**args)
logger.debug()
logger.debug( % driver.features)
password = node.extra.get() \
if in driver.features[] else None
logger.debug( % config.SSH_INTERFACE)
node, ip_addresses = driver._wait_until_running(
node, timeout=1200, ssh_interface=config.SSH_INTERFACE)
ssh_args = {: ip_addresses[0], : 22, : 10}
if password:
ssh_args[] = password
else:
ssh_args[] = config.SSH_KEY_PATH if hasattr(config, ) else None
logger.debug( % ssh_args)
ssh_client = libcloud.compute.ssh.SSHClient(**ssh_args)
logger.debug()
ssh_client = driver._ssh_client_connect(ssh_client)
logger.debug()
logger.debug( % len(self.deployment.steps))
driver._run_deployment_script(self.deployment, node, ssh_client)
node.script_deployments = self.script_deployments
logger.debug( % node.extra[])
return NodeProxy(node, args[]) | Use driver to deploy node, with optional ability to specify
location id and size id.
First, obtain location object from driver. Next, get the
size. Then, get the image. Finally, deploy node, and return
NodeProxy. |
17,873 | def event_stream(self, filters=None):
if filters is None:
filters = {}
return self._docker.events(decode=True, filters=filters) | :param filters: filters to apply on messages. See docker api.
:return: an iterable that contains events from docker. See the docker api for content. |
17,874 | def compute():
if what == "numpy":
y = eval(expr)
else:
y = ne.evaluate(expr)
return len(y) | Compute the polynomial. |
17,875 | def create_diff_storage(self, target, variant):
if not isinstance(target, IMedium):
raise TypeError("target can only be an instance of type IMedium")
if not isinstance(variant, list):
raise TypeError("variant can only be an instance of type list")
for a in variant[:10]:
if not isinstance(a, MediumVariant):
raise TypeError(
"array can only contain objects of type MediumVariant")
progress = self._call("createDiffStorage",
in_p=[target, variant])
progress = IProgress(progress)
return progress | Starts creating an empty differencing storage unit based on this
medium in the format and at the location defined by the @a target
argument.
The target medium must be in :py:attr:`MediumState.not_created`
state (i.e. must not have an existing storage unit). Upon successful
completion, this operation will set the type of the target medium to
:py:attr:`MediumType.normal` and create a storage unit necessary to
represent the differencing medium data in the given format (according
to the storage format of the target object).
After the returned progress object reports that the operation is
successfully complete, the target medium gets remembered by this
VirtualBox installation and may be attached to virtual machines.
The medium will be set to :py:attr:`MediumState.locked_read`
state for the duration of this operation.
in target of type :class:`IMedium`
Target medium.
in variant of type :class:`MediumVariant`
Exact image variant which should be created (as a combination of
:py:class:`MediumVariant` flags).
return progress of type :class:`IProgress`
Progress object to track the operation completion.
raises :class:`VBoxErrorObjectInUse`
Medium not in @c NotCreated state. |
17,876 | def get_headers(self, container):
uri = "/%s" % utils.get_name(container)
resp, resp_body = self.api.method_head(uri)
return resp.headers | Return the headers for the specified container. |
17,877 | def parse_object(lexer: Lexer, is_const: bool) -> ObjectValueNode:
start = lexer.token
item = cast(Callable[[Lexer], Node], partial(parse_object_field, is_const=is_const))
return ObjectValueNode(
fields=any_nodes(lexer, TokenKind.BRACE_L, item, TokenKind.BRACE_R),
loc=loc(lexer, start),
) | ObjectValue[Const] |
17,878 | def initialize_socket(self):
try:
_LOGGER.debug("Trying to open socket.")
self._socket = socket.socket(
socket.AF_INET,
socket.SOCK_DGRAM
)
self._socket.bind(('', self._udp_port))
except socket.error as err:
raise err
else:
_LOGGER.debug("Socket open.")
socket_thread = threading.Thread(
name="SocketThread", target=socket_worker,
args=(self._socket, self.messages,))
socket_thread.setDaemon(True)
socket_thread.start() | initialize the socket |
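The socket_worker target referenced above is not shown in this entry; a minimal sketch of what such a reader loop might look like (the queue-based hand-off and the buffer size are assumptions):

import queue
import socket

def socket_worker(sock: socket.socket, messages: "queue.Queue") -> None:
    # Block on the UDP socket and push each received datagram onto the shared queue.
    while True:
        try:
            data, addr = sock.recvfrom(4096)  # buffer size chosen arbitrarily
        except OSError:
            break  # socket was closed elsewhere -> let the daemon thread exit
        messages.put((data, addr))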
17,879 | def ListTimeZones(self):
max_length = 0
for timezone_name in pytz.all_timezones:
if len(timezone_name) > max_length:
max_length = len(timezone_name)
utc_date_time = datetime.datetime.utcnow()
table_view = views.ViewsFactory.GetTableView(
self._views_format_type, column_names=['Timezone', 'UTC Offset'],
title='Zones')
for timezone_name in pytz.all_timezones:
try:
local_timezone = pytz.timezone(timezone_name)
except AssertionError as exception:
logger.error((
).format(timezone_name, exception))
continue
local_date_string = '{0!s}'.format(
local_timezone.localize(utc_date_time))
if '+' in local_date_string:
_, _, diff = local_date_string.rpartition('+')
diff_string = '+{0:s}'.format(diff)
else:
_, _, diff = local_date_string.rpartition('-')
diff_string = '-{0:s}'.format(diff)
table_view.AddRow([timezone_name, diff_string])
table_view.Write(self._output_writer) | Lists the timezones. |
17,880 | def nonNegativeDerivative(requestContext, seriesList, maxValue=None):
results = []
for series in seriesList:
newValues = []
prev = None
for val in series:
if None in (prev, val):
newValues.append(None)
prev = val
continue
diff = val - prev
if diff >= 0:
newValues.append(diff)
elif maxValue is not None and maxValue >= val:
newValues.append((maxValue - prev) + val + 1)
else:
newValues.append(None)
prev = val
newName = "nonNegativeDerivative(%s)" % series.name
newSeries = TimeSeries(newName, series.start, series.end, series.step,
newValues)
newSeries.pathExpression = newName
results.append(newSeries)
return results | Same as the derivative function above, but ignores datapoints that trend
down. Useful for counters that increase for a long time, then wrap or
reset. (Such as if a network interface is destroyed and recreated by
unloading and re-loading a kernel module, common with USB / WiFi cards.
Example::
&target=nonNegativeDerivative(
company.server.application01.ifconfig.TXPackets) |
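A stand-alone restatement of the per-point arithmetic used above, without the graphite TimeSeries machinery, to make the counter-wrap case concrete:

def non_negative_deltas(values, max_value=None):
    out, prev = [], None
    for val in values:
        if None in (prev, val):
            out.append(None)
        elif val - prev >= 0:
            out.append(val - prev)
        elif max_value is not None and max_value >= val:
            # counter wrapped: headroom before the wrap + value after it + 1
            out.append((max_value - prev) + val + 1)
        else:
            out.append(None)
        prev = val
    return out

print(non_negative_deltas([10, 12, 15, 3]))               # [None, 2, 3, None]
print(non_negative_deltas([250, 254, 2], max_value=255))  # [None, 4, 4]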
17,881 | def shuffle(self, x, random=None):
if random is None:
random = self.random
_int = int
for i in reversed(xrange(1, len(x))):
j = _int(random() * (i+1))
x[i], x[j] = x[j], x[i] | x, random=random.random -> shuffle list x in place; return None.
Optional arg random is a 0-argument function returning a random
float in [0.0, 1.0); by default, the standard random.random. |
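The same Fisher-Yates loop exercised with the stdlib RNG, since the method above only depends on a 0-argument random() callable:

import random

deck = list(range(10))
rng = random.Random(42)  # seeded so the shuffle is repeatable
for i in reversed(range(1, len(deck))):
    j = int(rng.random() * (i + 1))
    deck[i], deck[j] = deck[j], deck[i]
print(deck)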
17,882 | def database_names(self, session=None):
warnings.warn("database_names is deprecated. Use list_database_names "
"instead.", DeprecationWarning, stacklevel=2)
return self.list_database_names(session) | **DEPRECATED**: Get a list of the names of all databases on the
connected server.
:Parameters:
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
.. versionchanged:: 3.7
Deprecated. Use :meth:`list_database_names` instead.
.. versionchanged:: 3.6
Added ``session`` parameter. |
17,883 | def db_to_specifier(db_string):
local_match = PLAIN_RE.match(db_string)
remote_match = URL_RE.match(db_string)
if local_match:
return + local_match.groupdict()[]
elif remote_match:
hostname, portnum, database = map(remote_match.groupdict().get,
(, , ))
local_url = settings._(, )
localhost, localport = urlparse.urlparse(local_url)[1].split()
| Return the database specifier for a database string.
This accepts a database name or URL, and returns a database specifier in the
format accepted by ``specifier_to_db``. It is recommended that you consult
the documentation for that function for an explanation of the format. |
17,884 | def unitigs(args):
p = OptionParser(unitigs.__doc__)
p.add_option("--maxerr", default=2, type="int", help="Maximum error rate")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bestedges, = args
G = read_graph(bestedges, maxerr=opts.maxerr, directed=True)
H = nx.Graph()
intconv = lambda x: int(x.split("-")[0])
for k, v in G.iteritems():
if k == G.get(v, None):
H.add_edge(intconv(k), intconv(v))
nunitigs = nreads = 0
for h in nx.connected_component_subgraphs(H, copy=False):
st = [x for x in h if h.degree(x) == 1]
if len(st) != 2:
continue
src, target = st
path = list(nx.all_simple_paths(h, src, target))
assert len(path) == 1
path, = path
print("|".join(str(x) for x in path))
nunitigs += 1
nreads += len(path)
logging.debug("A total of {0} unitigs built from {1} reads."\
.format(nunitigs, nreads)) | %prog unitigs best.edges
Reads Celera Assembler's "best.edges" and extract all unitigs. |
17,885 | def getAccessURL(self, CorpNum, UserID):
result = self._httpget(, CorpNum, UserID)
return result.url | Popbill login URL
args
CorpNum : member company's business registration number
UserID : member's Popbill user ID
return
URL containing a 30-second security token
raise
PopbillException |
17,886 | def compare_packages(rpm_str_a, rpm_str_b, arch_provided=True):
logger.debug(, rpm_str_a, rpm_str_b)
evr_a = parse_package(rpm_str_a, arch_provided)['EVR']
evr_b = parse_package(rpm_str_b, arch_provided)['EVR']
return labelCompare(evr_a, evr_b) | Compare two RPM strings to determine which is newer
Parses version information out of RPM package strings of the form
returned by the ``rpm -q`` command and compares their versions to
determine which is newer. Provided strings *do not* require an
architecture at the end, although if providing strings without
architecture, the ``arch_provided`` parameter should be set to
False.
Note that the packages do not have to be the same package (i.e.
they do not require the same name or architecture).
:param str rpm_str_a: an rpm package string
:param str rpm_str_b: an rpm package string
:param bool arch_provided: whether package strings contain
architecture information
:return: 1 (``a`` is newer), 0 (versions are equivalent), or -1
(``b`` is newer)
:rtype: int |
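A hypothetical comparison using the function above; running it requires the rpm Python bindings that provide labelCompare plus the parse_package helper, and the package strings are made up.

result = compare_packages('bash-4.4.19-10.el8.x86_64', 'bash-4.4.19-8.el8.x86_64')
if result > 0:
    print('first package is newer')
elif result == 0:
    print('versions are equivalent')
else:
    print('second package is newer')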
17,887 | def normalize_pts(pts, ymax, scaler=2):
return [(x * scaler, ymax - (y * scaler)) for x, y in pts] | scales all coordinates and flip y axis due to different
origin coordinates (top left vs. bottom left) |
17,888 | def find(self, name, required):
if name == None:
raise Exception("Name cannot be null")
locator = self._locate(name)
if locator == None:
if required:
raise ReferenceException(None, name)
return None
return self._references.find(locator, required) | Finds all matching dependencies by their name.
:param name: the dependency name to locate.
:param required: true to raise an exception when no dependencies are found.
:return: a list of found dependencies |
17,889 | def create_variable(self, varname, vtype=None):
var_types = ('string', 'int', 'boolean', 'double')
vname = varname
var = None
type_from_name = 'string'
if ':' in varname:
type_from_name, vname = varname.split(':')
if type_from_name not in (var_types):
type_from_name, vname = vname, type_from_name
if type_from_name not in (var_types):
raise Exception('Undefined variable type in "{0}"'.format(varname))
if vname in self.tkvariables:
var = self.tkvariables[vname]
else:
if vtype is None:
if type_from_name == 'int':
var = tkinter.IntVar()
elif type_from_name == 'boolean':
var = tkinter.BooleanVar()
elif type_from_name == 'double':
var = tkinter.DoubleVar()
else:
var = tkinter.StringVar()
else:
var = vtype()
self.tkvariables[vname] = var
return var | Create a tk variable.
If the variable was created previously return that instance. |
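A hedged usage sketch; `builder` stands for whatever object carries the method above (it must expose a tkvariables dict), and the `int:` prefix selects tkinter.IntVar as in the type-mapping branch.

import tkinter

root = tkinter.Tk()                               # Tk variables need a running root
counter = builder.create_variable('int:counter')  # typed name -> IntVar
counter.set(3)
same = builder.create_variable('counter')         # same name -> the same instance is returned
assert same is counter
flag = builder.create_variable('flag', vtype=tkinter.BooleanVar)  # explicit vtype wins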
17,890 | def subkeys(self, path):
for _ in subpaths_for_path_range(path, hardening_chars="'pH"):
yield self.subkey_for_path(_) | A generalized form that can return multiple subkeys. |
17,891 | def __deftype_impls(
ctx: ParserContext, form: ISeq
) -> Tuple[List[DefTypeBase], List[Method]]:
current_interface_sym: Optional[sym.Symbol] = None
current_interface: Optional[DefTypeBase] = None
interfaces = []
methods: List[Method] = []
interface_methods: MutableMapping[sym.Symbol, List[Method]] = {}
for elem in form:
if isinstance(elem, sym.Symbol):
if current_interface is not None:
if current_interface_sym in interface_methods:
raise ParserException(
f"deftype* forms may only implement an interface once",
form=elem,
)
assert (
current_interface_sym is not None
), "Symbol must be defined with interface"
interface_methods[current_interface_sym] = methods
current_interface_sym = elem
current_interface = _parse_ast(ctx, elem)
methods = []
if not isinstance(current_interface, (MaybeClass, MaybeHostForm, VarRef)):
raise ParserException(
f"deftype* interface implementation must be an existing interface",
form=elem,
)
interfaces.append(current_interface)
elif isinstance(elem, ISeq):
if current_interface is None:
raise ParserException(
f"deftype* method cannot be declared without interface", form=elem
)
methods.append(__deftype_method(ctx, elem, current_interface))
else:
raise ParserException(
f"deftype* must consist of interface or protocol names and methods",
form=elem,
)
if current_interface is not None:
if len(methods) > 0:
if current_interface_sym in interface_methods:
raise ParserException(
f"deftype* forms may only implement an interface once",
form=current_interface_sym,
)
assert (
current_interface_sym is not None
), "Symbol must be defined with interface"
interface_methods[current_interface_sym] = methods
else:
raise ParserException(
f"deftype* may not declare interface without at least one method",
form=current_interface_sym,
)
return interfaces, list(chain.from_iterable(interface_methods.values())) | Roll up deftype* declared bases and method implementations. |
17,892 | def raw_broadcast(self, destination, message, **kwargs):
self._broadcast(destination, message, **kwargs) | Broadcast a raw (unmangled) message.
This may cause errors if the receiver expects a mangled message.
:param destination: Topic name to send to
:param message: Either a string or a serializable object to be sent
:param **kwargs: Further parameters for the transport layer. For example
delay: Delay transport of message by this many seconds
headers: Optional dictionary of header entries
expiration: Optional expiration time, relative to sending time
transaction: Transaction ID if message should be part of a
transaction |
17,893 | def get_metric_names(self, agent_id, re=None, limit=5000):
self._api_rate_limit_exceeded(self.get_metric_names)
parameters = {'re': re, 'limit': limit}
endpoint = "https://api.newrelic.com"
uri = "{endpoint}/api/v1/applications/{agent_id}/metrics.xml"\
.format(endpoint=endpoint, agent_id=agent_id)
response = self._make_get_request(uri, parameters=parameters, timeout=max(self.timeout, 5.0))
metrics = {}
for metric in response.findall('metric'):
fields = []
for field in metric.findall('field'):
fields.append(field.get('name'))
metrics[metric.get('name')] = fields
return metrics | Requires: application ID
Optional: Regex to filter metric names, limit of results
Returns: A dictionary,
key: metric name,
value: list of fields available for a given metric
Method: Get
Restrictions: Rate limit to 1x per minute
Errors: 403 Invalid API Key, 422 Invalid Parameters
Endpoint: api.newrelic.com |
17,894 | def numberofnetworks(self):
sels1 = selectiontools.Selections()
sels2 = selectiontools.Selections()
complete = selectiontools.Selection('complete',
self.nodes, self.elements)
for node in self.endnodes:
sel = complete.copy(node.name).select_upstream(node)
sels1 += sel
sels2 += sel.copy(node.name)
for sel1 in sels1:
for sel2 in sels2:
if sel1.name != sel2.name:
sel1 -= sel2
for name in list(sels1.names):
if not sels1[name].elements:
del sels1[name]
return sels1 | The number of distinct networks defined by the |Node| and
|Element| objects currently handled by the |HydPy| object. |
17,895 | def lock(self, name, ttl=None, lock_id=None):
return Lock(self, name, ttl, lock_id) | Create a named :py:class:`Lock` instance. The lock implements
an API similar to the standard library's ``threading.Lock``,
and can also be used as a context manager or decorator.
:param str name: The name of the lock.
:param int ttl: The time-to-live for the lock in milliseconds
(optional). If the ttl is ``None`` then the lock will not
expire.
:param str lock_id: Optional identifier for the lock instance. |
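A hedged sketch of the usage styles the docstring mentions; `client` is a placeholder for the object exposing lock(), and acquire/release are assumed to follow the threading.Lock-like API described above.

def generate_report():
    print('working under the lock')

# Plain acquire/release with a 30 second time-to-live.
lock = client.lock('report-generation', ttl=30000)
lock.acquire()
try:
    generate_report()
finally:
    lock.release()

# As a context manager ...
with client.lock('report-generation', ttl=30000):
    generate_report()

# ... or as a decorator guarding a whole function.
@client.lock('nightly-job')
def nightly_job():
    generate_report()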
17,896 | def get(self, request, bot_id, handler_id, id, format=None):
return super(UrlParameterDetail, self).get(request, bot_id, handler_id, id, format) | Get url parameter by id
---
serializer: AbsParamSerializer
responseMessages:
- code: 401
message: Not authenticated |
17,897 | def rule_command_cmdlist_interface_u_interface_fe_leaf_interface_fortygigabitethernet_leaf(self, **kwargs):
config = ET.Element("config")
rule = ET.SubElement(config, "rule", xmlns="urn:brocade.com:mgmt:brocade-aaa")
index_key = ET.SubElement(rule, "index")
index_key.text = kwargs.pop('index')
command = ET.SubElement(rule, "command")
cmdlist = ET.SubElement(command, "cmdlist")
interface_u = ET.SubElement(cmdlist, "interface-u")
interface_fe_leaf = ET.SubElement(interface_u, "interface-fe-leaf")
interface = ET.SubElement(interface_fe_leaf, "interface")
fortygigabitethernet_leaf = ET.SubElement(interface, "fortygigabitethernet-leaf")
fortygigabitethernet_leaf.text = kwargs.pop('fortygigabitethernet_leaf')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
17,898 | def get_remaining_time(program):
now = datetime.datetime.now()
program_start = program.get()
program_end = program.get()
if not program_start or not program_end:
_LOGGER.error()
_LOGGER.debug(, program)
return
if now > program_end:
_LOGGER.error()
_LOGGER.debug(, program)
return 0
progress = now - program_start
return progress.seconds | Get the remaining time in seconds of a program that is currently on. |
17,899 | def urlopen(self, method, url, redirect=True, **kw):
"Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
u = parse_url(url)
if u.scheme == "http":
return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw) | Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute. |