code | docstring
---|---|
def query(input, representation, resolvers=None, **kwargs):
apiurl = API_BASE+'/%s/%s/xml' % (urlquote(input), representation)
if resolvers:
kwargs['resolver'] = ",".join(resolvers)
if kwargs:
apiurl+= '?%s' % urlencode(kwargs)
result = []
try:
tree = ET.parse(urlopen(apiurl))
for data in tree.findall(".//data"):
datadict = {'resolver':data.attrib['resolver'],
'notation':data.attrib['notation'],
'value':[]}
for item in data.findall("item"):
datadict['value'].append(item.text)
if len(datadict['value']) == 1:
datadict['value'] = datadict['value'][0]
result.append(datadict)
except HTTPError:
pass
return result if result else None
|
Get all results for resolving input to the specified output representation
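A minimal usage sketch, assuming the module-level names used above (API_BASE, urlquote, urlencode, urlopen, ET) are in scope; the representation and resolver names are illustrative:
results = query('aspirin', 'smiles', resolvers=['name_by_opsin', 'name_by_cir'])
if results:
    print(results[0]['value'])   # value(s) reported by the first matching resolver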
|
def _init_loaders(self) -> None:
for loader in settings.I18N_TRANSLATION_LOADERS:
loader_class = import_class(loader['loader'])
instance = loader_class()
instance.on_update(self.update)
run(instance.load(**loader['params']))
|
This creates the loader instances and subscribes to their updates.
|
def get_all(self, inactive='', email_filter='', tag='', count=25, offset=0):
self._check_values()
params = '?inactive=' + inactive + '&emailFilter=' + email_filter +'&tag=' + tag
params += '&count=' + str(count) + '&offset=' + str(offset)
req = Request(
__POSTMARK_URL__ + 'bounces' + params,
None,
{
'Accept': 'application/json',
'Content-Type': 'application/json',
'X-Postmark-Server-Token': self.__api_key,
'User-agent': self.__user_agent
}
)
try:
result = urlopen(req)
with closing(result):
if result.code == 200:
return json.loads(result.read())
else:
raise PMMailSendException('Return code %d: %s' % (result.code, result.msg))
except HTTPError as err:
return err
|
Fetches a portion of bounces according to the specified input criteria. The count and offset
parameters are mandatory. You should never retrieve all bounces as that could be excessively
slow for your application. To know how many bounces you have, you need to request a portion
first, usually the first page, and the service will return the count in the TotalCount property
of the response.
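A hedged sketch of that pagination pattern (``bounces`` stands for a bounce-manager instance exposing this method; only the TotalCount field comes from the description above):
first_page = bounces.get_all(count=25, offset=0)
total = first_page['TotalCount']      # total number of bounces reported by the service
pages = (total + 24) // 25            # subsequent calls advance `offset` by 25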
|
def _get_mean(self, imt, mag, hypo_depth, rrup, d):
mag = min(mag, 8.3)
if imt.name == 'PGV':
mean = (
0.58 * mag +
0.0038 * hypo_depth +
d -
1.29 -
np.log10(rrup + 0.0028 * 10 ** (0.5 * mag)) -
0.002 * rrup
)
else:
mean = (
0.50 * mag +
0.0043 * hypo_depth +
d +
0.61 -
np.log10(rrup + 0.0055 * 10 ** (0.5 * mag)) -
0.003 * rrup
)
mean = np.log10(10**(mean)/(g*100))
return mean
|
Return mean value as defined in equation 3.5.1-1, page 148
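Transcribing the non-PGV branch of the code above into the math notation used elsewhere in this dataset (a direct reading of the implementation, not of the cited document):
.. math:: \log_{10} y = 0.50 M + 0.0043 h + d + 0.61
          - \log_{10}\left(r_{rup} + 0.0055 \cdot 10^{0.5 M}\right) - 0.003 r_{rup}
with :math:`M` capped at 8.3 and the final statement rescaling via ``mean = log10(10**mean / (g * 100))``.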
|
def backend_add(cls, name, backend):
oper = cls.call(
'hosting.rproxy.server.create', cls.usable_id(name), backend)
cls.echo('Adding backend %s:%s into webaccelerator' %
(backend['ip'], backend['port']))
cls.display_progress(oper)
cls.echo('Backend added')
return oper
|
Add a backend into a webaccelerator
|
def trim_disconnected_blobs(im, inlets):
temp = sp.zeros_like(im)
temp[inlets] = True
labels, N = spim.label(im + temp)
im = im ^ (clear_border(labels=labels) > 0)
return im
|
r"""
Removes foreground voxels not connected to specified inlets
Parameters
----------
im : ND-array
The array to be trimmed
inlets : ND-array of tuple of indices
The locations of the inlets. Any voxels *not* connected directly to
the inlets will be trimmed
Returns
-------
image : ND-array
An array of the same shape as ``im``, but with all foreground
voxels not connected to the ``inlets`` removed.
|
def set_recursion_limit(limit):
if limit < minimum_recursion_limit:
raise CoconutException("--recursion-limit must be at least " + str(minimum_recursion_limit))
sys.setrecursionlimit(limit)
|
Set the Python recursion limit.
|
def graph_branches_from_node(self, node):
branches = []
branches_dict = self._graph.adj[node]
for branch in branches_dict.items():
branches.append(branch)
return sorted(branches, key=lambda _: repr(_))
|
Returns branches that are connected to `node`
Args
----
node: GridDing0
Ding0 object (member of graph)
Returns
-------
:any:`list`
List of tuples (node in :obj:`GridDing0`, branch in :obj:`BranchDing0`) ::
(node , branch_0 ),
...,
(node , branch_N ),
|
def diff_commonOverlap(self, text1, text2):
text1_length = len(text1)
text2_length = len(text2)
if text1_length == 0 or text2_length == 0:
return 0
if text1_length > text2_length:
text1 = text1[-text2_length:]
elif text1_length < text2_length:
text2 = text2[:text1_length]
text_length = min(text1_length, text2_length)
if text1 == text2:
return text_length
best = 0
length = 1
while True:
pattern = text1[-length:]
found = text2.find(pattern)
if found == -1:
return best
length += found
if found == 0 or text1[-length:] == text2[:length]:
best = length
length += 1
|
Determine if the suffix of one string is the prefix of another.
Args:
text1 First string.
text2 Second string.
Returns:
The number of characters common to the end of the first
string and the start of the second string.
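A worked example (``dmp`` stands for a diff_match_patch-style instance; the values follow from tracing the loop above):
dmp.diff_commonOverlap('abcdef', 'defxyz')   # -> 3, the shared run 'def'
dmp.diff_commonOverlap('abc', 'xyz')         # -> 0, no overlap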
|
def zscan(self, name, cursor='0', match=None, count=10):
def value_function():
values = self.zrange(name, 0, -1, withscores=True)
values.sort(key=lambda x: x[1])
return values
return self._common_scan(value_function, cursor=cursor, match=match, count=count, key=lambda v: v[0])
|
Emulate zscan.
|
def indication(self, pdu):
if _debug: UDPDirector._debug("indication %r", pdu)
addr = pdu.pduDestination
peer = self.peers.get(addr, None)
if not peer:
peer = self.actorClass(self, addr)
peer.indication(pdu)
|
Client requests are queued for delivery.
|
def duplicate_statements(model, oldorigin, neworigin, rfilter=None):
for o, r, t, a in model.match(oldorigin):
if rfilter is None or rfilter(o, r, t, a):
model.add(I(neworigin), r, t, a)
return
|
Take links with a given origin, and create duplicate links with the same information but a new origin
:param model: Versa model to be updated
:param oldorigin: origin resource IRI whose links are to be duplicated
:param neworigin: origin resource IRI for the duplicated links
:param rfilter: optional predicate (origin, rel, target, attribs) deciding which links to copy
:return: None
|
def _create_listening_stream(self, pull_addr):
sock = self._zmq_context.socket(zmq.PULL)
sock.connect(pull_addr)
stream = ZMQStream(sock, io_loop=self.io_loop)
return stream
|
Create a stream listening for Requests. The `self._recv_callback`
method is associated with incoming requests.
|
def queueStream(self, rdds, oneAtATime=True, default=None):
deserializer = QueueStreamDeserializer(self._context)
if default is not None:
default = deserializer(default)
if Queue is False:
log.error('Run "pip install tornado" to install tornado.')
q = Queue()
for i in rdds:
q.put(i)
qstream = QueueStream(q, oneAtATime, default)
return DStream(qstream, self, deserializer)
|
Create stream iterable over RDDs.
:param rdds: Iterable over RDDs or lists.
:param oneAtATime: Process one at a time or all.
:param default: If no more RDDs in ``rdds``, return this. Can be None.
:rtype: DStream
Example:
>>> import pysparkling
>>> sc = pysparkling.Context()
>>> ssc = pysparkling.streaming.StreamingContext(sc, 0.1)
>>> (
... ssc
... .queueStream([[4], [2], [7]])
... .foreachRDD(lambda rdd: print(rdd.collect()))
... )
>>> ssc.start()
>>> ssc.awaitTermination(0.35)
[4]
[2]
[7]
Example testing the default value:
>>> import pysparkling
>>> sc = pysparkling.Context()
>>> ssc = pysparkling.streaming.StreamingContext(sc, 0.1)
>>> (
... ssc
... .queueStream([[4], [2]], default=['placeholder'])
... .foreachRDD(lambda rdd: print(rdd.collect()))
... )
>>> ssc.start()
>>> ssc.awaitTermination(0.35)
[4]
[2]
['placeholder']
|
def _structure_dict(self, obj, cl):
if is_bare(cl) or cl.__args__ == (Any, Any):
return dict(obj)
else:
key_type, val_type = cl.__args__
if key_type is Any:
val_conv = self._structure_func.dispatch(val_type)
return {k: val_conv(v, val_type) for k, v in obj.items()}
elif val_type is Any:
key_conv = self._structure_func.dispatch(key_type)
return {key_conv(k, key_type): v for k, v in obj.items()}
else:
key_conv = self._structure_func.dispatch(key_type)
val_conv = self._structure_func.dispatch(val_type)
return {
key_conv(k, key_type): val_conv(v, val_type)
for k, v in obj.items()
}
|
Convert a mapping into a potentially generic dict.
|
def relativize_path(path, base, os_sep=os.sep):
if not check_base(path, base, os_sep):
raise OutsideDirectoryBase("%r is not under %r" % (path, base))
prefix_len = len(base)
if not base.endswith(os_sep):
prefix_len += len(os_sep)
return path[prefix_len:]
|
Make absolute path relative to an absolute base.
:param path: absolute path
:type path: str
:param base: absolute base path
:type base: str
:param os_sep: path component separator, defaults to current OS separator
:type os_sep: str
:return: relative path
:rtype: str or unicode
:raises OutsideDirectoryBase: if path is not below base
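A quick illustration, assuming POSIX-style separators and that ``check_base`` accepts the pair:
relativize_path('/srv/data/reports/2020.csv', '/srv/data', os_sep='/')
# -> 'reports/2020.csv'
relativize_path('/srv/data/', '/srv', os_sep='/')
# -> 'data/'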
|
def add(self, name, handler, group_by=None, aggregator=None):
assert self.batch is not None, "No active batch, call start() first"
items = self.batch.setdefault(name, collections.OrderedDict())
if group_by is None:
items.setdefault(group_by, []).append((None, handler))
elif aggregator is not None:
agg = items.get(group_by, [(None, None)])[0][0]
items[group_by] = [(aggregator(agg), handler)]
else:
items[group_by] = [(None, handler)]
|
Add a new handler to the current batch.
|
def nodes_walker(node, ascendants=False):
attribute = "children" if not ascendants else "parent"
if not hasattr(node, attribute):
return
elements = getattr(node, attribute)
elements = elements if isinstance(elements, list) else [elements]
for element in elements:
yield element
if not hasattr(element, attribute):
continue
if not getattr(element, attribute):
continue
for sub_element in nodes_walker(element, ascendants=ascendants):
yield sub_element
|
Defines a generator used to walk a Nodes hierarchy.
Usage::
>>> node_a = AbstractCompositeNode("MyNodeA")
>>> node_b = AbstractCompositeNode("MyNodeB", node_a)
>>> node_c = AbstractCompositeNode("MyNodeC", node_a)
>>> node_d = AbstractCompositeNode("MyNodeD", node_b)
>>> node_e = AbstractCompositeNode("MyNodeE", node_b)
>>> node_f = AbstractCompositeNode("MyNodeF", node_d)
>>> node_g = AbstractCompositeNode("MyNodeG", node_f)
>>> node_h = AbstractCompositeNode("MyNodeH", node_g)
>>> for node in nodes_walker(node_a):
... print node.name
MyNodeB
MyNodeD
MyNodeF
MyNodeG
MyNodeH
MyNodeE
MyNodeC
:param node: Node to walk.
:type node: AbstractCompositeNode
:param ascendants: Ascendants instead of descendants will be yielded.
:type ascendants: bool
:return: Node.
:rtype: AbstractNode or AbstractCompositeNode
|
def create_raw(self, key, value):
data = None
if key is not None and value is not None:
data = self.db.create(key.strip(), value)
else:
self.tcex.log.warning(u'The key or value field was None.')
return data
|
Create method of CRUD operation for raw data.
Args:
key (string): The variable to write to the DB.
value (any): The data to write to the DB.
Returns:
(string): Result of DB write.
|
def url(self):
url = "https://api.darksky.net/forecast/{key}/{lat},{lon}".format(key=self.api_key,
lat=self.latitude,
lon=self.longitude)
if isinstance(self._date, datetime):
url += ",{:%Y-%m-%dT%H:%M:%S}".format(self._date)
url += "?units={}".format(self.units)
if self.lang != "auto":
url += "&lang={}".format(self.lang)
if len(self._exclude) > 0:
url += "&exclude="
for e in self._exclude:
url += "{},".format(e)
url = url.strip(",")
if self._extend:
url += "&extend=hourly"
return url
|
Build and returns a URL used to make a Dark Sky API call.
|
def handle(self, **options):
super(Command, self).handle(**options)
return "{} static file{} copied.".format(
self.num_copied_files,
'' if self.num_copied_files == 1 else 's')
|
Override handle to suppress summary output
|
def metric(self, measurement_name, values, tags=None, timestamp=None):
if not measurement_name or values in (None, {}):
return
tags = tags or {}
all_tags = dict(self.tags, **tags)
line = Line(measurement_name, values, all_tags, timestamp)
self.send(line.to_line_protocol())
|
Appends the global tags configured for the client to the given tags, then
converts the data into InfluxDB line protocol and sends it to the socket
|
def add_singles(self, results):
logging.info('BKG Coincs %s stored %s bytes',
len(self.coincs), self.coincs.nbytes)
valid_ifos = [k for k in results.keys() if results[k] and k in self.ifos]
if len(valid_ifos) == 0: return {}
self._add_singles_to_buffer(results, ifos=valid_ifos)
_, coinc_results = self._find_coincs(results, ifos=valid_ifos)
if len(valid_ifos) == 2:
coinc_results['coinc_possible'] = True
return coinc_results
|
Add singles to the background estimate and find candidates
Parameters
----------
results: dict of arrays
Dictionary of dictionaries indexed by ifo and keys such as 'snr',
'chisq', etc. The specific format is determined by the
LiveBatchMatchedFilter class.
Returns
-------
coinc_results: dict of arrays
A dictionary of arrays containing the coincident results.
|
def trainRegressor(cls, data, categoricalFeaturesInfo,
impurity="variance", maxDepth=5, maxBins=32, minInstancesPerNode=1,
minInfoGain=0.0):
return cls._train(data, "regression", 0, categoricalFeaturesInfo,
impurity, maxDepth, maxBins, minInstancesPerNode, minInfoGain)
|
Train a decision tree model for regression.
:param data:
Training data: RDD of LabeledPoint. Labels are real numbers.
:param categoricalFeaturesInfo:
Map storing arity of categorical features. An entry (n -> k)
indicates that feature n is categorical with k categories
indexed from 0: {0, 1, ..., k-1}.
:param impurity:
Criterion used for information gain calculation.
The only supported value for regression is "variance".
(default: "variance")
:param maxDepth:
Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1
means 1 internal node + 2 leaf nodes).
(default: 5)
:param maxBins:
Number of bins used for finding splits at each node.
(default: 32)
:param minInstancesPerNode:
Minimum number of instances required at child nodes to create
the parent split.
(default: 1)
:param minInfoGain:
Minimum info gain required to create a split.
(default: 0.0)
:return:
DecisionTreeModel.
Example usage:
>>> from pyspark.mllib.regression import LabeledPoint
>>> from pyspark.mllib.tree import DecisionTree
>>> from pyspark.mllib.linalg import SparseVector
>>>
>>> sparse_data = [
... LabeledPoint(0.0, SparseVector(2, {0: 0.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 1.0})),
... LabeledPoint(0.0, SparseVector(2, {0: 0.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 2.0}))
... ]
>>>
>>> model = DecisionTree.trainRegressor(sc.parallelize(sparse_data), {})
>>> model.predict(SparseVector(2, {1: 1.0}))
1.0
>>> model.predict(SparseVector(2, {1: 0.0}))
0.0
>>> rdd = sc.parallelize([[0.0, 1.0], [0.0, 0.0]])
>>> model.predict(rdd).collect()
[1.0, 0.0]
|
def load_dependencies(req, history=None):
if history is None:
history = set()
dist = pkg_resources.get_distribution(req)
spec = dict(
requirement=str(req),
resolved=str(dist),
)
if req not in history:
history.add(req)
extras = parse_extras(req)
depends = [
load_dependencies(dep, history=history)
for dep in dist.requires(extras=extras)
]
if depends:
spec.update(depends=depends)
return spec
|
Load the dependency tree as a Python object tree,
suitable for JSON serialization.
>>> deps = load_dependencies('jaraco.packaging')
>>> import json
>>> doc = json.dumps(deps)
|
def GUIDToStr(g):
try:
dat = uuid.UUID(bytes_le=bytes(g))
except:
dat = uuid.UUID(bytes_le=''.join(map(chr, g)))
return '{' + str(dat).upper() + '}'
|
Converts a GUID sequence of bytes into a string.
>>> GUIDToStr([103,22,79,173, 117,234, 36,65,
... 132, 212, 100, 27, 59, 25, 124, 101])
'{AD4F1667-EA75-4124-84D4-641B3B197C65}'
|
def shoelace_for_area(nodes):
_, num_nodes = nodes.shape
if num_nodes == 2:
shoelace = SHOELACE_LINEAR
scale_factor = 2.0
elif num_nodes == 3:
shoelace = SHOELACE_QUADRATIC
scale_factor = 6.0
elif num_nodes == 4:
shoelace = SHOELACE_CUBIC
scale_factor = 20.0
elif num_nodes == 5:
shoelace = SHOELACE_QUARTIC
scale_factor = 70.0
else:
raise _helpers.UnsupportedDegree(num_nodes - 1, supported=(1, 2, 3, 4))
result = 0.0
for multiplier, index1, index2 in shoelace:
result += multiplier * (
nodes[0, index1] * nodes[1, index2]
- nodes[1, index1] * nodes[0, index2]
)
return result / scale_factor
|
r"""Compute an auxiliary "shoelace" sum used to compute area.
.. note::
This is a helper for :func:`_compute_area`.
Defining :math:`\left[i, j\right] = x_i y_j - y_i x_j` as a shoelace
term illuminates the name of this helper. On a degree one curve, this
function will return
.. math::
\frac{1}{2}\left[0, 1\right].
on a degree two curve it will return
.. math::
\frac{1}{6}\left(2 \left[0, 1\right] + 2 \left[1, 2\right] +
\left[0, 2\right]\right)
and so on.
For a given :math:`\left[i, j\right]`, the coefficient comes from
integrating :math:`b_{i, d}, b_{j, d}` on :math:`\left[0, 1\right]` (where
:math:`b_{i, d}, b_{j, d}` are Bernstein basis polynomials).
Returns:
float: The computed sum of shoelace terms.
Raises:
.UnsupportedDegree: If the degree is not 1, 2, 3 or 4.
|
def split_ls(func):
@wraps(func)
def wrapper(self, files, silent=True, exclude_deleted=False):
if not isinstance(files, (tuple, list)):
files = [files]
counter = 0
index = 0
results = []
while files:
if index >= len(files):
results += func(self, files, silent, exclude_deleted)
break
length = len(str(files[index]))
if length + counter > CHAR_LIMIT:
runfiles = files[:index]
files = files[index:]
counter = 0
index = 0
results += func(self, runfiles, silent, exclude_deleted)
runfiles = None
del runfiles
else:
index += 1
counter += length
return results
return wrapper
|
Decorator to split files into manageable chunks so as not to exceed the Windows cmd limit
:param func: Function to call for each chunk
:type func: :py:class:Function
|
def remote_chassis_id_mac_uneq_store(self, remote_chassis_id_mac):
if remote_chassis_id_mac != self.remote_chassis_id_mac:
self.remote_chassis_id_mac = remote_chassis_id_mac
return True
return False
|
This function saves the Chassis MAC, if different from stored.
|
def standardise_quotes(self, val):
if self._in_quotes(val, self.altquote):
middle = self.remove_quotes(val)
val = self.add_quotes(middle)
return self.escape_quotes(val)
|
Change the quotes used to wrap a value to the pprint default
E.g. "val" to 'val' or 'val' to "val"
|
def predict(self, X):
Xt, _, _ = self._transform(X)
return self._final_estimator.predict(Xt)
|
Apply transforms to the data, and predict with the final estimator
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
Returns
-------
yp : array-like
Predicted transformed target
|
def toggle_deriv(self, evt=None, value=None):
"toggle derivative of data"
if value is None:
self.conf.data_deriv = not self.conf.data_deriv
expr = self.conf.data_expr or ''
if self.conf.data_deriv:
expr = "deriv(%s)" % expr
self.write_message("plotting %s" % expr, panel=0)
self.conf.process_data()
|
toggle derivative of data
|
def register(template_class,*extensions):
for ext in extensions:
ext = normalize(ext)
if not Lean.template_mappings.has_key(ext):
Lean.template_mappings[ext] = []
Lean.template_mappings[ext].insert(0,template_class)
Lean.template_mappings[ext] = unique(Lean.template_mappings[ext])
|
Register a template for a given extension or range of extensions
|
def create_hparams():
if FLAGS.use_tpu and "tpu" not in FLAGS.hparams_set:
tf.logging.warn("Not all hyperparameter sets work on TPU. "
"Prefer hparams_sets with a '_tpu' suffix, "
"e.g. transformer_tpu, if available for your model.")
hparams_path = os.path.join(FLAGS.output_dir, "hparams.json")
return trainer_lib.create_hparams(FLAGS.hparams_set, FLAGS.hparams,
hparams_path=hparams_path)
|
Create hparams.
|
def showDosHeaderData(peInstance):
dosFields = peInstance.dosHeader.getFields()
print "[+] IMAGE_DOS_HEADER values:\n"
for field in dosFields:
if isinstance(dosFields[field], datatypes.Array):
print "--> %s - Array of length %d" % (field, len(dosFields[field]))
counter = 0
for element in dosFields[field]:
print "[%d] 0x%08x" % (counter, element.value)
counter += 1
else:
print "--> %s = 0x%08x" % (field, dosFields[field].value)
|
Prints IMAGE_DOS_HEADER fields.
|
def consts(self):
consts = []
append_const = consts.append
for instr in self.instrs:
if isinstance(instr, LOAD_CONST) and instr.arg not in consts:
append_const(instr.arg)
return tuple(consts)
|
The constants referenced in this code object.
|
def input(self):
if self._input is None:
input_file = self.environ['wsgi.input']
content_length = self.content_length or 0
self._input = WsgiInput(input_file, self.content_length)
return self._input
|
Returns a file-like object representing the request body.
|
def readme(fname):
md = open(os.path.join(os.path.dirname(__file__), fname)).read()
output = md
try:
import pypandoc
output = pypandoc.convert(md, 'rst', format='md')
except ImportError:
pass
return output
|
Reads a markdown file and returns the contents formatted as rst
|
def get_feedback(self, question_id):
response = self.get_response(question_id)
item = self._get_item(response.get_item_id())
if response.is_answered():
try:
return item.get_feedback_for_response(response)
except errors.IllegalState:
pass
else:
return item.get_feedback()
|
get feedback for item
|
def precipitable_water(dewpt, pressure, bottom=None, top=None):
sort_inds = np.argsort(pressure)[::-1]
pressure = pressure[sort_inds]
dewpt = dewpt[sort_inds]
if top is None:
top = np.nanmin(pressure) * pressure.units
if bottom is None:
bottom = np.nanmax(pressure) * pressure.units
pres_layer, dewpt_layer = get_layer(pressure, dewpt, bottom=bottom, depth=bottom - top)
w = mixing_ratio(saturation_vapor_pressure(dewpt_layer), pres_layer)
pw = -1. * (np.trapz(w.magnitude, pres_layer.magnitude) * (w.units * pres_layer.units)
/ (mpconsts.g * mpconsts.rho_l))
return pw.to('millimeters')
|
r"""Calculate precipitable water through the depth of a sounding.
Formula used is:
.. math:: -\frac{1}{\rho_l g} \int\limits_{p_\text{bottom}}^{p_\text{top}} r dp
from [Salby1996]_, p. 28.
Parameters
----------
dewpt : `pint.Quantity`
Atmospheric dewpoint profile
pressure : `pint.Quantity`
Atmospheric pressure profile
bottom: `pint.Quantity`, optional
Bottom of the layer, specified in pressure. Defaults to None (highest pressure).
top: `pint.Quantity`, optional
The top of the layer, specified in pressure. Defaults to None (lowest pressure).
Returns
-------
`pint.Quantity`
The precipitable water in the layer
|
def _metatile_contents_equal(zip_1, zip_2):
names_1 = set(zip_1.namelist())
names_2 = set(zip_2.namelist())
if names_1 != names_2:
return False
for n in names_1:
bytes_1 = zip_1.read(n)
bytes_2 = zip_2.read(n)
if bytes_1 != bytes_2:
return False
return True
|
Given two open zip files as arguments, this returns True if the zips
both contain the same set of files, having the same names, and each
file within the zip is byte-wise identical to the one with the same
name in the other zip.
|
def append(self, result):
if isinstance(result, Result):
self.data.append(result)
elif isinstance(result, ResultList):
self.data += result.data
else:
raise TypeError('unknown result type')
|
Append a new Result to the list.
:param result: Result to append
:return: Nothing
:raises: TypeError if result is not Result or ResultList
|
def exact_or_minor_exe_version_match(executable_name,
exe_version_tuples,
version):
exe = exact_exe_version_match(executable_name,
exe_version_tuples,
version)
if not exe:
exe = minor_exe_version_match(executable_name,
exe_version_tuples,
version)
return exe
|
IF there is an exact match then use it
OTHERWISE try to find a minor version match
|
def kogge_stone_add(A, B, cin=0):
if len(A) != len(B):
raise ValueError("expected A and B to be equal length")
N = len(A)
gs = [A[i] & B[i] for i in range(N)]
ps = [A[i] ^ B[i] for i in range(N)]
for i in range(clog2(N)):
start = 1 << i
for j in range(start, N):
gs[j] = gs[j] | ps[j] & gs[j-start]
ps[j] = ps[j] & ps[j-start]
ss = [A[0] ^ B[0] ^ cin]
ss += [A[i] ^ B[i] ^ gs[i-1] for i in range(1, N)]
return farray(ss), farray(gs)
|
Return symbolic logic for an N-bit Kogge-Stone adder.
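A small usage sketch; it assumes pyeda-style helpers (``exprvars`` for the inputs, plus the ``farray``/``clog2`` names used in the body) are importable:
from pyeda.inter import exprvars
A = exprvars('a', 4)            # 4-bit symbolic operand
B = exprvars('b', 4)
S, G = kogge_stone_add(A, B)    # S: sum bits, G: generate/carry bits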
|
def run_normalization(self):
for index, media_file in enumerate(
tqdm(
self.media_files,
desc="File",
disable=not self.progress,
position=0
)):
logger.info("Normalizing file {} ({} of {})".format(media_file, index + 1, self.file_count))
media_file.run_normalization()
logger.info("Normalized file written to {}".format(media_file.output_file))
|
Run the normalization procedures
|
def stats(self):
data = Counter()
for name, value, aggregated in self.raw:
if aggregated:
data['%s.max' % name] = max(data['%s.max' % name], value)
data['%s.total' % name] += value
else:
data[name] = value
return sorted(data.items())
|
Stats that have been aggregated appropriately.
|
def parse_xhtml_notes(entry):
for note in entry.xml_notes.itertext():
m = re.match(r'^([^:]+):(.+)$', note)
if m:
key, value = m.groups()
key = key.strip().lower().replace(' ', '_')
value = value.strip()
m = re.match(r'^"(.*)"$', value)
if m:
value = m.group(1)
if value != '':
yield key, value
|
Yield key, value pairs parsed from the XHTML notes section.
Each key, value pair must be defined in its own text block, e.g.
``<p>key: value</p><p>key2: value2</p>``. The key and value must be
separated by a colon. Whitespace is stripped from both key and value, and
quotes are removed from values if present. The key is normalized by
conversion to lower case and spaces replaced with underscores.
Args:
entry: :class:`_SBMLEntry`.
|
def confusion_matrix(self, slice_size:int=1):
"Confusion matrix as an `np.ndarray`."
x=torch.arange(0,self.data.c)
if slice_size is None: cm = ((self.pred_class==x[:,None]) & (self.y_true==x[:,None,None])).sum(2)
else:
cm = torch.zeros(self.data.c, self.data.c, dtype=x.dtype)
for i in range(0, self.y_true.shape[0], slice_size):
cm_slice = ((self.pred_class[i:i+slice_size]==x[:,None])
& (self.y_true[i:i+slice_size]==x[:,None,None])).sum(2)
torch.add(cm, cm_slice, out=cm)
return to_np(cm)
|
Confusion matrix as an `np.ndarray`.
|
def tile_exists(bounds, tile_z, tile_x, tile_y):
mintile = mercantile.tile(bounds[0], bounds[3], tile_z)
maxtile = mercantile.tile(bounds[2], bounds[1], tile_z)
return (
(tile_x <= maxtile.x + 1)
and (tile_x >= mintile.x)
and (tile_y <= maxtile.y + 1)
and (tile_y >= mintile.y)
)
|
Check if a mercator tile is inside given bounds.
Attributes
----------
bounds : list
WGS84 bounds (left, bottom, right, top).
x : int
Mercator tile X index.
y : int
Mercator tile Y index.
z : int
Mercator tile ZOOM level.
Returns
-------
out : boolean
if True, the z-x-y mercator tile is inside the bounds.
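An illustrative call (requires mercantile; the tile indices are made up rather than checked against real data):
bounds = [-10.0, 35.0, 5.0, 45.0]          # left, bottom, right, top (WGS84)
tile_exists(bounds, tile_z=5, tile_x=15, tile_y=11)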
|
def timesync_send(self, tc1, ts1, force_mavlink1=False):
return self.send(self.timesync_encode(tc1, ts1), force_mavlink1=force_mavlink1)
|
Time synchronization message.
tc1 : Time sync timestamp 1 (int64_t)
ts1 : Time sync timestamp 2 (int64_t)
|
def getStateIndex(self,state):
statecodes = self.getStateCode(state)
return np.searchsorted(self.codes,statecodes).astype(int)
|
Returns the index of a state by calculating the state code and searching for this code in a sorted list.
Can be called on multiple states at once.
|
def _merge_list_of_dict(first, second, prepend=True):
first = _cleanup(first)
second = _cleanup(second)
if not first and not second:
return []
if not first and second:
return second
if first and not second:
return first
overlaps = []
merged = []
appended = []
for ele in first:
if _lookup_element(second, ele.keys()[0]):
overlaps.append(ele)
elif prepend:
merged.append(ele)
elif not prepend:
appended.append(ele)
for ele in second:
ele_key = ele.keys()[0]
if _lookup_element(overlaps, ele_key):
ele_val_first = _lookup_element(first, ele_key)
merged.append({ele_key: ele_val_first})
else:
merged.append(ele)
if not prepend:
merged.extend(appended)
return merged
|
Merge lists of dictionaries.
Each element of the list is a dictionary having one single key.
That key is then used as unique lookup.
The first element list has higher priority than the second.
When there's an overlap between the two lists,
it won't change the position, but the content.
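An illustrative trace of that rule (assuming ``_cleanup`` passes well-formed lists through and ``_lookup_element`` returns the value stored under a key, or None when absent):
first = [{'a': 1}, {'b': 2}]
second = [{'b': 9}, {'c': 3}]
_merge_list_of_dict(first, second)
# -> [{'a': 1}, {'b': 2}, {'c': 3}]   (overlap 'b' keeps first's content)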
|
def begin(self):
if not self._multi_use:
raise ValueError("Cannot call 'begin' on single-use snapshots")
if self._transaction_id is not None:
raise ValueError("Read-only transaction already begun")
if self._read_request_count > 0:
raise ValueError("Read-only transaction already pending")
database = self._session._database
api = database.spanner_api
metadata = _metadata_with_prefix(database.name)
txn_selector = self._make_txn_selector()
response = api.begin_transaction(
self._session.name, txn_selector.begin, metadata=metadata
)
self._transaction_id = response.id
return self._transaction_id
|
Begin a read-only transaction on the database.
:rtype: bytes
:returns: the ID for the newly-begun transaction.
:raises ValueError:
if the transaction is already begun, committed, or rolled back.
|
def grandparent_path(self):
return os.path.basename(os.path.join(self.path, '../..'))
|
return grandparent's path string
|
def product(self, *products):
for product in products:
self._product.append(product)
return self
|
r"""
When search is called, it will limit the results to items in a Product.
:param product: items passed in will be turned into a list
:returns: :class:`Search`
|
def execute(self):
self.print_info()
if (self._config.state.prepared
and not self._config.command_args.get('force')):
msg = 'Skipping, instances already prepared.'
LOG.warn(msg)
return
if not self._config.provisioner.playbooks.prepare:
msg = 'Skipping, prepare playbook not configured.'
LOG.warn(msg)
return
self._config.provisioner.prepare()
self._config.state.change_state('prepared', True)
|
Execute the actions necessary to prepare the instances and returns
None.
:return: None
|
def to_pickle(self, filename):
with open(filename, 'wb') as f:
pickle.dump(self, f)
|
Save Camera to a pickle file, given a filename.
|
def write_results(self, filename):
with self.io(filename, 'a') as fp:
fp.write_samples(self.samples, self.model.variable_params,
last_iteration=self.niterations)
fp.write_samples(self.model_stats,
last_iteration=self.niterations)
fp.write_acceptance_fraction(self._sampler.acceptance_fraction)
fp.write_random_state(state=self._sampler.random_state)
|
Writes samples, model stats, acceptance fraction, and random state
to the given file.
Parameters
-----------
filename : str
The file to write to. The file is opened using the ``io`` class
in an append state.
|
def get_lonlats(self, navid, nav_info, lon_out=None, lat_out=None):
lon_key = 'lon'
valid_min = self[lon_key + '/attr/valid_min']
valid_max = self[lon_key + '/attr/valid_max']
lon_out.data[:] = self[lon_key][::-1]
lon_out.mask[:] = (lon_out < valid_min) | (lon_out > valid_max)
lat_key = 'lat'
valid_min = self[lat_key + '/attr/valid_min']
valid_max = self[lat_key + '/attr/valid_max']
lat_out.data[:] = self[lat_key][::-1]
lat_out.mask[:] = (lat_out < valid_min) | (lat_out > valid_max)
return {}
|
Load an area.
|
def SetColumns( self, columns, sortOrder=None ):
self.columns = columns
self.sortOrder = [(x.defaultOrder,x) for x in self.columns if x.sortDefault]
self.CreateColumns()
|
Set columns to a set of values other than the originals and recreates column controls
|
def _create_clone(self, parent, part, **kwargs):
if part.category == Category.MODEL:
select_action = 'clone_model'
else:
select_action = 'clone_instance'
data = {
"part": part.id,
"parent": parent.id,
"suppress_kevents": kwargs.pop('suppress_kevents', None)
}
query_params = kwargs
query_params['select_action'] = select_action
response = self._request('POST', self._build_url('parts'),
params=query_params,
data=data)
if response.status_code != requests.codes.created:
raise APIError("Could not clone part, {}: {}".format(str(response), response.content))
return Part(response.json()['results'][0], client=self)
|
Create a new `Part` clone under the `Parent`.
.. versionadded:: 2.3
:param parent: parent part
:type parent: :class:`models.Part`
:param part: part to be cloned
:type part: :class:`models.Part`
:param kwargs: (optional) additional keyword=value arguments
:type kwargs: dict
:return: cloned :class:`models.Part`
:raises APIError: if the `Part` could not be cloned
|
def pre_state(*raw_state: GeneralState, filler: Dict[str, Any]) -> None:
@wraps(pre_state)
def _pre_state(filler: Dict[str, Any]) -> Dict[str, Any]:
test_name = get_test_name(filler)
old_pre_state = filler[test_name].get("pre_state", {})
pre_state = normalize_state(raw_state)
defaults = {address: {
"balance": 0,
"nonce": 0,
"code": b"",
"storage": {},
} for address in pre_state}
new_pre_state = deep_merge(defaults, old_pre_state, pre_state)
return assoc_in(filler, [test_name, "pre"], new_pre_state)
|
Specify the state prior to the test execution. Multiple invocations don't override
the state but extend it instead.
In general, the elements of `state_definitions` are nested dictionaries of the following form:
.. code-block:: python
{
address: {
"nonce": <account nonce>,
"balance": <account balance>,
"code": <account code>,
"storage": {
<storage slot>: <storage value>
}
}
}
To avoid unnecessary nesting especially if only few fields per account are specified, the
following and similar formats are possible as well:
.. code-block:: python
(address, "balance", <account balance>)
(address, "storage", <storage slot>, <storage value>)
(address, "storage", {<storage slot>: <storage value>})
(address, {"balance", <account balance>})
|
def add_text(self, reference_id, text):
self.add_words(reference_id, self._tokenize(text))
|
Adds the words from the provided text to the corpus.
The string will be tokenized.
`reference_id`
The reference identifier of the cable.
`text`
A string.
|
def copy(self, key):
copy = List(key, self.db)
copy.clear()
copy.extend(self)
return copy
|
Copy the list to a new list.
WARNING: If key exists, it clears it before copying.
|
def self_aware(fn):
if isgeneratorfunction(fn):
@wraps(fn)
def wrapper(*a,**k):
generator = fn(*a,**k)
if hasattr(
generator,
'gi_frame'
) and hasattr(
generator.gi_frame,
'f_builtins'
) and hasattr(
generator.gi_frame.f_builtins,
'__setitem__'
):
generator.gi_frame.f_builtins[
'self'
] = generator
return wrapper
else:
fn=strict_globals(**fn.__globals__)(fn)
fn.__globals__['self']=fn
return fn
|
decorating a function with this allows it to
refer to itself as 'self' inside the function
body.
|
def read_image(img_spec, bkground_thresh, ensure_num_dim=3):
img = load_image_from_disk(img_spec)
if not np.issubdtype(img.dtype, np.floating):
img = img.astype('float32')
if ensure_num_dim == 3:
img = check_image_is_3d(img)
elif ensure_num_dim == 4:
img = check_image_is_4d(img)
return threshold_image(img, bkground_thresh)
|
Image reader, with additional checks on size.
Can optionally remove stray values close to zero (smaller than 5 %ile).
|
def saturation_equivalent_potential_temperature(pressure, temperature):
t = temperature.to('kelvin').magnitude
p = pressure.to('hPa').magnitude
e = saturation_vapor_pressure(temperature).to('hPa').magnitude
r = saturation_mixing_ratio(pressure, temperature).magnitude
th_l = t * (1000 / (p - e)) ** mpconsts.kappa
th_es = th_l * np.exp((3036. / t - 1.78) * r * (1 + 0.448 * r))
return th_es * units.kelvin
|
r"""Calculate saturation equivalent potential temperature.
This calculation must be given an air parcel's pressure and temperature.
The implementation uses the formula outlined in [Bolton1980]_ for the
equivalent potential temperature, and assumes a saturated process.
First, because we assume a saturated process, the temperature at the LCL is
equivalent to the current temperature. Therefore the following equation
.. math:: T_{L}=\frac{1}{\frac{1}{T_{D}-56}+\frac{ln(T_{K}/T_{D})}{800}}+56
reduces to
.. math:: T_{L} = T_{K}
Then the potential temperature at the temperature/LCL is calculated:
.. math:: \theta_{DL}=T_{K}\left(\frac{1000}{p-e}\right)^k
\left(\frac{T_{K}}{T_{L}}\right)^{.28r}
However, because
.. math:: T_{L} = T_{K}
it follows that
.. math:: \theta_{DL}=T_{K}\left(\frac{1000}{p-e}\right)^k
Both of these are used to calculate the final equivalent potential temperature:
.. math:: \theta_{E}=\theta_{DL}\exp\left[\left(\frac{3036.}{T_{K}}
-1.78\right)*r(1+.448r)\right]
Parameters
----------
pressure: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
Temperature of parcel
Returns
-------
`pint.Quantity`
The saturation equivalent potential temperature of the parcel
Notes
-----
[Bolton1980]_ formula for Theta-e is used (for saturated case), since according to
[DaviesJones2009]_ it is the most accurate non-iterative formulation
available.
|
def execute_action_list(obj, target, kw):
env = obj.get_build_env()
kw = obj.get_kw(kw)
status = 0
for act in obj.get_action_list():
args = ([], [], env)
status = act(*args, **kw)
if isinstance(status, SCons.Errors.BuildError):
status.executor = obj
raise status
elif status:
msg = "Error %s" % status
raise SCons.Errors.BuildError(
errstr=msg,
node=obj.batches[0].targets,
executor=obj,
action=act)
return status
|
Actually execute the action list.
|
def detx(self, det_id, t0set=None, calibration=None):
url = 'detx/{0}?'.format(det_id)
if t0set is not None:
url += '&t0set=' + t0set
if calibration is not None:
url += '&calibrid=' + calibration
detx = self._get_content(url)
return detx
|
Retrieve the detector file for given detector id
If t0set is given, append the calibration data.
|
def update_lbaas_l7rule(self, l7rule, l7policy, body=None):
return self.put(self.lbaas_l7rule_path % (l7policy, l7rule),
body=body)
|
Updates L7 rule.
|
def _unpickle_panel_compat(self, state):
from pandas.io.pickle import _unpickle_array
_unpickle = _unpickle_array
vals, items, major, minor = state
items = _unpickle(items)
major = _unpickle(major)
minor = _unpickle(minor)
values = _unpickle(vals)
wp = Panel(values, items, major, minor)
self._data = wp._data
|
Unpickle the panel.
|
def readin_volt(filename):
with open(filename, 'r') as fid:
content = np.loadtxt(fid, skiprows=1, usecols=[0, 1, 2])
volt = content[:, 2]
elecs = content[:, 0:2]
return elecs, volt
|
Read in measurement data from a volt.dat file and return electrodes and
measured resistance.
|
def start(self):
if not self._thread:
logging.info("Starting asterisk mbox thread")
try:
while True:
self.signal.get(False)
except queue.Empty:
pass
self._thread = threading.Thread(target=self._loop)
self._thread.setDaemon(True)
self._thread.start()
|
Start thread.
|
def __convert_env(env, encoding):
d = dict(os.environ, **(oget(env, {})))
if not SHOULD_NOT_ENCODE_ARGS:
return dict((k.encode(encoding), v.encode(encoding)) for k, v in d.items())
else:
return d
|
Environment variables should be bytes not unicode on Windows.
|
def del_qos(self, port_name):
command = ovs_vsctl.VSCtlCommand(
'del-qos',
[port_name])
self.run_command([command])
|
Deletes the Qos rule on the given port.
|
def update(self):
self.info.display_dataset()
self.overview.update()
self.labels.update(labels=self.info.dataset.header['chan_name'])
self.channels.update()
try:
self.info.markers = self.info.dataset.read_markers()
except FileNotFoundError:
lg.info('No notes/markers present in the header of the file')
else:
self.notes.update_dataset_marker()
|
Once you open a dataset, it activates all the widgets.
|
def create(self,params=None, headers=None):
path = '/creditor_bank_accounts'
if params is not None:
params = {self._envelope_key(): params}
try:
response = self._perform_request('POST', path, params, headers,
retry_failures=True)
except errors.IdempotentCreationConflictError as err:
return self.get(identity=err.conflicting_resource_id,
params=params,
headers=headers)
return self._resource_for(response)
|
Create a creditor bank account.
Creates a new creditor bank account object.
Args:
params (dict, optional): Request body.
Returns:
ListResponse of CreditorBankAccount instances
|
def get_notebook_url(self):
url = self._client._build_url('service_execution_notebook_url', service_execution_id=self.id)
response = self._client._request('GET', url, params=dict(format='json'))
if response.status_code != requests.codes.ok:
raise APIError("Could not retrieve notebook url '{}': {}".format(self, response))
data = response.json()
url = data.get('results')[0].get('url')
return url
|
Get the url of the notebook, if the notebook is executed in interactive mode.
.. versionadded:: 1.13
:return: full url to the interactive running notebook as `basestring`
:raises APIError: when the url cannot be retrieved.
|
def union_overlapping(intervals):
disjoint_intervals = []
for interval in intervals:
if disjoint_intervals and disjoint_intervals[-1].overlaps(interval):
disjoint_intervals[-1] = disjoint_intervals[-1].union(interval)
else:
disjoint_intervals.append(interval)
return disjoint_intervals
|
Union any overlapping intervals in the given set.
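A sketch of the intent, assuming an ``Interval`` type whose ``overlaps``/``union`` behave as the names suggest and an input already ordered by start:
union_overlapping([Interval(1, 4), Interval(3, 6), Interval(8, 9)])
# -> [Interval(1, 6), Interval(8, 9)]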
|
def from_url(cls, db_url=ALL_SETS_ZIP_URL):
r = requests.get(db_url)
r.raise_for_status()
if r.headers['content-type'] == 'application/json':
return cls(json.loads(r.text))
if r.headers['content-type'] == 'application/zip':
with zipfile.ZipFile(six.BytesIO(r.content), 'r') as zf:
names = zf.namelist()
assert len(names) == 1, 'One datafile in ZIP'
return cls.from_file(io.TextIOWrapper(
zf.open(names[0]),
encoding='utf8'))
|
Load card data from a URL.
Uses :func:`requests.get` to fetch card data. Also handles zipfiles.
:param db_url: URL to fetch.
:return: A new :class:`~mtgjson.CardDb` instance.
|
def _delete(self, *criterion):
with self.flushing():
count = self._query(*criterion).delete()
if count == 0:
raise ModelNotFoundError
return True
|
Delete a model by some criterion.
Avoids race-condition check-then-delete logic by checking the count of affected rows.
:raises `ModelNotFoundError` if the row cannot be deleted.
|
def _AddEdge(self, start_node, end_node):
self.graph[start_node].outgoing.append(end_node)
if end_node in self.graph:
self.graph[end_node].incoming.append(start_node)
|
Add a directed edge to the graph.
Add the end to the list of outgoing nodes of the start and the start to the
list of incoming nodes of the end node.
Args:
start_node: name of the start node
end_node: name of the end node
|
def worker_loop_v1(dataset, key_queue, data_queue, batchify_fn):
while True:
idx, samples = key_queue.get()
if idx is None:
break
batch = batchify_fn([dataset[i] for i in samples])
data_queue.put((idx, batch))
|
Worker loop for multiprocessing DataLoader.
|
def drop_all(self):
self.drop(self.get_table_names())
if self.persistent:
with self._lock:
try:
dbfolder = os.path.join(self.root_dir, self.name)
if os.path.exists(dbfolder) and not os.listdir(dbfolder):
rmtree(dbfolder)
except (IOError, WindowsError):
self._print('Failed to delete folder %s when dropping database' % self.name)
finally:
del self
|
Drops all tables from this database
|
def login(self, **kwargs):
if 'signed_username' in kwargs:
apiToken = kwargs['signed_username']
if kwargs.get('authenticate', False):
self._checkReturn(requests.get("{}/users?signed_username={}".format(self.url, apiToken)))
self.signedUsername = apiToken
else:
auth = (kwargs['user_id'], kwargs['token'])
self.signedUsername = self._checkReturn(requests.get("{}/users/login".format(self.url), auth=auth))[
'signed_username']
|
Logs the current user into the server with the passed in credentials. If successful the apiToken will be changed to match the passed in credentials.
:param apiToken: use the passed apiToken to authenticate
:param user_id: optional instead of apiToken, must be passed with token
:param token: optional instead of apiToken, must be passed with user_id
:param authenticate: only valid with apiToken. Force a call to the server to authenticate the passed credentials.
:return:
|
def copy(self):
result = copy.deepcopy(self)
result._cache.clear()
return result
|
Return a copy of the Primitive object.
|
def process_unknown_arguments(unknowns):
result = argparse.Namespace()
result.extra_control = {}
for unknown in unknowns:
prefix = '--parameter-'
if unknown.startswith(prefix):
values = unknown.split('=')
if len(values) == 2:
key = values[0][len(prefix):]
val = values[1]
if key:
result.extra_control[key] = val
return result
|
Process arguments unknown to the parser
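For example, given the ``--parameter-`` prefix handling above:
ns = process_unknown_arguments(['--parameter-threshold=0.5', '--verbose'])
ns.extra_control    # -> {'threshold': '0.5'}; '--verbose' is ignored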
|
def _handle_subscription(self, topics):
if not isinstance(topics, list):
topics = [topics]
for topic in topics:
topic_levels = topic.split('/')
try:
qos = int(topic_levels[-2])
except ValueError:
qos = 0
try:
_LOGGER.debug('Subscribing to: %s, qos: %s', topic, qos)
self._sub_callback(topic, self.recv, qos)
except Exception as exception:
_LOGGER.exception(
'Subscribe to %s failed: %s', topic, exception)
|
Handle subscription of topics.
|
def get_observation(observation_id: int) -> Dict[str, Any]:
r = get_observations(params={'id': observation_id})
if r['results']:
return r['results'][0]
raise ObservationNotFound()
|
Get details about an observation.
:param observation_id:
:returns: a dict with details on the observation
:raises: ObservationNotFound
|
def _boundary_value(self) -> str:
value = self._boundary
if re.match(self._valid_tchar_regex, value):
return value.decode('ascii')
if re.search(self._invalid_qdtext_char_regex, value):
raise ValueError("boundary value contains invalid characters")
quoted_value_content = value.replace(b'\\', b'\\\\')
quoted_value_content = quoted_value_content.replace(b'"', b'\\"')
return '"' + quoted_value_content.decode('ascii') + '"'
|
Wrap boundary parameter value in quotes, if necessary.
Reads self.boundary and returns a unicode string.
|
def _marshal_claims(self, query_claims):
claims = reduce_claims(query_claims)
self.data['claims'] = claims
entities = set()
for eid in claims:
if self.user_labels:
if eid in self.user_labels or eid == 'P31':
entities.add(eid)
else:
continue
else:
entities.add(eid)
for val in claims[eid]:
if utils.is_text(val) and re.match(r'^Q\d+$', val):
entities.add(val)
self.data['entities'] = list(entities)
|
Set Wikidata entities from query claims.
|
def entries_published(queryset):
now = timezone.now()
return queryset.filter(
models.Q(start_publication__lte=now) |
models.Q(start_publication=None),
models.Q(end_publication__gt=now) |
models.Q(end_publication=None),
status=PUBLISHED, sites=Site.objects.get_current())
|
Return only the entries published.
|
def get_last_version(self, filename):
def ok(doc):
if doc is None:
raise NoFile("TxMongo: no file in gridfs with filename {0}".format(repr(filename)))
return GridOut(self.__collection, doc)
return self.__files.find_one({"filename": filename},
filter = filter.sort(DESCENDING("uploadDate"))).addCallback(ok)
|
Get a file from GridFS by ``"filename"``.
Returns the most recently uploaded file in GridFS with the
name `filename` as an instance of
:class:`~gridfs.grid_file.GridOut`. Raises
:class:`~gridfs.errors.NoFile` if no such file exists.
An index on ``{filename: 1, uploadDate: -1}`` will
automatically be created when this method is called the first
time.
:Parameters:
- `filename`: ``"filename"`` of the file to get
.. versionadded:: 1.6
|
def _users_watching(self, **kwargs):
return self._users_watching_by_filter(object_id=self.instance.pk,
**kwargs)
|
Return users watching this instance.
|
def _download_raw(self, url=None):
if url is None:
url = self.url
req = request.Request(url, headers=self.HEADERS_PLAIN)
return request.urlopen(req).read().decode("utf8")
|
Download content from URL directly.
|
def configuration_list_all(self, environment_id):
data = dict()
data["environment_id"] = environment_id
url = ("environment/configuration/list/%(environment_id)s/" % data)
code, xml = self.submit(None, 'GET', url)
return self.response(code, xml, force_list=['lists_configuration'])
|
List all prefix configurations by environment in DB
:return: Following dictionary:
::
{'lists_configuration': [{
'id': <id_ipconfig>,
'subnet': <subnet>,
'type': <type>,
'new_prefix': <new_prefix>,
}, ... ]}
:raise InvalidValueError: Invalid ID for Environment.
:raise AmbienteNotFoundError: Environment not registered.
:raise DataBaseError: Failed into networkapi access data base.
:raise XMLError: Networkapi failed to generate the XML response.
|
def delete(self):
config = self.get()
if not config:
return True
command = 'no router ospf {}'.format(config['ospf_process_id'])
return self.configure(command)
|
Removes the entire ospf process from the running configuration
Args:
None
Returns:
bool: True if the command completed successfully
|
def convert_html_entities(text_string):
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return html.unescape(text_string).replace("&quot;", "'")
else:
raise InputError("string not passed as argument for text_string")
|
Converts HTML5 character references within text_string to their corresponding unicode characters
and returns converted string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a non-string argument be passed
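A short example, assuming ``html`` is the standard-library module and the ``&quot;`` replacement reconstructed above:
convert_html_entities('caf&eacute; &amp; bar')   # -> 'café & bar'
convert_html_entities('')                        # -> ''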
|
def markup_join(seq):
buf = []
iterator = imap(soft_unicode, seq)
for arg in iterator:
buf.append(arg)
if hasattr(arg, '__html__'):
return Markup(u'').join(chain(buf, iterator))
return concat(buf)
|
Concatenation that escapes if necessary and converts to unicode.
|
def b64decode(foo, *args):
'Only here for consistency with the above.'
if isinstance(foo, str):
foo = foo.encode('utf8')
return base64.b64decode(foo, *args)
|
Only here for consistency with the above.
|