Unnamed: 0 (int64, 0-389k) | code (stringlengths 26-79.6k) | docstring (stringlengths 1-46.9k)
---|---|---|
18,600 | def print_xmlsec_errors(filename, line, func, error_object, error_subject, reason, msg):
info = []
if error_object != "unknown":
info.append("obj=" + error_object)
if error_subject != "unknown":
info.append("subject=" + error_subject)
if msg.strip():
info.append("msg=" + msg)
if reason != 1:
info.append("errno=%d" % reason)
if info:
print("%s:%d(%s)" % (filename, line, func), " ".join(info)) | Auxiliary method. It overrides the default xmlsec debug message. |
18,601 | def set_metadata(self, obj, metadata, clear=False, prefix=None):
if prefix is None:
prefix = OBJECT_META_PREFIX
massaged = _massage_metakeys(metadata, prefix)
cname = utils.get_name(self.container)
oname = utils.get_name(obj)
new_meta = {}
if not clear:
obj_meta = self.get_metadata(obj, prefix=prefix)
new_meta = _massage_metakeys(obj_meta, prefix)
utils.case_insensitive_update(new_meta, massaged)
to_pop = []
for key, val in six.iteritems(new_meta):
if not val:
to_pop.append(key)
for key in to_pop:
new_meta.pop(key)
uri = "/%s/%s" % (cname, oname)
resp, resp_body = self.api.method_post(uri, headers=new_meta) | Accepts a dictionary of metadata key/value pairs and updates the
specified object metadata with them.
If 'clear' is True, any existing metadata is deleted and only the
passed metadata is retained. Otherwise, the values passed here update
the object's metadata.
By default, the standard object metadata prefix ('X-Object-Meta-') is
prepended to the header name if it isn't present. For non-standard
headers, you must include a non-None prefix, such as an empty string. |
18,602 | def get_library(path=None, root=None, db=None):
import ambry.library as _l
rc = config(path=path, root=root, db=db )
return _l.new_library(rc) | Return the default library for this installation. |
18,603 | def _cache_ops_associate(protocol, msgtype):
ops = cache_ops
while ops:
if ops.co_protocol == protocol:
for co_msgtype in ops.co_msgtypes:
if co_msgtype.mt_id == msgtype:
return ops
ops = ops.co_next
return None | https://github.com/thom311/libnl/blob/libnl3_2_25/lib/cache_mngt.c#L111.
Positional arguments:
protocol -- Netlink protocol (integer).
msgtype -- Netlink message type (integer).
Returns:
nl_cache_ops instance with matching protocol containing matching msgtype or None. |
18,604 | def set_position(self, key, latlon, layer=None, rotation=0):
self.object_queue.put(SlipPosition(key, latlon, layer, rotation)) | move an object on the map |
18,605 | def _check_accept_keywords(approved, flag):
if flag in approved:
return False
elif (flag.startswith('~') and flag[1:] in approved) \
or ('~' + flag in approved):
return False
else:
return True | check compatibility of accept_keywords |
18,606 | def _get_gpu():
system = platform.system()
if system == "Linux":
libcudart = ct.cdll.LoadLibrary("libcudart.so")
elif system == "Darwin":
libcudart = ct.cdll.LoadLibrary("libcudart.dylib")
elif system == "Windows":
libcudart = ct.windll.LoadLibrary("libcudart.dll")
else:
raise NotImplementedError("Cannot identify system.")
device_count = ct.c_int()
libcudart.cudaGetDeviceCount(ct.byref(device_count))
gpu = 0
for i in range(device_count.value):
if (0 == libcudart.cudaSetDevice(i) and 0 == libcudart.cudaFree(0)):
gpu = i
break
return gpu | *DEPRECATED*. Allocates first available GPU using cudaSetDevice(), or returns 0 otherwise. |
18,607 | def _is_contiguous(positions):
previous = positions[0]
for current in positions[1:]:
if current != previous + 1:
return False
previous = current
return True | Given a non-empty list, does it consist of contiguous integers? |
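A quick hypothetical check of the helper above:

```python
print(_is_contiguous([3, 4, 5, 6]))  # True
print(_is_contiguous([3, 5, 6]))     # False: gap between 3 and 5
```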
18,608 | def merge_overlaps(self, threshold=0.0):
updated_labels = []
all_intervals = self.label_tree.copy()
def recursive_overlaps(interval):
range_start = interval.begin - threshold
range_end = interval.end + threshold
direct_overlaps = all_intervals.overlap(range_start, range_end)
all_overlaps = [interval]
all_intervals.discard(interval)
for overlap in direct_overlaps:
if overlap.data.value == interval.data.value:
all_overlaps.extend(recursive_overlaps(overlap))
return all_overlaps
while not all_intervals.is_empty():
next_interval = list(all_intervals)[0]
overlapping = recursive_overlaps(next_interval)
ov_start = float('inf')
ov_end = 0.0
ov_value = next_interval.data.value
for overlap in overlapping:
ov_start = min(ov_start, overlap.begin)
ov_end = max(ov_end, overlap.end)
all_intervals.discard(overlap)
updated_labels.append(Label(
ov_value,
ov_start,
ov_end
))
self.label_tree.clear()
self.update(updated_labels) | Merge overlapping labels with the same value.
Two labels are considered overlapping,
if ``l2.start - l1.end < threshold``.
Args:
threshold (float): Maximal distance between two labels
to be considered as overlapping.
(default: 0.0)
Example:
>>> ll = LabelList(labels=[
... Label('a_label', 1.0, 2.0),
... Label('a_label', 1.5, 2.7),
... Label('b_label', 1.0, 2.0),
... ])
>>> ll.merge_overlaps()
>>> ll.labels
[
Label('a_label', 1.0, 2.7),
Label('b_label', 1.0, 2.0),
] |
18,609 | def polite_string(a_string):
if is_py3() and hasattr(a_string, 'decode'):
try:
return a_string.decode()
except UnicodeDecodeError:
return a_string
return a_string | Returns a "proper" string that should work in both Py3/Py2 |
18,610 | def _get_block_matches(self, attributes_a, attributes_b, filter_set_a=None, filter_set_b=None, delta=(0, 0, 0),
tiebreak_with_block_similarity=False):
if filter_set_a is None:
filtered_attributes_a = {k: v for k, v in attributes_a.items()}
else:
filtered_attributes_a = {k: v for k, v in attributes_a.items() if k in filter_set_a}
if filter_set_b is None:
filtered_attributes_b = {k: v for k, v in attributes_b.items()}
else:
filtered_attributes_b = {k: v for k, v in attributes_b.items() if k in filter_set_b}
for k in filtered_attributes_a:
filtered_attributes_a[k] = tuple((i+j) for i, j in zip(filtered_attributes_a[k], delta))
for k in filtered_attributes_b:
filtered_attributes_b[k] = tuple((i+j) for i, j in zip(filtered_attributes_b[k], delta))
closest_a = _get_closest_matches(filtered_attributes_a, filtered_attributes_b)
closest_b = _get_closest_matches(filtered_attributes_b, filtered_attributes_a)
if tiebreak_with_block_similarity:
for a in closest_a:
if len(closest_a[a]) > 1:
best_similarity = 0
best = []
for x in closest_a[a]:
similarity = self.block_similarity(a, x)
if similarity > best_similarity:
best_similarity = similarity
best = [x]
elif similarity == best_similarity:
best.append(x)
closest_a[a] = best
for b in closest_b:
if len(closest_b[b]) > 1:
best_similarity = 0
best = []
for x in closest_b[b]:
similarity = self.block_similarity(x, b)
if similarity > best_similarity:
best_similarity = similarity
best = [x]
elif similarity == best_similarity:
best.append(x)
closest_b[b] = best
matches = []
for a in closest_a:
if len(closest_a[a]) == 1:
match = closest_a[a][0]
if len(closest_b[match]) == 1 and closest_b[match][0] == a:
matches.append((a, match))
return matches | :param attributes_a: A dict of blocks to their attributes
:param attributes_b: A dict of blocks to their attributes
The following parameters are optional.
:param filter_set_a: A set to limit attributes_a to the blocks in this set.
:param filter_set_b: A set to limit attributes_b to the blocks in this set.
:param delta: An offset to add to each vector in attributes_a.
:returns: A list of tuples of matching objects. |
18,611 | def openflow_controller_connection_address_connection_method(self, **kwargs):
config = ET.Element("config")
openflow_controller = ET.SubElement(config, "openflow-controller", xmlns="urn:brocade.com:mgmt:brocade-openflow")
controller_name_key = ET.SubElement(openflow_controller, "controller-name")
controller_name_key.text = kwargs.pop('controller_name')
connection_address = ET.SubElement(openflow_controller, "connection-address")
connection_method = ET.SubElement(connection_address, "connection-method")
connection_method.text = kwargs.pop('connection_method')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
18,612 | def patch(self, nml_patch):
for sec in nml_patch:
if sec not in self:
self[sec] = Namelist()
self[sec].update(nml_patch[sec]) | Update the namelist from another partial or full namelist.
This is different from the intrinsic `update()` method, which replaces
a namelist section. Rather, it updates the values within a section. |
18,613 | def accuracy(sess, model, x, y, batch_size=None, devices=None, feed=None,
attack=None, attack_params=None):
_check_x(x)
_check_y(y)
if x.shape[0] != y.shape[0]:
raise ValueError("Number of input examples and labels do not match.")
factory = _CorrectFactory(model, attack, attack_params)
correct, = batch_eval_multi_worker(sess, factory, [x, y],
batch_size=batch_size, devices=devices,
feed=feed)
return correct.mean() | Compute the accuracy of a TF model on some data
:param sess: TF session to use when training the graph
:param model: cleverhans.model.Model instance
:param x: numpy array containing input examples (e.g. MNIST().x_test )
:param y: numpy array containing example labels (e.g. MNIST().y_test )
:param batch_size: Number of examples to use in a single evaluation batch.
If not specified, this function will use a reasonable guess and
may run out of memory.
When choosing the batch size, keep in mind that the batch will
be divided up evenly among available devices. If you can fit 128
examples in memory on one GPU and you have 8 GPUs, you probably
want to use a batch size of 1024 (unless a different batch size
runs faster with the ops you are using, etc.)
:param devices: An optional list of string device names to use.
If not specified, this function will use all visible GPUs.
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs. Can be used to feed
the learning phase of a Keras model for instance.
:param attack: cleverhans.attack.Attack
Optional. If no attack specified, evaluates the model on clean data.
If attack is specified, evaluates the model on adversarial examples
created by the attack.
:param attack_params: dictionary
If attack is specified, this dictionary is passed to attack.generate
as keyword arguments.
:return: a float with the accuracy value |
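A hedged usage sketch for the `accuracy` helper above; the trained cleverhans `model`, the open `sess`, and the MNIST arrays are assumed to already exist, and FastGradientMethod is just one attack that fits the described interface.

```python
# Usage sketch (assumes a trained cleverhans model, an open tf.Session,
# and MNIST test arrays x_test / y_test).
from cleverhans.attacks import FastGradientMethod

clean_acc = accuracy(sess, model, x_test, y_test, batch_size=128)
fgsm = FastGradientMethod(model, sess=sess)
adv_acc = accuracy(sess, model, x_test, y_test, batch_size=128,
                   attack=fgsm, attack_params={'eps': 0.3})
print('clean: %.4f, adversarial: %.4f' % (clean_acc, adv_acc))
```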
18,614 | def request(self, path, method='GET', params=None):
if params is None: params = {}
url = urljoin(self.endpoint, path)
headers = {
'Accept': 'application/json',
'Authorization': 'AccessKey ' + self.access_key,
'User-Agent': self.user_agent,
'Content-Type': 'application/json'
}
if method == 'DELETE':
response = requests.delete(url, verify=True, headers=headers, data=json.dumps(params))
elif method == 'GET':
response = requests.get(url, verify=True, headers=headers, params=params)
elif method == 'PATCH':
response = requests.patch(url, verify=True, headers=headers, data=json.dumps(params))
elif method == 'POST':
response = requests.post(url, verify=True, headers=headers, data=json.dumps(params))
elif method == 'PUT':
response = requests.put(url, verify=True, headers=headers, data=json.dumps(params))
else:
raise ValueError(str(method) + ' is not a supported HTTP method')
if response.status_code in self.__supported_status_codes:
response_text = response.text
else:
response.raise_for_status()
return response_text | Builds a request and gets a response. |
18,615 | def clean_password(self, password, user=None):
min_length = app_settings.PASSWORD_MIN_LENGTH
if min_length and len(password) < min_length:
raise forms.ValidationError(_("Password must be a minimum of {0} "
"characters.").format(min_length))
validate_password(password, user)
return password | Validates a password. You can hook into this if you want to
restrict the allowed password choices. |
18,616 | def status_codes_by_date_stats():
def date_counter(queryset):
return dict(Counter(map(
lambda dt: ms_since_epoch(datetime.combine(
make_naive(dt), datetime.min.time())),
list(queryset.values_list(, flat=True)))))
codes = {low: date_counter(
RequestLog.objects.filter(status_code__gte=low, status_code__lt=high))
for low, high in ((200, 300), (300, 400), (400, 500))}
codes[500] = date_counter(RequestLog.objects.filter(status_code__gte=500))
codes['attacks'] = date_counter(RequestLog.objects.filter(
status_code__in=(400, 444, 502)))
stats = {}
for code in (200, 300, 400, 500, 'attacks'):
for date, count in codes[code].items():
if stats.get(date, None) is None:
stats[date] = {200: 0, 300: 0, 400: 0, 500: 0, 'attacks': 0}
stats[date][code] += count
stats = sorted([(k, v) for k, v in stats.items()], key=lambda x: x[0])
return stats | Get stats for status codes by date.
Returns:
list: status codes + date grouped by type: 2xx, 3xx, 4xx, 5xx, attacks. |
18,617 | def _write_frames(self, handle):
assert handle.tell() == 512 * (self.header.data_block - 1)
scale = abs(self.point_scale)
is_float = self.point_scale < 0
point_dtype = [np.int16, np.float32][is_float]
point_scale = [scale, 1][is_float]
point_format = [is_float]
raw = np.empty((self.point_used, 4), point_dtype)
for points, analog in self._frames:
valid = points[:, 3] > -1
raw[~valid, 3] = -1
raw[valid, :3] = points[valid, :3] / self._point_scale
raw[valid, 3] = (
((points[valid, 4]).astype(np.uint8) << 8) |
(points[valid, 3] / scale).astype(np.uint16)
)
point = array.array(point_format)
point.extend(raw.flatten())
point.tofile(handle)
analog = array.array(point_format)
analog.extend(analog)
analog.tofile(handle)
self._pad_block(handle) | Write our frame data to the given file handle.
Parameters
----------
handle : file
Write metadata and C3D motion frames to the given file handle. The
writer does not close the handle. |
18,618 | def auto(cls, syslog=None, stderr=None, level=None, extended=None,
server=None):
level = norm_level(level) or logging.INFO
if syslog is None and stderr is None:
if sys.stderr.isatty() or syslog_path() is None:
log.info()
syslog, stderr = None, level
if extended is None:
extended = (stderr or 0) <= logging.DEBUG
else:
log.info()
syslog, stderr = level, None
return cls(syslog=syslog, stderr=stderr, extended=extended,
server=server) | Tries to guess a sound logging configuration. |
18,619 | def trace_buffer_capacity(self):
cmd = enums.JLinkTraceCommand.GET_CONF_CAPACITY
data = ctypes.c_uint32(0)
res = self._dll.JLINKARM_TRACE_Control(cmd, ctypes.byref(data))
if (res == 1):
raise errors.JLinkException()
return data.value | Retrieves the trace buffer's current capacity.
Args:
self (JLink): the ``JLink`` instance.
Returns:
The current capacity of the trace buffer. This is not necessarily
the maximum possible size the buffer could be configured with. |
18,620 | def keyrelease(self, data):
try:
window = self._get_front_most_window()
except (IndexError,):
window = self._get_any_window()
key_release_action = KeyReleaseAction(window, data)
return 1 | Release key. NOTE: keypress should be called before this
@param data: data to type.
@type data: string
@return: 1 on success.
@rtype: integer |
18,621 | def SRem(a: BitVec, b: BitVec) -> BitVec:
return _arithmetic_helper(a, b, z3.SRem) | Create a signed remainder expression.
:param a:
:param b:
:return: |
18,622 | def store_node_label_meta(self, x, y, tx, ty, rot):
self.node_label_coords["x"].append(x)
self.node_label_coords["y"].append(y)
self.node_label_coords["tx"].append(tx)
self.node_label_coords["ty"].append(ty)
if x == 0:
self.node_label_aligns["has"].append("center")
elif x > 0:
self.node_label_aligns["has"].append("left")
else:
self.node_label_aligns["has"].append("right")
if self.node_label_layout == "rotate" or y == 0:
self.node_label_aligns["vas"].append("center")
elif y > 0:
self.node_label_aligns["vas"].append("bottom")
else:
self.node_label_aligns["vas"].append("top")
self.node_label_rotation.append(rot) | This function stores coordinate-related metadata for a node
This function should not be called by the user
:param x: x location of node label or number
:type x: np.float64
:param y: y location of node label or number
:type y: np.float64
:param tx: text location x of node label (numbers)
:type tx: np.float64
:param ty: text location y of node label (numbers)
:type ty: np.float64
:param rot: rotation angle of the text (rotation)
:type rot: float |
18,623 | def _mulf16(ins):
op1, op2 = tuple(ins.quad[2:])
if _f_ops(op1, op2) is not None:
op1, op2 = _f_ops(op1, op2)
if op2 == 1:
output = _f16_oper(op1)
output.append()
output.append()
return output
if op2 == -1:
return _neg32(ins)
output = _f16_oper(op1)
if op2 == 0:
output.append()
output.append()
output.append()
output.append()
output.append()
return output
output = _f16_oper(op1, str(op2))
output.append()
output.append()
output.append()
REQUIRES.add()
return output | Multiplies 2 32bit (16.16) fixed point numbers. The result is pushed onto the stack. |
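The backend routine above emits target code, so as a plain-Python illustration of what a 16.16 fixed-point multiply computes (unsigned case only; names and values here are illustrative):

```python
def mulf16(a, b):
    """Multiply two unsigned 16.16 fixed-point values held in 32-bit ints."""
    return ((a * b) >> 16) & 0xFFFFFFFF

x = int(2.5 * 65536)   # 2.5 in 16.16
y = int(3.0 * 65536)   # 3.0 in 16.16
assert mulf16(x, y) / 65536 == 7.5
```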
18,624 | def scan(self,
proxy_scanner,
expected_num=20,
val_thr_num=4,
queue_timeout=3,
val_timeout=5,
out_file=):
try:
proxy_scanner.scan()
self.logger.info(
.format(val_thr_num))
val_threads = []
for i in range(val_thr_num):
t = threading.Thread(
name=.format(i + 1),
target=self.validate,
kwargs=dict(
proxy_scanner=proxy_scanner,
expected_num=expected_num,
queue_timeout=queue_timeout,
val_timeout=val_timeout))
t.daemon = True
val_threads.append(t)
t.start()
for t in val_threads:
t.join()
self.logger.info()
except:
raise
finally:
if out_file is not None:
self.save(out_file) | Scan and validate proxies
Firstly, call the `scan` method of `proxy_scanner`, then use multiple
threads to validate them.
Args:
proxy_scanner: A ProxyScanner object.
expected_num: Max number of valid proxies to be scanned.
val_thr_num: Number of threads used for validating proxies.
queue_timeout: Timeout for getting a proxy from the queue.
val_timeout: An integer passed to `is_valid` as argument `timeout`.
out_file: A string or None. If not None, the proxies will be saved
into `out_file`. |
18,625 | def identify_misfeatured_regions(st, filter_size=5, sigma_cutoff=8.):
r = st.residuals
weights = np.ones([filter_size]*len(r.shape), dtype='float')
weights /= weights.sum()
f = np.sqrt(nd.filters.convolve(r*r, weights, mode='reflect'))
if sigma_cutoff == 'otsu':
max_ok = initializers.otsu_threshold(f)
else:
max_ok = f.mean() + sigma_cutoff * f.std()
bad = f > max_ok
labels, n = nd.measurements.label(bad)
inds = []
for i in range(1, n+1):
inds.append(np.nonzero(labels == i))
tiles = [Tile(np.min(ind, axis=1), np.max(ind, axis=1)+1) for ind in inds]
volumes = [t.volume for t in tiles]
return [tiles[i] for i in np.argsort(volumes)[::-1]] | Identifies regions of missing/misfeatured particles based on the
residuals' local deviation from uniform Gaussian noise.
Parameters
----------
st : :class:`peri.states.State`
The state in which to identify mis-featured regions.
filter_size : Int, best if odd.
The size of the filter for calculating the local standard deviation;
should approximately be the size of a poorly featured region in
each dimension. Default is 5.
sigma_cutoff : Float or `otsu`, optional
The max allowed deviation of the residuals from what is expected,
in units of the residuals' standard deviation. Lower means more
sensitive, higher = less sensitive. Default is 8.0, i.e. one pixel
out of every 7*10^11 is mis-identified randomly. In practice the
noise is not Gaussian so there are still some regions mis-identified
as improperly featured. Set to ```otsu``` to calculate this number
based on an automatic Otsu threshold.
Returns
-------
tiles : List of :class:`peri.util.Tile`
Each tile is the smallest bounding tile that contains an improperly
featured region. The list is sorted by the tile's volume.
Notes
-----
Algorithm is
1. Create a field of the local standard deviation, as measured over
a hypercube of size filter_size.
2. Find the maximum reasonable value of the field. [The field should
be a random variable with mean of r.std() and standard deviation
of ~r.std() / sqrt(N), where r is the residuals and N is the
number of pixels in the hypercube.]
3. Label & Identify the misfeatured regions as portions where
the local error is too large.
4. Parse the misfeatured regions into tiles.
5. Return the sorted tiles.
The Otsu option to calculate the sigma cutoff works well for images
that actually contain missing particles, returning a number similar
to one calculated with a sigma cutoff. However, if the image is
well-featured with Gaussian residuals, then the Otsu threshold
splits the Gaussian down the middle instead of at the tails, which
is very bad. So use with caution. |
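A minimal NumPy/SciPy sketch of steps 1-3 of the algorithm described in the docstring, run on synthetic residuals; all names and numbers here are illustrative and this is not the library's implementation.

```python
import numpy as np
import scipy.ndimage as nd

rng = np.random.default_rng(0)
r = rng.normal(size=(128, 128))
r[20:25, 30:35] += 8.0                      # a deliberately mis-featured patch

filter_size, sigma_cutoff = 5, 8.0
weights = np.ones((filter_size, filter_size)) / filter_size ** 2
local_std = np.sqrt(nd.convolve(r * r, weights, mode='reflect'))  # step 1: local std field
max_ok = local_std.mean() + sigma_cutoff * local_std.std()        # step 2: threshold
labels, n = nd.label(local_std > max_ok)                          # step 3: label regions
print(n, 'misfeatured region(s) found')
```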
18,626 | def _put (self, url_data):
if self.shutdown or self.max_allowed_urls == 0:
return
log.debug(LOG_CACHE, "queueing %s", url_data.url)
key = url_data.cache_url
cache = url_data.aggregate.result_cache
if url_data.has_result or cache.has_result(key):
self.queue.appendleft(url_data)
else:
assert key is not None, "no result for None key: %s" % url_data
if self.max_allowed_urls is not None:
self.max_allowed_urls -= 1
self.num_puts += 1
if self.num_puts >= NUM_PUTS_CLEANUP:
self.cleanup()
self.queue.append(url_data)
self.unfinished_tasks += 1 | Put URL in queue, increase number of unfinished tasks. |
18,627 | def _abs_pow_ufunc(self, fi, out, p):
if p == 0.5:
fi.ufuncs.absolute(out=out)
out.ufuncs.sqrt(out=out)
elif p == 2.0 and self.base_space.field == RealNumbers():
fi.multiply(fi, out=out)
else:
fi.ufuncs.absolute(out=out)
out.ufuncs.power(p, out=out) | Compute |F_i(x)|^p point-wise and write to ``out``. |
18,628 | def get_user_invitation_by_id(self, id):
return self.db_adapter.get_object(self.UserInvitationClass, id=id) | Retrieve a UserInvitation object by ID. |
18,629 | def parse_bibliography(source, loc, tokens):
bib = structures.Bibliography()
for entry in tokens:
bib.add(entry)
return bib | Combines the parsed entries into a Bibliography instance. |
18,630 | def push(self, values: np.ndarray):
n = len(values)
if len(self) + n > self.size:
raise ValueError("Too much data to push to RingBuffer")
slide_1 = np.s_[self.right_index:min(self.right_index + n, self.size)]
slide_2 = np.s_[:max(self.right_index + n - self.size, 0)]
with self.__data.get_lock():
data = np.frombuffer(self.__data.get_obj(), dtype=np.complex64)
data[slide_1] = values[:slide_1.stop - slide_1.start]
data[slide_2] = values[slide_1.stop - slide_1.start:]
self.right_index += n
self.__length.value += n | Push values to buffer. If buffer can't store all values a ValueError is raised |
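The two-slice indexing above is what makes the write wrap around the end of the buffer; a standalone sketch of just that logic, without the shared memory or locking:

```python
import numpy as np

size, right_index = 8, 6
data = np.zeros(size, dtype=np.complex64)
values = np.arange(4, dtype=np.complex64)          # 4 samples, only 2 fit at the end

first = np.s_[right_index:min(right_index + len(values), size)]
second = np.s_[:max(right_index + len(values) - size, 0)]
data[first] = values[:first.stop - first.start]    # tail of the buffer
data[second] = values[first.stop - first.start:]   # wrapped to the front
print(data)  # front holds the wrapped samples: [2, 3, 0, 0, 0, 0, 0, 1]
```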
18,631 | def get_pplan(self, topologyName, callback=None):
isWatching = False
ret = {
"result": None
}
if callback:
isWatching = True
else:
def callback(data):
ret["result"] = data
self._get_pplan_with_watch(topologyName, callback, isWatching)
return ret["result"] | get physical plan |
18,632 | async def _watch(self, node, conn, names):
"Watches the values at keys ``names``"
for name in names:
slot = self._determine_slot('WATCH', name)
dist_node = self.connection_pool.get_node_by_slot(slot)
if node.get('name') != dist_node['name']:
if len(node) > 0:
raise ClusterTransactionError("Keys in request don't hash to the same node")
if self.explicit_transaction:
raise RedisError('Cannot issue a WATCH after a MULTI')
await conn.send_command('WATCH', *names)
return await conn.read_response() | Watches the values at keys ``names`` |
18,633 | def _process_worker(call_queue, result_queue):
while True:
call_item = call_queue.get(block=True)
if call_item is None:
result_queue.put(None)
return
try:
r = call_item.fn(*call_item.args, **call_item.kwargs)
except BaseException:
e = sys.exc_info()[1]
result_queue.put(_ResultItem(call_item.work_id,
exception=e))
else:
result_queue.put(_ResultItem(call_item.work_id,
result=r)) | Evaluates calls from call_queue and places the results in result_queue.
This worker is run in a separate process.
Args:
call_queue: A multiprocessing.Queue of _CallItems that will be read and
evaluated by the worker.
result_queue: A multiprocessing.Queue of _ResultItems that will written
to by the worker.
shutdown: A multiprocessing.Event that will be set as a signal to the
worker that it should exit when call_queue is empty. |
18,634 | def get_template(self, template, def_name=None):
try:
return MakoTemplateAdapter(self.get_mako_template(template), def_name)
except (TopLevelLookupException, TemplateLookupException) as e:
tdne = TemplateDoesNotExist( % (template, self.template_search_dirs))
if settings.DEBUG:
tdne.template_debug = get_template_debug(template, e)
raise tdne from e
except (CompileException, SyntaxException) as e:
tse = TemplateSyntaxError( % (template, e))
if settings.DEBUG:
tse.template_debug = get_template_debug(template, e)
raise tse from e | Retrieve a *Django* API template object for the given template name, using the app_path and template_subdir
settings in this object. This method still uses the corresponding Mako template and engine, but it
gives a Django API wrapper around it so you can use it the same as any Django template.
If def_name is provided, template rendering will be limited to the named def/block (see Mako docs).
This method corresponds to the Django templating system API.
A Django exception is raised if the template is not found or cannot compile. |
18,635 | def load(name):
global __tile_maps
TileMapManager.unload()
TileMapManager.active_map = __tile_maps[name]
TileMapManager.active_map.parse_tilemap()
TileMapManager.active_map.parse_collisions()
TileMapManager.active_map.parse_objects()
world = Ragnarok.get_world()
world.add_obj(TileMapManager.active_map)
for obj in TileMapManager.active_map.objects:
world.add_obj(obj) | Parse the tile map and add it to the world. |
18,636 | def on_error(self, ex):
if self._d:
self._d.errback()
self._d = None | Reimplemented from :meth:`~AsyncViewBase.on_error` |
18,637 | def init(self, request, paypal_request, paypal_response):
if request is not None:
from paypal.pro.helpers import strip_ip_port
self.ipaddress = strip_ip_port(request.META.get('REMOTE_ADDR', ''))
if (hasattr(request, "user") and request.user.is_authenticated):
self.user = request.user
else:
self.ipaddress = ''
query_data = dict((k, v) for k, v in paypal_request.items() if k not in self.RESTRICTED_FIELDS)
self.query = urlencode(query_data)
self.response = urlencode(paypal_response)
ack = paypal_response.get('ACK', False)
if ack != "Success":
if ack == "SuccessWithWarning":
warn_untested()
self.flag_info = paypal_response.get('L_LONGMESSAGE0', '')
else:
self.set_flag(paypal_response.get('L_LONGMESSAGE0', ''), paypal_response.get('L_ERRORCODE0', '')) | Initialize a PayPalNVP instance from a HttpRequest. |
18,638 | def _default_template_args(self, content_template):
def include(text, args):
template_name = pystache.render(text, args)
return self._renderer.render_name(template_name, args)
ret = {'content': content_template}
ret['include'] = lambda text: include(text, ret)
return ret | Initialize template args. |
18,639 | def to_sequence_field(cls):
class SequenceConverter(object):
def __init__(self, cls):
self._cls = cls
@property
def cls(self):
return resolve_class(self._cls)
def __call__(self, values):
values = values or []
args = [to_model(self.cls, value) for value in values]
return TypedSequence(cls=self.cls, args=args)
return SequenceConverter(cls) | Returns a callable instance that will convert a value to a Sequence.
:param cls: Valid class type of the items in the Sequence.
:return: instance of the SequenceConverter. |
18,640 | def _queue_declare_ok(self, args):
queue = args.read_shortstr()
message_count = args.read_long()
consumer_count = args.read_long()
return queue, message_count, consumer_count | confirms a queue definition
This method confirms a Declare method and confirms the name of
the queue, essential for automatically-named queues.
PARAMETERS:
queue: shortstr
Reports the name of the queue. If the server generated
a queue name, this field contains that name.
message_count: long
number of messages in queue
Reports the number of messages in the queue, which
will be zero for newly-created queues.
consumer_count: long
number of consumers
Reports the number of active consumers for the queue.
Note that consumers can suspend activity
(Channel.Flow) in which case they do not appear in
this count. |
18,641 | def extract(code, tree, prefix=[]):
if isinstance(tree, list):
l, r = tree
prefix.append('0')
extract(code, l, prefix)
prefix.pop()
prefix.append('1')
extract(code, r, prefix)
prefix.pop()
else:
code[tree] = ''.join(prefix)
:param tree: a node of the tree
:param prefix: a list with the 01 characters encoding the path from
the root to the node `tree`
:complexity: O(n) |
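A hypothetical usage of `extract` above, with the tree given as nested two-element lists whose leaves are symbols; pass a fresh list for `prefix` to avoid the mutable default argument.

```python
tree = ['a', ['b', 'c']]      # 'a' sits one level up, 'b' and 'c' share a subtree
code = {}
extract(code, tree, [])
print(code)                   # {'a': '0', 'b': '10', 'c': '11'}
```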
18,642 | def objects_copy(self, source_bucket, source_key, target_bucket, target_key):
url = Api._ENDPOINT + (Api._OBJECT_COPY_PATH % (source_bucket, Api._escape_key(source_key),
target_bucket, Api._escape_key(target_key)))
return datalab.utils.Http.request(url, method='POST', credentials=self._credentials) | Copies an object from a source bucket/key to a target bucket/key.
Args:
source_bucket: the name of the bucket containing the source object.
source_key: the key of the source object being copied.
target_bucket: the name of the bucket that will contain the copied object.
target_key: the key of the copied object.
Returns:
A parsed object information dictionary.
Raises:
Exception if there is an error performing the operation. |
18,643 | def add(self, actors):
if utils.isSequence(actors):
for a in actors:
if a not in self.actors:
self.actors.append(a)
return None
else:
self.actors.append(actors)
return actors | Append input object to the internal list of actors to be shown.
:return: returns input actor for possible concatenation. |
18,644 | def remove_highlight_nodes(graph: BELGraph, nodes: Optional[Iterable[BaseEntity]]=None) -> None:
for node in graph if nodes is None else nodes:
if is_node_highlighted(graph, node):
del graph.node[node][NODE_HIGHLIGHT] | Removes the highlight from the given nodes, or all nodes if none given.
:param graph: A BEL graph
:param nodes: The list of nodes to un-highlight |
18,645 | def _strip_postfix(req):
match = re.search(r'^(.*?)(?:-dev|-\d.*)$', req)
if match:
req = match.group(1)
return req | Strip req postfix ( -dev, 0.2, etc ) |
18,646 | def _process_resp(request_id, response, is_success_func):
if response.status != 200:
raise DataFailureException(request_id,
response.status,
response.reason
)
if response.data is None:
raise NoDataReturned()
root = objectify.fromstring(response.data)
if root.ResponseMessage is None or\
root.ResponseMessage.attrib['Code'] is None:
raise UnknownError()
resp_code = int(root.ResponseMessage.attrib['Code'])
func = partial(is_success_func)
if func(resp_code):
return True
_check_err(resp_code, request_id) | :param request_id: campus url identifying the request
:param response: the GET method response object
:param is_success_func: the name of the function for
verifying a success code
:return: True if successful, False otherwise.
raise DataFailureException or a corresponding TrumbaException
if the request failed or an error code has been returned. |
18,647 | def shlex_quote(s):
if is_py3: | Return a shell-escaped version of the string *s*.
Backported from Python 3.3 standard library module shlex. |
18,648 | def register_dataset(self,
dataset,
expr,
deltas=None,
checkpoints=None,
odo_kwargs=None):
expr_data = ExprData(
expr,
deltas,
checkpoints,
odo_kwargs,
)
for column in dataset.columns:
self._table_expressions[column] = expr_data | Explicitly map a dataset to a collection of blaze expressions.
Parameters
----------
dataset : DataSet
The pipeline dataset to map to the given expressions.
expr : Expr
The baseline values.
deltas : Expr, optional
The deltas for the data.
checkpoints : Expr, optional
The forward fill checkpoints for the data.
odo_kwargs : dict, optional
The keyword arguments to forward to the odo calls internally.
See Also
--------
:func:`zipline.pipeline.loaders.blaze.from_blaze` |
18,649 | def needs_to_auth(self, dbname):
log_debug("Checking if server needs to auth on db ...." %
(self.id, dbname))
try:
client = self.get_mongo_client()
db = client.get_database(dbname)
db.collection_names()
result = False
except (RuntimeError,Exception), e:
log_exception(e)
result = "authorized" in str(e) or "there are no users authenticated" in str(e)
log_debug("needs_to_auth check for server on db : %s" %
(self.id, dbname, result))
return result | Determines if the server needs to authenticate to the database.
NOTE: we stopped depending on is_auth() since it's only a configuration
and may not be accurate |
18,650 | def dataset(directory, images_file, labels_file):
images_file = download(directory, images_file)
labels_file = download(directory, labels_file)
check_image_file_header(images_file)
check_labels_file_header(labels_file)
def decode_image(image):
image = tf.decode_raw(image, tf.uint8)
image = tf.cast(image, tf.float32)
image = tf.reshape(image, [784])
return image / 255.0
def decode_label(label):
label = tf.decode_raw(label, tf.uint8)
label = tf.reshape(label, [])
return tf.to_int32(label)
images = tf.data.FixedLengthRecordDataset(
images_file, 28 * 28, header_bytes=16).map(decode_image)
labels = tf.data.FixedLengthRecordDataset(
labels_file, 1, header_bytes=8).map(decode_label)
return tf.data.Dataset.zip((images, labels)) | Download and parse MNIST dataset. |
18,651 | def dt_cluster(dt_list, dt_thresh=16.0):
if not isinstance(dt_list[0], float):
o_list = dt2o(dt_list)
else:
o_list = dt_list
o_list_sort = np.sort(o_list)
o_list_sort_idx = np.argsort(o_list)
d = np.diff(o_list_sort)
b = np.nonzero(d > dt_thresh)[0] + 1
b = np.hstack((0, b, d.shape[0] + 1))
f_list = []
for i in range(len(b)-1):
b_idx = [b[i], b[i+1]-1]
b_dt = o_list_sort[b_idx]
b_idx_orig = o_list_sort_idx[b_idx]
all_idx = np.arange(b_idx[0], b_idx[1])
all_sort = o_list_sort[all_idx]
all_idx_orig = o_list_sort_idx[all_idx]
dict = {}
dict[] = b_idx_orig
dict[] = b_dt
dict[] = o2dt(b_dt)
dict[] = all_idx_orig
dict[] = all_sort
dict[] = o2dt(all_sort)
f_list.append(dict)
return f_list | Find clusters of similar datetimes within datetime list |
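The core of the function is gap-based clustering of sorted ordinal dates; a compact NumPy sketch of that idea on plain numbers (threshold in days, values made up):

```python
import numpy as np

o = np.array([1.0, 1.2, 2.0, 40.0, 40.5, 90.0])   # ordinal dates
o_sorted = np.sort(o)
breaks = np.nonzero(np.diff(o_sorted) > 16.0)[0] + 1
clusters = np.split(o_sorted, breaks)
print(clusters)  # [array([1. , 1.2, 2. ]), array([40. , 40.5]), array([90.])]
```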
18,652 | def integrate_adaptive(rhs, jac, y0, x0, xend, atol, rtol, dx0=.0, dx_max=.0,
check_callable=False, check_indexing=False, **kwargs):
jac = _ensure_5args(jac)
if check_callable:
_check_callable(rhs, jac, x0, y0)
if check_indexing:
_check_indexing(rhs, jac, x0, y0)
return adaptive(rhs, jac, np.asarray(y0, dtype=np.float64), x0, xend, atol, rtol, dx0, dx_max, **_bs(kwargs)) | Integrates a system of ordinary differential equations.
Parameters
----------
rhs: callable
Function with signature f(t, y, fout) which modifies fout *inplace*.
jac: callable
Function with signature j(t, y, jmat_out, dfdx_out) which modifies
jmat_out and dfdx_out *inplace*.
y0: array_like
Initial values of the dependent variables.
x0: float
Initial value of the independent variable.
xend: float
Stopping value for the independent variable.
atol: float
Absolute tolerance.
rtol: float
Relative tolerance.
dx0: float
Initial step-size.
dx_max: float
Maximum step-size.
check_callable: bool (default: False)
Perform signature sanity checks on ``rhs`` and ``jac``.
check_indexing: bool (default: False)
Perform item setting sanity checks on ``rhs`` and ``jac``.
\*\*kwargs:
'method': str
'rosenbrock4', 'dopri5' or 'bs'
'return_on_error': bool
Returns on error without raising an exception (with ``'success'==False``).
'autorestart': int
Useful for autonomous systems where conditions change during integration.
Will restart the integration with ``x==0``.
'dx0cb': callable
Callback for calculating dx0 (make sure to pass ``dx0==0.0``) to enable.
Signature: ``f(x, y[:]) -> float``.
Returns
-------
(xout, yout, info):
xout: 1-dimensional array of values for the independent variable
yout: 2-dimensional array of the dependent variables (axis 1) for
values corresponding to xout (axis 0)
info: dictionary with information about the integration |
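A hedged sketch of the callback signatures the docstring describes, for the scalar decay ODE y' = -k*y; the decay constant and tolerances are arbitrary, and whether extra kwargs such as `method` are needed depends on the surrounding package.

```python
k = 0.7

def rhs(t, y, fout):
    fout[0] = -k * y[0]            # write the derivative in place

def jac(t, y, jmat_out, dfdx_out):
    jmat_out[0, 0] = -k            # d(rhs)/dy
    dfdx_out[0] = 0.0              # d(rhs)/dt (autonomous system)

xout, yout, info = integrate_adaptive(rhs, jac, y0=[1.0], x0=0.0, xend=5.0,
                                      atol=1e-8, rtol=1e-8)
```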
18,653 | def clone(self):
args = {k: getattr(self, k) for k in self.CLONE_ATTRS}
args['color_list'] = copy.copy(self.color_list)
return self.__class__([], **args) | Return an independent copy of this layout with a completely separate
color_list and no drivers. |
18,654 | def DirEntryScanner(**kw):
kw['node_factory'] = SCons.Node.FS.Entry
kw['recursive'] = None
return SCons.Scanner.Base(scan_in_memory, "DirEntryScanner", **kw) | Return a prototype Scanner instance for "scanning"
directory Nodes for their in-memory entries |
18,655 | def build_single(scheme_file, templates, base_output_dir):
scheme = get_yaml_dict(scheme_file)
scheme_slug = slugify(scheme_file)
format_scheme(scheme, scheme_slug)
scheme_name = scheme['scheme']
print(.format(scheme_name))
for temp_group in templates:
for _, sub in temp_group.templates.items():
output_dir = os.path.join(base_output_dir,
temp_group.name,
sub[])
try:
os.makedirs(output_dir)
except FileExistsError:
pass
if sub['extension'] is not None:
filename = .format(scheme_slug, sub[])
else:
filename = .format(scheme_slug)
build_path = os.path.join(output_dir, filename)
with open(build_path, 'w') as file_:
file_content = pystache.render(sub[], scheme)
file_.write(file_content)
print(.format(scheme_name)) | Build colorscheme for a single $scheme_file using all TemplateGroup
instances in $templates. |
18,656 | def __create_db_and_container(self):
db_id = self.config.database
container_name = self.config.container
self.db = self.__get_or_create_database(self.client, db_id)
self.container = self.__get_or_create_container(
self.client, container_name
) | Call the get or create methods. |
18,657 | def get(self, sid):
return EventContext(self._version, workspace_sid=self._solution['workspace_sid'], sid=sid, ) | Constructs an EventContext
:param sid: The sid
:returns: twilio.rest.taskrouter.v1.workspace.event.EventContext
:rtype: twilio.rest.taskrouter.v1.workspace.event.EventContext |
18,658 | def metadata_add_description(self):
service_description = {}
if (self.args.json):
service_description = json.loads(self.args.json)
if (self.args.url):
if "url" in service_description:
raise Exception("json service description already contains url field")
service_description["url"] = self.args.url
if (self.args.description):
if "description" in service_description:
raise Exception("json service description already contains description field")
service_description["description"] = self.args.description
metadata = load_mpe_service_metadata(self.args.metadata_file)
if ("service_description" in metadata):
service_description = {**metadata["service_description"], **service_description}
metadata.set_simple_field("service_description", service_description)
metadata.save_pretty(self.args.metadata_file) | Metadata: add description |
18,659 | def parse_subdomain_missing_zonefiles_record(cls, rec):
txt_entry = rec['txt']
if isinstance(txt_entry, list):
raise ParseError("TXT entry too long for a missing zone file list")
try:
return [int(i) for i in txt_entry.split()] if txt_entry is not None and len(txt_entry) > 0 else []
except ValueError:
raise ParseError() | Parse a missing-zonefiles vector given by the domain.
Returns the list of zone file indexes on success
Raises ParseError on unparseable records |
18,660 | def json2value(json_string, params=Null, flexible=False, leaves=False):
if not is_text(json_string):
Log.error("only unicode json accepted")
try:
if flexible:
json_string = re.sub(r"\"\"\".*?\"\"\"", r"\n", json_string, flags=re.MULTILINE)
json_string = "\n".join(remove_line_comment(l) for l in json_string.split("\n"))
hexx_str = bytes2hex(base_str, " ")
try:
char_str = " " + " ".join((c.decode("latin1") if ord(c) >= 32 else ".") for c in base_str)
except Exception:
char_str = " "
Log.error(CAN_NOT_DECODE_JSON + ":\n{{char_str}}\n{{hexx_str}}\n", char_str=char_str, hexx_str=hexx_str, cause=e) | :param json_string: THE JSON
:param params: STANDARD JSON PARAMS
:param flexible: REMOVE COMMENTS
:param leaves: ASSUME JSON KEYS ARE DOT-DELIMITED
:return: Python value |
18,661 | def validate(instance, schema, cls=None, *args, **kwargs):
if cls is None:
cls = validator_for(schema)
cls.check_schema(schema)
cls(schema, *args, **kwargs).validate(instance) | Validate an instance under the given schema.
>>> validate([2, 3, 4], {"maxItems" : 2})
Traceback (most recent call last):
...
ValidationError: [2, 3, 4] is too long
:func:`validate` will first verify that the provided schema is itself
valid, since not doing so can lead to less obvious error messages and fail
in less obvious or consistent ways. If you know you have a valid schema
already or don't care, you might prefer using the
:meth:`~IValidator.validate` method directly on a specific validator
(e.g. :meth:`Draft4Validator.validate`).
:argument instance: the instance to validate
:argument schema: the schema to validate with
:argument cls: an :class:`IValidator` class that will be used to validate
the instance.
If the ``cls`` argument is not provided, two things will happen in
accordance with the specification. First, if the schema has a
:validator:`$schema` property containing a known meta-schema [#]_ then the
proper validator will be used. The specification recommends that all
schemas contain :validator:`$schema` properties for this reason. If no
:validator:`$schema` property is found, the default validator class is
:class:`Draft4Validator`.
Any other provided positional and keyword arguments will be passed on when
instantiating the ``cls``.
:raises:
:exc:`ValidationError` if the instance is invalid
:exc:`SchemaError` if the schema itself is invalid
.. rubric:: Footnotes
.. [#] known by a validator registered with :func:`validates` |
18,662 | def param_changed_to(self, key, to_value, from_value=None):
last_value = getattr(self.last_manifest, key)
current_value = self.current_manifest.get(key)
if from_value is not None:
return last_value == from_value and current_value == to_value
return last_value != to_value and current_value == to_value | Returns true if the given parameter, with name key, has transitioned to the given value. |
18,663 | def main(args):
EXNAME = os.path.basename(__file__ if WIN32 else sys.argv[0])
for ext in (".py", ".pyc", ".exe", "-script.py", "-script.pyc"):
if EXNAME.endswith(ext):
EXNAME = EXNAME[:-len(ext)]
break
USAGE = % {"exname": EXNAME}
HELP = % {"exname": EXNAME, "usage": USAGE}
USAGE = textwrap.dedent(USAGE)
HELP = textwrap.dedent(HELP)
opts = _parse_args(args, USAGE, HELP)
quiet = opts.pop("quiet")
try:
first = True
for output in do(stream=sys.stdin, **opts):
if first:
first = False
else:
if opts["action"] not in ACTION_SUPPORTING_STREAMING:
die("Source YAML is multi-document, "
"which doesn%s' is not a valid action.\n%s"
% (e.args[0], USAGE)) | Entrypoint of the whole commandline application |
18,664 | def create(cls, name, engines, policy=None, comment=None, **kwargs):
json = {
: name,
: [eng.href for eng in engines],
: policy.href if policy is not None else policy,
: comment}
if kwargs:
json.update(policy_validation_settings(**kwargs))
return ElementCreator(cls, json) | Create a new validate policy task.
If a policy is not specified, the engines existing policy will
be validated. Override default validation settings as kwargs.
:param str name: name of task
:param engines: list of engines to validate
:type engines: list(Engine)
:param Policy policy: policy to validate. Uses the engines assigned
policy if none specified.
:param kwargs: see :func:`~policy_validation_settings` for keyword
arguments and default values.
:raises ElementNotFound: engine or policy specified does not exist
:raises CreateElementFailed: failure to create the task
:return: the task
:rtype: ValidatePolicyTask |
18,665 | def get_channels_in(self, guild_id: str) -> List[Dict[str, Any]]:
return self._query(f'guilds/{guild_id}/channels', 'GET') | Get a list of channels in the guild
Args:
guild_id: id of the guild to fetch channels from
Returns:
List of dictionary objects of channels in the guild. Note the different
types of channels: text, voice, DM, group DM.
https://discordapp.com/developers/docs/resources/channel#channel-object
Example:
[
{
"id": "41771983423143937",
"guild_id": "41771983423143937",
"name": "general",
"type": 0,
"position": 6,
"permission_overwrites": [],
"topic": "24/7 chat about how to gank Mike #2",
"last_message_id": "155117677105512449"
},
{
"id": "155101607195836416",
"guild_id": "41771983423143937",
"name": "ROCKET CHEESE",
"type": 2,
"position": 5,
"permission_overwrites": [],
"bitrate": 64000,
"user_limit": 0
},
{
"last_message_id": "3343820033257021450",
"type": 1,
"id": "319674150115610528",
"recipients": [
{
"username": "test",
"discriminator": "9999",
"id": "82198898841029460",
"avatar": "33ecab261d4681afa4d85a04691c4a01"
}
]
}
] |
18,666 | def get_time_objects_from_model_timesteps(cls, times, start):
modelTimestep = []
newtimes = []
for i in xrange(0, len(times)):
try:
modelTimestep.append(times[i+1] - times[i])
except StandardError:
modelTimestep.append(times[i] - times[i-1])
newtimes.append(start + timedelta(seconds=times[i]))
return (modelTimestep, newtimes) | Calculate the datetimes of the model timesteps
times should start at 0 and be in seconds |
18,667 | def legend_title_header_element(feature, parent):
_ = feature, parent
header = legend_title_header['string_format']
return header.capitalize() | Retrieve legend title header string from definitions. |
18,668 | def _drop_indices(self):
self._logger.info()
self._conn.execute(constants.DROP_TEXTNGRAM_INDEX_SQL)
self._logger.info() | Drops the database indices relating to n-grams. |
18,669 | def run():
parser = OptionParser(
version=__version__, description=__doc__,
)
parser.add_option(
, , dest=,
help=,
)
parser.add_option(
, , dest=, default=,
choices=[, ],
help=,
)
parser.add_option(
, , dest=, action=,
help=,
)
parser.add_option(
, , dest=,
help=,
)
parser.add_option(
, , dest=,
help=,
)
(options, args) = parser.parse_args()
if not options.url:
print()
exit(1)
engine = create_engine(options.url)
meta = MetaData()
meta.reflect(bind=engine)
if options.list:
print()
tables = sorted(meta.tables.keys())
def _g(l, i):
try:
return tables[i]
except IndexError:
return
for i in range(0, len(tables), 2):
print(.format(
_g(tables, i),
* (38 - len(_g(tables, i))),
_g(tables, i + 1),
))
exit(0)
tables = set(meta.tables.keys())
if options.include:
tables &= set(map(string.strip, options.include.split()))
if options.exclude:
tables -= set(map(string.strip, options.exclude.split()))
desc = describe(map(lambda x: operator.getitem(meta.tables, x), tables))
print(getattr(render, options.render)(desc)) | Command for reflection database objects |
18,670 | def monthly_build_list_regex(self):
return r % {
: self.date.year,
: str(self.date.month).zfill(2)} | Return the regex for the folder containing builds of a month. |
18,671 | def execute(self, conn, block_name, origin_site_name, transaction=False):
if not conn:
dbsExceptionHandler("dbsException-failed-connect2host", "Oracle/Block/UpdateStatus. \
Expects db connection from upper layer.", self.logger.exception)
binds = {"block_name": block_name, "origin_site_name": origin_site_name, "mtime": dbsUtils().getTime(),
"myuser": dbsUtils().getCreateBy()}
self.dbi.processData(self.sql, binds, conn, transaction) | Update origin_site_name for a given block_name |
18,672 | def echo_warnings_via_pager(warnings: List[WarningTuple], sep: str = ) -> None:
if not warnings:
click.echo()
sys.exit(0)
max_line_width = max(
len(str(exc.line_number))
for _, exc, _ in warnings
)
max_warning_width = max(
len(exc.__class__.__name__)
for _, exc, _ in warnings
)
s1 = + str(max_line_width) + + sep
s2 = + str(max_warning_width) + + sep
def _make_line(path: str, exc: BELParserWarning):
s = click.style(path, fg=) + sep
s += click.style(s1.format(exc.line_number), fg=, bold=True)
s += click.style(s2.format(exc.__class__.__name__),
fg=( if exc.__class__.__name__.endswith() else ))
s += click.style(exc.line, bold=True) + sep
s += click.style(str(exc))
return s
click.echo_via_pager(.join(
_make_line(path, exc)
for path, exc, _ in warnings
)) | Output the warnings from a BEL graph with Click and the system's pager. |
18,673 | def create_matcher(dispatcher, parsers, apptags, matcher=, hosts=tuple(), time_range=None,
time_period=(None, None), patterns=tuple(), invert=False, count=False,
files_with_match=None, max_count=0, only_matching=False, quiet=False,
thread=False, name_cache=None):
parsers = CycleParsers(parsers)
max_matches = 1 if quiet else max_count
use_app_rules = matcher !=
select_unparsed = matcher ==
register_log_lines = not (quiet or count or files_with_match is not None)
start_dt, end_dt = get_mktime_period(time_period)
pattern_search = create_search_function(invert, only_matching)
dispatch_selected = dispatcher.dispatch_selected
dispatch_context = dispatcher.dispatch_context
display_progress_bar = sys.stdout.isatty() and all(c.name != for c in dispatcher.channels)
def process_logfile(source, apps, encoding=):
log_parser = next(parsers)
first_event = None
last_event = None
app_thread = None
selected_data = None
line_counter = 0
unknown_counter = 0
selected_counter = 0
extra_tags = Counter()
dispatcher.reset()
read_size = 0
progress_bar = None
with open_resource(source) as logfile:
logfile_name = logfile.name
fstat = os.fstat(logfile.fileno())
file_mtime = datetime.datetime.fromtimestamp(fstat.st_mtime)
file_year = file_mtime.year
file_month = file_mtime.month
prev_year = file_year - 1
if display_progress_bar:
read_size = 0
progress_bar = ProgressBar(sys.stdout, fstat.st_size, logfile_name)
for line in logfile:
line = line.decode(encoding)
line_counter += 1
if line[-1] != :
line +=
if display_progress_bar:
read_size += len(line)
if not line_counter % 100:
progress_bar.redraw(read_size)
log_match = log_parser.match(line)
if log_match is None:
if getattr(log_data, 'repeat', None) is not None:
if selected_data is not None:
repeat = int(log_data.repeat)
if not thread:
selected_counter += repeat
if use_app_rules:
app = log_parser.app or get_app(selected_data, apps, apptags, extra_tags)
app.increase_last(repeat)
app.matches += 1
dispatch_context(
key=(app, app_thread),
filename=logfile_name,
line_number=line_counter,
rawlog=line
)
selected_data = None
continue
selected_data = None
app_matched, has_full_match, app_thread, output_data = app.match_rules(log_data)
if not pattern_matched and app_matched and app_thread is None:
continue
if output_data:
rawlog = name_cache.match_to_string(log_match, log_parser.parser.groupindex, output_data)
if app_matched:
app.matches += 1
if not has_full_match or select_unparsed:
dispatch_context(
key=(app, app_thread),
filename=logfile_name,
line_number=line_counter,
rawlog=rawlog
)
continue
else:
app.unparsed += 1
if not select_unparsed:
dispatch_context(
key=(app, app_thread),
filename=logfile_name,
line_number=line_counter,
rawlog=rawlog
)
continue
selected_data = log_data
if first_event is None:
first_event = event_dt
last_event = event_dt
else:
if first_event > event_dt:
first_event = event_dt
if last_event < event_dt:
last_event = event_dt
if pattern_matched:
if max_matches and selected_counter >= max_matches:
break
selected_counter += 1
if files_with_match:
break
if register_log_lines:
dispatch_selected(
key=(app, app_thread),
filename=logfile_name,
line_number=line_counter,
log_data=log_data,
rawlog=rawlog,
match=match
)
elif register_log_lines and not only_matching:
dispatch_context(
key=(app, app_thread),
filename=logfile_name,
line_number=line_counter,
rawlog=rawlog
)
if display_progress_bar:
progress_bar.redraw(fstat.st_size)
try:
for key in list(dispatcher.keys()):
dispatcher.flush(key)
except (NameError, AttributeError):
pass
if files_with_match and selected_counter or files_with_match is False and not selected_counter:
dispatch_selected(filename=logfile.name)
elif count:
dispatch_selected(filename=logfile.name, counter=selected_counter)
return MatcherResult(
lines=line_counter,
matches=selected_counter,
unknown=unknown_counter,
extra_tags=extra_tags,
first_event=first_event,
last_event=last_event
)
return process_logfile | Create a matcher engine.
:return: A matcher function. |
18,674 | def set_password(self, service, username, password):
password = self._encrypt(password or '')
keyring_working_copy = copy.deepcopy(self._keyring)
service_entries = keyring_working_copy.get(service)
if not service_entries:
service_entries = {}
keyring_working_copy[service] = service_entries
service_entries[username] = password
save_result = self._save_keyring(keyring_working_copy)
if save_result == self.OK:
self._keyring_dict = keyring_working_copy
return
elif save_result == self.CONFLICT:
self.docs_entry, keyring_dict = self._read()
existing_pwd = self._get_entry(self._keyring, service, username)
conflicting_pwd = self._get_entry(keyring_dict, service, username)
if conflicting_pwd == password:
self._keyring_dict = keyring_working_copy
return
elif conflicting_pwd is None or conflicting_pwd == existing_pwd:
| Set password for the username of the service |
18,675 | def _add_child(self, collection, set, child):
added = None
for c in child:
if c not in set:
set.add(c)
collection.append(c)
added = 1
if added:
self._children_reset() | Adds 'child' to 'collection', first checking 'set' to see if it's
already present. |
18,676 | def resolve_inputs(self, layers):
resolved = {}
for name, shape in self._input_shapes.items():
if shape is None:
name, shape = self._resolve_shape(name, layers)
resolved[name] = shape
self._input_shapes = resolved | Resolve the names of inputs for this layer into shape tuples.
Parameters
----------
layers : list of :class:`Layer`
A list of the layers that are available for resolving inputs.
Raises
------
theanets.util.ConfigurationError :
If an input cannot be resolved. |
18,677 | def _document_structure(self):
logger.debug("Documenting dataset structure")
key = self.get_structure_key()
text = json.dumps(self._structure_parameters, indent=2, sort_keys=True)
self.put_text(key, text)
key = self.get_dtool_readme_key()
self.put_text(key, self._dtool_readme_txt) | Document the structure of the dataset. |
18,678 | def ip_address_list(ips):
try:
return ip_address(ips)
except ValueError:
pass
return list(ipaddress.ip_network(u(ips)).hosts()) | IP address range validation and expansion. |
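Hypothetical usage, assuming the module-level `ip_address` validator and `u` helper the function relies on: a single address validates as-is, while a CIDR falls through to network expansion.

```python
print(ip_address_list('192.168.0.1'))     # single address, returned by ip_address()
print(ip_address_list('192.168.0.0/30'))  # expands to the two usable hosts, .1 and .2
```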
18,679 | def cov_error(self, comp_cov, score_metric="frobenius"):
if not isinstance(self.precision_, list):
return _compute_error(
comp_cov, self.covariance_, self.precision_, score_metric
)
path_errors = []
for lidx, lam in enumerate(self.path_):
path_errors.append(
_compute_error(
comp_cov,
self.covariance_[lidx],
self.precision_[lidx],
score_metric,
)
)
return np.array(path_errors) | Computes the covariance error vs. comp_cov.
May require self.path_
Parameters
----------
comp_cov : array-like, shape = (n_features, n_features)
The precision to compare with.
This should normally be the test sample covariance/precision.
scaling : bool
If True, the squared error norm is divided by n_features.
If False (default), the squared error norm is not rescaled.
score_metric : str
The type of norm used to compute the error between the estimated
self.precision, self.covariance and the reference `comp_cov`.
Available error types:
- 'frobenius' (default): sqrt(tr(A^t.A))
- 'spectral': sqrt(max(eigenvalues(A^t.A))
- 'kl': kl-divergence
- 'quadratic': quadratic loss
- 'log_likelihood': negative log likelihood
squared : bool
Whether to compute the squared error norm or the error norm.
If True (default), the squared error norm is returned.
If False, the error norm is returned.
Returns
-------
The min error between `self.covariance_` and `comp_cov`.
If self.precision_ is a list, returns errors for each matrix, otherwise
returns a scalar. |
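A small NumPy illustration of the default 'frobenius' metric listed above, sqrt(tr(A^T.A)) of the difference between the comparison and estimated covariances; the matrices are made up.

```python
import numpy as np

comp_cov = np.array([[1.0, 0.2], [0.2, 1.0]])
est_cov = np.array([[0.9, 0.1], [0.1, 1.1]])
diff = comp_cov - est_cov
frobenius = np.sqrt(np.trace(diff.T @ diff))   # same as np.linalg.norm(diff, 'fro')
print(frobenius)
```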
18,680 | def _get_trailing_whitespace(marker, s):
suffix = ''
start = s.index(marker) + len(marker)
i = start
while i < len(s):
if s[i] in ' \t':
suffix += s[i]
elif s[i] in '\r\n':
suffix += s[i]
if s[i] == '\r' and i + 1 < len(s) and s[i + 1] == '\n':
suffix += s[i + 1]
break
else:
break
i += 1
return suffix | Return the whitespace content trailing the given 'marker' in string 's',
up to and including a newline. |
18,681 | async def ehlo(self, from_host=None):
if from_host is None:
from_host = self.fqdn
code, message = await self.do_cmd("EHLO", from_host)
self.last_ehlo_response = (code, message)
extns, auths = SMTP.parse_esmtp_extensions(message)
self.esmtp_extensions = extns
self.auth_mechanisms = auths
self.supports_esmtp = True
return code, message | Sends a SMTP 'EHLO' command. - Identifies the client and starts the
session.
If given ``from_host`` is None, defaults to the client FQDN.
For further details, please check out `RFC 5321 § 4.1.1.1`_.
Args:
from_host (str or None): Name to use to identify the client.
Raises:
ConnectionResetError: If the connection with the server is
unexpectedely lost.
SMTPCommandFailedError: If the server refuses our EHLO greeting.
Returns:
(int, str): A (code, message) 2-tuple containing the server
response.
.. _`RFC 5321 § 4.1.1.1`: https://tools.ietf.org/html/rfc5321#section-4.1.1.1 |
18,682 | def get_identity(identity):
if isinstance(identity, AnonymousUser):
return identity, None
if isinstance(identity, get_user_model()):
return identity, None
elif isinstance(identity, Group):
return None, identity
else:
raise NotUserNorGroup(
.format(identity),
) | Returns a (user_obj, None) tuple or a (None, group_obj) tuple depending on the considered
instance. |
18,683 | def template_filter(self, param=None):
def deco(func):
name = param or func.__name__
self.filters[name] = func
return func
return deco | Returns a decorator that adds the wrapped function to the dictionary of template filters.
The wrapped function is keyed by either the supplied param (if supplied)
or by the wrapped function's name.
:param param: Optional name to use instead of the name of the function to be wrapped
:return: A decorator to wrap a template filter function
:rtype: callable |
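Typical decorator usage, assuming `app` is an instance of the class that defines template_filter above:
@app.template_filter()           # registered under the function name: 'shout'
def shout(value):
    return value.upper() + '!'

@app.template_filter('trimmed')  # registered under the explicit name 'trimmed'
def strip_ws(value):
    return value.strip()

assert app.filters['shout']('hi') == 'HI!'
assert app.filters['trimmed']('  x  ') == 'x'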
18,684 | def put(self, request, bot_id, id, format=None):
return super(TelegramChatStateDetail, self).put(request, bot_id, id, format) | Update existing Telegram chat state
---
serializer: TelegramChatStateSerializer
responseMessages:
- code: 401
message: Not authenticated
- code: 400
message: Not valid request |
18,685 | def add_schema(self, database, schema):
self.schemas.add((_lower(database), _lower(schema))) | Add a schema to the set of known schemas (case-insensitive)
:param str database: The database name to add.
:param str schema: The schema name to add. |
def load_lists(keys=[], values=[], name='NT'):
mapping = dict(zip(keys, values))
return mapper(mapping, _nt_name=name) | Map namedtuples given a pair of key, value lists. |
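For example, assuming the module-level mapper helper builds a namedtuple (namedtupled-style) from the zipped mapping:
>>> nt = load_lists(keys=['height', 'width'], values=[10, 20])
>>> nt.height, nt.width
(10, 20)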
18,687 | def from_etree(cls, etree_element):
ins = SaltElement.from_etree(etree_element)
ins.__class__ = SaltLayer.mro()[0]
for element in ('nodes', 'edges'):  # attribute names assumed from the SaltXMI <layers> element
elem_list = []
xpath_result = etree_element.xpath('@' + element)
if xpath_result:
val_str = xpath_result[0]
elem_list.extend(int(elem_id)
for elem_id in DIGITS.findall(val_str))
setattr(ins, element, elem_list)
return ins | creates a ``SaltLayer`` instance from the etree representation of an
<layers> element from a SaltXMI file. |
18,688 | def pattern_filter(items, whitelist=None, blacklist=None, key=None):
key = key or __return_self
if whitelist:
whitelisted = _filter(items, whitelist, key)
if blacklist:
blacklisted = _filter(items, blacklist, key)
whitelisted.difference_update(blacklisted)
return [item for item in items if key(item) in whitelisted]
elif blacklist:
blacklisted = _filter(items, blacklist, key)
return [item for item in items if key(item) not in blacklisted]
else:
return items | This filters `items` by a regular expression `whitelist` and/or
`blacklist`, with the `blacklist` taking precedence. An optional `key`
function can be provided that will be passed each item. |
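A usage sketch, assuming the module-level _filter helper matches each pattern as a regular expression (re.search) against key(item):
services = ['nginx', 'postgres', 'redis', 'nginx-exporter']

# Keep anything matching 'nginx', except the exporter (blacklist wins).
pattern_filter(services, whitelist=[r'^nginx'], blacklist=[r'exporter$'])
# -> ['nginx']

# Blacklist only: drop redis, keep everything else in original order.
pattern_filter(services, blacklist=[r'^redis$'])
# -> ['nginx', 'postgres', 'nginx-exporter']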
18,689 | def body(self, value):
self.__body = value
if value is not None:
body_length = getattr(
self.__body, 'len', None) or len(self.__body)  # file-like bodies may expose a 'len' attribute
self.headers['Content-Length'] = str(body_length)  # header name assumed
else:
self.headers.pop('Content-Length', None)
| Sets the request body; handles logging and length measurement. |
18,690 | def get_user_config_dir():
user_home = os.getenv('XDG_CONFIG_HOME')
if user_home is None or not user_home:
config_path = os.path.expanduser(os.path.join('~', '.config', 's-tui'))
else:
config_path = os.path.join(user_home, 's-tui')
return config_path | Return the path to the user s-tui config directory |
18,691 | def draw_on_image(self,
image,
color=(0, 255, 0), color_face=None,
color_lines=None, color_points=None,
alpha=1.0, alpha_face=None,
alpha_lines=None, alpha_points=None,
size=1, size_lines=None, size_points=None,
raise_if_out_of_image=False):
for poly in self.polygons:
image = poly.draw_on_image(
image,
color=color,
color_face=color_face,
color_lines=color_lines,
color_points=color_points,
alpha=alpha,
alpha_face=alpha_face,
alpha_lines=alpha_lines,
alpha_points=alpha_points,
size=size,
size_lines=size_lines,
size_points=size_points,
raise_if_out_of_image=raise_if_out_of_image
)
return image | Draw all polygons onto a given image.
Parameters
----------
image : (H,W,C) ndarray
The image onto which to draw the bounding boxes.
This image should usually have the same shape as set in
``PolygonsOnImage.shape``.
color : iterable of int, optional
The color to use for the whole polygons.
Must correspond to the channel layout of the image. Usually RGB.
The values for `color_face`, `color_lines` and `color_points`
will be derived from this color if they are set to ``None``.
This argument has no effect if `color_face`, `color_lines`
and `color_points` are all set to anything other than ``None``.
color_face : None or iterable of int, optional
The color to use for the inner polygon areas (excluding perimeters).
Must correspond to the channel layout of the image. Usually RGB.
If this is ``None``, it will be derived from ``color * 1.0``.
color_lines : None or iterable of int, optional
The color to use for the lines (aka perimeters/borders) of the
polygons. Must correspond to the channel layout of the image.
Usually RGB. If this is ``None``, it will be derived
from ``color * 0.5``.
color_points : None or iterable of int, optional
The color to use for the corner points of the polygons.
Must correspond to the channel layout of the image. Usually RGB.
If this is ``None``, it will be derived from ``color * 0.5``.
alpha : float, optional
The opacity of the whole polygons, where ``1.0`` denotes
completely visible polygons and ``0.0`` invisible ones.
The values for `alpha_face`, `alpha_lines` and `alpha_points`
will be derived from this alpha value if they are set to ``None``.
This argument has no effect if `alpha_face`, `alpha_lines`
and `alpha_points` are all set to anything other than ``None``.
alpha_face : None or number, optional
The opacity of the polygon's inner areas (excluding the perimeters),
where ``1.0`` denotes completely visible inner areas and ``0.0``
invisible ones.
If this is ``None``, it will be derived from ``alpha * 0.5``.
alpha_lines : None or number, optional
The opacity of the polygon's lines (aka perimeters/borders),
where ``1.0`` denotes completely visible perimeters and ``0.0``
invisible ones.
If this is ``None``, it will be derived from ``alpha * 1.0``.
alpha_points : None or number, optional
The opacity of the polygon's corner points, where ``1.0`` denotes
completely visible corners and ``0.0`` invisible ones.
Currently this is an on/off choice, i.e. only ``0.0`` or ``1.0``
are allowed.
If this is ``None``, it will be derived from ``alpha * 1.0``.
size : int, optional
Size of the polygons.
The sizes of the line and points are derived from this value,
unless they are set.
size_lines : None or int, optional
Thickness of the polygon lines (aka perimeter/border).
If ``None``, this value is derived from `size`.
size_points : int, optional
The size of all corner points. If set to ``C``, each corner point
will be drawn as a square of size ``C x C``.
raise_if_out_of_image : bool, optional
Whether to raise an error if any polygon is fully
outside of the image. If set to False, no error will be raised and
only the parts inside the image will be drawn.
Returns
-------
image : (H,W,C) ndarray
Image with drawn polygons. |
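A minimal usage sketch with imgaug (assuming a version that ships the augmentables.polys module and the alpha_face/size_points parameters documented above):
import numpy as np
from imgaug.augmentables.polys import Polygon, PolygonsOnImage

image = np.zeros((100, 100, 3), dtype=np.uint8)
psoi = PolygonsOnImage(
    [Polygon([(10, 10), (80, 20), (50, 70)])],
    shape=image.shape)

# Draw a green triangle with a faint fill and small corner points.
drawn = psoi.draw_on_image(
    image, color=(0, 255, 0), alpha_face=0.2, size_points=3)
print(drawn.shape)  # (100, 100, 3)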
18,692 | def send_highspeed(self, data, progress_callback):
if not self.connected:
raise HardwareError("Cannot send a script if we are not in a connected state")
if isinstance(data, str) and not isinstance(data, bytes):
raise ArgumentError("You must send bytes or bytearray to _send_highspeed", type=type(data))
if not isinstance(data, bytes):
data = bytes(data)
try:
self._on_progress = progress_callback
self._loop.run_coroutine(self.adapter.send_script(0, data))
finally:
self._on_progress = None | Send a script to a device at highspeed, reporting progress.
This method takes a binary blob and downloads it to the device as fast
as possible, calling the passed progress_callback periodically with
updates on how far it has gotten.
Args:
data (bytes): The binary blob that should be sent to the device at highspeed.
progress_callback (callable): A function that will be called periodically to
report progress. The signature must be callback(done_count, total_count)
where done_count and total_count will be passed as integers. |
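A usage sketch for the progress-callback contract described above; the connected `hw` object is assumed to be whatever exposes send_highspeed:
def report_progress(done_count, total_count):
    percent = 100.0 * done_count / max(total_count, 1)
    print("script download: %.1f%% (%d/%d)" % (percent, done_count, total_count))

script = bytes(bytearray(range(256)))       # any binary blob
hw.send_highspeed(script, report_progress)  # 'hw' is an assumed, already-connected object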
18,693 | def solve_mbar(u_kn_nonzero, N_k_nonzero, f_k_nonzero, solver_protocol=None):
if solver_protocol is None:
solver_protocol = DEFAULT_SOLVER_PROTOCOL
for protocol in solver_protocol:
if protocol['method'] is None:
protocol['method'] = DEFAULT_SOLVER_METHOD
all_results = []
for k, options in enumerate(solver_protocol):
f_k_nonzero, results = solve_mbar_once(u_kn_nonzero, N_k_nonzero, f_k_nonzero, **options)
all_results.append(results)
all_results.append(("Final gradient norm: %.3g" % np.linalg.norm(mbar_gradient(u_kn_nonzero, N_k_nonzero, f_k_nonzero))))
return f_k_nonzero, all_results | Solve MBAR self-consistent equations using some sequence of equation solvers.
Parameters
----------
u_kn_nonzero : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies, i.e. -log unnormalized probabilities
for the nonempty states
N_k_nonzero : np.ndarray, shape=(n_states), dtype='int'
The number of samples in each state for the nonempty states
f_k_nonzero : np.ndarray, shape=(n_states), dtype='float'
The reduced free energies for the nonempty states
solver_protocol: tuple(dict()), optional, default=None
Optional list of dictionaries of steps in solver protocol.
If None, a default protocol will be used.
Returns
-------
f_k : np.ndarray
The converged reduced free energies.
all_results : list(dict())
List of results from each step of solver_protocol. Each element in
list contains the results dictionary from solve_mbar_once()
for the corresponding step.
Notes
-----
This function requires that N_k_nonzero > 0--that is, you should have
already dropped all the states for which you have no samples.
Internally, this function works in a reduced coordinate system defined
by subtracting off the first component of f_k and fixing that component
to be zero.
This function calls `solve_mbar_once()` multiple times to achieve
converged results. Generally, a single call to solve_mbar_once()
will not give fully converged answers because of limited numerical precision.
Each call to `solve_mbar_once()` re-conditions the nonlinear
equations using the current guess. |
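A hedged call sketch. The import path is an assumption (pymbar-style layout), and the method names in the protocol are ordinary scipy.optimize choices used only for illustration:
import numpy as np
from pymbar.mbar_solvers import solve_mbar  # assumed module path

# Two nonempty states with toy reduced potentials; N_k sums to the sample count.
u_kn = np.array([[0.0, 0.1, 0.2, 1.1],
                 [1.0, 0.9, 1.2, 0.1]])
N_k = np.array([2, 2])
f_k0 = np.zeros(2)

protocol = (dict(method='L-BFGS-B'), dict(method='hybr'))  # illustrative solver sequence
f_k, results = solve_mbar(u_kn, N_k, f_k0, solver_protocol=protocol)
print(f_k)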
18,694 | def _get_config(config_file):
parser = ConfigParser.SafeConfigParser()
if os.path.lexists(config_file):
try:
log.info('Reading configuration from %s', config_file)
inp = open(config_file)
parser.readfp(inp)
return parser
except (IOError, ConfigParser.ParsingError) as err:
raise ConfigError("Failed to read configuration %s\n%s" % (config_file, err))
return None | find, read and parse configuraton. |
18,695 | def listen(self, address, ssl=False, family=0, flags=0, ipc=False, backlog=128):
handles = []
handle_args = ()
if isinstance(address, six.string_types):
handle_type = pyuv.Pipe
handle_args = (ipc,)
addresses = [address]
elif isinstance(address, tuple):
handle_type = pyuv.TCP
result = getaddrinfo(address[0], address[1], family, socket.SOCK_STREAM,
socket.IPPROTO_TCP, flags)
addresses = [res[4] for res in result]
elif isinstance(address, pyuv.Stream):
handles.append(address)
addresses = []
else:
raise TypeError('expecting a string, tuple, or pyuv.Stream instance')
for addr in addresses:
handle = handle_type(self._hub.loop, *handle_args)
try:
if compat.pyuv_pipe_helper(handle, handle_args, 'bind', addr):  # 'bind' operation assumed
handles.append(handle)
break
handle.bind(addr)
except pyuv.error.UVError as e:
self._log.warning('bind error {}, skipping address {}', e[0], saddr(addr))
continue
handles.append(handle)
addresses = []
for handle in handles:
if backlog is not None:
callback = functools.partial(self._on_new_connection, ssl=ssl)
handle.listen(callback, backlog)
addr = handle.getsockname()
self._log.debug('listening on {}', saddr(addr))
addresses.append(addr)
self._handles += handles
self._addresses += addresses | Create a new transport, bind it to *address*, and start listening
for new connections.
See :func:`create_server` for a description of *address* and the
supported keyword arguments. |
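A hedged sketch of the three accepted address forms; `Server` and `echo_protocol_factory` are placeholders for whatever server class exposes this listen() method:
def echo_protocol_factory():
    ...  # placeholder: return a protocol instance for each new connection

server = Server(echo_protocol_factory)      # hypothetical server exposing listen()
server.listen(('127.0.0.1', 8080))          # TCP endpoint, resolved via getaddrinfo
server.listen('/tmp/echo.sock', ipc=False)  # named pipe / UNIX socket path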
18,696 | def _spectrogram_mono(self, x):
x = K.permute_dimensions(x, [0, 2, 1])
x = K.expand_dims(x, 3)
subsample = (self.n_hop, 1)
output_real = K.conv2d(x, self.dft_real_kernels,
strides=subsample,
padding=self.padding,
data_format='channels_last')
output_imag = K.conv2d(x, self.dft_imag_kernels,
strides=subsample,
padding=self.padding,
data_format='channels_last')
output = output_real ** 2 + output_imag ** 2
if self.image_data_format == 'channels_first':
output = K.permute_dimensions(output, [0, 3, 1, 2])
else:
output = K.permute_dimensions(output, [0, 2, 3, 1])
return output | x.shape : (None, 1, len_src),
returns 2D batch of a mono power-spectrogram |
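The same power computation (real^2 + imag^2 of a strided DFT) can be written directly in NumPy; this standalone sketch windows explicitly, whereas the Keras layer bakes the window into its DFT kernels:
import numpy as np

def power_spectrogram(signal, n_fft=512, n_hop=256):
    window = np.hanning(n_fft)
    n_frames = 1 + (len(signal) - n_fft) // n_hop
    frames = np.stack([signal[i * n_hop:i * n_hop + n_fft] * window
                       for i in range(n_frames)])
    spectrum = np.fft.rfft(frames, axis=-1)         # complex DFT per frame
    return spectrum.real ** 2 + spectrum.imag ** 2  # same power formula as above

print(power_spectrogram(np.random.randn(4096)).shape)  # (15, 257)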
18,697 | def _check_branching(X,Xsamples,restart,threshold=0.25):
check = True
if restart == 0:
Xsamples.append(X)
else:
for Xcompare in Xsamples:
Xtmax_diff = np.absolute(X[-1,:] - Xcompare[-1,:])
if np.partition(Xtmax_diff,-2)[-2] < threshold:
check = False
if check:
Xsamples.append(X)
if not check:
logg.m('realization {}:'.format(restart), 'no new branch', v=4)
else:
logg.m('realization {}:'.format(restart), 'new branch', v=4)
return check, Xsamples | \
Check whether time series branches.
Parameters
----------
X (np.array): current time series data.
Xsamples (np.array): list of previous branching samples.
restart (int): counts number of restart trials.
threshold (float, optional): sets threshold for attractor
identification.
Returns
-------
check : bool
true if branching realization
Xsamples
updated list |
18,698 | def findHotspot( self, name ):
for hotspot in self._hotspots:
if ( hotspot.name() == name ):
return hotspot
return None | Finds the hotspot based on the inputed name.
:param name | <str>
:return <XNodeHotspot> || None |
18,699 | def main(inputstructs, inputpdbids):
pdbid, pdbpath = None, None
title = "* Protein-Ligand Interaction Profiler v%s *" % __version__
write_message('\n' + '*' * len(title) + '\n')
write_message(title)
write_message('*' * len(title) + '\n\n')
outputprefix = config.OUTPUTFILENAME
if inputstructs is not None:
num_structures = len(inputstructs)
inputstructs = remove_duplicates(inputstructs)
read_from_stdin = False
for inputstruct in inputstructs:
if inputstruct == '-':  # '-' means read the structure from stdin
inputstruct = sys.stdin.read()
read_from_stdin = True
if config.RAWSTRING:
if sys.version_info < (3,):
inputstruct = bytes(inputstruct).decode('unicode_escape')
else:
inputstruct = bytes(inputstruct, 'utf8').decode('unicode_escape')
else:
if os.path.getsize(inputstruct) == 0:
sysexit(2, 'Empty PDB file. Please provide a valid PDB file.')
if num_structures > 1:
basename = inputstruct.split('.')[-2].split('/')[-1]
config.OUTPATH = '/'.join([config.BASEPATH, basename])
outputprefix = 'report_' + basename
process_pdb(inputstruct, config.OUTPATH, as_string=read_from_stdin, outputprefix=outputprefix)
else:
num_pdbids = len(inputpdbids)
inputpdbids = remove_duplicates(inputpdbids)
for inputpdbid in inputpdbids:
pdbpath, pdbid = download_structure(inputpdbid)
if num_pdbids > 1:
config.OUTPATH = '/'.join([config.BASEPATH, pdbid[1:3].upper(), pdbid.upper()])
outputprefix = 'report_' + pdbid
process_pdb(pdbpath, config.OUTPATH, outputprefix=outputprefix)
if (pdbid is not None or inputstructs is not None) and config.BASEPATH is not None:
if config.BASEPATH in ['.', './']:
write_message('\nFinished analysis. Find the result files in the working directory.\n\n')
else:
write_message('\nFinished analysis. Find the result files in %s\n\n' % config.BASEPATH) | Main function. Calls functions for processing, report generation and visualization. |