Unnamed: 0 (int64, 0-389k) | code (stringlengths 26-79.6k) | docstring (stringlengths 1-46.9k) |
---|---|---|
379,900 | def Dropout(p=0, per_channel=False, name=None, deterministic=False, random_state=None):
if ia.is_single_number(p):
p2 = iap.Binomial(1 - p)
elif ia.is_iterable(p):
ia.do_assert(len(p) == 2)
ia.do_assert(p[0] < p[1])
ia.do_assert(0 <= p[0] <= 1.0)
ia.do_assert(0 <= p[1] <= 1.0)
p2 = iap.Binomial(iap.Uniform(1 - p[1], 1 - p[0]))
elif isinstance(p, iap.StochasticParameter):
p2 = p
else:
raise Exception("Expected p to be float or int or StochasticParameter, got %s." % (type(p),))
if name is None:
name = "Unnamed%s" % (ia.caller_name(),)
return MultiplyElementwise(p2, per_channel=per_channel, name=name, deterministic=deterministic,
random_state=random_state) | Augmenter that sets a certain fraction of pixels in images to zero.
dtype support::
See ``imgaug.augmenters.arithmetic.MultiplyElementwise``.
Parameters
----------
p : float or tuple of float or imgaug.parameters.StochasticParameter, optional
The probability of any pixel being dropped (i.e. set to zero).
* If a float, then that value will be used for all images. A value
of 1.0 would mean that all pixels will be dropped and 0.0 that
no pixels would be dropped. A value of 0.05 corresponds to 5
percent of all pixels dropped.
* If a tuple ``(a, b)``, then a value p will be sampled from the
range ``a <= p <= b`` per image and be used as the pixel's dropout
probability.
* If a StochasticParameter, then this parameter will be used to
determine per pixel whether it should be dropped (sampled value
of 0) or shouldn't (sampled value of 1).
If you instead want to provide the probability as a stochastic
parameter, you can usually do ``imgaug.parameters.Binomial(1-p)``
to convert parameter `p` to a 0/1 representation.
per_channel : bool or float, optional
Whether to use the same value (is dropped / is not dropped)
for all channels of a pixel (False) or to sample a new value for each
channel (True).
If this value is a float p, then for p percent of all images
`per_channel` will be treated as True, otherwise as False.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.Dropout(0.02)
drops 2 percent of all pixels.
>>> aug = iaa.Dropout((0.0, 0.05))
drops in each image a random fraction of all pixels, where the fraction
is in the range ``0.0 <= x <= 0.05``.
>>> aug = iaa.Dropout(0.02, per_channel=True)
drops 2 percent of all pixels in a channel-wise fashion, i.e. it is unlikely
for any pixel to have all channels set to zero (black pixels).
>>> aug = iaa.Dropout(0.02, per_channel=0.5)
same as previous example, but the `per_channel` feature is only active
for 50 percent of all images. |
379,901 | def display_files(self, pcs_files):
tree_iters = []
for pcs_file in pcs_files:
path = pcs_file['path']
pixbuf, type_ = self.app.mime.get(path, pcs_file['isdir'],
icon_size=self.ICON_SIZE)
name = os.path.split(path)[NAME_COL]
tooltip = gutil.escape(name)
size = pcs_file.get('size', 0)
if pcs_file['isdir']:
human_size = ''
else:
human_size = util.get_human_size(pcs_file['size'])[0]
mtime = pcs_file.get('server_mtime', 0)
human_mtime = time.ctime(mtime)
tree_iter = self.liststore.append([
pixbuf, name, path, tooltip, size, human_size,
pcs_file['isdir'], mtime, human_mtime, type_,
json.dumps(pcs_file)
])
tree_iters.append(tree_iter)
cache_path = Config.get_cache_path(self.app.profile['username'])
gutil.async_call(gutil.update_liststore_image, self.liststore,
tree_iters, PIXBUF_COL, pcs_files, cache_path,
self.ICON_SIZE) | Reformat the file list and strip out unneeded information.
This is mainly done to make the subsequent lookup easier.
Each file's path is extracted and then put into a list. |
379,902 | def from_tuples(cls, tups):
ivs = [Interval(*t) for t in tups]
return IntervalTree(ivs) | Create a new IntervalTree from an iterable of 2- or 3-tuples,
where the tuple lists begin, end, and optionally data. |
379,903 | def local_lru(obj):
@wraps(obj)
def memoizer(*args, **kwargs):
instance = args[0]
lru_size = instance._cache_size
if lru_size:
cache = instance._cache
key = str((args, kwargs))
try:
r = cache.pop(key)
cache[key] = r
except KeyError:
if len(cache) >= lru_size:
cache.popitem(last=False)
r = cache[key] = obj(*args, **kwargs)
return r
return obj(*args, **kwargs)
return memoizer | Property that maps to a key in a local dict-like attribute.
self._cache must be an OrderedDict
self._cache_size must be defined as LRU size
..
class Foo(object):
def __init__(self, cache_size=5000):
self._cache = OrderedDict()
self._cache_size = cache_size
@local_lru
def expensive_meth(self, arg):
pass
.. |
379,904 | def _deallocator(self):
lookup = {
"c_bool": "logical",
"c_double": "double",
"c_double_complex": "complex",
"c_char": "char",
"c_int": "int",
"c_float": "float",
"c_short": "short",
"c_long": "long"
}
ctype = type(self.pointer).__name__.replace("LP_", "").lower()
if ctype in lookup:
return "dealloc_{0}_{1:d}d".format(lookup[ctype], len(self.indices))
else:
return None | Returns the name of the subroutine in ftypes_dealloc.f90 that can
deallocate the array for this Ftype's pointer.
:arg ctype: the string c-type of the variable. |
379,905 | def get_querystring(self):
to_remove = self.get_querystring_parameter_to_remove()
query_string = urlparse(self.request.get_full_path()).query
query_dict = parse_qs(query_string.encode())
for arg in to_remove:
if arg in query_dict:
del query_dict[arg]
clean_query_string = urlencode(query_dict, doseq=True)
return clean_query_string | Clean existing query string (GET parameters) by removing
arguments that we don't want to preserve (sort parameter, 'page') |
379,906 | def run(self, deploy_attempted=False):
stdout = stderr = retcode = None
if self.opts.get('raw_shell', False):
cmd_str = ' '.join([self._escape_arg(arg) for arg in self.argv])
stdout, stderr, retcode = self.shell.exec_cmd(cmd_str)
elif self.fun in self.wfuncs or self.mine:
stdout, retcode = self.run_wfunc()
else:
stdout, stderr, retcode = self.cmd_block()
return stdout, stderr, retcode | Execute the routine, the routine can be either:
1. Execute a raw shell command
2. Execute a wrapper func
3. Execute a remote Salt command
If a (re)deploy is needed, then retry the operation after a deploy
attempt
Returns tuple of (stdout, stderr, retcode) |
379,907 | def get_fresh_primary_tumors(biospecimen):
df = biospecimen
num_before = len(df.index)
df = df.loc[~df['is_ffpe']]
logger.info(
'Removed %d FFPE samples (out of %d samples).',
num_before - len(df.index), num_before)
num_before = len(df.index)
df = df.loc[df['sample_type'] == 'Primary Tumor']
logger.info(
'Removed %d non-primary-tumor samples (out of %d samples).',
num_before - len(df.index), num_before)
return df | Filter biospecimen data to only keep non-FFPE primary tumor samples.
Parameters
----------
biospecimen : `pandas.DataFrame`
The biospecimen data frame. This type of data frame is returned by
:meth:`get_biospecimen_data`.
Returns
-------
`pandas.DataFrame`
The filtered data frame. |
379,908 | def do_loop_turn(self):
if not self.is_master:
logger.debug("Waiting for my master death...")
self.wait_for_master_death()
return
if self.loop_count % self.alignak_monitor_period == 1:
self.get_alignak_status(details=True)
if self.kill_request:
logger.info("daemon stop mode ...")
if not self.dispatcher.stop_request_sent:
logger.info("entering daemon stop mode, time before exiting: %s",
self.conf.daemons_stop_timeout)
self.dispatcher.stop_request()
if time.time() > self.kill_timestamp + self.conf.daemons_stop_timeout:
logger.info("daemon stop mode delay reached, immediate stop")
self.dispatcher.stop_request(stop_now=True)
time.sleep(1)
self.interrupted = True
logger.info("exiting...")
if not self.kill_request:
self.check_and_del_zombie_modules()
_t0 = time.time()
self.hook_point()
statsmgr.timer(, time.time() - _t0)
self.check_and_log_tp_activation_change()
if not self.daemons_check():
if self.conf.daemons_failure_kill:
self.request_stop(message="Some Alignak daemons cannot be checked.",
exit_code=4)
else:
logger.warning("Should have killed my children if "
" were set!")
if not self.daemons_reachability_check():
logger.warning("A new configuration dispatch is required!")
self.configuration_dispatch(self.dispatcher.not_configured)
_t0 = time.time()
self.get_objects_from_from_queues()
statsmgr.timer(, time.time() - _t0)
_t0 = time.time()
self.get_broks_from_satellites()
statsmgr.timer(, time.time() - _t0)
_t0 = time.time()
self.push_broks_to_broker()
statsmgr.timer(, time.time() - _t0)
if self.system_health and (self.loop_count % self.system_health_period == 1):
perfdatas = []
cpu_count = psutil.cpu_count()
perfdatas.append("=%d" % cpu_count)
logger.debug(" . cpu count: %d", cpu_count)
cpu_percents = psutil.cpu_percent(percpu=True)
cpu = 1
for percent in cpu_percents:
perfdatas.append("=%.2f%%" % (cpu, percent))
cpu += 1
cpu_times_percent = psutil.cpu_times_percent(percpu=True)
cpu = 1
for cpu_times_percent in cpu_times_percent:
logger.debug(" . cpu time percent: %s", cpu_times_percent)
for key in cpu_times_percent._fields:
perfdatas.append(
"=%.2f%%" % (cpu, key,
getattr(cpu_times_percent, key)))
cpu += 1
logger.info("%s cpu|%s", self.name, " ".join(perfdatas))
perfdatas = []
disk_partitions = psutil.disk_partitions(all=False)
for disk_partition in disk_partitions:
logger.debug(" . disk partition: %s", disk_partition)
disk = getattr(disk_partition, 'mountpoint')
disk_usage = psutil.disk_usage(disk)
logger.debug(" . disk usage: %s", disk_usage)
for key in disk_usage._fields:
if 'percent' in key:
perfdatas.append("=%.2f%%"
% (disk, getattr(disk_usage, key)))
else:
perfdatas.append("=%dB"
% (disk, key, getattr(disk_usage, key)))
logger.info("%s disks|%s", self.name, " ".join(perfdatas))
perfdatas = []
virtual_memory = psutil.virtual_memory()
logger.debug(" . memory: %s", virtual_memory)
for key in virtual_memory._fields:
if 'percent' in key:
perfdatas.append("=%.2f%%"
% (key, getattr(virtual_memory, key)))
else:
perfdatas.append("=%dB"
% (key, getattr(virtual_memory, key)))
swap_memory = psutil.swap_memory()
logger.debug(" . memory: %s", swap_memory)
for key in swap_memory._fields:
if 'percent' in key:
perfdatas.append("=%.2f%%"
% (key, getattr(swap_memory, key)))
else:
perfdatas.append("=%dB"
% (key, getattr(swap_memory, key)))
logger.info("%s memory|%s", self.name, " ".join(perfdatas)) | Loop turn for Arbiter
If not a master daemon, wait for my master death...
Else, run:
* Check satellites are alive
* Check and dispatch (if needed) the configuration
* Get broks and external commands from the satellites
* Push broks and external commands to the satellites
:return: None |
379,909 | def load(self, name):
try:
return self._factories[name]()
except KeyError:
raise ValueError(
"no %s factory registered under name %r, options are: %r" %
(self.interface.__name__, name, sorted(self._factories)),
) | Construct an object from a registered factory.
Parameters
----------
name : str
Name with which the factory was registered. |
379,910 | def show_firmware_version_output_show_firmware_version_node_info_firmware_version_info_secondary_version(self, **kwargs):
config = ET.Element("config")
show_firmware_version = ET.Element("show_firmware_version")
config = show_firmware_version
output = ET.SubElement(show_firmware_version, "output")
show_firmware_version = ET.SubElement(output, "show-firmware-version")
node_info = ET.SubElement(show_firmware_version, "node-info")
firmware_version_info = ET.SubElement(node_info, "firmware-version-info")
secondary_version = ET.SubElement(firmware_version_info, "secondary-version")
secondary_version.text = kwargs.pop('secondary_version')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
379,911 | def load_extracted(src_dir: str,
patterns="*.npy",
vars_in_cols: bool = True,
index: pd.Series = None):
def _load(path, index):
if index is None:
arr = np.load(str(path))
else:
arr = np.load(str(path), mmap_mode="r")[index]
return arr
src_dir = Path(src_dir)
paths = []
if isinstance(patterns, str):
patterns = [patterns]
for pat in patterns:
paths += src_dir.glob(pat)
if vars_in_cols:
df_data = {}
for path in paths:
df_data[path.stem] = _load(path, index)
df_data = pd.DataFrame(df_data)
if index is not None:
df_data.index = index.index[index]
else:
df_data = []
for path in paths:
arr = _load(path, index)
df_data.append(pd.DataFrame(np.expand_dims(arr, 0), index=[path.stem]))
df_data = pd.concat(df_data)
if index is not None:
df_data.columns = index.index[index]
return df_data | Load data extracted and stored by :py:func:`extract`
Arguments:
src_dir {str} -- The directory where the data is stored.
Keyword Arguments:
patterns {str, or list of str} -- A pattern (str) or list of patterns (list)
to identify the variables to be loaded.
The default loads all variables, i.e. all .npy files. (default: {'*.npy'})
vars_in_cols {bool} -- Return the variables in columns (``True``) or rows ``False``
(default: {True})
index {pd.Series} -- A boolean pandas Series which indicates with ``True`` which samples to
load.
Returns:
pandas.DataFrame -- A dataframe with the data. |
379,912 | def dad_status_output_dad_status_entries_message(self, **kwargs):
config = ET.Element("config")
dad_status = ET.Element("dad_status")
config = dad_status
output = ET.SubElement(dad_status, "output")
dad_status_entries = ET.SubElement(output, "dad-status-entries")
message = ET.SubElement(dad_status_entries, "message")
message.text = kwargs.pop('message')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
379,913 | def get_space(self, space_key, expand='description.plain,homepage'):
url = 'rest/api/space/{space_key}?expand={expand}'.format(space_key=space_key,
expand=expand)
return self.get(url) | Get information about a space through space key
:param space_key: The unique space key name
:param expand: OPTIONAL: additional info from description, homepage
:return: Returns the space along with its ID |
379,914 | def client_id(self, client):
params = {
"name": client
}
response = self._get(url.clients, params=params)
self._check_response(response, 200)
return self._create_response(response).get("client_id") | Get a client's ID. Uses GET to /clients?name=<client> interface.
:Args:
* *client*: (str) Client's name
:Returns: (str) Client id |
379,915 | def df2chucks(din,chunksize,outd,fn,return_fmt='\t',force=False):
from os.path import exists
from os import makedirs
din.index=range(0,len(din),1)
chunkrange=list(np.arange(0,len(din),chunksize))
chunkrange=list(zip([c+1 if ci!=0 else 0 for ci,c in enumerate(chunkrange)],chunkrange[1:]+[len(din)-1]))
chunk2range={}
for ri,r in enumerate(chunkrange):
chunk2range[ri+1]=r
if not exists(outd):
makedirs(outd)
chunks=[]
chunkps=[]
for chunk in chunk2range:
chunkp=.format(outd,fn,chunk)
rnge=chunk2range[chunk]
din_=din.loc[rnge[0]:rnge[1],:]
if not exists(chunkp) or force:
if return_fmt=='list':
chunks.append(din_)
else:
din_.to_csv(chunkp,sep=return_fmt)
del din_
chunkps.append(chunkp)
if return_fmt=='list':
return chunks
else:
return chunkps | :param return_fmt: '\t': tab-separated file; similarly for other separators (e.g. '.'); 'list': returns a list of DataFrames |
379,916 | def load(cosmicFiles, tag=None, sat_id=None):
import netCDF4
num = len(cosmicFiles)
if num != 0:
output = pysat.DataFrame(load_files(cosmicFiles, tag=tag, sat_id=sat_id))
output.index = pysat.utils.create_datetime_index(year=output.year,
month=output.month, day=output.day,
uts=output.hour*3600.+output.minute*60.+output.second)
output.sort_index(inplace=True)
meta = pysat.Meta()
ind = 0
repeat = True
while repeat:
try:
data = netCDF4.Dataset(cosmicFiles[ind])
ncattrsList = data.ncattrs()
for d in ncattrsList:
meta[d] = {'units': '', 'long_name': d}
keys = data.variables.keys()
for key in keys:
meta[key] = {'units': data.variables[key].units,
'long_name': data.variables[key].long_name}
repeat = False
except RuntimeError:
ind+=1
return output, meta
else:
return pysat.DataFrame(None), pysat.Meta() | cosmic data load routine, called by pysat |
379,917 | def split_task_parameters(line):
if line is None:
result = []
else:
result = [parameter.strip() for parameter in line.split(",")]
return result | Split a string of comma separated words. |
379,918 | def add_field(self, fieldname, fieldspec=whoosh_module_fields.TEXT):
self._whoosh.add_field(fieldname, fieldspec)
return self._whoosh.schema | Add a field in the index of the model.
Args:
fieldname (Text): This parameters register a new field in specified model.
fieldspec (Name, optional): This option adds various options as were described before.
Returns:
TYPE: The new schema after the field is added is returned. |
379,919 | def _desy_bookkeeping(self, key, value):
return {
: normalize_date(value.get()),
: force_single_element(value.get()),
: value.get(),
} | Populate the ``_desy_bookkeeping`` key. |
379,920 | def safe_join(directory, *pathnames):
parts = [directory]
for filename in pathnames:
if filename != "":
filename = posixpath.normpath(filename)
for sep in _os_alt_seps:
if sep in filename:
return None
if os.path.isabs(filename) or filename == ".." or filename.startswith("../"):
return None
parts.append(filename)
return posixpath.join(*parts) | Safely join `directory` and one or more untrusted `pathnames`. If this
cannot be done, this function returns ``None``.
:param directory: the base directory.
:param pathnames: the untrusted pathnames relative to that directory. |
379,921 | def get_transform(offset, scale):
return pd.DataFrame([[scale, 0, offset.x], [0, scale, offset.y],
[0, 0, 1]], index=['x', 'y', 'w']) | Parameters
----------
offset : pandas.Series
Cartesian ``(x, y)`` coordinate of offset origin.
scale : pandas.Series
Scaling factor for ``x`` and ``y`` dimensions.
Returns
-------
pandas.DataFrame
3x3 transformation matrix resulting in specified `x/y` offset and
scale. **Note that third row label is ``w`` and not ``z``).** |
379,922 | def setex(self, name, time, value):
if isinstance(time, datetime.timedelta):
time = int(time.total_seconds())
return self.execute_command('SETEX', name, time, value) | Set the value of key ``name`` to ``value`` that expires in ``time``
seconds. ``time`` can be represented by an integer or a Python
timedelta object. |
379,923 | def expand_item(self, item, open_all=True):
self.expand_row(self._view_path_for(item), open_all) | Display a node as expanded
:param item: The item to show expanded
:param open_all: Whether all child nodes should be recursively
expanded. |
379,924 | def addChromosome(
self, chrom, tax_id, tax_label=None, build_id=None, build_label=None):
family = Family(self.graph)
chr_id = makeChromID(str(chrom), tax_id)
if tax_label is not None:
chr_label = makeChromLabel(chrom, tax_label)
else:
chr_label = makeChromLabel(chrom)
genome_id = self.makeGenomeID(tax_id)
self.model.addClassToGraph(chr_id, chr_label, self.globaltt['chromosome'])
self.addTaxon(tax_id, genome_id)
if build_id is not None:
chrinbuild_id = makeChromID(chrom, build_id)
if build_label is None:
build_label = build_id
chrinbuild_label = makeChromLabel(chrom, build_label)
self.model.addIndividualToGraph(chrinbuild_id, chrinbuild_label, chr_id)
family.addMember(build_id, chrinbuild_id)
family.addMemberOf(chrinbuild_id, build_id)
return | if it's just the chromosome, add it as an instance of a SO:chromosome,
and add it to the genome. If a build is included,
punn the chromosome as a subclass of SO:chromsome, and make the
build-specific chromosome an instance of the supplied chr.
The chr then becomes part of the build or genome. |
379,925 | def gen_round_trip_stats(round_trips):
stats = {}
stats['pnl'] = agg_all_long_short(round_trips, 'pnl', PNL_STATS)
stats['summary'] = agg_all_long_short(round_trips, 'pnl',
SUMMARY_STATS)
stats['duration'] = agg_all_long_short(round_trips, 'duration',
DURATION_STATS)
stats['returns'] = agg_all_long_short(round_trips, 'returns',
RETURN_STATS)
stats['symbols'] = \
round_trips.groupby('symbol')['returns'].agg(RETURN_STATS).T
return stats | Generate various round-trip statistics.
Parameters
----------
round_trips : pd.DataFrame
DataFrame with one row per round trip trade.
- See full explanation in round_trips.extract_round_trips
Returns
-------
stats : dict
A dictionary where each value is a pandas DataFrame containing
various round-trip statistics.
See also
--------
round_trips.print_round_trip_stats |
379,926 | def build_rrule(count=None, interval=None, bysecond=None, byminute=None,
byhour=None, byweekno=None, bymonthday=None, byyearday=None,
bymonth=None, until=None, bysetpos=None, wkst=None, byday=None,
freq=None):
result = {}
if count is not None:
result['COUNT'] = count
if interval is not None:
result['INTERVAL'] = interval
if bysecond is not None:
result['BYSECOND'] = bysecond
if byminute is not None:
result['BYMINUTE'] = byminute
if byhour is not None:
result['BYHOUR'] = byhour
if byweekno is not None:
result['BYWEEKNO'] = byweekno
if bymonthday is not None:
result['BYMONTHDAY'] = bymonthday
if byyearday is not None:
result['BYYEARDAY'] = byyearday
if bymonth is not None:
result['BYMONTH'] = bymonth
if until is not None:
result['UNTIL'] = until
if bysetpos is not None:
result['BYSETPOS'] = bysetpos
if wkst is not None:
result['WKST'] = wkst
if byday is not None:
result['BYDAY'] = byday
if freq is not None:
if freq not in vRecur.frequencies:
raise ValueError(
'Frequency must be one of: {}'.format(vRecur.frequencies))
result['FREQ'] = freq
return result | Build rrule dictionary for vRecur class.
:param count: int
:param interval: int
:param bysecond: int
:param byminute: int
:param byhour: int
:param byweekno: int
:param bymonthday: int
:param byyearday: int
:param bymonth: int
:param until: datetime
:param bysetpos: int
:param wkst: str, two-letter weekday
:param byday: weekday
:param freq: str, frequency name ('WEEK', 'MONTH', etc)
:return: dict |
379,927 | def connect(self, recver):
r1 = recver
m1 = r1.middle
s2 = self
m2 = self.middle
r2 = self.other
r2.middle = m1
del m2.sender
del m2.recver
del m1.recver
m1.recver = weakref.ref(r2, m1.on_abandoned)
m1.recver_current = m2.recver_current
del r1.middle
del s2.middle
while True:
if getattr(r2, 'downstream', None) is None:
break
r2 = r2.downstream.other
return r2 | Rewire:
s1 -> m1 <- r1 --> s2 -> m2 <- r2
To:
s1 -> m1 <- r2 |
379,928 | def raw_datastream_old(request, pid, dsid, type=None, repo=None,
headers=None, accept_range_request=False,
as_of_date=None, streaming=False):
DISABLED
if repo is None:
repo = Repository()
if headers is None:
headers = {}
get_obj_opts = {}
if type is not None:
get_obj_opts['type'] = type
obj = repo.get_object(pid, **get_obj_opts)
range_request = False
partial_request = False
try:
ds = obj.getDatastreamObject(dsid, as_of_date=as_of_date)
if ds and ds.exists:
if request.method == 'HEAD':
content = ''
elif accept_range_request and request.META.get('HTTP_RANGE', None) is not None:
rng = request.META['HTTP_RANGE']
logger.debug('HTTP Range request: %s', rng)
range_request = True
kind, numbers = rng.split('=')
if kind != 'bytes':
return HttpResponseRangeNotSatisfiable()
try:
start, end = numbers.split('-')
except ValueError:
return HttpResponseRangeNotSatisfiable()
start = int(start)
if not end:
end = ds.info.size - 1
else:
end = int(end)
if end < start:
return HttpResponseRangeNotSatisfiable()
if start == end:
if int(response['Content-Length']) < 0:
del response['Content-Length']
return response
else:
raise Http404
except RequestFailed as rf:
raise | .. NOTE::
This version of :meth:`raw_datastream` is deprecated, and you
should update to the new :meth:`raw_datastream`. This version
is still available if you are using a version of Fedora
prior to 3.7 and need the additional functionality.
View to display a raw datastream that belongs to a Fedora Object.
Returns an :class:`~django.http.HttpResponse` with the response content
populated with the content of the datastream. The following HTTP headers
may be included in all the responses:
- Content-Type: mimetype of the datastream in Fedora
- ETag: datastream checksum, as long as the checksum type is not 'DISABLED'
The following HTTP headers may be set if the appropriate content is included
in the datastream metadata:
- Content-MD5: MD5 checksum of the datastream in Fedora, if available
- Content-Length: size of the datastream in Fedora
If either the datastream or object are not found, raises an
:class:`~django.http.Http404` . For any other errors (e.g., permission
denied by Fedora), the exception is re-raised and should be handled elsewhere.
:param request: HttpRequest
:param pid: Fedora object PID
:param dsid: datastream ID to be returned
:param type: custom object type (should extend
:class:`~eulcore.fedora.models.DigitalObject`) (optional)
:param repo: :class:`~eulcore.django.fedora.server.Repository` instance to use,
in case your application requires custom repository initialization (optional)
:param headers: dictionary of additional headers to include in the response
:param accept_range_request: enable HTTP Range requests (disabled by default)
:param as_of_date: access a historical version of the datastream
:param streaming: if True, response will be returned as an instance of
:class:`django.http.StreamingHttpResponse` instead of
:class:`django.http.HttpResponse`; intended for use with large
datastreams, defaults to False. |
379,929 | def get_gradient_y(shape, py):
import scipy.sparse
height, width = shape
rows = []
empty = scipy.sparse.dia_matrix((width, width))
identity = scipy.sparse.identity(width)
for n in range(py):
row = [empty]*n
row += [-identity, identity]
row += [empty]*(height-n-2)
rows.append(row)
rows.append([empty]*height)
for n in range(height-py-1):
row = [empty]*(py+n)
row += [identity, -identity]
row += [empty]*(height-py-n-2)
rows.append(row)
return scipy.sparse.bmat(rows) | Calculate the gradient in the y direction to the line at py
The y gradient operator is a block matrix, where each block is the size of the image width.
The matrix itself is made up of (img_height x img_height) blocks, most of which are all zeros. |
379,930 | def loadSenderKey(self, senderKeyName):
q = "SELECT record FROM sender_keys WHERE group_id = ? and sender_id = ?"
cursor = self.dbConn.cursor()
cursor.execute(q, (senderKeyName.getGroupId(), senderKeyName.getSender().getName()))
result = cursor.fetchone()
if not result:
return SenderKeyRecord()
return SenderKeyRecord(serialized = result[0]) | :type senderKeyName: SenderKeyName |
379,931 | def p_for_stmt(p):
if len(p) == 8:
if not isinstance(p[2], node.ident):
raise_exception(SyntaxError, "Not implemented: for loop", new_lexer)
p[2].props = "I"
p[0] = node.for_stmt(ident=p[2], expr=p[4], stmt_list=p[6]) | for_stmt : FOR ident EQ expr SEMI stmt_list END_STMT
| FOR LPAREN ident EQ expr RPAREN SEMI stmt_list END_STMT
| FOR matrix EQ expr SEMI stmt_list END_STMT |
379,932 | def mp2q(p, q):
p, q = flatten(p), flatten(q)
entropy_dist = 1 / len(p)
return sum(entropy_dist * np.nan_to_num((p ** 2) / q * np.log(p / q))) | Compute the MP2Q measure.
Args:
p (np.ndarray): The unpartitioned repertoire
q (np.ndarray): The partitioned repertoire |
379,933 | def _generate_subscribe_headers(self):
headers =[]
headers.append((, self.eventhub_client.zone_id))
token = self.eventhub_client.service._get_bearer_token()
headers.append((, self._config.subscriber_name))
headers.append((, token[(token.index() + 1):]))
if self._config.topics is []:
headers.append((, self.eventhub_client.zone_id + ))
else:
for topic in self._config.topics:
headers.append((, topic))
headers.append((, str(self._config.recency == self._config.Recency.NEWEST).lower()))
headers.append((, str(self._config.acks_enabled).lower()))
if self._config.acks_enabled:
headers.append((, str(self._config.ack_max_retries)))
headers.append((, str(self._config.ack_retry_interval_seconds) + ))
headers.append((, str(self._config.ack_duration_before_retry_seconds) + ))
if self._config.batching_enabled:
headers.append((, str(self._config.batch_size)))
headers.append((, str(self._config.batch_interval_millis) + ))
return headers | generate the subscribe stub headers based on the supplied config
:return: list of (header name, value) tuples |
379,934 | def authorize_handler(self, f):
@wraps(f)
def decorated(*args, **kwargs):
server = self.server
uri, http_method, body, headers = extract_params()
if request.method in ('GET', 'HEAD'):
redirect_uri = request.args.get('redirect_uri', self.error_uri)
log.debug('Found redirect_uri %s.', redirect_uri)
try:
ret = server.validate_authorization_request(
uri, http_method, body, headers
)
scopes, credentials = ret
kwargs['scopes'] = scopes
kwargs.update(credentials)
except oauth2.FatalClientError as e:
log.debug('Fatal client error %r', e, exc_info=True)
return self._on_exception(e, e.in_uri(self.error_uri))
except oauth2.OAuth2Error as e:
log.debug('OAuth2Error: %r', e, exc_info=True)
state = request.values.get('state')
if state and not e.state:
e.state = state
return self._on_exception(e, e.in_uri(redirect_uri))
if not isinstance(rv, bool):
return rv
if not rv:
e = oauth2.AccessDeniedError(state=request.values.get('state'))
return self._on_exception(e, e.in_uri(redirect_uri))
return self.confirm_authorization_request()
return decorated | Authorization handler decorator.
This decorator will sort the parameters and headers out, and
pre validate everything::
@app.route('/oauth/authorize', methods=['GET', 'POST'])
@oauth.authorize_handler
def authorize(*args, **kwargs):
if request.method == 'GET':
# render a page for user to confirm the authorization
return render_template('oauthorize.html')
confirm = request.form.get('confirm', 'no')
return confirm == 'yes' |
379,935 | def do_run_one(self, args):
work_spec_names = args.from_work_spec or None
worker = SingleWorker(self.config, task_master=self.task_master, work_spec_names=work_spec_names, max_jobs=args.max_jobs)
worker.register()
rc = False
starttime = time.time()
count = 0
try:
while True:
rc = worker.run()
if not rc:
break
count += 1
if (args.limit_seconds is None) and (args.limit_count is None):
break
if (args.limit_seconds is not None) and ((time.time() - starttime) >= args.limit_seconds):
break
if (args.limit_count is not None) and (count >= args.limit_count):
break
finally:
worker.unregister()
if not rc:
self.exitcode = 2 | run a single job |
379,936 | def format_rst(self):
res = ''
num_cols = len(self.header)
col_width = 25
for _ in range(num_cols):
res += ''.join(['=' for _ in range(col_width - 1)]) + ' '
res += '\n'
for c in self.header:
res += c.ljust(col_width)
res += '\n'
for _ in range(num_cols):
res += ''.join(['=' for _ in range(col_width - 1)]) + ' '
res += '\n'
for row in self.arr:
for c in row:
res += self.force_to_string(c).ljust(col_width)
res += '\n'
for _ in range(num_cols):
res += ''.join(['=' for _ in range(col_width - 1)]) + ' '
res += '\n'
return res | return table in RST format |
379,937 | def preserve_set_th1_add_directory(state=True):
with LOCK:
status = ROOT.TH1.AddDirectoryStatus()
try:
ROOT.TH1.AddDirectory(state)
yield
finally:
ROOT.TH1.AddDirectory(status) | Context manager to temporarily set TH1.AddDirectory() state |
379,938 | def get_lldp_neighbor_detail_output_lldp_neighbor_detail_remaining_life(self, **kwargs):
config = ET.Element("config")
get_lldp_neighbor_detail = ET.Element("get_lldp_neighbor_detail")
config = get_lldp_neighbor_detail
output = ET.SubElement(get_lldp_neighbor_detail, "output")
lldp_neighbor_detail = ET.SubElement(output, "lldp-neighbor-detail")
local_interface_name_key = ET.SubElement(lldp_neighbor_detail, "local-interface-name")
local_interface_name_key.text = kwargs.pop('local_interface_name')
remote_interface_name_key = ET.SubElement(lldp_neighbor_detail, "remote-interface-name")
remote_interface_name_key.text = kwargs.pop('remote_interface_name')
remaining_life = ET.SubElement(lldp_neighbor_detail, "remaining-life")
remaining_life.text = kwargs.pop('remaining_life')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
379,939 | def unique_slugify(instance, value, slug_field_name='slug', queryset=None,
slug_separator='-'):
slug_field = instance._meta.get_field(slug_field_name)
slug = getattr(instance, slug_field.attname)
slug_len = slug_field.max_length
slug = slugify(value)
if slug_len:
slug = slug[:slug_len]
slug = _slug_strip(slug, slug_separator)
original_slug = slug
next += 1
setattr(instance, slug_field.attname, slug) | Calculates and stores a unique slug of ``value`` for an instance.
``slug_field_name`` should be a string matching the name of the field to
store the slug in (and the field to check against for uniqueness).
``queryset`` usually doesn't need to be explicitly provided - it'll default
to using the ``.all()`` queryset from the model's default manager. |
379,940 | def remove_volatile(type_):
nake_type = remove_alias(type_)
if not is_volatile(nake_type):
return type_
else:
if isinstance(nake_type, cpptypes.array_t):
is_c = is_const(nake_type)
if is_c:
base_type_ = nake_type.base.base.base
else:
base_type_ = nake_type.base.base
result_type = base_type_
if is_c:
result_type = cpptypes.const_t(result_type)
return cpptypes.array_t(result_type, nake_type.size)
return nake_type.base | removes volatile from the type definition
If type is not volatile type, it will be returned as is |
379,941 | def data_received(self, data):
self.tokenizer.feed(data)
while self.tokenizer.has_tokens():
raw = self.tokenizer.get_next_token()
frame = frame_from_raw(raw)
if frame is not None:
self.frame_received_cb(frame) | Handle data received. |
379,942 | def target(self):
task = yield self.task()
if not task:
yield defer.succeed(None)
defer.returnValue(None)
defer.returnValue(task.target) | Find the target name for this build.
:returns: deferred that when fired returns the build task's target
name. If we could not determine the build task, or the task's
target, return None. |
379,943 | def hpss_demo(input_file, output_harmonic, output_percussive):
print('Loading ', input_file)
y, sr = librosa.load(input_file)
print('Separating harmonics and percussives... ')
y_harmonic, y_percussive = librosa.effects.hpss(y)
print('Saving harmonic audio to: ', output_harmonic)
librosa.output.write_wav(output_harmonic, y_harmonic, sr)
print('Saving percussive audio to: ', output_percussive)
librosa.output.write_wav(output_percussive, y_percussive, sr) | HPSS demo function.
:parameters:
- input_file : str
path to input audio
- output_harmonic : str
path to save output harmonic (wav)
- output_percussive : str
path to save output harmonic (wav) |
379,944 | def flexifunction_directory_send(self, target_system, target_component, directory_type, start_index, count, directory_data, force_mavlink1=False):
return self.send(self.flexifunction_directory_encode(target_system, target_component, directory_type, start_index, count, directory_data), force_mavlink1=force_mavlink1) | Acknowldge sucess or failure of a flexifunction command
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
directory_type : 0=inputs, 1=outputs (uint8_t)
start_index : index of first directory entry to write (uint8_t)
count : count of directory entries to write (uint8_t)
directory_data : Settings data (int8_t) |
379,945 | def colorize(self, colormap):
if self.mode not in ("L", "LA"):
raise ValueError("Image should be grayscale to colorize")
if self.mode == "LA":
alpha = self.channels[1]
else:
alpha = None
self.channels = colormap.colorize(self.channels[0])
if alpha is not None:
self.channels.append(alpha)
self.mode = "RGBA"
else:
self.mode = "RGB" | Colorize the current image using
*colormap*. Works only on "L" or "LA" images. |
379,946 | def find(node, filter_=None, stop=None, maxlevel=None):
return _find(node, filter_=filter_, stop=stop, maxlevel=maxlevel) | Search for *single* node matching `filter_` but stop at `maxlevel` or `stop`.
Return matching node.
Args:
node: top node, start searching.
Keyword Args:
filter_: function called with every `node` as argument, `node` is returned if `True`.
stop: stop iteration at `node` if `stop` function returns `True` for `node`.
maxlevel (int): maximum decending in the node hierarchy.
Example tree:
>>> from anytree import Node, RenderTree, AsciiStyle
>>> f = Node("f")
>>> b = Node("b", parent=f)
>>> a = Node("a", parent=b)
>>> d = Node("d", parent=b)
>>> c = Node("c", parent=d)
>>> e = Node("e", parent=d)
>>> g = Node("g", parent=f)
>>> i = Node("i", parent=g)
>>> h = Node("h", parent=i)
>>> print(RenderTree(f, style=AsciiStyle()).by_attr())
f
|-- b
| |-- a
| +-- d
| |-- c
| +-- e
+-- g
+-- i
+-- h
>>> find(f, lambda node: node.name == "d")
Node('/f/b/d')
>>> find(f, lambda node: node.name == "z")
>>> find(f, lambda node: b in node.path) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
anytree.search.CountError: Expecting 1 elements at maximum, but found 5. (Node('/f/b')... Node('/f/b/d/e')) |
379,947 | def combine_metadata(*metadata_objects, **kwargs):
average_times = kwargs.get('average_times', True)
shared_keys = None
info_dicts = []
for metadata_object in metadata_objects:
if isinstance(metadata_object, dict):
metadata_dict = metadata_object
elif hasattr(metadata_object, "attrs"):
metadata_dict = metadata_object.attrs
else:
continue
info_dicts.append(metadata_dict)
if shared_keys is None:
shared_keys = set(metadata_dict.keys())
else:
shared_keys &= set(metadata_dict.keys())
shared_info = {}
for k in shared_keys:
values = [nfo[k] for nfo in info_dicts]
any_arrays = any([isinstance(val, np.ndarray) for val in values])
if any_arrays:
if all(np.all(val == values[0]) for val in values[1:]):
shared_info[k] = values[0]
elif 'time' in k and isinstance(values[0], datetime) and average_times:
shared_info[k] = average_datetimes(values)
elif all(val == values[0] for val in values[1:]):
shared_info[k] = values[0]
return shared_info | Combine the metadata of two or more Datasets.
If any keys are not equal or do not exist in all provided dictionaries
then they are not included in the returned dictionary.
By default any keys with the word 'time' in them and consisting
of datetime objects will be averaged. This is to handle cases where
data were observed at almost the same time but not exactly.
Args:
*metadata_objects: MetadataObject or dict objects to combine
average_times (bool): Average any keys with 'time' in the name
Returns:
dict: the combined metadata |
379,948 | def enable(self):
ret = self.get('enable')
if ret is None:
return False
else:
return ret.lower().startswith('true') | Return True|False if the AMP is enabled in the configuration file (enable=true|false). |
379,949 | def entrez_sets_of_results(url, retstart=False, retmax=False, count=False) -> Optional[List[requests.Response]]:
if not retstart:
retstart = 0
if not retmax:
retmax = 500
if not count:
count = retmax
retmax = 500
while retstart < count:
diff = count - retstart
if diff < 500:
retmax = diff
_url = url + f'&retstart={retstart}&retmax={retmax}'
resp = entrez_try_get_multiple_times(_url)
if resp is None:
return
retstart += retmax
yield resp | Gets sets of results back from Entrez.
Entrez can only return 500 results at a time. This creates a generator that gets results by incrementing
retstart and retmax.
Parameters
----------
url : str
The Entrez API url to use.
retstart : int
Return values starting at this index.
retmax : int
Return at most this number of values.
count : int
The number of results returned by EQuery.
Yields
------
requests.Response |
379,950 | def _checkstatus(status, line):
newstatus = 0
if status == 0:
if _islinetype(line, GRPSTART):
newstatus = 1
elif _isfinal(line):
newstatus = 4
elif status == 1:
if _islinetype(line, GRPSTART):
newstatus = 1
elif _islinetype(line, GRPEND):
newstatus = 3
elif _isassignment(line):
newstatus = 2
elif status == 2:
if _islinetype(line, GRPEND):
newstatus = 3
elif _isassignment(line):
newstatus = 2
elif status == 3:
if _islinetype(line, GRPSTART):
newstatus = 1
elif _islinetype(line, GRPEND):
newstatus = 3
elif _isfinal(line):
newstatus = 4
if newstatus != 0:
return newstatus
elif status != 4:
raise MTLParseError(
"Cannot parse the following line after status "
+ ":\n%s" % (STATUSCODE[status], line)) | Returns state/status after reading the next line.
The status codes are::
0 - BEGIN parsing; 1 - ENTER METADATA GROUP, 2 - READ METADATA LINE,
3 - END METADATA GROUP, 4 - END PARSING
Permitted Transitions::
0 --> 1, 0 --> 4
1 --> 1, 1 --> 2, 1 --> 3
2 --> 2, 2 --> 3
3 --> 1, 3 --> 3, 3 --> 4 |
379,951 | def get_bucket(self, hash_name, bucket_key):
results = []
for row in self._get_bucket_rows(hash_name, bucket_key):
val_dict = pickle.loads(row)
if 'sparse' in val_dict:
row = []
col = []
data = []
for e in val_dict['nonzeros']:
row.append(e[0])
data.append(e[1])
col.append(0)
coo_row = numpy.array(row, dtype=numpy.int32)
coo_col = numpy.array(col, dtype=numpy.int32)
coo_data = numpy.array(data)
vector = scipy.sparse.coo_matrix((coo_data, (coo_row, coo_col)), shape=(val_dict['dim'], 1))
else:
vector = numpy.fromstring(val_dict['vector'],
dtype=val_dict['dtype'])
results.append((vector, val_dict.get('data')))
return results | Returns bucket content as list of tuples (vector, data). |
379,952 | def _add_grid_attributes(self, ds):
for name_int, names_ext in self._grid_attrs.items():
ds_coord_name = set(names_ext).intersection(set(ds.coords) |
set(ds.data_vars))
model_attr = getattr(self.model, name_int, None)
if ds_coord_name and (model_attr is not None):
ds = ds.rename({list(ds_coord_name)[0]: name_int})
ds = ds.set_coords(name_int)
if not np.array_equal(ds[name_int], model_attr):
if np.allclose(ds[name_int], model_attr):
msg = ("Values for are nearly (but not exactly) "
"the same in the Run {1} and the Model {2}. "
"Therefore replacing Runs.".format(name_int, self.run,
self.model))
logging.info(msg)
ds[name_int].values = model_attr.values
else:
msg = ("Model coordinates for do not match those"
" in Run: {1} vs. {2}"
"".format(name_int, ds[name_int], model_attr))
logging.info(msg)
else:
ds = ds.load()
if model_attr is not None:
ds[name_int] = model_attr
ds = ds.set_coords(name_int)
if (self.dtype_in_vert == 'pressure' and
internal_names.PLEVEL_STR in ds.coords):
self.pressure = ds.level
return ds | Add model grid attributes to a dataset |
379,953 | def straddle(self, strike, expiry):
_rows = {}
_prices = {}
for _opttype in _constants.OPTTYPES:
_rows[_opttype] = _relevant_rows(self.data, (strike, expiry, _opttype,),
"No key for {} strike {} {}".format(expiry, strike, _opttype))
_prices[_opttype] = _getprice(_rows[_opttype])
_eq = _rows[_constants.OPTTYPES[0]].loc[:, 'Underlying_Price'].values[0]
_qt = _rows[_constants.OPTTYPES[0]].loc[:, 'Quote_Time'].values[0]
_index = ['Call', 'Put', 'Credit', 'Underlying_Price', 'Quote_Time']
_vals = np.array([_prices['call'], _prices['put'], _prices['call'] + _prices['put'], _eq, _qt])
return pd.DataFrame(_vals, index=_index, columns=['Value']) | Metrics for evaluating a straddle.
Parameters
------------
strike : numeric
Strike price.
expiry : date or date str (e.g. '2015-01-01')
Expiration date.
Returns
------------
metrics : DataFrame
Metrics for evaluating straddle. |
379,954 | def stream(self, from_=values.unset, to=values.unset,
date_created_on_or_before=values.unset,
date_created_after=values.unset, limit=None, page_size=None):
limits = self._version.read_limits(limit, page_size)
page = self.page(
from_=from_,
to=to,
date_created_on_or_before=date_created_on_or_before,
date_created_after=date_created_after,
page_size=limits['page_size'],
)
return self._version.stream(page, limits['limit'], limits['page_limit']) | Streams FaxInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param unicode from_: Retrieve only those faxes sent from this phone number
:param unicode to: Retrieve only those faxes sent to this phone number
:param datetime date_created_on_or_before: Retrieve only faxes created on or before this date
:param datetime date_created_after: Retrieve only faxes created after this date
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.fax.v1.fax.FaxInstance] |
379,955 | def create_app(debug=False):
app = Flask(__name__)
app.secret_key = str(uuid.uuid4())
app.json_encoder = DateTimeEncoder
app.register_blueprint(page)
Bower(app)
api = Api(app)
api.add_resource(
PluginListAPI,
api_path + "plugins/",
endpoint="APIPlugins"
)
api.add_resource(
PluginAPI,
api_path + "plugins/<plugin_key>",
endpoint="APIPlugin"
)
api.add_resource(
PluginResourceAPI,
api_path + "plugins/<plugin_key>/resources/",
endpoint="APIPluginResource"
)
if debug:
app.debug = True
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
@app.after_request
def add_header(response):
response.headers['Cache-Control'] = "public, max-age=0"
return response
return app | Create the flask app
:param debug: Use debug mode
:type debug: bool
:return: Created app
:rtype: flask.Flask |
379,956 | def make_url(path, protocol=None, hosts=None):
protocol = 'https://' if not protocol else protocol
host = hosts[random.randrange(len(hosts))] if hosts else 'archive.org'
return protocol + host + path.strip() | Make an URL given a path, and optionally, a protocol and set of
hosts to select from randomly.
:param path: The Archive.org path.
:type path: str
:param protocol: (optional) The HTTP protocol to use. "https://" is
used by default.
:type protocol: str
:param hosts: (optional) A set of hosts. A host will be chosen at
random. The default host is "archive.org".
:type hosts: iterable
:rtype: str
:returns: An Absolute URI. |
379,957 | def get_users_for_assigned_to():
User = get_user_model()
return User.objects.filter(is_active=True, is_staff=True) | Return a list of users who can be assigned to workflow states |
379,958 | def get_by_name(self, name):
san_managers = self._client.get_all()
result = [x for x in san_managers if x['name'] == name]
return result[0] if result else None | Gets a SAN Manager by name.
Args:
name: Name of the SAN Manager
Returns:
dict: SAN Manager. |
379,959 | def get_currency(self, code):
for currency in self.currencies:
if currency['shortname'] == code:
return currency
raise RuntimeError("%s: %s not found" % (self.name, code)) | Helper function
Returns a dict containing:
shortname (the code)
longname
users - a comma separated list of countries/regions/cities that use it
alternatives - alternative names, e.g. ewro, Quid, Buck
symbol - e.g. £, $
highlight - ? |
379,960 | def _replace_with_specific_page(page, menu_item):
if type(page) is Page:
page = page.specific
if isinstance(menu_item, MenuItem):
menu_item.link_page = page
else:
menu_item = page
return page, menu_item | If ``page`` is a vanilla ``Page` object, replace it with a 'specific'
version of itself. Also update ``menu_item``, depending on whether it's
a ``MenuItem`` object or a ``Page`` object. |
379,961 | def get_attributes(self, dataset):
attributes = self.attributes(dataset)
attr_ = [ (k, v[0]) for k, v in attributes.items()]
return pd.DataFrame(attr_, columns=["Attribute","Description"]) | Get available attritbutes from dataset you've selected |
379,962 | def _insert_html(self, cursor, html):
cursor.beginEditBlock()
cursor.insertHtml(html)
cursor.movePosition(QtGui.QTextCursor.Left,
QtGui.QTextCursor.KeepAnchor)
if cursor.selection().toPlainText() == ' ':
cursor.removeSelectedText()
else:
cursor.movePosition(QtGui.QTextCursor.Right)
cursor.insertText(' ', QtGui.QTextCharFormat())
cursor.endEditBlock() | Inserts HTML using the specified cursor in such a way that future
formatting is unaffected. |
379,963 | def on_startup(self, callback: callable, polling=True, webhook=True):
self._check_frozen()
if not webhook and not polling:
warn(, UserWarning)
return
if isinstance(callback, (list, tuple, set)):
for cb in callback:
self.on_startup(cb, polling, webhook)
return
if polling:
self._on_startup_polling.append(callback)
if webhook:
self._on_startup_webhook.append(callback) | Register a callback for the startup process
:param callback:
:param polling: use with polling
:param webhook: use with webhook |
379,964 | def price_unit(self):
currency = self.currency
consumption_unit = self.consumption_unit
if not currency or not consumption_unit:
_LOGGER.error("Could not find price_unit.")
return " "
return currency + "/" + consumption_unit | Return the price unit. |
379,965 | def sample(self, bqm, num_reads=10):
values = tuple(bqm.vartype.value)
def _itersample():
for __ in range(num_reads):
sample = {v: choice(values) for v in bqm.linear}
energy = bqm.energy(sample)
yield sample, energy
samples, energies = zip(*_itersample())
return SampleSet.from_samples(samples, bqm.vartype, energies) | Give random samples for a binary quadratic model.
Variable assignments are chosen by coin flip.
Args:
bqm (:obj:`.BinaryQuadraticModel`):
Binary quadratic model to be sampled from.
num_reads (int, optional, default=10):
Number of reads.
Returns:
:obj:`.SampleSet` |
379,966 | def plot_welch_perdiogram(x, fs, nperseg):
import scipy.signal
import numpy
import matplotlib.pyplot as plt
N = len(x)
time = numpy.arange(N) / fs
f, Pxx_den = scipy.signal.welch(x, fs, nperseg=nperseg)
plt.semilogy(f, Pxx_den)
plt.ylim([0.5e-3, 1])
plt.xlabel('frequency [Hz]')
plt.ylabel('PSD [V**2/Hz]')
plt.show()
numpy.mean(Pxx_den[256:])
f, Pxx_spec = scipy.signal.welch(x, fs, 'flattop', 1024,
scaling='spectrum')
plt.figure()
plt.semilogy(f, numpy.sqrt(Pxx_spec))
plt.xlabel('frequency [Hz]')
plt.ylabel('Linear spectrum [V RMS]')
plt.show()
return None | Plot Welch perdiogram
Args
----
x: ndarray
Signal array
fs: float
Sampling frequency
nperseg: float
Length of each data segment in PSD |
379,967 | def _account_table(accounts):
accountmap = {}
for acc in accounts:
accountmap[acc.address] = acc
for alias in acc.aliases:
accountmap[alias] = acc
return accountmap | creates a lookup table (emailaddress -> account) for a given list of
accounts
:param accounts: list of accounts
:type accounts: list of `alot.account.Account`
:returns: hashtable
:rvalue: dict (str -> `alot.account.Account`) |
379,968 | def fetch_url(url):
with closing(urllib.urlopen(url)) as f:
if f.code is 200:
response = f.read()
return strip_formfeeds(response).decode(ENCODING) | Fetch the given url, strip formfeeds and decode
it into the defined encoding |
379,969 | def _readClusterSettings(self):
instanceMetaData = get_instance_metadata()
region = zoneToRegion(self._zone)
conn = boto.ec2.connect_to_region(region)
instance = conn.get_all_instances(instance_ids=[instanceMetaData["instance-id"]])[0].instances[0]
self.clusterName = str(instance.tags["Name"])
self._buildContext()
self._subnetID = instance.subnet_id
self._leaderPrivateIP = instanceMetaData['local-ipv4']
self._keyName = list(instanceMetaData['public-keys'].keys())[0]
self._tags = self.getLeader().tags
self._masterPublicKey = self._setSSH() | Reads the cluster settings from the instance metadata, which assumes the instance
is the leader. |
379,970 | def exec_(self, columns=(), by=(), where=(), **kwds):
return self._seu('exec', columns, by, where, kwds) | exec from self
>>> t = q('([]a:1 2 3; b:10 20 30)')
>>> t.exec_('a', where='b > 10').show()
2 3 |
379,971 | def kube_pod_status_phase(self, metric, scraper_config):
metric_name = scraper_config['namespace'] + '.pod.status_phase'
status_phase_counter = Counter()
for sample in metric.samples:
tags = [
self._label_to_tag('namespace', sample[self.SAMPLE_LABELS], scraper_config),
self._label_to_tag('phase', sample[self.SAMPLE_LABELS], scraper_config),
] + scraper_config['custom_tags']
status_phase_counter[tuple(sorted(tags))] += sample[self.SAMPLE_VALUE]
for tags, count in iteritems(status_phase_counter):
self.gauge(metric_name, count, tags=list(tags)) | Phase a pod is in. |
379,972 | def _extract_html_hex(string):
try:
hex_string = string and _hex_regexp().search(string).group(0) or ''
except AttributeError:
return None
if len(hex_string) == 3:
hex_string = hex_string[0] * 2 + hex_string[1] * 2 + hex_string[2] * 2
return hex_string | Get the first 3 or 6 hex digits in the string |
379,973 | def is_prime(n, mr_rounds=25):
if n <= first_primes[-1]:
return n in first_primes
for p in first_primes:
if n % p == 0:
return False
return miller_rabin(n, mr_rounds) | Test whether n is probably prime
See <https://en.wikipedia.org/wiki/Primality_test#Probabilistic_tests>
Arguments:
n (int): the number to be tested
mr_rounds (int, optional): number of Miller-Rabin iterations to run;
defaults to 25 iterations, which is what the GMP library uses
Returns:
bool: when this function returns False, `n` is composite (not prime);
when it returns True, `n` is prime with overwhelming probability |
379,974 | def cancel_signature_request(self, signature_request_id):
request = self._get_request()
request.post(url=self.SIGNATURE_REQUEST_CANCEL_URL + signature_request_id, get_json=False) | Cancels a SignatureRequest
Cancels a SignatureRequest. After canceling, no one will be able to sign
or access the SignatureRequest or its documents. Only the requester can
cancel and only before everyone has signed.
Args:
signing_request_id (str): The id of the signature request to cancel
Returns:
None |
379,975 | def sortby(self, variables, ascending=True):
from .dataarray import DataArray
if not isinstance(variables, list):
variables = [variables]
else:
variables = variables
variables = [v if isinstance(v, DataArray) else self[v]
for v in variables]
aligned_vars = align(self, *variables, join='left')
aligned_self = aligned_vars[0]
aligned_other_vars = aligned_vars[1:]
vars_by_dim = defaultdict(list)
for data_array in aligned_other_vars:
if data_array.ndim != 1:
raise ValueError("Input DataArray is not 1-D.")
if (data_array.dtype == object and
LooseVersion(np.__version__) < LooseVersion('1.11.0')):
raise NotImplementedError(
'sortby uses np.lexsort under the hood, which requires numpy be version 1.11.0 or later')
(key,) = data_array.dims
vars_by_dim[key].append(data_array)
indices = {}
for key, arrays in vars_by_dim.items():
order = np.lexsort(tuple(reversed(arrays)))
indices[key] = order if ascending else order[::-1]
return aligned_self.isel(**indices) | Sort object by labels or values (along an axis).
Sorts the dataset, either along specified dimensions,
or according to values of 1-D dataarrays that share dimension
with calling object.
If the input variables are dataarrays, then the dataarrays are aligned
(via left-join) to the calling object prior to sorting by cell values.
NaNs are sorted to the end, following Numpy convention.
If multiple sorts along the same dimension is
given, numpy's lexsort is performed along that dimension:
https://docs.scipy.org/doc/numpy/reference/generated/numpy.lexsort.html
and the FIRST key in the sequence is used as the primary sort key,
followed by the 2nd key, etc.
Parameters
----------
variables: str, DataArray, or list of either
1D DataArray objects or name(s) of 1D variable(s) in
coords/data_vars whose values are used to sort the dataset.
ascending: boolean, optional
Whether to sort by ascending or descending order.
Returns
-------
sorted: Dataset
A new dataset where all the specified dims are sorted by dim
labels. |
379,976 | def _gdcm_to_numpy(self, image):
gdcm_typemap = {
gdcm.PixelFormat.INT8: numpy.int8,
gdcm.PixelFormat.UINT8: numpy.uint8,
gdcm.PixelFormat.UINT16: numpy.uint16,
gdcm.PixelFormat.INT16: numpy.int16,
gdcm.PixelFormat.UINT32: numpy.uint32,
gdcm.PixelFormat.INT32: numpy.int32,
gdcm.PixelFormat.FLOAT32: numpy.float32,
gdcm.PixelFormat.FLOAT64: numpy.float64
}
pixel_format = image.GetPixelFormat().GetScalarType()
if pixel_format in gdcm_typemap:
self.data_type = gdcm_typemap[pixel_format]
else:
raise KeyError(''.join([str(pixel_format),
" is not a supported pixel format"]))
self.dimensions = image.GetDimension(1), image.GetDimension(0)
gdcm_array = image.GetBuffer()
if sys.version_info >= (3, 0):
gdcm_array = gdcm_array.encode(sys.getfilesystemencoding(), "surrogateescape")
dimensions = image.GetDimensions()
result = numpy.frombuffer(gdcm_array, dtype=self.data_type).astype(float)
if len(dimensions) == 3:
result.shape = dimensions[2], dimensions[0], dimensions[1]
else:
result.shape = dimensions
return result | Converts a GDCM image to a numpy array.
:param image: GDCM.ImageReader.GetImage() |
379,977 | def reset(self):
"Initialises all needed variables to default values"
self.metadata = {}
self.items = []
self.spine = []
self.guide = []
self.pages = []
self.toc = []
self.bindings = []
self.IDENTIFIER_ID = 'id'
self.FOLDER_NAME = 'EPUB'
self._id_html = 0
self._id_image = 0
self._id_static = 0
self.title = ''
self.language = 'en'
self.direction = None
self.templates = {
'ncx': NCX_XML,
'nav': NAV_XML,
'chapter': CHAPTER_XML,
'cover': COVER_XML
}
self.add_metadata('OPF', 'generator', '', {
'name': 'generator', 'content': 'Ebook-lib %s' % '.'.join([str(s) for s in VERSION])
})
self.set_identifier(str(uuid.uuid4()))
self.prefixes = []
self.namespaces = {} | Initialises all needed variables to default values |
379,978 | def ParseFromString(self, text, message):
if not isinstance(text, str):
text = text.decode()
return self.ParseLines(text.split('\n'), message) | Parses a text representation of a protocol message into a message. |
379,979 | def update(self, x, w=1):
self.n += w
if len(self) == 0:
self._add_centroid(Centroid(x, w))
return
S = self._find_closest_centroids(x)
while len(S) != 0 and w > 0:
j = choice(list(range(len(S))))
c_j = S[j]
q = self._compute_centroid_quantile(c_j)
if c_j.count + w > self._threshold(q):
S.pop(j)
continue
delta_w = min(self._threshold(q) - c_j.count, w)
self._update_centroid(c_j, x, delta_w)
w -= delta_w
S.pop(j)
if w > 0:
self._add_centroid(Centroid(x, w))
if len(self) > self.K / self.delta:
self.compress()
return | Update the t-digest with value x and weight w. |
379,980 | def parseValue(self, value):
if self.isVector():
return list(map(self._pythonType, value.split()))
if self.typ == 'bool':
return _parseBool(value)
return self._pythonType(value) | Parse the given value and return result. |
379,981 | def get_routers(self, context, router_ids=None, hd_ids=None):
cctxt = self.client.prepare(version='1.0')  # version string elided in source; '1.0' assumed
return cctxt.call(context, 'cfg_sync_routers', host=self.host,  # RPC method name elided in source; 'cfg_sync_routers' assumed
router_ids=router_ids, hosting_device_ids=hd_ids) | Make a remote process call to retrieve the sync data for routers.
:param context: session context
:param router_ids: list of routers to fetch
:param hd_ids: hosting device ids, only routers assigned to these
hosting devices will be returned. |
379,982 | def key(self, key, strictkey=None):
return self._select(self._pointer.key(key, strictkey)) | Return a chunk referencing a key in a mapping with the name 'key'. |
379,983 | def set_attributes(self, doc, fields,
parent_type=None, catch_all_field=None):
if parent_type:
assert isinstance(parent_type, Union)
super(Union, self).set_attributes(doc, fields, parent_type)
self.catch_all_field = catch_all_field
self.parent_type = parent_type | :param UnionField catch_all_field: The field designated as the
catch-all. This field should be a member of the list of fields.
See :meth:`Composite.set_attributes` for parameter definitions. |
379,984 | def insert_many(self, rows, chunk_size=1000, ensure=None, types=None):
chunk = []
for row in rows:
row = self._sync_columns(row, ensure, types=types)
chunk.append(row)
if len(chunk) == chunk_size:
chunk = pad_chunk_columns(chunk)
self.table.insert().execute(chunk)
chunk = []
if len(chunk):
chunk = pad_chunk_columns(chunk)
self.table.insert().execute(chunk) | Add many rows at a time.
This is significantly faster than adding them one by one. By default
the rows are processed in chunks of 1000 per commit, unless you specify
a different ``chunk_size``.
See :py:meth:`insert() <dataset.Table.insert>` for details on
the other parameters.
::
rows = [dict(name='Dolly')] * 10000
table.insert_many(rows) |
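A brief example of the chunked bulk insert in practice, assuming the `dataset` package and an in-memory SQLite database; table and column names are illustrative.

import dataset

db = dataset.connect("sqlite:///:memory:")
table = db["people"]

rows = [dict(name="Dolly", number=i) for i in range(10000)]
table.insert_many(rows, chunk_size=500)  # commits in chunks of 500 rows

print(len(table))                        # 10000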
379,985 | def route(self, fn, **kwargs):
new_kwargs = fn(**kwargs)
if not isinstance(new_kwargs, dict):
return new_kwargs
new_kwargs["url"] = kwargs
return self.render(**new_kwargs) | Route helper: apply the ``fn`` function while keeping the calling arguments (``kwargs``) available to other functions
:param fn: Function to run the route with
:type fn: function
:param kwargs: Parsed url arguments
:type kwargs: dict
:return: HTTP Response with rendered template
:rtype: flask.Response |
379,986 | def _formatFilepaths(self):
# NOTE: config key literals were lost in extraction; the 'output' section and key names below are assumed from the attribute names.
likedir = self['output']['likedir']
self.likefile = join(likedir, self['output']['likefile'])
self.mergefile = join(likedir, self['output']['mergefile'])
self.roifile = join(likedir, self['output']['roifile'])
searchdir = self['output']['searchdir']
self.labelfile = join(searchdir, self['output']['labelfile'])
self.objectfile = join(searchdir, self['output']['objectfile'])
self.assocfile = join(searchdir, self['output']['assocfile'])
self.candfile = join(searchdir, self['output']['candfile'])
mcmcdir = self['output']['mcmcdir']
self.mcmcfile = join(mcmcdir, self['output']['mcmcfile']) | Join dirnames and filenames from config. |
379,987 | def platform_detect():
pi = pi_version()
if pi is not None:
return RASPBERRY_PI
plat = platform.platform()
# NOTE: platform substrings were lost in extraction; the values below follow Adafruit_GPIO's platform detection and are assumed.
if plat.lower().find('armv7l-with-debian') > -1:
return BEAGLEBONE_BLACK
elif plat.lower().find('armv7l-with-ubuntu') > -1:
return BEAGLEBONE_BLACK
elif plat.lower().find('armv7l-with-glibc2.4') > -1:
return BEAGLEBONE_BLACK
elif plat.lower().find('tegra-aarch64-with-ubuntu') > -1:
return JETSON_NANO
try:
import mraa
if mraa.getPlatformName() == 'MinnowBoard MAX':
return MINNOWBOARD
except ImportError:
pass
return UNKNOWN | Detect if running on the Raspberry Pi or Beaglebone Black and return the
platform type. Will return RASPBERRY_PI, BEAGLEBONE_BLACK, or UNKNOWN. |
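A usage sketch for the detection helper, assuming it lives in a module like Adafruit_GPIO.Platform; the import path is an assumption, not confirmed by the source.

import Adafruit_GPIO.Platform as Platform

plat = Platform.platform_detect()
if plat == Platform.RASPBERRY_PI:
    print("Running on a Raspberry Pi")
elif plat == Platform.BEAGLEBONE_BLACK:
    print("Running on a BeagleBone Black")
else:
    print("Unknown platform; falling back to generic GPIO handling")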
379,988 | def leave_scope(self):
def entry_size(entry):
if entry.scope == SCOPE.global_ or \
entry.is_aliased:
return 0
if entry.class_ != CLASS.array:
return entry.size
return entry.memsize
for v in self.table[self.current_scope].values(filter_by_opt=False):
if not v.accessed:
if v.scope == SCOPE.parameter:
kind = 'parameter'  # string literal elided in source; 'parameter' assumed
v.accessed = True
warning_not_used(v.lineno, v.name, kind=kind)
entries = sorted(self.table[self.current_scope].values(filter_by_opt=True), key=entry_size)
offset = 0
for entry in entries:
if entry.class_ is CLASS.unknown:
self.move_to_global_scope(entry.name)
if entry.class_ in (CLASS.function, CLASS.label, CLASS.type_):
continue
if entry.class_ == CLASS.var and entry.scope == SCOPE.local:
if entry.alias is not None:
if entry.offset is None:
entry.offset = entry.alias.offset
else:
entry.offset = entry.alias.offset - entry.offset
else:
offset += entry_size(entry)
entry.offset = offset
if entry.class_ == CLASS.array and entry.scope == SCOPE.local:
entry.offset = entry_size(entry) + offset
offset = entry.offset
self.mangle = self[self.current_scope].parent_mangle
self.table.pop()
global_.LOOPS = global_.META_LOOPS.pop()
return offset | Ends a function body and pops current scope out of the symbol table. |
379,989 | def buscar_healthchecks(self, id_ambiente_vip):
url = 'environmentvip/get/healthcheck/' + str(id_ambiente_vip)  # endpoint path elided in source; assumed
code, xml = self.submit(None, 'GET', url)  # HTTP method literal elided in source; 'GET' assumed
return self.response(code, xml, ['healthcheck_opt']) | Search healthcheck by environmentvip_id
:return: Dictionary with the following structure:
::
{'healthcheck_opt': [{'name': <name>, 'id': <id>},...]}
:raise InvalidParameterError: Environment VIP identifier is null and invalid.
:raise EnvironmentVipNotFoundError: Environment VIP not registered.
:raise InvalidParameterError: id_ambiente_vip is null and invalid.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response. |
379,990 | def update_mode(arg_namespace):
try:
updater.update(custom_sources=arg_namespace.custom)
except (PermissionError, FileNotFoundError) as exception:
if isinstance(exception, PermissionError):
print()
if isinstance(exception, FileNotFoundError):
print(
) | Check command line arguments and run update function. |
379,991 | def to_netjson(self, remove_block=True):
result = OrderedDict()
intermediate_data = list(self.intermediate_data[self.intermediate_key])
for index, block in enumerate(intermediate_data):
if self.should_skip_block(block):
continue
if remove_block:
self.intermediate_data[self.intermediate_key].remove(block)
result = self.to_netjson_loop(block, result, index + 1)
return result | Converts the intermediate data structure (``self.intermediate_data``)
to a NetJSON configuration dictionary (``self.config``) |
379,992 | def update(self, num_iid, session, **kwargs):
request = TOPRequest('taobao.item.update')  # API method name elided in source; taken from the docstring
request['num_iid'] = num_iid
for k, v in kwargs.iteritems():
if k not in (, , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , ) and v==None: continue
if k == : k =
if k == : k =
request[k] = v
self.create(self.execute(request, session)['item'])  # response key elided in source; 'item' assumed
return self | taobao.item.update: update item information
Updates the item matching the given num_iid. The item identified by num_iid must belong to the user of the current session. Item properties must contain the SKU properties, and the item price must lie within the SKU price range (for example, if the SKUs are priced at 5 yuan and 10 yuan, the item price must be between 5 and 10 yuan, otherwise the update fails). The item category is correlated with the item price and the SKU prices (the exact relationship can be obtained via the category attribute query API). When a key attribute value is updated to "Other", input_pids and input_str must be supplied for the item update to succeed. |
379,993 | def add_auth(self, req, **kwargs):
# NOTE: header-name and format-string literals were lost in extraction; the values below follow boto's AWS3 handler and are assumed.
if 'Date' in req.headers:
del req.headers['Date']
req.headers['Date'] = formatdate(usegmt=True)
req.headers['X-Amz-Security-Token'] = self._provider.security_token
string_to_sign, headers_to_sign = self.string_to_sign(req)
boto.log.debug('StringToSign:\n%s' % string_to_sign)
hash_value = sha256(string_to_sign).digest()
b64_hmac = self.sign_string(hash_value)
s = "AWS3 AWSAccessKeyId=%s," % self._provider.access_key
s += "Algorithm=%s," % self.algorithm()
s += "SignedHeaders=%s," % ';'.join(headers_to_sign)
s += "Signature=%s" % b64_hmac
req.headers['X-Amzn-Authorization'] = s | Add AWS3 authentication to a request.
:type req: :class`boto.connection.HTTPRequest`
:param req: The HTTPRequest object. |
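The `sign_string` call above is not shown; here is a minimal sketch of the kind of HMAC-SHA256 signing it performs. The function name, key handling, and sample inputs are assumptions for illustration, not boto's exact implementation.

import base64
import hashlib
import hmac

def sign_string(secret_key: str, to_sign: bytes) -> str:
    # HMAC-SHA256 over the payload, base64-encoded for use in the Authorization header.
    digest = hmac.new(secret_key.encode("utf-8"), to_sign, hashlib.sha256).digest()
    return base64.b64encode(digest).decode("ascii")

print(sign_string("my-secret", b"example-string-to-sign"))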
379,994 | def GetBalance(self, asset_id, watch_only=0):
total = Fixed8(0)
if type(asset_id) is NEP5Token.NEP5Token:
return self.GetTokenBalance(asset_id, watch_only)
for coin in self.GetCoins():
if coin.Output.AssetId == asset_id:
if coin.State & CoinState.Confirmed > 0 and \
coin.State & CoinState.Spent == 0 and \
coin.State & CoinState.Locked == 0 and \
coin.State & CoinState.Frozen == 0 and \
coin.State & CoinState.WatchOnly == watch_only:
total = total + coin.Output.Value
return total | Get the balance of a specific token by its asset id.
Args:
asset_id (NEP5Token|TransactionOutput): an instance of type neo.Wallets.NEP5Token or neo.Core.TX.Transaction.TransactionOutput to get the balance from.
watch_only (bool): True, to limit to watch only wallets.
Returns:
Fixed8: total balance. |
379,995 | def _build(self, src, path, dest, mtime):
input_path = os.path.join(src, path)
output_paths = [os.path.join(dest, output) for output in
self._outputs(src, path)]
if path in self.failures and mtime <= self.failures[path]:
return
for output in output_paths:
try:
if \
os.path.exists(output) and \
mtime <= os.path.getmtime(output):
continue
except EnvironmentError:
pass
start = time.time()
try:
self.build(input_path, output_paths)
except Exception as e:
if isinstance(e, EnvironmentError):
logging.error("{0} failed after {1:.2f}s: {2}".format(
termcolor.colored(path, "red", attrs=["bold"]),
time.time() - start, e.args[0]
))
else:
logging.exception("{0} failed after {1:.2f}s".format(
termcolor.colored(path, "red", attrs=["bold"]),
time.time() - start
))
self.failures[path] = start
else:
logging.info("{0} completed in {1:.2f}s".format(
termcolor.colored(path, "green", attrs=["bold"]),
time.time() - start
))
self.failures.pop(path, None)
break | Calls `build` after testing that at least one output file (as
returned by `_outputs()`) does not exist or is older than `mtime`. If
the build fails, the build time is recorded and no other builds will be
attempted on `input` until this method is called with a larger mtime. |
379,996 | def pkg_blacklist(self):
blacklist = BlackList()
options = [
"-b",
"--blacklist"
]
flag = [
"--add",
"--remove"
]
command = ["list"]
if (len(self.args) == 2 and self.args[0] in options and
self.args[1] == command[0]):
blacklist.listed()
elif (len(self.args) > 2 and self.args[0] in options and
flag[0] in self.args):
self.args.remove(flag[0])
blacklist.add(self.args[1:])
elif (len(self.args) == 3 and self.args[0] in options and
"ALL" in self.args and flag[1] in self.args):
self.args.remove(flag[1])
blacklist.remove(blacklist.get_black())
elif (len(self.args) > 2 and self.args[0] in options and
flag[1] in self.args):
self.args.remove(flag[1])
blacklist.remove(self.args[1:])
else:
usage("") | Manage blacklist packages |
379,997 | def get_ntlm_response(self, flags, challenge, target_info=None, channel_binding=None):
if flags & NegotiateFlag.NTLMSSP_NTLM2_KEY and self._lm_compatibility < 3:
response, key = PasswordAuthentication.get_ntlm2_response(self._password, challenge, self._client_challenge)
elif 0 <= self._lm_compatibility < 3:
response, key = PasswordAuthentication.get_ntlmv1_response(self._password, challenge)
else:
if target_info is None:
target_info = TargetInfo()
if target_info[TargetInfo.NTLMSSP_AV_TIME] is None:
timestamp = PasswordAuthentication._get_ntlm_timestamp()
else:
timestamp = target_info[TargetInfo.NTLMSSP_AV_TIME][1]
response, key, target_info = PasswordAuthentication.get_ntlmv2_response(
self._domain, self._username, self._password.encode(), challenge,
self._client_challenge, timestamp, target_info)
return response, key, target_info | Computes the 24 byte NTLM challenge response given the 8 byte server challenge, along with the session key.
If NTLMv2 is used, the TargetInfo structure must be supplied, and the updated TargetInfo structure will be returned.
:param challenge: The 8-byte challenge message generated by the server
:return: A tuple containing the 24 byte NTLM Hash, Session Key and TargetInfo |
379,998 | def _VarintBytes(value):
pieces = []
_EncodeVarint(pieces.append, value)
return b"".join(pieces) | Encode the given integer as a varint and return the bytes. This is only
called at startup time so it doesn't need to be fast. |
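Since `_EncodeVarint` is internal to protobuf, here is a self-contained sketch of base-128 varint encoding that produces the same bytes for unsigned values; it is illustrative, not the protobuf implementation itself.

def encode_varint(value: int) -> bytes:
    # 7 data bits per byte, least-significant group first; the high bit marks
    # "more bytes follow".
    out = bytearray()
    while True:
        byte = value & 0x7F
        value >>= 7
        if value:
            out.append(byte | 0x80)
        else:
            out.append(byte)
            return bytes(out)

assert encode_varint(1) == b"\x01"
assert encode_varint(300) == b"\xac\x02"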
379,999 | def find_one(self, cls, id):
try:
db_result = self.get_class_table(cls).lookup(id)
except ItemNotFound:
db_result = None  # lookup raises when the item is missing
if not db_result:
return None
# NOTE: the tail of this function was truncated in extraction; deserializing the
# stored 'value' attribute via cls.from_data is assumed from the backend's conventions.
obj = cls.from_data(db_result['value'])
return obj | Required functionality. |