Unnamed: 0 | code | docstring
---|---|---|
8,000 | def bed_to_bedpe(bedfile, bedpefile, pairsbedfile=None, matesfile=None, ca=False, strand=False):
fp = must_open(bedfile)
fw = must_open(bedpefile, "w")
if pairsbedfile:
fwpairs = must_open(pairsbedfile, "w")
clones = defaultdict(list)
for row in fp:
b = BedLine(row)
name = b.accn
clonename = clone_name(name, ca=ca)
clones[clonename].append(b)
if matesfile:
fp = open(matesfile)
libraryline = next(fp)
lib, name, smin, smax = libraryline.split()
assert lib == "library"
smin, smax = int(smin), int(smax)
logging.debug("Happy mates for lib {0} fall between {1} - {2}".\
format(name, smin, smax))
nbedpe = 0
nspan = 0
for clonename, blines in clones.items():
nlines = len(blines)
if nlines == 2:
a, b = blines
aseqid, astart, aend = a.seqid, a.start, a.end
bseqid, bstart, bend = b.seqid, b.start, b.end
outcols = [aseqid, astart - 1, aend, bseqid, bstart - 1, bend, clonename]
if strand:
outcols.extend([0, a.strand, b.strand])
print("\t".join(str(x) for x in outcols), file=fw)
nbedpe += 1
elif nlines == 1:
a, = blines
aseqid, astart, aend = a.seqid, a.start, a.end
bseqid, bstart, bend = 0, 0, 0
else:
pass
if pairsbedfile:
start = min(astart, bstart) if bstart > 0 else astart
end = max(aend, bend) if bend > 0 else aend
if aseqid != bseqid:
continue
span = end - start + 1
if (not matesfile) or (smin <= span <= smax):
print("\t".join(str(x) for x in \
(aseqid, start - 1, end, clonename)), file=fwpairs)
nspan += 1
fw.close()
logging.debug("A total of {0} bedpe written to `{1}`.".\
format(nbedpe, bedpefile))
if pairsbedfile:
fwpairs.close()
logging.debug("A total of {0} spans written to `{1}`.".\
format(nspan, pairsbedfile)) | This converts the bedfile to bedpefile, assuming the reads are from CA. |
8,001 | def get_repository(self, entity_cls):
entity_record = self._get_entity_by_class(entity_cls)
provider = self.get_provider(entity_record.provider_name)
return provider.get_repository(entity_record.entity_cls) | Retrieve a Repository for the Model with a live connection |
8,002 | def list_upgrades(refresh=True, root=None, **kwargs):
if refresh:
refresh_db(root)
ret = dict()
cmd = ['list-updates']
if 'fromrepo' in kwargs:
repo_name = kwargs['fromrepo']
if not isinstance(repo_name, six.string_types):
repo_name = six.text_type(repo_name)
cmd.extend(['--repo', repo_name])
for update_node in __zypper__(root=root).nolock.xml.call(*cmd).getElementsByTagName('update'):
if update_node.getAttribute('kind') == 'package':
ret[update_node.getAttribute('name')] = update_node.getAttribute('edition')
return ret | List all available package upgrades on this system
refresh
force a refresh if set to True (default).
If set to False it depends on zypper if a refresh is
executed.
root
operate on a different root directory.
CLI Example:
.. code-block:: bash
salt '*' pkg.list_upgrades |
8,003 | def update_device(self, device_id, **kwargs):
api = self._get_api(device_directory.DefaultApi)
device = Device._create_request_map(kwargs)
body = DeviceDataPostRequest(**device)
return Device(api.device_update(device_id, body)) | Update existing device in catalog.
.. code-block:: python
existing_device = api.get_device(...)
updated_device = api.update_device(
existing_device.id,
certificate_fingerprint = "something new"
)
:param str device_id: The ID of the device to update (Required)
:param obj custom_attributes: Up to 5 custom JSON attributes
:param str description: The description of the device
:param str name: The name of the device
:param str alias: The alias of the device
:param str device_type: The endpoint type of the device - e.g. if the device is a gateway
:param str host_gateway: The endpoint_name of the host gateway, if appropriate
:param str certificate_fingerprint: Fingerprint of the device certificate
:param str certificate_issuer_id: ID of the issuer of the certificate
:returns: the updated device object
:rtype: Device |
8,004 | def plot_trajectory_with_elegans(
obs, width=350, height=350, config=None, grid=True, wireframe=False,
max_count=10, camera_position=(-22, 23, 32), camera_rotation=(-0.6, 0.5, 0.6),
plot_range=None):
config = config or {}
from IPython.core.display import display, HTML
color_scale = default_color_scale(config=config)
plots = []
xmin, xmax, ymin, ymax, zmin, zmax = None, None, None, None, None, None
data = obs.data()
if max_count is not None and len(data) > max_count:
data = random.sample(data, max_count)
for i, y in enumerate(data):
xarr, yarr, zarr = [], [], []
for pos in y:
xarr.append(pos[0])
yarr.append(pos[1])
zarr.append(pos[2])
if xmin is None:
if len(y) > 0:
xmin, xmax = min(xarr), max(xarr)
ymin, ymax = min(yarr), max(yarr)
zmin, zmax = min(zarr), max(zarr)
else:
xmin, xmax = min([xmin] + xarr), max([xmax] + xarr)
ymin, ymax = min([ymin] + yarr), max([ymax] + yarr)
zmin, zmax = min([zmin] + zarr), max([zmax] + zarr)
name = str(i + 1)
c = color_scale.get_color(name)
plots.append({
'type': 'line',
'data': {'x': xarr, 'y': yarr, 'z': zarr},
'options': {
'name': name,
'thickness': 2, | Generate a plot from received instance of TrajectoryObserver and show it
on IPython notebook.
Parameters
----------
obs : TrajectoryObserver
TrajectoryObserver to render.
width : float, default 350
Width of the plotting area.
height : float, default 350
Height of the plotting area.
config : dict, default {}
Dict for configure default colors. Its values are colors unique
to each particle. The dictionary will be updated during this plot.
Colors included in config dict will never be used for other particles.
camera_position : tuple, default (-22, 23, 32)
camera_rotation : tuple, default (-0.6, 0.5, 0.6)
Initial position and rotation of camera.
plot_range : tuple, default None
Range for plotting. A triplet of pairs suggesting (rangex, rangey, rangez).
If None, the minimum volume containing all the trajectories is used. |
8,005 | def load_configuration_from_text_file(register, configuration_file):
logging.info("Loading configuration: %s" % configuration_file)
register.configuration_file = configuration_file
config_dict = parse_global_config(register.configuration_file)
if 'Flavor' in config_dict:
flavor = config_dict.pop('Flavor').lower()
if register.flavor:
pass
else:
register.init_fe_type(flavor)
else:
if register.flavor:
pass
else:
raise ValueError('Flavor not specified')
if 'Chip_ID' in config_dict:
chip_id = config_dict.pop('Chip_ID')
if register.chip_address:
pass
else:
register.set_chip_address(chip_address=chip_id & 0x7, broadcast=True if chip_id & 0x8 else False)
elif 'Chip_Address' in config_dict:
chip_address = config_dict.pop('Chip_Address')
if register.chip_address:
pass
else:
register.set_chip_address(chip_address)
else:
if register.chip_id_initialized:
pass
else:
raise ValueError('Chip address not specified')
global_registers_configured = []
pixel_registers_configured = []
for key in config_dict.keys():
value = config_dict.pop(key)
if key in register.global_registers:
register.set_global_register_value(key, value)
global_registers_configured.append(key)
elif key in register.pixel_registers:
register.set_pixel_register_value(key, value)
pixel_registers_configured.append(key)
elif key in register.calibration_parameters:
register.calibration_parameters[key] = value
else:
register.miscellaneous[key] = value
global_registers = register.get_global_register_attributes('name', readonly=False)
pixel_registers = register.pixel_registers.keys()
global_registers_not_configured = set(global_registers).difference(global_registers_configured)
pixel_registers_not_configured = set(pixel_registers).difference(pixel_registers_configured)
if global_registers_not_configured:
logging.warning("Following global register(s) not configured: {}".format(.join(\ for reg in global_registers_not_configured)))
if pixel_registers_not_configured:
logging.warning("Following pixel register(s) not configured: {}".format(.join(\ for reg in pixel_registers_not_configured)))
if register.miscellaneous:
logging.warning("Found following unknown parameter(s): {}".format(.join(\ for parameter in register.miscellaneous.iterkeys()))) | Loading configuration from text files to register object
Parameters
----------
register : pybar.fei4.register object
configuration_file : string
Full path (directory and filename) of the configuration file. If name is not given, reload configuration from file. |
8,006 | def save_firefox_profile(self, remove_old=False):
self.logger.info("Saving profile from %s to %s" % (self._profile.path, self._profile_path))
if remove_old:
if os.path.exists(self._profile_path):
try:
shutil.rmtree(self._profile_path)
except OSError:
pass
shutil.copytree(os.path.join(self._profile.path), self._profile_path,
ignore=shutil.ignore_patterns("parent.lock", "lock", ".parentlock"))
else:
for item in os.listdir(self._profile.path):
if item in ["parent.lock", "lock", ".parentlock"]:
continue
s = os.path.join(self._profile.path, item)
d = os.path.join(self._profile_path, item)
if os.path.isdir(s):
shutil.copytree(s, d,
ignore=shutil.ignore_patterns("parent.lock", "lock", ".parentlock"))
else:
shutil.copy2(s, d)
with open(os.path.join(self._profile_path, self._LOCAL_STORAGE_FILE), "w") as f:
f.write(dumps(self.get_local_storage())) | Function to save the firefox profile to the permanent one |
8,007 | def _compute_betas_gwr(y, x, wi):
xT = (x * wi).T
xtx = np.dot(xT, x)
xtx_inv_xt = linalg.solve(xtx, xT)
betas = np.dot(xtx_inv_xt, y)
return betas, xtx_inv_xt | compute MLE coefficients using iwls routine
Methods: p189, Iteratively (Re)weighted Least Squares (IWLS),
Fotheringham, A. S., Brunsdon, C., & Charlton, M. (2002).
Geographically weighted regression: the analysis of spatially varying relationships. |
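The helper above solves the weighted normal equations (X^T W X) beta = X^T W y for one calibration location. A minimal sketch of the same computation on toy data (all values illustrative):

```python
import numpy as np
from scipy import linalg

rng = np.random.default_rng(0)
x = np.hstack([np.ones((5, 1)), rng.normal(size=(5, 2))])  # design matrix with intercept
y = rng.normal(size=(5, 1))
wi = np.ones((5, 1)) * 0.5          # spatial kernel weights at one location

xT = (x * wi).T                     # X^T W
xtx = np.dot(xT, x)                 # X^T W X
xtx_inv_xt = linalg.solve(xtx, xT)  # (X^T W X)^-1 X^T W
betas = np.dot(xtx_inv_xt, y)       # weighted least squares coefficients
print(betas.ravel())
```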
8,008 | def enable_audit_device(self, device_type, description=None, options=None, path=None):
if path is None:
path = device_type
params = {
'type': device_type,
'description': description,
'options': options,
}
api_path = '/v1/sys/audit/{path}'.format(path=path)
return self._adapter.post(
url=api_path,
json=params
) | Enable a new audit device at the supplied path.
The path can be a single word name or a more complex, nested path.
Supported methods:
PUT: /sys/audit/{path}. Produces: 204 (empty body)
:param device_type: Specifies the type of the audit device.
:type device_type: str | unicode
:param description: Human-friendly description of the audit device.
:type description: str | unicode
:param options: Configuration options to pass to the audit device itself. This is
dependent on the audit device type.
:type options: str | unicode
:param path: Specifies the path in which to enable the audit device. This is part of
the request URL.
:type path: str | unicode
:return: The response of the request.
:rtype: requests.Response |
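A hedged usage sketch against a local Vault dev server; the URL, token, and file path are placeholders, and `client.sys.enable_audit_device` is the hvac entry point this method backs:

```python
import hvac

client = hvac.Client(url="http://127.0.0.1:8200", token="dev-root-token")

# Enable a file audit device at the path "file-log".
client.sys.enable_audit_device(
    device_type="file",
    description="file audit log",
    options={"file_path": "/var/log/vault_audit.log"},
    path="file-log",
)
```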
8,009 | def check_timeseries_id(self, dataset):
timeseries_ids = dataset.get_variables_by_attributes(cf_role='timeseries_id')
if not timeseries_ids:
return
test_ctx = TestCtx(BaseCheck.MEDIUM, 'Recommended attributes for the timeSeries variable')
timeseries_variable = timeseries_ids[0]
test_ctx.assert_true(
getattr(timeseries_variable, 'long_name', '') != "",
"long_name attribute should exist and not be empty"
)
return test_ctx.to_result() | Checks that if a variable exists for the time series id it has the appropriate attributes
:param netCDF4.Dataset dataset: An open netCDF dataset |
8,010 | def _AlignDecodedDataOffset(self, decoded_data_offset):
self._file_object.seek(0, os.SEEK_SET)
self._decoder = self._GetDecoder()
self._decoded_data = b''
encoded_data_offset = 0
encoded_data_size = self._file_object.get_size()
while encoded_data_offset < encoded_data_size:
read_count = self._ReadEncodedData(self._ENCODED_DATA_BUFFER_SIZE)
if read_count == 0:
break
encoded_data_offset += read_count
if decoded_data_offset < self._decoded_data_size:
self._decoded_data_offset = decoded_data_offset
break
decoded_data_offset -= self._decoded_data_size | Aligns the encoded file with the decoded data offset.
Args:
decoded_data_offset (int): decoded data offset. |
8,011 | def notify_slack(title, content, attachment_color="#4bb543", short_threshold=40, token=None,
channel=None, mention_user=None, **kwargs):
import slackclient
cfg = Config.instance()
if not token:
token = cfg.get_expanded("notifications", "slack_token")
if not channel:
channel = cfg.get_expanded("notifications", "slack_channel")
if not token or not channel:
logger.warning("cannot send Slack notification, token ({}) or channel ({}) empty".format(
token, channel))
return False
mention_text = ""
if mention_user is None:
mention_user = cfg.get_expanded("notifications", "slack_mention_user")
if mention_user:
mention_text = " (@{})".format(mention_user)
request = {
"channel": channel,
"as_user": True,
"parse": "full",
}
if isinstance(content, six.string_types):
request["text"] = "{}{}\n\n{}".format(title, mention_text, content)
else:
request["text"] = "{} {}".format(title, mention_text)
request["attachments"] = at = {
"color": attachment_color,
"fields": [],
"fallback": "{}{}\n\n".format(title, mention_text),
}
for key, value in content.items():
at["fields"].append({
"title": key,
"value": value,
"short": len(value) <= short_threshold,
})
at["fallback"] += "_{}_: {}\n".format(key, value)
request.update(kwargs)
thread = threading.Thread(target=_notify_slack, args=(token, request))
thread.start()
return True | Sends a slack notification and returns *True* on success. The communication with the slack API
might have some delays and is therefore handled by a thread. The format of the notification
depends on *content*. If it is a string, a simple text notification is sent. Otherwise, it
should be a dictionary whose fields are used to build a message attachment with two-column
formatting. |
8,012 | def FileEntryExistsByPathSpec(self, path_spec):
location = getattr(path_spec, 'location', None)
if location is None:
return False
is_device = False
if platform.system() == 'Windows':
try:
is_device = pysmdev.check_device(location)
except IOError as exception:
exception_string = str(exception)
if not isinstance(exception_string, py2to3.UNICODE_TYPE):
exception_string = py2to3.UNICODE_TYPE(
exception_string, errors='replace')
if 'access denied' in exception_string:
is_device = True
return is_device or os.path.exists(location) or os.path.islink(location) | Determines if a file entry for a path specification exists.
Args:
path_spec (PathSpec): a path specification.
Returns:
bool: True if the file entry exists, false otherwise. |
8,013 | def stft(func=None, **kwparams):
if func is None:
cfi = chain.from_iterable
mix_dict = lambda *dicts: dict(cfi(iteritems(d) for d in dicts))
result = lambda f=None, **new_kws: stft(f, **mix_dict(kwparams, new_kws))
return result
@tostream
@wraps(func)
def wrapper(sig, **kwargs):
kws = kwparams.copy()
kws.update(kwargs)
if "size" not in kws:
raise TypeError("Missing argument")
if "hop" in kws and kws["hop"] > kws["size"]:
raise ValueError("Hop value can{}{}' extra argument".format(k))
def blk_gen(size, hop, wnd, transform, inverse_transform, before, after):
if transform is NotSpecified:
from numpy.fft import rfft as transform
if inverse_transform is NotSpecified:
from numpy.fft import irfft as inverse_transform
if before is NotSpecified:
from numpy.fft import ifftshift as before
if after is NotSpecified:
from numpy.fft import fftshift as after
if callable(wnd) and not isinstance(wnd, Stream):
wnd = wnd(size)
if isinstance(wnd, Iterable):
wnd = list(wnd)
if len(wnd) != size:
raise ValueError("Incompatible window size")
elif wnd is not None:
raise TypeError("Window should be an iterable or a callable")
trans = transform and (lambda blk: transform(blk, size))
itrans = inverse_transform and (lambda blk:
inverse_transform(blk, size))
funcs = [f for f in [before, trans, func, itrans, after]
if f is not None]
process = lambda blk: reduce(lambda data, f: f(data), funcs, blk)
if wnd is None:
for blk in Stream(sig).blocks(size=size, hop=hop):
yield process(blk)
else:
blk_with_wnd = wnd[:]
mul = operator.mul
for blk in Stream(sig).blocks(size=size, hop=hop):
blk_with_wnd[:] = xmap(mul, blk, wnd)
yield process(blk_with_wnd)
if ola is None:
return blk_gen(**blk_params)
else:
return ola(blk_gen(**blk_params), **ola_params)
return wrapper | Short Time Fourier Transform block processor / phase vocoder wrapper.
This function can be used in many ways:
* Directly as a signal processor builder, wrapping a spectrum block/grain
processor function;
* Directly as a decorator to a block processor;
* Called without the ``func`` parameter for a partial evaluation style
changing the defaults.
See the examples below for more information about these use cases.
The resulting function performs a full block-by-block analysis/synthesis
phase vocoder keeping this sequence of actions:
1. Blockenize the signal with the given ``size`` and ``hop``;
2. Lazily apply the given ``wnd`` window to each block;
3. Perform the 5 actions calling their functions in order:
a. ``before``: Pre-processing;
b. ``transform``: A transform like the FFT;
c. ``func``: the positional parameter with the single block processor;
d. ``inverse_transform``: inverse FFT;
e. ``after``: Post-processing.
4. Overlap-add with the ``ola`` overlap-add strategy. The given ``ola``
would deal with its own window application and normalization.
Any parameter from steps 3 and 4 can be set to ``None`` to skip it from
the full process, without changing the other [sub]steps. The parameters
defaults are based on the Numpy FFT subpackage.
Parameters
----------
func :
The block/grain processor function that receives a transformed block in
the frequency domain (the ``transform`` output) and should return the
processed data (it will be the first ``inverse_transform`` input). This
parameter shouldn't appear when this function is used as a decorator.
size :
Block size for the STFT process, in samples.
hop :
Duration in samples between two blocks. Defaults to the ``size`` value.
transform :
Function that receives the windowed block (in time domain) and the
``size`` as two positional inputs and should return the block (in
frequency domain). Defaults to ``numpy.fft.rfft``, which outputs a
Numpy 1D array with length equals to ``size // 2 + 1``.
inverse_transform :
Function that receives the processed block (in frequency domain) and the
``size`` as two positional inputs and should return the block (in
time domain). Defaults to ``numpy.fft.irfft``.
wnd :
Window function to be called as ``wnd(size)`` or window iterable with
length equals to ``size``. The windowing/apodization values are used
before taking the FFT of each block. Defaults to None, which means no
window should be applied (same behavior of a rectangular window).
before :
Function to be applied just before taking the transform, after the
windowing. Defaults to the ``numpy.fft.ifftshift``, which, together with
the ``after`` default, puts the time reference at the ``size // 2``
index of the block, centralizing it for the FFT (e.g. blocks
``[0, 1, 0]`` and ``[0, 0, 1, 0]`` would have zero phase). To disable
this realignment, just change both ``before=None`` and ``after=None``
keywords.
after :
Function to be applied just after the inverse transform, before calling
the overlap-add (as well as before its windowing, if any). Defaults to
the ``numpy.fft.fftshift`` function, which undo the changes done by the
default ``before`` pre-processing for block phase alignment. To avoid
the default time-domain realignment, set both ``before=None`` and
``after=None`` keywords.
ola :
Overlap-add strategy. Uses the ``overlap_add`` default strategy when
not given. The strategy should allow at least size and hop keyword
arguments, besides a first positional argument for the iterable with
blocks. If ``ola=None``, the result from using the STFT processor will be
the ``Stream`` of blocks that would be the overlap-add input.
ola_* :
Extra keyword parameters for the overlap-add strategy, if any. The extra
``ola_`` prefix is removed when calling it. See the overlap-add strategy
docs for more information about the valid parameters.
Returns
-------
A function with the same parameters above, besides ``func``, which is
replaced by the signal input (if func was given). The parameters used when
building the function should be seen as defaults that can be changed when
calling the resulting function with the respective keyword arguments.
Examples
--------
Let's process something:
>>> my_signal = Stream(.1, .3, -.1, -.3, .5, .4, .3)
Wrapping directly the processor function:
>>> processor_w = stft(abs, size=64)
>>> sig = my_signal.copy() # Any iterable
>>> processor_w(sig)
<audiolazy.lazy_stream.Stream object at 0x...>
>>> peek200_w = _.peek(200) # Needs Numpy
>>> type(peek200_w[0]).__name__ # Result is a signal (numpy.float64 data)
'float64'
Keyword parameters in a partial evaluation style (can be reassigned):
>>> stft64 = stft(size=64) # Same to ``stft`` but with other defaults
>>> processor_p = stft64(abs)
>>> sig = my_signal.copy() # Any iterable
>>> processor_p(sig)
<audiolazy.lazy_stream.Stream object at 0x...>
>>> _.peek(200) == peek200_w # This should do the same thing
True
As a decorator, this time with other windowing configuration:
>>> stft64hann = stft64(wnd=window.hann, ola_wnd=window.hann)
>>> @stft64hann # stft(...) can also be used as an anonymous decorator
... def processor_d(blk):
... return abs(blk)
>>> processor_d(sig) # This leads to a different result
<audiolazy.lazy_stream.Stream object at 0x...>
>>> _.peek(200) == peek200_w
False
You can also use other iterables as input, and keep the parameters to be
passed afterwards, as well as change transform calculation:
>>> stft_no_zero_phase = stft(before=None, after=None)
>>> stft_no_wnd = stft_no_zero_phase(ola=overlap_add.list, ola_wnd=None,
... ola_normalize=False)
>>> on_blocks = stft_no_wnd(transform=None, inverse_transform=None)
>>> processor_a = on_blocks(reversed, hop=4) # Reverse
>>> processor_a([1, 2, 3, 4, 5], size=4, hop=2)
<audiolazy.lazy_stream.Stream object at 0x...>
>>> list(_) # From blocks [1, 2, 3, 4] and [3, 4, 5, 0.0]
[4.0, 3.0, 2.0, 6, 4, 3]
>>> processor_a([1, 2, 3, 4, 5], size=4) # Default hop instead
<audiolazy.lazy_stream.Stream object at 0x...>
>>> list(_) # No overlap, blocks [1, 2, 3, 4] and [5, 0.0, 0.0, 0.0]
[4, 3, 2, 1, 0.0, 0.0, 0.0, 5]
>>> processor_a([1, 2, 3, 4, 5]) # Size was never given
Traceback (most recent call last):
...
TypeError: Missing 'size' argument
For analysis only, one can set ``ola=None``:
>>> from numpy.fft import ifftshift # [1, 2, 3, 4, 5] -> [3, 4, 5, 1, 2]
>>> analyzer = stft(ifftshift, ola=None, size=8, hop=2)
>>> sig = Stream(1, 0, -1, 0) # A pi/2 rad/sample cosine signal
>>> result = analyzer(sig)
>>> result
<audiolazy.lazy_stream.Stream object at 0x...>
Let's see the result contents. That processing "rotates" the frequencies,
converting the original ``[0, 0, 4, 0, 0]`` real FFT block to a
``[4, 0, 0, 0, 0]`` block, which means the block cosine was moved to
a DC-only signal keeping original energy/integral:
>>> result.take()
array([ 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5])
>>> result.take() # From [0, 0, -4, 0, 0] to [-4, 0, 0, 0, 0]
array([-0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5])
Note
----
Parameters should be passed as keyword arguments. The only exception
is ``func`` for this function and ``sig`` for the returned function,
which are always the first positional argument, and also the one that
shouldn't appear when using this function as a decorator.
Hint
----
1. When using Numpy FFT, one can keep data in place and return the
changed input block to save time;
2. Actually, there's nothing in this function that imposes FFT or Numpy
besides the default values. One can still use this even for other
transforms that have nothing to do with the Fourier Transform.
See Also
--------
overlap_add :
Overlap-add algorithm for an iterable (e.g. a Stream instance) of blocks
(sequences such as lists or Numpy arrays). It's also a StrategyDict.
window :
Window/apodization/tapering functions for a given size as a StrategyDict. |
8,014 | def subfeature (feature_name, value_string, subfeature, subvalues, attributes = []):
parent_feature = validate_feature (feature_name)
subfeature_name = __get_subfeature_name (subfeature, value_string)
if subfeature_name in __all_features[feature_name].subfeatures:
message = " already declared as a subfeature of " % (subfeature, feature_name)
message += " specific to " % value_string
raise BaseException (message)
f = feature (feature_name + '-' + subfeature_name, subvalues, attributes + ['subfeature'])
f.set_parent(parent_feature, value_string)
parent_feature.add_subfeature(f)
extend_subfeature (feature_name, value_string, subfeature, subvalues) | Declares a subfeature.
feature_name: Root feature that is not a subfeature.
value_string: An optional value-string specifying which feature or
subfeature values this subfeature is specific to,
if any.
subfeature: The name of the subfeature being declared.
subvalues: The allowed values of this subfeature.
attributes: The attributes of the subfeature. |
8,015 | def select_params_from_section_schema(section_schema, param_class=Param,
deep=False):
for name, value in inspect.getmembers(section_schema):
if name.startswith("__") or value is None:
continue
elif inspect.isclass(value) and deep:
cls = value
for name, value in select_params_from_section_schema(cls,
param_class=param_class, deep=True):
yield (name, value)
elif isinstance(value, param_class):
yield (name, value) | Selects the parameters of a config section schema.
:param section_schema: Configuration file section schema to use.
:return: Generator of params |
8,016 | def _to_repeatmasker_string(pairwise_alignment, column_width=DEFAULT_COL_WIDTH,
m_name_width=DEFAULT_MAX_NAME_WIDTH):
s1 = pairwise_alignment.s1
s2 = pairwise_alignment.s2
s1_neg = not s1.is_positive_strand()
s2_neg = not s2.is_positive_strand()
size = pairwise_alignment.size()
s1_comp = "C" if s1_neg else " "
s2_comp = "C" if s2_neg else " "
s1_len = len(s1.name)
s2_len = len(s2.name)
f_len = max(s1_len, s2_len)
if m_name_width is not None:
f_len = min(f_len, m_name_width)
s1_n = s1.name[:f_len] + (' ' * (f_len - s1_len))
s2_n = s2.name[:f_len] + (' ' * (f_len - s2_len))
s1_line_end_num = (s1.end if s1_neg else s1.start - 1)
s2_line_end_num = (s2.end if s2_neg else s2.start - 1)
max_num_len = max(len(str(s1.start + size)), len(str(s2.start + size)))
res = ""
i = 0
res += _get_repeat_masker_header(pairwise_alignment) + "\n\n"
while i < len(pairwise_alignment.s1):
if pairwise_alignment.meta is not None:
for k in pairwise_alignment.meta:
if k not in KNOWN_KEYS:
if k is ROUNDTRIP_KEY:
res += (pairwise_alignment.meta[k] + "\n")
else:
res += (k + " = " + str(pairwise_alignment.meta[k]) + "\n")
res = res.strip()
return res | generate a repeatmasker formated representation of this pairwise alignment.
:param column_width: number of characters to output per line of alignment
:param m_name_width: truncate names on alignment lines to this length
(set to None for no truncation) |
8,017 | def _make_y_title(self):
if self._y_title:
yc = self.margin_box.top + self.view.height / 2
for i, title_line in enumerate(self._y_title, 1):
text = self.svg.node(
self.nodes['title'],
'text',
class_='title',
x=self._legend_at_left_width,
y=i * (self.style.title_font_size + self.spacing) + yc
)
text.attrib['transform'] = "rotate(%d %f %f)" % (
-90, self._legend_at_left_width, yc
)
text.text = title_line | Make the Y-Axis title |
8,018 | def check_bottleneck(text):
err = "mixed_metaphors.misc.bottleneck"
msg = u"Mixed metaphor — bottles with big necks are easy to pass through."
list = [
"biggest bottleneck",
"big bottleneck",
"large bottleneck",
"largest bottleneck",
"world-wide bottleneck",
"huge bottleneck",
"massive bottleneck",
]
return existence_check(text, list, err, msg, max_errors=1) | Avoid mixing metaphors about bottles and their necks.
source: Sir Ernest Gowers
source_url: http://bit.ly/1CQPH61 |
8,019 | def beacon_link(variant_obj, build=None):
build = build or 37
url_template = ("https://beacon-network.org/
"chrom={this[chromosome]}&allele={this[alternative]}&"
"ref={this[reference]}&rs=GRCh37")
return url_template.format(this=variant_obj) | Compose link to Beacon Network. |
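Because the template is filled with `{this[...]}` item lookups, any mapping with those keys works. A quick sketch, assuming the URL template reconstructed above (variant values are hypothetical):

```python
variant = {
    "position": 880086,
    "chromosome": "1",
    "alternative": "T",
    "reference": "C",
}
print(beacon_link(variant))  # fills each {this[key]} field from the mapping
```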
8,020 | def do_ams_put(endpoint, path, body, access_token, rformat="json", ds_min_version="3.0;NetFx"):
min_ds = dsversion_min
content_acceptformat = json_acceptformat
if rformat == "json_only":
min_ds = ds_min_version
content_acceptformat = json_only_acceptformat
headers = {"Content-Type": content_acceptformat,
"DataServiceVersion": min_ds,
"MaxDataServiceVersion": dsversion_max,
"Accept": json_acceptformat,
"Accept-Charset" : charset,
"Authorization": "Bearer " + access_token,
"x-ms-version" : xmsversion}
response = requests.put(endpoint, data=body, headers=headers, allow_redirects=False)
if response.status_code == 301:
redirected_url = ''.join([response.headers['location'], path])
response = requests.put(redirected_url, data=body, headers=headers)
return response | Do a AMS HTTP PUT request and return JSON.
Args:
endpoint (str): Azure Media Services Initial Endpoint.
path (str): Azure Media Services Endpoint Path.
body (str): Azure Media Services Content Body.
access_token (str): A valid Azure authentication token.
rformat (str): A required JSON Accept Format.
ds_min_version (str): A required DS MIN Version.
Returns:
HTTP response. JSON body. |
8,021 | def expand_macros(raw_text, macros):
includes = {}
result = []
pattern = re.compile("
ipattern = re.compile("
for line in raw_text.split("\n"):
line = string.Template(line).safe_substitute(macros)
result.append(line)
if line.startswith("
match = pattern.match(line)
try:
var, opt, val, or_ = match.group(1, 2, 3, 4)
except:
raise InvalidMacroError("Failed to parse macro {}\n".format(line))
if or_:
if var not in macros:
raise InvalidMacroError("Macro {} is not defined: {}\n".format(var, or_))
elif not (opt and var in macros):
macros[var] = val
elif line.startswith("
match = ipattern.match(line)
try:
filename = match.group(1)
except:
error("Failed to parse include {}\n".format(line))
sys.exit(1)
try:
with io.open(filename, 'r') as f:
includes[filename] = expand_macros(f.read(), macros)
except IOError:
if match.group(2):
if match.group(2).startswith('||'):
sprint(match.group(3))
else:
error("Nonexistent include {}\n".format(filename))
sys.exit(1)
return "\n".join(result), includes | this gets called before the sakefile is parsed. it looks for
macros defined anywhere in the sakefile (the start of the line
is '#!') and then replaces all occurrences of '$variable' with the
value defined in the macro. it then returns the contents of the
file with the macros expanded. |
8,022 | def add_group_member(self, grp_name, user):
self.project_service.set_auth(self._token_project)
self.project_service.add_group_member(grp_name, user) | Add the given user to the named group.
Both group and user must already exist for this to succeed.
Args:
grp_name (string): Name of group.
user (string): User to add to group.
Raises:
requests.HTTPError on failure. |
8,023 | def url(self):
if not self._url[2].endswith('/'):
self._url[2] += '/'
return RestURL.url.__get__(self) | The URL as a string of the resource. |
8,024 | def _set_session_style(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'session-ff': {'value': 2}, u'session-none': {'value': 0}, u'session-wf': {'value': 3}, u'session-se': {'value': 1}},), is_leaf=True, yang_name="session-style", rest_name="session-style", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='session-reservation-style', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """session_style must be of a type compatible with session-reservation-style""",
'defined-type': "brocade-mpls-operational:session-reservation-style",
'generated-type': """YANGDynClass(...)""",
})
self.__session_style = t
if hasattr(self, '_set'):
self._set() | Setter method for session_style, mapped from YANG variable /mpls_state/rsvp/sessions/psbs/session_style (session-reservation-style)
If this variable is read-only (config: false) in the
source YANG file, then _set_session_style is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_session_style() directly.
YANG Description: Style of session |
8,025 | def _ostaunicode(src):
if have_py_3:
bytename = src
else:
bytename = src.decode('utf-8')
try:
enc = bytename.encode('latin-1')
encbyte = b'\x08'
except (UnicodeEncodeError, UnicodeDecodeError):
enc = bytename.encode('utf-16_be')
encbyte = b'\x10'
return encbyte + enc | Internal function to create an OSTA byte string from a source string. |
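Assuming the Latin-1/UTF-16-BE encodings reconstructed above (compression IDs 8 and 16 in OSTA compressed Unicode) and a Python 3 build where `have_py_3` is true, both branches look like this:

```python
print(_ostaunicode("hello"))   # b'\x08hello' - fits in Latin-1, ID byte 8
print(_ostaunicode("héllo☃"))  # ID byte 16 followed by UTF-16-BE code units
```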
8,026 | def check_layer(layer, has_geometry=True):
if is_vector_layer(layer) or is_raster_layer(layer):
if not layer.isValid():
raise InvalidLayerError(
'Layer is invalid: %s' % layer.publicSource())
if is_vector_layer(layer):
sub_layers = layer.dataProvider().subLayers()
if len(sub_layers) > 1:
names = ', '.join(sub_layers)
source = layer.source()
raise InvalidLayerError(
tr('The file {source} contains multiple layers: {names}'
).format(source=source, names=names))
if layer.geometryType() == QgsWkbTypes.UnknownGeometry and (
layer.featureCount() != 0):
raise InvalidLayerError(
tr('The layer geometry type is unknown.'))
if layer.wkbType() == QgsWkbTypes.Unknown and (
layer.featureCount() != 0):
raise InvalidLayerError(
tr('The layer WKB type is unknown.'))
if isinstance(has_geometry, bool) and layer.featureCount() != 0:
if layer.isSpatial() != has_geometry:
raise InvalidLayerError(
tr('The layer does not match the expected geometry requirement.'))
else:
raise InvalidLayerError(
tr('The layer is not a QGIS layer: {type}').format(
type=type(layer)))
return True | Helper to check layer validity.
This function will raise InvalidLayerError if the layer is invalid.
:param layer: The layer to check.
:type layer: QgsMapLayer
:param has_geometry: If the layer must have a geometry. True by default.
If it's a raster layer, we will not check this parameter. If we do not
want to check the geometry type, we can set it to None.
:type has_geometry: bool,None
:raise: InvalidLayerError
:return: Return True if the layer is valid.
:rtype: bool |
8,027 | def cli(env, columns, sortby, volume_id):
block_manager = SoftLayer.BlockStorageManager(env.client)
access_list = block_manager.get_block_volume_access_list(
volume_id=volume_id)
table = formatting.Table(columns.columns)
table.sortby = sortby
for key, type_name in [('allowedVirtualGuests', 'VIRTUAL'),
('allowedHardware', 'HARDWARE'),
('allowedSubnets', 'SUBNET'),
('allowedIpAddresses', 'IP')]:
for obj in access_list.get(key, []):
obj['type'] = type_name
table.add_row([value or formatting.blank()
for value in columns.row(obj)])
env.fout(table) | List ACLs. |
8,028 | def _select_best_remaining_qubit(self, prog_qubit):
reliab_store = {}
for hw_qubit in self.available_hw_qubits:
reliab = 1
for n in self.prog_graph.neighbors(prog_qubit):
if n in self.prog2hw:
reliab *= self.swap_costs[self.prog2hw[n]][hw_qubit]
reliab *= self.readout_errors[hw_qubit]
reliab_store[hw_qubit] = reliab
max_reliab = 0
best_hw_qubit = None
for hw_qubit in reliab_store:
if reliab_store[hw_qubit] > max_reliab:
max_reliab = reliab_store[hw_qubit]
best_hw_qubit = hw_qubit
return best_hw_qubit | Select the best remaining hardware qubit for the next program qubit. |
8,029 | def _fix_labels(self):
for s in self.systems:
mag0 = np.inf
n0 = None
for n in self.get_system(s):
if isinstance(n.parent, DummyObsNode):
continue
mag, _ = n.parent.value
if mag < mag0:
mag0 = mag
n0 = n
if n0 is not None and n0.tag != 0:
n_other = self.get_leaf('{}_{}'.format(s, 0))
n_other.tag = n0.tag
n0.tag = 0 | For each system, make sure tag _0 is the brightest, and make sure
system 0 contains the brightest star in the highest-resolution image |
8,030 | def fetch(self, addon_id, data={}, **kwargs):
return super(Addon, self).fetch(addon_id, data, **kwargs) | Fetch addon for given Id
Args:
addon_id : Id for which addon object has to be retrieved
Returns:
addon dict for the given addon Id |
8,031 | def MakeExecutableTemplate(self, output_file=None):
super(WindowsClientBuilder,
self).MakeExecutableTemplate(output_file=output_file)
self.MakeBuildDirectory()
self.BuildWithPyInstaller()
for module in EnumMissingModules():
logging.info("Copying additional dll %s.", module)
shutil.copy(module, self.output_dir)
self.BuildNanny()
shutil.copy(
os.path.join(self.output_dir, "GRRservice.exe"),
os.path.join(self.output_dir, "dbg_GRRservice.exe"))
with open(os.path.join(self.output_dir, "GRRservice.exe"), "r+") as fd:
build.SetPeSubsystem(fd, console=False)
with open(os.path.join(self.output_dir, "dbg_GRRservice.exe"), "r+") as fd:
build.SetPeSubsystem(fd, console=True)
shutil.copy(
os.path.join(self.output_dir, "grr-client.exe"),
os.path.join(self.output_dir, "dbg_grr-client.exe"))
with open(os.path.join(self.output_dir, "grr-client.exe"), "r+") as fd:
build.SetPeSubsystem(fd, console=False)
with open(os.path.join(self.output_dir, "dbg_grr-client.exe"), "r+") as fd:
build.SetPeSubsystem(fd, console=True)
self.MakeZip(self.output_dir, self.template_file) | Windows templates also include the nanny. |
8,032 | def plot_station_mapping(
target_latitude,
target_longitude,
isd_station,
distance_meters,
target_label="target",
):
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError("Plotting requires matplotlib.")
try:
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import cartopy.io.img_tiles as cimgt
except ImportError:
raise ImportError("Plotting requires cartopy.")
lat, lng = isd_station.coords
t_lat, t_lng = float(target_latitude), float(target_longitude)
fig = plt.figure(figsize=(16, 8))
tiles = cimgt.StamenTerrain()
ax = plt.subplot(1, 1, 1, projection=tiles.crs)
x_max = max([lng, t_lng])
x_min = min([lng, t_lng])
x_diff = x_max - x_min
y_max = max([lat, t_lat])
y_min = min([lat, t_lat])
y_diff = y_max - y_min
xoffset = x_diff * 0.05
yoffset = y_diff * 0.05
left = x_min - x_diff * 0.5
right = x_max + x_diff * 0.5
bottom = y_min - y_diff * 0.3
top = y_max + y_diff * 0.3
width_ratio = 2.
height_ratio = 1.
if (right - left) / (top - bottom) > width_ratio / height_ratio:
goal = (right - left) * height_ratio / width_ratio
diff = goal - (top - bottom)
bottom = bottom - diff / 2.
top = top + diff / 2.
else:
goal = (top - bottom) * width_ratio / height_ratio
diff = goal - (right - left)
left = left - diff / 2.
right = right + diff / 2.
ax.set_extent([left, right, bottom, top])
N_TILES = 600
km = distance_meters / 1000.0
zoom_level = int(np.log2(128 * N_TILES / km))
ax.add_image(tiles, zoom_level)
plt.plot(
[lng, t_lng],
[lat, t_lat],
linestyle="-",
dashes=[2, 2],
transform=ccrs.Geodetic(),
)
ax.plot(lng, lat, "ko", markersize=7, transform=ccrs.Geodetic())
ax.plot(t_lng, t_lat, "ro", markersize=7, transform=ccrs.Geodetic())
station_label = "{} ({})".format(isd_station.usaf_id, isd_station.name)
ax.text(lng + xoffset, lat + yoffset, station_label, transform=ccrs.Geodetic())
ax.text(t_lng + xoffset, t_lat + yoffset, target_label, transform=ccrs.Geodetic())
mid_lng = (lng + t_lng) / 2
mid_lat = (lat + t_lat) / 2
dist_text = "{:.01f} km".format(km)
ax.text(mid_lng + xoffset, mid_lat + yoffset, dist_text, transform=ccrs.Geodetic())
plt.show() | Plots this mapping on a map. |
8,033 | def call_bad_cb(self, tb):
with LiveExecution.lock:
if self.bad_cb and not self.bad_cb(tb):
self.bad_cb = None | If bad_cb returns True then keep it
:param tb: traceback that caused exception
:return: |
8,034 | def getExtensionArgs(self):
args = {}
if self.required:
args['required'] = ','.join(self.required)
if self.optional:
args['optional'] = ','.join(self.optional)
if self.policy_url:
args['policy_url'] = self.policy_url
return args | Get a dictionary of unqualified simple registration
arguments representing this request.
This method is essentially the inverse of
C{L{parseExtensionArgs}}. This method serializes the simple
registration request fields.
@rtype: {str:str} |
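A sketch of the serialization, assuming the usual `SRegRequest` constructor from python-openid (field names follow the simple registration spec):

```python
req = SRegRequest(required=["email", "nickname"],
                  optional=["fullname"],
                  policy_url="http://example.com/policy")
print(req.getExtensionArgs())
# {'required': 'email,nickname', 'optional': 'fullname',
#  'policy_url': 'http://example.com/policy'}
```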
8,035 | def process_index(self, url, page):
def scan(link):
if link.startswith(self.index_url):
parts = list(map(urllib.parse.unquote, link[len(self.index_url):].split('/')))
if len(parts) == 2 and '#' not in parts[1]:
pkg = safe_name(parts[0])
ver = safe_version(parts[1])
self.package_pages.setdefault(pkg.lower(), {})[link] = True
return to_filename(pkg), to_filename(ver)
return None, None
for match in HREF.finditer(page):
try:
scan(urllib.parse.urljoin(url, htmldecode(match.group(1))))
except ValueError:
pass
pkg, ver = scan(url)
if pkg:
for new_url in find_external_links(url, page):
base, frag = egg_info_for_url(new_url)
if base.endswith('.py') and not frag:
if ver:
new_url += '#egg=%s-%s' % (pkg, ver)
else:
self.need_version_info(url)
self.scan_url(new_url)
return PYPI_MD5.sub(
lambda m: '<a href="%s#md5=%s">%s</a>' % m.group(1, 3, 2), page
)
else:
return "" | Process the contents of a PyPI page |
8,036 | def verify_exif(filename):
required_exif = required_fields()
exif = ExifRead(filename)
required_exif_exist = exif.fields_exist(required_exif)
return required_exif_exist | Check that image file has the required EXIF fields.
Incompatible files will be ignored server side. |
8,037 | def inspect_tables(conn, database_metadata):
" List tables and their row counts, excluding uninteresting tables. "
tables = {}
table_names = [
r["name"]
for r in conn.execute(
'select * from sqlite_master where type="table"'
)
]
for table in table_names:
table_metadata = database_metadata.get("tables", {}).get(
table, {}
)
try:
count = conn.execute(
"select count(*) from {}".format(escape_sqlite(table))
).fetchone()[0]
except sqlite3.OperationalError:
count = 0
column_names = table_columns(conn, table)
tables[table] = {
"name": table,
"columns": column_names,
"primary_keys": detect_primary_keys(conn, table),
"count": count,
"hidden": table_metadata.get("hidden") or False,
"fts_table": detect_fts(conn, table),
}
foreign_keys = get_all_foreign_keys(conn)
for table, info in foreign_keys.items():
tables[table]["foreign_keys"] = info
hidden_tables = [
r["name"]
for r in conn.execute(
'select name from sqlite_master where rootpage = 0 and sql like "%VIRTUAL TABLE%USING FTS%"'
)
]
if detect_spatialite(conn):
hidden_tables += [
"ElementaryGeometries",
"SpatialIndex",
"geometry_columns",
"spatial_ref_sys",
"spatialite_history",
"sql_statements_log",
"sqlite_sequence",
"views_geometry_columns",
"virts_geometry_columns",
] + [
r["name"]
for r in conn.execute(
'select name from sqlite_master where name like "idx_%" and type = "table"'
)
]
for t in tables.keys():
for hidden_table in hidden_tables:
if t == hidden_table or t.startswith(hidden_table):
tables[t]["hidden"] = True
continue
return tables | List tables and their row counts, excluding uninteresting tables. |
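Note that the loop bodies index rows by column name (`r["name"]`), so the connection needs `sqlite3.Row` as its row factory. A hedged sketch (database file and metadata are placeholders):

```python
import sqlite3

conn = sqlite3.connect("example.db")
conn.row_factory = sqlite3.Row  # required for r["name"]-style access
metadata = {"tables": {"secrets": {"hidden": True}}}

tables = inspect_tables(conn, metadata)
for name, info in tables.items():
    print(name, info["count"], "hidden" if info["hidden"] else "")
```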
8,038 | def schedule_job(self, job):
l = _reraise_with_traceback(job.get_lambda_to_execute())
future = self.workers.submit(l, update_progress_func=self.update_progress, cancel_job_func=self._check_for_cancel)
self.job_future_mapping[future] = job
self.future_job_mapping[job.job_id] = future
future.add_done_callback(self.handle_finished_future)
self.cancel_notifications[job.job_id] = False
return future | schedule a job to the type of workers spawned by self.start_workers.
:param job: the job to schedule for running.
:return: |
8,039 | def assert_reset(self, asserted):
try:
self._invalidate_cached_registers()
self._link.assert_reset(asserted)
except DAPAccess.Error as exc:
six.raise_from(self._convert_exception(exc), exc) | Assert or de-assert target reset line |
8,040 | def organization_users(self, id, permission_set=None, role=None, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/users
api_path = "/api/v2/organizations/{id}/users.json"
api_path = api_path.format(id=id)
api_query = {}
if "query" in kwargs.keys():
api_query.update(kwargs["query"])
del kwargs["query"]
if permission_set:
api_query.update({
"permission_set": permission_set,
})
if role:
api_query.update({
"role": role,
})
return self.call(api_path, query=api_query, **kwargs) | https://developer.zendesk.com/rest_api/docs/core/users#list-users |
8,041 | def _repr_categories_info(self):
category_strs = self._repr_categories()
dtype = getattr(self.categories, 'dtype_str',
str(self.categories.dtype))
levheader = "Categories ({length}, {dtype}): ".format(
length=len(self.categories), dtype=dtype)
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if console.in_ipython_frontend():
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader)
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n"
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]" | Returns a string representation of the footer. |
8,042 | def av(self, data, lon_str=LON_STR, lat_str=LAT_STR,
land_mask_str=LAND_MASK_STR, sfc_area_str=SFC_AREA_STR):
ts = self.ts(data, lon_str=lon_str, lat_str=lat_str,
land_mask_str=land_mask_str, sfc_area_str=sfc_area_str)
if YEAR_STR not in ts.coords:
return ts
else:
return ts.mean(YEAR_STR) | Time-average of region-averaged data.
Parameters
----------
data : xarray.DataArray
The array to compute the regional time-average of
lat_str, lon_str, land_mask_str, sfc_area_str : str, optional
The name of the latitude, longitude, land mask, and surface area
coordinates, respectively, in ``data``. Defaults are the
corresponding values in ``aospy.internal_names``.
Returns
-------
xarray.DataArray
The region-averaged and time-averaged data. |
8,043 | def convert(outputfile, inputfile, to_format, from_format):
emb = word_embedding.WordEmbedding.load(
inputfile, format=_input_choices[from_format][1],
binary=_input_choices[from_format][2])
emb.save(outputfile, format=_output_choices[to_format][1],
binary=_output_choices[to_format][2]) | Convert pretrained word embedding file in one format to another. |
8,044 | def liftover(args):
p = OptionParser(liftover.__doc__)
p.add_option("--checkvalid", default=False, action="store_true",
help="Check minscore, period and length")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
refbed, fastafile = args
genome = pyfasta.Fasta(fastafile)
edits = []
fp = open(refbed)
for i, row in enumerate(fp):
s = STRLine(row)
seq = genome[s.seqid][s.start - 1: s.end].upper()
s.motif = get_motif(seq, len(s.motif))
s.fix_counts(seq)
if opts.checkvalid and not s.is_valid():
continue
edits.append(s)
if i % 10000 == 0:
print(i, "lines read", file=sys.stderr)
edits = natsorted(edits, key=lambda x: (x.seqid, x.start))
for e in edits:
print(str(e)) | %prog liftover lobstr_v3.0.2_hg38_ref.bed hg38.upper.fa
LiftOver CODIS/Y-STR markers. |
8,045 | def __safe_errback(self, room_data, err_condition, err_text):
method = room_data.errback
if method is not None:
try:
method(room_data.room, room_data.nick, err_condition, err_text)
except Exception as ex:
self.__logger.exception("Error calling back room creator: %s",
ex) | Safe use of the callback method, to avoid errors propagation
:param room_data: A RoomData object
:param err_condition: Category of error
:param err_text: Description of the error |
8,046 | def log_errors(f, self, *args, **kwargs):
try:
return f(self, *args, **kwargs)
except Exception:
self.log.error("Uncaught exception in %r" % f, exc_info=True) | decorator to log unhandled exceptions raised in a method.
For use wrapping on_recv callbacks, so that exceptions
do not cause the stream to be closed. |
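The `(f, self, *args, **kwargs)` signature is the calling convention of the `decorator` package, which passes the wrapped callable as the first argument; a minimal sketch of applying such a decorator, assuming the original module wraps it with `@decorator`:

```python
import logging
from decorator import decorator

@decorator
def log_errors(f, self, *args, **kwargs):
    try:
        return f(self, *args, **kwargs)
    except Exception:
        self.log.error("Uncaught exception in %r" % f, exc_info=True)

class Handler(object):
    log = logging.getLogger("handler")

    @log_errors
    def on_recv(self, msg):
        raise RuntimeError("boom")

Handler().on_recv(None)  # the exception is logged, not propagated
```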
8,047 | def stop_socket(self, conn_key):
if conn_key not in self._conns:
return
self._conns[conn_key].factory = WebSocketClientFactory(self.STREAM_URL + 'tmp_path')
self._conns[conn_key].disconnect()
del(self._conns[conn_key])
if len(conn_key) >= 60 and conn_key[:60] == self._user_listen_key:
self._stop_user_socket() | Stop a websocket given the connection key
:param conn_key: Socket connection key
:type conn_key: string
:returns: connection key string if successful, False otherwise |
8,048 | def readlines(self):
if self.grammar:
tot = []
while 1:
line = self.file.readline()
if not line:
break
tot.append(line)
return tot
return self.file.readlines() | Returns a list of all lines (optionally parsed) in the file. |
8,049 | def is_nameserver(self, path):
node = self.get_node(path)
if not node:
return False
return node.is_nameserver | Is the node pointed to by @ref path a name server (specialisation
of directory nodes)? |
8,050 | def get_fermi_interextrapolated(self, c, T, warn=True, c_ref=1e10, **kwargs):
try:
return self.get_fermi(c, T, **kwargs)
except ValueError as e:
if warn:
warnings.warn(str(e))
if abs(c) < c_ref:
if abs(c) < 1e-10:
c = 1e-10
f2 = self.get_fermi_interextrapolated(max(10, abs(c) * 10.), T, warn=False, **kwargs)
f1 = self.get_fermi_interextrapolated(-max(10, abs(c) * 10.), T, warn=False, **kwargs)
c2 = np.log(abs(1 + self.get_doping(f2, T)))
c1 = -np.log(abs(1 + self.get_doping(f1, T)))
slope = (f2 - f1) / (c2 - c1)
return f2 + slope * (np.sign(c) * np.log(abs(1 + c)) - c2)
else:
f_ref = self.get_fermi_interextrapolated(np.sign(c) * c_ref, T, warn=False, **kwargs)
f_new = self.get_fermi_interextrapolated(c / 10., T, warn=False, **kwargs)
clog = np.sign(c) * np.log(abs(c))
c_newlog = np.sign(c) * np.log(abs(self.get_doping(f_new, T)))
slope = (f_new - f_ref) / (c_newlog - np.sign(c) * 10.)
return f_new + slope * (clog - c_newlog) | Similar to get_fermi except that when get_fermi fails to converge,
an interpolated or extrapolated fermi (depending on c) is returned with
the assumption that the fermi level changes linearly with log(abs(c)).
Args:
c (float): doping concentration in 1/cm3. c<0 represents n-type
doping and c>0 p-type doping (i.e. majority carriers are holes)
T (float): absolute temperature in Kelvin
warn (bool): whether to warn for the first time when no fermi can
be found.
c_ref (float): a doping concentration where get_fermi returns a
value without error for both c_ref and -c_ref
**kwargs: see keyword arguments of the get_fermi function
Returns (float): the fermi level that is possibly interapolated or
extrapolated and must be used with caution. |
8,051 | def compile(self, X, verbose=False):
if self.feature >= X.shape[1]:
raise ValueError('term requires feature {}, '
'but X has only {} dimensions'
.format(self.feature, X.shape[1]))
if self.by is not None and self.by >= X.shape[1]:
raise ValueError('by variable requires feature {}, '
'but X has only {} dimensions'
.format(self.by, X.shape[1]))
if not hasattr(self, 'edge_knots_'):
self.edge_knots_ = gen_edge_knots(X[:, self.feature],
self.dtype,
verbose=verbose)
return self | method to validate and prepare data-dependent parameters
Parameters
---------
X : array-like
Input dataset
verbose : bool
whether to show warnings
Returns
-------
None |
8,052 | def apply_relationships(self, data, obj):
relationships_to_apply = []
relationship_fields = get_relationships(self.resource.schema, model_field=True)
for key, value in data.items():
if key in relationship_fields:
related_model = getattr(obj.__class__, key).property.mapper.class_
schema_field = get_schema_field(self.resource.schema, key)
related_id_field = self.resource.schema._declared_fields[schema_field].id_field
if isinstance(value, list):
related_objects = []
for identifier in value:
related_object = self.get_related_object(related_model, related_id_field, {'id': identifier})
related_objects.append(related_object)
relationships_to_apply.append({'field': key, 'value': related_objects})
else:
related_object = None
if value is not None:
related_object = self.get_related_object(related_model, related_id_field, {'id': value})
relationships_to_apply.append({'field': key, 'value': related_object})
for relationship in relationships_to_apply:
setattr(obj, relationship['field'], relationship['value']) | Apply relationship provided by data to obj
:param dict data: data provided by the client
:param DeclarativeMeta obj: the sqlalchemy object to plug relationships to
:return boolean: True if relationship have changed else False |
8,053 | def _parse_caps_bank(bank):
result = {
'id': int(bank.get('id')),
'level': int(bank.get('level')),
'type': bank.get('type'),
'size': "{} {}".format(bank.get('size'), bank.get('unit')),
'cpus': bank.get('cpus')
}
controls = []
for control in bank.findall('control'):
unit = control.get('unit')
result_control = {
'granularity': "{} {}".format(control.get('granularity'), unit),
'type': control.get('type'),
'maxAllocs': int(control.get('maxAllocs'))
}
minimum = control.get('min')
if minimum:
result_control['min'] = "{} {}".format(minimum, unit)
controls.append(result_control)
if controls:
result['controls'] = controls
return result | Parse the <bank> element of the connection capabilities XML. |
8,054 | def water(target, temperature='pore.temperature', salinity='pore.salinity'):
T = target[temperature]
if salinity in target.keys():
S = target[salinity]
else:
S = 0
a1 = -5.8002206E+03
a2 = 1.3914993E+00
a3 = -4.8640239E-02
a4 = 4.1764768E-05
a5 = -1.4452093E-08
a6 = 6.5459673E+00
Pv_w = np.exp((a1/T) + a2 + a3*T + a4*T**2 + a5*T**3 + a6*np.log(T))
Pv_sw = Pv_w/(1+0.57357*(S/(1000-S)))
value = Pv_sw
return value | Calculates vapor pressure of pure water or seawater given by [1] based on
Raoult's law. The pure water vapor pressure is given by [2]
Parameters
----------
target : OpenPNM Object
The object for which these values are being calculated. This
controls the length of the calculated array, and also provides
access to other necessary thermofluid properties.
temperature : string
The dictionary key containing the phase temperature values
salinity : string
The dictionary key containing the phase salinity values
Returns
-------
The vapor pressure of water/seawater in [Pa]
Notes
-----
T must be in K, and S in g of salt per kg of phase, or ppt (parts per
thousand)
VALIDITY: 273 < T < 473 K; 0 < S < 240 g/kg;
ACCURACY: 0.5 %
References
----------
[1] Sharqawy M. H., Lienhard J. H., and Zubair, S. M., Desalination and
Water Treatment, 2010.
[2] ASHRAE handbook: Fundamentals, ASHRAE; 2005. |
8,055 | def sort_protein_group(pgroup, sortfunctions, sortfunc_index):
pgroup_out = []
subgroups = sortfunctions[sortfunc_index](pgroup)
sortfunc_index += 1
for subgroup in subgroups:
if len(subgroup) > 1 and sortfunc_index < len(sortfunctions):
pgroup_out.extend(sort_protein_group(subgroup,
sortfunctions,
sortfunc_index))
else:
pgroup_out.extend(subgroup)
return pgroup_out | Recursive function that sorts protein group by a number of sorting
functions. |
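A sketch of the expected `sortfunctions` contract: each function takes a group and returns ordered subgroups, and ties fall through to the next function. All names and fields here are hypothetical:

```python
def by_peptide_count(pgroup):
    buckets = {}
    for protein in pgroup:
        buckets.setdefault(protein["n_peptides"], []).append(protein)
    return [buckets[k] for k in sorted(buckets, reverse=True)]

def by_coverage(pgroup):
    buckets = {}
    for protein in pgroup:
        buckets.setdefault(protein["coverage"], []).append(protein)
    return [buckets[k] for k in sorted(buckets, reverse=True)]

proteins = [
    {"name": "P1", "n_peptides": 3, "coverage": 0.2},
    {"name": "P2", "n_peptides": 3, "coverage": 0.5},
    {"name": "P3", "n_peptides": 5, "coverage": 0.1},
]
ranked = sort_protein_group(proteins, [by_peptide_count, by_coverage], 0)
# P3 first (most peptides), then P2 before P1 (coverage breaks the tie)
```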
8,056 | def get_kgXref_hg19(self):
if self._kgXref_hg19 is None:
self._kgXref_hg19 = self._load_kgXref(self._get_path_kgXref_hg19())
return self._kgXref_hg19 | Get UCSC kgXref table for Build 37.
Returns
-------
pandas.DataFrame
kgXref table if loading was successful, else None |
8,057 | def ReadCronJobs(self, cronjob_ids=None, cursor=None):
query = ("SELECT job, UNIX_TIMESTAMP(create_time), enabled, "
"forced_run_requested, last_run_status, "
"UNIX_TIMESTAMP(last_run_time), current_run_id, state, "
"UNIX_TIMESTAMP(leased_until), leased_by "
"FROM cron_jobs")
if cronjob_ids is None:
cursor.execute(query)
return [self._CronJobFromRow(row) for row in cursor.fetchall()]
query += " WHERE job_id IN (%s)" % ", ".join(["%s"] * len(cronjob_ids))
cursor.execute(query, cronjob_ids)
res = []
for row in cursor.fetchall():
res.append(self._CronJobFromRow(row))
if len(res) != len(cronjob_ids):
missing = set(cronjob_ids) - set([c.cron_job_id for c in res])
raise db.UnknownCronJobError("CronJob(s) with id(s) %s not found." %
missing)
return res | Reads all cronjobs from the database. |
8,058 | def overwrite_file_check(args, filename):
if not args['overwrite'] and os.path.exists(filename):
if args['no_overwrite']:
overwrite = False
else:
try:
overwrite = confirm_input(input(
'Overwrite {0}? (y/n): '.format(filename)))
except (KeyboardInterrupt, EOFError):
sys.exit()
if not overwrite:
new_filename = modify_filename_id(filename)
while os.path.exists(new_filename):
new_filename = modify_filename_id(new_filename)
return new_filename
return filename | If filename exists, overwrite or modify it to be unique. |
8,059 | def calculate_batch_normalization_output_shapes(operator):
check_input_and_output_numbers(operator, input_count_range=1, output_count_range=1)
check_input_and_output_types(operator, good_input_types=[FloatTensorType])
input_shape = operator.inputs[0].type.shape
if len(input_shape) not in [2, 4]:
raise RuntimeError('Input must be a 2-D tensor [N, C] or a 4-D tensor [N, C, H, W]')
operator.outputs[0].type.shape = copy.deepcopy(operator.inputs[0].type.shape) | Allowed input/output patterns are
1. [N, C] ---> [N, C]
2. [N, C, H, W] ---> [N, C, H, W]
This operator just uses the operator input shape as its output shape. |
8,060 | def _set_labels(self, catalogue):
with self._conn:
self._conn.execute(constants.UPDATE_LABELS_SQL, [''])
labels = {}
for work, label in catalogue.items():
self._conn.execute(constants.UPDATE_LABEL_SQL, [label, work])
cursor = self._conn.execute(
constants.SELECT_TEXT_TOKEN_COUNT_SQL, [work])
token_count = cursor.fetchone()['token_count']
labels[label] = labels.get(label, 0) + token_count
return labels | Returns a dictionary of the unique labels in `catalogue` and the
count of all tokens associated with each, and sets the record
of each Text to its corresponding label.
Texts that do not have a label specified are set to the empty
string.
Token counts are included in the results to allow for
semi-accurate sorting based on corpora size.
:param catalogue: catalogue matching filenames to labels
:type catalogue: `Catalogue`
:rtype: `dict` |
8,061 | def pin_assets(self, file_or_dir_path: Path) -> List[Dict[str, str]]:
if file_or_dir_path.is_dir():
asset_data = [dummy_ipfs_pin(path) for path in file_or_dir_path.glob("*")]
elif file_or_dir_path.is_file():
asset_data = [dummy_ipfs_pin(file_or_dir_path)]
else:
raise FileNotFoundError(
f"{file_or_dir_path} is not a valid file or directory path."
)
return asset_data | Return a dict containing the IPFS hash, file name, and size of a file. |
8,062 | def register_component(self, path):
component = foundations.strings.get_splitext_basename(path)
LOGGER.debug("> Current Component: .".format(component))
profile = Profile(file=path)
if profile.initializeProfile():
if os.path.isfile(os.path.join(profile.directory, profile.package) + ".py") or \
os.path.isdir(os.path.join(profile.directory, profile.package)) or \
os.path.basename(profile.directory) == profile.package:
self.__components[profile.name] = profile
return True
else:
raise manager.exceptions.ComponentModuleError(
"{0} | has no associated module and has been rejected!".format(self.__class__.__name__,
component))
else:
raise manager.exceptions.ComponentProfileError(
"{0} | is not a valid Component and has been rejected!".format(self.__class__.__name__,
component)) | Registers a Component using given path.
Usage::
>>> manager = Manager()
>>> manager.register_component("tests_component_a.rc")
True
>>> manager.components
{u'core.tests_component_a': <manager.components_manager.Profile object at 0x11c9eb0>}
:param path: Component path.
:type path: unicode
:return: Method success.
:rtype: bool |
8,063 | def get_features(cls, entry):
    features = []
    for feature in entry.iterfind("./feature"):
        # Attribute/key names below were stripped in extraction; 'type' is
        # the only required attribute on a UniProt <feature> node, so this
        # is a plausible reconstruction, not the verified original.
        feature_dict = {
            'description': feature.attrib.get('description'),
            'type_': feature.attrib['type'],
            'identifier': feature.attrib.get('id')
        }
        features.append(models.Feature(**feature_dict))
    return features | get list of `models.Feature` from XML node entry
:param entry: XML node entry
:return: list of :class:`pyuniprot.manager.models.Feature` |
8,064 | def get_sort_limit():
    limit = _.convert(get("sort_limit"), _.to_int)
    if (limit < 1):
        limit = None
    return limit | returns the 'sort_limit' from the request |
8,065 | def _match_iter_generic(self, path_elements, start_at):
    length = len(path_elements)
    if self.bound_start:
        end = 1
    else:
        end = length - self.length + 1
    if self.bound_end:
        start = length - self.length
    else:
        start = start_at
    if start > end or start < start_at or end > length - self.length + 1:
        return
    for index in range(start, end):
        matched = True
        i = index
        for matcher in self.elements:
            element = path_elements[i]
            i += 1
            if not matcher.match(element):
                matched = False
                break
        if matched:
            yield index + self.length | Implementation of match_iter for >1 self.elements |
8,066 | def point_plane_distance(points,
                          plane_normal,
                          plane_origin=[0.0, 0.0, 0.0]):
    points = np.asanyarray(points, dtype=np.float64)
    w = points - plane_origin
    distances = np.dot(plane_normal, w.T) / np.linalg.norm(plane_normal)
    return distances | The minimum perpendicular distance of a point to a plane.
Parameters
-----------
points: (n, 3) float, points in space
plane_normal: (3,) float, normal vector
plane_origin: (3,) float, plane origin in space
Returns
------------
distances: (n,) float, distance from point to plane |
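A quick worked check of the entry above (numpy imported explicitly here; the function body assumes a module-level `np`):

import numpy as np

pts = np.array([[0.0, 0.0, 1.0], [0.0, 0.0, -2.0], [3.0, 4.0, 0.0]])
normal = np.array([0.0, 0.0, 2.0])  # non-unit normal; the function normalizes
print(point_plane_distance(pts, normal))  # -> [ 1. -2.  0.] for the z=0 plane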
8,067 | def add_edge(self, vertex1, vertex2, multicolor, merge=True, data=None):
    self.__add_bgedge(BGEdge(vertex1=vertex1, vertex2=vertex2, multicolor=multicolor, data=data), merge=merge) | Creates a new :class:`bg.edge.BGEdge` object from supplied information and adds it to current instance of :class:`BreakpointGraph`.
Proxies a call to :meth:`BreakpointGraph._BreakpointGraph__add_bgedge` method.
:param vertex1: first vertex instance out of two in current :class:`BreakpointGraph`
:type vertex1: any hashable object
:param vertex2: second vertex instance out of two in current :class:`BreakpointGraph`
:type vertex2: any hashable object
:param multicolor: an information about multi-colors of added edge
:type multicolor: :class:`bg.multicolor.Multicolor`
:param merge: a flag to merge supplied information from multi-color perspective into a first existing edge between two supplied vertices
:type merge: ``Boolean``
:return: ``None``, performs inplace changes |
8,068 | def _get_contours(self):
    contours = []
    current_contour = None
    empty = True
    for i, el in enumerate(self._get_elements()):
        if el.cmd == MOVETO:
            if not empty:
                contours.append(current_contour)
            current_contour = BezierPath(self._bot)
            current_contour.moveto(el.x, el.y)
            empty = True
        elif el.cmd == LINETO:
            empty = False
            current_contour.lineto(el.x, el.y)
        elif el.cmd == CURVETO:
            empty = False
            current_contour.curveto(el.c1x, el.c1y, el.c2x, el.c2y, el.x, el.y)
        elif el.cmd == CLOSE:
            current_contour.closepath()
    if not empty:
        contours.append(current_contour)
    return contours | Returns a list of contours in the path, as BezierPath objects.
A contour is a sequence of lines and curves separated from the next contour by a MOVETO.
For example, the glyph "o" has two contours: the inner circle and the outer circle. |
8,069 | def keys(self, element=None, mode=None):
    if mode is None:
        return super().keys()
    element = self._parse_element(element=element)
    # Literal values below were stripped in extraction; reconstructed from
    # the docstring ('labels', 'props', 'all') and the 'pore.x'/'throat.x'
    # key naming convention.
    allowed = ['labels', 'props']
    if 'all' in mode:
        mode = allowed
    mode = self._parse_mode(mode=mode, allowed=allowed)
    keys = super().keys()
    temp = []
    if 'props' in mode:
        temp.extend([i for i in keys if self.get(i).dtype != bool])
    if 'labels' in mode:
        temp.extend([i for i in keys if self.get(i).dtype == bool])
    if element:
        temp = [i for i in temp if i.split('.')[0] in element]
    return temp | This subclass works exactly like ``keys`` when no arguments are passed,
but optionally accepts an ``element`` and/or a ``mode``, which filters
the output to only the requested keys.
The default behavior is exactly equivalent to the normal ``keys``
method.
Parameters
----------
element : string
Can be either 'pore' or 'throat', which limits the returned list of
keys to only 'pore' or 'throat' keys. If neither is given, then
both are assumed.
mode : string (optional, default is None)
Controls which keys are returned. Options are:
**``None``** : This mode (default) bypasses this subclassed method
and just returns the normal KeysView object.
**'labels'** : Limits the returned list of keys to only 'labels'
(boolean arrays)
**'props'** : Limits the returned list of keys to only 'props'
(numerical arrays).
**'all'** : Returns both 'labels' and 'props'. This is equivalent
to sending a list of both 'labels' and 'props'.
See Also
--------
props
labels
Notes
-----
This subclass can be used to get dictionary keys of specific kinds of
data. Its use augments ``props`` and ``labels`` by returning a list
containing both types, but possibly limited by element type ('pores'
or 'throats'.)
Examples
--------
>>> import openpnm as op
>>> pn = op.network.Cubic([5, 5, 5])
>>> pn.keys(mode='props') # Get all props
['pore.coords', 'throat.conns']
>>> pn.keys(mode='props', element='pore') # Get only pore props
['pore.coords'] |
8,070 | def mail_message(smtp_server, message, from_address, rcpt_addresses):
    # Stripped literals reconstructed: a leading '/' means smtp_server is a
    # local sendmail-style command to pipe the message into.
    if smtp_server[0] == '/':
        p = os.popen(smtp_server, 'w')
        p.write(message)
        p.close()
    else:
        import smtplib
        server = smtplib.SMTP(smtp_server)
        server.sendmail(from_address, rcpt_addresses, message)
        server.quit() | Send mail using smtp. |
8,071 | def pre(*content, sep='\n'):  # separator default reconstructed; it was stripped in extraction
    return _md(_join(*content, sep=sep), symbols=MD_SYMBOLS[3]) | Make mono-width text block (Markdown)
:param content:
:param sep:
:return: |
8,072 | def lookup(cls, backend, obj):
    ids = set([el for el in obj.traverse(lambda x: x.id) if el is not None])
    if len(ids) == 0:
        raise Exception("Object does not own a custom options tree")
    elif len(ids) != 1:
        idlist = ",".join([str(el) for el in sorted(ids)])
        raise Exception("Object contains elements combined across "
                        "multiple custom trees (ids %s)" % idlist)
    return cls._custom_options[backend][list(ids)[0]] | Given an object, lookup the corresponding customized option
tree if a single custom tree is applicable. |
8,073 | def geojson_polygon_to_mask(feature, shape, lat_idx, lon_idx):
    import matplotlib
    matplotlib.use('Agg')  # backend name reconstructed; a non-interactive backend is needed
    import matplotlib.pyplot as plt
    from matplotlib import patches
    import numpy as np
    if feature.geometry.type not in ('Polygon', 'MultiPolygon'):
        raise ValueError("Cannot handle feature of type " + feature.geometry.type)
    # NOTE: the body that rasterizes the polygon into `data` was lost in
    # extraction; only the guard clauses survive. Placeholder below.
    data = np.zeros(shape)
    return data | Convert a GeoJSON polygon feature to a numpy array
Args:
feature (pygeoj.Feature): polygon feature to draw
shape (tuple(int, int)): shape of 2D target numpy array to draw polygon in
lat_idx (func): function converting a latitude to the (fractional) row index in the map
lon_idx (func): function converting a longitude to the (fractional) column index in the map
Returns:
np.array: mask, background is zero, foreground is one |
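Because the rasterization body was lost, here is a minimal sketch of one way to fill such a mask, using matplotlib's Path containment test instead of the original (unknown) patches-based drawing; `coords` is one GeoJSON ring of [lon, lat] pairs:

import numpy as np
from matplotlib.path import Path

def polygon_to_mask_sketch(coords, shape, lat_idx, lon_idx):
    # Map each vertex to (col, row) pixel coordinates, then test every pixel.
    poly = Path([(lon_idx(lon), lat_idx(lat)) for lon, lat in coords])
    cols, rows = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]))
    pts = np.column_stack([cols.ravel(), rows.ravel()])
    return poly.contains_points(pts).reshape(shape).astype(float)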
8,074 | def mark_error(self, dispatch, error_log, message_cls):
    if message_cls.send_retry_limit is not None and (dispatch.retry_count + 1) >= message_cls.send_retry_limit:
        self.mark_failed(dispatch, error_log)
    else:
        dispatch.error_log = error_log
        # bucket key was stripped in extraction; 'error' is a guess
        self._st['error'].append(dispatch) | Marks a dispatch as having error or consequently as failed
if send retry limit for that message type is exhausted.
Should be used within send().
:param Dispatch dispatch: a Dispatch
:param str error_log: error message
:param MessageBase message_cls: MessageBase heir |
8,075 | def lookup(values, name=None):
    if name is None:
        name = 'Lookup Field'  # default display name reconstructed
    if values is None:
        raise ValueError('The values list must not be None')  # message reconstructed
    try:
        v = values.asList()
        values = v
    except AttributeError:
        values = values
    lookup_field = pp.oneOf(values)
    lookup_field.setName(name)
    lookup_field.setParseAction(lambda s: s[0].strip())
    lookup_field.leaveWhitespace()
    return lookup_field | Creates the grammar for a Lookup (L) field, accepting only values from a
list.
Like in the Alphanumeric field, the result will be stripped of all heading
and trailing whitespaces.
:param values: values allowed
:param name: name for the field
:return: grammar for the lookup field |
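A hedged usage sketch of the entry above (assumes pyparsing is imported as pp, matching the function body; the value list is made up):

import pyparsing as pp

field = lookup(['ORI', 'REV', 'ADA'], name='Transaction type')
print(field.parseString('ORI ')[0])  # -> 'ORI', whitespace stripped by the parse action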
8,076 | def _call(self, x, out=None):
    if out is None:
        return x * self.multiplicand
    elif not self.__range_is_field:
        if self.__domain_is_field:
            out.lincomb(x, self.multiplicand)
        else:
            out.assign(self.multiplicand * x)
    else:
        # error message stripped in extraction; reconstructed
        raise ValueError('can only use `out` when the range is not a field') | Multiply ``x`` and write to ``out`` if given. |
8,077 | def u2ver(self):
    try:
        # '.' literals reconstructed: keep major.minor of the version string
        part = urllib2.__version__.split('.', 1)
        return float('.'.join(part))
    except Exception, e:  # Python 2 syntax, as in the source library
        log.exception(e)
        return 0 | Get the major/minor version of the urllib2 lib.
@return: The urllib2 version.
@rtype: float |
8,078 | def callback(msg, _):
    nlh = nlmsg_hdr(msg)
    iface = ifinfomsg(nlmsg_data(nlh))
    hdr = IFLA_RTA(iface)
    remaining = ctypes.c_int(nlh.nlmsg_len - NLMSG_LENGTH(iface.SIZEOF))
    while RTA_OK(hdr, remaining):
        if hdr.rta_type == IFLA_IFNAME:
            # format string stripped in extraction; reconstructed
            print('{0}: {1}'.format(iface.ifi_index, get_string(RTA_DATA(hdr)).decode()))
        hdr = RTA_NEXT(hdr, remaining)
    return NL_OK | Callback function called by libnl upon receiving messages from the kernel.
Positional arguments:
msg -- nl_msg class instance containing the data sent by the kernel.
Returns:
An integer, value of NL_OK. It tells libnl to proceed with processing the next kernel message. |
8,079 | def find_templates(input_dir):
    templates = []

    def template_finder(result, dirname):
        for obj in os.listdir(dirname):
            if obj.endswith('.mustache'):  # suffix reconstructed from the docstring
                result.append(os.path.join(dirname, obj))

    dir_visitor(
        input_dir,
        functools.partial(template_finder, templates)
    )
    return templates | _find_templates_
traverse the input_dir structure and return a list
of template files ending with .mustache
:param input_dir: Path to start recursive search for
mustache templates
:returns: List of file paths corresponding to templates |
8,080 | def _handle_result(self, result):
    is_ephemeral = result.node.is_ephemeral_model
    if not is_ephemeral:
        self.node_results.append(result)
    node = CompileResultNode(**result.node)
    node_id = node.unique_id
    self.manifest.nodes[node_id] = node
    if result.error is not None:
        if is_ephemeral:
            cause = result
        else:
            cause = None
        self._mark_dependent_errors(node_id, result, cause) | Mark the result as completed, insert the `CompiledResultNode` into
the manifest, and mark any descendants (potentially with a 'cause' if
the result was an ephemeral model) as skipped. |
8,081 | def fill(self, color, start=0, end=-1):
    start = max(start, 0)
    if end < 0 or end >= self.numLEDs:
        end = self.numLEDs - 1
    for led in range(start, end + 1):
        self._set_base(led, color) | Fill the entire strip with RGB color tuple |
8,082 | def set_widgets(self):
    # Keyword names below were stripped in extraction; reconstructed from
    # the corresponding widget/variable names.
    source = self.parent.get_existing_keyword('source')
    if source or source == 0:
        self.leSource.setText(source)
    else:
        self.leSource.clear()
    source_scale = self.parent.get_existing_keyword('scale')
    if source_scale or source_scale == 0:
        self.leSource_scale.setText(source_scale)
    else:
        self.leSource_scale.clear()
    source_date = self.parent.get_existing_keyword('date')
    if source_date:
        self.ckbSource_date.setChecked(True)
        self.dtSource_date.setDateTime(source_date)
    else:
        self.ckbSource_date.setChecked(False)
        self.dtSource_date.clear()
    source_url = self.parent.get_existing_keyword('url')
    try:
        source_url = source_url.toString()
    except AttributeError:
        pass
    if source_url or source_url == 0:
        self.leSource_url.setText(source_url)
    else:
        self.leSource_url.clear()
    source_license = self.parent.get_existing_keyword('license')
    if source_license or source_license == 0:
        self.leSource_license.setText(source_license)
    else:
        self.leSource_license.clear() | Set widgets on the Source tab. |
8,083 | def find_files(self):
    # The stripped attribute name, URL names and dict key are
    # reconstructions; the original specifics were lost in extraction.
    if getattr(self, 'blueprint_name', None):
        for path in walk_directory(os.path.join(self.path, self.blueprint_name), ignore=self.project.EXCLUDES):
            yield 'static', {'path': path}
    for path in walk_directory(self.path, ignore=self.project.EXCLUDES):
        yield 'static', {'path': path} | Find all file paths for publishing, yield (urlname, kwargs) |
8,084 | def get_value(self, spreadsheet_id: str, range_name: str) -> dict:
    service = self.__get_service()
    result = service.spreadsheets().values().get(spreadsheetId=spreadsheet_id, range=range_name).execute()
    return result.get('values', []) | get value by range
:param spreadsheet_id:
:param range_name:
:return: |
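A hedged usage sketch for the entry above; `sheets` stands in for the wrapper instance and the spreadsheet ID is Google's public sample sheet:

rows = sheets.get_value('1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms',
                        'Class Data!A2:E')
for row in rows:
    print(row[0], row[4])  # each row is a list of cell strings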
8,085 | def get_hmac(password):
    salt = _security.password_salt
    if salt is None:
        # message text stripped in extraction; reconstructed
        raise RuntimeError(
            'The configuration value `SECURITY_PASSWORD_SALT` must not be '
            'None when the value of `SECURITY_PASSWORD_HASH` is set to "%s"'
            % _security.password_hash)
    h = hmac.new(encode_string(salt), encode_string(password), hashlib.sha512)
    return base64.b64encode(h.digest()) | Returns a Base64 encoded HMAC+SHA512 of the password signed with
the salt specified by ``SECURITY_PASSWORD_SALT``.
:param password: The password to sign |
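A self-contained sketch of the same signing scheme outside Flask-Security (the salt value here is made up):

import base64, hashlib, hmac

salt = b'example-salt'  # stand-in for SECURITY_PASSWORD_SALT
digest = hmac.new(salt, b'hunter2', hashlib.sha512).digest()
print(base64.b64encode(digest))  # what get_hmac would return for this salt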
8,086 | def preprocess(string):
    string = unicode(string, encoding="utf-8")  # Python 2 source
    # regex replacement and output encoding reconstructed
    return regex.sub('', string).encode('utf-8') | Preprocess a string: transform all diacritics and remove special characters other than the appropriate ones
:param string:
:return: |
8,087 | def most_read_creators_card(num=10):
    # The is_enabled() argument, chartify's score attribute and the dict
    # keys below were stripped in extraction; these are reconstructions.
    if spectator_apps.is_enabled('reading'):
        object_list = most_read_creators(num=num)
        object_list = chartify(object_list, 'num_readings', cutoff=1)
        return {
            'card_title': 'Most read authors',
            'score_attr': 'num_readings',
            'object_list': object_list,
        } | Displays a card showing the Creators who have the most Readings
associated with their Publications.
In spectator_core tags, rather than spectator_reading so it can still be
used on core pages, even if spectator_reading isn't installed. |
8,088 | def to_datetime(date_or_datetime):
    if isinstance(date_or_datetime, date) and \
            not isinstance(date_or_datetime, datetime):
        d = date_or_datetime
        # format strings reconstructed from the '%s-%s-%s' date pattern
        return datetime.strptime(
            '%s-%s-%s' % (d.year, d.month, d.day), '%Y-%m-%d')
    return date_or_datetime | Convert a date object to a datetime object,
or return as it is if it's not a date object.
:param date_or_datetime: date or datetime object
:return: a datetime object |
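A quick check of the entry above:

from datetime import date, datetime

print(to_datetime(date(2020, 3, 5)))          # -> 2020-03-05 00:00:00
print(to_datetime(datetime(2020, 3, 5, 7)))   # returned unchanged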
8,089 | def profiler(sorting=('cumulative',), stripDirs=True,
             limit=20, path='', autoclean=True):
    # The default sorting tuple, path and filename pattern were stripped in
    # extraction; the values here are plausible reconstructions.
    def decorated(func):
        @wraps(func)
        def wrapped(*args, **kwds):
            filename = os.path.join(path, '%s.prof' % func.__name__)
            prof = hotshot.Profile(filename)
            results = prof.runcall(func, *args, **kwds)
            prof.close()
            stats = hotshot.stats.load(filename)
            if stripDirs:
                stats.strip_dirs()
            stats.sort_stats(*sorting)
            stats.print_stats(limit)
            if autoclean:
                os.remove(filename)
            return results
        return wrapped
    return decorated | Creates a profile wrapper around a method to time out
all the operations that it runs through. For more
information, look into the hotshot Profile documentation
online for the built-in Python package.
:param sorting <tuple> ( <key>, .. )
:param stripDirs <bool>
:param limit <int>
:param path <str>
:param autoclean <bool>
:usage |from projex.decorators import profiler
|
|class A:
| @profiler() # must be called as a method
| def increment(amount, count = 1):
| return amount + count
|
|a = A()
|a.increment(10)
| |
8,090 | def as_set(self, include_weak=False):
    rv = set(self._strong)
    if include_weak:
        rv.update(self._weak)
    return rv | Convert the `ETags` object into a python set. Per default all the
weak etags are not part of this set. |
8,091 | def get_ISSNs(self):
    invalid_issns = set(self.get_invalid_ISSNs())
    return [
        self._clean_isbn(issn)
        for issn in self["022a"]
        if self._clean_isbn(issn) not in invalid_issns
    ] | Get list of VALID ISSNs (``022a``).
Returns:
list: List with *valid* ISSN strings. |
8,092 | def compute_samples(channels, nsamples=None):
    return islice(izip(*(imap(sum, izip(*channel)) for channel in channels)), nsamples) | create a generator which computes the samples.
essentially it creates a sequence of the sum of each function in the channel
at each sample in the file for each channel. |
8,093 | def grid_str(self, path=None, start=None, end=None,
             border=True, start_chr='s', end_chr='e',
             path_chr='x', empty_chr=' ', block_chr='#',
             show_weight=False):
    # Stripped literals (character defaults, border format string, '\n',
    # and the '+' for weights >= 10) are reconstructed from the docstring.
    data = ''
    if border:
        data = '+{}+'.format('-' * len(self.nodes[0]))
    for y in range(len(self.nodes)):
        line = ''
        for x in range(len(self.nodes[y])):
            node = self.nodes[y][x]
            if node == start:
                line += start_chr
            elif node == end:
                line += end_chr
            elif path and ((node.x, node.y) in path or node in path):
                line += path_chr
            elif node.walkable:
                weight = str(node.weight) if node.weight < 10 else '+'
                line += weight if show_weight else empty_chr
            else:
                line += block_chr
        if border:
            line = '|' + line + '|'
        if data:
            data += '\n'
        data += line
    if border:
        data += '\n+{}+'.format('-' * len(self.nodes[0]))
    return data | create a printable string from the grid using ASCII characters
:param path: list of nodes that show the path
:param start: start node
:param end: end node
:param border: create a border around the grid
:param start_chr: character for the start (default "s")
:param end_chr: character for the destination (default "e")
:param path_chr: character to show the path (default "x")
:param empty_chr: character for empty fields (default " ")
:param block_chr: character for blocking elements (default "#")
:param show_weight: instead of empty_chr show the cost of each empty
field (shows a + if the value of weight is > 10)
:return: |
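A hedged usage sketch of the entry above against the python-pathfinding Grid API (the matrix and node calls are illustrative):

matrix = [[1, 1, 1],
          [1, 0, 1],
          [1, 1, 1]]
grid = Grid(matrix=matrix)  # 0 = blocked, 1 = walkable
print(grid.grid_str(start=grid.node(0, 0), end=grid.node(2, 2)))
# +---+
# |s  |
# | # |
# |  e|
# +---+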
8,094 | def field2length(self, field, **kwargs):
    attributes = {}
    validators = [
        validator
        for validator in field.validators
        if (
            hasattr(validator, "min")
            and hasattr(validator, "max")
            and hasattr(validator, "equal")
        )
    ]
    is_array = isinstance(
        field, (marshmallow.fields.Nested, marshmallow.fields.List)
    )
    min_attr = "minItems" if is_array else "minLength"
    max_attr = "maxItems" if is_array else "maxLength"
    for validator in validators:
        if validator.min is not None:
            # note: the original used `hasattr(attributes, min_attr)`, which
            # is always False for a dict; membership is the intended check
            if min_attr in attributes:
                attributes[min_attr] = max(attributes[min_attr], validator.min)
            else:
                attributes[min_attr] = validator.min
        if validator.max is not None:
            if max_attr in attributes:
                attributes[max_attr] = min(attributes[max_attr], validator.max)
            else:
                attributes[max_attr] = validator.max
    for validator in validators:
        if validator.equal is not None:
            attributes[min_attr] = validator.equal
            attributes[max_attr] = validator.equal
    return attributes | Return the dictionary of OpenAPI field attributes for a set of
:class:`Length <marshmallow.validators.Length>` validators.
:param Field field: A marshmallow field.
:rtype: dict |
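A quick illustration of the validator side this converter consumes (field2length itself lives on apispec's converter class, so only the marshmallow input is shown):

from marshmallow import fields, validate

name = fields.String(validate=validate.Length(min=1, max=80))
# field2length(name) would yield {'minLength': 1, 'maxLength': 80}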
8,095 | def libvlc_video_set_logo_int(p_mi, option, value):
    # The stripped name is the function's own symbol, per the standard
    # vlc.py binding pattern.
    f = _Cfunctions.get('libvlc_video_set_logo_int', None) or \
        _Cfunction('libvlc_video_set_logo_int', ((1,), (1,), (1,),), None,
                   None, MediaPlayer, ctypes.c_uint, ctypes.c_int)
    return f(p_mi, option, value) | Set logo option as integer. Options that take a different type value
are ignored.
Passing libvlc_logo_enable as option value has the side effect of
starting (arg !0) or stopping (arg 0) the logo filter.
@param p_mi: libvlc media player instance.
@param option: logo option to set, values of libvlc_video_logo_option_t.
@param value: logo option value. |
8,096 | def select_name(source, name):
    return filter(lambda x: x.xml_name == name, select_elements(source)) | Yields all the elements with the given name
source - if an element, starts with all child elements in order; can also be any other iterator
name - will yield only elements with this name |
8,097 | def with_preference_param(self):
    user_hash = self._get_user_hash()
    if user_hash:
        return self.params(preference=user_hash)
    return self | Add the preference param to the ES request and return a new Search.
The preference param avoids the bouncing effect with multiple
replicas, documented on ES documentation.
See: https://www.elastic.co/guide/en/elasticsearch/guide/current
/_search_options.html#_preference for more information. |
8,098 | def glow_hparams():
    hparams = common_hparams.basic_params1()
    hparams.clip_grad_norm = None
    hparams.weight_decay = 0.0
    hparams.learning_rate_constant = 3e-4
    hparams.batch_size = 32
    hparams.add_hparam("level_scale", "prev_level")
    hparams.add_hparam("n_levels", 3)
    hparams.add_hparam("n_bits_x", 8)
    hparams.add_hparam("depth", 32)
    hparams.add_hparam("activation", "relu")
    hparams.add_hparam("coupling", "affine")
    hparams.add_hparam("coupling_width", 512)
    hparams.add_hparam("coupling_dropout", 0.0)
    hparams.add_hparam("top_prior", "single_conv")
    hparams.add_hparam("init_batch_size", 256)
    hparams.add_hparam("temperature", 1.0)
    return hparams | Glow Hparams. |
8,099 | def validate_name(self, name, agent):
    if name in self.bot_names_to_agent_dict and self.bot_names_to_agent_dict[name] is not agent:
        i = 0
        while True:
            if name + " (" + str(i) + ")" in self.bot_names_to_agent_dict and \
                    self.bot_names_to_agent_dict[name + " (" + str(i) + ")"] is not agent:
                i += 1
            else:
                value = name + " (" + str(i) + ")"
                return value
    else:
        return name | Finds the modification of name which is not yet in the list
:param name: the (new) name for the agent
:param agent: the agent instance to allow the same name as the previous one if necessary
:return: the best modification of name not yet in a listwidget |