Unnamed: 0 (int64, 0-389k) | code (string, lengths 26-79.6k) | docstring (string, lengths 1-46.9k) |
---|---|---|
17,300 | def load_data(cr, module_name, filename, idref=None, mode='init'):
    if idref is None:
        idref = {}
    logger.info('%s: loading %s' % (module_name, filename))
    _, ext = os.path.splitext(filename)
    pathname = os.path.join(module_name, filename)
    fp = tools.file_open(pathname)
    try:
        if ext == '.csv':
            noupdate = True
            tools.convert_csv_import(
                cr, module_name, pathname, fp.read(), idref, mode, noupdate)
        elif ext == '.yml':
            yaml_import(cr, module_name, fp, None, idref=idref, mode=mode)
        elif mode == 'init_no_create':
            for fp2 in _get_existing_records(cr, fp, module_name):
                tools.convert_xml_import(
                    cr, module_name, fp2, idref, mode='init',
                )
        else:
            tools.convert_xml_import(cr, module_name, fp, idref, mode=mode)
    finally:
        fp.close() | Load an xml, csv or yml data file from your post script. The usual case for
this is the
occurrence of newly added essential or useful data in the module that is
marked with "noupdate='1'" and without "forcecreate='1'" so that it will
not be loaded by the usual upgrade mechanism. Leaving the 'mode' argument
to its default 'init' will load the data from your migration script.
Theoretically, you could simply load a stock file from the module, but be
careful not to reinitialize any data that could have been customized.
Preferably, select only the newly added items. Copy these to a file
in your migrations directory and load that file.
Leave it to the user to actually delete existing resources that are
marked with 'noupdate' (other named items will be deleted
automatically).
:param module_name: the name of the module
:param filename: the path to the filename, relative to the module \
directory.
:param idref: optional hash with ?id mapping cache?
:param mode:
one of 'init', 'update', 'demo', 'init_no_create'.
Always use 'init' for adding new items from files that are marked with
'noupdate'. Defaults to 'init'.
'init_no_create' is a hack to load data for records which have
forcecreate=False set. As those records won't be recreated during the
update, standard Odoo would recreate the record if it was deleted,
but this will fail in cases where there are required fields to be
filled which are not contained in the data file. |
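A hedged usage sketch for the row above: loading a copied noupdate data file from an OpenUpgrade post-migration script. The module name, version folder and decorator arguments are illustrative assumptions, not taken from the source.

```python
# A sketch, not from the source; names and paths are hypothetical and the
# decorator arguments depend on the openupgradelib version in use.
from openupgradelib import openupgrade


@openupgrade.migrate(use_env=False)
def migrate(cr, version):
    # Load only the newly added noupdate records copied into the migration folder.
    openupgrade.load_data(
        cr, 'my_module', 'migrations/12.0.1.0.0/noupdate_changes.xml')
```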
17,301 | def set_beeper_mode(self, state):
values = {"desired_state": {"beeper_enabled": state}}
response = self.api_interface.set_device_state(self, values)
self._update_state_from_response(response) | :param state: a boolean of true (on) or false (off)
:return: nothing |
17,302 | def validate_bands(bands):
    if not isinstance(bands, list):
        raise TypeError('Parameter bands must be a "list"')
    valid_bands = list(range(1, 12)) + ['BQA']
    for band in bands:
        if band not in valid_bands:
            raise InvalidBandError('%s is not a valid band' % band) | Validate bands parameter. |
17,303 | def make_break(lineno, p):
global last_brk_linenum
if not OPTIONS.enableBreak.value or lineno == last_brk_linenum or is_null(p):
return None
last_brk_linenum = lineno
return make_sentence('BREAK', make_number(lineno, lineno, TYPE.uinteger)) | Checks if --enable-break is set, and if so, calls
BREAK keyboard interruption for this line if it has not been already
checked |
17,304 | def cache(self):
cache = self._cache or self.app.config.get()
return import_string(cache) if isinstance(cache, six.string_types) \
else cache | Return a cache instance. |
17,305 | def freqpoly_plot(data):
rel_data = OrderedDict()
for key, val in data.items():
tot = sum(val.values(), 0)
rel_data[key] = {k: v / tot for k, v in val.items()}
fplotconfig = {
: [
{: , : , : },
{: , : , : }
],
: , : ,
: dict(zip(data.keys(), MultiqcModule.get_colors(len(data))))
}
return linegraph.plot([data, rel_data], fplotconfig) | make freqpoly plot of merged read lengths |
17,306 | def datafield(*path, **kwargs):
if len(path) == 1 and isinstance(path[0], types.FunctionType):
return DataField(**kwargs)(*path)
else:
return DataField(*path, **kwargs) | A decorator that defines a field for data within a :class:`DataStruct`
The decorated function becomes the parser for a :class:`DataField` which
will be assigned to a data structure under the function's defined name.
Parameters
----------
path: tuple
The path to a value within a raw piece of data. If no path is provided
the path is assumed to be ``(self.this_name)`` where ``self.this_name``
is that of the attribute this field was defined under.
**kwargs:
By explicitly claiming ``path=None``, no assumptions are made about the
``path``, causing all the raw data to be passed to the handler for parsing. |
17,307 | def verify_sc_url(url: str) -> bool:
    parsed = urlsplit(url)
    scheme: str = parsed.scheme
    netloc: str = parsed.netloc
    path: str = parsed.path
    try:
        port = parsed.port
    except ValueError:
        port = None
    result = (scheme.lower() == 'https' and
              netloc.lower().split(':')[0] == 's3.amazonaws.com' and
              path.startswith('/echo.api/') and
              (port == 443 or port is None))
    return result | Verify signature certificate URL against Amazon Alexa requirements.
Each call of Agent passes incoming utterances batch through skills filter,
agent skills, skills processor. Batch of dialog IDs can be provided, in
other case utterances indexes in incoming batch are used as dialog IDs.
Args:
url: Signature certificate URL from SignatureCertChainUrl HTTP header.
Returns:
result: True if verification was successful, False if not. |
17,308 | def write_word(self, cmd, value):
self.bus.write_word_data(self.address, cmd, value)
self.log.debug(
"write_word: Wrote 0x%04X to command register 0x%02X" % (
value, cmd
)
) | Writes a 16-bit word to the specified command register |
17,309 | def encode(self, value):
kassert.is_of_types(value, Bits)
if len(value) % 8 != 0:
raise KittyException()
return self._encoder.encode(value.bytes) | :param value: value to encode |
17,310 | def split_list(alist, wanted_parts=1):
length = len(alist)
return [
alist[i * length // wanted_parts:(i + 1) * length // wanted_parts]
for i in range(wanted_parts)
] | A = [0,1,2,3,4,5,6,7,8,9]
print split_list(A, wanted_parts=1)
print split_list(A, wanted_parts=2)
print split_list(A, wanted_parts=8) |
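A self-contained Python 3 version of the docstring's example for the row above, with expected outputs shown as comments (the function body is restated so the snippet runs on its own).

```python
def split_list(alist, wanted_parts=1):
    length = len(alist)
    return [
        alist[i * length // wanted_parts:(i + 1) * length // wanted_parts]
        for i in range(wanted_parts)
    ]

A = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
print(split_list(A, wanted_parts=1))  # [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]]
print(split_list(A, wanted_parts=2))  # [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
print(split_list(A, wanted_parts=8))  # [[0], [1], [2], [3, 4], [5], [6], [7], [8, 9]]
```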
17,311 | def child(self, offset256):
a = bytes(self) + offset256
s = hashlib.sha256(a).digest()
return self.add(s) | Derive new public key from this key and a sha256 "offset" |
17,312 | def render_svg(self, render_id, words, arcs):
self.levels = self.get_levels(arcs)
self.highest_level = len(self.levels)
self.offset_y = self.distance / 2 * self.highest_level + self.arrow_stroke
self.width = self.offset_x + len(words) * self.distance
self.height = self.offset_y + 3 * self.word_spacing
self.id = render_id
words = [self.render_word(w["text"], w["tag"], i) for i, w in enumerate(words)]
arcs = [
self.render_arrow(a["label"], a["start"], a["end"], a["dir"], i)
for i, a in enumerate(arcs)
]
content = "".join(words) + "".join(arcs)
return TPL_DEP_SVG.format(
id=self.id,
width=self.width,
height=self.height,
color=self.color,
bg=self.bg,
font=self.font,
content=content,
dir=self.direction,
lang=self.lang,
) | Render SVG.
render_id (int): Unique ID, typically index of document.
words (list): Individual words and their tags.
arcs (list): Individual arcs and their start, end, direction and label.
RETURNS (unicode): Rendered SVG markup. |
17,313 | def __imap_search(self, ** criteria_dict):
self.imap_connect()
criteria = []
for key in criteria_dict:
if criteria_dict[key] is True:
criteria.append( % key)
else:
criteria.append( % (key, criteria_dict[key]))
status, msg_nums = self.mailbox.search(, * criteria)
self.imap_disconnect()
if 0 == len(msg_nums):
msg_nums = []
if in status:
return self.__parse_imap_search_result(msg_nums)
else:
raise EmailException("IMAP status is " + str(status)) | Searches for query in the given IMAP criteria and returns
the message numbers that match as a list of strings.
Criteria without values (eg DELETED) should be keyword args
with KEY=True, or else not passed. Criteria with values should
be keyword args of the form KEY="VALUE" where KEY is a valid
IMAP key.
IMAP default is to AND all criteria together. We don't support
other logic quite yet.
All valid keys: ALL, ANSWERED, BCC <string>, BEFORE <string>,
BODY <string>, CC <string>, DELETED, DRAFT, FLAGGED, FROM
<string>, HEADER <field-name> <string> (UNTESTED), KEYWORD
<flag>, LARGER <n>, NEW, NOT <search-key>, OLD, ON <date>,
OR <search-key1> <search-key2> (UNTESTED), RECENT, SEEN,
SENTBEFORE <date>, SENTON <date>, SENTSINCE <date>, SINCE <date>,
SMALLER <n>, SUBJECT <string>, TEXT <string>, TO <string>,
UID <sequence set>, UNANSWERED, UNDELETED, UNDRAFT, UNFLAGGED,
UNKEYWORD <flag>, UNSEEN.
For details on keys and their values, see
http://tools.ietf.org/html/rfc3501#section-6.4.4
:param criteria_dict: dictionary of search criteria keywords
:raises: EmailException if something in IMAP breaks
:returns: List of message numbers as strings matched by given criteria |
17,314 | def inverted(self):
inverse_type = globals()[self._inverse_type]
return inverse_type(self._center_longitude, self._center_latitude,
self._resolution) | Return the inverse of the transform. |
17,315 | def get_volumes(self):
vols = [self.find_volume(name) for name in self.virsp.listVolumes()]
return vols | Return a list of all Volumes in this Storage Pool |
17,316 | def _determine_auth_mechanism(username, password, delegation):
if re.match(, username) is not None:
if delegation is True:
raise Exception()
else:
raise Exception()
legacy = re.match(, username)
if legacy is not None:
if not legacy.group(1):
raise Exception()
if not legacy.group(2):
raise Exception()
if delegation is True:
return HttpCredSSPAuth(legacy.group(1), legacy.group(2), password)
else:
return HttpNtlmAuth(legacy.group(1), legacy.group(2), password)
return HttpNtlmAuth(, username, password) | if the username contains an '@' sign we will use kerberos
if the username contains a '/' we will use ntlm
either NTLM or Kerberos. In fact it's basically always Negotiate. |
17,317 | def markup_fragment(source, encoding=None):
    doc = parse(source, encoding=encoding)
    frag = doc.html.body
    return frag | Parse a fragment of markup in HTML mode, and return a bindery node
Warning: if you pass a string, you must make sure it's a byte string, not a Unicode object. You might also want to wrap it with amara.lib.inputsource.text if it's not obviously XML or HTML (for example it could be confused with a file name)
from amara.lib import inputsource
from amara.bindery import html
doc = html.markup_fragment(inputsource.text('XXX<html><body onload="" color="white"><p>Spam!<p>Eggs!</body></html>YYY'))
See also: http://wiki.xml3k.org/Amara2/Tagsoup |
17,318 | def _validate_method_decoration(meta, class_):
super_mro = class_.__mro__[1:]
own_methods = ((name, member)
for name, member in class_.__dict__.items()
if is_method(member))
for name, method in own_methods:
shadowed_method, base_class = next(
((getattr(base, name), base)
for base in super_mro if hasattr(base, name)),
(None, None)
)
if meta._is_override(method):
if not shadowed_method:
raise ClassError("unnecessary @override on %s.%s" % (
class_.__name__, name), class_=class_)
if meta._is_final(shadowed_method):
raise ClassError(
"illegal @override on a @final method %s.%s" % (
base_class.__name__, name), class_=class_)
override_base = meta._get_override_base(method)
if override_base and base_class is not override_base:
if is_class(override_base):
raise ClassError(
"incorrect override base: expected %s, got %s" % (
base_class.__name__, override_base.__name__))
else:
raise ClassError(
"invalid override base specified: %s" % (
override_base,))
setattr(class_, name, method.method)
else:
if shadowed_method and name not in meta.OVERRIDE_EXEMPTIONS:
if meta._is_final(shadowed_method):
msg = "%s.%s is hiding a @final method %s.%s" % (
class_.__name__, name, base_class.__name__, name)
else:
msg = ("overridden method %s.%s "
"must be marked with @override" % (
class_.__name__, name))
raise ClassError(msg, class_=class_) | Validate the usage of ``@override`` and ``@final`` modifiers
on methods of the given ``class_``. |
17,319 | def setmonitor(self, enable=True):
    if enable:
        res = self.setmode('monitor')
    else:
        res = self.setmode('managed')
    if not res:
        log_runtime.error("Npcap WlanHelper returned with an error code !")
    self.cache_mode = None
    tmp = self.cache_mode = self.ismonitor()
    return tmp if enable else (not tmp) | Alias for setmode('monitor') or setmode('managed')
Only available with Npcap |
17,320 | def login(config, username=None, password=None, email=None, url=None, client=None, *args, **kwargs):
try:
c = (_get_client(config) if not client else client)
lg = c.login(username, password, email, url)
print "%s logged to %s"%(username,(url if url else "default hub"))
except Exception as e:
utils.error("%s can't login to repo %s: %s"%(username,(url if url else "default repo"),e))
return False
return True | Wrapper to the docker.py login method |
17,321 | def update(self):
self._controller.update(self._id, wake_if_asleep=False)
data = self._controller.get_charging_params(self._id)
if data and (time.time() - self.__manual_update_time > 60):
if data[] != "Charging":
self.__charger_state = False
else:
self.__charger_state = True | Update the charging state of the Tesla Vehicle. |
17,322 | def _apply_mt(self, doc_loader, parallelism, **kwargs):
if not Meta.postgres:
raise ValueError("Fonduer must use PostgreSQL as a database backend.")
def fill_input_queue(in_queue, doc_loader, terminal_signal):
for doc in doc_loader:
in_queue.put(doc)
in_queue.put(terminal_signal)
manager = Manager()
in_queue = manager.Queue()
out_queue = JoinableQueue()
total_count = len(doc_loader)
for i in range(parallelism):
udf = self.udf_class(
in_queue=in_queue,
out_queue=out_queue,
worker_id=i,
**self.udf_init_kwargs,
)
udf.apply_kwargs = kwargs
self.udfs.append(udf)
for udf in self.udfs:
udf.start()
terminal_signal = UDF.QUEUE_CLOSED
in_queue_filler = Process(
target=fill_input_queue, args=(in_queue, doc_loader, terminal_signal)
)
in_queue_filler.start()
count_parsed = 0
while count_parsed < total_count:
y = out_queue.get()
if y == UDF.TASK_DONE:
count_parsed += 1
if self.pb is not None:
self.pb.update(1)
else:
raise ValueError("Got non-sentinal output.")
in_queue_filler.join()
in_queue.put(UDF.QUEUE_CLOSED)
for udf in self.udfs:
udf.join()
for udf in self.udfs:
udf.terminate()
self.udfs = [] | Run the UDF multi-threaded using python multiprocessing |
17,323 | def artist_commentary_revert(self, id_, version_id):
params = {: version_id}
return self._get(.format(id_),
params, method=, auth=True) | Revert artist commentary (Requires login) (UNTESTED).
Parameters:
id_ (int): The artist commentary id.
version_id (int): The artist commentary version id to
revert to. |
17,324 | def getaccountaddress(self, user_id=""):
address = self.rpc.call("getaccountaddress", user_id)
self.logger.debug("Your", self.coin, "address is", address)
return address | Get the coin address associated with a user id.
If the specified user id does not yet have an address for this
coin, then generate one.
Args:
user_id (str): this user's unique identifier
Returns:
str: Base58Check address for this account |
17,325 | def config(name, reset=False, **kwargs):
portpath = _check_portname(name)
if reset:
rmconfig(name)
configuration = showconfig(name, dict_return=True)
if not configuration:
raise CommandExecutionError(
{0}\.format(name)
)
pkg = next(iter(configuration))
conf_ptr = configuration[pkg]
opts = dict(
(six.text_type(x), _normalize(kwargs[x]))
for x in kwargs
if not x.startswith()
)
bad_opts = [x for x in opts if x not in conf_ptr]
if bad_opts:
raise SaltInvocationError(
.format(name, .join(bad_opts))
)
bad_vals = [
.format(x, y) for x, y in six.iteritems(opts)
if y not in (, )
]
if bad_vals:
raise SaltInvocationError(
.format(.join(bad_vals))
)
conf_ptr.update(opts)
_write_options(name, configuration)
new_config = showconfig(name, dict_return=True)
try:
new_config = new_config[next(iter(new_config))]
except (StopIteration, TypeError):
return False
return all(conf_ptr[x] == new_config.get(x) for x in conf_ptr) | Modify configuration options for a given port. Multiple options can be
specified. To see the available options for a port, use
:mod:`ports.showconfig <salt.modules.freebsdports.showconfig>`.
name
The port name, in ``category/name`` format
reset : False
If ``True``, runs a ``make rmconfig`` for the port, clearing its
configuration before setting the desired options
CLI Examples:
.. code-block:: bash
salt '*' ports.config security/nmap IPV6=off |
17,326 | def validate_sceneInfo(self):
if self.sceneInfo.prefix not in self.__prefixesValid:
raise WrongSceneNameError(
% (self.sceneInfo.name, self.sceneInfo.prefix)) | Check whether sceneInfo is valid to download from AWS Storage. |
17,327 | def _responsify(api_spec, error, status):
result_json = api_spec.model_to_json(error)
r = jsonify(result_json)
r.status_code = status
return r | Take a bravado-core model representing an error, and return a Flask Response
with the given error code and error instance as body |
17,328 | def get_disabled():
services = []
for daemon, is_enabled in six.iteritems(_get_rc()):
if not is_enabled:
services.append(daemon)
return sorted(set(get_all()) & set(services)) | .. versionadded:: 2014.7.0
Return a set of services that are installed but disabled
CLI Example:
.. code-block:: bash
salt '*' service.get_disabled |
17,329 | def get_minimum_span(low, high, span):
if is_number(low) and low == high:
if isinstance(low, np.datetime64):
span = span * np.timedelta64(1, )
low, high = low-span, high+span
return low, high | If lower and high values are equal ensures they are separated by
the defined span. |
17,330 | def read_folder(folder):
recordings = []
for filename in glob.glob(os.path.join(folder, '*.ink')):
recording = parse_scg_ink_file(filename)
recordings.append(recording)
return recordings | Read all files of `folder` and return a list of HandwrittenData
objects.
Parameters
----------
folder : string
Path to a folder
Returns
-------
list :
A list of all .ink files in the given folder. |
17,331 | def EXPGauss(w_F, compute_uncertainty=True, is_timeseries=False):
T = float(np.size(w_F))
var = np.var(w_F)
DeltaF = np.average(w_F) - 0.5 * var
result_vals = dict()
if compute_uncertainty:
g = 1.0
T_eff = T
if is_timeseries:
import timeseries
g = timeseries.statisticalInefficiency(w_F, w_F)
T_eff = T / g
dx2 = var / T_eff + 0.5 * var * var / (T_eff - 1)
dDeltaF = np.sqrt(dx2)
result_vals['Delta_f'] = DeltaF
result_vals['dDelta_f'] = dDeltaF
else:
result_vals['Delta_f'] = DeltaF
return result_vals | Estimate free energy difference using gaussian approximation to one-sided (unidirectional) exponential averaging.
Parameters
----------
w_F : np.ndarray, float
w_F[t] is the forward work value from snapshot t. t = 0...(T-1) Length T is deduced from vector.
compute_uncertainty : bool, optional, default=True
if False, will disable computation of the statistical uncertainty (default: True)
is_timeseries : bool, default=False
if True, correlation in data is corrected for by estimation of statistical inefficiency (default: False)
Use this option if you are providing correlated timeseries data and have not subsampled the data to produce uncorrelated samples.
Returns
-------
result_vals : dictionary
Possible keys in the result_vals dictionary
'Delta_f' : float
Free energy difference between the two states
'dDelta_f': float
Estimated standard deviation of free energy difference between the two states.
Notes
-----
If you are providing correlated timeseries data, be sure to set the 'timeseries' flag to True
Examples
--------
Compute the free energy difference given a sample of forward work values.
>>> from pymbar import testsystems
>>> [w_F, w_R] = testsystems.gaussian_work_example(mu_F=None, DeltaF=1.0, seed=0)
>>> results = EXPGauss(w_F)
>>> print('Forward Gaussian approximated free energy difference is %.3f +- %.3f kT' % (results['Delta_f'], results['dDelta_f']))
Forward Gaussian approximated free energy difference is 1.049 +- 0.089 kT
>>> results = EXPGauss(w_R)
>>> print('Reverse Gaussian approximated free energy difference is %.3f +- %.3f kT' % (results['Delta_f'], results['dDelta_f']))
Reverse Gaussian approximated free energy difference is -1.073 +- 0.080 kT |
17,332 | def setup(self, redis_conn=None, host='localhost', port=6379):
AbstractCounter.setup(self, redis_conn=redis_conn, host=host,
port=port)
self._threaded_start() | Set up the counting manager class
@param redis_conn: A premade redis connection (overrides host and port)
@param host: the redis host
@param port: the redis port |
17,333 | def prepare_stack_for_update(self, stack, tags):
if self.is_stack_destroyed(stack):
return False
elif self.is_stack_completed(stack):
return True
stack_name = self.get_stack_name(stack)
stack_status = self.get_stack_status(stack)
if self.is_stack_in_progress(stack):
raise exceptions.StackUpdateBadStatus(
stack_name, stack_status,
)
if not self.is_stack_recreatable(stack):
raise exceptions.StackUpdateBadStatus(
stack_name, stack_status,
)
if not self.recreate_failed:
raise exceptions.StackUpdateBadStatus(
stack_name, stack_status,
)
stack_tags = self.get_stack_tags(stack)
if not check_tags_contain(stack_tags, tags):
raise exceptions.StackUpdateBadStatus(
stack_name, stack_status,
)
if self.interactive:
sys.stdout.write(
% (stack_name, stack_status))
sys.stdout.flush()
ask_for_approval(include_verbose=False)
logger.warn(, stack_name)
self.destroy_stack(stack)
return False | Prepare a stack for updating
It may involve deleting the stack if is has failed it's initial
creation. The deletion is only allowed if:
- The stack contains all the tags configured in the current context;
- The stack is in one of the statuses considered safe to re-create
- ``recreate_failed`` is enabled, due to either being explicitly
enabled by the user, or because interactive mode is on.
Args:
stack (dict): a stack object returned from get_stack
tags (list): list of expected tags that must be present in the
stack if it must be re-created
Returns:
bool: True if the stack can be updated, False if it must be
re-created |
17,334 | def u_shape(units: tf.Tensor,
n_hidden_list: List,
filter_width=7,
use_batch_norm=False,
training_ph=None):
units_for_skip_conn = []
conv_net_params = {'filter_width': filter_width,
'use_batch_norm': use_batch_norm,
'training_ph': training_ph}
for n_hidden in n_hidden_list:
units = stacked_cnn(units, [n_hidden], **conv_net_params)
units_for_skip_conn.append(units)
units = tf.layers.max_pooling1d(units, pool_size=2, strides=2, padding='same')
units = stacked_cnn(units, [n_hidden], **conv_net_params)
for down_step, n_hidden in enumerate(n_hidden_list[::-1]):
units = tf.expand_dims(units, axis=2)
units = tf.layers.conv2d_transpose(units, n_hidden, filter_width, strides=(2, 1), padding='same')
units = tf.squeeze(units, axis=2)
skip_units = units_for_skip_conn[-(down_step + 1)]
if skip_units.get_shape().as_list()[-1] != n_hidden:
skip_units = tf.layers.dense(skip_units, n_hidden)
units = skip_units + units
units = stacked_cnn(units, [n_hidden], **conv_net_params)
return units | Network architecture inspired by One Hundred layer Tiramisu.
https://arxiv.org/abs/1611.09326. U-Net like.
Args:
units: a tensorflow tensor with dimensionality [None, n_tokens, n_features]
n_hidden_list: list with number of hidden units at the output of each layer
filter_width: width of the kernel in tokens
use_batch_norm: whether to use batch normalization between layers
training_ph: boolean placeholder determining whether is training phase now or not.
It is used only for batch normalization to determine whether to use
current batch average (std) or memory stored average (std)
Returns:
units: tensor at the output of the last convolutional layer
with dimensionality [None, n_tokens, n_hidden_list[-1]] |
17,335 | def deactivate(self):
if self._driver and self._driver.is_connected():
self._driver.deactivate() | Stop heating/cooling and turn off the fan |
17,336 | def _from_binary_objid(cls, binary_stream):
uid_size = ObjectID._UUID_SIZE
uids = [UUID(bytes_le=binary_stream[i*uid_size:(i+1)*uid_size].tobytes()) if i * uid_size < len(binary_stream) else None for i in range(0,4)]
_MOD_LOGGER.debug("Attempted to unpack OBJECT_ID Entry from \"%s\"\nResult: %s", binary_stream.tobytes(), uids)
return cls(uids) | See base class. |
17,337 | def baseurl(url):
parsed_url = urlparse.urlparse(url)
if not parsed_url.netloc or parsed_url.scheme not in ("http", "https"):
raise ValueError()
service_url = "%s://%s%s" % (parsed_url.scheme, parsed_url.netloc, parsed_url.path.strip())
return service_url | return baseurl of given url |
17,338 | def _images_succeeded(cls, session):
for image, store in cls._deleted_images:
for stored_image, _ in cls._stored_images:
if stored_image.object_type == image.object_type and \
stored_image.object_id == image.object_id and \
stored_image.width == image.width and \
stored_image.height == image.height and \
stored_image.mimetype == image.mimetype:
break
else:
store.delete(image)
cls._stored_images.clear()
cls._deleted_images.clear() | Clears the :attr:`_stored_images` set and deletes actual
files that are marked as deleted in the storage
if the ongoing transaction has committed. |
17,339 | def select(self, node):
child = self.translate(node.child)
return \
.format(op=latex_operator[node.operator],
conditions=node.conditions, child=child) | Translate a select node into a latex qtree node.
:param node: a treebrd node
:return: a qtree subtree rooted at the node |
17,340 | def GetValue(self, row, col, table=None):
if table is None:
table = self.grid.current_table
try:
cell_code = self.code_array((row, col, table))
except IndexError:
cell_code = None
maxlength = int(config["max_textctrl_length"])
if cell_code is not None and len(cell_code) > maxlength:
chunk = 80
cell_code = "\n".join(cell_code[i:i + chunk]
for i in xrange(0, len(cell_code), chunk))
return cell_code | Return the result value of a cell, line split if too much data |
17,341 | def unwrap(self, value, session=None):
self.validate_unwrap(value)
ret = {}
for value_dict in value:
k = value_dict['k']
v = value_dict['v']
ret[self.key_type.unwrap(k, session=session)] = self.value_type.unwrap(v, session=session)
return ret | Expects a list of dictionaries with ``k`` and ``v`` set to the
keys and values that will be unwrapped into the output python
dictionary should have. Validates the input and then constructs the
dictionary from the list. |
17,342 | def save(thing, url_or_handle, **kwargs):
is_handle = hasattr(url_or_handle, "write") and hasattr(url_or_handle, "name")
if is_handle:
_, ext = os.path.splitext(url_or_handle.name)
else:
_, ext = os.path.splitext(url_or_handle)
if not ext:
raise RuntimeError("No extension in URL: " + url_or_handle)
if ext in savers:
saver = savers[ext]
if is_handle:
saver(thing, url_or_handle, **kwargs)
else:
with write_handle(url_or_handle) as handle:
saver(thing, handle, **kwargs)
else:
saver_names = [(key, fn.__name__) for (key, fn) in savers.items()]
message = "Unknown extension , supports {}."
raise ValueError(message.format(ext, saver_names)) | Save object to file on CNS.
File format is inferred from path. Use save_img(), save_npy(), or save_json()
if you need to force a particular format.
Args:
obj: object to save.
path: CNS path.
Raises:
RuntimeError: If file extension not supported. |
17,343 | def _setup_crontab():
from crontab import CronTab
command = .format(settings.venv)
user = _get_real_user()
if args["nolive"]:
vms("Skipping cron tab configuration because enabled.")
return
cron = CronTab(user=user)
existing = False
possible = cron.find_comment("pyci_cron")
if len(list(possible)) > 0:
if args["rollback"]:
vms("Removing {} from cron tab.".format(command))
cron.remove_all(command)
cron.write()
db["cron"] = False
_save_db()
else:
existing = True
if not existing and not args["rollback"]:
job = cron.new(command=command, comment="pyci_cron")
if args["cronfreq"] == 1:
vms("New cron tab configured *minutely* for {}".format(command))
job.setall("* * * * *")
else:
vms("New cron tab configured every {} minutes for {}.".format(args["cronfreq"], command))
job.setall("*/{} * * * *".format(args["cronfreq"]))
cron.write()
db["cron"] = True
_save_db() | Sets up the crontab if it hasn't already been setup. |
17,344 | def get_prefix_envname(self, name, log=False):
prefix = None
if name == 'root':
prefix = self.ROOT_PREFIX
envs = self.get_envs()
for p in envs:
if basename(p) == name:
prefix = p
return prefix | Return full prefix path of environment defined by `name`. |
17,345 | async def run_task(self) -> None:
while self.running:
try:
Log.debug(, self.name)
before = self.time()
await self.run()
total = self.time() - before
Log.debug(,
self.name, total)
sleep = self.INTERVAL - total
if sleep > 0:
await self.sleep(sleep)
except CancelledError:
Log.debug(, self.name)
raise
except Exception:
Log.exception(, self.name) | Execute the task inside the asyncio event loop. Track the time it
takes to run, and log when it starts/stops. After `INTERVAL` seconds,
if/once the task has finished running, run it again until `stop()`
is called. |
17,346 | def CheckGlobalStatic(filename, clean_lines, linenum, error):
line = clean_lines.elided[linenum]
if linenum + 1 < clean_lines.NumLines() and not Search(r, line):
line += clean_lines.elided[linenum + 1].strip()
match = Match(
r
r,
line)
if (match and
not Search(r, line) and
not Search(r, line) and
not Match(r, match.group(4))):
if Search(r, line):
error(filename, linenum, , 4,
%
(match.group(1), match.group(2) or , match.group(3)))
else:
error(filename, linenum, , 4,
)
if (Search(r, line) or
Search(r, line)):
error(filename, linenum, , 4,
) | Check for unsafe global or static objects.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found. |
17,347 | def find_by_ids(self, _ids, projection=None, **kwargs):
id_list = [ObjectId(_id) for _id in _ids]
if len(_ids) == 0:
return []
if projection is not None and list(projection.keys()) == ["_id"]:
return [self({"_id": x}, fetched_fields={"_id": True}) for x in id_list]
else:
return self.find({"_id": {"$in": id_list}}, projection=projection, **kwargs) | Does a big _id:$in query on any iterator |
17,348 | def delete(self, *args, **kwargs):
source_cache = self.get_source_cache()
self.delete_thumbnails(source_cache)
super(ThumbnailerFieldFile, self).delete(*args, **kwargs)
if source_cache and source_cache.pk is not None:
source_cache.delete() | Delete the image, along with any generated thumbnails. |
17,349 | def animation(self, animation):
self._animation = animation
self._text = self._get_text(self._text['original']) | Setter for animation property.
Parameters
----------
animation: str
Defines the animation of the spinner |
17,350 | def _call_one_middleware(self, middleware):
args = {}
for arg in middleware[]:
if hasattr(self, arg):
args[arg] = reduce(getattr, arg.split(), self)
self.logger.debug(
.format(middleware[]))
middleware[](**args) | Evaluate arguments and execute the middleware function |
17,351 | async def export_wallet(self, von_wallet: Wallet, path: str) -> None:
LOGGER.debug(, von_wallet, path)
if not von_wallet.handle:
LOGGER.debug(, von_wallet.name)
raise WalletState(.format(von_wallet.name))
await wallet.export_wallet(
von_wallet.handle,
json.dumps({
: path,
**von_wallet.access_creds
}))
LOGGER.debug() | Export an existing VON anchor wallet. Raise WalletState if wallet is closed.
:param von_wallet: open wallet
:param path: path to which to export wallet |
17,352 | def distance(self, clr):
coord = lambda a, d: (cos(radians(a)) * d, sin(radians(a)) * d)
x0, y0 = coord(self.h * 360, self.s)
x1, y1 = coord(clr.h * 360, clr.s)
z0 = self.brightness
z1 = clr.brightness
d = sqrt((x1 - x0) ** 2 + (y1 - y0) ** 2 + (z1 - z0) ** 2)
return d | Returns the Euclidean distance between two colors (0.0-1.0).
Consider colors arranged on the color wheel:
- hue is the angle of a color along the center
- saturation is the distance of a color from the center
- brightness is the elevation of a color from the center
(i.e. we're on color a sphere) |
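A self-contained sketch of the geometry described in the docstring above: hue as an angle, saturation as a radius and brightness as an elevation, with the straight-line distance between the resulting points. The helper name and signature are illustrative, not the library's API.

```python
from math import cos, sin, radians, sqrt


def color_distance(h0, s0, b0, h1, s1, b1):
    """Euclidean distance between two (hue, saturation, brightness) colors;
    hue is in [0, 1] and treated as an angle around the color wheel."""
    x0, y0 = cos(radians(h0 * 360)) * s0, sin(radians(h0 * 360)) * s0
    x1, y1 = cos(radians(h1 * 360)) * s1, sin(radians(h1 * 360)) * s1
    return sqrt((x1 - x0) ** 2 + (y1 - y0) ** 2 + (b1 - b0) ** 2)


print(color_distance(0.0, 1.0, 1.0, 0.5, 1.0, 1.0))  # opposite hues -> 2.0
```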
17,353 | def reset(self):
logger.debug(
.format(self.__id, self.name))
self.base_url = .format(self.name)
logger.debug(
.format(self.__id, self.name)) | Reset the service to its' initial state. |
17,354 | def reverse(self, query, exactly_one=True, timeout=DEFAULT_SENTINEL):
params = {
'ak': self.api_key,
'output': 'json',
'location': self._coerce_point_to_string(query),
}
url = self._construct_url(params)
logger.debug("%s.reverse: %s", self.__class__.__name__, url)
return self._parse_reverse_json(
self._call_geocoder(url, timeout=timeout), exactly_one=exactly_one
) | Return an address by location point.
:param query: The coordinates for which you wish to obtain the
closest human-readable addresses.
:type query: :class:`geopy.point.Point`, list or tuple of ``(latitude,
longitude)``, or string as ``"%(latitude)s, %(longitude)s"``.
:param bool exactly_one: Return one result or a list of results, if
available. Baidu's API will always return at most one result.
.. versionadded:: 1.14.0
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
:rtype: ``None``, :class:`geopy.location.Location` or a list of them, if
``exactly_one=False``. |
17,355 | def process_vts_params(self, scanner_vts):
vt_selection = {}
filters = list()
for vt in scanner_vts:
if vt.tag == :
vt_id = vt.attrib.get()
vt_selection[vt_id] = {}
for vt_value in vt:
if not vt_value.attrib.get():
raise OSPDError(,
)
vt_value_id = vt_value.attrib.get()
vt_value_value = vt_value.text if vt_value.text else
vt_selection[vt_id][vt_value_id] = vt_value_value
if vt.tag == :
vts_filter = vt.attrib.get(, None)
if vts_filter is None:
raise OSPDError(,
)
filters.append(vts_filter)
vt_selection[] = filters
return vt_selection | Receive an XML object with the Vulnerability Tests an their
parameters to be use in a scan and return a dictionary.
@param: XML element with vt subelements. Each vt has an
id attribute. Optional parameters can be included
as vt child.
Example form:
<vt_selection>
<vt_single id='vt1' />
<vt_single id='vt2'>
<vt_value id='param1'>value</vt_value>
</vt_single>
<vt_group filter='family=debian'/>
<vt_group filter='family=general'/>
</vt_selection>
@return: Dictionary containing the vts attribute and subelements,
like the VT's id and VT's parameters.
Example form:
{'vt1': {},
'vt2': {'value_id': 'value'},
'vt_groups': ['family=debian', 'family=general']} |
17,356 | def library_sequencing_results(self):
sres_ids = self.sequencing_result_ids
res = {}
for i in sres_ids:
sres = SequencingResult(i)
res[sres.library_id] = sres
return res | Generates a dict. where each key is a Library ID on the SequencingRequest and each value
is the associated SequencingResult. Libraries that aren't yet with a SequencingResult are
not included in the dict. |
17,357 | def closeSession(self):
rv = self.lib.C_CloseSession(self.session)
if rv != CKR_OK:
raise PyKCS11Error(rv) | C_CloseSession |
17,358 | def filter_resources(tables, relationships,
include_tables=None, include_columns=None,
exclude_tables=None, exclude_columns=None):
_tables = copy.deepcopy(tables)
_relationships = copy.deepcopy(relationships)
include_tables = include_tables or [t.name for t in _tables]
include_columns = include_columns or [c.name for t in _tables for c in t.columns]
exclude_tables = exclude_tables or list()
exclude_columns = exclude_columns or list()
_tables = [t for t in _tables if t.name not in exclude_tables and t.name in include_tables]
_relationships = [r for r in _relationships
if r.right_col not in exclude_tables
and r.left_col not in exclude_tables
and r.right_col in include_tables
and r.left_col in include_tables]
for t in _tables:
t.columns = [c for c in t.columns if c.name not in exclude_columns and c.name in include_columns]
return _tables, _relationships | Include the following:
1. Tables and relationships with tables present in the include_tables (lst of str, tables names)
2. Columns (of whichever table) present in the include_columns (lst of str, columns names)
Exclude the following:
1. Tables and relationships with tables present in the exclude_tables (lst of str, tables names)
2. Columns (of whichever table) present in the exclude_columns (lst of str, columns names)
Disclosure note:
All relationships are taken into consideration before ignoring columns.
In other words, if one excludes primary or foreign keys, it will still keep the relations display amongst tables |
17,359 | def add_to_capabilities(self, capabilities):
    proxy_caps = {}
    proxy_caps['proxyType'] = self.proxyType['string']
    if self.autodetect:
        proxy_caps['autodetect'] = self.autodetect
    if self.ftpProxy:
        proxy_caps['ftpProxy'] = self.ftpProxy
    if self.httpProxy:
        proxy_caps['httpProxy'] = self.httpProxy
    if self.proxyAutoconfigUrl:
        proxy_caps['proxyAutoconfigUrl'] = self.proxyAutoconfigUrl
    if self.sslProxy:
        proxy_caps['sslProxy'] = self.sslProxy
    if self.noProxy:
        proxy_caps['noProxy'] = self.noProxy
    if self.socksProxy:
        proxy_caps['socksProxy'] = self.socksProxy
    if self.socksUsername:
        proxy_caps['socksUsername'] = self.socksUsername
    if self.socksPassword:
        proxy_caps['socksPassword'] = self.socksPassword
    capabilities['proxy'] = proxy_caps | Adds proxy information as capability in specified capabilities.
:Args:
- capabilities: The capabilities to which proxy will be added. |
17,360 | def get_vmpolicy_macaddr_output_vmpolicy_macaddr_name(self, **kwargs):
config = ET.Element("config")
get_vmpolicy_macaddr = ET.Element("get_vmpolicy_macaddr")
config = get_vmpolicy_macaddr
output = ET.SubElement(get_vmpolicy_macaddr, "output")
vmpolicy_macaddr = ET.SubElement(output, "vmpolicy-macaddr")
name = ET.SubElement(vmpolicy_macaddr, "name")
name.text = kwargs.pop('name')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
17,361 | def prior_names(self):
return list(self.prior_information.groupby(
self.prior_information.index).groups.keys()) | get the prior information names
Returns
-------
prior_names : list
a list of prior information names |
17,362 | def next_token(expected_type, data):
next_data = copy.copy(data)
next_type = TokenType.INVALID
if len(next_data) == 0 or next_data[0] == None:
next_type = TokenType.END
elif (expected_type == TokenType.DIGIT or expected_type == TokenType.DIGIT_OR_ZERO) and next_data[0].isalpha():
next_type = TokenType.LETTER
elif expected_type == TokenType.LETTER and next_data[0].isdigit():
next_type = TokenType.DIGIT
elif expected_type == TokenType.SUFFIX and next_data[0].isdigit():
next_type = TokenType.SUFFIX_NO
else:
if next_data[0] == :
next_type = TokenType.DIGIT_OR_ZERO
elif next_data[0] == :
next_type = TokenType.SUFFIX
elif next_data[0] == :
if len(next_data) > 1 and next_data[1] == :
next_type = TokenType.REVISION_NO
next_data = next_data[1:]
else:
next_type = TokenType.INVALID
next_data = next_data[1:]
if next_type < expected_type:
if not ((next_type == TokenType.DIGIT_OR_ZERO and expected_type == TokenType.DIGIT) or
(next_type == TokenType.SUFFIX and expected_type == TokenType.SUFFIX_NO) or
(next_type == TokenType.DIGIT and expected_type == TokenType.LETTER)):
next_type = TokenType.INVALID
return next_type, next_data | Based on the expected next type, consume the next token returning the type found and an updated buffer with the found token
removed
:param expected_type:
:param data:
:return: (TokenType, str) tuple where TokenType is the type of the next token expected |
17,363 | def getpaths(struct):
if isinstance(struct, Task):
return struct.output()
elif isinstance(struct, dict):
return struct.__class__((k, getpaths(v)) for k, v in six.iteritems(struct))
elif isinstance(struct, (list, tuple)):
return struct.__class__(getpaths(r) for r in struct)
else:
try:
return [getpaths(r) for r in struct]
except TypeError:
raise Exception('Cannot map %s to Task/dict/list' % str(struct)) | Maps all Tasks in a structured data object to their .output(). |
17,364 | def resplit_datasets(dataset, other_dataset, random_seed=None, split=None):
from torchnlp.datasets import Dataset
concat = dataset.rows + other_dataset.rows
shuffle(concat, random_seed=random_seed)
if split is None:
return Dataset(concat[:len(dataset)]), Dataset(concat[len(dataset):])
else:
split = max(min(round(len(concat) * split), len(concat)), 0)
return Dataset(concat[:split]), Dataset(concat[split:]) | Deterministic shuffle and split algorithm.
Given the same two datasets and the same ``random_seed``, the split happens the same exact way
every call.
Args:
dataset (lib.datasets.Dataset): First dataset.
other_dataset (lib.datasets.Dataset): Another dataset.
random_seed (int, optional): Seed to control the shuffle of both datasets.
split (float, optional): If defined it is the percentage of rows that first dataset gets
after split otherwise the original proportions are kept.
Returns:
:class:`lib.datasets.Dataset`, :class:`lib.datasets.Dataset`: Resplit datasets. |
17,365 | def validate_username_for_new_account(person, username):
query = Person.objects.filter(username__exact=username)
count = query.filter(pk=person.pk).count()
if count == 0 and account_exists(username):
raise UsernameTaken(
six.u()) | Validate the new username for a new account. If the username is invalid
or in use, raises :py:exc:`UsernameInvalid` or :py:exc:`UsernameTaken`.
:param person: Owner of new account.
:param username: Username to validate. |
17,366 | def read_from(self, provider, **options):
for item in self.values():
if is_mixin(item):
item.read_from(provider, **options) | All :class:`Pointer` fields in the `Structure` read the necessary
number of bytes from the data :class:`Provider` for their referenced
:attr:`~Pointer.data` object. Null pointer are ignored.
:param Provider provider: data :class:`Provider`.
:keyword bool nested: if ``True`` all :class:`Pointer` fields in the
:attr:`~Pointer.data` objects of all :class:`Pointer` fields in the
`Structure` reads their referenced :attr:`~Pointer.data` object as
well (chained method call).
Each :class:`Pointer` field stores the bytes for its referenced
:attr:`~Pointer.data` object in its :attr:`~Pointer.bytestream`. |
17,367 | def get_current_m2m_diff(self, instance, new_objects):
new_ids = self.pks_from_objects(new_objects)
relation_manager = self.__get__(instance)
filter = Q(**{relation_manager.source_field.attname: instance.pk})
qs = self.through.objects.current.filter(filter)
try:
target_name = relation_manager.target_field.attname
except AttributeError:
target_name = relation_manager.through._meta.get_field_by_name(
relation_manager.target_field_name)[0].attname
current_ids = set(qs.values_list(target_name, flat=True))
being_removed = current_ids - new_ids
being_added = new_ids - current_ids
return list(being_removed), list(being_added) | :param instance: Versionable object
:param new_objects: objects which are about to be associated with
instance
:return: (being_removed id list, being_added id list)
:rtype : tuple |
17,368 | def group_add_user_action(model, request):
user_id = request.params.get()
if not user_id:
user_ids = request.params.getall()
else:
user_ids = [user_id]
try:
group = model.model
validate_add_users_to_groups(model, user_ids, [group.id])
for user_id in user_ids:
group.add(user_id)
group()
model.parent.invalidate(group.name)
localizer = get_localizer(request)
message = localizer.translate(_(
,
default="Added user to group .",
mapping={
: .join(user_ids),
: group.id
}
))
return {
: True,
: message
}
except ManageMembershipError as e:
if e.reason is not LM_TARGET_UID_NOT_ALLOWED:
raise Exception(u"Unknown ManageMembershipError reason.")
localizer = get_localizer(request)
message = localizer.translate(_(
,
default=(
"Failed adding user to group . "
"Manage membership denied for user."
),
mapping={
: e.data,
: group.id
}
))
return {
: False,
: message
}
except Exception as e:
return {
: False,
: str(e)
} | Add user to group. |
17,369 | def fromBinaryString(value):
bitNo = 8
byte = 0
r = []
for v in value:
if bitNo:
bitNo -= 1
else:
bitNo = 7
r.append(byte)
byte = 0
if v in ('0', '1'):
v = int(v)
else:
raise error.PyAsn1Error(
'Non-binary OCTET STRING initializer %s' % (v,)
)
byte |= v << bitNo
r.append(byte)
return octets.ints2octs(r) | Create a |ASN.1| object initialized from a string of '0' and '1'.
Parameters
----------
value: :class:`str`
Text string like '1010111' |
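A standalone sketch (not the pyasn1 API) of the bit-packing idea in the row above: '0'/'1' characters are packed into bytes most-significant-bit first, with trailing bits left as zero padding. The helper name is hypothetical.

```python
def pack_bits(bits: str) -> bytes:
    # Hypothetical helper, for illustration only.
    out = bytearray()
    for i in range(0, len(bits), 8):
        byte = 0
        for pos, ch in enumerate(bits[i:i + 8]):
            if ch not in ('0', '1'):
                raise ValueError('non-binary character %r' % ch)
            byte |= int(ch) << (7 - pos)
        out.append(byte)
    return bytes(out)


print(pack_bits('1010111'))  # b'\xae' == 0b10101110 (last bit is padding)
```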
17,370 | def cli(ctx, feature_id, start, end, organism="", sequence=""):
return ctx.gi.annotations.set_boundaries(feature_id, start, end, organism=organism, sequence=sequence) | Set the boundaries of a genomic feature
Output:
A standard apollo feature dictionary ({"features": [{...}]}) |
17,371 | def _sanitize_url_components(comp_list, field):
if not comp_list:
return
elif comp_list[0].startswith('{0}='.format(field)):
ret = '{0}=XXXXXXXXXX&'.format(field)
comp_list.remove(comp_list[0])
return ret + _sanitize_url_components(comp_list, field)
else:
ret = '{0}&'.format(comp_list[0])
comp_list.remove(comp_list[0])
return ret + _sanitize_url_components(comp_list, field) | Recursive function to sanitize each component of the url. |
17,372 | def import_categories(self, category_nodes):
self.write_out(self.style.STEP())
categories = {}
for category_node in category_nodes:
title = category_node.find( % WP_NS).text[:255]
slug = category_node.find(
% WP_NS).text[:255]
try:
parent = category_node.find(
% WP_NS).text[:255]
except TypeError:
parent = None
self.write_out( % title)
category, created = Category.objects.get_or_create(
slug=slug, defaults={: title,
: categories.get(parent)})
categories[title] = category
self.write_out(self.style.ITEM())
return categories | Import all the categories from 'wp:category' nodes,
because categories in 'item' nodes are not necessarily
all the categories and returning it in a dict for
database optimizations. |
17,373 | def sync(self, *sids):
if sids == ():
sids = [sid for (sid,) in self.dbcur.execute(SQL_SENSOR_ALL)]
for sid in sids:
self.dbcur.execute(SQL_TMPO_LAST, (sid,))
last = self.dbcur.fetchone()
if last:
rid, lvl, bid, ext = last
self._clean(sid, rid, lvl, bid)
if time.time() < bid + 256:
return
else:
rid, lvl, bid = 0, 0, 0
self._req_sync(sid, rid, lvl, bid) | Synchronise data
Parameters
----------
sids : list of str
SensorIDs to sync
Optional, leave empty to sync everything |
17,374 | def get_avg_price_fifo(self) -> Decimal:
balance = self.get_quantity()
if not balance:
return Decimal(0)
paid = Decimal(0)
accounts = self.get_holding_accounts()
for account in accounts:
splits = self.get_available_splits_for_account(account)
for split in splits:
paid += split.value
avg_price = paid / balance
return avg_price | Calculates the average price paid for the security.
security = Commodity
Returns Decimal value. |
17,375 | def unit_is_related(self, location, worksheet):
same_worksheet = worksheet == self.worksheet
if isinstance(location, (tuple, list)):
return (location[0] >= self.start[0] and location[0] < self.end[0] and
location[1] >= self.start[1] and location[1] < self.end[1] and
same_worksheet)
else:
return same_worksheet | Checks for relationship between a unit location and this block.
Returns:
True if the location is related to this block. |
17,376 | def plot(self, flip=False, ax_channels=None, ax=None, *args, **kwargs):
if ax == None:
ax = pl.gca()
if ax_channels is not None:
flip = self._find_orientation(ax_channels)
plot_func = ax.axes.axhline if flip else ax.axes.axvline
kwargs.setdefault(, )
a1 = plot_func(self.vert[0], *args, **kwargs)
a2 = plot_func(self.vert[1], *args, **kwargs)
return (a1, a2) | {_gate_plot_doc} |
17,377 | def validate_steps(self, request, workflow, start, end):
errors = {}
for step in workflow.steps[start:end + 1]:
if not step.action.is_valid():
errors[step.slug] = dict(
(field, [six.text_type(error) for error in errors])
for (field, errors) in step.action.errors.items())
return {
'has_errors': bool(errors),
'workflow_slug': workflow.slug,
'errors': errors,
} | Validates the workflow steps from ``start`` to ``end``, inclusive.
Returns a dict describing the validation state of the workflow. |
17,378 | def fcast(value: float) -> TensorLike:
newvalue = tf.cast(value, FTYPE)
if DEVICE == :
newvalue = newvalue.gpu()
return newvalue | Cast to float tensor |
17,379 | def contains (self, p):
inside = False
if p in self.bounds():
for s in self.segments():
if ((s.p.y > p.y) != (s.q.y > p.y) and
(p.x < (s.q.x - s.p.x) * (p.y - s.p.y) / (s.q.y - s.p.y) + s.p.x)):
inside = not inside
return inside | Returns True if point is contained inside this Polygon, False
otherwise.
This method uses the Ray Casting algorithm.
Examples:
>>> p = Polygon()
>>> p.vertices = [Point(1, 1), Point(1, -1), Point(-1, -1), Point(-1, 1)]
>>> p.contains( Point(0, 0) )
True
>>> p.contains( Point(2, 3) )
False |
17,380 | def import_medusa_data(mat_filename, config_file):
df_emd, df_md = _read_mat_mnu0(mat_filename)
if not isinstance(config_file, np.ndarray):
configs = np.loadtxt(config_file).astype(int)
else:
configs = config_file
print()
quadpole_list = []
if df_emd is not None:
index = 0
for Ar, Br, M, N in configs:
) * 1e3
else:
dfn = pd.DataFrame()
return dfn, df_md | Import measurement data (a .mat file) of the FZJ EIT160 system. This
data format is identified as 'FZJ-EZ-2017'.
Parameters
----------
mat_filename: string
filename to the .mat data file. Note that only MNU0 single-potentials
are supported!
config_file: string
filename for configuration file. The configuration file contains N rows
with 4 columns each (a, b, m, n)
Returns
------- |
17,381 | def set_digital_latch(self, pin, threshold_type, cb=None):
if 0 <= threshold_type <= 1:
self._command_handler.set_digital_latch(pin, threshold_type, cb)
return True
else:
return False | This method "arms" a digital pin for its data to be latched and saved in the latching table
If a callback method is provided, when latching criteria is achieved, the callback function is called
with latching data notification. In that case, the latching table is not updated.
:param pin: Digital pin number
:param threshold_type: DIGITAL_LATCH_HIGH | DIGITAL_LATCH_LOW
:param cb: callback function
:return: True if successful, False if parameter data is invalid |
17,382 | def surviors_are_inconsistent(survivor_mapping: Mapping[BaseEntity, Set[BaseEntity]]) -> Set[BaseEntity]:
victim_mapping = set()
for victim in itt.chain.from_iterable(survivor_mapping.values()):
if victim in survivor_mapping:
victim_mapping.add(victim)
return victim_mapping | Check that there's no transitive shit going on. |
17,383 | def get_outliers(self):
log.info("Clipping outliers...")
log.info( %
(0, self.oiter, len(self.outmask)))
def M(x): return np.delete(x, np.concatenate(
[self.nanmask, self.badmask, self.transitmask]), axis=0)
t = M(self.time)
outmask = [np.array([-1]), np.array(self.outmask)]
if len(outmask) - 1 > self.oiter:
log.error( +
)
break
(len(outmask) - 2, self.oiter, len(self.outmask))) | Performs iterative sigma clipping to get outliers. |
17,384 | def initialize_renderer(extensions=None):
if extensions is None:
extensions = []
if isinstance(extensions, str):
extensions = [extension.strip() for extension in extensions.split(',')]
for extension in getattr(settings, 'MARKYMARK_EXTENSIONS', DEFAULT_MARKYMARK_EXTENSIONS):
extensions.append(extension)
return markdown.Markdown(extensions=extensions) | Initializes the renderer by setting up the extensions (taking a comma separated
string or iterable of extensions). These extensions are added alongside with the
configured always-on extensions.
Returns a markdown renderer instance. |
17,385 | def delete_subscription(self, subscription_id):
self._validate_uuid(subscription_id)
url = "/notification/v1/subscription/{}".format(subscription_id)
response = NWS_DAO().deleteURL(url, self._write_headers())
if response.status != 204:
raise DataFailureException(url, response.status, response.data)
return response.status | Deleting an existing subscription
:param subscription_id: is the subscription the client wants to delete |
17,386 | def boolbox(msg="Shall I continue?", title=" ",
choices=("[Y]es", "[N]o"), image=None,
default_choice='[Y]es', cancel_choice='[N]o'):
if len(choices) != 2:
raise AssertionError(
)
reply = bb.buttonbox(msg=msg,
title=title,
choices=choices,
image=image,
default_choice=default_choice,
cancel_choice=cancel_choice)
if reply == choices[0]:
return True
else:
return False | Display a boolean msgbox.
The returned value is calculated this way::
if the first choice is chosen, or if the dialog is cancelled:
returns True
else:
returns False
:param str msg: the msg to be displayed
:param str title: the window title
:param list choices: a list or tuple of the choices to be displayed
:param str image: Filename of image to display
:param str default_choice: The choice you want highlighted
when the gui appears
:param str cancel_choice: If the user presses the 'X' close, which button
should be pressed
:return: True if first button pressed or dialog is cancelled, False if
second button is pressed |
17,387 | def set_computer_desc(desc=None):
if six.PY2:
desc = _to_unicode(desc)
system_info = win32net.NetServerGetInfo(None, 101)
if desc is None:
return False
system_info[] = desc
try:
win32net.NetServerSetInfo(None, 101, system_info)
except win32net.error as exc:
(number, context, message) = exc.args
log.error()
log.error(, number)
log.error(, context)
log.error(, message)
return False
return {: get_computer_desc()} | Set the Windows computer description
Args:
desc (str):
The computer description
Returns:
str: Description if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt 'minion-id' system.set_computer_desc 'This computer belongs to Dave!' |
17,388 | def _expand_authorized_keys_path(path, user, home):
    converted_path = ''
    had_escape = False
    for char in path:
        if had_escape:
            had_escape = False
            if char == '%':
                converted_path += '%'
            elif char == 'u':
                converted_path += user
            elif char == 'h':
                converted_path += home
            else:
                error = 'AuthorizedKeysFile path: unknown token character "%{0}"'.format(char)
                raise CommandExecutionError(error)
            continue
        elif char == '%':
            had_escape = True
        else:
            converted_path += char
    if had_escape:
        error = "AuthorizedKeysFile path: Last character can't be escape character"
        raise CommandExecutionError(error)
    return converted_path | Expand the AuthorizedKeysFile expression. Defined in man sshd_config(5) |
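An illustrative call for the row above (a sketch assuming the helper from Salt's ssh module is in scope; the user and home values are hypothetical): %u expands to the user name and %h to the home directory, mirroring the sshd_config(5) tokens.

```python
# Hypothetical values, shown only to illustrate the token expansion.
path = _expand_authorized_keys_path('%h/.ssh/authorized_keys', 'dave', '/home/dave')
print(path)  # /home/dave/.ssh/authorized_keys
```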
17,389 | def get_file_list():
if len(sys.argv) > 1:
file_list = list(sys.argv[1:])
else:
files_str = input(
re done: "", "") for f in file_list]
else:
file_list = shlex.split(files_str, posix=True)
for i in range(len(file_list)):
file_list[i] = os.path.abspath( os.path.expanduser(os.path.expandvars(file_list[i])) )
return file_list | Return a list of strings corresponding to file names supplied by drag and drop or standard input. |
17,390 | def local(self, *args, **kwargs):
local = salt.client.get_local_client(mopts=self.opts)
return local.cmd(*args, **kwargs) | Run :ref:`execution modules <all-salt.modules>` synchronously
See :py:meth:`salt.client.LocalClient.cmd` for all available
parameters.
Sends a command from the master to the targeted minions. This is the
same interface that Salt's own CLI uses. Note the ``arg`` and ``kwarg``
parameters are sent down to the minion(s) and the given function,
``fun``, is called with those parameters.
:return: Returns the result from the execution module |
17,391 | def from_cif_file(cif_file, source='', comment=''):
r = CifParser(cif_file)
structure = r.get_structures()[0]
return Header(structure, source, comment) | Static method to create Header object from cif_file
Args:
cif_file: cif_file path and name
source: User supplied identifier, i.e. for Materials Project this
would be the material ID number
comment: User comment that goes in header
Returns:
Header Object |
17,392 | def shard_data(source_fnames: List[str],
target_fname: str,
source_vocabs: List[vocab.Vocab],
target_vocab: vocab.Vocab,
num_shards: int,
buckets: List[Tuple[int, int]],
length_ratio_mean: float,
length_ratio_std: float,
output_prefix: str) -> Tuple[List[Tuple[List[str], str, 'DataStatistics']], 'DataStatistics']:
os.makedirs(output_prefix, exist_ok=True)
sources_shard_fnames = [[os.path.join(output_prefix, C.SHARD_SOURCE % i) + ".%d" % f for i in range(num_shards)]
for f in range(len(source_fnames))]
target_shard_fnames = [os.path.join(output_prefix, C.SHARD_TARGET % i)
for i in range(num_shards)]
data_stats_accumulator = DataStatisticsAccumulator(buckets, source_vocabs[0], target_vocab,
length_ratio_mean, length_ratio_std)
per_shard_stat_accumulators = [DataStatisticsAccumulator(buckets, source_vocabs[0], target_vocab, length_ratio_mean,
length_ratio_std) for shard_idx in range(num_shards)]
with ExitStack() as exit_stack:
sources_shards = [[exit_stack.enter_context(smart_open(f, mode="wt")) for f in sources_shard_fnames[i]] for i in
range(len(source_fnames))]
target_shards = [exit_stack.enter_context(smart_open(f, mode="wt")) for f in target_shard_fnames]
source_readers, target_reader = create_sequence_readers(source_fnames, target_fname,
source_vocabs, target_vocab)
random_shard_iter = iter(lambda: random.randrange(num_shards), None)
for (sources, target), random_shard_index in zip(parallel_iter(source_readers, target_reader),
random_shard_iter):
random_shard_index = cast(int, random_shard_index)
source_len = len(sources[0])
target_len = len(target)
buck_idx, buck = get_parallel_bucket(buckets, source_len, target_len)
data_stats_accumulator.sequence_pair(sources[0], target, buck_idx)
per_shard_stat_accumulators[random_shard_index].sequence_pair(sources[0], target, buck_idx)
if buck is None:
continue
for i, line in enumerate(sources):
sources_shards[i][random_shard_index].write(ids2strids(line) + "\n")
target_shards[random_shard_index].write(ids2strids(target) + "\n")
per_shard_stats = [shard_stat_accumulator.statistics for shard_stat_accumulator in per_shard_stat_accumulators]
sources_shard_fnames_by_shards = zip(*sources_shard_fnames)
return list(
zip(sources_shard_fnames_by_shards, target_shard_fnames, per_shard_stats)), data_stats_accumulator.statistics | Assign int-coded source/target sentence pairs to shards at random.
:param source_fnames: The path to the source text (and optional token-parallel factor files).
:param target_fname: The file name of the target file.
:param source_vocabs: Source vocabulary (and optional source factor vocabularies).
:param target_vocab: Target vocabulary.
:param num_shards: The total number of shards.
:param buckets: Bucket list.
:param length_ratio_mean: Mean length ratio.
:param length_ratio_std: Standard deviation of length ratios.
:param output_prefix: The prefix under which the shard files will be created.
:return: Tuple of source (and source factor) file names, target file names and statistics for each shard,
as well as global statistics. |
17,393 | def from_file(filename, use_cores=True, thresh=1.e-4):
with zopen(filename, "rt") as f:
return Xr.from_string(
f.read(), use_cores=use_cores,
thresh=thresh) | Reads an xr-formatted file to create an Xr object.
Args:
filename (str): name of file to read from.
use_cores (bool): use core positions and discard shell
positions if set to True (default). Otherwise,
use shell positions and discard core positions.
thresh (float): relative threshold for consistency check
between cell parameters (lengths and angles) from
header information and cell vectors, respectively.
Returns:
xr (Xr): Xr object corresponding to the input
file. |
17,394 | def get_synset_xml(self,syn_id):
http, resp, content = self.connect()
params = ""
fragment = ""
path = "cdb_syn"
if self.debug:
printf( "cornettodb/views/query_remote_syn_id: db_opt: %s" % path )
output_opt = "plain"
if self.debug:
printf( "cornettodb/views/query_remote_syn_id: output_opt: %s" % output_opt )
action = "runQuery"
if self.debug:
printf( "cornettodb/views/query_remote_syn_id: action: %s" % action )
printf( "cornettodb/views/query_remote_syn_id: query: %s" % syn_id )
qdict = {}
qdict[ "action" ] = action
qdict[ "query" ] = syn_id
qdict[ "outtype" ] = output_opt
query = urllib.urlencode( qdict )
db_url_tuple = ( self.scheme, self.host + ':' + str(self.port), path, params, query, fragment )
db_url = urlparse.urlunparse( db_url_tuple )
if self.debug:
printf( "db_url: %s" % db_url )
resp, content = http.request( db_url, "GET" )
if self.debug:
printf( "resp:\n%s" % resp )
xml_data = eval( content )
return etree.fromstring( xml_data ) | call cdb_syn with synset identifier -> returns the synset xml; |
17,395 | def setDataFrame(self, dataFrame):
if not isinstance(dataFrame, pandas.core.frame.DataFrame):
raise TypeError()
self.layoutAboutToBeChanged.emit()
self._dataFrame = dataFrame
self.layoutChanged.emit() | setter function to _dataFrame. Holds all data.
Note:
It's not implemented with python properties to keep Qt conventions.
Raises:
TypeError: if dataFrame is not of type pandas.core.frame.DataFrame.
Args:
dataFrame (pandas.core.frame.DataFrame): assign dataFrame to _dataFrame. Holds all the data displayed. |
17,396 | def notch_fir(self, f1, f2, order, beta=5.0, remove_corrupted=True):
from pycbc.filter import notch_fir
ts = notch_fir(self, f1, f2, order, beta=beta)
if remove_corrupted:
ts = ts[order:len(ts)-order]
return ts | notch filter the time series using an FIR filtered generated from
the ideal response passed through a time-domain kaiser
window (beta = 5.0)
The suppression of the notch filter is related to the bandwidth and
the number of samples in the filter length. For a few Hz bandwidth,
a length corresponding to a few seconds is typically
required to create significant suppression in the notched band.
Parameters
----------
Time Series: TimeSeries
The time series to be notched.
f1: float
The start of the frequency suppression.
f2: float
The end of the frequency suppression.
order: int
Number of corrupted samples on each side of the time series
beta: float
Beta parameter of the kaiser window that sets the side lobe attenuation. |
17,397 | def normalize(s, replace_spaces=True):
whitelist = ( + string.ascii_letters + string.digits)
if type(s) == six.binary_type:
s = six.text_type(s, , )
table = {}
for ch in [ch for ch in s if ch not in whitelist]:
if ch not in table:
try:
replacement = unicodedata.normalize(, ch)[0]
if replacement in whitelist:
table[ord(ch)] = replacement
else:
table[ord(ch)] = u
except:
table[ord(ch)] = u
if replace_spaces:
return s.translate(table).replace(u, u).replace(, )
else:
return s.translate(table).replace(u, u) | Normalize non-ascii characters to their closest ascii counterparts |
17,398 | def find_comp_by_target(self, target):
for comp in self._components:
if comp.id == target.component_id and \
comp.instance_name == target.instance_name:
return comp
raise MissingComponentError | Finds a component using a TargetComponent or one of its subclasses.
@param A @ref TargetComponent object or subclass of @ref
TargetComponent.
@return A Component object matching the target.
@raises MissingComponentError |
17,399 | def remove_thumbnail(self, thumbnail):
if thumbnail in self._thumbnails:
index = self._thumbnails.index(thumbnail)
self._thumbnails.remove(thumbnail)
self.layout().removeWidget(thumbnail)
thumbnail.deleteLater()
thumbnail.sig_canvas_clicked.disconnect()
thumbnail.sig_remove_figure.disconnect()
thumbnail.sig_save_figure.disconnect()
if thumbnail == self.current_thumbnail:
if len(self._thumbnails) > 0:
self.set_current_index(min(index, len(self._thumbnails)-1))
else:
self.current_thumbnail = None
self.figure_viewer.figcanvas.clear_canvas() | Remove thumbnail. |