Unnamed: 0 (int64, 0-389k) | code (string, lengths 26-79.6k) | docstring (string, lengths 1-46.9k)
---|---|---|
384,800 | def filter_by_analysis_period(self, analysis_period):
_filtered_data = self.filter_by_months(analysis_period.months_int)
_filtered_data.header._analysis_period = analysis_period
return _filtered_data | Filter the Data Collection based on an analysis period.
Args:
analysis_period: A Ladybug analysis period
Return:
A new Data Collection with filtered data |
384,801 | def runDeferred(self, deferred):
for handler, scope, offset in deferred:
self.scopeStack = scope
self.offset = offset
handler() | Run the callables in C{deferred} using their associated scope stack. |
384,802 | def initialize_weights(self):
n = self._outputSize
m = self._inputSize
self._Q = self._random.sample((n,m))
for i in range(n):
self._Q[i] /= np.sqrt( np.dot(self._Q[i], self._Q[i]) ) | Randomly initializes the visible-to-hidden connections. |
384,803 | def discard(self, value):
hash(value)
self.redis.srem(self.key, self._pickle(value)) | Remove element *value* from the set if it is present. |
384,804 | def _check_d1_characters(name):
bytename = bytearray(name)
for char in bytename:
if char not in _allowed_d1_characters:
raise pycdlibexception.PyCdlibInvalidInput() | A function to check that a name only uses d1 characters as defined by ISO9660.
Parameters:
name - The name to check.
Returns:
Nothing. |
384,805 | def controller(self):
if hasattr(self, ):
if len(self.controller_info[]) > 1:
raise TypeError(
)
return self.controller_id
raise AttributeError() | Check if multiple controllers are connected.
:returns: Return the controller_id of the active controller.
:rtype: string |
384,806 | def _get_bucket(self, bucket_name):
Cannot get or create %s' % bucket_name)
sys.exit(1)
return bucket | get a bucket based on a bucket name. If it doesn't exist, create it.
Parameters
==========
bucket_name: the name of the bucket to get (or create). It should
not contain google, and should be all lowercase with -
or underscores. |
384,807 | def listen(self, event):
if event in self.registered:
return
def handler(client, *args):
return self.process_event(event, client, args)
self.client.add_handler(event, handler)
self.registered.add(event)
_log.debug("Controller is now listening for events", event) | Request that the Controller listen for and dispatch an event.
Note: Even if the module that requested the listening is later
unloaded, the Controller will continue to dispatch the event, there
just might not be anything that cares about it. That's okay. |
384,808 | def _get_grouper(obj, key=None, axis=0, level=None, sort=True,
observed=False, mutated=False, validate=True):
group_axis = obj._get_axis(axis)
if level is not None:
if isinstance(group_axis, MultiIndex):
if is_list_like(level) and len(level) == 1:
level = level[0]
if key is None and is_scalar(level):
key = group_axis.get_level_values(level)
level = None
else:
if is_list_like(level):
nlevels = len(level)
if nlevels == 1:
level = level[0]
elif nlevels == 0:
raise ValueError()
else:
raise ValueError(
)
if isinstance(level, str):
if obj.index.name != level:
raise ValueError(
.format(level))
elif level > 0 or level < -1:
raise ValueError(
)
level = None
key = group_axis
if isinstance(key, Grouper):
binner, grouper, obj = key._get_grouper(obj, validate=False)
if key.key is None:
return grouper, [], obj
else:
return grouper, {key.key}, obj
elif isinstance(key, BaseGrouper):
return key, [], obj
is_tuple = isinstance(key, tuple)
all_hashable = is_tuple and is_hashable(key)
if is_tuple:
if ((all_hashable and key not in obj and set(key).issubset(obj))
or not all_hashable):
msg = ("Interpreting tuple as a list of keys, rather than "
"a single key. Use instead of . In "
"the future, a tuple will always mean a single key.")
warnings.warn(msg, FutureWarning, stacklevel=5)
key = list(key)
if not isinstance(key, list):
keys = [key]
match_axis_length = False
else:
keys = key
match_axis_length = len(keys) == len(group_axis)
any_callable = any(callable(g) or isinstance(g, dict) for g in keys)
any_groupers = any(isinstance(g, Grouper) for g in keys)
any_arraylike = any(isinstance(g, (list, tuple, Series, Index, np.ndarray))
for g in keys)
if (not any_callable and not any_arraylike and not any_groupers and
match_axis_length and level is None):
if isinstance(obj, DataFrame):
all_in_columns_index = all(g in obj.columns or g in
obj.index.names for g in keys)
elif isinstance(obj, Series):
all_in_columns_index = all(g in obj.index.names for g in keys)
if not all_in_columns_index:
keys = [com.asarray_tuplesafe(keys)]
if isinstance(level, (tuple, list)):
if key is None:
keys = [None] * len(level)
levels = level
else:
levels = [level] * len(keys)
groupings = []
exclusions = []
def is_in_axis(key):
if not _is_label_like(key):
try:
obj._data.items.get_loc(key)
except Exception:
return False
return True
def is_in_obj(gpr):
try:
return id(gpr) == id(obj[gpr.name])
except Exception:
return False
for i, (gpr, level) in enumerate(zip(keys, levels)):
if is_in_obj(gpr):
in_axis, name = True, gpr.name
exclusions.append(name)
elif is_in_axis(gpr):
if gpr in obj:
if validate:
obj._check_label_or_level_ambiguity(gpr)
in_axis, name, gpr = True, gpr, obj[gpr]
exclusions.append(name)
elif obj._is_level_reference(gpr):
in_axis, name, level, gpr = False, None, gpr, None
else:
raise KeyError(gpr)
elif isinstance(gpr, Grouper) and gpr.key is not None:
exclusions.append(gpr.key)
in_axis, name = False, None
else:
in_axis, name = False, None
if is_categorical_dtype(gpr) and len(gpr) != obj.shape[axis]:
raise ValueError(
("Length of grouper ({len_gpr}) and axis ({len_axis})"
" must be same length"
.format(len_gpr=len(gpr), len_axis=obj.shape[axis])))
ping = (Grouping(group_axis,
gpr,
obj=obj,
name=name,
level=level,
sort=sort,
observed=observed,
in_axis=in_axis)
if not isinstance(gpr, Grouping) else gpr)
groupings.append(ping)
if len(groupings) == 0:
raise ValueError()
grouper = BaseGrouper(group_axis, groupings, sort=sort, mutated=mutated)
return grouper, exclusions, obj | create and return a BaseGrouper, which is an internal
mapping of how to create the grouper indexers.
This may be composed of multiple Grouping objects, indicating
multiple groupers
Groupers are ultimately index mappings. They can originate as:
index mappings, keys to columns, functions, or Groupers
Groupers enable local references to axis,level,sort, while
the passed in axis, level, and sort are 'global'.
This routine tries to figure out what the passing in references
are and then creates a Grouping for each one, combined into
a BaseGrouper.
If observed & we have a categorical grouper, only show the observed
values
If validate, then check for key/level overlaps |
384,809 | def _prodterm_prime(lexer):
tok = next(lexer)
else:
lexer.unpop_token(tok)
return None | Return a product term' expression, eliminates left recursion. |
384,810 | def get_wcs(self, data_x, data_y):
img = self.fitsimage.get_image()
ra, dec = img.pixtoradec(data_x, data_y)
return ra, dec | Return (ra_deg, dec_deg) for the (data_x, data_y) position
based on any WCS associated with the loaded image. |
384,811 | def set_table_cb(self, viewer, table):
self.clear()
tree_dict = OrderedDict()
a_tab = table.get_data()
try:
a_tab = a_tab.filled()
except Exception:
pass
i_fmt = .format(len(str(len(a_tab))))
columns = [(, )]
for c in a_tab.columns.values():
col_str = .format(c.name, str(c.unit))
columns.append((col_str, c.name))
self.widget.setup_table(columns, 1, )
for i, row in enumerate(a_tab, 1):
bnch = Bunch.Bunch(zip(row.colnames, row.as_void()))
i_str = i_fmt.format(i)
bnch[] = i_str
tree_dict[i_str] = bnch
self.widget.set_tree(tree_dict)
n_rows = len(tree_dict)
if n_rows < self.settings.get(, 5000):
self.widget.set_optimal_column_widths()
self.logger.debug(.format(n_rows))
tablename = table.get(, )
self.logger.debug(.format(tablename)) | Display the given table object. |
384,812 | def disable_svc_notifications(self, service):
if service.notifications_enabled:
service.modified_attributes |= \
DICT_MODATTR["MODATTR_NOTIFICATIONS_ENABLED"].value
service.notifications_enabled = False
self.send_an_element(service.get_update_status_brok()) | Disable notifications for a service
Format of the line that triggers function call::
DISABLE_SVC_NOTIFICATIONS;<host_name>;<service_description>
:param service: service to edit
:type service: alignak.objects.service.Service
:return: None |
384,813 | def list_resources(self, device_id):
api = self._get_api(mds.EndpointsApi)
return [Resource(r) for r in api.get_endpoint_resources(device_id)] | List all resources registered to a connected device.
.. code-block:: python
>>> for r in api.list_resources(device_id):
print(r.name, r.observable, r.uri)
None,True,/3/0/1
Update,False,/5/0/3
...
:param str device_id: The ID of the device (Required)
:returns: A list of :py:class:`Resource` objects for the device
:rtype: list |
384,814 | def run_model(self, model_run, run_url):
try:
credentials = pika.PlainCredentials(self.user, self.password)
con = pika.BlockingConnection(pika.ConnectionParameters(
host=self.host,
port=self.port,
virtual_host=self.virtual_host,
credentials=credentials
))
channel = con.channel()
channel.queue_declare(queue=self.queue, durable=True)
except pika.exceptions.AMQPError as ex:
err_msg = str(ex)
if err_msg == :
err_msg = + self.user +
err_msg += self.host + + str(self.port)
err_msg += self.virtual_host + + self.queue
raise EngineException(err_msg, 500)
request = RequestFactory().get_request(model_run, run_url)
channel.basic_publish(
exchange=,
routing_key=self.queue,
body=json.dumps(request.to_dict()),
properties=pika.BasicProperties(
delivery_mode = 2,
)
)
con.close() | Run model by sending a message to the RabbitMQ queue containing the
run and experiment identifier. Messages are persistent to ensure that
a worker will process the run request at some point.
Throws an EngineException if communication with the server fails.
Parameters
----------
model_run : ModelRunHandle
Handle to model run
run_url : string
URL for model run information |
384,815 | def warning(self, *msg):
label = colors.yellow("WARNING")
self._msg(label, *msg) | Prints a warning |
384,816 | def endpoints(self):
if not self.__endpoints:
self.__endpoints = Endpoints(self.__connection)
return self.__endpoints | Gets the Endpoints API client.
Returns:
Endpoints: |
384,817 | def lithospheric_stress(step, trench, ridge, time):
timestep = step.isnap
base_lith = step.geom.rcmb + 1 - 0.105
stressfld = step.fields[][0, :, :, 0]
stressfld = np.ma.masked_where(step.geom.r_mesh[0] < base_lith, stressfld)
dzm = (step.geom.r_coord[1:] - step.geom.r_coord[:-1])
stress_lith = np.sum((stressfld[:, 1:] * dzm.T), axis=1)
ph_coord = step.geom.p_coord | calculate stress in the lithosphere |
384,818 | def collect(self):
def traverse(d, metric_name=''):
for key, value in d.iteritems():
if isinstance(value, dict):
if metric_name == '':
metric_name_next = key
else:
metric_name_next = metric_name + '.' + key
traverse(value, metric_name_next)
else:
metric_name_finished = metric_name + '.' + key
self.publish_gauge(
name=metric_name_finished,
value=value,
precision=1
)
md_state = self._parse_mdstat()
traverse(md_state, '') | Publish all mdstat metrics. |
384,819 | def aggregationToMonthsSeconds(interval):
seconds = interval.get('microseconds', 0) * 0.000001
seconds += interval.get('milliseconds', 0) * 0.001
seconds += interval.get('seconds', 0)
seconds += interval.get('minutes', 0) * 60
seconds += interval.get('hours', 0) * 60 * 60
seconds += interval.get('days', 0) * 24 * 60 * 60
seconds += interval.get('weeks', 0) * 7 * 24 * 60 * 60
months = interval.get('months', 0)
months += 12 * interval.get('years', 0)
return {'months': months, 'seconds': seconds} | Return the number of months and seconds from an aggregation dict that
represents a date and time.
Interval is a dict that contains one or more of the following keys: 'years',
'months', 'weeks', 'days', 'hours', 'minutes', 'seconds', 'milliseconds',
'microseconds'.
For example:
::
aggregationToMonthsSeconds({'years': 1, 'hours': 4, 'microseconds': 42}) ==
{'months':12, 'seconds':14400.000042}
:param interval: (dict) The aggregation interval representing a date and time
:returns: (dict) number of months and seconds in the interval:
``{months': XX, 'seconds': XX}``. The seconds is
a floating point that can represent resolutions down to a
microsecond. |
384,820 | def address_checksum_and_decode(addr: str) -> Address:
if not is_0x_prefixed(addr):
raise InvalidAddress()
if not is_checksum_address(addr):
raise InvalidAddress()
addr_bytes = decode_hex(addr)
assert len(addr_bytes) in (20, 0)
return Address(addr_bytes) | Accepts a string address and turns it into binary.
Makes sure that the provided address string is 0x prefixed and
checksummed according to the EIP55 specification |
384,821 | def contour(z, x=None, y=None, v=5, xlbl=None, ylbl=None, title=None,
cfntsz=10, lfntsz=None, intrp='bicubic', alpha=0.5, cmap=None,
vmin=None, vmax=None, fgsz=None, fgnm=None, fig=None, ax=None):
figp = fig
if fig is None:
fig = plt.figure(num=fgnm, figsize=fgsz)
fig.clf()
ax = fig.gca()
elif ax is None:
ax = fig.gca()
if cmap is None:
cmap = cm.coolwarm
if x is None:
x = np.arange(z.shape[1])
else:
x = np.array(x)
if y is None:
y = np.arange(z.shape[0])
else:
y = np.array(y)
xg, yg = np.meshgrid(x, y)
cntr = ax.contour(xg, yg, z, v, colors=)
if cfntsz is not None and cfntsz > 0:
plt.clabel(cntr, inline=True, fontsize=cfntsz)
im = ax.imshow(z, origin=, interpolation=intrp, aspect=,
extent=[x.min(), x.max(), y.min(), y.max()], cmap=cmap,
vmin=vmin, vmax=vmax, alpha=alpha)
ax.fmt_xdata = lambda x: "{: .2f}".format(x)
ax.fmt_ydata = lambda x: "{: .2f}".format(x)
if title is not None:
ax.set_title(title)
if xlbl is not None:
ax.set_xlabel(xlbl, fontsize=lfntsz)
if ylbl is not None:
ax.set_ylabel(ylbl, fontsize=lfntsz)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.2)
plt.colorbar(im, ax=ax, cax=cax)
attach_keypress(fig)
attach_zoom(ax)
if have_mpldc:
mpldc.datacursor()
if figp is None:
fig.show()
return fig, ax | Contour plot of a 2D surface. If a figure object is specified then the
plot is drawn in that figure, and ``fig.show()`` is not called. The
figure is closed on key entry 'q'.
Parameters
----------
z : array_like
2d array of data to plot
x : array_like, optional (default None)
Values for x-axis of the plot
y : array_like, optional (default None)
Values for y-axis of the plot
v : int or sequence of ints, optional (default 5)
An int specifies the number of contours to plot, and a sequence
specifies the specific contour levels to plot.
xlbl : string, optional (default None)
Label for x-axis
ylbl : string, optional (default None)
Label for y-axis
title : string, optional (default None)
Figure title
cfntsz : int or None, optional (default 10)
Contour label font size. No contour labels are displayed if
set to 0 or None.
lfntsz : int, optional (default None)
Axis label font size. The default font size is used if set to None.
intrp : string, optional (default 'bicubic')
Specify type of interpolation used to display image underlying
contours (see ``interpolation`` parameter of
:meth:`matplotlib.axes.Axes.imshow`)
alpha : float, optional (default 0.5)
Underlying image display alpha value
cmap : :class:`matplotlib.colors.Colormap`, optional (default None)
Colour map for surface. If none specified, defaults to cm.coolwarm
vmin, vmax : float, optional (default None)
Set upper and lower bounds for the colour map (see the corresponding
parameters of :meth:`matplotlib.axes.Axes.imshow`)
fgsz : tuple (width,height), optional (default None)
Specify figure dimensions in inches
fgnm : integer, optional (default None)
Figure number of figure
fig : :class:`matplotlib.figure.Figure` object, optional (default None)
Draw in specified figure instead of creating one
ax : :class:`matplotlib.axes.Axes` object, optional (default None)
Plot in specified axes instead of current axes of figure
Returns
-------
fig : :class:`matplotlib.figure.Figure` object
Figure object for this figure
ax : :class:`matplotlib.axes.Axes` object
Axes object for this plot |
384,822 | def check_honeypot(func=None, field_name=None):
if isinstance(func, six.string_types):
func, field_name = field_name, func
def decorated(func):
def inner(request, *args, **kwargs):
response = verify_honeypot_value(request, field_name)
if response:
return response
else:
return func(request, *args, **kwargs)
return wraps(func, assigned=available_attrs(func))(inner)
if func is None:
def decorator(func):
return decorated(func)
return decorator
return decorated(func) | Check request.POST for valid honeypot field.
Takes an optional field_name that defaults to HONEYPOT_FIELD_NAME if
not specified. |
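The `func=None` handling in check_honeypot is the standard trick for a decorator that works both bare and with arguments. Here is a self-contained sketch of that same pattern; all names are illustrative and not part of the original package.

```python
from functools import wraps

def check_field(func=None, field_name=None):
    """Decorator usable as @check_field or @check_field('my_field')."""
    if isinstance(func, str):            # called with a positional field name
        func, field_name = field_name, func

    def decorated(f):
        @wraps(f)
        def inner(request, *args, **kwargs):
            # hypothetical verification: the honeypot field must be empty
            if request.get(field_name or 'honeypot', '') != '':
                return 'rejected'
            return f(request, *args, **kwargs)
        return inner

    if func is None:                     # used with arguments: return a decorator
        return decorated
    return decorated(func)               # used bare: decorate immediately

@check_field
def view(request):
    return 'ok'

print(view({'honeypot': ''}), view({'honeypot': 'x'}))   # ok rejected
```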
384,823 | def _gen_delta_per_sec(self, path, value_delta, time_delta, multiplier,
prettyname, device):
if time_delta < 0:
return
value = (value_delta / time_delta) * multiplier
if value > 0.0:
self._replace_and_publish(path, prettyname, value, device) | Calculates the difference between two points and scales it to a per-second rate. |
384,824 | def azimuth(lons1, lats1, lons2, lats2):
lons1, lats1, lons2, lats2 = _prepare_coords(lons1, lats1, lons2, lats2)
cos_lat2 = numpy.cos(lats2)
true_course = numpy.degrees(numpy.arctan2(
numpy.sin(lons1 - lons2) * cos_lat2,
numpy.cos(lats1) * numpy.sin(lats2)
- numpy.sin(lats1) * cos_lat2 * numpy.cos(lons1 - lons2)
))
return (360 - true_course) % 360 | Calculate the azimuth between two points or two collections of points.
Parameters are the same as for :func:`geodetic_distance`.
Implements an "alternative formula" from
http://williams.best.vwh.net/avform.htm#Crs
:returns:
Azimuth as an angle between direction to north from first point and
direction to the second point measured clockwise in decimal degrees. |
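A standalone sketch of the same formula serves as a quick check; it assumes scalar inputs in decimal degrees and does the radian conversion explicitly (in the original, _prepare_coords presumably handles that step).

```python
import numpy as np

def azimuth_deg(lon1, lat1, lon2, lat2):
    # same "alternative formula" as above, scalar inputs in decimal degrees
    lon1, lat1, lon2, lat2 = np.radians([lon1, lat1, lon2, lat2])
    cos_lat2 = np.cos(lat2)
    true_course = np.degrees(np.arctan2(
        np.sin(lon1 - lon2) * cos_lat2,
        np.cos(lat1) * np.sin(lat2)
        - np.sin(lat1) * cos_lat2 * np.cos(lon1 - lon2)))
    return (360 - true_course) % 360

print(azimuth_deg(0, 0, 0, 1))   # due north -> 0.0
print(azimuth_deg(0, 0, 1, 0))   # due east  -> 90.0
```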
384,825 | def __locate_scubainit(self):
pkg_path = os.path.dirname(__file__)
self.scubainit_path = os.path.join(pkg_path, )
if not os.path.isfile(self.scubainit_path):
raise ScubaError(.format(self.scubainit_path)) | Determine path to scubainit binary |
384,826 | def GetStartTime(self, problems=problems_module.default_problem_reporter):
cursor = self._schedule._connection.cursor()
cursor.execute(
, (self.trip_id,))
(arrival_secs, departure_secs) = cursor.fetchone()
if arrival_secs != None:
return arrival_secs
elif departure_secs != None:
return departure_secs
else:
problems.InvalidValue(, ,
% self.trip_id) | Return the first time of the trip. TODO: For trips defined by frequency
return the first time of the first trip. |
384,827 | def sample(self, N=1):
if not self.filt:
self.forward()
paths = np.empty((len(self.filt), N), np.int)
paths[-1, :] = rs.multinomial(self.filt[-1], M=N)
log_trans = np.log(self.hmm.trans_mat)
for t, f in reversed(list(enumerate(self.filt[:-1]))):
for n in range(N):
probs = rs.exp_and_normalise(log_trans[:, paths[t + 1, n]] + np.log(f))
paths[t, n] = rs.multinomial_once(probs)
return paths | Sample N trajectories from the posterior.
Note
----
Performs the forward step in case it has not been performed. |
384,828 | def StrIndexOf(input_string, substring, startIndex, bitlength):
try:
s = input_string.value
t = substring.value
i = startIndex.value
return BVV(i + s[i:].index(t), bitlength)
except ValueError:
return BVV(-1, bitlength) | Return the index of the first occurrence of substring in input_string at or
after startIndex, or -1 if the substring is not found.
:param input_string: the string we want to check
:param substring: the substring we want to find the index
:param startIndex: the index to start searching at
:param bitlength: bitlength of the bitvector representing the index of the substring
:return BVV: index of the substring in bit-vector representation or -1 in bitvector representation |
384,829 | def add_fluctuations(hdf5_file, N_columns, N_processes):
random_state = np.random.RandomState(0)
slice_queue = multiprocessing.JoinableQueue()
pid_list = []
for i in range(N_processes):
worker = Fluctuations_worker(hdf5_file,
, random_state,
N_columns, slice_queue)
worker.daemon = True
worker.start()
pid_list.append(worker.pid)
for rows_slice in chunk_generator(N_columns, 4 * N_processes):
slice_queue.put(rows_slice)
slice_queue.join()
slice_queue.close()
terminate_processes(pid_list)
gc.collect() | This procedure organizes the addition of small fluctuations on top of
a matrix of similarities at 'hdf5_file' across 'N_processes'
different processes. Each of those processes is an instance of the
class 'Fluctuations_Worker' defined elsewhere in this module. |
384,830 | def accuracy_helper(egg, match='exact', distance='euclidean',
features=None):
def acc(lst):
return len([i for i in np.unique(lst) if i>=0])/(egg.list_length)
opts = dict(match=match, distance=distance, features=features)
if match is 'exact':
opts.update({'features': 'item'})
recmat = recall_matrix(egg, **opts)
if match in ['exact', 'best']:
result = [acc(lst) for lst in recmat]
elif match is 'smooth':
result = np.mean(recmat, axis=1)
else:
raise ValueError('Match must be set to exact, best or smooth.')
return np.nanmean(result, axis=0) | Computes proportion of words recalled
Parameters
----------
egg : quail.Egg
Data to analyze
match : str (exact, best or smooth)
Matching approach to compute recall matrix. If exact, the presented and
recalled items must be identical (default). If best, the recalled item
that is most similar to the presented items will be selected. If smooth,
a weighted average of all presented items will be used, where the
weights are derived from the similarity between the recalled item and
each presented item.
distance : str
The distance function used to compare presented and recalled items.
Applies only to 'best' and 'smooth' matching approaches. Can be any
distance function supported by numpy.spatial.distance.cdist.
Returns
----------
prop_recalled : numpy array
proportion of words recalled |
384,831 | def align_bam(in_bam, ref_file, names, align_dir, data):
config = data["config"]
out_file = os.path.join(align_dir, "{0}-sort.bam".format(names["lane"]))
samtools = config_utils.get_program("samtools", config)
bedtools = config_utils.get_program("bedtools", config)
resources = config_utils.get_resources("samtools", config)
num_cores = config["algorithm"].get("num_cores", 1)
max_mem = config_utils.adjust_memory(resources.get("memory", "1G"),
3, "decrease").upper()
if not utils.file_exists(out_file):
with tx_tmpdir(data) as work_dir:
with postalign.tobam_cl(data, out_file, bam.is_paired(in_bam)) as (tobam_cl, tx_out_file):
bwa_cmd = _get_bwa_mem_cmd(data, out_file, ref_file, "-")
tx_out_prefix = os.path.splitext(tx_out_file)[0]
prefix1 = "%s-in1" % tx_out_prefix
cmd = ("unset JAVA_HOME && "
"{samtools} sort -n -o -l 1 -@ {num_cores} -m {max_mem} {in_bam} {prefix1} "
"| {bedtools} bamtofastq -i /dev/stdin -fq /dev/stdout -fq2 /dev/stdout "
"| {bwa_cmd} | ")
cmd = cmd.format(**locals()) + tobam_cl
do.run(cmd, "bwa mem alignment from BAM: %s" % names["sample"], None,
[do.file_nonempty(tx_out_file), do.file_reasonable_size(tx_out_file, in_bam)])
return out_file | Perform direct alignment of an input BAM file with BWA using pipes.
This avoids disk IO by piping between processes:
- samtools sort of input BAM to queryname
- bedtools conversion to interleaved FASTQ
- bwa-mem alignment
- samtools conversion to BAM
- samtools sort to coordinate |
384,832 | def _CallWindowsNetCommand(parameters):
import subprocess
popen = subprocess.Popen(["net"] + parameters, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdoutdata, stderrdata = popen.communicate()
if stderrdata:
raise OSError("Failed on call net.exe: %s" % stderrdata)
return stdoutdata | Call Windows NET command, used to acquire/configure network services settings.
:param parameters: list of command line parameters
:return: command output |
384,833 | def register(self, token, regexp):
self._tokens.append((token, re.compile(regexp))) | Register a token.
Args:
token (Token): the token class to register
regexp (str): the regexp for that token |
384,834 | def turn_right(self, angle_degrees, rate=RATE):
flight_time = angle_degrees / rate
self.start_turn_right(rate)
time.sleep(flight_time)
self.stop() | Turn to the right, staying on the spot
:param angle_degrees: How far to turn (degrees)
:param rate: The turning speed (degrees/second)
:return: |
384,835 | def walk_dependencies(root, visitor):
def visit(parent, visitor):
for d in get_dependencies(parent):
visitor(d, parent)
visit(d, visitor)
visitor(root, None)
visit(root, visitor) | Call visitor on root and all dependencies reachable from it in breadth
first order.
Args:
root (component): component function or class
visitor (function): signature is `func(component, parent)`. The
call on root is `visitor(root, None)`. |
384,836 | def activities(self, *args, **kwargs):
if self._client.match_app_version(label=, version=, default=True):
return self._client.activities(*args, scope=self.id, **kwargs)
else:
return self._client.activities(*args, scope_id=self.id, **kwargs) | Retrieve activities belonging to this scope.
See :class:`pykechain.Client.activities` for available parameters. |
384,837 | def delete(self, ids):
url = build_uri_with_ids(, ids)
return super(ApiVlan, self).delete(url) | Method to delete vlan's by their ids
:param ids: Identifiers of vlan's
:return: None |
384,838 | def execute(self):
python_tgts = self.context.targets(
lambda tgt: isinstance(tgt, (PythonTarget))
)
if not python_tgts:
return 0
interpreter_cache = PythonInterpreterCache.global_instance()
with self.invalidated(self.get_targets(self._is_checked)) as invalidation_check:
failure_count = 0
tgts_by_compatibility, _ = interpreter_cache.partition_targets_by_compatibility(
[vt.target for vt in invalidation_check.invalid_vts]
)
for filters, targets in tgts_by_compatibility.items():
sources = self.calculate_sources([tgt for tgt in targets])
if sources:
allowed_interpreters = set(interpreter_cache.setup(filters=filters))
if not allowed_interpreters:
raise TaskError(
.format(targets, filters))
interpreter = min(allowed_interpreters)
failure_count += self.checkstyle(interpreter, sources)
if failure_count > 0 and self.get_options().fail:
raise TaskError(
.format(failure_count))
return failure_count | Run Checkstyle on all found non-synthetic source files. |
384,839 | def add_details(file_name, title, artist, album, lyrics=""):
tags = EasyMP3(file_name)
tags["title"] = title
tags["artist"] = artist
tags["album"] = album
tags.save()
tags = ID3(file_name)
uslt_output = USLT(encoding=3, lang=u'eng', desc=u'desc', text=lyrics)
tags["USLT::"] = uslt_output
tags.save(file_name)
log.log("> Adding properties")
log.log_indented("[*] Title: %s" % title)
log.log_indented("[*] Artist: %s" % artist)
log.log_indented("[*] Album: %s " % album) | Adds the details to song |
384,840 | def lower_camel(string, prefix='', suffix=''):
return require_valid(append_underscore_if_keyword(''.join(
word.lower() if index == 0 else upper_case_first_char(word)
for index, word in enumerate(en.words(' '.join([prefix, string, suffix]))))
)) | Generate a camel-case identifier.
Useful for unit test methods.
Takes a string, prefix, and optional suffix.
`prefix` can be set to `''`, though be careful - without a prefix, the
function will throw `InvalidIdentifier` when your string starts with a
number.
Example:
>>> lower_camel("User can login", prefix='test')
'testUserCanLogin' |
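The row above depends on helpers from its own package (en.words, require_valid, upper_case_first_char, append_underscore_if_keyword). The following is a simplified, self-contained approximation of the same idea using a plain regex word split; it skips the keyword-underscoring and identifier validation the original performs.

```python
import re

def lower_camel_sketch(string, prefix='', suffix=''):
    # split the combined string into words and camel-case them
    words = re.findall(r'[A-Za-z0-9]+', ' '.join([prefix, string, suffix]))
    return ''.join(w.lower() if i == 0 else w[0].upper() + w[1:]
                   for i, w in enumerate(words))

print(lower_camel_sketch("User can login", prefix='test'))  # testUserCanLogin
```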
384,841 | def __send_command(
self, name, args=None, withcontent=False, extralines=None,
nblines=-1):
tosend = name.encode("utf-8")
if args:
tosend += b" " + b" ".join(self.__prepare_args(args))
self.__dprint(b"Command: " + tosend)
self.sock.sendall(tosend + CRLF)
if extralines:
for l in extralines:
self.sock.sendall(l + CRLF)
code, data, content = self.__read_response(nblines)
if isinstance(code, six.binary_type):
code = code.decode("utf-8")
if isinstance(data, six.binary_type):
data = data.decode("utf-8")
if withcontent:
return (code, data, content)
return (code, data) | Send a command to the server.
If args is not empty, we concatenate the given command with
the content of this list. If extralines is not empty, they are
sent one by one to the server. (CRLF is automatically
appended to them)
We wait for a response just after the command has been sent.
:param name: the command to send
:param args: a list of arguments for this command
:param withcontent: tells the function to return the server's response
or not
:param extralines: a list of extra lines to send after the command
:param nblines: the number of response lines to read (all by default)
:returns: a tuple of the form (code, data[, response]) |
384,842 | def avail_sizes(call=None):
if call == :
raise SaltCloudSystemExit(
)
conn = get_conn()
sizes = conn.fixed_server_flavors()
return sizes | Return a dict of all available VM sizes on the cloud provider with
relevant data. |
384,843 | def log_likelihood_pairwise(data, params):
loglik = 0
for winner, loser in data:
loglik -= np.logaddexp(0, -(params[winner] - params[loser]))
return loglik | Compute the log-likelihood of model parameters. |
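Each (winner, loser) pair above contributes -log(1 + exp(-(params[winner] - params[loser]))), i.e. the log of a Bradley-Terry style win probability. A toy check of that computation, re-implemented standalone rather than calling the library:

```python
import numpy as np

def loglik_pairwise(data, params):
    # sum of log P(winner beats loser) under a Bradley-Terry style model
    return -sum(np.logaddexp(0, -(params[w] - params[l])) for w, l in data)

params = np.array([1.0, 0.0])            # item 0 is stronger
data = [(0, 1), (0, 1), (1, 0)]          # two expected wins, one upset
print(loglik_pairwise(data, params))     # ~ -1.94
```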
384,844 | def read(self, size=None):
if not self.fd:
raise ValueError()
if not size:
size = self.remaining
size = min([self.remaining, size])
if not size:
return
data = self.fd.read(size)
self.remaining -= size
return data | Read a specified number of bytes from the file descriptor
This method emulates the normal file descriptor's ``read()`` method and
restricts the total number of bytes readable.
If file descriptor is not present (e.g., ``close()`` method had been
called), ``ValueError`` is raised.
If ``size`` is omitted, or ``None``, or any other falsy value, read
will be done up to the remaining length (constructor's ``length``
argument minus the bytes that have been read previously).
This method internally invokes the file descriptor's ``read()`` method,
and the method must accept a single integer positional argument. |
384,845 | def file_root_name(name):
base = os.path.basename(name)
root = os.path.splitext(base)[0]
if not root:
warning =
log.warning(warning.format(name))
return root | Returns the root name of a file from a full file path.
It will not raise an error if the result is empty, but a warning will be
issued. |
384,846 | def synthesize_software_module_info(modules, module_types):
res = {}
for mod_id, mod_info in modules.items():
mod_info = dict(mod_info)
mod_type = module_types[mod_info["type"]]
mod_info["package"] = mod_type["package"]
mod_info["executable"] = mod_type["executable"]
if not "categories" in mod_info:
mod_info["categories"] = mod_type.get(
"categories", all_categories
)
mod_info["inputs"] = mod_type["inputs"]
mod_info["outputs"] = mod_type["outputs"]
mod_info["arguments"] = process_args(
mod_id, mod_info.get("arguments", []), mod_type["arguments"]
)
mod_info["parameters"] = process_params(
mod_id, mod_info.get("parameters", {}), mod_type["parameters"]
)
res[mod_id] = mod_info
return res | This function takes as input a dictionary of `modules` (mapping module IDs
to :class:`~openag.models.SoftwareModule` objects) and a dictionary of
`module_types` (mapping module type IDs to
:class:`~openag.models.FirmwareModuleType` objects). For each module, it
synthesizes the information in that module and the corresponding module
type and returns all the results in a dictionary keyed on the ID of the
module. |
384,847 | def setupArgparse():
parser = argparse.ArgumentParser()
parser.add_argument("callsign", help="Callsign of radio")
parser.add_argument("id", type=int, help="ID number radio")
parser.add_argument("-l", "--loopback", action="store_true",
help="Use software loopback serial port")
parser.add_argument("-p", "--port", default="/dev/ttyUSB0",
help="Physical serial port of radio")
return parser.parse_args() | Sets up argparse module to create command line options and parse them.
Uses the argparse module to add arguments to the command line for
faradayio-cli. Once the arguments are added and parsed the arguments are
returned
Returns:
argparse.Namespace: Populated namespace of arguments |
384,848 | def update_wish_list_by_id(cls, wish_list_id, wish_list, **kwargs):
kwargs[] = True
if kwargs.get():
return cls._update_wish_list_by_id_with_http_info(wish_list_id, wish_list, **kwargs)
else:
(data) = cls._update_wish_list_by_id_with_http_info(wish_list_id, wish_list, **kwargs)
return data | Update WishList
Update attributes of WishList
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_wish_list_by_id(wish_list_id, wish_list, async=True)
>>> result = thread.get()
:param async bool
:param str wish_list_id: ID of wishList to update. (required)
:param WishList wish_list: Attributes of wishList to update. (required)
:return: WishList
If the method is called asynchronously,
returns the request thread. |
384,849 | def is_job_config(config):
try:
if config[][][] is not None:
return True
except KeyError:
return False
except TypeError:
return False
except IndexError:
return False
return False | Check whether given dict of config is job config |
384,850 | def update(self, iterable={}, **kwargs):
def _merge(a, *args):
for key, value in itertools.chain(*args):
if key in a and isinstance(value, (dict, Conf)):
value = _merge(a[key], value.items())
a[key] = value
return a
if isinstance(iterable, (dict, Conf)):
iterable = iterable.items()
_merge(self._data, iterable, kwargs.items()) | Recursively updates self with a given iterable.
TODO: rewrite this ugly stuff |
384,851 | def _load_enums(root):
out = collections.OrderedDict()
for elem in root.findall():
name = elem.attrib[]
value = elem.attrib[]
comment = elem.get()
out[name] = Enum(name, value, comment)
return out | Returns {name: Enum} |
384,852 | def alter_edge(self, from_index, to_index,
new_weight=None, new_edge_properties=None):
existing_edge = self.graph.get_edge_data(from_index, to_index)
if not existing_edge:
raise ValueError("Edge between {} and {} cannot be altered;\
no edge exists between those sites.".format(
from_index, to_index
))
if new_weight is not None:
self.graph[from_index][to_index][0]['weight'] = new_weight
if new_edge_properties is not None:
for prop in list(new_edge_properties.keys()):
self.graph[from_index][to_index][0][prop] = new_edge_properties[prop] | Alters either the weight or the edge_properties of
an edge in the MoleculeGraph.
:param from_index: int
:param to_index: int
:param new_weight: alter_edge does not require
that weight be altered. As such, by default, this
is None. If weight is to be changed, it should be a
float.
:param new_edge_properties: alter_edge does not require
that edge_properties be altered. As such, by default,
this is None. If any edge properties are to be changed,
it should be a dictionary of edge properties to be changed.
:return: |
384,853 | def cmd(send, msg, args):
if msg and not check_exists(msg):
send("Non-existant subreddit.")
return
subreddit = msg if msg else None
send(random_post(subreddit, args[][][])) | Gets a random Reddit post.
Syntax: {command} [subreddit] |
384,854 | def query(self, coords, return_sigma=False):
n_coords_ret = coords.shape[0]
has_dist = hasattr(coords.distance, 'kpc')
d = coords.distance.kpc if has_dist else None
pix_idx = self._coords2idx(coords)
mask_idx = (pix_idx == self._n_pix)
if np.any(mask_idx):
pix_idx[mask_idx] = 0
if has_dist:
d = coords.distance.kpc
dist_idx_ceil = np.searchsorted(self._dists, d)
ret = np.empty((n_coords_ret,), dtype=)
if return_sigma:
sigma_ret = np.empty((n_coords_ret,), dtype=)
idx_near = (dist_idx_ceil == 0) & ~mask_idx
print(.format(np.sum(idx_near)))
if np.any(idx_near):
a = d[idx_near] / self._dists[0]
ret[idx_near] = a[:] * self._A[pix_idx[idx_near], 0]
if return_sigma:
sigma_ret[idx_near] = a[:] * self._sigma_A[pix_idx[idx_near], 0]
idx_far = (dist_idx_ceil == self._n_dists) & ~mask_idx
print(.format(np.sum(idx_far)))
if np.any(idx_far):
ret[idx_far] = self._A[pix_idx[idx_far], -1]
if return_sigma:
sigma_ret[idx_far] = self._sigma_A[pix_idx[idx_far], -1]
idx_btw = ~idx_near & ~idx_far & ~mask_idx
print(.format(np.sum(idx_btw)))
if np.any(idx_btw):
d_ceil = self._dists[dist_idx_ceil[idx_btw]]
d_floor = self._dists[dist_idx_ceil[idx_btw]-1]
a = (d_ceil - d[idx_btw]) / (d_ceil - d_floor)
ret[idx_btw] = (
(1.-a[:]) * self._A[pix_idx[idx_btw], dist_idx_ceil[idx_btw]]
+ a[:] * self._A[pix_idx[idx_btw], dist_idx_ceil[idx_btw]-1])
if return_sigma:
w0 = (1.-a)**2
w1 = a**2
norm = 1. / (w0 + w1)
w0 *= norm
w1 *= norm
sigma_ret[idx_btw] = np.sqrt(
w0 * self._sigma_A[pix_idx[idx_btw], dist_idx_ceil[idx_btw]]**2
+ w1 * self._sigma_A[pix_idx[idx_btw], dist_idx_ceil[idx_btw]-1]**2
)
else:
ret = self._A[pix_idx, :]
if return_sigma:
sigma_ret = self._sigma_A[pix_idx, :]
if np.any(mask_idx):
ret[mask_idx] = np.nan
if return_sigma:
sigma_ret[mask_idx] = np.nan
if return_sigma:
return ret, sigma_ret
return ret | Returns r-band extinction, A_r, at the given coordinates. Can also
return uncertainties.
Args:
coords (:obj:`astropy.coordinates.SkyCoord`): The coordinates to query.
return_sigma (Optional[:obj:`bool`]): If ``True``, returns the uncertainty in
extinction as well. Defaults to ``False``.
Returns:
Extinction in the r-band at the specified coordinates, in mags.
The shape of the output depends on whether :obj:`coords` contains
distances.
If :obj:`coords` does not specify distance(s), then the shape of the
output begins with :obj:`coords.shape`. If :obj:`coords` does specify
distance(s), then the shape of the output begins with
``coords.shape + ([number of distance bins],)``. |
384,855 | def makepipecomponent(idf, pname):
apipe = idf.newidfobject("Pipe:Adiabatic".upper(), Name=pname)
apipe.Inlet_Node_Name = "%s_inlet" % (pname,)
apipe.Outlet_Node_Name = "%s_outlet" % (pname,)
return apipe | make a pipe component and
generate its inlet/outlet node names |
384,856 | def _match_files_flat_hierarchy(self, text_files, audio_files):
self.log(u"Matching files in flat hierarchy")
self.log([u"Text files: ", text_files])
self.log([u"Audio files: ", audio_files])
d_text = {}
d_audio = {}
for text_file in text_files:
text_file_no_ext = gf.file_name_without_extension(text_file)
d_text[text_file_no_ext] = text_file
self.log([u"Added text file to key ", text_file, text_file_no_ext])
for audio_file in audio_files:
audio_file_no_ext = gf.file_name_without_extension(audio_file)
d_audio[audio_file_no_ext] = audio_file
self.log([u"Added audio file to key ", audio_file, audio_file_no_ext])
tasks = []
for key in d_text.keys():
self.log([u"Examining text key ", key])
if key in d_audio:
self.log([u"Key is also in audio", key])
tasks.append([key, d_text[key], d_audio[key]])
self.log([u"Added pair (, )", d_text[key], d_audio[key]])
return tasks | Match audio and text files in flat hierarchies.
Two files match if their names,
once removed the file extension,
are the same.
Examples: ::
foo/text/a.txt foo/audio/a.mp3 => match: ["a", "foo/text/a.txt", "foo/audio/a.mp3"]
foo/text/a.txt foo/audio/b.mp3 => no match
foo/res/c.txt foo/res/c.mp3 => match: ["c", "foo/res/c.txt", "foo/res/c.mp3"]
foo/res/d.txt foo/res/e.mp3 => no match
:param list text_files: the entries corresponding to text files
:param list audio_files: the entries corresponding to audio files
:rtype: list of lists (see above) |
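The matching rule itself is simple to reproduce: index audio files by their extension-stripped basename and look up each text file's stem. A minimal standalone sketch (names illustrative, no aeneas dependencies):

```python
import os

def match_flat(text_files, audio_files):
    stem = lambda p: os.path.splitext(os.path.basename(p))[0]
    audio_by_stem = {stem(a): a for a in audio_files}
    return [[stem(t), t, audio_by_stem[stem(t)]]
            for t in text_files if stem(t) in audio_by_stem]

print(match_flat(["foo/text/a.txt", "foo/res/d.txt"],
                 ["foo/audio/a.mp3", "foo/res/e.mp3"]))
# [['a', 'foo/text/a.txt', 'foo/audio/a.mp3']]
```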
384,857 | def parse_requested_expands(query_key, request):
requested_expands = []
for key, val in request.params.items():
if key == query_key:
requested_expands += val.split(',')
return requested_expands | Extracts the value of the expand query string parameter from a request.
Supports comma separated lists.
:param query_key: The name query string parameter.
:param request: Request instance.
:return: List of strings representing the values of the expand query string value. |
384,858 | def _scalar_power(self, f, p, out):
f_copy = f.copy()
def pow_posint(x, n):
if isinstance(x, np.ndarray):
y = x.copy()
return ipow_posint(y, n)
else:
return x ** n
def ipow_posint(x, n):
if n == 1:
return x
elif n % 2 == 0:
x *= x
return ipow_posint(x, n // 2)
else:
tmp = x.copy()
x *= x
ipow_posint(x, n // 2)
x *= tmp
return x
def power_oop(x, **kwargs):
if p == 0:
return self.one()
elif p == int(p) and p >= 1:
return np.asarray(pow_posint(f_copy(x, **kwargs), int(p)),
dtype=self.scalar_out_dtype)
else:
result = np.power(f_copy(x, **kwargs), p)
return result.astype(self.scalar_out_dtype)
out._call_out_of_place = power_oop
decorator = preload_first_arg(out, )
out._call_in_place = decorator(_default_in_place)
out._call_has_out = out._call_out_optional = False
return out | Compute ``p``-th power of ``f`` for ``p`` scalar. |
384,859 | def find_pulls(self, testpulls=None):
result = {}
for lname, repo in self.repositories.items():
if lname not in self.archive:
raise ValueError("Trying to find pull requests for a repository "
"that hasnt even bother
continue
pulls = testpulls if testpulls is not None else repo.repo.get_pulls("open")
result[lname] = []
for pull in pulls:
newpull = True
if pull.snumber in self.archive[lname]:
if self.archive[lname][pull.snumber]["completed"] == True:
newpull = False
if newpull:
result[lname].append(PullRequest(self, repo, pull, testpulls is not None))
return result | Finds a list of new pull requests that need to be processed.
:arg testpulls: a list of tserver.FakePull instances so we can test the code
functionality without making live requests to github. |
384,860 | def toggle_buttons(self):
all_time_on = self.all_time.get_value()
all_chan_on = self.all_chan.get_value()
self.times[].setEnabled(not all_time_on)
self.times[].setEnabled(not all_time_on)
self.idx_chan.setEnabled(not all_chan_on) | Turn buttons on and off. |
384,861 | def get_selinux_status():
getenforce_command_exists()
o = run_cmd(["getenforce"], return_output=True).strip()
logger.debug("SELinux is %r", o)
return o | get SELinux status of host
:return: string, one of Enforcing, Permissive, Disabled |
384,862 | def update(self, *args, **kwargs):
for next_dict in chain(args, (kwargs, )):
for k, v in next_dict.items():
self[k] = v | Equivalent to the python dict update method.
Update the dictionary with the key/value pairs from other, overwriting
existing keys.
Args:
other (dict): The source of key value pairs to add to headers
Keyword Args:
All keyword arguments are stored in header directly
Returns:
None |
384,863 | def to_designspace_instances(self):
for instance in self.font.instances:
if self.minimize_glyphs_diffs or (
is_instance_active(instance)
and _is_instance_included_in_family(self, instance)
):
_to_designspace_instance(self, instance) | Write instance data from self.font to self.designspace. |
384,864 | def insured_losses(losses, deductible, insured_limit):
return numpy.piecewise(
losses,
[losses < deductible, losses > insured_limit],
[0, insured_limit - deductible, lambda x: x - deductible]) | :param losses: an array of ground-up loss ratios
:param float deductible: the deductible limit in fraction form
:param float insured_limit: the insured limit in fraction form
Compute insured losses for the given asset and losses, from the point
of view of the insurance company. For instance:
>>> insured_losses(numpy.array([3, 20, 101]), 5, 100)
array([ 0, 15, 95])
- if the loss is 3 (< 5) the company does not pay anything
- if the loss is 20 the company pays 20 - 5 = 15
- if the loss is 101 the company pays 100 - 5 = 95 |
384,865 | def on_data(self, raw_data):
data = json.loads(raw_data)
message_type = data[].get()
prepare_method = % (message_type)
args = getattr(self, prepare_method, self.prepare_fallback)(data.get())
method_name = % (message_type,)
func = getattr(self, method_name, self.on_fallback)
func(*args, meta=StreamingMeta.from_response_data(data.get(), self.api)) | Called when raw data is received from connection.
Override this method if you wish to manually handle
the stream data. Return False to stop stream and close connection. |
384,866 | def atomic_output_file(dest_path, make_parents=False, backup_suffix=None, suffix=".partial.%s"):
if dest_path == os.devnull:
yield dest_path
else:
tmp_path = ("%s" + suffix) % (dest_path, new_uid())
if make_parents:
make_parent_dirs(tmp_path)
yield tmp_path
if backup_suffix:
move_to_backup(dest_path, backup_suffix=backup_suffix)
if os.path.isdir(dest_path):
shutil.rmtree(dest_path)
shutil.move(tmp_path, dest_path) | A context manager for convenience in writing a file or directory in an atomic way. Set up
a temporary name, then rename it after the operation is done, optionally making a backup of
the previous file or directory, if present. |
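The same write-to-temp-then-rename idea can be shown in a few lines with the standard library. This is a generic sketch of the pattern, not the library's implementation: it handles plain files only and does no backups.

```python
import os
import tempfile

def atomic_write(dest_path, data):
    # write into a temp file in the same directory, then atomically rename
    fd, tmp_path = tempfile.mkstemp(dir=os.path.dirname(dest_path) or '.')
    try:
        with os.fdopen(fd, 'w') as f:
            f.write(data)
        os.replace(tmp_path, dest_path)   # atomic rename onto the destination
    except Exception:
        os.unlink(tmp_path)
        raise

atomic_write('example.txt', 'hello\n')
```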
384,867 | def stupid_hack(most=10, wait=None):
if wait is not None:
time.sleep(wait)
else:
time.sleep(random.randrange(1, most)) | Sleep for a random time between 1 and `most` seconds (default 10), or for `wait` seconds if given. |
384,868 | def read_pl_dataset(infile):
m, n = [int(i) for i in infile.readline().split()]
gamma = np.array([float(f) for f in infile.readline().split()])
if len(gamma) != m:
infile.close()
raise ValueError("malformed file: len(gamma) != m")
votes = []
i = 0
for line in infile:
vote = [int(v) for v in line.split()]
if len(vote) != m:
infile.close()
raise ValueError("malformed file: len(vote) != m")
votes.append(vote)
i += 1
infile.close()
if i != n:
raise ValueError("malformed file: number of votes != n")
return (gamma, np.array(votes)) | Description:
Read from disk a Plackett-Luce dataset.
Parameters:
infile: open file object from which to read the dataset |
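The expected file layout follows directly from the reader: a header line with m and n, a line of m gamma values, then n vote lines of m integers each. A small illustration parsing an in-memory sample with the same steps:

```python
import io
import numpy as np

sample = io.StringIO(
    "3 2\n"            # m alternatives, n votes
    "0.5 0.3 0.2\n"    # m gamma values
    "0 1 2\n"          # vote 1
    "2 0 1\n"          # vote 2
)
m, n = [int(i) for i in sample.readline().split()]
gamma = np.array([float(f) for f in sample.readline().split()])
votes = np.array([[int(v) for v in line.split()] for line in sample])
print(gamma, votes.shape)   # [0.5 0.3 0.2] (2, 3)
```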
384,869 | def get_livestate(self):
livestate = 0
if self.active:
if not self.reachable:
livestate = 1
elif not self.alive:
livestate = 2
else:
livestate = 3
livestate_output = "%s/%s is %s" % (self.type, self.name, [
"up and running.",
"warning because not reachable.",
"critical because not responding.",
"not active by configuration."
][livestate])
return (livestate, livestate_output) | Get the SatelliteLink live state.
The live state is a tuple information containing a state identifier and a message, where:
state is:
- 0 for an up and running satellite
- 1 if the satellite is not reachable
- 2 if the satellite is dead
- 3 else (not active)
:return: tuple |
384,870 | def validate(self, validator=None, skip_relations=False):
validator = validation.make_validator(validator)
self.log()
self.preload()
required = [
,
,
,
,
,
]
for f in required:
self.log("Validating required file: %s"%f)
data = self.read(f)
for i in data:
i.validate(validator=validator)
if skip_relations is False:
i.validate_feed(validator=validator)
optional = [
,
,
,
,
,
,
]
for f in optional:
self.log("Validating optional file: %s"%f)
try:
data = self.read(f)
except KeyError, e:
data = []
for i in data:
i.validate(validator=validator)
if skip_relations is False:
i.validate_feed(validator=validator)
return validator | Validate a GTFS
:param validator: a ValidationReport
:param (bool) skip_relations: skip validation of relations between entities (e.g. stop_times to stops)
:return: |
384,871 | def delete_feed(self, pid):
logger.info("delete_feed(pid=\"%s\") [lid=%s]", pid, self.__lid)
return self.__delete_point(R_FEED, pid) | Delete a feed, identified by its local id.
Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)
containing the error if the infrastructure detects a problem
Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)
if there is a communications problem between you and the infrastructure
`pid` (required) (string) local identifier of your feed you want to delete |
384,872 | def save(self) -> None:
path = str(self.save_path.absolute())
log.info(.format(path))
self._net.save(path) | Saves model to the save_path, provided in config. The directory is
already created by super().__init__, which is called in __init__ of this class |
384,873 | def from_url(cls, url, **kwargs):
url = urllib3.util.parse_url(url)
if url.host:
kwargs.setdefault(, url.host)
if url.port:
kwargs.setdefault(, url.port)
if url.scheme == :
kwargs.setdefault(, urllib3.HTTPSConnectionPool)
return cls(**kwargs) | Create a client from a url. |
384,874 | def LockRetryWrapper(self,
subject,
retrywrap_timeout=1,
retrywrap_max_timeout=10,
blocking=True,
lease_time=None):
timeout = 0
while timeout < retrywrap_max_timeout:
try:
return self.DBSubjectLock(subject, lease_time=lease_time)
except DBSubjectLockError:
if not blocking:
raise
stats_collector_instance.Get().IncrementCounter("datastore_retries")
time.sleep(retrywrap_timeout)
timeout += retrywrap_timeout
raise DBSubjectLockError("Retry number exceeded.") | Retry a DBSubjectLock until it succeeds.
Args:
subject: The subject which the lock applies to.
retrywrap_timeout: How long to wait before retrying the lock.
retrywrap_max_timeout: The maximum time to wait for a retry until we
raise.
blocking: If False, raise on first lock failure.
lease_time: lock lease time in seconds.
Returns:
The DBSubjectLock object
Raises:
DBSubjectLockError: If the maximum retry count has been reached. |
384,875 | def parse_uci(self, uci: str) -> Move:
move = Move.from_uci(uci)
if not move:
return move
move = self._to_chess960(move)
move = self._from_chess960(self.chess960, move.from_square, move.to_square, move.promotion, move.drop)
if not self.is_legal(move):
raise ValueError("illegal uci: {!r} in {}".format(uci, self.fen()))
return move | Parses the given move in UCI notation.
Supports both Chess960 and standard UCI notation.
The returned move is guaranteed to be either legal or a null move.
:raises: :exc:`ValueError` if the move is invalid or illegal in the
current position (but not a null move). |
384,876 | def add_prefix_from_pool(arg, opts):
args = {}
if in opts:
res = Pool.list({ : opts[] })
if len(res) == 0:
print("No pool named found." % opts[], file=sys.stderr)
sys.exit(1)
args[] = res[0]
if not in opts:
print("ERROR: You have to specify the address family.", file=sys.stderr)
sys.exit(1)
if opts[] == :
afis = [4]
elif opts[] == :
afis = [6]
elif opts[] == :
afis = [4, 6]
if in opts:
print("ERROR: can not be specified for assignment", file=sys.stderr)
sys.exit(1)
else:
print("ERROR: must be one of: %s" % " ".join(valid_families), file=sys.stderr)
sys.exit(1)
if in opts:
args[] = int(opts[])
for afi in afis:
p = _prefix_from_opts(opts)
if opts.get() is None:
p.vrf = args[].vrf
else:
p.vrf = get_vrf(opts.get(), abort=True)
if p.type is None:
if args[].default_type is None:
print("ERROR: Type not specified and no default-type specified for pool: %s" % opts[], file=sys.stderr)
p.type = args[].default_type
for avp in opts.get(, []):
try:
key, value = avp.split(, 1)
except ValueError:
print("ERROR: Incorrect extra-attribute: %s. Accepted form: \n" % avp, file=sys.stderr)
return
p.avps[key] = value
args[] = afi
try:
p.save(args)
except NipapError as exc:
print("Could not add prefix to NIPAP: %s" % str(exc), file=sys.stderr)
sys.exit(1)
if p.type == :
print("Host %s added to %s: %s" % (p.display_prefix,
vrf_format(p.vrf), p.node or p.description))
else:
print("Network %s added to %s: %s" % (p.display_prefix,
vrf_format(p.vrf), p.description))
if opts.get() is not None:
if p.type != :
print("ERROR: Not possible to add hosts to non-assignment", file=sys.stderr)
sys.exit(1)
for host in opts.get().split():
h_opts = {
: p.prefix,
: p.vrf.rt,
: ,
: host
}
add_prefix({}, h_opts, {}) | Add prefix using from-pool to NIPAP |
384,877 | def _parse_vars_tbl(self, var_tbl):
T = self._check_forward_mode_input_dict(var_tbl)
shape = (T, 1)
X = np.zeros(shape)
X[:,0] = var_tbl[self.var_name]
return X | Parse a table of variable bindings (dictionary with key = variable name) |
384,878 | def _find_proj_root():
proj_files = frozenset(('pelconf.yaml', 'pelconf.py'))
curr = os.getcwd()
while curr.startswith('/') and len(curr) > 1:
if proj_files & frozenset(os.listdir(curr)):
return curr
else:
curr = os.path.dirname(curr)
return None | Find the project path by going up the file tree.
This will look in the current directory and upwards for the pelconf file
(.yaml or .py) |
384,879 | def enclosing_frame(frame=None, level=2):
frame = frame or sys._getframe(level)
while frame.f_globals.get('__name__') == __name__: frame = frame.f_back
return frame | Get an enclosing frame that skips decorator code |
384,880 | def save_file(self, obj):
try:
import StringIO as pystringIO
raise pickle.PicklingError(
"Cannot pickle file %s as it does not appear to map to a physical, real file" % name)
else:
try:
tmpfile = file(name)
contents = tmpfile.read()
tmpfile.close()
except IOError:
raise pickle.PicklingError("Cannot pickle file %s as it cannot be read" % name)
retval = pystringIO.StringIO(contents)
curloc = obj.tell()
retval.seek(curloc)
retval.name = name
self.save(retval)
self.memoize(obj) | Save a file |
384,881 | def checkpoint(self, message, header=None, delay=0, **kwargs):
if not self.transport:
raise ValueError(
"This RecipeWrapper object does not contain "
"a reference to a transport object."
)
if not self.recipe_step:
raise ValueError(
"This RecipeWrapper object does not contain "
"a recipe with a selected step."
)
kwargs["delay"] = delay
self._send_to_destination(
self.recipe_pointer, header, message, kwargs, add_path_step=False
) | Send a message to the current recipe destination. This can be used to
keep a state for longer processing tasks.
:param delay: Delay transport of message by this many seconds |
384,882 | def do_struct(self, subcmd, opts, message):
client = MdClient(self.maildir, filesystem=self.filesystem)
as_json = getattr(opts, "json", False)
client.getstruct(message, as_json=as_json, stream=self.stdout) | ${cmd_name}: get the structure of the specified message
${cmd_usage}
${cmd_option_list} |
384,883 | def feature_enabled(self, feature_name):
feature_list = self.prop('available-features-list', None)
if feature_list is None:
raise ValueError("Firmware features are not supported on CPC %s" %
self.manager.cpc.name)
for feature in feature_list:
if feature['name'] == feature_name:
break
else:
raise ValueError("Firmware feature %s is not available on CPC %s" %
(feature_name, self.manager.cpc.name))
return feature['state'] | Indicates whether the specified feature is enabled for the CPC of this
partition.
The HMC must generally support features, and the specified feature must
be available for the CPC.
For a list of available features, see section "Features" in the
:term:`HMC API`, or use the :meth:`feature_info` method.
Authorization requirements:
* Object-access permission to this partition.
Parameters:
feature_name (:term:`string`): The name of the feature.
Returns:
bool: `True` if the feature is enabled, or `False` if the feature is
disabled (but available).
Raises:
:exc:`ValueError`: Features are not supported on the HMC.
:exc:`ValueError`: The specified feature is not available for the
CPC.
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError` |
384,884 | def file_loc():
import sys
import inspect
try:
raise Exception
except:
file_ = '/' + '/'.join((inspect.currentframe().f_code.co_filename.split('/'))[-3:])
line_ = sys.exc_info()[2].tb_frame.f_back.f_lineno
return "{}:{}".format(file_, line_) | Return file and line number |
384,885 | def setup_panel_params(self, coord):
if not self.panel_scales_x:
raise PlotnineError()
if not self.panel_scales_y:
raise PlotnineError()
self.panel_params = []
cols = [, ]
for i, j in self.layout[cols].itertuples(index=False):
i, j = i-1, j-1
params = coord.setup_panel_params(
self.panel_scales_x[i],
self.panel_scales_y[j])
self.panel_params.append(params) | Calculate the x & y range & breaks information for each panel
Parameters
----------
coord : coord
Coordinate |
384,886 | def as_error(self) :
"fills in and returns an Error object that reports the specified error name and message."
result = dbus.Error.init()
result.set(self.args[0], self.args[1])
return \
result | fills in and returns an Error object that reports the specified error name and message. |
384,887 | def get_for_model(self, obj):
qs = Tag.objects.language(get_language())
qs = qs.filter(
tagged_items__content_type=ctype_models.ContentType.objects.get_for_model(obj))
return qs.distinct() | Returns the tags for a specific model/content type. |
384,888 | def delete_pool(name):
try:
pool = pool_api.delete_pool(name=name)
except AirflowException as err:
_log.error(err)
response = jsonify(error="{}".format(err))
response.status_code = err.status_code
return response
else:
return jsonify(pool.to_json()) | Delete pool. |
384,889 | def serialize(script_string):
    string_tokens = script_string.split()
    serialized_script = bytearray()
    for token in string_tokens:
        if token == 'OP_CODESEPARATOR' or token == 'OP_PUSHDATA4':
            raise NotImplementedError('{} is not supported.'.format(token))
        if token in riemann.network.CODE_TO_INT_OVERWRITE:
            serialized_script.extend(
                [riemann.network.CODE_TO_INT_OVERWRITE[token]])
        elif token in CODE_TO_INT:
            serialized_script.extend([CODE_TO_INT[token]])
        else:
            token_bytes = bytes.fromhex(token)
            if len(token_bytes) <= 75:
                op = 'OP_PUSH_{}'.format(len(token_bytes))
                serialized_script.extend([CODE_TO_INT[op]])
                serialized_script.extend(token_bytes)
            elif len(token_bytes) > 75 and len(token_bytes) <= 255:
                op = 'OP_PUSHDATA1'
                serialized_script.extend([CODE_TO_INT[op]])
                serialized_script.extend(utils.i2le(len(token_bytes)))
                serialized_script.extend(token_bytes)
            elif len(token_bytes) > 255 and len(token_bytes) <= 1000:
                op = 'OP_PUSHDATA2'
                serialized_script.extend([CODE_TO_INT[op]])
                serialized_script.extend(
                    utils.i2le_padded(len(token_bytes), 2))
                serialized_script.extend(token_bytes)
            else:
                raise NotImplementedError(
                    'Hex string too long to serialize.')
    return serialized_script | str -> bytearray |
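The size-dependent branching above follows standard Bitcoin Script push rules (direct pushes up to 75 bytes, OP_PUSHDATA1 up to 255 bytes, OP_PUSHDATA2 beyond that). A self-contained sketch of just that length-prefix selection, independent of the riemann helpers:

def push_opcode_and_length_prefix(data: bytes) -> bytes:
    # Returns the opcode (plus length bytes) that should precede `data`
    # in a serialized script, per standard Bitcoin Script push rules.
    n = len(data)
    if n <= 75:
        return bytes([n])                                # direct push: opcode equals the length
    elif n <= 255:
        return bytes([0x4c, n])                          # OP_PUSHDATA1 + 1-byte length
    elif n <= 65535:
        return bytes([0x4d]) + n.to_bytes(2, 'little')   # OP_PUSHDATA2 + 2-byte LE length
    else:
        return bytes([0x4e]) + n.to_bytes(4, 'little')   # OP_PUSHDATA4 + 4-byte LE length

assert push_opcode_and_length_prefix(b'\x00' * 20) == b'\x14'
assert push_opcode_and_length_prefix(b'\x00' * 100) == b'\x4c\x64'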
384,890 | def get_substrates(self, material_id, number=50, orient=None):
req = "/materials/{}/substrates?n={}".format(material_id, number)
if orient:
req += "&orient={}".format(" ".join(map(str, orient)))
return self._make_request(req) | Get a substrate list for a material id. The list is in order of
increasing elastic energy if an elastic tensor is available for
the material_id. Otherwise the list is in order of increasing
matching area.
Args:
material_id (str): Materials Project material_id, e.g. 'mp-123'.
orient (list) : substrate orientation to look for
number (int) : number of substrates to return;
n=0 returns all available matches
Returns:
list of dicts with substrate matches |
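A hedged usage sketch, assuming this method is called through pymatgen's MPRester client with a valid Materials Project API key; the material id 'mp-149' (silicon) and the (111) orientation are only illustrative values:

from pymatgen.ext.matproj import MPRester  # import path may differ between pymatgen versions

with MPRester("YOUR_API_KEY") as mpr:
    # Five best substrate candidates for the (111) film orientation of mp-149.
    for match in mpr.get_substrates("mp-149", number=5, orient=[1, 1, 1]):
        print(match)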
384,891 | def main():
arguments = docopt(__doc__)
cfg_filename = pkg_resources.resource_filename(
,
)
kb = KnowledgeBase(cfg_filename)
if arguments["find"]:
search_string = arguments["<search_string>"]
try:
urn = CTS_URN(search_string)
match = kb.get_resource_by_urn(str(urn))
show_result(match, verbose=True)
return
except BadCtsUrnSyntax as e:
pass
except IndexError as e:
raise e
print("\nNo records with this CTS URN!\n")
return
try:
matches = kb.search(search_string)
print("\nSearching for \"%s\" yielded %s results" % (
search_string,
len(matches)
))
print_results(matches)
return
except SparqlReaderException as e:
print("\nWildcard word needs at least 4 leading characters")
elif arguments["add"]:
input_urn = arguments["--to"]
try:
urn = CTS_URN(input_urn)
except Exception as e:
print("The provided URN ({}) is invalid!".format(input_urn))
return
try:
resource = kb.get_resource_by_urn(urn)
assert resource is not None
except ResourceNotFound:
print("The KB does not contain a resource identified by {}".format(
urn
))
return
print(arguments)
pass | Define the CLI interface/commands. |
384,892 | def one(self, command, params=None):
dr = self.query(command, params)
if dr[]:
return dr[][0]
else:
    return None | Returns the first row of the response obtained via query
> db.query('SELECT * FROM users WHERE id=:id', {"id": MY_USER_ID})
:param command: SQL query
:param params: Parameters for prepared statements
:rtype: dict |
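A hedged usage sketch, assuming a wrapper instance named db that exposes the one() method above; the table and column names are illustrative:

# Fetch a single row as a dict, or None when nothing matches.
user = db.one('SELECT * FROM users WHERE id=:id', {"id": 42})
if user is None:
    print("no such user")
else:
    print(user)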
384,893 | def create_actor_delaunay(pts, color, **kwargs):
    array_name = kwargs.get('name', "")
    array_index = kwargs.get('index', 0)
    use_delaunay3d = kwargs.get("d3d", False)
    points = vtk.vtkPoints()
    points.SetData(pts)
    polydata = vtk.vtkPolyData()
    polydata.SetPoints(points)
    triangulation = vtk.vtkDelaunay3D() if use_delaunay3d else vtk.vtkDelaunay2D()
    triangulation.SetInputData(polydata)
    mapper = vtk.vtkDataSetMapper()
    mapper.SetInputConnection(triangulation.GetOutputPort())
    mapper.SetArrayName(array_name)
    mapper.SetArrayId(array_index)
    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    actor.GetProperty().SetColor(*color)
    return actor | Creates a VTK actor for rendering triangulated plots using Delaunay triangulation.
Keyword Arguments:
* ``d3d``: flag to choose between Delaunay2D (``False``) and Delaunay3D (``True``). *Default: False*
:param pts: points
:type pts: vtkFloatArray
:param color: actor color
:type color: list
:return: a VTK actor
:rtype: vtkActor |
384,894 | def subdevicenames(self) -> Tuple[str, ...]:
    stats: List[str] = collections.deque()
    for devicename, seq in self.sequences.items():
        if seq.NDIM:
            temp = devicename + '_'
            for prod in self._product(seq.shape):
                stats.append(temp + '_'.join(str(idx) for idx in prod))
        else:
            stats.append(devicename)
    return tuple(stats) | A |tuple| containing the (sub)device names.
Property |NetCDFVariableFlat.subdevicenames| clarifies which
row of |NetCDFVariableAgg.array| contains which time series.
For 0-dimensional series like |lland_inputs.Nied|, the plain
device names are returned
>>> from hydpy.core.examples import prepare_io_example_1
>>> nodes, elements = prepare_io_example_1()
>>> from hydpy.core.netcdftools import NetCDFVariableFlat
>>> ncvar = NetCDFVariableFlat('input_nied', isolate=False, timeaxis=1)
>>> for element in elements:
... nied1 = element.model.sequences.inputs.nied
... ncvar.log(nied1, nied1.series)
>>> ncvar.subdevicenames
('element1', 'element2', 'element3')
For higher dimensional sequences like |lland_fluxes.NKor|, an
additional suffix defines the index of the respective subdevice.
For example, the third row of |NetCDFVariableAgg.array| contains
the time series of the first hydrological response unit of the
second element:
>>> ncvar = NetCDFVariableFlat('flux_nkor', isolate=False, timeaxis=1)
>>> for element in elements:
... nkor1 = element.model.sequences.fluxes.nkor
... ncvar.log(nkor1, nkor1.series)
>>> ncvar.subdevicenames[1:3]
('element2_0', 'element2_1') |
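A self-contained sketch of the same flattening idea, using plain dicts of hypothetical device names and series shapes instead of actual HydPy sequence objects:

import itertools

def flatten_subdevice_names(devices):
    # devices maps a device name to the shape of its series (empty tuple for 0-dimensional).
    names = []
    for devicename, shape in devices.items():
        if shape:
            for idx in itertools.product(*(range(length) for length in shape)):
                names.append(devicename + '_' + '_'.join(str(i) for i in idx))
        else:
            names.append(devicename)
    return tuple(names)

print(flatten_subdevice_names({'element1': (), 'element2': (2,)}))
# ('element1', 'element2_0', 'element2_1')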
384,895 | def get_array_for_fit(observables: dict, track_pt_bin: int, jet_pt_bin: int) -> histogram.Histogram1D:
    for name, observable in observables.items():
        if observable.track_pt_bin == track_pt_bin and observable.jet_pt_bin == jet_pt_bin:
            return histogram.Histogram1D.from_existing_hist(observable.hist)
    raise ValueError(f"Cannot find fit with jet pt bin {jet_pt_bin} and track pt bin {track_pt_bin}") | Get a Histogram1D associated with the selected jet and track pt bins.
This is often used to retrieve data for fitting.
Args:
observables (dict): The observables from which the hist should be retrieved.
track_pt_bin (int): Track pt bin of the desired hist.
jet_pt_bin (int): Jet pt bin of the desired hist.
Returns:
Histogram1D: Converted TH1 or uproot histogram.
Raises:
ValueError: If the requested observable couldn't be found. |
384,896 | def get(self, *args, **kwargs):
    self.before_get(args, kwargs)
    qs = QSManager(request.args, self.schema)
    objects_count, objects = self.get_collection(qs, kwargs)
    schema_kwargs = getattr(self, 'get_schema_kwargs', dict())
    schema_kwargs.update({'many': True})
    self.before_marshmallow(args, kwargs)
    schema = compute_schema(self.schema,
                            schema_kwargs,
                            qs,
                            qs.include)
    result = schema.dump(objects).data
    view_kwargs = request.view_args if getattr(self, 'view_kwargs', None) is True else dict()
    add_pagination_links(result,
                         objects_count,
                         qs,
                         url_for(self.view, _external=True, **view_kwargs))
    result.update({'meta': {'count': objects_count}})
    final_result = self.after_get(result)
    return final_result | Retrieve a collection of objects |
384,897 | def upload_image(vol, img, offset, parallel=1,
                 manual_shared_memory_id=None, manual_shared_memory_bbox=None, manual_shared_memory_order='F'):
    global NON_ALIGNED_WRITE
    if not np.issubdtype(img.dtype, np.dtype(vol.dtype).type):
        raise ValueError('The image data type must match the volume data type. volume: {}, image: {}'.format(vol.dtype, img.dtype))
    (is_aligned, bounds, expanded) = check_grid_aligned(vol, img, offset)
    if is_aligned:
        upload_aligned(vol, img, offset, parallel=parallel,
                       manual_shared_memory_id=manual_shared_memory_id, manual_shared_memory_bbox=manual_shared_memory_bbox,
                       manual_shared_memory_order=manual_shared_memory_order)
        return
    elif vol.non_aligned_writes == False:
        msg = NON_ALIGNED_WRITE.format(mip=vol.mip, chunk_size=vol.chunk_size, offset=vol.voxel_offset, got=bounds, check=expanded)
        raise AlignmentError(msg)
    retracted = bounds.shrink_to_chunk_size(vol.underlying, vol.voxel_offset)
    core_bbox = retracted.clone() - bounds.minpt
    if not core_bbox.subvoxel():
        core_img = img[ core_bbox.to_slices() ]
        upload_aligned(vol, core_img, retracted.minpt, parallel=parallel,
                       manual_shared_memory_id=manual_shared_memory_id, manual_shared_memory_bbox=manual_shared_memory_bbox,
                       manual_shared_memory_order=manual_shared_memory_order)
    all_chunks = set(chunknames(expanded, vol.bounds, vol.key, vol.underlying))
    core_chunks = set(chunknames(retracted, vol.bounds, vol.key, vol.underlying))
    shell_chunks = all_chunks.difference(core_chunks)
    def shade_and_upload(img3d, bbox):
        img3d.setflags(write=1)
        shade(img3d, bbox, img, bounds)
        single_process_upload(vol, img3d, (( Vec(0,0,0), Vec(*img3d.shape[:3]), bbox.minpt, bbox.maxpt),), n_threads=0)
    download_multiple(vol, shell_chunks, fn=shade_and_upload) | Upload img to vol with offset. This is the primary entry point for uploads. |
384,898 | def tuning_config(tuner, inputs, job_name=None):
    train_config = training_base_config(tuner.estimator, inputs)
    hyperparameters = train_config.pop('HyperParameters', None)
    s3_operations = train_config.pop('S3Operations', None)
    if hyperparameters and len(hyperparameters) > 0:
        tuner.static_hyperparameters = \
            {utils.to_str(k): utils.to_str(v) for (k, v) in hyperparameters.items()}
    if job_name is not None:
        tuner._current_job_name = job_name
    else:
        base_name = tuner.base_tuning_job_name or utils.base_name_from_image(tuner.estimator.train_image())
        tuner._current_job_name = utils.name_from_base(base_name, tuner.TUNING_JOB_NAME_MAX_LENGTH, True)
    for hyperparameter_name in tuner._hyperparameter_ranges.keys():
        tuner.static_hyperparameters.pop(hyperparameter_name, None)
    train_config['StaticHyperParameters'] = tuner.static_hyperparameters
    tune_config = {
        'HyperParameterTuningJobName': tuner._current_job_name,
        'HyperParameterTuningJobConfig': {
            'Strategy': tuner.strategy,
            'HyperParameterTuningJobObjective': {
                'Type': tuner.objective_type,
                'MetricName': tuner.objective_metric_name,
            },
            'ResourceLimits': {
                'MaxNumberOfTrainingJobs': tuner.max_jobs,
                'MaxParallelTrainingJobs': tuner.max_parallel_jobs,
            },
            'ParameterRanges': tuner.hyperparameter_ranges(),
        },
        'TrainingJobDefinition': train_config
    }
    if tuner.metric_definitions is not None:
        tune_config['TrainingJobDefinition']['AlgorithmSpecification']['MetricDefinitions'] = \
            tuner.metric_definitions
    if tuner.tags is not None:
        tune_config['Tags'] = tuner.tags
    if s3_operations is not None:
        tune_config['S3Operations'] = s3_operations
    return tune_config | Export Airflow tuning config from an estimator
Args:
tuner (sagemaker.tuner.HyperparameterTuner): The tuner to export tuning config from.
inputs: Information about the training data. Please refer to the ``fit()`` method of
the associated estimator in the tuner, as this can take any of the following forms:
* (str) - The S3 location where training data is saved.
* (dict[str, str] or dict[str, sagemaker.session.s3_input]) - If using multiple channels for
training data, you can specify a dict mapping channel names
to strings or :func:`~sagemaker.session.s3_input` objects.
* (sagemaker.session.s3_input) - Channel configuration for S3 data sources that can provide
additional information about the training dataset. See :func:`sagemaker.session.s3_input`
for full details.
* (sagemaker.amazon.amazon_estimator.RecordSet) - A collection of
Amazon :class:~`Record` objects serialized and stored in S3.
For use with an estimator for an Amazon algorithm.
* (list[sagemaker.amazon.amazon_estimator.RecordSet]) - A list of
:class:~`sagemaker.amazon.amazon_estimator.RecordSet` objects, where each instance is
a different channel of training data.
job_name (str): Specify a tuning job name if needed.
Returns:
dict: Tuning config that can be directly used by SageMakerTuningOperator in Airflow. |
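A hedged usage sketch, assuming an Airflow 1.10-style DAG object named dag and a configured sagemaker HyperparameterTuner named tuner; the S3 path is illustrative and the operator import path may differ between Airflow versions:

from airflow.contrib.operators.sagemaker_tuning_operator import SageMakerTuningOperator

# Export the tuning config and hand it to the Airflow operator.
config = tuning_config(tuner, inputs={'train': 's3://my-bucket/train/'})
tune_op = SageMakerTuningOperator(
    task_id='sagemaker_tuning',
    config=config,
    wait_for_completion=True,
    dag=dag,
)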
384,899 | def _add_trits(left, right):
res = left + right
return res if -2 < res < 2 else (res < 0) - (res > 0) | Adds two individual trits together.
The result is always a single trit. |
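A brief check of the wrap-around behaviour described above, assuming the _add_trits function as defined: in-range sums pass through, while 1 + 1 wraps to -1 and -1 + -1 wraps to 1.

assert _add_trits(0, 1) == 1
assert _add_trits(1, -1) == 0
assert _add_trits(1, 1) == -1    # overflow wraps to the opposite sign
assert _add_trits(-1, -1) == 1   # underflow wraps to the opposite sign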