Unnamed: 0 (int64, 0-389k) | code (string, lengths 26-79.6k) | docstring (string, lengths 1-46.9k) |
---|---|---|
386,600 | def _process_loaded_object(self, path):
file_name = os.path.basename(path)
name = os.path.splitext(file_name)[0]
with open(path) as file:
string = file.read()
self._instruction_type_to_file_content[name] = string | Process the file at :paramref:`path`.
:param str path: the path to load an SVG from |
386,601 | def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update({
"title": self.title,
"submit_value": self.submit_value,
"cancel_url": self.cancel_url
})
return context | Add context data to view |
386,602 | def clean_out_dir(directory):
if not isinstance(directory, path):
directory = path(directory)
for file_path in directory.files():
file_path.remove()
for dir_path in directory.dirs():
dir_path.rmtree() | Delete all the files and subdirectories in a directory. |
386,603 | def dump_copy(self, path, relativePath, name=None,
description=None,
replace=False, verbose=False):
relativePath = os.path.normpath(relativePath)
if relativePath == '.':
relativePath = ''
if name is None:
_,name = os.path.split(path)
self.add_directory(relativePath)
realPath = os.path.join(self.__path, relativePath)
dirInfoDict, errorMessage = self.get_directory_info(relativePath)
assert dirInfoDict is not None, errorMessage
if name in dict.__getitem__(dirInfoDict, "files"):
if not replace:
if verbose:
warnings.warn("a file with the name is already defined in repository dictionary info. Set replace flag to True if you want to replace the existing file"%(name))
return
dump = "raise Exception(\"dump is ambiguous for copied file \")"
pull = "raise Exception(\"pull is ambiguous for copied file \")"
try:
shutil.copyfile(path, os.path.join(realPath,name))
except Exception as e:
if verbose:
warnings.warn(e)
return
klass = None
dict.__getitem__(dirInfoDict, "files")[name] = {"dump":dump,
"pull":pull,
"timestamp":datetime.utcnow(),
"id":str(uuid.uuid1()),
"class": klass,
"description":description}
self.save() | Copy an existing system file into the repository, updating the repository
info attribute with a UTC timestamp.
:Parameters:
#. path (str): The full path of the file to copy into the repository.
#. relativePath (str): The relative to the repository path of the directory where the file should be dumped.
If relativePath does not exist, it will be created automatically.
#. name (string): The file name.
If None is given, name will be split from path.
#. description (None, string, picklable object): Any arbitrary description of the file.
#. replace (boolean): Whether to replace any existing file with the same name.
#. verbose (boolean): Whether to warn and inform about any abnormalities. |
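A hypothetical usage sketch for the dump_copy method above; the import path, repository setup, and file paths are assumptions and shown only for illustration:

# assumed import path for the Repository class this method belongs to
from pyrep import Repository

repo = Repository()  # repository creation/loading omitted for brevity
repo.dump_copy(
    path="/tmp/report.pdf",           # existing system file to copy
    relativePath="results/2019",      # created inside the repository if missing
    name="report.pdf",
    description="quarterly report",
    replace=True,
    verbose=True,
)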
386,604 | def new_pattern(self, id_, name, rows=None):
if rows is None:
rows = self.new_row_collection()
return self._spec.new_pattern(id_, name, rows, self) | Create a new knitting pattern.
If rows is :obj:`None` it is replaced with the
:meth:`new_row_collection`. |
386,605 | def get_file(self, sharename, fileid):
if not isinstance(fileid, int):
raise TypeError(" must be an integer")
response = GettRequest().get("/files/%s/%d" % (sharename, fileid))
if response.http_status == 200:
return GettFile(self.user, **response.response) | Get a specific file. Does not require authentication.
Input:
* A sharename
* A fileid - must be an integer
Output:
* A :py:mod:`pygett.files.GettFile` object
Example::
file = client.get_file("4ddfds", 0) |
386,606 | def plot(image, overlay=None, blend=False,
alpha=1, cmap=, overlay_cmap=, overlay_alpha=0.9,
cbar=False, cbar_length=0.8, cbar_dx=0., cbar_vertical=True,
axis=0, nslices=12, slices=None, ncol=None, slice_buffer=None, black_bg=True,
bg_thresh_quant=0.01, bg_val_quant=0.99, domain_image_map=None, crop=False, scale=False,
reverse=False, title=None, title_fontsize=20, title_dx=0., title_dy=0.,
filename=None, dpi=500, figsize=1.5, reorient=True):
if (axis == ) or (axis == ):
axis = 0
if (axis == ) or (axis == ):
axis = 1
if (axis == ) or (axis == ):
axis = 2
def mirror_matrix(x):
return x[::-1,:]
def rotate270_matrix(x):
return mirror_matrix(x.T)
def rotate180_matrix(x):
return x[::-1,:]
def rotate90_matrix(x):
return x.T
def flip_matrix(x):
return mirror_matrix(rotate180_matrix(x))
def reorient_slice(x, axis):
if (axis != 2):
x = rotate90_matrix(x)
if (axis == 2):
x = rotate270_matrix(x)
x = mirror_matrix(x)
return x
warnings.simplefilter('ignore')
if isinstance(image, str):
image = iio2.image_read(image)
if not isinstance(image, iio.ANTsImage):
raise ValueError()
if (image.pixeltype not in {, }) or (image.is_rgb):
scale = False
if overlay is not None:
if isinstance(overlay, str):
overlay = iio2.image_read(overlay)
if not isinstance(overlay, iio.ANTsImage):
raise ValueError()
if not iio.image_physical_space_consistency(image, overlay):
overlay = reg.resample_image_to_target(overlay, image, interp_type=)
if blend:
if alpha == 1:
alpha = 0.5
image = image*alpha + overlay*(1-alpha)
overlay = None
alpha = 1.
if domain_image_map is not None:
if isinstance(domain_image_map, iio.ANTsImage):
tx = tio2.new_ants_transform(precision=, transform_type=,
dimension=image.dimension)
image = tio.apply_ants_transform_to_image(tx, image, domain_image_map)
if overlay is not None:
overlay = tio.apply_ants_transform_to_image(tx, overlay,
domain_image_map,
interpolation=)
elif isinstance(domain_image_map, (list, tuple)):
if len(domain_image_map) != 2:
raise ValueError()
dimg = domain_image_map[0]
if not isinstance(dimg, iio.ANTsImage):
raise ValueError()
tx = domain_image_map[1]
image = reg.apply_transforms(dimg, image, transform_list=tx)
if overlay is not None:
overlay = reg.apply_transforms(dimg, overlay, transform_list=tx,
interpolator=)
if image.components == 1:
if crop:
plotmask = image.get_mask(cleanup=0)
if plotmask.max() == 0:
plotmask += 1
image = image.crop_image(plotmask)
if overlay is not None:
overlay = overlay.crop_image(plotmask)
if scale == True:
vmin, vmax = image.quantile((0.05,0.95))
elif isinstance(scale, (list,tuple)):
if len(scale) != 2:
raise ValueError()
vmin, vmax = image.quantile(scale)
else:
vmin = None
vmax = None
if image.dimension == 2:
img_arr = image.numpy()
img_arr = rotate90_matrix(img_arr)
if not black_bg:
img_arr[img_arr<image.quantile(bg_thresh_quant)] = image.quantile(bg_val_quant)
if overlay is not None:
ov_arr = overlay.numpy()
ov_arr = rotate90_matrix(ov_arr)
ov_arr[np.abs(ov_arr) == 0] = np.nan
fig = plt.figure()
if title is not None:
fig.suptitle(title, fontsize=title_fontsize, x=0.5+title_dx, y=0.95+title_dy)
ax = plt.subplot(111)
im = ax.imshow(img_arr, cmap=cmap,
alpha=alpha,
vmin=vmin, vmax=vmax)
if overlay is not None:
im = ax.imshow(ov_arr,
alpha=overlay_alpha,
cmap=overlay_cmap)
if cbar:
cbar_orient = 'vertical' if cbar_vertical else 'horizontal'
fig.colorbar(im, orientation=cbar_orient)
plt.axis('off')
elif image.dimension == 3:
spacing = [s for i,s in enumerate(image.spacing) if i != axis]
was_resampled = False
if (max(spacing) / min(spacing)) > 3.:
was_resampled = True
new_spacing = (1,1,1)
image = image.resample_image(tuple(new_spacing))
if overlay is not None:
overlay = overlay.resample_image(tuple(new_spacing))
if reorient:
image = image.reorient_image2()
img_arr = image.numpy()
img_arr = np.rollaxis(img_arr, axis)
if overlay is not None:
if reorient:
overlay = overlay.reorient_image2()
ov_arr = overlay.numpy()
ov_arr[np.abs(ov_arr) == 0] = np.nan
ov_arr = np.rollaxis(ov_arr, axis)
if slices is None:
if not isinstance(slice_buffer, (list, tuple)):
if slice_buffer is None:
slice_buffer = (int(img_arr.shape[1]*0.1), int(img_arr.shape[2]*0.1))
else:
slice_buffer = (slice_buffer, slice_buffer)
nonzero = np.where(img_arr.sum(axis=(1,2)) > 0.01)[0]
min_idx = nonzero[0] + slice_buffer[0]
max_idx = nonzero[-1] - slice_buffer[1]
slice_idxs = np.linspace(min_idx, max_idx, nslices).astype('int')
if reverse:
slice_idxs = np.array(list(reversed(slice_idxs)))
else:
if isinstance(slices, (int,float)):
slices = [slices]
if sum([s > 1 for s in slices]) == 0:
slices = [int(s*img_arr.shape[0]) for s in slices]
slice_idxs = slices
nslices = len(slices)
if was_resampled:
slice_idxs = np.unique(np.array([int(s*(image.shape[axis]/img_arr.shape[0])) for s in slice_idxs]))
if ncol is None:
if (nslices <= 6):
ncol = nslices
else:
ncol = int(round(math.sqrt(nslices)))
nrow = math.ceil(nslices / ncol)
xdim = img_arr.shape[2]
ydim = img_arr.shape[1]
dim_ratio = ydim/xdim
fig = plt.figure(figsize=((ncol+1)*figsize*dim_ratio, (nrow+1)*figsize))
if title is not None:
fig.suptitle(title, fontsize=title_fontsize, x=0.5+title_dx, y=0.95+title_dy)
gs = gridspec.GridSpec(nrow, ncol,
wspace=0.0, hspace=0.0,
top=1.-0.5/(nrow+1), bottom=0.5/(nrow+1),
left=0.5/(ncol+1), right=1-0.5/(ncol+1))
slice_idx_idx = 0
for i in range(nrow):
for j in range(ncol):
if slice_idx_idx < len(slice_idxs):
imslice = img_arr[slice_idxs[slice_idx_idx]]
imslice = reorient_slice(imslice, axis)
if not black_bg:
imslice[imslice<image.quantile(bg_thresh_quant)] = image.quantile(bg_val_quant)
else:
imslice = np.zeros_like(img_arr[0])
imslice = reorient_slice(imslice, axis)
ax = plt.subplot(gs[i,j])
im = ax.imshow(imslice, cmap=cmap,
vmin=vmin, vmax=vmax)
if overlay is not None:
if slice_idx_idx < len(slice_idxs):
ovslice = ov_arr[slice_idxs[slice_idx_idx]]
ovslice = reorient_slice(ovslice, axis)
im = ax.imshow(ovslice, alpha=overlay_alpha, cmap=overlay_cmap)
ax.axis('off')
slice_idx_idx += 1
if cbar:
cbar_start = (1-cbar_length) / 2
if cbar_vertical:
cax = fig.add_axes([0.9+cbar_dx, cbar_start, 0.03, cbar_length])
cbar_orient = 'vertical'
else:
cax = fig.add_axes([cbar_start, 0.08+cbar_dx, cbar_length, 0.03])
cbar_orient = 'horizontal'
fig.colorbar(im, cax=cax, orientation=cbar_orient)
elif image.components > 1:
if not image.is_rgb:
raise ValueError()
img_arr = image.numpy()
img_arr = np.stack([rotate90_matrix(img_arr[:,:,i]) for i in range(3)], axis=-1)
fig = plt.figure()
ax = plt.subplot(111)
ax.imshow(img_arr, alpha=alpha)
plt.axis('off')
if filename is not None:
filename = os.path.expanduser(filename)
plt.savefig(filename, dpi=dpi, transparent=True, bbox_inches='tight')
plt.close(fig)
else:
plt.show()
warnings.simplefilter('default') | Plot an ANTsImage.
By default, images will be reoriented to 'LAI' orientation before plotting.
So, if axis == 0, the images will be ordered from the
left side of the brain to the right side of the brain. If axis == 1,
the images will be ordered from the anterior (front) of the brain to
the posterior (back) of the brain. And if axis == 2, the images will
be ordered from the inferior (bottom) of the brain to the superior (top)
of the brain.
ANTsR function: `plot.antsImage`
Arguments
---------
image : ANTsImage
image to plot
overlay : ANTsImage
image to overlay on base image
cmap : string
colormap to use for base image. See matplotlib.
overlay_cmap : string
colormap to use for overlay images, if applicable. See matplotlib.
overlay_alpha : float
level of transparency for any overlays. Smaller value means
the overlay is more transparent. See matplotlib.
axis : integer
which axis to plot along if image is 3D
nslices : integer
number of slices to plot if image is 3D
slices : list or tuple of integers
specific slice indices to plot if image is 3D.
If given, this will override `nslices`.
This can be absolute array indices (e.g. (80,100,120)), or
this can be relative array indices (e.g. (0.4,0.5,0.6))
ncol : integer
Number of columns to have on the plot if image is 3D.
slice_buffer : integer
how many slices to buffer when finding the non-zero slices of
a 3D images. So, if slice_buffer = 10, then the first slice
in a 3D image will be the first non-zero slice index plus 10 more
slices.
black_bg : boolean
if True, the background of the image(s) will be black.
if False, the background of the image(s) will be determined by the
values `bg_thresh_quant` and `bg_val_quant`.
bg_thresh_quant : float
if black_bg=False, the background will be determined by thresholding
the image at the `bg_thresh` quantile value and setting the background
intensity to the `bg_val` quantile value.
This value should be in [0, 1] - somewhere around 0.01 is recommended.
- equal to 1 will threshold the entire image
- equal to 0 will threshold none of the image
bg_val_quant : float
if black_bg=False, the background will be determined by thresholding
the image at the `bg_thresh` quantile value and setting the background
intensity to the `bg_val` quantile value.
This value should be in [0, 1]
- equal to 1 is pure white
- equal to 0 is pure black
- somewhere in between is gray
domain_image_map : ANTsImage
this input ANTsImage or list of ANTsImage types contains a reference image
`domain_image` and optional reference mapping named `domainMap`.
If supplied, the image(s) to be plotted will be mapped to the domain
image space before plotting - useful for non-standard image orientations.
crop : boolean
if true, the image(s) will be cropped to their bounding boxes, resulting
in a potentially smaller image size.
if false, the image(s) will not be cropped
scale : boolean or 2-tuple
if true, nothing will happen to intensities of image(s) and overlay(s)
if false, dynamic range will be maximized when visualizing overlays
if 2-tuple, the image will be dynamically scaled between these quantiles
reverse : boolean
if true, the order in which the slices are plotted will be reversed.
This is useful if you want to plot from the front of the brain first
to the back of the brain, or vice-versa
title : string
add a title to the plot
filename : string
if given, the resulting image will be saved to this file
dpi : integer
determines resolution of image if saved to file. Higher values
result in higher resolution images, but at a cost of having a
larger file size
Example
-------
>>> import ants
>>> import numpy as np
>>> img = ants.image_read(ants.get_data('r16'))
>>> segs = img.kmeans_segmentation(k=3)['segmentation']
>>> ants.plot(img, segs*(segs==1), crop=True)
>>> ants.plot(img, segs*(segs==1), crop=False)
>>> mni = ants.image_read(ants.get_data('mni'))
>>> segs = mni.kmeans_segmentation(k=3)['segmentation']
>>> ants.plot(mni, segs*(segs==1), crop=False) |
386,607 | def user_cache_dir():
if WINDOWS:
path = os.path.join(os.environ.get("LOCALAPPDATA") or os.environ.get("APPDATA"),
"glances", "cache")
elif MACOS:
path = os.path.expanduser("~/Library/Caches/glances")
else:
path = os.path.join(os.environ.get("XDG_CACHE_HOME") or os.path.expanduser("~/.cache"),
"glances")
return path | r"""Return the per-user cache dir (full path).
- Linux, *BSD, SunOS: ~/.cache/glances
- macOS: ~/Library/Caches/glances
- Windows: {%LOCALAPPDATA%,%APPDATA%}\glances\cache |
386,608 | def wait_for_service_tasks_state(
service_name,
expected_task_count,
expected_task_states,
timeout_sec=120
):
return time_wait(
lambda: task_states_predicate(service_name, expected_task_count, expected_task_states),
timeout_seconds=timeout_sec) | Returns once the service has at least N tasks in one of the specified state(s)
:param service_name: the service name
:type service_name: str
:param expected_task_count: the expected number of tasks in the specified state(s)
:type expected_task_count: int
:param expected_task_states: the expected state(s) for tasks to be in, e.g. 'TASK_RUNNING'
:type expected_task_states: [str]
:param timeout_sec: duration to wait
:type timeout_sec: int
:return: the duration waited in seconds
:rtype: int |
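A hypothetical call of the wait helper above; the service name, task count, and states are illustrative only:

elapsed = wait_for_service_tasks_state(
    service_name="my-service",
    expected_task_count=3,
    expected_task_states=["TASK_RUNNING"],
    timeout_sec=120,
)
print("tasks reached the desired state after", elapsed, "seconds")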
386,609 | def display_candidates(self, candidates, pdf_file=None):
if not pdf_file:
pdf_file = os.path.join(
self.pdf_path, candidates[0][0].context.sentence.document.name
)
if os.path.isfile(pdf_file + ".pdf"):
pdf_file += ".pdf"
elif os.path.isfile(pdf_file + ".PDF"):
pdf_file += ".PDF"
else:
logger.error("display_candidates failed: pdf file missing.")
boxes = [
get_box(mention.context) for c in candidates for mention in c.get_mentions()
]
imgs = self.display_boxes(pdf_file, boxes, alternate_colors=True)
return display(*imgs) | Displays the bounding boxes corresponding to candidates on an image of the pdf.
boxes is a list of 5-tuples (page, top, left, bottom, right) |
386,610 | def CheckProg(context, prog_name):
res = SCons.Conftest.CheckProg(context, prog_name)
context.did_show_result = 1
return res | Simple check if a program exists in the path. Returns the path
for the application, or None if not found. |
386,611 | def readinto(self, b):
if not self._readable:
raise UnsupportedOperation()
with self._seek_lock:
seek = self._seek
queue = self._read_queue
if seek == 0:
self._preload_range()
size = len(b)
if size:
b_view = memoryview(b)
size_left = size
else:
b_view = b
size_left = -1
b_end = 0
buffer_size = self._buffer_size
while size_left > 0 or size_left == -1:
start = seek % buffer_size
queue_index = seek - start
try:
buffer = queue[queue_index]
except KeyError:
break
with handle_os_exceptions():
try:
queue[queue_index] = buffer = buffer.result()
except AttributeError:
pass
buffer_view = memoryview(buffer)
data_size = len(buffer)
if not data_size:
break
if size_left != -1:
end = start + size_left
else:
end = data_size - start
if end >= data_size:
end = data_size
del queue[queue_index]
index = queue_index + buffer_size * self._max_buffers
if index < self._size:
queue[index] = self._workers.submit(
self._read_range, index, index + buffer_size)
read_size = end - start
if size_left != -1:
size_left -= read_size
seek += read_size
b_start = b_end
b_end = b_start + read_size
b_view[b_start:b_end] = buffer_view[start:end]
self._seek = seek
self._raw.seek(seek)
return b_end | Read bytes into a pre-allocated, writable bytes-like object b,
and return the number of bytes read.
Args:
b (bytes-like object): buffer.
Returns:
int: number of bytes read |
386,612 | def error(self, message=None):
if self.__parser__:
self.__parser__.error(message)
else:
self.logger.error(message)
sys.exit(2) | Delegates to `ArgumentParser.error` |
386,613 | def parse_section_entry_points(self, section_options):
parsed = self._parse_section_to_dict(section_options, self._parse_list)
self['entry_points'] = parsed | Parses the `entry_points` configuration file section.
:param dict section_options: |
386,614 | def _submit(self, pathfile, filedata, filename):
if pathfile and os.path.exists(pathfile):
files = {: open(pathfile, )}
elif filedata:
assert filename
files = { : (filename, io.BytesIO(filedata))}
else:
raise ValueError("You must pass either a valid file path, or a bytes array containing the captcha image!")
payload = {
: self.api_key,
: ,
: True,
}
self.log.info("Uploading to 2Captcha.com.")
url = self.getUrlFor(, {})
request = requests.post(url, files=files, data=payload)
if not request.ok:
raise exc.CaptchaSolverFailure("Posting captcha to solve failed!")
resp_json = json.loads(request.text)
return self._process_response(resp_json) | Submit either a file from disk or an in-memory file to the solver service, and
return the request ID associated with the new captcha task. |
386,615 | def make_autogen_str():
import utool as ut
def get_regen_cmd():
try:
if len(sys.argv) > 0 and ut.checkpath(sys.argv[0]):
if ut.is_python_module(sys.argv[0]):
python_exe = ut.python_executable(check=False)
modname = ut.get_modname_from_modpath(sys.argv[0])
new_argv = [python_exe, '-m', modname] + sys.argv[1:]
return ' '.join(new_argv)
except Exception as ex:
ut.printex(ex, iswarning=True)
return ' '.join(sys.argv)
autogenkw = dict(
stamp=ut.timestamp(),
regen_cmd=get_regen_cmd()
)
return ut.codeblock(
).format(**autogenkw) | r"""
Returns:
str:
CommandLine:
python -m utool.util_ipynb --exec-make_autogen_str --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_ipynb import * # NOQA
>>> import utool as ut
>>> result = make_autogen_str()
>>> print(result) |
386,616 | def sync_from_spec(redis, schema):
def get_experiments_dict(active=True):
return dict((experiment.name, experiment) for experiment in get_experiments(redis, active=active))
active_experiments = get_experiments_dict()
archived_experiments = get_experiments_dict(active=False)
new_experiment_names = set(schema.keys())
active_experiment_names = set(active_experiments.keys())
unarchivable_experiment_names = (new_experiment_names - active_experiment_names) & set(archived_experiments.keys())
for unarchivable_experiment_name in unarchivable_experiment_names:
print("- De-archiving %s" % unarchivable_experiment_name)
pipe = redis.pipeline(transaction=True)
pipe.sadd(ACTIVE_EXPERIMENTS_REDIS_KEY, unarchivable_experiment_name)
pipe.srem(ARCHIVED_EXPERIMENTS_REDIS_KEY, unarchivable_experiment_name)
pipe.execute()
if unarchivable_experiment_names:
active_experiments = get_experiments_dict()
active_experiment_names = set(active_experiments.keys())
for new_experiment_name in new_experiment_names - active_experiment_names:
print("- Creating experiment %s" % new_experiment_name)
experiment = add_experiment(redis, new_experiment_name)
for choice in schema[new_experiment_name]:
print(" - Adding choice %s" % choice)
experiment.add_choice(choice)
for archivable_experiment_name in active_experiment_names - new_experiment_names:
print("- Archiving %s" % archivable_experiment_name)
active_experiments[archivable_experiment_name].archive()
for experiment_name in new_experiment_names & active_experiment_names:
experiment = active_experiments[experiment_name]
new_choice_names = set(schema[experiment_name])
old_choice_names = set(experiment.choice_names)
for new_choice_name in new_choice_names - old_choice_names:
print("- Adding choice %s to existing experiment %s" % (new_choice_name, experiment_name))
experiment.add_choice(new_choice_name)
for removable_choice_name in old_choice_names - new_choice_names:
print("- Removing choice %s from existing experiment %s" % (removable_choice_name, experiment_name))
experiment.remove_choice(removable_choice_name) | Takes an input experiment spec and creates/modifies/archives the existing
experiments to match the spec.
If there's an experiment in the spec that currently doesn't exist, it will
be created along with the associated choices.
If there's an experiment in the spec that currently exists, and the set of
choices are different, that experiment's choices will be modified to match
the spec.
If there's an experiment not in the spec that currently exists, it will be
archived.
A spec looks like this:
{
"experiment 1": ["choice 1", "choice 2", "choice 3"],
"experiment 2": ["choice 1", "choice 2"]
} |
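A hedged usage sketch for sync_from_spec above, assuming a redis-py connection object; the spec literal follows the format shown in the docstring:

import redis

conn = redis.StrictRedis(host="localhost", port=6379)
spec = {
    "experiment 1": ["choice 1", "choice 2", "choice 3"],
    "experiment 2": ["choice 1", "choice 2"],
}
# Creates missing experiments/choices, de-archives experiments named in the spec,
# and archives active experiments the spec no longer mentions.
sync_from_spec(conn, spec)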
386,617 | def load_sgems_exp_var(filename):
assert os.path.exists(filename)
import xml.etree.ElementTree as etree
tree = etree.parse(filename)
root = tree.getroot()
dfs = {}
for variogram in root:
for attrib in variogram:
if attrib.tag == "title":
title = attrib.text.split()[0].split()[-1]
elif attrib.tag == "x":
x = [float(i) for i in attrib.text.split()]
elif attrib.tag == "y":
y = [float(i) for i in attrib.text.split()]
elif attrib.tag == "pairs":
pairs = [int(i) for i in attrib.text.split()]
for item in attrib:
print(item,item.tag)
df = pd.DataFrame({"x":x,"y":y,"pairs":pairs})
df.loc[df.y<0.0,"y"] = np.NaN
dfs[title] = df
return dfs | Read an SGEMS experimental variogram into a collection of
pandas.DataFrames
Parameters
----------
filename : (str)
an SGEMS experimental variogram XML file
Returns
-------
dfs : list
a list of pandas.DataFrames of x, y, pairs for each
division in the experimental variogram |
386,618 | async def fetch_messages(self, selected: SelectedMailbox,
sequence_set: SequenceSet,
attributes: FrozenSet[FetchAttribute]) \
-> Tuple[Iterable[Tuple[int, MessageInterface]], SelectedMailbox]:
... | Get a list of loaded message objects corresponding to given sequence
set.
Args:
selected: The selected mailbox session.
sequence_set: Sequence set of message sequences or UIDs.
attributes: Fetch attributes for the messages.
Raises:
:class:`~pymap.exceptions.MailboxNotFound` |
386,619 | def slices(src_path):
pages = list_slices(src_path)
slices = []
for page in pages:
slices.extend(page.slices)
return slices | Return slices as a flat list |
386,620 | def addVariantAnnotationSet(self, variantAnnotationSet):
id_ = variantAnnotationSet.getId()
self._variantAnnotationSetIdMap[id_] = variantAnnotationSet
self._variantAnnotationSetIds.append(id_) | Adds the specified variantAnnotationSet to this dataset. |
386,621 | def add_external_reference(self,ext_ref):
node_ext_refs = self.node.find()
ext_refs = None
if node_ext_refs == None:
ext_refs = CexternalReferences()
self.node.append(ext_refs.get_node())
else:
ext_refs = CexternalReferences(node_ext_refs)
ext_refs.add_external_reference(ext_ref) | Adds an external reference to the role
@param ext_ref: the external reference object
@type ext_ref: L{CexternalReference} |
386,622 | def expects_call(self):
self._callable = ExpectedCall(self, call_name=self._name,
callable=True)
return self | The fake must be called.
.. doctest::
:hide:
>>> import fudge
>>> fudge.clear_expectations()
>>> fudge.clear_calls()
This is useful for when you stub out a function
as opposed to a class. For example::
>>> import fudge
>>> remove = fudge.Fake('os.remove').expects_call()
>>> fudge.verify()
Traceback (most recent call last):
...
AssertionError: fake:os.remove() was not called
.. doctest::
:hide:
>>> fudge.clear_expectations() |
386,623 | def _execute(self, query, model, adapter, raw=False):
values = self.load(model, adapter)
return IterableStore(values=values)._execute(query, model=model, adapter=None, raw=raw) | We have to override this because in some situations
(such as with Filebackend, or any dummy backend)
we have to parse / adapt results *before* when can execute the query |
386,624 | def delete(self, record_key):
title = % self.__class__.__name__
input_fields = {
: record_key
}
for key, value in input_fields.items():
object_title = % (title, key, str(value))
self.fields.validate(value, % key, object_title)
try:
self.s3.delete_record(self.bucket_name, record_key)
except:
if not self.exists(record_key):
exit_msg = % record_key
return exit_msg
raise
exit_msg = % record_key
return exit_msg | a method to delete a record from S3
:param record_key: string with key of record
:return: string reporting outcome |
386,625 | def _find_supported(self, features, mechanism_classes):
try:
mechanisms = features[SASLMechanisms]
except KeyError:
logger.error("No sasl mechanisms: %r", list(features))
raise errors.SASLUnavailable(
"Remote side does not support SASL") from None
remote_mechanism_list = mechanisms.get_mechanism_list()
for our_mechanism in mechanism_classes:
token = our_mechanism.any_supported(remote_mechanism_list)
if token is not None:
return our_mechanism, token
return None, None | Find the first mechanism class which supports a mechanism announced in
the given stream features.
:param features: Current XMPP stream features
:type features: :class:`~.nonza.StreamFeatures`
:param mechanism_classes: SASL mechanism classes to use
:type mechanism_classes: iterable of :class:`SASLMechanism`
sub\\ *classes*
:raises aioxmpp.errors.SASLUnavailable: if the peer does not announce
SASL support
:return: the :class:`SASLMechanism` subclass to use and a token
:rtype: pair
Return a supported SASL mechanism class, by looking at the given
stream features `features`.
If no matching mechanism is found, ``(None, None)`` is
returned. Otherwise, a pair consisting of the mechanism class and the
value returned by the respective
:meth:`~.sasl.SASLMechanism.any_supported` method is returned. The
latter is an opaque token which must be passed to the `token` argument
of :meth:`_execute` or :meth:`aiosasl.SASLMechanism.authenticate`. |
386,626 | def send_message(self, output):
file_system_event = None
if self.my_action_input:
file_system_event = self.my_action_input.file_system_event or None
output_action = ActionInput(file_system_event,
output,
self.name,
"*")
Global.MESSAGE_DISPATCHER.send_message(output_action) | Send a message to the socket |
386,627 | def process_fastq_minimal(fastq, **kwargs):
infastq = handle_compressed_input(fastq)
try:
df = pd.DataFrame(
data=[rec for rec in fq_minimal(infastq) if rec],
columns=["timestamp", "lengths"]
)
except IndexError:
logging.error("Fatal: Incorrect file structure for fastq_minimal")
sys.exit("Error: file does not match expected structure for fastq_minimal")
return ut.reduce_memory_usage(df) | Swiftly extract minimal features (length and timestamp) from a rich fastq file |
386,628 | def build_documentation_lines(self):
return [
line_string for key in sorted(self.keys)
for line_string in self.build_paramter_string(key)
] | Build a parameter documentation string that can be appended to the
docstring of a function that uses this :class:`~.Filters` instance
to build filters. |
386,629 | def manage_itstat(self):
itst = self.iteration_stats()
self.itstat.append(itst)
self.display_status(self.fmtstr, itst) | Compute, record, and display iteration statistics. |
386,630 | def fill_datetime(self):
if not self.filled:
raise SlotNotFilledError(
% (self.name, self.key))
return self._fill_datetime | Returns when the slot was filled.
Returns:
A datetime.datetime.
Raises:
SlotNotFilledError if the value hasn't been filled yet. |
386,631 | def templates(self, name=None, params=None):
return self.transport.perform_request('GET', _make_path('_cat',
'templates', name), params=params) | `<https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-templates.html>`_
:arg name: A pattern that returned template names must match
:arg format: a short version of the Accept header, e.g. json, yaml
:arg h: Comma-separated list of column names to display
:arg help: Return help information, default False
:arg local: Return local information, do not retrieve the state from
master node (default: false)
:arg master_timeout: Explicit operation timeout for connection to master
node
:arg s: Comma-separated list of column names or column aliases to sort
by
:arg v: Verbose mode. Display column headers, default False |
386,632 | def _read_para_overlay_ttl(self, code, cbit, clen, *, desc, length, version):
if clen != 4:
raise ProtocolError(f)
_ttln = self._read_unpack(2)
overlay_ttl = dict(
type=desc,
critical=cbit,
length=clen,
ttl=_ttln,
)
return overlay_ttl | Read HIP OVERLAY_TTL parameter.
Structure of HIP OVERLAY_TTL parameter [RFC 6078]:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type | Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| TTL | Reserved |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 overlay_ttl.type Parameter Type
1 15 overlay_ttl.critical Critical Bit
2 16 overlay_ttl.length Length of Contents
4 32 overlay_ttl.ttl TTL
6 48 - Reserved |
386,633 | def _process_status(self, status):
self._screen_id = status.get(ATTR_SCREEN_ID)
self.status_update_event.set() | Process latest status update. |
386,634 | def c32address(version, hash160hex):
if not re.match(r'^[0-9a-fA-F]{40}$', hash160hex):
raise ValueError()
c32string = c32checkEncode(version, hash160hex)
return 'S{}'.format(c32string) | >>> c32address(22, 'a46ff88886c2ef9762d970b4d2c63678835bd39d')
'SP2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKNRV9EJ7'
>>> c32address(0, '0000000000000000000000000000000000000000')
'S0000000000000000000002AA028H'
>>> c32address(31, '0000000000000000000000000000000000000001')
'SZ00000000000000000005HZ3DVN'
>>> c32address(20, '1000000000000000000000000000000000000001')
'SM80000000000000000000000000000004WBEWKC'
>>> c32address(26, '1000000000000000000000000000000000000000')
'ST80000000000000000000000000000002YBNPV3' |
386,635 | def sign_blob(
self,
name,
payload,
delegates=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
if "sign_blob" not in self._inner_api_calls:
self._inner_api_calls[
"sign_blob"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.sign_blob,
default_retry=self._method_configs["SignBlob"].retry,
default_timeout=self._method_configs["SignBlob"].timeout,
client_info=self._client_info,
)
request = common_pb2.SignBlobRequest(
name=name, payload=payload, delegates=delegates
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("name", name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["sign_blob"](
request, retry=retry, timeout=timeout, metadata=metadata
) | Signs a blob using a service account's system-managed private key.
Example:
>>> from google.cloud import iam_credentials_v1
>>>
>>> client = iam_credentials_v1.IAMCredentialsClient()
>>>
>>> name = client.service_account_path('[PROJECT]', '[SERVICE_ACCOUNT]')
>>>
>>> # TODO: Initialize `payload`:
>>> payload = b''
>>>
>>> response = client.sign_blob(name, payload)
Args:
name (str): The resource name of the service account for which the credentials are
requested, in the following format:
``projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}``.
payload (bytes): The bytes to sign.
delegates (list[str]): The sequence of service accounts in a delegation chain. Each service
account must be granted the ``roles/iam.serviceAccountTokenCreator``
role on its next service account in the chain. The last service account
in the chain must be granted the
``roles/iam.serviceAccountTokenCreator`` role on the service account
that is specified in the ``name`` field of the request.
The delegates must have the following format:
``projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}``
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.iam_credentials_v1.types.SignBlobResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid. |
386,636 | def p_casecontent_condition_single(self, p):
p[0] = p[1] + (p[3],)
p.set_lineno(0, p.lineno(1)) | casecontent_condition : casecontent_condition COMMA expression |
386,637 | def _extract_shape(idx, x, j, cur_center):
_a = []
for i in range(len(idx)):
if idx[i] == j:
if cur_center.sum() == 0:
opt_x = x[i]
else:
_, opt_x = _sbd(cur_center, x[i])
_a.append(opt_x)
a = np.array(_a)
if len(a) == 0:
return np.zeros((1, x.shape[1]))
columns = a.shape[1]
y = zscore(a, axis=1, ddof=1)
s = np.dot(y.transpose(), y)
p = np.empty((columns, columns))
p.fill(1.0/columns)
p = np.eye(columns) - p
m = np.dot(np.dot(p, s), p)
_, vec = eigh(m)
centroid = vec[:, -1]
finddistance1 = math.sqrt(((a[0] - centroid) ** 2).sum())
finddistance2 = math.sqrt(((a[0] + centroid) ** 2).sum())
if finddistance1 >= finddistance2:
centroid *= -1
return zscore(centroid, ddof=1) | >>> _extract_shape(np.array([0,1,2]), np.array([[1,2,3], [4,5,6]]), 1, np.array([0,3,4]))
array([-1., 0., 1.])
>>> _extract_shape(np.array([0,1,2]), np.array([[-1,2,3], [4,-5,6]]), 1, np.array([0,3,4]))
array([-0.96836405, 1.02888681, -0.06052275])
>>> _extract_shape(np.array([1,0,1,0]), np.array([[1,2,3,4], [0,1,2,3], [-1,1,-1,1], [1,2,2,3]]), 0, np.array([0,0,0,0]))
array([-1.2089303 , -0.19618238, 0.19618238, 1.2089303 ])
>>> _extract_shape(np.array([0,0,1,0]), np.array([[1,2,3,4],[0,1,2,3],[-1,1,-1,1],[1,2,2,3]]), 0, np.array([-1.2089303,-0.19618238,0.19618238,1.2089303]))
array([-1.19623139, -0.26273649, 0.26273649, 1.19623139]) |
386,638 | def get_library_config(name):
try:
proc = Popen(['pkg-config', '--cflags', '--libs', name], stdout=PIPE, stderr=PIPE)
except OSError:
print()
exit(1)
raw_cflags, err = proc.communicate()
if proc.wait():
return
known, unknown = parse_cflags(raw_cflags.decode())
if unknown:
print("pkg-config returned flags we don't understand: {}".format(unknown))
exit(1)
return known | Get distutils-compatible extension extras for the given library.
This requires ``pkg-config``. |
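A standalone sketch of the pkg-config call the helper above wraps; parse_cflags belongs to the surrounding project and is not reproduced, and the flag splitting here is a simplification:

from subprocess import Popen, PIPE

def pkg_config_flags(name):
    # Ask pkg-config for compile and link flags; return None if the library is unknown.
    proc = Popen(['pkg-config', '--cflags', '--libs', name], stdout=PIPE, stderr=PIPE)
    raw, _err = proc.communicate()
    if proc.wait():
        return None
    return raw.decode().split()

print(pkg_config_flags('zlib'))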
386,639 | def _add_parameter(self, parameter):
if isinstance(parameter, MethodParameter):
parameter = parameter.bind(alloy=self)
self._parameters[parameter.name] = parameter
for alias in parameter.aliases:
self._aliases[alias] = parameter | Force adds a `Parameter` object to the instance. |
386,640 | def footprint(sobject):
n = 0
for a in sobject.__keylist__:
v = getattr(sobject, a)
if v is None:
continue
if isinstance(v, Object):
n += footprint(v)
continue
if hasattr(v, '__len__'):
if len(v):
n += 1
continue
n += 1
return n | Get the I{virtual footprint} of the object.
This is really a count of the attributes in the branch with a significant
value.
@param sobject: A suds object.
@type sobject: L{Object}
@return: The branch footprint.
@rtype: int |
386,641 | def find_keys(self, regex, bucket_name=None):
log = logging.getLogger(self.cls_logger + )
matched_keys = []
if not isinstance(regex, basestring):
log.error(.format(t=regex.__class__.__name__))
return None
if bucket_name is None:
s3bucket = self.bucket
else:
log.debug(.format(n=bucket_name))
s3bucket = self.s3resource.Bucket(bucket_name)
log.info(.format(r=regex))
for item in s3bucket.objects.all():
log.debug(.format(k=item.key))
match = re.search(regex, item.key)
if match:
matched_keys.append(item.key)
log.info(.format(k=matched_keys))
return matched_keys | Finds a list of S3 keys matching the passed regex
Given a regular expression, this method searches the S3 bucket
for matching keys, and returns an array of strings for matched
keys, or an empty array if none are found.
:param regex: (str) Regular expression to use is the key search
:param bucket_name: (str) Name of bucket to search (optional)
:return: Array of strings containing matched S3 keys |
386,642 | def namedb_get_name_DID_info(cur, name, block_height):
sql = "SELECT name_records.name,history.creator_address,history.block_id,history.vtxindex FROM name_records JOIN history ON name_records.name = history.history_id " + \
"WHERE name = ? AND creator_address IS NOT NULL AND history.block_id <= ? ORDER BY history.block_id DESC, history.vtxindex DESC LIMIT 1;"
args = (name,block_height)
rows = namedb_query_execute(cur, sql, args)
row = rows.fetchone()
if row is None:
return None
creator_address = row['creator_address']
latest_block_height = row['block_id']
latest_vtxindex = row['vtxindex']
query = "SELECT COUNT(*) FROM name_records JOIN history ON name_records.name = history.history_id " + \
"WHERE history.creator_address = ? AND (history.block_id < ? OR (history.block_id = ? AND history.vtxindex <= ?));"
args = (creator_address,latest_block_height,latest_block_height,latest_vtxindex)
count_rows = namedb_query_execute(cur, query, args)
count_row = count_rows.fetchone()
if count_row is None:
return None
count = count_row[] - 1
return {: , : str(creator_address), : count} | Given a name and a DB cursor, find out its DID info at the given block.
Returns {'name_type': ..., 'address': ..., 'index': ...} on success
Return None if there is no such name |
386,643 | def try_lock(lock):
was_locked = lock.acquire(False)
try:
yield was_locked
finally:
if was_locked:
lock.release() | Attempts to acquire a lock, and auto releases if acquired (on exit). |
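A minimal usage sketch for the helper above, assuming the generator is wrapped with contextlib.contextmanager (the decorator is not visible in the extracted code):

import contextlib
import threading

@contextlib.contextmanager
def try_lock(lock):
    # Non-blocking acquire; yields whether the lock was obtained, releasing on exit.
    was_locked = lock.acquire(False)
    try:
        yield was_locked
    finally:
        if was_locked:
            lock.release()

lock = threading.Lock()
with try_lock(lock) as acquired:
    if acquired:
        print("lock held, doing work")
    else:
        print("lock busy, skipping")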
386,644 | def queue_actions(self, source, actions, event_args=None):
source.event_args = event_args
ret = self.trigger_actions(source, actions)
source.event_args = None
return ret | Queue a list of \a actions for processing from \a source.
Triggers an aura refresh afterwards. |
386,645 | def save(self, filename=None):
if filename:
if ".db" in filename:
filename = filename.split(".")[0]
self.properties.db_name = filename
else:
self.properties.db_name = "{}".format(self.properties.name)
if os.path.isfile("{}.db".format(self.properties.db_name)):
his = self._read_from_sql(
.format("history"), self.properties.db_name
)
his.index = his["index"].apply(Timestamp)
try:
last = his.index[-1]
df_to_backup = self.backup_histories_df()[last:]
except IndexError:
df_to_backup = self.backup_histories_df()
else:
self._log.debug("Creating a new backup database")
df_to_backup = self.backup_histories_df()
with contextlib.closing(
sqlite3.connect("{}.db".format(self.properties.db_name))
) as con:
sql.to_sql(
df_to_backup,
name="history",
con=con,
index_label="index",
index=True,
if_exists="append",
)
prop_backup = {}
prop_backup["device"] = self.dev_properties_df()
prop_backup["points"] = self.points_properties_df()
with open("{}.bin".format(self.properties.db_name), "wb") as file:
pickle.dump(prop_backup, file)
self._log.info("Device saved to {}.db".format(self.properties.db_name)) | Save the point histories to sqlite3 database.
Save the device object properties to a pickle file so the device can be reloaded. |
386,646 | def summarize_provenance(self):
provenance_per_cache = self.summarize_provenance_per_cache()
summary_provenance = None
num_discrepant = 0
for cache in provenance_per_cache:
if not(summary_provenance):
summary_provenance = provenance_per_cache[cache]
summary_provenance_name = cache
num_discrepant += compare_provenance(
provenance_per_cache[cache],
summary_provenance,
left_outer_diff = "In %s but not in %s" % (cache, summary_provenance_name),
right_outer_diff = "In %s but not in %s" % (summary_provenance_name, cache)
)
if num_discrepant == 0:
prov = summary_provenance
else:
prov = provenance_per_cache
return(prov) | Utility function to summarize provenance files for cached items used by a Cohort.
At the moment, most PROVENANCE files contain details about packages used to
generate files. However, this function is generic & so it summarizes the contents
of those files irrespective of their contents.
Returns
----------
Dict containing summary of provenance items, among all cache dirs used by the Cohort.
IE if all provenances are identical across all cache dirs, then a single set of
provenances is returned. Otherwise, if all provenances are not identical, the provenance
items per cache_dir are returned.
See also
----------
`?cohorts.Cohort.summarize_provenance_per_cache` which is used to summarize provenance
for each existing cache_dir. |
386,647 | def source_list(source, source_hash, saltenv):
contextkey = .format(source, source_hash, saltenv)
if contextkey in __context__:
return __context__[contextkey]
if isinstance(source, list):
mfiles = [(f, saltenv) for f in __salt__['cp.list_master'](saltenv)]
mdirs = [(d, saltenv) for d in __salt__['cp.list_master_dirs'](saltenv)]
for single in source:
if isinstance(single, dict):
single = next(iter(single))
path, senv = salt.utils.url.parse(single)
if senv:
mfiles += [(f, senv) for f in __salt__['cp.list_master'](senv)]
mdirs += [(d, senv) for d in __salt__['cp.list_master_dirs'](senv)]
ret = None
for single in source:
if isinstance(single, dict):
if len(single) != 1:
continue
single_src = next(iter(single))
single_hash = single[single_src] if single[single_src] else source_hash
urlparsed_single_src = _urlparse(single_src)
if salt.utils.platform.is_windows():
if urlparsed_single_src.scheme.lower() in string.ascii_lowercase:
urlparsed_single_src = _urlparse( + single_src)
proto = urlparsed_single_src.scheme
if proto == 'salt':
path, senv = salt.utils.url.parse(single_src)
if not senv:
senv = saltenv
if (path, saltenv) in mfiles or (path, saltenv) in mdirs:
ret = (single_src, single_hash)
break
elif proto.startswith('http') or proto == 'ftp':
ret = (single_src, single_hash)
break
elif proto == 'file' and (
os.path.exists(urlparsed_single_src.netloc) or
os.path.exists(urlparsed_single_src.path) or
os.path.exists(os.path.join(
urlparsed_single_src.netloc,
urlparsed_single_src.path))):
ret = (single_src, single_hash)
break
elif single_src.startswith(os.sep) and os.path.exists(single_src):
ret = (single_src, single_hash)
break
elif isinstance(single, six.string_types):
path, senv = salt.utils.url.parse(single)
if not senv:
senv = saltenv
if (path, senv) in mfiles or (path, senv) in mdirs:
ret = (single, source_hash)
break
urlparsed_src = _urlparse(single)
if salt.utils.platform.is_windows():
if urlparsed_src.scheme.lower() in string.ascii_lowercase:
urlparsed_src = _urlparse( + single)
proto = urlparsed_src.scheme
if proto == and (
os.path.exists(urlparsed_src.netloc) or
os.path.exists(urlparsed_src.path) or
os.path.exists(os.path.join(
urlparsed_src.netloc,
urlparsed_src.path))):
ret = (single, source_hash)
break
elif proto.startswith('http') or proto == 'ftp':
ret = (single, source_hash)
break
elif single.startswith(os.sep) and os.path.exists(single):
ret = (single, source_hash)
break
if ret is None:
raise CommandExecutionError(
)
else:
ret = (source, source_hash)
__context__[contextkey] = ret
return ret | Check the source list and return the source to use
CLI Example:
.. code-block:: bash
salt '*' file.source_list salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' base |
386,648 | def relabel(self, qubits: Qubits) -> 'Gate':
gate = copy(self)
gate.vec = gate.vec.relabel(qubits)
return gate | Return a copy of this Gate with new qubits |
386,649 | def get(self, sid):
return OriginationUrlContext(self._version, trunk_sid=self._solution['trunk_sid'], sid=sid, ) | Constructs a OriginationUrlContext
:param sid: The unique string that identifies the resource
:returns: twilio.rest.trunking.v1.trunk.origination_url.OriginationUrlContext
:rtype: twilio.rest.trunking.v1.trunk.origination_url.OriginationUrlContext |
386,650 | def new_job(self, task, inputdata, callback, launcher_name="Unknown", debug=False, ssh_callback=None):
job_id = str(uuid.uuid4())
if debug == "ssh" and ssh_callback is None:
self._logger.error("SSH callback not set in %s/%s", task.get_course_id(), task.get_id())
callback(("crash", "SSH callback not set."), 0.0, {}, {}, {}, None, "", "")
return
ssh_callback = _callable_once(ssh_callback if ssh_callback is not None else lambda _1, _2, _3: None)
environment = task.get_environment()
if environment not in self._available_containers:
self._logger.warning("Env %s not available for task %s/%s", environment, task.get_course_id(), task.get_id())
ssh_callback(None, None, None)
callback(("crash", "Environment not available."), 0.0, {}, {}, "", {}, None, "", "")
return
enable_network = task.allow_network_access_grading()
try:
limits = task.get_limits()
time_limit = int(limits.get('time', 20))
hard_time_limit = int(limits.get('hard_time', 3 * time_limit))
mem_limit = int(limits.get('memory', 200))
except:
self._logger.exception("Cannot retrieve limits for task %s/%s", task.get_course_id(), task.get_id())
ssh_callback(None, None, None)
callback(("crash", "Error while reading task limits"), 0.0, {}, {}, "", {}, None, "", "")
return
msg = ClientNewJob(job_id, task.get_course_id(), task.get_id(), inputdata, environment, enable_network, time_limit,
hard_time_limit, mem_limit, debug, launcher_name)
self._loop.call_soon_threadsafe(asyncio.ensure_future, self._create_transaction(msg, task=task, callback=callback,
ssh_callback=ssh_callback))
return job_id | Add a new job. Every callback will be called once and only once.
:type task: Task
:param inputdata: input from the student
:type inputdata: Storage or dict
:param callback: a function that will be called asynchronously in the client's process, with the results.
it's signature must be (result, grade, problems, tests, custom, archive), where:
result is itself a tuple containing the result string and the main feedback (i.e. ('success', 'You succeeded');
grade is a number between 0 and 100 indicating the grade of the users;
problems is a dict of tuple, in the form {'problemid': result};
test is a dict of tests made in the container
custom is a dict containing random things set in the container
archive is either None or a bytes containing a tgz archive of files from the job
:type callback: __builtin__.function or __builtin__.instancemethod
:param launcher_name: for informational use
:type launcher_name: str
:param debug: Either True(outputs more info), False(default), or "ssh" (starts a remote ssh server. ssh_callback needs to be defined)
:type debug: bool or string
:param ssh_callback: a callback function that will be called with (host, port, password), the needed credentials to connect to the
remote ssh server. May be called with host, port, password being None, meaning no session was open.
:type ssh_callback: __builtin__.function or __builtin__.instancemethod or None
:return: the new job id |
386,651 | def load_items(self, items):
loaded_items = {}
requests = collections.deque(create_batch_get_chunks(items))
while requests:
request = requests.pop()
try:
response = self.dynamodb_client.batch_get_item(RequestItems=request)
except botocore.exceptions.ClientError as error:
raise BloopException("Unexpected error while loading items.") from error
for table_name, table_items in response.get("Responses", {}).items():
loaded_items.setdefault(table_name, []).extend(table_items)
if response["UnprocessedKeys"]:
requests.append(response["UnprocessedKeys"])
return loaded_items | Loads any number of items in chunks, handling continuation tokens.
:param items: Unpacked in chunks into "RequestItems" for :func:`boto3.DynamoDB.Client.batch_get_item`. |
386,652 | def v_type_extension(ctx, stmt):
(modulename, identifier) = stmt.keyword
revision = stmt.i_extension_revision
module = modulename_to_module(stmt.i_module, modulename, revision)
if module is None:
return
if identifier not in module.i_extensions:
if module.i_modulename == stmt.i_orig_module.i_modulename:
if identifier not in stmt.i_orig_module.i_extensions:
err_add(ctx.errors, stmt.pos, ,
(identifier, module.arg))
return
else:
stmt.i_extension = stmt.i_orig_module.i_extensions[identifier]
else:
err_add(ctx.errors, stmt.pos, ,
(identifier, module.arg))
return
else:
stmt.i_extension = module.i_extensions[identifier]
ext_arg = stmt.i_extension.search_one('argument')
if stmt.arg is not None and ext_arg is None:
err_add(ctx.errors, stmt.pos, ,
identifier)
elif stmt.arg is None and ext_arg is not None:
err_add(ctx.errors, stmt.pos, ,
identifier) | verify that the extension matches the extension definition |
386,653 | def _init_records(self, record_types):
for record_type in record_types:
if str(record_type) not in self._my_map['recordTypeIds']:
record_initialized = self._init_record(str(record_type))
if record_initialized:
self._my_map['recordTypeIds'].append(str(record_type)) | Initialize all records for this form. |
386,654 | def _push_processor(self, proc, index=None):
if index is None:
self._procstack.append(proc)
else:
self._procstack.insert(index, proc) | Pushes a processor onto the processor stack. Processors are
objects with proc_request(), proc_response(), and/or
proc_exception() methods, which can intercept requests,
responses, and exceptions. When a method invokes the send()
method on a request, the proc_request() method on each
processor is called in turn. Likewise, responses are
processed by the proc_response() method of each processor, in
the reverse order of the calls to proc_request(). The
proc_exception() methods are called if an exception is raised
instead of a response being returned.
Note that this method can append a processor to the stack, if
the index parameter is None (the default), or a processor may
be inserted into the stack by specifying an integer index.
For more information about processors, see the
requiem.Processor class. |
386,655 | def log(self, n=None, **kwargs):
kwargs[] = kwargs.pop(, self.template)
cmd = ['git', 'log']
if n:
cmd.append( % n)
cmd.extend(
(('--%s=%s' % (k, v))
for (k, v) in iteritems(kwargs)))
try:
output = self.sh(cmd, shell=False)
if "fatal: bad default revision " in output:
return output
return output
except Exception as e:
e
return | Run the repository log command
Returns:
str: output of log command (``git log -n <n> <--kwarg=value>``) |
386,656 | async def create(self, token):
token = encode_token(token)
response = await self._api.put("/v1/acl/create", data=token)
return response.body | Creates a new token with a given policy
Parameters:
token (Object): Token specification
Returns:
Object: token ID
The create endpoint is used to make a new token.
A token has a name, a type, and a set of ACL rules.
The request body may take the form::
{
"Name": "my-app-token",
"Type": "client",
"Rules": ""
}
None of the fields are mandatory. The **Name** and **Rules** fields
default to being blank, and the **Type** defaults to "client".
**Name** is opaque to Consul. To aid human operators, it should
be a meaningful indicator of the ACL's purpose.
**Type** is either **client** or **management**. A management token
is comparable to a root user and has the ability to perform any action
including creating, modifying and deleting ACLs.
**ID** field may be provided, and if omitted a random UUID will be
generated.
The format of **Rules** is
`documented here <https://www.consul.io/docs/internals/acl.html>`_.
A successful response body will return the **ID** of the newly
created ACL, like so::
{
"ID": "adf4238a-882b-9ddc-4a9d-5b6758e4159e"
} |
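A hedged usage sketch for the create coroutine above; the endpoint object is an assumption, and only the request/response shapes follow the docstring:

async def make_token(acl_endpoint):
    # acl_endpoint is assumed to be an instance of the class defining create() above.
    token = await acl_endpoint.create({
        "Name": "my-app-token",
        "Type": "client",
        "Rules": "",
    })
    return token  # per the docstring, e.g. {"ID": "adf4238a-882b-9ddc-4a9d-5b6758e4159e"}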
386,657 | def get_short_reads(vals):
(args,txome,seed,chunk) = vals
txe = TranscriptomeEmitter(txome,TranscriptomeEmitter.Options(seed=seed))
if args.weights:
weights = {}
if args.weights[-3:]=='.gz': inf = gzip.open(args.weights)
else: inf = open(args.weights)
for line in inf:
f = line.rstrip().split("\t")
weights[f[0]] = float(f[1])
txs = {}
for tx in txome.transcripts: txs[tx.name] = tx.length
for name in weights:
weights[name] *= txs[name]
txe.set_weights_by_dict(weights)
else:
weights = {}
txs = {}
for tx in txome.transcripts: txs[tx.name] = tx.length
txe.set_weights_by_dict(weights)
reademit = ReadEmitter(txe)
shortreads = []
sp = args.short_read_insert_size
reademit.cutter.set_custom(sp[0],sp[1],sp[2])
if args.short_read_error_rate:
emfr = ErrorMakerFlatRate(rate=args.short_read_error_rate,rand=reademit.options.rand)
reademit.add_error_maker(emfr)
for i in range(0,chunk):
e = reademit.emit(args.short_read_length)
shortreads.append(e)
return shortreads | Emit the short reads first |
386,658 | def parse_lines(lines: [str], units: Units, use_na: bool = True) -> [dict]:
parsed_lines = []
prob = ''
while lines:
raw_line = lines[0].strip()
line = core.sanitize_line(raw_line)
if line.startswith('PROB'):
if len(line) == 6:
prob = line
line = ''
elif len(line) > 6:
prob = line[:6]
line = line[6:].strip()
if line:
parsed_line = (parse_na_line if use_na else parse_in_line)(line, units)
for key in (, ):
parsed_line[key] = core.make_timestamp(parsed_line[key])
parsed_line[] = core.make_number(prob[4:])
parsed_line[] = raw_line
parsed_line[] = prob + + line if prob else line
prob = ''
parsed_lines.append(parsed_line)
lines.pop(0)
return parsed_lines | Returns a list of parsed line dictionaries |
386,659 | def apply_modification(self):
self.__changing_model = True
if self.adding_model: self.model.add(self.adding_model)
elif self.editing_model and self.editing_iter:
path = self.model.get_path(self.editing_iter)
self.model.row_changed(path, self.editing_iter)
pass
self.view.remove_currency_view()
self.adding_model = None
self.editing_model = None
self.editing_iter = None
self.curreny = None
self.unselect()
self.__changing_model = False
return | Modifications on the right side need to be committed |
386,660 | def title_line(text):
columns = shutil.get_terminal_size()[0]
start = columns // 2 - len(text) // 2
output = *columns + + \
* start + str(text) + "\n\n" + \
*columns +
return output | Returns a string that represents the
text as a title blurb |
386,661 | def density_contour(self, *args, **kwargs):
lon, lat, totals, kwargs = self._contour_helper(args, kwargs)
return self.contour(lon, lat, totals, **kwargs) | Estimates point density of the given linear orientation measurements
(Interpreted as poles, lines, rakes, or "raw" longitudes and latitudes
based on the `measurement` keyword argument.) and plots contour lines of
the resulting density distribution.
Parameters
----------
*args : A variable number of sequences of measurements.
By default, this will be expected to be ``strike`` & ``dip``, both
array-like sequences representing poles to planes. (Rake
measurements require three parameters, thus the variable number of
arguments.) The ``measurement`` kwarg controls how these arguments
are interpreted.
measurement : string, optional
Controls how the input arguments are interpreted. Defaults to
``"poles"``. May be one of the following:
``"poles"`` : strikes, dips
Arguments are assumed to be sequences of strikes and dips
of planes. Poles to these planes are used for contouring.
``"lines"`` : plunges, bearings
Arguments are assumed to be sequences of plunges and
bearings of linear features.
``"rakes"`` : strikes, dips, rakes
Arguments are assumed to be sequences of strikes, dips, and
rakes along the plane.
``"radians"`` : lon, lat
Arguments are assumed to be "raw" longitudes and latitudes
in the stereonet's underlying coordinate system.
method : string, optional
The method of density estimation to use. Defaults to
``"exponential_kamb"``. May be one of the following:
``"exponential_kamb"`` : Kamb with exponential smoothing
A modified Kamb method using exponential smoothing [1]_. Units
are in numbers of standard deviations by which the density
estimate differs from uniform.
``"linear_kamb"`` : Kamb with linear smoothing
A modified Kamb method using linear smoothing [1]_. Units are
in numbers of standard deviations by which the density estimate
differs from uniform.
``"kamb"`` : Kamb with no smoothing
Kamb's method [2]_ with no smoothing. Units are in numbers of
standard deviations by which the density estimate differs from
uniform.
``"schmidt"`` : 1% counts
The traditional "Schmidt" (a.k.a. 1%) method. Counts points
within a counting circle comprising 1% of the total area of the
hemisphere. Does not take into account sample size. Units are
in points per 1% area.
sigma : int or float, optional
The number of standard deviations defining the expected number of
standard deviations by which a random sample from a uniform
distribution of points would be expected to vary from being evenly
distributed across the hemisphere. This controls the size of the
counting circle, and therefore the degree of smoothing. Higher
sigmas will lead to more smoothing of the resulting density
distribution. This parameter only applies to Kamb-based methods.
Defaults to 3.
gridsize : int or 2-item tuple of ints, optional
The size of the grid that the density is estimated on. If a single
int is given, it is interpreted as an NxN grid. If a tuple of ints
is given it is interpreted as (nrows, ncols). Defaults to 100.
weights : array-like, optional
The relative weight to be applied to each input measurement. The
array will be normalized to sum to 1, so absolute value of the
weights do not affect the result. Defaults to None.
**kwargs
Additional keyword arguments are passed on to matplotlib's
`contour` function.
Returns
-------
A matplotlib ContourSet.
See Also
--------
mplstereonet.density_grid
mplstereonet.StereonetAxes.density_contourf
matplotlib.pyplot.contour
matplotlib.pyplot.clabel
Examples
--------
Plot density contours of poles to the specified planes using a
modified Kamb method with exponential smoothing [1]_.
>>> strikes, dips = [120, 315, 86], [22, 85, 31]
>>> ax.density_contour(strikes, dips)
Plot density contours of a set of linear orientation measurements.
>>> plunges, bearings = [-10, 20, -30], [120, 315, 86]
>>> ax.density_contour(plunges, bearings, measurement='lines')
Plot density contours of a set of rake measurements.
>>> strikes, dips, rakes = [120, 315, 86], [22, 85, 31], [-5, 20, 9]
>>> ax.density_contour(strikes, dips, rakes, measurement='rakes')
Plot density contours of a set of "raw" longitudes and latitudes.
>>> lon, lat = np.radians([-40, 30, -85]), np.radians([21, -59, 45])
>>> ax.density_contour(lon, lat, measurement='radians')
Plot density contours of poles to planes using a Kamb method [2]_
with the density estimated on a 10x10 grid (in long-lat space)
>>> strikes, dips = [120, 315, 86], [22, 85, 31]
>>> ax.density_contour(strikes, dips, method='kamb', gridsize=10)
Plot density contours of poles to planes with contours at [1,2,3]
standard deviations.
>>> strikes, dips = [120, 315, 86], [22, 85, 31]
>>> ax.density_contour(strikes, dips, levels=[1,2,3])
References
----------
.. [1] Vollmer, 1995. C Program for Automatic Contouring of Spherical
Orientation Data Using a Modified Kamb Method. Computers &
Geosciences, Vol. 21, No. 1, pp. 31--49.
.. [2] Kamb, 1959. Ice Petrofabric Observations from Blue Glacier,
Washington, in Relation to Theory and Experiment. Journal of
Geophysical Research, Vol. 64, No. 11, pp. 1891--1909. |
386,662 | def invert_index(source_dir, index_url=INDEX_URL, init=False):
raw_index = defaultdict(list)
for base, dir_list, fn_list in os.walk(source_dir):
for fn in fn_list:
fp = os.path.join(base, fn)
code = fn
with open(fp) as f:
tokens = f.read().strip().split()
for token in tokens:
raw_index[token].append(code)
index = Shove(store=index_url)
if init:
index.clear()
index.update(raw_index)
index.sync()
return index | Build the inverted index from the given source_dir
Output a Shove object built on the store_path
Input:
source_dir: a directory on the filesystem
index_url: the store_path for the Shove object
init: clear the old index and rebuild from scratch
Output:
index: a Shove object |
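A small in-memory sketch of the same inversion step, assuming only the standard library; the directory walk and the Shove store of the function above are replaced here by a plain dict of made-up documents:
from collections import defaultdict

docs = {'d1': 'green eggs and ham', 'd2': 'ham and cheese'}   # stand-ins for files on disk
raw_index = defaultdict(list)
for code, text in docs.items():
    for token in text.strip().split():
        raw_index[token].append(code)    # one posting per token occurrence
print(raw_index['ham'])                  # -> ['d1', 'd2']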
386,663 | def trapz2(f, x=None, y=None, dx=1.0, dy=1.0):
return numpy.trapz(numpy.trapz(f, x=y, dx=dy), x=x, dx=dx) | Double integrate. |
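A quick check of the double trapezoidal rule above; the grid and integrand are illustrative, and a NumPy version that still provides numpy.trapz (renamed numpy.trapezoid in NumPy 2.0) is assumed:
import numpy

def trapz2(f, x=None, y=None, dx=1.0, dy=1.0): return numpy.trapz(numpy.trapz(f, x=y, dx=dy), x=x, dx=dx)

x = numpy.linspace(0.0, 1.0, 101)
y = numpy.linspace(0.0, 1.0, 101)
X, Y = numpy.meshgrid(x, y, indexing='ij')   # f[i, j] = f(x[i], y[j])
print(trapz2(X * Y, x=x, y=y))               # ~0.25, the exact integral of x*y over the unit square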
386,664 | def get_vector(self, max_choice=3):
vec = {}
for dim in [, , ]:
if self.meta[dim] is None:
continue
dim_vec = map(lambda x: (x, max_choice), self.meta[dim])
vec[dim] = dict(dim_vec)
return vec | Return pseudo-choice vectors. |
386,665 | def column_keymap(self):
keystates = set()
shortcuts = self.cp.items()
keymap_dict = dict(shortcuts)
for combo, action in shortcuts:
combo_as_list = re.split(, combo)[1::2]
if len(combo_as_list) > 1:
keystates |= set(accumulate(combo_as_list[:-1]))
if action in [, , ]:
keystates.add(combo)
if action == :
for c in ascii_lowercase:
keymap_dict[combo + c] = + c
return (keymap_dict, keystates) | Returns keymap and keystates used in column mode |
386,666 | def _get_magnitude_term(self, C, mag):
lny = C[] + (C[] * ((8.5 - mag) ** 2.))
if mag > 6.3:
return lny + (-C[] * C[]) * (mag - 6.3)
else:
return lny + C[] * (mag - 6.3) | Returns the magnitude scaling term. |
386,667 | def SETPE(cpu, dest):
dest.write(Operators.ITEBV(dest.size, cpu.PF, 1, 0)) | Sets byte if parity even.
:param cpu: current CPU.
:param dest: destination operand. |
386,668 | def assign(self, expr):
name = self.variable()
self.statements.append(ast.Assign([ast.Name(name, ast.Store())], expr))
return ast.Name(name, ast.Load()) | Give *expr* a name. |
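A standalone illustration of the naming idiom used by `assign` above: bind an expression to a temporary name with an `ast.Assign` node and reuse that name (the name `tmp_0` is made up here; the real helper draws names from `self.variable()`):
import ast

expr = ast.BinOp(left=ast.Constant(2), op=ast.Add(), right=ast.Constant(3))
assign_node = ast.Assign(targets=[ast.Name(id='tmp_0', ctx=ast.Store())], value=expr)
module = ast.Module(body=[assign_node], type_ignores=[])
ast.fix_missing_locations(module)
namespace = {}
exec(compile(module, '<generated>', 'exec'), namespace)
print(namespace['tmp_0'])   # -> 5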
386,669 | def _sort_converters(cls, app_ready=False):
| Sorts the converter functions |
386,670 | def _register_server_authenticator(klass, name):
SERVER_MECHANISMS_D[name] = klass
items = sorted(SERVER_MECHANISMS_D.items(), key = _key_func, reverse = True)
SERVER_MECHANISMS[:] = [k for (k, v) in items ]
SECURE_SERVER_MECHANISMS[:] = [k for (k, v) in items
if v._pyxmpp_sasl_secure] | Add a server authenticator class to `SERVER_MECHANISMS_D`,
`SERVER_MECHANISMS` and, optionally, to `SECURE_SERVER_MECHANISMS` |
386,671 | def create_contact(self, *args, **kwargs):
url =
data = {
: False,
:
}
data.update(kwargs)
return Contact(**self._api._post(url, data=json.dumps(data))) | Creates a contact |
386,672 | async def _retrieve_messages_around_strategy(self, retrieve):
if self.around:
around = self.around.id if self.around else None
data = await self.logs_from(self.channel.id, retrieve, around=around)
self.around = None
return data
return [] | Retrieve messages using around parameter. |
386,673 | def append(self, element):
from refract.refraction import refract
self.content.append(refract(element)) | Append an element onto the array.
>>> array = Array()
>>> array.append('test') |
386,674 | def _get_ptext_to_endchars(value, endchars):
_3to2list = list(_wsp_splitter(value, 1))
fragment, remainder, = _3to2list[:1] + [_3to2list[1:]]
vchars = []
escape = False
had_qp = False
for pos in range(len(fragment)):
if fragment[pos] == :
if escape:
escape = False
had_qp = True
else:
escape = True
continue
if escape:
escape = False
elif fragment[pos] in endchars:
break
vchars.append(fragment[pos])
else:
pos = pos + 1
return .join(vchars), .join([fragment[pos:]] + remainder), had_qp | Scan printables/quoted-pairs until endchars and return unquoted ptext.
This function turns a run of qcontent, ccontent-without-comments, or
dtext-with-quoted-printables into a single string by unquoting any
quoted printables. It returns the string, the remaining value, and
a flag that is True iff there were any quoted printables decoded. |
386,675 | def _build_request(self, type, commands):
request = {}
headers = {
: ,
}
if self.nxargs[]:
user = self.nxargs[]
headers[] = + user +
request[] = self.NXAPI_UDS_URI_PATH
else:
request[] = .format(
transport=self.nxargs[],
host=self.nxargs[],
port=self.nxargs[],
uri=self.NXAPI_REMOTE_URI_PATH,
)
if isinstance(commands, (list, set, tuple)):
commands = .join(commands)
payload = {}
payload[] = {
: self.NXAPI_VERSION,
: type,
: ,
: ,
: commands,
: ,
}
request[] = headers
request[] = json.dumps(payload)
request[] = {
: self.nxargs[]
}
log.info(, request)
return request | Build NX-API JSON request. |
386,676 | def variants(self, case_id, skip=0, count=1000, filters=None):
filters = filters or {}
case_obj = self.case(case_id=case_id)
limit = count + skip
genes = set()
if filters.get():
genes = set([gene_id.strip() for gene_id in filters[]])
frequency = None
if filters.get():
frequency = float(filters[])
cadd = None
if filters.get():
cadd = float(filters[])
genetic_models = None
if filters.get():
genetic_models = set(filters[])
sv_len = None
if filters.get():
sv_len = float(filters[])
impact_severities = None
if filters.get():
impact_severities = set(filters[])
vcf_file_path = case_obj.variant_source
self.head = get_header(vcf_file_path)
self.vep_header = self.head.vep_columns
self.snpeff_header = self.head.snpeff_columns
variants = self._get_filtered_variants(vcf_file_path, filters)
result = []
skip_index = 0
for index, variant in enumerate(variants):
index += 1
if skip_index >= skip:
variant_obj = self._format_variants(
variant=variant,
index=index,
case_obj=case_obj,
)
if genes and variant_obj:
if not set(variant_obj[]).intersection(genes):
variant_obj = None
if impact_severities and variant_obj:
if not variant_obj[] in impact_severities:
variant_obj = None
if frequency and variant_obj:
if variant_obj.max_freq > frequency:
variant_obj = None
if cadd and variant_obj:
if variant_obj[] < cadd:
variant_obj = None
if genetic_models and variant_obj:
models = set(variant_obj.genetic_models)
if not models.intersection(genetic_models):
variant_obj = None
if sv_len and variant_obj:
if variant_obj.sv_len < sv_len:
variant_obj = None
if variant_obj:
skip_index += 1
if skip_index <= limit:
result.append(variant_obj)
else:
break
else:
skip_index += 1
return Results(result, len(result)) | Return all variants in the VCF.
This function will apply the given filter and return the 'count' first
variants. If skip the first 'skip' variants will not be regarded.
Args:
case_id (str): Path to a vcf file (for this adapter)
skip (int): Skip first variants
count (int): The number of variants to return
filters (dict): A dictionary with filters. Currently this will
look like: {
gene_list: [] (list of hgnc ids),
frequency: None (float),
cadd: None (float),
sv_len: None (float),
consequence: [] (list of consequences),
is_lof: None (Bool),
genetic_models [] (list of genetic models)
sv_type: List (list of sv types),
}
Returns:
puzzle.constants.Results : Named tuple with variants and
nr_of_variants |
386,677 | def tag_manifest_into_registry(self, session, worker_digest):
self.log.info("%s: Tagging manifest", session.registry)
digest = worker_digest[]
source_repo = worker_digest[]
image_manifest, _, media_type, _ = self.get_manifest(session, source_repo, digest)
if media_type == MEDIA_TYPE_DOCKER_V2_SCHEMA2:
digests = ManifestDigest(v1=digest)
elif media_type == MEDIA_TYPE_OCI_V1:
digests = ManifestDigest(oci=digest)
else:
raise RuntimeError("Unexpected media type found in worker repository: {}"
.format(media_type))
push_conf_registry = self.workflow.push_conf.add_docker_registry(session.registry,
insecure=session.insecure)
for image in self.workflow.tag_conf.images:
target_repo = image.to_str(registry=False, tag=False)
self.store_manifest_in_repository(session, image_manifest, media_type,
source_repo, target_repo, tag=image.tag)
push_conf_registry.digests[image.tag] = digests | Tags the manifest identified by worker_digest into session.registry with all the
configured tags found in workflow.tag_conf. |
386,678 | def push(self, repository=None, tag=None):
image = self
if repository or tag:
image = self.tag_image(repository, tag)
for json_e in self.d.push(repository=image.name, tag=image.tag, stream=True, decode=True):
logger.debug(json_e)
status = graceful_get(json_e, "status")
if status:
logger.info(status)
else:
error = graceful_get(json_e, "error")
if error is not None:
logger.error(status)
raise ConuException("There was an error while pushing the image %s: %s",
self.name, error)
return image | Push image to registry. Raise exception when push fail.
:param repository: str, see constructor
:param tag: str, see constructor
:return: None |
386,679 | def _to_addr(worksheet, row, col, row_fixed=False, col_fixed=False):
addr = ""
A = ord('A')
col += 1
while col > 0:
addr = chr(A + ((col - 1) % 26)) + addr
col = (col - 1) // 26
prefix = ("%s!" % worksheet) if worksheet else ""
col_modifier = "$" if col_fixed else ""
row_modifier = "$" if row_fixed else ""
return prefix + "%s%s%s%d" % (col_modifier, addr, row_modifier, row+1) | converts a (0,0) based coordinate to an excel address |
386,680 | def connect_async(self, connection_id, connection_string, callback):
topics = MQTTTopicValidator(self.prefix + .format(connection_string))
key = self._generate_key()
name = self.name
conn_message = {: , : , : key, : name}
context = {: key, : connection_string, : topics}
self.conns.begin_connection(connection_id, connection_string, callback, context, self.get_config())
self._bind_topics(topics)
try:
self.client.publish(topics.connect, conn_message)
except IOTileException:
self._unbind_topics(topics)
self.conns.finish_connection(connection_id, False, ) | Connect to a device by its connection_string
This function looks for the device on AWS IOT using the preconfigured
topic prefix and looking for:
<prefix>/devices/connection_string
It then attempts to lock that device for exclusive access and
returns a callback if successful.
Args:
connection_id (int): A unique integer set by the caller for referring to this connection
once created
connection_string (string): A device id of the form d--XXXX-YYYY-ZZZZ-WWWW
callback (callable): A callback function called when the connection has succeeded or
failed |
386,681 | def event_update(
self,
event_id,
name=None,
season=None,
start_time=None,
event_group_id=None,
status=None,
account=None,
**kwargs
):
assert isinstance(season, list)
assert isinstance(
start_time, datetime
), "start_time needs to be a `datetime.datetime`"
if not account:
if "default_account" in self.config:
account = self.config["default_account"]
if not account:
raise ValueError("You need to provide an account")
account = Account(account)
event = Event(event_id)
op_data = {
"fee": {"amount": 0, "asset_id": "1.3.0"},
"event_id": event["id"],
"prefix": self.prefix,
}
if event["status"] == status:
status = None
if event_group_id:
if event_group_id[0] == "1":
EventGroup(event_group_id)
else:
test_proposal_in_buffer(
kwargs.get("append_to", self.propbuffer),
"event_group_create",
event_group_id,
)
op_data.update({"new_event_group_id": event_group_id})
if name:
op_data.update({"new_name": name})
if season:
op_data.update({"new_season": season})
if start_time:
op_data.update({"new_start_time": formatTime(start_time)})
if status:
op_data.update({"new_status": status})
op = operations.Event_update(**op_data)
return self.finalizeOp(op, account["name"], "active", **kwargs) | Update an event. This needs to be **proposed**.
:param str event_id: Id of the event to update
:param list name: Internationalized names, e.g. ``[['de', 'Foo'],
['en', 'bar']]``
:param list season: Internationalized season, e.g. ``[['de',
'Foo'], ['en', 'bar']]``
:param str event_group_id: Event group ID to create the event for
(defaults to *relative* id ``0.0.0``)
:param datetime start_time: Time of the start of the event
:param str status: Event status
:param str account: (optional) the account to allow access
to (defaults to ``default_account``) |
386,682 | def convert_snapshot(self, shift, instruction):
command_dict = {
: ,
: shift+instruction.start_time,
: instruction.name,
: instruction.type
}
return self._qobj_model(**command_dict) | Return converted `Snapshot`.
Args:
shift(int): Offset time.
instruction (Snapshot): snapshot instruction.
Returns:
dict: Dictionary of required parameters. |
386,683 | def build_stop_times(pfeed, routes, shapes, stops, trips, buffer=cs.BUFFER):
routes = (
routes
.filter([, ])
.merge(pfeed.frequencies.drop([], axis=1))
)
trips = (
trips
.assign(service_window_id=lambda x: x.trip_id.map(
lambda y: y.split(cs.SEP)[2]))
.merge(routes)
)
geometry_by_shape = dict(
gt.geometrize_shapes(shapes, use_utm=True)
.filter([, ])
.values
)
dist_by_stop_by_shape = {shape: {} for shape in geometry_by_shape}
def compute_stops_dists_times(geo_stops, linestring, shape,
start_time, end_time):
g = geo_stops.copy()
dists_and_stops = []
for i, stop in enumerate(g[].values):
if stop in dist_by_stop_by_shape[shape]:
d = dist_by_stop_by_shape[shape][stop]
else:
d = gt.get_segment_length(linestring,
g.geometry.iat[i])/1000
dist_by_stop_by_shape[shape][stop] = d
dists_and_stops.append((d, stop))
dists, stops = zip(*sorted(dists_and_stops))
D = linestring.length/1000
dists_are_reasonable = all([d < D + 100 for d in dists])
if not dists_are_reasonable:
n = len(stops)
delta = D/(n - 1)
dists = [i*delta for i in range(n)]
t0, t1 = start_time, end_time
d0, d1 = dists[0], dists[-1]
times = np.interp(dists, [d0, d1], [t0, t1])
return stops, dists, times
rows = []
geo_stops = gt.geometrize_stops(stops, use_utm=True)
side = cs.traffic_by_timezone[pfeed.meta.agency_timezone.iat[0]]
for index, row in trips.iterrows():
shape = row[]
geom = geometry_by_shape[shape]
stops = get_nearby_stops(geo_stops, geom, side, buffer=buffer)
lambda x: gt.timestr_to_seconds(x, inverse=True))
return g | Given a ProtoFeed and its corresponding routes (DataFrame),
shapes (DataFrame), stops (DataFrame), trips (DataFrame),
return DataFrame representing ``stop_times.txt``.
Includes the optional ``shape_dist_traveled`` column.
Don't make stop times for trips with no nearby stops. |
386,684 | def expect_constructor(target):
if not isinstance(target, ClassDouble):
raise ConstructorDoubleError(
.format(target),
)
return expect(target)._doubles__new__ | Set an expectation on a ``ClassDouble`` constructor
:param ClassDouble target: The ClassDouble to set the expectation on.
:return: an ``Expectation`` for the __new__ method.
:raise: ``ConstructorDoubleError`` if target is not a ClassDouble. |
386,685 | def speech_speaker(self):
if self.speaker:
return self.speaker
elif self.parent:
return self.parent.speech_speaker()
else:
return None | Retrieves the speaker of the audio or video file associated with the element.
The source is inherited from ancestor elements if none is specified. For this reason, always use this method rather than access the ``src`` attribute directly.
Returns:
str or None if not found |
386,686 | def execute_script(code_block, example_globals, image_path, fig_count,
src_file, gallery_conf):
time_elapsed = 0
stdout =
print( % src_file)
plt.close()
cwd = os.getcwd()
orig_stdout = sys.stdout
try:
os.chdir(os.path.dirname(src_file))
my_buffer = StringIO()
my_stdout = Tee(sys.stdout, my_buffer)
sys.stdout = my_stdout
t_start = time()
exec(code_block, example_globals)
time_elapsed = time() - t_start
sys.stdout = orig_stdout
my_stdout = my_buffer.getvalue().strip().expandtabs()
if my_stdout:
stdout = CODE_OUTPUT.format(indent(my_stdout, * 4))
os.chdir(cwd)
figure_list = save_figures(image_path, fig_count, gallery_conf)
raise
finally:
os.chdir(cwd)
sys.stdout = orig_stdout
print(" - time elapsed : %.2g sec" % time_elapsed)
code_output = "\n{0}\n\n{1}\n\n".format(image_list, stdout)
return code_output, time_elapsed, fig_count + len(figure_list) | Executes the code block of the example file |
386,687 | def police_priority_map_exceed_map_pri3_exceed(self, **kwargs):
config = ET.Element("config")
police_priority_map = ET.SubElement(config, "police-priority-map", xmlns="urn:brocade.com:mgmt:brocade-policer")
name_key = ET.SubElement(police_priority_map, "name")
name_key.text = kwargs.pop('name')
exceed = ET.SubElement(police_priority_map, "exceed")
map_pri3_exceed = ET.SubElement(exceed, "map-pri3-exceed")
map_pri3_exceed.text = kwargs.pop('map_pri3_exceed')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
386,688 | def get_diff_idxs(array, rtol, atol):
C, N, L = array.shape
diff_idxs = set()
for c in range(1, C):
for n in range(N):
if not numpy.allclose(array[c, n], array[0, n], rtol, atol):
diff_idxs.add(n)
return numpy.fromiter(diff_idxs, int) | Given an array with (C, N, L) values, the first being the reference value,
compute the relative differences and discard the ones below the tolerance.
:returns: indices where there are sensible differences. |
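A minimal check of the comparison above, relying on the `get_diff_idxs` just defined; the array shape and tolerances are illustrative:
import numpy

arr = numpy.ones((3, 4, 2))   # 3 calculations, 4 sites, 2 values per site
arr[2, 1] += 0.5              # perturb site 1 in the third calculation
print(get_diff_idxs(arr, rtol=1e-5, atol=1e-8))   # -> [1]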
386,689 | def _dens(self,R,z,phi=0.,t=0.):
x,y,z= bovy_coords.cyl_to_rect(R,phi,z)
if self._aligned:
xp, yp, zp= x, y, z
else:
xyzp= numpy.dot(self._rot,numpy.array([x,y,z]))
xp, yp, zp= xyzp[0], xyzp[1], xyzp[2]
m= numpy.sqrt(xp**2.+yp**2./self._b2+zp**2./self._c2)
return self._mdens(m) | NAME:
_dens
PURPOSE:
evaluate the density for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the density
HISTORY:
2018-08-06 - Written - Bovy (UofT) |
386,690 | def tatoeba(language, word, minlength = 10, maxlength = 100):
word, sentences = unicode(word), []
page = requests.get( % (word, lltk.locale.iso639_1to3(language)))
tree = html.fromstring(page.text)
for sentence in tree.xpath():
sentence = sentence.strip(u).replace(u, u).replace(, u)
if word in sentence and len(sentence) < maxlength and len(sentence) > minlength:
sentences.append(sentence)
return sentences | Returns a list of suitable text samples for a given word using Tatoeba.org. |
386,691 | def get_map(self, url, auth_map=None):
response_code, content = self.get(url, auth_map)
return response_code, content | Sends a GET request.
:param url: URL to which the HTTP request is sent.
:param auth_map: Dictionary with the authentication information for the networkAPI.
:return: Returns a tuple containing:
(< http response code >, < response body >).
:raise ConnectionError: Failed to connect to the networkAPI.
:raise RestError: Failure while accessing the networkAPI. |
386,692 | def robots(request):
resp = request.response
resp.status = '200 OK'
resp.content_type = 'text/plain'
resp.body = 'User-agent: *\nDisallow: /\n'
return resp | Return a simple "don't index me" robots.txt file. |
386,693 | def get_email(self, token):
resp = requests.get(self.emails_url,
params={: token.token})
emails = resp.json().get(, [])
email =
try:
email = emails[0].get()
primary_emails = [e for e in emails if e.get(, False)]
email = primary_emails[0].get()
except (IndexError, TypeError, KeyError):
return
finally:
return email | Fetches email address from email API endpoint |
386,694 | def _create_function(name, doc=""):
def _(col):
sc = SparkContext._active_spark_context
jc = getattr(sc._jvm.functions, name)(col._jc if isinstance(col, Column) else col)
return Column(jc)
_.__name__ = name
_.__doc__ = doc
return _ | Create a PySpark function by its name |
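The same name-based factory idiom, demonstrated against the standard-library `math` module instead of the Spark JVM gateway so it runs without a SparkContext; this is an analogue, not the PySpark code itself:
import math

def _create_function(name, doc=""):
    def _(x):
        return getattr(math, name)(x)
    _.__name__ = name
    _.__doc__ = doc
    return _

sqrt = _create_function("sqrt", "Square root wrapper")
print(sqrt.__name__, sqrt(9.0))   # -> sqrt 3.0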
386,695 | def _create_menu(self, items):
menu = Gtk.Menu()
self._create_menu_items(menu, items)
return menu | Create a menu from the given node.
:param list items: list of menu items
:returns: a new Gtk.Menu object holding all items of the node |
386,696 | def file_md5(file_name):
md5 = hashlib.md5()
with open(file_name, 'rb') as f:
for chunk in iter(lambda: f.read(128 * md5.block_size), b''):
md5.update(chunk)
return md5.hexdigest() | Generate an MD5 hash of the specified file.
@file_name - The file to hash.
Returns an MD5 hex digest string. |
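A quick self-check of `file_md5` above against hashing the same bytes in one shot; the temporary file is illustrative:
import hashlib, os, tempfile

tmp = tempfile.NamedTemporaryFile(delete=False)
tmp.write(b"hello world")
tmp.close()
print(file_md5(tmp.name))                        # 5eb63bbbe01eeed093cb22bb8f5acdc3
print(hashlib.md5(b"hello world").hexdigest())   # same digest
os.unlink(tmp.name)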
386,697 | def get_force(self, component_info=None, data=None, component_position=None):
components = []
append_components = components.append
for _ in range(component_info.plate_count):
component_position, plate = QRTPacket._get_exact(
RTForcePlate, data, component_position
)
force_list = []
for _ in range(plate.force_count):
component_position, force = QRTPacket._get_exact(
RTForce, data, component_position
)
force_list.append(force)
append_components((plate, force_list))
return components | Get force data. |
386,698 | def list(self, filter_name=None, filter_ids=None, filter_labels=None, page=None):
label_param =
if filter_labels:
label_param = .join([.format(label, value) for label, value in filter_labels.items()])
filters = [
.format(filter_name) if filter_name else None,
.format(.join([str(app_id) for app_id in filter_ids])) if filter_ids else None,
.format(label_param) if filter_labels else None,
.format(page) if page else None
]
return self._get(
url=.format(self.URL),
headers=self.headers,
params=self.build_param_string(filters)
) | This API endpoint returns a paginated list of the Servers
associated with your New Relic account. Servers can be filtered
by their name or by a list of server IDs.
:type filter_name: str
:param filter_name: Filter by server name
:type filter_ids: list of ints
:param filter_ids: Filter by server ids
:type filter_labels: dict of label type: value pairs
:param filter_labels: Filter by server labels
:type page: int
:param page: Pagination index
:rtype: dict
:return: The JSON response of the API, with an additional 'pages' key
if there are paginated results
::
{
"servers": [
{
"id": "integer",
"account_id": "integer",
"name": "string",
"host": "string",
"reporting": "boolean",
"last_reported_at": "time",
"summary": {
"cpu": "float",
"cpu_stolen": "float",
"disk_io": "float",
"memory": "float",
"memory_used": "integer",
"memory_total": "integer",
"fullest_disk": "float",
"fullest_disk_free": "integer"
}
}
],
"pages": {
"last": {
"url": "https://api.newrelic.com/v2/servers.json?page=2",
"rel": "last"
},
"next": {
"url": "https://api.newrelic.com/v2/servers.json?page=2",
"rel": "next"
}
}
} |
386,699 | def DetermineRunner(bbdir):
tacfile = os.path.join(bbdir, 'buildbot.tac')
if not os.path.exists(tacfile):
import buildbot.scripts.runner
return buildbot.scripts.runner.run
with open(tacfile, 'r') as f:
contents = f.read()
try:
if 'buildbot_worker' in contents:
import buildbot_worker.scripts.runner
return buildbot_worker.scripts.runner.run
except ImportError:
pass
try:
if 'buildslave' in contents:
import buildslave.scripts.runner
return buildslave.scripts.runner.run
except ImportError:
pass
import buildbot.scripts.runner
return buildbot.scripts.runner.run | Checks if the given directory is a worker or a master and returns the
appropriate run function. |