Unnamed: 0 | code | docstring
---|---|---|
2,200 | def cells_rt_meta(workbook, sheet, row, col):
logger_excel.info("enter cells_rt_meta")
col_loop = 0
cell_data = []
temp_sheet = workbook.sheet_by_name(sheet)
while col_loop < temp_sheet.ncols:
col += 1
col_loop += 1
try:
if temp_sheet.cell_value(row, col) != xlrd.empty_cell and temp_sheet.cell_value(row, col) != '':
cell_data.append(temp_sheet.cell_value(row, col))
except IndexError as e:
logger_excel.warn("cells_rt_meta: IndexError: sheet: {}, row: {}, col: {}, {}".format(sheet, row, col, e))
logger_excel.info("exit cells_right_meta")
return cell_data | Traverse all cells in a row. If you find new data in a cell, add it to the list.
:param obj workbook:
:param str sheet:
:param int row:
:param int col:
:return list: Cell data for a specific row |
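A minimal usage sketch for the row-traversal helper above (the workbook path, sheet name, and start cell are hypothetical): it collects every non-empty cell to the right of the given column.

```python
import xlrd

workbook = xlrd.open_workbook('data.xls')
# Read row 0, starting one cell right of column 0.
row_values = cells_rt_meta(workbook, 'Sheet1', row=0, col=0)
print(row_values)
```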
2,201 | def render_html(self, obj, context=None):
provided_context = context or Context()
context = RequestContext(mock_request())
context.update(provided_context)
context.push()
context[self._meta.context_varname] = obj
rendered = render_to_string(self._meta.template_name, context)
context.pop()
return rendered | Generate the 'html' attribute of an oembed resource using a template.
Sort of a corollary to the parser's render_oembed method. By default,
the current mapping will be passed in as the context.
OEmbed templates are stored in:
oembed/provider/[app_label]_[model].html
-- or --
oembed/provider/media_video.html |
2,202 | def lreshape(data, groups, dropna=True, label=None):
if isinstance(groups, dict):
keys = list(groups.keys())
values = list(groups.values())
else:
keys, values = zip(*groups)
all_cols = list(set.union(*[set(x) for x in values]))
id_cols = list(data.columns.difference(all_cols))
K = len(values[0])
for seq in values:
if len(seq) != K:
raise ValueError('All column lists must be same length')
mdata = {}
pivot_cols = []
for target, names in zip(keys, values):
to_concat = [data[col].values for col in names]
import pandas.core.dtypes.concat as _concat
mdata[target] = _concat._concat_compat(to_concat)
pivot_cols.append(target)
for col in id_cols:
mdata[col] = np.tile(data[col].values, K)
if dropna:
mask = np.ones(len(mdata[pivot_cols[0]]), dtype=bool)
for c in pivot_cols:
mask &= notna(mdata[c])
if not mask.all():
mdata = {k: v[mask] for k, v in mdata.items()}
return data._constructor(mdata, columns=id_cols + pivot_cols) | Reshape wide-format data to long. Generalized inverse of DataFrame.pivot
Parameters
----------
data : DataFrame
groups : dict
{new_name : list_of_columns}
dropna : boolean, default True
Examples
--------
>>> data = pd.DataFrame({'hr1': [514, 573], 'hr2': [545, 526],
... 'team': ['Red Sox', 'Yankees'],
... 'year1': [2007, 2007], 'year2': [2008, 2008]})
>>> data
hr1 hr2 team year1 year2
0 514 545 Red Sox 2007 2008
1 573 526 Yankees 2007 2008
>>> pd.lreshape(data, {'year': ['year1', 'year2'], 'hr': ['hr1', 'hr2']})
team year hr
0 Red Sox 2007 514
1 Yankees 2007 573
2 Red Sox 2008 545
3 Yankees 2008 526
Returns
-------
reshaped : DataFrame |
2,203 | def face_function(script, function=):
filter_xml = ''.join([
,
,
.format(str(function).replace('<', '&lt;').replace('>', '&gt;')),
,
,
,
])
util.write_filter(script, filter_xml)
return None | Boolean function using muparser lib to perform face selection over
current mesh.
See help(mlx.muparser_ref) for muparser reference documentation.
It's possible to use parenthesis, per-vertex variables and boolean operator:
(, ), and, or, <, >, =
It's possible to use per-face variables like attributes associated to the three
vertices of every face.
Variables (per face):
x0, y0, z0 for first vertex; x1,y1,z1 for second vertex; x2,y2,z2 for third vertex
nx0, ny0, nz0, nx1, ny1, nz1, etc. for vertex normals
r0, g0, b0, a0, etc. for vertex color
q0, q1, q2 for quality
wtu0, wtv0, wtu1, wtv1, wtu2, wtv2 (per wedge texture coordinates)
ti for face texture index (>= ML2016.12)
vsel0, vsel1, vsel2 for vertex selection (1 yes, 0 no) (>= ML2016.12)
fr, fg, fb, fa for face color (>= ML2016.12)
fq for face quality (>= ML2016.12)
fnx, fny, fnz for face normal (>= ML2016.12)
fsel face selection (1 yes, 0 no) (>= ML2016.12)
Args:
script: the FilterScript object or script filename to write
the filter] to.
function (str): a boolean function that will be evaluated in order
to select a subset of faces.
Layer stack:
No impacts
MeshLab versions:
2016.12
1.3.4BETA |
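A hedged usage sketch of the selection filter above, assuming an existing `FilterScript` object named `script`; the muparser condition combines a per-face quality test with the per-vertex selection flags listed in the docstring.

```python
# Select faces with quality below 50, or faces whose three
# vertices are all currently selected.
face_function(script, function='(fq < 50) or (vsel0 and vsel1 and vsel2)')
```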
2,204 | def fun_inverse(fun=None, y=0, x0=None, args=(), disp=False, method=None, **kwargs):
fun_inverse.fun = cost_fun.fun = fun if fun is not None else getattr(fun_inverse, 'fun', lambda x: x)
fun_inverse.target = cost_fun.target = y or 0
fun_inverse.verbose = verbose = cost_fun.verbose = kwargs.pop(
'verbose', getattr(cost_fun, 'verbose', getattr(fun_inverse, 'verbose', False)))
fun_inverse.x0 = x0 = x0 if x0 is not None else getattr(fun_inverse, 'x0', 0) or 0
if verbose:
print('fun_inverse: x0={} target={}'.format(fun_inverse.x0, fun_inverse.target))
res = minimize(cost_fun,
x0=x0,
args=args,
options=kwargs.pop('options', {}),
method=method,
**kwargs
)
if isinstance(x0, NUMERIC_TYPES):
return res.x[0]
return res.x | r"""Find the threshold level that accomplishes the desired specificity
Call indicated function repeatedly to find answer to the inverse function evaluation
Arguments:
fun (function): function to be calculate an inverse for
y (float): desired output of fun
x0 (float): initial guess at input to fun, the fun arg that will be adjusted
args (list or tuple): constants arguments to fun which will not be adjusted
constraints (tuple): dictionary of optimizer constraints (see scipy.optimize.minimize)
disp (bool): whether to display incremental results during optimization
method (str): one of the scipy.optimize.minimize methods
additional kwargs are passed along to the minimize function
fun_inverse can be used to calculate a trivial square root:
>>> round(fun_inverse(fun=lambda x: x**2, y=9, x0=0), 6)
3.0 |
2,205 | def create_ar (archive, compression, cmd, verbosity, interactive, filenames):
opts = 'rc'
if verbosity > 1:
opts += 'v'
cmdlist = [cmd, opts, archive]
cmdlist.extend(filenames)
return cmdlist | Create an AR archive. |
2,206 | def available_composite_ids(self, available_datasets=None):
if available_datasets is None:
available_datasets = self.available_dataset_ids(composites=False)
else:
if not all(isinstance(ds_id, DatasetID) for ds_id in available_datasets):
raise ValueError(
" must all be DatasetID objects")
all_comps = self.all_composite_ids()
comps, mods = self.cpl.load_compositors(self.attrs[])
dep_tree = DependencyTree(self.readers, comps, mods)
dep_tree.find_dependencies(set(available_datasets + all_comps))
available_comps = set(x.name for x in dep_tree.trunk())
return sorted(available_comps & set(all_comps)) | Get names of compositors that can be generated from the available datasets.
Returns: generator of available compositor's names |
2,207 | def _hijack_gtk(self):
def dummy(*args, **kw):
pass
orig_main, gtk.main = gtk.main, dummy
orig_main_quit, gtk.main_quit = gtk.main_quit, dummy
return orig_main, orig_main_quit | Hijack a few key functions in GTK for IPython integration.
Modifies pyGTK's main and main_quit with a dummy so user code does not
block IPython. This allows us to use %run to run arbitrary pygtk
scripts from a long-lived IPython session, and when they attempt to
start or stop the GTK main loop, nothing happens.
Returns
-------
The original functions that have been hijacked:
- gtk.main
- gtk.main_quit |
2,208 | def _detect_sse41(self):
"Does this compiler support SSE4.1 intrinsics?"
self._print_support_start()
result = self.hasfunction( ,
include=,
extra_postargs=[])
self._print_support_end('SSE4.1', result)
return result | Does this compiler support SSE4.1 intrinsics? |
2,209 | def newton(self):
dae = self.system.dae
while True:
inc = self.calc_inc()
dae.x += inc[:dae.n]
dae.y += inc[dae.n:dae.n + dae.m]
self.niter += 1
max_mis = max(abs(inc))
self.iter_mis.append(max_mis)
self._iter_info(self.niter)
if max_mis < self.config.tol:
self.solved = True
break
elif self.niter > 5 and max_mis > 1000 * self.iter_mis[0]:
logger.warning('Solution diverged after {} iterations'.format(self.niter))
break
if self.niter > self.config.maxit:
logger.warning('Reached maximum number of iterations')
break
return self.solved, self.niter | Newton power flow routine
Returns
-------
(bool, int)
success flag, number of iterations |
2,210 | def type_converter(text):
if text.isdigit():
return int(text), int
try:
return float(text), float
except ValueError:
return text, STRING_TYPE | I convert strings into integers, floats, and strings! |
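Because the converter returns both the coerced value and its type, callers can branch on the type without re-parsing. A quick illustration (`STRING_TYPE` is `str` on Python 3):

```python
>>> type_converter("42")
(42, <class 'int'>)
>>> type_converter("3.14")
(3.14, <class 'float'>)
>>> type_converter("hello")
('hello', <class 'str'>)
```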
2,211 | def jsonresolver_loader(url_map):
def endpoint(doi_code):
pid_value = "10.13039/{0}".format(doi_code)
_, record = Resolver(pid_type=, object_type=,
getter=Record.get_record).resolve(pid_value)
return record
pattern =
url_map.add(Rule(pattern, endpoint=endpoint, host=))
url_map.add(Rule(pattern, endpoint=endpoint, host=)) | Jsonresolver hook for funders resolving. |
2,212 | def b_pathInTree(self, astr_path):
if astr_path == '/': return True, ['/']
al_path = astr_path.split('/')
if astr_path != '/' and al_path[-1] == '':
al_path = al_path[0:-2]
if not len(al_path[0]):
al_path[0] = '/'
return self.b_pathOK(al_path), al_path
l_path = self.l_cwd[:]
if al_path[0] == '..':
while al_path[0] == '..' and len(al_path):
l_path = l_path[0:-1]
if len(al_path) >= 2: al_path = al_path[1:]
else: al_path[0] = ''
if len(al_path[0]):
l_path.extend(al_path)
else:
l_path = self.l_cwd
l_path.extend(al_path)
if len(l_path)>=1 and l_path[0] != '/': l_path.insert(0, '/')
if len(l_path)>1: l_path[0] = ''
if not len(l_path): l_path = ['/']
str_path = '/'.join(l_path)
b_valid, al_path = self.b_pathInTree(str_path)
return b_valid, al_path | Converts a string <astr_path> specifier to a list-based
*absolute* lookup, i.e. "/node1/node2/node3" is converted
to ['/' 'node1' 'node2' 'node3'].
The method also understands a paths that start with: '..' or
combination of '../../..' and is also aware that the root
node is its own parent.
If the path list conversion is valid (i.e. exists in the
space of existing paths, l_allPaths), return True and the
destination path list; else return False and the current
path list. |
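A self-contained sketch of the same normalization idea, using a hypothetical standalone helper rather than the class method (no `b_pathOK` validation, but the `'..'` and root-is-its-own-parent handling match the description above):

```python
def path_to_list(path, cwd=None):
    """Convert '/a/b/c' or '../b' into an absolute list like ['/', 'a', 'b']."""
    parts = [p for p in path.split('/') if p]
    base = ['/'] if path.startswith('/') else list(cwd or ['/'])
    for p in parts:
        if p == '..':
            if len(base) > 1:   # the root node is its own parent
                base.pop()
        else:
            base.append(p)
    return base

path_to_list('/node1/node2/node3')        # ['/', 'node1', 'node2', 'node3']
path_to_list('../node4', ['/', 'node1'])  # ['/', 'node4']
```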
2,213 | def get_model_indexes(model, add_reserver_flag=True):
import uliweb.orm as orm
from sqlalchemy.engine.reflection import Inspector
indexes = []
engine = model.get_engine().engine
insp = Inspector.from_engine(engine)
for index in insp.get_indexes(model.tablename):
d = {}
d['name'] = index['name']
d['column_names'] = index['column_names']
d['unique'] = index['unique']
if add_reserver_flag:
d['reserved'] = True
indexes.append(d)
return indexes | Creating indexes suit for model_config. |
2,214 | def parse_mini(memory_descriptor, buff):
mms = MinidumpMemorySegment()
mms.start_virtual_address = memory_descriptor.StartOfMemoryRange
mms.size = memory_descriptor.Memory.DataSize
mms.start_file_address = memory_descriptor.Memory.Rva
mms.end_virtual_address = mms.start_virtual_address + mms.size
return mms | memory_descriptor: MINIDUMP_MEMORY_DESCRIPTOR
buff: file_handle |
2,215 | def obfn_dfd(self):
Ef = self.eval_Rf(self.Xf)
E = sl.irfftn(Ef, self.cri.Nv, self.cri.axisN)
return (np.linalg.norm(self.W * E)**2) / 2.0 | r"""Compute data fidelity term :math:`(1/2) \sum_k \| W (\sum_m
\mathbf{d}_m * \mathbf{x}_{k,m} - \mathbf{s}_k) \|_2^2` |
2,216 | def create_argparser():
parser = argparse.ArgumentParser()
arg_defaults = {
"daemon": False,
"loop": False,
"listpresets": False,
"config": None,
"debug": False,
"sleeptime": 300,
"version": False,
"verbose_count": 0
}
parser.add_argument("-c", "--config", dest="config",
help="config file", default=arg_defaults["config"])
parser.add_argument("--list-presets", dest="listpresets",
help="list all available presets",
action="store_true", default=arg_defaults["listpresets"])
parser.add_argument("-d", "--daemon", dest="daemon",
help="go into daemon mode (implies --loop)",
action="store_true", default=arg_defaults["daemon"])
parser.add_argument("--debug", dest="debug",
help="increase logging level to DEBUG (DEPRECATED, please use -vvv)",
action="store_true", default=arg_defaults["debug"])
parser.add_argument("--loop", dest="loop",
help="loop forever (default is to update once)",
action="store_true", default=arg_defaults["loop"])
parser.add_argument("--sleeptime", dest="sleeptime",
help="how long to sleep between checks in seconds",
default=arg_defaults["sleeptime"])
parser.add_argument("--version", dest="version",
help="show version and exit",
action="store_true", default=arg_defaults["version"])
parser.add_argument("-v", "--verbose", dest="verbose_count",
action="count", default=arg_defaults["verbose_count"],
help="increases log verbosity for each occurrence")
return parser, arg_defaults | Instantiate an `argparse.ArgumentParser`.
Adds all basic cli options including default values. |
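A short usage sketch: build the parser, parse a hypothetical command line, and note that repeated `-v` flags raise the verbosity count.

```python
parser, defaults = create_argparser()
args = parser.parse_args(['--loop', '--sleeptime', '60', '-vv'])
print(args.loop)           # True
print(args.sleeptime)      # '60' -- stays a string, since no type= was given
print(args.verbose_count)  # 2
```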
2,217 | def get_block_info(self):
if not self.finished:
raise Exception("Not finished downloading")
ret = []
for (block_hash, block_data) in self.block_info.items():
ret.append( (block_data['height'], block_data['txs']) )
return ret | Get the retrieved block information.
Return [(height, [txs])] on success, ordered on height
Raise if not finished downloading |
2,218 | def request_forward_agent(self, handler):
if self.closed or self.eof_received or self.eof_sent or not self.active:
raise SSHException('Channel is not open')
m = Message()
m.add_byte(cMSG_CHANNEL_REQUEST)
m.add_int(self.remote_chanid)
m.add_string('auth-agent-req@openssh.com')
m.add_boolean(False)
self.transport._send_user_message(m)
self.transport._set_forward_agent_handler(handler)
return True | Request for a forward SSH Agent on this channel.
This is only valid for an ssh-agent from OpenSSH !!!
:param function handler:
a required handler to use for incoming SSH Agent connections
:return: True if we are ok, else False (at that time we always return ok)
:raises: SSHException in case of channel problem. |
2,219 | def guest_delete_disks(self, userid, disk_vdev_list):
action = "delete disks from guest " % (str(disk_vdev_list),
userid)
with zvmutils.log_and_reraise_sdkbase_error(action):
self._vmops.delete_disks(userid, disk_vdev_list) | Delete disks from an existing guest vm.
:param userid: (str) the userid of the vm to be deleted
:param disk_vdev_list: (list) the vdev list of disks to be deleted,
for example: ['0101', '0102'] |
2,220 | def _extensions(self, line):
line = line.strip()
if not line.startswith("//") and "." in line:
line = line.encode("idna").decode("utf-8")
if line.startswith("*."):
line = line[2:]
extension = line.split(".")[-1]
if extension in self.public_suffix_db:
self.public_suffix_db[extension] = List(
self.public_suffix_db[extension] + [line]
).format()
else:
self.public_suffix_db.update({extension: [line]}) | Extract the extension from the given line.
:param line: The line from the official public suffix repository.
:type line: str |
2,221 | def _set_request_referer_metric(self, request):
if 'HTTP_REFERER' in request.META and request.META['HTTP_REFERER']:
monitoring.set_custom_metric('request_referer', request.META['HTTP_REFERER']) | Add metric 'request_referer' for http referer. |
2,222 | def get_effect_class(self, effect_name: str, package_name: str = None) -> Type['Effect']:
return self._project.get_effect_class(effect_name, package_name=package_name) | Get an effect class by the class name
Args:
effect_name (str): Name of the effect class
Keyword Args:
package_name (str): The package the effect belongs to. This is optional and only
needed when effect class names are not unique.
Returns:
:py:class:`Effect` class |
2,223 | def open_required(func):
def wrapper(self, *args, **kwargs):
if self._status == "closed":
raise aiohttp.web.HTTPForbidden(text="The project is not opened")
return func(self, *args, **kwargs)
return wrapper | Use this decorator to raise an error if the project is not opened |
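A sketch of the decorator on a hypothetical project controller; any call made while `_status` is `"closed"` raises `HTTPForbidden`.

```python
import aiohttp.web

class ProjectHandler:
    def __init__(self):
        self._status = "closed"

    @open_required
    def export(self):
        return "exported"

handler = ProjectHandler()
handler.export()  # raises aiohttp.web.HTTPForbidden until _status is changed
```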
2,224 | def convert_upsample_bilinear(params, w_name, scope_name, inputs, layers, weights, names):
print('Converting upsample_bilinear2d ...')
if names == 'short':
tf_name = 'UPSL' + random_string(4)
elif names == 'keep':
tf_name = w_name
else:
tf_name = w_name + str(random.random())
output_size = params['size']
align_corners = params['align_corners'] > 0
def target_layer(x, size=output_size, align_corners=align_corners):
import tensorflow as tf
x = tf.transpose(x, [0, 2, 3, 1])
x = tf.image.resize_images(x, size, align_corners=align_corners)
x = tf.transpose(x, [0, 3, 1, 2])
return x
lambda_layer = keras.layers.Lambda(target_layer)
layers[scope_name] = lambda_layer(layers[inputs[0]]) | Convert upsample_bilinear2d layer.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers |
2,225 | def hoverEnterEvent(self, event):
self._hovered = True
if self.toolTip():
QToolTip.showText(QCursor.pos(), self.toolTip())
return True
return self.style() == XNodeHotspot.Style.Icon | Processes when this hotspot is entered.
:param event | <QHoverEvent>
:return <bool> | processed |
2,226 | def derivatives_factory(cls, coef, degree, knots, ext, **kwargs):
return cls._basis_spline_factory(coef, degree, knots, 1, ext) | Given some coefficients, return the derivative of a B-spline. |
2,227 | def voltage_delta_vde(v_nom, s_max, r, x, cos_phi):
delta_v = (s_max * (
r * cos_phi + x * math.sin(math.acos(cos_phi)))) / v_nom ** 2
return delta_v | Estimate voltage drop/increase
The VDE [#]_ proposes a simplified method to estimate voltage drop or
increase in radial grids.
Parameters
----------
v_nom : int
Nominal voltage
s_max : float
Apparent power
r : float
Short-circuit resistance from node to HV/MV substation (in ohm)
x : float
Short-circuit reactance from node to HV/MV substation (in ohm). Must
be a signed number indicating (+) inductive reactive consumer (load
case) or (-) inductive reactive supplier (generation case)
cos_phi : float
Power factor
Returns
-------
:any:`float`
Voltage drop or increase
References
----------
.. [#] VDE Anwenderrichtlinie: Erzeugungsanlagen am Niederspannungsnetz –
Technische Mindestanforderungen für Anschluss und Parallelbetrieb von
Erzeugungsanlagen am Niederspannungsnetz, 2011 |
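A worked example of the formula with illustrative values for a 400 V feeder (30 kVA load, R = 0.1 Ω, X = 0.05 Ω, cos φ = 0.95):

```python
import math

v_nom, s_max, r, x, cos_phi = 400.0, 30e3, 0.1, 0.05, 0.95
sin_phi = math.sin(math.acos(cos_phi))                   # ~0.3122
delta_v = s_max * (r * cos_phi + x * sin_phi) / v_nom**2
print(delta_v)  # ~0.0207, i.e. roughly a 2 % voltage drop
```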
2,228 | def open(filename, mode="r", iline = 189,
xline = 193,
strict = True,
ignore_geometry = False,
endian = 'big'):
if 'w' in mode:
problem = 'w in mode would truncate the file'
solution = 'use r+ for read-write access'
raise ValueError(', '.join((problem, solution)))
endians = {
'big': 256,
'msb': 256,
'little': 0,
'lsb': 0,
}
if endian not in endians:
problem = 'unknown endianness {}, expected one of: '
opts = ' '.join(endians.keys())
raise ValueError(problem.format(endian) + opts)
from . import _segyio
fd = _segyio.segyiofd(str(filename), mode, endians[endian])
fd.segyopen()
metrics = fd.metrics()
f = segyio.SegyFile(fd,
filename = str(filename),
mode = mode,
iline = iline,
xline = xline,
endian = endian,
)
try:
dt = segyio.tools.dt(f, fallback_dt = 4000.0) / 1000.0
t0 = f.header[0][segyio.TraceField.DelayRecordingTime]
samples = metrics['samplecount']
f._samples = (numpy.arange(samples) * dt) + t0
except:
f.close()
raise
if ignore_geometry:
return f
return infer_geometry(f, metrics, iline, xline, strict) | Open a segy file.
Opens a segy file and tries to figure out its sorting, inline numbers,
crossline numbers, and offsets, and enables reading and writing to this
file in a simple manner.
For reading, the access mode `r` is preferred. All write operations will
raise an exception. For writing, the mode `r+` is preferred (as `rw` would
truncate the file). Any mode with `w` will raise an error. The modes used
are standard C file modes; please refer to that documentation for a
complete reference.
Open should be used together with python's ``with`` statement. Please refer
to the examples. When the ``with`` statement is used the file will
automatically be closed when the routine completes or an exception is
raised.
By default, segyio tries to open in ``strict`` mode. This means the file will
be assumed to represent a geometry with consistent inline, crosslines and
offsets. If strict is False, segyio will still try to establish a geometry,
but it won't abort if it fails. When in non-strict mode is opened,
geometry-dependent modes such as iline will raise an error.
If ``ignore_geometry=True``, segyio will *not* try to build iline/xline or
other geometry related structures, which leads to faster opens. This is
essentially the same as using ``strict=False`` on a file that has no
geometry.
Parameters
----------
filename : str
Path to file to open
mode : {'r', 'r+'}
File access mode, read-only ('r', default) or read-write ('r+')
iline : int or segyio.TraceField
Inline number field in the trace headers. Defaults to 189 as per the
SEG-Y rev1 specification
xline : int or segyio.TraceField
Crossline number field in the trace headers. Defaults to 193 as per the
SEG-Y rev1 specification
strict : bool, optional
Abort if a geometry cannot be inferred. Defaults to True.
ignore_geometry : bool, optional
Opt out on building geometry information, useful for e.g. shot
organised files. Defaults to False.
endian : {'big', 'msb', 'little', 'lsb'}
File endianness, big/msb (default) or little/lsb
Returns
-------
file : segyio.SegyFile
An open segyio file handle
Raises
------
ValueError
If the mode string contains 'w', as it would truncate the file
Notes
-----
.. versionadded:: 1.1
.. versionchanged:: 1.8
endian argument
When a file is opened non-strict, only raw traces access is allowed, and
using modes such as ``iline`` raise an error.
Examples
--------
Open a file in read-only mode:
>>> with segyio.open(path, "r") as f:
... print(f.ilines)
...
[1, 2, 3, 4, 5]
Open a file in read-write mode:
>>> with segyio.open(path, "r+") as f:
... f.trace = np.arange(100)
Open two files at once:
>>> with segyio.open(path) as src, segyio.open(path, "r+") as dst:
... dst.trace = src.trace # copy all traces from src to dst
Open a file little-endian file:
>>> with segyio.open(path, endian = 'little') as f:
... f.trace[0] |
2,229 | def _unmount_devicemapper(self, cid):
mountpoint = self.mountpoint
Mount.unmount_path(mountpoint)
cinfo = self.client.inspect_container(cid)
dev_name = cinfo['GraphDriver']['Data']['DeviceName']
Mount.remove_thin_device(dev_name)
self._cleanup_container(cinfo) | Devicemapper unmount backend. |
2,230 | def text_ui(self):
self.logger.info("Starting command line interface")
self.help()
try:
self.ipython_ui()
except ImportError:
self.fallback_ui()
self.system.cleanup() | Start Text UI main loop |
2,231 | def _get_name(self, name):
team_name = name.text()
abbr = self._parse_abbreviation(name)
non_di = False
if not abbr:
abbr = team_name
non_di = True
return team_name, abbr, non_di | Find a team's name and abbreviation.
Given the team's HTML name tag, determine their name, abbreviation, and
whether or not they compete in Division-I.
Parameters
----------
name : PyQuery object
A PyQuery object of a team's HTML name tag in the boxscore.
Returns
-------
tuple
Returns a tuple containing the name, abbreviation, and whether or
not the team participates in Division-I. Tuple is in the following
order: Team Name, Team Abbreviation, boolean which evaluates to
True if the team does not participate in Division-I. |
2,232 | def save_code(self, title, addr, _bytes):
self.standard_bytes_header(title, addr, len(_bytes))
_bytes = [self.BLOCK_TYPE_DATA] + [(int(x) & 0xFF) for x in _bytes]
self.standard_block(_bytes) | Saves the given bytes as code. If bytes are strings,
its chars will be converted to bytes |
2,233 | def nvmlDeviceGetCurrPcieLinkWidth(handle):
fn = _nvmlGetFunctionPointer("nvmlDeviceGetCurrPcieLinkWidth")
width = c_uint()
ret = fn(handle, byref(width))
_nvmlCheckReturn(ret)
return bytes_to_str(width.value) | r"""
/**
* Retrieves the current PCIe link width
*
* For Fermi &tm; or newer fully supported devices.
*
* @param device The identifier of the target device
* @param currLinkWidth Reference in which to return the current PCIe link generation
*
* @return
* - \ref NVML_SUCCESS if \a currLinkWidth has been populated
* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
* - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a currLinkWidth is null
* - \ref NVML_ERROR_NOT_SUPPORTED if PCIe link information is not available
* - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
* - \ref NVML_ERROR_UNKNOWN on any unexpected error
*/
nvmlReturn_t DECLDIR nvmlDeviceGetCurrPcieLinkWidth |
2,234 | def image(self, well_row, well_column, field_row, field_column):
return next((i for i in self.images
if attribute(i, 'v') == well_column and
attribute(i, 'u') == well_row and
attribute(i, 'x') == field_column and
attribute(i, 'y') == field_row), '') | Get path of specified image.
Parameters
----------
well_row : int
Starts at 0. Same as --U in files.
well_column : int
Starts at 0. Same as --V in files.
field_row : int
Starts at 0. Same as --Y in files.
field_column : int
Starts at 0. Same as --X in files.
Returns
-------
string
Path to image or empty string if image is not found. |
2,235 | def modified_Wilson_Vc(zs, Vcs, Aijs):
if not none_and_length_check([zs, Vcs]):
raise Exception()
C = -2500
Vcm = sum(zs[i]*Vcs[i] for i in range(len(zs)))
for i in range(len(zs)):
Vcm += C*zs[i]*log(zs[i] + sum(zs[j]*Aijs[i][j] for j in range(len(zs))))/1E6
return Vcm | r'''Calculates critical volume of a mixture according to
mixing rules in [1]_ with parameters. Equation
.. math::
V_{cm} = \sum_i x_i V_{ci} + C\sum_i x_i \ln \left(x_i + \sum_j x_j A_{ij}\right)V_{ref}
For a binary mixture, this simplifies to:
.. math::
V_{cm} = x_1 V_{c1} + x_2 V_{c2} + C[x_1 \ln(x_1 + x_2A_{12}) + x_2\ln(x_2 + x_1 A_{21})]
Parameters
----------
zs : float
Mole fractions of all components
Vcs : float
Critical volumes of all components, [m^3/mol]
Aijs : matrix
Interaction parameters, [cm^3/mol]
Returns
-------
Vcm : float
Critical volume of the mixture, [m^3/mol]
Notes
-----
The equation and original article has been reviewed.
All parameters, even if zero, must be given to this function.
C = -2500
nu parameters are in cm^3/mol, but are converted to m^3/mol inside the function
Examples
--------
1-butanol/benzene 0.4271/0.5729 mixture, Vcm = 268.096 mL/mol.
>>> modified_Wilson_Vc([0.4271, 0.5729], [0.000273, 0.000256],
... [[0, 0.6671250], [1.3939900, 0]])
0.0002664335032706881
References
----------
.. [1] Teja, Amyn S., Kul B. Garg, and Richard L. Smith. "A Method for the
Calculation of Gas-Liquid Critical Temperatures and Pressures of
Multicomponent Mixtures." Industrial & Engineering Chemistry Process
Design and Development 22, no. 4 (1983): 672-76.
.. [2] Najafi, Hamidreza, Babak Maghbooli, and Mohammad Amin Sobati.
"Prediction of True Critical Temperature of Multi-Component Mixtures:
Extending Fast Estimation Methods." Fluid Phase Equilibria 392
(April 25, 2015): 104-26. doi:10.1016/j.fluid.2015.02.001. |
2,236 | def __startOpenThread(self):
print 'call startOpenThread'
try:
if self.hasActiveDatasetToCommit:
if self.__sendCommand('dataset commit active')[0] != 'Done':
raise Exception()
else:
self.hasActiveDatasetToCommit = False
if self.isPowerDown:
if self._addressfilterMode == 'whitelist':
if self.__setAddressfilterMode('whitelist'):
for addr in self._addressfilterSet:
self.addAllowMAC(addr)
elif self._addressfilterMode == 'blacklist':
if self.__setAddressfilterMode('blacklist'):
for addr in self._addressfilterSet:
self.addBlockedMAC(addr)
if self.deviceRole in [Thread_Device_Role.Leader, Thread_Device_Role.Router, Thread_Device_Role.REED]:
self.__setRouterSelectionJitter(1)
if self.__sendCommand('ifconfig up')[0] == 'Done':
if self.__sendCommand('thread start')[0] == 'Done':
self.isPowerDown = False
return True
else:
return False
except Exception, e:
ModuleHelper.WriteIntoDebugLogger("startOpenThread() Error: " + str(e)) | start OpenThread stack
Returns:
True: successful to start OpenThread stack and thread interface up
False: fail to start OpenThread stack |
2,237 | def _close_stdout_stderr_streams(self):
if self._stdout_tee.tee_file is not None:
self._stdout_tee.tee_file.close()
if self._stderr_tee.tee_file is not None:
self._stderr_tee.tee_file.close()
self._stdout_tee.close_join()
self._stderr_tee.close_join()
if self._cloud:
self._stdout_stream.close()
self._stderr_stream.close()
self._output_log.f.close()
self._output_log = None | Close output-capturing stuff. This also flushes anything left in
the buffers. |
2,238 | def expected_log_joint_probability(self):
from pyslds.util import expected_hmm_logprob
elp = expected_hmm_logprob(
self.pi_0, self.trans_matrix,
(self.expected_states, self.expected_transcounts, self._normalizer))
elp += np.sum(self.expected_states * self.vbem_aBl)
return elp | Compute E_{q(z) q(x)} [log p(z) + log p(x | z) + log p(y | x, z)] |
2,239 | def _run_configure_script(self, script):
_, tmpFile = tempfile.mkstemp()
with open(tmpFile, 'w') as f:
f.write(script)
try:
ssh = self._get_ssh_client(
self.host,
"ubuntu",
self.private_key_path,
)
sftp = paramiko.SFTPClient.from_transport(ssh.get_transport())
sftp.put(
tmpFile,
tmpFile,
)
stdout, stderr = self._run_command(
ssh,
"sudo /bin/bash {}".format(tmpFile),
)
except paramiko.ssh_exception.AuthenticationException as e:
raise e
finally:
os.remove(tmpFile)
ssh.close() | Run the script to install the Juju agent on the target machine.
:param str script: The script returned by the ProvisioningScript API
:raises: :class:`paramiko.ssh_exception.AuthenticationException`
if the upload fails |
2,240 | def run(suite, stream, args, testing=False):
if not issubclass(GreenStream, type(stream)):
stream = GreenStream(stream, disable_windows=args.disable_windows,
disable_unidecode=args.disable_unidecode)
result = GreenTestResult(args, stream)
if not msg:
break
else:
result.startTest(msg)
proto_test_result = queue.get()
result.addProtoTestResult(proto_test_result)
if result.shouldStop:
abort = True
break
if abort:
break
pool.close()
pool.join()
result.stopTestRun()
removeResult(result)
return result | Run the given test case or test suite with the specified arguments.
Any args.stream passed in will be wrapped in a GreenStream |
2,241 | def data(self, data, part=False, dataset=''):
imgs = self.imgtype.convert(data)
for channel, data in zip(self.imgtype.channels, imgs):
key = dataset + channel
data = self.scanner(data, part)
if isinstance(self.parser, LevelParser):
self.storage.add_links(self.parser(data, part, key))
else:
for level, level_data in enumerate(data):
level_key = key + level_dataset(level)
level_data = self.parser(level_data, part, level_key)
self.storage.add_links(level_data) | Parameters
----------
data : `PIL.Image`
Image to parse.
part : `bool`, optional
True if data is partial (default: `False`).
dataset : `str`, optional
Dataset key prefix (default: ''). |
2,242 | def get_object(self, subject=None, predicate=None):
results = self.rdf.objects(subject, predicate)
as_list = list(results)
if not as_list:
return None
return as_list[0] | Eliminates some of the glue code for searching RDF. Pass
in a URIRef object (generated by the `uri` function above) or
a BNode object (returned by this function) for either of the
parameters. |
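A small rdflib sketch of the pattern this helper wraps; a standalone `Graph` stands in for `self.rdf` here:

```python
from rdflib import Graph, URIRef

g = Graph()
s = URIRef('http://example.org/book')
p = URIRef('http://purl.org/dc/terms/title')
g.add((s, p, URIRef('http://example.org/some-title')))

# Equivalent to get_object(subject=s, predicate=p): first match or None.
matches = list(g.objects(s, p))
first = matches[0] if matches else None
```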
2,243 | def gcs_get_url(url,
altexts=None,
client=None,
service_account_json=None,
raiseonfail=False):
bucket_item = url.replace('gs://', '')
bucket_item = bucket_item.split('/')
bucket = bucket_item[0]
filekey = '/'.join(bucket_item[1:])
return gcs_get_file(bucket,
filekey,
bucket_item[-1],
altexts=altexts,
client=client,
service_account_json=service_account_json,
raiseonfail=raiseonfail) | This gets a single file from a Google Cloud Storage bucket.
This uses the gs:// URL instead of a bucket name and key.
Parameters
----------
url : str
GCS URL to download. This should begin with 'gs://'.
altexts : None or list of str
If not None, this is a list of alternate extensions to try for the file
other than the one provided in `filename`. For example, to get anything
that's an .sqlite where .sqlite.gz is expected, use altexts=[''] to
strip the .gz.
client : google.cloud.storage.Client instance
The instance of the Client to use to perform the download operation. If
this is None, a new Client will be used. If this is None and
`service_account_json` points to a downloaded JSON file with GCS
credentials, a new Client with the provided credentials will be used. If
this is not None, the existing Client instance will be used.
service_account_json : str
Path to a downloaded GCS credentials JSON file.
raiseonfail : bool
If True, will re-raise whatever Exception caused the operation to fail
and break out immediately.
Returns
-------
str
Path to the downloaded filename or None if the download was
unsuccessful. |
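The URL handling above reduces to a plain split; for a hypothetical URL the bucket/key extraction works out as:

```python
url = 'gs://my-bucket/lightcurves/object-123.sqlite.gz'
bucket_item = url.replace('gs://', '').split('/')
bucket = bucket_item[0]              # 'my-bucket'
filekey = '/'.join(bucket_item[1:])  # 'lightcurves/object-123.sqlite.gz'
local_name = bucket_item[-1]         # 'object-123.sqlite.gz'
```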
2,244 | def prime(self):
for d in self.definitions.values():
self.defined[d.name] = d.default
return self | Prime the stored values based on default values
found in property definitions.
@return: self
@rtype: L{Properties} |
2,245 | def _verify_inputs(inputs, channel_index, data_format):
input_shape = tuple(inputs.get_shape().as_list())
if len(input_shape) != len(data_format):
raise base.IncompatibleShapeError((
"Input Tensor must have rank {} corresponding to "
"data_format {}, but instead was {} of rank {}.").format(
len(data_format), data_format, input_shape, len(input_shape)))
if not (tf.float16.is_compatible_with(inputs.dtype) or
tf.bfloat16.is_compatible_with(inputs.dtype) or
tf.float32.is_compatible_with(inputs.dtype)):
raise TypeError(
"Input must have dtype tf.float16, tf.bfloat16 or tf.float32, "
"but dtype was {}".format(inputs.dtype))
input_channels = input_shape[channel_index]
if input_channels is None:
raise base.UnderspecifiedError(
"Number of input channels must be known at module build time") | Verifies `inputs` is semantically correct.
Args:
inputs: An input tensor provided by the user.
channel_index: The index of the channel dimension.
data_format: The format of the data in `inputs`.
Raises:
base.IncompatibleShapeError: If the shape of `inputs` doesn't match
`data_format`.
base.UnderspecifiedError: If the channel dimension of `inputs` isn't
defined.
TypeError: If input Tensor dtype is not compatible with either
`tf.float16`, `tf.bfloat16` or `tf.float32`. |
2,246 | async def get_protocol_version(self):
value = await self.core.get_protocol_version()
if value:
reply = json.dumps({"method": "protocol_version_reply", "params": value})
else:
reply = json.dumps({"method": "protocol_version_reply", "params": "Unknown"})
await self.websocket.send(reply) | This method retrieves the Firmata protocol version.
JSON command: {"method": "get_protocol_version", "params": ["null"]}
:returns: {"method": "protocol_version_reply", "params": [PROTOCOL_VERSION]} |
2,247 | def fixed_inputs(model, non_fixed_inputs, fix_routine='median', as_list=True, X_all=False):
from ...inference.latent_function_inference.posterior import VariationalPosterior
f_inputs = []
if hasattr(model, 'has_uncertain_inputs') and model.has_uncertain_inputs():
X = model.X.mean.values.copy()
elif isinstance(model.X, VariationalPosterior):
X = model.X.values.copy()
else:
if X_all:
X = model.X_all.copy()
else:
X = model.X.copy()
for i in range(X.shape[1]):
if i not in non_fixed_inputs:
if fix_routine == 'mean':
f_inputs.append( (i, np.mean(X[:,i])) )
elif fix_routine == 'median':
f_inputs.append( (i, np.median(X[:,i])) )
else:
f_inputs.append( (i, 0) )
if not as_list:
X[:,i] = f_inputs[-1][1]
if as_list:
return f_inputs
else:
return X | Convenience function for returning back fixed_inputs where the other inputs
are fixed using fix_routine
:param model: model
:type model: Model
:param non_fixed_inputs: dimensions of non fixed inputs
:type non_fixed_inputs: list
:param fix_routine: fixing routine to use, 'mean', 'median', 'zero'
:type fix_routine: string
:param as_list: if true, will return a list of tuples with (dimension, fixed_val) otherwise it will create the corresponding X matrix
:type as_list: boolean |
2,248 | def refreshButtons(self):
last = self._last
first = self._first
joiner = self._containerWidget.currentJoiner()
if first:
self.uiJoinSBTN.setActionTexts(['AND', 'OR'])
elif joiner == QueryCompound.Op.And:
self.uiJoinSBTN.setActionTexts(['AND'])
else:
self.uiJoinSBTN.setActionTexts(['OR'])
if last:
self.uiJoinSBTN.setCurrentAction(None)
else:
act = self.uiJoinSBTN.findAction(QueryCompound.Op[joiner].upper())
self.uiJoinSBTN.setCurrentAction(act)
enable = QueryCompound.typecheck(self._query) or self.isChecked()
self.uiEnterBTN.setEnabled(enable) | Refreshes the buttons for building this sql query. |
2,249 | def display(self):
w, h = self.session.window_size()
return Display(w*self.scale, h*self.scale) | Get screen width and height |
2,250 | def expQt(self, t):
eLambdaT = np.diag(self._exp_lt(t))
Qs = self.v.dot(eLambdaT.dot(self.v_inv))
return np.maximum(0,Qs) | Parameters
----------
t : float
Time to propagate
Returns
--------
expQt : numpy.array
Matrix exponential exp(Qt) |
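The method relies on a precomputed eigendecomposition (`self.v`, `self.v_inv`, `self._exp_lt`); a self-contained numpy sketch of the same identity, exp(Qt) = V·e^{Λt}·V⁻¹, for a toy rate matrix:

```python
import numpy as np
from scipy.linalg import expm

Q = np.array([[-1.0, 1.0],
              [0.5, -0.5]])   # toy rate matrix (rows sum to zero)
t = 0.3
w, V = np.linalg.eig(Q)
Qt = V @ np.diag(np.exp(w * t)) @ np.linalg.inv(V)
assert np.allclose(Qt.real, expm(Q * t))
```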
2,251 | def codigo_ibge_uf(sigla):
idx = [s for s, i, n, r in UNIDADES_FEDERACAO].index(sigla)
return UNIDADES_FEDERACAO[idx][_UF_CODIGO_IBGE] | Retorna o código do IBGE para a UF informada. |
2,252 | def ColorLuminance(color):
r = int(color[0:2], 16)
g = int(color[2:4], 16)
b = int(color[4:6], 16)
return (299*r + 587*g + 114*b) / 1000.0 | Compute the brightness of an sRGB color using the formula from
http://www.w3.org/TR/2000/WD-AERT-20000426#color-contrast.
Args:
color: a string of 6 hex digits in the format verified by IsValidHexColor().
Returns:
A floating-point number between 0.0 (black) and 255.0 (white). |
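For example, the medium blue `6699cc` (r=102, g=153, b=204) works out to (299·102 + 587·153 + 114·204) / 1000 = 143.565:

```python
>>> ColorLuminance('6699cc')
143.565
>>> ColorLuminance('ffffff')
255.0
```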
2,253 | def entity_to_protobuf(entity):
entity_pb = entity_pb2.Entity()
if entity.key is not None:
key_pb = entity.key.to_protobuf()
entity_pb.key.CopyFrom(key_pb)
for name, value in entity.items():
value_is_list = isinstance(value, list)
value_pb = _new_value_pb(entity_pb, name)
_set_protobuf_value(value_pb, value)
if name in entity.exclude_from_indexes:
if not value_is_list:
value_pb.exclude_from_indexes = True
for sub_value in value_pb.array_value.values:
sub_value.exclude_from_indexes = True
_set_pb_meaning_from_entity(
entity, name, value, value_pb, is_list=value_is_list
)
return entity_pb | Converts an entity into a protobuf.
:type entity: :class:`google.cloud.datastore.entity.Entity`
:param entity: The entity to be turned into a protobuf.
:rtype: :class:`.entity_pb2.Entity`
:returns: The protobuf representing the entity. |
2,254 | def connect(self, factory):
try:
factory = self._factories[factory]
except KeyError:
raise NoSuchFactory()
remote = self.getProtocol()
addr = remote.transport.getPeer()
proto = factory.buildProtocol(addr)
if proto is None:
raise ConnectionRefused()
identifier = uuid4().hex
transport = MultiplexedTransport(identifier, remote)
proto.makeConnection(transport)
self._protocols[identifier] = proto
return {"connection": identifier} | Attempts to connect using a given factory.
This will find the requested factory and use it to build a
protocol as if the AMP protocol's peer was making the
connection. It will create a transport for the protocol and
connect it immediately. It will then store the protocol under
a unique identifier, and return that identifier. |
2,255 | def error_redirect(self, errormsg=, errorlog=):
from django.shortcuts import redirect
self.lti_errormsg = errormsg
self.lti_errorlog = errorlog
return redirect(self.build_return_url()) | Shortcut for redirecting Django view to LTI Consumer with errors |
2,256 | def add_eval(self, agent, e, fr=None):
self._evals[agent.name] = e
self._framings[agent.name] = fr | Add or change agent's evaluation of the artifact with given framing
information.
:param agent: Name of the agent which did the evaluation.
:param float e: Evaluation for the artifact.
:param object fr: Framing information for the evaluation. |
2,257 | def XYZ_to_galcenrect(X,Y,Z,Xsun=1.,Zsun=0.,_extra_rot=True):
if _extra_rot:
X,Y,Z= nu.dot(galcen_extra_rot,nu.array([X,Y,Z]))
dgc= nu.sqrt(Xsun**2.+Zsun**2.)
costheta, sintheta= Xsun/dgc, Zsun/dgc
return nu.dot(nu.array([[costheta,0.,-sintheta],
[0.,1.,0.],
[sintheta,0.,costheta]]),
nu.array([-X+dgc,Y,nu.sign(Xsun)*Z])).T | NAME:
XYZ_to_galcenrect
PURPOSE:
transform XYZ coordinates (wrt Sun) to rectangular Galactocentric coordinates
INPUT:
X - X
Y - Y
Z - Z
Xsun - cylindrical distance to the GC
Zsun - Sun's height above the midplane
_extra_rot= (True) if True, perform an extra tiny rotation to align the Galactocentric coordinate frame with astropy's definition
OUTPUT:
(Xg, Yg, Zg)
HISTORY:
2010-09-24 - Written - Bovy (NYU)
2016-05-12 - Edited to properly take into account the Sun's vertical position; dropped Ysun keyword - Bovy (UofT)
2018-04-18 - Tweaked to be consistent with astropy's Galactocentric frame - Bovy (UofT) |
2,258 | def value_splitter(self, reference, prop, value, mode):
items = []
if mode == 'json-list':
try:
items = json.loads(value)
except json.JSONDecodeError as e:
print(value)
msg = ("Reference '{ref}' raised JSON decoder error when "
"splitting values from '{prop}': {err}")
raise SerializerError(msg.format(ref=reference, prop=prop,
err=e))
else:
if len(value) > 0:
items = value.split(" ")
return items | Split a string into a list items.
Default behavior is to split on white spaces.
Arguments:
reference (string): Reference name used when raising possible
error.
prop (string): Property name used when raising possible error.
value (string): Property value to split.
mode (string): Splitter mode. Default should come from
``ManifestSerializer._DEFAULT_SPLITTER``.
Available splitter are:
* ``white-space``: Simply split a string on white spaces;
* ``json-list``: Assume the string is a JSON list to parse;
Returns:
list: |
2,259 | def t_string_NGRAPH(t):
r"\\[ .:]"
global __STRING
P = {: 0, ".: ": 1, : 4, : 5}
__STRING += chr(128 + P[t.value[1]] + N[t.value[2]]) | r"\\[ '.:][ '.:] |
2,260 | def writeImageToFile(self, filename, _format="PNG"):
filename = self.device.substituteDeviceTemplate(filename)
if not os.path.isabs(filename):
raise ValueError("writeImageToFile expects an absolute path (fielname=)" % filename)
if os.path.isdir(filename):
filename = os.path.join(filename, self.variableNameFromId() + + _format.lower())
if DEBUG:
print >> sys.stderr, "writeImageToFile: saving image to in %s format" % (filename, _format)
((l, t), (r, b)) = self.getCoords()
box = (l, t, r, b)
if DEBUG:
print >> sys.stderr, "writeImageToFile: cropping", box, " reconnect=", self.device.reconnect
if self.uiAutomatorHelper:
if DEBUG_UI_AUTOMATOR_HELPER:
print >> sys.stderr, "Taking screenshot using UiAutomatorHelper"
received = self.uiAutomatorHelper.takeScreenshot()
stream = StringIO.StringIO(received)
try:
from PIL import Image
image = Image.open(stream)
except ImportError as ex:
self.pilNotInstalledWarning()
sys.exit(1)
except IOError, ex:
print >> sys.stderr, ex
print repr(stream)
sys.exit(1)
else:
image = self.device.takeSnapshot(reconnect=self.device.reconnect)
image.crop(box).save(filename, _format) | Write the View image to the specified filename in the specified format.
@type filename: str
@param filename: Absolute path and optional filename receiving the image. If this points to
a directory, then the filename is determined by this View unique ID and
format extension.
@type _format: str
@param _format: Image format (default format is PNG) |
2,261 | def _num_players(self):
self._player_num = 0
self._computer_num = 0
for player in self._header.scenario.game_settings.player_info:
if player.type == 'human':
self._player_num += 1
elif player.type == 'computer':
self._computer_num += 1 | Compute number of players, both human and computer. |
2,262 | def _iter_path_collection(paths, path_transforms, offsets, styles):
N = max(len(paths), len(offsets))
if not path_transforms:
path_transforms = [np.eye(3)]
edgecolor = styles['edgecolor']
if np.size(edgecolor) == 0:
edgecolor = ['none']
facecolor = styles['facecolor']
if np.size(facecolor) == 0:
facecolor = ['none']
elements = [paths, path_transforms, offsets,
edgecolor, styles['linewidth'], facecolor]
it = itertools
return it.islice(py3k.zip(*py3k.map(it.cycle, elements)), N) | Build an iterator over the elements of the path collection |
2,263 | def ecn(ns=None, cn=None, di=None):
return CONN.EnumerateClassNames(ns,
ClassName=cn,
DeepInheritance=di) | This function is a wrapper for
:meth:`~pywbem.WBEMConnection.EnumerateClassNames`.
Enumerate the names of subclasses of a class, or of the top-level classes
in a namespace.
Parameters:
ns (:term:`string`):
Name of the CIM namespace to be used (case independent).
If `None`, defaults to the namespace of the `cn` parameter if
specified as a `CIMClassName`, or to the default namespace of the
connection.
cn (:term:`string` or :class:`~pywbem.CIMClassName`):
Name of the class whose subclasses are to be enumerated (case
independent).
`None` will enumerate the top-level classes.
If specified as a `CIMClassName` object, its `host` attribute will be
ignored.
di (:class:`py:bool`):
DeepInheritance flag: Include also indirect subclasses.
`None` will cause the server default of `False` to be used.
Returns:
list of :term:`unicode string`:
The enumerated class names. |
2,264 | def index(self, index, doc_type, body, id=None, **query_params):
self._es_parser.is_not_empty_params(index, doc_type, body)
method = HttpMethod.POST if id in NULL_VALUES else HttpMethod.PUT
path = self._es_parser.make_path(index, doc_type, id)
result = yield self._perform_request(method, path, body, params=query_params)
returnValue(result) | Adds or updates a typed JSON document in a specific index, making it searchable.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html>`_
:param index: The name of the index
:param doc_type: The type of the document
:param body: The document
:param id: Document ID
:arg consistency: Explicit write consistency setting for the operation,
valid choices are: 'one', 'quorum', 'all'
:arg op_type: Explicit operation type, default 'index', valid choices
are: 'index', 'create'
:arg parent: ID of the parent document
:arg refresh: Refresh the index after performing the operation
:arg routing: Specific routing value
:arg timeout: Explicit operation timeout
:arg timestamp: Explicit timestamp for the document
:arg ttl: Expiration time for the document
:arg version: Explicit version number for concurrency control
:arg version_type: Specific version type, valid choices are: 'internal',
'external', 'external_gte', 'force' |
2,265 | def encrypted_gradient(self, sum_to=None):
gradient = self.compute_gradient()
encrypted_gradient = encrypt_vector(self.pubkey, gradient)
if sum_to is not None:
return sum_encrypted_vectors(sum_to, encrypted_gradient)
else:
return encrypted_gradient | Compute and encrypt gradient.
When `sum_to` is given, sum the encrypted gradient to it, assumed
to be another vector of the same size |
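A hedged sketch of the two helpers this method assumes, reconstructed as plain element-wise operations on the `phe` (python-paillier) API; the helper names mirror the calls above but are not taken from the source:

```python
from phe import paillier

def encrypt_vector(pubkey, vec):
    return [pubkey.encrypt(float(v)) for v in vec]

def sum_encrypted_vectors(a, b):
    # Paillier is additively homomorphic: ciphertexts add element-wise.
    return [x + y for x, y in zip(a, b)]

pubkey, privkey = paillier.generate_paillier_keypair(n_length=1024)
enc = encrypt_vector(pubkey, [0.1, -0.2])
total = sum_encrypted_vectors(enc, encrypt_vector(pubkey, [0.3, 0.4]))
print([round(privkey.decrypt(c), 6) for c in total])  # [0.4, 0.2]
```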
2,266 | def validate(self, ip, **kwargs):
if ip is None:
return False
ip = stringify(ip)
if self.IPV4_REGEX.match(ip):
try:
socket.inet_pton(socket.AF_INET, ip)
return True
except AttributeError:
try:
socket.inet_aton(ip)
except socket.error:
return False
return ip.count('.') == 3
except socket.error:
return False
if self.IPV6_REGEX.match(ip):
try:
socket.inet_pton(socket.AF_INET6, ip)
except socket.error:
return False
return True | Check to see if this is a valid ip address. |
2,267 | def decorate_with_checker(func: CallableT) -> CallableT:
assert not hasattr(func, "__preconditions__"), \
"Expected func to have no list of preconditions (there should be only a single contract checker per function)."
assert not hasattr(func, "__postconditions__"), \
"Expected func to have no list of postconditions (there should be only a single contract checker per function)."
assert not hasattr(func, "__postcondition_snapshots__"), \
"Expected func to have no list of postcondition snapshots (there should be only a single contract checker " \
"per function)."
sign = inspect.signature(func)
param_names = list(sign.parameters.keys())
kwdefaults = dict()
for param in sign.parameters.values():
if param.default != inspect.Parameter.empty:
kwdefaults[param.name] = param.default
def wrapper(*args, **kwargs):
preconditions = getattr(wrapper, "__preconditions__")
snapshots = getattr(wrapper, "__postcondition_snapshots__")
postconditions = getattr(wrapper, "__postconditions__")
resolved_kwargs = _kwargs_from_call(param_names=param_names, kwdefaults=kwdefaults, args=args, kwargs=kwargs)
if postconditions:
if 'result' in resolved_kwargs:
raise TypeError("Unexpected argument 'result' in a function decorated with postconditions.")
if 'OLD' in resolved_kwargs:
raise TypeError("Unexpected argument 'OLD' in a function decorated with postconditions.")
violation_err = None
for group in preconditions:
violation_err = None
try:
for contract in group:
_assert_precondition(contract=contract, resolved_kwargs=resolved_kwargs)
break
except ViolationError as err:
violation_err = err
if violation_err is not None:
raise violation_err
if postconditions:
old_as_mapping = dict()
for snap in snapshots:
assert snap.name not in old_as_mapping, "Snapshots with the conflicting name: {}".format(snap.name)
old_as_mapping[snap.name] = _capture_snapshot(a_snapshot=snap, resolved_kwargs=resolved_kwargs)
resolved_kwargs['OLD'] = _Old(mapping=old_as_mapping)
result = func(*args, **kwargs)
if postconditions:
resolved_kwargs['result'] = result
for contract in postconditions:
_assert_postcondition(contract=contract, resolved_kwargs=resolved_kwargs)
return result
functools.update_wrapper(wrapper=wrapper, wrapped=func)
assert not hasattr(wrapper, "__preconditions__"), "Expected no preconditions set on a pristine contract checker."
assert not hasattr(wrapper, "__postcondition_snapshots__"), \
"Expected no postcondition snapshots set on a pristine contract checker."
assert not hasattr(wrapper, "__postconditions__"), "Expected no postconditions set on a pristine contract checker."
setattr(wrapper, "__preconditions__", [])
setattr(wrapper, "__postcondition_snapshots__", [])
setattr(wrapper, "__postconditions__", [])
return wrapper | Decorate the function with a checker that verifies the preconditions and postconditions. |
2,268 | def changeSubMenu(self,submenu):
if submenu not in self.submenus:
raise ValueError("Submenu %s does not exist!"%submenu)
elif submenu == self.activeSubMenu:
return
old = self.activeSubMenu
self.activeSubMenu = submenu
if old is not None:
self.submenus[old].on_exit(submenu)
self.submenus[old].doAction("exit")
self.submenu.on_enter(old)
self.submenu.doAction("enter") | Changes the submenu that is displayed.
:raises ValueError: if the name was not previously registered |
2,269 | def add_model_name_to_payload(cls, payload):
if not cls.MODEL_NAME in payload:
payload = {cls.MODEL_NAME: payload}
return payload | Checks whether the model name in question is in the payload. If not, the entire payload
is set as a value of a key by the name of the model. This method is useful when some
server-side Rails API calls expect the parameters to include the parameterized model name.
For example, server-side endpoints that handle the updating of a biosample record or the
creation of a new biosample record will expect the payload to be of the form::
{ "biosample": {
"name": "new biosample",
"donor": 3,
...
}
}
Args:
payload: `dict`. The data to send in an HTTP request.
Returns:
`dict`. |
2,270 | def add(path=None, force=False, quiet=False):
option = '-f' if force else ''
return run('git add %s %s' % (option, path or '.'), quiet=quiet) | Add that path to git's staging area (default current dir)
so that it will be included in next commit |
2,271 | def algorithm(G, method_name, **kwargs):
warnings.warn("To be removed in 0.8. Use GraphCollection.analyze instead.",
DeprecationWarning)
return G.analyze(method_name, **kwargs) | Apply a ``method`` from NetworkX to all :ref:`networkx.Graph <networkx:graph>` objects in the
:class:`.GraphCollection` ``G``.
For options, see the `list of algorithms
<http://networkx.github.io/documentation/networkx-1.9/reference/algorithms.html>`_
in the NetworkX documentation. Not all of these have been tested.
Parameters
----------
G : :class:`.GraphCollection`
The :class:`.GraphCollection` to analyze. The specified method will be
applied to each graph in ``G``.
method_name : string
Name of a method in NetworkX to execute on graph collection.
**kwargs
A list of keyword arguments that should correspond to the parameters
of the specified method.
Returns
-------
results : dict
Indexed by element (node or edge) and graph index (e.g. ``date``).
Raises
------
ValueError
If no such method exists.
Examples
--------
*Betweenness centrality:* (``G`` is a :class:`.GraphCollection`\)
.. code-block:: python
>>> from tethne.analyze import collection
>>> BC = collection.algorithm(G, 'betweenness_centrality')
>>> print BC[0]
{1999: 0.010101651117889644,
2000: 0.0008689093723107329,
2001: 0.010504898852426189,
2002: 0.009338654511194512,
2003: 0.007519105636349891} |
2,272 | def expect(self, searcher, timeout=3):
timeout = float(timeout)
end = time.time() + timeout
match = searcher.search(self._history[self._start:])
while not match:
incoming = self._stream_adapter.poll(end - time.time())
self.input_callback(incoming)
self._history += incoming
match = searcher.search(self._history[self._start:])
trimlength = len(self._history) - self._window
if trimlength > 0:
self._start -= trimlength
self._history = self._history[trimlength:]
self._start += match.end
if (self._start < 0):
self._start = 0
return match | Wait for input matching *searcher*
Waits for input matching *searcher* for up to *timeout* seconds. If
a match is found, the match result is returned (the specific type of
returned result depends on the :class:`Searcher` type). If no match is
found within *timeout* seconds, raise an :class:`ExpectTimeout`
exception.
:param Searcher searcher: :class:`Searcher` to apply to underlying
stream.
:param float timeout: Timeout in seconds. |
2,273 | def build_structure(self, check_name, groups, source_name, limit=1):
aggregates = {}
aggregates['scored_points'] = 0
aggregates['possible_points'] = 0
high_priorities = []
medium_priorities = []
low_priorities = []
all_priorities = []
aggregates['high_count'] = 0
aggregates['medium_count'] = 0
aggregates['low_count'] = 0
def named_function(result):
for child in result.children:
all_priorities.append(child)
named_function(child)
for res in groups:
if res.weight < limit:
continue
if res.value[1] == 0:
continue
aggregates['scored_points'] += res.value[0]
aggregates['possible_points'] += res.value[1]
if res.weight == 3:
high_priorities.append(res)
if res.value[0] < res.value[1]:
aggregates['high_count'] += 1
elif res.weight == 2:
medium_priorities.append(res)
if res.value[0] < res.value[1]:
aggregates['medium_count'] += 1
else:
low_priorities.append(res)
if res.value[0] < res.value[1]:
aggregates['low_count'] += 1
all_priorities.append(res)
return aggregates | Compiles the checks, results and scores into an aggregate structure which looks like:
{
"scored_points": 396,
"low_count": 0,
"possible_points": 400,
"testname": "gliderdac",
"medium_count": 2,
"source_name": ".//rutgers/ru01-20140120T1444/ru01-20140120T1649.nc",
"high_count": 0,
"all_priorities" : [...],
"high_priorities": [...],
"medium_priorities" : [...],
"low_priorities" : [...]
}
@param check_name The test which was run
@param groups List of results from compliance checker
@param source_name Source of the dataset, used for title |
2,274 | def delete_collection_namespaced_replica_set(self, namespace, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_collection_namespaced_replica_set_with_http_info(namespace, **kwargs)
else:
(data) = self.delete_collection_namespaced_replica_set_with_http_info(namespace, **kwargs)
return data | delete_collection_namespaced_replica_set # noqa: E501
delete collection of ReplicaSet # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_replica_set(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread. |
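A minimal usage sketch for the call documented above, assuming a reachable cluster and a local kubeconfig; the namespace and label selector are illustrative.

from kubernetes import client, config

config.load_kube_config()
apps_v1 = client.AppsV1Api()
# Delete every ReplicaSet in 'default' matching the (hypothetical) selector.
status = apps_v1.delete_collection_namespaced_replica_set(
    namespace='default', label_selector='app=web')
print(status.status)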
2,275 | def make_bcbiornaseq_object(data):
if "bcbiornaseq" not in dd.get_tools_on(data):
return data
upload_dir = tz.get_in(("upload", "dir"), data)
report_dir = os.path.join(upload_dir, "bcbioRNASeq")
safe_makedir(report_dir)
organism = dd.get_bcbiornaseq(data).get("organism", None)
groups = dd.get_bcbiornaseq(data).get("interesting_groups", None)
loadstring = create_load_string(upload_dir, groups, organism)
r_file = os.path.join(report_dir, "load_bcbioRNAseq.R")
with file_transaction(r_file) as tmp_file:
memoize_write_file(loadstring, tmp_file)
rcmd = Rscript_cmd()
with chdir(report_dir):
do.run([rcmd, "--no-environ", r_file], "Loading bcbioRNASeq object.")
make_quality_report(data)
return data | load the initial bcb.rda object using bcbioRNASeq |
2,276 | def rollback(self, label=None, plane='sdr'):  # default reconstructed from the docstring ("sdr or admin")
begin = time.time()
rb_label = self._chain.target_device.rollback(label=label, plane=plane)
elapsed = time.time() - begin
    if rb_label:  # assumed fix: success is signaled by the returned label, not by the input arg
        self.emit_message("Configuration rollback lasted {:.0f}s. Label: {}".format(elapsed, rb_label),
                          log_level=logging.INFO)
    else:
        self.emit_message("Configuration rollback failed.", log_level=logging.WARNING)
return rb_label | Rollback the configuration.
This method rolls back the configuration on the device.
Args:
label (text): The configuration label ID
plane: (text): sdr or admin
Returns:
A string with commit label or None |
2,277 | def resource_redirect(id):
resource = get_resource(id)
return redirect(resource.url.strip()) if resource else abort(404) | Redirect to the latest version of a resource given its identifier. |
2,278 | def submission_storage_path(instance, filename):
    # The joined literals below were stripped during extraction; the 'submissions'
    # prefix and the '/' and '-' separators are assumptions.
    string = '/'.join(['submissions', instance.submission_user.user_nick, str(instance.submission_question.question_level), str(instance.submission_question.question_level_id)])
    string += '/' + datetime.datetime.now().strftime("%I:%M%p-%m-%d-%Y")
    string += '-' + filename
    return string | Build the storage path for an uploaded submission file. |
2,279 | def items(self):
for values_str in self._v2b_dict:
element_value = self._v2b_dict[values_str]
yield element_value, values_str | Generator that iterates through the items of the value mapping. The
items are the array entries of the `Values` and `ValueMap` qualifiers,
and they are iterated in the order specified in the arrays.
If the `ValueMap` qualifier is not specified, the default of consecutive
integers starting at 0 is used as a default, consistent with
:term:`DSP0004`.
Each iterated item is a tuple of integer value(s) representing the
`ValueMap` array entry, and the corresponding `Values` string.
Any integer value in the iterated items is represented as the CIM type
of the element (e.g. :class:`~pywbem.Uint16`).
If the `Values` string corresponds to a single element value, the
first tuple item is that single integer value.
If the `Values` string corresponds to a value range (e.g. "1.." or
"..2" or "3..4"), that value range is returned as a tuple with two items
that are the lowest and the highest value of the range. That is the
case also when the value range is open on the left side or right side.
If the `Values` string corresponds to the `unclaimed` indicator "..",
the first tuple item is `None`.
Returns:
:term:`iterator` for tuple of integer value(s) and `Values` string. |
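A hedged sketch of how this generator is typically consumed; ValueMapping.for_property is part of the public pywbem API, but the class and property names are illustrative.

import pywbem

conn = pywbem.WBEMConnection('http://localhost')
vm = pywbem.ValueMapping.for_property(
    conn, 'root/cimv2', 'CIM_ManagedSystemElement', 'HealthState')
for element_value, values_str in vm.items():
    print(element_value, values_str)   # e.g. Uint16(5), 'OK'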
2,280 | def save(self, logmessage=None):
    if self.as_of_date is not None:
        raise RuntimeError('Historical datastream versions cannot be saved')  # message reconstructed; original stripped
    # NOTE: the string keys below were stripped during extraction and are
    # restored from the Fedora REST API datastream parameter names.
    save_opts = {}
    if self.info_modified:
        if self.label:
            save_opts['dsLabel'] = self.label
        if self.mimetype:
            save_opts['mimeType'] = self.mimetype
        if self.versionable is not None:
            save_opts['versionable'] = self.versionable
        if self.state:
            save_opts['dsState'] = self.state
        if self.format:
            save_opts['formatURI'] = self.format
        if self.checksum:
            if self.checksum_modified:
                save_opts['checksum'] = self.checksum
        if self.checksum_type:
            save_opts['checksumType'] = self.checksum_type
    if 'mimeType' not in save_opts:
        # if not explicitly set, pick up the mimetype from ds info or defaults
        if self._info is not None:
            save_opts['mimeType'] = self.mimetype
        else:
            save_opts['mimeType'] = self.defaults['mimetype']
    if self.ds_location is not None:
        save_opts['dsLocation'] = self.ds_location
    else:
        save_opts['content'] = self._raw_content()
    if self.exists:
        # if the datastream is not versioned, back it up so changes can be undone
        if not self.versionable:
            self._backup()
        r = self.obj.api.modifyDatastream(self.obj.pid, self.id,
                                          logMessage=logmessage, **save_opts)
        success = (r.status_code == requests.codes.ok)
    else:
        r = self.obj.api.addDatastream(self.obj.pid, self.id,
                                       controlGroup=self.defaults['control_group'],
                                       logMessage=logmessage, **save_opts)
        success = (r.status_code == requests.codes.created)
if success:
self.exists = True
self._content = None
self._content_modified = False
if success:
self.info_modified = False
self.checksum_modified = False
self.digest = self._content_digest()
self.ds_location = None
return success | Save datastream content and any changed datastream profile
information to Fedora.
:rtype: boolean for success |
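A hedged usage sketch (repository URL, credentials, and pid are placeholders; assumes an eulfedora-style DigitalObject with a DC datastream).

from eulfedora.server import Repository

repo = Repository('http://localhost:8080/fedora/', 'fedoraAdmin', 'password')
obj = repo.get_object('demo:1')
obj.dc.content.title = 'Updated title'
if obj.dc.save('corrected the title'):   # logmessage goes to the audit trail
    print('datastream saved')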
2,281 | def member_add(self, cluster_id, params):
cluster = self._storage[cluster_id]
    result = cluster.member_add(params.get('id', None), params.get('shardParams', {}))  # keys reconstructed (stripped in extraction); assumed from mongo-orchestration's sharded-cluster API
self._storage[cluster_id] = cluster
return result | add new member into configuration |
2,282 | def find_field(self, field=None, alias=None):
if alias:
field = alias
field = FieldFactory(field, table=self, alias=alias)
identifier = field.get_identifier()
for field in self.fields:
if field.get_identifier() == identifier:
return field
return None | Finds a field by name or alias.
:param field: string of the field name or alias, dict of {'alias': field}, or a Field instance
:type field: str or dict or Field
:returns: The field if it is found, otherwise None
:rtype: :class:`Field <querybuilder.fields.Field>` or None |
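A hedged sketch of looking up a field on a table; Query.from_table is the documented querybuilder entry point, though the `tables` attribute access is an assumption about its internals.

from querybuilder.query import Query

query = Query().from_table('auth_user', fields=['id', 'username'])
table = query.tables[0]                 # assumed internal attribute
field = table.find_field('username')    # -> Field instance
missing = table.find_field('no_such')   # -> None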
2,283 | def _array_slice(array, index):
if isinstance(index, slice):
start = index.start
stop = index.stop
if (start is not None and start < 0) or (
stop is not None and stop < 0
):
            raise ValueError('negative slicing not yet supported')  # message reconstructed; original stripped
step = index.step
if step is not None and step != 1:
            raise NotImplementedError('step can only be 1')  # message reconstructed; original stripped
op = ops.ArraySlice(array, start if start is not None else 0, stop)
else:
op = ops.ArrayIndex(array, index)
return op.to_expr() | Slice or index `array` at `index`.
Parameters
----------
index : int or ibis.expr.types.IntegerValue or slice
Returns
-------
sliced_array : ibis.expr.types.ValueExpr
If `index` is an ``int`` or :class:`~ibis.expr.types.IntegerValue` then
the return type is the element type of `array`. If `index` is a
``slice`` then the return type is the same type as the input. |
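A hedged sketch of the user-facing behavior this helper backs (table and column names are illustrative).

import ibis

t = ibis.table([('arr', 'array<int64>')], name='t')
first = t.arr[0]     # element expression: int64
head = t.arr[0:2]    # slice expression: same array type as the input
# t.arr[-1] or t.arr[::2] would raise, per the checks above.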
2,284 | def cxxRecordDecl(*args):
kinds = [
CursorKind.CLASS_DECL,
CursorKind.CLASS_TEMPLATE,
]
inner = [ PredMatcher(is_kind(k)) for k in kinds ]
return allOf(anyOf(*inner), *args) | Matches C++ class declarations.
>>> from glud import *
>>> config = '''
... class W;
... template<typename T> class X {};
... struct Y {};
... union Z {};
... '''
>>> m = cxxRecordDecl()
>>> for c in walk(m, parse_string(config).cursor):
... print(c.spelling)
W
X |
2,285 | def get(package_name, pypi_server="https://pypi.python.org/pypi/"):
if not pypi_server.endswith("/"):
pypi_server = pypi_server + "/"
response = requests.get("{0}{1}/json".format(pypi_server,
package_name))
if response.status_code >= 300:
raise HTTPError(status_code=response.status_code,
reason=response.reason)
    if hasattr(response.content, 'decode'):  # attribute name restored (stripped in extraction); decode() is called just below
return json2package(response.content.decode())
else:
return json2package(response.content) | Constructs a request to the PyPI server and returns a
:class:`yarg.package.Package`.
:param package_name: case sensitive name of the package on the PyPI server.
:param pypi_server: (option) URL to the PyPI server.
>>> import yarg
>>> package = yarg.get('yarg')
<Package yarg> |
2,286 | def handle_sap(q):
question_votes = votes = Answer.objects.filter(question=q)
users = q.get_users_voted()
num_users_votes = {u.id: votes.filter(user=u).count() for u in users}
user_scale = {u.id: (1 / num_users_votes[u.id]) for u in users}
choices = []
for c in q.choice_set.all().order_by("num"):
votes = question_votes.filter(choice=c)
vote_users = set([v.user for v in votes])
choice = {
"choice": c,
"votes": {
"total": {
"all": len(vote_users),
"all_percent": perc(len(vote_users), users.count()),
"male": fmt(sum([v.user.is_male * user_scale[v.user.id] for v in votes])),
"female": fmt(sum([v.user.is_female * user_scale[v.user.id] for v in votes]))
}
},
"users": [v.user for v in votes]
}
for yr in range(9, 14):
yr_votes = [v.user if v.user.grade and v.user.grade.number == yr else None for v in votes]
yr_votes = list(filter(None, yr_votes))
choice["votes"][yr] = {
"all": len(set(yr_votes)),
"male": fmt(sum([u.is_male * user_scale[u.id] for u in yr_votes])),
"female": fmt(sum([u.is_female * user_scale[u.id] for u in yr_votes])),
}
choices.append(choice)
votes = question_votes.filter(clear_vote=True)
clr_users = set([v.user for v in votes])
choice = {
"choice": "Clear vote",
"votes": {
"total": {
"all": len(clr_users),
"all_percent": perc(len(clr_users), users.count()),
"male": fmt(sum([v.user.is_male * user_scale[v.user.id] for v in votes])),
"female": fmt(sum([v.user.is_female * user_scale[v.user.id] for v in votes]))
}
},
"users": clr_users
}
for yr in range(9, 14):
yr_votes = [v.user if v.user.grade and v.user.grade.number == yr else None for v in votes]
yr_votes = list(filter(None, yr_votes))
choice["votes"][yr] = {
"all": len(yr_votes),
"male": fmt(sum([u.is_male * user_scale[u.id] for u in yr_votes])),
"female": fmt(sum([u.is_female * user_scale[u.id] for u in yr_votes]))
}
choices.append(choice)
choice = {
"choice": "Total",
"votes": {
"total": {
"all": users.count(),
"votes_all": question_votes.count(),
"all_percent": perc(users.count(), users.count()),
"male": users.filter(gender=True).count(),
"female": users.filter(gender__isnull=False, gender=False).count()
}
}
}
for yr in range(9, 14):
yr_votes = [u if u.grade and u.grade.number == yr else None for u in users]
yr_votes = list(filter(None, yr_votes))
choice["votes"][yr] = {
"all": len(set(yr_votes)),
"male": fmt(sum([u.is_male * user_scale[u.id] for u in yr_votes])),
"female": fmt(sum([u.is_female * user_scale[u.id] for u in yr_votes]))
}
choices.append(choice)
return {"question": q, "choices": choices, "user_scale": user_scale} | Clear vote |
2,287 | def get_available_ip6(self, id_network6):
if not is_valid_int_param(id_network6):
raise InvalidParameterError(
u)
url = + str(id_network6) + "/"
code, xml = self.submit(None, , url)
return self.response(code, xml) | Get a available IP in Network ipv6
:param id_network6: Network ipv6 identifier. Integer value and greater than zero.
:return: Dictionary with the following structure:
::
{'ip6': {'ip6': < available_ip6 >}}
:raise IpNotAvailableError: Network dont have available IP.
:raise NetworkIPv4NotFoundError: Network was not found.
:raise UserNotAuthorizedError: User dont have permission to get a available IP.
:raise InvalidParameterError: Network ipv6 identifier is none or invalid.
:raise XMLError: Networkapi failed to generate the XML response.
:raise DataBaseError: Networkapi failed to access the database. |
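A hedged calling sketch (endpoint and credentials are placeholders; the client class name follows the networkapi client layout).

from networkapiclient.Ip import Ip

ip_client = Ip('http://networkapi.example.com/', 'user', 'password')
resp = ip_client.get_available_ip6(42)   # 42 is an illustrative network id
print(resp['ip6']['ip6'])                # the available IPv6 address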
2,288 | def hasx(self, name, *args):
return lib.zargs_hasx(self._as_parameter_, name, *args) | Returns true if named parameter(s) was specified on command line |
2,289 | def Put(self, key, obj):
node = self._hash.pop(key, None)
if node:
self._age.Unlink(node)
node = Node(key=key, data=obj)
self._hash[key] = node
self._age.AppendNode(node)
self.Expire()
return key | Add the object to the cache. |
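A hedged usage sketch; the cache class name, constructor, and Get method are assumptions about the surrounding API.

cache = FastStore(max_size=100)        # assumed class/ctor
cache.Put('session:42', {'user': 'alice'})
node = cache.Get('session:42')         # assumed companion accessor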
2,290 | def create_socketpair(size=None):
parentfp, childfp = socket.socketpair()
parentfp.setsockopt(socket.SOL_SOCKET,
socket.SO_SNDBUF,
size or mitogen.core.CHUNK_SIZE)
childfp.setsockopt(socket.SOL_SOCKET,
socket.SO_RCVBUF,
size or mitogen.core.CHUNK_SIZE)
return parentfp, childfp | Create a :func:`socket.socketpair` to use for use as a child process's UNIX
stdio channels. As socket pairs are bidirectional, they are economical on
file descriptor usage as the same descriptor can be used for ``stdin`` and
``stdout``. As they are sockets their buffers are tunable, allowing large
buffers to be configured in order to improve throughput for file transfers
and reduce :class:`mitogen.core.Broker` IO loop iterations. |
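A hedged sketch showing one end of the pair doubling as a child's stdin and stdout ('cat' simply echoes what it reads).

import socket
import subprocess

parentfp, childfp = create_socketpair()
proc = subprocess.Popen(['cat'], stdin=childfp, stdout=childfp)
childfp.close()                        # parent keeps only its own end
parentfp.sendall(b'ping\n')
parentfp.shutdown(socket.SHUT_WR)      # signal EOF so cat flushes and exits
print(parentfp.recv(16))               # b'ping\n'
parentfp.close()
proc.wait()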
2,291 | def by_name(cls, session, name, **kwargs):
classifier = cls.first(session, where=(cls.name == name,))
        if not kwargs.get('create_if_not_exists', False):  # key reconstructed (stripped in extraction)
return classifier
if not classifier:
            splitted_names = [n.strip() for n in name.split(u'::')]  # '::' separator reconstructed from trove-classifier syntax
            classifiers = [u' :: '.join(splitted_names[:i + 1])
                           for i in range(len(splitted_names))]
parent_id = None
category = splitted_names[0]
for c in classifiers:
classifier = cls.first(session, where=(cls.name == c,))
if not classifier:
classifier = Classifier(name=c, parent_id=parent_id,
category=category)
session.add(classifier)
session.flush()
parent_id = classifier.id
return classifier | Get a classifier from a given name.
:param session: SQLAlchemy session
:type session: :class:`sqlalchemy.Session`
:param name: name of the classifier
    :type name: unicode
:return: classifier instance
:rtype: :class:`pyshop.models.Classifier` |
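A hedged sketch of resolving a trove-style classifier, creating any missing ancestors (session setup elided; the classifier name is illustrative).

classifier = Classifier.by_name(
    session, u'Topic :: Software Development :: Libraries',
    create_if_not_exists=True)
# Creates 'Topic', 'Topic :: Software Development', and the full name in turn,
# each linked to its parent, then returns the leaf classifier.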
2,292 | def get_point(
self, x: float = 0, y: float = 0, z: float = 0, w: float = 0
) -> float:
return float(lib.NoiseGetSample(self._tdl_noise_c, (x, y, z, w))) | Return the noise value at the (x, y, z, w) point.
Args:
x (float): The position on the 1st axis.
y (float): The position on the 2nd axis.
z (float): The position on the 3rd axis.
w (float): The position on the 4th axis. |
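A hedged sampling sketch; the Noise constructor shown is an assumption about the surrounding tdl-style API.

noise = Noise(dimensions=2)            # assumed ctor for the class this method lives on
value = noise.get_point(1.5, 2.5)      # float, typically in [-1.0, 1.0]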
2,293 | def _ReadIntegerDataTypeDefinition(
self, definitions_registry, definition_values, definition_name,
is_member=False):
definition_object = self._ReadFixedSizeDataTypeDefinition(
definitions_registry, definition_values,
data_types.IntegerDefinition, definition_name,
self._SUPPORTED_ATTRIBUTES_INTEGER, is_member=is_member,
supported_size_values=(1, 2, 4, 8))
    attributes = definition_values.get('attributes', None)  # keys and message below reconstructed (stripped in extraction)
    if attributes:
        format_attribute = attributes.get('format', definitions.FORMAT_SIGNED)
        if format_attribute not in self._INTEGER_FORMAT_ATTRIBUTES:
            error_message = 'unsupported format attribute: {0!s}'.format(
                format_attribute)
raise errors.DefinitionReaderError(definition_name, error_message)
definition_object.format = format_attribute
return definition_object | Reads an integer data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
definition_name (str): name of the definition.
is_member (Optional[bool]): True if the data type definition is a member
data type definition.
Returns:
IntegerDataTypeDefinition: integer data type definition.
Raises:
DefinitionReaderError: if the definitions values are missing or if
the format is incorrect. |
2,294 | def transformer_tall_pretrain_lm():
hparams = transformer_tall()
hparams.learning_rate_constant = 2e-4
hparams.learning_rate_schedule = ("linear_warmup*constant*cosdecay")
hparams.optimizer = "adam_w"
hparams.optimizer_adam_beta1 = 0.9
hparams.optimizer_adam_beta2 = 0.999
hparams.optimizer_adam_epsilon = 1e-8
hparams.multiproblem_schedule_max_examples = 5e8
hparams.learning_rate_decay_steps = 5000000
return hparams | Hparams for transformer on LM pretraining (with 64k vocab). |
2,295 | def set_topic_attributes(TopicArn, AttributeName, AttributeValue, region=None, key=None, keyid=None,
profile=None):
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
conn.set_topic_attributes(TopicArn=TopicArn, AttributeName=AttributeName,
AttributeValue=AttributeValue)
        log.debug('Set attribute %s=%s on SNS topic %s',  # format string reconstructed (stripped in extraction)
                  AttributeName, AttributeValue, TopicArn)
return True
except botocore.exceptions.ClientError as e:
        log.error('Failed to set attribute %s=%s on SNS topic %s: %s',  # format string reconstructed (stripped in extraction)
                  AttributeName, AttributeValue, TopicArn, e)
return False | Set an attribute of a topic to a new value.
CLI example::
salt myminion boto3_sns.set_topic_attributes someTopic DisplayName myDisplayNameValue |
2,296 | def get_title(self):
try:
            return extract_literal(self.meta_kwargs['title'])  # 'title' key reconstructed from the docstring
except KeyError:
slot = self.get_slot()
if slot is not None:
                return slot.replace('_', ' ').title()  # separators reconstructed: underscores become spaces
return None | Return the string literal that is used in the template.
The title is used in the admin screens. |
2,297 | def counter(self, ch, part=None):
return Counter(self(self._key(ch), part=part)) | Return a counter on the channel ch.
ch: string or integer.
The channel index number or channel name.
part: int or None
The 0-based enumeration of a True part to return. This
has an effect whether or not the mask or filter is turned
on. Raise IndexError if the part does not exist.
See `Counter
<https://docs.python.org/2.7/library/collections.html#counter-objects>`_
for the counter object returned. |
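A hedged usage sketch; 'rec' stands for an instance of the class this method belongs to, and the channel name is illustrative.

cnt = rec.counter('status')            # Counter over the channel's data
print(cnt.most_common(3))              # e.g. [(0, 1200), (1, 87), (2, 3)]
part0 = rec.counter('status', part=0)  # restrict to the first True part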
2,298 | def set_ssl_logging(self, enable=False, func=_ssl_logging_cb):
if enable:
SSL_CTX_set_info_callback(self._ctx, func)
else:
SSL_CTX_set_info_callback(self._ctx, 0) | Enable or disable SSL logging
:param True | False enable: Enable or disable SSL logging
:param func: Callback function for logging |
2,299 | def create_sslcert(self, name, common_name, pri, ca):
req = {}
req.update({"name": name})
req.update({"common_name": common_name})
req.update({"pri": pri})
req.update({"ca": ca})
body = json.dumps(req)
    url = '{0}/sslcert'.format(self.server)  # format string reconstructed (stripped in extraction); path assumed from the CDN API docs
    return self.__post(url, body) | Modify a certificate. Documentation: https://developer.qiniu.com/fusion/api/4246/the-domain-name#11
    Args:
        name: certificate name
        common_name: the related domain name
        pri: certificate private key
        ca: certificate content
    Returns:
        a tuple (<result>, <ResponseInfo>)
        - result: on success a dict {certID: <CertID>}; on failure {"error": "<errMsg string>"}
        - ResponseInfo: the Response info of the request |
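A hedged calling sketch (keys and PEM contents are placeholders; DomainManager is the qiniu SDK class that carries this method).

from qiniu import Auth, DomainManager

pri_pem = '-----BEGIN PRIVATE KEY-----\n...'
ca_pem = '-----BEGIN CERTIFICATE-----\n...'
auth = Auth('<access_key>', '<secret_key>')
manager = DomainManager(auth)
ret, info = manager.create_sslcert('my-cert', 'example.com', pri_pem, ca_pem)
print(ret.get('certID') if ret else info)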