Unnamed: 0 (int64, 0 to 389k) | code (string, length 26 to 79.6k) | docstring (string, length 1 to 46.9k)
---|---|---|
379,200 | def add_system(self, system):
if system not in self._systems:
system.set_world(self)
self._systems.append(system)
else:
raise DuplicateSystemError(system) | Add system to the world.
All systems will be processed on World.process()
system is of type System |
379,201 | def alias_repository(self, repository_id=None, alias_id=None):
if not self._can():
raise PermissionDenied()
else:
return self._provider_session.alias_repository(repository_id, alias_id) | Adds an ``Id`` to a ``Repository`` for the purpose of creating compatibility.
The primary ``Id`` of the ``Repository`` is determined by the
provider. The new ``Id`` is an alias to the primary ``Id``. If
the alias is a pointer to another repository, it is reassigned
to the given repository ``Id``.
arg: repository_id (osid.id.Id): the ``Id`` of a
``Repository``
arg: alias_id (osid.id.Id): the alias ``Id``
raise: AlreadyExists - ``alias_id`` is in use as a primary
``Id``
raise: NotFound - ``repository_id`` not found
raise: NullArgument - ``repository_id`` or ``alias_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* |
379,202 | def createRandomObjectDescriptions(numObjects,
numLocationsPerObject,
featurePool=("A", "B", "C")):
return dict(("Object %d" % i,
zip(xrange(numLocationsPerObject),
[random.choice(featurePool)
for _ in xrange(numLocationsPerObject)]))
for i in xrange(1, numObjects + 1)) | Returns {"Object 1": [(0, "C"), (1, "B"), (2, "C"), ...],
"Object 2": [(0, "C"), (1, "A"), (2, "B"), ...]} |
379,203 | def simBirth(self,which_agents):
N = np.sum(which_agents)
self.aNrmNow[which_agents] = drawLognormal(N,mu=self.aNrmInitMean,sigma=self.aNrmInitStd,seed=self.RNG.randint(0,2**31-1))
pLvlInitMeanNow = self.pLvlInitMean + np.log(self.PlvlAggNow)
self.pLvlNow[which_agents] = drawLognormal(N,mu=pLvlInitMeanNow,sigma=self.pLvlInitStd,seed=self.RNG.randint(0,2**31-1))
self.t_age[which_agents] = 0
self.t_cycle[which_agents] = 0
return None | Makes new consumers for the given indices. Initialized variables include aNrm and pLvl, as
well as time variables t_age and t_cycle. Normalized assets and permanent income levels
are drawn from lognormal distributions given by aNrmInitMean and aNrmInitStd (etc).
Parameters
----------
which_agents : np.array(Bool)
Boolean array of size self.AgentCount indicating which agents should be "born".
Returns
-------
None |
379,204 | def generate_openmp_enabled_py(packagename, srcdir='.', disable_openmp=None):
if packagename.lower() == 'astropy':
packagetitle = 'Astropy'
else:
packagetitle = packagename
epoch = int(os.environ.get('SOURCE_DATE_EPOCH', time.time()))
timestamp = datetime.datetime.utcfromtimestamp(epoch)
if disable_openmp is not None:
import builtins
builtins._ASTROPY_DISABLE_SETUP_WITH_OPENMP_ = disable_openmp
if _ASTROPY_DISABLE_SETUP_WITH_OPENMP_:
log.info("OpenMP support has been explicitly disabled.")
openmp_support = False if _ASTROPY_DISABLE_SETUP_WITH_OPENMP_ else is_openmp_supported()
src = _IS_OPENMP_ENABLED_SRC.format(packagetitle=packagetitle,
timestamp=timestamp,
return_bool=openmp_support)
package_srcdir = os.path.join(srcdir, *packagename.split('.'))
is_openmp_enabled_py = os.path.join(package_srcdir, 'openmp_enabled.py')
with open(is_openmp_enabled_py, 'w') as f:
f.write(src) | Generate ``package.openmp_enabled.is_openmp_enabled``, which can then be used
to determine, post build, whether the package was built with or without
OpenMP support. |
379,205 | def interpolate_to_isosurface(level_var, interp_var, level, **kwargs):
bottom_up_search = kwargs.pop('bottom_up_search', True)
above, below, good = metpy.calc.find_bounding_indices(level_var, [level], axis=0,
from_below=bottom_up_search)
interp_level = (((level - level_var[above]) / (level_var[below] - level_var[above]))
* (interp_var[below] - interp_var[above])) + interp_var[above]
interp_level[~good] = np.nan
minvar = (np.min(level_var, axis=0) >= level)
maxvar = (np.max(level_var, axis=0) <= level)
interp_level[0][minvar] = interp_var[-1][minvar]
interp_level[0][maxvar] = interp_var[0][maxvar]
return interp_level.squeeze() | r"""Linear interpolation of a variable to a given vertical level from given values.
This function assumes that highest vertical level (lowest pressure) is zeroth index.
A classic use of this function would be to compute the potential temperature on the
dynamic tropopause (2 PVU surface).
Parameters
----------
level_var: array_like (P, M, N)
Level values in 3D grid on common vertical coordinate (e.g., PV values on
isobaric levels). Assumes height dimension is highest to lowest in atmosphere.
interp_var: array_like (P, M, N)
Variable on 3D grid with same vertical coordinate as level_var to interpolate to
given level (e.g., potential temperature on isobaric levels)
level: int or float
Desired interpolated level (e.g., 2 PVU surface)
Other Parameters
----------------
bottom_up_search : bool, optional
Controls whether to search for levels bottom-up, or top-down. Defaults to
True, which is bottom-up search.
Returns
-------
interp_level: (M, N) ndarray
The interpolated variable (e.g., potential temperature) on the desired level (e.g.,
2 PVU surface)
Notes
-----
This function implements a linear interpolation to estimate values on a given surface.
The prototypical example is interpolation of potential temperature to the dynamic
tropopause (e.g., 2 PVU surface) |
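A minimal usage sketch for the row above (array shapes, values, and variable names are illustrative assumptions, not part of the dataset):

```python
import numpy as np

# Hypothetical 3D fields on 5 isobaric levels (highest level at index 0):
pv = np.random.uniform(0.0, 4.0, size=(5, 10, 10))          # potential vorticity [PVU]
theta = np.random.uniform(280.0, 380.0, size=(5, 10, 10))   # potential temperature [K]

# Potential temperature interpolated onto the 2 PVU surface (dynamic tropopause)
theta_dt = interpolate_to_isosurface(pv, theta, 2.0, bottom_up_search=True)
print(theta_dt.shape)  # expected: (10, 10)
```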
379,206 | def getlist(self, key):
value = self.get(key, [])
if value is None or isinstance(value, (list, tuple)):
return value
else:
return [value] | Returns a Storage value as a list.
If the value is a list it will be returned as-is.
If object is None, an empty list will be returned.
Otherwise, `[value]` will be returned.
Example output for a query string of `?x=abc&y=abc&y=def`::
>>> request = Storage()
>>> request.vars = Storage()
>>> request.vars.x = 'abc'
>>> request.vars.y = ['abc', 'def']
>>> request.vars.getlist('x')
['abc']
>>> request.vars.getlist('y')
['abc', 'def']
>>> request.vars.getlist('z')
[] |
379,207 | def start(self):
log.info(, self.host, self.port)
if not self.base_pathname:
tmpl = (
)
raise NotInitializedError(tmpl % self.base_pathname)
conf_file = os.path.join(self.base_pathname, )
if not os.path.exists(conf_file):
tmpl =
raise NotInitializedError(tmpl % self.base_pathname)
if not self.is_running():
version = self.get_version()
if version and version >= (9, 3):
socketop =
else:
socketop =
postgres_options = [
if not self.is_running():
tmpl = (
)
raise RuntimeError(tmpl % self) | Launch this postgres server. If it's already running, do nothing.
If the backing storage directory isn't configured, raise
NotInitializedError.
This method is optional. If you're running in an environment
where the DBMS is provided as part of the basic infrastructure,
you probably want to skip this step! |
379,208 | def get_process_curses_data(self, p, first, args):
ret = [self.curse_new_line()]
if in p and p[] is not None and p[] != :
if args.disable_irix and self.nb_log_core != 0:
msg = self.layout_stat[].format(p[] / float(self.nb_log_core))
else:
msg = self.layout_stat[].format(p[])
alert = self.get_alert(p[],
highlight_zero=False,
is_max=(p[] == self.max_values[]),
header="cpu")
ret.append(self.curse_add_line(msg, alert))
else:
msg = self.layout_header[].format()
ret.append(self.curse_add_line(msg))
if in p and p[] is not None and p[] != :
msg = self.layout_stat[].format(p[])
alert = self.get_alert(p[],
highlight_zero=False,
is_max=(p[] == self.max_values[]),
header="mem")
ret.append(self.curse_add_line(msg, alert))
else:
msg = self.layout_header[].format()
ret.append(self.curse_add_line(msg))
if in p and p[] is not None and p[] != :
msg = self.layout_stat[].format(self.auto_unit(p[][1], low_precision=False))
ret.append(self.curse_add_line(msg, optional=True))
msg = self.layout_stat[].format(self.auto_unit(p[][0], low_precision=False))
ret.append(self.curse_add_line(msg, optional=True))
else:
msg = self.layout_header[].format()
ret.append(self.curse_add_line(msg))
msg = self.layout_header[].format()
ret.append(self.curse_add_line(msg))
msg = self.layout_stat[].format(p[], width=self.__max_pid_size())
ret.append(self.curse_add_line(msg))
if in p:
msg = self.layout_stat[].format(str(p[])[:9])
ret.append(self.curse_add_line(msg))
else:
msg = self.layout_header[].format()
ret.append(self.curse_add_line(msg))
try:
user_system_time = p[][0] + p[][1]
except (OverflowError, TypeError) as e:
msg = self.layout_header[].format()
ret.append(self.curse_add_line(msg, optional=True))
else:
hours, minutes, seconds = seconds_to_hms(user_system_time)
if hours > 99:
msg = .format(hours)
elif 0 < hours < 100:
msg = .format(hours, minutes, seconds)
else:
msg = .format(minutes, seconds)
msg = self.layout_stat[].format(msg)
if hours > 0:
ret.append(self.curse_add_line(msg,
decoration=,
optional=True))
else:
ret.append(self.curse_add_line(msg, optional=True))
if in p:
num_threads = p[]
if num_threads is None:
num_threads =
msg = self.layout_stat[].format(num_threads)
ret.append(self.curse_add_line(msg))
else:
msg = self.layout_header[].format()
ret.append(self.curse_add_line(msg))
if in p:
nice = p[]
if nice is None:
nice =
msg = self.layout_stat[].format(nice)
ret.append(self.curse_add_line(msg,
decoration=self.get_nice_alert(nice)))
else:
msg = self.layout_header[].format()
ret.append(self.curse_add_line(msg))
if in p:
status = p[]
msg = self.layout_stat[].format(status)
if status == :
ret.append(self.curse_add_line(msg, decoration=))
else:
ret.append(self.curse_add_line(msg))
else:
msg = self.layout_header[].format()
ret.append(self.curse_add_line(msg))
if in p and p[][4] == 1 and p[] != 0:
io_rs = int((p[][0] - p[][2]) / p[])
if io_rs == 0:
msg = self.layout_stat[].format("0")
else:
msg = self.layout_stat[].format(self.auto_unit(io_rs,
low_precision=True))
ret.append(self.curse_add_line(msg, optional=True, additional=True))
io_ws = int((p[][1] - p[][3]) / p[])
if io_ws == 0:
msg = self.layout_stat[].format("0")
else:
msg = self.layout_stat[].format(self.auto_unit(io_ws,
low_precision=True))
ret.append(self.curse_add_line(msg, optional=True, additional=True))
else:
msg = self.layout_header[].format("?")
ret.append(self.curse_add_line(msg, optional=True, additional=True))
msg = self.layout_header[].format("?")
ret.append(self.curse_add_line(msg, optional=True, additional=True))
if in p:
cmdline = p[]
else:
cmdline =
try:
if cmdline:
path, cmd, arguments = split_cmdline(cmdline)
if os.path.isdir(path) and not args.process_short_name:
msg = self.layout_stat[].format(path) + os.sep
ret.append(self.curse_add_line(msg, splittable=True))
ret.append(self.curse_add_line(cmd, decoration=, splittable=True))
else:
msg = self.layout_stat[].format(cmd)
ret.append(self.curse_add_line(msg, decoration=, splittable=True))
if arguments:
msg = + self.layout_stat[].format(arguments)
ret.append(self.curse_add_line(msg, splittable=True))
else:
msg = self.layout_stat[].format(p[])
ret.append(self.curse_add_line(msg, splittable=True))
except (TypeError, UnicodeEncodeError) as e:
logger.debug("Can not decode command line ({})".format(cmdline, e))
ret.append(self.curse_add_line(, splittable=True))
if first and in p and args.enable_process_extended:
xpad = * 13
if in p and p[] is not None:
ret.append(self.curse_new_line())
msg = xpad + + str(len(p[])) +
ret.append(self.curse_add_line(msg, splittable=True))
if in p and \
p[] is not None:
ret.append(self.curse_new_line())
msg = .format(xpad, p[])
if in p and p[] is not None:
msg += + self.auto_unit(p[], low_precision=False)
ret.append(self.curse_add_line(msg, splittable=True))
msg =
if in p and p[] is not None:
msg += str(p[]) +
if in p and p[] is not None:
msg += str(p[]) +
if in p and p[] is not None:
msg += str(p[]) +
if in p and p[] is not None:
msg += str(p[]) +
if in p and p[] is not None:
msg += str(p[]) +
if msg != :
ret.append(self.curse_new_line())
msg = xpad + + msg
ret.append(self.curse_add_line(msg, splittable=True))
if in p and \
p[] is not None \
and hasattr(p[], ):
ret.append(self.curse_new_line())
msg = xpad +
k =
v = p[].ioclass
if WINDOWS:
if v == 0:
msg += k +
elif v == 1:
msg += k +
elif v == 2:
msg +=
else:
msg += k + str(v)
else:
if v == 0:
msg +=
elif v == 1:
msg += k +
elif v == 2:
msg += k +
elif v == 3:
msg += k +
else:
msg += k + str(v)
if hasattr(p[], ) and p[].value != 0:
msg += % str(p[].value)
ret.append(self.curse_add_line(msg, splittable=True))
return ret | Get curses data to display for a process.
- p is the process to display
- first is a tag=True if the process is the first on the list |
379,209 | def get(self, key, failobj=None, exact=0):
if not exact:
key = self.getfullkey(key,new=1)
return self.data.get(key,failobj) | Raises exception if key is ambiguous |
379,210 | def parse(self, limit=None):
if limit is not None:
LOG.info("Only parsing first %d rows", limit)
LOG.info("Parsing files...")
if self.test_only:
self.test_mode = True
self.geno = Genotype(self.graph)
self.pathway = Pathway(self.graph)
self._parse_ctd_file(
limit, self.files[][])
self._parse_ctd_file(limit, self.files[][])
self._parse_ctd_file(limit, self.files[][])
self._parse_curated_chem_disease(limit)
LOG.info("Done parsing files.")
return | Override Source.parse()
Parses version and interaction information from CTD
Args:
:param limit (int, optional) limit the number of rows processed
Returns:
:return None |
379,211 | def hydrate_target(hydrated_struct):
target_adaptor = hydrated_struct.value
hydrated_fields = yield [Get(HydratedField, HydrateableField, fa)
for fa in target_adaptor.field_adaptors]
kwargs = target_adaptor.kwargs()
for field in hydrated_fields:
kwargs[field.name] = field.value
yield HydratedTarget(target_adaptor.address,
TargetAdaptor(**kwargs),
tuple(target_adaptor.dependencies)) | Construct a HydratedTarget from a TargetAdaptor and hydrated versions of its adapted fields. |
379,212 | def eject_media(self):
try:
super(VirtualMedia, self).eject_media()
except sushy_exceptions.SushyError:
target_uri = self._get_action_element().target_uri
self._conn.post(target_uri, data={}) | Ejects Virtual Media.
:raises: SushyError, on an error from iLO. |
379,213 | def RepackTemplate(self,
template_path,
output_dir,
upload=False,
token=None,
sign=False,
context=None,
signed_template=False):
orig_config = config.CONFIG
repack_config = RepackConfig()
print("Repacking template: %s" % template_path)
config.CONFIG = repack_config.GetConfigFromTemplate(template_path)
result_path = None
try:
repack_context = config.CONFIG["Template.build_context"]
if context:
repack_context.extend(context)
output_path = os.path.join(
output_dir,
config.CONFIG.Get(
"ClientRepacker.output_filename", context=repack_context))
print("Using context: %s and labels: %s" %
(repack_context,
config.CONFIG.Get("Client.labels", context=repack_context)))
try:
signer = None
if sign:
signer = self.GetSigner(repack_context)
builder_obj = self.GetRepacker(context=repack_context, signer=signer)
builder_obj.signed_template = signed_template
result_path = builder_obj.MakeDeployableBinary(template_path,
output_path)
except Exception:
logging.exception("Repacking template %s failed:", template_path)
if result_path:
print("Repacked into %s" % result_path)
if upload:
from grr_response_server import maintenance_utils
client_platform = config.CONFIG.Get(
"Client.platform", context=repack_context)
repack_basename = config.CONFIG.Get(
"ClientRepacker.output_basename", context=repack_context)
repack_extension = config.CONFIG.Get(
"ClientBuilder.output_extension", context=repack_context)
repack_filename = repack_basename + repack_extension
binary_urn = rdfvalue.RDFURN("aff4:/config/executables").Add(
client_platform).Add("installers").Add(repack_filename)
maintenance_utils.UploadSignedConfigBlob(
open(result_path, "rb").read(100 * 1024 * 1024),
binary_urn,
client_context=repack_context,
token=token)
else:
print("Failed to repack %s." % template_path)
finally:
config.CONFIG = orig_config
return result_path | Repack binaries based on the configuration.
We repack all templates in the templates directory. We expect to find only
functioning templates, all other files should be removed. Each template
contains a build.yaml that specifies how it was built and how it should be
repacked.
Args:
template_path: template path string
output_dir: Output files will be put in this directory.
upload: If specified we also upload the repacked binary into the datastore.
token: Token to use when uploading to the datastore.
sign: If true, we want to digitally sign the installer.
context: Array of context strings
signed_template: If true, the libraries in the template are already
signed. This is only used for windows when repacking the template
multiple times.
Returns:
A list of output installers generated. |
379,214 | def is_choked_turbulent_l(dP, P1, Psat, FF, FL=None, FLP=None, FP=None):
if FLP and FP:
return dP >= (FLP/FP)**2*(P1-FF*Psat)
elif FL:
return dP >= FL**2*(P1-FF*Psat)
else:
raise Exception() | r'''Calculates if a liquid flow in IEC 60534 calculations is critical or
not, for use in IEC 60534 liquid valve sizing calculations.
Either FL may be provided or FLP and FP, depending on the calculation
process.
.. math::
\Delta P > F_L^2(P_1 - F_F P_{sat})
.. math::
\Delta P >= \left(\frac{F_{LP}}{F_P}\right)^2(P_1 - F_F P_{sat})
Parameters
----------
dP : float
Differential pressure across the valve, with reducer/expanders [Pa]
P1 : float
Pressure of the fluid before the valve and reducers/expanders [Pa]
Psat : float
Saturation pressure of the fluid at inlet temperature [Pa]
FF : float
Liquid critical pressure ratio factor [-]
FL : float, optional
Liquid pressure recovery factor of a control valve without attached fittings [-]
FLP : float, optional
Combined liquid pressure recovery factor with piping geometry factor,
for a control valve with attached fittings [-]
FP : float, optional
Piping geometry factor [-]
Returns
-------
choked : bool
Whether or not the flow is choked [-]
Examples
--------
>>> is_choked_turbulent_l(460.0, 680.0, 70.1, 0.94, 0.9)
False
>>> is_choked_turbulent_l(460.0, 680.0, 70.1, 0.94, 0.6)
True
References
----------
.. [1] IEC 60534-2-1 / ISA-75.01.01-2007 |
379,215 | def colored_pygments_excepthook(type_, value, tb):
tbtext = ''.join(traceback.format_exception(type_, value, tb))
try:
from utool import util_str
formatted_text = util_str.highlight_text(tbtext, lexer_name='pytb',
stripall=True)
except Exception:
formatted_text = tbtext
return sys.__excepthook__(type_, value, tb)
sys.stderr.write(formatted_text) | References:
https://stackoverflow.com/questions/14775916/color-exceptions-python
CommandLine:
python -m utool.util_inject --test-colored_pygments_excepthook |
379,216 | def read_rels(archive):
xml_source = archive.read(ARC_WORKBOOK_RELS)
tree = fromstring(xml_source)
for element in safe_iterator(tree, '{%s}Relationship' % PKG_REL_NS):
rId = element.get('Id')
pth = element.get("Target")
typ = element.get('Type')
if pth.startswith("/xl"):
pth = pth.replace("/xl", "xl")
elif not pth.startswith("xl") and not pth.startswith(".."):
pth = "xl/" + pth
yield rId, {'path': pth, 'type': typ} | Read relationships for a workbook
379,217 | def removeApplicationManifest(self, pchApplicationManifestFullPath):
fn = self.function_table.removeApplicationManifest
result = fn(pchApplicationManifestFullPath)
return result | Removes an application manifest from the list to load when building the list of installed applications. |
379,218 | def messages(request, year=None, month=None, day=None,
template="gnotty/messages.html"):
query = request.REQUEST.get("q")
prev_url, next_url = None, None
messages = IRCMessage.objects.all()
if hide_joins_and_leaves(request):
messages = messages.filter(join_or_leave=False)
if query:
search = Q(message__icontains=query) | Q(nickname__icontains=query)
messages = messages.filter(search).order_by("-message_time")
elif year and month and day:
messages = messages.filter(message_time__year=year,
message_time__month=month,
message_time__day=day)
day_delta = timedelta(days=1)
this_date = date(int(year), int(month), int(day))
prev_date = this_date - day_delta
next_date = this_date + day_delta
prev_url = reverse("gnotty_day", args=prev_date.timetuple()[:3])
next_url = reverse("gnotty_day", args=next_date.timetuple()[:3])
else:
return redirect("gnotty_year", year=datetime.now().year)
context = dict(settings)
context["messages"] = messages
context["prev_url"] = prev_url
context["next_url"] = next_url
return render(request, template, context) | Show messages for the given query or day. |
379,219 | def ChangeUserStatus(self, Status):
if self.CurrentUserStatus.upper() == Status.upper():
return
self._ChangeUserStatus_Event = threading.Event()
self._ChangeUserStatus_Status = Status.upper()
self.RegisterEventHandler('UserStatus', self._ChangeUserStatus_UserStatus)
self.CurrentUserStatus = Status
self._ChangeUserStatus_Event.wait()
self.UnregisterEventHandler('UserStatus', self._ChangeUserStatus_UserStatus)
del self._ChangeUserStatus_Event, self._ChangeUserStatus_Status | Changes the online status for the current user.
:Parameters:
Status : `enums`.cus*
New online status for the user.
:note: This function waits until the online status changes. Alternatively, use the
`CurrentUserStatus` property to perform an immediate change of status. |
379,220 | def unlock_key(key_name,
stash,
passphrase,
backend):
stash = _get_stash(backend, stash, passphrase)
try:
click.echo()
stash.unlock(key_name=key_name)
click.echo()
except GhostError as ex:
sys.exit(ex) | Unlock a key to allow it to be modified, deleted or purged
`KEY_NAME` is the name of the key to unlock |
379,221 | def encodeSplines(x, n_bases=10, spline_order=3, start=None, end=None, warn=True):
if len(x.shape) == 1:
x = x.reshape((-1, 1))
if start is None:
start = np.nanmin(x)
else:
if x.min() < start:
if warn:
print("WARNING, x.min() < start for some elements. Truncating them to start: x[x < start] = start")
x = _trunc(x, minval=start)
if end is None:
end = np.nanmax(x)
else:
if x.max() > end:
if warn:
print("WARNING, x.max() > end for some elements. Truncating them to end: x[x > end] = end")
x = _trunc(x, maxval=end)
bs = BSpline(start, end,
n_bases=n_bases,
spline_order=spline_order
)
assert len(x.shape) == 2
n_rows = x.shape[0]
n_cols = x.shape[1]
x_long = x.reshape((-1,))
x_feat = bs.predict(x_long, add_intercept=False)
x_final = x_feat.reshape((n_rows, n_cols, n_bases))
return x_final | **Deprecated**. Function version of the transformer class `EncodeSplines`.
Get B-spline base-function expansion
# Details
First, the knots for B-spline basis functions are placed
equidistantly on the [start, end] range.
(inferred from the data if None). Next, b_n(x) value is
is computed for each x and each n (spline-index) with
`scipy.interpolate.splev`.
# Arguments
x: a numpy array of positions with 2 dimensions
n_bases int: Number of spline bases.
spline_order: 2 for quadratic, 3 for qubic splines
start, end: range of values. If None, they are inferred from the data
as minimum and maximum value.
warn: Show warnings.
# Returns
`np.ndarray` of shape `(x.shape[0], x.shape[1], n_bases)` |
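A short usage sketch for `encodeSplines` (the input shape follows the docstring; the values are hypothetical):

```python
import numpy as np

# 5 sequences, 20 positions each
x = np.random.uniform(0, 100, size=(5, 20))
x_feat = encodeSplines(x, n_bases=10, spline_order=3)
print(x_feat.shape)  # expected: (5, 20, 10)
```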
379,222 | def dimap_V(D, I):
DI = np.array([D, I]).transpose()
X = dir2cart(DI).transpose()
R = np.sqrt(1. - abs(X[2]))/(np.sqrt(X[0]**2 + X[1]**2))
XY = np.array([X[1] * R, X[0] * R]).transpose()
return XY | FUNCTION TO MAP DECLINATION, INCLINATIONS INTO EQUAL AREA PROJECTION, X,Y
Usage: dimap_V(D, I)
D and I are both numpy arrays |
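A brief usage sketch for `dimap_V` (assumes the module's `dir2cart` helper is available; the values are hypothetical):

```python
import numpy as np

D = np.array([0.0, 90.0, 180.0])   # declinations [degrees]
I = np.array([45.0, 10.0, 60.0])   # inclinations [degrees]
XY = dimap_V(D, I)
print(XY.shape)  # expected: (3, 2) equal-area (x, y) coordinates
```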
379,223 | def load_model(path):
assert_is_type(path, str)
res = api("POST /99/Models.bin/%s" % "", data={"dir": path})
return get_model(res["models"][0]["model_id"]["name"]) | Load a saved H2O model from disk. (Note that ensemble binary models can now be loaded using this method.)
:param path: the full path of the H2O Model to be imported.
:returns: an :class:`H2OEstimator` object
:examples:
>>> path = h2o.save_model(my_model, dir=my_path)
>>> h2o.load_model(path) |
379,224 | def _escalation_rules_to_string(escalation_rules):
result = ''
for rule in escalation_rules:
# NOTE: the original format strings and key names were lost; the keys below follow the PagerDuty escalation-rule schema and are assumed
result += 'escalation_delay_in_minutes: {0}\n'.format(rule['escalation_delay_in_minutes'])
for target in rule['targets']:
result += '{0}: {1}\n'.format(target['type'], target['id'])
return result | convert escalation_rules dict to a string for comparison
379,225 | def merge(self, workdir, ddb_files, out_ddb, description, delete_source_ddbs=True):
ddb_files = [os.path.abspath(s) for s in list_strings(ddb_files)]
if not os.path.isabs(out_ddb):
out_ddb = os.path.join(os.path.abspath(workdir), os.path.basename(out_ddb))
if self.verbose:
print("Will merge %d files into output DDB %s" % (len(ddb_files), out_ddb))
for i, f in enumerate(ddb_files):
print(" [%d] %s" % (i, f))
if len(ddb_files) == 1:
with open(ddb_files[0], "r") as inh, open(out_ddb, "w") as out:
for line in inh:
out.write(line)
return out_ddb
self.stdin_fname, self.stdout_fname, self.stderr_fname = \
map(os.path.join, 3 * [os.path.abspath(workdir)], ["mrgddb.stdin", "mrgddb.stdout", "mrgddb.stderr"])
inp = StringIO()
inp.write(out_ddb + "\n")
inp.write(str(description) + "\n")
inp.write(str(len(ddb_files)) + "\n")
for fname in ddb_files:
inp.write(fname + "\n")
self.stdin_data = [s for s in inp.getvalue()]
with open(self.stdin_fname, "wt") as fh:
fh.writelines(self.stdin_data)
fh.flush()
os.fsync(fh.fileno())
retcode = self.execute(workdir, exec_args=[])
if retcode == 0 and delete_source_ddbs:
for f in ddb_files:
try:
os.remove(f)
except IOError:
pass
return out_ddb | Merge DDB file, return the absolute path of the new database in workdir. |
379,226 | def initialize_zones(self):
zone_list = self.location_info.get(, {: True})
for zone_id in zone_list:
if zone_list[zone_id]:
self.zones[zone_id] = Zone(self, zone_id=zone_id)
else:
_LOGGER.debug("Ignoring zone: %s", zone_id) | initialize receiver zones |
379,227 | def get_root():
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = ("Versioneer was unable to run the project root directory. "
"Versioneer requires setup.py to be executed from "
"its immediate directory (like ), "
"or in a way that lets it use sys.argv[0] to find the root "
"(like ).")
raise VersioneerBadRootError(err)
try:
me = os.path.realpath(os.path.abspath(__file__))
if os.path.splitext(me)[0] != os.path.splitext(versioneer_py)[0]:
print("Warning: build in %s is using versioneer.py from %s"
% (os.path.dirname(me), versioneer_py))
except NameError:
pass
return root | Get the project root directory.
We require that all commands are run from the project root, i.e. the
directory that contains setup.py, setup.cfg, and versioneer.py . |
379,228 | def valueReadPreprocessor(valueString, replaceParamsFile=None):
if type(valueString) is bool:
log.warning("Only numerical variable types can be handled by the valueReadPreprocessor function.")
return valueString
processedValue = valueString
if replaceParamsFile is not None and valueString is not None:
if '[' in valueString or ']' in valueString:
processedValue = '{0}'.format(REPLACE_NO_VALUE)
for targetParam in replaceParamsFile.targetParameters:
if targetParam.targetVariable == valueString:
processedValue = '{0}'.format(-1 * targetParam.id)
break
return processedValue | Apply global pre-processing to values during reading throughout the project.
Args:
valueString (str): String representing the value to be preprocessed.
replaceParamsFile (gsshapy.orm.ReplaceParamFile, optional): Instance of the replace param file. Required if
replacement variables are included in the project.
Returns:
str: Processed value as a string |
379,229 | def deletescript(self, name):
code, data = self.__send_command(
"DELETESCRIPT", [name.encode("utf-8")])
if code == "OK":
return True
return False | Delete a script from the server
See MANAGESIEVE specifications, section 2.10
:param name: script's name
:rtype: boolean |
379,230 | def parse_serialdiff(sd_dict):
"helper for translate_check"
if isinstance(sd_dict,list):
if len(sd_dict)!=2 or sd_dict[0]!=: raise NotImplementedError(sd_dict[0],len(sd_dict))
return CheckStale(sd_dict[1])
if isinstance(sd_dict[],list):
sd_dict[]=[diff.Delta(d[][],d[][],d[]) for d in sd_dict[]]
return SerialDiff(**sd_dict) | helper for translate_check |
379,231 | def get_log_entry_ids_by_log(self, log_id):
id_list = []
for log_entry in self.get_log_entries_by_log(log_id):
id_list.append(log_entry.get_id())
return IdList(id_list) | Gets the list of ``LogEntry`` ``Ids`` associated with a ``Log``.
arg: log_id (osid.id.Id): ``Id`` of a ``Log``
return: (osid.id.IdList) - list of related logEntry ``Ids``
raise: NotFound - ``log_id`` is not found
raise: NullArgument - ``log_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* |
379,232 | def raw(node):
o = nodes.raw(node.literal, node.literal, format='html')
if node.sourcepos is not None:
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o += n
return o | Add some raw html (possibly as a block) |
379,233 | def _read_header(filename):
with filename.open() as f:
h = f.read(HDR_LENGTH).decode()
header = {}
for line in h.split():
if in line:
key, value = line.split()
key = key.strip()[7:]
value = value.strip()[:-1]
header[key] = value
return header | Read the text header for each file
Parameters
----------
channel_file : Path
path to single filename with the header
Returns
-------
dict
header |
379,234 | def custom_config(request):
if request.method == 'POST':
config_dict = json_body(request.body.decode())
CustomConfig.objects.try_create(
config_dict['app_name'],
config_dict['key'],
config_dict['value'],
request.user.id,
config_dict.get('condition_key') if config_dict.get('condition_key') else None,
urllib.parse.unquote(config_dict.get('condition_value')) if config_dict.get('condition_value') else None
)
return config(request)
else:
return render_json(request, {}, template=, help_text=custom_config.__doc__) | Save user-specific configuration property.
POST parameters (JSON keys):
app_name: application name for which the configuration property is
valid (e.g., proso_models)
key: name of the property (e.g., predictive_model.class)
value: value of the property (number, string, boolean, ...,
e.g, proso.models.prediction.PriorCurrentPredictiveModel)
condition_key (optional): name of the condition which is used to filter
the property (e.g., practice_filter)
condition_value (optional): value for the condition filtering the
property (e.g., [["context/world"],["category/state"]]) |
379,235 | def reg_load(self, reg, value):
if isinstance(value, (six.text_type, six.binary_type)):
value = self.alloc_data(value)
if value is None:
return self.reg_load_imm(reg, 0)
elif isinstance(value, Register):
if reg is not value:
return self.reg_load_reg(reg, value)
else:
return []
elif isinstance(value, Offset):
if value:
return self.reg_load_offset(reg, value)
else:
return self.reg_load(reg, self.OFFSET_REG)
elif isinstance(value, Buffer):
return self.reg_load_offset(reg, sum(len(v) for v in six.iterkeys(self.data)) + value.offset)
elif isinstance(value, six.integer_types):
reg_width = self.REGISTER_WIDTH[reg]
if value < -2 ** (reg_width-1):
raise ValueError( % (value, reg))
elif value >= 2 ** reg_width:
raise ValueError( % (value, reg))
return self.reg_load_imm(reg, value)
elif isinstance(value, (list, tuple)):
return self.reg_load_array(reg, value)
elif isinstance(value, SyscallInvoke):
return self.syscall(value) + self.reg_load(reg, self.SYSCALL_RET_REG)
else:
raise TypeError( % repr(value)) | Load a value into a register. The value can be a string or binary (in
which case the value is passed to :meth:`alloc_data`), another
:class:`Register`, an :class:`Offset` or :class:`Buffer`, an integer
immediate, a ``list`` or ``tuple`` or a syscall invocation.
Arguments:
reg(pwnypack.shellcode.types.Register): The register to load the
value into.
value: The value to load into the register.
Returns:
list: A list of mnemonics that will load value into reg. |
379,236 | def get_waveform_filter_norm(approximant, psd, length, delta_f, f_lower):
if approximant in _filter_norms:
return _filter_norms[approximant](psd, length, delta_f, f_lower)
else:
return None | Return the normalization vector for the approximant |
379,237 | def handle_message(self, ch, method, properties, body):
input = {}
headers = {}
try:
self.sessid = method.routing_key
input = json_decode(body)
data = input[]
self.send_output(output) | this is a pika.basic_consumer callback
handles client inputs, runs appropriate workflows and views
Args:
ch: amqp channel
method: amqp method
properties:
body: message body |
379,238 | def make_accessors(self, columns):
accessors = list(self.accessors_def or range(columns))
for i, x in enumerate(accessors):
if not callable(x):
if isinstance(x, collections.abc.Sequence) and \
not isinstance(x, str):
key, default = x
else:
key = x
default =
def acc(row, key=key, default=default):
try:
return row[key]
except (KeyError, IndexError):
return default
accessors[i] = acc
return accessors | Accessors can be numeric keys for sequence row data, string keys
for mapping row data, or a callable function. For numeric and string
accessors they can be inside a 2 element tuple where the 2nd value is
the default value; Similar to dict.get(lookup, default). |
379,239 | def get_id(self, natural_key, enhancement=None):
if natural_key in self._map:
return self._map[natural_key]
self.pre_call_stored_procedure()
success = False
try:
key = self.call_stored_procedure(natural_key, enhancement)
success = True
finally:
self.post_call_stored_procedure(success)
self._map[natural_key] = key
return key | Returns the technical ID for a natural key or None if the given natural key is not valid.
:param T natural_key: The natural key.
:param T enhancement: Enhancement data of the dimension row.
:rtype: int|None |
379,240 | def write(pkg_file, pkg_rels, parts):
phys_writer = PhysPkgWriter(pkg_file)
PackageWriter._write_content_types_stream(phys_writer, parts)
PackageWriter._write_pkg_rels(phys_writer, pkg_rels)
PackageWriter._write_parts(phys_writer, parts)
phys_writer.close() | Write a physical package (.pptx file) to *pkg_file* containing
*pkg_rels* and *parts* and a content types stream based on the
content types of the parts. |
379,241 | def spine_to_terminal_wedge(mol):
for i, a in mol.atoms_iter():
if mol.neighbor_count(i) == 1:
ni, nb = list(mol.neighbors(i).items())[0]
if nb.order == 1 and nb.type in (1, 2) \
and ni > i != nb.is_lower_first:
nb.is_lower_first = not nb.is_lower_first
nb.type = {1: 2, 2: 1}[nb.type] | Arrange stereo wedge direction from spine to terminal atom |
379,242 | def create(self, sid=values.unset, phone_number=values.unset,
is_reserved=values.unset):
data = values.of({'Sid': sid, 'PhoneNumber': phone_number, 'IsReserved': is_reserved, })
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return PhoneNumberInstance(self._version, payload, service_sid=self._solution['service_sid'], ) | Create a new PhoneNumberInstance
:param unicode sid: The SID of a Twilio IncomingPhoneNumber resource
:param unicode phone_number: The phone number in E.164 format
:param bool is_reserved: Whether the new phone number should be reserved
:returns: Newly created PhoneNumberInstance
:rtype: twilio.rest.proxy.v1.service.phone_number.PhoneNumberInstance |
379,243 | def compare(self, otherdigest, ishex=False):
bits = 0
myd = self.digest()
if ishex:
otherdigest = tuple([int(otherdigest[i:i+2],16) for i in range(0,63,2)])
for i in range(32):
bits += POPC[255 & myd[i] ^ otherdigest[i]]
return 128 - bits | Compute difference in bits between own digest and another.
returns -127 to 128; 128 is the same, -127 is different |
379,244 | def api_version_elb_backend(*args, **kwargs):
request = args[0]
if hasattr(request, 'values'):
version = request.values.get('Version')
elif isinstance(request, AWSPreparedRequest):
version = parse_qs(request.body).get('Version')[0]
else:
request.parse_request()
version = request.querystring.get('Version')[0]
if '2012-06-01' == version:
return ELBResponse.dispatch(*args, **kwargs)
elif '2015-12-01' == version:
return ELBV2Response.dispatch(*args, **kwargs)
else:
raise Exception("Unknown ELB API version: {}".format(version)) | ELB and ELBV2 (Classic and Application load balancers) use the same
hostname and url space. To differentiate them we must read the
`Version` parameter out of the url-encoded request body. TODO: There
has _got_ to be a better way to do this. Please help us think of
one. |
379,245 | def delete_resource(self, resource_id):
collection = JSONClientValidated('resource',
collection='Resource',
runtime=self._runtime)
if not isinstance(resource_id, ABCId):
raise errors.InvalidArgument()
resource_map = collection.find_one(
dict({'_id': ObjectId(resource_id.get_identifier())},
**self._view_filter()))
objects.Resource(osid_object_map=resource_map, runtime=self._runtime, proxy=self._proxy)._delete()
collection.delete_one({'_id': ObjectId(resource_id.get_identifier())}) | Deletes a ``Resource``.
arg: resource_id (osid.id.Id): the ``Id`` of the ``Resource``
to remove
raise: NotFound - ``resource_id`` not found
raise: NullArgument - ``resource_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* |
379,246 | def correct(self, images,
bgImages=None,
exposure_time=None,
light_spectrum=None,
threshold=0.1,
keep_size=True,
date=None,
deblur=False,
denoise=False):
print()
if isinstance(date, string_types) or date is None:
date = {: date,
: date,
: date,
: date,
: date}
if light_spectrum is None:
try:
light_spectrum = self.coeffs[][0]
except IndexError:
pass
if (type(images) in (list, tuple) or
(isinstance(images, np.ndarray) and
images.ndim == 3 and
images.shape[-1] not in (3, 4)
)):
if len(images) > 1:
n = self.coeffs[]
if self.noise_level_function is None and len(n):
n = _getFromDate(n, date[])[2]
self.noise_level_function = lambda x: NoiseLevelFunction.boundedFunction(
x, *n)
print()
ste = SingleTimeEffectDetection(images, nStd=4,
noise_level_function=self.noise_level_function)
image = ste.noSTE
if self.noise_level_function is None:
self.noise_level_function = ste.noise_level_function
else:
image = np.asfarray(imread(images[0], dtype=np.float))
else:
image = np.asfarray(imread(images, dtype=np.float))
self._checkShape(image)
self.last_light_spectrum = light_spectrum
self.last_img = image
try:
self._correctDarkCurrent(image, exposure_time, bgImages,
date[])
except Exception as errm:
print( % errm)
try:
self._correctVignetting(image, light_spectrum,
date[])
except Exception as errm:
print( % errm)
if threshold > 0:
print()
try:
image = self._correctArtefacts(image, threshold)
except Exception as errm:
print( % errm)
if deblur:
print()
try:
image = self._correctBlur(image, light_spectrum, date[])
except Exception as errm:
print( % errm)
try:
image = self._correctLens(image, light_spectrum, date[],
keep_size)
except TypeError:
except Exception as errm:
print( % errm)
if denoise:
print()
image = self._correctNoise(image)
print()
return image | exposure_time [s]
date -> string e.g. '30. Nov 15' to get a calibration on from date
-> {'dark current':'30. Nov 15',
'flat field':'15. Nov 15',
'lens':'14. Nov 15',
'noise':'01. Nov 15'} |
379,247 | def MessageToJson(message, include_fields=None):
result = _ProtoJsonApiTools.Get().encode_message(message)
return _IncludeFields(result, message, include_fields) | Convert the given message to JSON. |
379,248 | def from_filename(cls, filename, sync_from_start=True):
from pygments.util import ClassNotFound
from pygments.lexers import get_lexer_for_filename
try:
pygments_lexer = get_lexer_for_filename(filename)
except ClassNotFound:
return SimpleLexer()
else:
return cls(pygments_lexer.__class__, sync_from_start=sync_from_start) | Create a `Lexer` from a filename. |
379,249 | def get_default_config_help(self):
config = super(StatsdHandler, self).get_default_config_help()
config.update({
: ,
: ,
: ,
})
return config | Returns the help text for the configuration options for this handler |
379,250 | def update(self):
con = self.subpars.pars.control
self(0.)
for idx in range(2):
if (con.bbv[idx] > 0.) and (con.bnv[idx] > 0.):
self.values[idx] = con.bbv[idx]/con.bnv[idx] | Update value based on :math:`HV=BBV/BNV`.
Required Parameters:
|BBV|
|BNV|
Examples:
>>> from hydpy.models.lstream import *
>>> parameterstep('1d')
>>> bbv(left=10., right=40.)
>>> bnv(left=10., right=20.)
>>> derived.hv.update()
>>> derived.hv
hv(left=1.0, right=2.0)
>>> bbv(left=10., right=0.)
>>> bnv(left=0., right=20.)
>>> derived.hv.update()
>>> derived.hv
hv(0.0) |
379,251 | def get_web_auth_session_key(self, url, token=""):
session_key, _username = self.get_web_auth_session_key_username(url, token)
return session_key | Retrieves the session key of a web authorization process by its URL. |
379,252 | def make_grid(xx, yy):
n = len(xx)
xx, yy = np.meshgrid(xx, yy)
grid = np.array([xx.ravel(), yy.ravel()]).T
x = grid[:, 0].reshape(n, n)
y = grid[:, 1].reshape(n, n)
return x, y | Returns two n-by-n matrices. The first one contains all the x values
and the second all the y values of a cartesian product between `xx` and `yy`. |
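A quick usage sketch for `make_grid` (note that `xx` and `yy` must have the same length `n`):

```python
import numpy as np

xx = np.linspace(0.0, 1.0, 4)
yy = np.linspace(-1.0, 1.0, 4)
x, y = make_grid(xx, yy)
print(x.shape, y.shape)  # expected: (4, 4) (4, 4)
```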
379,253 | def get_login_information(self, code=None):
access_token = self._get_access_token(code)
return self._get_user_info(access_token) | Return Clef user info after exchanging code for OAuth token. |
379,254 | def make_osm_query(query):
osm_url = 'http://www.overpass-api.de/api/interpreter'  # Overpass API endpoint (URL assumed; original literal lost)
req = requests.get(osm_url, params={'data': query})
req.raise_for_status()
return req.json() | Make a request to OSM and return the parsed JSON.
Parameters
----------
query : str
A string in the Overpass QL format.
Returns
-------
data : dict |
379,255 | def read_stream (stream):
section = None
key = None
data = None
for fullline in stream:
line = fullline.split (, 1)[0]
m = sectionre.match (line)
if m is not None:
if section is not None:
if key is not None:
section.set_one (key, data.strip ().decode ())
key = data = None
yield section
section = Holder ()
section.section = m.group (1)
continue
if len (line.strip ()) == 0:
if key is not None:
section.set_one (key, data.strip ().decode ())
key = data = None
continue
m = escre.match (fullline)
if m is not None:
if section is None:
raise InifileError ()
if key is not None:
section.set_one (key, data.strip ().decode ())
key = m.group (1)
data = m.group (2).replace (r, ).replace (r, ).replace (r, )
section.set_one (key, data.decode ())
key = data = None
continue
m = keyre.match (line)
if m is not None:
if section is None:
raise InifileError ()
if key is not None:
section.set_one (key, data.strip ().decode ())
key = m.group (1)
data = m.group (2)
if not len (data):
data =
elif not data[-1].isspace ():
data +=
continue
if line[0].isspace () and key is not None:
data += line.strip () +
continue
raise InifileError ( + line[:-1])
if section is not None:
if key is not None:
section.set_one (key, data.strip ().decode ())
yield section | Python 3 compat note: we're assuming `stream` gives bytes not unicode. |
379,256 | def interpolate2dStructuredPointSpreadIDW(grid, mask, kernel=15, power=2,
maxIter=1e5, copy=True):
assert grid.shape == mask.shape, 'grid and mask must have the same shape'
border = np.zeros(shape=mask.shape, dtype=np.bool)
if copy:
mask = mask.copy()
grid = grid.copy()
return _calc(grid, mask, border, kernel, power, maxIter) | same as interpolate2dStructuredIDW but using the point spread method
this is faster if there are bigger connected masked areas and the border
length is smaller
replace all values in [grid] indicated by [mask]
with the inverse distance weighted interpolation of all values within
px+-kernel
[power] -> distance weighting factor: 1/distance**[power]
[copy] -> False: a bit faster, but modifies 'grid' and 'mask' |
379,257 | def psq2(d1, d2):
d1, d2 = flatten(d1), flatten(d2)
def f(p):
return sum((p ** 2) * np.nan_to_num(np.log(p * len(p))))
return abs(f(d1) - f(d2)) | Compute the PSQ2 measure.
Args:
d1 (np.ndarray): The first distribution.
d2 (np.ndarray): The second distribution. |
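A small worked example for `psq2` (assumes the module's `flatten` helper is available; the distributions are hypothetical):

```python
import numpy as np

d1 = np.array([0.25, 0.25, 0.25, 0.25])   # uniform over 4 states
d2 = np.array([0.70, 0.10, 0.10, 0.10])
print(psq2(d1, d1))  # 0.0: identical distributions
print(psq2(d1, d2))  # positive scalar measuring the difference
```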
379,258 | def add_proxy_auth(possible_proxy_url, proxy_auth):
if possible_proxy_url == 'DIRECT':
return possible_proxy_url
parsed = urlparse(possible_proxy_url)
return '{0}://{1}:{2}@{3}'.format(parsed.scheme, proxy_auth.username, proxy_auth.password, parsed.netloc) | Add a username and password to a proxy URL, if the input value is a proxy URL.
:param str possible_proxy_url: Proxy URL or ``DIRECT``.
:param requests.auth.HTTPProxyAuth proxy_auth: Proxy authentication info.
:returns: Proxy URL with auth info added, or ``DIRECT``.
:rtype: str |
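A usage sketch for `add_proxy_auth` (credentials and host are hypothetical; the expected output assumes the format string reconstructed above):

```python
from requests.auth import HTTPProxyAuth

auth = HTTPProxyAuth('alice', 's3cret')
print(add_proxy_auth('DIRECT', auth))  # 'DIRECT' is passed through unchanged
print(add_proxy_auth('http://proxy.example.com:8080', auth))
# expected: http://alice:s3cret@proxy.example.com:8080
```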
379,259 | def owner(self):
r
if os.name == :
if win32security is None:
raise Exception("path.owner requires win32all to be installed")
desc = win32security.GetFileSecurity(
self, win32security.OWNER_SECURITY_INFORMATION)
sid = desc.GetSecurityDescriptorOwner()
account, domain, typecode = win32security.LookupAccountSid(None, sid)
return domain + u + account
else:
if pwd is None:
raise NotImplementedError("path.owner is not implemented on this platform.")
st = self.stat()
return pwd.getpwuid(st.st_uid).pw_name | r""" Return the name of the owner of this file or directory.
This follows symbolic links.
On Windows, this returns a name of the form ur'DOMAIN\User Name'.
On Windows, a group can own a file or directory. |
379,260 | def netconf_session_end_termination_reason(self, **kwargs):
config = ET.Element("config")
netconf_session_end = ET.SubElement(config, "netconf-session-end", xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-notifications")
termination_reason = ET.SubElement(netconf_session_end, "termination-reason")
termination_reason.text = kwargs.pop('termination_reason')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
379,261 | def main():
args = docopt(__doc__, version=__version__)
if in args:
bam_coverage(args[],
args[],
int(args[]),
min_mapq=int(args[]),
min_len=float(args[])) | Main entry point for the bioinfo CLI. |
379,262 | def makeLUTfromCTF(sclist, N=None):
ctf = vtk.vtkColorTransferFunction()
ctf.SetColorSpaceToDiverging()
for sc in sclist:
scalar, col = sc
r, g, b = getColor(col)
ctf.AddRGBPoint(scalar, r, g, b)
if N is None:
N = len(sclist)
lut = vtk.vtkLookupTable()
lut.SetNumberOfTableValues(N)
lut.Build()
for i in range(N):
rgb = list(ctf.GetColor(float(i) / N)) + [1]
lut.SetTableValue(i, rgb)
return lut | Use a Color Transfer Function to generate colors in a vtk lookup table.
See `here <http://www.vtk.org/doc/nightly/html/classvtkColorTransferFunction.html>`_.
:param list sclist: a list in the form ``[(scalar1, [r,g,b]), (scalar2, 'blue'), ...]``.
:return: the lookup table object ``vtkLookupTable``. This can be fed into ``colorMap``. |
379,263 | def blast_seqs_to_pdb(self, seq_ident_cutoff=0, evalue=0.0001, all_genes=False, display_link=False,
outdir=None, force_rerun=False):
counter = 0
for g in tqdm(self.genes_with_a_representative_sequence):
if g.protein.num_structures_experimental > 0 and not all_genes and not force_rerun:
log.debug(
.format(g.id,
g.protein.num_structures_experimental))
continue
new_pdbs = g.protein.blast_representative_sequence_to_pdb(seq_ident_cutoff=seq_ident_cutoff,
evalue=evalue,
display_link=display_link,
outdir=outdir,
force_rerun=force_rerun)
if new_pdbs:
counter += 1
log.debug(.format(g.id, len(new_pdbs)))
else:
log.debug(.format(g.id))
log.info()
log.info(.format(counter)) | BLAST each representative protein sequence to the PDB. Saves raw BLAST results (XML files).
Args:
seq_ident_cutoff (float, optional): Cutoff results based on percent coverage (in decimal form)
evalue (float, optional): Cutoff for the E-value - filters for significant hits. 0.001 is liberal,
0.0001 is stringent (default).
all_genes (bool): If all genes should be BLASTed, or only those without any structures currently mapped
display_link (bool, optional): Set to True if links to the HTML results should be displayed
outdir (str): Path to output directory of downloaded files, must be set if GEM-PRO directories
were not created initially
force_rerun (bool, optional): If existing BLAST results should not be used, set to True. Default is False |
379,264 | def get_mean_and_stddevs(self, sctx, rctx, dctx, imt, stddev_types):
mean, stddevs = super().get_mean_and_stddevs(
sctx, rctx, dctx, imt, stddev_types)
cff = SInterCan15Mid.SITE_COEFFS[imt]
mean += np.log(cff[])
return mean, stddevs | See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values. |
379,265 | def reset_generation(self, trigger):
self.tone_lock.acquire()
npts = self.stim.size
try:
self.aotask = AOTaskFinite(self.aochan, self.fs, npts, trigsrc=trigger)
self.aotask.write(self.stim)
if self.attenuator is not None:
self.attenuator.SetAtten(self.atten)
else:
pass
self.ngenerated +=1
if self.stim_changed:
new_gen = self.stim
else:
new_gen = None
self.stim_changed = False
except:
print u
self.tone_lock.release()
raise
self.tone_lock.release()
return new_gen | Re-arms the analog output according to current settings
:param trigger: name of the trigger terminal. ``None`` value means generation begins immediately on run
:type trigger: str |
379,266 | def visit_BinOp(self, node: AST, dfltChaining: bool = True) -> str:
op = node.op
with self.op_man(op):
if isinstance(op, ast.Pow):
src = self.visit(op).join((self.visit(node.left,
dfltChaining=False),
self.visit(node.right)))
else:
src = self.visit(op).join((self.visit(node.left),
self.visit(node.right,
dfltChaining=False)))
return self.wrap_expr(src, dfltChaining) | Return `node`s operator and operands as inlined expression. |
379,267 | def create(self, fail_on_found=False, force_on_exists=False, **kwargs):
config_item = self._separate(kwargs)
jt_id = kwargs.pop('job_template', None)
status = kwargs.pop('status', 'any')  # default status value assumed; original literal lost
old_endpoint = self.endpoint
if jt_id is not None:
jt = get_resource('job_template')
jt.get(pk=jt_id)
try:
nt_id = self.get(**copy.deepcopy(kwargs))['id']
except exc.NotFound:
pass
else:
if fail_on_found:
raise exc.TowerCLIError(
)
else:
debug.log(
,
header=)
return jt.associate_notification_template(
jt_id, nt_id, status=status)
self.endpoint = '/job_templates/%d/notification_templates_%s/' % \
(jt_id, status)
self._configuration(kwargs, config_item)
result = super(Resource, self).create(**kwargs)
self.endpoint = old_endpoint
return result | Create a notification template.
All required configuration-related fields (required according to
notification_type) must be provided.
There are two types of notification template creation: isolatedly
creating a new notification template and creating a new notification
template under a job template. Here the two types are discriminated by
whether to provide --job-template option. --status option controls
more specific, job-run-status-related association.
Fields in the resource's `identity` tuple are used for a lookup;
if a match is found, then no-op (unless `force_on_exists` is set) but
do not fail (unless `fail_on_found` is set).
=====API DOCS=====
Create an object.
:param fail_on_found: Flag that if set, the operation fails if an object matching the unique criteria
already exists.
:type fail_on_found: bool
:param force_on_exists: Flag that if set, then if a match is found on unique fields, other fields will
be updated to the provided values.; If unset, a match causes the request to be
a no-op.
:type force_on_exists: bool
:param `**kwargs`: Keyword arguments which, all together, will be used as POST body to create the
resource object.
:returns: A dictionary combining the JSON output of the created resource, as well as two extra fields:
"changed", a flag indicating if the resource is created successfully; "id", an integer which
is the primary key of the created object.
:rtype: dict
=====API DOCS===== |
379,268 | def _get_link_status_code(link, allow_redirects=False, timeout=5):
status_code = None
try:
response = requests.get(
link, allow_redirects=allow_redirects, timeout=timeout)
status_code = response.status_code
except Exception:
status_code = 404
return status_code | Get the status code of a link.
If the timeout is exceeded, will return a 404.
For a list of available status codes, see:
https://en.wikipedia.org/wiki/List_of_HTTP_status_codes |
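A short usage sketch for `_get_link_status_code` (the URL is illustrative; any request failure, including a timeout, is reported as 404):

```python
status = _get_link_status_code('https://example.com', allow_redirects=False, timeout=5)
print(status)  # e.g. 200, or 404 if the request failed or timed out
```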
379,269 | def toDict(self):
return {
'hsps': [hsp.toDict() for hsp in self.hsps],
'read': self.read.toDict(),
} | Get information about a title alignment as a dictionary.
@return: A C{dict} representation of the title aligment. |
379,270 | def _compute(self):
i = 0
while i < len(self.setd):
if self.ucld:
self.do_cld_check(self.setd[i:])
i = 0
if self.setd:
if self.oracle.solve(assumptions=self.ss_assumps + self.bb_assumps + [self.setd[i]]):
self._filter_satisfied()
else:
self.bb_assumps.append(-self.setd[i])
i += 1 | The main method of the class, which computes an MCS given its
over-approximation. The over-approximation is defined by a model
for the hard part of the formula obtained in :func:`compute`.
The method is essentially a simple loop going over all literals
unsatisfied by the previous model, i.e. the literals of
``self.setd`` and checking which literals can be satisfied. This
process can be seen a refinement of the over-approximation of the
MCS. The algorithm follows the pseudo-code of the LBX algorithm
presented in [1]_.
Additionally, if :class:`LBX` was constructed with the requirement
to make "clause :math:`D`" calls, the method calls
:func:`do_cld_check` at every iteration of the loop using the
literals of ``self.setd`` not yet checked, as the contents of
"clause :math:`D`". |
379,271 | def delete_user(self, user_email):
res = self.get_user_ids([user_email])
if res[0] == False:
return res
userid = res[1][0]
res = requests.delete(self.url + + str(userid), headers=self.hdrs, verify=self.ssl_verify)
if not self._checkResponse(res):
return [False, self.lasterr]
return [True, None] | **Description**
Deletes a user from Sysdig Monitor.
**Arguments**
- **user_email**: the email address of the user that will be deleted from Sysdig Monitor
**Example**
`examples/user_team_mgmt.py <https://github.com/draios/python-sdc-client/blob/master/examples/user_team_mgmt.py>`_ |
379,272 | def get_content_metadata(self, enterprise_customer):
content_metadata = OrderedDict()
if enterprise_customer.catalog:
response = self._load_data(
self.ENTERPRISE_CUSTOMER_ENDPOINT,
detail_resource=,
resource_id=str(enterprise_customer.uuid),
traverse_pagination=True,
)
for course in response[]:
for course_run in course[]:
course_run[] =
content_metadata[course_run[]] = course_run
for enterprise_customer_catalog in enterprise_customer.enterprise_customer_catalogs.all():
response = self._load_data(
self.ENTERPRISE_CUSTOMER_CATALOGS_ENDPOINT,
resource_id=str(enterprise_customer_catalog.uuid),
traverse_pagination=True,
querystring={: 1000},
)
for item in response[]:
content_id = utils.get_content_metadata_item_id(item)
content_metadata[content_id] = item
return content_metadata.values() | Return all content metadata contained in the catalogs associated with the EnterpriseCustomer.
Arguments:
enterprise_customer (EnterpriseCustomer): The EnterpriseCustomer to return content metadata for.
Returns:
list: List of dicts containing content metadata. |
379,273 | def get_failing_line(xml_string, exc_msg):
max_before = 500
max_after = 500
max_unknown = 1000
assert isinstance(xml_string, six.binary_type)
m = re.search(r, exc_msg)
if not m:
xml_string, _ = truncate_line(xml_string, 1, 0, max_unknown - 1)
return None, None, None, xml_string
lineno = int(m.group(1))
colno = int(m.group(2))
if not xml_string.endswith(b'\n'):
xml_string += b'\n'
xml_lines = xml_string.splitlines()
if len(xml_lines) < lineno:
xml_string, _ = truncate_line(xml_string, 1, 0, max_unknown - 1)
return None, None, None, xml_string
line = xml_lines[lineno - 1]
line, new_pos = truncate_line(line, colno, max_before, max_after)
return lineno, colno, new_pos, line | Extract the failing line from the XML string, as indicated by the
line/column information in the exception message.
Returns a tuple (lineno, colno, new_pos, line), where lineno and colno
and marker_pos may be None. |
379,274 | def get_evcodes(self, inc_set=None, exc_set=None):
codes = self.get_evcodes_all(inc_set, exc_set)
codes.discard('ND')
return codes | Get evidence codes for all, but NOT 'No biological Data available' (ND) |
379,275 | def get_unit_property_names(self, unit_id=None):
if unit_id is None:
property_names = []
for unit_id in self.get_unit_ids():
curr_property_names = self.get_unit_property_names(unit_id)
for curr_property_name in curr_property_names:
property_names.append(curr_property_name)
property_names = sorted(list(set(property_names)))
return property_names
if isinstance(unit_id, (int, np.integer)):
if unit_id in self.get_unit_ids():
if unit_id not in self._unit_properties:
self._unit_properties[unit_id] = {}
property_names = sorted(self._unit_properties[unit_id].keys())
return property_names
else:
raise ValueError(str(unit_id) + " is not a valid unit_id")
else:
raise ValueError(str(unit_id) + " must be an int") | Get a list of property names for a given unit, or for all units if unit_id is None
Parameters
----------
unit_id: int
The unit id for which the property names will be returned
If None (default), will return property names for all units
Returns
----------
property_names
The list of property names from the specified unit(s) |
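A brief usage sketch, assuming `sorting` is a spikeextractors-style SortingExtractor exposing the method above:
names_unit_3 = sorting.get_unit_property_names(unit_id=3)   # properties of one unit
all_names = sorting.get_unit_property_names()               # unit_id=None -> union over all units
print(names_unit_3, all_names)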
379,276 | def _convert_to_bytes(type_name, value):
int_types = {: , : , : , : , : , : }
type_name = type_name.lower()
if type_name not in int_types and type_name not in [, ]:
raise ArgumentError(, known_integers=int_types.keys(), actual_type=type_name)
if type_name == :
bytevalue = bytes(value)
elif type_name == :
bytevalue = bytes(value)
else:
bytevalue = struct.pack("<%s" % int_types[type_name], value)
return bytevalue | Convert a typed value to a binary array |
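For the integer branch, the helper relies on struct.pack with a little-endian format code; the type-name keys are stripped in the code above, so the mapping in this standalone sketch is assumed:
import struct

int_types = {'uint8': 'B', 'uint16': 'H', 'uint32': 'L'}    # assumed subset of the stripped mapping
value = 1024
print(struct.pack("<%s" % int_types['uint16'], value))      # b'\x00\x04'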
379,277 | def grep(expression, file, flags=0, invert=False):
if isinstance(file, str):
file = open(file)
lines = []
for line in file:
if bool(re.search(expression, line, flags=flags)) ^ invert:
lines.append(line)
return lines | Search a file and return a list of all lines that match a regular expression.
:param str expression: The regex to search for.
:param file: The file to search in.
:type file: str, file
:param int flags: The regex flags to use when searching.
:param bool invert: Select non matching lines instead.
:return: All the matching lines.
:rtype: list |
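A short usage example of the grep helper above; the log file path is hypothetical:
import re

# Case-insensitive search; invert=True would return the non-matching lines instead.
errors = grep(r'error', '/var/log/app.log', flags=re.IGNORECASE)
for line in errors:
    print(line.rstrip())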
379,278 | def _escape_sequence(self, char):
num = ord(char)
if char == "[":
self.state = "escape-lb"
elif char == "(":
self.state = "charset-g0"
elif char == ")":
self.state = "charset-g1"
elif num in self.escape:
self.dispatch(self.escape[num])
self.state = "stream"
elif self.fail_on_unknown_esc:
raise StreamProcessError("Unexpected character %r (ordinal %d)" % (char, ord(char))) | Handle characters seen when in an escape sequence. Most non-vt52
commands start with a left-bracket after the escape and then a
stream of parameters and a command. |
379,279 | def plot_campaign_outline(self, campaign=0, facecolor="#666666", text=None):  # text restored from its use below; the facecolor default is assumed
fov = getKeplerFov(campaign)
corners = fov.getCoordsOfChannelCorners()
for rectangle in [[4, 75, 84, 11], [15, 56, 71, 32]]:
ra_outline, dec_outline = [], []
for channel in rectangle:
idx = np.where(corners[::, 2] == channel)
ra_outline.append(corners[idx, 3][0][0])
dec_outline.append(corners[idx, 4][0][0])
ra = np.array(ra_outline + ra_outline[:1])
dec = np.array(dec_outline + dec_outline[:1])
if campaign == 1002:
ra[ra > 180] -= 360
myfill = self.ax.fill(ra, dec,
facecolor=facecolor,
zorder=151, lw=0)
if text is None:
text = "{}".format(campaign)
ra_center, dec_center, _ = fov.getBoresight()
if campaign == 6:
dec_center -= 2
elif campaign == 12:
ra_center += 0.5
dec_center -= 1.7
elif campaign == 13:
dec_center -= 1.5
elif campaign == 16:
dec_center += 1.5
elif campaign == 18:
dec_center -= 1.5
elif campaign == 19:
dec_center += 1.7
elif campaign == 20:
dec_center += 1.5
offsets = {5: (40, -20), 16: (-20, 40), 18: (-15, -50)}
if campaign in [5]:
pl.annotate(text, xy=(ra_center, dec_center),
xycoords=, ha=,
xytext=offsets[campaign], textcoords=,
size=18, zorder=0, color=facecolor,
arrowprops=dict(arrowstyle="-", ec=facecolor, lw=2))
else:
self.ax.text(ra_center, dec_center, text,
fontsize=18, color="white",
ha="center", va="center",
zorder=155)
return myfill | Plot the outline of a campaign as a contiguous gray patch.
Parameters
----------
campaign : int
K2 Campaign number.
facecolor : str
Color of the patch. |
379,280 | def asbaseline(self, pos):
if not is_measure(pos) or pos[] not in [, ]:
raise TypeError()
if pos[] == :
loc = self.measure(pos, )
loc[] =
return self.measure(loc, )
return pos | Convert a position measure into a baseline measure. No actual
baseline is calculated, since operations can be done on positions,
with subtractions to obtain baselines at a later stage.
:param pos: a position measure
:returns: a baseline measure |
379,281 | def get_permissions(self):
path = Client.urls[]
conns = self._call(path, )
return conns | :returns: list of dicts, or an empty list if there are no permissions. |
379,282 | def get_memory_usage(pid=None,timeout=1):
rss = []
vms = []
pid = get_pid(pid)
process = psutil.Process(pid)
print(process.status())
while process.status() == 'running':  # psutil status string; assumed from context
mem = process.memory_info()
rss.append(mem.rss)
vms.append(mem.vms)
time.sleep(timeout)
result = {"rss":rss,"vms":vms}
return result | get_memory_usage returns a dictionary of resident set size (rss) and virtual
memory size (vms) for a process of interest, for as long as the process is running
:param pid: the pid to use:
:param timeout: the timeout
:: notes
example:
sleep 3 & exec python -m memory "$!" |
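A minimal sketch of polling a child process with get_memory_usage, assuming the helper returns the rss/vms dictionary once the target process stops running; the workload command is hypothetical:
import subprocess

proc = subprocess.Popen(['sleep', '3'])             # hypothetical workload
usage = get_memory_usage(pid=proc.pid, timeout=1)   # sample roughly once per second
print(max(usage['rss']), max(usage['vms']))         # peak resident / virtual size in bytes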
379,283 | def _add_entry(self, formdata=None, data=unset_value, index=None):
if formdata:
prefix = .join((self.name, str(index)))
basekey = .join((prefix, ))
idkey = basekey.format()
if prefix in formdata:
formdata[idkey] = formdata.pop(prefix)
if hasattr(self.nested_model, ) and idkey in formdata:
id = self.nested_model.id.to_python(formdata[idkey])
data = get_by(self.initial_data, , id)
initial = flatten_json(self.nested_form,
data.to_mongo(),
prefix)
for key, value in initial.items():
if key not in formdata:
formdata[key] = value
else:
data = None
return super(NestedModelList, self)._add_entry(formdata, data, index) | Fill the form with previous data if necessary to handle partial update |
379,284 | def bunzip2(filename):
log.debug("Uncompressing %s", filename)
tmpfile = "%s.tmp" % filename
os.rename(filename, tmpfile)
b = bz2.BZ2File(tmpfile)
f = open(filename, "wb")
while True:
block = b.read(512 * 1024)
if not block:
break
f.write(block)
f.close()
b.close()
shutil.copystat(tmpfile, filename)
shutil.copymode(tmpfile, filename)
os.unlink(tmpfile) | Uncompress `filename` in place |
379,285 | def commit(self, **kwargs):
if self.model is None or self.model.json is None:
raise MissingModelError()
with db.session.begin_nested():
before_record_update.send(
current_app._get_current_object(),
record=self
)
self.validate(**kwargs)
self.model.json = dict(self)
flag_modified(self.model, 'json')  # mark the JSON column dirty; field name inferred from the assignment above
db.session.merge(self.model)
after_record_update.send(
current_app._get_current_object(),
record=self
)
return self | r"""Store changes of the current record instance in the database.
#. Send a signal :data:`invenio_records.signals.before_record_update`
with the current record to be committed as parameter.
#. Validate the current record data.
#. Commit the current record in the database.
#. Send a signal :data:`invenio_records.signals.after_record_update`
with the committed record as parameter.
:Keyword Arguments:
* **format_checker** --
An instance of the class :class:`jsonschema.FormatChecker`, which
contains validation rules for formats. See
:func:`~invenio_records.api.RecordBase.validate` for more details.
* **validator** --
A :class:`jsonschema.IValidator` class that will be used to
validate the record. See
:func:`~invenio_records.api.RecordBase.validate` for more details.
:returns: The :class:`Record` instance. |
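A hedged usage sketch of the commit flow, assuming the Invenio-Records Record API (Record.get_record and the outer db.session.commit are not shown above) and a hypothetical record_uuid:
from invenio_db import db                      # assumed import
from invenio_records.api import Record         # assumed import

record = Record.get_record(record_uuid)        # record_uuid is hypothetical
record['title'] = 'Updated title'
record.commit()                                # validates and stages the JSON change
db.session.commit()                            # persist the transaction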
379,286 | def open_new_window(self, switch_to=True):
self.driver.execute_script("window.open();")
time.sleep(0.01)
if switch_to:
self.switch_to_window(len(self.driver.window_handles) - 1) | Opens a new browser tab/window and switches to it by default. |
379,287 | def _add_app_menu(self):
mainMenu = AppKit.NSMenu.alloc().init()
self.app.setMainMenu_(mainMenu)
mainAppMenuItem = AppKit.NSMenuItem.alloc().init()
mainMenu.addItem_(mainAppMenuItem)
appMenu = AppKit.NSMenu.alloc().init()
mainAppMenuItem.setSubmenu_(appMenu)
appMenu.addItemWithTitle_action_keyEquivalent_(self._append_app_name(localization["cocoa.menu.about"]), "orderFrontStandardAboutPanel:", "")
appMenu.addItem_(AppKit.NSMenuItem.separatorItem())
appServicesMenu = AppKit.NSMenu.alloc().init()
self.app.setServicesMenu_(appServicesMenu)
servicesMenuItem = appMenu.addItemWithTitle_action_keyEquivalent_(localization["cocoa.menu.services"], nil, "")
servicesMenuItem.setSubmenu_(appServicesMenu)
appMenu.addItem_(AppKit.NSMenuItem.separatorItem())
appMenu.addItemWithTitle_action_keyEquivalent_(self._append_app_name(localization["cocoa.menu.hide"]), "hide:", "h")
hideOthersMenuItem = appMenu.addItemWithTitle_action_keyEquivalent_(localization["cocoa.menu.hideOthers"], "hideOtherApplications:", "h")
hideOthersMenuItem.setKeyEquivalentModifierMask_(AppKit.NSAlternateKeyMask | AppKit.NSCommandKeyMask)
appMenu.addItemWithTitle_action_keyEquivalent_(localization["cocoa.menu.showAll"], "unhideAllApplications:", "")
appMenu.addItem_(AppKit.NSMenuItem.separatorItem())
appMenu.addItemWithTitle_action_keyEquivalent_(self._append_app_name(localization["cocoa.menu.quit"]), "terminate:", "q") | Create a default Cocoa menu that shows 'Services', 'Hide',
'Hide Others', 'Show All', and 'Quit'. Will append the application name
to some menu items if it's available. |
379,288 | def ingest(self):
self.log.debug()
self.primaryIdColumnName = "primaryId"
self.raColName = "raDeg"
self.declColName = "decDeg"
self.dbTableName = "tcs_cat_ifs_stream"
self.databaseInsertbatchSize = 500
dictList = self._create_dictionary_of_IFS()
tableName = self.dbTableName
createStatement = % locals()
self.add_data_to_database_table(
dictList=dictList,
createStatement=createStatement
)
self.log.debug()
return None | *Import the IFS catalogue into the sherlock-catalogues database*
The method first generates a list of python dictionaries from the IFS datafile, imports this list of dictionaries into a database table and then generates the HTMIDs for that table.
**Usage:**
See class docstring for usage |
379,289 | def _get_minutes_from_last_update(self, time):
time_from_last_update = time - self.last_update_time
return int(time_from_last_update.total_seconds() / 60) | How many minutes have passed from the last update to the given time. |
379,290 | def course_modal(context, course=None):
if course:
context.update({
: course.get(, ),
: course.get(, ),
: course.get(, ),
: course.get(, ),
: course.get(, ),
: course.get(, ),
: course.get(, []),
: course.get(, []),
: course.get(, []),
})
return context | Django template tag that returns course information to display in a modal.
You may pass in a particular course if you like. Otherwise, the modal will look for course context
within the parent context.
Usage:
{% course_modal %}
{% course_modal course %} |
379,291 | def display(self, data, x=None, y=None, xlabel=None, ylabel=None,
style=None, nlevels=None, levels=None, contour_labels=None,
store_data=True, col=0, unzoom=True, auto_contrast=False,
contrast_level=0, **kws):
if style is not None:
self.conf.style = style
self.axes.cla()
conf = self.conf
conf.log_scale = False
conf.rot, conf.flip_ud, conf.flip_lr = False, False, False
conf.highlight_areas = []
if 1 in data.shape:
data = data.squeeze()
self.data_shape = data.shape
self.data_range = [0, data.shape[1], 0, data.shape[0]]
conf.contrast_level = contrast_level
if auto_contrast:
conf.contrast_level = 1
if x is not None:
self.xdata = np.array(x)
if self.xdata.shape[0] != data.shape[1]:
self.xdata = None
if y is not None:
self.ydata = np.array(y)
if self.ydata.shape[0] != data.shape[0]:
self.ydata = None
if xlabel is not None:
self.xlab = xlabel
if ylabel is not None:
self.ylab = ylabel
if store_data:
self.conf.data = data
cmap = self.conf.cmap[col]
if self.conf.style == 'contour':  # assumed: this branch builds the contour/contourf plot
if levels is None:
levels = self.conf.ncontour_levels
else:
self.conf.ncontour_levels = levels
if nlevels is None:
nlevels = self.conf.ncontour_levels = 9
nlevels = max(2, nlevels)
clevels = np.linspace(data.min(), data.max(), nlevels+1)
self.conf.contour_levels = clevels
self.conf.image = self.axes.contourf(data, cmap=self.conf.cmap[col],
levels=clevels)
self.conf.contour = self.axes.contour(data, cmap=self.conf.cmap[col],
levels=clevels)
cmap_name = self.conf.cmap[col].name
xname =
try:
if cmap_name == :
xname =
elif cmap_name == :
xname =
elif cmap_name.endswith():
xname =
except:
pass
self.conf.contour.set_cmap(getattr(colormap, xname))
if contour_labels is None:
contour_labels = self.conf.contour_labels
if contour_labels:
self.axes.clabel(self.conf.contour, fontsize=10, inline=1)
if hasattr(self.contour_callback, '__call__'):
self.contour_callback(levels=clevels)
else:
if data.max() == data.min():
img = data
else:
img = (data - data.min()) /(1.0*data.max() - data.min())
self.conf.image = self.axes.imshow(img, cmap=self.conf.cmap[col],
interpolation=self.conf.interp)
self.axes.set_axis_off()
if unzoom:
self.unzoom_all()
if hasattr(self.data_callback, '__call__'):
self.data_callback(data, x=x, y=y, **kws)
self.conf.indices = None
self.indices_thread = Thread(target=self.calc_indices, args=(data.shape, ))
self.indices_thread.start() | generic display, using imshow (default) or contour |
379,292 | def highlight(self, **kwargs):
self.debug_log("Highlighting element")
style = kwargs.get('style')
highlight_time = kwargs.get('highlight_time', .3)
driver = self._element._parent
try:
original_style = self._element.get_attribute('style')
driver.execute_script(
"arguments[0].setAttribute(, arguments[1]);",
self._element,
style
)
except Exception as e:
self.error_log("highlight exception: %s" % str(e))
sleep(highlight_time)
try:
driver.execute_script(
"arguments[0].setAttribute(, arguments[1]);",
self._element,
original_style
)
except Exception as e:
self.error_log("highlight exception: %s" % str(e))
return True | kwargs:
style: css
highlight_time: int; default: .3 |
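A small usage sketch, assuming the wrapper element object exposes highlight as above and accepts CSS in the style kwarg; the element lookup is hypothetical:
element = browser.find('#submit-button')       # hypothetical element lookup
element.highlight(style='border: 3px solid red;', highlight_time=0.5)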
379,293 | def persist(self, storageLevel=StorageLevel.MEMORY_AND_DISK):
self.is_cached = True
javaStorageLevel = self._sc._getJavaStorageLevel(storageLevel)
self._jdf.persist(javaStorageLevel)
return self | Sets the storage level to persist the contents of the :class:`DataFrame` across
operations after the first time it is computed. This can only be used to assign
a new storage level if the :class:`DataFrame` does not have a storage level set yet.
If no storage level is specified defaults to (C{MEMORY_AND_DISK}).
.. note:: The default storage level has changed to C{MEMORY_AND_DISK} to match Scala in 2.0. |
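A standard PySpark usage example for persist:
from pyspark.sql import SparkSession
from pyspark import StorageLevel

spark = SparkSession.builder.getOrCreate()
df = spark.range(1000)
df.persist(StorageLevel.MEMORY_ONLY)   # default would be MEMORY_AND_DISK
df.count()                             # first action materializes the cache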
379,294 | def preview(src_path):
previews = []
for page in list_artboards(src_path):
previews.append(page.export())
for artboard in page.artboards:
previews.append(artboard.export())
return previews | Generates a preview of src_path as PNG.
:returns: A list of preview paths, one for each page. |
379,295 | def barmatch2(data, tups, cutters, longbar, matchdict, fnum):
waitchunk = int(1e6)
epid = os.getpid()
filestat = np.zeros(3, dtype=np.int)
samplehits = {}
dsort1 = {}
dsort2 = {}
dbars = {}
for sname in data.barcodes:
if "-technical-replicate-" in sname:
sname = sname.rsplit("-technical-replicate", 1)[0]
samplehits[sname] = 0
dsort1[sname] = []
dsort2[sname] = []
dbars[sname] = set()
barhits = {}
for barc in matchdict:
barhits[barc] = 0
misses = {}
misses[] = 0
getbarcode = get_barcode_func(data, longbar)
if tups[0].endswith(".gz"):
ofunc = gzip.open
else:
ofunc = open
ofile1 = ofunc(tups[0], )
fr1 = iter(ofile1)
quart1 = itertools.izip(fr1, fr1, fr1, fr1)
if tups[1]:
ofile2 = ofunc(tups[1], )
fr2 = iter(ofile2)
quart2 = itertools.izip(fr2, fr2, fr2, fr2)
quarts = itertools.izip(quart1, quart2)
else:
quarts = itertools.izip(quart1, iter(int, 1))
while 1:
try:
read1, read2 = quarts.next()
read1 = list(read1)
filestat[0] += 1
except StopIteration:
break
barcode = ""
if in data.paramsdict["datatype"]:
if not filestat[0] % waitchunk:
writetofile(data, dsort1, 1, epid)
if in data.paramsdict["datatype"]:
writetofile(data, dsort2, 2, epid)
for sample in data.barcodes:
if "-technical-replicate-" in sname:
sname = sname.rsplit("-technical-replicate", 1)[0]
dsort1[sname] = []
dsort2[sname] = []
ofile1.close()
if tups[1]:
ofile2.close()
writetofile(data, dsort1, 1, epid)
if in data.paramsdict["datatype"]:
writetofile(data, dsort2, 2, epid)
samplestats = [samplehits, barhits, misses, dbars]
outname = os.path.join(data.dirs.fastqs, "tmp_{}_{}.p".format(epid, fnum))
with open(outname, ) as wout:
pickle.dump([filestat, samplestats], wout)
return outname | cleaner barmatch func... |
379,296 | def write(self, country_code, frames, scaling_factors=None):
if scaling_factors is None:
scaling_factors = DEFAULT_SCALING_FACTORS
with self.h5_file(mode=) as h5_file:
h5_file.attrs[] = VERSION
country_group = h5_file.create_group(country_code)
data_group = country_group.create_group(DATA)
index_group = country_group.create_group(INDEX)
lifetimes_group = country_group.create_group(LIFETIMES)
days, sids = days_and_sids_for_frames(list(frames.values()))
index_group.create_dataset(SID, data=sids)
index_group.create_dataset(DAY, data=days.astype(np.int64))
log.debug(
,
index_group.name,
self._filename,
)
start_date_ixs, end_date_ixs = compute_asset_lifetimes(frames)
lifetimes_group.create_dataset(START_DATE, data=start_date_ixs)
lifetimes_group.create_dataset(END_DATE, data=end_date_ixs)
if len(sids):
chunks = (len(sids), min(self._date_chunk_size, len(days)))
else:
chunks = None
for field in FIELDS:
frame = frames[field]
frame.sort_index(inplace=True)
frame.sort_index(axis=, inplace=True)
data = coerce_to_uint32(
frame.T.fillna(0).values,
scaling_factors[field],
)
dataset = data_group.create_dataset(
field,
compression=,
shuffle=True,
data=data,
chunks=chunks,
)
dataset.attrs[SCALING_FACTOR] = scaling_factors[field]
log.debug(
,
dataset.name, self._filename
) | Write the OHLCV data for one country to the HDF5 file.
Parameters
----------
country_code : str
The ISO 3166 alpha-2 country code for this country.
frames : dict[str, pd.DataFrame]
A dict mapping each OHLCV field to a dataframe with a row
for each date and a column for each sid. The dataframes need
to have the same index and columns.
scaling_factors : dict[str, float], optional
A dict mapping each OHLCV field to a scaling factor, which
is applied (as a multiplier) to the values of field to
efficiently store them as uint32, while maintaining desired
precision. These factors are written to the file as metadata,
which is consumed by the reader to adjust back to the original
float values. Default is None, in which case
DEFAULT_SCALING_FACTORS is used. |
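A hedged sketch of calling the writer, assuming a writer class named HDF5DailyBarWriter and its constructor arguments (neither is shown above), with OHLCV frames indexed by date and one column per sid:
import numpy as np
import pandas as pd

dates = pd.date_range('2016-01-04', periods=5)    # hypothetical trading days
sids = [1, 2, 3]                                  # hypothetical asset ids
frames = {
    field: pd.DataFrame(np.random.rand(len(dates), len(sids)) * 100,
                        index=dates, columns=sids)
    for field in ('open', 'high', 'low', 'close', 'volume')
}
writer = HDF5DailyBarWriter('daily_bars.h5', date_chunk_size=30)  # assumed class and constructor
writer.write('US', frames)                                        # uses DEFAULT_SCALING_FACTORS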
379,297 | def create(self, target, configuration_url=values.unset,
configuration_method=values.unset,
configuration_filters=values.unset,
configuration_triggers=values.unset,
configuration_flow_sid=values.unset,
configuration_retry_count=values.unset,
configuration_replay_after=values.unset,
configuration_buffer_messages=values.unset,
configuration_buffer_window=values.unset):
data = values.of({
: target,
: configuration_url,
: configuration_method,
: serialize.map(configuration_filters, lambda e: e),
: serialize.map(configuration_triggers, lambda e: e),
: configuration_flow_sid,
: configuration_retry_count,
: configuration_replay_after,
: configuration_buffer_messages,
: configuration_buffer_window,
})
payload = self._version.create(
,
self._uri,
data=data,
)
return WebhookInstance(self._version, payload, session_sid=self._solution[], ) | Create a new WebhookInstance
:param WebhookInstance.Target target: The target of this webhook.
:param unicode configuration_url: The absolute url the webhook request should be sent to.
:param WebhookInstance.Method configuration_method: The HTTP method to be used when sending a webhook request.
:param unicode configuration_filters: The list of events, firing webhook event for this Session.
:param unicode configuration_triggers: The list of keywords, firing webhook event for this Session.
:param unicode configuration_flow_sid: The studio flow sid, where the webhook should be sent to.
:param unicode configuration_retry_count: The number of retries in case of webhook request failures.
:param unicode configuration_replay_after: The message index for which and it's successors the webhook will be replayed.
:param bool configuration_buffer_messages: The flag whether buffering should be applied to messages.
:param unicode configuration_buffer_window: The period of buffering messages.
:returns: Newly created WebhookInstance
:rtype: twilio.rest.messaging.v1.session.webhook.WebhookInstance |
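A usage sketch against the Twilio Python client; the accessor chain and the availability of the Messaging Sessions preview API are assumptions, and the credentials and sids are placeholders:
from twilio.rest import Client   # assumed import

client = Client('ACXXXXXXXX', 'auth_token')                # hypothetical credentials
webhook = client.messaging.v1.sessions('KCXXXXXXXX').webhooks.create(
    target='webhook',
    configuration_url='https://example.com/hook',
    configuration_method='POST',
)
print(webhook.sid)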
379,298 | def process_tick(self, tup):
curtime = int(time.time())
window_info = WindowContext(curtime - self.window_duration, curtime)
tuple_batch = []
for (tup, tm) in self.current_tuples:
tuple_batch.append(tup)
self.processWindow(window_info, tuple_batch)
self._expire(curtime) | Called every slide_interval |
379,299 | def inverse(self):
inv = self.__class__()
inv.add_nodes(self.nodes())
inv.complete()
for each in self.edges():
if (inv.has_edge(each)):
inv.del_edge(each)
return inv | Return the inverse of the graph.
@rtype: graph
@return: Complement graph for the graph. |
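A brief usage sketch with python-graph (pygraph), which the method's style suggests; the import path is assumed:
from pygraph.classes.graph import graph   # assumed import

gr = graph()
gr.add_nodes(['a', 'b', 'c'])
gr.add_edge(('a', 'b'))
inv = gr.inverse()
print(inv.edges())   # complement contains a-c and b-c (each listed in both directions)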