Unnamed: 0 (int64, 0-389k) | code (stringlengths 26-79.6k) | docstring (stringlengths 1-46.9k) |
---|---|---|
16,800 | def check_error(res, error_enum):
if res.HasField("error"):
enum_name = error_enum.DESCRIPTOR.full_name
error_name = error_enum.Name(res.error)
details = getattr(res, "error_details", "<none>")
raise RequestError("%s.%s: " % (enum_name, error_name, details), res)
return res | Raise if the result has an error, otherwise return the result. |
16,801 | def get_schema_model():
try:
return django_apps.get_model(settings.POSTGRES_SCHEMA_MODEL, require_ready=False)
except ValueError:
raise ImproperlyConfigured("POSTGRES_SCHEMA_MODEL must be of the form ")
except LookupError:
raise ImproperlyConfigured(
"POSTGRES_SCHEMA_MODEL refers to model that has not been installed" % settings.POSTGRES_SCHEMA_MODEL
) | Returns the schema model that is active in this project. |
16,802 | def create(cls, api, run_id=None, project=None, username=None):
run_id = run_id or util.generate_id()
project = project or api.settings.get("project")
mutation = gql()
variables = {: username,
: project, : run_id}
res = api.client.execute(mutation, variable_values=variables)
res = res[][]
return Run(api.client, res["project"]["entity"]["name"], res["project"]["name"], res["name"], {
"id": res["id"],
"config": "{}",
"systemMetrics": "{}",
"summaryMetrics": "{}",
"tags": [],
"description": None,
"state": "running"
}) | Create a run for the given project |
16,803 | def distinct_seeds(k):
    seeds = []
    for _ in range(k):
        while True:
            s = random.randint(0, 2**32 - 1)
            if s not in seeds:
                break
        seeds.append(s)
    return seeds | returns k distinct seeds for random number generation |
16,804 | def delta(self,local=False):
(s,e) = self.get(local)
return e-s | Returns the number of days of difference |
16,805 | def read_user_data(self, user_data_path):
raw_user_data = read_value_from_path(user_data_path)
variables = self.get_variables()
return parse_user_data(variables, raw_user_data, self.name) | Reads and parses a user_data file.
Args:
user_data_path (str):
path to the userdata file
Returns:
str: the parsed user data file |
16,806 | def shrink(self, shrink):
if isinstance(shrink, list):
return self._shrink_list(shrink)
if isinstance(shrink, dict):
return self._shrink_dict(shrink)
return shrink | Remove unnecessary parts
:param shrink: Object to shrink
:type shrink: dict | list
:return: Shrunk object
:rtype: dict | list |
16,807 | def path_components(path):
def yield_components(path):
chars = zip_longest(path, path[1:])
try:
while True:
c, n = next(chars)
if c != :
raise ValueError("Invalid path, expected \"/\"")
elif (n is not None and n != "\"")
else:
yield "".join(component)
break
else:
component += c
except StopIteration:
return
return list(yield_components(path)) | Convert a path into group and channel name components |
16,808 | def get_newest_app_version() -> Version:
with urllib3.PoolManager(cert_reqs=, ca_certs=certifi.where()) as p_man:
pypi_json = p_man.urlopen(, static_data.PYPI_JSON_URL).data.decode()
releases = json.loads(pypi_json).get(, [])
online_version = Version()
for release in releases:
cur_version = Version(release)
if not cur_version.is_prerelease:
online_version = max(online_version, cur_version)
return online_version | Download the version tag from remote.
:return: version from remote
:rtype: ~packaging.version.Version |
16,809 | def normalizeInterpolationFactor(value):
if not isinstance(value, (int, float, list, tuple)):
raise TypeError("Interpolation factor must be an int, float, or tuple "
"instances, not %s." % type(value).__name__)
if isinstance(value, (int, float)):
value = (float(value), float(value))
else:
if not len(value) == 2:
raise ValueError("Interpolation factor tuple must contain two "
"values, not %d." % len(value))
for v in value:
if not isinstance(v, (int, float)):
raise TypeError("Interpolation factor tuple values must be an "
":ref:`type-int-float`, not %s."
% type(v).__name__)
value = tuple([float(v) for v in value])
return value | Normalizes interpolation factor.
* **value** must be an :ref:`type-int-float`, ``tuple`` or ``list``.
* If **value** is a ``tuple`` or ``list``, it must have exactly two items.
These items must be instances of :ref:`type-int-float`.
* Returned value is a ``tuple`` of two ``float``. |
16,810 | def gen_modules(self, initial_load=False):
self.utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(
self.opts,
utils=self.utils,
whitelist=self.whitelist,
initial_load=initial_load)
self.serializers = salt.loader.serializers(self.opts)
if self.mk_returners:
self.returners = salt.loader.returners(self.opts, self.functions)
if self.mk_states:
self.states = salt.loader.states(self.opts,
self.functions,
self.utils,
self.serializers)
if self.mk_rend:
self.rend = salt.loader.render(self.opts, self.functions)
if self.mk_matcher:
self.matchers = salt.loader.matchers(self.opts)
self.functions['sys.reload_modules'] = self.gen_modules | Tell the minion to reload the execution modules
CLI Example:
.. code-block:: bash
salt '*' sys.reload_modules |
16,811 | def echo_via_pager(*args, **kwargs):
try:
restore = not in os.environ
os.environ.setdefault(, )
click.echo_via_pager(*args, **kwargs)
finally:
if restore:
os.environ.pop(, None) | Display pager only if it does not fit in one terminal screen.
NOTE: The feature is available only on ``less``-based pager. |
16,812 | def delete_and_rm_options(*args, **kwargs):
def inner_decorator(f, supports_batch=True, default_enable_globs=False):
f = click.option(
"--recursive", "-r", is_flag=True, help="Recursively delete dirs"
)(f)
f = click.option(
"--ignore-missing",
"-f",
is_flag=True,
help="DonDon\
+ (" Implicit in --batch" if supports_batch else "")
),
)(f)
f = click.option(
"--enable-globs/--no-enable-globs",
is_flag=True,
default=default_enable_globs,
show_default=True,
help=(
"Enable expansion of *, ?, and [ ] characters in the last "
"component of file paths, unless they are escaped with "
"a preceeding backslash, \\"
),
)(f)
if supports_batch:
f = click.option(
"--batch",
is_flag=True,
help=(
"Accept a batch of paths on stdin (i.e. run in "
"batchmode). Uses ENDPOINT_ID as passed on the "
"commandline. Any commandline PATH given will be used "
"as a prefix to all paths given"
),
)(f)
return f
return detect_and_decorate(inner_decorator, args, kwargs) | Options which apply both to `globus delete` and `globus rm` |
16,813 | def gen_file_lines(path, mode=, strip_eol=True, ascii=True, eol=):
if isinstance(path, str):
path = open(path, mode)
with path:
for line in path:
if ascii:
line = str(line)
if strip_eol:
line = line.rstrip()
yield line | Generate a sequence of "documents" from the lines in a file
Arguments:
path (file or str): path to a file or an open file_obj ready to be read
mode (str): file mode to open a file in
strip_eol (bool): whether to strip the EOL char from lines as they are read/generated/yielded
ascii (bool): whether to use the stringify and to_ascii functions on each line
eol (str): UNUSED character delimiting lines in the file
TODO:
Use `eol` to split lines (currently ignored because `file.readline` doesn't have an EOL arg) |
16,814 | def _match_member(self, i, column):
self.col_match = self.RE_MEMBERS.match(self._source[i])
if self.col_match is not None:
if column < self._source[i].index(":"):
self.el_call = "name"
else:
self.el_call = "assign"
return True
else:
return False | Looks at line 'i' to see if the line matches a module member def. |
16,815 | async def connect_controller(self, controller_name=None):
if not controller_name:
controller_name = self.jujudata.current_controller()
if not controller_name:
raise JujuConnectionError()
controller = self.jujudata.controllers()[controller_name]
endpoint = controller[][0]
accounts = self.jujudata.accounts().get(controller_name, {})
await self.connect(
endpoint=endpoint,
uuid=None,
username=accounts.get(),
password=accounts.get(),
cacert=controller.get(),
bakery_client=self.bakery_client_for_controller(controller_name),
)
self.controller_name = controller_name | Connect to a controller by name. If the name is empty, it
connects to the current controller. |
16,816 | def GetPeaksExons(bed,parsedGTF):
bedtool_AB=dfTObedtool(bed)
exonsGTF=parsedGTF[parsedGTF["feature"]=="exon"]
exonsGTF.reset_index(inplace=True, drop=True)
exonsBED=GTFtoBED(exonsGTF, "exon_id")
exonsBED.columns=[, , , , , ]
exonsBEDcols=exonsBED.columns.tolist()
bedcols=bed.columns.tolist()
exonsBEDcols_=[]
for c in exonsBEDcols:
if c in bedcols:
exonsBEDcols_.append(c+"_exon")
else:
exonsBEDcols_.append(c)
cols=[bedcols,exonsBEDcols_,["overlap"] ]
cols=[item for sublist in cols for item in sublist]
bedtool_exons=dfTObedtool(exonsBED)
bedtool_target_exons=bedtool_AB.intersect(bedtool_exons, wo=True, s=True)
dfTargetE=pd.read_table(bedtool_target_exons.fn, names=cols)
ExonsTransGenes=parsedGTF[["exon_id","transcript_id","gene_id"]].drop_duplicates()
dfTargets=pd.merge(dfTargetE,ExonsTransGenes,on=["exon_id"],how="left")
dfTargets["count"]=1
def getCounts(df,field):
tmp=df[[field,,"count"]].drop_duplicates()
tmp=tmp.drop(["name"],axis=1)
tmp["count"]=tmp["count"].astype(int)
tmp.columns=[field,"%s_count" %str(field)]
tmp=tmp.groupby(field, as_index=False).sum()
df=pd.merge(df,tmp,on=field,how="left")
tmp=df[[field,,"-log10(pValue)"]].drop_duplicates()
tmp=tmp.drop(["name"],axis=1)
tmp["-log10(pValue)"]=tmp["-log10(pValue)"].astype(float)
tmp=tmp.groupby(field).apply(lambda l: reduce(lambda x, y: x*y, l["-log10(pValue)"]) )
tmp=pd.DataFrame(tmp)
tmp.reset_index(inplace=True,drop=False)
tmp.columns=[field,"%s norm. mean -log10(pValue)" %str(field)]
df=pd.merge(df,tmp,on=field,how="left")
tmp=df[[field,,"signalValue"]].drop_duplicates()
tmp=tmp.drop(["name"],axis=1)
tmp["signalValue"]=tmp["signalValue"].astype(float)
tmp=tmp.groupby(field).apply(lambda l: reduce(lambda x, y: x*y, l["signalValue"]) )
tmp=pd.DataFrame(tmp)
tmp.reset_index(inplace=True,drop=False)
tmp.columns=[field,"%s signalValue" %str(field)]
df=pd.merge(df,tmp,on=field,how="left")
return df
for f in ["exon_id","transcript_id"]:
dfTargets=getCounts(dfTargets,f)
def getCounts_GeneIDs(df):
field="gene_id"
tmp=df[[field,"transcript_id","transcript_id_count"]].drop_duplicates()
tmp=tmp.drop(["transcript_id"],axis=1)
tmp["transcript_id_count"]=tmp["transcript_id_count"].astype(int)
tmp.columns=[field,"%s_count" %str(field)]
tmp=tmp.groupby(field, as_index=False).sum()
df=pd.merge(df,tmp,on=field,how="left")
tmp=df[[field,,"transcript_id norm. mean -log10(pValue)"]].drop_duplicates()
tmp=tmp.drop(["transcript_id"],axis=1)
tmp["transcript_id norm. mean -log10(pValue)"]=tmp["transcript_id norm. mean -log10(pValue)"].astype(float)
tmp.columns=[field,"%s norm. mean -log10(pValue)" %str(field)]
tmp=tmp.groupby(field, as_index=False).sum()
df=pd.merge(df,tmp,on=field,how="left")
tmp=df[[field,,"transcript_id signalValue"]].drop_duplicates()
tmp=tmp.drop(["transcript_id"],axis=1)
tmp["transcript_id signalValue"]=tmp["transcript_id signalValue"].astype(float)
tmp.columns=[field,"%s signalValue" %str(field)]
tmp=tmp.groupby(field, as_index=False).sum()
df=pd.merge(df,tmp,on=field,how="left")
return df
dfTargets=getCounts_GeneIDs(dfTargets)
dfTargets=dfTargets.drop(["count"],axis=1)
return dfTargets | Annotates a bedtool, BED narrow peak
:param bed: a pandas dataframe in bed format
:param parsedGTF: a parsed GTF file as output by parseGTF() with the following columns
:returns: a Pandas dataframe |
16,817 | def get_pmag_dir():
try:
return os.environ[]
except KeyError: pass
elif getattr(sys, , False):
return sys._MEIPASS
else:
temp = os.getcwd()
os.chdir()
reload(locator)
lib_file = resource_filename(, )
full_dir = os.path.split(lib_file)[0]
ind = full_dir.rfind(os.sep)
lib_dir = full_dir[:ind+1]
lib_dir = os.path.realpath(os.path.join(lib_dir, ))
os.chdir(temp)
if not os.path.isfile(os.path.join(lib_dir, )):
lib_dir = os.getcwd()
fname = os.path.join(lib_dir, )
if not os.path.isfile(fname):
pmag_dir = os.path.split(os.path.split(__file__)[0])[0]
if os.path.isfile(os.path.join(pmag_dir,,)):
return pmag_dir
else:
print(t find the data model! Make sure you have installed pmagpy using pip: "pip install pmagpy --upgrade".pmagpy'):
pmag_dir = os.path.split(lib_dir)[0]
else:
pmag_dir = lib_dir
return pmag_dir | Returns directory in which PmagPy is installed |
16,818 | def __initialize_instance(self):
config = self.config
self.instance.auth = self.authentication_class(self.app, config=config)
init_handlers = (
handlers if config.auth_mode() else auth_mode_agnostic_handlers
)
for handler in init_handlers:
if handler.keys is None:
self.__check_method_in_auth(handler.name, handler.exception)
else:
if all(map(config.get, handler.keys)):
self.__check_method_in_auth(
handler.name, handler.exception
)
for handler in init_handlers:
if handler.name in self.kwargs:
method = self.kwargs.pop(handler.name)
setattr(self.instance.auth, handler.name, method) | Take any predefined methods/handlers and insert them into Sanic JWT |
16,819 | def get_attributes(path):
if not os.path.exists(path):
raise CommandExecutionError(.format(path))
attributes = {}
intAttributes = win32file.GetFileAttributes(path)
attributes[] = (intAttributes & 32) == 32
attributes[] = (intAttributes & 1024) == 1024
attributes[] = (intAttributes & 2048) == 2048
attributes[] = (intAttributes & 16) == 16
attributes[] = (intAttributes & 16384) == 16384
attributes[] = (intAttributes & 2) == 2
attributes[] = (intAttributes & 128) == 128
attributes[] = (intAttributes & 8192) == 8192
attributes[] = (intAttributes & 4096) == 4096
attributes[] = (intAttributes & 1) == 1
attributes[] = (intAttributes & 4) == 4
attributes[] = (intAttributes & 256) == 256
attributes[] = False
if attributes[] is True:
fileIterator = win32file.FindFilesIterator(path)
findDataTuple = next(fileIterator)
if findDataTuple[6] == 0xA000000C:
attributes[] = True
return attributes | Return a dictionary object with the Windows
file attributes for a file.
Args:
path (str): The path to the file or directory
Returns:
dict: A dictionary of file attributes
CLI Example:
.. code-block:: bash
salt '*' file.get_attributes c:\\temp\\a.txt |
16,820 | def draw_commands(self, surf):
past_abilities = {act.ability for act in self._past_actions if act.ability}
for y, cmd in enumerate(sorted(self._abilities(
lambda c: c.name != "Smart"), key=lambda c: c.name), start=2):
if self._queued_action and cmd == self._queued_action:
color = colors.green
elif self._queued_hotkey and cmd.hotkey.startswith(self._queued_hotkey):
color = colors.green * 0.75
elif cmd.ability_id in past_abilities:
color = colors.red
else:
color = colors.yellow
hotkey = cmd.hotkey[0:3]
surf.write_screen(self._font_large, color, (0.2, y), hotkey)
surf.write_screen(self._font_large, color, (3, y), cmd.name) | Draw the list of available commands. |
16,821 | def position_target_global_int_send(self, time_boot_ms, coordinate_frame, type_mask, lat_int, lon_int, alt, vx, vy, vz, afx, afy, afz, yaw, yaw_rate, force_mavlink1=False):
return self.send(self.position_target_global_int_encode(time_boot_ms, coordinate_frame, type_mask, lat_int, lon_int, alt, vx, vy, vz, afx, afy, afz, yaw, yaw_rate), force_mavlink1=force_mavlink1) | Reports the current commanded vehicle position, velocity, and
acceleration as specified by the autopilot. This
should match the commands sent in
SET_POSITION_TARGET_GLOBAL_INT if the vehicle is being
controlled this way.
time_boot_ms : Timestamp in milliseconds since system boot. The rationale for the timestamp in the setpoint is to allow the system to compensate for the transport delay of the setpoint. This allows the system to compensate processing latency. (uint32_t)
coordinate_frame : Valid options are: MAV_FRAME_GLOBAL_INT = 5, MAV_FRAME_GLOBAL_RELATIVE_ALT_INT = 6, MAV_FRAME_GLOBAL_TERRAIN_ALT_INT = 11 (uint8_t)
type_mask : Bitmask to indicate which dimensions should be ignored by the vehicle: a value of 0b0000000000000000 or 0b0000001000000000 indicates that none of the setpoint dimensions should be ignored. If bit 10 is set the floats afx afy afz should be interpreted as force instead of acceleration. Mapping: bit 1: x, bit 2: y, bit 3: z, bit 4: vx, bit 5: vy, bit 6: vz, bit 7: ax, bit 8: ay, bit 9: az, bit 10: is force setpoint, bit 11: yaw, bit 12: yaw rate (uint16_t)
lat_int : X Position in WGS84 frame in 1e7 * meters (int32_t)
lon_int : Y Position in WGS84 frame in 1e7 * meters (int32_t)
alt : Altitude in meters in AMSL altitude, not WGS84 if absolute or relative, above terrain if GLOBAL_TERRAIN_ALT_INT (float)
vx : X velocity in NED frame in meter / s (float)
vy : Y velocity in NED frame in meter / s (float)
vz : Z velocity in NED frame in meter / s (float)
afx : X acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
afy : Y acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
afz : Z acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
yaw : yaw setpoint in rad (float)
yaw_rate : yaw rate setpoint in rad/s (float) |
16,822 | def _getMostActiveCells(self):
poolingActivation = self._poolingActivation
nonZeroCells = numpy.argwhere(poolingActivation > 0)[:,0]
poolingActivationSubset = poolingActivation[nonZeroCells] + \
self._poolingActivation_tieBreaker[nonZeroCells]
potentialUnionSDR = nonZeroCells[numpy.argsort(poolingActivationSubset)[::-1]]
topCells = potentialUnionSDR[0: self._maxUnionCells]
if max(self._poolingTimer) > self._minHistory:
self._unionSDR = numpy.sort(topCells).astype(UINT_DTYPE)
else:
self._unionSDR = []
return self._unionSDR | Gets the most active cells in the Union SDR having at least non-zero
activation in sorted order.
@return: a list of cell indices |
16,823 | def clean_proc_dir(opts):
serial = salt.payload.Serial(opts)
proc_dir = os.path.join(opts[], )
for fn_ in os.listdir(proc_dir):
proc_file = os.path.join(*[proc_dir, fn_])
data = salt.utils.master.read_proc_file(proc_file, opts)
if not data:
try:
log.warning(
"Found proc file %s without proper data. Removing from tracked proc files.",
proc_file
)
os.remove(proc_file)
except (OSError, IOError) as err:
log.error(, err)
continue
if not salt.utils.master.is_pid_healthy(data[]):
try:
log.warning(
"PID %s not owned by salt or no longer running. Removing tracked proc file %s",
data[],
proc_file
)
os.remove(proc_file)
except (OSError, IOError) as err:
log.error(, err) | Clean out old tracked jobs running on the master
Generally, anything tracking a job should remove the job
once the job has finished. However, this will remove any
jobs that for some reason were not properly removed
when finished or errored. |
16,824 | def extensions():
USE_CYTHON = False
try:
from Cython.Build import cythonize
USE_CYTHON = True
except ImportError:
warnings.warn()
import mdtraj
from numpy import get_include as _np_inc
np_inc = _np_inc()
pybind_inc = get_pybind_include()
lib_prefix = if sys.platform.startswith() else
common_cflags = [, ]
clustering_module = \
Extension(,
sources=[],
include_dirs=[
mdtraj.capi()[],
pybind_inc,
,
],
language=,
libraries=[lib_prefix+],
library_dirs=[mdtraj.capi()[]],
extra_compile_args=common_cflags)
covar_module = \
Extension(,
sources=[],
include_dirs=[,
pybind_inc,
],
language=,
extra_compile_args=common_cflags)
eig_qr_module = \
Extension(,
sources=[],
include_dirs=[, np_inc],
extra_compile_args=[] + common_cflags)
orderedset = \
Extension(,
sources=[],
extra_compile_args=[] + common_cflags)
extra_compile_args = ["-O3", "-std=c99"]
ext_bar = Extension(
"pyemma.thermo.extensions.bar",
sources=["pyemma/thermo/extensions/bar/bar.pyx",
"pyemma/thermo/extensions/bar/_bar.c",
"pyemma/thermo/extensions/util/_util.c"],
extra_compile_args=extra_compile_args)
ext_wham = Extension(
"pyemma.thermo.extensions.wham",
sources=["pyemma/thermo/extensions/wham/wham.pyx",
"pyemma/thermo/extensions/wham/_wham.c",
"pyemma/thermo/extensions/util/_util.c"],
extra_compile_args=extra_compile_args)
ext_mbar = Extension(
"pyemma.thermo.extensions.mbar",
sources=["pyemma/thermo/extensions/mbar/mbar.pyx",
"pyemma/thermo/extensions/mbar/_mbar.c",
"pyemma/thermo/extensions/util/_util.c"],
extra_compile_args=extra_compile_args)
ext_tram = Extension(
"pyemma.thermo.extensions.tram",
sources=["pyemma/thermo/extensions/tram/tram.pyx",
"pyemma/thermo/extensions/tram/_tram.c",
"pyemma/thermo/extensions/util/_util.c"],
extra_compile_args=extra_compile_args)
ext_dtram = Extension(
"pyemma.thermo.extensions.dtram",
sources=["pyemma/thermo/extensions/dtram/dtram.pyx",
"pyemma/thermo/extensions/dtram/_dtram.c",
"pyemma/thermo/extensions/util/_util.c"],
extra_compile_args=extra_compile_args)
ext_trammbar = Extension(
"pyemma.thermo.extensions.trammbar",
sources=["pyemma/thermo/extensions/trammbar/trammbar.pyx",
"pyemma/thermo/extensions/tram/_tram.c",
"pyemma/thermo/extensions/util/_util.c"],
extra_compile_args=extra_compile_args + ["-DTRAMMBAR"])
ext_mbar_direct = Extension(
"pyemma.thermo.extensions.mbar_direct",
sources=["pyemma/thermo/extensions/mbar_direct/mbar_direct.pyx",
"pyemma/thermo/extensions/mbar_direct/_mbar_direct.c",
"pyemma/thermo/extensions/util/_util.c"],
extra_compile_args=extra_compile_args)
ext_tram_direct = Extension(
"pyemma.thermo.extensions.tram_direct",
sources=["pyemma/thermo/extensions/tram_direct/tram_direct.pyx",
"pyemma/thermo/extensions/tram_direct/_tram_direct.c",
"pyemma/thermo/extensions/util/_util.c"],
extra_compile_args=extra_compile_args)
ext_trammbar_direct = Extension(
"pyemma.thermo.extensions.trammbar_direct",
sources=["pyemma/thermo/extensions/trammbar_direct/trammbar_direct.pyx",
"pyemma/thermo/extensions/tram_direct/_tram_direct.c",
"pyemma/thermo/extensions/util/_util.c"],
extra_compile_args=extra_compile_args + ["-DTRAMMBAR"])
ext_util = Extension(
"pyemma.thermo.extensions.util",
sources=["pyemma/thermo/extensions/util/util.pyx",
"pyemma/thermo/extensions/util/_util.c"],
extra_compile_args=extra_compile_args)
exts_thermo = [
ext_bar,
ext_wham,
ext_mbar,
ext_tram,
ext_dtram,
ext_trammbar,
ext_mbar_direct,
ext_tram_direct,
ext_trammbar_direct,
ext_util]
exts = [clustering_module,
covar_module,
eig_qr_module,
orderedset
]
exts += exts_thermo
for e in exts:
e.include_dirs.append(np_inc)
if not USE_CYTHON:
for e in exts:
new_src = []
for s in e.sources:
new_src.append(s.replace(, ))
e.sources = new_src
else:
exts = cythonize(exts, language_level=sys.version_info[0])
return exts | How do we handle cython:
1. when on git, require cython during setup time (do not distribute
generated .c files via git)
a) cython present -> fine
b) no cython present -> install it on the fly. Extensions have to have .pyx suffix
This is solved via a lazy evaluation of the extension list. This is needed,
because build_ext is being called before cython will be available.
https://bitbucket.org/pypa/setuptools/issue/288/cannot-specify-cython-under-setup_requires
2. src dist install (have pre-converted c files and pyx files)
a) cython present -> fine
b) no cython -> use .c files |
16,825 | def build_chain(self, source, chain):
for group in WalkByGroup(source, chain.order+1):
pre = group[:-1]
res = group[-1]
if pre not in chain.content:
chain.content[pre] = {res: 1}
else:
if res not in chain.content[pre]:
chain.content[pre][res] = 1
else:
chain.content[pre][res] += 1
chain.decache() | Build a Markov chain from source on top of an existing chain
Args:
source: iterable which will be used to build chain
chain: MarkovChain in currently loaded shelve file that
will be extended by source |
16,826 | def remote_delete_user(model, request):
params = request.params
uid = params.get('id')
if not uid:
return {
'success': False,
'message': u"No user ID given.",
}
users = model.backend
if uid not in users:
return {
'success': False,
'message': u"User with given ID does not exist.",
}
try:
del users[uid]
users.parent()
message = u"Deleted user with ID %s." % uid
return {
'success': True,
'message': message,
}
except Exception as e:
return {
'success': False,
'message': str(e),
}
finally:
model.invalidate() | Remove user via remote service.
Returns a JSON response containing success state and a message indicating
what happened::
{
success: true, // respective false
message: 'message'
}
Expected request parameters:
id
Id of user to delete. |
16,827 | def render_template(template_name: str, **kwargs):
return get_environment().get_template(template_name).render(
cauldron_template_uid=make_template_uid(),
**kwargs
) | Renders the template file with the given filename from within Cauldron's
template environment folder.
:param template_name:
The filename of the template to render. Any path elements should be
relative to Cauldron's root template folder.
:param kwargs:
Any elements passed to Jinja2 for rendering the template
:return:
The rendered string |
16,828 | def _setup_dmtf_schema(self):
def print_verbose(msg):
if self.verbose:
print(msg)
if not os.path.isdir(self.schema_root_dir):
print_verbose(
_format("Creating directory for CIM Schema archive: {0}",
self.schema_root_dir))
os.mkdir(self.schema_root_dir)
if not os.path.isfile(self.schema_zip_file):
print_verbose(
_format("Downloading CIM Schema archive from: {0}",
self.schema_zip_url))
try:
ufo = urlopen(self.schema_zip_url)
except IOError as ie:
os.rmdir(self.schema_root_dir)
raise ValueError(
_format("DMTF Schema archive not found at url {0}: {1}",
self.schema_zip_url, ie))
with open(self.schema_zip_file, 'wb') as fp:
for data in ufo:
fp.write(data)
if not os.path.isdir(self.schema_mof_dir):
print_verbose(
_format("Creating directory for CIM Schema MOF files: {0}",
self.schema_mof_dir))
os.mkdir(self.schema_mof_dir)
if not os.path.isfile(self._schema_mof_file):
print_verbose(
_format("Unpacking CIM Schema archive: {0}",
self.schema_zip_file))
zfp = None
try:
zfp = ZipFile(self.schema_zip_file, 'r')
nlist = zfp.namelist()
for file_ in nlist:
dfile = os.path.join(self.schema_mof_dir, file_)
if dfile[-1] == '/':
if not os.path.exists(dfile):
os.mkdir(dfile)
else:
with open(dfile, 'wb') as dfp:
dfp.write(zfp.read(file_))
finally:
if zfp:
zfp.close() | Install the DMTF CIM schema from the DMTF web site if it is not already
installed. This includes downloading the DMTF CIM schema zip file from
the DMTF web site and expanding that file into a subdirectory defined
by `schema_mof_dir`.
Once the schema zip file is downloaded into `schema_root_dir`, it is
not re-downloaded if this function is recalled since DMTF CIM Schema
releases are never modified; new update versions are released for minor
changes. If the `schema_zip_file` is in the `schema_root_dir`
directory, but no 'schema_mof_dir' subdirectory exists, the schema is
unzipped.
This allows the DMTF CIM schema zip file to be downloaded once and
reused and the user to chose if they want to retain the extracted MOF
files or remove them with :meth:`~pywbem_mock.DMTFCIMSchema.clean` when
not being used.
If the schema is to be committed to a source repository such as git,
it is logical to commit only the DMTF CIM schema zip file. Creation of
the `schema_mof_dir` subdirectory will be created when the
:class:`pywbem_mock.DMTFCIMSchema` object is created.
Raises:
ValueError: If the schema cannot be retrieved from the DMTF web
site.
TypeError: If the `schema_version` is not a valid tuple with 3
integer components |
16,829 | def get_bytes(self, n):
b = self.packet.read(n)
max_pad_size = 1 << 20
if len(b) < n < max_pad_size:
return b + zero_byte * (n - len(b))
return b | Return the next ``n`` bytes of the message (as a `str`), without
decomposing into an int, decoded string, etc. Just the raw bytes are
returned. Returns a string of ``n`` zero bytes if there weren't ``n``
bytes remaining in the message. |
16,830 | def remove_replica(self, partition_name, osr_broker_ids, count=1):
try:
partition = self.cluster_topology.partitions[partition_name]
except KeyError:
raise InvalidPartitionError(
"Partition name {name} not found.".format(name=partition_name),
)
if partition.replication_factor - count < 1:
raise InvalidReplicationFactorError(
"Cannot decrease replication factor from {rf} to {new_rf}."
"Replication factor must be at least 1."
.format(
rf=partition.replication_factor,
new_rf=partition.replication_factor - count,
)
)
osr = {
broker for broker in partition.replicas
if broker.id in osr_broker_ids
}
state = _State(self.cluster_topology)
partition_index = state.partitions.index(partition)
for _ in range(count):
non_empty_rgs = [
rg for rg in six.itervalues(self.cluster_topology.rgs)
if rg.count_replica(partition) > 0
]
rgs_with_osr = [
rg for rg in non_empty_rgs
if any(b in osr for b in rg.brokers)
]
candidate_rgs = rgs_with_osr or non_empty_rgs
replica_count = sum(
rg.count_replica(partition)
for rg in candidate_rgs
)
opt_replicas, _ = compute_optimum(
len(candidate_rgs),
replica_count,
)
over_replicated_rgs = [
rg for rg in candidate_rgs
if rg.count_replica(partition) > opt_replicas
] or candidate_rgs
candidate_rgs = over_replicated_rgs or candidate_rgs
new_states = []
for rg in candidate_rgs:
osr_brokers = {
broker for broker in rg.brokers
if broker in osr
}
candidate_brokers = osr_brokers or rg.brokers
for broker in candidate_brokers:
if broker in partition.replicas:
broker_index = state.brokers.index(broker)
new_states.append(
state.remove_replica(partition_index, broker_index)
)
state = sorted(new_states, key=self._score, reverse=True)[0]
self.cluster_topology.update_cluster_topology(state.assignment)
osr = {b for b in osr if b in partition.replicas} | Removing a replica is done by trying to remove a replica from every
broker and choosing the resulting state with the highest fitness score.
Out-of-sync replicas will always be removed before in-sync replicas.
:param partition_name: (topic_id, partition_id) of the partition to remove replicas of.
:param osr_broker_ids: A list of the partition's out-of-sync broker ids.
:param count: The number of replicas to remove. |
16,831 | def _validate_features(self, data):
if self.feature_names is None:
self.feature_names = data.feature_names
self.feature_types = data.feature_types
else:
if self.feature_names != data.feature_names:
msg = 'feature_names mismatch: {0} {1}'
raise ValueError(msg.format(self.feature_names,
data.feature_names)) | Validate Booster and data's feature_names are identical.
Set feature_names and feature_types from DMatrix |
16,832 | def nt_counts(bam, positions, stranded=False, vcf=False, bed=False):
if not bed and not vcf:
if type(positions) == pbt.bedtool.BedTool:
df = positions.to_dataframe()
elif positions[-4:] == '.bed':
bed = True
elif positions[-4:] == '.vcf':
vcf = True
else:
sys.stderr.write(
)
if bed:
df = pbt.BedTool(positions).to_dataframe()
elif vcf:
from variants import vcf_as_df
tdf = vcf_as_df(positions)
df = pd.DataFrame(index=tdf.index)
df[] = tdf.CHROM
df[] = tdf.POS - 1
df[] = tdf.POS
res = []
for i in df.index:
region = [df.ix[i, ], df.ix[i, ], df.ix[i, ]]
res.append(get_region_nt_counts(region, bam, stranded))
res = pd.concat(res)
return res | Find the number of nucleotides covered at all positions in a bed or vcf
file.
Parameters
----------
bam : str or pysam.calignmentfile.AlignmentFile
Bam file opened with pysam or path to bam file (must
be sorted and indexed).
positions : str or pybedtools.BedTool
Path to bed or vcf file or pybedtools.BedTool object. The extension is
used to determine whether the file is a bed or vcf (.bed vs .vcf).
stranded : boolean
Boolean indicating whether read data is stranded and stranded nucleotide
counts should be returned. Assumes R1 read on reverse strand implies +
strand coverage etc.
vcf : boolean
Set to True if you are providing a vcf file that doesn't have a .vcf
suffix.
bed : boolean
Set to True if you are providing a bed file that doesn't have a .bed
suffix.
Returns
-------
counts : pandas.DataFrame
Data frame with the counts for each base in the region. The index of
this data frame is one-based for compatibility with VCF files. |
16,833 | def process_bind_param(self, value: Optional[List[str]],
dialect: Dialect) -> str:
retval = self._strlist_to_dbstr(value)
return retval | Convert things on the way from Python to the database. |
16,834 | def parse_get_list_response(content):
try:
tree = etree.fromstring(content)
hrees = [Urn.separate + unquote(urlsplit(hree.text).path) for hree in tree.findall()]
return [Urn(hree) for hree in hrees]
except etree.XMLSyntaxError:
return list() | Parses of response content XML from WebDAV server and extract file and directory names.
:param content: the XML content of HTTP response from WebDAV server for getting list of files by remote path.
:return: list of extracted file or directory names. |
16,835 | def compute_y(self, coefficients, num_x):
y_vals = []
for x in range(1, num_x + 1):
y = sum([c * x ** i for i, c in enumerate(coefficients[::-1])])
y_vals.append(y)
return y_vals | Return calculated y-values for the domain of x-values in [1, num_x]. |
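A worked illustration of the same arithmetic outside the class (the coefficient list and x-range below are made up): coefficients are given highest power first, so [2, 3] represents y = 2*x + 3.

    coefficients = [2, 3]   # hypothetical input, meaning y = 2*x + 3
    y_vals = [sum(c * x ** i for i, c in enumerate(coefficients[::-1]))
              for x in range(1, 4)]
    # y_vals == [5, 7, 9]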
16,836 | def committees_legislators(self, *args, **kwargs):
committees = list(self.committees(*args, **kwargs))
legislators = self.legislators({: True},
fields=[,
settings.LEVEL_FIELD])
_legislators = {}
del legislators
for com in committees:
com._legislators = _legislators
return committees | Return an iterable of committees with all the
legislators cached for reference in the Committee model.
So do a "select_related" operation on committee members. |
16,837 | def nic_remove(self, nic):
args = {
: nic,
}
self._nic_remove_chk.check(args)
return self._client.json(, args) | Detach a nic from a bridge
:param nic: nic name to detach |
16,838 | def cov_trob(x, wt=None, cor=False, center=True, nu=5, maxit=25,
tol=0.01):
def test_values(x):
if pd.isnull(x).any() or np.isinf(x).any():
raise ValueError("Missing or infinite values in ")
def scale_simp(x, center, n, p):
return x - np.repeat([center], n, axis=0)
x = np.asarray(x)
n, p = x.shape
test_values(x)
miss_wt = wt is None
if not miss_wt:
wt = np.asarray(wt)
wt0 = wt
if len(wt) != n:
raise ValueError(
"length of must equal number of observations.")
if any(wt < 0):
raise ValueError("Negative weights not allowed.")
if not np.sum(wt):
raise ValueError("No positive weights.")
x = x[wt > 0, :]
wt = wt[wt > 0]
n, _ = x.shape
else:
wt = np.ones(n)
wt = wt[:, np.newaxis]
loc = np.sum(wt*x, axis=0) / wt.sum()
try:
_len = len(center)
except TypeError:
if isinstance(center, bool) and not center:
loc = np.zeros(p)
else:
if _len != p:
raise ValueError(" is not the right length")
loc = p
use_loc = isinstance(center, bool) and center
w = wt * (1 + p/nu)
for iteration in range(maxit):
w0 = w
X = scale_simp(x, loc, n, p)
_, s, v = linalg.svd(np.sqrt(w/np.sum(w)) * X)
wX = np.dot(np.dot(X, v.T), np.diag(np.full(p, 1/s)))
Q = np.squeeze(np.dot(wX**2, np.ones(p)))
w = (wt * (nu + p)) / (nu + Q)[:, np.newaxis]
if use_loc:
loc = np.sum(w*x, axis=0) / w.sum()
if all(np.abs(w-w0) < tol):
break
else:
if ((np.mean(w) - np.mean(wt) > tol) or
(np.abs(np.mean(w * Q)/p - 1) > tol)):
warn("Probable convergence failure.", PlotnineWarning)
_a = np.sqrt(w) * X
cov = np.dot(_a.T, _a) / np.sum(wt)
if miss_wt:
ans = dict(cov=cov, center=loc, n_obs=n)
else:
ans = dict(cov=cov, center=loc, wt=wt0, n_obs=n)
if cor:
sd = np.sqrt(np.diag(cov))
cor = (cov/sd)/np.repeat([sd], p, axis=0).T
ans[] = cor
ans[] = iteration
return ans | Covariance Estimation for Multivariate t Distribution
Estimates a covariance or correlation matrix assuming the
data came from a multivariate t distribution: this provides
some degree of robustness to outlier without giving a high
breakdown point.
**credit**: This function is a port of the R function
``MASS::cov.trob``.
Parameters
----------
x : array
data matrix. Missing values (NaNs) are not allowed.
wt : array
A vector of weights for each case: these are treated as
if the case i actually occurred ``wt[i]`` times.
cor : bool
Flag to choose between returning the correlation
(``cor=True``) or covariance (``cor=False``) matrix.
center : array or bool
A logical value or a numeric vector providing the location
about which the covariance is to be taken.
If ``center=False``, no centering is done; if
``center=True`` the MLE of the location vector is used.
nu : int
'degrees of freedom' for the multivariate t distribution.
Must exceed 2 (so that the covariance matrix is finite).
maxit : int
Maximum number of iterations in fitting.
tol : float
Convergence tolerance for fitting.
Returns
-------
out : dict
A dictionary with with the following key-value
- ``cov`` : the fitted covariance matrix.
- ``center`` : the estimated or specified location vector.
- ``wt`` : the specified weights: only returned if the
wt argument was given.
- ``n_obs`` : the number of cases used in the fitting.
- ``cor`` : the fitted correlation matrix: only returned
if ``cor=True``.
- ``call`` : The matched call.
- ``iter`` : The number of iterations used.
References
----------
- J. T. Kent, D. E. Tyler and Y. Vardi (1994) A curious likelihood
identity for the multivariate t-distribution. *Communications in
Statistics-Simulation and Computation* **23**, 441-453.
- Venables, W. N. and Ripley, B. D. (1999) *Modern Applied
Statistics with S-PLUS*. Third Edition. Springer. |
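A minimal usage sketch on synthetic data (the array shape and nu value are illustrative assumptions, not taken from the source):

    import numpy as np
    x = np.random.randn(200, 3)        # 200 observations, 3 variables
    fit = cov_trob(x, cor=True, nu=5)
    fit["cov"]      # robust 3x3 covariance estimate
    fit["center"]   # estimated location vector
    fit["cor"]      # correlation matrix, present because cor=True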
16,839 | def newComment(content):
ret = libxml2mod.xmlNewComment(content)
if ret is None:raise treeError()
return xmlNode(_obj=ret) | Creation of a new node containing a comment. |
16,840 | def l2traceroute_input_protocolType_IP_l4_dest_port(self, **kwargs):
config = ET.Element("config")
l2traceroute = ET.Element("l2traceroute")
config = l2traceroute
input = ET.SubElement(l2traceroute, "input")
protocolType = ET.SubElement(input, "protocolType")
IP = ET.SubElement(protocolType, "IP")
l4_dest_port = ET.SubElement(IP, "l4-dest-port")
l4_dest_port.text = kwargs.pop('l4_dest_port')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
16,841 | def _update_data(self, name, value, timestamp, interval, config, conn):
i_time = config[].to_bucket(timestamp)
if not config[]:
r_time = config[].to_bucket(timestamp)
else:
r_time = None
stmt = self._table.update().where(
and_(
self._table.c.name==name,
self._table.c.interval==interval,
self._table.c.i_time==i_time,
self._table.c.r_time==r_time)
).values({self._table.c.value: value})
rval = conn.execute( stmt )
return rval.rowcount | Support function for insert. Should be called within a transaction |
16,842 | def _write_stream(self, src, dst, size=None, size_limit=None,
chunk_size=None, progress_callback=None):
chunk_size = chunk_size_or_default(chunk_size)
algo, m = self._init_hash()
bytes_written = 0
while 1:
algo, m.hexdigest()) if m else None | Get helper to save stream from src to dest + compute checksum.
:param src: Source stream.
:param dst: Destination stream.
:param size: If provided, this exact amount of bytes will be
written to the destination file.
:param size_limit: ``FileSizeLimit`` instance to limit number of bytes
to write. |
16,843 | def __Logout(si):
try:
if si:
content = si.RetrieveContent()
content.sessionManager.Logout()
except Exception as e:
pass | Disconnect (logout) service instance
@param si: Service instance (returned from Connect) |
16,844 | def lagrange_polynomial(abscissas, sort="GR"):
abscissas = numpy.asfarray(abscissas)
if len(abscissas.shape) == 1:
abscissas = abscissas.reshape(1, abscissas.size)
dim, size = abscissas.shape
order = 1
while chaospy.bertran.terms(order, dim) <= size:
order += 1
indices = numpy.array(chaospy.bertran.bindex(0, order-1, dim, sort)[:size])
idx, idy = numpy.mgrid[:size, :size]
matrix = numpy.prod(abscissas.T[idx]**indices[idy], -1)
det = numpy.linalg.det(matrix)
if det == 0:
raise numpy.linalg.LinAlgError("invertible matrix required")
vec = chaospy.poly.basis(0, order-1, dim, sort)[:size]
coeffs = numpy.zeros((size, size))
if size == 1:
out = chaospy.poly.basis(0, 0, dim, sort)*abscissas.item()
elif size == 2:
coeffs = numpy.linalg.inv(matrix)
out = chaospy.poly.sum(vec*(coeffs.T), 1)
else:
for i in range(size):
for j in range(size):
coeffs[i, j] += numpy.linalg.det(matrix[1:, 1:])
matrix = numpy.roll(matrix, -1, axis=0)
matrix = numpy.roll(matrix, -1, axis=1)
coeffs /= det
out = chaospy.poly.sum(vec*(coeffs.T), 1)
return out | Create Lagrange polynomials.
Args:
abscissas (numpy.ndarray):
Sample points where the Lagrange polynomials shall be defined.
Example:
>>> print(chaospy.around(lagrange_polynomial([-10, 10]), 4))
[-0.05q0+0.5, 0.05q0+0.5]
>>> print(chaospy.around(lagrange_polynomial([-1, 0, 1]), 4))
[0.5q0^2-0.5q0, -q0^2+1.0, 0.5q0^2+0.5q0]
>>> poly = lagrange_polynomial([[1, 0, 1], [0, 1, 2]])
>>> print(chaospy.around(poly, 4))
[0.5q0-0.5q1+0.5, -q0+1.0, 0.5q0+0.5q1-0.5]
>>> print(numpy.around(poly([1, 0, 1], [0, 1, 2]), 4))
[[1. 0. 0.]
[0. 1. 0.]
[0. 0. 1.]] |
16,845 | def second_order_diff(arr, x):
arr = np.array(arr)
dxf = (x[2] - x[0])/2
dxb = (x[-1] - x[-3])/2
dx = (x[2:] - x[:-2])/2
first = (-3*arr[0] + 4*arr[1] - arr[2])/(2*dxf)
last = (3*arr[-1] - 4*arr[-2] + arr[-3])/(2*dxb)
interior = (arr[2:] - arr[:-2])/(2*dx)
darr = np.concatenate(([first], interior, [last]))
return darr | Compute second order difference of an array.
A 2nd order forward difference is used for the first point, 2nd order
central difference for interior, and 2nd order backward difference for last
point, returning an array the same length as the input array. |
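A minimal usage sketch (the grid and function below are made-up test data); for a quadratic the result closely tracks the analytic derivative 2*x:

    import numpy as np
    x = np.linspace(0.0, 1.0, 11)
    arr = x ** 2
    darr = second_order_diff(arr, x)   # same length as arr, approximately 2 * x everywhere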
16,846 | def project(self, projection):
x, y = projection(self.lon.decimal_degree, self.lat.decimal_degree)
return (x, y) | Return coordinates transformed to a given projection
Projection should be a basemap or pyproj projection object or similar |
16,847 | def to_doc(name, thing, header_level, source_location):
if type(thing) is enum.EnumMeta:
return enum_doc(name, thing, header_level, source_location)
if inspect.isclass(thing):
header = f"{*header_level} Class **{name}**\n\n"
else:
header = f"{*header_level} {name}\n\n"
lines = [
header,
get_signature(name, thing),
get_source_link(thing, source_location),
]
try:
doc = NumpyDocString(inspect.getdoc(thing))._parsed_data
lines += summary(doc)
lines += attributes_section(thing, doc, header_level)
lines += params_section(thing, doc, header_level)
lines += returns_section(thing, doc, header_level)
lines += examples_section(doc, header_level)
lines += notes_section(doc)
lines += refs_section(doc)
except Exception as e:
pass
return lines | Generate markdown for a class or function
Parameters
----------
name : str
Name of the thing being documented
thing : class or function
Class or function to document
header_level : int
Heading level
source_location : str
URL of repo containing source code |
16,848 | def paddedInt(i):
    i_str = str(i)
    pad = PAD_LEN - len(i_str)
    return (pad * "0") + i_str | return a string that contains `i`, left-padded with 0's up to PAD_LEN digits |
16,849 | def constant_compare(a, b):
if not isinstance(a, byte_cls):
raise TypeError(pretty_message(
,
type_name(a)
))
if not isinstance(b, byte_cls):
raise TypeError(pretty_message(
,
type_name(b)
))
if len(a) != len(b):
return False
if sys.version_info < (3,):
a = [ord(char) for char in a]
b = [ord(char) for char in b]
result = 0
for x, y in zip(a, b):
result |= x ^ y
return result == 0 | Compares two byte strings in constant time to see if they are equal
:param a:
The first byte string
:param b:
The second byte string
:return:
A boolean if the two byte strings are equal |
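A minimal usage sketch (the token values are hypothetical); both arguments must be byte strings, per the type checks above:

    token_a = b"s3cr3t-token"
    token_b = b"s3cr3t-token"
    if constant_compare(token_a, token_b):
        print("tokens match")   # runtime depends only on length, not on where the bytes differ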
16,850 | def get_repos(self):
print
headers = {: , : + self.token}
temp_count = 0
for repo in self.org_retrieved.iter_repos():
temp_count += 1
url = ( + self.organization_name + + repo.name)
self.repos[repo.name] = self.get_stargazers(url=url, headers=headers)
self.calc_stargazers(start_count=650)
print + str(self.total_count)
print str(temp_count) + | Gets the repos for the organization and builds the URL/headers for
getting timestamps of stargazers. |
16,851 | def download(url, save_to_file=True, save_dir=".", filename=None,
block_size=64000, overwrite=False, quiet=False):
if save_to_file:
if not filename:
filename = safe_filename(url.split('/')[-1])
if not filename:
filename = "downloaded_at_{}.file".format(time.time())
save_location = os.path.abspath(os.path.join(save_dir, filename))
if os.path.exists(save_location) and not overwrite:
logger.error("File {0} already exists".format(save_location))
return False
else:
save_location = "memory"
try:
request = urlopen(url)
except ValueError as err:
if not quiet and "unknown url type" in str(err):
logger.error("Please make sure URL is formatted correctly and"
" starts with http:// or other protocol")
raise err
except Exception as err:
if not quiet:
logger.error("Could not download {0} - {1}".format(url, err))
raise err
try:
kb_size = int(request.headers["Content-Length"]) / 1024
except Exception as err:
if not quiet:
logger.debug("Could not determine file size - {0}".format(err))
file_size = "(unknown size)"
else:
file_size = "({0:.1f} {1})".format(*(kb_size, "KB") if kb_size < 9999
else (kb_size / 1024, "MB"))
if not quiet:
logger.info("Downloading {0} {1} to {2}".format(url, file_size,
save_location))
if save_to_file:
with open(save_location, "wb") as f:
while True:
buffer = request.read(block_size)
if not buffer:
break
f.write(buffer)
return save_location
else:
return request.read() | Download a given URL to either file or memory
:param url: Full url (with protocol) of path to download
:param save_to_file: boolean if it should be saved to file or not
:param save_dir: location of saved file, default is current working dir
:param filename: filename to save as
:param block_size: download chunk size
:param overwrite: overwrite file if it already exists
:param quiet: boolean to turn off logging for function
:return: save location (or content if not saved to file) |
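A minimal usage sketch (URL and directory are hypothetical):

    saved = download("https://example.com/archive.zip", save_dir="/tmp", overwrite=True)
    if saved:
        print("saved to", saved)
    # or keep the payload in memory instead of writing a file:
    content = download("https://example.com/data.json", save_to_file=False)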
16,852 | def update_probes(self, progress):
new_values = self.read_probes.probes_values
probe_count = len(self.read_probes.probes)
if probe_count > self.tree_probes.topLevelItemCount():
self.fill_treewidget(self.tree_probes, new_values)
else:
for x in range(probe_count):
topLvlItem = self.tree_probes.topLevelItem(x)
for child_id in range(topLvlItem.childCount()):
child = topLvlItem.child(child_id)
child.value = new_values[topLvlItem.name][child.name]
child.setText(1, str(child.value))
if self.probe_to_plot is not None:
self.probe_to_plot.plot(self.matplotlibwidget_1.axes)
self.matplotlibwidget_1.draw()
if self.chk_probe_log.isChecked():
data = .join(list(np.array([[str(p) for p in list(p_dict.values())] for instr, p_dict in new_values.items()]).flatten()))
self.probe_file.write(.format(data)) | update the probe tree |
16,853 | def process(*args, **kwargs):
timeout = kwargs.get('timeout')
if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
return _process_wrapper(args[0], timeout)
else:
if timeout is not None and not isinstance(timeout, (int, float)):
raise TypeError()
def decorating_function(function):
return _process_wrapper(function, timeout)
return decorating_function | Runs the decorated function in a concurrent process,
taking care of the result and error management.
Decorated functions will return a concurrent.futures.Future object
once called.
The timeout parameter will set a maximum execution time
for the decorated function. If the execution exceeds the timeout,
the process will be stopped and the Future will raise TimeoutError. |
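A minimal usage sketch (the decorated function body is made up):

    @process(timeout=5)
    def heavy_job(n):
        return sum(i * i for i in range(n))

    future = heavy_job(100000)   # returns a concurrent.futures.Future immediately
    print(future.result())       # raises TimeoutError if the 5-second limit is exceeded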
16,854 | def check_url(url):
result = {"url": url}
try:
response = requests.get(url)
result["status"] = response.status_code
result["reason"] = response.reason
response.raise_for_status()
result["alive"] = True
except AttributeError as err:
if err.message == " object has no attribute ":
result["alive"] = False
result["reason"] = "Invalid URL"
result["status"] = None
else:
raise
except requests.exceptions.RequestException as err:
result["alive"] = False
if "reason" not in result:
result["reason"] = str(err)
if "status" not in result:
result["status"] = None
assert "url" in result
assert result.get("alive") in (True, False)
assert "status" in result
assert "reason" in result
return result | Check whether the given URL is dead or alive.
Returns a dict with four keys:
"url": The URL that was checked (string)
"alive": Whether the URL was working, True or False
"status": The HTTP status code of the response from the URL,
e.g. 200, 401, 500 (int)
"reason": The reason for the success or failure of the check,
e.g. "OK", "Unauthorized", "Internal Server Error" (string)
The "status" may be None if we did not get a valid HTTP response,
e.g. in the event of a timeout, DNS failure or invalid HTTP response.
The "reason" will always be a string, but may be a requests library
exception string rather than an HTTP reason string if we did not get a valid
HTTP response. |
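A minimal usage sketch (the URL is hypothetical):

    result = check_url("https://example.com/")
    if result["alive"]:
        print("OK:", result["status"], result["reason"])   # e.g. 200, 'OK'
    else:
        print("dead:", result["reason"])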
16,855 | def path(self, which=None):
if which in (
,
,
,
,
,
,
,
,
):
return .format(
super(Repository, self).path(which=),
which
)
return super(Repository, self).path(which) | Extend ``nailgun.entity_mixins.Entity.path``.
The format of the returned path depends on the value of ``which``:
errata
/repositories/<id>/errata
files
/repositories/<id>/files
packages
/repositories/<id>/packages
module_streams
/repositories/<id>/module_streams
puppet_modules
/repositories/<id>/puppet_modules
remove_content
/repositories/<id>/remove_content
sync
/repositories/<id>/sync
upload_content
/repositories/<id>/upload_content
import_uploads
/repositories/<id>/import_uploads
``super`` is called otherwise. |
16,856 | async def _loadNodeValu(self, full, valu):
node = self.root
for path in iterpath(full):
name = path[-1]
step = node.kids.get(name)
if step is None:
step = await self._initNodePath(node, path, None)
node = step
node.valu = valu
return node | Load a node from storage into the tree.
( used by initialization routines to build the tree) |
16,857 | def initialize_request(self, request, *args, **kwargs):
parser_context = self.get_parser_context(request)
return Request(
request,
parsers=self.get_parsers(),
authenticators=self.get_authenticators(),
negotiator=self.get_content_negotiator(),
parser_context=parser_context
) | Returns the initial request object. |
16,858 | def transform(self, X, y=None):
return [{
new_feature: self._fisher_pval(x, old_features)
for new_feature, old_features in self.feature_groups.items()
if len(set(x.keys()) & set(old_features))
} for x in X] | :X: list of dict
:y: labels |
16,859 | def handle_internal_commands(command):
if command.startswith(":"):
target = _get_registered_target(command[1:], default=None)
if target:
return target() | Run repl-internal commands.
Repl-internal commands are all commands starting with ":". |
16,860 | def fetch_file(self, in_path, out_path):
vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
data = dict(mode=, in_path=in_path)
data = utils.jsonify(data)
data = utils.encrypt(self.key, data)
self.socket.send(data)
response = self.socket.recv()
response = utils.decrypt(self.key, response)
response = utils.parse_json(response)
response = response[]
response = base64.b64decode(response)
fh = open(out_path, "w")
fh.write(response)
fh.close() | save a remote file to the specified path |
16,861 | def sections(self):
sections = []
for match in texutils.section_pattern.finditer(self.text):
textbefore = self.text[0:match.start()]
wordsbefore = nlputils.wordify(textbefore)
numwordsbefore = len(wordsbefore)
sections.append((numwordsbefore, match.group(1)))
self._sections = sections
return sections | List with tuples of section names and positions.
Positions of section names are measured by cumulative word count. |
16,862 | def cancel_download_task(self, task_id, expires=None, **kwargs):
data = {
'expires': expires,
'task_id': task_id,
}
return self._request(, ,
data=data, **kwargs) | Cancel an offline download task.
:param task_id: ID of the task to cancel.
:type task_id: str
:param expires: Request expiration time; validated against if provided.
:type expires: int
:return: Response object |
16,863 | def bibtex(self):
m = max(itertools.chain(map(len, self), [0]))
fields = (" %s = {%s}" % (k.ljust(m), self[k]) for k in self)
return "@%s{%s,\n%s\n}" % (
getattr(self.genre, , self.genre), self.id, ",\n".join(fields)) | Represent the source in BibTeX format.
:return: string encoding the source in BibTeX syntax. |
16,864 | def validate(cpf_number):
_cpf = compat.clear_punctuation(cpf_number)
if (len(_cpf) != 11 or
len(set(_cpf)) == 1):
return False
first_part = _cpf[:9]
second_part = _cpf[:10]
first_digit = _cpf[9]
second_digit = _cpf[10]
if (first_digit == calc.calculate_first_digit(first_part) and
second_digit == calc.calculate_second_digit(second_part)):
return True
return False | This function validates a CPF number.
This function uses calculation package to calculate both digits
and then validates the number.
:param cpf_number: a CPF number to be validated. Only numbers.
:type cpf_number: string
:return: Bool -- True for a valid number, False otherwise. |
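A minimal usage sketch (the CPF number below is a made-up illustration; digits only, as required):

    if validate("11144477735"):
        print("valid CPF")
    else:
        print("invalid CPF")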
16,865 | def _return_retry_timer(self):
msg =
if self.opts.get():
try:
random_retry = randint(self.opts[], self.opts[])
retry_msg = msg % random_retry
log.debug(, msg % random_retry)
return random_retry
except ValueError:
log.error(
,
self.opts[],
self.opts[],
)
log.debug(msg, DEFAULT_MINION_OPTS[])
return DEFAULT_MINION_OPTS[]
else:
log.debug(msg, self.opts.get())
return self.opts.get() | Based on the minion configuration, either return a randomized timer or
just return the value of the return_retry_timer. |
16,866 | def _add_case(self, case_obj):
if self.case(case_obj[]):
raise IntegrityError("Case %s already exists in database" % case_obj[])
return self.case_collection.insert_one(case_obj) | Add a case to the database
If the case already exists exception is raised
Args:
case_obj(Case) |
16,867 | def set_npn_advertise_callback(self, callback):
_warn_npn()
self._npn_advertise_helper = _NpnAdvertiseHelper(callback)
self._npn_advertise_callback = self._npn_advertise_helper.callback
_lib.SSL_CTX_set_next_protos_advertised_cb(
self._context, self._npn_advertise_callback, _ffi.NULL) | Specify a callback function that will be called when offering `Next
Protocol Negotiation
<https://technotes.googlecode.com/git/nextprotoneg.html>`_ as a server.
:param callback: The callback function. It will be invoked with one
argument, the :class:`Connection` instance. It should return a
list of bytestrings representing the advertised protocols, like
``[b'http/1.1', b'spdy/2']``.
.. versionadded:: 0.15 |
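A minimal callback sketch following the signature described above; `context` is assumed to be an existing Context instance:

    def advertise_protocols(connection):
        return [b"http/1.1", b"spdy/2"]

    context.set_npn_advertise_callback(advertise_protocols)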
16,868 | def _load_json(self, filename):
with open(filename, 'r') as file_handle:
self._sensors.update(json.load(
file_handle, cls=MySensorsJSONDecoder)) | Load sensors from json file. |
16,869 | def slaveof(master_host=None, master_port=None, host=None, port=None, db=None,
password=None):
if master_host and not master_port:
master_port = 6379
server = _connect(host, port, db, password)
return server.slaveof(master_host, master_port) | Make the server a slave of another instance, or promote it as master
CLI Example:
.. code-block:: bash
# Become slave of redis-n01.example.com:6379
salt '*' redis.slaveof redis-n01.example.com 6379
salt '*' redis.slaveof redis-n01.example.com
# Become master
salt '*' redis.slaveof |
16,870 | def ungroup_state(self, state_id):
state = self.states[state_id]
assert isinstance(state, ContainerState)
from rafcon.core.states.barrier_concurrency_state import BarrierConcurrencyState, UNIQUE_DECIDER_STATE_ID
if isinstance(state, BarrierConcurrencyState):
state.remove_state(state_id=UNIQUE_DECIDER_STATE_ID, force=True)
[related_transitions, related_data_flows] = self.related_linkage_state(state_id)
ingoing_data_linkage_for_port = {}
for df in related_data_flows[][]:
if (df.from_state, df.from_key) in ingoing_data_linkage_for_port:
ingoing_data_linkage_for_port[(df.from_state, df.from_key)][].append(df)
else:
ingoing_data_linkage_for_port[(df.from_state, df.from_key)] = {: [], : [df]}
if not ingoing_data_linkage_for_port[(df.from_state, df.from_key)][]:
for ext_df in self.data_flows.values():
if (ext_df.to_state, ext_df.to_key) == (df.from_state, df.from_key):
ingoing_data_linkage_for_port[(df.from_state, df.from_key)][].append(ext_df)
outgoing_data_linkage_for_port = {}
for df in related_data_flows[][]:
if (df.to_state, df.to_key) in outgoing_data_linkage_for_port:
outgoing_data_linkage_for_port[(df.to_state, df.to_key)][].append(df)
else:
outgoing_data_linkage_for_port[(df.to_state, df.to_key)] = {: [], : [df]}
if not outgoing_data_linkage_for_port[(df.to_state, df.to_key)][]:
for ext_df in self.data_flows.values():
if (ext_df.from_state, ext_df.from_key) == (df.to_state, df.to_key):
outgoing_data_linkage_for_port[(df.to_state, df.to_key)][].append(ext_df)
child_states = [state.remove_state(s_id, recursive=False, destroy=False) for s_id in list(state.states.keys())]
child_scoped_variables = [sv for sv_id, sv in list(state.scoped_variables.items())]
old_state = self.remove_state(state_id, recursive=False, destroy=False)
state_id_dict = {}
sv_id_dict = {}
enclosed_df_id_dict = {}
enclosed_t_id_dict = {}
old_state_ids = [state.state_id for state in child_states]
for child_state in child_states:
old_state_id = child_state.state_id
new_id = None
if child_state.state_id in list(self.states.keys()):
new_id = state_id_generator(used_state_ids=list(self.states.keys()) + old_state_ids + [self.state_id])
child_state.change_state_id(new_id)
new_state_id = self.add_state(child_state)
if new_id is not None and not new_id == new_state_id:
logger.error("In ungroup state the changed state id should not be changed again by add_state because it"
" could become a old_state_id again and screw data flows and transitions.")
state_id_dict[old_state_id] = new_state_id
for sv in child_scoped_variables:
name = sv.name
if name in [parent_sv.name for parent_sv in self.scoped_variables.values()]:
name = state_id + name
new_sv_id = self.add_scoped_variable(name, sv.data_type, sv.default_value)
sv_id_dict[sv.data_port_id] = new_sv_id
for t in related_transitions[][]:
new_t_id = self.add_transition(state_id_dict[t.from_state], t.from_outcome,
state_id_dict[t.to_state], t.to_outcome)
enclosed_t_id_dict[t.transition_id] = new_t_id
assert len(related_transitions[][]) <= 1
if related_transitions[][]:
ingoing_t = related_transitions[][][0]
for t in related_transitions[][]:
self.add_transition(t.from_state, t.from_outcome, state_id_dict[ingoing_t.to_state],
ingoing_t.to_outcome)
for ext_t in related_transitions[][]:
for t in related_transitions[][]:
if (t.to_state, t.to_outcome) == (ext_t.from_state, ext_t.from_outcome):
try:
self.add_transition(state_id_dict[t.from_state], t.from_outcome,
ext_t.to_state, ext_t.to_outcome)
except ValueError:
from rafcon.core.states.barrier_concurrency_state import BarrierConcurrencyState
if not isinstance(self, BarrierConcurrencyState):
logger.exception("Error while recreation of logical linkage.")
for df in related_data_flows[][]:
new_df_id = self.add_data_flow(self.state_id if state_id == df.from_state else state_id_dict[df.from_state],
sv_id_dict[df.from_key] if state_id == df.from_state else df.from_key,
self.state_id if state_id == df.to_state else state_id_dict[df.to_state],
sv_id_dict[df.to_key] if state_id == df.to_state else df.to_key)
enclosed_df_id_dict[df.data_flow_id] = new_df_id
for data_port_linkage in ingoing_data_linkage_for_port.values():
for ext_df in data_port_linkage[]:
for df in data_port_linkage[]:
if df.to_state not in state_id_dict and df.to_state == state_id:
self.add_data_flow(ext_df.from_state, ext_df.from_key, self.state_id, sv_id_dict[df.to_key])
else:
self.add_data_flow(ext_df.from_state, ext_df.from_key, state_id_dict[df.to_state], df.to_key)
for data_port_linkage in outgoing_data_linkage_for_port.values():
for ext_df in data_port_linkage[]:
for df in data_port_linkage[]:
if df.from_state not in state_id_dict and df.from_state == state_id:
self.add_data_flow(self.state_id, sv_id_dict[df.from_key], ext_df.to_state, ext_df.to_key)
else:
self.add_data_flow(state_id_dict[df.from_state], df.from_key, ext_df.to_state, ext_df.to_key)
self.ungroup_state.__func__.state_id_dict = state_id_dict
self.ungroup_state.__func__.sv_id_dict = sv_id_dict
self.ungroup_state.__func__.enclosed_df_id_dict = enclosed_df_id_dict
self.ungroup_state.__func__.enclosed_t_id_dict = enclosed_t_id_dict
old_state.destroy(recursive=True)
return old_state | Ungroup the state with state id state_id into its parent and retain its internal linkage in the parent.
Interconnecting transitions and data flows to parent and other child states are preserved except:
- a transition that is going from income to outcome directly and
- a data-flow that is linking input and output directly.
:param state_id: State that is to be ungrouped.
:return: |
16,871 | def index():
identity = g.identity
actions = {}
for action in access.actions.values():
actions[action.value] = DynamicPermission(action).allows(identity)
if current_user.is_anonymous:
return render_template("invenio_access/open.html",
actions=actions,
identity=identity)
else:
return render_template("invenio_access/limited.html",
message=,
actions=actions,
identity=identity) | Basic test view. |
16,872 | def apply_operation_to(self, path):
return path.add_lnTo(
self._x - self._freeform_builder.shape_offset_x,
self._y - self._freeform_builder.shape_offset_y
) | Add `a:lnTo` element to *path* for this line segment.
Returns the `a:lnTo` element newly added to the path. |
16,873 | def handle(self):
"The actual service to which the user has connected."
if self.TELNET_ISSUE:
self.writeline(self.TELNET_ISSUE)
if not self.authentication_ok():
return
if self.DOECHO:
self.writeline(self.WELCOME)
self.session_start()
while self.RUNSHELL:
raw_input = self.readline(prompt=self.PROMPT).strip()
self.input = self.input_reader(self, raw_input)
self.raw_input = self.input.raw
if self.input.cmd:
cmd = self.input.cmd.upper()
params = self.input.params
if self.COMMANDS.has_key(cmd):
try:
self.COMMANDS[cmd](params)
except:
log.exception('Error calling %s.' % cmd)
(t, p, tb) = sys.exc_info()
if self.handleException(t, p, tb):
break
else:
self.writeerror("Unknown command " % cmd)
log.debug("Exiting handler") | The actual service to which the user has connected. |
16,874 | def pexpireat(self, name, when):
with self.pipe as pipe:
return pipe.pexpireat(self.redis_key(name), when) | Set an expire flag on key ``name``. ``when`` can be represented
as an integer representing unix time in milliseconds (unix time * 1000)
or a Python datetime object. |
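A minimal usage sketch of the millisecond-precision expiry this wrapper forwards to redis-py (the wrapper only prefixes the key via redis_key); the connection, key name, and TTL below are illustrative.
import datetime
import redis

r = redis.Redis()
r.set("session:42", "payload")
# pexpireat accepts either a datetime or unix time in milliseconds
r.pexpireat("session:42", datetime.datetime.now() + datetime.timedelta(hours=1))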
16,875 | def to_representation(self, value):
value = apply_subfield_projection(self, value, deep=True)
return super().to_representation(value) | Project outgoing native value. |
16,876 | def get_html_output(self):
def html_splitlines(lines):
open_tag_re = re.compile(r)
close_tag_re = re.compile(r)
open_tags = []
for line in lines:
for tag in open_tags:
line = tag.group(0) + line
open_tags = []
for tag in open_tag_re.finditer(line):
open_tags.append(tag)
open_tags.reverse()
for ctag in close_tag_re.finditer(line):
for otag in open_tags:
if otag.group(1) == ctag.group(1):
open_tags.remove(otag)
break
for tag in open_tags:
line += % tag.group(1)
yield line
if self.error:
return escape(self.raw).splitlines()
return list(html_splitlines(self.out.getvalue().splitlines())) | Return line generator. |
16,877 | def lambda_handler(event, context=None, settings_name="zappa_settings"):
time_start = datetime.datetime.now()
if settings.DEBUG:
logger.info(.format(event))
if event.get(, None):
environ = create_wsgi_request(event, script_name=settings.SCRIPT_NAME)
environ[] =
environ[] =
wrap_me = get_wsgi_application()
app = ZappaWSGIMiddleware(wrap_me)
response = Response.from_app(app, environ)
response.content = response.data
returnme = {: response.data}
for (header_name, header_value) in response.headers:
returnme[header_name] = header_value
returnme[] = response.status_code
exception = None
if response.status_code in ERROR_CODES:
content = u"<!DOCTYPE html>" + unicode(response.status_code) + unicode() + response.data.encode()
b64_content = base64.b64encode(content)
exception = (b64_content)
elif 300 <= response.status_code < 400 and response.has_header():
location = returnme[]
location = + location.replace("http://zappa/", "")
exception = location
time_end = datetime.datetime.now()
delta = time_end - time_start
response_time_ms = delta.total_seconds() * 1000
common_log(environ, response, response_time=response_time_ms)
if exception:
raise Exception(exception)
else:
return returnme
elif event.get(, None):
from django.core import management
| An AWS Lambda function which parses specific API Gateway input into a WSGI request.
The request is fed to Django, the Django response is processed, and the result is returned
back to the API Gateway. |
16,878 | def new_signal(celf, path, iface, name) :
"creates a new DBUS.MESSAGE_TYPE_SIGNAL message."
result = dbus.dbus_message_new_signal(path.encode(), iface.encode(), name.encode())
if result == None :
raise CallFailed("dbus_message_new_signal")
return \
celf(result) | creates a new DBUS.MESSAGE_TYPE_SIGNAL message. |
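A hedged sketch of invoking the classmethod above, assuming the enclosing class is dbussy's Message; the object path, interface, and member names are illustrative.
import dbussy

msg = dbussy.Message.new_signal(
    path="/com/example/Object",
    iface="com.example.Interface",
    name="SomethingChanged",
)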
16,879 | def build_data_table(
energy,
flux,
flux_error=None,
flux_error_lo=None,
flux_error_hi=None,
energy_width=None,
energy_lo=None,
energy_hi=None,
ul=None,
cl=None,
):
table = QTable()
if cl is not None:
cl = validate_scalar("cl", cl)
table.meta["keywords"] = {"cl": {"value": cl}}
table["energy"] = energy
if energy_width is not None:
table["energy_width"] = energy_width
elif energy_lo is not None and energy_hi is not None:
table["energy_lo"] = energy_lo
table["energy_hi"] = energy_hi
table["flux"] = flux
if flux_error is not None:
table["flux_error"] = flux_error
elif flux_error_lo is not None and flux_error_hi is not None:
table["flux_error_lo"] = flux_error_lo
table["flux_error_hi"] = flux_error_hi
else:
raise TypeError("Flux error not provided!")
if ul is not None:
ul = np.array(ul, dtype=np.int)
table["ul"] = ul
table.meta["comments"] = ["Table generated with naima.build_data_table"]
validate_data_table(table)
return table | Read data into data dict.
Parameters
----------
energy : :class:`~astropy.units.Quantity` array instance
Observed photon energy array [physical type ``energy``]
flux : :class:`~astropy.units.Quantity` array instance
Observed flux array [physical type ``flux`` or ``differential flux``]
flux_error, flux_error_hi, flux_error_lo : :class:`~astropy.units.Quantity` array instance
68% CL gaussian uncertainty of the flux [physical type ``flux`` or
``differential flux``]. Either ``flux_error`` (symmetrical uncertainty)
or ``flux_error_hi`` and ``flux_error_lo`` (asymmetrical uncertainties)
must be provided.
energy_width, energy_lo, energy_hi : :class:`~astropy.units.Quantity` array instance, optional
Width of the energy bins [physical type ``energy``]. Either
``energy_width`` (bin width) or ``energy_lo`` and ``energy_hi``
(Energies of the lower and upper bin edges) can be provided. If none
are provided, ``generate_energy_edges`` will be used.
ul : boolean or int array, optional
Boolean array indicating which of the flux values given in ``flux``
correspond to upper limits.
cl : float, optional
Confidence level of the flux upper limits given by ``ul``.
Returns
-------
data : :class:`astropy.table.QTable`
Data stored in an astropy Table. |
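A hedged usage sketch with illustrative numbers; naima.build_data_table is assumed to be the public entry point for the function above, and the energy bin edges are left to generate_energy_edges.
import numpy as np
import astropy.units as u
import naima

energy = np.array([1.0, 3.0, 10.0]) * u.TeV
flux = np.array([2e-12, 5e-13, 8e-14]) * u.Unit("1 / (cm2 s TeV)")
flux_error = 0.2 * flux
ul = [0, 0, 1]  # flag the last point as an upper limit

data = naima.build_data_table(energy, flux, flux_error=flux_error, ul=ul, cl=0.95)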
16,880 | def send_and_require(self,
send,
regexps,
not_there=False,
shutit_pexpect_child=None,
echo=None,
note=None,
loglevel=logging.INFO):
shutit_global.shutit_global_object.yield_to_draw()
shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
return shutit_pexpect_session.send_and_require(send,
regexps,
not_there=not_there,
echo=echo,
note=note,
loglevel=loglevel) | Send string and require the item in the output.
See send_until |
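A hedged sketch of calling the ShutIt wrapper above from a module build step; the command, regex, and note are illustrative.
# shutit is assumed to be the ShutIt instance passed into a module's build()
shutit.send_and_require('ls /tmp', [r'build\.log'], note='build log should be present')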
16,881 | def createPortForm(self, req, tag):
def port(s):
n = int(s)
if n < 0 or n > 65535:
raise ValueError(s)
return n
factories = []
for f in self.store.parent.powerupsFor(IProtocolFactoryFactory):
factories.append((f.__class__.__name__.decode(),
f,
False))
f = LiveForm(
self.portConf.createPort,
[Parameter(, TEXT_INPUT, port, ,
),
Parameter(, TEXT_INPUT, unicode, ,
),
Parameter(, CHECKBOX_INPUT, bool, ,
),
Parameter(, TEXT_INPUT, unicode, ,
),
ChoiceParameter(, factories, ,
)])
f.setFragmentParent(self)
return tag[f] | Create and return a L{LiveForm} for adding a new L{TCPPort} or
L{SSLPort} to the site store. |
16,882 | def is_equivalent(self, other, ignore=False):
def is_equivalent_to_list_of_ipachars(other):
my_ipa_chars = self.canonical_representation.ipa_chars
if len(my_ipa_chars) != len(other):
return False
for i in range(len(my_ipa_chars)):
if not my_ipa_chars[i].is_equivalent(other[i]):
return False
return True
if is_unicode_string(other):
try:
return is_equivalent_to_list_of_ipachars(IPAString(unicode_string=other, ignore=ignore).ipa_chars)
except:
return False
if is_list_of_ipachars(other):
try:
return is_equivalent_to_list_of_ipachars(other)
except:
return False
if isinstance(other, IPAString):
return is_equivalent_to_list_of_ipachars(other.canonical_representation.ipa_chars)
return False | Return ``True`` if the IPA string is equivalent to the ``other`` object.
The ``other`` object can be:
1. a Unicode string,
2. a list of IPAChar objects, and
3. another IPAString.
:param variant other: the object to be compared against
:param bool ignore: if other is a Unicode string, ignore Unicode characters not IPA valid
:rtype: bool |
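A hedged sketch of the three comparison forms the docstring lists, assuming ipapy's ipastring module layout; the example word is illustrative.
from ipapy.ipastring import IPAString

s = IPAString(unicode_string=u"əˈkiːn")
s.is_equivalent(u"əˈkiːn")                            # 1. a Unicode string
s.is_equivalent(s.ipa_chars)                          # 2. a list of IPAChar objects
s.is_equivalent(IPAString(unicode_string=u"əˈkiːn"))  # 3. another IPAString
s.is_equivalent(u"əˈkiːn!?", ignore=True)             # skip characters that are not valid IPA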
16,883 | def _prepare_request(reddit_session, url, params, data, auth, files,
method=None):
if getattr(reddit_session, , False):
bearer = .format(reddit_session.access_token)
headers = {: bearer}
config = reddit_session.config
for prefix in (config.api_url, config.permalink_url):
if url.startswith(prefix):
if config.log_requests >= 1:
msg = .format(
config.oauth_url, prefix)
sys.stderr.write(msg)
url = config.oauth_url + url[len(prefix):]
break
else:
headers = {}
headers.update(reddit_session.http.headers)
if method:
pass
elif data or files:
method = 'POST'
else:
method = 'GET'
if reddit_session.config.log_requests >= 1:
sys.stderr.write(.format(method, url))
if reddit_session.config.log_requests >= 2:
if params:
sys.stderr.write(.format(params))
if data:
sys.stderr.write(.format(data))
if auth:
sys.stderr.write(.format(auth))
request = Request(method=method, url=url, headers=headers, params=params,
auth=auth, cookies=reddit_session.http.cookies)
if method == 'GET':
return request
if data is True:
data = {}
if isinstance(data, dict):
if not auth:
data.setdefault(, )
if reddit_session.modhash:
data.setdefault(, reddit_session.modhash)
else:
request.headers.setdefault(, )
request.data = data
request.files = files
return request | Return a requests Request object that can be "prepared". |
16,884 | def generate_protocol(self,sweep=None):
finalVal=self.protoY[-1]
else:
finalVal=self.holding
self.protoX.append(self.protoX[-1])
self.protoY.append(finalVal)
self.protoX.append(self.sweepSize)
self.protoY.append(finalVal)
for i in range(1,len(self.protoX)-1):
self.protoX[i]=self.protoX[i]+self.offsetX
self.protoSeqY=[self.protoY[0]]
self.protoSeqX=[self.protoX[0]]
for i in range(1,len(self.protoY)):
if not self.protoY[i]==self.protoY[i-1]:
self.protoSeqY.append(self.protoY[i])
self.protoSeqX.append(self.protoX[i])
if self.protoY[0]!=self.protoY[1]:
self.protoY.insert(1,self.protoY[0])
self.protoX.insert(1,self.protoX[1])
self.protoY.insert(1,self.protoY[0])
self.protoX.insert(1,self.protoX[0]+self.offsetX/2)
self.protoSeqY.append(finalVal)
self.protoSeqX.append(self.sweepSize)
self.protoX=np.array(self.protoX)
self.protoY=np.array(self.protoY) | Create (x,y) points necessary to graph protocol for the current sweep. |
16,885 | def _conv_general_shape_tuple(self, lhs_shape, rhs_shape, window_strides,
padding, dimension_numbers):
lhs_perm, rhs_perm, out_perm = self._conv_general_permutations(
dimension_numbers)
lhs_trans = onp.take(lhs_shape, lhs_perm)
rhs_trans = onp.take(rhs_shape, rhs_perm)
out_trans = self._conv_shape_tuple(
lhs_trans, rhs_trans, window_strides, padding)
return tuple(onp.take(out_trans, onp.argsort(out_perm))) | Generalized computation of conv shape. |
16,886 | def _set_index(self, schema, name, fields, **index_options):
query_str = "CREATE {}INDEX IF NOT EXISTS ON {} ({})".format(
if index_options.get(, False) else ,
schema,
name,
self._normalize_table_name(schema),
.join((self._normalize_name(f) for f in fields))
)
return self._query(query_str, ignore_result=True, **index_options) | https://www.sqlite.org/lang_createindex.html |
16,887 | def _new_object(self, objtype, name=None):
if objtype.startswith('net'):
obj = openpnm.network.GenericNetwork(project=self, name=name)
elif objtype.startswith('geo'):
obj = openpnm.geometry.GenericGeometry(project=self, name=name)
elif objtype.startswith('pha'):
obj = openpnm.phases.GenericPhase(project=self, name=name)
elif objtype.startswith('phy'):
obj = openpnm.physics.GenericPhysics(project=self, name=name)
elif objtype.startswith('alg'):
obj = openpnm.algorithm.GenericAlgorithm(project=self, name=name)
else:
obj = openpnm.core.Base(project=self, name=name)
return obj | r""" |
16,888 | def _value_is_dynamic(self,obj,objtype=None):
return hasattr(super(Dynamic,self).__get__(obj,objtype),) | Return True if the parameter is actually dynamic (i.e. the
value is being generated). |
16,889 | def load_stock_prices(self):
from pricedb import SecuritySymbol
info = StocksInfo(self.config)
for item in self.model.stocks:
symbol = SecuritySymbol("", "")
symbol.parse(item.symbol)
price: PriceModel = info.load_latest_price(symbol)
if not price:
price = PriceModel()
price.currency = self.config.get(ConfigKeys.default_currency)
price.value = Decimal(1)
item.price = price.value
if isinstance(item, Stock):
item.currency = price.currency
info.close_databases() | Load latest prices for securities |
16,890 | def export(self, path, session):
if self._graph is not tf_v1.get_default_graph():
raise RuntimeError("default graph differs from the graph where the "
"module was instantiated.")
if self._graph is not session.graph:
raise RuntimeError("session graph differs from the graph where the "
"module was instantiated.")
self._impl.export(path, session) | Exports the module with the variables from the session in `path`.
Note that it is the module definition in the ModuleSpec used to create this
module that gets exported. The session is only used to provide the value
of variables.
Args:
path: path where to export the module to.
session: session where to export the variables from.
Raises:
RuntimeError: if there is an issue during the export. |
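A hedged TF1-style sketch of exporting a module built from a spec; the toy module_fn and export path are assumptions for illustration.
import tensorflow.compat.v1 as tf
import tensorflow_hub as hub
tf.disable_eager_execution()

def module_fn():
    # Toy module: one dense layer over a 4-dim float input.
    x = tf.placeholder(tf.float32, shape=[None, 4])
    y = tf.layers.dense(x, 2)
    hub.add_signature(inputs=x, outputs=y)

spec = hub.create_module_spec(module_fn)
with tf.Graph().as_default():
    module = hub.Module(spec)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        module.export("/tmp/exported_module", sess)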
16,891 | def pixel_to_icrs_coords(x, y, wcs):
icrs_coords = pixel_to_skycoord(x, y, wcs).icrs
icrs_ra = icrs_coords.ra.degree * u.deg
icrs_dec = icrs_coords.dec.degree * u.deg
return icrs_ra, icrs_dec | Convert pixel coordinates to ICRS Right Ascension and Declination.
This is merely a convenience function to extract RA and Dec. from a
`~astropy.coordinates.SkyCoord` instance so they can be put in
separate columns in a `~astropy.table.Table`.
Parameters
----------
x : float or array-like
The x pixel coordinate.
y : float or array-like
The y pixel coordinate.
wcs : `~astropy.wcs.WCS`
The WCS transformation to use to convert from pixel coordinates
to ICRS world coordinates.
Returns
-------
ra : `~astropy.units.Quantity`
The ICRS Right Ascension in degrees.
dec : `~astropy.units.Quantity`
The ICRS Declination in degrees. |
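A minimal sketch of calling the helper above with a hand-built TAN-projection WCS; the reference values are illustrative.
from astropy.wcs import WCS

wcs = WCS(naxis=2)
wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
wcs.wcs.crval = [150.0, 2.0]        # reference RA/Dec in deg
wcs.wcs.crpix = [50.0, 50.0]        # reference pixel
wcs.wcs.cdelt = [-2.8e-4, 2.8e-4]   # deg per pixel

ra, dec = pixel_to_icrs_coords(10.0, 20.0, wcs)  # both returned as Quantity in deg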
16,892 | def residual_block(x, hparams):
k = (hparams.kernel_height, hparams.kernel_width)
dilations_and_kernels = [((1, 1), k) for _ in range(3)]
y = common_layers.subseparable_conv_block(
x,
hparams.hidden_size,
dilations_and_kernels,
padding="SAME",
separability=0,
name="residual_block")
x = common_layers.layer_norm(x + y, hparams.hidden_size, name="lnorm")
return tf.nn.dropout(x, 1.0 - hparams.dropout) | A stack of convolution blocks with residual connection. |
16,893 | def mark_backward(output_tensor, used_node_names):
op = output_tensor.op
if op.name in used_node_names:
return
used_node_names.add(op.name)
for input_tensor in op.inputs:
mark_backward(input_tensor, used_node_names)
for control_input_op in op.control_inputs:
used_node_names.add(control_input_op.name)
for input_tensor in control_input_op.inputs:
mark_backward(input_tensor, used_node_names) | Function to propagate backwards in the graph and mark nodes as used.
Traverses recursively through the graph from the end tensor, through the op
that generates the tensor, and then to the input tensors that feed the op.
Nodes encountered are stored in used_node_names.
Args:
output_tensor: A Tensor which we start the propagation.
used_node_names: A set of strings that stores the names of nodes we've marked as
visited. |
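A small graph-mode sketch of the traversal; names are illustrative and assume the TF1 compat API.
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

a = tf.placeholder(tf.float32, name="a")
b = tf.placeholder(tf.float32, name="b")
c = tf.add(a, b, name="c")

used = set()            # op names reachable backwards from c
mark_backward(c, used)  # used now contains {"c", "a", "b"}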
16,894 | def convert_pattern_to_pil(pattern, version=1):
from PIL import Image
mode = get_pil_mode(pattern.image_mode.name, False)
size = pattern.data.rectangle[3], pattern.data.rectangle[2]
channels = [
_create_channel(size, c.get_data(version), c.pixel_depth).convert()
for c in pattern.data.channels if c.is_written
]
if len(channels) == len(mode) + 1:
mode += 'A'
image = image.point(lambda x: 255 - x)
return image | Convert Pattern to PIL Image. |
16,895 | def concatenate_fields(fields, dim):
if len(fields) == 0:
raise ValueError()
if len(set((f.name, f.shape, f.dtype) for f in fields)) != 1:
raise ValueError()
tpl = fields[0]
attr = InstanceAttribute(tpl.name, shape=tpl.shape, dtype=tpl.dtype,
dim=dim, alias=None)
attr.value = np.array([f.value for f in fields], dtype=tpl.dtype)
return attr | Create an InstanceAttribute from a list of InstanceFields. |
16,896 | def update_environment(self, environment, environment_ids):
uri = % environment_ids
data = dict()
data[] = list()
data[].append(environment)
return super(ApiEnvironment, self).put(uri, data) | Method to update environment
:param environment: Environment data to update
:param environment_ids: Ids of Environment |
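A hedged sketch of calling the wrapper above; api_env is assumed to be an already-authenticated ApiEnvironment client, and the payload fields and id are illustrative.
environment = {"name": "PROD-FE", "vrf": "default"}
api_env.update_environment(environment, 42)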
16,897 | def _AddEvents(cls, Class):
def make_event(event):
return property(lambda self: self._GetDefaultEventHandler(event),
lambda self, Value: self._SetDefaultEventHandler(event, Value))
for event in dir(Class):
if not event.startswith('_'):
setattr(cls, 'On%s' % event, make_event(event))
cls._EventNames.append(event) | Adds events based on the attributes of the given ``...Events`` class.
:Parameters:
Class : class
An `...Events` class whose methods define events that may occur in the
instances of the current class. |
16,898 | def parse_unstruct(unstruct):
my_json = json.loads(unstruct)
data = my_json['data']
schema = data['schema']
if 'data' in data:
inner_data = data['data']
else:
raise SnowplowEventTransformationException(["Could not extract inner data field from unstructured event"])
fixed_schema = fix_schema("unstruct_event", schema)
return [(fixed_schema, inner_data)] | Convert an unstructured event JSON to a list containing one Elasticsearch-compatible key-value pair
For example, the JSON
{
"data": {
"data": {
"key": "value"
},
"schema": "iglu:com.snowplowanalytics.snowplow/link_click/jsonschema/1-0-1"
},
"schema": "iglu:com.snowplowanalytics.snowplow/unstruct_event/jsonschema/1-0-0"
}
would become
[
(
"unstruct_com_snowplowanalytics_snowplow_link_click_1", {
"key": "value"
}
)
] |
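A usage sketch built directly from the docstring's example payload; the expected key name follows from fix_schema as described above.
import json

event = json.dumps({
    "schema": "iglu:com.snowplowanalytics.snowplow/unstruct_event/jsonschema/1-0-0",
    "data": {
        "schema": "iglu:com.snowplowanalytics.snowplow/link_click/jsonschema/1-0-1",
        "data": {"targetUrl": "http://example.com"},
    },
})
parse_unstruct(event)
# -> [("unstruct_com_snowplowanalytics_snowplow_link_click_1", {"targetUrl": "http://example.com"})]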
16,899 | def get_settings_from_interface(iface):
settings = {}
schema_id = iface.getName()
settings[schema_id] = {}
schema = getAdapter(api.get_portal(), iface)
for setting in getFieldNames(iface):
value = getattr(schema, setting, None)
if is_json_serializable(value):
settings[schema_id][setting] = value
return settings | Get the configuration settings associated with the given schema
interface
:param iface: The schema interface from which we want to get its
fields
:return: Dictionary with iface name as key and as value a dictionary
with the setting names (keys) linked to that schema and its
values. |