Unnamed: 0 | code | docstring
---|---|---|
12,300 | def create_dimension(ncfile, name, length) -> None:
    try:
        ncfile.createDimension(name, length)
    except BaseException:
        objecttools.augment_excmessage(
            'While trying to add dimension `%s` with length `%d` '
            'to the NetCDF file `%s`'
            % (name, length, get_filepath(ncfile))) | Add a new dimension with the given name and length to the given
NetCDF file.
Essentially, |create_dimension| just calls the equally named method
of the NetCDF library, but adds information to possible error messages:
>>> from hydpy import TestIO
>>> from hydpy.core.netcdftools import netcdf4
>>> with TestIO():
... ncfile = netcdf4.Dataset('test.nc', 'w')
>>> from hydpy.core.netcdftools import create_dimension
>>> create_dimension(ncfile, 'dim1', 5)
>>> dim = ncfile.dimensions['dim1']
>>> dim.size if hasattr(dim, 'size') else dim
5
>>> try:
... create_dimension(ncfile, 'dim1', 5)
... except BaseException as exc:
... print(exc) # doctest: +ELLIPSIS
While trying to add dimension `dim1` with length `5` \
to the NetCDF file `test.nc`, the following error occurred: ...
>>> ncfile.close() |
12,301 | def showtraceback(self, *args, **kwargs):
    try:
        type, value, tb = sys.exc_info()
        sys.last_type = type
        sys.last_value = value
        sys.last_traceback = tb
        tblist = traceback.extract_tb(tb)
        del tblist[:1]
        lines = traceback.format_list(tblist)
        if lines:
            lines.insert(0, "Traceback (most recent call last):\n")
        lines.extend(traceback.format_exception_only(type, value))
    finally:
        tblist = tb = None
    sys.stderr.write("".join(lines)) | Display the exception that just occurred. |
12,302 | def _scalar_field_to_json(field, row_value):
converter = _SCALAR_VALUE_TO_JSON_ROW.get(field.field_type)
if converter is None:
return row_value
return converter(row_value) | Maps a field and value to a JSON-safe value.
Args:
field ( \
:class:`~google.cloud.bigquery.schema.SchemaField`, \
):
The SchemaField to use for type conversion and field name.
row_value (any):
Value to be converted, based on the field's type.
Returns:
any:
A JSON-serializable object. |
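A hedged, self-contained sketch of the lookup-table pattern above: values whose field type has a registered converter are transformed, everything else passes through. The converter table and `Field` tuple below are stand-ins for illustration, not the BigQuery client's real internals.
import base64
import datetime
from collections import namedtuple

_SCALAR_VALUE_TO_JSON_ROW = {            # assumed stand-in table
    'TIMESTAMP': lambda v: v.isoformat(),
    'BYTES': lambda v: base64.b64encode(v).decode('ascii'),
}
Field = namedtuple('Field', ['field_type'])

def scalar_field_to_json(field, row_value):
    converter = _SCALAR_VALUE_TO_JSON_ROW.get(field.field_type)
    return row_value if converter is None else converter(row_value)

print(scalar_field_to_json(Field('TIMESTAMP'), datetime.datetime(2020, 1, 1)))  # '2020-01-01T00:00:00'
print(scalar_field_to_json(Field('INT64'), 42))                                 # 42 (no converter: passthrough)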
12,303 | def requires(self):
if self.use_spark:
return AggregateArtistsSpark(self.date_interval)
else:
return AggregateArtists(self.date_interval) | This task's dependencies:
* :py:class:`~.AggregateArtists` or
* :py:class:`~.AggregateArtistsSpark` if :py:attr:`~.Top10Artists.use_spark` is set.
:return: object (:py:class:`luigi.task.Task`) |
12,304 | def song(self, song_id):
    if song_id.startswith('T'):
        song_info = self._call(
            mc_calls.FetchTrack,
            song_id
        ).body
    else:
        song_info = next(
            (
                song
                for song in self.songs()
                if song['id'] == song_id
            ),
            None
        )
    return song_info | Get information about a song.
Parameters:
song_id (str): A song ID.
Returns:
dict: Song information. |
12,305 | def calc_and_plot_sample_orient_check(self):
fit = self.current_fit
if fit == None:
return
pars = fit.get('specimen')
if 'specimen_dec' not in list(pars.keys()) or 'specimen_inc' not in list(pars.keys()):
fit.put(self.s, 'specimen', self.get_PCA_parameters(
self.s, fit, fit.tmin, fit.tmax, 'specimen', fit.PCA_type))
pars = fit.get('specimen')
if not pars:
self.user_warning(
"could not calculate fit %s for specimen %s in specimen coordinate system while checking sample orientation please check data" % (fit.name, self.s))
return
dec, inc = pars['specimen_dec'], pars['specimen_inc']
sample = self.Data_hierarchy['sample_of_specimen'][self.s]
if sample not in list(self.Data_info["er_samples"].keys()) or "sample_azimuth" not in list(self.Data_info["er_samples"][sample].keys()) or "sample_dip" not in list(self.Data_info["er_samples"][sample].keys()):
self.user_warning(
"Could not display sample orientation checks because sample azimuth or sample dip is missing from er_samples table for sample %s" % sample)
self.check_orient_on = False
return
try:
azimuth = float(self.Data_info["er_samples"][sample]['sample_azimuth'])
dip = float(self.Data_info["er_samples"][sample]['sample_dip'])
except TypeError:
self.user_warning(
"Could not display sample orientation checks because azimuth or dip is missing (or invalid) for sample %s" % sample)
self.check_orient_on = False
return
d, i = pmag.dogeo(dec, inc, azimuth-180., -dip)
XY = pmag.dimap(d, i)
if i > 0:
FC = fit.color
SIZE = 15*self.GUI_RESOLUTION
else:
FC = 'white'
SIZE = 15*self.GUI_RESOLUTION
self.high_level_eqarea.scatter([XY[0]], [
XY[1]], marker=, edgecolor=fit.color, facecolor=FC, s=SIZE, lw=1, clip_on=False)
if self.ie_open:
self.ie.scatter([XY[0]], [XY[1]], marker=, edgecolor=fit.color,
facecolor=FC, s=SIZE, lw=1, clip_on=False)
d, i = pmag.dogeo(dec, inc, azimuth-180., dip)
XY = pmag.dimap(d, i)
if i > 0:
FC = fit.color
SIZE = 15*self.GUI_RESOLUTION
else:
FC = 'white'
SIZE = 15*self.GUI_RESOLUTION
self.high_level_eqarea.scatter([XY[0]], [
XY[1]], marker=, edgecolor=fit.color, facecolor=FC, s=SIZE, lw=1, clip_on=False)
if self.ie_open:
self.ie.scatter([XY[0]], [XY[1]], marker=, edgecolor=fit.color,
facecolor=FC, s=SIZE, lw=1, clip_on=False)
color=fit.color, s=SIZE/2, lw=1, clip_on=False) | If sample orientation is on plots the wrong arrow, wrong compass,
and rotated sample error directions for the current specimen
interpretation on the high level mean plot so that you can check
sample orientation good/bad. |
12,306 | def _get_content(data, which_content):
content = ''
if data.get(which_content):
if isinstance(data.get(which_content), feedparser.FeedParserDict):
content = data.get(which_content)['value']
elif not isinstance(data.get(which_content), str):
if 'value' in data.get(which_content)[0]:
content = data.get(which_content)[0].value
else:
content = data.get(which_content)
return content | get the content that could be hidden
in the middle of "content" or "summary detail"
from the data of the provider |
12,307 | def maybe_download(url, filename):
if not os.path.exists(WORK_DIRECTORY):
os.mkdir(WORK_DIRECTORY)
filepath = os.path.join(WORK_DIRECTORY, filename)
if not os.path.exists(filepath):
filepath, _ = request.urlretrieve(url + filename, filepath)
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
return filepath | Download the data from Yann's website, unless it's already here. |
12,308 | def drp_load_data(package, data, confclass=None):
drpdict = yaml.safe_load(data)
ins = load_instrument(package, drpdict, confclass=confclass)
if ins.version == 'undefined':
pkg = importlib.import_module(package)
ins.version = getattr(pkg, '__version__', 'undefined')
return ins | Load the DRPS from data. |
12,309 | def get_plot(self, normalize_rxn_coordinate=True, label_barrier=True):
    plt = pretty_plot(12, 8)
    scale = 1 if not normalize_rxn_coordinate else 1 / self.r[-1]
    x = np.arange(0, np.max(self.r), 0.01)
    y = self.spline(x) * 1000
    relative_energies = self.energies - self.energies[0]
    plt.plot(self.r * scale, relative_energies * 1000, 'ro',
             x * scale, y, 'k--', linewidth=2, markersize=10)
    plt.xlabel("Reaction coordinate")
    plt.ylabel("Energy (meV)")
    plt.ylim((np.min(y) - 10, np.max(y) * 1.02 + 20))
    if label_barrier:
        data = zip(x * scale, y)
        barrier = max(data, key=lambda d: d[1])
        plt.plot([0, barrier[0]], [barrier[1], barrier[1]], 'r--')
        plt.annotate('%.0f meV' % (np.max(y) - np.min(y)),
                     xy=(barrier[0] / 2, barrier[1] * 1.02),
                     xytext=(barrier[0] / 2, barrier[1] * 1.02),
                     horizontalalignment='center')
    plt.tight_layout()
    return plt | Returns the NEB plot. Uses Henkelman's approach of spline fitting
each section of the reaction path based on tangent force and energies.
Args:
normalize_rxn_coordinate (bool): Whether to normalize the
reaction coordinate to between 0 and 1. Defaults to True.
label_barrier (bool): Whether to label the maximum barrier.
Returns:
matplotlib.pyplot object. |
12,310 | def read_string_from_file(path, encoding="utf8"):
with codecs.open(path, "rb", encoding=encoding) as f:
value = f.read()
return value | Read entire contents of file into a string. |
12,311 | def add_role(self, role, term, start_date=None, end_date=None,
             **kwargs):
    self['roles'].append(dict(role=role, term=term,
                              start_date=start_date,
                              end_date=end_date, **kwargs)) | Examples:
leg.add_role('member', term='2009', chamber='upper',
party='Republican', district='10th') |
12,312 | def press(self):
@param_to_property(
key=["home", "back", "left", "right", "up", "down", "center",
"menu", "search", "enter", "delete", "del", "recent",
"volume_up", "volume_down", "volume_mute", "camera", "power"]
)
def _press(key, meta=None):
if isinstance(key, int):
return self.server.jsonrpc.pressKeyCode(key, meta) if meta else self.server.jsonrpc.pressKeyCode(key)
else:
return self.server.jsonrpc.pressKey(str(key))
return _press | press key via name or key code. Supported key name includes:
home, back, left, right, up, down, center, menu, search, enter,
delete(or del), recent(recent apps), volume_up, volume_down,
volume_mute, camera, power.
Usage:
d.press.back() # press back key
d.press.menu() # press menu key
d.press(89) # press keycode |
12,313 | def _copy_old_features(new_eopatch, old_eopatch, copy_features):
if copy_features:
existing_features = set(new_eopatch.get_feature_list())
for copy_feature_type, copy_feature_name, copy_new_feature_name in copy_features:
new_feature = copy_feature_type, copy_new_feature_name
if new_feature in existing_features:
raise ValueError(
'Feature {} of type {} already exists in the new EOPatch'.format(copy_new_feature_name, copy_feature_type))
else:
existing_features.add(new_feature)
new_eopatch[copy_feature_type][copy_new_feature_name] = \
old_eopatch[copy_feature_type][copy_feature_name]
return new_eopatch | Copy features from old EOPatch
:param new_eopatch: New EOPatch container where the old features will be copied to
:type new_eopatch: EOPatch
:param old_eopatch: Old EOPatch container where the old features are located
:type old_eopatch: EOPatch
:param copy_features: List of tuples of type (FeatureType, str) or (FeatureType, str, str) that are copied
over into the new EOPatch. The first string is the feature name, and the second one (optional) is a new name
to be used for the feature
:type copy_features: list((FeatureType, str) or (FeatureType, str, str)) |
12,314 | def OSCBlob(next):
    if type(next) == type(""):
        length = len(next)
        padded = math.ceil((len(next)) / 4.0) * 4
        binary = struct.pack(">i%ds" % (padded), length, next)
        tag = 'b'
    else:
        tag = ''
        binary = ''
    return (tag, binary) | Convert a string into an OSC Blob,
returning a (typetag, data) tuple. |
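The 4-byte padding rule is the interesting part; a small standalone sketch redoes the same arithmetic on a bytes payload (the helper above is Python-2 era and packs a str directly):
import math
import struct

payload = b"abc"
length = len(payload)
padded = int(math.ceil(length / 4.0) * 4)              # OSC blobs are padded to a 4-byte boundary
blob = struct.pack(">i%ds" % padded, length, payload)  # big-endian length prefix + padded payload
print(length, padded, len(blob))                       # 3 4 8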
12,315 | def _create_deployment_object(self, job_name, job_image,
deployment_name, port=80,
replicas=1,
cmd_string=None,
engine_json_file=,
engine_dir=):
security_context = None
if in self.config[]:
security_context = client.V1SecurityContext(run_as_group=self.group_id,
run_as_user=self.user_id,
run_as_non_root=self.run_as_non_root)
environment_vars = client.V1EnvVar(name="TEST", value="SOME DATA")
launch_args = ["-c", "{0}; /app/deploy.sh;".format(cmd_string)]
print(launch_args)
container = None
if security_context:
container = client.V1Container(
name=job_name,
image=job_image,
ports=[client.V1ContainerPort(container_port=port)],
command=[],
args=launch_args,
env=[environment_vars],
security_context=security_context)
else:
container = client.V1Container(
name=job_name,
image=job_image,
ports=[client.V1ContainerPort(container_port=port)],
command=[],
args=launch_args,
env=[environment_vars])
secret = None
if self.secret:
secret = client.V1LocalObjectReference(name=self.secret)
template = client.V1PodTemplateSpec(
metadata=client.V1ObjectMeta(labels={"app": job_name}),
spec=client.V1PodSpec(containers=[container], image_pull_secrets=[secret]))
spec = client.ExtensionsV1beta1DeploymentSpec(replicas=replicas,
template=template)
deployment = client.ExtensionsV1beta1Deployment(
api_version="extensions/v1beta1",
kind="Deployment",
metadata=client.V1ObjectMeta(name=deployment_name),
spec=spec)
return deployment | Create a kubernetes deployment for the job.
Args:
- job_name (string) : Name of the job and deployment
- job_image (string) : Docker image to launch
KWargs:
- port (integer) : Container port
- replicas : Number of replica containers to maintain
Returns:
- True: The deployment object to launch |
12,316 | def analyze_dir(stats, parent_dir, rel_filepaths, cover_filename, *, ignore_existing=False):
no_metadata = None, None, None
metadata = no_metadata
audio_filepaths = []
for rel_filepath in rel_filepaths:
stats["files"] += 1
try:
ext = os.path.splitext(rel_filepath)[1][1:].lower()
except IndexError:
continue
if ext in AUDIO_EXTENSIONS:
audio_filepaths.append(os.path.join(parent_dir, rel_filepath))
if audio_filepaths:
stats["albums"] += 1
if (cover_filename != EMBEDDED_ALBUM_ART_SYMBOL):
missing = (not os.path.isfile(os.path.join(parent_dir, cover_filename))) or ignore_existing
if missing:
metadata = get_metadata(audio_filepaths)
else:
metadata = get_metadata(audio_filepaths)
missing = (not metadata[2]) or ignore_existing
if missing:
stats["missing covers"] += 1
if not all(metadata[:-1]):
stats["errors"] += 1
logging.getLogger("sacad_r").error("Unable to read metadata for album directory '%s'" % (parent_dir))
else:
metadata = no_metadata
return metadata | Analyze a directory (non recursively) to get its album metadata if it is one. |
12,317 | def null_concept(self):
cause_repertoire = self.cause_repertoire((), ())
effect_repertoire = self.effect_repertoire((), ())
cause = MaximallyIrreducibleCause(
_null_ria(Direction.CAUSE, (), (), cause_repertoire))
effect = MaximallyIrreducibleEffect(
_null_ria(Direction.EFFECT, (), (), effect_repertoire))
return Concept(mechanism=(),
cause=cause,
effect=effect,
subsystem=self) | Return the null concept of this subsystem.
The null concept is a point in concept space identified with
the unconstrained cause and effect repertoire of this subsystem. |
12,318 | def find_usb_device_by_address(self, name):
if not isinstance(name, basestring):
raise TypeError("name can only be an instance of type basestring")
device = self._call("findUSBDeviceByAddress",
in_p=[name])
device = IHostUSBDevice(device)
return device | Searches for a USB device with the given host address.
:py:func:`IUSBDevice.address`
in name of type str
Address of the USB device (as assigned by the host) to
search for.
return device of type :class:`IHostUSBDevice`
Found USB device object.
raises :class:`VBoxErrorObjectNotFound`
Given @c name does not correspond to any USB device. |
12,319 | def first_setup(self):
if ATTR_FIRST_SETUP not in self.raw:
return None
return datetime.utcfromtimestamp(self.raw[ATTR_FIRST_SETUP]) | This is a guess of the meaning of this value. |
12,320 | def get_osdp(self, id_or_uri):
uri = self._client.build_subresource_uri(resource_id_or_uri=id_or_uri, subresource_path="osdp")
return self._client.get(uri) | Retrieves facts about Server Profiles and Server Profile Templates that are using Deployment Plan based on the ID or URI provided.
Args:
id_or_uri: ID or URI of the Deployment Plan.
Returns:
dict: Server Profiles and Server Profile Templates |
12,321 | def set_prefix(self, elt, pyobj):
if isinstance(pyobj, tuple):
namespaceURI,localName = pyobj
self.prefix = elt.getPrefix(namespaceURI) | use this method to set the prefix of the QName,
method looks in DOM to find prefix or set new prefix.
This method must be called before get_formatted_content. |
12,322 | def get_index_text(self, modname, name_cls):
if self.objtype in (, ):
if not modname:
return _() % (name_cls[0], self.objtype)
return _() % (name_cls[0], self.objtype, modname)
else:
return | Return index entry text based on object type. |
12,323 | def ability(cls, id_, name, function_type, ability_id, general_id=0):
assert function_type in ABILITY_FUNCTIONS
return cls(id_, name, ability_id, general_id, function_type,
FUNCTION_TYPES[function_type], None) | Define a function represented as a game ability. |
12,324 | def get_external_command_output(command: str) -> bytes:
args = shlex.split(command)
ret = subprocess.check_output(args)
return ret | Takes a command-line command, executes it, and returns its ``stdout``
output.
Args:
command: command string
Returns:
output from the command as ``bytes`` |
12,325 | def _create_ids(self, home_teams, away_teams):
categories = pd.Categorical(np.append(home_teams,away_teams))
home_id, away_id = categories.codes[0:int(len(categories)/2)], categories.codes[int(len(categories)/2):len(categories)+1]
return home_id, away_id | Creates IDs for both players/teams |
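A standalone sketch of the trick used above: `pandas.Categorical` assigns one integer code per unique team name, and splitting the appended array in half gives aligned home/away ID vectors (the team names are made up):
import numpy as np
import pandas as pd

home_teams = np.array(["Reds", "Blues", "Greens"])
away_teams = np.array(["Blues", "Greens", "Reds"])

categories = pd.Categorical(np.append(home_teams, away_teams))
half = len(categories) // 2
home_id, away_id = categories.codes[:half], categories.codes[half:]
print(home_id)   # [2 0 1] -- Blues=0, Greens=1, Reds=2
print(away_id)   # [0 1 2]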
12,326 | def from_int(data):
if not isinstance(data, int) and not isinstance(data, long):
raise TypeError()
res = []
while data > 0 or not res:
for j in range(5):
if not j % 2:
res += CONSONANTS[(data & 0xf)]
data >>= 4
else:
res += VOWELS[(data & 0x3)]
data >>= 2
if data > 0:
res += '-'
res.reverse()
return ''.join(res) | :params data: integer
:returns: proquint made from input data
:type data: int
:rtype: string |
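The loop walks the integer from its low bits upward, alternating 4-bit consonant and 2-bit vowel lookups and reversing at the end. A self-contained sketch for a single 16-bit chunk, assuming the standard proquint alphabets (the module's actual `CONSONANTS`/`VOWELS` constants are not shown above):
CONSONANTS = "bdfghjklmnprstvz"   # 16 consonants -> 4 bits each (assumed standard ordering)
VOWELS = "aiou"                   # 4 vowels -> 2 bits each

def chunk_to_proquint(chunk):
    out = []
    for j in range(5):
        if not j % 2:
            out.append(CONSONANTS[chunk & 0xF]); chunk >>= 4
        else:
            out.append(VOWELS[chunk & 0x3]); chunk >>= 2
    return "".join(reversed(out))

print(chunk_to_proquint(0x7F00))   # 'lusab' -- the high half of 127.0.0.1 in the classic example
print(chunk_to_proquint(0x0001))   # 'babad'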
12,327 | def align_file_position(f, size):
align = (size - 1) - (f.tell() % size)
f.seek(align, 1) | Align the position in the file to the next block of specified size |
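A quick runnable check of the arithmetic with an in-memory stream (re-using the helper above); note that the relative seek of `(size - 1) - (tell % size)` bytes lands on the last byte of the current block:
import io

f = io.BytesIO(bytes(64))
f.seek(5)
align_file_position(f, 16)   # moves forward by (16 - 1) - (5 % 16) = 10 bytes
print(f.tell())              # 15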
12,328 | def mstmap(args):
from jcvi.assembly.geneticmap import MSTMatrix
p = OptionParser(mstmap.__doc__)
p.add_option("--population_type", default="RIL6",
help="Type of population, possible values are DH and RILd")
p.add_option("--missing_threshold", default=.5,
help="Missing threshold, .25 excludes any marker with >25% missing")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
lmd, = args
fp = open(lmd)
next(fp)
table = {"0": "-", "1": "A", "2": "B", "3": "X"}
mh = ["locus_name"] + next(fp).split()[4:]
genotypes = []
for row in fp:
atoms = row.split()
chr, pos, ref, alt = atoms[:4]
locus_name = ".".join((chr, pos))
codes = [table[x] for x in atoms[4:]]
genotypes.append([locus_name] + codes)
mm = MSTMatrix(genotypes, mh, opts.population_type, opts.missing_threshold)
mm.write(opts.outfile, header=True) | %prog mstmap LMD50.snps.genotype.txt
Convert LMDs to MSTMAP input. |
12,329 | def from_dict(cls, d):
conf = {}
for k in d["config"]:
v = d["config"][k]
if isinstance(v, dict):
if u"type" in v:
typestr = v[u"type"]
else:
typestr = v["type"]
conf[str(k)] = classes.get_dict_handler(typestr)(v)
else:
conf[str(k)] = v
return classes.get_class(d["class"])(name=d["name"], config=conf) | Restores an object state from a dictionary, used in de-JSONification.
:param d: the object dictionary
:type d: dict
:return: the object
:rtype: object |
12,330 | def convert_reshape(net, node, module, builder):
    input_name, output_name = _get_input_output_name(net, node)
    name = node['name']
    param = _get_attr(node)
    target_shape = literal_eval(param['shape'])
    if target_shape == (0, -1):
        convert_flatten(net, node, module, builder)
        return
    if any(item <= 0 for item in target_shape):
        raise NotImplementedError(
            'Special dimensional values (<= 0) in the target shape are not supported')
    if 'reverse' in node and node['reverse'] == 'True':
        raise NotImplementedError(
            '"reverse" parameter is not supported')
    mode = 0
    builder.add_reshape(name, input_name, output_name, target_shape, mode) | Converts a reshape layer from mxnet to coreml.
This doesn't currently handle the deprecated parameters for the reshape layer.
Parameters
----------
net: network
An mxnet network object.
node: layer
Node to convert.
module: module
A module for MXNet
builder: NeuralNetworkBuilder
A neural network builder object. |
12,331 | def get_user(self, username):
User = get_user_model()
try:
user = User.objects.get(**{
User.USERNAME_FIELD: username,
})
if user.is_active:
raise ActivationError(
self.ALREADY_ACTIVATED_MESSAGE,
code='already_activated'
)
return user
except User.DoesNotExist:
raise ActivationError(
self.BAD_USERNAME_MESSAGE,
code='bad_username'
) | Given the verified username, look up and return the
corresponding user account if it exists, or raising
``ActivationError`` if it doesn't. |
12,332 | def headerData(self, section, orientation, role=Qt.DisplayRole):
if role == Qt.DisplayRole:
if orientation == Qt.Horizontal:
if section < len(self.__horizontal_headers):
return self.__horizontal_headers.keys()[section]
elif orientation == Qt.Vertical:
if section < len(self.__vertical_headers):
return self.__vertical_headers.keys()[section]
return QVariant() | Reimplements the :meth:`QAbstractItemModel.headerData` method.
:param section: Section.
:type section: int
:param orientation: Orientation. ( Qt.Orientation )
:param role: Role.
:type role: int
:return: Header data.
:rtype: QVariant |
12,333 | def intersection(self, *args):
values = self.values()
if args:
values = [val for key,val in self.items() if key in args]
return set(reduce(set.intersection, values)) | Returns the intersection of the values whose keys are in *args. If *args is blank, returns the intersection of all values. |
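The core of the method is `reduce(set.intersection, values)`; a standalone sketch with a plain dict (the mapping and keys are invented for illustration):
from functools import reduce

data = {
    "alice": {"read", "write"},
    "bob": {"read"},
    "carol": {"read", "admin"},
}

print(reduce(set.intersection, data.values()))                        # {'read'} -- all values
selected = [v for k, v in data.items() if k in ("alice", "carol")]
print(set(reduce(set.intersection, selected)))                        # {'read'} -- only the named keys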
12,334 | def total_supply(self, block_identifier='latest'):
    return self.proxy.contract.functions.totalSupply().call(block_identifier=block_identifier) | Return the total supply of the token at the given block identifier. |
12,335 | def id_generator(size=15, random_state=None):
    chars = list(string.ascii_uppercase + string.digits)
    return ''.join(random_state.choice(chars, size, replace=True)) | Helper function to generate random div ids. This is useful for embedding
HTML into ipython notebooks. |
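A hedged usage sketch, assuming the helper above is importable and that `random_state` is a `numpy.random.RandomState` (whose `choice` accepts the `size`/`replace` arguments used):
import numpy as np

rs = np.random.RandomState(0)
print(id_generator(random_state=rs))                 # 15 random A-Z/0-9 characters, reproducible for a fixed seed
print(len(id_generator(size=8, random_state=rs)))    # 8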
12,336 | def encode(data, scheme=None, size=None):
    size = size if size else 'ShapeAuto'
    size_name = '{0}{1}'.format(ENCODING_SIZE_PREFIX, size)
    if not hasattr(DmtxSymbolSize, size_name):
        raise PyLibDMTXError(
            'Invalid size [{0}]: should be one of {1}'.format(
                size, ENCODING_SIZE_NAMES
            )
        )
    size = getattr(DmtxSymbolSize, size_name)
    scheme = scheme if scheme else 'Ascii'
    scheme_name = '{0}{1}'.format(
        ENCODING_SCHEME_PREFIX, scheme.capitalize()
    )
    if not hasattr(DmtxScheme, scheme_name):
        raise PyLibDMTXError(
            'Invalid scheme [{0}]: should be one of {1}'.format(
                scheme, ENCODING_SCHEME_NAMES
            )
        )
    scheme = getattr(DmtxScheme, scheme_name)
    with _encoder() as encoder:
        dmtxEncodeSetProp(encoder, DmtxProperty.DmtxPropScheme, scheme)
        dmtxEncodeSetProp(encoder, DmtxProperty.DmtxPropSizeRequest, size)
        if dmtxEncodeDataMatrix(encoder, len(data), cast(data, c_ubyte_p)) == 0:
            raise PyLibDMTXError(
                'Could not encode data, possibly because the image is not large enough to contain the data'
            )
        w, h, bpp = map(
            partial(dmtxImageGetProp, encoder[0].image),
            (
                DmtxProperty.DmtxPropWidth, DmtxProperty.DmtxPropHeight,
                DmtxProperty.DmtxPropBitsPerPixel
            )
        )
        size = w * h * bpp // 8
        pixels = cast(
            encoder[0].image[0].pxl, ctypes.POINTER(ctypes.c_ubyte * size)
        )
        return Encoded(
            width=w, height=h, bpp=bpp, pixels=ctypes.string_at(pixels, size)
        ) | Encodes `data` in a DataMatrix image.
For now bpp is the libdmtx default which is 24
Args:
data: bytes instance
scheme: encoding scheme - one of `ENCODING_SCHEME_NAMES`, or `None`.
If `None`, defaults to 'Ascii'.
size: image dimensions - one of `ENCODING_SIZE_NAMES`, or `None`.
If `None`, defaults to 'ShapeAuto'.
Returns:
Encoded: with properties `(width, height, bpp, pixels)`.
You can use that result to build a PIL image:
Image.frombytes('RGB', (width, height), pixels) |
12,337 | def _calc_min_size(self, conv_layers):
input_size = 1
for _, conv_params, max_pooling in reversed(conv_layers):
if max_pooling is not None:
kernel_size, stride = max_pooling
input_size = input_size * stride + (kernel_size - stride)
if conv_params is not None:
kernel_size, stride = conv_params
input_size = input_size * stride + (kernel_size - stride)
return input_size | Calculates the minimum size of the input layer.
Given a set of convolutional layers, calculate the minimum value of
the `input_height` and `input_width`, i.e. such that the output has
size 1x1. Assumes snt.VALID padding.
Args:
conv_layers: List of tuples `(output_channels, (kernel_size, stride),
(pooling_size, pooling_stride))`
Returns:
Minimum value of input height and width. |
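The recurrence is easy to check by hand; the standalone sketch below mirrors the method body for one conv layer (kernel 3, stride 1) followed by 2x2 max-pooling, using the layer-tuple format from the docstring:
def min_input_size(conv_layers):
    size = 1
    for _, conv_params, max_pooling in reversed(conv_layers):
        if max_pooling is not None:
            kernel, stride = max_pooling
            size = size * stride + (kernel - stride)
        if conv_params is not None:
            kernel, stride = conv_params
            size = size * stride + (kernel - stride)
    return size

layers = [(16, (3, 1), (2, 2))]     # (output_channels, (kernel, stride), (pool_kernel, pool_stride))
print(min_input_size(layers))       # pooling: 1*2 + 0 = 2, then conv: 2*1 + 2 = 4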
12,338 | def getnames():
namestring = ""
addmore = 1
while addmore:
scientist = input("Enter name - <Return> when done ")
if scientist != "":
namestring = namestring + ":" + scientist
else:
namestring = namestring[1:]
addmore = 0
return namestring | get mail names |
12,339 | def substitute_selected_state(state, as_template=False, keep_name=False):
assert isinstance(state, State)
from rafcon.core.states.barrier_concurrency_state import DeciderState
if isinstance(state, DeciderState):
raise ValueError("State of type DeciderState can not be substituted.")
smm_m = rafcon.gui.singleton.state_machine_manager_model
if not smm_m.selected_state_machine_id:
logger.error("Selected state machine can not be found, please select a state within a state machine first.")
return False
selection = smm_m.state_machines[smm_m.selected_state_machine_id].selection
selected_state_m = selection.get_selected_state()
if len(selection.states) != 1:
logger.error("Please select exactly one state for the substitution")
return False
if is_selection_inside_of_library_state(selected_elements=[selected_state_m]):
logger.warning("Substitute is not performed because target state is inside of a library state.")
return
gui_helper_state.substitute_state_as(selected_state_m, state, as_template, keep_name)
return True | Substitute the selected state with the handed state
:param rafcon.core.states.state.State state: A state of any functional type that derives from State
:param bool as_template: The flag determines whether a handed state of type LibraryState is inserted as a template
:return: |
12,340 | def _cryptodome_encrypt(cipher_factory, plaintext, key, iv):
encryptor = cipher_factory(key, iv)
return encryptor.encrypt(plaintext) | Use a Pycryptodome cipher factory to encrypt data.
:param cipher_factory: Factory callable that builds a Pycryptodome Cipher
instance based on the key and IV
:type cipher_factory: callable
:param bytes plaintext: Plaintext data to encrypt
:param bytes key: Encryption key
:param bytes iv: Initialization vector
:returns: Encrypted ciphertext
:rtype: bytes |
12,341 | def export_organizations(self, outfile):
exporter = SortingHatOrganizationsExporter(self.db)
dump = exporter.export()
try:
outfile.write(dump)
outfile.write('\n')
except IOError as e:
raise RuntimeError(str(e))
return CMD_SUCCESS | Export organizations information to a file.
The method exports information related to organizations, to
the given 'outfile' output file.
:param outfile: destination file object |
12,342 | def switch_region(request, region_name,
                  redirect_field_name=auth.REDIRECT_FIELD_NAME):
    if region_name in request.user.available_services_regions:
        request.session['services_region'] = region_name
        LOG.debug('Switching services region to %s for user "%s".',
                  region_name, request.user.username)
    redirect_to = request.GET.get(redirect_field_name, '')
    if not is_safe_url(url=redirect_to, host=request.get_host()):
        redirect_to = settings.LOGIN_REDIRECT_URL
    response = shortcuts.redirect(redirect_to)
    utils.set_response_cookie(response, 'services_region',
                              request.session['services_region'])
    return response | Switches the user's region for all services except Identity service.
The region will be switched if the given region is one of the regions
available for the scoped project. Otherwise the region is not switched. |
12,343 | def drawpoint(self, x, y, colour = None):
self.checkforpilimage()
colour = self.defaultcolour(colour)
self.changecolourmode(colour)
self.makedraw()
(pilx, pily) = self.pilcoords((x,y))
self.draw.point((pilx, pily), fill = colour) | Most elementary drawing, single pixel, used mainly for testing purposes.
Coordinates are those of your initial image ! |
12,344 | def _run_snpeff(snp_in, out_format, data):
snpeff_db, datadir = get_db(data)
if not snpeff_db:
return None, None
assert os.path.exists(os.path.join(datadir, snpeff_db)), \
"Did not find %s snpEff genome data in %s" % (snpeff_db, datadir)
ext = utils.splitext_plus(snp_in)[1] if out_format == "vcf" else ".tsv"
out_file = "%s-effects%s" % (utils.splitext_plus(snp_in)[0], ext)
stats_file = "%s-stats.html" % utils.splitext_plus(out_file)[0]
csv_file = "%s-stats.csv" % utils.splitext_plus(out_file)[0]
if not utils.file_exists(out_file):
config_args = " ".join(_snpeff_args_from_config(data))
if ext.endswith(".gz"):
bgzip_cmd = "| %s -c" % tools.get_bgzip_cmd(data["config"])
else:
bgzip_cmd = ""
with file_transaction(data, out_file) as tx_out_file:
snpeff_cmd = _get_snpeff_cmd("eff", datadir, data, tx_out_file)
cmd = ("{snpeff_cmd} {config_args} -noLog -i vcf -o {out_format} "
"-csvStats {csv_file} -s {stats_file} {snpeff_db} {snp_in} {bgzip_cmd} > {tx_out_file}")
do.run(cmd.format(**locals()), "snpEff effects", data)
if ext.endswith(".gz"):
out_file = vcfutils.bgzip_and_index(out_file, data["config"])
return out_file, [stats_file, csv_file] | Run effects prediction with snpEff, skipping if snpEff database not present. |
12,345 | def search(self, query, limit=None):
return self._limit_get('search', params=dict(q=query), limit=limit) | Use reddit's search function. Returns :class:`things.Listing` object.
URL: ``http://www.reddit.com/search/?q=<query>&limit=<limit>``
:param query: query string
:param limit: max number of results to get |
12,346 | def make_store(name, min_length=4, **kwargs):
    if name not in stores:
        raise ValueError('Valid stores are {0}'.format(', '.join(stores)))
    if name == 'memcache':
        store = MemcacheStore
    elif name == 'memory':
        store = MemoryStore
    elif name == 'redis':
        store = RedisStore
    return store(min_length=min_length, **kwargs) | \
Creates a store with a reasonable keygen.
.. deprecated:: 2.0.0
Instantiate stores directly e.g. ``shorten.MemoryStore(min_length=4)`` |
12,347 | def register_memory():
def get_mem(proc):
if os.name == 'posix':
mem = proc.memory_info_ex()
counter = mem.rss
if 'shared' in mem._fields:
counter -= mem.shared
return counter
else:
return proc.get_memory_info().rss
if SERVER_PROC is not None:
mem = get_mem(SERVER_PROC)
for child in SERVER_PROC.children():
mem += get_mem(child)
server_memory.append(bytes2human(mem)) | Register an approximation of memory used by FTP server process
and all of its children. |
12,348 | def get_scenario(scenario_id,**kwargs):
user_id = kwargs.get()
scen_i = _get_scenario(scenario_id, user_id)
scen_j = JSONObject(scen_i)
rscen_rs = db.DBSession.query(ResourceScenario).filter(ResourceScenario.scenario_id==scenario_id).options(joinedload_all()).all()
for rs in rscen_rs:
rs.resourceattr
rs.resourceattr.attr
rgi_rs = db.DBSession.query(ResourceGroupItem).filter(ResourceGroupItem.scenario_id==scenario_id).all()
scen_j.resourcescenarios = []
for rs in rscen_rs:
rs_j = JSONObject(rs, extras={:JSONObject(rs.resourceattr)})
if rs.dataset.check_read_permission(user_id, do_raise=False) is False:
rs_j.dataset[] = None
rs_j.dataset.metadata = JSONObject({})
scen_j.resourcescenarios.append(rs_j)
scen_j.resourcegroupitems =[JSONObject(r) for r in rgi_rs]
return scen_j | Get the specified scenario |
12,349 | def load_services(self, services=settings.TH_SERVICES):
kwargs = {}
for class_path in services:
module_name, class_name = class_path.rsplit('.', 1)
klass = import_from_path(class_path)
service = klass(None, **kwargs)
self.register(class_name, service) | get the service from the settings |
12,350 | def get_activity_mdata():
return {
: {
: {
: ,
: str(DEFAULT_LANGUAGE_TYPE),
: str(DEFAULT_SCRIPT_TYPE),
: str(DEFAULT_FORMAT_TYPE),
},
: {
: ,
: str(DEFAULT_LANGUAGE_TYPE),
: str(DEFAULT_SCRIPT_TYPE),
: str(DEFAULT_FORMAT_TYPE),
},
: False,
: False,
: False,
: True,
: [],
: ,
: [],
},
: {
: {
: ,
: str(DEFAULT_LANGUAGE_TYPE),
: str(DEFAULT_SCRIPT_TYPE),
: str(DEFAULT_FORMAT_TYPE),
},
: {
: ,
: str(DEFAULT_LANGUAGE_TYPE),
: str(DEFAULT_SCRIPT_TYPE),
: str(DEFAULT_FORMAT_TYPE),
},
: False,
: False,
: False,
: True,
: [],
: ,
: [],
},
: {
: {
: ,
: str(DEFAULT_LANGUAGE_TYPE),
: str(DEFAULT_SCRIPT_TYPE),
: str(DEFAULT_FORMAT_TYPE),
},
: {
: ,
: str(DEFAULT_LANGUAGE_TYPE),
: str(DEFAULT_SCRIPT_TYPE),
: str(DEFAULT_FORMAT_TYPE),
},
: False,
: False,
: False,
: False,
: [],
: ,
: [],
},
: {
: {
: ,
: str(DEFAULT_LANGUAGE_TYPE),
: str(DEFAULT_SCRIPT_TYPE),
: str(DEFAULT_FORMAT_TYPE),
},
: {
: ,
: str(DEFAULT_LANGUAGE_TYPE),
: str(DEFAULT_SCRIPT_TYPE),
: str(DEFAULT_FORMAT_TYPE),
},
: False,
: False,
: False,
: True,
: [],
: ,
: [],
},
} | Return default mdata map for Activity |
12,351 | def _filter_choosers_alts(self, choosers, alternatives):
return (
util.apply_filter_query(
choosers, self.choosers_predict_filters),
util.apply_filter_query(
alternatives, self.alts_predict_filters)) | Apply filters to the choosers and alts tables. |
12,352 | def require_meta_and_content(self, content_handler, params, **kwargs):
meta = {
'params': params
}
content = content_handler(params, meta, **kwargs)
meta['params'] = params
return meta, content | Require 'meta' and 'content' dictionaries using proper hander.
Args:
content_handler (callable): function that accepts
``params, meta, **kwargs`` argument and returns dictionary
for ``content`` response section
params (dict): dictionary of parsed resource parameters
kwargs (dict): dictionary of values created from resource url
template
Returns:
tuple (meta, content): two-tuple with dictionaries of ``meta`` and
``content`` response sections |
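A hedged sketch of the handler contract described above, written as a standalone function (no resource instance): `content_handler` receives `(params, meta, **kwargs)` and returns the content dict, and the wrapper pairs it with a `meta` section. The handler and its fields are invented for illustration.
def require_meta_and_content(content_handler, params, **kwargs):
    meta = {'params': params}
    content = content_handler(params, meta, **kwargs)
    meta['params'] = params
    return meta, content

def list_users_handler(params, meta, **kwargs):
    # toy handler: echo the parsed params and url-template kwargs back as content
    return {'users': [], 'page': params.get('page', 0), **kwargs}

meta, content = require_meta_and_content(list_users_handler, {'page': 2}, org='acme')
print(meta)      # {'params': {'page': 2}}
print(content)   # {'users': [], 'page': 2, 'org': 'acme'}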
12,353 | def wrapped_request(self, request, *args, **kwargs):
f = tornado_Future()
try:
use_mid = kwargs.get('use_mid')
timeout = kwargs.get('timeout')
mid = kwargs.get('mid')
msg = Message.request(request, *args, mid=mid)
except Exception:
f.set_exc_info(sys.exc_info())
return f
return transform_future(self.reply_wrapper,
self.katcp_client.future_request(msg, timeout, use_mid)) | Create and send a request to the server.
This method implements a very small subset of the options
possible to send an request. It is provided as a shortcut to
sending a simple wrapped request.
Parameters
----------
request : str
The request to call.
*args : list of objects
Arguments to pass on to the request.
Keyword Arguments
-----------------
timeout : float or None, optional
Timeout after this amount of seconds (keyword argument).
mid : None or int, optional
Message identifier to use for the request message. If None, use either
auto-incrementing value or no mid depending on the KATCP protocol version
(mid's were only introduced with KATCP v5) and the value of the `use_mid`
argument. Defaults to None.
use_mid : bool
Use a mid for the request if True.
Returns
-------
future object that resolves with the
:meth:`katcp.client.DeviceClient.future_request` response wrapped in
self.reply_wrapper
Example
-------
::
wrapped_reply = yield ic.simple_request('help', 'sensor-list') |
12,354 | def compute_csets_TRAM(
connectivity, state_counts, count_matrices, equilibrium_state_counts=None,
ttrajs=None, dtrajs=None, bias_trajs=None, nn=None, factor=1.0, callback=None):
return _compute_csets(
connectivity, state_counts, count_matrices, ttrajs, dtrajs, bias_trajs,
nn=nn, equilibrium_state_counts=equilibrium_state_counts,
factor=factor, callback=callback) | Computes the largest connected sets in the product space of Markov states
and thermodynamic states for TRAM data.
Parameters
----------
connectivity : string
one of None, 'reversible_pathways', 'post_hoc_RE' or 'BAR_variance',
'neighbors', 'summed_count_matrix' or None.
Selects the algorithm for measuring overlap between thermodynamic
and Markov states.
* 'reversible_pathways' : requires that every state in the connected set
can be reached by following a pathway of reversible transitions. A
reversible transition between two Markov states (within the same
thermodynamic state k) is a pair of Markov states that belong to the
same strongly connected component of the count matrix (from
thermodynamic state k). A pathway of reversible transitions is a list of
reversible transitions [(i_1, i_2), (i_2, i_3),..., (i_(N-2), i_(N-1)),
(i_(N-1), i_N)]. The thermodynamic state where the reversible
transitions happen, is ignored in constructing the reversible pathways.
This is equivalent to assuming that two ensembles overlap at some Markov
state whenever there exist frames from both ensembles in that Markov
state.
* 'largest' : alias for reversible_pathways
* 'post_hoc_RE' : similar to 'reversible_pathways' but with a more strict
requirement for the overlap between thermodynamic states. It is required
that every state in the connected set can be reached by following a
pathway of reversible transitions or jumping between overlapping
thermodynamic states while staying in the same Markov state. A reversible
transition between two Markov states (within the same thermodynamic
state k) is a pair of Markov states that belong to the same strongly
connected component of the count matrix (from thermodynamic state k).
Two thermodynamic states k and l are defined to overlap at Markov state
n if a replica exchange simulation [2]_ restricted to state n would show
at least one transition from k to l or one transition from from l to k.
The expected number of replica exchanges is estimated from the
simulation data. The minimal number required of replica exchanges
per Markov state can be increased by decreasing `connectivity_factor`.
* 'BAR_variance' : like 'post_hoc_RE' but with a different condition to
define the thermodynamic overlap based on the variance of the BAR
estimator [3]_. Two thermodynamic states k and l are defined to overlap
at Markov state n if the variance of the free energy difference Delta
f_{kl} computed with BAR (and restricted to conformations form Markov
state n) is less or equal than one. The minimally required variance
can be controlled with `connectivity_factor`.
* 'neighbors' : like 'post_hoc_RE' or 'BAR_variance' but assume a
overlap between "neighboring" thermodynamic states. It is assumed that
the data comes from an Umbrella sampling simulation and the number of
the thermodynamic state matches the position of the Umbrella along the
order parameter. The overlap of thermodynamic states k and l within
Markov state n is set according to the value of nn; if there are
samples in both product-space states (k,n) and (l,n) and |l-n|<=nn,
the states are overlapping.
* 'summed_count_matrix' : all thermodynamic states are assumed to overlap.
The connected set is then computed by summing the count matrices over
all thermodynamic states and taking it's largest strongly connected set.
Not recommended!
* None : assume that everything is connected. For debugging.
state_counts : numpy.ndarray((T, M), dtype=numpy.intc)
Number of visits to the combinations of thermodynamic state t
and Markov state m
count_matrices : numpy.ndarray((T, M, M), dtype=numpy.intc)
Count matrices for all T thermodynamic states.
equilibrium_state_counts : numpy.dnarray((T, M)), optional
Number of visits to the combinations of thermodynamic state t
and Markov state m in the equilibrium data (for use with TRAMMBAR).
ttrajs : list of numpy.ndarray(X_i, dtype=numpy.intc), optional
List of generating thermodynamic state trajectories.
dtrajs : list of numpy.ndarray(X_i, dtype=numpy.intc), optional
List of configurational state trajectories (disctrajs).
bias_trajs : list of numpy.ndarray((X_i, T), dtype=numpy.float64), optional
List of bias energy trajectories.
The last three parameters are only required for
connectivity = 'post_hoc_RE' or connectivity = 'BAR_variance'.
nn : int, optional
Number of neighbors that are assumed to overlap when
connectivity='neighbors'
factor : int, default=1.0
scaling factor used for connectivity = 'post_hoc_RE' or
'BAR_variance'. Values greater than 1.0 weaken the connectivity
conditions. For 'post_hoc_RE' this multiplies the number of
hypothetically observed transitions. For 'BAR_variance' this
scales the threshold for the minimal allowed variance of free
energy differences.
Returns
-------
csets, projected_cset
csets : list of ndarrays((X_i,), dtype=int)
List indexed by thermodynamic state. Every element csets[k] is
the largest connected set at thermodynamic state k.
projected_cset : ndarray(M, dtype=int)
The overall connected set. This is the union of the individual
connected sets of the thermodynamic states.
References:
-----------
[1]_ Hukushima et al, Exchange Monte Carlo method and application to spin
glass simulations, J. Phys. Soc. Jan. 65, 1604 (1996)
[2]_ Shirts and Chodera, Statistically optimal analysis of samples
from multiple equilibrium states, J. Chem. Phys. 129, 124105 (2008) |
12,355 | def update_member_names(oldasndict, pydr_input):
    omembers = oldasndict['members'].copy()
    nmembers = {}
    translated_names = [f.split('.fits')[0] for f in pydr_input]
    newkeys = [fileutil.buildNewRootname(file) for file in pydr_input]
    keys_map = list(zip(newkeys, pydr_input))
    for okey, oval in list(omembers.items()):
        if okey in newkeys:
            nkey = pydr_input[newkeys.index(okey)]
            nmembers[nkey.split('.fits')[0]] = oval
    oldasndict.pop('members')
    oldasndict.update(members=nmembers, replace=True)
    oldasndict['order'] = translated_names
    return oldasndict | Update names in a member dictionary.
Given an association dictionary with rootnames and a list of full
file names, it will update the names in the member dictionary to
contain '_*' extension. For example a rootname of 'u9600201m' will
be replaced by 'u9600201m_c0h' making sure that a MEf file is passed
as an input and not the corresponding GEIS file. |
12,356 | def project(self, x, vector):
scale = np.linalg.norm(vector)
if scale == 0.0:
return vector
self.lock[:] = False
normals, signs = self._compute_equations(x)[::3]
if len(normals) == 0:
return vector
vector = vector/scale
mask = signs == 0
result = vector.copy()
changed = True
counter = 0
while changed:
changed = False
y = np.dot(normals, result)
for i, sign in enumerate(signs):
if sign != 0:
if sign*y[i] < -self.threshold:
mask[i] = True
changed = True
elif mask[i] and np.dot(normals[i], result-vector) < 0:
mask[i] = False
changed = True
if mask.any():
normals_select = normals[mask]
y = np.dot(normals_select, vector)
U, S, Vt = np.linalg.svd(normals_select, full_matrices=False)
if S.min() == 0.0:
Sinv = S/(S**2+self.rcond1)
else:
Sinv = 1.0/S
result = vector - np.dot(Vt.transpose(), np.dot(U.transpose(), y)*Sinv)
else:
result = vector.copy()
if counter > self.max_iter:
raise ConstraintError()
counter += 1
return result*scale | Project a vector (gradient or direction) on the active constraints.
Arguments:
| ``x`` -- The unknowns.
| ``vector`` -- A numpy array with a direction or a gradient.
The return value is a gradient or direction, where the components
that point away from the constraints are projected out. In case of
half-open constraints, the projection is only active of the vector
points into the infeasible region. |
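The heart of the method is the SVD step that removes the vector's components along the active constraint normals. A minimal numpy sketch of just that step, with one active constraint normal along x (the sign bookkeeping and iteration of the full method are omitted):
import numpy as np

normals = np.array([[1.0, 0.0, 0.0]])    # one active constraint: motion along x is forbidden
vector = np.array([0.7, 0.2, -0.1])

y = np.dot(normals, vector)
U, S, Vt = np.linalg.svd(normals, full_matrices=False)
projected = vector - np.dot(Vt.T, np.dot(U.T, y) / S)
print(projected)                         # [ 0.   0.2 -0.1] -- the x component is projected out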
12,357 | def optimize_seq_and_branch_len(self, reuse_branch_len=True, prune_short=True,
marginal_sequences=False, branch_length_mode='joint',
max_iter=5, infer_gtr=False, **kwargs):
if branch_length_mode=='marginal':
marginal_sequences = True
self.logger("TreeAnc.optimize_sequences_and_branch_length: sequences...", 1)
if reuse_branch_len:
N_diff = self.reconstruct_anc(method='probabilistic', infer_gtr=infer_gtr,
marginal=marginal_sequences, **kwargs)
self.optimize_branch_len(verbose=0, store_old=False, mode=branch_length_mode)
else:
N_diff = self.reconstruct_anc(method='fitch', infer_gtr=infer_gtr, **kwargs)
self.optimize_branch_len(verbose=0, store_old=False, marginal=False)
n = 0
while n<max_iter:
n += 1
if prune_short:
self.prune_short_branches()
N_diff = self.reconstruct_anc(method='probabilistic', infer_gtr=False,
marginal=marginal_sequences, **kwargs)
self.logger("TreeAnc.optimize_sequences_and_branch_length: Iteration %d."
" #Nuc changed since prev reconstructions: %d" % (n, N_diff), 2)
if N_diff < 1:
break
self.optimize_branch_len(verbose=0, store_old=False, mode=branch_length_mode)
self.tree.unconstrained_sequence_LH = (self.tree.sequence_LH*self.multiplicity).sum()
self._prepare_nodes()
self.logger("TreeAnc.optimize_sequences_and_branch_length: Unconstrained sequence LH:%f" % self.tree.unconstrained_sequence_LH , 2)
return ttconf.SUCCESS | Iteratively set branch lengths and reconstruct ancestral sequences until
the values of either former or latter do not change. The algorithm assumes
knowing only the topology of the tree, and requires that sequences are assigned
to all leaves of the tree.
The first step is to pre-reconstruct ancestral
states using Fitch reconstruction algorithm or ML using existing branch length
estimates. Then, optimize branch lengths and re-do reconstruction until
convergence using ML method.
Parameters
-----------
reuse_branch_len : bool
If True, rely on the initial branch lengths, and start with the
maximum-likelihood ancestral sequence inference using existing branch
lengths. Otherwise, do initial reconstruction of ancestral states with
Fitch algorithm, which uses only the tree topology.
prune_short : bool
If True, the branches with zero optimal length will be pruned from
the tree, creating polytomies. The polytomies could be further
processed using :py:meth:`treetime.TreeTime.resolve_polytomies` from the TreeTime class.
marginal_sequences : bool
Assign sequences to their marginally most likely value, rather than
the values that are jointly most likely across all nodes.
branch_length_mode : str
'joint', 'marginal', or 'input'. Branch lengths are left unchanged in case
of 'input'. 'joint' and 'marginal' cause branch length optimization
while setting sequences to the ML value or tracing over all possible
internal sequence states.
max_iter : int
Maximal number of times sequence and branch length iteration are optimized
infer_gtr : bool
Infer a GTR model from the observed substitutions. |
12,358 | def get_xy_environment(self, xy):
x = xy[0]
y = xy[1]
for origin, addr in self._slave_origins:
ox = origin[0]
oy = origin[1]
if ox <= x < ox + self.gs[0] and oy <= y < oy + self.gs[1]:
return addr
return None | Get manager address for the environment which should have the agent
with given *xy* coordinate, or None if no such environment is in this
multi-environment. |
12,359 | async def start(self):
if self.connection.connected:
return
await self.connection.connect()
if self.service.device_credentials:
self.srp.pairing_id = Credentials.parse(
self.service.device_credentials).client_id
msg = messages.device_information(
'pyatv', self.srp.pairing_id.decode())
await self.send_and_receive(msg)
self._initial_message_sent = True
await self.send(messages.set_ready_state())
async def _wait_for_updates(_, semaphore):
semaphore.release()
semaphore = asyncio.Semaphore(value=0, loop=self.loop)
self.add_listener(_wait_for_updates,
protobuf.SET_STATE_MESSAGE,
data=semaphore,
one_shot=True)
await self.send(messages.client_updates_config())
await self.send(messages.wake_device())
try:
await asyncio.wait_for(
semaphore.acquire(), 1, loop=self.loop)
except asyncio.TimeoutError:
pass | Connect to device and listen to incoming messages. |
12,360 | def cleanParagraph(self):
runs = self.block.content
if not runs:
self.block = None
return
if not self.clean_paragraphs:
return
joinedRuns = []
hasContent = False
for run in runs:
if run.content[0]:
hasContent = True
else:
continue
if not run.content[0].strip():
run.properties = {}
if joinedRuns and (run.properties == joinedRuns[-1].properties):
joinedRuns[-1].content[0] += run.content[0]
else:
joinedRuns.append(run)
if hasContent:
joinedRuns[0].content[0] = joinedRuns[0].content[0].lstrip()
joinedRuns[-1].content[0] = joinedRuns[-1].content[0].rstrip()
self.block.content = joinedRuns
else:
self.block = None | Compress text runs, remove whitespace at start and end,
skip empty blocks, etc |
12,361 | def transform(self, X, y=None, copy=None):
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.scale_ is not None:
inplace_column_scale(X, 1 / self.scale_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.scale_
return X | Perform standardization by centering and scaling using the parameters.
:param X: Data matrix to scale.
:type X: numpy.ndarray, shape [n_samples, n_features]
:param y: Passthrough for scikit-learn ``Pipeline`` compatibility.
:type y: None
:param bool copy: Copy the X matrix.
:return: Scaled version of the X data matrix.
:rtype: numpy.ndarray, shape [n_samples, n_features] |
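The behaviour mirrors scikit-learn's `StandardScaler`; a short sketch with that class shows the same centre-and-scale transform (using sklearn directly rather than the wrapper above):
import numpy as np
from sklearn.preprocessing import StandardScaler

X = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]])
scaler = StandardScaler().fit(X)
print(scaler.mean_)           # [ 2. 20.]
print(scaler.transform(X))    # each column centred to mean 0 and scaled to unit variance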
12,362 | def getresponse(self):
status = self._httprequest.status()
status_text = self._httprequest.status_text()
resp_headers = self._httprequest.get_all_response_headers()
fixed_headers = []
for resp_header in resp_headers.split('\n'):
if (resp_header.startswith('\t') or\
resp_header.startswith(' ')) and fixed_headers:
fixed_headers[-1] += resp_header
else:
fixed_headers.append(resp_header)
headers = []
for resp_header in fixed_headers:
if ':' in resp_header:
pos = resp_header.find(':')
headers.append(
(resp_header[:pos].lower(), resp_header[pos + 1:].strip()))
body = self._httprequest.response_body()
length = len(body)
return _Response(status, status_text, length, headers, body) | Gets the response and generates the _Response object |
12,363 | def _bisect(value_and_gradients_function, initial_args, f_lim):
def _loop_cond(curr):
return ~tf.reduce_all(input_tensor=curr.stopped)
def _loop_body(curr):
mid = value_and_gradients_function((curr.left.x + curr.right.x) / 2)
stopped = curr.stopped | failed | (right.df >= 0)
return [_IntermediateResult(
iteration=curr.iteration,
stopped=stopped,
failed=failed,
num_evals=curr.num_evals + 1,
left=left,
right=right)]
return tf.while_loop(
cond=_loop_cond, body=_loop_body, loop_vars=[initial_args])[0] | Actual implementation of bisect given initial_args in a _BracketResult. |
12,364 | def ahead(self, i, j=None):
if j is None:
return self._stream[self.i + i]
else:
return self._stream[self.i + i: self.i + j] | Raising StopIteration will end the parse. |
12,365 | def getItemTrace(self):
    item, path, name, ref = self, [], 'name', 'ref'
    while not isinstance(item, XMLSchema) and not isinstance(item, WSDLToolsAdapter):
        attr = item.getAttribute(name)
        if not attr:
            attr = item.getAttribute(ref)
            if not attr:
                path.append('<%s>' % (item.tag))
            else:
                path.append('<%s ref="%s">' % (item.tag, attr))
        else:
            path.append('<%s name="%s">' % (item.tag, attr))
        item = item._parent()
    try:
        tns = item.getTargetNamespace()
    except:
        tns = ''
    path.append('<%s targetNamespace="%s">' % (item.tag, tns))
    path.reverse()
    return ''.join(path) | Returns a node trace up to the <schema> item. |
12,366 | def format(self, data, *args, **kwargs):
sections = OrderedDict()
hot_list = []
normal_list = []
for item in data:
meta = item.get(, [])
if not item.get():
continue
soup = BeautifulSoup(item.get(), "lxml")
for view_more in soup.select():
view_more.extract()
item[] = str(soup.div)
if not item.get() and item.get():
word_limit = self.options.get(
, 500)
content_list = soup.select()
content_list = [content.get_text() for content in content_list]
excerpt = .join(content_list)[:word_limit]
item[] = html.escape(excerpt)
top = meta.pop(, )
item[] = meta
if str(top) == :
hot_list.append(item)
else:
normal_list.append(item)
if hot_list:
sections.setdefault(, hot_list)
if normal_list:
sections.setdefault(, normal_list)
return sections | 将传入的Post列表数据进行格式化处理。此处传入的 ``data`` 格式即为
:meth:`.ZhihuDaily.crawl` 返回的格式,但具体内容可以不同,即此处保留了灵活度,
可以对非当日文章对象进行格式化,制作相关主题的合集书籍
:param data: 待处理的文章列表
:type data: list
:return: 返回符合mobi打包需求的定制化数据结构
:rtype: dict |
12,367 | def _extend_nocheck(self, iterable):
current_length = len(self)
list.extend(self, iterable)
_dict = self._dict
if current_length == 0:
self._generate_index()
return
for i, obj in enumerate(islice(self, current_length, None),
current_length):
_dict[obj.id] = i | extends without checking for uniqueness
This function should only be used internally by DictList when it
can guarantee elements are already unique (as in when coming from
self or other DictList). It will be faster because it skips these
checks. |
12,368 | def get_all_in_collection(self, collection_paths: Union[str, Iterable[str]], load_metadata: bool = True) \
-> Sequence[EntityType]:
| Gets entities contained within the given iRODS collections.
If one or more of the collection_paths does not exist, a `FileNotFound` exception will be raised.
:param collection_paths: the collection(s) to get the entities from
:param load_metadata: whether metadata associated to the entities should be loaded
:return: the entities loaded from iRODS |
12,369 | def make_named_stemmer(stem=None, min_len=3):
name, stem = stringify(stem), make_stemmer(stem=stem, min_len=min_len)
if hasattr(stem, '__name__'):
return stem.__name__, stem
if name.strip().lower() in STEMMER_TYPES:
return name.strip().lower(), stem
if hasattr(stem, 'pattern'):
return stem.pattern, stem
return stringify(stem), stem | Construct a callable object and a string sufficient to reconstruct it later (unpickling)
>>> make_named_stemmer('str_lower')
('str_lower', <function str_lower at ...>)
>>> make_named_stemmer('Lancaster')
('lancaster', <Stemmer object at ...>) |
12,370 | def get_teachers_sorted(self):
teachers = self.get_teachers()
teachers = [(u.last_name, u.first_name, u.id) for u in teachers]
for t in teachers:
if t is None or t[0] is None or t[1] is None or t[2] is None:
teachers.remove(t)
for t in teachers:
if t[0] is None or len(t[0]) <= 1:
teachers.remove(t)
teachers.sort(key=lambda u: (u[0], u[1]))
id_list = [t[2] for t in teachers]
clauses = ' '.join(['WHEN id=%s THEN %s' % (pk, i) for i, pk in enumerate(id_list)])
ordering = 'CASE %s END' % clauses
queryset = User.objects.filter(id__in=id_list).extra(select={'ordering': ordering}, order_by=('ordering',))
return queryset | Get teachers sorted by last name.
This is used for the announcement request page. |
12,371 | def mgmt_root(opt_bigip, opt_username, opt_password, opt_port, opt_token):
try:
from pytest import symbols
except ImportError:
m = ManagementRoot(opt_bigip, opt_username, opt_password,
port=opt_port, token=opt_token)
else:
if symbols is not None:
m = ManagementRoot(symbols.bigip_mgmt_ip_public,
symbols.bigip_username,
symbols.bigip_password,
port=opt_port, token=opt_token)
else:
m = ManagementRoot(opt_bigip, opt_username, opt_password,
port=opt_port, token=opt_token)
return m | bigip fixture |
12,372 | def merge_dict(d0, d1, add_new_keys=False, append_arrays=False):
if d1 is None:
return d0
elif d0 is None:
return d1
elif d0 is None and d1 is None:
return {}
od = {}
for k, v in d0.items():
t0 = None
t1 = None
if k in d0:
t0 = type(d0[k])
if k in d1:
t1 = type(d1[k])
if k not in d1:
od[k] = copy.deepcopy(d0[k])
elif isinstance(v, dict) and isinstance(d1[k], dict):
od[k] = merge_dict(d0[k], d1[k], add_new_keys, append_arrays)
elif isinstance(v, list) and isstr(d1[k]):
od[k] = d1[k].split(',')
elif isinstance(v, dict) and d1[k] is None:
od[k] = copy.deepcopy(d0[k])
elif isinstance(v, np.ndarray) and append_arrays:
od[k] = np.concatenate((v, d1[k]))
elif (d0[k] is not None and d1[k] is not None) and t0 != t1:
if t0 == dict or t0 == list:
raise Exception(
'Conflicting types in dictionary merge for key %s: %s %s' % (k, t0, t1))
od[k] = t0(d1[k])
else:
od[k] = copy.copy(d1[k])
if add_new_keys:
for k, v in d1.items():
if k not in d0:
od[k] = copy.deepcopy(d1[k])
return od | Recursively merge the contents of python dictionary d0 with
the contents of another python dictionary, d1.
Parameters
----------
d0 : dict
The input dictionary.
d1 : dict
Dictionary to be merged with the input dictionary.
add_new_keys : str
Do not skip keys that only exist in d1.
append_arrays : bool
If an element is a numpy array set the value of that element by
concatenating the two arrays. |
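A small usage sketch, assuming `merge_dict` above is importable along with numpy; the dictionaries are invented for illustration:
import numpy as np

d0 = {'selection': {'emin': 100.0, 'emax': 1000.0}, 'scales': np.array([1.0])}
d1 = {'selection': {'emax': 10000.0}, 'extra': 'only kept with add_new_keys=True'}

merged = merge_dict(d0, d1)
print(merged['selection'])                                  # {'emin': 100.0, 'emax': 10000.0}
print('extra' in merged)                                    # False
print('extra' in merge_dict(d0, d1, add_new_keys=True))     # True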
12,373 | def _example_broker_queue(quote_ctx):
stock_code_list = ["HK.00700"]
for stk_code in stock_code_list:
ret_status, ret_data = quote_ctx.subscribe(stk_code, ft.SubType.BROKER)
if ret_status != ft.RET_OK:
print(ret_data)
exit()
for stk_code in stock_code_list:
ret_status, bid_data, ask_data = quote_ctx.get_broker_queue(stk_code)
if ret_status != ft.RET_OK:
print(bid_data)
exit()
print("%s BROKER" % stk_code)
print(ask_data)
print("\n\n")
print(bid_data)
print("\n\n") | Get the broker queue and print the broker IDs, broker names and broker queue positions for both the bid and ask sides. |
12,374 | def download(self,
files=None,
formats=None,
glob_pattern=None,
dry_run=None,
verbose=None,
silent=None,
ignore_existing=None,
checksum=None,
destdir=None,
no_directory=None,
retries=None,
item_index=None,
ignore_errors=None,
on_the_fly=None,
return_responses=None,
no_change_timestamp=None,
params=None):
dry_run = False if dry_run is None else dry_run
verbose = False if verbose is None else verbose
silent = False if silent is None else silent
ignore_existing = False if ignore_existing is None else ignore_existing
ignore_errors = False if not ignore_errors else ignore_errors
checksum = False if checksum is None else checksum
no_directory = False if no_directory is None else no_directory
return_responses = False if not return_responses else True
no_change_timestamp = False if not no_change_timestamp else no_change_timestamp
params = None if not params else params
if not dry_run:
if item_index and verbose is True:
print(.format(self.identifier, item_index))
elif item_index and silent is False:
print(.format(self.identifier, item_index), end=)
elif item_index is None and verbose is True:
print(.format(self.identifier))
elif item_index is None and silent is False:
print(self.identifier, end=)
sys.stdout.flush()
if self.is_dark is True:
msg = .format(self.identifier)
log.warning(msg)
if verbose:
print( + msg)
elif silent is False:
print(msg)
return
elif self.metadata == {}:
msg = .format(self.identifier)
log.warning(msg)
if verbose:
print( + msg)
elif silent is False:
print(msg)
return
if files:
files = self.get_files(files, on_the_fly=on_the_fly)
else:
files = self.get_files(on_the_fly=on_the_fly)
if formats:
files = self.get_files(formats=formats, on_the_fly=on_the_fly)
if glob_pattern:
files = self.get_files(glob_pattern=glob_pattern, on_the_fly=on_the_fly)
if not files:
msg = .format(self.identifier)
log.info(msg)
if verbose:
print( + msg)
elif silent is False:
print(msg, end=)
errors = list()
responses = list()
for f in files:
if no_directory:
path = f.name
else:
path = os.path.join(self.identifier, f.name)
if dry_run:
print(f.url)
continue
r = f.download(path, verbose, silent, ignore_existing, checksum, destdir,
retries, ignore_errors, None, return_responses,
no_change_timestamp, params)
if return_responses:
responses.append(r)
if r is False:
errors.append(f.name)
if silent is False and verbose is False and dry_run is False:
if errors:
print()
else:
print()
if return_responses:
return responses
else:
return errors | Download files from an item.
:param files: (optional) Only download files matching given file names.
:type formats: str
:param formats: (optional) Only download files matching the given
Formats.
:type glob_pattern: str
:param glob_pattern: (optional) Only download files matching the given
glob pattern.
:type dry_run: bool
:param dry_run: (optional) Output download URLs to stdout, don't
download anything.
:type verbose: bool
:param verbose: (optional) Turn on verbose output.
:type silent: bool
:param silent: (optional) Suppress all output.
:type ignore_existing: bool
:param ignore_existing: (optional) Skip files that already exist
locally.
:type checksum: bool
:param checksum: (optional) Skip downloading file based on checksum.
:type destdir: str
:param destdir: (optional) The directory to download files to.
:type no_directory: bool
:param no_directory: (optional) Download files to current working
directory rather than creating an item directory.
:type retries: int
:param retries: (optional) The number of times to retry on failed
requests.
:type item_index: int
:param item_index: (optional) The index of the item for displaying
progress in bulk downloads.
:type ignore_errors: bool
:param ignore_errors: (optional) Don't fail if a single file fails to
download, continue to download other files.
:type on_the_fly: bool
:param on_the_fly: (optional) Download on-the-fly files (i.e. derivative EPUB,
MOBI, DAISY files).
:type return_responses: bool
:param return_responses: (optional) Rather than downloading files to disk, return
a list of response objects.
:type no_change_timestamp: bool
:param no_change_timestamp: (optional) If True, leave the time stamp as the
current time instead of changing it to that given in
the original archive.
:type params: dict
:param params: (optional) URL parameters to send with
download request (e.g. `cnt=0`).
:rtype: bool
:returns: True if all files have been downloaded successfully. |
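A minimal usage sketch, assuming this method lives on internetarchive's Item class (obtained through internetarchive.get_item); the identifier, glob pattern, and destination directory are illustrative only.
from internetarchive import get_item

item = get_item('nasa')
failed = item.download(glob_pattern='*.jpg', destdir='downloads', verbose=True)
if failed:
    print('files that did not download:', failed)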
12,375 | def pgcd(numa, numb):
int_args = (int(numa) == numa) and (int(numb) == numb)
fraction_args = isinstance(numa, Fraction) and isinstance(numb, Fraction)
if int_args:
numa, numb = int(numa), int(numb)
elif not fraction_args:
numa, numb = float(numa), float(numb)
if (not int_args) and (not fraction_args):
numa, numb = (
Fraction(_no_exp(numa)).limit_denominator(),
Fraction(_no_exp(numb)).limit_denominator(),
)
while numb:
numa, numb = (
numb,
(numa % numb if int_args else (numa % numb).limit_denominator()),
)
return int(numa) if int_args else (numa if fraction_args else float(numa)) | Calculate the greatest common divisor (GCD) of two numbers.
:param numa: First number
:type numa: number
:param numb: Second number
:type numb: number
:rtype: number
For example:
>>> import pmisc, fractions
>>> pmisc.pgcd(10, 15)
5
>>> str(pmisc.pgcd(0.05, 0.02))
'0.01'
>>> str(pmisc.pgcd(5/3.0, 2/3.0))[:6]
'0.3333'
>>> pmisc.pgcd(
... fractions.Fraction(str(5/3.0)),
... fractions.Fraction(str(2/3.0))
... )
Fraction(1, 3)
>>> pmisc.pgcd(
... fractions.Fraction(5, 3),
... fractions.Fraction(2, 3)
... )
Fraction(1, 3) |
12,376 | def validate_path(path):
if not isinstance(path, six.string_types) or not re.match(, path):
raise InvalidUsage(
"Path validation failed - Expected: '/<component>[/component], got: %s" % path
)
return True | Validates the provided path
:param path: path to validate (string)
:raise:
:InvalidUsage: If validation fails. |
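Illustrative calls, assuming the elided regular expression enforces the '/<component>[/component]' shape described in the error message, and that InvalidUsage can be imported from the enclosing package.
validate_path('/table/incident')   # returns True
try:
    validate_path('no-leading-slash')
except InvalidUsage as exc:
    print(exc)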
12,377 | def _raise_decomposition_errors(uvw, antenna1, antenna2,
chunks, ant_uvw, max_err):
start = 0
problem_str = []
for ci, chunk in enumerate(chunks):
end = start + chunk
ant1 = antenna1[start:end]
ant2 = antenna2[start:end]
cuvw = uvw[start:end]
ant1_uvw = ant_uvw[ci, ant1, :]
ant2_uvw = ant_uvw[ci, ant2, :]
ruvw = ant2_uvw - ant1_uvw
close = np.isclose(ruvw, cuvw)
problems = np.nonzero(np.logical_or.reduce(np.invert(close), axis=1))
for row in problems[0]:
problem_str.append("[row %d [%d, %d] (chunk %d)]: "
"original %s recovered %s "
"ant1 %s ant2 %s" % (
start+row, ant1[row], ant2[row], ci,
cuvw[row], ruvw[row],
ant1_uvw[row], ant2_uvw[row]))
if len(problem_str) >= max_err:
break
if len(problem_str) >= max_err:
break
start = end
if len(problem_str) == 0:
return
problem_str = ["Antenna UVW Decomposition Failed",
"The following differences were found "
"(first 100):"] + problem_str
raise AntennaUVWDecompositionError(.join(problem_str)) | Raises informative exception for an invalid decomposition |
12,378 | def log_template_errors(logger, log_level=logging.ERROR):
if not (isinstance(log_level, int) and
log_level in logging._levelNames):
raise ValueError( % log_level)
decorators = [
_log_template_string_if_invalid(logger, log_level),
_log_unicode_errors(logger, log_level),
_always_strict_resolve,
]
if django.VERSION < (1, 8):
decorators.append(_patch_invalid_var_format_string)
@decorator
def function(f, *args, **kwargs):
return reduce(__apply, decorators, f)(*args, **kwargs)
return function | Decorator to log template errors to the specified logger.
@log_template_errors(logging.getLogger('mylogger'), logging.INFO)
def my_view(*args):
pass
Will log template errors at INFO. The default log level is ERROR. |
12,379 | def print_logs(query, types=None):
if query is None:
return
for run, log in query:
print(("{0} @ {1} - {2} id: {3} group: {4} status: {5}".format(
run.end, run.experiment_name, run.project_name,
run.experiment_group, run.run_group, log.status)))
print(("command: {0}".format(run.command)))
if "stderr" in types:
print("StdErr:")
print((log.stderr))
if "stdout" in types:
print("StdOut:")
print((log.stdout))
print() | Print status logs. |
12,380 | def ncbi_blast(self, db="nr", megablast=True, sequence=None):
import requests
requests.defaults.max_retries = 4
assert sequence in (None, "cds", "mrna")
seq = self.sequence() if sequence is None else ("".join(self.cds_sequence if sequence == "cds" else self.mrna_sequence))
r = requests.post(,
timeout=20,
data=dict(
PROGRAM="blastn",
DESCRIPTIONS=100,
ALIGNMENTS=0,
FILTER="L",
CMD="Put",
MEGABLAST=True,
DATABASE=db,
QUERY=">%s\n%s" % (self.name, seq)
)
)
if not ("RID =" in r.text and "RTOE" in r.text):
print("no results", file=sys.stderr)
return  # PEP 479: returning ends the generator; raising StopIteration would become a RuntimeError on Python 3.7+
rid = r.text.split("RID = ")[1].split("\n")[0]
import time
time.sleep(4)
print("checking...", file=sys.stderr)
r = requests.post(,
data=dict(RID=rid, format="Text",
DESCRIPTIONS=100,
DATABASE=db,
CMD="Get", ))
while "Status=WAITING" in r.text:
print("checking...", file=sys.stderr)
time.sleep(10)
r = requests.post(,
data=dict(RID=rid, format="Text",
CMD="Get", ))
for rec in _ncbi_parse(r.text):
yield rec | perform an NCBI blast against the sequence of this feature |
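A hedged usage sketch: the method above is a generator defined on some gene/feature object; the feature variable below and how it is obtained are assumptions not shown in this record.
for hit in feature.ncbi_blast(db='nr', megablast=True, sequence='cds'):
    print(hit)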
12,381 | def process_objects(kls):
    if 'Meta' not in kls.__dict__:
        kls.Meta = type('Meta', (object,), {})
    if 'unique_together' not in kls.Meta.__dict__:
        kls.Meta.unique_together = []
    if 'verbose_name' not in kls.Meta.__dict__:
        kls.Meta.verbose_name = kls.__name__
    if 'verbose_name_plural' not in kls.Meta.__dict__:
        kls.Meta.verbose_name_plural = kls.Meta.verbose_name + 's' | Applies default Meta properties. |
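A minimal sketch of the default-Meta behaviour. Note that the string keys in the code above were stripped in this dump and have been restored by inference from the assignments that follow each check, so treat them as assumptions.
class Article(object):
    pass

process_objects(Article)
print(Article.Meta.verbose_name)         # 'Article'
print(Article.Meta.verbose_name_plural)  # 'Articles'
print(Article.Meta.unique_together)      # []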
12,382 | def map_query(self, variables=None, evidence=None):
if not variables:
variables = set(self.variables)
final_distribution = self._query(variables=variables, operation='maximize', evidence=evidence)
argmax = np.argmax(final_distribution.values)
assignment = final_distribution.assignment([argmax])[0]
map_query_results = {}
for var_assignment in assignment:
var, value = var_assignment
map_query_results[var] = value
if not variables:
return map_query_results
else:
return_dict = {}
for var in variables:
return_dict[var] = map_query_results[var]
return return_dict | MAP Query method using belief propagation.
Note: When multiple variables are passed, it returns the map_query for each
of them individually.
Parameters
----------
variables: list
list of variables for which you want to compute the probability
evidence: dict
a dict key, value pair as {var: state_of_var_observed}
None if no evidence
Examples
--------
>>> from pgmpy.factors.discrete import TabularCPD
>>> from pgmpy.models import BayesianModel
>>> from pgmpy.inference import BeliefPropagation
>>> bayesian_model = BayesianModel([('A', 'J'), ('R', 'J'), ('J', 'Q'),
... ('J', 'L'), ('G', 'L')])
>>> cpd_a = TabularCPD('A', 2, [[0.2], [0.8]])
>>> cpd_r = TabularCPD('R', 2, [[0.4], [0.6]])
>>> cpd_j = TabularCPD('J', 2,
... [[0.9, 0.6, 0.7, 0.1],
... [0.1, 0.4, 0.3, 0.9]],
... ['R', 'A'], [2, 2])
>>> cpd_q = TabularCPD('Q', 2,
... [[0.9, 0.2],
... [0.1, 0.8]],
... ['J'], [2])
>>> cpd_l = TabularCPD('L', 2,
... [[0.9, 0.45, 0.8, 0.1],
... [0.1, 0.55, 0.2, 0.9]],
... ['G', 'J'], [2, 2])
>>> cpd_g = TabularCPD('G', 2, [[0.6], [0.4]])
>>> bayesian_model.add_cpds(cpd_a, cpd_r, cpd_j, cpd_q, cpd_l, cpd_g)
>>> belief_propagation = BeliefPropagation(bayesian_model)
>>> belief_propagation.map_query(variables=['J', 'Q'],
... evidence={'A': 0, 'R': 0, 'G': 0, 'L': 1}) |
12,383 | def clone(self, **kwargs):
child = ChildContextDict(parent=self, threadsafe=self._threadsafe, overrides=kwargs)
return child | Clone this context, and return the ChildContextDict |
12,384 | def _expand_shorthand(model_formula, variables):
wm =
gsr =
rps =
fd =
acc = _get_matches_from_data(, variables)
tcc = _get_matches_from_data(, variables)
dv = _get_matches_from_data(, variables)
dvall = _get_matches_from_data(, variables)
nss = _get_matches_from_data(,
variables)
spikes = _get_matches_from_data(, variables)
model_formula = re.sub(, wm, model_formula)
model_formula = re.sub(, gsr, model_formula)
model_formula = re.sub(, rps, model_formula)
model_formula = re.sub(, fd, model_formula)
model_formula = re.sub(, acc, model_formula)
model_formula = re.sub(, tcc, model_formula)
model_formula = re.sub(, dv, model_formula)
model_formula = re.sub(, dvall, model_formula)
model_formula = re.sub(, nss, model_formula)
model_formula = re.sub(, spikes, model_formula)
formula_variables = _get_variables_from_formula(model_formula)
others = .join(set(variables) - set(formula_variables))
model_formula = re.sub(, others, model_formula)
return model_formula | Expand shorthand terms in the model formula. |
12,385 | def action_set(values):
    cmd = ['action-set']  # the Juju 'action-set' hook tool, restored by inference from the docstring
    for k, v in list(values.items()):
        cmd.append('{}={}'.format(k, v))
    subprocess.check_call(cmd) | Sets the values to be returned after the action finishes |
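A hedged usage sketch from inside a Juju action hook; it assumes the 'action-set' hook tool restored above is on PATH, as it is in a charm execution environment.
action_set({'outcome': 'success', 'rows-processed': 42})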
12,386 | def ellipse_from_second_moments(image, labels, indexes, wants_compactness = False):
if len(indexes) == 0:
return (np.zeros((0,2)), np.zeros((0,)), np.zeros((0,)),
np.zeros((0,)),np.zeros((0,)))
i,j = np.argwhere(labels != 0).transpose()
return ellipse_from_second_moments_ijv(i,j,image[i,j], labels[i,j], indexes, wants_compactness) | Calculate measurements of ellipses equivalent to the second moments of labels
image - the intensity at each point
labels - for each labeled object, derive an ellipse
indexes - sequence of indexes to process
returns the following arrays:
coordinates of the center of the ellipse
eccentricity
major axis length
minor axis length
orientation
compactness (if asked for)
some definitions taken from "Image Moments-Based Structuring and Tracking
of Objects", LOURENA ROCHA, LUIZ VELHO, PAULO CEZAR P. CARVALHO,
http://sibgrapi.sid.inpe.br/col/sid.inpe.br/banon/2002/10.23.11.34/doc/35.pdf
particularly equation 5 (which has some errors in it).
These yield the rectangle with equivalent second moments. I translate
to the ellipse by multiplying by 1.154701 which is Matlab's calculation
of the major and minor axis length for a square of length X divided
by the actual length of the side of a square of that length.
eccentricity is the distance between foci divided by the major axis length
orientation is the angle of the major axis with respect to the X axis
compactness is the variance of the radial distribution normalized by the area |
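A small synthetic sketch, assuming the companion ellipse_from_second_moments_ijv helper from the same module is importable: a single square object labelled 1.
import numpy as np

image = np.zeros((10, 10))
labels = np.zeros((10, 10), int)
image[2:8, 2:8] = 1.0
labels[2:8, 2:8] = 1
centers, ecc, major, minor, theta = ellipse_from_second_moments(
    image, labels, np.array([1]))
# for a square the major and minor axes are roughly equal, so ecc is close to 0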
12,387 | def Back(self, n = 1, dl = 0):
self.Delay(dl)
self.keyboard.tap_key(self.keyboard.backspace_key, n) | Press the backspace key n times. |
12,388 | def _make_valid_state_name(self, state_name):
s = str(state_name)
s_fixed = pp.CharsNotIn(pp.alphanums + "_").setParseAction(pp.replaceWith("_")).transformString(s)
if not s_fixed[0].isalpha():
s_fixed = "state" + s_fixed
return s_fixed | Transform the input state_name into a valid state in XMLBIF.
XMLBIF states must start with a letter and may only contain letters,
numbers and underscores. |
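A quick illustration of the sanitisation above (it assumes pyparsing is imported as pp in the enclosing module).
print(_make_valid_state_name('3 high-risk'))  # 'state3_high_risk'
print(_make_valid_state_name('yes'))          # 'yes'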
12,389 | def delete(self, option=None):
write_pb = _helpers.pb_for_delete(self._document_path, option)
commit_response = self._client._firestore_api.commit(
self._client._database_string,
[write_pb],
transaction=None,
metadata=self._client._rpc_metadata,
)
return commit_response.commit_time | Delete the current document in the Firestore database.
Args:
option (Optional[~.firestore_v1beta1.client.WriteOption]): A
write option to make assertions / preconditions on the server
state of the document before applying changes.
Returns:
google.protobuf.timestamp_pb2.Timestamp: The time that the delete
request was received by the server. If the document did not exist
when the delete was sent (i.e. nothing was deleted), this method
will still succeed and will still return the time that the
request was received by the server. |
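A hedged sketch with the google-cloud-firestore client; the collection and document names are arbitrary examples, not values taken from this record.
from google.cloud import firestore

client = firestore.Client()
delete_time = client.collection('users').document('alice').delete()
print(delete_time)  # server timestamp of the (idempotent) delete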
12,390 | def gen_report(report, sdir=, report_name=):
if not os.path.exists(sdir):
os.makedirs(sdir)
    if sdir[-1] != '/':
        sdir += '/'
report_html =
if in report.keys():
report_html += "<h1>Method: " + report[] + "</h1><p>"
for i in report[report[]]:
if i == :
fig, ax = plt.subplots(1)
ax.plot(report[report[]][],
report[report[]][])
ax.set_xlabel()
ax.set_title(
+ report[report[]][] + )
fig.savefig(sdir + )
report_html += "<img src= width=500>" + "<p>"
else:
report_html += "- <b>" + i + "</b>: " + \
str(report[report[]][i]) + "<br>"
if in report.keys():
report_html += "<p><h2>Postprocessing:</h2><p>"
report_html += "<b>Pipeline: </b>"
for i in report[]:
report_html += " " + i + ","
for i in report[]:
report_html += "<p><h3>" + i + "</h3><p>"
for j in report[i]:
if j == :
report_html += "- <b>" + j + "</b>: " + "<br>"
lambda_val = np.array(report[][])
fig, ax = plt.subplots(1)
ax.hist(lambda_val[:, -1])
ax.set_xlabel()
ax.set_ylabel()
ax.set_title()
fig.savefig(sdir + )
report_html += "<img src= width=500>" + "<p>"
report_html += "Data located in " + sdir + "boxcox_lambda.csv <p>"
np.savetxt(sdir + "boxcox_lambda.csv",
lambda_val, delimiter=",")
else:
report_html += "- <b>" + j + "</b>: " + \
str(report[i][j]) + "<br>"
report_html +=
    with open(sdir + report_name, 'w') as file:
file.write(report_html)
file.close() | Generates report of derivation and postprocess steps in teneto.derive |
12,391 | def concretize(x, solver, sym_handler):
if solver.symbolic(x):
try:
return solver.eval_one(x)
except SimSolverError:
return sym_handler(x)
else:
return solver.eval(x) | For now a lot of naive concretization is done when handling heap metadata to keep things manageable. This idiom
showed up a lot as a result, so to reduce code repetition this function uses a callback to handle the one or two
operations that varied across invocations.
:param x: the item to be concretized
:param solver: the solver to evaluate the item with
:param sym_handler: the handler to be used when the item may take on more than one value
:returns: a concrete value for the item |
12,392 | def _find_statements(self):
for bp in self.child_parsers():
for _, l in bp._bytes_lines():
yield l | Find the statements in `self.code`.
Produce a sequence of line numbers that start statements. Recurses
into all code objects reachable from `self.code`. |
12,393 | def _sorted_copy(self, comparison, reversed=False):
sorted = self.copy()
_list.sort(sorted, comparison)
if reversed:
_list.reverse(sorted)
return sorted | Returns a sorted copy with the colors arranged according to the given comparison. |
12,394 | def enqueue_command(self, command_name, args, options):
assert_open(self)
promise = Promise()
self.commands.append((command_name, args, options, promise))
return promise | Enqueue a new command into this pipeline. |
12,395 | def has_segment_tables(xmldoc, name = None):
try:
names = lsctables.SegmentDefTable.get_table(xmldoc).getColumnByName("name")
lsctables.SegmentTable.get_table(xmldoc)
lsctables.SegmentSumTable.get_table(xmldoc)
except (ValueError, KeyError):
return False
return name is None or name in names | Return True if the document contains a complete set of segment
tables. Returns False otherwise. If name is given and not None
then the return value is True only if the document's segment
tables, if present, contain a segment list by that name. |
12,396 | def recommend(self, users=None, k=10, exclude=None, items=None,
new_observation_data=None, new_user_data=None, new_item_data=None,
exclude_known=True, diversity=0, random_seed=None,
verbose=True):
from turicreate._cython.cy_server import QuietProgress
assert type(k) == int
column_types = self._get_data_schema()
user_id = self.user_id
item_id = self.item_id
user_type = column_types[user_id]
item_type = column_types[item_id]
__null_sframe = _SFrame()
if users is None:
users = __null_sframe
if exclude is None:
exclude = __null_sframe
if items is None:
items = __null_sframe
if new_observation_data is None:
new_observation_data = __null_sframe
if new_user_data is None:
new_user_data = __null_sframe
if new_item_data is None:
new_item_data = __null_sframe
if isinstance(users, list) or (_HAS_NUMPY and isinstance(users, _numpy.ndarray)):
users = _SArray(users)
if users.dtype == dict:
users = users.unpack(column_name_prefix=)
if isinstance(users, _SArray):
users = _SFrame({user_id: users})
if isinstance(items, list) or (_HAS_NUMPY and isinstance(items, _numpy.ndarray)):
items = _SArray(items, dtype = item_type)
if isinstance(items, _SArray):
items = _SFrame({item_id: items})
def check_type(arg, arg_name, required_type, allowed_types):
if not isinstance(arg, required_type):
raise TypeError("Parameter " + arg_name + " must be of type(s) "
+ (", ".join(allowed_types))
+ "; Type not recognized.")
check_type(users, "users", _SFrame, ["SArray", "list", "SFrame", "numpy.ndarray"])
check_type(exclude, "exclude", _SFrame, ["SFrame"])
check_type(items, "items", _SFrame, ["SFrame", "SArray", "list", "numpy.ndarray"])
check_type(new_observation_data, "new_observation_data", _SFrame, ["SFrame"])
check_type(new_user_data, "new_user_data", _SFrame, ["SFrame"])
check_type(new_item_data, "new_item_data", _SFrame, ["SFrame"])
        # (an elided block here determines cast_user_to_string_type / original_user_type when user ids need casting to str)
with QuietProgress(verbose):
recs = self.__proxy__.recommend(users, exclude, items, new_observation_data, new_user_data,
new_item_data, exclude_known, k, diversity, random_seed)
if cast_user_to_string_type:
recs[user_id] = recs[user_id].astype(original_user_type)
return recs | Recommend the ``k`` highest scored items for each user.
Parameters
----------
users : SArray, SFrame, or list, optional
Users or observation queries for which to make recommendations.
For list, SArray, and single-column inputs, this is simply a set
of user IDs. By default, recommendations are returned for all
users present when the model was trained. However, if the
recommender model was created with additional features in the
``observation_data`` SFrame, then a corresponding SFrame of
observation queries -- observation data without item or target
columns -- can be passed to this method. For example, a model
trained with user ID, item ID, time, and rating columns may be
queried using an SFrame with user ID and time columns. In this
case, the user ID column must be present, and all column names
should match those in the ``observation_data`` SFrame passed to
``create.``
k : int, optional
The number of recommendations to generate for each user.
items : SArray, SFrame, or list, optional
Restricts the items from which recommendations can be made. If
``items`` is an SArray, list, or SFrame with a single column,
only items from the given set will be recommended. This can be
used, for example, to restrict the recommendations to items
within a particular category or genre. If ``items`` is an
SFrame with user ID and item ID columns, then the item
restriction is specialized to each user. For example, if
``items`` contains 3 rows with user U1 -- (U1, I1), (U1, I2),
and (U1, I3) -- then the recommendations for user U1 are
chosen from items I1, I2, and I3. By default, recommendations
are made from all items present when the model was trained.
new_observation_data : SFrame, optional
``new_observation_data`` gives additional observation data
to the model, which may be used by the models to improve
score and recommendation accuracy. Must be in the same
format as the observation data passed to ``create``. How
this data is used varies by model.
new_user_data : SFrame, optional
``new_user_data`` may give additional user data to the
model. If present, scoring is done with reference to this
new information. If there is any overlap with the side
information present at training time, then this new side
data is preferred. Must be in the same format as the user
data passed to ``create``.
new_item_data : SFrame, optional
``new_item_data`` may give additional item data to the
model. If present, scoring is done with reference to this
new information. If there is any overlap with the side
information present at training time, then this new side
data is preferred. Must be in the same format as the item
data passed to ``create``.
exclude : SFrame, optional
An :class:`~turicreate.SFrame` of user / item pairs. The
column names must be equal to the user and item columns of
the main data, and it provides the model with user/item
pairs to exclude from the recommendations. These
user-item-pairs are always excluded from the predictions,
even if exclude_known is False.
exclude_known : bool, optional
By default, all user-item interactions previously seen in
the training data, or in any new data provided using
new_observation_data.., are excluded from the
recommendations. Passing in ``exclude_known = False``
overrides this behavior.
diversity : non-negative float, optional
If given, then the recommend function attempts chooses a set
of `k` items that are both highly scored and different from
other items in that set. It does this by first retrieving
``k*(1+diversity)`` recommended items, then randomly
choosing a diverse set from these items. Suggested values
for diversity are between 1 and 3.
random_seed : int, optional
If diversity is larger than 0, then some randomness is used;
this controls the random seed to use for randomization. If
None, will be different each time.
verbose : bool, optional
If True, print the progress of generating recommendation.
Returns
-------
out : SFrame
A SFrame with the top ranked items for each user. The
columns are: ``user_id``, ``item_id``, *score*,
and *rank*, where ``user_id`` and ``item_id``
match the user and item column names specified at training
time. The rank column is between 1 and ``k`` and gives
the relative score of that item. The value of score
depends on the method used for recommendations.
See Also
--------
recommend_from_interactions
predict
evaluate |
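A minimal end-to-end sketch with turicreate; the column names match the library's defaults and the data values are illustrative.
import turicreate as tc

sf = tc.SFrame({'user_id': ['a', 'a', 'b', 'c'],
                'item_id': ['x', 'y', 'x', 'z'],
                'rating':  [1, 3, 2, 5]})
model = tc.recommender.create(sf, target='rating')
recs = model.recommend(users=['a', 'b'], k=3)
recs.print_rows()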
12,397 | def get_collections_for_image(self, image_id):
result = []
for document in self.collection.find({ : True, : image_id}):
result.append(str(document[]))
return result | Get identifier of all collections that contain a given image.
Parameters
----------
image_id : string
Unique identifier of the image object
Returns
-------
List(string)
List of image collection identifier |
12,398 | def update(self):
_LOGGER.debug("Querying the device..")
time = datetime.now()
value = struct.pack(, PROP_INFO_QUERY,
time.year % 100, time.month, time.day,
time.hour, time.minute, time.second)
self._conn.make_request(PROP_WRITE_HANDLE, value) | Update the data from the thermostat. Always sets the current time. |
12,399 | def run_processes(self,
procdetails: List[ProcessDetails],
subproc_run_timeout_sec: float = 1,
stop_event_timeout_ms: int = 1000,
kill_timeout_sec: float = 5) -> None:
def cleanup():
self.debug("atexit function called: cleaning up")
for pmgr_ in self.process_managers:
pmgr_.stop()
atexit.register(cleanup)
self.process_managers = []
n = len(procdetails)
for i, details in enumerate(procdetails):
pmgr = ProcessManager(details, i + 1, n,
kill_timeout_sec=kill_timeout_sec,
debugging=self.debugging)
self.process_managers.append(pmgr)
for pmgr in self.process_managers:
pmgr.start()
self.info("All started")
something_running = True
stop_requested = False
subproc_failed = False
while something_running and not stop_requested and not subproc_failed:
if (win32event.WaitForSingleObject(
self.h_stop_event,
stop_event_timeout_ms) == win32event.WAIT_OBJECT_0):
stop_requested = True
self.info("Stop requested; stopping")
else:
something_running = False
for pmgr in self.process_managers:
if subproc_failed:
break
try:
retcode = pmgr.wait(timeout_s=subproc_run_timeout_sec)
if retcode != 0:
subproc_failed = True
except subprocess.TimeoutExpired:
something_running = True
for pmgr in self.process_managers:
pmgr.stop()
self.info("All stopped") | Run multiple child processes.
Args:
procdetails: list of :class:`ProcessDetails` objects (q.v.)
subproc_run_timeout_sec: time (in seconds) to wait for each process
when polling child processes to see how they're getting on
(default ``1``)
stop_event_timeout_ms: time to wait (in ms) while checking the
Windows stop event for this service (default ``1000``)
kill_timeout_sec: how long (in seconds) will we wait for the
subprocesses to end peacefully, before we try to kill them?
.. todo::
cardinal_pythonlib.winservice.WindowsService: NOT YET IMPLEMENTED:
Windows service autorestart |