Unnamed: 0 (int64, 0–389k) | code (string, lengths 26–79.6k) | docstring (string, lengths 1–46.9k) |
---|---|---|
384,600 | def flatten(iterable):
if isiterable(iterable):
flat = []
for item in list(iterable):
item = flatten(item)
if not isiterable(item):
item = [item]
flat += item
return flat
else:
return iterable | convenience tool to flatten any nested iterable
example:
flatten([[[],[4]],[[[5,[6,7, []]]]]])
>>> [4, 5, 6, 7]
flatten('hello')
>>> 'hello'
Parameters
----------
iterable
Returns
-------
flattened object |
384,601 | def break_edge(self, from_index, to_index, to_jimage=None, allow_reverse=False):
existing_edges = self.graph.get_edge_data(from_index, to_index)
existing_reverse = None
if to_jimage is None:
raise ValueError("Image must be supplied, to avoid ambiguity.")
if existing_edges:
for i, properties in existing_edges.items():
if properties["to_jimage"] == to_jimage:
edge_index = i
self.graph.remove_edge(from_index, to_index, edge_index)
else:
if allow_reverse:
existing_reverse = self.graph.get_edge_data(to_index, from_index)
if existing_reverse:
for i, properties in existing_reverse.items():
if properties["to_jimage"] == to_jimage:
edge_index = i
self.graph.remove_edge(to_index, from_index, edge_index)
else:
raise ValueError("Edge cannot be broken between {} and {};\
no edge exists between those sites.".format(
from_index, to_index
)) | Remove an edge from the StructureGraph. If no image is given, this method will fail.
:param from_index: int
:param to_index: int
:param to_jimage: tuple
:param allow_reverse: If allow_reverse is True, then break_edge will
attempt to break both (from_index, to_index) and, failing that,
will attempt to break (to_index, from_index).
:return: |
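A minimal usage sketch, assuming this is pymatgen's StructureGraph.break_edge (as the docstring suggests); the structure and the bond being broken are purely illustrative:
from pymatgen.core import Lattice, Structure
from pymatgen.analysis.graphs import StructureGraph
from pymatgen.analysis.local_env import MinimumDistanceNN

# Illustrative rock-salt-like cell; species and lattice constant are arbitrary.
structure = Structure(Lattice.cubic(2.8), ["Na", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
sg = StructureGraph.with_local_env_strategy(structure, MinimumDistanceNN())

# Break the bond between sites 0 and 1 in the (0, 0, 0) periodic image.
# to_jimage is required; allow_reverse also tries the (1, 0) direction.
# Raises ValueError if no such edge exists in this toy structure.
sg.break_edge(0, 1, to_jimage=(0, 0, 0), allow_reverse=True)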
384,602 | def container_remove_objects(object_id, input_params={}, always_retry=False, **kwargs):
return DXHTTPRequest('/%s/removeObjects' % object_id, input_params, always_retry=always_retry, **kwargs) | Invokes the /container-xxxx/removeObjects API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Folders-and-Deletion#API-method%3A-%2Fclass-xxxx%2FremoveObjects |
384,603 | def _strip_zoom(input_string, strip_string):
try:
return int(input_string.strip(strip_string))
except Exception as e:
raise MapcheteConfigError("zoom level could not be determined: %s" % e) | Return zoom level as integer or throw error. |
384,604 | def solar_position(moment, latitude, longitude, Z=0.0, T=298.15, P=101325.0,
atmos_refract=0.5667):
from fluids.optional import spa
delta_t = spa.calculate_deltat(moment.year, moment.month)
unixtime = time.mktime(moment.timetuple())
result = spa.solar_position_numpy(unixtime, lat=latitude, lon=longitude, elev=Z,
pressure=P*1E-2, temp=T-273.15, delta_t=delta_t,
atmos_refract=atmos_refract, sst=False, esd=False)
result[-1] = result[-1]*60.0
return result | Calculate the position of the sun in the sky. It is defined in terms of
two angles - the zenith and the azimuth. The azimuth tells where a sundial
would see the sun as coming from; the zenith tells how high in the sky it
is. The solar elevation angle is returned for convenience; it is the
complementary angle of the zenith.
The sun's refraction changes how high the sun appears to be, so values
are returned with an optional conversion to the apparent angle.
This impacts only the zenith/elevation.
Uses the Reda and Andreas (2004) model described in [1]_,
originally incorporated into the excellent
`pvlib library <https://github.com/pvlib/pvlib-python>`_
Parameters
----------
moment : datetime
Time and date for the calculation, in local UTC time (not daylight
savings time), [-]
latitude : float
Latitude, between -90 and 90 [degrees]
longitude : float
Longitude, between -180 and 180, [degrees]
Z : float, optional
Elevation above sea level for the solar position calculation, [m]
T : float, optional
Temperature of atmosphere at ground level, [K]
P : float, optional
Pressure of atmosphere at ground level, [Pa]
atmos_refract : float, optional
Atmospheric refractivity, [degrees]
Returns
-------
apparent_zenith : float
Zenith of the sun as observed from the ground based after accounting
for atmospheric refraction, [degrees]
zenith : float
Actual zenith of the sun (ignores atmospheric refraction), [degrees]
apparent_altitude : float
Altitude of the sun as observed from the ground based after accounting
for atmospheric refraction, [degrees]
altitude : float
Actual altitude of the sun (ignores atmospheric refraction), [degrees]
azimuth : float
The azimuth of the sun, [degrees]
equation_of_time : float
Equation of time - the number of seconds to be added to the day's
mean solar time to obtain the apparent solar noon time, [seconds]
Examples
--------
>>> solar_position(datetime(2003, 10, 17, 13, 30, 30), 45, 45)
[140.8367913391112, 140.8367913391112, -50.83679133911118, -50.83679133911118, 329.9096671679604, 878.4902950980904]
Sunrise occurs when the zenith is 90 degrees (Calgary, AB):
>>> solar_position(datetime(2018, 4, 15, 6, 43, 5), 51.0486, -114.07)[0]
90.00054676987014
Sunset occurs when the zenith is 90 degrees (13.5 hours later):
>>> solar_position(datetime(2018, 4, 15, 20, 30, 28), 51.0486, -114.07)
[89.9995695661236, 90.54103812161853, 0.00043043387640950836, -0.5410381216185247, 286.8313781904518, 6.631429525878048]
Notes
-----
If you were standing at the same longitude as the sun, so that it was no
further east or west than you, the angle it sits south or north of you
is the *zenith*. If it were directly overhead it would be 0°;
a little north or south and it would be a little positive;
near sunset or sunrise, near 90°; and at night, between 90° and 180°.
The *solar altitude angle* is defined as 90° - `zenith`.
Note the *elevation* angle is just another name for the *altitude* angle.
The *azimuth* is the angle in degrees that the sun is east of north.
It is measured from north, increasing eastwards, from 0° to 360°. Other conventions may be used.
Note that due to differences in atmospheric refractivity, estimates of
sunset and sunrise are accurate to no better than one minute. Refraction
conditions truly vary across the atmosphere, so characterizing it by an
average value is limiting as well.
References
----------
.. [1] Reda, Ibrahim, and Afshin Andreas. "Solar Position Algorithm for
Solar Radiation Applications." Solar Energy 76, no. 5 (January 1, 2004):
577-89. https://doi.org/10.1016/j.solener.2003.12.003.
.. [2] "Navigation - What Azimuth Description Systems Are in Use? -
Astronomy Stack Exchange."
https://astronomy.stackexchange.com/questions/237/what-azimuth-description-systems-are-in-use?rq=1. |
384,605 | def xpose6(m):
m = stypes.toDoubleMatrix(m)
mout = stypes.emptyDoubleMatrix(x=6, y=6)
libspice.xpose6_c(m, mout)
return stypes.cMatrixToNumpy(mout) | Transpose a 6x6 matrix
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/xpose6_c.html
:param m: Matrix to be transposed
:type m: list[6][6]
:return: Transposed matrix
:rtype: list[6][6] |
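A small sanity-check sketch for the wrapper above (assumes the SpiceyPy stypes/libspice machinery used in the code is available):
import numpy as np

m = np.arange(36.0).reshape(6, 6)        # arbitrary 6x6 matrix
mt = xpose6(m)
assert np.allclose(mt, np.transpose(m))  # matches NumPy's transpose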
384,606 | def cumulative_window(group_by=None, order_by=None):
return Window(
preceding=None, following=0, group_by=group_by, order_by=order_by
) | Create a cumulative window for use with aggregate window functions.
All window frames / ranges are inclusive.
Parameters
----------
group_by : expressions, default None
Either specify here or with TableExpr.group_by
order_by : expressions, default None
For analytic functions requiring an ordering, specify here, or let Ibis
determine the default ordering (for functions like rank)
Returns
-------
Window |
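A minimal usage sketch with an unbound Ibis table; the table and column names below are assumptions for illustration:
import ibis

# Hypothetical unbound table, used only to build the expression.
sales = ibis.table([('city', 'string'), ('day', 'date'), ('amount', 'double')], name='sales')

# Running total of `amount` per city, ordered by day.
w = cumulative_window(group_by=sales.city, order_by=sales.day)
expr = sales.mutate(running_total=sales.amount.sum().over(w))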
384,607 | def download_file(url, filename=None, show_progress=draw_pbar):
# reconstructed from context: stream the response and derive a default filename
r = requests.get(url, stream=True)
if filename is None:
    filename = url.rstrip('/').rsplit('/', 1)[-1]
size = int(r.headers.get('Content-Length', 0))
seen = 0
with open(filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
seen += 1024
show_progress(seen, size)
if chunk:
f.write(chunk)
f.flush()
return filename | Download a file and show progress
url: the URL of the file to download
filename: the filename to download it to (if not given, uses the url's filename part)
show_progress: callback function to update a progress bar
the show_progress function shall take two parameters: `seen` and `size`, and
return nothing.
This function returns the filename it has written the result to. |
384,608 | def fix_config(self, options):
options = super(InitStorageValue, self).fix_config(options)
opt = "storage_name"
if opt not in options:
options[opt] = "unknown"
if opt not in self.help:
self.help[opt] = "The name of the storage value to delete (string)."
opt = "value"
if opt not in options:
options[opt] = "1"
if opt not in self.help:
self.help[opt] = "The initial value (string)."
return options | Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
:param options: the options to fix
:type options: dict
:return: the (potentially) fixed options
:rtype: dict |
384,609 | def closeEvent(self, event):
self.script_thread.quit()
self.read_probes.quit()
if self.config_filename:
fname = self.config_filename
self.save_config(fname)
event.accept()
print()
print()
print() | things to be done when gui closes, like save the settings |
384,610 | def set_send_enable(self, setting):
self._pebble.send_packet(DataLogging(data=DataLoggingSetSendEnable(enabled=setting))) | Set the send enable setting on the watch |
384,611 | def make_headers(context: TraceContext) -> Headers:
headers = {
TRACE_ID_HEADER: context.trace_id,
SPAN_ID_HEADER: context.span_id,
FLAGS_HEADER: '0',
SAMPLED_ID_HEADER: '1' if context.sampled else '0',
}
if context.parent_id is not None:
headers[PARENT_ID_HEADER] = context.parent_id
return headers | Creates dict with zipkin headers from supplied trace context. |
384,612 | async def get_scene(self, scene_id, from_cache=True) -> Scene:
if not from_cache:
await self.get_scenes()
for _scene in self.scenes:
if _scene.id == scene_id:
return _scene
raise ResourceNotFoundException("Scene not found scene_id: {}".format(scene_id)) | Get a scene resource instance.
:raises a ResourceNotFoundException when no scene found.
:raises a PvApiError when something is wrong with the hub. |
384,613 | def set_server(self, server_pos, key, value):
if zeroconf_tag and self.zeroconf_enable_tag:
self.listener.set_server(server_pos, key, value) | Set the key to the value for the server_pos (position in the list). |
384,614 | def cmprss(delim, n, instr, lenout=_default_len_out):
delim = ctypes.c_char(delim.encode(encoding='UTF-8'))
n = ctypes.c_int(n)
instr = stypes.stringToCharP(instr)
output = stypes.stringToCharP(lenout)
libspice.cmprss_c(delim, n, instr, lenout, output)
return stypes.toPythonString(output) | Compress a character string by removing occurrences of
more than N consecutive occurrences of a specified
character.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/cmprss_c.html
:param delim: Delimiter to be compressed.
:type delim: str
:param n: Maximum consecutive occurrences of delim.
:type n: int
:param instr: Input string.
:type instr: str
:param lenout: Optional available space in output string.
:type lenout: Optional int
:return: Compressed string.
:rtype: str |
384,615 | def reset ():
global __prefixes_suffixes, __suffixes_to_types, __types, __rule_names_to_types, __target_suffixes_cache
__register_features ()
__prefixes_suffixes = [property.PropertyMap(), property.PropertyMap()]
__suffixes_to_types = {}
__types = {}
__target_suffixes_cache = {} | Clear the module state. This is mainly for testing purposes.
Note that this must be called _after_ resetting the module 'feature'. |
384,616 | def get_subdomain_history_neighbors(self, cursor, subdomain_rec):
hist = self.subdomain_db.get_subdomain_history(subdomain_rec.get_fqn(), include_unaccepted=True, start_sequence=subdomain_rec.n-1, end_sequence=subdomain_rec.n, cur=cursor)
hist.sort(lambda h1, h2: -1 if h1.n < h2.n or (h1.n == h2.n and h1.parent_zonefile_index < h2.parent_zonefile_index) \
else 0 if h1.n == h2.n and h1.parent_zonefile_index == h2.parent_zonefile_index \
else 1)
fut = self.subdomain_db.get_subdomain_history(subdomain_rec.get_fqn(), include_unaccepted=True, start_sequence=subdomain_rec.n, end_sequence=subdomain_rec.n+2, cur=cursor)
fut.sort(lambda h1, h2: -1 if h1.n < h2.n or (h1.n == h2.n and h1.parent_zonefile_index < h2.parent_zonefile_index) \
else 0 if h1.n == h2.n and h1.parent_zonefile_index == h2.parent_zonefile_index \
else 1)
cur = []
tmp_fut = []
for f in fut:
if f.n == subdomain_rec.n:
cur.append(f)
else:
tmp_fut.append(f)
fut = tmp_fut
ret = {'prev': hist, 'cur': cur, 'fut': fut}
return ret | Given a subdomain record, get its neighbors.
I.e. get all of the subdomain records with the previous sequence number,
and get all of the subdomain records with the next sequence number
Returns {'prev': [...blockchain order...], 'cur': [...blockchain order...], 'fut': [...blockchain order...]} |
384,617 | def getTransitionProbabilities(state, action):
assert 0 <= action < ACTIONS
if not isLegal(state, action):
s1 = [convertTupleToIndex(state)]
return(s1, [1], -10)
state = list(state)
state[action] = PLAYER
if isWon(state, PLAYER):
s1 = [convertTupleToIndex(state)]
return(s1, [1], 1)
elif isDraw(state):
s1 = [convertTupleToIndex(state)]
return(s1, [1], 0)
s1 = []
p = []
legal_a = getLegalActions(state)
for a in legal_a:
state[a] = OPPONENT
if isWon(state, OPPONENT):
s1 = [convertTupleToIndex(state)]
return(s1, [1], -1)
elif isDraw(state):
s1 = [convertTupleToIndex(state)]
return(s1, [1], 0)
s1.append(convertTupleToIndex(state))
p.append(1.0 / len(legal_a))
state[a] = 0
return(s1, p, 0) | Parameters
----------
state : tuple
The state
action : int
The action
Returns
-------
s1, p, r : tuple of two lists and an int
s1 are the next states, p are the probabilities, and r is the reward |
384,618 | def nuc_v(msg):
tc = typecode(msg)
if tc != 19:
raise RuntimeError("%s: Not an airborne velocity message, expecting TC = 19" % msg)
msgbin = common.hex2bin(msg)
NUCv = common.bin2int(msgbin[42:45])
try:
HVE = uncertainty.NUCv[NUCv]['HVE']
VVE = uncertainty.NUCv[NUCv]['VVE']
except KeyError:
HVE, VVE = uncertainty.NA, uncertainty.NA
return HVE, VVE | Calculate NUCv, Navigation Uncertainty Category - Velocity (ADS-B version 1)
Args:
msg (string): 28 bytes hexadecimal message string,
Returns:
int or string: 95% Horizontal Velocity Error
int or string: 95% Vertical Velocity Error |
384,619 | def clean_caches(path):
for dirname, subdirlist, filelist in os.walk(path):
for f in filelist:
if f.endswith('.pyc'):
try:
os.remove(os.path.join(dirname, f))
except FileNotFoundError:
pass
if dirname.endswith('__pycache__'):
shutil.rmtree(dirname) | Removes all python cache files recursively on a path.
:param path: the path
:return: None |
384,620 | def sync(to_install, to_uninstall, verbose=False, dry_run=False, install_flags=None):
if not to_uninstall and not to_install:
click.echo("Everything up-to-date")
pip_flags = []
if not verbose:
pip_flags += []
if to_uninstall:
if dry_run:
click.echo("Would uninstall:")
for pkg in to_uninstall:
click.echo(" {}".format(pkg))
else:
check_call([sys.executable, '-m', 'pip', 'uninstall', '-y'] + pip_flags + sorted(to_uninstall))
if to_install:
if install_flags is None:
install_flags = []
if dry_run:
click.echo("Would install:")
for ireq in to_install:
click.echo(" {}".format(format_requirement(ireq)))
else:
req_lines = []
for ireq in sorted(to_install, key=key_from_ireq):
ireq_hashes = get_hashes_from_ireq(ireq)
req_lines.append(format_requirement(ireq, hashes=ireq_hashes))
tmp_req_file = tempfile.NamedTemporaryFile(mode='wt', delete=False)
tmp_req_file.write('\n'.join(req_lines))
tmp_req_file.close()
try:
check_call(
[sys.executable, '-m', 'pip', 'install', '-r', tmp_req_file.name] + pip_flags + install_flags
)
finally:
os.unlink(tmp_req_file.name)
return 0 | Installs and uninstalls the given sets of modules. |
384,621 | def user_data_dir(appname, appauthor=None, version=None, roaming=False):
if sys.platform.startswith("win"):
if appauthor is None:
raise AppDirsError("must specify 'appauthor' on Windows")
const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
path = os.path.join(_get_win_folder(const), appauthor, appname)
elif sys.platform == 'darwin':
path = os.path.join(
os.path.expanduser('~/Library/Application Support/'),
appname)
else:
path = os.path.join(
os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config")),
appname.lower())
if version:
path = os.path.join(path, version)
return path | Return full path to the user-specific data dir for this application.
"appname" is the name of application.
"appauthor" (only required and used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user data directories are:
Mac OS X: ~/Library/Application Support/<AppName>
Unix: ~/.config/<appname> # or in $XDG_CONFIG_HOME if defined
Win XP (not roaming): C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
Win XP (roaming): C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
Win 7 (not roaming): C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
Win 7 (roaming): C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>
For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME. We don't
use $XDG_DATA_HOME as that data dir is mostly used at the time of
installation, instead of the application adding data during runtime.
Also, in practice, Linux apps tend to store their data in
"~/.config/<appname>" instead of "~/.local/share/<appname>". |
384,622 | def upgrade():
op.create_table(
,
sa.Column(, sa.Integer(), nullable=False),
sa.Column(, sa.Integer(), nullable=False),
sa.Column(, sa.String(length=255), nullable=False),
sa.Column(
,
sqlalchemy_utils.JSONType(),
nullable=False),
sa.ForeignKeyConstraint([], [u], ),
sa.PrimaryKeyConstraint(),
sa.UniqueConstraint(, )
)
op.create_table(
,
sa.Column(, sa.String(length=255), nullable=False),
sa.Column(, sa.String(length=255), nullable=False),
sa.Column(, sa.Integer(), nullable=False),
sa.ForeignKeyConstraint([], [u], ),
sa.PrimaryKeyConstraint(, )
)
op.create_index(
, ,
[, ], unique=True
)
op.create_table(
,
sa.Column(, sa.Integer(), nullable=False),
sa.Column(, sa.String(length=40), nullable=False),
sa.Column(
,
sqlalchemy_utils.EncryptedType(),
nullable=False),
sa.Column(, sa.Text(), nullable=False),
sa.ForeignKeyConstraint(
[], [u],
name=
),
sa.PrimaryKeyConstraint(, )
) | Upgrade database. |
384,623 | def build(self, pre=None, shortest=False):
if pre is None:
pre = []
res = deque()
for value in self.values:
try:
res.append(utils.val(value, pre, shortest=shortest))
except errors.FlushGrams as e:
prev = "".join(res)
res.clear()
if len(self.fuzzer._scope_stack) == 1:
pre.append(prev)
else:
stmts = self.fuzzer._curr_scope.setdefault("prev_append", deque())
stmts.extend(pre)
stmts.append(prev)
pre.clear()
continue
except errors.OptGram as e:
continue
except errors.GramFuzzError as e:
print("{} : {}".format(self.name, str(e)))
raise
return self.sep.join(res) | Build this rule definition
:param list pre: The prerequisites list
:param bool shortest: Whether or not the shortest reference-chain (most minimal) version of the field should be generated. |
384,624 | def connect(self, name, func, sender=None, dispatch_uid=None):
try:
signal = self._registry[name]
except KeyError:
signal = self.register(name)
signal.connect(func, sender=sender, dispatch_uid=dispatch_uid) | Connects a function to a hook.
Creates the hook (name) if it does not exist.
:param str name: The hook name
:param callable func: A function reference used as a callback
:param class sender: Optional sender __class__ to which the
func should respond. Default will match all
:param str dispatch_uid: Optional unique id,
see :py:class:`django.dispatch.Signal` for more info |
384,625 | def pretty_size(value):
exp = int(math.log(value, 1024)) if value > 0 else 0
unit = [exp]
if exp == 0:
return % (value, unit)
unit_value = value / (1024.0 ** exp)
places = int(math.log(unit_value, 10))
return % (2 - places, unit_value, unit) | Convert a number of bytes into a human-readable string.
Output is 2...5 characters. Values >= 1000 always produce output in form: x.xxxU, xx.xxU, xxxU, xxxxU. |
384,626 | def _cmptimestamps(self, filest1, filest2):
mtime_cmp = int((filest1.st_mtime - filest2.st_mtime) * 1000) > 0
if self._use_ctime:
return mtime_cmp or \
int((filest1.st_ctime - filest2.st_mtime) * 1000) > 0
else:
return mtime_cmp | Compare time stamps of two files and return True
if file1 (source) is more recent than file2 (target) |
384,627 | def delete(self, id):
map = self._delete_map_from_user_by_id(c.user, id)
if map is None:
abort(404)
if os.path.exists(os.path.join(config[], map.filepath)):
os.unlink(os.path.join(config[], map.filepath))
response.status = 204
return | DELETE /mapfiles/id: Delete an existing mapfile owned by the current
user. Deletion of the map entry in db and remove mapfile from filesystem. |
384,628 | def _Open(self, path_spec, mode=):
if not path_spec.HasParent():
raise errors.PathSpecError(
)
range_offset = getattr(path_spec, 'range_offset', None)
if range_offset is None:
raise errors.PathSpecError(
)
range_size = getattr(path_spec, 'range_size', None)
if range_size is None:
raise errors.PathSpecError(
)
self._range_offset = range_offset
self._range_size = range_size | Opens the file system defined by path specification.
Args:
path_spec (PathSpec): a path specification.
mode (Optional[str]): file access mode. The default is 'rb' which
represents read-only binary.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file system could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid. |
384,629 | def file_delete(context, id, file_id):
component.file_delete(context, id=id, file_id=file_id) | file_delete(context, id, file_id)
Delete a component file
>>> dcictl component-file-delete [OPTIONS]
:param string id: ID of the component to delete file [required]
:param string file_id: ID for the file to delete [required] |
384,630 | def build_path(graph, node1, node2, path=None):
if path is None:
path = []
if node1 is node2:
return path
path.append(node2)
for pred in graph.all_preds(node2):
if pred in path:
continue
build_path(graph, node1, pred, path)
return path | Build the path from node1 to node2.
The path is composed of all the nodes between node1 and node2,
node1 excluded. Although if there is a loop starting from node1, it will be
included in the path. |
384,631 | def attention_lm_base():
hparams = common_hparams.basic_params1()
hparams.hidden_size = 1024
hparams.batch_size = 8192
hparams.max_length = 256
hparams.dropout = 0.0
hparams.clip_grad_norm = 0.
hparams.optimizer_adam_epsilon = 1e-9
hparams.learning_rate_decay_scheme = "noam"
hparams.learning_rate = 0.1
hparams.learning_rate_warmup_steps = 2000
hparams.initializer_gain = 1.0
hparams.num_hidden_layers = 6
hparams.initializer = "uniform_unit_scaling"
hparams.weight_decay = 0.0
hparams.optimizer_adam_beta1 = 0.9
hparams.optimizer_adam_beta2 = 0.98
hparams.label_smoothing = 0.0
hparams.shared_embedding_and_softmax_weights = False
hparams.add_hparam("filter_size", 4096)
hparams.add_hparam("num_heads", 8)
hparams.add_hparam("attention_key_channels", 0)
hparams.add_hparam("attention_value_channels", 0)
hparams.add_hparam("attention_dropout", 0.0)
hparams.add_hparam("relu_dropout", 0.0)
hparams.add_hparam("pos", "timing")
hparams.add_hparam("encoder_full_attention", False)
return hparams | Set of hyperparameters. |
384,632 | def copy(
self,
name,
start_codons=None,
stop_codons=None,
codon_table=None,
codon_table_changes=None):
new_start_codons = (
self.start_codons.copy()
if start_codons is None
else start_codons)
new_stop_codons = (
self.stop_codons.copy()
if stop_codons is None
else stop_codons)
new_codon_table = (
self.codon_table.copy()
if codon_table is None
else codon_table)
if codon_table_changes is not None:
new_codon_table.update(codon_table_changes)
return GeneticCode(
name=name,
start_codons=new_start_codons,
stop_codons=new_stop_codons,
codon_table=new_codon_table) | Make copy of this GeneticCode object with optional replacement
values for all fields. |
384,633 | def usage():
global g_script_name
print("")
print("Usage: " + g_script_name + " [...options...]")
print("")
print(" --help print out this help menu and show all the valid flags and inputs.")
print("")
print(" --inputfileadd filename where the new java messages to ignore are stored in.")
print("")
print(" --inputfilerm filename where the java messages are removed from the ignored list.")
print("")
print(" --loadjavamessage filename pickle file that stores the dict structure containing java messages to include.")
print("")
print(" --savejavamessage filename pickle file that saves the final dict structure after update.")
print("")
print(" --printjavamessage filename print java ignored java messages stored in pickle file filenam onto console and save into a text file.")
print("")
sys.exit(1) | Illustrate what the various input flags are and what the options should be.
:return: none |
384,634 | def _extract_symbols(self, symbols, default=None):
to_ret = {}
for symbol in symbols:
symbolInfo = self.elf.symbol_decoder.get_symbol_for_name(symbol)
if symbolInfo is None:
if default is not None:
to_ret[symbol] = default
continue
raise FlashAlgoException("Missing symbol %s" % symbol)
to_ret[symbol] = symbolInfo.address
return to_ret | ! @brief Fill 'symbols' field with required flash algo symbols |
384,635 | def add_values_to_bundle_safe(connection, bundle, values):
for value in values:
try:
connection.addValueToBundle(bundle, value)
except YouTrackException as e:
if e.response.status == 409:
print("Value with name [ %s ] already exists in bundle [ %s ]" %
(utf8encode(value.name), utf8encode(bundle.name)))
else:
raise e | Adds values to the specified bundle. Checks whether each value is already contained in the bundle; if so, it is not added.
Args:
connection: An opened Connection instance.
bundle: Bundle instance to add values in.
values: Values, that should be added in bundle.
Raises:
YouTrackException: if something is wrong with queries. |
384,636 | def get(self, key):
doc = self._collection.find_one({'_id': key})
if doc:
doc.pop('_id')
return doc | Get a document by id. |
384,637 | def infos(self):
data = {
"meta": {
"description": self.meta_description,
"lang": self.meta_lang,
"keywords": self.meta_keywords,
"favicon": self.meta_favicon,
"canonical": self.canonical_link,
"encoding": self.meta_encoding
},
"image": None,
"domain": self.domain,
"title": self.title,
"cleaned_text": self.cleaned_text,
"opengraph": self.opengraph,
"tags": self.tags,
"tweets": self.tweets,
"movies": [],
"links": self.links,
"authors": self.authors,
"publish_date": self.publish_date
}
if self.top_image is not None:
data['image'] = {
'url': self.top_image.src,
'width': self.top_image.width,
'height': self.top_image.height,
'type': 'image'
}
for movie in self.movies:
data['movies'].append({
'embed_type': movie.embed_type,
'provider': movie.provider,
'width': movie.width,
'height': movie.height,
'embed_code': movie.embed_code,
'src': movie.src,
})
return data | dict: The summation of all data available about the extracted article
Note:
Read only |
384,638 | def direction_vector(self, angle):
a = angle + self.sign * 90
a = a * np.pi / 180.0
return np.array([np.cos(a), np.sin(a)]) | Returns a unit vector, pointing in the arc's movement direction at a given (absolute) angle (in degrees).
No check is made whether angle lies within the arc's span (the results for angles outside of the arc's span )
Returns a 2x1 numpy array.
>>> a = Arc((0, 0), 1, 0, 90, True)
>>> assert all(abs(a.direction_vector(0) - np.array([0.0, 1.0])) < tol)
>>> assert all(abs(a.direction_vector(45) - np.array([ -0.70710678, 0.70710678])) < 1e-6)
>>> assert all(abs(a.direction_vector(90) - np.array([-1.0, 0.0])) < tol)
>>> assert all(abs(a.direction_vector(135) - np.array([-0.70710678, -0.70710678])) < 1e-6)
>>> assert all(abs(a.direction_vector(-180) - np.array([0.0, -1.0])) < tol)
>>> assert all(abs(a.direction_vector(-90) - np.array([1.0, 0.0])) < tol)
>>> a = a.reversed()
>>> assert all(abs(a.direction_vector(0) - np.array([0.0, -1.0])) < tol)
>>> assert all(abs(a.direction_vector(45) - np.array([ 0.70710678, -0.70710678])) < 1e-6)
>>> assert all(abs(a.direction_vector(90) - np.array([1.0, 0.0])) < tol)
>>> assert all(abs(a.direction_vector(135) - np.array([0.70710678, 0.70710678])) < 1e-6)
>>> assert all(abs(a.direction_vector(-180) - np.array([0.0, 1.0])) < tol)
>>> assert all(abs(a.direction_vector(-90) - np.array([-1.0, 0.0])) < tol) |
384,639 | def pop(self, pair, default=None):
return super(BaseKerning, self).pop(pair, default) | Removes the **pair** from the Kerning and returns the value as an ``int``.
If no pair is found, **default** is returned. **pair** is a
``tuple`` of two :ref:`type-string`\s. This must return either
**default** or a :ref:`type-int-float`.
>>> font.kerning.pop(("A", "V"))
-20
>>> font.kerning.pop(("A", "W"))
-10.5 |
384,640 | def char_between(lower, upper, func_name):
function = register_function(func_name,
lambda char: lower<=char<=upper)
return char_on_predicate(function) | return current char and step if char is between lower and upper, where
@test: a python function with one argument, which tests on one char and return True or False
@test must be registered with register_function |
384,641 | def returnValueList(self, key_list, last=False):
result = []
row = self.returnOneEntry(last=last)
if not row:
return None
dict_row = internal.convert_to_dict(row)
for field in key_list:
result.append(dict_row.get(field, None))
return result | Return a list of key values for the first entry in the current list.
If 'last=True', then the last entry is referenced.
Returns None if the list is empty. If a key is missing, then
that entry in the list is None.
Example of use:
>>> test = [
... {"name": "Jim", "age": 18, "income": 93000, "order": 2},
... {"name": "Larry", "age": 18, "order": 3},
... {"name": "Joe", "age": 20, "income": 15000, "order": 1},
... {"name": "Bill", "age": 19, "income": 29000, "order": 4},
... ]
>>> print PLOD(test).returnValueList(["name", "income"])
['Jim', 93000]
>>> print PLOD(test).sort("name").returnValueList(["name", "income"], last=True)
['Larry', None]
:param last:
If True, the last entry is used rather than the first.
:return:
A value, or None if the list is empty. |
384,642 | def apply_new_scoped_variable_type(self, path, new_variable_type_str):
data_port_id = self.list_store[path][self.ID_STORAGE_ID]
try:
if self.model.state.scoped_variables[data_port_id].data_type.__name__ != new_variable_type_str:
self.model.state.scoped_variables[data_port_id].change_data_type(new_variable_type_str)
except ValueError as e:
logger.error("Error while changing data type: {0}".format(e)) | Applies the new data type of the scoped variable defined by path
:param str path: The path identifying the edited variable
:param str new_variable_type_str: New data type as str |
384,643 | def normalize_sort(sort=None):
if not sort:
return Null
output = FlatList()
for s in listwrap(sort):
if is_text(s) or mo_math.is_integer(s):
output.append({"value": s, "sort": 1})
elif not s.field and not s.value and s.sort==None:
for n, v in s.items():
output.append({"value": n, "sort": sort_direction[v]})
else:
output.append({"value": coalesce(s.field, s.value), "sort": coalesce(sort_direction[s.sort], 1)})
return wrap(output) | CONVERT SORT PARAMETERS TO A NORMAL FORM SO EASIER TO USE |
384,644 | def zpk2tf(z, p, k):
import scipy.signal
b, a = scipy.signal.zpk2tf(z, p, k)
return b, a | Return polynomial transfer function representation from zeros and poles
:param ndarray z: Zeros of the transfer function.
:param ndarray p: Poles of the transfer function.
:param float k: System gain.
:return:
b : ndarray Numerator polynomial.
a : ndarray Denominator polynomial.
:func:`zpk2tf` forms transfer function polynomials from the zeros, poles, and gains
of a system in factored form.
zpk2tf(z,p,k) finds a rational transfer function
.. math:: \frac{B(s)}{A(s)} = \frac{b_1 s^{n-1}+\dots b_{n-1}s+b_n}{a_1 s^{m-1}+\dots a_{m-1}s+a_m}
given a system in factored transfer function form
.. math:: H(s) = \frac{Z(s)}{P(s)} = k \frac{(s-z_1)(s-z_2)\dots(s-z_m)}{(s-p_1)(s-p_2)\dots(s-p_n)}
with p being the pole locations, and z the zero locations, with as many.
The gains for each numerator transfer function are in vector k.
The zeros and poles must be real or come in complex conjugate pairs.
The polynomial denominator coefficients are returned in row vector a and
the polynomial numerator coefficients are returned in matrix b, which has
as many rows as there are columns of z.
Inf values can be used as place holders in z if some columns have fewer zeros than others.
.. note:: wrapper of scipy function zpk2tf |
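Since the wrapper above delegates directly to SciPy, a short worked example that can be checked by hand:
from scipy.signal import zpk2tf

# H(s) = 4 (s + 1) / ((s + 2)(s + 3))
b, a = zpk2tf([-1.0], [-2.0, -3.0], 4.0)
print(b)  # [4. 4.]     -> 4s + 4
print(a)  # [1. 5. 6.]  -> s^2 + 5s + 6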
384,645 | def start_trace(reset=True, filter_func=None, time_filter_func=None):
global trace_filter
global time_filter
if reset:
reset_trace()
if filter_func:
trace_filter = filter_func
else:
trace_filter = GlobbingFilter(exclude=[])
if time_filter_func:
time_filter = time_filter_func
else:
time_filter = GlobbingFilter()
sys.settrace(tracer) | Begins a trace. Setting reset to True will reset all previously recorded
trace data. filter_func needs to point to a callable function that accepts
the parameters (call_stack, module_name, class_name, func_name, full_name).
Every call will be passed into this function and it is up to the function
to decide if it should be included or not. Returning False means the call
will be filtered out and not included in the call graph. |
384,646 | def count_if(predicate, seq):
f = lambda count, x: count + (not not predicate(x))
return reduce(f, seq, 0) | Count the number of elements of seq for which the predicate is true.
>>> count_if(callable, [42, None, max, min])
2 |
384,647 | def _gather_all_deps(self, args, kwargs):
depends = []
count = 0
for dep in args:
if isinstance(dep, Future):
if self.tasks[dep.tid]['status'] not in FINAL_STATES:
count += 1
depends.extend([dep])
for key in kwargs:
dep = kwargs[key]
if isinstance(dep, Future):
if self.tasks[dep.tid]['status'] not in FINAL_STATES:
count += 1
depends.extend([dep])
for dep in kwargs.get('inputs', []):
if isinstance(dep, Future):
if self.tasks[dep.tid]['status'] not in FINAL_STATES:
count += 1
depends.extend([dep])
return count, depends | Count the number of unresolved futures on which a task depends.
Args:
- args (List[args]) : The list of args list to the fn
- kwargs (Dict{kwargs}) : The dict of all kwargs passed to the fn
Returns:
- count, [list of dependencies] |
384,648 | def register(self, what, obj):
name = obj.name
version = obj.version
enable = obj.enable
if enable == :
return
key = Key(name, version)
self.plugins[what][key] = obj | Registering a plugin
Params
------
what: Nature of the plugin (backend, instrumentation, repo)
obj: Instance of the plugin |
384,649 | def validate_xml_text(text):
bad_chars = __INVALID_XML_CHARS & set(text)
if bad_chars:
for offset,c in enumerate(text):
if c in bad_chars:
raise RuntimeError( + repr(c) + + str(offset)) | validates XML text |
384,650 | def get_feature_by_path(self, locus, term, rank, accession, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_feature_by_path_with_http_info(locus, term, rank, accession, **kwargs)
else:
(data) = self.get_feature_by_path_with_http_info(locus, term, rank, accession, **kwargs)
return data | Retrieve an enumerated sequence feature
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_feature_by_path(locus, term, rank, accession, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str locus: locus name or URI (required)
:param str term: Sequence Ontology (SO) term name, accession, or URI (required)
:param int rank: feature rank, must be at least 1 (required)
:param int accession: accession, must be at least 1 (required)
:return: Feature
If the method is called asynchronously,
returns the request thread. |
384,651 | def _reaction_po_to_dict(tokens) -> Reaction:
return Reaction(
reactants=_reaction_part_po_to_dict(tokens[REACTANTS]),
products=_reaction_part_po_to_dict(tokens[PRODUCTS]),
) | Convert a reaction parse object to a DSL.
:type tokens: ParseResult |
384,652 | def _radec(self,*args,**kwargs):
lbd= self._lbd(*args,**kwargs)
return coords.lb_to_radec(lbd[:,0],lbd[:,1],degree=True,epoch=None) | Calculate ra and dec |
384,653 | def configureIAMCredentials(self, AWSAccessKeyID, AWSSecretAccessKey, AWSSessionToken=""):
iam_credentials_provider = IAMCredentialsProvider()
iam_credentials_provider.set_access_key_id(AWSAccessKeyID)
iam_credentials_provider.set_secret_access_key(AWSSecretAccessKey)
iam_credentials_provider.set_session_token(AWSSessionToken)
self._mqtt_core.configure_iam_credentials(iam_credentials_provider) | **Description**
Used to configure/update the custom IAM credentials for Websocket SigV4 connection to
AWS IoT. Should be called before connect.
**Syntax**
.. code:: python
myAWSIoTMQTTClient.configureIAMCredentials(obtainedAccessKeyID, obtainedSecretAccessKey, obtainedSessionToken)
.. note::
Hard-coding credentials into custom script is NOT recommended. Please use AWS Cognito identity service
or other credential provider.
**Parameters**
*AWSAccessKeyID* - AWS Access Key Id from user-specific IAM credentials.
*AWSSecretAccessKey* - AWS Secret Access Key from user-specific IAM credentials.
*AWSSessionToken* - AWS Session Token for temporary authentication from STS.
**Returns**
None |
384,654 | def _invalid_implementation(self, t, missing, mistyped, mismatched):
assert missing or mistyped or mismatched, "Implementation wasn't invalid."
message = "\nclass {C} failed to implement interface {I}:".format(
C=getname(t),
I=getname(self),
)
if missing:
message += dedent(
).format(
I=getname(self),
missing_methods=self._format_missing_methods(missing)
)
if mistyped:
message += dedent(
).format(
I=getname(self),
mismatched_types=self._format_mismatched_types(mistyped),
)
if mismatched:
message += dedent(
).format(
I=getname(self),
mismatched_methods=self._format_mismatched_methods(mismatched),
)
return InvalidImplementation(message) | Make a TypeError explaining why ``t`` doesn't implement our interface. |
384,655 | def redistribute_threads(blockdimx, blockdimy, blockdimz,
dimx, dimy, dimz):
while blockdimz > dimz:
tmp = blockdimz // 2
if tmp < dimz:
break
blockdimy *= 2
blockdimz = tmp
while blockdimy > dimy:
tmp = blockdimy // 2
if tmp < dimy:
break
blockdimx *= 2
blockdimy = tmp
if dimx < blockdimx:
blockdimx = dimx
if dimy < blockdimy:
blockdimy = dimy
if dimz < blockdimz:
blockdimz = dimz
return blockdimx, blockdimy, blockdimz | Redistribute threads from the Z dimension towards the X dimension.
Also clamp number of threads to the problem dimension size,
if necessary |
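A worked example of the redistribution above; the block and problem sizes are illustrative:
# A (32, 4, 4) thread block against a problem only 2 elements deep in z:
# z threads are folded into y until the block fits, then clamped to the problem size.
bx, by, bz = redistribute_threads(32, 4, 4, dimx=1024, dimy=64, dimz=2)
print(bx, by, bz)  # -> 32 8 2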
384,656 | def set_dimmer_start_time(self, hour, minute):
d1 = self._gateway.get_gateway_info().current_time
d2 = dt.utcnow()
diff = d1 - d2
newtime = dt(100, 1, 1, hour, minute, 00) - diff
command = {
ATTR_SMART_TASK_TRIGGER_TIME_INTERVAL:
[{
ATTR_SMART_TASK_TRIGGER_TIME_START_HOUR: newtime.hour,
ATTR_SMART_TASK_TRIGGER_TIME_START_MIN: newtime.minute
}]
}
return self._task.set_values(command) | Set start time for task (hh:mm) in iso8601.
NB: dimmer starts 30 mins before time in app |
384,657 | def create(*context, **kwargs):
items = context
context = ContextStack()
for item in items:
if item is None:
continue
if isinstance(item, ContextStack):
context._stack.extend(item._stack)
else:
context.push(item)
if kwargs:
context.push(kwargs)
return context | Build a ContextStack instance from a sequence of context-like items.
This factory-style method is more general than the ContextStack class's
constructor in that, unlike the constructor, the argument list
can itself contain ContextStack instances.
Here is an example illustrating various aspects of this method:
>>> obj1 = {'animal': 'cat', 'vegetable': 'carrot', 'mineral': 'copper'}
>>> obj2 = ContextStack({'vegetable': 'spinach', 'mineral': 'silver'})
>>>
>>> context = ContextStack.create(obj1, None, obj2, mineral='gold')
>>>
>>> context.get('animal')
'cat'
>>> context.get('vegetable')
'spinach'
>>> context.get('mineral')
'gold'
Arguments:
*context: zero or more dictionaries, ContextStack instances, or objects
with which to populate the initial context stack. None
arguments will be skipped. Items in the *context list are
added to the stack in order so that later items in the argument
list take precedence over earlier items. This behavior is the
same as the constructor's.
**kwargs: additional key-value data to add to the context stack.
As these arguments appear after all items in the *context list,
in the case of key conflicts these values take precedence over
all items in the *context list. This behavior is the same as
the constructor's. |
384,658 | def _make_r_patches(data, K_g, critical_r, indices, approx):
def append_components(means, covs, data, partition):
subdata_start = 0
subdata_stop = partition[0]
for len_subdata in partition:
subdata = data[subdata_start:subdata_stop]
means.append( _np.mean(subdata, axis=0) )
covs.append ( _np.cov (subdata, rowvar=0) )
subdata_start += len_subdata
subdata_stop += len_subdata
n = len(data[0])
for item in data:
assert len(item) == n,
data = [_np.asarray(d) for d in data]
if indices is None:
indices = _np.arange(data[0].shape[1])
assert len(indices) > 0, + str(indices)
chain_groups = r_group([_np.mean(chain_values.T[indices], axis=1) for chain_values in data],
[_np.var (chain_values.T[indices], axis=1, ddof=1) for chain_values in data],
n, critical_r, approx)
long_patches_means = []
long_patches_covs = []
for group in chain_groups:
k_g = len(group)
if K_g >= k_g:
n = _part(K_g, k_g)
for i, chain_index in enumerate(group):
data_full_chain = data[chain_index]
this_patch_lengths = _part(len(data_full_chain), n[i])
append_components(long_patches_means, long_patches_covs, data_full_chain, this_patch_lengths)
else:
k_g = 1
data_full_chain = _np.vstack([data[i] for i in group])
this_patch_lengths = _part(len(data_full_chain), K_g)
append_components(long_patches_means, long_patches_covs, data_full_chain, this_patch_lengths)
return long_patches_means, long_patches_covs | Helper function for :py:func:`.make_r_gaussmix` and
:py:func:`.make_r_tmix`. Group the ``data`` according to the R value
and split each group into ``K_g`` patches. Return the patch means
and covariances. For details see the docstrings of the above mentioned
functions. |
384,659 | def load(self, dump_fn=, prep_only=0, force_upload=0, from_local=0, name=None, site=None, dest_dir=None):
r = self.database_renderer(name=name, site=site)
r.env.dump_fn = self.get_default_db_fn(fn_template=dump_fn, dest_dir=dest_dir)
from_local = int(from_local)
prep_only = int(prep_only)
missing_local_dump_error = r.format()
if self.is_local:
r.env.remote_dump_fn = dump_fn
else:
r.env.remote_dump_fn = + os.path.split(r.env.dump_fn)[-1]
if not prep_only and not self.is_local:
if not self.dryrun:
assert os.path.isfile(r.env.dump_fn), missing_local_dump_error
r.pc()
r.local(
)
if self.is_local and not prep_only and not self.dryrun:
assert os.path.isfile(r.env.dump_fn), missing_local_dump_error
r.run_or_local(r.env.load_command) | Restores a database snapshot onto the target database server.
If prep_only=1, commands for preparing the load will be generated,
but not the command to finally load the snapshot. |
384,660 | def enter_eventloop(self):
self.log.info("entering eventloop")
signal(SIGINT, default_int_handler)
while self.eventloop is not None:
try:
self.eventloop(self)
except KeyboardInterrupt:
self.log.error("KeyboardInterrupt caught in kernel")
continue
else:
self.eventloop = None
break
self.log.info("exiting eventloop")
ioloop.IOLoop.instance().stop() | enter eventloop |
384,661 | def cleanup_unreachable(rdf):
all_subjects = set(rdf.subjects())
logging.debug("total subject resources: %d", len(all_subjects))
reachable = find_reachable(rdf, SKOS.Concept)
nonreachable = all_subjects - reachable
logging.debug("deleting %s non-reachable resources", len(nonreachable))
for subj in nonreachable:
delete_uri(rdf, subj) | Remove triples which cannot be reached from the concepts by graph
traversal. |
384,662 | def resample_signal(self, data_frame):
new_freq = np.round(1 / self.sampling_frequency, decimals=6)
df_resampled = data_frame.resample(str(new_freq) + 'S').mean()
logging.debug("resample signal")
df_resampled = df_resampled.interpolate(method='linear')
get_sampling_rate_from_timestamp(df_resampled)
return df_resampled | Convenience method for frequency conversion and resampling of data frame.
Object must have a DatetimeIndex. After re-sampling, this methods interpolate the time magnitude sum
acceleration values and the x,y,z values of the data frame acceleration
:param data_frame: the data frame to resample
:param str sampling_frequency: the sampling frequency. Default is 100Hz, as recommended by the author of the pilot study [1] |
384,663 | def _stripslashes(s):
r = re.sub(r"\\(n|r)", "\n", s)
r = re.sub(r"\\", "", r)
return r | Removes trailing and leading backslashes from string |
384,664 | def _set_status(self, status, message=):
message = message.strip()
if message.startswith():
msg_id = message[1:]
message = self.messages.get(msg_id, )
message = message.encode(, )
for func in self.set_status_funcs:
func(status, message) | Updates the status and message on all supported IM apps.
`status`
Status type (See ``VALID_STATUSES``).
`message`
Status message. |
384,665 | def from_bytes(cls, b):
hdr = None
head_chunks = []
end = ("IEND", make_chunk("IEND", b""))
frame_chunks = []
frames = []
num_plays = 0
frame_has_head_chunks = False
control = None
for type_, data in parse_chunks(b):
if type_ == "IHDR":
hdr = data
frame_chunks.append((type_, data))
elif type_ == "acTL":
_num_frames, num_plays = struct.unpack("!II", data[8:-4])
continue
elif type_ == "fcTL":
if any(type_ == "IDAT" for type_, data in frame_chunks):
frame_chunks.append(end)
frames.append((PNG.from_chunks(frame_chunks), control))
frame_has_head_chunks = False
control = FrameControl.from_bytes(data[12:-4])
hdr = make_chunk("IHDR", struct.pack("!II", control.width, control.height) + hdr[16:-4])
frame_chunks = [("IHDR", hdr)]
else:
control = FrameControl.from_bytes(data[12:-4])
elif type_ == "IDAT":
if not frame_has_head_chunks:
frame_chunks.extend(head_chunks)
frame_has_head_chunks = True
frame_chunks.append((type_, data))
elif type_ == "fdAT":
if not frame_has_head_chunks:
frame_chunks.extend(head_chunks)
frame_has_head_chunks = True
frame_chunks.append(("IDAT", make_chunk("IDAT", data[12:-4])))
elif type_ == "IEND":
frame_chunks.append(end)
frames.append((PNG.from_chunks(frame_chunks), control))
break
elif type_ in CHUNK_BEFORE_IDAT:
head_chunks.append((type_, data))
else:
frame_chunks.append((type_, data))
o = cls()
o.frames = frames
o.num_plays = num_plays
return o | Create an APNG from raw bytes.
:arg bytes b: The raw bytes of the APNG file.
:rtype: APNG |
384,666 | def is_same(type1, type2):
nake_type1 = remove_declarated(type1)
nake_type2 = remove_declarated(type2)
return nake_type1 == nake_type2 | returns True, if type1 and type2 are same types |
384,667 | def _get_log_model_class(self):
if self.log_model_class is not None:
return self.log_model_class
app_label, model_label = self.log_model.rsplit('.', 1)
self.log_model_class = apps.get_model(app_label, model_label)
return self.log_model_class | Cache for fetching the actual log model object once django is loaded.
Otherwise, import conflict occur: WorkflowEnabled imports <log_model>
which tries to import all models to retrieve the proper model class. |
384,668 | def shelter_listbybreed(self, **kwargs):
root = self._do_api_call("shelter.listByBreed", kwargs)
shelter_ids = root.findall("shelterIds/id")
for shelter_id in shelter_ids:
yield shelter_id.text | shelter.listByBreed wrapper. Given a breed and an animal type, list
the shelter IDs with pets of said breed.
:rtype: generator
:returns: A generator of shelter IDs that have breed matches. |
384,669 | def move(zone, zonepath):
*
ret = {: True}
res = __salt__[](.format(
zone=.format(zone) if _is_uuid(zone) else .format(zone),
path=zonepath,
))
ret[] = res[] == 0
ret[] = res[] if ret[] else res[]
ret[] = ret[].replace(, )
if ret[] == :
del ret[]
return ret | Move zone to new zonepath.
zone : string
name or uuid of the zone
zonepath : string
new zonepath
CLI Example:
.. code-block:: bash
salt '*' zoneadm.move meave /sweetwater/meave |
384,670 | def get_profile(session):
response = session.get(PROFILE_URL, allow_redirects=False)
if response.status_code == 302:
raise USPSError()
parsed = BeautifulSoup(response.text, HTML_PARSER)
profile = parsed.find(, {: })
data = {}
for row in profile.find_all():
cells = row.find_all()
if len(cells) == 2:
key = .join(cells[0].find_all(text=True)).strip().lower().replace(, )
value = .join(cells[1].find_all(text=True)).strip()
data[key] = value
return data | Get profile data. |
384,671 | def add_interactions_from(self, ebunch, t=None, e=None):
if t is None:
raise nx.NetworkXError(
"The t argument must be a specified.")
for ed in ebunch:
self.add_interaction(ed[0], ed[1], t, e) | Add all the interaction in ebunch at time t.
Parameters
----------
ebunch : container of interaction
Each interaction given in the container will be added to the
graph. The interaction must be given as as 2-tuples (u,v) or
3-tuples (u,v,d) where d is a dictionary containing interaction
data.
t : appearance snapshot id, mandatory
e : vanishing snapshot id, optional
See Also
--------
add_edge : add a single interaction
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_edges_from([(0,1),(1,2)], t=0) |
384,672 | def imagecapture(self, window_name=None, x=0, y=0,
width=None, height=None):
if x or y or (width and width != -1) or (height and height != -1):
raise LdtpServerException("Not implemented")
if window_name:
handle, name, app = self._get_window_handle(window_name)
try:
self._grabfocus(handle)
except:
pass
rect = self._getobjectsize(handle)
screenshot = CGWindowListCreateImage(NSMakeRect(rect[0],
rect[1], rect[2], rect[3]), 1, 0, 0)
else:
screenshot = CGWindowListCreateImage(CGRectInfinite, 1, 0, 0)
image = CIImage.imageWithCGImage_(screenshot)
bitmapRep = NSBitmapImageRep.alloc().initWithCIImage_(image)
blob = bitmapRep.representationUsingType_properties_(NSPNGFileType, None)
tmpFile = tempfile.mktemp(, )
blob.writeToFile_atomically_(tmpFile, False)
rv = b64encode(open(tmpFile).read())
os.remove(tmpFile)
return rv | Captures screenshot of the whole desktop or given window
@param window_name: Window name to look for, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param x: x co-ordinate value
@type x: int
@param y: y co-ordinate value
@type y: int
@param width: width co-ordinate value
@type width: int
@param height: height co-ordinate value
@type height: int
@return: screenshot with base64 encoded for the client
@rtype: string |
384,673 | def set_stack_address_mapping(self, absolute_address, region_id, related_function_address=None):
if self._stack_region_map is None:
raise SimMemoryError()
self._stack_region_map.map(absolute_address, region_id, related_function_address=related_function_address) | Create a new mapping between an absolute address (which is the base address of a specific stack frame) and a
region ID.
:param absolute_address: The absolute memory address.
:param region_id: The region ID.
:param related_function_address: Related function address. |
384,674 | def s15f16l(s):
n = len(s) // 4
t = struct.unpack('>%dl' % n, s)
return map((2**-16).__mul__, t) | Convert sequence of ICC s15Fixed16 to list of float. |
384,675 | def _get(self, key, what):
if not self.runtime._ring:
return None
pos = self._get_pos(key)
if what == 'pos':
return pos
nodename = self.runtime._ring[self.runtime._keys[pos]]
if what in [, , , ]:
return self.runtime._nodes[nodename][what]
elif what == :
return self.runtime._nodes[nodename]
elif what == 'nodename':
return nodename
elif what == 'tuple':
return (self.runtime._keys[pos], nodename) | Generic getter magic method.
The node with the nearest but not less hash value is returned.
:param key: the key to look for.
:param what: the information to look for in, allowed values:
- instance (default): associated node instance
- nodename: node name
- pos: index of the given key in the ring
- tuple: ketama compatible (pos, name) tuple
- weight: node weight |
384,676 | def checkBinary(name, bindir=None):
if name == "sumo-gui":
envName = "GUISIM_BINARY"
else:
envName = name.upper() + "_BINARY"
env = os.environ
join = os.path.join
if envName in env and exeExists(env.get(envName)):
return env.get(envName)
if bindir is not None:
binary = join(bindir, name)
if exeExists(binary):
return binary
if "SUMO_HOME" in env:
binary = join(env.get("SUMO_HOME"), "bin", name)
if exeExists(binary):
return binary
binary = os.path.abspath(
join(os.path.dirname(__file__), , , , name))
if exeExists(binary):
return binary
return name | Checks for the given binary in the places, defined by the environment
variables SUMO_HOME and <NAME>_BINARY. |
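Typical use of the helper above when scripting SUMO; the explicit bindir is a hypothetical path:
# Resolve binaries via <NAME>_BINARY / SUMO_HOME, falling back to the bare name.
sumo_binary = checkBinary("sumo")
netconvert_binary = checkBinary("netconvert", bindir="/opt/sumo/bin")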
384,677 | def OnPadIntCtrl(self, event):
self.attrs["pad"] = event.GetValue()
post_command_event(self, self.DrawChartMsg) | Pad IntCtrl event handler |
384,678 | def _get_remote(self, cached=True):
return self.m(
,
cmdd=dict(
cmd= % ( if cached else ),
cwd=self.local
),
verbose=False
) | Helper function to determine remote
:param cached:
Use cached values or query remotes |
384,679 | def from_conll(this_class, stream):
stream = iter(stream)
corpus = this_class()
while 1:
sentence = Sentence.from_conll(stream)
if sentence:
corpus.append(sentence)
else:
break
return corpus | Construct a Corpus. stream is an iterable over strings where
each string is a line in CoNLL-X format. |
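Hypothetical usage, reading a CoNLL-X treebank file (the file name is a placeholder, and Corpus is assumed to behave like a list of sentences):

with open("train.conll") as stream:
    corpus = Corpus.from_conll(stream)
print(len(corpus), "sentences parsed")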
384,680 | def remove(path):
r = cpenv.resolve(path)
if isinstance(r.resolved[0], cpenv.VirtualEnvironment):
EnvironmentCache.discard(r.resolved[0])
EnvironmentCache.save() | Remove a cached environment. Removed paths will no longer be able to
be activated by name |
384,681 | def sample_slice(args):
(u, loglstar, axes, scale,
prior_transform, loglikelihood, kwargs) = args
rstate = np.random
nonperiodic = kwargs.get('nonperiodic', None)
n = len(u)
slices = kwargs.get('slices', 5)
nc = 0
nexpand = 0
ncontract = 0
fscale = []
axes = scale * axes.T
axlens = [linalg.norm(axis) for axis in axes]
for it in range(slices):
idxs = np.arange(n)
rstate.shuffle(idxs)
for idx in idxs:
axis = axes[idx]
axlen = axlens[idx]
r = rstate.rand()
u_l = u - r * axis
if unitcheck(u_l, nonperiodic):
v_l = prior_transform(np.array(u_l))
logl_l = loglikelihood(np.array(v_l))
else:
logl_l = -np.inf
nc += 1
nexpand += 1
u_r = u + (1 - r) * axis
if unitcheck(u_r, nonperiodic):
v_r = prior_transform(np.array(u_r))
logl_r = loglikelihood(np.array(v_r))
else:
logl_r = -np.inf
nc += 1
nexpand += 1
while logl_l >= loglstar:
u_l -= axis
if unitcheck(u_l, nonperiodic):
v_l = prior_transform(np.array(u_l))
logl_l = loglikelihood(np.array(v_l))
else:
logl_l = -np.inf
nc += 1
nexpand += 1
while logl_r >= loglstar:
u_r += axis
if unitcheck(u_r, nonperiodic):
v_r = prior_transform(np.array(u_r))
logl_r = loglikelihood(np.array(v_r))
else:
logl_r = -np.inf
nc += 1
nexpand += 1
while True:
u_hat = u_r - u_l
u_prop = u_l + rstate.rand() * u_hat
if unitcheck(u_prop, nonperiodic):
v_prop = prior_transform(np.array(u_prop))
logl_prop = loglikelihood(np.array(v_prop))
else:
logl_prop = -np.inf
nc += 1
ncontract += 1
if logl_prop >= loglstar:
window = linalg.norm(u_hat)
fscale.append(window / axlen)
u = u_prop
break
else:
s = np.dot(u_prop - u, u_hat)
if s < 0:
u_l = u_prop
elif s > 0:
u_r = u_prop
else:
raise RuntimeError("Slice sampler has failed to find "
"a valid point. Some useful "
"output quantities:\n"
"u: {0}\n"
"u_left: {1}\n"
"u_right: {2}\n"
"u_hat: {3}\n"
"u_prop: {4}\n"
"loglstar: {5}\n"
"logl_prop: {6}\n"
"axes: {7}\n"
"axlens: {8}\n"
"s: {9}."
.format(u, u_l, u_r, u_hat, u_prop,
loglstar, logl_prop,
axes, axlens, s))
blob = {'fscale': np.mean(fscale),
        'nexpand': nexpand, 'ncontract': ncontract}
return u_prop, v_prop, logl_prop, nc, blob | Return a new live point proposed by a series of random slices
away from an existing live point. Standard "Gibbs-like" implementation where
a single multivariate "slice" is a combination of `ndim` univariate slices
through each axis.
Parameters
----------
u : `~numpy.ndarray` with shape (npdim,)
Position of the initial sample. **This is a copy of an existing live
point.**
loglstar : float
Ln(likelihood) bound.
axes : `~numpy.ndarray` with shape (ndim, ndim)
Axes used to propose new points. For slices new positions are
proposed along the orthogonal basis defined by :data:`axes`.
scale : float
Value used to scale the provided axes.
prior_transform : function
Function transforming a sample from the unit cube to the parameter
space of interest according to the prior.
loglikelihood : function
Function returning ln(likelihood) given parameters as a 1-d `~numpy`
array of length `ndim`.
kwargs : dict
A dictionary of additional method-specific parameters.
Returns
-------
u : `~numpy.ndarray` with shape (npdim,)
Position of the final proposed point within the unit cube.
v : `~numpy.ndarray` with shape (ndim,)
Position of the final proposed point in the target parameter space.
logl : float
Ln(likelihood) of the final proposed point.
nc : int
Number of function calls used to generate the sample.
blob : dict
Collection of ancillary quantities used to tune :data:`scale`. |
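The arguments arrive packed in a single tuple, which is convenient for parallel map calls. A minimal driver under a flat prior and an isotropic Gaussian likelihood might look like this; every setup value below is illustrative, and the module-level unitcheck helper that sample_slice calls is assumed to be available in the same module:

import numpy as np

ndim = 2
prior_transform = lambda u: 10.0 * u - 5.0       # unit cube -> [-5, 5]
loglike = lambda v: -0.5 * np.sum(v ** 2)        # isotropic Gaussian
u0 = np.full(ndim, 0.5)                          # copy of an existing live point
axes = np.eye(ndim)                              # proposal axes
loglstar = loglike(prior_transform(u0)) - 1.0    # bound below the current logl
args = (u0, loglstar, axes, 1.0, prior_transform, loglike, {'slices': 5})
u, v, logl, nc, blob = sample_slice(args)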
384,682 | def _execute(self, cursor, statements):
# key names below are assumed from the Neo4j transactional HTTP endpoint
payload = [{'statement': s, 'parameters': p, 'resultDataContents': []} for (s, p) in statements]
http_response = self._http_req("POST", self._tx, {'statements': payload})
if self._tx == TX_ENDPOINT:
    self._tx = http_response.getheader('Location')
response = self._deserialize(http_response)
self._handle_errors(response, cursor, cursor)
return response['results'][-1] | Executes a list of statements, returning an iterator of result sets. Each
statement should be a tuple of (statement, params). |
384,683 | def list(self, search_opts=None):
query = base.get_query_string(search_opts)
return self._list('/plugins%s' % query, 'plugins')  # path and response key assumed | Get a list of Plugins. |
384,684 | def get_ball_by_ball(self, match_key, over_key=None):
if over_key:
ball_by_ball_url = "{base_path}match/{match_key}/balls/{over_key}/".format(base_path=self.api_path, match_key=match_key, over_key=over_key)
else:
ball_by_ball_url = "{base_path}match/{match_key}/balls/".format(base_path=self.api_path, match_key=match_key)
response = self.get_response(ball_by_ball_url)
return response | match_key: key of the match
over_key : key of the over
Return:
json data: |
384,685 | def check_applied(result):
try:
applied = result.was_applied
except Exception:
applied = True
if not applied:
raise LWTException(result.one()) | Raises LWTException if it looks like a failed LWT request. A LWTException
won't be raised in the special case in which there are several failed LWT
in a :class:`~cqlengine.query.BatchQuery`. |
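A hypothetical cqlengine-style flow that can reach this path: an IF NOT EXISTS insert is a lightweight transaction, so a second insert with the same primary key is not applied and check_applied turns that into an LWTException (the User model and user_id value are placeholders):

try:
    User.if_not_exists().create(id=user_id, name="alice")
except LWTException:
    print("insert was not applied; the row already exists")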
384,686 | def updateSocialTone(user, socialTone, maintainHistory):
currentSocial = []
currentSocialObject = []
# dictionary keys below ('tones', 'tone_name', 'score', ...) are assumed from
# the Watson Tone Analyzer payload; the original string literals were stripped
for tone in socialTone['tones']:
    if tone['score'] >= SOCIAL_HIGH_SCORE_THRESHOLD:
        currentSocial.append(tone['tone_name'].lower() + '_high')
        currentSocialObject.append({
            'tone_name': tone['tone_name'].lower(),
            'score': tone['score'],
            'interpretation': 'likely high'
        })
    elif tone['score'] <= SOCIAL_LOW_SCORE_THRESHOLD:
        currentSocial.append(tone['tone_name'].lower() + '_low')
        currentSocialObject.append({
            'tone_name': tone['tone_name'].lower(),
            'score': tone['score'],
            'interpretation': 'likely low'
        })
    else:
        currentSocialObject.append({
            'tone_name': tone['tone_name'].lower(),
            'score': tone['score'],
            'interpretation': 'likely medium'
        })
user['tone']['social']['current'] = currentSocial
if maintainHistory:
    if not user['tone']['social']['history']:
        user['tone']['social']['history'] = []
    user['tone']['social']['history'].append(currentSocialObject) | updateSocialTone updates the user with the social tones interpreted based on
the specified thresholds
@param user a json object representing user information (tone) to be used in
conversing with the Conversation Service
@param socialTone a json object containing the social tones in the payload
returned by the Tone Analyzer |
384,687 | def _set_system_description(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={: [u]}), is_leaf=True, yang_name="system-description", rest_name="system-description", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: None, u: None, u: u}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "string",
: ,
})
self.__system_description = t
if hasattr(self, ):
self._set() | Setter method for system_description, mapped from YANG variable /protocol/lldp/system_description (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_system_description is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_system_description() directly. |
384,688 | def mode(data):
n = data.shape[0]
iqr = np.percentile(data, 75)-np.percentile(data, 25)
bin_size = 2 * iqr / n**(1/3)
if bin_size == 0:
return np.nan
databin = np.round(data/bin_size)*bin_size + bin_size/2
u, indices = np.unique(databin, return_inverse=True)
mode = u[np.argmax(np.bincount(indices))]
return mode | Compute an intelligent value for the mode
The most common value in experimental is not very useful if there
are a lot of digits after the comma. This method approaches this
issue by rounding to bin size that is determined by the
Freedman–Diaconis rule.
Parameters
----------
data: 1d ndarray
The data for which the mode should be computed.
Returns
-------
mode: float
The mode computed with the Freedman-Diaconis rule. |
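An illustrative check with synthetic values: for data clustered around 0.5 the binned estimate lands close to 0.5, rounded to the Freedman-Diaconis bin width.

import numpy as np

np.random.seed(0)
data = np.random.normal(loc=0.5, scale=0.1, size=1000)
print(mode(data))   # roughly 0.5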
384,689 | def add_enclave_tag(self, report_id, name, enclave_id, id_type=None):
params = {
    'idType': id_type,        # key names assumed; original literals were stripped
    'name': name,
    'enclaveId': enclave_id
}
resp = self._client.post("reports/%s/tags" % report_id, params=params)
return str(resp.content) | Adds a tag to a specific report, for a specific enclave.
:param report_id: The ID of the report
:param name: The name of the tag to be added
:param enclave_id: ID of the enclave where the tag will be added
:param id_type: indicates whether the ID is internal or an external ID provided by the user
:return: The ID of the tag that was created. |
384,690 | def primary_avatar(user, size=AVATAR_DEFAULT_SIZE):
alt = unicode(user)
# URL name and kwarg keys assumed from django-avatar conventions
url = reverse('avatar_render_primary', kwargs={'user': user, 'size': size})
return """<img src="%s" alt="%s" />""" % (url, alt) | This tag tries to get the default avatar for a user without doing any db
requests. It achieves this by linking to a special view that will do all the
work for us. If that special view is then cached by a CDN for instance,
we will avoid many db calls. |
384,691 | def delete(self, personId):
check_type(personId, basestring, may_be_none=False)
self._session.delete(API_ENDPOINT + '/' + personId) | Remove a person from the system.
Only an admin can remove a person.
Args:
personId(basestring): The ID of the person to be deleted.
Raises:
TypeError: If the parameter types are incorrect.
ApiError: If the Webex Teams cloud returns an error. |
384,692 | def parse_args(self):
self.tcex.log.info()
Args(self.tcex.parser)
self.args = self.tcex.args | Parse CLI args. |
384,693 | def send_data_to_server(self, data, time_out=5):
if not data.endswith(self.__connection_end):
data = "{0}{1}".format(data, foundations.strings.to_string(self.__connection_end).decode("string_escape"))
connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
connection.settimeout(time_out)
connection.connect((foundations.strings.to_string(self.__address), int(self.__port)))
connection.send(data)
self.__engine.notifications_manager.notify(
"{0} | Socket connection command dispatched!".format(self.__class__.__name__))
connection.close()
return True | Sends given data to the Server.
:param data: Data to send.
:type data: unicode
:param time_out: Connection timeout in seconds.
:type time_out: float
:return: Method success.
:rtype: bool |
384,694 | def terminate(self):
terminate_function_name = self.spec.get('terminate_function')
if not terminate_function_name:
    # log messages below are reconstructed paraphrases; the originals were stripped
    logger.error('work unit %s has no terminate_function in its spec',
                 self.key)
    return None
terminate_function = getattr(self.module,
                             self.spec['terminate_function'])
if not terminate_function:
    logger.error('work unit %s: terminate function %s not found in module %s',
                 self.key, terminate_function_name,
                 self.module.__name__)
    return None
logger.info('terminating work unit {0}'.format(self.key))
ret_val = terminate_function(self)
self.update(lease_time=-10)
return ret_val | Kills the work unit.
This is called by the standard worker system, but only in
response to an operating system signal. If the job does setup
such as creating a child process, its terminate function
should kill that child process. More specifically, this
function requires the work spec to contain the keys
``module``, ``run_function``, and ``terminate_function``, and
calls ``terminate_function`` in :attr:`module` containing
:const:`self` as its only parameter. |
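A work-spec module satisfying this contract might look like the sketch below; the bookkeeping dict, the sleep command, and the spec values are all illustrative:

import signal
import subprocess

_children = {}   # work unit key -> child process

def run(work_unit):
    # spawn the real work in a child process (placeholder command)
    _children[work_unit.key] = subprocess.Popen(['sleep', '60'])

def terminate(work_unit):
    # called on an operating system signal: kill the child started by run()
    proc = _children.pop(work_unit.key, None)
    if proc is not None:
        proc.send_signal(signal.SIGTERM)

work_spec = {
    'module': 'my_jobs',               # import name of this module (assumed)
    'run_function': 'run',
    'terminate_function': 'terminate',
}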
384,695 | def transformer_moe_base():
hparams = common_hparams.basic_params1()
hparams.norm_type = "layer"
hparams.hidden_size = 512
hparams.batch_size = 4096
hparams.max_length = 2001
hparams.max_input_seq_length = 2000
hparams.max_target_seq_length = 2000
hparams.dropout = 0.0
hparams.clip_grad_norm = 0.
hparams.optimizer_adam_epsilon = 1e-9
hparams.learning_rate_decay_scheme = "noam"
hparams.learning_rate = 0.1
hparams.learning_rate_warmup_steps = 2000
hparams.initializer_gain = 1.0
hparams.num_hidden_layers = 5
hparams.initializer = "uniform_unit_scaling"
hparams.weight_decay = 0.0
hparams.optimizer_adam_beta1 = 0.9
hparams.optimizer_adam_beta2 = 0.98
hparams.num_sampled_classes = 0
hparams.label_smoothing = 0.0
hparams.shared_embedding_and_softmax_weights = True
hparams.layer_preprocess_sequence = "n"
hparams.layer_postprocess_sequence = "da"
hparams.add_hparam("pos", "timing")
hparams.add_hparam("proximity_bias", False)
hparams.add_hparam("causal_decoder_self_attention", True)
hparams = common_attention.add_standard_attention_hparams(hparams)
hparams.add_hparam("layer_types", "")
hparams.add_hparam("default_att", "a")
hparams.add_hparam("default_ff", "fc")
return hparams | Set of hyperparameters. |
384,696 | def field_value(key, label, color, padding):
if not clr.has_colors and padding > 0:
padding = 7
if color == "bright gray" or color == "dark gray":
bright_prefix = ""
else:
bright_prefix = "bright "
field = clr.stringc(key, "{0}{1}".format(bright_prefix, color))
field_label = clr.stringc(label, color)
return "{0:>{1}} {2}".format(field, padding, field_label) | Print a specific field's stats. |
384,697 | def write_configuration(self, out, secret_attrs=False):
key_order = [, , , , ,
, , , ]
cd = self.get_configuration_dict(secret_attrs=secret_attrs)
for k in key_order:
if k in cd:
out.write(.format(k, cd[k]))
out.write()
for o in cd[]:
out.write(.format(o[], o[])) | Generic configuration, may be overridden by type-specific version |
384,698 | def execute_cleanup_tasks(ctx, cleanup_tasks, dry_run=False):
executor = Executor(cleanup_tasks, ctx.config)
for cleanup_task in cleanup_tasks.tasks:
print("CLEANUP TASK: %s" % cleanup_task)
executor.execute((cleanup_task, dict(dry_run=dry_run))) | Execute several cleanup tasks as part of the cleanup.
REQUIRES: ``clean(ctx, dry_run=False)`` signature in cleanup tasks.
:param ctx: Context object for the tasks.
:param cleanup_tasks: Collection of cleanup tasks (as Collection).
:param dry_run: Indicates dry-run mode (bool) |
384,699 | def make_transformer(self, decompose=, decompose_by=50, tsne_kwargs={}):
decompositions = {
: TruncatedSVD,
: PCA,
}
if decompose and decompose.lower() not in decompositions:
raise YellowbrickValueError(
" is not a valid decomposition, use {}, or None".format(
decompose, ", ".join(decompositions.keys())
)
)
steps = []
if decompose:
klass = decompositions[decompose]
steps.append((decompose, klass(
n_components=decompose_by, random_state=self.random_state)))
steps.append(('tsne', TSNE(
n_components=2, random_state=self.random_state, **tsne_kwargs)))
return Pipeline(steps) | Creates an internal transformer pipeline to project the data set into
2D space using TSNE, applying a pre-decomposition technique ahead of
embedding if necessary. This method will reset the transformer on the
class, and can be used to explore different decompositions.
Parameters
----------
decompose : string or None, default: ``'svd'``
A preliminary decomposition is often used prior to TSNE to make
the projection faster. Specify ``"svd"`` for sparse data or ``"pca"``
for dense data. If decompose is None, the original data set will
be used.
decompose_by : int, default: 50
Specify the number of components for preliminary decomposition, by
default this is 50; the more components, the slower TSNE will be.
Returns
-------
transformer : Pipeline
Pipelined transformer for TSNE projections |
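Illustrative use of the returned pipeline on a small random matrix; `visualizer` stands for an instance of the surrounding class and the data are synthetic:

import numpy as np

X = np.random.rand(100, 200)
transformer = visualizer.make_transformer(decompose='svd', decompose_by=50)
X_2d = transformer.fit_transform(X)   # shape (100, 2)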