Unnamed: 0 (int64, 0–389k) | code (string, lengths 26–79.6k) | docstring (string, lengths 1–46.9k) |
---|---|---|
7,500 | def write_point(self, **kw):
assert 'type' in kw
self.convert_bool(kw, 'score_exit')  # assumed key name; string literal lost in extraction
return self.write_tag_with_content('Point', **kw) | Write a task point to the file::
with writer.write_point(type=PointType.TURN):
writer.write_waypoint(...)
writer.write_observation_zone(...)
# <Point type="Turn"> ... </Point>
Inside the with clause the
:meth:`~aerofiles.xcsoar.Writer.write_waypoint` and
:meth:`~aerofiles.xcsoar.Writer.write_observation_zone` methods must be
used to write the details of the task point.
:param type: type of the task point (one of the constants in
:class:`~aerofiles.xcsoar.constants.PointType`) |
7,501 | def getColor(rgb=None, hsv=None):
if _isSequence(rgb) and len(rgb) > 3:
seqcol = []
for sc in rgb:
seqcol.append(getColor(sc))
return seqcol
if str(rgb).isdigit():
rgb = int(rgb)
if hsv:
c = hsv2rgb(hsv)
else:
c = rgb
if _isSequence(c):
if c[0] <= 1 and c[1] <= 1 and c[2] <= 1:
return c
else:
if len(c) == 3:
return list(np.array(c) / 255.0)
else:
return (c[0] / 255.0, c[1] / 255.0, c[2] / 255.0, c[3])
elif isinstance(c, str):
c = c.replace(",", " ").replace("/", " ").replace("alpha=", "")
c = c.replace("grey", "gray")
c = c.split()[0]
if 0 < len(c) < 3:
if c.lower() in color_nicks.keys():
c = color_nicks[c.lower()]
else:
print("Unknow color nickname:", c)
print("Available abbreviations:", color_nicks)
return (0.5, 0.5, 0.5)
if c.lower() in colors.keys():
c = colors[c.lower()]
else:
namedColors = vtk.vtkNamedColors()
rgba = [0, 0, 0, 0]
namedColors.GetColor(c, rgba)
return list(np.array(rgba[0:3]) / 255.0)
if "
h = c.lstrip("
rgb255 = list(int(h[i : i + 2], 16) for i in (0, 2, 4))
rgbh = np.array(rgb255) / 255.0
if np.sum(rgbh) > 3:
print("Error in getColor(): Wrong hex color", c)
return (0.5, 0.5, 0.5)
return tuple(rgbh)
elif isinstance(c, int):
if c >= 0:
return colors1[c % 10]
else:
return colors2[-c % 10]
elif isinstance(c, float):
if c >= 0:
return colors1[int(c) % 10]
else:
return colors2[int(-c) % 10]
return (0.5, 0.5, 0.5) | Convert a color or list of colors to (r,g,b) format from many input formats.
:param bool hsv: if set to `True`, rgb is assumed as (hue, saturation, value).
Example:
- RGB = (255, 255, 255), corresponds to white
- rgb = (1,1,1) is white
- hex = #FFFF00 is yellow
- string = 'white'
- string = 'w' is white nickname
- string = 'dr' is darkred
- int = 7 picks color nr. 7 in a predefined color list
- int = -7 picks color nr. 7 in a different predefined list
.. hint:: |colorcubes| |colorcubes.py|_ |
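A quick usage sketch of the input formats listed above; the import path is an assumption, since only the function body is shown here.

```python
# Hypothetical import path; getColor is defined in the surrounding colors module.
from vtkplotter.colors import getColor

getColor((255, 128, 0))    # 0-255 ints are rescaled -> [1.0, 0.501..., 0.0]
getColor((0.2, 0.4, 0.6))  # already in 0-1 -> returned unchanged
getColor("#FFFF00")        # hex string -> (1.0, 1.0, 0.0)
getColor("w")              # one/two-letter nickname resolved via color_nicks
getColor(7)                # index into the colors1 palette
getColor(-7)               # negative index into the colors2 palette
getColor(hsv=(0.5, 1, 1))  # interpreted as (hue, saturation, value)
```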
7,502 | def _get_scope_highlight_color(self):
color = self.editor.sideareas_color
if color.lightness() < 128:
color = drift_color(color, 130)
else:
color = drift_color(color, 105)
return color | Gets the base scope highlight color (derived from the editor
background).
For lighter themes it will be a darker color,
and for darker ones it will be a lighter color. |
7,503 | def do_macro_block(parser, token):
tag_name, macro_name, args, kwargs = parse_macro_params(token)
kwargs[node.keyword] = node
else:
raise template.TemplateSyntaxError(
"{0} template tag was supplied "
"the same keyword argument multiple times.".format(
tag_name))
else:
raise template.TemplateSyntaxError(
"{0} template tag was supplied with a "
"keyword argument not defined by the {1} macro.".format(
tag_name, macro_name))
if len(args) > len(macro.args):
raise template.TemplateSyntaxError(
"{0} template tag was supplied too many arg block tags.".format(
tag_name))
macro.parser = parser
return MacroBlockNode(macro, nodelist, args, kwargs) | Function turning a parsed template tag
into a MacroBlockNode. |
7,504 | def __setup_taskset(self, affinity, pid=None, args=None):
self.taskset_path = self.get_option(self.SECTION, 'taskset_path')  # assumed option name; literal lost
if args:
return [self.taskset_path, '-c', affinity] + args  # taskset -c <cpu-list>; flag assumed
if pid:
args = "%s -pc %s %s" % (self.taskset_path, affinity, pid)
retcode, stdout, stderr = execute(args, shell=True, poll_period=0.1, catch_out=True)
logger.debug("Taskset for pid %s: %s", pid, stdout)  # assumed message text
if retcode == 0:
logger.info("Enabled taskset for pid %s with affinity %s", str(pid), affinity)
else:
logger.debug("Taskset failed with retcode %s", retcode)  # assumed message text
raise KeyError(stderr) | if pid specified: set process w/ pid `pid` CPU affinity to specified `affinity` core(s)
if args specified: modify list of args for Popen to start w/ taskset w/ affinity `affinity` |
7,505 | def case_name_parts(self):
if not self.is_mixed_case():
self.honorific = self.honorific.title() if self.honorific else None
self.nick = self.nick.title() if self.nick else None
if self.first:
self.first = self.first.title()
self.first = self.capitalize_and_punctuate_initials(self.first)
if self.last:
self.last = self.last.title()
self.last = self.uppercase_the_scots(self.last)
self.middle = self.middle.title() if self.middle else None
if self.suffix:
if re.match(r'(?i)[js]r', self.suffix):  # assumed pattern (title-case Jr/Sr, uppercase roman numerals); literal lost
self.suffix = self.suffix.title()
else:
self.suffix = self.suffix.upper()
return self | Convert all the parts of the name to the proper case... carefully! |
7,506 | def locator_to_latlong (locator):
locator = locator.upper()
if len(locator) == 5 or len(locator) < 4:
raise ValueError
if ord(locator[0]) > ord('R') or ord(locator[0]) < ord('A'):
raise ValueError
if ord(locator[1]) > ord('R') or ord(locator[1]) < ord('A'):
raise ValueError
if ord(locator[2]) > ord('9') or ord(locator[2]) < ord('0'):
raise ValueError
if ord(locator[3]) > ord('9') or ord(locator[3]) < ord('0'):
raise ValueError
if len(locator) == 6:
if ord(locator[4]) > ord('X') or ord(locator[4]) < ord('A'):
raise ValueError
if ord(locator[5]) > ord('X') or ord(locator[5]) < ord('A'):
raise ValueError
longitude = (ord(locator[0]) - ord('A')) * 20 - 180
latitude = (ord(locator[1]) - ord('A')) * 10 - 90
longitude += (ord(locator[2]) - ord('0')) * 2
latitude += (ord(locator[3]) - ord('0'))
if len(locator) == 6:
longitude += ((ord(locator[4])) - ord('A')) * (2 / 24)
latitude += ((ord(locator[5])) - ord('A')) * (1 / 24)
longitude += 1 / 24
latitude += 0.5 / 24
else:
longitude += 1
latitude += 0.5
return latitude, longitude | Converts a Maidenhead locator into the corresponding WGS84 coordinates
Args:
locator (string): Locator, either 4 or 6 characters
Returns:
tuple (float, float): Latitude, Longitude
Raises:
ValueError: When called with wrong or invalid input arg
TypeError: When arg is not a string
Example:
The following example converts a Maidenhead locator into Latitude and Longitude
>>> from pyhamtools.locator import locator_to_latlong
>>> latitude, longitude = locator_to_latlong("JN48QM")
>>> print latitude, longitude
48.5208333333 9.375
Note:
Latitude (negative = South, positive = North)
Longitude (negative = West, positive = East) |
7,507 | def _reset_server(self, address):
server = self._servers.get(address)
if server:
server.reset()
self._description = self._description.reset_server(address)
self._update_servers() | Clear our pool for a server and mark it Unknown.
Hold the lock when calling this. Does *not* request an immediate check. |
7,508 | def get_charset(request):
content_type = request.META.get('CONTENT_TYPE', None)
if content_type:
return extract_charset(content_type)
else:
return None | Extract charset from the content type |
7,509 | def create_hooks(use_tfdbg=False,
use_dbgprofile=False,
dbgprofile_kwargs=None,
use_validation_monitor=False,
validation_monitor_kwargs=None,
use_early_stopping=False,
early_stopping_kwargs=None):
train_hooks = []
eval_hooks = []
if use_tfdbg:
hook = debug.LocalCLIDebugHook()
train_hooks.append(hook)
eval_hooks.append(hook)
if use_dbgprofile:
tf.logging.info("Using ProfilerHook")
defaults = dict(save_steps=10, show_dataflow=True, show_memory=True)
defaults.update(dbgprofile_kwargs)
train_hooks.append(tf.train.ProfilerHook(**defaults))
if use_validation_monitor:
tf.logging.info("Using ValidationMonitor")
train_hooks.append(
tf.contrib.learn.monitors.ValidationMonitor(
hooks=eval_hooks, **validation_monitor_kwargs))
if use_early_stopping:
tf.logging.info("Using EarlyStoppingHook")
hook = metrics_hook.EarlyStoppingHook(**early_stopping_kwargs)
train_hooks.append(hook)
eval_hooks.append(hook)
return train_hooks, eval_hooks | Create train and eval hooks for Experiment. |
7,510 | def add_node(self, binary_descriptor):
try:
node_string = parse_binary_descriptor(binary_descriptor)
except Exception:
self._logger.exception("Error parsing binary node descriptor: %s", binary_descriptor)
return _pack_sgerror(SensorGraphError.INVALID_NODE_STREAM)
try:
self.graph.add_node(node_string)
except NodeConnectionError:
return _pack_sgerror(SensorGraphError.STREAM_NOT_IN_USE)
except ProcessingFunctionError:
return _pack_sgerror(SensorGraphError.INVALID_PROCESSING_FUNCTION)
except ResourceUsageError:
return _pack_sgerror(SensorGraphError.NO_NODE_SPACE_AVAILABLE)
return Error.NO_ERROR | Add a node to the sensor_graph using a binary node descriptor.
Args:
binary_descriptor (bytes): An encoded binary node descriptor.
Returns:
int: A packed error code. |
7,511 | def flash(message, category='message'):
flashes = session.get('_flashes', [])
flashes.append((category, message))
session['_flashes'] = flashes
message_flashed.send(current_app._get_current_object(),
message=message, category=category) | Flashes a message to the next request. In order to remove the
flashed message from the session and to display it to the user,
the template has to call :func:`get_flashed_messages`.
.. versionchanged:: 0.3
`category` parameter added.
:param message: the message to be flashed.
:param category: the category for the message. The following values
are recommended: ``'message'`` for any kind of message,
``'error'`` for errors, ``'info'`` for information
messages and ``'warning'`` for warnings. However any
kind of string can be used as category. |
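A minimal Flask usage sketch of the flash/get_flashed_messages pair described above; route names and the secret key are illustrative.

```python
from flask import Flask, flash, redirect, render_template

app = Flask(__name__)
app.secret_key = "change-me"  # flash() stores messages in the session

@app.route("/save")
def save():
    flash("Settings saved.", "info")  # queued for display on the next request
    return redirect("/")

@app.route("/")
def index():
    # index.html calls get_flashed_messages(with_categories=True) to pop and show them
    return render_template("index.html")
```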
7,512 | def artist_related_artists(self, spotify_id):
route = Route('GET', '/artists/{spotify_id}/related-artists', spotify_id=spotify_id)  # assumed method/path; literals lost
return self.request(route) | Get related artists for an artist by their ID.
Parameters
----------
spotify_id : str
The spotify_id to search by. |
7,513 | def find_non_contiguous(all_items):
non_contiguous = []
for item in all_items:
if item.slots.count() < 2:
continue
last_slot = None
for slot in item.slots.all().order_by('end_time'):  # assumed ordering field; literal lost
if last_slot:
if last_slot.end_time != slot.get_start_time():
non_contiguous.append(item)
break
last_slot = slot
return non_contiguous | Find any items that have slots that aren't contiguous |
7,514 | def get(self, queue='', no_ack=False, to_dict=False, auto_decode=True):
if not compatibility.is_string(queue):
raise AMQPInvalidArgument('queue should be a string')
elif not isinstance(no_ack, bool):
raise AMQPInvalidArgument('no_ack should be a boolean')
elif self._channel.consumer_tags:
raise AMQPChannelError("Cannot call 'get' when channel is "
"set to consume")
get_frame = specification.Basic.Get(queue=queue,
no_ack=no_ack)
with self._channel.lock and self._channel.rpc.lock:
message = self._get_message(get_frame, auto_decode=auto_decode)
if message and to_dict:
return message.to_dict()
return message | Fetch a single message.
:param str queue: Queue name
:param bool no_ack: No acknowledgement needed
:param bool to_dict: Should incoming messages be converted to a
dictionary before delivery.
:param bool auto_decode: Auto-decode strings when possible.
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:returns: Returns a single message, as long as there is a message in
the queue. If no message is available, returns None.
:rtype: dict|Message|None |
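A minimal polling sketch against the channel API shown above, assuming a local broker and an already-declared queue named "my_queue".

```python
import amqpstorm

connection = amqpstorm.Connection("localhost", "guest", "guest")
channel = connection.channel()

message = channel.basic.get(queue="my_queue", no_ack=False)
if message is None:
    print("queue is empty")  # get() returns None when nothing is waiting
else:
    print(message.body)
    message.ack()            # explicit acknowledgement, since no_ack=False
connection.close()
```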
7,515 | def addcomment(self, comment, private=False):
vals = self.bugzilla.build_update(comment=comment,
comment_private=private)
log.debug("addcomment: update=%s", vals)
return self.bugzilla.update_bugs(self.bug_id, vals) | Add the given comment to this bug. Set private to True to mark this
comment as private. |
7,516 | def _receive_data(self):
result = self.queue.get(block=True)
if hasattr(self.queue, 'task_done'):
self.queue.task_done()
return result | Gets data from the queue. |
7,517 | def fcontext_add_or_delete_policy(action, name, filetype=None, sel_type=None, sel_user=None, sel_level=None):
salt.utils.versions.warn_until(
'Sodium',
'The \'selinux.fcontext_add_or_delete_policy\' module has been deprecated. Please use the '
'\'selinux.fcontext_add_policy\' and \'selinux.fcontext_delete_policy\' modules instead. '
'Support for the \'selinux.fcontext_add_or_delete_policy\' module will be removed in Salt {version}.'
)
return _fcontext_add_or_delete_policy(action, name, filetype, sel_type, sel_user, sel_level) | .. versionadded:: 2017.7.0
Adds or deletes the SELinux policy for a given filespec and other optional parameters.
Returns the result of the call to semanage.
Note that you don't have to remove an entry before setting a new
one for a given filespec and filetype, as adding one with semanage
automatically overwrites a previously configured SELinux context.
.. warning::
Use :mod:`selinux.fcontext_add_policy()<salt.modules.selinux.fcontext_add_policy>`,
or :mod:`selinux.fcontext_delete_policy()<salt.modules.selinux.fcontext_delete_policy>`.
.. deprecated:: 2019.2.0
action
The action to perform. Either ``add`` or ``delete``.
name
filespec of the file or directory. Regex syntax is allowed.
file_type
The SELinux filetype specification. Use one of [a, f, d, c, b,
s, l, p]. See also ``man semanage-fcontext``. Defaults to 'a'
(all files).
sel_type
SELinux context type. There are many.
sel_user
SELinux user. Use ``semanage login -l`` to determine which ones
are available to you.
sel_level
The MLS range of the SELinux context.
CLI Example:
.. code-block:: bash
salt '*' selinux.fcontext_add_or_delete_policy add my-policy |
7,518 | def _pys_assert_version(self, line):
if float(line.strip()) > 1.0:
msg = _("File version {version} unsupported (>1.0).").format(
version=line.strip())
raise ValueError(msg) | Asserts pys file version |
7,519 | def _from_dict(cls, _dict):
args = {}
if 'document' in _dict:  # key names assumed from the result class names
args['document'] = DocumentSentimentResults._from_dict(
_dict.get('document'))
if 'targets' in _dict:
args['targets'] = [
TargetedSentimentResults._from_dict(x)
for x in (_dict.get('targets'))
]
return cls(**args) | Initialize a SentimentResult object from a json dictionary. |
7,520 | def update_username(
self,
username: Union[str, None]
) -> bool:
return bool(
self.send(
functions.account.UpdateUsername(
username=username or ""
)
)
) | Use this method to update your own username.
This method only works for users, not bots. Bot usernames must be changed via Bot Support or by recreating
them from scratch using BotFather. To update a channel or supergroup username you can use
:meth:`update_chat_username`.
Args:
username (``str`` | ``None``):
Username to set. "" (empty string) or None to remove the username.
Returns:
True on success.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error. |
7,521 | def pass_rate(self, include_skips=False, include_inconclusive=False, include_retries=True):
total = self.count()
success = self.success_count()
retries = self.retry_count()
try:
if include_inconclusive and include_skips and include_retries:
val = 100.0*success/total
elif include_inconclusive and include_skips and not include_retries:
val = 100.0 * success / (total - retries)
elif include_skips and include_retries and not include_inconclusive:
inconcs = self.inconclusive_count()
val = 100.0 * success / (total - inconcs)
elif include_skips and not include_retries and not include_inconclusive:
inconcs = self.inconclusive_count()
val = 100.0 * success / (total - inconcs - retries)
elif include_inconclusive and include_retries and not include_skips:
skipped = self.skip_count()
val = 100.0 * success / (total - skipped)
elif include_inconclusive and not include_retries and not include_skips:
skipped = self.skip_count()
val = 100.0 * success / (total - skipped - retries)
elif not include_inconclusive and not include_skips and include_retries:
failures = self.failure_count()
val = 100.0 * success / (failures + success)
else:
failures = self.clean_fails()
val = 100.0 * success / (failures + success)
except ZeroDivisionError:
val = 0
return format(val, '.2f') + " %"
:param include_skips: Boolean, if True skipped tc:s will be included. Default is False
:param include_inconclusive: Boolean, if True inconclusive tc:s will be included.
Default is False.
:param include_retries: Boolean, if True retried tc:s will be included in percentages.
:return: Percentage in format .2f % |
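As a worked example of the default branch (skips and inconclusives excluded, retries counted): with 6 successes and 2 clean failures, pass_rate() returns 100.0*6/(2+6) formatted as "75.00 %"; passing include_skips=True and include_inconclusive=True switches the denominator to the full count().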
7,522 | def get_object_methods(obj):
import utool as ut
attr_list = (getattr(obj, attrname) for attrname in dir(obj))
methods = [attr for attr in attr_list if ut.is_method(attr)]
return methods | Returns all methods belonging to an object instance, as listed by the
__dir__ function
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_inspect import * # NOQA
>>> import utool as ut
>>> obj = ut.NiceRepr()
>>> methods1 = ut.get_object_methods()
>>> ut.inject_func_as_method(obj, ut.get_object_methods)
>>> methods2 = ut.get_object_methods()
>>> assert ut.get_object_methods in methods2 |
7,523 | def delete(self):
if len(self) == 0:
return 0
mdl = self.getModel()
return mdl.deleter.deleteMultiple(self) | delete - Delete all objects in this list.
@return <int> - Number of objects deleted |
7,524 | def is_muted(what):
state = False
for item in solo:
if item not in what:
state = True
else:
state = False
break
for item in mute:
if item in what:
state = True
break
return state | Checks if a logged event is to be muted for debugging purposes.
Also goes through the solo list - only items in there will be logged!
:param what:
:return: |
7,525 | def get_email(self, email_id):
connection = Connection(self.token)
connection.set_url(self.production, self.EMAILS_ID_URL % email_id)
return connection.get_request() | Get a specific email |
7,526 | def colstack(seq, mode='abort', returnnaming=False):
assert mode in ['first', 'drop', 'abort', 'rename'], \
'mode argument must be "first", "drop", "abort", or "rename"'
AllNames = utils.uniqify(utils.listunion(
[list(l.dtype.names) for l in seq]))
NameList = [(x, [i for i in range(len(seq)) if x in seq[i].dtype.names])
for x in AllNames]
Commons = [x[0] for x in NameList if len(x[1]) > 1]
if len(Commons) > 0 or mode == 'first':
if mode == 'abort':
raise ValueError('Columns of the same name appear in multiple arrays; ' +
'use mode="first", "drop" or "rename" to resolve')
elif mode == 'drop':
Names = [(L[0], x, x) for (x, L) in NameList if x not in Commons]
elif mode == 'rename':
NameDict = dict(NameList)
Names = utils.listunion([[(i, n, n) if len(NameDict[n]) == 1 else \
(i, n, n + '_' + str(i)) for n in s.dtype.names] \
for (i, s) in enumerate(seq)])
else:  # mode == 'first': take shared columns from the first array containing them
Names = [(L[0], x, x) for (x, L) in NameList]
else:
Names = [(L[0], x, x) for (x, L) in NameList]
if returnnaming:
return utils.fromarrays([seq[i][x] for (i, x, y) in Names],
type=np.ndarray, names=list(zip(*Names))[2]), Names
else:
return utils.fromarrays([seq[i][x] for (i, x, y) in Names],
type=np.ndarray, names=list(zip(*Names))[2])
Analog of numpy.hstack for recarrays.
Implemented by the tabarray method
:func:`tabular.tab.tabarray.colstack` which uses
:func:`tabular.tabarray.tab_colstack`.
**Parameters**
**seq** : sequence of numpy ndarray with structured dtype
List, tuple, etc. of numpy recarrays to stack vertically.
**mode** : string in ['first','drop','abort','rename']
Denotes how to proceed when multiple recarrays share the
same column name:
* if `mode` == ``first``, take the column from the first
recarray in `seq` containing the shared column name.
* elif `mode` == ``abort``, raise an error when the
recarrays to stack share column names; this is the
default mode.
* elif `mode` == ``drop``, drop any column that shares
its name with any other column among the sequence of
recarrays.
* elif `mode` == ``rename``, for any set of all columns
sharing the same name, rename all columns by appending
an underscore, '_', followed by an integer, starting
with '0' and incrementing by 1 for each subsequent
column.
**Returns**
**out** : numpy ndarray with structured dtype
Result of horizontally stacking the arrays in `seq`.
**See also:** `numpy.hstack
<http://docs.scipy.org/doc/numpy/reference/generated/numpy.hstack.html>`_. |
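A small sketch of the collision modes on plain structured arrays (colstack here is the module-level function above; the tabarray method wraps it):

```python
import numpy as np

a = np.array([(1, 'x'), (2, 'y')], dtype=[('id', int), ('val', 'U1')])
b = np.array([(0.5,), (0.7,)], dtype=[('score', float)])
c = np.array([(10,), (20,)], dtype=[('id', int)])

colstack([a, b])                 # disjoint names: plain horizontal stack
colstack([a, c], mode='first')   # shared 'id' taken from `a`
colstack([a, c], mode='drop')    # shared 'id' dropped entirely
colstack([a, c], mode='rename')  # shared 'id' becomes 'id_0' and 'id_1'
# colstack([a, c])               # default mode='abort' raises ValueError
```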
7,527 | def get_weld_obj_id(weld_obj, data):
obj_id = weld_obj.update(data)
if isinstance(data, WeldObject):
obj_id = data.obj_id
weld_obj.dependencies[obj_id] = data
return obj_id | Helper method to update WeldObject with some data.
Parameters
----------
weld_obj : WeldObject
WeldObject to update.
data : numpy.ndarray or WeldObject or str
Data for which to get an id. If str, it is a placeholder or 'str' literal.
Returns
-------
str
The id of the data, e.g. _inp0 for raw data, obj101 for WeldObject |
7,528 | def add_extension_if_needed(filepath, ext, check_if_exists=False):
if not filepath.endswith(ext):
filepath += ext
if check_if_exists:
if not os.path.exists(filepath):
err = 'File not found: ' + filepath  # assumed message text; literal lost
log.error(err)
raise IOError(err)
return filepath | Add the extension ext to fpath if it doesn't have it.
Parameters
----------
filepath: str
File name or path
ext: str
File extension
check_if_exists: bool
If True, raise IOError if the resulting file path does not exist.
Returns
-------
File name or path with extension added, if needed. |
7,529 | def get_mesures(self, mes, debut=None, fin=None, freq='H', format=None,  # default freq assumed; literal lost
dayfirst=False, brut=False):
def create_index(index, freq):
decalage = 1
_sql = ("SELECT {champ_date}, {champ_val} FROM {table} "  # assumed query shape; original SQL literal lost
"WHERE {champ_code} IN ({mes}) "
"AND {champ_date} BETWEEN {debut} AND {fin}").format(champ_date=champ_date,
table=table,
champ_code=champ_code,
mes=mes,
champ_val=champ_val,
debut=debut_db,
fin=fin_db)
rep = psql.read_sql(_sql, self.conn)
df = rep.set_index(['date', 'heure'])  # assumed column names; literals lost
index = create_index(df.index, freq)
df.reset_index(inplace=True, drop=True)
df['date'] = index  # assumed column name
df = df.set_index(['date'])
etats = etats.unstack()
etats.fillna(value=MISSING_CODE * diviseur, inplace=True)
etats = etats.sum(axis=0)
etats = pd.DataFrame(list(zip(*etats.apply(list))))
etats.index = df.index
etats.columns = df.columns
dates_completes = date_range(debut, fin, freq)
df = df.reindex(dates_completes)
etats = etats.reindex(dates_completes)
invalid = etats_to_invalid(etats)
if not brut:
dfn = df.mask(invalid)
return dfn
else:
return df, etats | Retrieve measurement data.
Parameters:
mes: A measurement name, several names separated by commas, or a list
(list, tuple, pandas.Series) of names
debut: String or datetime object giving the start date.
Default=today's date
fin: String or datetime object giving the end date.
Default=start date
freq: time frequency. '15T' | 'H' | 'D' | 'M' | 'A' ('15T' means quarter-hourly)
format: string describing the date format (e.g. "%Y-%m-%d"
for debut/fin="2013-01-28"). Call pyair.date.strtime_help() for
the list of possible codes.
Default="%Y-%m-%d"
dayfirst: If no format is given and the dates are strings, helps the
parser turn the date into a datetime object by specifying that
dates start with the day (e.g. 11/09/2012 could be read as
November 9th if dayfirst=False)
brut: whether to return the raw, non-invalidated dataframe together with
the measurement state codes
Default=False
Returns:
A dataframe containing all the requested measurements.
If brut=True, returns the dataframe of raw, non-invalidated measurements
and the dataframe of state codes.
The valid (net) dataframe can then be recomputed with:
brut, etats = xr.get_mesure(..., brut=True)
invalides = etats_to_invalid(etats)
net = brut.mask(invalides) |
7,530 | def infer_trading_calendar(factor_idx, prices_idx):
full_idx = factor_idx.union(prices_idx)
traded_weekdays = []
holidays = []
days_of_the_week = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
for day, day_str in enumerate(days_of_the_week):
weekday_mask = (full_idx.dayofweek == day)
if not weekday_mask.any():
continue
traded_weekdays.append(day_str)
used_weekdays = full_idx[weekday_mask].normalize()
all_weekdays = pd.date_range(full_idx.min(), full_idx.max(),
freq=CustomBusinessDay(weekmask=day_str)
).normalize()
_holidays = all_weekdays.difference(used_weekdays)
_holidays = [timestamp.date() for timestamp in _holidays]
holidays.extend(_holidays)
traded_weekdays = ' '.join(traded_weekdays)
return CustomBusinessDay(weekmask=traded_weekdays, holidays=holidays) | Infer the trading calendar from factor and price information.
Parameters
----------
factor_idx : pd.DatetimeIndex
The factor datetimes for which we are computing the forward returns
prices_idx : pd.DatetimeIndex
The prices datetimes associated with the factor data
Returns
-------
calendar : pd.DateOffset |
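A sketch of the inference on a small index pair; the dates are illustrative.

```python
import pandas as pd

factor_idx = pd.DatetimeIndex(['2019-01-02', '2019-01-03', '2019-01-07'])
prices_idx = pd.DatetimeIndex(['2019-01-02', '2019-01-03', '2019-01-04',
                               '2019-01-07', '2019-01-08'])

cal = infer_trading_calendar(factor_idx, prices_idx)
# cal is a CustomBusinessDay whose weekmask is the weekdays actually seen
# ('Mon Tue Wed Thu Fri' here) and whose holidays are the in-range weekdays
# that never appear in either index
pd.date_range('2019-01-02', '2019-01-08', freq=cal)
```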
7,531 | def optimisation_plot(d, overlay_alpha=0.5, **kwargs):
if not hasattr(d, 'opt'):
raise ValueError('Please run signal_optimiser before optimisation_plot.')  # assumed message text
out = []
for n, opt in d.opt.items():
if not opt['success']:  # assumed key name; literal lost
out.append((None, None))
else:
means = opt['means']  # dict keys assumed to mirror the variable names
stds = opt['stds']
min_points = opt['min_points']
mean_threshold = opt['mean_threshold']
std_threshold = opt['std_threshold']
opt_centre = opt['opt_centre']
opt_n_points = opt['opt_n_points']
centres, npoints = np.meshgrid(np.arange(means.shape[1]), np.arange(min_points, min_points + means.shape[0]))
rind = (stds < std_threshold)
mind = (means < mean_threshold)
mlim = np.percentile(means.flatten()[~np.isnan(means.flatten())], (0, 99))
rlim = np.percentile(stds.flatten()[~np.isnan(stds.flatten())], (0, 99))
cmr = plt.cm.Blues
cmr.set_bad((0,0,0,0.3))
cmm = plt.cm.Reds
cmm.set_bad((0,0,0,0.3))
fig = plt.figure(figsize=[7,7])
ma = fig.add_subplot(3, 2, 1)
ra = fig.add_subplot(3, 2, 2)
nonan = np.argwhere(~np.isnan(means))
xdif = np.ptp(nonan[:, 1])
ydif = np.ptp(nonan[:, 0])
extent = (nonan[:, 1].min() - np.ceil(0.1 * xdif),
nonan[:, 1].max() + np.ceil(0.1 * xdif),
nonan[:, 0].min() + min_points,
nonan[:, 0].max() + np.ceil(0.1 * ydif) + min_points)
mm = ma.imshow(means, origin='lower', cmap=cmm, vmin=mlim[0], vmax=mlim[1],
extent=(centres.min(), centres.max(), npoints.min(), npoints.max()))
ma.set_ylabel('N points')  # assumed axis label
ma.set_xlabel('Centre')  # assumed axis label
fig.colorbar(mm, ax=ma, label='Mean')  # assumed colorbar label
mr = ra.imshow(stds, origin='lower', cmap=cmr, vmin=rlim[0], vmax=rlim[1],
extent=(centres.min(), centres.max(), npoints.min(), npoints.max()))
ra.set_xlabel('Centre')  # assumed axis label
fig.colorbar(mr, ax=ra, label='Std')  # assumed colorbar label
ra.imshow(~rind, origin='lower', cmap=plt.cm.Greys, alpha=overlay_alpha,
extent=(centres.min(), centres.max(), npoints.min(), npoints.max()))
ma.imshow(~mind, origin='lower', cmap=plt.cm.Greys, alpha=overlay_alpha,
extent=(centres.min(), centres.max(), npoints.min(), npoints.max()))
for ax in [ma, ra]:
ax.scatter(opt_centre, opt_n_points, c=(1,1,1,0.7), edgecolor='k', marker='o')  # assumed style
ax.set_xlim(extent[:2])
ax.set_ylim(extent[-2:])
mah = fig.add_subplot(3, 2, 3)
rah = fig.add_subplot(3, 2, 4)
mah.set_xlim(mlim)
mbin = np.linspace(*mah.get_xlim(), 50)
mah.hist(means.flatten()[~np.isnan(means.flatten())], mbin)
mah.axvspan(mean_threshold, mah.get_xlim()[1], color=(0,0,0,overlay_alpha))
mah.axvline(mean_threshold, c='r')  # assumed line colour
mah.set_xlabel('Mean')  # assumed axis label
mah.set_ylabel('N')  # assumed axis label
rah.set_xlim(rlim)
rbin = np.linspace(*rah.get_xlim(), 50)
rah.hist(stds.flatten()[~np.isnan(stds.flatten())], rbin)
rah.axvspan(std_threshold, rah.get_xlim()[1], color=(0,0,0,0.4))
rah.axvline(std_threshold, c='r')  # assumed line colour
rah.set_xlabel('Std')  # assumed axis label
tax = fig.add_subplot(3,1,3)
tplot(d, opt.analytes, ax=tax, **kwargs)
tax.axvspan(*d.Time[[opt.lims[0], opt.lims[1]]], alpha=0.2)
tax.set_xlim(d.Time[d.ns == n].min() - 3, d.Time[d.ns == n].max() + 3)
fig.tight_layout()
out.append((fig, (ma, ra, mah, rah, tax)))
return out | Plot the result of signal_optimise.
`signal_optimiser` must be run first, and the output
stored in the `opt` attribute of the latools.D object.
Parameters
----------
d : latools.D object
A latools data object.
overlay_alpha : float
The opacity of the threshold overlays. Between 0 and 1.
**kwargs
Passed to `tplot` |
7,532 | def _set_sharing_keys(self, keys):
if isinstance(keys, str):
keys = {keys}
keys = set(self) if keys is None else set(keys)
fmto_groups = self._fmto_groups
keys.update(chain(*(map(lambda fmto: fmto.key, fmto_groups[key])
for key in keys.intersection(fmto_groups))))
keys.difference_update(fmto_groups)
return keys | Set the keys to share or unshare
Parameters
----------
keys: string or iterable of strings
The iterable may contain formatoptions that shall be shared (or
unshared), or group names of formatoptions to share all
formatoptions of that group (see the :attr:`fmt_groups` property).
If None, all formatoptions of this plotter are inserted.
Returns
-------
set
The set of formatoptions to share (or unshare) |
7,533 | def _valcache_lookup(self, cache, branch, turn, tick):
if branch in cache:
branc = cache[branch]
try:
if turn in branc and branc[turn].rev_gettable(tick):
return branc[turn][tick]
elif branc.rev_gettable(turn-1):
turnd = branc[turn-1]
return turnd[turnd.end]
except HistoryError as ex:
if ex.deleted:
raise
for b, r, t in self.db._iter_parent_btt(branch, turn, tick):
if b in cache:
if r in cache[b] and cache[b][r].rev_gettable(t):
try:
return cache[b][r][t]
except HistoryError as ex:
if ex.deleted:
raise
elif cache[b].rev_gettable(r-1):
cbr = cache[b][r-1]
try:
return cbr[cbr.end]
except HistoryError as ex:
if ex.deleted:
raise | Return the value at the given time in ``cache`` |
7,534 | def class_method(cls, f):
setattr(cls, f.__name__, classmethod(f))
return f | Decorator which dynamically binds class methods to the model for later use. |
7,535 | def monitor(result_queue, broker=None):
if not broker:
broker = get_broker()
name = current_process().name
logger.info(_("{} monitoring at {}").format(name, current_process().pid))
for task in iter(result_queue.get, 'STOP'):
if task.get('cached', False):
save_cached(task, broker)
else:
save_task(task, broker)
ack_id = task.pop('ack_id', False)
if ack_id and (task['success'] or task.get('ack_failure', False)):
broker.acknowledge(ack_id)
if task['success']:
logger.info(_("Processed [{}]").format(task['name']))
else:
logger.error(_("Failed [{}] - {}").format(task['name'], task['result']))
logger.info(_("{} stopped monitoring results").format(name)) | Gets finished tasks from the result queue and saves them to Django
:type result_queue: multiprocessing.Queue |
7,536 | def push_plugin(self, name):
url = self._url('/plugins/{0}/push', name)
headers = {}
registry, repo_name = auth.resolve_repository_name(name)
header = auth.get_config_header(self, registry)
if header:
headers['X-Registry-Auth'] = header
res = self._post(url, headers=headers)
self._raise_for_status(res)
return self._stream_helper(res, decode=True) | Push a plugin to the registry.
Args:
name (string): Name of the plugin to upload. The ``:latest``
tag is optional, and is the default if omitted.
Returns:
``True`` if successful |
7,537 | def preprocess_incoming_content(content, encrypt_func, max_size_bytes):
encrypted = encrypt_func(content)
if max_size_bytes != UNLIMITED and len(encrypted) > max_size_bytes:
raise FileTooLarge()
return encrypted | Apply preprocessing steps to file/notebook content that we're going to
write to the database.
Applies ``encrypt_func`` to ``content`` and checks that the result is
smaller than ``max_size_bytes``. |
7,538 | def write_short_ascii(s):
if s is None:
return _NULL_SHORT_STRING
if not isinstance(s, string_types):
raise TypeError('{!r} is not text'.format(s))  # assumed message text
return write_short_bytes(s.encode('ascii')) | Encode a Kafka short string which represents text.
:param str s:
Text string (`str` on Python 3, `str` or `unicode` on Python 2) or
``None``. The string will be ASCII-encoded.
:returns: length-prefixed `bytes`
:raises:
`struct.error` for strings longer than 32767 characters |
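For reference, a sketch of the length-prefixed framing implied by the docstring, assuming write_short_bytes packs a signed big-endian int16 length (with -1 for None) before the payload:

```python
import struct

def write_short_bytes_sketch(b):
    """Kafka short bytes: signed big-endian int16 length, then the payload."""
    if b is None:
        return struct.pack('>h', -1)           # null is encoded as length -1
    if len(b) > 32767:
        raise struct.error('string too long')  # must fit a signed int16
    return struct.pack('>h', len(b)) + b

write_short_bytes_sketch(b'hello')  # b'\x00\x05hello'
```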
7,539 | def addEqLink(self, link):
if isinstance(link, EqLink):
self.eqLinks.append(link)
else:
raise TypeError('link must be an EqLink, got %s'  # assumed message text
% type(link)) | Appends an EqLink. |
7,540 | def retrieve_value(self, name, default_value=None):
value = self.spine.send_query("retrieveSetting", self.group, name, processes=["kervi-main"])
if value is None:
return default_value
elif isinstance(value, list) and len(value) == 0:
return default_value
elif isinstance(default_value, int):
return int(value)
elif isinstance(default_value, float):
return float(value)
else:
return value | Retrieve a value from DB |
7,541 | def set_yaxis(self, param, unit=None, label=None):
if unit is None:
unit = self._getParLabelAndUnit(param)[1]
self._yaxis_unit = unit
self._yaxis = self._set_axis(param, unit)
if label is None:
self.ylabel = self._gen_label(param, unit)
else:
self.ylabel = label | Sets the value to use on the yaxis
:param param: value to use on the yaxis, should be a variable or function of the objects in objectList. ie 'R'
for the radius variable and 'calcDensity()' for the calcDensity function
:param unit: the unit to scale the values to
:type unit: quantities unit or None
:param label: axis label to use, if None "Parameter (Unit)" is generated here and used
:type label: str |
7,542 | def head(self, n=None):
if n is None:
rs = self.head(1)
return rs[0] if rs else None
return self.take(n) | Returns the first ``n`` rows.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
:param n: int, default 1. Number of rows to return.
:return: If n is greater than 1, return a list of :class:`Row`.
If n is 1, return a single Row.
>>> df.head()
Row(age=2, name=u'Alice')
>>> df.head(1)
[Row(age=2, name=u'Alice')] |
7,543 | def __get_nondirect_init(self, init):
crc = init
for i in range(self.Width):
bit = crc & 0x01
if bit:
crc ^= self.Poly
crc >>= 1
if bit:
crc |= self.MSB_Mask
return crc & self.Mask | return the non-direct init if the direct algorithm has been selected. |
7,544 | def process_quote(self, data):
for ix, row in data.iterrows():
symbol = row['code']  # field names below assumed from the Futu quote API
tick = self._tick_dict.get(symbol, None)
if not tick:
tick = TinyQuoteData()
tick.symbol = symbol
self._tick_dict[symbol] = tick
tick.date = row['data_date'].replace('-', '')
tick.time = row['data_time']
if tick.date and tick.time:
tick.datetime = datetime.strptime(' '.join([tick.date, tick.time]), '%Y%m%d %H:%M:%S')
else:
return
tick.openPrice = row['open_price']
tick.highPrice = row['high_price']
tick.lowPrice = row['low_price']
tick.preClosePrice = row['prev_close_price']
if 'price_spread' in row:
tick.priceSpread = row['price_spread']
tick.lastPrice = row['last_price']
tick.volume = row['volume']
new_tick = copy(tick)
self._notify_new_tick_event(new_tick) | Quote push: handle an incoming market quote update. |
7,545 | def call_remoteckan(self, *args, **kwargs):
requests_kwargs = kwargs.get('requests_kwargs', dict())
credentials = self._get_credentials()
if credentials:
requests_kwargs['auth'] = credentials
kwargs['requests_kwargs'] = requests_kwargs
apikey = kwargs.get('apikey', self.get_api_key())
kwargs['apikey'] = apikey
return self.remoteckan().call_action(*args, **kwargs) | Calls the remote CKAN
Args:
*args: Arguments to pass to remote CKAN call_action method
**kwargs: Keyword arguments to pass to remote CKAN call_action method
Returns:
Dict: The response from the remote CKAN call_action method |
7,546 | def copy(self, graph):
e = events(graph, self._ctx)
e.clicked = self.clicked
return e | Returns a copy of the event handler, remembering the last node clicked. |
7,547 | def stop(self):
self._hw_virtualization = False
yield from self._stop_ubridge()
yield from self._stop_remote_console()
vm_state = yield from self._get_vm_state()
if vm_state == "running" or vm_state == "paused" or vm_state == "stuck":
if self.acpi_shutdown:
result = yield from self._control_vm("acpipowerbutton")
trial = 0
while True:
vm_state = yield from self._get_vm_state()
if vm_state == "poweroff":
break
yield from asyncio.sleep(1)
trial += 1
if trial >= 120:
yield from self._control_vm("poweroff")
break
self.status = "stopped"
log.debug("ACPI shutdown result: {}".format(result))
else:
result = yield from self._control_vm("poweroff")
self.status = "stopped"
log.debug("Stop result: {}".format(result))
log.info("VirtualBox VM [{id}] stopped".format(name=self.name, id=self.id))
yield from asyncio.sleep(0.5)
try:
yield from self._modify_vm("--uart1 off")
except VirtualBoxError as e:
log.warn("Could not deactivate the first serial port: {}".format(e))
for adapter_number in range(0, self._adapters):
nio = self._ethernet_adapters[adapter_number].get_nio(0)
if nio:
yield from self._modify_vm("--nictrace{} off".format(adapter_number + 1))
yield from self._modify_vm("--cableconnected{} off".format(adapter_number + 1))
yield from self._modify_vm("--nic{} null".format(adapter_number + 1))
yield from super().stop() | Stops this VirtualBox VM. |
7,548 | def tool(self):
htablettool = self._libinput.libinput_event_tablet_tool_get_tool(
self._handle)
return TabletTool(htablettool, self._libinput) | The tool that was in use during this event.
If the caller keeps a reference to a tool, the tool object will
compare equal to the previously obtained tool object.
Note:
Physical tool tracking requires hardware support. If unavailable,
libinput creates one tool per type per tablet. See
`Tracking unique tools`_ for more details.
Returns:
~libinput.define.TabletTool: The new tool triggering this event. |
7,549 | def _sample_batch():
if _sample_probability == 1.0 or random.random() < _sample_probability:
return True
for database in _measurements:
_measurements[database] = _measurements[database][_max_batch_size:]
return False | Determine if a batch should be processed and if not, pop off all of
the pending metrics for that batch.
:rtype: bool |
7,550 | def return_markers(self, state='MarkerValueInt'):  # assumed default state name; literal lost
markers = []
try:
all_states = self._read_states()
except ValueError:
return markers
try:
x = all_states[state]
except KeyError:
return markers
markers = []
i_mrk = hstack((0, where(diff(x))[0] + 1, len(x)))
for i0, i1 in zip(i_mrk[:-1], i_mrk[1:]):
marker = {'name': str(x[i0]),
'start': i0 / self.s_freq,
'end': i1 / self.s_freq,
}
markers.append(marker)
return markers | Return all the markers (also called triggers or events).
Returns
-------
list of dict
where each dict contains 'name' as str, 'start' and 'end' as float
in seconds from the start of the recordings, and 'chan' as list of
str with the channels involved (if not of relevance, it's None).
Raises
------
FileNotFoundError
when it cannot read the events for some reason (don't use other
exceptions). |
7,551 | def from_Composition(composition):
if not hasattr(composition, 'tracks'):
return False
result = '\\header { title = "%s" composer = "%s" opus = "%s" } ' \
% (composition.title, composition.author, composition.subtitle)
for track in composition.tracks:
result += from_Track(track) + ' '
return result[:-1] | Return the LilyPond equivalent of a Composition in a string. |
7,552 | def dict_factory(cursor, row):
out = {}
for i, col in enumerate(cursor.description):
out[col[0]] = row[i]
return out | Converts the cursor information from a SQLite query to a dictionary.
:param cursor | <sqlite3.Cursor>
row | <sqlite3.Row>
:return {<str> column: <variant> value, ..} |
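Classic usage is as a sqlite3 row_factory, so every fetched row comes back as a dict keyed by column name:

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.row_factory = dict_factory  # the function above

cur = conn.execute("SELECT 1 AS id, 'abc' AS name")
print(cur.fetchone())  # {'id': 1, 'name': 'abc'}
```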
7,553 | def _render_select(selections):
if not selections:
return 'SELECT *'  # assumed default when nothing is selected; literal lost
rendered_selections = []
for name, options in selections.items():
if not isinstance(options, list):
options = [options]
original_name = name
for options_dict in options:
name = original_name
alias = options_dict.get('alias')
alias = "as %s" % alias if alias else ""
formatter = options_dict.get('formatter')
if formatter:
name = _format_select(formatter, name)
rendered_selections.append("%s %s" % (name, alias))
return "SELECT " + ", ".join(rendered_selections) | Render the selection part of a query.
Parameters
----------
selections : dict
Selections for a table
Returns
-------
str
A string for the "select" part of a query
See Also
--------
render_query : Further clarification of `selections` dict formatting |
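A sketch of the selections dict it consumes, assuming the 'alias' and 'formatter' option keys restored above:

```python
selections = {
    'status': {},                   # bare column
    'start_time': {'alias': 'ts'},  # aliased column
}
_render_select(selections)
# -> 'SELECT status , start_time as ts' (order follows dict iteration)
```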
7,554 | def entrance_beveled(Di, l, angle, method='Rennels'):
if method is None:
method = 'Rennels'
if method == 'Rennels':
Cb = (1-angle/90.)*(angle/90.)**(1./(1 + l/Di ))
lbd = 1 + 0.622*(1 - 1.5*Cb*(l/Di)**((1 - (l/Di)**0.25)/2.))
return 0.0696*(1 - Cb*l/Di)*lbd**2 + (lbd - 1.)**2
elif method == 'Idelchik':
return float(entrance_beveled_Idelchik_obj(angle*2.0, l/Di))
else:
raise ValueError('Specified method not recognized; methods are %s'
%(entrance_beveled_methods)) | r'''Returns loss coefficient for a beveled or chamfered entrance to a pipe
flush with the wall of a reservoir. This calculation has two methods
available.
The 'Rennels' and 'Idelchik' methods have similar trends, but the 'Rennels'
formulation is centered around a straight loss coefficient of 0.57, so it
is normally at least 0.07 higher.
The Rennels [1]_ formulas are:
.. math::
K = 0.0696\left(1 - C_b\frac{l}{d}\right)\lambda^2 + (\lambda-1)^2
.. math::
\lambda = 1 + 0.622\left[1-1.5C_b\left(\frac{l}{d}
\right)^{\frac{1-(l/d)^{1/4}}{2}}\right]
.. math::
C_b = \left(1 - \frac{\theta}{90}\right)\left(\frac{\theta}{90}
\right)^{\frac{1}{1+l/d}}
.. figure:: fittings/flush_mounted_beveled_entrance.png
:scale: 30 %
:alt: Beveled entrace mounted straight; after [1]_
Parameters
----------
Di : float
Inside diameter of pipe, [m]
l : float
Length of bevel measured parallel to the pipe length, [m]
angle : float
Angle of bevel with respect to the pipe length, [degrees]
method : str, optional
One of 'Rennels', or 'Idelchik', [-]
Returns
-------
K : float
Loss coefficient [-]
Notes
-----
A cheap way of getting a lower pressure drop.
Little credible data is available.
The table of data in [2]_ uses the angle for both bevels, so it runs from 0
to 180 degrees; this function follows the convention in [1]_ which uses
only one angle, with the angle varying from 0 to 90 degrees.
.. plot:: plots/entrance_beveled.py
Examples
--------
>>> entrance_beveled(Di=0.1, l=0.003, angle=45)
0.45086864221916984
>>> entrance_beveled(Di=0.1, l=0.003, angle=45, method='Idelchik')
0.3995000000000001
References
----------
.. [1] Rennels, Donald C., and Hobart M. Hudson. Pipe Flow: A Practical
and Comprehensive Guide. 1st edition. Hoboken, N.J: Wiley, 2012.
.. [2] Idel’chik, I. E. Handbook of Hydraulic Resistance: Coefficients of
Local Resistance and of Friction (Spravochnik Po Gidravlicheskim
Soprotivleniyam, Koeffitsienty Mestnykh Soprotivlenii i Soprotivleniya
Treniya). National technical information Service, 1966. |
7,555 | def main(
output_file: str,
entry_point: Optional[str],
console_script: Optional[str],
python: Optional[str],
site_packages: Optional[str],
compressed: bool,
compile_pyc: bool,
extend_pythonpath: bool,
pip_args: List[str],
) -> None:
if not pip_args and not site_packages:
sys.exit(NO_PIP_ARGS_OR_SITE_PACKAGES)
if output_file is None:
sys.exit(NO_OUTFILE)
for disallowed in DISALLOWED_ARGS:
for supplied_arg in pip_args:
if supplied_arg in disallowed:
sys.exit(
DISALLOWED_PIP_ARGS.format(
arg=supplied_arg, reason=DISALLOWED_ARGS[disallowed]
)
)
with TemporaryDirectory() as working_path:
tmp_site_packages = Path(working_path, "site-packages")
if site_packages:
shutil.copytree(site_packages, tmp_site_packages)
if pip_args:
pip.install(["--target", str(tmp_site_packages)] + list(pip_args))
if entry_point is None and console_script is not None:
try:
entry_point = find_entry_point(tmp_site_packages, console_script)
except KeyError:
if not Path(tmp_site_packages, "bin", console_script).exists():
sys.exit(NO_ENTRY_POINT.format(entry_point=console_script))
env = Environment(
build_id=str(uuid.uuid4()),
entry_point=entry_point,
script=console_script,
compile_pyc=compile_pyc,
extend_pythonpath=extend_pythonpath,
)
Path(working_path, "environment.json").write_text(env.to_json())
bootstrap_target = Path(working_path, "_bootstrap")
bootstrap_target.mkdir(parents=True, exist_ok=True)
copy_bootstrap(bootstrap_target)
builder.create_archive(
Path(working_path),
target=Path(output_file).expanduser(),
interpreter=python or _interpreter_path(),
main="_bootstrap:bootstrap",
compressed=compressed,
) | Shiv is a command line utility for building fully self-contained Python zipapps
as outlined in PEP 441, but with all their dependencies included! |
7,556 | def _update_rr_ce_entry(self, rec):
if rec.rock_ridge is not None and rec.rock_ridge.dr_entries.ce_record is not None:
celen = rec.rock_ridge.dr_entries.ce_record.len_cont_area
added_block, block, offset = self.pvd.add_rr_ce_entry(celen)
rec.rock_ridge.update_ce_block(block)
rec.rock_ridge.dr_entries.ce_record.update_offset(offset)
if added_block:
return self.pvd.logical_block_size()
return 0 | An internal method to update the Rock Ridge CE entry for the given
record.
Parameters:
rec - The record to update the Rock Ridge CE entry for (if it exists).
Returns:
The number of additional bytes needed for this Rock Ridge CE entry. |
7,557 | def str_strip(arr, to_strip=None, side='both'):
if side == 'both':
f = lambda x: x.strip(to_strip)
elif side == 'left':
f = lambda x: x.lstrip(to_strip)
elif side == 'right':
f = lambda x: x.rstrip(to_strip)
else:
raise ValueError('Invalid side')
return _na_map(f, arr) | Strip whitespace (including newlines) from each string in the
Series/Index.
Parameters
----------
to_strip : str or unicode
side : {'left', 'right', 'both'}, default 'both'
Returns
-------
Series or Index |
7,558 | def _build(self, build_method):
logger.info("building image ", self.image)
self.ensure_not_built()
self.temp_dir = tempfile.mkdtemp()
temp_path = os.path.join(self.temp_dir, BUILD_JSON)
try:
with open(temp_path, 'w') as build_json:
json.dump(self.build_args, build_json)
self.build_container_id = build_method(self.build_image, self.temp_dir)
try:
logs_gen = self.dt.logs(self.build_container_id, stream=True)
wait_for_command(logs_gen)
return_code = self.dt.wait(self.build_container_id)
except KeyboardInterrupt:
logger.info("killing build container on user's request")
self.dt.remove_container(self.build_container_id, force=True)
results = BuildResults()
results.return_code = 1
return results
else:
results = self._load_results(self.build_container_id)
results.return_code = return_code
return results
finally:
shutil.rmtree(self.temp_dir) | build image from provided build_args
:return: BuildResults |
7,559 | def continue_login(self, login_token, **params):
login_params = {
'action': "clientlogin",
'logintoken': login_token,
'logincontinue': 1
}
login_params.update(params)
login_doc = self.post(**login_params)
if login_doc['clientlogin']['status'] != 'PASS':
raise LoginError.from_doc(login_doc['clientlogin'])
return login_doc['clientlogin'] | Continues a login that requires an additional step. This is common
for when login requires completing a captcha or supplying a two-factor
authentication token.
:Parameters:
login_token : `str`
A login token generated by the MediaWiki API (and used in a
previous call to login())
params : `mixed`
A set of parameters to include with the request. This depends
on what "requests" for additional information were made by the
MediaWiki API. |
7,560 | def get_requirements():
requirements_file = os.path.join(os.getcwd(), 'requirements.txt')
requirements = []
links=[]
try:
with open(requirements_file) as reqfile:
for line in reqfile.readlines():
line = line.strip()
if line.startswith('#'):
continue
elif line.startswith(
('https://', 'http://', 'git://', 'git+')):  # assumed URL/VCS prefixes; literals lost
links.append(line)
else:
requirements.append(line)
except (IOError, OSError) as error:
print(error)
if python26():
requirements.append('argparse')  # assumed py2.6 backport; literal lost
print("Not installing %s on PyPy..." % line)
requirements.remove(line)
return requirements, links | Extract the list of requirements from our requirements.txt.
:rtype: 2-tuple
:returns: Two lists, the first is a list of requirements in the form of
pkgname==version. The second is a list of URIs or VCS checkout strings
which specify the dependency links for obtaining a copy of the
requirement. |
7,561 | def ask_captcha(length=4):
captcha = "".join(random.choice(string.ascii_lowercase) for _ in range(length))
ask_str('Type in the captcha: %s' % (captcha), vld=[captcha, captcha.upper()], blk=False)  # prompt text assumed | Prompts the user for a random string. |
7,562 | def get_activities(self, before=None, after=None, limit=None):
if before:
before = self._utc_datetime_to_epoch(before)
if after:
after = self._utc_datetime_to_epoch(after)
params = dict(before=before, after=after)
result_fetcher = functools.partial(self.protocol.get,
'/athlete/activities',
**params)
return BatchedResultsIterator(entity=model.Activity,
bind_client=self,
result_fetcher=result_fetcher,
limit=limit) | Get activities for authenticated user sorted by newest first.
http://strava.github.io/api/v3/activities/
:param before: Result will start with activities whose start date is
before specified date. (UTC)
:type before: datetime.datetime or str or None
:param after: Result will start with activities whose start date is after
specified value. (UTC)
:type after: datetime.datetime or str or None
:param limit: How many maximum activities to return.
:type limit: int or None
:return: An iterator of :class:`stravalib.model.Activity` objects.
:rtype: :class:`BatchedResultsIterator` |
7,563 | def id(self, value):
i = value.rfind('/')  # assumed separator; literal lost
if (i > 0):
self.server_and_prefix = value[:i]
self.identifier = value[(i + 1):]
elif (i == 0):
self.server_and_prefix = ''
self.identifier = value[(i + 1):]
else:
self.server_and_prefix = ''
self.identifier = value | Split into server_and_prefix and identifier. |
7,564 | def FindModuleDefiningFlag(self, flagname, default=None):
registered_flag = self.FlagDict().get(flagname)
if registered_flag is None:
return default
for module, flags in six.iteritems(self.FlagsByModuleDict()):
for flag in flags:
if (flag.name == registered_flag.name and
flag.short_name == registered_flag.short_name):
return module
return default | Return the name of the module defining this flag, or default.
Args:
flagname: Name of the flag to lookup.
default: Value to return if flagname is not defined. Defaults
to None.
Returns:
The name of the module which registered the flag with this name.
If no such module exists (i.e. no flag with this name exists),
we return default. |
7,565 | def import_data_to_restful_server(args, content):
nni_config = Config(get_config_filename(args))
rest_port = nni_config.get_config('restServerPort')
running, _ = check_rest_server_quick(rest_port)
if running:
response = rest_post(import_data_url(rest_port), content, REST_TIME_OUT)
if response and check_response(response):
return response
else:
print_error('Import data failed!')  # assumed message text
return None | call restful server to import data to the experiment |
7,566 | def run_migrations_online():
engine = engine_from_config(
winchester_config['database'],  # assumed config section; literal lost
prefix='sqlalchemy.',  # assumed prefix; literal lost
poolclass=pool.NullPool)
connection = engine.connect()
context.configure(
connection=connection,
target_metadata=target_metadata
)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close() | Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context. |
7,567 | def timing(self, stat, value, tags=None):
self.histogram(stat, value, tags) | Measure a timing for statistical distribution.
Note: timing is a special case of histogram. |
7,568 | def start(self):
result = self.__enter__()
self._active_patches.append(self)
return result | Activate a patch, returning any created mock. |
7,569 | def _get_flavor():
target = op.join("seqcluster", "flavor")
url = "https://github.com/lpantano/seqcluster.git"
if not os.path.exists(target):
subprocess.check_call(["git", "clone","-b", "flavor", "--single-branch", url])
return op.abspath(target) | Download flavor from github |
7,570 | def getService(self, name, auto_execute=True):
if not isinstance(name, basestring):
raise TypeError('name must be a string')  # assumed message text
return ServiceProxy(self, name, auto_execute) | Returns a L{ServiceProxy} for the supplied name. Sets up an object that
can have method calls made to it that build the AMF requests.
@rtype: L{ServiceProxy} |
7,571 | def _find_match(self, position):
document = self._text_edit.document()
start_char = document.characterAt(position)
search_char = self._opening_map.get(start_char)
if search_char:
increment = 1
else:
search_char = self._closing_map.get(start_char)
if search_char:
increment = -1
else:
return -1
char = start_char
depth = 0
while position >= 0 and position < document.characterCount():
if char == start_char:
depth += 1
elif char == search_char:
depth -= 1
if depth == 0:
break
position += increment
char = document.characterAt(position)
else:
position = -1
return position | Given a valid position in the text document, try to find the
position of the matching bracket. Returns -1 if unsuccessful. |
7,572 | def _split_generators(self, dl_manager):
split_names = list_folders(dl_manager.manual_dir)
split_label_images = {}
for split_name in split_names:
split_dir = os.path.join(dl_manager.manual_dir, split_name)
split_label_images[split_name] = {
label_name: list_imgs(os.path.join(split_dir, label_name))
for label_name in list_folders(split_dir)
}
labels = [split.keys() for split in split_label_images.values()]
labels = list(sorted(set(itertools.chain(*labels))))
image_paths = [
image_paths
for label_images in split_label_images.values()
for image_paths in label_images.values()
]
if any(f.lower().endswith(".png") for f in itertools.chain(*image_paths)):
encoding_format = "png"
else:
encoding_format = "jpeg"
self.info.features["image"].set_encoding_format(encoding_format)
self.info.features["label"].names = labels
def num_examples(label_images):
return sum(len(imgs) for imgs in label_images.values())
return [
tfds.core.SplitGenerator(
name=split_name,
num_shards=min(10, max(num_examples(label_images) // 1000, 1)),
gen_kwargs=dict(label_images=label_images,),
) for split_name, label_images in split_label_images.items()
] | Returns SplitGenerators from the folder names. |
7,573 | def register(self, event_type, callback,
args=None, kwargs=None, details_filter=None,
weak=False):
if not six.callable(callback):
raise ValueError("Event callback must be callable")
if details_filter is not None:
if not six.callable(details_filter):
raise ValueError("Details filter must be callable")
if not self.can_be_registered(event_type):
raise ValueError("Disallowed event type can not have a"
" callback registered" % event_type)
if kwargs:
for k in self.RESERVED_KEYS:
if k in kwargs:
raise KeyError("Reserved key not allowed in "
"kwargs" % k)
with self._lock:
if self.is_registered(event_type, callback,
details_filter=details_filter):
raise ValueError("Event callback already registered with"
" equivalent details filter")
listener = Listener(_make_ref(callback, weak=weak),
args=args, kwargs=kwargs,
details_filter=details_filter,
weak=weak)
listeners = self._topics.setdefault(event_type, [])
listeners.append(listener)
return listener | Register a callback to be called when event of a given type occurs.
Callback will be called with provided ``args`` and ``kwargs`` and
when event type occurs (or on any event if ``event_type`` equals to
:attr:`.ANY`). It will also get additional keyword argument,
``details``, that will hold event details provided to the
:meth:`.notify` method (if a details filter callback is provided then
the target callback will *only* be triggered if the details filter
callback returns a truthy value).
:param event_type: event type to get triggered on
:param callback: function callback to be registered.
:param args: non-keyworded arguments
:type args: list
:param kwargs: key-value pair arguments
:type kwargs: dictionary
:param weak: if the callback retained should be referenced via
a weak reference or a strong reference (defaults to
holding a strong reference)
:type weak: bool
:returns: the listener that was registered
:rtype: :py:class:`~.Listener` |
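A usage sketch; the class name Notifier and the callback signature (event type plus a details argument) are assumptions based on the docstring:

```python
def on_completed(event_type, details):
    print("handling", event_type, details)

notifier = Notifier()  # hypothetical owner of register()/notify()
notifier.register('completed', on_completed)
notifier.notify('completed', {'result': 42})  # -> handling completed {'result': 42}

# a details filter gates delivery: the callback fires only when it returns truthy
notifier.register('completed', on_completed,
                  details_filter=lambda details: details.get('result') is not None)
```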
7,574 | def notify_program_learners(cls, enterprise_customer, program_details, users):
program_name = program_details.get('title')  # dict keys below assumed from edX program metadata
program_branding = program_details.get('type')
program_uuid = program_details.get('uuid')
lms_root_url = get_configuration_value_for_site(
enterprise_customer.site,
'LMS_ROOT_URL',
settings.LMS_ROOT_URL
)
program_path = urlquote(
'/dashboard/programs/{program_uuid}/?tpa_hint={tpa_hint}'.format(  # assumed path template
program_uuid=program_uuid,
tpa_hint=enterprise_customer.identity_provider,
)
)
destination_url = '{site}/{login_or_register}?next={program_path}'.format(  # assumed URL template
site=lms_root_url,
login_or_register='{login_or_register}',
program_path=program_path
)
program_type = 'program'
program_start = get_earliest_start_date_from_program(program_details)
with mail.get_connection() as email_conn:
for user in users:
login_or_register = 'register' if isinstance(user, PendingEnterpriseCustomerUser) else 'login'
destination_url = destination_url.format(login_or_register=login_or_register)
send_email_notification_message(
user=user,
enrolled_in={
'name': program_name,
'url': destination_url,
'type': program_type,
'start': program_start,
'branding': program_branding,
},
enterprise_customer=enterprise_customer,
email_connection=email_conn
) | Notify learners about a program in which they've been enrolled.
Args:
enterprise_customer: The EnterpriseCustomer being linked to
program_details: Details about the specific program the learners were enrolled in
users: An iterable of the users or pending users who were enrolled |
7,575 | def tm(seq, dna_conc=50, salt_conc=50, parameters='cloning'):
if parameters == 'breslauer':
params = tm_params.BRESLAUER
elif parameters == 'sugimoto':
params = tm_params.SUGIMOTO
elif parameters == 'santalucia96':
params = tm_params.SANTALUCIA96
elif parameters == 'santalucia98' or parameters == 'cloning_sl98':
params = tm_params.SANTALUCIA98
elif parameters == 'cloning':
params = tm_params.CLONING
else:
raise ValueError('Unsupported parameter set.')
pars = {'delta_h': params['delta_h'], 'delta_s': params['delta_s']}
pars_error = {'delta_h': params['delta_h_err'],
'delta_s': params['delta_s_err']}
if parameters == 'breslauer':
deltas = breslauer_corrections(seq, pars_error)
elif parameters == 'sugimoto':
deltas = breslauer_corrections(seq, pars_error)
elif parameters == 'santalucia96':
deltas = breslauer_corrections(seq, pars_error)
elif parameters == 'santalucia98' or parameters == 'cloning_sl98':
deltas = santalucia98_corrections(seq, pars_error)
elif parameters == 'cloning':
deltas = breslauer_corrections(seq, pars_error)
deltas[0] += 3.4
deltas[1] += 12.4
seq = str(seq).upper()
if 'N' in seq:
raise ValueError("Can't calculate Tm of an N base.")
# salt correction - doesn't match theirs exactly
salt_correction = 16.6 * log10(salt_conc)
# numerator/denominator: total delta H over total delta S plus the
# DNA-concentration term (computation elided in this excerpt)
melt = numerator / denominator + salt_correction - 273.15
if parameters == 'cloning_sl98':
melt *= 1.27329212575
melt += -2.55585450119
return melt | Calculate nearest-neighbor melting temperature (Tm).
:param seq: Sequence for which to calculate the tm.
:type seq: coral.DNA
:param dna_conc: DNA concentration in nM.
:type dna_conc: float
:param salt_conc: Salt concentration in mM.
:type salt_conc: float
:param parameters: Nearest-neighbor parameter set. Available options:
'breslauer': Breslauer86 parameters
'sugimoto': Sugimoto96 parameters
'santalucia96': SantaLucia96 parameters
'santalucia98': SantaLucia98 parameters
'cloning': breslauer without corrections
'cloning_sl98': santalucia98 fit to 'cloning'
:type parameters: str
:returns: Melting temperature (Tm) in °C.
:rtype: float
:raises: ValueError if parameter argument is invalid. |
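A usage sketch for tm (assumes the coral package is installed; the sequence is hypothetical):

import coral
seq = coral.DNA('ATGCATGCATGCATGC')
print(tm(seq, dna_conc=50, salt_conc=50, parameters='santalucia98'))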
7,576 | def on_mouse_wheel(self, event):
rotation = event.GetWheelRotation() / event.GetWheelDelta()
if rotation > 0:
zoom = 1.0/(1.1 * rotation)
elif rotation < 0:
zoom = 1.1 * (-rotation)
self.change_zoom(zoom)
self.redraw_map() | handle mouse wheel zoom changes |
7,577 | def del_repo(repo, root=None):
repos_cfg = _get_configured_repos(root=root)
for alias in repos_cfg.sections():
if alias == repo:
doc = __zypper__(root=root).xml.call('rr', '--loose-auth', '--loose-query', alias)
msg = doc.getElementsByTagName('message')
if doc.getElementsByTagName('progress') and msg:
return {
repo: True,
'message': msg[0].childNodes[0].nodeValue,
}
raise CommandExecutionError('Repository \'{0}\' not found.'.format(repo))
root
operate on a different root directory.
CLI Examples:
.. code-block:: bash
salt '*' pkg.del_repo alias |
7,578 | def update_anomalous_score(self):
products = self._graph.retrieve_products(self)
diffs = [
p.summary.difference(self._graph.retrieve_review(self, p))
for p in products
]
old = self.anomalous_score
try:
self.anomalous_score = np.average(
diffs, weights=list(map(self._credibility, products)))
except ZeroDivisionError:
self.anomalous_score = np.average(diffs)
return abs(self.anomalous_score - old) | Update anomalous score.
New anomalous score is a weighted average of differences
between current summary and reviews. The weights come from credibilities.
Therefore, the new anomalous score of reviewer :math:`r` is
.. math::
{\\rm anomalous}(r) = \\frac{
\\sum_{p \\in P} {\\rm credibility}(p)|
{\\rm review}(r, p)-{\\rm summary}(p)|
}{
\\sum_{p \\in P} {\\rm credibility}(p)
}
where :math:`P` is the set of products reviewed by reviewer :math:`r`,
review(:math:`r`, :math:`p`) is the rating reviewer :math:`r` posted
to product :math:`p`, summary(:math:`p`) and credibility(:math:`p`) are
summary and credibility of product :math:`p`, respectively.
Returns:
absolute difference between old anomalous score and updated one. |
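A standalone numeric sketch of the weighted average defined above (all values hypothetical):

import numpy as np
diffs = [0.2, 0.5, 0.1]          # |review(r, p) - summary(p)| per product
credibilities = [1.0, 0.5, 2.0]  # credibility(p) per product
anomalous = np.average(diffs, weights=credibilities)
print(anomalous)  # (0.2*1.0 + 0.5*0.5 + 0.1*2.0) / 3.5 ≈ 0.186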
7,579 | def write(self, more):
if more:
self.output += str(more).upper()
self.output += '\n' | Append the Unicode representation of `more` to our output.
7,580 | def get_lbaas_agent_hosting_loadbalancer(self, loadbalancer, **_params):
return self.get((self.lbaas_loadbalancer_path +
self.LOADBALANCER_HOSTING_AGENT) % loadbalancer,
params=_params) | Fetches a loadbalancer agent hosting a loadbalancer. |
7,581 | def draw_identity_line(ax=None, dynamic=True, **kwargs):
ax = ax or plt.gca()
if 'c' not in kwargs and 'color' not in kwargs:
kwargs['color'] = LINE_COLOR
if 'alpha' not in kwargs:
kwargs['alpha'] = 0.5
identity, = ax.plot([],[], **kwargs)
def callback(ax):
xlim = ax.get_xlim()
ylim = ax.get_ylim()
data = (
max(xlim[0], ylim[0]), min(xlim[1], ylim[1])
)
identity.set_data(data, data)
callback(ax)
if dynamic:
ax.callbacks.connect('xlim_changed', callback)
ax.callbacks.connect('ylim_changed', callback)
return ax | Draws a 45 degree identity line such that y=x for all points within the
given axes x and y limits. This function also registers a callback so
that as the figure is modified, the axes are updated and the line remains
drawn correctly.
Parameters
----------
ax : matplotlib Axes, default: None
The axes to plot the figure on. If None is passed in the current axes
will be used (or generated if required).
dynamic : bool, default : True
If the plot is dynamic, callbacks will be registered to update the
identiy line as axes are changed.
kwargs : dict
Keyword arguments to pass to the matplotlib plot function to style the
identity line.
Returns
-------
ax : matplotlib Axes
The axes with the line drawn on it.
Notes
-----
.. seealso:: `StackOverflow discussion: Does matplotlib have a function for drawing diagonal lines in axis coordinates? <https://stackoverflow.com/questions/22104256/does-matplotlib-have-a-function-for-drawing-diagonal-lines-in-axis-coordinates>`_ |
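A usage sketch (scatter data are hypothetical; extra kwargs are forwarded to ax.plot to style the line):

import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.scatter([1, 2, 3], [1.1, 1.9, 3.2])
draw_identity_line(ax=ax, ls='--')
plt.show()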
7,582 | def get_form_language(self, request, obj=None):
if self._has_translatable_parent_model():
return super(TranslatableInlineModelAdmin, self).get_form_language(request, obj=obj)
else:
return self._language(request) | Return the current language for the currently displayed object fields. |
7,583 | def _pname_and_metadata(in_file):
if os.path.isfile(in_file):
with open(in_file) as in_handle:
md, global_vars = _parse_metadata(in_handle)
base = os.path.splitext(os.path.basename(in_file))[0]
md_file = in_file
elif objectstore.is_remote(in_file):
with objectstore.open_file(in_file) as in_handle:
md, global_vars = _parse_metadata(in_handle)
base = os.path.splitext(os.path.basename(in_file))[0]
md_file = None
else:
if in_file.endswith(".csv"):
raise ValueError("Did not find input metadata file: %s" % in_file)
base, md, global_vars = _safe_name(os.path.splitext(os.path.basename(in_file))[0]), {}, {}
md_file = None
return _safe_name(base), md, global_vars, md_file | Retrieve metadata and project name from the input metadata CSV file.
Uses the input file name for the project name and, for backwards compatibility,
accepts the project name as an input, providing no metadata. |
7,584 | def from_response(raw_response):
json_response = raw_response.json()
error_info = json_response["error"]
code = error_info["code"]
try:
error_cls = _error_map[code]
except KeyError:
raise NotImplementedError(
"Unknown error code returned in Yelp API response. "
"This code may have been newly added. Please ensure you are "
"using the latest version of the yelp-python library, and if "
"so, create a new issue at https://github.com/Yelp/yelp-python "
"to add support for this error.".format(code)
)
else:
return error_cls(raw_response, **error_info) | The Yelp Fusion API returns error messages with a json body
like:
{
'error': {
'code': 'ALL_CAPS_CODE',
'description': 'Human readable description.'
}
}
Some errors may have additional fields. For example, a
validation error:
{
'error': {
'code': 'VALIDATION_ERROR',
'description': "'en_USS' does not match '^[a-z]{2,3}_[A-Z]{2}$'",
'field': 'locale',
'instance': 'en_USS'
}
} |
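A self-contained sketch of the code-to-exception lookup this method relies on (the class and map contents are illustrative, not the library's real definitions):

class YelpAPIError(Exception):
    def __init__(self, raw_response, code, description, **extra_fields):
        super().__init__(description)
        self.code = code
        self.extra_fields = extra_fields  # e.g. 'field', 'instance'

_error_map = {
    'VALIDATION_ERROR': YelpAPIError,
    'ALL_CAPS_CODE': YelpAPIError,
}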
7,585 | def affine(self, func:AffineFunc, *args, **kwargs)->'Image':
"Equivalent to `image.affine_mat = image.affine_mat @ func()`."
m = tensor(func(*args, **kwargs)).to(self.device)
self.affine_mat = self.affine_mat @ m
return self | Equivalent to `image.affine_mat = image.affine_mat @ func()`. |
7,586 | def _fasta_slice(fasta, seqid, start, stop, strand):
_strand = 1 if strand == '+' else -1
return fasta.sequence({'chr': seqid, 'start': start, 'stop': stop, \
'strand': _strand})
7,587 | def demacronize(string_matrix: List[List[str]]) -> List[List[str]]:
scansion = ScansionConstants()
accent_dropper = str.maketrans(scansion.ACCENTED_VOWELS, scansion.VOWELS)
return [[word.translate(accent_dropper)
for word in sentence]
for sentence in string_matrix] | Transform macronized vowels into normal vowels
:param string_matrix: a data matrix: a list wrapping a list of strings, with each sublist being a sentence.
:return: string_matrix
>>> demacronize([['ōdī', 'et', 'amō',]])
[['odi', 'et', 'amo']] |
7,588 | def destroy(self):
logger.info("Destroying doc: %s" % self.path)
self.fs.rm_rf(self.path)
logger.info("Done") | Delete the document. The *whole* document. There will be no survivors. |
7,589 | def tenengrad(img, ksize=3):
Gx = cv2.Sobel(img, ddepth=cv2.CV_64F, dx=1, dy=0, ksize=ksize)
Gy = cv2.Sobel(img, ddepth=cv2.CV_64F, dx=0, dy=1, ksize=ksize)
FM = Gx*Gx + Gy*Gy
mn = cv2.mean(FM)[0]
if np.isnan(mn):
return np.nanmean(FM)
return mn | TENG' algorithm (Krotkov86) |
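Usage sketch (the image path is hypothetical; a larger value indicates a sharper image):

import cv2
img = cv2.imread('photo.png', cv2.IMREAD_GRAYSCALE)
print('focus measure:', tenengrad(img, ksize=3))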
7,590 | def plot_importance(booster, ax=None, height=0.2,
xlim=None, ylim=None, title='Feature importance',
xlabel='Feature importance', ylabel='Features',
importance_type='split', max_num_features=None,
ignore_zero=True, figsize=None, grid=True,
precision=None, **kwargs):
if MATPLOTLIB_INSTALLED:
import matplotlib.pyplot as plt
else:
raise ImportError('You must install matplotlib to plot importance.')
if isinstance(booster, LGBMModel):
booster = booster.booster_
elif not isinstance(booster, Booster):
raise TypeError('booster must be Booster or LGBMModel.')
importance = booster.feature_importance(importance_type=importance_type)
feature_name = booster.feature_name()
if not len(importance):
raise ValueError("Boosterfigsizecentergaincenterxlimylim')
else:
ylim = (-1, len(values))
ax.set_ylim(ylim)
if title is not None:
ax.set_title(title)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
ax.grid(grid)
return ax | Plot model's feature importances.
Parameters
----------
booster : Booster or LGBMModel
Booster or LGBMModel instance which feature importance should be plotted.
ax : matplotlib.axes.Axes or None, optional (default=None)
Target axes instance.
If None, new figure and axes will be created.
height : float, optional (default=0.2)
Bar height, passed to ``ax.barh()``.
xlim : tuple of 2 elements or None, optional (default=None)
Tuple passed to ``ax.xlim()``.
ylim : tuple of 2 elements or None, optional (default=None)
Tuple passed to ``ax.ylim()``.
title : string or None, optional (default="Feature importance")
Axes title.
If None, title is disabled.
xlabel : string or None, optional (default="Feature importance")
X-axis title label.
If None, title is disabled.
ylabel : string or None, optional (default="Features")
Y-axis title label.
If None, title is disabled.
importance_type : string, optional (default="split")
How the importance is calculated.
If "split", result contains numbers of times the feature is used in a model.
If "gain", result contains total gains of splits which use the feature.
max_num_features : int or None, optional (default=None)
Max number of top features displayed on plot.
If None or <1, all features will be displayed.
ignore_zero : bool, optional (default=True)
Whether to ignore features with zero importance.
figsize : tuple of 2 elements or None, optional (default=None)
Figure size.
grid : bool, optional (default=True)
Whether to add a grid for axes.
precision : int or None, optional (default=None)
Used to restrict the display of floating point values to a certain precision.
**kwargs
Other parameters passed to ``ax.barh()``.
Returns
-------
ax : matplotlib.axes.Axes
The plot with model's feature importances. |
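Usage sketch with a tiny synthetic dataset:

import numpy as np
import lightgbm as lgb
X, y = np.random.rand(100, 5), np.random.rand(100)
model = lgb.LGBMRegressor(n_estimators=10).fit(X, y)
ax = plot_importance(model, importance_type='gain', max_num_features=3)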
7,591 | def populate_requirement_set(requirement_set,
args,
options,
finder,
session,
name,
wheel_cache
):
for filename in options.constraints:
for req_to_add in parse_requirements(
filename,
constraint=True, finder=finder, options=options,
session=session, wheel_cache=wheel_cache):
req_to_add.is_direct = True
requirement_set.add_requirement(req_to_add)
for req in args:
req_to_add = install_req_from_line(
req, None, isolated=options.isolated_mode,
use_pep517=options.use_pep517,
wheel_cache=wheel_cache
)
req_to_add.is_direct = True
requirement_set.add_requirement(req_to_add)
for req in options.editables:
req_to_add = install_req_from_editable(
req,
isolated=options.isolated_mode,
use_pep517=options.use_pep517,
wheel_cache=wheel_cache
)
req_to_add.is_direct = True
requirement_set.add_requirement(req_to_add)
for filename in options.requirements:
for req_to_add in parse_requirements(
filename,
finder=finder, options=options, session=session,
wheel_cache=wheel_cache,
use_pep517=options.use_pep517):
req_to_add.is_direct = True
requirement_set.add_requirement(req_to_add)
requirement_set.require_hashes = options.require_hashes
if not (args or options.editables or options.requirements):
opts = {'name': name}
if options.find_links:
raise CommandError(
'You must give at least one requirement to %(name)s '
'(maybe you meant "pip %(name)s %(links)s"?)' %
dict(opts, links=' '.join(options.find_links)))
else:
raise CommandError(
'You must give at least one requirement to %(name)s '
'(see "pip help %(name)s")' % opts) | Marshal cmd line args into a requirement set.
7,592 | async def register(self):
url = '{0}/Sessions'.format(self.construct_url(API_URL))
params = {'api_key': self._api_key}
reg = await self.api_request(url, params)
if reg is None:
self._registered = False
_LOGGER.error('Unable to register client.')
else:
self._registered = True
_LOGGER.info('Client registered, id: %s', self.unique_id)
self._sessions = reg
self.update_device_list(self._sessions)
asyncio.ensure_future(self.socket_connection(), loop=self._event_loop) | Register library device id and get initial device list. |
7,593 | def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True,
group_keys=True, squeeze=False):
from sparklingpandas.groupby import GroupBy
return GroupBy(self, by=by, axis=axis, level=level, as_index=as_index,
sort=sort, group_keys=group_keys, squeeze=squeeze) | Returns a groupby on the schema rdd. This returns a GroupBy object.
Note that grouping by a column name will be faster than most other
options due to implementation. |
7,594 | def processed(self):
self.processed_tasks += 1
qsize = self.tasks.qsize()
if qsize > 0:
progress('Processed %d tasks (%d queued, %d workers)', self.processed_tasks, qsize, len(self.workers))
else:
progress('Processed %d tasks (%d workers)', self.processed_tasks, len(self.workers))
7,595 | def get_families_by_ids(self, family_ids=None):
if family_ids is None:
raise NullArgument()
families = []
for i in family_ids:
family = None
url_path = '/handcar/services/relationship/families/' + str(i)
try:
family = self._get_request(url_path)
except (NotFound, OperationFailed):
if self._family_view == PLENARY:
raise
else:
pass
if family:
if not (self._family_view == COMPARATIVE and
family in families):
families.append(family)
return objects.FamilyList(families) | Gets a ``FamilyList`` corresponding to the given ``IdList``.
In plenary mode, the returned list contains all of the families
specified in the ``Id`` list, in the order of the list,
including duplicates, or an error results if an ``Id`` in the
supplied list is not found or inaccessible. Otherwise,
inaccessible families may be omitted from the list and may
present the elements in any order including returning a unique
set.
arg: family_ids (osid.id.IdList): the list of ``Ids`` to
retrieve
return: (osid.relationship.FamilyList) - the returned ``Family
list``
raise: NotFound - an ``Id`` was not found
raise: NullArgument - ``family_ids`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* |
7,596 | async def handle(self, record):
if (not self.disabled) and self.filter(record):
await self.callHandlers(record) | Call the handlers for the specified record.
This method is used for unpickled records received from a socket, as
well as those created locally. Logger-level filtering is applied. |
7,597 | def copy_data_from_remote(args, nni_config, trial_content, path_list, host_list, temp_nni_path):
machine_list = nni_config.get_config('experimentConfig').get('machineList')
machine_dict = {}
local_path_list = []
for machine in machine_list:
machine_dict[machine['ip']] = {'port': machine['port'], 'passwd': machine['passwd'], 'username': machine['username']}
for index, host in enumerate(host_list):
local_path = os.path.join(temp_nni_path, trial_content[index].get('id'))
local_path_list.append(local_path)
print_normal('Copying data from %s to %s' % (host + ':' + path_list[index], local_path))
sftp = create_ssh_sftp_client(host, machine_dict[host]['port'], machine_dict[host]['username'], machine_dict[host]['passwd'])
copy_remote_directory_to_local(sftp, path_list[index], local_path)
print_normal('Copy done!')
return local_path_list | use ssh client to copy data from remote machine to local machine
7,598 | def _integrate(self, time_steps, capture_elements, return_timestamps):
outputs = []
for t2 in time_steps[1:]:
if self.time() in return_timestamps:
outputs.append({key: getattr(self.components, key)() for key in capture_elements})
self._euler_step(t2 - self.time())
self.time.update(t2)
if self.time() in return_timestamps:
outputs.append({key: getattr(self.components, key)() for key in capture_elements})
return outputs | Performs euler integration
Parameters
----------
time_steps: iterable
the time steps that the integrator progresses over
capture_elements: list
which model elements to capture - uses pysafe names
return_timestamps:
which subset of 'timesteps' should have values returned?
Returns
-------
outputs: list of dictionaries |
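A standalone sketch of the same Euler stepping pattern on dy/dt = -y (names are illustrative):

def euler(f, y0, time_steps, return_timestamps):
    y, t, out = y0, time_steps[0], []
    for t2 in time_steps[1:]:
        if t in return_timestamps:
            out.append((t, y))
        y += (t2 - t) * f(y)  # one euler step, mirroring _euler_step
        t = t2
    if t in return_timestamps:
        out.append((t, y))
    return out

print(euler(lambda y: -y, 1.0, [0, 0.5, 1.0], {0, 1.0}))  # [(0, 1.0), (1.0, 0.25)]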
7,599 | def focusInEvent(self, event):
self.focus_changed.emit()
return super(ControlWidget, self).focusInEvent(event) | Reimplement Qt method to send focus change notification |