Unnamed: 0 (int64, 0–389k) | code (string, lengths 26–79.6k) | docstring (string, lengths 1–46.9k)
---|---|---|
24,400 |
def colorbar(self, mappable=None, **kwargs):
fig = self.get_figure()
if kwargs.get('use_axesgrid', True):
kwargs.setdefault('fraction', 0.)
if kwargs.get('fraction', 0.) == 0.:
kwargs.setdefault('use_axesgrid', True)
mappable, kwargs = gcbar.process_colorbar_kwargs(
fig, mappable=mappable, ax=self, **kwargs)
if isinstance(fig, Plot):
kwargs['use_axesgrid'] = False
return fig.colorbar(mappable, **kwargs)
|
Add a `~matplotlib.colorbar.Colorbar` to these `Axes`
Parameters
----------
mappable : matplotlib data collection, optional
collection against which to map the colouring, default will
be the last added mappable artist (collection or image)
fraction : `float`, optional
fraction of space to steal from these `Axes` to make space
for the new axes, default is ``0.`` if ``use_axesgrid=True``
is given (default), otherwise default is ``.15`` to match
the upstream matplotlib default.
**kwargs
other keyword arguments to be passed to the
:meth:`Plot.colorbar` generator
Returns
-------
cbar : `~matplotlib.colorbar.Colorbar`
the newly added `Colorbar`
See Also
--------
Plot.colorbar
|
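A minimal usage sketch for the method above (hypothetical data; assumes a gwpy-style `Plot` figure whose axes provide this `colorbar`):
import numpy as np
from gwpy.plot import Plot  # assumed import path for the Plot class referenced above

plot = Plot()
ax = plot.gca()
mesh = ax.imshow(np.random.rand(32, 32))   # any mappable artist works
cbar = ax.colorbar(mappable=mesh)          # steals no space when use_axesgrid is enabled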
24,401 |
def clear_citation(self):
self.citation.clear()
if self.citation_clearing:
self.evidence = None
self.annotations.clear()
|
Clear the citation and if citation clearing is enabled, clear the evidence and annotations.
|
24,402 |
def handleStatus(self, version, code, message):
"extends handleStatus to instantiate a local response object"
proxy.ProxyClient.handleStatus(self, version, code, message)
self._response = client.Response(version, code, message, {}, None)
|
extends handleStatus to instantiate a local response object
|
24,403 |
def start(self):
if os.path.isfile(self.pid):
with open(self.pid, "r") as old_pidfile:
old_pid = old_pidfile.read()
try:
lockfile = open(self.pid, "w")
except IOError:
print("Unable to create the pidfile.")
sys.exit(1)
try:
fcntl.flock(lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
print("Unable to lock on the pidfile.")
with open(self.pid, "w") as pidfile:
pidfile.write(old_pid)
sys.exit(1)
if not self.foreground:
try:
process_id = os.fork()
except OSError as e:
self.logger.error("Unable to fork, errno: {0}".format(e.errno))
sys.exit(1)
if process_id != 0:
if self.keep_fds:
os._exit(0)
else:
sys.exit(0)
process_id = os.setsid()
if process_id == -1:
sys.exit(1)
self.keep_fds.append(lockfile.fileno())
devnull = "/dev/null"
if hasattr(os, "devnull"):
devnull = os.devnull
if self.auto_close_fds:
for fd in range(3, resource.getrlimit(resource.RLIMIT_NOFILE)[0]):
if fd not in self.keep_fds:
try:
os.close(fd)
except OSError:
pass
devnull_fd = os.open(devnull, os.O_RDWR)
os.dup2(devnull_fd, 0)
os.dup2(devnull_fd, 1)
os.dup2(devnull_fd, 2)
os.close(devnull_fd)
if self.logger is None:
self.logger = logging.getLogger(self.app)
self.logger.setLevel(logging.DEBUG)
self.logger.propagate = False
if sys.platform == "darwin":
syslog_address = "/var/run/syslog"
else:
syslog_address = "/dev/log"
if os.path.exists(syslog_address):
syslog = handlers.SysLogHandler(syslog_address)
if self.verbose:
syslog.setLevel(logging.DEBUG)
else:
syslog.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s %(name)s: %(message)s",
"%b %e %H:%M:%S")
syslog.setFormatter(formatter)
self.logger.addHandler(syslog)
os.umask(0o27)
# run the privileged action (if any) before dropping privileges; its result feeds self.action() below
privileged_action_result = self.privileged_action() or []
uid, gid = -1, -1
if self.group:
try:
gid = grp.getgrnam(self.group).gr_gid
except KeyError:
self.logger.error("Group {0} not found".format(self.group))
sys.exit(1)
if self.user:
try:
uid = pwd.getpwnam(self.user).pw_uid
except KeyError:
self.logger.error("User {0} not found.".format(self.user))
sys.exit(1)
if uid != -1 or gid != -1:
os.chown(self.pid, uid, gid)
if self.group:
try:
os.setgid(gid)
except OSError:
self.logger.error("Unable to change gid.")
sys.exit(1)
if self.user:
try:
uid = pwd.getpwnam(self.user).pw_uid
except KeyError:
self.logger.error("User {0} not found.".format(self.user))
sys.exit(1)
try:
os.setuid(uid)
except OSError:
self.logger.error("Unable to change uid.")
sys.exit(1)
try:
lockfile.write("%s" % (os.getpid()))
lockfile.flush()
except IOError:
self.logger.error("Unable to write pid to the pidfile.")
print("Unable to write pid to the pidfile.")
sys.exit(1)
signal.signal(signal.SIGTERM, self.sigterm)
atexit.register(self.exit)
self.logger.warning("Starting daemon.")
try:
self.action(*privileged_action_result)
except Exception:
for line in traceback.format_exc().split("\n"):
self.logger.error(line)
|
Start daemonization process.
|
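For context, a minimal sketch of how a `start()` like this is typically driven (names follow the `daemonize` package; treat the exact API as an assumption):
from daemonize import Daemonize

def main():
    # the long-running work of the daemon goes here
    while True:
        pass

daemon = Daemonize(app="example_app", pid="/tmp/example_app.pid", action=main)
daemon.start()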
24,404 |
def build(self, format=, path=):
if kiwi is None:
msg =
log.error(msg)
raise CommandExecutionError(msg)
raise CommandExecutionError("Build is not yet implemented")
|
Build an image using Kiwi.
:param format:
:param path:
:return:
|
24,405 |
def get_log_events(awsclient, log_group_name, log_stream_name, start_ts=None):
client_logs = awsclient.get_client('logs')
request = {
'logGroupName': log_group_name,
'logStreamName': log_stream_name
}
if start_ts:
request['startTime'] = start_ts
response = client_logs.get_log_events(**request)
if 'events' in response and response['events']:
return [{'timestamp': e['timestamp'], 'message': e['message']}
for e in response['events']]
|
Get log events for the specified log group and stream.
this is used in tenkai output instance diagnostics
:param log_group_name: log group name
:param log_stream_name: log stream name
:param start_ts: timestamp
:return:
|
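A sketch of the underlying CloudWatch Logs call this helper appears to wrap (plain boto3; the group and stream names are placeholders):
import boto3

logs = boto3.client("logs")
resp = logs.get_log_events(logGroupName="my-group", logStreamName="my-stream")
events = [{"timestamp": e["timestamp"], "message": e["message"]}
          for e in resp.get("events", [])]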
24,406 |
def run_command(self, config_file):
config = configparser.ConfigParser()
config.read(config_file)
rdbms = config.get(, ).lower()
wrapper = self.create_routine_wrapper_generator(rdbms)
wrapper.main(config_file)
|
:param str config_file: The name of config file.
|
24,407 |
def AgregarFlete(self, descripcion, importe):
"Agrega la información referente al flete de la liquidación (opcional)"
flete = dict(descripcion=descripcion, importe=importe)
self.solicitud['flete'] = flete
return True
|
Adds the information regarding the freight of the settlement (optional).
|
24,408 |
def withNamedValues(cls, **values):
enums = set(cls.namedValues.items())
enums.update(values.items())
class X(cls):
namedValues = namedval.NamedValues(*enums)
subtypeSpec = cls.subtypeSpec + constraint.SingleValueConstraint(
*values.values())
X.__name__ = cls.__name__
return X
|
Create a subclass with discrete named values constraint.
Reduce fully duplicate enumerations along the way.
|
24,409 |
def version(self, content_type="*/*"):
v = ""
accept_header = self.get_header('Accept', "")
if accept_header:
a = AcceptHeader(accept_header)
for mt in a.filter(content_type):
v = mt[2].get("version", "")
if v: break
return v
|
Versioning is based on this post:
http://urthen.github.io/2013/05/09/ways-to-version-your-api/
|
24,410 |
def scan(audio_filepaths, *, album_gain=False, skip_tagged=False, thread_count=None, ffmpeg_path=None, executor=None):
r128_data = {}
with contextlib.ExitStack() as cm:
if executor is None:
if thread_count is None:
try:
thread_count = len(os.sched_getaffinity(0))
except AttributeError:
thread_count = os.cpu_count()
enable_ffmpeg_threading = thread_count > (len(audio_filepaths) + int(album_gain))
executor = cm.enter_context(concurrent.futures.ThreadPoolExecutor(max_workers=thread_count))
asynchronous = False
else:
enable_ffmpeg_threading = False
asynchronous = True
loudness_tags = tuple(map(has_loudness_tag, audio_filepaths))
audio_filepaths = tuple(audio_filepath for (audio_filepath,
has_tags) in zip(audio_filepaths,
loudness_tags) if has_tags is not None)
loudness_tags = tuple(filter(None, loudness_tags))
futures = {}
if album_gain:
if skip_tagged and all(map(operator.itemgetter(1), loudness_tags)):
logger().info("All files already have an album gain tag, skipping album gain scan")
elif audio_filepaths:
calc_album_peak = any(map(lambda x: os.path.splitext(x)[-1].lower() != ".opus",
audio_filepaths))
futures[ALBUM_GAIN_KEY] = executor.submit(get_r128_loudness,
audio_filepaths,
calc_peak=calc_album_peak,
enable_ffmpeg_threading=enable_ffmpeg_threading,
ffmpeg_path=ffmpeg_path)
for audio_filepath in audio_filepaths:
if skip_tagged and has_loudness_tag(audio_filepath)[0]:
logger().info("File already has a track gain tag, skipping track gain scan" % (audio_filepath))
continue
if os.path.splitext(audio_filepath)[-1].lower() == ".opus":
calc_peak = False
else:
calc_peak = True
futures[audio_filepath] = executor.submit(get_r128_loudness,
(audio_filepath,),
calc_peak=calc_peak,
enable_ffmpeg_threading=enable_ffmpeg_threading,
ffmpeg_path=ffmpeg_path)
if asynchronous:
return futures
for audio_filepath in audio_filepaths:
try:
r128_data[audio_filepath] = futures[audio_filepath].result()
except KeyError:
pass
except Exception as e:
logger().warning("Failed to analyze file : %s %s" % (audio_filepath,
e.__class__.__qualname__,
e))
if album_gain and audio_filepaths:
try:
r128_data[ALBUM_GAIN_KEY] = futures[ALBUM_GAIN_KEY].result()
except KeyError:
pass
except Exception as e:
logger().warning("Failed to analyze files %s: %s %s" % (", ".join("" % (audio_filepath) for audio_filepath in audio_filepaths),
e.__class__.__qualname__,
e))
return r128_data
|
Analyze files, and return a dictionary of filepath to loudness metadata or filepath to future if executor is not None.
|
24,411 |
def normalise_key(self, key):
key = key.replace(, )
if key.startswith("noy_"):
key = key[4:]
return key
|
Make sure key is a valid python attribute
|
24,412 |
def look_up(self, **keys: Dict[InstanceName, ScalarValue]) -> "ArrayEntry":
if not isinstance(self.schema_node, ListNode):
raise InstanceValueError(self.json_pointer(), "lookup on non-list")
try:
for i in range(len(self.value)):
en = self.value[i]
flag = True
for k in keys:
if en[k] != keys[k]:
flag = False
break
if flag:
return self._entry(i)
raise NonexistentInstance(self.json_pointer(), "entry lookup failed")
except KeyError:
raise NonexistentInstance(self.json_pointer(), "entry lookup failed") from None
except TypeError:
raise InstanceValueError(self.json_pointer(), "lookup on non-list") from None
|
Return the entry with matching keys.
Args:
keys: Keys and values specified as keyword arguments.
Raises:
InstanceValueError: If the receiver's value is not a YANG list.
NonexistentInstance: If no entry with matching keys exists.
|
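The same linear key-matching idea, shown standalone on a plain list of dicts (illustrative only):
entries = [{"name": "eth0", "mtu": 1500}, {"name": "eth1", "mtu": 9000}]
keys = {"name": "eth1"}
# index of the first entry whose fields all match the requested keys, else None
match = next((i for i, e in enumerate(entries)
              if all(e.get(k) == v for k, v in keys.items())), None)
print(match)  # 1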
24,413 |
def get_service_references(self, clazz, ldap_filter=None):
refs = self.__framework.find_service_references(clazz, ldap_filter)
if refs:
for ref in refs:
if ref.get_bundle() is not self.__bundle:
refs.remove(ref)
return refs
|
Returns the service references for services that were registered under
the specified class by this bundle and matching the given filter
:param clazz: The class name with which the service was registered.
:param ldap_filter: A filter on service properties
:return: The list of references to the services registered by the
calling bundle and matching the filters.
|
24,414 |
def add_back_ref(self, back_ref, attr=None):
back_ref.add_ref(self, attr)
return self.fetch()
|
Add reference from back_ref to self
:param back_ref: back_ref to add
:type back_ref: Resource
:rtype: Resource
|
24,415 |
def write(data, path, saltenv='base', index=0):
if saltenv not in __opts__['file_roots']:
return .format(saltenv)
if len(__opts__['file_roots'][saltenv]) <= index:
return .format(
index, saltenv)
if os.path.isabs(path):
return (
).format(path, saltenv)
dest = os.path.join(__opts__['file_roots'][saltenv][index], path)
dest_dir = os.path.dirname(dest)
if not os.path.isdir(dest_dir):
os.makedirs(dest_dir)
with salt.utils.files.fopen(dest, 'w') as fp_:
fp_.write(salt.utils.stringutils.to_str(data))
return .format(dest)
|
Write the named file, by default the first file found is written, but the
index of the file can be specified to write to a lower priority file root
|
24,416 |
def _prepare_graph(g, g_colors, q_cls, q_arg, adjust_graph):
g = _test_graph(g)
if adjust_graph:
pos = nx.get_node_attributes(g, 'pos')
ans = nx.to_dict_of_dicts(g)
g = adjacency2graph(ans, adjust=2, is_directed=g.is_directed())
g = QueueNetworkDiGraph(g)
if len(pos) > 0:
g.set_pos(pos)
g.new_vertex_property()
g.new_vertex_property()
g.new_vertex_property()
g.new_vertex_property()
g.new_edge_property()
g.new_edge_property()
g.new_edge_property()
g.new_edge_property()
queues = _set_queues(g, q_cls, q_arg, in g.vertex_properties())
if not in g.vertex_properties():
g.set_pos()
for k, e in enumerate(g.edges()):
g.set_ep(e, 'edge_pen_width', 1.25)
g.set_ep(e, 'edge_marker_size', 8)
if e[0] == e[1]:
g.set_ep(e, , queues[k].colors[])
else:
g.set_ep(e, , queues[k].colors[])
for v in g.nodes():
g.set_vp(v, 'vertex_pen_width', 1)
g.set_vp(v, 'vertex_size', 8)
e = (v, v)
if g.is_edge(e):
g.set_vp(v, , queues[g.edge_index[e]]._current_color(2))
g.set_vp(v, , queues[g.edge_index[e]]._current_color())
else:
g.set_vp(v, , g_colors[])
g.set_vp(v, , g_colors[])
return g, queues
|
Prepares a graph for use in :class:`.QueueNetwork`.
This function is called by ``__init__`` in the
:class:`.QueueNetwork` class. It creates the :class:`.QueueServer`
instances that sit on the edges, and sets various edge and node
properties that are used when drawing the graph.
Parameters
----------
g : :any:`networkx.DiGraph`, :class:`numpy.ndarray`, dict, \
``None``, etc.
Any object that networkx can turn into a
:any:`DiGraph<networkx.DiGraph>`
g_colors : dict
A dictionary of colors. The specific keys used are
``vertex_color`` and ``vertex_fill_color`` for vertices that
do not have any loops. See :class:`.QueueNetwork` for the
default values passed.
q_cls : dict
A dictionary where the keys are integers that represent an edge
type, and the values are :class:`.QueueServer` classes.
q_args : dict
A dictionary where the keys are integers that represent an edge
type, and the values are the arguments that are used when
creating an instance of that :class:`.QueueServer` class.
adjust_graph : bool
Specifies whether the graph will be adjusted using
:func:`.adjacency2graph`.
Returns
-------
g : :class:`.QueueNetworkDiGraph`
queues : list
A list of :class:`QueueServers<.QueueServer>` where
``queues[k]`` is the ``QueueServer`` that sits on the edge with
edge index ``k``.
Notes
-----
The graph ``g`` should have the ``edge_type`` edge property map.
If it does not then an ``edge_type`` edge property is
created and set to 1.
The following properties are set by each queue: ``vertex_color``,
``vertex_fill_color``, ``edge_color``.
See :class:`.QueueServer` for more on setting these values.
The following properties are assigned as a properties to the graph;
their default values for each edge or vertex is shown:
* ``vertex_pen_width``: ``1``,
* ``vertex_size``: ``8``,
* ``edge_control_points``: ``[]``
* ``edge_marker_size``: ``8``
* ``edge_pen_width``: ``1.25``
Raises
------
TypeError
Raised when the parameter ``g`` is not of a type that can be
made into a :any:`networkx.DiGraph`.
|
24,417 |
def hkl_transformation(transf, miller_index):
lcm = lambda a, b: a * b // math.gcd(a, b)
reduced_transf = reduce(lcm, [int(1 / i) for i in itertools.chain(*transf) if i != 0]) * transf
reduced_transf = reduced_transf.astype(int)
t_hkl = np.dot(reduced_transf, miller_index)
d = abs(reduce(gcd, t_hkl))
t_hkl = np.array([int(i / d) for i in t_hkl])
if len([i for i in t_hkl if i < 0]) > 1:
t_hkl *= -1
return tuple(t_hkl)
|
Returns the Miller index from setting
A to B using a transformation matrix
Args:
transf (3x3 array): The transformation matrix
that transforms a lattice of A to B
miller_index ([h, k, l]): Miller index to transform to setting B
|
24,418 |
def unlink(self):
logger.debug("Unlinking %s", self.pid_filename)
try:
os.unlink(self.pid_filename)
except OSError as exp:
logger.debug("Got an error unlinking our pid file: %s", exp)
|
Remove the daemon's pid file
:return: None
|
24,419 |
def _handle_response(response, server_config, synchronous=False, timeout=None):
response.raise_for_status()
if synchronous is True and response.status_code == ACCEPTED:
return ForemanTask(
server_config, id=response.json()['id']).poll(timeout=timeout)
if response.status_code == NO_CONTENT:
return
if 'application/json' in response.headers.get('content-type', '').lower():
return response.json()
elif isinstance(response.content, bytes):
return response.content.decode()
else:
return response.content
|
Handle a server's response in a typical fashion.
Do the following:
1. Check the server's response for an HTTP status code indicating an error.
2. Poll the server for a foreman task to complete if an HTTP 202 (accepted)
status code is returned and ``synchronous is True``.
3. Immediately return if an HTTP "NO CONTENT" response is received.
4. Determine what type of the content returned from server. Depending on
the type method should return server's response, with all JSON decoded
or just response content itself.
:param response: A response object as returned by one of the functions in
:mod:`nailgun.client` or the requests library.
:param server_config: A `nailgun.config.ServerConfig` object.
:param synchronous: Should this function poll the server?
:param timeout: Maximum number of seconds to wait until timing out.
Defaults to ``nailgun.entity_mixins.TASK_TIMEOUT``.
|
24,420 |
def avatar_url_from_openid(openid, size=64, default=, dns=False):
if dns:
import libravatar
return libravatar.libravatar_url(
openid=openid,
size=size,
default=default,
)
else:
params = _ordered_query_params([('s', size), ('d', default)])
query = parse.urlencode(params)
hash = sha256(openid.encode()).hexdigest()
return "https://seccdn.libravatar.org/avatar/%s?%s" % (hash, query)
|
Our own implementation since fas doesn't support this nicely yet.
|
24,421 |
def write_update(rootfs_filepath: str,
progress_callback: Callable[[float], None],
chunk_size: int = 1024,
file_size: int = None) -> RootPartitions:
unused = _find_unused_partition()
part_path = unused.value.path
write_file(rootfs_filepath, part_path, progress_callback,
chunk_size, file_size)
return unused
|
Write the new rootfs to the next root partition
- Figure out, from the system, the correct root partition to write to
- Write the rootfs at ``rootfs_filepath`` there, with progress
:param rootfs_filepath: The path to a checked rootfs.ext4
:param progress_callback: A callback to call periodically with progress
between 0 and 1.0. May never reach precisely
1.0, best only for user information.
:param chunk_size: The size of file chunks to copy in between progress
notifications
:param file_size: The total size of the update file (for generating
progress percentage). If ``None``, generated with
``seek``/``tell``.
:returns: The root partition that the rootfs image was written to, e.g.
``RootPartitions.TWO`` or ``RootPartitions.THREE``.
|
24,422 |
def _build_response(self, resp):
self.response.content = resp.content
self.response.status_code = resp.status_code
self.response.headers = resp.headers
|
Build internal Response object from given response.
|
24,423 |
def read(self):
f = open(self.path, "r")
self.manifest_json = f.read()
|
Load the metrics file from the given path
|
24,424 |
def pOparapar(self,Opar,apar,tdisrupt=None):
if _APY_LOADED and isinstance(Opar,units.Quantity):
Opar= Opar.to(1/units.Gyr).value\
/bovy_conversion.freq_in_Gyr(self._vo,self._ro)
if _APY_LOADED and isinstance(apar,units.Quantity):
apar= apar.to(units.rad).value
if tdisrupt is None: tdisrupt= self._tdisrupt
if isinstance(Opar,(int,float,numpy.float32,numpy.float64)):
Opar= numpy.array([Opar])
out= numpy.zeros(len(Opar))
ts= apar/Opar
out[(ts < tdisrupt)*(ts >= 0.)]=\
numpy.exp(-0.5*(Opar[(ts < tdisrupt)*(ts >= 0.)]-self._meandO)**2.\
/self._sortedSigOEig[2])/\
numpy.sqrt(self._sortedSigOEig[2])
return out
|
NAME:
pOparapar
PURPOSE:
return the probability of a given parallel (frequency,angle) offset pair
INPUT:
Opar - parallel frequency offset (array) (can be Quantity)
apar - parallel angle offset along the stream (scalar) (can be Quantity)
OUTPUT:
p(Opar,apar)
HISTORY:
2015-12-07 - Written - Bovy (UofT)
|
24,425 |
def assistant_from_yaml(cls, source, y, superassistant, fully_loaded=True,
role=settings.DEFAULT_ASSISTANT_ROLE):
name = os.path.splitext(os.path.basename(source))[0]
yaml_checker.check(source, y)
assistant = yaml_assistant.YamlAssistant(name, y, source, superassistant,
fully_loaded=fully_loaded, role=role)
return assistant
|
Constructs instance of YamlAssistant loaded from given structure y, loaded
from source file source.
Args:
source: path to assistant source file
y: loaded yaml structure
superassistant: superassistant of this assistant
Returns:
YamlAssistant instance constructed from y with source file source
Raises:
YamlError: if the assistant is malformed
|
24,426 |
def get_viscosity(medium="CellCarrier", channel_width=20.0, flow_rate=0.16,
temperature=23.0):
if medium.lower() not in ["cellcarrier", "cellcarrier b", "water"]:
raise ValueError("Invalid medium: {}".format(medium))
term1 = 1.1856 * 6 * flow_rate * 1e-9 / (channel_width * 1e-6)**3 * 2 / 3
if medium == "CellCarrier":
temp_corr = (temperature / 23.2)**-0.866
term2 = 0.6771 / 0.5928 + 0.2121 / (0.5928 * 0.677)
eta = 0.179 * (term1 * term2)**(0.677 - 1) * temp_corr * 1e3
elif medium == "CellCarrier B":
temp_corr = (temperature / 23.6)**-0.866
term2 = 0.6771 / 0.5928 + 0.2121 / (0.5928 * 0.634)
eta = 0.360 * (term1 * term2)**(0.634 - 1) * temp_corr * 1e3
elif medium == "water":
if np.min(temperature) < 0 or np.max(temperature) > 40:
msg = "For water, the temperature must be in [0, 40] degC! " \
"Got min/max values of .".format(np.min(temperature),
np.max(temperature))
raise ValueError(msg)
eta0 = 1.002
right = (20-temperature) / (temperature + 96) \
* (+ 1.2364
- 1.37e-3 * (20 - temperature)
+ 5.7e-6 * (20 - temperature)**2
)
eta = eta0 * 10**right
return eta
|
Returns the viscosity for RT-DC-specific media
Parameters
----------
medium: str
The medium to compute the viscosity for.
One of ["CellCarrier", "CellCarrier B", "water"].
channel_width: float
The channel width in µm
flow_rate: float
Flow rate in µl/s
temperature: float or ndarray
Temperature in °C
Returns
-------
viscosity: float or ndarray
Viscosity in mPa*s
Notes
-----
- CellCarrier and CellCarrier B media are optimized for
RT-DC measurements.
- Values for the viscosity of water are computed using
equation (15) from :cite:`Kestin_1978`.
|
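A small worked example for the water branch (using the Kestin 1978 formula quoted above; the expected value is approximate):
# water at 24 degC: eta = 1.002 * 10**((20-T)/(T+96) * (1.2364 - 1.37e-3*(20-T) + 5.7e-6*(20-T)**2))
eta = get_viscosity(medium="water", temperature=24.0)
print(round(eta, 3))  # ~0.911 mPa*s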
24,427 |
def get_assessment(self, assessment_id):
collection = JSONClientValidated('assessment',
collection='Assessment',
runtime=self._runtime)
result = collection.find_one(
dict({'_id': ObjectId(self._get_id(assessment_id, 'assessment').get_identifier())},
**self._view_filter()))
return objects.Assessment(osid_object_map=result, runtime=self._runtime, proxy=self._proxy)
|
Gets the ``Assessment`` specified by its ``Id``.
In plenary mode, the exact ``Id`` is found or a ``NotFound``
results. Otherwise, the returned ``Assessment`` may have a
different ``Id`` than requested, such as the case where a
duplicate ``Id`` was assigned to a ``Assessment`` and retained
for compatibility.
arg: assessment_id (osid.id.Id): ``Id`` of the ``Assessment``
return: (osid.assessment.Assessment) - the assessment
raise: NotFound - ``assessment_id`` not found
raise: NullArgument - ``assessment_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
|
24,428 |
def export(rv, code=None, headers=None):
if isinstance(rv, ResponseBase):
return make_response(rv, code, headers)
else:
if code is None:
code = 200
mediatype = request.accept_mimetypes.best_match(
exporters.keys(), default=)
return exporters[mediatype](rv, code, headers)
|
Create a suitable response
Args:
rv: return value of action
code: status code
headers: response headers
Returns:
flask.Response
|
24,429 |
def allow(self, privilege):
assert privilege in PERMISSIONS[].keys()
reading = PERMISSIONS[][privilege] + PERMISSIONS[][privilege] + PERMISSIONS[][privilege]
os.chmod(self.file_path, reading)
|
Add an allowed privilege (read, write, execute, all).
|
24,430 |
def authorize(self, names, payload=None, request_type="push"):
if self.secrets is not None:
if "registry" in self.secrets:
timestamp = generate_timestamp()
credential = generate_credential(self.secrets[][])
credential = "%s/%s/%s" %(request_type,credential,timestamp)
if payload is None:
payload = "%s|%s|%s|%s|%s|" %(request_type,
names[],
timestamp,
names[],
names[])
signature = generate_signature(payload,self.secrets[][])
return "SREGISTRY-HMAC-SHA256 Credential=%s,Signature=%s" %(credential,signature)
|
Authorize a client based on encrypting the payload with the client
token, which should be matched on the receiving server
|
24,431 |
def fill_treewidget(self, tree, parameters):
tree.clear()
assert isinstance(parameters, (dict, Parameter))
for key, value in parameters.items():
if isinstance(value, Parameter):
B26QTreeItem(tree, key, value, parameters.valid_values[key], parameters.info[key])
else:
B26QTreeItem(tree, key, value, type(value), )
|
fills a QTreeWidget with nested parameters, in future replace QTreeWidget with QTreeView and call fill_treeview
Args:
tree: QtWidgets.QTreeWidget
parameters: dictionary or Parameter object
show_all: boolean if true show all parameters, if false only selected ones
Returns:
|
24,432 |
def walk_oid(self, oid):
var = netsnmp.Varbind(oid)
varlist = netsnmp.VarList(var)
data = self.walk(varlist)
if len(data) == 0:
raise SnmpException("SNMP walk response incomplete")
return varlist
|
Get a list of SNMP varbinds in response to a walk for oid.
Each varbind in response list has a tag, iid, val and type attribute.
|
24,433 |
def handle_pubrel(self):
self.logger.info("PUBREL received")
ret, mid = self.in_packet.read_uint16()
if ret != NC.ERR_SUCCESS:
return ret
evt = event.EventPubrel(mid)
self.push_event(evt)
return NC.ERR_SUCCESS
|
Handle incoming PUBREL packet.
|
24,434 |
def hmset(self, key, value_dict):
if not value_dict:
future = concurrent.TracebackFuture()
future.set_result(False)
else:
command = [b'HMSET', key]
command.extend(sum(value_dict.items(), ()))
future = self._execute(command)
return future
|
Sets fields to values as in `value_dict` in the hash stored at `key`.
Sets the specified fields to their respective values in the hash
stored at `key`. This command overwrites any specified fields
already existing in the hash. If `key` does not exist, a new key
holding a hash is created.
.. note::
**Time complexity**: ``O(N)`` where ``N`` is the number of
fields being set.
:param key: The key of the hash
:type key: :class:`str`, :class:`bytes`
:param value_dict: field to value mapping
:type value_dict: :class:`dict`
:rtype: bool
:raises: :exc:`~tredis.exceptions.RedisError`
|
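A hedged usage sketch (tredis-style client inside a Tornado coroutine; the connection setup is assumed to exist already):
from tornado import gen

@gen.coroutine
def store_profile(client):
    # client is assumed to be an already-connected tredis client
    ok = yield client.hmset("user:1", {"name": "ada", "lang": "python"})
    raise gen.Return(ok)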
24,435 |
def password_enter(self, wallet, password):
wallet = self._process_value(wallet, 'wallet')
payload = {"wallet": wallet, "password": password}
resp = self.call('password_enter', payload)
return resp['valid'] == '1'
|
Enters the **password** in to **wallet**
:param wallet: Wallet to enter password for
:type wallet: str
:param password: Password to enter
:type password: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.password_enter(
... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F",
... password="test"
... )
True
|
24,436 |
def onFolderTreeClicked(self, proxyIndex):
if not proxyIndex.isValid():
return
index = self.proxyFileModel.mapToSource(proxyIndex)
settings = QSettings()
folder_path = self.fileModel.filePath(index)
settings.setValue(, folder_path)
|
What to do when a Folder in the tree is clicked
|
24,437 |
def _push_next(self):
r = next(self._iter, None)
if r is None:
return
async_ret = self._worker_pool.apply_async(
self._worker_fn, (r, self._batchify_fn, self._dataset))
self._data_buffer[self._sent_idx] = async_ret
self._sent_idx += 1
|
Assign next batch workload to workers.
|
24,438 |
def get_frames(self, frames='all', override=False, **kwargs):
if override is True:
self.frames = {}
if isinstance(frames, int):
frame = self._get_frame(
self.trajectory_map[frames], frames, **kwargs)
if frames not in self.frames.keys():
self.frames[frames] = frame
return frame
if isinstance(frames, list):
for frame in frames:
if frame not in self.frames.keys():
self.frames[frame] = self._get_frame(
self.trajectory_map[frame], frame, **kwargs)
if isinstance(frames, tuple):
for frame in range(frames[0], frames[1]):
if frame not in self.frames.keys():
self.frames[frame] = self._get_frame(
self.trajectory_map[frame], frame, **kwargs)
if isinstance(frames, str):
if frames in ['all', 'everything']:
for frame in range(0, self.no_of_frames):
if frame not in self.frames.keys():
self.frames[frame] = self._get_frame(
self.trajectory_map[frame], frame, **kwargs)
|
Extract frames from the trajectory file.
Depending on the passed parameters a frame, a list of particular
frames, a range of frames (from, to), or all frames can be extracted
with this function.
Parameters
----------
frames : :class:`int` or :class:`list` or :class:`tuple` or :class:`str`
Specified frame (:class:`int`), or frames (:class:`list`), or
range (:class:`tuple`), or `all`/`everything` (:class:`str`).
(default=`all`)
override : :class:`bool`
If True, a frame already stored in :attr:`frames` can be overridden.
(default=False)
extract_data : :class:`bool`, optional
If False, a frame is returned as a :class:`str` block as in the
trajectory file. Otherwise, it is extracted and returned as
:class:`pywindow.molecular.MolecularSystem`. (default=True)
swap_atoms : :class:`dict`, optional
If this kwarg is passed with an appropriate dictionary a
:func:`pywindow.molecular.MolecularSystem.swap_atom_keys()` will
be applied to the extracted frame.
forcefield : :class:`str`, optional
If this kwarg is passed with appropriate forcefield keyword a
:func:`pywindow.molecular.MolecularSystem.decipher_atom_keys()`
will be applied to the extracted frame.
Returns
-------
:class:`pywindow.molecular.MolecularSystem`
If a single frame is extracted.
None : :class:`NoneType`
If more than one frame is extracted, the frames are returned to
:attr:`frames`
|
24,439 |
def merged(self):
stats = {}
for topic in self.client.topics()[]:
for producer in self.client.lookup(topic)[]:
hostname = producer[]
port = producer[]
host = % (hostname, port)
stats[host] = nsqd.Client(
% (hostname, port)).clean_stats()
return stats
|
The clean stats from all the hosts reporting to this host.
|
24,440 |
def delete(self, request, *args, **kwargs):
self.object = self.get_object()
success_url = self.get_success_url()
self.object.delete()
if self.request.is_ajax():
return JSONResponseMixin.render_to_response(self, context={})
return HttpResponseRedirect(success_url)
|
Calls the delete() method on the fetched object and then
redirects to the success URL.
|
24,441 |
def get_nodes_with_recipe(recipe_name, environment=None):
prefix_search = recipe_name.endswith("*")
if prefix_search:
recipe_name = recipe_name.rstrip("*")
for n in get_nodes(environment):
recipes = get_recipes_in_node(n)
for role in get_roles_in_node(n, recursive=True):
recipes.extend(get_recipes_in_role(role))
if prefix_search:
if any(recipe.startswith(recipe_name) for recipe in recipes):
yield n
else:
if recipe_name in recipes:
yield n
|
Get all nodes which include a given recipe,
prefix-searches are also supported
|
24,442 |
def getfo(self, remotepath, fl, callback=None):
file_size = self.stat(remotepath).st_size
with self.open(remotepath, "rb") as fr:
fr.prefetch(file_size)
return self._transfer_with_callback(
reader=fr, writer=fl, file_size=file_size, callback=callback
)
|
Copy a remote file (``remotepath``) from the SFTP server and write to
an open file or file-like object, ``fl``. Any exception raised by
operations will be passed through. This method is primarily provided
as a convenience.
:param str remotepath: the remote file path to copy from
:param object fl:
an open file or file-like object to write the file's contents to
:param callable callback:
optional callback function (form: ``func(int, int)``) that accepts
the bytes transferred so far and the total bytes to be transferred
:return: the `number <int>` of bytes written to the opened file object
.. versionadded:: 1.10
|
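Typical paramiko usage of ``getfo``, streaming a remote file into an in-memory buffer (host and credentials are placeholders):
import io
import paramiko

ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect("example.com", username="user", password="secret")
sftp = ssh.open_sftp()

buf = io.BytesIO()
nbytes = sftp.getfo("/remote/data.bin", buf)   # returns the number of bytes written to buf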
24,443 |
def setHint( self, hint ):
self._hint = hint
lineEdit = self.lineEdit()
if isinstance(lineEdit, XLineEdit):
lineEdit.setHint(hint)
|
Sets the hint for this line edit that will be displayed when in \
editable mode.
:param hint | <str>
|
24,444 |
def trace_symlink_target(link):
if not is_symlink(link):
raise ValueError("link must point to a symlink on the system")
while is_symlink(link):
orig = os.path.dirname(link)
link = readlink(link)
link = resolve_path(link, orig)
return link
|
Given a file that is known to be a symlink, trace it to its ultimate
target.
Raises TargetNotPresent when the target cannot be determined.
Raises ValueError when the specified link is not a symlink.
|
24,445 |
def find_token(request, token_type, service, **kwargs):
if request is not None:
try:
_token = request[token_type]
except KeyError:
pass
else:
del request[token_type]
request.c_param[token_type] = SINGLE_OPTIONAL_STRING
return _token
try:
return kwargs["access_token"]
except KeyError:
_arg = service.multiple_extend_request_args(
{}, kwargs[], [],
[, , ])
return _arg[]
|
The access token can be in a number of places.
There are priority rules as to which one to use, abide by those:
1 If it's among the request parameters use that
2 If among the extra keyword arguments
3 Acquired by a previous run service.
:param request:
:param token_type:
:param service:
:param kwargs:
:return:
|
24,446 |
def set_channel_locations(self, channel_ids, locations):
if len(channel_ids) == len(locations):
for i in range(len(channel_ids)):
if isinstance(locations[i],(list,np.ndarray)):
location = np.asarray(locations[i])
self.set_channel_property(channel_ids[i], 'location', location.astype(float))
else:
raise ValueError(str(locations[i]) + " must be an array_like")
else:
raise ValueError("channel_ids and locations must have same length")
|
This function sets the location properties of each specified channel
id with the corresponding locations of the passed in locations list.
Parameters
----------
channel_ids: array_like
The channel ids (ints) for which the locations will be specified
locations: array_like
A list of corresponding locations (array_like) for the given channel_ids
|
24,447 |
def last(self):
if self.type == HoloMap:
last_items = [(k, v.last if isinstance(v, HoloMap) else v)
for (k, v) in self.data.items()]
else:
last_items = self.data
return self.clone(last_items)
|
The last of a GridSpace is another GridSpace
constituted of the last of the individual elements. To access
the elements by their X,Y position, either index the position
directly or use the items() method.
|
24,448 |
def __set_ethernet_uris(self, ethernet_names, operation="add"):
if not isinstance(ethernet_names, list):
ethernet_names = [ethernet_names]
associated_enets = self.data.get('networkUris', [])
ethernet_uris = []
for i, enet in enumerate(ethernet_names):
enet_exists = self._ethernet_networks.get_by_name(enet)
if enet_exists:
ethernet_uris.append(enet_exists.data['uri'])
else:
raise HPOneViewResourceNotFound("Ethernet: {} does not exist".format(enet))
if operation == "remove":
enets_to_update = sorted(list(set(associated_enets) - set(ethernet_uris)))
elif operation == "add":
enets_to_update = sorted(list(set(associated_enets).union(set(ethernet_uris))))
else:
raise ValueError("Value {} is not supported as operation. The supported values are: [, ]")
if set(enets_to_update) != set(associated_enets):
updated_network = {'networkUris': enets_to_update}
self.update(updated_network)
|
Updates network uris.
|
24,449 |
def _append(self, menu):
menu.AppendCheckItem(self.id(), self.name, self.description)
menu.Check(self.id(), self.checked)
|
append this menu item to a menu
|
24,450 |
def save_artists(self, artists, filename="artist_lyrics", overwrite=False):
if isinstance(artists, Artist):
artists = [artists]
start = time.time()
tmp_dir =
if not os.path.isdir(tmp_dir):
os.mkdir(tmp_dir)
count = 0
else:
count = len(os.listdir(tmp_dir))
if os.path.isfile(filename + ".json") and not overwrite:
msg = "{f} already exists. Overwrite?\n(y/n): ".format(f=filename)
if input(msg).lower() != "y":
print("Leaving file in place. Exiting.")
os.rmdir(tmp_dir)
return
json.dump(all_lyrics, outfile)
shutil.rmtree(tmp_dir)
elapsed = (time.time() - start) / 60 / 60
print("Time elapsed: {t} hours".format(t=elapsed))
|
Save lyrics from multiple Artist objects as JSON object
:param artists: List of Artist objects to save lyrics from
:param filename: Name of output file (json)
:param overwrite: Overwrites preexisting file if True
|
24,451 |
def get_allproductandrelease(self):
logger.info("Get all ProductAndReleases")
response = self.get(PRODUCTANDRELEASE_RESOURCE_ROOT_URI, headers=self.headers)
sr_response = response_body_to_dict(response, self.headers[HEADER_CONTENT_TYPE],
xml_root_element_name=PRODUCTANDRELEASE_BODY_ROOT)
return sr_response, response
|
Get All ProductAndReleases
:return: A tuple: all ProductAndReleases from the SDC Catalog as a dict, and the 'Request' response
|
24,452 |
def vectorize_range(values):
if isinstance(values, tuple):
return '_'.join(str(i) for i in values)
if isinstance(values, list):
if not all([isinstance(item, tuple) for item in values]):
raise TypeError()
return ','.join('_'.join(str(i) for i in v) for v in values)
return str(values)
|
This function is for url encoding.
Takes a value or a tuple or list of tuples and returns a single result,
tuples are joined by "," if necessary, elements in tuple are joined by '_'
|
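Expected behaviour given the docstring's joining rules (assuming the elided separators are '_' within a tuple and ',' between tuples):
vectorize_range((1, 5))             # -> "1_5"
vectorize_range([(1, 5), (8, 9)])   # -> "1_5,8_9"
vectorize_range(7)                  # -> "7"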
24,453 |
def dictify(r,root=True):
if root:
return {r.tag : dictify(r, False)}
d=copy(r.attrib)
if r.text:
d["_text"]=r.text
for x in r.findall("./*"):
if x.tag not in d:
d[x.tag]=[]
d[x.tag].append(dictify(x,False))
return d
|
http://stackoverflow.com/a/30923963/2946714
|
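A self-contained example of the XML-to-dict conversion above (uses the standard library ElementTree):
import xml.etree.ElementTree as ET

root = ET.fromstring('<a x="1"><b>hi</b><b>bye</b></a>')
print(dictify(root))
# {'a': {'x': '1', 'b': [{'_text': 'hi'}, {'_text': 'bye'}]}}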
24,454 |
def allconcat(self, x, mesh_axis, concat_axis):
return self._collective_with_groups(
x, [mesh_axis],
functools.partial(allconcat_ring, concat_axis=concat_axis))
|
Grouped allconcat (like MPI allgather followed by concat).
Args:
x: a LaidOutTensor
mesh_axis: an integer - the mesh axis along which to group
concat_axis: an integer (the Tensor axis along which to concatenate)
Returns:
a LaidOutTensor
|
24,455 |
def bods2c(name):
name = stypes.stringToCharP(name)
code = ctypes.c_int(0)
found = ctypes.c_int(0)
libspice.bods2c_c(name, ctypes.byref(code), ctypes.byref(found))
return code.value, bool(found.value)
|
Translate a string containing a body name or ID code to an integer code.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/bods2c_c.html
:param name: String to be translated to an ID code.
:type name: str
:return: Integer ID code corresponding to name.
:rtype: int
|
24,456 |
def init_poolmanager(self, connections, maxsize, block=False, **pool_kwargs):
try:
pool_kwargs[] = ssl.PROTOCOL_TLS
except AttributeError:
pool_kwargs[] = ssl.PROTOCOL_SSLv23
return super(SSLAdapter, self).init_poolmanager(connections, maxsize, block, **pool_kwargs)
|
Called to initialize the HTTPAdapter when no proxy is used.
|
24,457 |
def sg_arg():
if not tf.app.flags.FLAGS.__dict__['__parsed']:
tf.app.flags.FLAGS._parse_flags()
return tf.sg_opt(tf.app.flags.FLAGS.__dict__['__flags'])
|
r"""Gets current command line options
Returns:
tf.sg_opt instance that is updated with current command line options.
|
24,458 |
def set_params(self, **params):
valid_params = self.get_params()
for key, value in params.items():
if key not in valid_params:
raise ValueError(
"Invalid parameter %s for estimator %s. "
"Check the list of available parameters "
"with `estimator.get_params().keys()`."
% (key, self.__class__.__name__)
)
setattr(self, key, value)
return self
|
Set the parameters of this estimator.
Returns
-------
self
|
24,459 |
def get_term_agents(self):
terms = self.tree.findall()
agents = {}
assoc_links = []
for term in terms:
term_id = term.attrib.get()
if term_id:
agent = self._get_agent_by_id(term_id, None)
agents[term_id] = agent
aw = term.find()
if aw is not None:
aw_id = aw.attrib.get()
if aw_id:
assoc_links.append((term_id, aw_id))
for source, target in assoc_links:
if target in agents and source in agents:
agents.pop(source)
return agents
|
Return dict of INDRA Agents keyed by corresponding TERMs in the EKB.
This is meant to be used when entities e.g. "phosphorylated ERK",
rather than events need to be extracted from processed natural
language. These entities with their respective states are represented
as INDRA Agents. Further, each key of the dictionary corresponds to
the ID assigned by TRIPS to the given TERM that the Agent was
extracted from.
Returns
-------
agents : dict[str, indra.statements.Agent]
Dict of INDRA Agents extracted from EKB.
|
24,460 |
def to_bytes(self):
raw = b''
if not self._options:
return raw
for ipopt in self._options:
raw += ipopt.to_bytes()
padbytes = 4 - (len(raw) % 4)
raw += b'\x00' * padbytes
return raw
|
Takes a list of IPOption objects and returns a packed byte string
of options, appropriately padded if necessary.
|
24,461 |
def wait_for_jobs(jobs):
all_running = False
while not all_running:
all_running = True
time.sleep(5)
for job in jobs:
job.refresh()
scheduled = getattr(job, "scheduled_at", None)
if scheduled is not None:
logger.info("Waiting for %s on %s [%s]" % (job.uid,
job.site,
_date2h(scheduled)))
all_running = all_running and job.state == "running"
if job.state == "error":
raise Exception("The job %s is in error state" % job)
logger.info("All jobs are Running !")
|
Waits for all the jobs to be running.
Args:
jobs(list): list of the python-grid5000 jobs to wait for
Raises:
Exception: if one of the job gets in error state.
|
24,462 |
def authenticated(
method: Callable[..., Optional[Awaitable[None]]]
) -> Callable[..., Optional[Awaitable[None]]]:
@functools.wraps(method)
def wrapper(
self: RequestHandler, *args, **kwargs
) -> Optional[Awaitable[None]]:
if not self.current_user:
if self.request.method in ("GET", "HEAD"):
url = self.get_login_url()
if "?" not in url:
if urllib.parse.urlsplit(url).scheme:
next_url = self.request.full_url()
else:
assert self.request.uri is not None
next_url = self.request.uri
url += "?" + urlencode(dict(next=next_url))
self.redirect(url)
return None
raise HTTPError(403)
return method(self, *args, **kwargs)
return wrapper
|
Decorate methods with this to require that the user be logged in.
If the user is not logged in, they will be redirected to the configured
`login url <RequestHandler.get_login_url>`.
If you configure a login url with a query parameter, Tornado will
assume you know what you're doing and use it as-is. If not, it
will add a `next` parameter so the login page knows where to send
you once you're logged in.
|
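The standard way this decorator is used in a Tornado application (per the Tornado docs):
import tornado.web

class ProfileHandler(tornado.web.RequestHandler):
    def get_current_user(self):
        return self.get_secure_cookie("user")

    @tornado.web.authenticated
    def get(self):
        self.write("hello %s" % self.current_user.decode())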
24,463 |
def load(filenames, prepare_data_iterator=True, batch_size=None, exclude_parameter=False, parameter_only=False):
class Info:
pass
info = Info()
proto = nnabla_pb2.NNablaProtoBuf()
for filename in filenames:
_, ext = os.path.splitext(filename)
if ext in ['.nntxt', '.prototxt']:
if not parameter_only:
with open(filename, ) as f:
try:
text_format.Merge(f.read(), proto)
except:
logger.critical(.format(filename))
logger.critical(
)
raise
if len(proto.parameter) > 0:
if not exclude_parameter:
nn.load_parameters(filename)
elif ext in ['.protobuf', '.h5']:
if not exclude_parameter:
nn.load_parameters(filename)
else:
logger.info()
elif ext == '.nnp':
try:
tmpdir = tempfile.mkdtemp()
with zipfile.ZipFile(filename, ) as nnp:
for name in nnp.namelist():
_, ext = os.path.splitext(name)
if name == :
nnp.extract(name, tmpdir)
with open(os.path.join(tmpdir, name), ) as f:
pass
elif ext in ['.nntxt', '.prototxt']:
nnp.extract(name, tmpdir)
if not parameter_only:
with open(os.path.join(tmpdir, name), ) as f:
text_format.Merge(f.read(), proto)
if len(proto.parameter) > 0:
if not exclude_parameter:
nn.load_parameters(
os.path.join(tmpdir, name))
elif ext in ['.protobuf', '.h5']:
nnp.extract(name, tmpdir)
if not exclude_parameter:
nn.load_parameters(os.path.join(tmpdir, name))
else:
logger.info()
finally:
shutil.rmtree(tmpdir)
default_context = None
if proto.HasField('global_config'):
info.global_config = _global_config(proto)
default_context = info.global_config.default_context
if in default_context.backend:
import nnabla_ext.cudnn
elif in default_context.backend:
try:
import nnabla_ext.cudnn
except:
pass
else:
import nnabla_ext.cpu
default_context = nnabla_ext.cpu.context()
comm = current_communicator()
if comm:
default_context.device_id = str(comm.rank)
if proto.HasField('training_config'):
info.training_config = _training_config(proto)
info.datasets = _datasets(
proto, prepare_data_iterator if prepare_data_iterator is not None else info.training_config.max_epoch > 0)
info.networks = _networks(proto, default_context, batch_size)
info.optimizers = _optimizers(
proto, default_context, info.networks, info.datasets)
info.monitors = _monitors(
proto, default_context, info.networks, info.datasets)
info.executors = _executors(proto, info.networks)
return info
|
load
Load network information from files.
Args:
filenames (list): List of filenames.
Returns:
dict: Network information.
|
24,464 |
def recursive_operation_ls(
self, endpoint_id, depth=3, filter_after_first=True, **params
):
endpoint_id = safe_stringify(endpoint_id)
self.logger.info(
"TransferClient.recursive_operation_ls({}, {}, {})".format(
endpoint_id, depth, params
)
)
return RecursiveLsResponse(self, endpoint_id, depth, filter_after_first, params)
|
Makes recursive calls to ``GET /operation/endpoint/<endpoint_id>/ls``
Does not preserve access to top level operation_ls fields, but
adds a "path" field for every item that represents the full
path to that item.
:rtype: iterable of :class:`GlobusResponse
<globus_sdk.response.GlobusResponse>`
**Parameters**
``endpoint_id`` (*string*)
The endpoint being recursively ls'ed. If no "path" is given in
params, the start path is determined by this endpoint.
``depth`` (*int*)
The maximum file depth the recursive ls will go to.
``filter_after_first`` (*bool*)
If False, any "filter" in params will only be applied to the
first, top level ls, all results beyond that will be unfiltered.
``params``
Parameters that will be passed through as query params.
**Examples**
>>> tc = globus_sdk.TransferClient(...)
>>> for entry in tc.recursive_operation_ls(ep_id, path="/~/project1/"):
>>> print(entry["path"], entry["type"])
**External Documentation**
See
`List Directory Contents \
<https://docs.globus.org/api/transfer/file_operations/#list_directory_contents>`_
in the REST documentation for details, but note that top level data
fields are no longer available and an additional per item
"path" field is added.
|
24,465 |
def sam2fastq(line):
fastq = []
fastq.append('@%s' % line[0])
fastq.append(line[9])
fastq.append('+%s' % line[0])
fastq.append(line[10])
return fastq
|
print fastq from sam
|
24,466 |
def get_public_key(self):
return PublicKey.from_verifying_key(
self._private_key.get_verifying_key(),
network=self.network, compressed=self.compressed)
|
Get the PublicKey for this PrivateKey.
|
24,467 |
def assess_angmom(X):
L=angmom(X[0])
loop = np.array([1,1,1])
for i in X[1:]:
L0 = angmom(i)
if(L0[0]*L[0]<0.):
loop[0] = 0
if(L0[1]*L[1]<0.):
loop[1] = 0
if(L0[2]*L[2]<0.):
loop[2] = 0
return loop
|
Checks for change of sign in each component of the angular momentum.
Returns an array with ith entry 1 if no sign change in i component
and 0 if sign change.
Box = (0,0,0)
S.A loop = (0,0,1)
L.A loop = (1,0,0)
|
24,468 |
def create_handler(target: str):
if target == 'stderr':
return logging.StreamHandler(sys.stderr)
elif target == 'stdout':
return logging.StreamHandler(sys.stdout)
else:
return logging.handlers.WatchedFileHandler(filename=target)
|
Create a handler for logging to ``target``
|
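Usage sketch, assuming the elided target literals above are 'stderr' and 'stdout':
import logging

logger = logging.getLogger("app")
logger.addHandler(create_handler("stderr"))             # console handler
logger.addHandler(create_handler("/var/log/app.log"))   # watched file handler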
24,469 |
def decision_function(self, pairs):
pairs = check_input(pairs, type_of_inputs='tuples',
preprocessor=self.preprocessor_,
estimator=self, tuple_size=self._tuple_size)
return - self.score_pairs(pairs)
|
Returns the decision function used to classify the pairs.
Returns the opposite of the learned metric value between samples in every
pair, to be consistent with scikit-learn conventions. Hence it should
ideally be low for dissimilar samples and high for similar samples.
This is the decision function that is used to classify pairs as similar
(+1), or dissimilar (-1).
Parameters
----------
pairs : array-like, shape=(n_pairs, 2, n_features) or (n_pairs, 2)
3D Array of pairs to predict, with each row corresponding to two
points, or 2D array of indices of pairs if the metric learner uses a
preprocessor.
Returns
-------
y_predicted : `numpy.ndarray` of floats, shape=(n_constraints,)
The predicted decision function value for each pair.
|
24,470 |
def render(template: typing.Union[str, Template], **kwargs):
if not hasattr(template, 'render'):
template = get_environment().from_string(textwrap.dedent(template))
return template.render(
cauldron_template_uid=make_template_uid(),
**kwargs
)
|
Renders a template string using Jinja2 and the Cauldron templating
environment.
:param template:
The string containing the template to be rendered
:param kwargs:
Any named arguments to pass to Jinja2 for use in rendering
:return:
The rendered template string
|
24,471 |
def solve_with_sdpa(sdp, solverparameters=None):
solverexecutable = detect_sdpa(solverparameters)
if solverexecutable is None:
raise OSError("SDPA is not in the path or the executable provided is" +
" not correct")
primal, dual = 0, 0
tempfile_ = tempfile.NamedTemporaryFile()
tmp_filename = tempfile_.name
tempfile_.close()
tmp_dats_filename = tmp_filename + ".dat-s"
tmp_out_filename = tmp_filename + ".out"
write_to_sdpa(sdp, tmp_dats_filename)
command_line = [solverexecutable, "-ds", tmp_dats_filename,
"-o", tmp_out_filename]
if solverparameters is not None:
for key, value in list(solverparameters.items()):
if key == "executable":
continue
elif key == "paramsfile":
command_line.extend(["-p", value])
else:
raise ValueError("Unknown parameter for SDPA: " + key)
if sdp.verbose < 1:
with open(os.devnull, "w") as fnull:
call(command_line, stdout=fnull, stderr=fnull)
else:
call(command_line)
primal, dual, x_mat, y_mat, status = read_sdpa_out(tmp_out_filename, True,
True)
if sdp.verbose < 2:
os.remove(tmp_dats_filename)
os.remove(tmp_out_filename)
return primal+sdp.constant_term, \
dual+sdp.constant_term, x_mat, y_mat, status
|
Helper function to write out the SDP problem to a temporary
file, call the solver, and parse the output.
:param sdp: The SDP relaxation to be solved.
:type sdp: :class:`ncpol2sdpa.sdp`.
:param solverparameters: Optional parameters to SDPA.
:type solverparameters: dict of str.
:returns: tuple of float and list -- the primal and dual solution of the
SDP, respectively, and a status string.
|
24,472 |
def set_ntp_server(server):
log = logging.getLogger(mod_logger + )
if not isinstance(server, basestring):
msg =
log.error(msg)
raise CommandError(msg)
ntp_conf =
if not os.path.isfile(ntp_conf):
msg = .format(f=ntp_conf)
log.error(msg)
raise CommandError(msg)
log.info(, ntp_conf)
try:
sed(ntp_conf, , , g=0)
except CommandError:
_, ex, trace = sys.exc_info()
msg = .format(f=ntp_conf, e=str(ex))
log.error(msg)
raise CommandError, msg, trace
out_str = + server
log.info(, out_str)
with open(ntp_conf, ) as f:
f.write(out_str)
log.info(.format(f=ntp_conf))
|
Sets the NTP server on Linux
:param server: (str) NTP server IP or hostname
:return: None
:raises CommandError
|
24,473 |
def kill_line(event):
buff = event.current_buffer
if event.arg < 0:
deleted = buff.delete_before_cursor(count=-buff.document.get_start_of_line_position())
else:
if buff.document.current_char == '\n':
deleted = buff.delete(1)
else:
deleted = buff.delete(count=buff.document.get_end_of_line_position())
event.cli.clipboard.set_text(deleted)
|
Kill the text from the cursor to the end of the line.
If we are at the end of the line, this should remove the newline.
(That way, it is possible to delete multiple lines by executing this
command multiple times.)
|
24,474 |
def input_dir(self, dirname):
dirname = dirname.rstrip('/')
if self.excluded(dirname):
return 0
counters = self.options.report.counters
verbose = self.options.verbose
filepatterns = self.options.filename
runner = self.runner
for root, dirs, files in os.walk(dirname):
if verbose:
print('directory ' + root)
counters['directories'] += 1
for subdir in sorted(dirs):
if self.excluded(subdir, root):
dirs.remove(subdir)
for filename in sorted(files):
if ((filename_match(filename, filepatterns) and
not self.excluded(filename, root))):
runner(os.path.join(root, filename))
|
Check all files in this directory and all subdirectories.
|
24,475 |
def flushall(self, async_op=False):
if async_op:
fut = self.execute(b'FLUSHALL', b'ASYNC')
else:
fut = self.execute(b'FLUSHALL')
return wait_ok(fut)
|
Remove all keys from all databases.
:param async_op: lets the entire dataset to be freed asynchronously. \
Defaults to False
|
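A hedged usage sketch with an aioredis-1.x-style client (the connection URL is a placeholder):
import asyncio
import aioredis

async def wipe():
    redis = await aioredis.create_redis_pool("redis://localhost")
    await redis.flushall(async_op=True)   # free the dataset in the background
    redis.close()
    await redis.wait_closed()

asyncio.get_event_loop().run_until_complete(wipe())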
24,476 |
def get_grade_entries_by_query(self, grade_entry_query):
and_list = list()
or_list = list()
for term in grade_entry_query._query_terms:
if in grade_entry_query._query_terms[term] and in grade_entry_query._query_terms[term]:
and_list.append(
{: [{term: {: grade_entry_query._query_terms[term][]}},
{term: {: grade_entry_query._query_terms[term][]}}]})
else:
and_list.append({term: grade_entry_query._query_terms[term]})
for term in grade_entry_query._keyword_terms:
or_list.append({term: grade_entry_query._keyword_terms[term]})
if or_list:
and_list.append({: or_list})
view_filter = self._view_filter()
if view_filter:
and_list.append(view_filter)
if and_list:
query_terms = {: and_list}
collection = JSONClientValidated(,
collection=,
runtime=self._runtime)
result = collection.find(query_terms).sort(, DESCENDING)
else:
result = []
return objects.GradeEntryList(result, runtime=self._runtime, proxy=self._proxy)
|
Gets a list of entries matching the given grade entry query.
arg: grade_entry_query (osid.grading.GradeEntryQuery): the
grade entry query
return: (osid.grading.GradeEntryList) - the returned
``GradeEntryList``
raise: NullArgument - ``grade_entry_query`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``grade_entry_query`` is not of this
service
*compliance: mandatory -- This method must be implemented.*
|
24,477 |
def to_pb(self):
max_age = _helpers._timedelta_to_duration_pb(self.max_age)
return table_v2_pb2.GcRule(max_age=max_age)
|
Converts the garbage collection rule to a protobuf.
:rtype: :class:`.table_v2_pb2.GcRule`
:returns: The converted current object.
|
24,478 |
def _fetch_targets(self, api_client, q, target):
target_type, response_attribute, list_method_name, list_params, ignore_list_error = target
list_method = getattr(api_client, list_method_name)
try:
targets = handle_truncated_response(list_method, list_params, [response_attribute])[response_attribute]
except Exception as e:
if not ignore_list_error:
printException(e)
targets = []
setattr(self, % target_type, len(targets))
self.fetchstatuslogger.counts[target_type][] += len(targets)
region = api_client._client_config.region_name
for target in targets:
try:
callback = getattr(self, % target_type[0:-1])
except:
callback = self.store_target
target[] = target_type
if q:
q.put((callback, region, target))
|
Make an API call defined in metadata.json.
Parse the returned object as implemented in the "parse_[object name]" method.
:param api_client:
:param q:
:param target:
:return:
|
24,479 |
def open_spec(f):
import ruamel.yaml as yaml
keys = [, , , , ]
data = yaml.safe_load(f)
parsed = dict()
for k in keys:
v = data.get(k, [])
if isinstance(v, basestring):
parsed[k] = [m for m in re.split(r",| ", v)]
else:
parsed[k] = v
return parsed
|
:param f: file object with spec data
spec file is a yaml document that specifies which modules
can be loaded.
modules - list of base modules that can be loaded
pths - list of .pth files to load
|
24,480 |
def _get_rows(self, table):
childnodes = table.childNodes
qname_childnodes = [(s.qname[1], s) for s in childnodes]
return [node for name, node in qname_childnodes
if name == u]
|
Returns rows from table
|
24,481 |
def prepare_axes(wave, flux, fig=None, ax_lower=(0.1, 0.1),
ax_dim=(0.85, 0.65)):
if not fig:
fig = plt.figure()
ax = fig.add_axes([ax_lower[0], ax_lower[1], ax_dim[0], ax_dim[1]])
ax.plot(wave, flux)
return fig, ax
|
Create fig and axes if needed and layout axes in fig.
|
24,482 |
def get_extents(self, element, ranges, range_type=):
xdim, ydim = element.nodes.kdims[:2]
if range_type not in (, , ):
return xdim.range[0], ydim.range[0], xdim.range[1], ydim.range[1]
no_labels = (element.nodes.get_dimension(self.label_index) is None and
self.labels is None)
rng = 1.1 if no_labels else 1.4
x0, x1 = max_range([xdim.range, (-rng, rng)])
y0, y1 = max_range([ydim.range, (-rng, rng)])
return (x0, y0, x1, y1)
|
A Chord plot is always drawn on a unit circle.
|
24,483 |
def compute(self, *inputs, **kwargs):
from deepy.core.neural_var import NeuralVariable
from deepy.core.graph import graph
if type(inputs[0]) != NeuralVariable:
raise SystemError("The input of `compute` must be NeuralVar")
dims = [t.dim() for t in inputs]
if len(inputs) == 1:
self.init(input_dim=dims[0])
else:
self.init(input_dims=dims)
if self.parameters and not self._linked_block:
self.belongs_to(graph.default_block())
train_kwargs, _, _ = convert_to_theano_var(kwargs)
output = self.compute_tensor(*[t.tensor for t in inputs], **train_kwargs)
if type(output) != list and type(output) != tuple:
return NeuralVariable(output, dim=self.output_dim)
else:
return [NeuralVariable(*item) for item in zip(output, self.output_dims)]
|
Compute based on NeuralVariable.
:type inputs: list of NeuralVariable
:return: NeuralVariable
|
24,484 |
def watched(self, option):
params = join_params(self.parameters, {"watched": option})
return self.__class__(**params)
|
Set whether to filter by a user's watchlist. Options available are
user.ONLY, user.NOT, and None; default is None.
|
24,485 |
def xpath(self, selector: str, *, first: bool = False, _encoding: str = None) -> _XPath:
selected = self.lxml.xpath(selector)
elements = [
Element(element=selection, default_encoding=_encoding or self.encoding)
if not isinstance(selection, etree._ElementUnicodeResult) else str(selection)
for selection in selected
]
return _get_first_or_list(elements, first)
|
Given an XPath selector, returns a list of
:class:`Element <Element>` objects or a single one.
:param selector: XPath Selector to use.
:param first: Whether or not to return just the first result.
:param _encoding: The encoding format.
If a sub-selector is specified (e.g. ``//a/@href``), a simple
list of results is returned.
See W3School's `XPath Examples
<https://www.w3schools.com/xml/xpath_examples.asp>`_
for more details.
If ``first`` is ``True``, only returns the first
:class:`Element <Element>` found.
|
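A short usage sketch, assuming this is the xpath method on requests-html's HTML class:

from requests_html import HTML

doc = HTML(html='<p><a href="https://example.org">example</a></p>')
links = doc.xpath('//a')               # list of Element objects
hrefs = doc.xpath('//a/@href')         # sub-selector: plain list of strings
first = doc.xpath('//a', first=True)   # single Element (or None if no match)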
24,486 |
def run(self):
self.otherThread._Thread__stderr = self._stderr
if hasattr(self.otherThread, '_Thread__stop'):
self.otherThread._Thread__stop()
while self.otherThread.isAlive():
ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(self.otherThread.ident), ctypes.py_object(self.exception))
self.otherThread.join(self.repeatEvery)
try:
self._stderr.close()
except:
pass
|
run - The thread main. Will attempt to stop and join the attached thread.
|
24,487 |
def securityEventWS(symbols=None, on_data=None):
symbols = _strToList(symbols)
sendinit = ({'symbols': symbols, 'channels': []},)  # key names reconstructed; literals were lost in extraction
return _stream(_wsURL(), sendinit, on_data)
|
https://iextrading.com/developer/docs/#security-event
|
24,488 |
def is_all_field_none(self):
if self._id_ is not None:
return False
if self._created is not None:
return False
if self._updated is not None:
return False
if self._avatar is not None:
return False
if self._currency is not None:
return False
if self._description is not None:
return False
if self._daily_limit is not None:
return False
if self._daily_spent is not None:
return False
if self._overdraft_limit is not None:
return False
if self._balance is not None:
return False
if self._alias is not None:
return False
if self._public_uuid is not None:
return False
if self._status is not None:
return False
if self._sub_status is not None:
return False
if self._reason is not None:
return False
if self._reason_description is not None:
return False
if self._all_co_owner is not None:
return False
if self._user_id is not None:
return False
if self._monetary_account_profile is not None:
return False
if self._notification_filters is not None:
return False
if self._setting is not None:
return False
return True
|
:rtype: bool
|
24,489 |
def make_sub_call(id_, lineno, params):
return symbols.CALL.make_node(id_, params, lineno)
|
This will return an AST node for a sub/procedure call.
|
24,490 |
def chain_HSPs(blast, xdist=100, ydist=100):
key = lambda x: (x.query, x.subject)
blast.sort(key=key)
clusters = Grouper()
for qs, points in groupby(blast, key=key):
points = sorted(list(points), \
key=lambda x: (x.qstart, x.qstop, x.sstart, x.sstop))
n = len(points)
for i in xrange(n):
a = points[i]
clusters.join(a)
for j in xrange(i + 1, n):
b = points[j]
del_x = get_distance(a, b)
if del_x > xdist:
break
del_y = get_distance(a, b, xaxis=False)
if del_y > ydist:
continue
clusters.join(a, b)
chained_hsps = [combine_HSPs(x) for x in clusters]
key = lambda x: (x.query, -x.score if x.has_score else 0)
chained_hsps = sorted(chained_hsps, key=key)
return chained_hsps
|
Take a list of BlastLines (or a BlastSlow instance), and returns a list of
BlastLines.
|
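An illustrative standalone sketch of the chaining criterion (plain tuples, not the BlastLine/Grouper types used above): two HSPs join the same cluster when both their query-axis and subject-axis gaps fall within the thresholds.

# Standalone illustration only; get_distance in the real code may differ in detail.
def _gap(a_start, a_stop, b_start, b_stop):
    # 0 if the two intervals overlap, otherwise the distance between them
    return max(0, max(a_start, b_start) - min(a_stop, b_stop))

def should_chain(a, b, xdist=100, ydist=100):
    # a and b are (qstart, qstop, sstart, sstop) tuples
    return (_gap(a[0], a[1], b[0], b[1]) <= xdist and
            _gap(a[2], a[3], b[2], b[3]) <= ydist)

print(should_chain((1, 50, 1, 50), (120, 180, 130, 190)))   # True: gaps of 70 and 80
print(should_chain((1, 50, 1, 50), (400, 450, 130, 190)))   # False: query gap of 350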
24,491 |
def get(self, path, data=None, return_fields=None):
return self.session.get(self._request_url(path, return_fields),
data=json.dumps(data),
auth=self.auth, verify=False)
|
Call the Infoblox device to get the obj for the data passed in
:param str path: The object reference/path to request
:param list return_fields: Optional list of fields to return
:param dict data: The data for the get request
:rtype: requests.Response
|
24,492 |
def guest_session_new(self, **kwargs):
path = self._get_path('guest_session_new')  # path key reconstructed from the method name
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
|
Generate a guest session id.
Returns:
A dict representation of the JSON returned from the API.
|
24,493 |
def _all_params(arr):
if not isinstance(arr, list):
raise TypeError("non-list value found for parameters")
return all(isinstance(x, GPParamSpec) for x in arr)
|
Ensures that the argument is a list that either is empty or contains only GPParamSpec's
:param arr: list
:return:
|
24,494 |
def set_page_property(self, page_id, data):
url = 'rest/api/content/{page_id}/property'.format(page_id=page_id)
json_data = data
return self.post(path=url, data=json_data)
|
Set the page (content) property e.g. add hash parameters
:param page_id: content_id format
:param data: data should be as json data
:return:
|
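A hedged usage sketch; the payload shape below follows the Confluence content-property REST convention (key/value), and the client variable and example values are assumptions:

# Usage sketch: 'confluence' is assumed to be an instance of the client class
# exposing set_page_property; the key and value are example data.
data = {
    "key": "content-hash",
    "value": {"sha256": "abc123"},
}
response = confluence.set_page_property(page_id="123456", data=data)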
24,495 |
def _find_supported_challenge(authzr, responders):
matches = [
(responder, challbs[0])
for challbs in authzr.body.resolved_combinations
for responder in responders
if [challb.typ for challb in challbs] == [responder.challenge_type]]
if len(matches) == 0:
raise NoSupportedChallenges(authzr)
else:
return matches[0]
|
Find a challenge combination that consists of a single challenge that the
responder can satisfy.
:param ~acme.messages.AuthorizationResource authzr: The authorization to
examine.
:type responders: List[`~txacme.interfaces.IResponder`]
:param responders: The possible responders to use.
:raises NoSupportedChallenges: When a suitable challenge combination is not
found.
:rtype: Tuple[`~txacme.interfaces.IResponder`,
`~acme.messages.ChallengeBody`]
:return: The responder and challenge that were found.
|
24,496 |
def Route(resource=None, methods=["get", "post", "put", "delete"],
schema=None):
def _route(func):
def wrapper(self, *args, **kwargs):
if kwargs.get("test", False):
kwargs.pop("test")
func(self, *args, **kwargs)
_methods = methods
if isinstance(methods, str):
_methods = [methods]
route = self.router.route(resource)
for method in _methods:
getattr(route, method)(func, schema)
f_locals = sys._getframe(1).f_locals
_order = len([v for v in f_locals.itervalues()
if hasattr(v, '__call__') and  # attribute names reconstructed; literals lost in extraction
hasattr(v, '__name__') and
v.__name__ == "wrapper"])
wrapper.__dict__["_order"] = _order
return wrapper
return _route
|
route
|
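The docstring is terse; a hypothetical usage sketch (the class, resource paths, and handler names are invented, not from the source) of how the decorator appears intended to decorate handler methods on a class that owns a router:

# Hypothetical usage sketch; the framework wiring is an assumption.
class ItemHandler(object):

    @Route(resource="/items", methods=["get", "post"])
    def items(self, request):
        # registered for GET /items and POST /items via self.router
        pass

    @Route(resource="/items/{id}", methods="get", schema=None)
    def item(self, request, id):
        pass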
24,497 |
def build_info_string(info):
info_list = []
for annotation in info:
if info[annotation]:
info_list.append('='.join([annotation, ','.join(info[annotation])]))
else:
info_list.append(annotation)
return ';'.join(info_list)
|
Build a new vcf INFO string based on the information in the info_dict.
The info is a dictionary with vcf info keys as keys and lists of vcf values
as values. If a key has no value (a flag), its value in info is False.
Args:
info (dict): A dictionary with information from the vcf file
Returns:
String: A string that is on the proper vcf format for the INFO column
|
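A short worked example of the mapping from the info dict to a VCF INFO string (multi-valued keys become key=v1,v2 and flag keys appear bare, joined with ';'):

info = {
    'AF': ['0.5', '0.25'],   # multi-valued entry -> comma-joined
    'DP': ['42'],
    'DB': False,             # flag with no value -> bare key
}
print(build_info_string(info))   # e.g. "AF=0.5,0.25;DP=42;DB"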
24,498 |
def list_components(self):
overlays = list(self._component_overlays)
items = self.kvstore.get_all()
return overlays + [x[0] for x in items if not x[0].startswith('config:')]  # prefix literal reconstructed; config variables are assumed to share the kvstore
|
List all of the registered component names.
This list will include all of the permanently stored components as
well as any temporary components that were added with a temporary=True
flag in this session.
Returns:
list of str: The list of component names.
Any of these names can be passed to get_component as is to get the
corresponding IOTile object.
|
24,499 |
def simxGetJointPosition(clientID, jointHandle, operationMode):
position = ct.c_float()
return c_GetJointPosition(clientID, jointHandle, ct.byref(position), operationMode), position.value
|
Please have a look at the function description/documentation in the V-REP user manual
|
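A minimal usage sketch against the V-REP (CoppeliaSim) remote-API Python bindings; the scene object name is an assumption:

# Usage sketch for the V-REP remote API (functions/constants from vrep.py).
import vrep

clientID = vrep.simxStart('127.0.0.1', 19997, True, True, 5000, 5)
err, jointHandle = vrep.simxGetObjectHandle(
    clientID, 'joint1', vrep.simx_opmode_blocking)   # 'joint1' is a placeholder name
err, position = vrep.simxGetJointPosition(
    clientID, jointHandle, vrep.simx_opmode_blocking)
print(position)   # joint angle in radians (or metres for prismatic joints)
vrep.simxFinish(clientID)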