Unnamed: 0 (int64, 0-389k) | code (string, lengths 26-79.6k) | docstring (string, lengths 1-46.9k)
---|---|---|
384,400 | def update_metadata(self, metadata):
if len(metadata.topics) == 1 and metadata.topics[0][0] != 0:
error_code, topic = metadata.topics[0][:2]
error = Errors.for_code(error_code)(topic)
return self.failed_update(error)
if not metadata.brokers:
log.warning("No broker metadata found in MetadataResponse -- ignoring.")
return self.failed_update(Errors.MetadataEmptyBrokerList(metadata))
_new_brokers = {}
for broker in metadata.brokers:
if metadata.API_VERSION == 0:
node_id, host, port = broker
rack = None
else:
node_id, host, port, rack = broker
_new_brokers.update({
node_id: BrokerMetadata(node_id, host, port, rack)
})
if metadata.API_VERSION == 0:
_new_controller = None
else:
_new_controller = _new_brokers.get(metadata.controller_id)
_new_partitions = {}
_new_broker_partitions = collections.defaultdict(set)
_new_unauthorized_topics = set()
_new_internal_topics = set()
for topic_data in metadata.topics:
if metadata.API_VERSION == 0:
error_code, topic, partitions = topic_data
is_internal = False
else:
error_code, topic, is_internal, partitions = topic_data
if is_internal:
_new_internal_topics.add(topic)
error_type = Errors.for_code(error_code)
if error_type is Errors.NoError:
_new_partitions[topic] = {}
for p_error, partition, leader, replicas, isr in partitions:
_new_partitions[topic][partition] = PartitionMetadata(
topic=topic, partition=partition, leader=leader,
replicas=replicas, isr=isr, error=p_error)
if leader != -1:
_new_broker_partitions[leader].add(
TopicPartition(topic, partition))
elif error_type is Errors.LeaderNotAvailableError:
log.warning("Topic %s is not available during auto-create"
" initialization", topic)
elif error_type is Errors.UnknownTopicOrPartitionError:
log.error("Topic %s not found in cluster metadata", topic)
elif error_type is Errors.TopicAuthorizationFailedError:
log.error("Topic %s is not authorized for this client", topic)
_new_unauthorized_topics.add(topic)
elif error_type is Errors.InvalidTopicError:
log.error(" is not a valid topic name", topic)
else:
log.error("Error fetching metadata for topic %s: %s",
topic, error_type)
with self._lock:
self._brokers = _new_brokers
self.controller = _new_controller
self._partitions = _new_partitions
self._broker_partitions = _new_broker_partitions
self.unauthorized_topics = _new_unauthorized_topics
self.internal_topics = _new_internal_topics
f = None
if self._future:
f = self._future
self._future = None
self._need_update = False
now = time.time() * 1000
self._last_refresh_ms = now
self._last_successful_refresh_ms = now
if f:
f.success(self)
log.debug("Updated cluster metadata to %s", self)
for listener in self._listeners:
listener(self)
if self.need_all_topic_metadata:
self._need_update = False | Update cluster state given a MetadataResponse.
Arguments:
metadata (MetadataResponse): broker response to a metadata request
Returns: None |
384,401 | def show_patterned_file(dir_path, pattern=list(), filename_only=True):
pattern = [i.lower() for i in pattern]
if filename_only:
def filter(winfile):
for p in pattern:
if p in winfile.fname.lower():
return True
return False
else:
def filter(winfile):
for p in pattern:
if p in winfile.abspath.lower():
return True
return False
fc = FileCollection.from_path_by_criterion(
dir_path, filter, keepboth=False)
if filename_only:
fc.sort_by("fname")
else:
fc.sort_by("abspath")
table = {p: "<%s>" % p for p in pattern}
lines = list()
lines.append("Results:")
for winfile in fc.iterfiles():
lines.append(" %s" % winfile)
if filename_only:
lines.append("Above are all files that file name contains %s" % pattern)
else:
lines.append("Above are all files that abspath contains %s" % pattern)
text = "\n".join(lines)
print(text)
with open("__show_patterned_file__.log", "wb") as f:
f.write(text.encode("utf-8")) | Print all files whose file name contains ``pattern``. |
384,402 | def quality_to_apply(self):
if (self.request.quality is None):
if (self.api_version <= '1.1'):  # version cutoff assumed; the original literal is stripped in this row
return('native')  # 'native'/'default' return values taken from the docstring
else:
return('default')
return(self.request.quality) | Value of quality parameter to use in processing request.
Simple substitution of 'native' or 'default' if no quality
parameter is specified. |
384,403 | def color_toggle(self):
if self.color_scheme_table.active_scheme_name == 'NoColor':  # scheme name taken from the docstring
self.color_scheme_table.set_active_scheme(self.old_scheme)
self.Colors = self.color_scheme_table.active_colors
else:
self.old_scheme = self.color_scheme_table.active_scheme_name
self.color_scheme_table.set_active_scheme('NoColor')
self.Colors = self.color_scheme_table.active_colors | Toggle between the currently active color scheme and NoColor. |
384,404 | def write_registers(self, registeraddress, values):
if not isinstance(values, list):
raise TypeError(.format(values))
_checkInt(len(values), minvalue=1, description=)
self._genericCommand(16, registeraddress, values, numberOfRegisters=len(values), payloadformat=) | Write integers to 16-bit registers in the slave.
The slave register can hold integer values in the range 0 to 65535 ("Unsigned INT16").
Uses Modbus function code 16.
The number of registers that will be written is defined by the length of the ``values`` list.
Args:
* registeraddress (int): The slave register start address (use decimal numbers, not hex).
* values (list of int): The values to store in the slave registers.
Any scaling of the register data, or converting it to negative number (two's complement)
must be done manually.
Returns:
None
Raises:
ValueError, TypeError, IOError |
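A minimal usage sketch for `write_registers` above, assuming it is the method of a `minimalmodbus.Instrument`-style object; the serial port and slave address below are placeholders.

```python
import minimalmodbus

# Placeholders: adjust the port name and slave address for the actual device.
instrument = minimalmodbus.Instrument('/dev/ttyUSB1', slaveaddress=1)
# Write three unsigned 16-bit values into consecutive holding registers starting at address 10.
instrument.write_registers(registeraddress=10, values=[100, 200, 300])
```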
384,405 | def resolve_dist(cls, dist, working_set):
deps = set()
deps.add(dist)
try:
reqs = dist.requires()
except (AttributeError, OSError, IOError):
return deps
for req in reqs:
dist = working_set.find(req)
deps |= cls.resolve_dist(dist, working_set)
return deps | Given a local distribution and a working set, returns all dependencies from the set.
:param dist: A single distribution to find the dependencies of
:type dist: :class:`pkg_resources.Distribution`
:param working_set: A working set to search for all packages
:type working_set: :class:`pkg_resources.WorkingSet`
:return: A set of distributions which the package depends on, including the package
:rtype: set(:class:`pkg_resources.Distribution`) |
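A hedged illustration of calling `resolve_dist` above; the class name `Resolver` is hypothetical, standing in for whatever class defines the method.

```python
import pkg_resources

# Hypothetical holder class for the classmethod shown above.
dist = pkg_resources.get_distribution('requests')
deps = Resolver.resolve_dist(dist, pkg_resources.working_set)
print(sorted(d.project_name for d in deps))  # 'requests' plus its transitive dependencies
```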
384,406 | def edit_scheme(self):
dlg = self.scheme_editor_dialog
dlg.set_scheme(self.current_scheme)
if dlg.exec_():
temporal_color_scheme = dlg.get_edited_color_scheme()
for key in temporal_color_scheme:
option = "temp/{0}".format(key)
value = temporal_color_scheme[key]
self.set_option(option, value)
self.update_preview(scheme_name=) | Edit current scheme. |
384,407 | def warning(*args):
if sys.stdin.isatty():
print(, *args, file=sys.stderr)
else:
notify_warning(*args) | Display warning message via stderr or GUI. |
384,408 | def no_empty_value(func):
@wraps(func)
def wrapper(value):
if not value:
raise Exception("Empty value not allowed")
return func(value)
return wrapper | Raises an exception if function argument is empty. |
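A self-contained sketch of the decorator's behaviour; the decorated `shout` function is made up for illustration.

```python
from functools import wraps

def no_empty_value(func):
    """Raise if the single argument is falsy, otherwise call func."""
    @wraps(func)
    def wrapper(value):
        if not value:
            raise Exception("Empty value not allowed")
        return func(value)
    return wrapper

@no_empty_value
def shout(value):  # illustrative function
    return value.upper()

print(shout("hi"))        # -> HI
try:
    shout("")
except Exception as exc:
    print(exc)            # -> Empty value not allowed
```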
384,409 | def get_volume_steps(self):
if not self.__volume_steps:
self.__volume_steps = yield from self.handle_int(
self.API.get())
return self.__volume_steps | Read the maximum volume level of the device. |
384,410 | async def add_relation(self, relation1, relation2):
connection = self.connection()
app_facade = client.ApplicationFacade.from_connection(connection)
log.debug(
, relation1, relation2)
def _find_relation(*specs):
for rel in self.relations:
if rel.matches(*specs):
return rel
return None
try:
result = await app_facade.AddRelation([relation1, relation2])
except JujuAPIError as e:
if not in e.message:
raise
rel = _find_relation(relation1, relation2)
if rel:
return rel
raise JujuError(.format(
relation1, relation2))
specs = [.format(app, data[])
for app, data in result.endpoints.items()]
await self.block_until(lambda: _find_relation(*specs) is not None)
return _find_relation(*specs) | Add a relation between two applications.
:param str relation1: '<application>[:<relation_name>]'
:param str relation2: '<application>[:<relation_name>]' |
384,411 | def epochs(steps=None, epoch_steps=1):
try:
iter(epoch_steps)
except TypeError:
epoch_steps = itertools.repeat(epoch_steps)
step = 0
for epoch, epoch_steps in enumerate(epoch_steps):
if steps is not None: epoch_steps = min(epoch_steps, steps - step)  # steps may be None (infinite), per the docstring
yield (epoch + 1, epoch_steps)
step += epoch_steps
if steps and step >= steps:
break | Iterator over epochs until steps is reached. 1-indexed.
Args:
steps: int, total number of steps. Infinite if None.
epoch_steps: int, number of steps per epoch. Can also be an iterable<int> to
enable variable length epochs.
Yields:
(epoch: int, epoch id, epoch_steps: int, number of steps in this epoch) |
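A worked run of the epoch iterator above (restated here, with the `steps is None` guard, so the snippet runs on its own): with steps=10 and epoch_steps=3 the final epoch is truncated.

```python
import itertools

def epochs(steps=None, epoch_steps=1):
    """Yield (epoch_id, steps_in_epoch) until `steps` is exhausted; infinite if steps is None."""
    try:
        iter(epoch_steps)
    except TypeError:
        epoch_steps = itertools.repeat(epoch_steps)
    step = 0
    for epoch, n in enumerate(epoch_steps):
        if steps is not None:
            n = min(n, steps - step)
        yield (epoch + 1, n)
        step += n
        if steps and step >= steps:
            break

print(list(epochs(steps=10, epoch_steps=3)))  # -> [(1, 3), (2, 3), (3, 3), (4, 1)]
```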
384,412 | def depth(self):
return self.fold_up(lambda n, fl, fg: max(fl + 1, fg + 1), lambda leaf: 0) | Compute the depth of the tree (depth of a leaf=0). |
384,413 | def add_item(self, assessment_id, item_id):
if assessment_id.get_identifier_namespace() != :
raise errors.InvalidArgument
self._part_item_design_session.add_item(item_id, self._get_first_part_id(assessment_id)) | Adds an existing ``Item`` to an assessment.
arg: assessment_id (osid.id.Id): the ``Id`` of the
``Assessment``
arg: item_id (osid.id.Id): the ``Id`` of the ``Item``
raise: NotFound - ``assessment_id`` or ``item_id`` not found
raise: NullArgument - ``assessment_id`` or ``item_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.* |
384,414 | def timestamp_filename(basename, ext=None):
dt = datetime.now().strftime('%Y%m%d-%H%M%S-%f')  # format reconstructed from the docstring (YYYYMMDD-HHMMSS-MILSEC)
if ext:
return '%s-%s.%s' % (basename, dt, ext)
return '%s-%s' % (basename, dt) | Return a string of the form [basename-TIMESTAMP.ext]
where TIMESTAMP is of the form YYYYMMDD-HHMMSS-MILSEC |
384,415 | def send_one_ping(self, current_socket):
checksum = 0
header = struct.pack(
"!BBHHH", ICMP_ECHO, 0, checksum, self.own_id, self.seq_number
)
padBytes = []
startVal = 0x42
for i in range(startVal, startVal + (self.packet_size)):
padBytes += [(i & 0xff)]
data = bytes(padBytes)
checksum = calculate_checksum(header + data)
header = struct.pack(
"!BBHHH", ICMP_ECHO, 0, checksum, self.own_id, self.seq_number
)
packet = header + data
send_time = default_timer()
try:
current_socket.sendto(packet, (self.destination, 1))
except socket.error as e:
print("General failure (%s)" % (e.args[1]))
current_socket.close()
return
return send_time | Send one ICMP ECHO_REQUEST. |
384,416 | def p_block_statements(self, p):
p[0] = p[1] + (p[2],)
p.set_lineno(0, p.lineno(1)) | block_statements : block_statements block_statement |
384,417 | def embeddedFileGet(self, id):
if self.isClosed or self.isEncrypted:
raise ValueError("operation illegal for closed / encrypted doc")
return _fitz.Document_embeddedFileGet(self, id) | Retrieve embedded file content by name or by number. |
384,418 | def _get_marX(self, attr_name, default):
if self.tcPr is None:
return Emu(default)
return Emu(int(self.tcPr.get(attr_name, default))) | Generalized method to get margin values. |
384,419 | def prime_check(n):
if n <= 1:
return False
if n == 2 or n == 3:
return True
if n % 2 == 0 or n % 3 == 0:
return False
j = 5
while j * j <= n:
if n % j == 0 or n % (j + 2) == 0:
return False
j += 6
return True | Return True if n is a prime number
Else return False. |
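The 6k +/- 1 trial-division test above, restated so it can be run directly:

```python
def prime_check(n):
    """Trial division with the 6k +/- 1 optimisation."""
    if n <= 1:
        return False
    if n in (2, 3):
        return True
    if n % 2 == 0 or n % 3 == 0:
        return False
    j = 5
    while j * j <= n:
        if n % j == 0 or n % (j + 2) == 0:
            return False
        j += 6
    return True

print([n for n in range(20) if prime_check(n)])  # -> [2, 3, 5, 7, 11, 13, 17, 19]
```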
384,420 | def copy_figure(self):
if self.fmt in [, ]:
qpixmap = QPixmap()
qpixmap.loadFromData(self.fig, self.fmt.upper())
QApplication.clipboard().setImage(qpixmap.toImage())
elif self.fmt == :
svg_to_clipboard(self.fig)
else:
return
self.blink_figure() | Copy figure to clipboard. |
384,421 | def p_try_statement_3(self, p):
p[0] = self.asttypes.Try(statements=p[2], catch=p[3], fin=p[4])
p[0].setpos(p) | try_statement : TRY block catch finally |
384,422 | def update_particle(self, part, chi=0.729843788, c=2.05):
neighbour_pool = [self.population[i] for i in part.neighbours]
best_neighbour = max(neighbour_pool, key=lambda x: x.best.fitness)
ce1 = (c * random.uniform(0, 1) for _ in range(len(part)))
ce2 = (c * random.uniform(0, 1) for _ in range(len(part)))
ce1_p = map(operator.mul, ce1, map(operator.sub, part.best, part))
ce2_g = map(operator.mul, ce2, map(
operator.sub, best_neighbour.best, part))
chi_list = [chi] * len(part)
chi_list2 = [1 - chi] * len(part)
a = map(operator.sub,
map(operator.mul, chi_list, map(operator.add, ce1_p, ce2_g)),
map(operator.mul, chi_list2, part.speed))
part.speed = list(map(operator.add, part.speed, a))
for i, speed in enumerate(part.speed):
if speed < part.smin:
part.speed[i] = part.smin
elif speed > part.smax:
part.speed[i] = part.smax
part[:] = list(map(operator.add, part, part.speed))
return | Constriction factor update particle method.
Notes
-----
Looks for a list of neighbours attached to a particle and
uses the particle's best position and that of the best
neighbour. |
384,423 | def _represent_match_traversal(match_traversal):
output = []
output.append(_first_step_to_match(match_traversal[0]))
for step in match_traversal[1:]:
output.append(_subsequent_step_to_match(step))
return u.join(output) | Emit MATCH query code for an entire MATCH traversal sequence. |
384,424 | def _type_insert(self, handle, key, value):
if value!=0:
if isinstance(value,float):
handle.incrbyfloat(key, value)
else:
handle.incr(key,value) | Insert the value into the series. |
384,425 | def get_sites(self):
url = "/2/sites"
data = self._get_resource(url)
sites = []
for entry in data['sites']:  # response key inferred from the '/2/sites' endpoint
sites.append(self.site_from_json(entry))
return sites | Returns a list of sites.
http://dev.wheniwork.com/#listing-sites |
384,426 | def add_op_request_access_to_group(self, name, namespace=None,
permission=None, key_name=None,
object_prefix_permissions=None):
self.ops.append({
: , : name,
: namespace,
: key_name or service_name(),
: permission,
: object_prefix_permissions}) | Adds the requested permissions to the current service's Ceph key,
allowing the key to access only the specified pools or
object prefixes. object_prefix_permissions should be a dictionary
keyed on the permission with the corresponding value being a list
of prefixes to apply that permission to.
{
'rwx': ['prefix1', 'prefix2'],
'class-read': ['prefix3']} |
384,427 | def perform_put(self, path, body, x_ms_version=None):
request = HTTPRequest()
request.method = 'PUT'
request.host = self.host
request.path = path
request.body = _get_request_body(body)
request.path, request.query = self._httpclient._update_request_uri_query(request)
request.headers = self._update_management_header(request, x_ms_version)
response = self._perform_request(request)
return response | Performs a PUT request and returns the response.
path:
Path to the resource.
Ex: '/<subscription-id>/services/hostedservices/<service-name>'
body:
Body for the PUT request.
x_ms_version:
If specified, this is used for the x-ms-version header.
Otherwise, self.x_ms_version is used. |
384,428 | def catch_all(path):
return (dict(error=.format(path),
links=dict(root=.format(request.url_root, PREFIX[1:]))),
HTTPStatus.NOT_FOUND) | Catch all path - return a JSON 404 |
384,429 | def uninstall(self, updates):
for update in updates.updates:
uid = update.Identity.UpdateID
ret[][uid] = {}
ret[][uid][] = update.Title
ret[][uid][] = \
not bool(update.IsInstalled)
if salt.utils.data.is_true(update.IsInstalled):
log.debug(, uid)
log.debug(, update.Title)
uninstall_list.Add(update)
if uninstall_list.Count == 0:
ret = {: False,
: }
return ret
installer.Updates = uninstall_list
try:
log.debug()
result = installer.Uninstall()
except pywintypes.com_error as error:
hr, msg, exc, arg = error.args
try:
failure_code = self.fail_codes[exc[5]]
except KeyError:
failure_code = .format(error)
if exc[5] == -2145124312:
log.debug()
try:
for item in uninstall_list:
for kb in item.KBArticleIDs:
cmd = [, , ]
pkg_list = self._run(cmd)[0].splitlines()
for item in pkg_list:
if + kb in item.lower():
pkg = item.split()[1]
ret[] = pkg
cmd = [,
,
,
.format(pkg),
,
]
self._run(cmd)
except CommandExecutionError as exc:
log.debug()
log.debug(, .join(cmd))
log.debug(, exc)
raise CommandExecutionError(
.format(exc))
log.debug()
ret[] = True
ret[] =
ret[] = needs_reboot()
log.debug(, ret[])
self.refresh()
reboot = {0: ,
1: ,
2: }
for update in self._updates:
uid = update.Identity.UpdateID
for item in uninstall_list:
if item.Identity.UpdateID == uid:
if not update.IsInstalled:
ret[][uid][] = \
else:
ret[][uid][] = \
ret[][uid][] = \
reboot[update.InstallationBehavior.RebootBehavior]
return ret
log.error(, failure_code)
raise CommandExecutionError(failure_code)
result_code = {0: ,
1: ,
2: ,
3: ,
4: ,
5: }
log.debug()
log.debug(result_code[result.ResultCode])
ret[] = result_code[result.ResultCode]
if result.ResultCode in [2, 3]:
ret[] = True
ret[] = result.RebootRequired
log.debug(, result.RebootRequired)
else:
log.debug()
ret[] = False
reboot = {0: ,
1: ,
2: }
for i in range(uninstall_list.Count):
uid = uninstall_list.Item(i).Identity.UpdateID
ret[][uid][] = \
result_code[result.GetUpdateResult(i).ResultCode]
ret[][uid][] = reboot[
uninstall_list.Item(i).InstallationBehavior.RebootBehavior]
return ret | Uninstall the updates passed in the updates collection. Load the updates
collection using the ``search`` or ``available`` functions.
.. note:: Starting with Windows 10 the Windows Update Agent is unable to
uninstall updates. An ``Uninstall Not Allowed`` error is returned. If
this error is encountered this function will instead attempt to use
``dism.exe`` to perform the uninstallation. ``dism.exe`` may fail to
to find the KB number for the package. In that case, removal will fail.
Args:
updates (Updates): An instance of the Updates class containing a
the updates to be uninstalled.
Returns:
dict: A dictionary containing the results of the uninstallation
Code Example:
.. code-block:: python
import salt.utils.win_update
wua = salt.utils.win_update.WindowsUpdateAgent()
# uninstall KB3195454
updates = wua.search('KB3195454')
results = wua.uninstall(updates) |
384,430 | def dialectfromstring(s):
try:
AST = compiler.parse(s)
except SyntaxError:
return
else:
try:
if (len(AST.getChildren()) > 1):
ST = AST.getChildren()[1]
if isinstance(ST, Stmt):
if isinstance(ST.getChildren()[0], Discard):
d = ST.getChildren()[0].asList()[0]
except (TypeError,AttributeError):
pass
else:
if (isinstance(d,Dict) and (len(d.items) > 0)):
if all([isctype(i[0], str) for i in d.items]):
testd = csv.Sniffer().sniff()
if all([n.value in dir(testd) and
isctype(v, type(getattr(testd, n.value))) for (n,v) in
d.items]):
D = eval(s)
for n in D.keys():
setattr(testd, n, D[n])
return testd | Attempts to convert a string representation of a CSV
dialect (as would be read from a file header, for instance)
into an actual csv.Dialect object. |
384,431 | def move_in_stack(move_up):
frame = Frame.get_selected_python_frame()
while frame:
if move_up:
iter_frame = frame.older()
else:
iter_frame = frame.newer()
if not iter_frame:
break
if iter_frame.is_evalframeex():
if iter_frame.select():
iter_frame.print_summary()
return
frame = iter_frame
if move_up:
print
else:
print | Move up or down the stack (for the py-up/py-down command) |
384,432 | def _echo_setting(key):
value = getattr(settings, key)
secho( % key, fg=, bold=True, nl=False)
secho(
six.text_type(value),
bold=True,
fg= if isinstance(value, six.text_type) else ,
) | Echo a setting to the CLI. |
384,433 | def make_strain_from_inj_object(self, inj, delta_t, detector_name,
distance_scale=1):
detector = Detector(detector_name)
hp, hc = ringdown_td_approximants[inj[]](
inj, delta_t=delta_t, **self.extra_args)
hp._epoch += inj[]
hc._epoch += inj[]
if distance_scale != 1:
hp /= distance_scale
hc /= distance_scale
signal = detector.project_wave(hp, hc,
inj[], inj[], inj[])
return signal | Make a h(t) strain time-series from an injection object as read from
an hdf file.
Parameters
-----------
inj : injection object
The injection object to turn into a strain h(t).
delta_t : float
Sample rate to make injection at.
detector_name : string
Name of the detector used for projecting injections.
distance_scale: float, optional
Factor to scale the distance of an injection with. The default (=1)
is no scaling.
Returns
--------
signal : float
h(t) corresponding to the injection. |
384,434 | def _check_error(self, response, json_response=None):
if response.status_code >= 400:
json_response = json_response or self._get_json_response(response)
err_cls = self._check_http_error_code(response.status_code)
try:
raise err_cls("%s error: %s" % (response.status_code, json_response["error"]["error_msg"]), response.status_code)
except TypeError:
raise err_cls("%s error: %s" % (response.status_code, json_response["error_description"]), response.status_code)
return True | Check for HTTP error code from the response, raise exception if there's any
Args:
response (object): Object returned by requests' `get` and `post`
methods
json_response (dict): JSON response, if applicable
Raises:
HTTPError: If the status code of response is either 4xx or 5xx
Returns:
True if status code is not error code |
384,435 | def select_catalogue(self, selector, distance=None):
if selector.catalogue.get_number_events() < 1:
raise ValueError()
self.catalogue = selector.within_polygon(self.geometry,
distance,
upper_depth=self.upper_depth,
lower_depth=self.lower_depth)
if self.catalogue.get_number_events() < 5:
warnings.warn('Source %s (%s) has fewer than 5 events'  # message reconstructed from the check above
% (self.id, self.name)) | Selects the catalogue of earthquakes attributable to the source
:param selector:
Populated instance of openquake.hmtk.seismicity.selector.CatalogueSelector
class
:param float distance:
Distance (in km) to extend or contract (if negative) the zone for
selecting events |
384,436 | async def join(self, ctx, *, channel: discord.VoiceChannel):
if ctx.voice_client is not None:
return await ctx.voice_client.move_to(channel)
await channel.connect() | Joins a voice channel |
384,437 | def read_scanimage_metadata(fh):
fh.seek(0)
try:
byteorder, version = struct.unpack(, fh.read(4))
if byteorder != b or version != 43:
raise Exception
fh.seek(16)
magic, version, size0, size1 = struct.unpack(, fh.read(16))
if magic != 117637889 or version != 3:
raise Exception
except Exception:
raise ValueError()
frame_data = matlabstr2py(bytes2str(fh.read(size0)[:-1]))
roi_data = read_json(fh, , None, size1, None) if size1 > 1 else {}
return frame_data, roi_data | Read ScanImage BigTIFF v3 static and ROI metadata from open file.
Return non-varying frame data as dict and ROI group data as JSON.
The settings can be used to read image data and metadata without parsing
the TIFF file.
Raise ValueError if file does not contain valid ScanImage v3 metadata. |
384,438 | def get_user(username):
try:
enable()
configure_terminal()
cmd_out = sendline(.format(username))
cmd_out.split()
user = cmd_out[1:-1]
configure_terminal_exit()
disable()
return user
except TerminalException as e:
log.error(e)
return | Get username line from switch
.. code-block: bash
salt '*' onyx.cmd get_user username=admin |
384,439 | async def entries_exists(self, url, urls=):
params = {: self.token,
: url,
: urls}
path = .format(ext=self.format)
return await self.query(path, "get", **params) | GET /api/entries/exists.{_format}
Check if an entry exist by url.
:param url string true An url Url to check if it exists
:param urls string false An array of urls
(?urls[]=http...&urls[]=http...) Urls (as an array)
to check if it exists
:return result |
384,440 | def recommend(self, userid, user_items,
N=10, filter_already_liked_items=True, filter_items=None, recalculate_user=False):
pass | Recommends items for a user
Calculates the N best recommendations for a user, and returns a list of itemids, score.
Parameters
----------
userid : int
The userid to calculate recommendations for
user_items : csr_matrix
A sparse matrix of shape (number_users, number_items). This lets us look
up the liked items and their weights for the user. This is used to filter out
items that have already been liked from the output, and to also potentially
calculate the best items for this user.
N : int, optional
The number of results to return
filter_items : sequence of ints, optional
List of extra item ids to filter out from the output
recalculate_user : bool, optional
When true, don't rely on stored user state and instead recalculate from the
passed in user_items
Returns
-------
list
List of (itemid, score) tuples |
384,441 | async def send_venue(self, latitude: base.Float, longitude: base.Float, title: base.String, address: base.String,
foursquare_id: typing.Union[base.String, None] = None,
disable_notification: typing.Union[base.Boolean, None] = None,
reply_markup=None,
reply=True) -> Message:
warn_deprecated(
,
stacklevel=8)
return await self.bot.send_venue(chat_id=self.chat.id,
latitude=latitude,
longitude=longitude,
title=title,
address=address,
foursquare_id=foursquare_id,
disable_notification=disable_notification,
reply_to_message_id=self.message_id if reply else None,
reply_markup=reply_markup) | Use this method to send information about a venue.
Source: https://core.telegram.org/bots/api#sendvenue
:param latitude: Latitude of the venue
:type latitude: :obj:`base.Float`
:param longitude: Longitude of the venue
:type longitude: :obj:`base.Float`
:param title: Name of the venue
:type title: :obj:`base.String`
:param address: Address of the venue
:type address: :obj:`base.String`
:param foursquare_id: Foursquare identifier of the venue
:type foursquare_id: :obj:`typing.Union[base.String, None]`
:param disable_notification: Sends the message silently. Users will receive a notification with no sound.
:type disable_notification: :obj:`typing.Union[base.Boolean, None]`
:param reply_markup: Additional interface options.
:type reply_markup: :obj:`typing.Union[types.InlineKeyboardMarkup,
types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply, None]`
:param reply: fill 'reply_to_message_id'
:return: On success, the sent Message is returned.
:rtype: :obj:`types.Message` |
384,442 | def get_trunk_interfaces(auth, url, devid=None, devip=None):
if devip is not None:
devid = get_dev_details(devip, auth, url)[]
get_trunk_interfaces_url = "/imcrs/vlan/trunk?devId=" + str(devid) + \
"&start=1&size=5000&total=false"
f_url = url + get_trunk_interfaces_url
response = requests.get(f_url, auth=auth, headers=HEADERS)
try:
if response.status_code == 200:
dev_trunk_interfaces = (json.loads(response.text))
if len(dev_trunk_interfaces) == 2:
if isinstance(dev_trunk_interfaces[], list):
return dev_trunk_interfaces[]
elif isinstance(dev_trunk_interfaces[], dict):
return [dev_trunk_interfaces[]]
else:
dev_trunk_interfaces[] = ["No trunk inteface"]
return dev_trunk_interfaces[]
except requests.exceptions.RequestException as error:
return "Error:\n" + str(error) + | Function takes devId as input to RESTFULL call to HP IMC platform
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:param devid: str requires devid of the target device
:param devip: str of ipv4 address of the target device
:return: list of dictionaries where each element of the list represents an interface which
has been configured as a
VLAN trunk port
:rtype: list
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.vlanm import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> trunk_interfaces = get_trunk_interfaces('10', auth.creds, auth.url)
>>> assert type(trunk_interfaces) is list
>>> assert len(trunk_interfaces[0]) == 3
>>> assert 'allowedVlans' in trunk_interfaces[0]
>>> assert 'ifIndex' in trunk_interfaces[0]
>>> assert 'pvid' in trunk_interfaces[0]
>>> get_trunk_interfaces('350', auth.creds, auth.url)
['No trunk inteface'] |
384,443 | def get_timefactor(cls) -> float:
try:
parfactor = hydpy.pub.timegrids.parfactor
except RuntimeError:
if not (cls.parameterstep and cls.simulationstep):
raise RuntimeError(
f'To calculate the conversion factor for adapting '
f'the values of the time-dependent parameters, you '
f'need to define both a parameter and a simulation '
f'time step size first.')
else:
date1 = timetools.Date()
date2 = date1 + cls.simulationstep
parfactor = timetools.Timegrids(timetools.Timegrid(
date1, date2, cls.simulationstep)).parfactor
return parfactor(cls.parameterstep) | Factor to adjust a new value of a time-dependent parameter.
For a time-dependent parameter, its effective value depends on the
simulation step size. Method |Parameter.get_timefactor| returns
the fraction between the current simulation step size and the
current parameter step size.
.. testsetup::
>>> from hydpy import pub
>>> del pub.timegrids
>>> from hydpy.core.parametertools import Parameter
>>> Parameter.simulationstep.delete()
Period()
Method |Parameter.get_timefactor| raises the following error
when time information is not available:
>>> from hydpy.core.parametertools import Parameter
>>> Parameter.get_timefactor()
Traceback (most recent call last):
...
RuntimeError: To calculate the conversion factor for adapting the \
values of the time-dependent parameters, you need to define both a \
parameter and a simulation time step size first.
One can define both time step sizes directly:
>>> _ = Parameter.parameterstep('1d')
>>> _ = Parameter.simulationstep('6h')
>>> Parameter.get_timefactor()
0.25
As usual, the "global" simulation step size of the |Timegrids|
object of module |pub| is prefered:
>>> from hydpy import pub
>>> pub.timegrids = '2000-01-01', '2001-01-01', '12h'
>>> Parameter.get_timefactor()
0.5 |
384,444 | def run_application(component: Union[Component, Dict[str, Any]], *, event_loop_policy: str = None,
max_threads: int = None, logging: Union[Dict[str, Any], int, None] = INFO,
start_timeout: Union[int, float, None] = 10):
assert check_argument_types()
if isinstance(logging, dict):
dictConfig(logging)
elif isinstance(logging, int):
basicConfig(level=logging)
logger = getLogger(__name__)
logger.info(, if __debug__ else )
if event_loop_policy:
create_policy = policies.resolve(event_loop_policy)
policy = create_policy()
asyncio.set_event_loop_policy(policy)
logger.info(, qualified_name(policy))
event_loop = asyncio.get_event_loop()
if max_threads is not None:
event_loop.set_default_executor(ThreadPoolExecutor(max_threads))
logger.info(, max_threads)
if isinstance(component, dict):
component = cast(Component, component_types.create_object(**component))
logger.info()
context = Context()
exception = None
exit_code = 0
try:
coro = asyncio.wait_for(component.start(context), start_timeout, loop=event_loop)
event_loop.run_until_complete(coro)
except asyncio.TimeoutError as e:
exception = e
logger.error()
exit_code = 1
except Exception as e:
exception = e
logger.exception()
exit_code = 1
else:
logger.info()
try:
event_loop.add_signal_handler(signal.SIGTERM, sigterm_handler, logger, event_loop)
except NotImplementedError:
pass
try:
event_loop.run_forever()
except KeyboardInterrupt:
pass
except SystemExit as e:
exit_code = e.code
logger.info()
event_loop.run_until_complete(context.close(exception))
try:
event_loop.run_until_complete(event_loop.shutdown_asyncgens())
except (AttributeError, NotImplementedError):
pass
event_loop.close()
logger.info()
shutdown()
if exit_code:
sys.exit(exit_code) | Configure logging and start the given root component in the default asyncio event loop.
Assuming the root component was started successfully, the event loop will continue running
until the process is terminated.
Initializes the logging system first based on the value of ``logging``:
* If the value is a dictionary, it is passed to :func:`logging.config.dictConfig` as
argument.
* If the value is an integer, it is passed to :func:`logging.basicConfig` as the logging
level.
* If the value is ``None``, logging setup is skipped entirely.
By default, the logging system is initialized using :func:`~logging.basicConfig` using the
``INFO`` logging level.
The default executor in the event loop is replaced with a new
:class:`~concurrent.futures.ThreadPoolExecutor` where the maximum number of threads is set to
the value of ``max_threads`` or, if omitted, the default value of
:class:`~concurrent.futures.ThreadPoolExecutor`.
:param component: the root component (either a component instance or a configuration dictionary
where the special ``type`` key is either a component class or a ``module:varname``
reference to one)
:param event_loop_policy: entry point name (from the ``asphalt.core.event_loop_policies``
namespace) of an alternate event loop policy (or a module:varname reference to one)
:param max_threads: the maximum number of worker threads in the default thread pool executor
(the default value depends on the event loop implementation)
:param logging: a logging configuration dictionary, :ref:`logging level <python:levels>` or
``None``
:param start_timeout: seconds to wait for the root component (and its subcomponents) to start
up before giving up (``None`` = wait forever) |
384,445 | def read(cls, f):
url = None
line = f.readline()
if not line:
return None
while not line.startswith(cls.LENGTH_HEADER):
if line.startswith(cls.URI_HEADER):
url = line[len(cls.URI_HEADER):].strip()
line = f.readline()
f.readline()
length = int(line.split()[1])
return cls(url, length) | Read header from file. Headers end with length and then 1 blank line. |
384,446 | def import_task_to_graph(diagram_graph, process_id, process_attributes, task_element):
BpmnDiagramGraphImport.import_activity_to_graph(diagram_graph, process_id, process_attributes, task_element) | Adds to graph the new element that represents BPMN task.
In our representation tasks have only basic attributes and elements, inherited from Activity type,
so this method only needs to call add_flownode_to_graph.
:param diagram_graph: NetworkX graph representing a BPMN process diagram,
:param process_id: string object, representing an ID of process element,
:param process_attributes: dictionary that holds attribute values of 'process' element, which is parent of
imported flow node,
:param task_element: object representing a BPMN XML 'task' element. |
384,447 | def _map_filtered_clusters_to_full_clusters(self,
clusters,
filter_map):
results = []
for cluster in clusters:
full_cluster = []
for seq_id in cluster:
full_cluster += filter_map[seq_id]
results.append(full_cluster)
return results | Input: clusters, a list of cluster lists
filter_map, the seq_id in each clusters
is the key to the filter_map
containing all seq_ids with
duplicate FASTA sequences
Output: an extended list of cluster lists |
384,448 | def Nu_plate_Muley_Manglik(Re, Pr, chevron_angle, plate_enlargement_factor):
beta, phi = chevron_angle, plate_enlargement_factor
t1 = (0.2668 - 0.006967*beta + 7.244E-5*beta**2)
t2 = (20.7803 - 50.9372*phi + 41.1585*phi**2 - 10.1507*phi**3)
t3 = (0.728 + 0.0543*sin((2*pi*beta/90) + 3.7))
return t1*t2*Re**t3*Pr**(1/3.) | r'''Calculates Nusselt number for single-phase flow in a
Chevron-style plate heat exchanger according to [1]_, also shown in [2]_
and [3]_.
.. math::
Nu = [0.2668 - 0.006967(\beta) + 7.244\times 10^{-5}(\beta)^2]
\times[20.7803 - 50.9372\phi + 41.1585\phi^2 - 10.1507\phi^3]
\times Re^{[0.728 + 0.0543\sin[(2\pi\beta/90) + 3.7]]} Pr^{1/3}
Parameters
----------
Re : float
Reynolds number with respect to the hydraulic diameter of the channels,
[-]
Pr : float
Prandtl number calculated with bulk fluid properties, [-]
chevron_angle : float
Angle of the plate corrugations with respect to the vertical axis
(the direction of flow if the plates were straight), between 0 and
90. Many plate exchangers use two alternating patterns; use their
average angle for that situation [degrees]
plate_enlargement_factor : float
The extra surface area multiplier as compared to a flat plate
caused the corrugations, [-]
Returns
-------
Nu : float
Nusselt number with respect to `Dh`, [-]
Notes
-----
The correlation as presented in [1]_ suffers from a typo, with a
coefficient of 10.51 instead of 10.15. Several more decimal places were
published along with the corrected typo in [2]_. This has a *very large*
difference if not implemented.
The viscosity correction power is recommended to be the blanket
Sieder and Tate (1936) value of 0.14.
The correlation is recommended in the range of Reynolds numbers above
1000, chevron angles between 30 and 60 degrees, and enlargement factors
from 1 to 1.5. Due to its cubic nature it is not likely to give good
results if the chevron angle or enlargement factors are out of those
ranges.
Examples
--------
>>> Nu_plate_Muley_Manglik(Re=2000, Pr=.7, chevron_angle=45,
... plate_enlargement_factor=1.18)
36.49087100602062
References
----------
.. [1] Muley, A., and R. M. Manglik. "Experimental Study of Turbulent Flow
Heat Transfer and Pressure Drop in a Plate Heat Exchanger With Chevron
Plates." Journal of Heat Transfer 121, no. 1 (February 1, 1999): 110-17.
doi:10.1115/1.2825923.
.. [2] Palm, Björn, and Joachim Claesson. "Plate Heat Exchangers:
Calculation Methods for Single- and Two-Phase Flow (Keynote)," January
1, 2005, 103-13. https://doi.org/10.1115/ICMM2005-75092. |
384,449 | def __load_file(self, key_list) -> str:
file = str(key_list[0]) + self.extension
key_list.pop(0)
file_path = os.path.join(self.path, file)
if os.path.exists(file_path):
return Json.from_file(file_path)
else:
raise FileNotFoundError(file_path) | Load a translator file |
384,450 | def params_as_tensors_for(*objs, convert=True):
objs = set(objs)
prev_values = [_params_as_tensors_enter(o, convert) for o in objs]
try:
yield
finally:
for o, pv in reversed(list(zip(objs, prev_values))):
_params_as_tensors_exit(o, pv) | Context manager which changes the representation of parameters and data holders
for the specific parameterized object(s).
This can also be used to turn off tensor conversion functions wrapped with
`params_as_tensors`:
```
@gpflow.params_as_tensors
def compute_something(self): # self is parameterized object.
s = tf.reduce_sum(self.a) # self.a is a parameter.
with params_as_tensors_for(self, convert=False):
b = self.c.constrained_tensor
return s + b
```
:param objs: one or more instances of classes deriving from Parameterized
:param convert: Flag which is used for turning tensor convertion
feature on, `True`, or turning it off, `False`. |
384,451 | def _validate_calibration_params(strategy=, min_rate=None,
beta=1.):
if strategy not in (, , ,
):
raise ValueError(
.format(strategy))
if strategy == or strategy == :
if (min_rate is None or not isinstance(min_rate, (int, float)) or
not min_rate >= 0 or not min_rate <= 1):
raise ValueError(
.format(min_rate))
if strategy == :
if beta is None or not isinstance(beta, (int, float)):
raise ValueError(
.format(type(beta))) | Ensure that calibration parameters have allowed values |
384,452 | def verifydropdown(self, window_name, object_name):
try:
object_handle = self._get_object_handle(window_name, object_name)
if not object_handle.AXEnabled or not object_handle.AXChildren:
return 0
children = object_handle.AXChildren[0]
if children:
return 1
except LdtpServerException:
pass
return 0 | Verify drop down list / menu poped up
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@return: 1 on success 0 on failure.
@rtype: integer |
384,453 | def unregister(self, condition_set):
if callable(condition_set):
condition_set = condition_set()
self._registry.pop(condition_set.get_id(), None) | Unregisters a condition set with the manager.
>>> gargoyle.unregister(condition_set) #doctest: +SKIP |
384,454 | def get_event_consumer(config, success_channel, error_channel, metrics,
**kwargs):
builder = event_consumer.GPSEventConsumerBuilder(
config, success_channel, error_channel, metrics, **kwargs)
return builder.build_event_consumer() | Get a GPSEventConsumer client.
A factory function that validates configuration, creates schema
validator and parser clients, creates an auth and a pubsub client,
and returns an event consumer (:interface:`gordon.interfaces.
IRunnable` and :interface:`gordon.interfaces.IMessageHandler`)
provider.
Args:
config (dict): Google Cloud Pub/Sub-related configuration.
success_channel (asyncio.Queue): Queue to place a successfully
consumed message to be further handled by the ``gordon``
core system.
error_channel (asyncio.Queue): Queue to place a message met
with errors to be further handled by the ``gordon`` core
system.
metrics (obj): :interface:`IMetricRelay` implementation.
kwargs (dict): Additional keyword arguments to pass to the
event consumer.
Returns:
A :class:`GPSEventConsumer` instance. |
384,455 | def getArguments(parser):
"Provides additional validation of the arguments collected by argparse."
args = parser.parse_args()
if args.width <= 0:
raise argparse.ArgumentError(args.width, )
return args | Provides additional validation of the arguments collected by argparse. |
384,456 | def adjust_weights_discrepancy(self, resfile=None,original_ceiling=True):
if resfile is not None:
self.resfile = resfile
self.__res = None
obs = self.observation_data.loc[self.nnz_obs_names,:]
swr = (self.res.loc[self.nnz_obs_names,:].residual * obs.weight)**2
factors = (1.0/swr).apply(np.sqrt)
if original_ceiling:
factors = factors.apply(lambda x: 1.0 if x > 1.0 else x)
self.observation_data.loc[self.nnz_obs_names,"weight"] *= factors | adjusts the weights of each non-zero weight observation based
on the residual in the pest residual file so each observations contribution
to phi is 1.0
Parameters
----------
resfile : str
residual file name. If None, try to use a residual file
with the Pst case name. Default is None
original_ceiling : bool
flag to keep weights from increasing - this is generally a good idea.
Default is True |
384,457 | def float_greater_or_equal(threshold: float) -> Callable:
def check_greater_equal(value: str):
value_to_check = float(value)
if value_to_check < threshold:
raise argparse.ArgumentTypeError("must be greater or equal to %f." % threshold)
return value_to_check
return check_greater_equal | Returns a method that can be used in argument parsing to check that the float argument is greater or equal to `threshold`.
:param threshold: The threshold that we assume the cli argument value is greater or equal to.
:return: A method that can be used as a type in argparse. |
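A short sketch of plugging the factory above into argparse; the `--learning-rate` flag is made up for illustration.

```python
import argparse

def float_greater_or_equal(threshold: float):
    """argparse `type` factory: accept only floats >= threshold."""
    def check_greater_equal(value: str):
        value_to_check = float(value)
        if value_to_check < threshold:
            raise argparse.ArgumentTypeError("must be greater or equal to %f." % threshold)
        return value_to_check
    return check_greater_equal

parser = argparse.ArgumentParser()
parser.add_argument('--learning-rate', type=float_greater_or_equal(0.0))  # illustrative flag
print(parser.parse_args(['--learning-rate', '0.01']).learning_rate)  # -> 0.01
# parser.parse_args(['--learning-rate', '-1']) would abort with the ArgumentTypeError message.
```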
384,458 | def mergecn(args):
p = OptionParser(mergecn.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
csvfile, = args
samples = [x.replace("-cn", "").strip().strip("/") for x in open(csvfile)]
betadir = "beta"
mkdir(betadir)
for seqid in allsomes:
names = [op.join(s + "-cn", "{}.{}.cn".
format(op.basename(s), seqid)) for s in samples]
arrays = [np.fromfile(name, dtype=np.float) for name in names]
shapes = [x.shape[0] for x in arrays]
med_shape = np.median(shapes)
arrays = [x for x in arrays if x.shape[0] == med_shape]
ploidy = 2 if seqid not in ("chrY", "chrM") else 1
if seqid in sexsomes:
chr_med = [np.median([x for x in a if x > 0]) for a in arrays]
chr_med = np.array(chr_med)
idx = get_kmeans(chr_med, k=2)
zero_med = np.median(chr_med[idx == 0])
one_med = np.median(chr_med[idx == 1])
logging.debug("K-means with {} c0:{} c1:{}"
.format(seqid, zero_med, one_med))
higher_idx = 1 if one_med > zero_med else 0
arrays = np.array(arrays)[idx == higher_idx]
arrays = [[x] for x in arrays]
ar = np.concatenate(arrays)
print(seqid, ar.shape)
rows, columns = ar.shape
beta = []
std = []
for j in xrange(columns):
a = ar[:, j]
beta.append(np.median(a))
std.append(np.std(a) / np.mean(a))
beta = np.array(beta) / ploidy
betafile = op.join(betadir, "{}.beta".format(seqid))
beta.tofile(betafile)
stdfile = op.join(betadir, "{}.std".format(seqid))
std = np.array(std)
std.tofile(stdfile)
logging.debug("Written to `{}`".format(betafile))
ar.tofile("{}.bin".format(seqid)) | %prog mergecn FACE.csv
Compile matrix of GC-corrected copy numbers. Place a bunch of folders in
csv file. Each folder will be scanned, one chromosomes after another. |
384,459 | def mechanism(self):
return tuple(sorted(
chain.from_iterable(part.mechanism for part in self))) | tuple[int]: The nodes of the mechanism in the partition. |
384,460 | def notify(title,
message,
api_key=NTFY_API_KEY,
provider_key=None,
priority=0,
url=None,
retcode=None):
data = {
: api_key,
: ,
: title,
: message,
}
if MIN_PRIORITY <= priority <= MAX_PRIORITY:
data[] = priority
else:
raise ValueError(
.format(MIN_PRIORITY, MAX_PRIORITY))
if url is not None:
data[] = url
if provider_key is not None:
data[] = provider_key
resp = requests.post(
API_URL, data=data, headers={
: USER_AGENT,
})
resp.raise_for_status() | Optional parameters:
* ``api_key`` - use your own application token
* ``provider_key`` - if you are whitelisted
* ``priority``
* ``url`` |
384,461 | def load(self):
data = self.get_data( % self.id, type=GET)
load_balancer = data[]
for attr in load_balancer.keys():
if attr == :
health_check = HealthCheck(**load_balancer[])
setattr(self, attr, health_check)
elif attr == :
sticky_ses = StickySesions(**load_balancer[])
setattr(self, attr, sticky_ses)
elif attr == :
rules = list()
for rule in load_balancer[]:
rules.append(ForwardingRule(**rule))
setattr(self, attr, rules)
else:
setattr(self, attr, load_balancer[attr])
return self | Loads updated attributues for a LoadBalancer object.
Requires self.id to be set. |
384,462 | def verifySignature(ecPublicSigningKey, message, signature):
if ecPublicSigningKey.getType() == Curve.DJB_TYPE:
result = _curve.verifySignature(ecPublicSigningKey.getPublicKey(), message, signature)
return result == 0
else:
raise InvalidKeyException("Unknown type: %s" % ecPublicSigningKey.getType()) | :type ecPublicSigningKey: ECPublicKey
:type message: bytearray
:type signature: bytearray |
384,463 | def get_login_theme():
today = datetime.now().date()
if today.month == 12 or today.month == 1:
return {"js": "themes/snow/snow.js", "css": "themes/snow/snow.css"}
if today.month == 3 and (14 <= today.day <= 16):
return {"js": "themes/piday/piday.js", "css": "themes/piday/piday.css"}
return {} | Load a custom login theme (e.g. snow) |
384,464 | def unpack_grad_tuple(gv, gpt):
elt_widths = [x.num_elements() for x in gpt.shapes]
with tf.device(gv[0][0].device):
with tf.name_scope("unpack"):
splits = tf.split(gv[0], elt_widths)
unpacked_gv = []
for idx, s in enumerate(splits):
unpacked_gv.append((tf.reshape(s, gpt.shapes[idx]),
gpt.vars[idx]))
return unpacked_gv | Unpack a previously packed collection of gradient tensors.
Args:
gv: A (grad, var) pair to be unpacked.
gpt: A GradPackTuple describing the packing operation that produced gv.
Returns:
A list of (grad, var) pairs corresponding to the values that were
originally packed into gv, maybe following subsequent operations like
reduction. |
384,465 | def compile(self, module):
imports, depends = self.get_dependencies(module)
file_list = list(imports | depends) + [module]
cmd_list = [, , , , self.pyang_plugins]
cmd_list += [, self.dir_yang]
cmd_list += [self.dir_yang + + f + for f in file_list]
logger.info(.format(module,
.join(cmd_list)))
p = Popen(.join(cmd_list), shell=True, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
logger.info(.format(p.returncode))
if p.returncode == 0:
logger.debug(stderr.decode())
else:
logger.error(stderr.decode())
parser = etree.XMLParser(remove_blank_text=True)
tree = etree.XML(stdout.decode(), parser)
return Model(tree) | compile
High-level api: Compile a module.
Parameters
----------
module : `str`
Module name that is inquired about.
Returns
-------
Model
A Model object. |
384,466 | def get_new(mserver_url, token, board):
thread = termui.waiting_echo("Getting message from Server...")
thread.daemon = True
thread.start()
try:
params = {"name":"node000", "board":board, "access_token":token}
r = requests.post("%s%s" %(mserver_url, nodes_create_endpoint), params=params, timeout=10, verify=verify)
r.raise_for_status()
json_response = r.json()
except requests.exceptions.HTTPError as e:
thread.stop()
thread.join()
if r.status_code == 400:
error = r.json().get("error", None)
click.secho(">> %s" %error, fg=)
else:
click.secho(">> %s" %e, fg=)
return None
except Exception as e:
thread.stop()
thread.join()
click.secho(">> %s" %e, fg=)
return None
thread.stop()
thread.join()
return json_response | get node sn and key |
384,467 | def _select_Generic_superclass_parameters(subclass, superclass_origin):
subclass = _find_base_with_origin(subclass, superclass_origin)
if subclass is None:
return None
if subclass.__origin__ is superclass_origin:
return subclass.__args__
prms = _find_Generic_super_origin(subclass, superclass_origin)
res = []
for prm in prms:
sub_search = subclass
while not sub_search is None:
try:
res.append(sub_search.__args__[sub_search.__origin__.__parameters__.index(prm)])
break
except ValueError:
sub_search = _find_base_with_origin(
sub_search.__origin__, superclass_origin)
else:
return None
return res | Helper for _issubclass_Generic. |
384,468 | def _update_centers(X, membs, n_clusters):
centers = np.empty(shape=(n_clusters, X.shape[1]), dtype=float)
sse = np.empty(shape=n_clusters, dtype=float)
for clust_id in range(n_clusters):
memb_ids = np.where(membs == clust_id)[0]
if memb_ids.shape[0] == 0:
memb_ids = np.random.choice(X.shape[0], size=1)
centers[clust_id,:] = np.mean(X[memb_ids,:], axis=0)
sse[clust_id] = _cal_dist2center(X[memb_ids,:], centers[clust_id,:])
return(centers, sse) | Update Cluster Centers:
calculate the mean of feature vectors for each cluster |
384,469 | def _download_query(self, as_of):
c = self.institution.client()
q = c.bank_account_query(
number=self.number,
date=as_of,
account_type=self.account_type,
bank_id=self.routing_number)
return q | Formulate the specific query needed for download
Not intended to be called by developers directly.
:param as_of: Date in 'YYYYMMDD' format
:type as_of: string |
384,470 | def spec(self) -> list:
spec = [item
for op, pat in self.ops.items()
for item in [( + op, {: pat, : self.postf, : None}),
( + op, {: pat, : self.postf, : None})]
]
spec[0][1][] = self.regex_pat.format(_ops_regex(self.ops.keys()))
return spec | Returns prefix unary operators list.
Sets only one regex for all items in the dict. |
384,471 | async def ListModels(self, tag):
_params = dict()
msg = dict(type=,
request=,
version=5,
params=_params)
_params[] = tag
reply = await self.rpc(msg)
return reply | tag : str
Returns -> typing.Sequence[~UserModel] |
384,472 | def mt_fields(fields, nomaster=False, onlydefaultlang=False):
assert isinstance(fields, (list, tuple))
fl = []
for field in fields:
if not nomaster:
fl.append(field)
if onlydefaultlang:
fl.append(.format(field, DEFAULT_LANGUAGE))
else:
for lang in AVAILABLE_LANGUAGES:
fl.append(.format(field, lang))
return fl | Returns list of fields for multilanguage fields of model.
Examples:
print(mt_fields('name', 'desc'))
['name', 'name_en', 'name_uk', 'desc', 'desc_en', 'desc_uk']
MyModel.objects.only(*mt_fields('name', 'desc', 'content'))
If nomaster then master field will not be append.
F.e.: ['name_en', 'name_uk'] -- without master 'name'.
If onlydefaultlang then wiil be search only default language:
F.e.: ['name', 'name_en'] -- without additional 'name_uk'.
If nomaster and onlydefaultlang then will be use both rulses.
F.e.: ['name_en'] -- without master 'name' and additional 'name_uk'. |
384,473 | def generate_lines(input_file,
start=0,
stop=float('inf')):
with gzip.GzipFile(input_file, ) as f:
for i, line in enumerate(f):
if i < start:
continue
if i >= stop:
break
yield line.rstrip() | Generate (yield) lines in a gzipped file (*.txt.gz) one line at a time |
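A runnable sketch of the gzip line generator above (restated with `float('inf')` as the default stop); the file name is illustrative.

```python
import gzip

def generate_lines(input_file, start=0, stop=float('inf')):
    """Yield stripped lines (as bytes) from a gzipped file, for line indices in [start, stop)."""
    with gzip.GzipFile(input_file, 'r') as f:
        for i, line in enumerate(f):
            if i < start:
                continue
            if i >= stop:
                break
            yield line.rstrip()

# Build a tiny gzipped file to demonstrate; 'example.txt.gz' is just an illustrative name.
with gzip.open('example.txt.gz', 'wt') as fh:
    fh.write('\n'.join('line %d' % i for i in range(10)))

print(list(generate_lines('example.txt.gz', start=2, stop=5)))  # -> [b'line 2', b'line 3', b'line 4']
```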
384,474 | def check_site_enabled(site):
if site.endswith():
site_file = site
else:
site_file = .format(site)
if os.path.islink(.format(SITE_ENABLED_DIR, site_file)):
return True
elif site == and \
os.path.islink(.format(SITE_ENABLED_DIR, site_file)):
return True
else:
return False | Checks to see if the specific site symlink is in /etc/apache2/sites-enabled.
This will only be functional on Debian-based operating systems (Ubuntu,
Mint, etc).
CLI Examples:
.. code-block:: bash
salt '*' apache.check_site_enabled example.com
salt '*' apache.check_site_enabled example.com.conf |
384,475 | def parse_argument(string: str) -> Union[str, Tuple[str, str]]:
idx_equal = string.find('=')
if idx_equal == -1:
return string
idx_quote = idx_equal+1
for quote in (, "'"):
idx = string.find(quote)
if -1 < idx < idx_quote:
idx_quote = idx
if idx_equal < idx_quote:
return string[:idx_equal], string[idx_equal+1:]
return string | Return a single value for a string understood as a positional
argument or a |tuple| containing a keyword and its value for a
string understood as a keyword argument.
|parse_argument| is intended to be used as a helper function for
function |execute_scriptfunction| only. See the following
examples to see which types of keyword arguments |execute_scriptfunction|
covers:
>>> from hydpy.exe.commandtools import parse_argument
>>> parse_argument('x=3')
('x', '3')
>>> parse_argument('"x=3"')
'"x=3"'
>>> parse_argument("'x=3'")
"'x=3'"
>>> parse_argument('x="3==3"')
('x', '"3==3"')
>>> parse_argument("x='3==3'")
('x', "'3==3'") |
384,476 | def query_nexus(query_url, timeout_sec, basic_auth=None):
log = logging.getLogger(mod_logger + )
retry_sec = 5
max_retries = 6
try_num = 1
query_success = False
nexus_response = None
while try_num <= max_retries:
if query_success:
break
log.debug(.format(n=try_num, u=query_url, m=max_retries))
try:
nexus_response = requests.get(query_url, auth=basic_auth, stream=True, timeout=timeout_sec)
except requests.exceptions.Timeout:
_, ex, trace = sys.exc_info()
msg = .format(
n=ex.__class__.__name__, t=timeout_sec, r=retry_sec, e=str(ex))
log.warn(msg)
if try_num < max_retries:
log.info(.format(t=retry_sec))
time.sleep(retry_sec)
except (requests.exceptions.RequestException, requests.exceptions.ConnectionError):
_, ex, trace = sys.exc_info()
msg = .format(
n=ex.__class__.__name__, r=retry_sec, e=str(ex))
log.warn(msg)
if try_num < max_retries:
log.info(.format(t=retry_sec))
time.sleep(retry_sec)
else:
query_success = True
try_num += 1
if not query_success:
msg = .format(
u=query_url, m=max_retries)
log.error(msg)
raise RuntimeError(msg)
if nexus_response.status_code != 200:
msg = .format(
u=query_url, c=nexus_response.status_code)
log.error(msg)
raise RuntimeError(msg)
return nexus_response | Queries Nexus for an artifact
:param query_url: (str) Query URL
:param timeout_sec: (int) query timeout
:param basic_auth (HTTPBasicAuth) object or none
:return: requests.Response object
:raises: RuntimeError |
384,477 | def p2pkh_input_and_witness(outpoint, sig, pubkey, sequence=0xFFFFFFFE):
stack_script = '{sig} {pk}'.format(sig=sig, pk=pubkey)  # p2pkh scriptSig: signature then pubkey
return tb.make_legacy_input_and_empty_witness(
outpoint=outpoint,
stack_script=script_ser.serialize(stack_script),
redeem_script=b'',  # no redeem script for legacy p2pkh
sequence=sequence) | OutPoint, hex_string, hex_string, int -> (TxIn, InputWitness)
Create a signed legacy TxIn from a p2pkh prevout
Create an empty InputWitness for it
Useful for transactions spending some witness and some legacy prevouts |
384,478 | def send_invite_email(application, link, is_secret):
if not application.applicant.email:
return
context = CONTEXT.copy()
context[] = application.applicant
context[] = application
context[] = link
context[] = is_secret
to_email = application.applicant.email
subject, body = render_email(, context)
send_mail(subject, body, settings.ACCOUNTS_EMAIL, [to_email]) | Sends an email inviting someone to create an account |
384,479 | def map_entity(self, entity: dal.Price) -> PriceModel:
if not entity:
return None
result = PriceModel()
result.currency = entity.currency
dt_string = entity.date
format_string = "%Y-%m-%d"
if entity.time:
dt_string += f"T{entity.time}"
format_string += "T%H:%M:%S"
price_datetime = datetime.strptime(dt_string, format_string)
result.datum = Datum()
result.datum.from_datetime(price_datetime)
assert isinstance(result.datum, Datum)
result.symbol = SecuritySymbol(entity.namespace, entity.symbol)
value = Decimal(entity.value) / Decimal(entity.denom)
result.value = Decimal(value)
return result | Map the price entity |
384,480 | def replace_between_tags(text, repl_, start_tag, end_tag=None):
new_lines = []
editing = False
lines = text.split('\n')
for line in lines:
if not editing:
new_lines.append(line)
if line.strip().startswith(start_tag):
new_lines.append(repl_)
editing = True
if end_tag is not None and line.strip().startswith(end_tag):
editing = False
new_lines.append(line)
new_text = '\n'.join(new_lines)
return new_text | r"""
Replaces text between sentinal lines in a block of text.
Args:
text (str):
repl_ (str):
start_tag (str):
end_tag (str): (default=None)
Returns:
str: new_text
CommandLine:
python -m utool.util_str --exec-replace_between_tags
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_str import * # NOQA
>>> text = ut.codeblock(
'''
class:
# <FOO>
bar
# </FOO>
baz
''')
>>> repl_ = 'spam'
>>> start_tag = '# <FOO>'
>>> end_tag = '# </FOO>'
>>> new_text = replace_between_tags(text, repl_, start_tag, end_tag)
>>> result = ('new_text =\n%s' % (str(new_text),))
>>> print(result)
new_text =
class:
# <FOO>
spam
# </FOO>
baz |
384,481 | def _get_error_code(self, e):
try:
matches = self.error_code_pattern.match(str(e))
code = int(matches.group(0))
return code
except (AttributeError, ValueError):
return e | Extract error code from ftp exception |
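
A self-contained sketch of the same idea; the regex is an assumption (the entry does not show error_code_pattern), chosen to match a leading three-digit FTP reply code:

import re

error_code_pattern = re.compile(r'^\d{3}')  # assumed pattern: leading 3-digit code

def get_error_code(exc):
    # Return the numeric FTP reply code if the message starts with one,
    # otherwise fall back to returning the exception itself.
    match = error_code_pattern.match(str(exc))
    if match:
        return int(match.group(0))
    return exc

print(get_error_code(Exception('550 Requested action not taken')))  # 550
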
384,482 | def get_host(environ):
if environ.get("HTTP_HOST"):
rv = environ["HTTP_HOST"]
if environ["wsgi.url_scheme"] == "http" and rv.endswith(":80"):
rv = rv[:-3]
elif environ["wsgi.url_scheme"] == "https" and rv.endswith(":443"):
rv = rv[:-4]
elif environ.get("SERVER_NAME"):
rv = environ["SERVER_NAME"]
if (environ["wsgi.url_scheme"], environ["SERVER_PORT"]) not in (
("https", "443"),
("http", "80"),
):
rv += ":" + environ["SERVER_PORT"]
else:
rv = "unknown"
return rv | Return the host for the given WSGI environment. Yanked from Werkzeug. |
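
For example, under the rules above a host header carrying the scheme's default port has that port stripped (pure dict input, no external dependencies):

environ = {
    'HTTP_HOST': 'example.com:443',
    'wsgi.url_scheme': 'https',
    'SERVER_NAME': 'example.com',
    'SERVER_PORT': '443',
}
print(get_host(environ))  # 'example.com' -- the default 443 port is dropped
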
384,483 | def insert_object_into_db_pk_unknown(self,
obj: Any,
table: str,
fieldlist: Sequence[str]) -> None:
self.ensure_db_open()
valuelist = []
for f in fieldlist[1:]:
valuelist.append(getattr(obj, f))
cursor = self.db.cursor()
self.db_exec_with_cursor(
cursor,
get_sql_insert_without_first_field(table, fieldlist,
self.get_delims()),
*valuelist
)
pkvalue = get_pk_of_last_insert(cursor)
setattr(obj, fieldlist[0], pkvalue) | Inserts object into database table, with PK (first field) initially
unknown (and subsequently set in the object from the database). |
384,484 | def toggle_exclusivity(self,override=None):
if override is not None:
new = override
else:
new = not self.exclusive
self.exclusive = new
self.set_exclusive_mouse(self.exclusive)
self.peng.sendEvent("peng3d:window.toggle_exclusive",{"peng":self.peng,"window":self,"exclusive":self.exclusive}) | Toggles mouse exclusivity via pyglet's :py:meth:`set_exclusive_mouse()` method.
If ``override`` is given, it will be used instead.
You may also read the current exclusivity state via :py:attr:`exclusive`\ . |
384,485 | def crop(self, vector, resolution=None, masked=None,
bands=None, resampling=Resampling.cubic):
bounds, window = self._vector_to_raster_bounds(vector.envelope, boundless=self._image is None)
if resolution:
xsize, ysize = self._resolution_to_output_shape(bounds, resolution)
else:
xsize, ysize = (None, None)
return self.pixel_crop(bounds, xsize, ysize, window=window,
masked=masked, bands=bands, resampling=resampling) | crops raster outside vector (convex hull)
:param vector: GeoVector, GeoFeature, FeatureCollection
:param resolution: output resolution, None for full resolution
:param resampling: reprojection resampling method, default `cubic`
:return: GeoRaster |
384,486 | def alerts(self, alert_level=):
alerts = self.zap.core.alerts()
alert_level_value = self.alert_levels[alert_level]
alerts = sorted((a for a in alerts if self.alert_levels[a[]] >= alert_level_value),
key=lambda k: self.alert_levels[k[]], reverse=True)
return alerts | Get a filtered list of alerts at the given alert level, and sorted by alert level. |
384,487 | def get_rich_menu_image(self, rich_menu_id, timeout=None):
response = self._get(
'/v2/bot/richmenu/{rich_menu_id}/content'.format(rich_menu_id=rich_menu_id),
timeout=timeout
)
return Content(response) | Call download rich menu image API.
https://developers.line.me/en/docs/messaging-api/reference/#download-rich-menu-image
:param str rich_menu_id: ID of the rich menu with the image to be downloaded
:param timeout: (optional) How long to wait for the server
to send data before giving up, as a float,
or a (connect timeout, read timeout) float tuple.
Default is self.http_client.timeout
:type timeout: float | tuple(float, float)
:rtype: :py:class:`linebot.models.responses.Content`
:return: Content instance |
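
A hedged usage sketch with the line-bot-sdk client; the channel token and rich menu ID are placeholders, and the exact endpoint behaviour depends on the SDK version:

from linebot import LineBotApi

line_bot_api = LineBotApi('CHANNEL_ACCESS_TOKEN')  # placeholder token
content = line_bot_api.get_rich_menu_image('richmenu-0000000000000000')  # placeholder ID
with open('rich_menu.png', 'wb') as f:
    for chunk in content.iter_content():  # stream the downloaded image
        f.write(chunk)
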
384,488 | def generate_code_cover(self):
lst = []
for cfg_node in self.graph.nodes():
size = cfg_node.size
lst.append((cfg_node.addr, size))
lst = sorted(lst, key=lambda x: x[0])
return lst | Generate a list of all recovered basic blocks. |
384,489 | def write_data(self, data, dstart=None, swap_axes=True):
if dstart is None:
shape = self.data_shape
dstart = int(self.header_size)
elif dstart < 0:
raise ValueError(
'`dstart` must be non-negative, got {}'.format(dstart))
else:
shape = -1
dstart = int(dstart)
if dstart < self.header_size:
raise ValueError(
'`dstart` must be at least `header_size`, got {} < {}'.format(dstart, self.header_size))
data = np.asarray(data, dtype=self.data_dtype).reshape(shape)
if swap_axes:
data = np.transpose(data, axes=np.argsort(self.data_axis_order))
assert data.shape == self.data_storage_shape
data = data.reshape(-1, order='F')  # Fortran order assumed for the on-disk layout
self.file.seek(dstart)
data.tofile(self.file) | Write ``data`` to `file`.
Parameters
----------
data : `array-like`
Data that should be written to `file`.
dstart : non-negative int, optional
Offset in bytes of the start position of the written data.
If provided, reshaping and axis swapping of ``data`` is
skipped.
For ``None``, `header_size` is used.
swap_axes : bool, optional
If ``True``, use the ``'mapc', 'mapr', 'maps'`` header entries
to swap the axes in the ``data`` before writing. Use ``False``
only if the data is already consistent with the final axis
order. |
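
A standalone sketch of the seek-and-write pattern above, using plain NumPy; the file name and header size are arbitrary stand-ins:

import numpy as np

data = np.arange(24, dtype=np.int16).reshape(2, 3, 4)
header_size = 1024  # assumed header length in bytes
with open('volume.raw', 'wb') as f:
    f.write(b'\x00' * header_size)          # placeholder header block
    f.seek(header_size)                     # dstart defaults to the header size
    data.reshape(-1, order='F').tofile(f)   # flatten to storage order, then write raw bytes
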
384,490 | def update(self, campaign_id, area, nick=None):
request = TOPRequest()
request['campaign_id'] = campaign_id
request['area'] = area
if nick is not None: request['nick'] = nick
self.create(self.execute(request), fields=[,,,,], models={:CampaignArea})
return self.result | xxxxx.xxxxx.campaign.area.update
===================================
Update the delivery (targeting) area of a promotion campaign |
384,491 | def create(cls, options, session, build_root=None, exclude_patterns=None, tags=None):
spec_roots = cls.parse_specs(
target_specs=options.target_specs,
build_root=build_root,
exclude_patterns=exclude_patterns,
tags=tags)
changed_options = options.for_scope('changed')
changed_request = ChangedRequest.from_options(changed_options)
owned_files = options.for_global_scope().owner_of
logger.debug('spec_roots are: %s', spec_roots)
logger.debug('changed_request is: %s', changed_request)
logger.debug('owned_files are: %s', owned_files)
targets_specified = sum(1 for item
in (changed_request.is_actionable(), owned_files, spec_roots.dependencies)
if item)
if targets_specified > 1:
request = OwnersRequest(sources=tuple(changed_files),
include_dependees=str(changed_request.include_dependees))
changed_addresses, = session.product_request(BuildFileAddresses, [request])
logger.debug('changed addresses: %s', changed_addresses)
dependencies = tuple(SingleAddress(a.spec_path, a.target_name) for a in changed_addresses)
return TargetRoots(Specs(dependencies=dependencies, exclude_patterns=exclude_patterns, tags=tags))
if owned_files:
dependencies = tuple(SingleAddress(a.spec_path, a.target_name) for a in owner_addresses)
return TargetRoots(Specs(dependencies=dependencies, exclude_patterns=exclude_patterns, tags=tags))
return TargetRoots(spec_roots) | :param Options options: An `Options` instance to use.
:param session: The Scheduler session
:param string build_root: The build root. |
384,492 | def actor2ImageData(actor, spacing=(1, 1, 1)):
pd = actor.polydata()
whiteImage = vtk.vtkImageData()
bounds = pd.GetBounds()
whiteImage.SetSpacing(spacing)
dim = [0, 0, 0]
for i in [0, 1, 2]:
dim[i] = int(np.ceil((bounds[i * 2 + 1] - bounds[i * 2]) / spacing[i]))
whiteImage.SetDimensions(dim)
whiteImage.SetExtent(0, dim[0] - 1, 0, dim[1] - 1, 0, dim[2] - 1)
origin = [0, 0, 0]
origin[0] = bounds[0] + spacing[0] / 2
origin[1] = bounds[2] + spacing[1] / 2
origin[2] = bounds[4] + spacing[2] / 2
whiteImage.SetOrigin(origin)
whiteImage.AllocateScalars(vtk.VTK_UNSIGNED_CHAR, 1)
inval = 255
count = whiteImage.GetNumberOfPoints()
for i in range(count):
whiteImage.GetPointData().GetScalars().SetTuple1(i, inval)
pol2stenc = vtk.vtkPolyDataToImageStencil()
pol2stenc.SetInputData(pd)
pol2stenc.SetOutputOrigin(origin)
pol2stenc.SetOutputSpacing(spacing)
pol2stenc.SetOutputWholeExtent(whiteImage.GetExtent())
pol2stenc.Update()
outval = 0
imgstenc = vtk.vtkImageStencil()
imgstenc.SetInputData(whiteImage)
imgstenc.SetStencilConnection(pol2stenc.GetOutputPort())
imgstenc.ReverseStencilOff()
imgstenc.SetBackgroundValue(outval)
imgstenc.Update()
return imgstenc.GetOutput() | Convert a mesh it into volume representation as ``vtkImageData``
where the foreground (exterior) voxels are 1 and the background
(interior) voxels are 0.
Internally the ``vtkPolyDataToImageStencil`` class is used.
.. hint:: |mesh2volume| |mesh2volume.py|_ |
384,493 | def control_gate(control: Qubit, gate: Gate) -> Gate:
if control in gate.qubits:
raise ValueError('Control qubit overlaps the target gate qubits')
qubits = [control, *gate.qubits]
gate_tensor = join_gates(P0(control), identity_gate(gate.qubits)).tensor \
+ join_gates(P1(control), gate).tensor
controlled_gate = Gate(qubits=qubits, tensor=gate_tensor)
return controlled_gate | Return a controlled unitary gate. Given a gate acting on K qubits,
return a new gate on K+1 qubits prepended with a control bit. |
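
The projector decomposition used above, written out in plain NumPy for a single control over a one-qubit gate (no quantumflow dependency assumed):

import numpy as np

I2 = np.eye(2)
P0 = np.array([[1, 0], [0, 0]])   # |0><0| on the control qubit
P1 = np.array([[0, 0], [0, 1]])   # |1><1| on the control qubit
X = np.array([[0, 1], [1, 0]])    # target gate

# controlled-U = P0 (x) I + P1 (x) U; with U = X this reproduces CNOT
cnot = np.kron(P0, I2) + np.kron(P1, X)
print(cnot.astype(int))
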
384,494 | def _add_logo(fig, x=10, y=25, zorder=100, which=, size=, **kwargs):
fname_suffix = {: ,
: }
fname_prefix = {: ,
: }
try:
fname = fname_prefix[which] + fname_suffix[size]
fpath = posixpath.join(, fname)
except KeyError:
raise ValueError()
logo = imread(pkg_resources.resource_stream(, fpath))
return fig.figimage(logo, x, y, zorder=zorder, **kwargs) | Add the MetPy or Unidata logo to a figure.
Adds an image to the figure.
Parameters
----------
fig : `matplotlib.figure`
The `figure` instance used for plotting
x : int
x position padding in pixels
y : float
y position padding in pixels
zorder : int
The zorder of the logo
which : str
Which logo to plot 'metpy' or 'unidata'
size : str
Size of logo to be used. Can be 'small' for 75 px square or 'large' for
150 px square.
Returns
-------
`matplotlib.image.FigureImage`
The `matplotlib.image.FigureImage` instance created |
384,495 | def as_html(self, max_rows=0):
if not max_rows or max_rows > self.num_rows:
max_rows = self.num_rows
omitted = max(0, self.num_rows - max_rows)
labels = self.labels
lines = [
(0, '<table border="1" class="dataframe">'),
(1, '<thead>'),
(2, '<tr>'),
(3, ' '.join('<th>' + label + '</th>' for label in labels)),
(2, '</tr>'),
(1, '</thead>'),
(1, '<tbody>'),
]
fmts = self._get_column_formatters(max_rows, True)
for row in itertools.islice(self.rows, max_rows):
lines += [
(2, '<tr>'),
(3, ' '.join('<td>' + fmt(v, label=False) + '</td>' for
v, fmt in zip(row, fmts))),
(2, '</tr>'),
]
lines.append((1, '</tbody>'))
lines.append((0, '</table>'))
if omitted:
lines.append((0, '<p>... ({} rows omitted)</p>'.format(omitted)))
return '\n'.join(4 * indent * ' ' + text for indent, text in lines) | Format table as HTML. |
384,496 | def _learning_rate_decay(hparams, warmup_steps=0):
scheme = hparams.learning_rate_decay_scheme
warmup_steps = tf.to_float(warmup_steps)
global_step = _global_step(hparams)
if not scheme or scheme == "none":
return tf.constant(1.)
tf.logging.info("Applying learning rate decay: %s.", scheme)
if scheme == "exp":
decay_steps = hparams.learning_rate_decay_steps
p = (global_step - warmup_steps) / decay_steps
if hparams.learning_rate_decay_staircase:
p = tf.floor(p)
return tf.pow(hparams.learning_rate_decay_rate, p)
if scheme == "piecewise":
return _piecewise_learning_rate(global_step,
hparams.learning_rate_boundaries,
hparams.learning_rate_multiples)
if scheme == "cosine":
cycle_steps = hparams.learning_rate_cosine_cycle_steps
cycle_position = global_step % (2 * cycle_steps)
cycle_position = cycle_steps - tf.abs(cycle_steps - cycle_position)
return 0.5 * (1 + tf.cos(np.pi * cycle_position / cycle_steps))
if scheme == "cyclelinear10x":
cycle_steps = warmup_steps
cycle_position = global_step % (2 * cycle_steps)
cycle_position = tf.to_float(
cycle_position - cycle_steps) / float(cycle_steps)
cycle_position = 1.0 - tf.abs(cycle_position)
return (cycle_position + 0.1) * 3.0
if scheme == "sqrt":
return _legacy_sqrt_decay(global_step - warmup_steps)
raise ValueError("Unrecognized learning rate decay scheme: %s" %
hparams.learning_rate_decay_scheme) | Learning rate decay multiplier. |
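
The cosine branch above, reproduced as a plain NumPy sketch to show one full cycle (the cycle length is chosen arbitrarily):

import numpy as np

cycle_steps = 1000
steps = np.arange(0, 2 * cycle_steps + 1, 250)
cycle_position = steps % (2 * cycle_steps)
cycle_position = cycle_steps - np.abs(cycle_steps - cycle_position)
decay = 0.5 * (1 + np.cos(np.pi * cycle_position / cycle_steps))
print(dict(zip(steps.tolist(), np.round(decay, 3).tolist())))
# the multiplier falls from 1.0 to 0.0 at mid-cycle, then climbs back to 1.0
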
384,497 | def clean(self):
doc = self._resource
result = {k: v for k, v in doc.iteritems() if k not in
self.internal_fields}
if in doc and not in result:
result[] = doc[]
return result | Remove internal fields |
384,498 | def lcsr(s1, s2):
if s1 == s2:
return 1.0
return llcs(s1, s2) / max(1, len(s1), len(s2)) | longest common sequence ratio
>>> lcsr('ab', 'abcd')
0.5 |
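
llcs is not defined in the entry; a minimal longest-common-subsequence length helper it could stand in for (a sketch, not the original implementation):

def llcs(s1, s2):
    # Classic O(len(s1) * len(s2)) dynamic-programming LCS length.
    prev = [0] * (len(s2) + 1)
    for a in s1:
        curr = [0]
        for j, b in enumerate(s2, 1):
            curr.append(prev[j - 1] + 1 if a == b else max(prev[j], curr[-1]))
        prev = curr
    return prev[-1]

print(llcs('ab', 'abcd') / max(1, len('ab'), len('abcd')))  # 0.5, matching the doctest above
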
384,499 | def use(ctx, shortcut):
git_dir = current_git_dir()
if git_dir is None:
output(NOT_GIT_REPO_MSG)
exit(1)
repo_root = os.path.dirname(git_dir)
config = get_config(repo_root)
try:
use_shortcut = config.shortcuts.get(shortcut)
while use_shortcut.extends is not None:
base = config.shortcuts.get(use_shortcut.extends)
use_shortcut = base.extend(use_shortcut)
except config.shortcuts.DoesNotExist as err:
output('{0}\nAvailable shortcuts:'.format(err.message))
for s in config.shortcuts:
output(s.name)
exit(1)
else:
options = use_shortcut.options
for flag in use_shortcut.flags:
options[flag.replace('-', '_')] = True
options_string = ''
for k, v in sorted(iteritems(options)):
options_string += ' --{}'.format(k.replace('_', '-'))
if v is not True:
options_string += ' {}'.format(v)
output('Using options:{}'.format(options_string))
ctx.invoke(run, **options) | Use a shortcut. |