index | code | docstring
---|---|---|
4,400 | def _create_client_impl(self, api_version):
if api_version == v7_0_VERSION:
from azure.keyvault.v7_0 import KeyVaultClient as ImplClient
elif api_version == v2016_10_01_VERSION:
from azure.keyvault.v2016_10_01 import KeyVaultClient as ImplClient
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
impl = ImplClient(credentials=self._credentials)
impl.config = self.config
if self._entered and hasattr(impl, '__enter__'):
impl.__enter__()
self._client_impls[api_version] = impl
return impl | Creates the client implementation corresponding to the specified api_version.
:param api_version:
:return: |
4,401 | def to_array(self):
array = super(InputMediaDocument, self).to_array()
if self.thumb is not None:
if isinstance(self.thumb, InputFile):
array['thumb'] = None
elif isinstance(self.thumb, str):
array['thumb'] = u(self.thumb)
else:
raise TypeError('Unknown thumb type, must be one of InputFile, str.')
return array | Serializes this InputMediaDocument to a dictionary.
:return: dictionary representation of this object.
:rtype: dict |
4,402 | def vad(self, location=1, normalize=True, activity_threshold=7.0,
min_activity_duration=0.25, initial_search_buffer=1.0,
max_gap=0.25, initial_pad=0.0):
if location not in [-1, 1]:
raise ValueError("location must be -1 or 1.")
if not isinstance(normalize, bool):
raise ValueError("normalize muse be a boolean.")
if not is_number(activity_threshold):
raise ValueError("activity_threshold must be a number.")
if not is_number(min_activity_duration) or min_activity_duration < 0:
raise ValueError("min_activity_duration must be a positive number")
if not is_number(initial_search_buffer) or initial_search_buffer < 0:
raise ValueError("initial_search_buffer must be a positive number")
if not is_number(max_gap) or max_gap < 0:
raise ValueError("max_gap must be a positive number.")
if not is_number(initial_pad) or initial_pad < 0:
raise ValueError("initial_pad must be a positive number.")
effect_args = []
if normalize:
effect_args.append('norm')
if location == -1:
effect_args.append('reverse')
effect_args.extend([
'vad',
'-t', '{:f}'.format(activity_threshold),
'-T', '{:f}'.format(min_activity_duration),
'-s', '{:f}'.format(initial_search_buffer),
'-g', '{:f}'.format(max_gap),
'-p', '{:f}'.format(initial_pad)
])
if location == -1:
effect_args.append('reverse')
self.effects.extend(effect_args)
self.effects_log.append('vad')
return self | Voice Activity Detector. Attempts to trim silence and quiet
background sounds from the ends of recordings of speech. The algorithm
currently uses a simple cepstral power measurement to detect voice, so
may be fooled by other things, especially music.
The effect can trim only from the front of the audio, so in order to
trim from the back, the reverse effect must also be used.
Parameters
----------
location : 1 or -1, default=1
If 1, trims silence from the beginning
If -1, trims silence from the end
normalize : bool, default=True
If true, normalizes audio before processing.
activity_threshold : float, default=7.0
The measurement level used to trigger activity detection. This may
need to be changed depending on the noise level, signal level, and
other characteristics of the input audio.
min_activity_duration : float, default=0.25
The time constant (in seconds) used to help ignore short bursts of
sound.
initial_search_buffer : float, default=1.0
The amount of audio (in seconds) to search for quieter/shorter
bursts of audio to include prior to the detected trigger point.
max_gap : float, default=0.25
The allowed gap (in seconds) between quieter/shorter bursts of
audio to include prior to the detected trigger point.
initial_pad : float, default=0.0
The amount of audio (in seconds) to preserve before the trigger
point and any found quieter/shorter bursts.
See Also
--------
silence
Examples
--------
>>> tfm = sox.Transformer()
Remove silence from the beginning of speech
>>> tfm.vad(initial_pad=0.3)
Remove silence from the end of speech
>>> tfm.vad(location=-1, initial_pad=0.2) |
4,403 | def run_helper_process(python_file, metadata_queue, quit_event, options):
class_wrapper = import_class_with_base(python_file, BotHelperProcess)
helper_class = class_wrapper.get_loaded_class()
helper = helper_class(metadata_queue, quit_event, options)
helper.start() | :param python_file: The absolute path of a python file containing the helper process that should be run.
It must define a class which is a subclass of BotHelperProcess.
:param metadata_queue: A queue from which the helper process will read AgentMetadata updates.
:param quit_event: An event which should be set when rlbot is shutting down.
:param options: A dict with arbitrary options that will be passed through to the helper process. |
4,404 | def read_segment(self, segment):
log.debug("read segment {0}".format(segment))
if segment < 0 or segment > 15:
raise ValueError("invalid segment number")
cmd = "\x10" + chr(segment << 4) + 8 * chr(0) + self.uid
rsp = self.transceive(cmd)
if len(rsp) < 129:
raise Type1TagCommandError(RESPONSE_ERROR)
return rsp[1:129] | Read one memory segment (128 byte). |
4,405 | def make_fil_file(filename, out_dir='./', new_filename=None, max_load=None):
fil_file = Waterfall(filename, max_load=max_load)
if not new_filename:
new_filename = out_dir + filename.replace('.h5', '.fil').split('/')[-1]
if '.fil' not in new_filename:
new_filename = new_filename + '.fil'
fil_file.write_to_fil(new_filename) | Converts file to Sigproc filterbank (.fil) format. Default saves output in current dir.
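A minimal usage sketch (the input file name here is hypothetical; anything blimpy's Waterfall can read should work):
>>> make_fil_file('observation.h5')  # writes ./observation.fil
>>> make_fil_file('observation.h5', new_filename='renamed')  # writes renamed.fil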
4,406 | def show(self):
self.hidden = False
window_rect = RectCalculator.set_final_window_rect(self.settings, self.window)
self.window.stick()
if not self.get_notebook().has_page():
self.add_tab()
self.window.set_keep_below(False)
self.window.show_all()
self.settings.general.triggerOnChangedValue(self.settings.general, "use-scrollbar")
log.debug("Moving window to: %r", window_rect)
self.window.move(window_rect.x, window_rect.y)
if not FullscreenManager(self.settings, self.window).is_fullscreen():
self.settings.general.triggerOnChangedValue(self.settings.general, 'window-height')
time = get_server_time(self.window)
log.debug("order to present and deiconify")
self.window.present()
self.window.deiconify()
self.window.show()
self.window.get_window().focus(time)
self.window.set_type_hint(Gdk.WindowTypeHint.DOCK)
self.window.set_type_hint(Gdk.WindowTypeHint.NORMAL)
self.settings.styleFont.triggerOnChangedValue(self.settings.styleFont, 'color')
self.settings.styleBackground.triggerOnChangedValue(self.settings.styleBackground, 'color')
log.debug("Current window position: %r", self.window.get_position())
self.execute_hook('show') | Shows the main window and grabs the focus on it.
4,407 | def init_stash(stash_path, passphrase, passphrase_size, backend):
stash_path = stash_path or STORAGE_DEFAULT_PATH_MAPPING[backend]
click.echo('Stash: {0} at {1}'.format(backend, stash_path))
storage = STORAGE_MAPPING[backend](**_parse_path_string(stash_path))
try:
click.echo('Initializing stash...')
if os.path.isfile(PASSPHRASE_FILENAME):
raise GhostError(
'{0} already exists. Overwriting it might prevent you from accessing the stash it was generated for.'.format(PASSPHRASE_FILENAME))
stash = Stash(
storage,
passphrase=passphrase,
passphrase_size=passphrase_size)
passphrase = stash.init()
if not passphrase:
click.echo('Stash already initialized.')
sys.exit(0)
_write_passphrase_file(passphrase)
except GhostError as ex:
sys.exit(ex)
except (OSError, IOError) as ex:
click.echo("Seems like we've hit a problem.")
db_path = _parse_path_string(stash_path)['db_path']
click.echo('Removing stale stash and passphrase: {0}. Note that any directories created are not removed for safety reasons and you might want to remove them manually.'.format(db_path))
if os.path.isfile(db_path):
os.remove(db_path)
sys.exit(ex)
click.echo('Initialized stash at: {0}'.format(stash_path))
click.echo('Your passphrase can be found under the `{0}` file in the current directory.'.format(PASSPHRASE_FILENAME))
click.echo('Make sure you save your passphrase somewhere safe. If lost, you will lose access to your stash.') | Init a stash
`STASH_PATH` is the path to the storage endpoint. If this isn't supplied,
a default path will be used. In the path, you can specify a name
for the stash (which, if omitted, will default to `ghost`) like so:
`ghost init http://10.10.1.1:8500;stash1`.
After initializing a stash, don't forget you can set environment
variables for both your stash's path and its passphrase.
On Linux/OSx you can run:
export GHOST_STASH_PATH='http://10.10.1.1:8500;stash1'
export GHOST_PASSPHRASE=$(cat passphrase.ghost)
export GHOST_BACKEND='tinydb' |
4,408 | def resolve_to_callable(callable_name):
from . import registry
clean_callable_name = callable_name.replace(
'{{', '').replace('}}', '').strip()
try:
return registry.get(clean_callable_name)
except KeyError:
try:
from zope.dottedname.resolve import resolve
return resolve(clean_callable_name)
except ImportError:
raise ImportError(
'Failed to load callable `{}`'.format(clean_callable_name)) | Resolve string :callable_name: to a callable.
:param callable_name: String representing callable name as registered
in ramses registry or dotted import path of callable. Can be
wrapped in double curly brackets, e.g. '{{my_callable}}'. |
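A brief usage sketch, assuming 'my_callable' has previously been registered in the ramses registry; the dotted-path fallback works for any importable name:
>>> resolve_to_callable('{{my_callable}}')  # strips the braces, then checks the registry
>>> resolve_to_callable('os.path.join')  # falls back to zope.dottedname resolution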
4,409 | def features(sentence, i):
word = sentence[i]
yield "word:{}" + word.lower()
if word[0].isupper():
yield "CAP"
if i > 0:
yield "word-1:{}" + sentence[i - 1].lower()
if i > 1:
yield "word-2:{}" + sentence[i - 2].lower()
if i + 1 < len(sentence):
yield "word+1:{}" + sentence[i + 1].lower()
if i + 2 < len(sentence):
yield "word+2:{}" + sentence[i + 2].lower() | Features for i'th token in sentence.
Currently baseline named-entity recognition features, but these can
easily be changed to do POS tagging or chunking. |
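For example, with the generator above (after the string-formatting fix), the features for the first token of a short sentence come out as:
>>> sentence = ['John', 'lives', 'in', 'London']
>>> list(features(sentence, 0))
['word:john', 'CAP', 'word+1:lives', 'word+2:in']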
4,410 | def proxy(opts, functions=None, returners=None, whitelist=None, utils=None):
ret = LazyLoader(
_module_dirs(opts, 'proxy'),
opts,
tag='proxy',
pack={'__salt__': functions, '__ret__': returners, '__utils__': utils},
)
ret.pack['__proxy__'] = ret
return ret | Returns the proxy module for this salt-proxy-minion |
4,411 | def p_statement_break(p):
if len(p) == 3:
p[0] = ast.Break(None, lineno=p.lineno(1))
else:
p[0] = ast.Break(p[2], lineno=p.lineno(1)) | statement : BREAK SEMI
| BREAK expr SEMI |
4,412 | def get_top_n_action_types(self, top_n):
action_type_to_counts = dict()
for action in self.actions:
actiontype = action.actiontype
if actiontype not in action_type_to_counts:
action_type_to_counts[actiontype] = 1
else:
action_type_to_counts[actiontype] = \
action_type_to_counts[actiontype] + 1
action_types = list()
counts = list()
for actiontype in action_type_to_counts.keys():
action_types.append(actiontype)
counts.append(action_type_to_counts[actiontype])
num_actions = len(self.actions)
num_actions2 = 0
for count in counts:
num_actions2 = num_actions2 + count
if num_actions != num_actions2:
raise Exception('Action count mismatch.')
sorted_inds = np.argsort(counts)
last_ind = len(sorted_inds)-1
top_actions = list()
if top_n > len(sorted_inds):
raise Exception('top_n (%d) is larger than the number of action types (%d)'
% (top_n, len(sorted_inds)))
for i in range(top_n):
top_actions.append(action_types[sorted_inds[last_ind-i]])
return top_actions | Returns the top N actions by count. |
4,413 | def create_interface_connection(interface_a, interface_b):
payload = {'interface_a': interface_a,
'interface_b': interface_b}
ret = _add('dcim', 'interface-connections', payload)
if ret:
return {'dcim': {'interface-connections': {ret['id']: payload}}}
else:
return ret | .. versionadded:: 2019.2.0
Create an interface connection between 2 interfaces
interface_a
Interface id for Side A
interface_b
Interface id for Side B
CLI Example:
.. code-block:: bash
salt myminion netbox.create_interface_connection 123 456 |
4,414 | def _children_(self):
ret = {}
names = self._field_names_
def down(name, obj):
if isinstance(obj, BaseObj):
if not isinstance(obj, weakref.ProxyTypes):
ret[name] = obj
elif isinstance(obj, list):
for i, v in zip(range(len(obj)), obj):
down(jp_compose(str(i), name), v)
elif isinstance(obj, dict):
for k, v in six.iteritems(obj):
down(jp_compose(k, name), v)
for n in names:
down(jp_compose(n), getattr(self, n))
return ret | get children objects
:rtype: a dict of children {child_name: child_object} |
4,415 | def decrypt_file(file_path, recipient_key, *, base64=False):
"Returns (filename, file_contents) if successful"
crypto.assert_type_and_length('recipient_key', recipient_key, crypto.UserLock)
with open(file_path, "rb") as I:
contents = I.read()
if base64:
contents = crypto.b64decode(contents)
crypted = crypto.MiniLockFile(contents)
return crypted.decrypt(recipient_key) | Returns (filename, file_contents) if successful |
4,416 | def prefix(self, name):
a_node = self.adapter.get_node_attribute_node(self.impl_element, name)
if a_node is None:
return None
return a_node.prefix | :param string name: the name of an attribute to look up.
:return: the prefix component of the named attribute's name,
or None. |
4,417 | def twitter_credential(name):
credential_name = 'TWITTER_' + name.upper()
if hasattr(settings, credential_name):
return getattr(settings, credential_name)
else:
raise AttributeError('Missing setting: ' + credential_name) | Grab twitter credential from settings |
4,418 | def _channel_loop(detection, template, min_cc, detection_id, interpolate, i,
pre_lag_ccsum=None, detect_chans=0,
horizontal_chans=['E', 'N', '1', '2'], vertical_chans=['Z'],
debug=0):
from eqcorrscan.core.match_filter import normxcorr2
import math
event = Event()
s_stachans = {}
cccsum = 0
checksum = 0
used_chans = 0
for tr in template:
temp_net = tr.stats.network
temp_sta = tr.stats.station
temp_chan = tr.stats.channel
debug_print('Working on: %s.%s.%s' % (temp_net, temp_sta, temp_chan),
3, debug)
image = detection.select(station=temp_sta, channel=temp_chan)
if len(image) == 0 or sum(image[0].data) == 0:
print('No match in image.')
continue
if interpolate:
try:
ccc = normxcorr2(tr.data, image[0].data)
except Exception:
print('Could not calculate cc')
print('Image is %i long' % len(image[0].data))
print('Template is %i long' % len(tr.data))
continue
try:
shift, cc_max = _xcorr_interp(ccc=ccc, dt=image[0].stats.delta)
except IndexError:
print('Could not interpolate ccc, not smooth')
ccc = normxcorr2(tr.data, image[0].data)
cc_max = np.amax(ccc)
shift = np.argmax(ccc) * image[0].stats.delta
if math.isnan(cc_max):
print('Problematic trace, no cross correlation possible')
continue
else:
picktime = image[0].stats.starttime + shift
else:
try:
ccc = normxcorr2(tr.data, image[0].data)
except Exception:
print('Could not calculate cc')
print('Image is %i long' % len(image[0].data))
print('Template is %i long' % len(tr.data))
continue
cc_max = np.amax(ccc)
if math.isnan(cc_max):
print('Problematic trace, no cross correlation possible')
continue
else:
picktime = image[0].stats.starttime + (
np.argmax(ccc) * image[0].stats.delta)
debug_print('Maximum cross-corr=%s' % cc_max, 3, debug)
checksum += cc_max
used_chans += 1
if cc_max < min_cc:
debug_print('Correlation below threshold, not used', 3, debug)
continue
cccsum += cc_max
if temp_chan[-1] in vertical_chans:
phase = 'P'
elif temp_chan[-1] in horizontal_chans:
phase = 'S'
debug_print('Making S-pick on: %s.%s.%s' %
(temp_net, temp_sta, temp_chan), 4, debug)
if temp_sta not in s_stachans.keys():
s_stachans[temp_sta] = ((temp_chan, np.amax(ccc),
picktime))
elif temp_sta in s_stachans.keys():
if np.amax(ccc) > s_stachans[temp_sta][1]:
picktime = picktime
else:
continue
else:
phase = None
_waveform_id = WaveformStreamID(
network_code=temp_net, station_code=temp_sta,
channel_code=temp_chan)
event.picks.append(Pick(
waveform_id=_waveform_id, time=picktime,
method_id=ResourceIdentifier('EQcorrscan'), phase_hint=phase,
creation_info='eqcorrscan.core.lag_calc',
evaluation_mode='automatic',
comments=[Comment(text='cc_max=%s' % cc_max)]))
event.resource_id = detection_id
ccc_str = ("detect_val=%s" % cccsum)
event.comments.append(Comment(text=ccc_str))
if used_chans == detect_chans:
if pre_lag_ccsum is not None and\
checksum - pre_lag_ccsum < -(0.3 * pre_lag_ccsum):
msg = ('lag-calc has decreased cccsum from %f to %f - report this error'
% (pre_lag_ccsum, checksum))
raise LagCalcError(msg)
else:
warnings.warn('Cannot check if cccsum is better, used %i channels for detection, but %i are used here'
% (detect_chans, used_chans))
return i, event | Inner loop for correlating and assigning picks.
Utility function to take a stream of data for the detected event and write
maximum correlation to absolute time as picks in an obspy.core.event.Event
object.
Only outputs picks for correlations above min_cc.
:type detection: obspy.core.stream.Stream
:param detection:
Stream of data for the slave event detected using template.
:type template: obspy.core.stream.Stream
:param template: Stream of data as the template for the detection.
:type min_cc: float
:param min_cc: Minimum cross-correlation value to allow a pick to be made.
:type detection_id: str
:param detection_id: Detection ID to associate the event with.
:type interpolate: bool
:param interpolate:
Interpolate the correlation function to achieve sub-sample precision.
:type i: int
:param i:
Used to track which process has occurred when running in parallel.
:type pre_lag_ccsum: float
:param pre_lag_ccsum:
Cross-correlation sum before lag-calc, will check that the
cross-correlation sum is increased by lag-calc (using all channels,
ignoring min_cc)
:type detect_chans: int
:param detect_chans:
Number of channels originally used in detections, must match the number
used here to allow for cccsum checking.
:type horizontal_chans: list
:param horizontal_chans:
List of channel endings for horizontal-channels, on which S-picks will
be made.
:type vertical_chans: list
:param vertical_chans:
List of channel endings for vertical-channels, on which P-picks will
be made.
:type debug: int
:param debug: Debug output level 0-5.
:returns:
Event object containing network, station, channel and pick information.
:rtype: :class:`obspy.core.event.Event` |
4,419 | def forgot_password(self):
form = self._get_form()
if form.validate_on_submit():
self.security_service.send_reset_password_instructions(form.user)
self.flash(_('Instructions to reset your password have been sent to {email}.',
email=form.user.email),
category='info')
if request.is_json:
return '', HTTPStatus.NO_CONTENT
elif form.errors and request.is_json:
return self.errors(form.errors)
return self.render('forgot_password',
forgot_password_form=form,
**self.security.run_ctx_processor('forgot_password')) | View function to request a password recovery email with a reset token.
Supports html and json requests. |
4,420 | def generate(self):
generated_arr = np.random.normal(loc=self.__mu, scale=self.__sigma, size=self.__output_shape)
if self.noise_sampler is not None:
self.noise_sampler.output_shape = generated_arr.shape
generated_arr += self.noise_sampler.generate()
return generated_arr | Generate noise samples.
Returns:
`np.ndarray` of samples. |
4,421 | def connect_discussion_signals():
post_save.connect(
count_discussions_handler, sender=comment_model,
dispatch_uid=COMMENT_PS_COUNT_DISCUSSIONS)
post_delete.connect(
count_discussions_handler, sender=comment_model,
dispatch_uid=COMMENT_PD_COUNT_DISCUSSIONS)
comment_was_flagged.connect(
count_discussions_handler, sender=comment_model,
dispatch_uid=COMMENT_WF_COUNT_DISCUSSIONS)
comment_was_posted.connect(
count_comments_handler, sender=comment_model,
dispatch_uid=COMMENT_WP_COUNT_COMMENTS)
pingback_was_posted.connect(
count_pingbacks_handler, sender=comment_model,
dispatch_uid=PINGBACK_WF_COUNT_PINGBACKS)
trackback_was_posted.connect(
count_trackbacks_handler, sender=comment_model,
dispatch_uid=TRACKBACK_WF_COUNT_TRACKBACKS) | Connect all the signals on the Comment model to
maintain a valid discussion count on each entry
when an action is performed on the comments. |
4,422 | def _record_purchase(sailthru_client, email, item, purchase_incomplete, message_id, options):
try:
sailthru_response = sailthru_client.purchase(email, [item],
incomplete=purchase_incomplete, message_id=message_id,
options=options)
if not sailthru_response.is_ok():
error = sailthru_response.get_error()
logger.error("Error attempting to record purchase in Sailthru: %s", error.get_message())
return not can_retry_sailthru_request(error)
except SailthruClientError as exc:
logger.exception("Exception attempting to record purchase for %s in Sailthru - %s", email, text_type(exc))
return False
return True | Record a purchase in Sailthru
Arguments:
sailthru_client (object): SailthruClient
email (str): user's email address
item (dict): Sailthru required information about the course
purchase_incomplete (boolean): True if adding item to shopping cart
message_id (str): Cookie used to identify marketing campaign
options (dict): Sailthru purchase API options (e.g. template name)
Returns:
False if retryable error, else True |
4,423 | def debug_command(self, cmd, args=None, progress_callback=None):
if args is None:
args = {}
try:
self._on_progress = progress_callback
return self._loop.run_coroutine(self.adapter.debug(0, cmd, args))
finally:
self._on_progress = None | Send a debug command to the connected device.
This generic method will send a named debug command with the given
arguments to the connected device. Debug commands are typically used
for things like forcible reflashing of firmware or other, debug-style,
operations. Not all transport protocols support debug commands and
the supported operations vary depending on the transport protocol.
Args:
cmd (str): The name of the debug command to send.
args (dict): Any arguments required by the given debug command
progress_callback (callable): A function that will be called periodically to
report progress. The signature must be callback(done_count, total_count)
where done_count and total_count will be passed as integers.
Returns:
object: The return value of the debug command, if there is one. |
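A usage sketch with a hypothetical 'dump_ram' command name; the callback signature matches the one documented above:
>>> def on_progress(done_count, total_count):
...     print('%d/%d' % (done_count, total_count))
>>> device.debug_command('dump_ram', progress_callback=on_progress)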
4,424 | def call_actions_future(
self,
service_name,
actions,
expansions=None,
raise_job_errors=True,
raise_action_errors=True,
timeout=None,
**kwargs
):
kwargs.pop('suppress_response', None)
if timeout:
kwargs['message_expiry_in_seconds'] = timeout
expected_request_id = self.send_request(service_name, actions, **kwargs)
def get_response(_timeout=None):
responses = list(self.get_all_responses(service_name, receive_timeout_in_seconds=_timeout or timeout))
found = False
response = None
for request_id, response in responses:
if request_id == expected_request_id:
found = True
break
if not found:
raise Exception('Could not find a response with ID {} in the received responses'.format(expected_request_id))
self._perform_expansion(response.actions, expansions, **kwargs)
return response
return self.FutureResponse(get_response) | This method is identical in signature and behavior to `call_actions`, except that it sends the request and
then immediately returns a `FutureResponse` instead of blocking waiting on a response and returning a
`JobResponse`. Just call `result(timeout=None)` on the future response to block for an available
response. Some of the possible exceptions may be raised when this method is called; others may be raised when
the future is used.
:return: A future from which the job response can later be retrieved
:rtype: Client.FutureResponse |
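A usage sketch with hypothetical service and action names, showing the non-blocking pattern the docstring describes:
>>> future = client.call_actions_future('user', [{'action': 'get_user', 'body': {'id': 42}}])
>>> # ... do other work while the request is in flight ...
>>> response = future.result(timeout=5)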
4,425 | def satisfy_one(self, assumptions=None, **params):
verbosity = params.get('verbosity', 0)
default_phase = params.get('default_phase', 2)
propagation_limit = params.get('propagation_limit', -1)
decision_limit = params.get('decision_limit', -1)
seed = params.get('seed', 1)
return picosat.satisfy_one(self.nvars, self.clauses, assumptions,
verbosity, default_phase, propagation_limit,
decision_limit, seed) | If the input CNF is satisfiable, return a satisfying input point.
A contradiction will return None. |
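A brief usage sketch for an already-constructed CNF instance of this class:
>>> point = cnf.satisfy_one()
>>> if point is None:
...     print('UNSAT')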
4,426 | def _bind(self, _descriptor):
self._defcode = getattr(_descriptor.method, 'defcode', 200)
self.content_type, self.serializer = _descriptor.serializer(self.req) | Bind a ResponseObject to a given action descriptor. This
updates the default HTTP response code and selects the
appropriate content type and serializer for the response. |
4,427 | def get_person_by_nickname(self, nickname):
return next((person for person in self.get_all_people()
if person.nickname.lower() == nickname.lower()), None) | Retrieves a person by nickname |
4,428 | def loadtxt_str(path:PathOrStr)->np.ndarray:
"Return `ndarray` of `str` of lines of text from `path`."
with open(path, 'r') as f: lines = f.readlines()
return np.array([l.strip() for l in lines]) | Return `ndarray` of `str` of lines of text from `path`. |
4,429 | def create_issues_report(self, timeout=-1):
uri = "{}/issues/".format(self.data["uri"])
return self._helper.create_report(uri, timeout) | Creates an unexpected zoning report for a SAN.
Args:
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in
OneView, just stops waiting for its completion.
Returns:
list: A list of FCIssueResponse dict. |
4,430 | def phone_number(self, phone_number):
if phone_number is None:
raise ValueError("Invalid value for `phone_number`, must not be `None`")
if len(phone_number) > 16:
raise ValueError("Invalid value for `phone_number`, length must be less than `16`")
self._phone_number = phone_number | Sets the phone_number of this OrderFulfillmentRecipient.
The phone number of the fulfillment recipient. If provided, overrides the value from customer profile indicated by customer_id.
:param phone_number: The phone_number of this OrderFulfillmentRecipient.
:type: str |
4,431 | def errorhandler(self, code_or_exception):
def decorator(fn):
self._defer(lambda app: app.register_error_handler(code_or_exception, fn))
return fn
return decorator | Register a function to handle errors by code or exception class.
A decorator that is used to register a function given an
error code. Example::
@app.errorhandler(404)
def page_not_found(error):
return 'This page does not exist', 404
You can also register handlers for arbitrary exceptions::
@app.errorhandler(DatabaseError)
def special_exception_handler(error):
return 'Database connection failed', 500
:param code_or_exception: the code as integer for the handler, or
an arbitrary exception |
4,432 | def lookup(self, auth, type, mapping, defer=False):
return self._call('lookup', auth, [type, mapping], defer) | Look up a Resource ID by alias, owned Resource ID, or share activation code under the
client specified in <ClientID>.
Args:
auth: <cik>
type: Type of resource to lookup (alias | owner | shared)
mapping: Based on resource type defined above. |
4,433 | def getOrderVector(self):
incEdgesMap = self.getIncEdgesMap()
sortedKeys = sorted(incEdgesMap.keys(), reverse = True)
orderVector = []
for key in sortedKeys:
tier = []
cands = incEdgesMap[key]
for cand in cands:
tier.append(cand)
orderVector.append(tier)
return orderVector | Returns a list of lists. Each list represents tiers of candidates. candidates in earlier
tiers are preferred to candidates appearing in later tiers. Candidates in the same tier
are preferred equally. |
4,434 | def defer_function(self, callable):
self._deferred_functions.append((callable, self.scope_stack[:], self.offset)) | Schedule a function handler to be called just before completion.
This is used for handling function bodies, which must be deferred because code later in the file might modify
the global scope. When 'callable' is called, the scope at the time this is called will be restored, however it
will contain any new bindings added to it. |
4,435 | def compile_migrations(migrator, models, reverse=False):
source = migrator.orm.values()
if reverse:
source, models = models, source
migrations = diff_many(models, source, migrator, reverse=reverse)
if not migrations:
return False
migrations = NEWLINE + NEWLINE.join('\n\n'.join(migrations).split('\n'))
return CLEAN_RE.sub('\n', migrations) | Compile migrations for given models. |
4,436 | def get_system_uptime_input_rbridge_id(self, **kwargs):
config = ET.Element("config")
get_system_uptime = ET.Element("get_system_uptime")
config = get_system_uptime
input = ET.SubElement(get_system_uptime, "input")
rbridge_id = ET.SubElement(input, "rbridge-id")
rbridge_id.text = kwargs.pop('rbridge_id')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
4,437 | def load_variable(self, var=None, start_date=None, end_date=None,
time_offset=None, grid_attrs=None, **DataAttrs):
file_set = self._generate_file_set(var=var, start_date=start_date,
end_date=end_date, **DataAttrs)
ds = _load_data_from_disk(
file_set, self.preprocess_func, data_vars=self.data_vars,
coords=self.coords, start_date=start_date, end_date=end_date,
time_offset=time_offset, grid_attrs=grid_attrs, **DataAttrs
)
if var.def_time:
ds = _prep_time_data(ds)
start_date = times.maybe_convert_to_index_date_type(
ds.indexes[TIME_STR], start_date)
end_date = times.maybe_convert_to_index_date_type(
ds.indexes[TIME_STR], end_date)
ds = set_grid_attrs_as_coords(ds)
da = _sel_var(ds, var, self.upcast_float32)
if var.def_time:
da = self._maybe_apply_time_shift(da, time_offset, **DataAttrs)
return times.sel_time(da, start_date, end_date).load()
else:
return da.load() | Load a DataArray for requested variable and time range.
Automatically renames all grid attributes to match aospy conventions.
Parameters
----------
var : Var
aospy Var object
start_date : datetime.datetime
start date for interval
end_date : datetime.datetime
end date for interval
time_offset : dict
Option to add a time offset to the time coordinate to correct for
incorrect metadata.
grid_attrs : dict (optional)
Overriding dictionary of grid attributes mapping aospy internal
names to names of grid attributes used in a particular model.
**DataAttrs
Attributes needed to identify a unique set of files to load from
Returns
-------
da : DataArray
DataArray for the specified variable, date range, and interval in question. |
4,438 | def _contour(f, vertexlabels=None, contourfunc=None, **kwargs):
if contourfunc is None:
contourfunc = plt.tricontour
if vertexlabels is None:
vertexlabels = ('1', '2', '3')
x = np.linspace(0, 1, 100)
y = np.linspace(0, np.sqrt(3.0)/2.0, 100)
points2d = np.transpose([np.tile(x, len(y)), np.repeat(y, len(x))])
points3d = barycentric(points2d)
valid = (points3d.sum(axis=1) == 1.0) & ((0.0 <= points3d).all(axis=1))
points2d = points2d[np.where(valid),:][0]
points3d = points3d[np.where(valid),:][0]
z = f(points3d)
contourfunc(points2d[:,0], points2d[:,1], z, **kwargs)
_draw_axes(vertexlabels)
return plt.gcf() | Workhorse function for the above, where ``contourfunc`` is the contour
plotting function to use for actual plotting. |
4,439 | def cnst_A1T(self, Y1):
Y1f = sl.rfftn(Y1, None, axes=self.cri.axisN)
return sl.irfftn(np.conj(self.GDf) * Y1f, self.cri.Nv,
self.cri.axisN) | r"""Compute :math:`A_1^T \mathbf{y}_1` component of
:math:`A^T \mathbf{y}`. In this case :math:`A_1^T \mathbf{y}_1 =
(\Gamma_0^T \;\; \Gamma_1^T \;\; \ldots) \mathbf{y}_1`. |
4,440 | def _mpda(self, re_grammar, splitstring=0):
cnfgrammar = CNFGenerator(re_grammar)
if not self.alphabet:
self._extract_alphabet(cnfgrammar)
cnftopda = CnfPda(self.alphabet)
productions = {}
nonterminals = []
nonterminals.append(cnfgrammar.init_symbol)
for key in list(cnfgrammar.grammar_nonterminals):
if key != cnfgrammar.init_symbol:
nonterminals.append(key)
for key in list(cnfgrammar.grammar_nonterminals):
j = 0
productions[key] = {}
for pair in cnfgrammar.grammar_rules:
cnf_form = list(pair)
if cnf_form[0] == key:
productions[key][j] = {}
if isinstance(cnf_form[1], type(())):
productions[key][j]['b0'] = list(cnf_form[1])[0]
productions[key][j]['b1'] = list(cnf_form[1])[1]
else:
productions[key][j]['a'] = cnf_form[1]
j = j + 1
return cnftopda.initialize(
nonterminals, productions, list(
cnfgrammar.grammar_terminals), splitstring) | Args:
re_grammar (list): A list of grammar rules
splitstring (bool): A boolean for enabling or disabling
the splitting of symbols using a space
Returns:
PDA: The generated PDA |
4,441 | def _exec(self, cmd, url, json_data=None):
assert(cmd in ("GET", "POST", "PUT", "DELETE"))
assert(self.dev is not None)
if json_data is None:
json_data = {}
url = url.format(self.dev["ipv4_internal"])
auth = HTTPBasicAuth("dev", self.dev["api_key"])
res = None
if cmd == "GET":
res = self._local_session.session.get(
url, auth=auth, verify=False
)
elif cmd == "POST":
res = self._local_session.session.post(
url, auth=auth, json=json_data, verify=False
)
elif cmd == "PUT":
res = self._local_session.session.put(
url, auth=auth, json=json_data, verify=False
)
elif cmd == "DELETE":
res = self._local_session.session.delete(
url, auth=auth, verify=False
)
if res is not None:
res.raise_for_status()
return res.json() | execute a command at the device using the RESTful API
:param str cmd: one of the REST commands, e.g. GET or POST
:param str url: URL of the REST API the command should be applied to
:param dict json_data: json data that should be attached to the command |
4,442 | def set_user_access(self, uid, channel=None, callback=False,
link_auth=True, ipmi_msg=True, privilege_level='administrator'):
if channel is None:
channel = self.get_network_channel()
b = 0b10000000
if callback:
b |= 0b01000000
if link_auth:
b |= 0b00100000
if ipmi_msg:
b |= 0b00010000
b |= channel & 0b00001111
privilege_levels = {
'reserved': 0,
'callback': 1,
'user': 2,
'operator': 3,
'administrator': 4,
'proprietary': 5,
'no_access': 0x0F,
}
self.oem_init()
self._oem.set_user_access(
uid, channel, callback, link_auth, ipmi_msg, privilege_level)
data = [b, uid & 0b00111111,
privilege_levels[privilege_level] & 0b00001111, 0]
response = self.raw_command(netfn=0x06, command=0x43, data=data)
if 'error' in response:
raise Exception(response['error'])
return True | Set user access
:param uid: user number [1:16]
:param channel: number [1:7]
:parm callback: User Restricted to Callback
False = User Privilege Limit is determined by the User Privilege Limit
parameter, below, for both callback and non-callback connections.
True = User Privilege Limit is determined by the User Privilege Limit
parameter for callback connections, but is restricted to Callback
level for non-callback connections. Thus, a user can only initiate
a Callback when they 'call in' to the BMC, but once the callback
connection has been made, the user could potentially establish a
session as an Operator.
:param link_auth: User Link authentication
enable/disable (used to enable whether this
user's name and password information will be used for link
authentication, e.g. PPP CHAP) for the given channel. Link
authentication itself is a global setting for the channel and is
enabled/disabled via the serial/modem configuration parameters.
:param ipmi_msg: User IPMI Messaging:
(used to enable/disable whether
this user's name and password information will be used for IPMI
Messaging. In this case, 'IPMI Messaging' refers to the ability to
execute generic IPMI commands that are not associated with a
particular payload type. For example, if IPMI Messaging is disabled for
a user, but that user is enabled for activating the SOL
payload type, then IPMI commands associated with SOL and session
management, such as Get SOL Configuration Parameters and Close Session
are available, but generic IPMI commands such as Get SEL Time are
unavailable.)
:param privilege_level:
User Privilege Limit. (Determines the maximum privilege level that the
user is allowed to switch to on the specified channel.)
* callback
* user
* operator
* administrator
* proprietary
* no_access |
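A usage sketch for an established pyghmi Command session (the uid and channel values are illustrative):
>>> ipmicmd.set_user_access(uid=3, channel=1, privilege_level='operator')
True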
4,443 | def submit(jman, command, arguments, deps=[], array=None):
logdir = os.path.join(os.path.realpath(arguments.logdir),
tools.random_logdir())
jobname = os.path.splitext(os.path.basename(command[0]))[0]
cmd = tools.make_shell(sys.executable, command)
if arguments.dryrun:
return DryRunJob(cmd, cwd=arguments.cwd, queue=arguments.queue,
hostname=arguments.hostname, memfree=arguments.memfree,
hvmem=arguments.hvmem, gpumem=arguments.gpumem, pe_opt=arguments.pe_opt,
stdout=logdir, stderr=logdir, name=jobname, deps=deps,
array=array)
return jman.submit(cmd, cwd=arguments.cwd, queue=arguments.queue,
hostname=arguments.hostname, memfree=arguments.memfree,
hvmem=arguments.hvmem, gpumem=arguments.gpumem, pe_opt=arguments.pe_opt,
stdout=logdir, stderr=logdir, name=jobname, deps=deps,
array=array) | An easy submission option for grid-enabled scripts. Create the log
directories using random hash codes. Use the arguments as parsed by the main
script. |
4,444 | def get_code(module):
fp = open(module.path)
try:
return compile(fp.read(), str(module.name), 'exec')
finally:
fp.close() | Compile and return a Module's code object. |
4,445 | def process_event(self, event_name: str, data: dict):
if (isinstance(self.opt.get("learning_rate", None), float) and
isinstance(self.opt.get("learning_rate_decay", None), float)):
pass
else:
if event_name == 'after_epoch':
if (self.get_learning_rate_variable() is not None) and ('learning_rate' not in data):
data['learning_rate'] = float(K.get_value(self.get_learning_rate_variable()))
if (self.get_momentum_variable() is not None) and ('momentum' not in data):
data['momentum'] = float(K.get_value(self.get_momentum_variable()))
else:
super().process_event(event_name, data) | Process event after epoch
Args:
event_name: whether event is send after epoch or batch.
Set of values: ``"after_epoch", "after_batch"``
data: event data (dictionary)
Returns:
None |
4,446 | def parse_args(argv):
base = argparse.ArgumentParser(
description="Khard is a carddav address book for the console",
formatter_class=argparse.RawTextHelpFormatter, add_help=False)
base.add_argument("-c", "--config", default="", help="config file to use")
base.add_argument("--debug", action="store_true",
help="enable debug output")
base.add_argument("--skip-unparsable", action="store_true",
help="skip unparsable vcard files")
base.add_argument("-v", "--version", action="version",
version="Khard version %s" % khard_version)
first_parser = argparse.ArgumentParser(parents=[base])
first_parser.add_argument('remainder', nargs=argparse.REMAINDER)
parser = argparse.ArgumentParser(parents=[base])
default_addressbook_parser = argparse.ArgumentParser(add_help=False)
default_addressbook_parser.add_argument(
"-a", "--addressbook", default=[],
type=lambda x: [y.strip() for y in x.split(",")],
help="Specify one or several comma separated address book names to "
"narrow the list of contacts")
new_addressbook_parser = argparse.ArgumentParser(add_help=False)
new_addressbook_parser.add_argument(
"-a", "--addressbook", default=[],
type=lambda x: [y.strip() for y in x.split(",")],
help="Specify address book in which to create the new contact")
copy_move_addressbook_parser = argparse.ArgumentParser(add_help=False)
copy_move_addressbook_parser.add_argument(
"-a", "--addressbook", default=[],
type=lambda x: [y.strip() for y in x.split(",")],
help="Specify one or several comma separated address book names to "
"narrow the list of contacts")
copy_move_addressbook_parser.add_argument(
"-A", "--target-addressbook", default=[],
type=lambda x: [y.strip() for y in x.split(",")],
help="Specify target address book in which to copy / move the "
"selected contact")
merge_addressbook_parser = argparse.ArgumentParser(add_help=False)
merge_addressbook_parser.add_argument(
"-a", "--addressbook", default=[],
type=lambda x: [y.strip() for y in x.split(",")],
help="Specify one or several comma separated address book names to "
"narrow the list of source contacts")
merge_addressbook_parser.add_argument(
"-A", "--target-addressbook", default=[],
type=lambda x: [y.strip() for y in x.split(",")],
help="Specify one or several comma separated address book names to "
"narrow the list of target contacts")
email_header_input_file_parser = argparse.ArgumentParser(add_help=False)
email_header_input_file_parser.add_argument(
"-i", "--input-file", default="-",
help="Specify input email header file name or use stdin by default")
template_input_file_parser = argparse.ArgumentParser(add_help=False)
template_input_file_parser.add_argument(
"-i", "--input-file", default="-",
help="Specify input template file name or use stdin by default")
template_input_file_parser.add_argument(
"--open-editor", action="store_true", help="Open the default text "
"editor after successful creation of new contact")
sort_parser = argparse.ArgumentParser(add_help=False)
sort_parser.add_argument(
"-d", "--display", choices=("first_name", "last_name"),
help="Display names in contact table by first or last name")
sort_parser.add_argument(
"-g", "--group-by-addressbook", action="store_true",
help="Group contact table by address book")
sort_parser.add_argument(
"-r", "--reverse", action="store_true",
help="Reverse order of contact table")
sort_parser.add_argument(
"-s", "--sort", choices=("first_name", "last_name"),
help="Sort contact table by first or last name")
default_search_parser = argparse.ArgumentParser(add_help=False)
default_search_parser.add_argument(
"-f", "--search-in-source-files", action="store_true",
help="Look into source vcf files to speed up search queries in "
"large address books. Beware that this option could lead "
"to incomplete results.")
default_search_parser.add_argument(
"-e", "--strict-search", action="store_true",
help="narrow contact search to name field")
default_search_parser.add_argument(
"-u", "--uid", default="", help="select contact by uid")
default_search_parser.add_argument(
"search_terms", nargs="*", metavar="search terms",
help="search in all fields to find matching contact")
merge_search_parser = argparse.ArgumentParser(add_help=False)
merge_search_parser.add_argument(
"-f", "--search-in-source-files", action="store_true",
help="Look into source vcf files to speed up search queries in "
"large address books. Beware that this option could lead "
"to incomplete results.")
merge_search_parser.add_argument(
"-e", "--strict-search", action="store_true",
help="narrow contact search to name fields")
merge_search_parser.add_argument(
"-t", "--target-contact", "--target", default="",
help="search in all fields to find matching target contact")
merge_search_parser.add_argument(
"-u", "--uid", default="", help="select source contact by uid")
merge_search_parser.add_argument(
"-U", "--target-uid", default="", help="select target contact by uid")
merge_search_parser.add_argument(
"source_search_terms", nargs="*", metavar="source",
help="search in all fields to find matching source contact")
subparsers = parser.add_subparsers(dest="action")
list_parser = subparsers.add_parser(
"list",
aliases=Actions.get_aliases("list"),
parents=[default_addressbook_parser, default_search_parser,
sort_parser],
description="list all (selected) contacts",
help="list all (selected) contacts")
list_parser.add_argument(
"-p", "--parsable", action="store_true",
help="Machine readable format: uid\\tcontact_name\\taddress_book_name")
subparsers.add_parser(
"details",
aliases=Actions.get_aliases("details"),
parents=[default_addressbook_parser, default_search_parser,
sort_parser],
description="display detailed information about one contact",
help="display detailed information about one contact")
export_parser = subparsers.add_parser(
"export",
aliases=Actions.get_aliases("export"),
parents=[default_addressbook_parser, default_search_parser,
sort_parser],
description="export a contact to the custom yaml format that is "
"also used for editing and creating contacts",
help="export a contact to the custom yaml format that is also "
"used for editing and creating contacts")
export_parser.add_argument(
"--empty-contact-template", action="store_true",
help="Export an empty contact template")
export_parser.add_argument(
"-o", "--output-file", default=sys.stdout,
type=argparse.FileType("w"),
help="Specify output template file name or use stdout by default")
birthdays_parser = subparsers.add_parser(
"birthdays",
aliases=Actions.get_aliases("birthdays"),
parents=[default_addressbook_parser, default_search_parser],
description="list birthdays (sorted by month and day)",
help="list birthdays (sorted by month and day)")
birthdays_parser.add_argument(
"-d", "--display", choices=("first_name", "last_name"),
help="Display names in birthdays table by first or last name")
birthdays_parser.add_argument(
"-p", "--parsable", action="store_true",
help="Machine readable format: name\\tdate")
email_parser = subparsers.add_parser(
"email",
aliases=Actions.get_aliases("email"),
parents=[default_addressbook_parser, default_search_parser,
sort_parser],
description="list email addresses",
help="list email addresses")
email_parser.add_argument(
"-p", "--parsable", action="store_true",
help="Machine readable format: address\\tname\\ttype")
email_parser.add_argument(
"--remove-first-line", action="store_true",
help="remove \"searching for ...\" line from parsable output "
"(that line is required by mutt)")
phone_parser = subparsers.add_parser(
"phone",
aliases=Actions.get_aliases("phone"),
parents=[default_addressbook_parser, default_search_parser,
sort_parser],
description="list phone numbers",
help="list phone numbers")
phone_parser.add_argument(
"-p", "--parsable", action="store_true",
help="Machine readable format: number\\tname\\ttype")
post_address_parser = subparsers.add_parser(
"postaddress",
aliases=Actions.get_aliases("postaddress"),
parents=[default_addressbook_parser, default_search_parser,
sort_parser],
description="list postal addresses",
help="list postal addresses")
post_address_parser.add_argument(
"-p", "--parsable", action="store_true",
help="Machine readable format: address\\tname\\ttype")
subparsers.add_parser(
"source",
aliases=Actions.get_aliases("source"),
parents=[default_addressbook_parser, default_search_parser,
sort_parser],
description="edit the vcard file of a contact directly",
help="edit the vcard file of a contact directly")
new_parser = subparsers.add_parser(
"new",
aliases=Actions.get_aliases("new"),
parents=[new_addressbook_parser, template_input_file_parser],
description="create a new contact",
help="create a new contact")
new_parser.add_argument(
"--vcard-version", choices=("3.0", "4.0"),
help="Select preferred vcard version for new contact")
add_email_parser = subparsers.add_parser(
"add-email",
aliases=Actions.get_aliases("add-email"),
parents=[default_addressbook_parser, email_header_input_file_parser,
default_search_parser, sort_parser],
description="Extract email address from the \"From:\" field of an "
"email header and add to an existing contact or create a new one",
help="Extract email address from the \"From:\" field of an email "
"header and add to an existing contact or create a new one")
add_email_parser.add_argument(
"--vcard-version", choices=("3.0", "4.0"),
help="Select preferred vcard version for new contact")
subparsers.add_parser(
"merge",
aliases=Actions.get_aliases("merge"),
parents=[merge_addressbook_parser, merge_search_parser, sort_parser],
description="merge two contacts",
help="merge two contacts")
subparsers.add_parser(
"modify",
aliases=Actions.get_aliases("modify"),
parents=[default_addressbook_parser, template_input_file_parser,
default_search_parser, sort_parser],
description="edit the data of a contact",
help="edit the data of a contact")
subparsers.add_parser(
"copy",
aliases=Actions.get_aliases("copy"),
parents=[copy_move_addressbook_parser, default_search_parser,
sort_parser],
description="copy a contact to a different addressbook",
help="copy a contact to a different addressbook")
subparsers.add_parser(
"move",
aliases=Actions.get_aliases("move"),
parents=[copy_move_addressbook_parser, default_search_parser,
sort_parser],
description="move a contact to a different addressbook",
help="move a contact to a different addressbook")
remove_parser = subparsers.add_parser(
"remove",
aliases=Actions.get_aliases("remove"),
parents=[default_addressbook_parser, default_search_parser,
sort_parser],
description="remove a contact",
help="remove a contact")
remove_parser.add_argument(
"--force", action="store_true",
help="Remove contact without confirmation")
subparsers.add_parser(
"addressbooks",
aliases=Actions.get_aliases("addressbooks"),
description="list addressbooks",
help="list addressbooks")
subparsers.add_parser(
"filename",
aliases=Actions.get_aliases("filename"),
parents=[default_addressbook_parser, default_search_parser,
sort_parser],
description="list filenames of all matching contacts",
help="list filenames of all matching contacts")
first_parser.print_help = parser.print_help
args = first_parser.parse_args(argv)
remainder = args.remainder
if "debug" in args and args.debug:
logging.basicConfig(level=logging.DEBUG)
global config
config = Config(args.config)
if ("debug" in args and args.debug) or config.debug:
logging.basicConfig(level=logging.DEBUG)
logging.debug("first args=%s", args)
logging.debug("remainder=%s", remainder)
if not remainder or remainder[0] not in Actions.get_all():
remainder.insert(0, config.default_action)
logging.debug("updated remainder=%s", remainder)
skip = args.skip_unparsable
args = parser.parse_args(remainder)
args.skip_unparsable = skip
logging.debug("second args=%s", args)
if "uid" in args and args.uid and (
("search_terms" in args and args.search_terms) or
("source_search_terms" in args and args.source_search_terms)):
parser.error("You can not give arbitrary search terms and --uid at the"
" same time.")
return args | Parse the command line arguments and return the namespace that was
created by argparse.ArgumentParser.parse_args().
:returns: the namespace parsed from the command line
:rtype: argparse.Namespace |
4,447 | def parent(self):
try:
return Resource(self['parent_type'], uuid=self['parent_uuid'], check=True)
except KeyError:
raise ResourceMissing('%s has no parent' % self) | Return parent resource
raise ResourceMissing( % self) | Return parent resource
:rtype: Resource
:raises ResourceNotFound: parent resource doesn't exist
:raises ResourceMissing: parent resource is not defined |
4,448 | def fcoe_get_login_input_fcoe_login_rbridge_id(self, **kwargs):
config = ET.Element("config")
fcoe_get_login = ET.Element("fcoe_get_login")
config = fcoe_get_login
input = ET.SubElement(fcoe_get_login, "input")
fcoe_login_rbridge_id = ET.SubElement(input, "fcoe-login-rbridge-id")
fcoe_login_rbridge_id.text = kwargs.pop('fcoe_login_rbridge_id')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
4,449 | def finish(self):
self.cov.stop()
self.cov.combine()
self.cov.save() | Combines coverage data and sets the list of coverage objects to report on. |
4,450 | def search(self, **kwargs):
return super(ApiVipRequest, self).get(self.prepare_url('api/v3/vip-request/',
kwargs)) | Method to search vip's based on extends search.
:param search: Dict containing QuerySets to find vip's.
:param include: Array containing fields to include on response.
:param exclude: Array containing fields to exclude on response.
:param fields: Array containing fields to override default fields.
:param kind: Determine if result will be detailed ('detail') or basic ('basic').
:return: Dict containing vip's |
4,451 | def from_pyfile(self, filename):
d = types.ModuleType('config')
d.__file__ = filename
with open(filename) as config_file:
exec(compile(config_file.read(), filename, 'exec'), d.__dict__)
self.from_object(d)
return True | Read configuration from a Python file.
:param filename: the name of the configuration file
:return: ``True`` if the file was read successfully; raises an exception on failure |
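A usage sketch, assuming a config object exposing this method and a hypothetical settings.py containing uppercase assignments such as DEBUG = True:
>>> config.from_pyfile('settings.py')
True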
4,452 | def __initialize_ui(self):
self.viewport().installEventFilter(ReadOnlyFilter(self))
if issubclass(type(self), QListView):
super(type(self), self).setUniformItemSizes(True)
elif issubclass(type(self), QTreeView):
super(type(self), self).setUniformRowHeights(True) | Initializes the View ui. |
4,453 | def define_density_matrix(Ne, explicitly_hermitian=False, normalized=False,
variables=None):
if Ne > 9:
comma = ","
name = r"\rho"
open_brace = "_{"
close_brace = "}"
else:
comma = ""
name = "rho"
open_brace = ""
close_brace = ""
rho = []
for i in range(Ne):
row_rho = []
for j in range(Ne):
if i == j:
row_rho += [define_symbol(name, open_brace, comma, i, j,
close_brace, variables,
positive=True)]
elif i > j:
row_rho += [define_symbol(name, open_brace, comma, i, j,
close_brace, variables)]
else:
if explicitly_hermitian:
row_rho += [conjugate(define_symbol(name, open_brace,
comma, j, i,
close_brace,
variables))]
else:
row_rho += [define_symbol(name, open_brace, comma, i, j,
close_brace, variables)]
rho += [row_rho]
if normalized:
rho11 = 1-sum([rho[i][i] for i in range(1, Ne)])
rho[0][0] = rho11
rho = Matrix(rho)
return rho | r"""Return a symbolic density matrix.
The arguments are
Ne (integer):
The number of atomic states.
explicitly_hermitian (boolean):
Whether to make $\rho_{ij}=\bar{\rho}_{ij}$ for $i<j$
normalized (boolean):
Whether to make $\rho_{11}=1-\sum_{i>1} \rho_{ii}$
A very simple example:
>>> define_density_matrix(2)
Matrix([
[rho11, rho12],
[rho21, rho22]])
The density matrix can be made explicitly hermitian
>>> define_density_matrix(2, explicitly_hermitian=True)
Matrix([
[rho11, conjugate(rho21)],
[rho21, rho22]])
or normalized
>>> define_density_matrix(2, normalized=True)
Matrix([
[-rho22 + 1, rho12],
[ rho21, rho22]])
or it can be made an explicit function of given variables
>>> from sympy import symbols
>>> t, z = symbols("t, z", positive=True)
>>> define_density_matrix(2, variables=[t, z])
Matrix([
[rho11(t, z), rho12(t, z)],
[rho21(t, z), rho22(t, z)]]) |
4,454 | def createSubtitle(self, fps, section):
matched = self._pattern.search(section)
if matched is not None:
matchedDict = matched.groupdict()
return Subtitle(
self.frametime(fps, matchedDict.get("time_from")),
self.frametime(fps, matchedDict.get("time_to")),
self.formatSub(matchedDict.get("text"))
)
return None | Returns a correct 'Subtitle' object from a text given in 'section'. If 'section' cannot
be parsed, None is returned.
By default 'section' is checked against 'subPattern' regular expression. |
4,455 | def write(nml, nml_path, force=False, sort=False):
if not isinstance(nml, Namelist) and isinstance(nml, dict):
nml_in = Namelist(nml)
else:
nml_in = nml
nml_in.write(nml_path, force=force, sort=sort) | Save a namelist to disk using either a file object or its file path.
File object usage:
>>> with open(nml_path, 'w') as nml_file:
>>> f90nml.write(nml, nml_file)
File path usage:
>>> f90nml.write(nml, 'data.nml')
This function is equivalent to the ``write`` function of the ``Namelist``
object ``nml``.
>>> nml.write('data.nml')
By default, ``write`` will not overwrite an existing file. To override
this, use the ``force`` flag.
>>> nml.write('data.nml', force=True)
To alphabetically sort the ``Namelist`` keys, use the ``sort`` flag.
>>> nml.write('data.nml', sort=True) |
4,456 | def camel_case_to_name(name):
def convert_func(val):
return "_" + val.group(0).lower()
return name[0].lower() + re.sub(r'[A-Z]', convert_func, name[1:]) | Used to convert a classname to a lowercase name |
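For example:
>>> camel_case_to_name('CamelCase')
'camel_case'
>>> camel_case_to_name('HTTPServer')  # each capital after the first gets its own underscore
'h_t_t_p_server'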
4,457 | def is_subsequence(needle, haystack):
it = iter(haystack)
for element in needle:
if element not in it:
return False
return True | Are all the elements of needle contained in haystack, and in the same order?
There may be other elements interspersed throughout |
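For example:
>>> is_subsequence([1, 3], [1, 2, 3, 4])
True
>>> is_subsequence([3, 1], [1, 2, 3, 4])  # order matters
False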
4,458 | def intcomma(value):
orig = str(value)
new = re.sub(r"^(-?\d+)(\d{3})", r"\g<1>,\g<2>", orig)
if orig == new:
return new
else:
return intcomma(new) | Borrowed from django.contrib.humanize
Converts an integer to a string containing commas every three digits.
For example, 3000 becomes '3,000' and 45000 becomes '45,000'. |
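The recursion also handles signed values:
>>> intcomma(45000)
'45,000'
>>> intcomma(-1234567)
'-1,234,567'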
4,459 | def area(poly):
if len(poly) < 3:
return 0
total = [0, 0, 0]
num = len(poly)
for i in range(num):
vi1 = poly[i]
vi2 = poly[(i+1) % num]
prod = np.cross(vi1, vi2)
total[0] += prod[0]
total[1] += prod[1]
total[2] += prod[2]
if total == [0, 0, 0]:
return 0
result = np.dot(total, unit_normal(poly[0], poly[1], poly[2]))
return abs(result/2) | Area of a polygon poly |
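For example, for a unit square in the z=0 plane (this relies on the unit_normal helper referenced above):
>>> import numpy as np
>>> square = [np.array([0, 0, 0]), np.array([1, 0, 0]),
...           np.array([1, 1, 0]), np.array([0, 1, 0])]
>>> area(square)
1.0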
4,460 | def parse_tasks_file_header(header, input_file_param_util,
output_file_param_util):
job_params = []
for col in header:
col_type = '--env'
col_value = col
if col.startswith('--'):
col_type, col_value = split_pair(col, ' ', 1)
if col_type == '--env':
job_params.append(job_model.EnvParam(col_value))
elif col_type == '--label':
job_params.append(job_model.LabelParam(col_value))
elif col_type == '--input' or col_type == '--input-recursive':
name = input_file_param_util.get_variable_name(col_value)
job_params.append(
job_model.InputFileParam(
name, recursive=(col_type.endswith('recursive'))))
elif col_type == '--output' or col_type == '--output-recursive':
name = output_file_param_util.get_variable_name(col_value)
job_params.append(
job_model.OutputFileParam(
name, recursive=(col_type.endswith('recursive'))))
else:
raise ValueError('Unrecognized column header: %s' % col)
return job_params | Parse the header from the tasks file into env, input, output definitions.
Elements are formatted similar to their equivalent command-line arguments,
but with associated values coming from the data rows.
Environment variables columns are headered as "--env <name>"
Inputs columns are headered as "--input <name>" with the name optional.
Outputs columns are headered as "--output <name>" with the name optional.
For historical reasons, bareword column headers (such as "JOB_ID") are
equivalent to "--env var_name".
Args:
header: Array of header fields
input_file_param_util: Utility for producing InputFileParam objects.
output_file_param_util: Utility for producing OutputFileParam objects.
Returns:
job_params: A list of EnvParams and FileParams for the environment
variables, LabelParams, input file parameters, and output file parameters.
Raises:
ValueError: If a header contains a ":" and the prefix is not supported. |
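A usage sketch with a hypothetical header row; input_util and output_util stand for the two utility objects described above. The bareword 'SAMPLE_ID' is treated as an environment variable, per the historical behavior noted in the docstring:
>>> header = ['SAMPLE_ID', '--input INPUT_VCF', '--output-recursive OUTPUT_DIR']
>>> params = parse_tasks_file_header(header, input_util, output_util)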
4,461 | def start_module(self):
try:
self._main()
except Exception as exp:
logger.exception('%s', traceback.format_exc())
raise Exception(exp) | Wrapper for _main function.
Catch and raise any exception occurring in the main function
:return: None |
4,462 | def guess_rank(M_E):
n, m = M_E.shape
epsilon = np.count_nonzero(M_E) / np.sqrt(m * n)
_, S0, _ = svds_descending(M_E, min(100, max(M_E.shape) - 1))
S0 = np.diag(S0)
S1 = S0[:-1] - S0[1:]
S1_ = S1 / np.mean(S1[-10:])
r1 = 0
lam = 0.05
cost = [None] * len(S1_)
while r1 <= 0:
for idx in range(len(S1_)):
cost[idx] = lam * max(S1_[idx:]) + idx
i2 = np.argmin(cost)
r1 = np.max(i2)
lam += 0.05
cost = [None] * (len(S0) - 1)
for idx in range(len(S0) - 1):
cost[idx] = (S0[idx + 1] + np.sqrt(idx * epsilon)
* S0[0] / epsilon) / S0[idx]
i2 = np.argmin(cost)
r2 = np.max(i2 + 1)
r = max([r1, r2])
return r | Guess the rank of the incomplete matrix |
4,463 | def rename(old_name, new_name):
app = get_app()
snapshot = app.get_snapshot(old_name)
if not snapshot:
click.echo("Couldn't find snapshot %s" % old_name)
sys.exit(1)
new_snapshot = app.get_snapshot(new_name)
if new_snapshot:
click.echo("Snapshot with name %s already exists" % new_name)
sys.exit(1)
app.rename_snapshot(snapshot, new_name)
click.echo("Renamed snapshot %s to %s" % (old_name, new_name)) | Renames a snapshot |
4,464 | def from_seedhex_file(path: str) -> SigningKeyType:
    with open(path, 'r') as fh:
seedhex = fh.read()
return SigningKey.from_seedhex(seedhex) | Return SigningKey instance from Seedhex file
:param str path: Hexadecimal seed file path |
4,465 | def _as_chunk(self):
if self._chunks_offset == 0:
return self.contents
return self.contents[self._chunks_offset:] | A method to return a chunk of data that can be combined for
constructed method values
:return:
A native Python value that can be added together. Examples include
byte strings, unicode strings or tuples. |
4,466 | def get_worker(self, *queues):
if not queues:
queues = self.queues
queues = [self.get_queue(name) for name in queues]
worker_cls = import_attribute(self.worker_class)
worker = worker_cls(
queues,
connection=self.connection,
job_class=self.job_class,
queue_class=self.queue_class,
)
for exception_handler in self._exception_handlers:
worker.push_exc_handler(import_attribute(exception_handler))
return worker | Returns an RQ worker instance for the given queue names, e.g.::
configured_worker = rq.get_worker()
default_worker = rq.get_worker('default')
default_low_worker = rq.get_worker('default', 'low')
:param \\*queues: Names of queues the worker should act on, falls back
to the configured queues. |
4,467 | def split_query(query):
def split_assignment(a):
        sa = a.split('=', 1)
return len(sa) == 2 and tuple(sa) or (sa[0], None)
    assignments = query.split('&')
return tuple([split_assignment(a) for a in assignments if a]) | Handle the query as a WWW HTTP 1630 query, as this is how people
usually thinks of URI queries in general. We do not decode anything
in split operations, neither percent nor the terrible plus-to-space
conversion. Return:
>>> split_query("k1=v1&k2=v+2%12&k3=&k4&&&k5==&=k&==")
(('k1', 'v1'), ('k2', 'v+2%12'), ('k3', ''), ('k4', None), ('k5', '='), ('', 'k'), ('', '=')) |
4,468 | def getXML(self, CorpNum, NTSConfirmNum, UserID=None):
if NTSConfirmNum == None or len(NTSConfirmNum) != 24:
        raise PopbillException(-99999999, 'The NTS approval number (NTSConfirmNum) is not valid.')
    return self._httpget('/Taxinvoice/' + NTSConfirmNum + '?T=xml', CorpNum, UserID) | Retrieve electronic tax invoice detail information as XML.
args
    CorpNum : Popbill member business registration number
    NTSConfirmNum : NTS (National Tax Service) approval number
    UserID : Popbill member user ID
return
    electronic tax invoice information object
raise
    PopbillException |
4,469 | def update_ref(profile, ref, sha):
resource = "/refs/" + ref
payload = {"sha": sha}
data = api.patch_request(profile, resource, payload)
return prepare(data) | Point a ref to a new SHA.
Args:
profile
A profile generated from ``simplygithub.authentication.profile``.
Such profiles tell this module (i) the ``repo`` to connect to,
and (ii) the ``token`` to connect with.
ref
The ref to update, e.g., ``heads/my-feature-branch``.
sha
The SHA of the commit to point the ref to.
Returns
A dict with data about the ref. |
4,470 | def heating_level(self):
try:
            if self.side == 'left':
                level = self.device.device_data['leftHeatingLevel']
            elif self.side == 'right':
                level = self.device.device_data['rightHeatingLevel']
return level
except TypeError:
return None | Return heating level. |
4,471 | def reset(self, id=None):
if id is None: self.ccache.clear()
else: self.ccache.pop(id, None)
if DEBUG:
for route in self.routes:
                if route['id'] not in self.ccache:
                    self.ccache[route['id']] = self._build_callback(route) | Reset all routes (force plugins to be re-applied) and clear all
caches. If an ID is given, only that specific route is affected. |
4,472 | def treeplot(self, qlist, credible_interval):
for y, _, label, values, color in self.iterator():
ntiles = np.percentile(values.flatten(), qlist)
ntiles[0], ntiles[-1] = hpd(values.flatten(), credible_interval)
yield y, label, ntiles, color | Get data for each treeplot for the variable. |
4,473 | def delete_user(self, user_id, **kwargs):
        kwargs['_return_http_data_only'] = True
        if kwargs.get('asynchronous'):
return self.delete_user_with_http_info(user_id, **kwargs)
else:
(data) = self.delete_user_with_http_info(user_id, **kwargs)
return data | Delete a user. # noqa: E501
An endpoint for deleting a user. **Example usage:** `curl -X DELETE https://api.us-east-1.mbedcloud.com/v3/users/{user-id} -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.delete_user(user_id, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str user_id: The ID of the user to be deleted. (required)
:return: None
If the method is called asynchronously,
returns the request thread. |
4,474 | def log_predictive_density(self, x_test, y_test, Y_metadata=None):
mu_star, var_star = self._raw_predict(x_test)
fy = self.warping_function.f(y_test)
ll_lpd = self.likelihood.log_predictive_density(fy, mu_star, var_star, Y_metadata=Y_metadata)
return ll_lpd + np.log(self.warping_function.fgrad_y(y_test)) | Calculation of the log predictive density. Notice we add
the jacobian of the warping function here.
.. math::
    p(y_{*}|D) = p(y_{*}|f_{*}) p(f_{*}|\mu_{*}, \sigma^{2}_{*})
:param x_test: test locations (x_{*})
:type x_test: (Nx1) array
:param y_test: test observations (y_{*})
:type y_test: (Nx1) array
:param Y_metadata: metadata associated with the test points |
4,475 | def codebox(msg="", title=" ", text=""):
return tb.textbox(msg, title, text, codebox=1) | Display some text in a monospaced font, with no line wrapping.
This function is suitable for displaying code and text that is
formatted using spaces.
The text parameter should be a string, or a list or tuple of lines to be
displayed in the textbox.
:param str msg: the msg to be displayed
:param str title: the window title
:param str text: what to display in the textbox |
4,476 | def set_timezone(tz=None, deploy=False):
if not tz:
raise CommandExecutionError("Timezone name option must not be none.")
ret = {}
    query = {'type': 'config',
             'action': 'set',
             'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/timezone',
             'element': '<timezone>{0}</timezone>'.format(tz)}
    ret.update(__proxy__['panos.call'](query))
if deploy is True:
ret.update(commit())
return ret | Set the timezone of the Palo Alto proxy minion. A commit will be required before this is processed.
CLI Example:
Args:
tz (str): The name of the timezone to set.
deploy (bool): If true then commit the full candidate configuration, if false only set pending change.
.. code-block:: bash
salt '*' panos.set_timezone UTC
salt '*' panos.set_timezone UTC deploy=True |
4,477 | def _create_hash_from_doc(doc: Mapping[str, Any]) -> str:
doc_string = json.dumps(doc, sort_keys=True)
return _create_hash(doc_string) | Create hash Id from edge record
Args:
    doc (Mapping[str, Any]): edge record to create hash from
Returns:
str: Murmur3 128 bit hash |
4,478 | def decode_token(encoded_token, csrf_value=None, allow_expired=False):
jwt_manager = _get_jwt_manager()
unverified_claims = jwt.decode(
encoded_token, verify=False, algorithms=config.algorithm
)
unverified_headers = jwt.get_unverified_header(encoded_token)
try:
secret = jwt_manager._decode_key_callback(unverified_claims, unverified_headers)
except TypeError:
msg = (
"The single-argument (unverified_claims) form of decode_key_callback ",
"is deprecated. Update your code to use the two-argument form ",
"(unverified_claims, unverified_headers)."
)
warn(msg, DeprecationWarning)
secret = jwt_manager._decode_key_callback(unverified_claims)
try:
return decode_jwt(
encoded_token=encoded_token,
secret=secret,
algorithm=config.algorithm,
identity_claim_key=config.identity_claim_key,
user_claims_key=config.user_claims_key,
csrf_value=csrf_value,
audience=config.audience,
leeway=config.leeway,
allow_expired=allow_expired
)
except ExpiredSignatureError:
expired_token = decode_jwt(
encoded_token=encoded_token,
secret=secret,
algorithm=config.algorithm,
identity_claim_key=config.identity_claim_key,
user_claims_key=config.user_claims_key,
csrf_value=csrf_value,
audience=config.audience,
leeway=config.leeway,
allow_expired=True
)
ctx_stack.top.expired_jwt = expired_token
raise | Returns the decoded token (python dict) from an encoded JWT. This does all
the checks to insure that the decoded token is valid before returning it.
:param encoded_token: The encoded JWT to decode into a python dict.
:param csrf_value: Expected CSRF double submit value (optional)
:param allow_expired: Options to ignore exp claim validation in token
:return: Dictionary containing contents of the JWT |
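A hedged sketch of how a caller can recover the payload of an expired token via the context stash (names match flask-jwt-extended 3.x internals on Flask < 2.3; the surrounding app setup is assumed):

from flask import _app_ctx_stack as ctx_stack
from jwt import ExpiredSignatureError

def claims_even_if_expired(encoded_token):
    # decode_token stores the fully decoded payload on the app context
    # before re-raising ExpiredSignatureError, so it stays accessible.
    try:
        return decode_token(encoded_token)
    except ExpiredSignatureError:
        return ctx_stack.top.expired_jwt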
4,479 | def remove_node(cls, cluster_id_label, private_dns, parameters=None):
conn = Qubole.agent(version=Cluster.api_version)
parameters = {} if not parameters else parameters
data = {"private_dns" : private_dns, "parameters" : parameters}
        return conn.delete(cls.element_path(cluster_id_label) + "/nodes", data) | Remove a node from an existing cluster |
4,480 | def get_iterator_type(script_settings, subscripts={}):
    if 'iterator_type' in script_settings:
        if script_settings['iterator_type'] == 'Loop':
            iterator_type = 'loop'
        elif script_settings['iterator_type'] == 'Parameter Sweep':
            iterator_type = 'sweep'
        else:
            raise TypeError('unknown iterator type')
    else:
        if 'sweep_param' in script_settings:
            iterator_type = 'sweep'
        elif 'num_loops' in script_settings:
            iterator_type = 'loop'
        else:
            raise TypeError('unknown iterator type')
return iterator_type | figures out the iterator type based on the script settings and (optionally) subscripts
Args:
script_settings: iterator_type
subscripts: subscripts
Returns: |
4,481 | def get_logs_multipart(
w3,
startBlock,
stopBlock,
address,
topics,
max_blocks):
_block_ranges = block_ranges(startBlock, stopBlock, max_blocks)
for from_block, to_block in _block_ranges:
        params = {
            'fromBlock': from_block,
            'toBlock': to_block,
            'address': address,
            'topics': topics
        }
yield w3.eth.getLogs(
drop_items_with_none_value(params)) | Used to break up requests to ``eth_getLogs``
The getLog request is partitioned into multiple calls of the max number of blocks
``max_blocks``. |
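block_ranges is not shown in the snippet; a plausible implementation yields inclusive windows of at most max_blocks blocks each:

def block_ranges(start_block, stop_block, max_blocks):
    # Cover start_block..stop_block (inclusive) in windows of
    # at most max_blocks blocks each.
    from_block = start_block
    while from_block <= stop_block:
        to_block = min(from_block + max_blocks - 1, stop_block)
        yield from_block, to_block
        from_block = to_block + 1

assert list(block_ranges(0, 9, 4)) == [(0, 3), (4, 7), (8, 9)]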
4,482 | def new_line(self, tokens, line_end, line_start):
if _last_token_on_line_is(tokens, line_end, ";"):
self.add_message("unnecessary-semicolon", line=tokens.start_line(line_end))
line_num = tokens.start_line(line_start)
line = tokens.line(line_start)
if tokens.type(line_start) not in _JUNK_TOKENS:
self._lines[line_num] = line.split("\n")[0]
self.check_lines(line, line_num) | a new line has been encountered, process it if necessary |
4,483 | def set_info(self, info):
idx = info.get(self.name)
if idx is not None:
self.__dict__.update(idx) | set my state from the passed info |
4,484 | def _calc_sdof_tf(self, osc_freq, damping=0.05):
return (-osc_freq ** 2. / (np.square(self.freqs) - np.square(osc_freq)
- 2.j * damping * osc_freq * self.freqs)) | Compute the transfer function for a single-degree-of-freedom
oscillator.
The transfer function computes the pseudo-spectral acceleration.
Parameters
----------
osc_freq : float
natural frequency of the oscillator [Hz]
damping : float, optional
damping ratio of the oscillator in decimal. Default value is
0.05, or 5%.
Returns
-------
tf : :class:`numpy.ndarray`
Complex-valued transfer function with length equal to `self.freq`. |
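A worked check of the formula, using a standalone frequency array in place of self.freqs:

import numpy as np

freqs = np.linspace(0.1, 20.0, 2000)
osc_freq, damping = 5.0, 0.05
tf = -osc_freq ** 2 / (np.square(freqs) - np.square(osc_freq)
                       - 2j * damping * osc_freq * freqs)
# At f == osc_freq the real part of the denominator vanishes, so the
# peak of |tf| approaches 1 / (2 * damping) = 10 for 5% damping.
print(np.abs(tf).max())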
4,485 | def to_file(file, array):
try:
array.tofile(file)
except (TypeError, IOError, UnsupportedOperation):
file.write(array.tostring()) | Wrapper around ndarray.tofile to support any file-like object |
4,486 | def example_lab_to_xyz():
print("=== Simple Example: Lab->XYZ ===")
lab = LabColor(0.903, 16.296, -2.22)
print(lab)
xyz = convert_color(lab, XYZColor)
print(xyz)
print("=== End Example ===\n") | This function shows a simple conversion of an Lab color to an XYZ color. |
4,487 | def _execute(self, sql, args):
sql = sql.lower().strip()
args = args or ()
tmp = sql[:6]
with (yield self._pool.Connection()) as conn:
try:
with conn.cursor() as cursor:
yield cursor.execute(sql, args=args)
                    if tmp == 'select':
datas = cursor.fetchall()
return datas
except Exception as e:
err = traceback.format_exc()
print(err)
                if tmp in ['insert', 'update', 'delete']:
yield conn.rollback()
else:
                if tmp == 'insert':
insertId = conn.insert_id()
yield conn.commit()
return insertId
                elif tmp in ['update', 'delete']:
                    yield conn.commit() | Execute a SQL statement.
:param sql: the SQL statement
:param args: the statement parameters
:return: the return value is always an array object |
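Illustrative use from a Tornado coroutine, assuming _execute is itself wrapped with tornado's gen.coroutine (the db object and table here are hypothetical):

from tornado import gen

@gen.coroutine
def demo(db):
    # SELECT yields the fetched rows; INSERT yields the new row id.
    rows = yield db._execute('select id, name from users', ())
    new_id = yield db._execute('insert into users (name) values (%s)',
                               ('bob',))
    raise gen.Return((rows, new_id))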
4,488 | def find_related_modules(package, related_name_re='.+',
                                 ignore_exceptions=False):
    warnings.warn('find_related_modules is deprecated.',
                  DeprecationWarning)
package_elements = package.rsplit(".", 1)
try:
if len(package_elements) == 2:
pkg = __import__(package_elements[0], globals(), locals(), [
package_elements[1]])
pkg = getattr(pkg, package_elements[1])
else:
pkg = __import__(package_elements[0], globals(), locals(), [])
pkg_path = pkg.__path__
except AttributeError:
return []
p = re.compile(related_name_re)
modules = []
for name in find_modules(package, include_packages=True):
        if p.match(name.split('.')[-1]):
try:
modules.append(import_string(name, silent=ignore_exceptions))
except Exception as e:
if not ignore_exceptions:
raise e
return modules | Find matching modules using a package and a module name pattern. |
4,489 | def cli(ctx, packages, all, list, force, platform):
if packages:
for package in packages:
Installer(package, platform, force).install()
elif all:
packages = Resources(platform).packages
for package in packages:
Installer(package, platform, force).install()
elif list:
Resources(platform).list_packages(installed=True, notinstalled=True)
else:
click.secho(ctx.get_help()) | Install packages. |
4,490 | def _infer_map(node, context):
values = {}
for name, value in node.items:
if isinstance(name, nodes.DictUnpack):
double_starred = helpers.safe_infer(value, context)
if not double_starred:
raise exceptions.InferenceError
if not isinstance(double_starred, nodes.Dict):
raise exceptions.InferenceError(node=node, context=context)
unpack_items = _infer_map(double_starred, context)
values = _update_with_replacement(values, unpack_items)
else:
key = helpers.safe_infer(name, context=context)
value = helpers.safe_infer(value, context=context)
if any(not elem for elem in (key, value)):
raise exceptions.InferenceError(node=node, context=context)
values = _update_with_replacement(values, {key: value})
return values | Infer all values based on Dict.items |
4,491 | def ploidy(args):
p = OptionParser(ploidy.__doc__)
opts, args, iopts = p.set_image_options(args, figsize="8x7")
if len(args) != 2:
sys.exit(not p.print_help())
seqidsfile, klayout = args
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
Karyotype(fig, root, seqidsfile, klayout)
fc = "darkslategrey"
radius = .012
ot = -.05
    TextCircle(root, .1, .9 + ot, r'$\gamma$', radius=radius, fc=fc)
    root.text(.1, .88 + ot, r"$\times3$", ha="center", va="top", color=fc)
    TextCircle(root, .08, .79 + ot, r'$\alpha$', radius=radius, fc=fc)
    TextCircle(root, .12, .79 + ot, r'$\beta$', radius=radius, fc=fc)
root.text(.1, .77 + ot, r"$\times3\times2\times2$", ha="center", va="top", color=fc)
root.text(.1, .67 + ot, r"Brassica triplication", ha="center",
va="top", color=fc, size=11)
root.text(.1, .65 + ot, r"$\times3\times2\times2\times3$", ha="center", va="top", color=fc)
root.text(.1, .42 + ot, r"Allo-tetraploidy", ha="center",
va="top", color=fc, size=11)
root.text(.1, .4 + ot, r"$\times3\times2\times2\times3\times2$", ha="center", va="top", color=fc)
bb = dict(boxstyle="round,pad=.5", fc="w", ec="0.5", alpha=0.5)
root.text(.5, .2 + ot, r"\noindent\textit{Brassica napus}\\"
"(A$\mathsf{_n}$C$\mathsf{_n}$ genome)", ha="center",
size=16, color="k", bbox=bb)
root.set_xlim(0, 1)
root.set_ylim(0, 1)
root.set_axis_off()
pf = "napus"
image_name = pf + "." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts) | %prog ploidy seqids layout
Build a figure that calls graphics.karyotype to illustrate the high ploidy
of B. napus genome. |
4,492 | def disconnect(self, connection):
proto = self._protocols.pop(connection)
proto.transport = None
return {} | Disconnects the given protocol. |
4,493 | def GetMessages(self, formatter_mediator, event):
if self.DATA_TYPE != event.data_type:
        raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(
            event.data_type))
    event_values = event.CopyToDict()
    page_transition_type = event_values.get('page_transition_type', None)
    if page_transition_type is not None:
        page_transition, page_transition_long = self._PAGE_TRANSITIONS.get(
            page_transition_type, self._UNKNOWN_PAGE_TRANSITION)
        if page_transition_long:
            event_values['page_transition'] = '{0:s} - {1:s}'.format(
                page_transition, page_transition_long)
        else:
            event_values['page_transition'] = page_transition
    visit_source = event_values.get('visit_source', None)
    if visit_source is not None:
        event_values['visit_source'] = self._VISIT_SOURCE.get(
            visit_source, 'UNKNOWN')
    extras = []
    url_hidden = event_values.get('url_hidden', False)
    if url_hidden:
        extras.append('(URL hidden)')
    typed_count = event_values.get('typed_count', 0)
    if typed_count == 0:
        extras.append('(URL not typed directly - no typed count)')
    elif typed_count == 1:
        extras.append('(type count {0:d} time)'.format(typed_count))
    else:
        extras.append('(type count {0:d} times)'.format(typed_count))
    event_values['extra'] = ' '.join(extras)
return self._ConditionalFormatMessages(event_values) | Determines the formatted message strings for an event object.
Args:
formatter_mediator (FormatterMediator): mediates the interactions between
formatters and other components, such as storage and Windows EventLog
resources.
event (EventObject): event.
Returns:
tuple(str, str): formatted message string and short message string.
Raises:
WrongFormatter: if the event object cannot be formatted by the formatter. |
4,494 | def plot(x, y, xlabel=LABEL_DEFAULT, ylabel=LABEL_DEFAULT, title=LABEL_DEFAULT):
title = _get_title(title)
plt_ref = tc.extensions.plot(x, y, xlabel, ylabel, title)
return Plot(plt_ref) | Plots the data in `x` on the X axis and the data in `y` on the Y axis
in a 2d visualization, and shows the resulting visualization.
Uses the following heuristic to choose the visualization:
* If `x` and `y` are both numeric (SArray of int or float), and they contain
fewer than or equal to 5,000 values, show a scatter plot.
* If `x` and `y` are both numeric (SArray of int or float), and they contain
more than 5,000 values, show a heat map.
* If `x` is numeric and `y` is an SArray of string, show a box and whisker
plot for the distribution of numeric values for each categorical (string)
value.
* If `x` and `y` are both SArrays of string, show a categorical heat map.
This show method supports SArrays of dtypes: int, float, str.
Notes
-----
- The plot will be returned as a Plot object, which can then be shown,
saved, etc. and will display automatically in a Jupyter Notebook.
Parameters
----------
x : SArray
The data to plot on the X axis of a 2d visualization.
y : SArray
The data to plot on the Y axis of a 2d visualization. Must be the same
length as `x`.
xlabel : str (optional)
The text label for the X axis. Defaults to "X".
ylabel : str (optional)
The text label for the Y axis. Defaults to "Y".
title : str (optional)
The title of the plot. Defaults to LABEL_DEFAULT. If the value is
LABEL_DEFAULT, the title will be "<xlabel> vs. <ylabel>". If the value
is None, the title will be omitted. Otherwise, the string passed in as the
title will be used as the plot title.
Examples
--------
Show a categorical heat map of pets and their feelings.
>>> x = turicreate.SArray(['dog', 'cat', 'dog', 'dog', 'cat'])
>>> y = turicreate.SArray(['happy', 'grumpy', 'grumpy', 'happy', 'grumpy'])
>>> turicreate.show(x, y)
Show a scatter plot of the function y = 2x, for x from 0 through 9, labeling
the axes and plot title with custom strings.
>>> x = turicreate.SArray(range(10))
>>> y = x * 2
>>> turicreate.show(x, y,
... xlabel="Custom X label",
... ylabel="Custom Y label",
... title="Custom title") |
4,495 | def inverse_kinematics(self, end_effector_transformation,
q=None,
max_iter=1000, tolerance=0.05,
mask=numpy.ones(6),
use_pinv=False):
if q is None:
q = numpy.zeros((len(self.links), 1))
q = numpy.matrix(q.reshape(-1, 1))
best_e = numpy.ones(6) * numpy.inf
best_q = None
alpha = 1.0
for _ in range(max_iter):
e = numpy.multiply(transform_difference(self.forward_kinematics(q)[0], end_effector_transformation), mask)
d = numpy.linalg.norm(e)
if d < numpy.linalg.norm(best_e):
best_e = e.copy()
best_q = q.copy()
alpha *= 2.0 ** (1.0 / 8.0)
else:
q = best_q.copy()
e = best_e.copy()
alpha *= 0.5
if use_pinv:
dq = numpy.linalg.pinv(self._jacob0(q)) * e.reshape((-1, 1))
else:
dq = self._jacob0(q).T * e.reshape((-1, 1))
q += alpha * dq
if d < tolerance:
return q
else:
        raise ValueError('could not converge d={}'.format(numpy.linalg.norm(best_e))) | Computes the joint angles corresponding to the end effector transformation.
:param end_effector_transformation: the end effector homogeneous transformation matrix
:param vector q: initial estimate of the joint angles
:param int max_iter: maximum number of iteration
:param float tolerance: tolerance before convergence
:param mask: specify the cartesian DOF that will be ignore (in the case of a chain with less than 6 joints).
:rtype: vector of the joint angles (theta 1, theta 2, ..., theta n) |
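Usage sketch (chain and target_transform are assumed pypot-style objects; the mask keeps only the position error for an arm with fewer than 6 joints):

import numpy

position_only_mask = numpy.array([1, 1, 1, 0, 0, 0])
# q = chain.inverse_kinematics(target_transform,
#                              mask=position_only_mask,
#                              max_iter=500, tolerance=1e-3)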
4,496 | def _is_method_retryable(self, method):
if self.method_whitelist and method.upper() not in self.method_whitelist:
return False
return True | Checks if a given HTTP method should be retried upon, depending if
it is included on the method whitelist. |
4,497 | def assert_shape_match(shape1, shape2):
shape1 = tf.TensorShape(shape1)
shape2 = tf.TensorShape(shape2)
if shape1.ndims is None or shape2.ndims is None:
        raise ValueError('Shapes must have known rank. Got %s and %s.' %
                         (shape1.ndims, shape2.ndims))
shape1.assert_same_rank(shape2)
shape1.assert_is_compatible_with(shape2) | Ensure the shape1 match the pattern given by shape2.
Ex:
assert_shape_match((64, 64, 3), (None, None, 3))
Args:
shape1 (tuple): Static shape
shape2 (tuple): Dynamic shape (can contain None) |
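Usage examples (the commented calls would raise ValueError):

assert_shape_match((64, 64, 3), (None, None, 3))    # compatible: passes
# assert_shape_match((64, 64), (None, None, 3))     # rank mismatch: raises
# assert_shape_match((64, 64, 1), (None, None, 3))  # dim mismatch: raises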
4,498 | def update_artifact_cache(self, vts_artifactfiles_pairs):
update_artifact_cache_work = self._get_update_artifact_cache_work(vts_artifactfiles_pairs)
if update_artifact_cache_work:
self.context.submit_background_work_chain([update_artifact_cache_work],
                                                parent_workunit_name='cache') | Write to the artifact cache, if we're configured to.
vts_artifactfiles_pairs - a list of pairs (vts, artifactfiles) where
- vts is single VersionedTargetSet.
- artifactfiles is a list of absolute paths to artifacts for the VersionedTargetSet. |
4,499 | def parse_string_expr(self, string_expr_node):
    string_expr_node_value = string_expr_node['value']
    string_expr_str = string_expr_node_value[1:-1]
    if string_expr_node_value[0] == '"':
        string_expr_str = string_expr_str.replace('\\"', '"')
    else:
        string_expr_str = string_expr_str.replace("\\'", "'")
    # The literal is re-parsed behind a 5-character wrapper prefix; the
    # position fix-up below subtracts that prefix and the opening quote.
    raw_ast = self.parse_string('expr ' + string_expr_str)
    parsed_string_expr_nodes = raw_ast['body'][0]['parts']
    start_pos = string_expr_node['start']
    def adjust_position(node):
        pos = node['pos']
        pos['char'] += start_pos['char'] - 1 - 5
        pos['index'] += start_pos['index'] - 5
        pos['line'] += start_pos['line'] - 1
for parsed_string_expr_node in parsed_string_expr_nodes:
traverse(parsed_string_expr_node, on_enter=adjust_position)
return parsed_string_expr_nodes | Parse a string node content. |