Unnamed: 0 (int64, 0–389k) | code (string, length 26–79.6k) | docstring (string, length 1–46.9k) |
---|---|---|
23,500 | def asdatetime(self, naive=True):
args = list(self.timetuple()[0:6])+[self.microsecond]
if not naive:
args.append(self.tzinfo)
return datetime.datetime(*args) | Return this datetime_tz as a datetime object.
Args:
naive: Return *without* any tz info.
Returns:
This datetime_tz as a datetime object. |
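A minimal usage sketch for the `asdatetime` helper above, assuming the `python-datetime-tz` package provides the `datetime_tz` class; the values are illustrative.

```python
# Hypothetical usage of datetime_tz.asdatetime(); assumes the python-datetime-tz package.
import datetime_tz

now = datetime_tz.datetime_tz.now()     # timezone-aware datetime_tz instance
naive = now.asdatetime()                # plain datetime.datetime, tzinfo dropped
aware = now.asdatetime(naive=False)     # plain datetime.datetime, tzinfo kept
assert naive.tzinfo is None and aware.tzinfo is not None
```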
23,501 | def processPointOfSalePayment(request):
print( % request.GET)
data = json.loads(request.GET.get(,))
if data:
status = data.get()
errorCode = data.get()
errorDescription = errorCode
try:
stateData = data.get(,)
if stateData:
metadata = json.loads(b64decode(unquote(stateData).encode()).decode())
else:
metadata = {}
except (TypeError, ValueError, binascii.Error):
logger.error()
messages.error(
request,
format_html(
,
str(_()),
str(_()),
)
)
return HttpResponseRedirect(reverse())
serverTransId = data.get()
clientTransId = data.get()
else:
errorCode = request.GET.get()
errorDescription = request.GET.get()
status = if not errorCode else
serverTransId = request.GET.get()
clientTransId = request.GET.get()
try:
stateData = request.GET.get(,)
if stateData:
metadata = json.loads(b64decode(unquote(stateData).encode()).decode())
else:
metadata = {}
except (TypeError, ValueError, binascii.Error):
logger.error()
messages.error(
request,
format_html(
,
str(_()),
str(_()),
)
)
return HttpResponseRedirect(reverse())
sourceUrl = metadata.get(,reverse())
successUrl = metadata.get(,reverse())
submissionUserId = metadata.get(, getattr(getattr(request,,None),,None))
transactionType = metadata.get()
taxable = metadata.get(, False)
addSessionInfo = metadata.get(,False)
customerEmail = metadata.get()
if errorCode or status != :
logger.error( % (errorCode, errorDescription))
messages.error(
request,
format_html(
,
str(_()), errorCode, errorDescription
)
)
return HttpResponseRedirect(sourceUrl)
api_instance = TransactionsApi()
api_instance.api_client.configuration.access_token = getattr(settings,,)
location_id = getattr(settings,,)
if serverTransId:
try:
api_response = api_instance.retrieve_transaction(transaction_id=serverTransId,location_id=location_id)
except ApiException:
logger.error()
messages.error(request,_())
return HttpResponseRedirect(sourceUrl)
if api_response.errors:
logger.error( % api_response.errors)
messages.error(request,str(_()) + api_response.errors)
return HttpResponseRedirect(sourceUrl)
transaction = api_response.transaction
elif clientTransId:
try:
api_response = api_instance.list_transactions(location_id=location_id)
except ApiException:
logger.error()
messages.error(request,_())
return HttpResponseRedirect(sourceUrl)
if api_response.errors:
logger.error( % api_response.errors)
messages.error(request,str(_()) + api_response.errors)
return HttpResponseRedirect(sourceUrl)
transactions_list = [x for x in api_response.transactions if x.client_id == clientTransId]
if len(transactions_list) == 1:
transaction = transactions_list[0]
else:
logger.error()
messages.error(request,_())
return HttpResponseRedirect(sourceUrl)
else:
logger.error()
messages.error(request,_())
return HttpResponseRedirect(sourceUrl)
this_total = sum([x.amount_money.amount / 100 for x in transaction.tenders or []]) - \
sum([x.amount_money.amount / 100 for x in transaction.refunds or []])
submissionUser = None
if submissionUserId:
try:
submissionUser = User.objects.get(id=int(submissionUserId))
except (ValueError, ObjectDoesNotExist):
logger.warning()
if in metadata.keys():
try:
tr_id = int(metadata.get())
tr = TemporaryRegistration.objects.get(id=tr_id)
except (ValueError, TypeError, ObjectDoesNotExist):
logger.error( % metadata.get())
messages.error(
request,
str(_()) + % metadata.get()
)
return HttpResponseRedirect(sourceUrl)
tr.expirationDate = timezone.now() + timedelta(minutes=getConstant())
tr.save()
this_invoice = Invoice.get_or_create_from_registration(tr, submissionUser=submissionUser)
this_description = _( % tr_id)
elif in metadata.keys():
try:
this_invoice = Invoice.objects.get(id=int(metadata.get()))
this_description = _( % this_invoice.id)
except (ValueError, TypeError, ObjectDoesNotExist):
logger.error( % metadata.get())
messages.error(
request,
str(_()) + % metadata.get()
)
return HttpResponseRedirect(sourceUrl)
else:
if transactionType == :
this_description = _()
else:
this_description = transactionType
this_invoice = Invoice.create_from_item(
this_total,
this_description,
submissionUser=submissionUser,
calculate_taxes=(taxable is not False),
transactionType=transactionType,
)
paymentRecord, created = SquarePaymentRecord.objects.get_or_create(
transactionId=transaction.id,
locationId=transaction.location_id,
defaults={: this_invoice,}
)
if created:
this_invoice.processPayment(
amount=this_total,
fees=0,
paidOnline=True,
methodName=,
methodTxn=transaction.id,
notify=customerEmail,
)
updateSquareFees.schedule(args=(paymentRecord,), delay=60)
if addSessionInfo:
paymentSession = request.session.get(INVOICE_VALIDATION_STR, {})
paymentSession.update({
: str(this_invoice.id),
: this_total,
: successUrl,
})
request.session[INVOICE_VALIDATION_STR] = paymentSession
return HttpResponseRedirect(successUrl) | This view handles the callbacks from point-of-sale transactions.
Please note that this will only work if you have set up your callback
URL in Square to point to this view. |
23,512 | def create_preauth(byval, key, by="name", expires=0, timestamp=None):
if timestamp is None:
timestamp = int(datetime.now().strftime("%s")) * 1000
pak = hmac.new(
codecs.latin_1_encode(key)[0],
("%s|%s|%s|%s" % (
byval,
by,
expires,
timestamp
)).encode("utf-8"),
hashlib.sha1
).hexdigest()
return pak | Generates a zimbra preauth value
:param byval: The value of the targeted user (according to the
by-parameter). For example: The account name, if "by" is "name".
:param key: The domain preauth key (you can retrieve that using zmprov gd)
:param by: What type is the byval-parameter? Valid parameters are "name"
(default), "id" and "foreignPrincipal"
:param expires: Milliseconds when the auth token expires. Defaults to 0
for default account expiration
:param timestamp: Current timestamp (is calculated by default)
:returns: The preauth value to be used in an AuthRequest
:rtype: str |
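A short sketch of how the returned preauth value is typically used to build a Zimbra preauth URL; the domain key, account, and host below are placeholders, and the URL layout is the standard Zimbra preauth form rather than anything defined by this function.

```python
# Placeholder key/account/host; only create_preauth() above is taken from the row.
import time

key = "0123456789abcdef0123456789abcdef"   # from: zmprov gd <domain> zimbraPreAuthKey
account = "user@example.com"
timestamp = int(time.time()) * 1000

pak = create_preauth(account, key, by="name", expires=0, timestamp=timestamp)
url = ("https://mail.example.com/service/preauth"
       "?account=%s&by=name&timestamp=%d&expires=0&preauth=%s"
       % (account, timestamp, pak))
```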
23,503 | def proc_file(infile: str, outfile: str, opts: Namespace) -> bool:
g = fhir_json_to_rdf(infile, opts.uribase, opts.graph, add_ontology_header=not opts.noontology,
do_continuations=not opts.nocontinuation, replace_narrative_text=bool(opts.nonarrative),
metavoc=opts.fhir_metavoc)
if g:
if not opts.graph:
serialize_graph(g, outfile, opts)
return True
else:
print("{} : Not a FHIR collection or resource".format(infile))
return False | Process infile.
:param infile: input file to be processed
:param outfile: target output file.
:param opts:
:return: |
23,504 | def transform_annotation(self, ann, duration):
_, values = ann.to_interval_values()
vector = np.asarray(values[0], dtype=self.dtype)
if len(vector) != self.dimension:
raise DataError(
'vector dimension {} does not match expected dimension {}'.format(len(vector), self.dimension))
return {'vector': vector} | Apply the vector transformation.
Parameters
----------
ann : jams.Annotation
The input annotation
duration : number > 0
The duration of the track
Returns
-------
data : dict
data['vector'] : np.ndarray, shape=(dimension,)
Raises
------
DataError
If the input dimension does not match |
23,505 | def writes(mdict, filename=, truncate_existing=False,
truncate_invalid_matlab=False, options=None, **keywords):
if not isinstance(options, Options):
options = Options(**keywords)
towrite = []
for p, v in mdict.items():
groupname, targetname = utilities.process_path(p)
towrite.append((groupname, targetname, v))
if truncate_existing or not os.path.isfile(filename):
f = h5py.File(filename, mode=, userblock_size=512)
else:
f = h5py.File(filename)
if options.matlab_compatible and truncate_invalid_matlab \
and f.userblock_size < 128:
f.close()
f = h5py.File(filename, mode=, userblock_size=512)
except:
raise
finally:
if isinstance(f, h5py.File):
userblock_size = f.userblock_size
f.close()
else:
raise IOError()
if options.matlab_compatible and userblock_size >= 128:
now = datetime.datetime.now()
s = \
+ __version__ + \
+ now.strftime() \
+
b = bytearray(s + (128-12-len(s))*, encoding=)
b.extend(bytearray.fromhex())
try:
fd = open(filename, )
fd.write(b)
except:
raise
finally:
fd.close()
f = None
try:
f = h5py.File(filename)
for groupname, targetname, data in towrite:
if groupname not in f:
grp = f.require_group(groupname)
else:
grp = f[groupname]
utilities.write_data(f, grp, targetname, data,
None, options)
except:
raise
finally:
if isinstance(f, h5py.File):
f.close() | Writes data into an HDF5 file (high level).
High level function to store one or more Python types (data) to
specified pathes in an HDF5 file. The paths are specified as POSIX
style paths where the directory name is the Group to put it in and
the basename is the name to write it to.
There are various options that can be used to influence how the data
is written. They can be passed as an already constructed ``Options``
into `options` or as additional keywords that will be used to make
one by ``options = Options(**keywords)``.
Two very important options are ``store_python_metadata`` and
``matlab_compatible``, which are ``bool``. The first makes it so
that enough metadata (HDF5 Attributes) are written that `data` can
be read back accurately without it (or its contents if it is a
container type) ending up different types, transposed in the case of
numpy arrays, etc. The latter makes it so that the appropriate
metadata is written, string and bool and complex types are converted
properly, and numpy arrays are transposed; which is needed to make
sure that MATLAB can import `data` correctly (the HDF5 header is
also set so MATLAB will recognize it).
Paths are POSIX style and can either be given directly as ``str`` or
``bytes``, or the separated path can be given as an iterable of
``str`` and ``bytes``. Each part of a separated path is escaped
using ``utilities.escape_path``. Otherwise, the path is assumed to
be already escaped. Escaping is done so that targets with a part
that starts with one or more periods, contain slashes, and/or
contain nulls can be used without causing the wrong Group to be
looked in or the wrong target to be looked at. It essentially allows
one to make a Dataset named ``'..'`` or ``'a/a'`` instead of moving
around in the Dataset hierarchy.
Parameters
----------
mdict : dict, dict like
The ``dict`` or other dictionary type object of paths
and data to write to the file. The paths, the keys, must be
POSIX style paths where the directory name is the Group to put
it in and the basename is the name to write it to. The values
are the data to write.
filename : str, optional
The name of the HDF5 file to write `data` to.
truncate_existing : bool, optional
Whether to truncate the file if it already exists before writing
to it.
truncate_invalid_matlab : bool, optional
Whether to truncate a file if matlab_compatibility is being
done and the file doesn't have the proper header (userblock in
HDF5 terms) setup for MATLAB metadata to be placed.
options : Options, optional
The options to use when writing. Is mutually exclusive with any
additional keyword arguments given (set to ``None`` or don't
provide to use them).
**keywords :
If `options` was not provided or was ``None``, these are used as
arguments to make a ``Options``.
Raises
------
TypeError
If a path is of an invalid type.
NotImplementedError
If writing `data` is not supported.
exceptions.TypeNotMatlabCompatibleError
If writing a type not compatible with MATLAB and
`options.action_for_matlab_incompatible` is set to ``'error'``.
See Also
--------
utilities.process_path
utilities.escape_path
write : Writes just a single piece of data
reads
read
Options
utilities.write_data : Low level version |
23,506 | def _translate_nd(self,
source: mx.nd.NDArray,
source_length: int,
restrict_lexicon: Optional[lexicon.TopKLexicon],
raw_constraints: List[Optional[constrained.RawConstraintList]],
raw_avoid_list: List[Optional[constrained.RawConstraintList]],
max_output_lengths: mx.nd.NDArray) -> List[Translation]:
return self._get_best_from_beam(*self._beam_search(source,
source_length,
restrict_lexicon,
raw_constraints,
raw_avoid_list,
max_output_lengths)) | Translates source of source_length, given a bucket_key.
:param source: Source ids. Shape: (batch_size, bucket_key, num_factors).
:param source_length: Bucket key.
:param restrict_lexicon: Lexicon to use for vocabulary restriction.
:param raw_constraints: A list of optional constraint lists.
:return: Sequence of translations. |
23,507 | def tasks(self, name):
found = self[name]
if isinstance(found, Shovel):
return [v for _, v in found.items()]
return [found] | Get all the tasks that match a name |
23,508 | def any(self, predicate=None):
if self.closed():
raise ValueError("Attempt to call any() on a closed Queryable.")
if predicate is None:
predicate = lambda x: True
if not is_callable(predicate):
raise TypeError("any() parameter predicate={predicate} is not callable".format(predicate=repr(predicate)))
for item in self.select(predicate):
if item:
return True
return False | Determine if the source sequence contains any elements which satisfy
the predicate.
Only enough of the sequence to satisfy the predicate once is consumed.
Note: This method uses immediate execution.
Args:
predicate: An optional single argument function used to test each
element. If omitted, or None, this method returns True if there
is at least one element in the source.
Returns:
True if the sequence contains at least one element which satisfies
the predicate, otherwise False.
Raises:
ValueError: If the Queryable is closed() |
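A usage sketch for `any()`; `query` is assumed to be the library's initiator that wraps an iterable in a `Queryable` (as in asq-style query libraries).

```python
# Each call uses a fresh Queryable; query() is assumed to wrap an iterable.
query([]).any()                             # False: empty sequence
query([1, 4, 9, 16]).any()                  # True: at least one element
query([1, 4, 9, 16]).any(lambda x: x > 10)  # True: 16 satisfies the predicate
query([1, 4, 9, 16]).any(lambda x: x < 0)   # False: no negative elements
```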
23,509 | def get_bare_quoted_string(value):
if value[0] != '"':
raise errors.HeaderParseError(
"expected '\"' but found '{}'".format(value))
bare_quoted_string = BareQuotedString()
value = value[1:]
while value and value[0] != '"':
if value[0] in WSP:
token, value = get_fws(value)
else:
token, value = get_qcontent(value)
bare_quoted_string.append(token)
if not value:
bare_quoted_string.defects.append(errors.InvalidHeaderDefect(
"end of header inside quoted string"))
return bare_quoted_string, value
return bare_quoted_string, value[1:] | bare-quoted-string = DQUOTE *([FWS] qcontent) [FWS] DQUOTE
A quoted-string without the leading or trailing white space. Its
value is the text between the quote marks, with whitespace
preserved and quoted pairs decoded. |
23,510 | def iau2000b(jd_tt):
dpplan = -0.000135 * 1e7
deplan = 0.000388 * 1e7
t = (jd_tt - T0) / 36525.0
el = fmod (485868.249036 +
t * 1717915923.2178, ASEC360) * ASEC2RAD;
elp = fmod (1287104.79305 +
t * 129596581.0481, ASEC360) * ASEC2RAD;
f = fmod (335779.526232 +
t * 1739527262.8478, ASEC360) * ASEC2RAD;
d = fmod (1072260.70369 +
t * 1602961601.2090, ASEC360) * ASEC2RAD;
om = fmod (450160.398036 -
t * 6962890.5431, ASEC360) * ASEC2RAD;
a = array((el, elp, f, d, om))
arg = nals_t[:77].dot(a)
fmod(arg, tau, out=arg)
sarg = sin(arg)
carg = cos(arg)
stsc = array((sarg, t * sarg, carg)).T
ctcs = array((carg, t * carg, sarg)).T
dp = tensordot(stsc, lunisolar_longitude_coefficients[:77,])
de = tensordot(ctcs, lunisolar_obliquity_coefficients[:77,])
dpsi = dpplan + dp
deps = deplan + de
return dpsi, deps | Compute Earth nutation based on the faster IAU 2000B nutation model.
`jd_tt` - Terrestrial Time: Julian date float, or NumPy array of floats
Returns a tuple ``(delta_psi, delta_epsilon)`` measured in tenths of
a micro-arcsecond. Each is either a float, or a NumPy array with
the same dimensions as the input argument. The result will not take
as long to compute as the full IAU 2000A series, but should still
agree with ``iau2000a()`` to within a milliarcsecond between the
years 1995 and 2020. |
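A minimal sketch of evaluating the nutation model above at a single epoch and converting its output, which the docstring states is in tenths of a microarcsecond, to arcseconds.

```python
# Assumes iau2000b and its module constants (T0, nals_t, coefficient tables) are in scope.
jd_tt = 2451545.0                 # J2000.0 in Terrestrial Time
dpsi, deps = iau2000b(jd_tt)      # tenths of a microarcsecond
dpsi_arcsec = dpsi * 1e-7         # convert 0.1 microarcseconds -> arcseconds
deps_arcsec = deps * 1e-7
```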
23,511 | def satisfaction_rating_show(self, id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/satisfaction_ratings
api_path = "/api/v2/satisfaction_ratings/{id}.json"
api_path = api_path.format(id=id)
return self.call(api_path, **kwargs) | https://developer.zendesk.com/rest_api/docs/core/satisfaction_ratings#show-satisfaction-rating |
23,512 | def Message(msg, id=260, ok=None):
return psidialogs.message(message=msg, ok=ok) | Original doc: Display a MESSAGE string.
Return when the user clicks the OK button or presses Return.
The MESSAGE string can be at most 255 characters long. |
23,513 | def configure(self, ns, mappings=None, **kwargs):
if mappings is None:
mappings = dict()
mappings.update(kwargs)
for operation, definition in mappings.items():
try:
configure_func = self._find_func(operation)
except AttributeError:
pass
else:
configure_func(ns, self._make_definition(definition)) | Apply mappings to a namespace. |
23,514 | def fix_hp_addrs(server):
fixed = {A.server.ID: server[A.server.ID]}
both = server.get(A.server.PRIVATE_IPS)
if both:
fixed[A.server.PUBLIC_IPS] = [both[1]]
fixed[A.server.PRIVATE_IPS] = [both[0]]
return fixed | Works around hpcloud's peculiar "all ip addresses are returned as private
even though one is public" bug. This is also what the official hpfog gem
does in the ``Fog::Compute::HP::Server#public_ip_address`` method.
:param dict server: Contains the server ID, a list of public IP addresses,
and a list of private IP addresses. |
23,515 | def initialize(self, configfile=None):
method = "initialize"
A = None
metadata = {method: configfile}
send_array(self.socket, A, metadata)
A, metadata = recv_array(
self.socket, poll=self.poll, poll_timeout=self.poll_timeout,
flags=self.zmq_flags) | Initialize the module |
23,516 | def auth(self, request):
service = UserService.objects.get(user=request.user, name=)
callback_url = % (request.scheme, request.get_host(), reverse())
params = {: service.username,
: service.password,
: service.client_id,
: service.client_secret}
access_token = Wall.get_token(host=service.host, **params)
request.session[] = access_token
return callback_url | let's auth the user to the Service
:param request: request object
:return: callback url
:rtype: string that contains the url to redirect after auth |
23,517 | def display(self):
if self.degree and self.school:
disp = self.degree + u + self.school
else:
disp = self.degree or self.school or None
if disp is not None and self.date_range is not None:
disp += u % self.date_range.years_range
return disp or u | A unicode value with the object's data, to be used for displaying
the object in your application. |
23,518 | def run(self, files, stack):
"Clean your text"
for filename, post in files.items():
post.content = self.bleach.clean(post.content, *self.args, **self.kwargs) | Clean your text |
23,519 | def analyze(self):
key = '/library/sections/%s/analyze' % self.key
self._server.query(key, method=self._server._session.put) | Run an analysis on all of the items in this library section. See
See :func:`~plexapi.base.PlexPartialObject.analyze` for more details. |
23,520 | def get_variant_by_name(self, name, variant_info=None):
if not self.has_index:
raise NotImplementedError("Not implemented when IMPUTE2 file is "
"not indexed (see genipe)")
if variant_info is None:
try:
variant_info = self._impute2_index.loc[name, :]
except KeyError:
if name in self.get_duplicated_markers():
return [
self.get_variant_by_name(dup_name).pop()
for dup_name in self.get_duplicated_markers()[name]
]
else:
logging.variant_name_not_found(name)
return []
self._impute2_file.seek(variant_info.seek)
genotypes = self._parse_impute2_line(self._impute2_file.readline())
self._fix_genotypes_object(genotypes, variant_info)
return [genotypes] | Get the genotype of a marker using it's name.
Args:
name (str): The name of the marker.
variant_info (pandas.Series): The marker information (e.g. seek).
Returns:
list: A list of Genotypes (only one for PyPlink, see note below).
Note
====
From PyPlink version 1.3.2 and onwards, each name is unique in the
dataset. Hence, we can use the 'get_geno_marker' function and be
sure only one variant is returned. |
23,521 | def scan_temperature(self, measure, temperature, rate, delay=1):
if not hasattr(measure, ):
raise TypeError()
self.set_temperature(temperature, rate, , wait_for_stability=False)
start = datetime.datetime.now()
while True:
if (self.system_status[] == and
(datetime.datetime.now() - start > datetime.timedelta(seconds=10))):
break
measure()
time.sleep(delay) | Performs a temperature scan.
Measures until the target temperature is reached.
:param measure: A callable called repeatedly until stability at target
temperature is reached.
:param temperature: The target temperature in kelvin.
:param rate: The sweep rate in kelvin per minute.
:param delay: The time delay between each call to measure in seconds. |
23,522 | def fullinfo_get(self, tids, session, fields=[]):
request = TOPRequest()
request[] = tids
if not fields:
trade = Trade()
fields = trade.fields
request[] = fields
self.create(self.execute(request, session)[])
return self | taobao.topats.trades.fullinfo.get: asynchronous batch retrieval of trade order details API.
Usage guide: http://open.taobao.com/dev/index.php/ATS%E4%BD%BF%E7%94%A8%E6%8C%87%E5%8D%97
- 1. Provides asynchronous batch retrieval of order details.
- 2. A single call supports at most 40 orders.
- 3. Submitting a task performs a preliminary validation; on success the task id and creation time are returned, otherwise an error is raised.
- 4. You can either receive the task-completion message pushed by Taobao, or fetch the result later via taobao.topats.result.get.
- 5. The notification message sent when this API finishes has the format {"task":{"task_id":123456,"created":"2010-8-19"}} |
23,523 | def blocking_start(self, waiting_func=None):
self.logger.debug()
try:
for job_params in self._get_iterator():
self.config.logger.debug(, job_params)
self.quit_check()
if job_params is None:
if self.config.quit_on_empty_queue:
raise KeyboardInterrupt
self.logger.info("there is nothing to do. Sleeping "
"for %d seconds" %
self.config.idle_delay)
self._responsive_sleep(self.config.idle_delay)
continue
self.quit_check()
try:
args, kwargs = job_params
except ValueError:
args = job_params
kwargs = {}
try:
self.task_func(*args, **kwargs)
except Exception:
self.config.logger.error("Error in processing a job",
exc_info=True)
except KeyboardInterrupt:
self.logger.debug()
finally:
self.quit = True
self.logger.debug("ThreadlessTaskManager dies quietly") | this function starts the task manager running to do tasks. The
waiting_func is normally used to do something while other threads
are running, but here we don't have other threads. So the waiting
func will never get called. I can see wanting this function to be
called at least once after the end of the task loop. |
23,524 | def read_namespaced_service(self, name, namespace, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_service_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_service_with_http_info(name, namespace, **kwargs)
return data | read_namespaced_service # noqa: E501
read the specified Service # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_service(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Service (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1Service
If the method is called asynchronously,
returns the request thread. |
23,525 | def register_download_command(self, download_func):
description = "Download the contents of a remote remote project to a local folder."
download_parser = self.subparsers.add_parser(, description=description)
add_project_name_or_id_arg(download_parser, help_text_suffix="download")
_add_folder_positional_arg(download_parser)
include_or_exclude = download_parser.add_mutually_exclusive_group(required=False)
_add_include_arg(include_or_exclude)
_add_exclude_arg(include_or_exclude)
download_parser.set_defaults(func=download_func) | Add 'download' command for downloading a project to a directory.
For non empty directories it will download remote files replacing local files.
:param download_func: function to run when user choses this option |
23,526 | def make_date(df:DataFrame, date_field:str):
"Make sure `df[field_name]` is of the right date type."
field_dtype = df[date_field].dtype
if isinstance(field_dtype, pd.core.dtypes.dtypes.DatetimeTZDtype):
field_dtype = np.datetime64
if not np.issubdtype(field_dtype, np.datetime64):
df[date_field] = pd.to_datetime(df[date_field], infer_datetime_format=True) | Make sure `df[field_name]` is of the right date type. |
23,527 | def has_firewall(vlan):
return bool(
vlan.get(, None) or
vlan.get(, None) or
vlan.get(, None) or
vlan.get(, None) or
vlan.get(, None)
) | Helper to determine whether or not a VLAN has a firewall.
:param dict vlan: A dictionary representing a VLAN
:returns: True if the VLAN has a firewall, false if it doesn't. |
23,528 | def MiddleClick(x: int, y: int, waitTime: float = OPERATION_WAIT_TIME) -> None:
SetCursorPos(x, y)
screenWidth, screenHeight = GetScreenSize()
mouse_event(MouseEventFlag.MiddleDown | MouseEventFlag.Absolute, x * 65535 // screenWidth, y * 65535 // screenHeight, 0, 0)
time.sleep(0.05)
mouse_event(MouseEventFlag.MiddleUp | MouseEventFlag.Absolute, x * 65535 // screenWidth, y * 65535 // screenHeight, 0, 0)
time.sleep(waitTime) | Simulate mouse middle click at point x, y.
x: int.
y: int.
waitTime: float. |
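A quick usage sketch, assuming the `uiautomation` package that ships `MiddleClick`; the coordinates are arbitrary.

```python
# Hypothetical coordinates; assumes the uiautomation package.
import uiautomation as auto

auto.MiddleClick(400, 300)                  # middle-click at screen point (400, 300)
auto.MiddleClick(400, 300, waitTime=0.5)    # same click, then wait 0.5 s before returning
```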
23,529 | def is_parent_of_catalog(self, id_, catalog_id):
if self._catalog_session is not None:
return self._catalog_session.is_parent_of_catalog(id_=id_, catalog_id=catalog_id)
return self._hierarchy_session.is_parent(id_=catalog_id, parent_id=id_) | Tests if an ``Id`` is a direct parent of a catalog.
arg: id (osid.id.Id): an ``Id``
arg: catalog_id (osid.id.Id): the ``Id`` of a catalog
return: (boolean) - ``true`` if this ``id`` is a parent of
``catalog_id,`` ``false`` otherwise
raise: NotFound - ``catalog_id`` is not found
raise: NullArgument - ``id`` or ``catalog_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` not found return ``false``. |
23,530 | def sign_with_privkey(
digest: bytes,
privkey: Ed25519PrivateKey,
global_pubkey: Ed25519PublicPoint,
nonce: int,
global_commit: Ed25519PublicPoint,
) -> Ed25519Signature:
h = _ed25519.H(privkey)
a = _ed25519.decodecoord(h)
S = (nonce + _ed25519.Hint(global_commit + global_pubkey + digest) * a) % _ed25519.l
return Ed25519Signature(_ed25519.encodeint(S)) | Create a CoSi signature of `digest` with the supplied private key.
This function needs to know the global public key and global commitment. |
23,531 | def tran_hash(self, a, b, c, n):
return (((TRAN[(a+n)&255]^TRAN[b]*(n+n+1))+TRAN[(c)^TRAN[n]])&255) | implementation of the tran53 hash function |
23,532 | def options(self, request, *args, **kwargs):
response = HttpResponse()
response['Allow'] = ', '.join(self.allowed_methods)
response['Content-Length'] = 0
return response | Handles responding to requests for the OPTIONS HTTP verb |
23,533 | def hull(script, reorient_normal=True):
filter_xml = .join([
,
,
.format(str(reorient_normal).lower()),
,
,
,
])
util.write_filter(script, filter_xml)
if isinstance(script, FilterScript):
script.add_layer('Convex Hull')
return None | Calculate the convex hull with Qhull library
http://www.qhull.org/html/qconvex.htm
The convex hull of a set of points is the boundary of the minimal convex
set containing the given non-empty finite set of points.
Args:
script: the FilterScript object or script filename to write
the filter to.
reorient_normal (bool): Re-orient all faces coherentely after hull
operation.
Layer stack:
Creates 1 new layer 'Convex Hull'
Current layer is changed to new layer
MeshLab versions:
2016.12
1.3.4BETA |
23,534 | def h2o_mean_absolute_error(y_actual, y_predicted, weights=None):
ModelBase._check_targets(y_actual, y_predicted)
return _colmean((y_predicted - y_actual).abs()) | Mean absolute error regression loss.
:param y_actual: H2OFrame of actual response.
:param y_predicted: H2OFrame of predicted response.
:param weights: (Optional) sample weights
:returns: mean absolute error loss (best is 0.0). |
23,535 | def needle(reads):
from tempfile import mkdtemp
from shutil import rmtree
dir = mkdtemp()
file1 = join(dir, )
with open(file1, ) as fp:
print(reads[0].toString(), end=, file=fp)
file2 = join(dir, )
with open(file2, ) as fp:
print(reads[1].toString(), end=, file=fp)
out = join(dir, )
Executor().execute("needle -asequence -bsequence -auto "
"-outfile -aformat fasta" % (
file1, file2, out))
result = Reads(list(FastaReads(out)))
rmtree(dir)
return result | Run a Needleman-Wunsch alignment and return the two sequences.
@param reads: An iterable of two reads.
@return: A C{Reads} instance with the two aligned sequences. |
23,536 | def db_check(name,
table=None,
**connection_args):
ret = []
if table is None:
tables = db_tables(name, **connection_args)
for table in tables:
log.info(%s\%s\, name, table)
ret.append(__check_table(name, table, **connection_args))
else:
log.info(%s\%s\, name, table)
ret = __check_table(name, table, **connection_args)
return ret | Repairs the full database or just a given table
CLI Example:
.. code-block:: bash
salt '*' mysql.db_check dbname
salt '*' mysql.db_check dbname dbtable |
23,537 | def dependency(app_label, model):
from django.db.migrations import swappable_dependency
return swappable_dependency(get_model_name(app_label, model)) | Returns a Django 1.7+ style dependency tuple for inclusion in
migration.dependencies[] |
23,538 | def rmtree(path):
def onerror(func, path, exc_info):
os.chmod(path, stat.S_IWUSR)
try:
func(path)
except Exception as ex:
if HIDE_WINDOWS_KNOWN_ERRORS:
raise SkipTest("FIXME: fails with: PermissionError\n %s", ex)
else:
raise
return shutil.rmtree(path, False, onerror) | Remove the given recursively.
:note: we use shutil rmtree but adjust its behaviour to see whether files that
couldn't be deleted are read-only. Windows will not remove them in that case |
23,539 | def add_parameters(self,template_file,in_file=None,pst_path=None):
assert os.path.exists(template_file),"template file {0} not found".format(template_file)
assert template_file != in_file
parnme = pst_utils.parse_tpl_file(template_file)
new_parnme = [p for p in parnme if p not in self.parameter_data.parnme]
if len(new_parnme) == 0:
warnings.warn("no new parameters found in template file {0}".format(template_file),PyemuWarning)
new_par_data = None
else:
new_par_data = pst_utils.populate_dataframe(new_parnme,pst_utils.pst_config["par_fieldnames"],
pst_utils.pst_config["par_defaults"],
pst_utils.pst_config["par_dtype"])
new_par_data.loc[new_parnme,"parnme"] = new_parnme
self.parameter_data = self.parameter_data.append(new_par_data)
if in_file is None:
in_file = template_file.replace(".tpl","")
if pst_path is not None:
template_file = os.path.join(pst_path,os.path.split(template_file)[-1])
in_file = os.path.join(pst_path, os.path.split(in_file)[-1])
self.template_files.append(template_file)
self.input_files.append(in_file)
return new_par_data | add new parameters to a control file
Parameters
----------
template_file : str
template file
in_file : str(optional)
model input file. If None, template_file.replace('.tpl','') is used
pst_path : str(optional)
the path to append to the template_file and in_file in the control file. If
not None, then any existing path in front of the template or in file is split off
and pst_path is prepended. Default is None
Returns
-------
new_par_data : pandas.DataFrame
the data for the new parameters that were added. If no new parameters are in the
new template file, returns None
Note
----
populates the new parameter information with default values |
23,540 | def pb_id(self, pb_id: str):
self.set_state(DevState.ON)
self._pb_id = pb_id | Set the PB Id for this device. |
23,541 | def to_struct_file(self, f):
if isinstance(f, str):
f = open(f,'w')
f.write("STRUCTURE {0}\n".format(self.name))
f.write(" NUGGET {0}\n".format(self.nugget))
f.write(" NUMVARIOGRAM {0}\n".format(len(self.variograms)))
for v in self.variograms:
f.write(" VARIOGRAM {0} {1}\n".format(v.name,v.contribution))
f.write(" TRANSFORM {0}\n".format(self.transform))
f.write("END STRUCTURE\n\n")
for v in self.variograms:
v.to_struct_file(f) | write a PEST-style structure file
Parameters
----------
f : (str or file handle)
file to write the GeoStruct information to |
23,542 | def __read(path):
try:
with open(path, 'r') as data_file:
data = data_file.read()
data = json.loads(data)
return data
except IOError as err:
pass
except Exception as err:
pass | Reads a File with contents in correct JSON format.
Returns the data as Python objects.
path - (string) path to the file |
23,543 | def _route_action(self, action, email):
connection = self._get_ses_connection()
if action == "verify":
connection.verify_email_address(email)
print("A verification email has been sent to %s." % email)
elif action == "delete":
connection.delete_verified_email_address(email)
print("You have deleted %s from your SES account." % email)
elif action == "list":
verified_result = connection.list_verified_email_addresses()
if len(verified_result.VerifiedEmailAddresses) > 0:
print("The following emails have been fully verified on your "\
"Amazon SES account:")
for vemail in verified_result.VerifiedEmailAddresses:
print (" %s" % vemail)
else:
print("Your account has no fully verified email addresses yet.") | Given an action and an email (can be None), figure out what to do
with the validated inputs.
:param str action: The action. Must be one of self.valid_actions.
:type email: str or None
:param email: Either an email address, or None if the action doesn't
need an email address. |
23,544 | def _on_connection_open(self, connection):
_log.info("Successfully opened connection to %s", connection.params.host)
self._channel = connection.channel(on_open_callback=self._on_channel_open) | Callback invoked when the connection is successfully established.
Args:
connection (pika.connection.SelectConnection): The newly-estabilished
connection. |
23,545 | def _get_data(self, file_id):
title = % self.__class__.__name__
try:
record_data = self.drive.get_media(fileId=file_id).execute()
except:
raise DriveConnectionError(title)
return record_data | a helper method for retrieving the byte data of a file |
23,546 | def reset_failed(self, pk):
TriggerService.objects.filter(consumer__name__id=pk).update(consumer_failed=0, provider_failed=0)
TriggerService.objects.filter(provider__name__id=pk).update(consumer_failed=0, provider_failed=0) | reset failed counter
:param pk:
:return: |
23,547 | def propose_unif(self):
if self.use_kdtree:
kdtree = spatial.KDTree(self.live_u)
else:
kdtree = None
while True:
u, q = self.radfriends.sample(self.live_u, rstate=self.rstate,
return_q=True, kdtree=kdtree)
if unitcheck(u, self.nonperiodic):
if q == 1 or self.rstate.rand() < 1.0 / q:
break
ax = np.identity(self.npdim) * self.radfriends.radius
return u, ax | Propose a new live point by sampling *uniformly* within
the union of N-spheres defined by our live points. |
23,548 | def subtype_ids(elements, subtype):
return [i for (i, element) in enumerate(elements)
if isinstance(element, subtype)] | returns the ids of all elements of a list that have a certain type,
e.g. show all the nodes that are ``TokenNode``\s. |
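A self-contained illustration of `subtype_ids` using plain Python classes in place of real node types.

```python
# Stand-in node classes; only subtype_ids() above is assumed.
class Node:
    pass

class TokenNode(Node):
    pass

elements = [Node(), TokenNode(), Node(), TokenNode()]
print(subtype_ids(elements, TokenNode))   # -> [1, 3]
```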
23,549 | def on_exception(self, exception):
logger.error('Exception in streaming thread', exc_info=True)
self.streaming_exception = exception | An exception occurred in the streaming thread |
23,550 | def __constructMetricsModules(self, metricSpecs):
if not metricSpecs:
return
self.__metricSpecs = metricSpecs
for spec in metricSpecs:
if not InferenceElement.validate(spec.inferenceElement):
raise ValueError("Invalid inference element for metric spec: %r" %spec)
self.__metrics.append(metrics.getModule(spec))
self.__metricLabels.append(spec.getLabel()) | Creates the required metrics modules
Parameters:
-----------------------------------------------------------------------
metricSpecs:
A sequence of MetricSpec objects that specify which metric modules to
instantiate |
23,551 | def feature_path_unset(self):
if not self.feature_file:
raise IOError()
with open(self.feature_path) as handle:
feats = list(GFF.parse(handle))
if len(feats) > 1:
log.warning()
else:
tmp = feats[0].features
self.feature_dir = None
self.feature_file = None
self.features = tmp | Copy features to memory and remove the association of the feature file. |
23,552 | def get_pid(name):
if not shutil.which("pidof"):
return False
try:
subprocess.check_output(["pidof", "-s", name])
except subprocess.CalledProcessError:
return False
return True | Check if process is running by name. |
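A trivial usage sketch; note that despite its name the helper above returns a boolean, not a PID.

```python
# Process name is illustrative.
if get_pid("sshd"):
    print("sshd is running")
else:
    print("sshd is not running (or pidof is unavailable)")
```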
23,553 | def run(self):
eta = self.settings[]
gamma = 2 * np.pi * self.settings[]
dt = 1. / self.settings[]
control = self.settings[]
self._state = self._output
while self._stop is False:
A = -gamma * dt
noise = np.sqrt(2*gamma*eta)*np.random.randn()
self._state *= (1. + A)
self._state += noise + control
self._output = self._state
self.msleep(int(1e3 / self.settings[])) | this is the actual execution of the instrument thread: continuously read values from the probes |
23,554 | def _write_plist(self, root):
indent_xml(root)
tree = ElementTree.ElementTree(root)
with open(self.preferences_file, "w") as prefs_file:
prefs_file.write(
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
"<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" "
"\"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n"
"<plist version=\"1.0\">\n")
tree.write(prefs_file, xml_declaration=False, encoding="utf-8")
prefs_file.write("</plist>") | Write plist file based on our generated tree. |
23,555 | def post_unvote(self, post_id):
return self._get(.format(post_id),
method=, auth=True) | Action lets you unvote for a post (Requires login).
Parameters:
post_id (int): |
23,556 | def restore_state(self):
last_source_path = setting(
, self.default_directory, expected_type=str)
self.source_directory.setText(last_source_path)
last_output_dir = setting(
, self.default_directory, expected_type=str)
self.output_directory.setText(last_output_dir)
use_default_output_dir = bool(setting(
, True, expected_type=bool))
self.scenario_directory_radio.setChecked(
use_default_output_dir) | Restore GUI state from configuration file. |
23,557 | def _ReformatMessageString(self, message_string):
def _PlaceHolderSpecifierReplacer(match_object):
expanded_groups = []
for group in match_object.groups():
try:
place_holder_number = int(group, 10) - 1
expanded_group = .format(place_holder_number)
except ValueError:
expanded_group = group
expanded_groups.append(expanded_group)
return .join(expanded_groups)
if not message_string:
return None
message_string = self._WHITE_SPACE_SPECIFIER_RE.sub(r, message_string)
message_string = self._TEXT_SPECIFIER_RE.sub(r, message_string)
message_string = self._CURLY_BRACKETS.sub(r, message_string)
return self._PLACE_HOLDER_SPECIFIER_RE.sub(
_PlaceHolderSpecifierReplacer, message_string) | Reformats the message string.
Args:
message_string (str): message string.
Returns:
str: message string in Python format() (PEP 3101) style. |
23,558 | def iter_delimiter(self, byte_size=8192):
partial = u''
while True:
read_chars = self.read(byte_size)
if not read_chars: break
partial += read_chars
lines = partial.split(self.delimiter)
partial = lines.pop()
for line in lines:
yield line + self.delimiter
if partial:
yield partial | Generalization of the default iter file delimited by '\n'.
Note:
The newline string can be arbitrarily long; it need not be restricted to a
single character. You can also set the read size and control whether or not
the newline string is left on the end of the iterated lines. Setting
newline to '\0' is particularly good for use with an input file created with
something like "os.popen('find -print0')".
Args:
byte_size (integer): Number of bytes to be read at each time. |
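A sketch of the NUL-delimited use case mentioned in the note, assuming a hypothetical file-like wrapper class that exposes `read()`, a `delimiter` attribute, and the `iter_delimiter` method above.

```python
# DelimitedReader is a made-up wrapper name; only iter_delimiter's behaviour comes from the row above.
import os

with os.popen("find . -print0") as proc:
    reader = DelimitedReader(proc, delimiter=u"\0")   # hypothetical wrapper
    for entry in reader.iter_delimiter(byte_size=4096):
        print(entry.rstrip(u"\0"))
```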
23,559 | async def requirements(client: Client, search: str) -> dict:
return await client.get(MODULE + % search, schema=REQUIREMENTS_SCHEMA) | GET list of requirements for a given UID/Public key
:param client: Client to connect to the api
:param search: UID or public key
:return: |
23,560 | def get_stack_frames(error_stack: bool = True) -> list:
cauldron_path = environ.paths.package()
resources_path = environ.paths.resources()
frames = (
list(traceback.extract_tb(sys.exc_info()[-1]))
if error_stack else
traceback.extract_stack()
).copy()
def is_cauldron_code(test_filename: str) -> bool:
if not test_filename or not test_filename.startswith(cauldron_path):
return False
if test_filename.startswith(resources_path):
return False
return True
while len(frames) > 1 and is_cauldron_code(frames[0].filename):
frames.pop(0)
return frames | Returns a list of the current stack frames, which are pruned focus on the
Cauldron code where the relevant information resides. |
23,561 | def weave_instance(instance, aspect, methods=NORMAL_METHODS, lazy=False, bag=BrokenBag, **options):
if bag.has(instance):
return Nothing
entanglement = Rollback()
method_matches = make_method_matcher(methods)
logdebug("weave_instance (module=%r, aspect=%s, methods=%s, lazy=%s, **options=%s)",
instance, aspect, methods, lazy, options)
def fixup(func):
return func.__get__(instance, type(instance))
fixed_aspect = aspect + [fixup] if isinstance(aspect, (list, tuple)) else [aspect, fixup]
for attr in dir(instance):
func = getattr(instance, attr)
if method_matches(attr):
if ismethod(func):
if hasattr(func, '__func__'):
realfunc = func.__func__
else:
realfunc = func.im_func
entanglement.merge(
patch_module(instance, attr, _checked_apply(fixed_aspect, realfunc, module=None), **options)
)
return entanglement | Low-level weaver for instances.
.. warning:: You should not use this directly.
:returns: An :obj:`aspectlib.Rollback` object. |
23,562 | def parse_config_file(config_file, skip_unknown=False):
for reader, existence_check in _FILE_READERS:
if existence_check(config_file):
with reader(config_file) as f:
parse_config(f, skip_unknown=skip_unknown)
return
raise IOError('Unable to open file: {}'.format(config_file))
Args:
config_file: The path to a Gin config file.
skip_unknown: A boolean indicating whether unknown configurables and imports
should be skipped instead of causing errors (alternatively a list of
configurable names to skip if unknown). See `parse_config` for additional
details.
Raises:
IOError: If `config_file` cannot be read using any register file reader. |
23,563 | def _should_defer(input_layer, args, kwargs):
for arg in itertools.chain([input_layer], args, six.itervalues(kwargs)):
if isinstance(arg, (_DeferredLayer, UnboundVariable)):
return True
elif (isinstance(arg, collections.Sequence) and
not isinstance(arg, six.string_types)):
if _should_defer(None, arg, {}):
return True
elif isinstance(arg, collections.Mapping):
if _should_defer(None, (), arg):
return True
return False | Checks to see if any of the args are templates. |
23,564 | def correlation_plot(self, data):
fig = plt.figure(Plot_Data.count)
corr = data.corr()
ax = sns.heatmap(corr)
Plot_Data.count += 1
return fig | Create heatmap of Pearson's correlation coefficient.
Parameters
----------
data : pd.DataFrame()
Data to display.
Returns
-------
matplotlib.figure
Heatmap. |
23,565 | def _pack_with_custom_ops(dataset, keys, length):
from tensor2tensor.data_generators.ops import pack_sequences_ops
k1, k2 = keys
def map_fn_custom(x):
(k1_packed, k1_segmengation, k1_position,
k2_packed, k2_segmentation, k2_position) = (
pack_sequences_ops.pack_sequences2(x[k1], x[k2], length))
packed = {
k1: k1_packed,
k1 + "_segmentation": k1_segmengation,
k1 + "_position": k1_position,
k2: k2_packed,
k2 + "_segmentation": k2_segmentation,
k2 + "_position": k2_position,
}
return tf.data.Dataset.from_tensor_slices(packed)
dataset = dataset.flat_map(map_fn_custom)
return dataset | Helper-function for packing a dataset which has already been batched.
See pack_dataset()
Relies on custom ops which require a custom compiled binary.
Faster than _pack_with_tf_ops(), and denser packing.
Args:
dataset: a dataset containing padded batches of examples.
keys: a list of strings (must have length 2)
length: an integer
Returns:
a dataset. |
23,566 | def update_from_model_change(self, oldmodel, newmodel, tile):
self._loglikelihood -= self._calc_loglikelihood(oldmodel, tile=tile)
self._loglikelihood += self._calc_loglikelihood(newmodel, tile=tile)
self._residuals[tile.slicer] = self._data[tile.slicer] - newmodel | Update various internal variables from a model update from oldmodel to
newmodel for the tile `tile` |
23,567 | def generic_visit(self, node: AST, dfltChaining: bool = True) -> str:
for field, value in ast.iter_fields(node):
if isinstance(value, list):
for item in value:
if isinstance(item, AST):
self.visit(item)
elif isinstance(value, AST):
self.visit(value) | Default handler, called if no explicit visitor function exists for
a node. |
23,568 | def is_link(url, processed, files):
if url not in processed:
is_file = url.endswith(BAD_TYPES)
if is_file:
files.add(url)
return False
return True
return False | Determine whether or not a link should be crawled
A url should not be crawled if it
- Is a file
- Has already been crawled
Args:
url: str Url to be processed
processed: list[str] List of urls that have already been crawled
Returns:
bool If `url` should be crawled |
23,569 | def extract_index(index_data, global_index=False):
parsed_data = {}
keys = []
for key, value in six.iteritems(index_data):
for item in value:
for field, data in six.iteritems(item):
if field == :
parsed_data[] = data
elif field == :
parsed_data[] = data
elif field == :
parsed_data[] = data
elif field == :
parsed_data[] = data
elif field == :
parsed_data[] = data
elif field == :
parsed_data[] = data
elif field == :
parsed_data[] = data
elif field == :
parsed_data[] = data
elif field == :
parsed_data[] = True
if parsed_data[]:
keys.append(
HashKey(
parsed_data[],
data_type=parsed_data[]
)
)
if parsed_data.get():
keys.append(
RangeKey(
parsed_data[],
data_type=parsed_data[]
)
)
if (
global_index and
parsed_data[] and
parsed_data[]):
parsed_data[] = {
: parsed_data[],
: parsed_data[]
}
if parsed_data[] and keys:
if global_index:
if parsed_data.get() and parsed_data.get():
raise SaltInvocationError()
if parsed_data.get():
return GlobalIncludeIndex(
parsed_data[],
parts=keys,
throughput=parsed_data[],
includes=parsed_data[]
)
elif parsed_data.get():
return GlobalKeysOnlyIndex(
parsed_data[],
parts=keys,
throughput=parsed_data[],
)
else:
return GlobalAllIndex(
parsed_data[],
parts=keys,
throughput=parsed_data[]
)
else:
return AllIndex(
parsed_data[],
parts=keys
) | Instantiates and returns an AllIndex object given a valid index
configuration
CLI Example:
salt myminion boto_dynamodb.extract_index index |
23,570 | def get_record_types(self):
from ..type.objects import TypeList
type_list = []
for type_idstr in self._supported_record_type_ids:
type_list.append(Type(**self._record_type_data_sets[Id(type_idstr).get_identifier()]))
return TypeList(type_list) | Gets the record types available in this object.
A record ``Type`` explicitly indicates the specification of an
interface to the record. A record may or may not inherit other
record interfaces through interface inheritance in which case
support of a record type may not be explicit in the returned
list. Interoperability with the typed interface to this object
should be performed through ``hasRecordType()``.
return: (osid.type.TypeList) - the record types available
*compliance: mandatory -- This method must be implemented.* |
23,571 | def translate_abstract_actions_to_keys(self, abstract):
if len(abstract) >= 2 and not isinstance(abstract[1], (list, tuple)):
abstract = list((abstract,))
actions, axes = [], []
for a in abstract:
first_key = self.action_space_desc[a[0]]["keys"][0]
if isinstance(first_key, (bytes, str)):
actions.append((first_key, a[1]))
elif isinstance(first_key, tuple):
axes.append((first_key[0], a[1] * first_key[1]))
else:
raise TensorForceError("action_space_desc contains unsupported type for key {}!".format(a[0]))
return actions, axes | Translates a list of tuples ([pretty mapping], [value]) to a list of tuples ([some key], [translated value])
each single item in abstract will undergo the following translation:
Example1:
we want: "MoveRight": 5.0
possible keys for the action are: ("Right", 1.0), ("Left", -1.0)
result: "Right": 5.0 * 1.0 = 5.0
Example2:
we want: "MoveRight": -0.5
possible keys for the action are: ("Left", -1.0), ("Right", 1.0)
result: "Left": -0.5 * -1.0 = 0.5 (same as "Right": -0.5) |
23,572 | def prior_to_xarray(self):
prior = self.prior
prior_predictive = self.prior_predictive
if prior_predictive is None:
prior_predictive = []
elif isinstance(prior_predictive, str):
prior_predictive = [prior_predictive]
ignore = prior_predictive + ["lp__"]
data = get_draws(prior, ignore=ignore)
return dict_to_dataset(data, library=self.pystan, coords=self.coords, dims=self.dims) | Convert prior samples to xarray. |
23,573 | def get_model_field_label_and_value(instance, field_name) -> (str, str):
label = field_name
value = str(getattr(instance, field_name))
for f in instance._meta.fields:
if f.attname == field_name:
label = f.verbose_name
if hasattr(f, 'choices') and len(f.choices) > 0:
value = choices_label(f.choices, value)
break
return label, force_text(value) | Returns model field label and value.
:param instance: Model instance
:param field_name: Model attribute name
:return: (label, value) tuple |
23,574 | def export(self, name, columns, points):
logger.debug("Export {} stats to Cassandra".format(name))
data = {k: float(v) for (k, v) in dict(zip(columns, points)).iteritems() if isinstance(v, Number)}
try:
stmt = "INSERT INTO {} (plugin, time, stat) VALUES (?, ?, ?)".format(self.table)
query = self.session.prepare(stmt)
self.session.execute(
query,
(name, uuid_from_time(datetime.now()), data)
)
except Exception as e:
logger.error("Cannot export {} stats to Cassandra ({})".format(name, e)) | Write the points to the Cassandra cluster. |
23,575 | def create_notebook(self, position=Gtk.PositionType.TOP):
notebook = Gtk.Notebook()
notebook.set_tab_pos(position)
notebook.set_show_border(True)
return notebook | Function creates a notebook |
23,576 | def loop(self, sequences=None, outputs=None, non_sequences=None, block=None, **kwargs):
from loop import Loop
return Loop(sequences, outputs, non_sequences, block, **kwargs) | Start a loop.
Usage:
```
with deepy.graph.loop(sequences={"x": x}, outputs={"o": None}) as vars:
vars.o = vars.x + 1
loop_outputs = deepy.graph.loop_outputs()
result = loop_outputs.o
``` |
23,577 | def groups(self):
group_list = []
all_groups = self.get()
for group_dn in all_groups:
if self.__ldap_group_ou__ in group_dn:
group_list.append(group_dn)
return group_list | Get the list of Groups (by dn) that the bound CSH LDAP member object
is in. |
23,578 | def match(self, table, nomatch=0):
return H2OFrame._expr(expr=ExprNode("match", self, table, nomatch, None)) | Make a vector of the positions of (first) matches of its first argument in its second.
Only applicable to single-column categorical/string frames.
:param List table: the list of items to match against
:param int nomatch: value that should be returned when there is no match.
:returns: a new H2OFrame containing for each cell from the source frame the index where
the pattern ``table`` first occurs within that cell. |
23,579 | def min(self, array, role = None):
return self.reduce(array, reducer = np.minimum, neutral_element = np.infty, role = role) | Return the minimum value of ``array`` for the entity members.
``array`` must have the dimension of the number of persons in the simulation
If ``role`` is provided, only the entity member with the given role are taken into account.
Example:
>>> salaries = household.members('salary', '2018-01') # e.g. [2000, 1500, 0, 0, 0]
>>> household.min(salaries)
>>> array([0])
>>> household.min(salaries, role = Household.PARENT) # Assuming the 1st two persons are parents
>>> array([1500]) |
23,580 | def run_filter(vrn_file, align_bam, ref_file, data, items):
if not should_filter(items) or not vcfutils.vcf_has_variants(vrn_file):
return data
else:
raw_file = "%s-damage.vcf" % utils.splitext_plus(vrn_file)[0]
out_plot_files = ["%s%s" % (utils.splitext_plus(raw_file)[0], ext)
for ext in ["_seq_bias_simplified.pdf", "_pcr_bias_simplified.pdf"]]
if not utils.file_uptodate(raw_file, vrn_file) and not utils.file_uptodate(raw_file + ".gz", vrn_file):
with file_transaction(items[0], raw_file) as tx_out_file:
cmd = ["dkfzbiasfilter.py", "--filterCycles", "1", "--passOnly",
"--tempFolder", os.path.dirname(tx_out_file),
vrn_file, align_bam, ref_file, tx_out_file]
do.run(cmd, "Filter low frequency variants for DNA damage and strand bias")
for out_plot in out_plot_files:
tx_plot_file = os.path.join("%s_qcSummary" % utils.splitext_plus(tx_out_file)[0], "plots",
os.path.basename(out_plot))
if utils.file_exists(tx_plot_file):
shutil.move(tx_plot_file, out_plot)
raw_file = vcfutils.bgzip_and_index(raw_file, items[0]["config"])
data["vrn_file"] = _filter_to_info(raw_file, items[0])
out_plot_files = [x for x in out_plot_files if utils.file_exists(x)]
data["damage_plots"] = out_plot_files
return data | Filter and annotate somatic VCFs with damage/bias artifacts on low frequency variants.
Moves damage estimation to INFO field, instead of leaving in FILTER. |
23,581 | def load(path, group=None, sel=None, unpack=False):
with tables.open_file(path, mode='r') as h5file:
pathtable = {}
if group is not None:
if isinstance(group, str):
data = _load_specific_level(h5file, h5file, group, sel=sel,
pathtable=pathtable)
else:
data = []
for g in group:
data_i = _load_specific_level(h5file, h5file, g, sel=sel,
pathtable=pathtable)
data.append(data_i)
data = tuple(data)
else:
grp = h5file.root
auto_unpack = (DEEPDISH_IO_UNPACK in grp._v_attrs and
grp._v_attrs[DEEPDISH_IO_UNPACK])
do_unpack = unpack or auto_unpack
if do_unpack and len(grp._v_children) == 1:
name = next(iter(grp._v_children))
data = _load_specific_level(h5file, grp, name, sel=sel,
pathtable=pathtable)
do_unpack = False
elif sel is not None:
raise ValueError("Must specify group with `sel` unless it "
"automatically unpacks")
else:
data = _load_level(h5file, grp, pathtable)
if DEEPDISH_IO_VERSION_STR in grp._v_attrs:
v = grp._v_attrs[DEEPDISH_IO_VERSION_STR]
else:
v = 0
if v > IO_VERSION:
warnings.warn(
)
if do_unpack and isinstance(data, dict) and len(data) == 1:
data = next(iter(data.values()))
return data | Loads an HDF5 saved with `save`.
This function requires the `PyTables <http://www.pytables.org/>`_ module to
be installed.
Parameters
----------
path : string
Filename from which to load the data.
group : string or list
Load a specific group in the HDF5 hierarchy. If `group` is a list of
strings, then a tuple will be returned with all the groups that were
specified.
sel : slice or tuple of slices
If you specify `group` and the target is a numpy array, then you can
use this to slice it. This is useful for opening subsets of large HDF5
files. To compose the selection, you can use `deepdish.aslice`.
unpack : bool
If True, a single-entry dictionaries will be unpacked and the value
will be returned directly. That is, if you save ``dict(a=100)``, only
``100`` will be loaded.
Returns
-------
data : anything
Hopefully an identical reconstruction of the data that was saved.
See also
--------
save |
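A usage sketch for the deepdish-style `save`/`load` pair described above; the file name and keys are arbitrary.

```python
# Assumes deepdish's io module provides save() alongside the load() above.
import numpy as np
import deepdish as dd

dd.io.save('data.h5', {'x': np.arange(10), 'meta': {'n': 10}})
everything = dd.io.load('data.h5')                        # whole file as a dict
x = dd.io.load('data.h5', group='/x')                     # a single group
head = dd.io.load('data.h5', group='/x', sel=np.s_[:3])   # sliced read of a large array
```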
23,582 | def run(self):
logger = getLogger()
socket.setdefaulttimeout(self.timeout)
for entry in self.entries:
reply = self.ping_entry(entry)
self.results.append(reply)
logger.info(, self.server_name, reply[])
socket.setdefaulttimeout(None) | Ping entries to a directory in a thread. |
23,583 | def get_summary(profile_block_list, maxlines=20):
time_list = [get_block_totaltime(block) for block in profile_block_list]
time_list = [time if time is not None else -1 for time in time_list]
blockid_list = [get_block_id(block) for block in profile_block_list]
sortx = ut.list_argsort(time_list)
sorted_time_list = ut.take(time_list, sortx)
sorted_blockid_list = ut.take(blockid_list, sortx)
aligned_blockid_list = ut.util_str.align_lines(sorted_blockid_list, )
summary_lines = [( % time) + line
for time, line in
zip(sorted_time_list, aligned_blockid_list)]
summary_lines_ = ut.listclip(summary_lines, maxlines, fromback=True)
summary_text = .join(summary_lines_)
return summary_text | References:
https://github.com/rkern/line_profiler |
23,584 | def get_header(self, idx, formatted=False):
header = self._uname if not formatted else self._fname
return [header[x] for x in idx] | Return a list of the variable names at the given indices |
23,585 | def SendKey(key: int, waitTime: float = OPERATION_WAIT_TIME) -> None:
keybd_event(key, 0, KeyboardEventFlag.KeyDown | KeyboardEventFlag.ExtendedKey, 0)
keybd_event(key, 0, KeyboardEventFlag.KeyUp | KeyboardEventFlag.ExtendedKey, 0)
time.sleep(waitTime) | Simulate typing a key.
key: int, a value in class `Keys`. |
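A hedged usage sketch (assumes the accompanying Keys class exposes Windows virtual-key constants such as VK_F5 and VK_RETURN):

# Press F5, then press Enter with a longer post-keystroke pause.
SendKey(Keys.VK_F5)
SendKey(Keys.VK_RETURN, waitTime=0.5)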
23,586 | def _set_port(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
# Note: the string literals in this generated pyangbind call were stripped from the source; the namespace, defining_module and CLI-extension values below are representative placeholders rather than the exact generated strings.
t = YANGDynClass(v,base=port.port, is_container='container', presence=False, yang_name="port", rest_name="port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'port', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-cpu', defining_module='brocade-qos-cpu', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """port must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=port.port, is_container='container', yang_name="port", rest_name="port", yang_type='container', is_config=True)""",
})
self.__port = t
if hasattr(self, '_set'):
self._set() | Setter method for port, mapped from YANG variable /qos/cpu/slot/port_group/port (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_port is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_port() directly. |
23,587 | def open(self, session, resource_name,
access_mode=constants.AccessModes.no_lock, open_timeout=constants.VI_TMO_IMMEDIATE):
try:
open_timeout = int(open_timeout)
except ValueError:
raise ValueError('open_timeout (%r) must be an integer (or compatible type)' % open_timeout)
try:
parsed = rname.parse_resource_name(resource_name)
except rname.InvalidResourceName:
return 0, constants.StatusCode.error_invalid_resource_name
cls = sessions.Session.get_session_class(parsed.interface_type_const, parsed.resource_class)
sess = cls(session, resource_name, parsed)
try:
sess.device = self.devices[sess.attrs[constants.VI_ATTR_RSRC_NAME]]
except KeyError:
return 0, constants.StatusCode.error_resource_not_found
return self._register(sess), constants.StatusCode.success | Opens a session to the specified resource.
Corresponds to viOpen function of the VISA library.
:param session: Resource Manager session
(should always be a session returned
from open_default_resource_manager()).
:param resource_name: Unique symbolic name of a resource.
:param access_mode: Specifies the mode by which the resource is to be accessed. (constants.AccessModes)
:param open_timeout: Specifies the maximum time period (in milliseconds) that this operation waits
before returning an error.
:return: Unique logical identifier reference to a session, return value of the library call.
:rtype: session, :class:`pyvisa.constants.StatusCode` |
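For orientation, this is roughly how the call is reached through the pyvisa front end when the simulated backend is selected; the resource name below is a placeholder from a typical simulation config:

import pyvisa

rm = pyvisa.ResourceManager('@sim')       # selects the pyvisa-sim backend
inst = rm.open_resource('ASRL1::INSTR')   # eventually dispatches to open() above
print(inst.resource_name)
inst.close()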
23,588 | def QA_indicator_ASI(DataFrame, M1=26, M2=10):
CLOSE = DataFrame['close']
HIGH = DataFrame['high']
LOW = DataFrame['low']
OPEN = DataFrame['open']
LC = REF(CLOSE, 1)
AA = ABS(HIGH - LC)
BB = ABS(LOW-LC)
CC = ABS(HIGH - REF(LOW, 1))
DD = ABS(LC - REF(OPEN, 1))
R = IFAND(AA > BB, AA > CC, AA+BB/2+DD/4,
IFAND(BB > CC, BB > AA, BB+AA/2+DD/4, CC+DD/4))
X = (CLOSE - LC + (CLOSE - OPEN) / 2 + LC - REF(OPEN, 1))
SI = 16*X/R*MAX(AA, BB)
ASI = SUM(SI, M1)
ASIT = MA(ASI, M2)
return pd.DataFrame({
'ASI': ASI, 'ASIT': ASIT
}) | LC=REF(CLOSE,1);
AA=ABS(HIGH-LC);
BB=ABS(LOW-LC);
CC=ABS(HIGH-REF(LOW,1));
DD=ABS(LC-REF(OPEN,1));
R=IF(AA>BB AND AA>CC,AA+BB/2+DD/4,IF(BB>CC AND BB>AA,BB+AA/2+DD/4,CC+DD/4));
X=(CLOSE-LC+(CLOSE-OPEN)/2+LC-REF(OPEN,1));
SI=16*X/R*MAX(AA,BB);
ASI:SUM(SI,M1);
ASIT:MA(ASI,M2); |
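A small calling sketch: the indicator expects an OHLC DataFrame with lowercase column names ('open', 'high', 'low', 'close'), as QUANTAXIS provides; the values here are made up:

import pandas as pd

df = pd.DataFrame({
    'open':  [10.0, 10.2, 10.1, 10.4],
    'high':  [10.3, 10.4, 10.5, 10.6],
    'low':   [ 9.9, 10.0, 10.0, 10.2],
    'close': [10.2, 10.1, 10.4, 10.5],
})
print(QA_indicator_ASI(df, M1=26, M2=10))   # DataFrame with ASI and ASIT columns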
23,589 | def create_transform_job(self, config, wait_for_completion=True,
check_interval=30, max_ingestion_time=None):
self.check_s3_url(config['TransformInput']['DataSource']['S3DataSource']['S3Uri'])
response = self.get_conn().create_transform_job(**config)
if wait_for_completion:
self.check_status(config['TransformJobName'],
'TransformJobStatus',
self.describe_transform_job,
check_interval, max_ingestion_time
)
return response | Create a transform job
:param config: the config for transform job
:type config: dict
:param wait_for_completion: if the program should keep running until job finishes
:type wait_for_completion: bool
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:type check_interval: int
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:type max_ingestion_time: int
:return: A response to transform job creation |
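A hedged sketch of the config argument; the keys follow the SageMaker CreateTransformJob API, while the job, model and bucket names are placeholders, and hook stands for an instance of the surrounding SageMaker hook:

config = {
    'TransformJobName': 'my-transform-job',
    'ModelName': 'my-model',
    'TransformInput': {
        'DataSource': {
            'S3DataSource': {
                'S3DataType': 'S3Prefix',
                'S3Uri': 's3://my-bucket/input/',
            }
        }
    },
    'TransformOutput': {'S3OutputPath': 's3://my-bucket/output/'},
    'TransformResources': {'InstanceType': 'ml.m5.large', 'InstanceCount': 1},
}
response = hook.create_transform_job(config, wait_for_completion=True,
                                     check_interval=60)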
23,590 | def condition_on_par_knowledge(cov,par_knowledge_dict):
missing = []
for parnme in par_knowledge_dict.keys():
if parnme not in cov.row_names:
missing.append(parnme)
if len(missing):
raise Exception("par knowledge dict parameters not found: {0}".\
format(','.join(missing)))
sel = cov.zero2d
sigma_ep = cov.zero2d
for parnme,var in par_knowledge_dict.items():
idx = cov.row_names.index(parnme)
sel.x[idx,idx] = 1.0
sigma_ep.x[idx,idx] = var
print(sel)
term2 = sel * cov * sel.T
print(term2)
term2 = term2.inv
term2 *= sel
term2 *= cov
new_cov = cov - term2
return new_cov | experimental function to include conditional prior information
for one or more parameters in a full covariance matrix |
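An illustrative call, assuming pyemu is available; the control file and parameter name are placeholders:

import pyemu

pst = pyemu.Pst('my_model.pst')
prior_cov = pyemu.Cov.from_parameter_data(pst)
# Condition the prior on (near-)certain knowledge of one parameter.
posterior_cov = condition_on_par_knowledge(prior_cov, {'hk_01': 1.0e-6})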
23,591 | def infer_isinstance(callnode, context=None):
call = arguments.CallSite.from_call(callnode)
if call.keyword_arguments:
raise UseInferenceDefault("TypeError: isinstance() takes no keyword arguments")
if len(call.positional_arguments) != 2:
raise UseInferenceDefault(
"Expected two arguments, got {count}".format(
count=len(call.positional_arguments)
)
)
obj_node, class_or_tuple_node = call.positional_arguments
try:
class_container = _class_or_tuple_to_container(
class_or_tuple_node, context=context
)
except InferenceError:
raise UseInferenceDefault
try:
isinstance_bool = helpers.object_isinstance(obj_node, class_container, context)
except AstroidTypeError as exc:
raise UseInferenceDefault("TypeError: " + str(exc))
except MroError as exc:
raise UseInferenceDefault from exc
if isinstance_bool is util.Uninferable:
raise UseInferenceDefault
return nodes.Const(isinstance_bool) | Infer isinstance calls
:param nodes.Call callnode: an isinstance call
:param InferenceContext: context for call
(currently unused but is a common interface for inference)
:rtype nodes.Const: Boolean Const value of isinstance call
:raises UseInferenceDefault: If the node cannot be inferred |
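A quick way to see the effect of this inference tip (assumes astroid is installed):

import astroid

node = astroid.extract_node("isinstance(1, int)")
print(next(node.infer()))   # a Const node holding True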
23,592 | def format_prefix(filename, sres):
try:
pwent = pwd.getpwuid(sres.st_uid)
user = pwent.pw_name
except KeyError:
user = sres.st_uid
try:
grent = grp.getgrgid(sres.st_gid)
group = grent.gr_name
except KeyError:
group = sres.st_gid
return '%s %3d %-8s %-8s %8d %s ' % (  # ls -l style columns; format string reconstructed from context
format_mode(sres),
sres.st_nlink,
user,
group,
sres.st_size,
format_mtime(sres.st_mtime),
) | Prefix to a filename in the directory listing. This is to make the
listing similar to an output of "ls -alh". |
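A small usage sketch (the file path is hypothetical):

import os

sres = os.stat('setup.py')
print(format_prefix('setup.py', sres) + 'setup.py')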
23,593 | def _prev_month(self):
self._canvas.place_forget()
self._date = self._date - self.timedelta(days=1)
self._date = self.datetime(self._date.year, self._date.month, 1)
self._build_calendar() | Update the calendar to show the previous month. |
23,594 | def get_oc_api_token():
oc_command_exists()
try:
return run_cmd(["oc", "whoami", "-t"], return_output=True).rstrip()
except subprocess.CalledProcessError as ex:
raise ConuException("oc whoami -t failed: %s" % ex) | Get token of user logged in OpenShift cluster
:return: str, API token |
23,595 | def _updateB(oldB, B, W, degrees, damping, inds, backinds):
for j,d in enumerate(degrees):
kk = inds[j]
bk = backinds[j]
if d == 0:
B[kk,bk] = -np.inf
continue
belief = W[kk,bk] + W[j]
oldBj = oldB[j]
if d == oldBj.shape[0]:
bth = quickselect(-oldBj, d-1)
bplus = -1
else:
bth,bplus = quickselect(-oldBj, d-1, d)
belief -= np.where(oldBj >= oldBj[bth], oldBj[bplus], oldBj[bth])
B[kk,bk] = damping*belief + (1-damping)*oldB[kk,bk] | belief update function. |
23,596 | def Record(self, obj):
if len(self._visit_recorder_objects) >= _MAX_VISIT_OBJECTS:
return False
obj_id = id(obj)
if obj_id in self._visit_recorder_objects:
return False
self._visit_recorder_objects[obj_id] = obj
return True | Records the object as visited.
Args:
obj: visited object.
Returns:
True if the object hasn't been previously visited or False if it has
already been recorded or the quota has been exhausted. |
23,597 | def copy_bulma_files(self):
original_bulma_dir = os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(__file__))),
'static/bulma',
)
shutil.copytree(original_bulma_dir, self.static_root_bulma_dir) | Copies Bulma static files from package's static/bulma into project's
STATIC_ROOT/bulma |
23,598 | def ui_extensions(self):
return EnvironmentUIExtensionsProxy(self._client, self.space.id, self.id) | Provides access to UI extensions management methods.
API reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/ui-extensions
:return: :class:`EnvironmentUIExtensionsProxy <contentful_management.ui_extensions_proxy.EnvironmentUIExtensionsProxy>` object.
:rtype: contentful.ui_extensions_proxy.EnvironmentUIExtensionsProxy
Usage:
>>> ui_extensions_proxy = environment.ui_extensions()
<EnvironmentUIExtensionsProxy space_id="cfexampleapi" environment_id="master"> |
23,599 | def _get_pathcost_func(
name: str
) -> Callable[[int, int, int, int, Any], float]:
return ffi.cast(
"TCOD_path_func_t", ffi.addressof(lib, name)
) | Return a properly cast PathCostArray callback. |