Unnamed: 0 (int64, 0–389k) | code (string, lengths 26–79.6k) | docstring (string, lengths 1–46.9k) |
---|---|---|
27,700 |
def set_contents_from_file(self, fp, headers=None, replace=True, cb=None,
                           num_cb=10, policy=None, md5=None):
    if self.key_type & self.KEY_STREAM_WRITABLE:
        # Original exception message was lost in extraction.
        raise BotoClientError('Stream is not writable')
    elif self.key_type & self.KEY_STREAM_READABLE:
        key_file = self.fp
    else:
        if not replace and os.path.exists(self.full_path):
            return
        key_file = open(self.full_path, 'wb')  # binary write mode (assumed)
    try:
        shutil.copyfileobj(fp, key_file)
    finally:
        key_file.close()
|
Store an object in a file using the name of the Key object as the
key in file URI and the contents of the file pointed to by 'fp' as the
contents.
:type fp: file
:param fp: the file whose contents to upload
:type headers: dict
:param headers: ignored in this subclass.
:type replace: bool
:param replace: If this parameter is False, the method
will first check to see if an object exists in the
bucket with the same key. If it does, it won't
overwrite it. The default value is True which will
overwrite the object.
:type cb: function
:param cb: ignored in this subclass.
:type num_cb: int
:param num_cb: ignored in this subclass.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: ignored in this subclass.
:type md5: A tuple containing the hexdigest version of the MD5 checksum
of the file as the first element and the Base64-encoded
version of the plain checksum as the second element.
This is the same format returned by the compute_md5 method.
:param md5: ignored in this subclass.
|
27,701 |
def parse(item):
    # String literals reconstructed from the doctests below plus the IRCv3
    # tag-escaping rules (the \s and \r mappings are assumed).
    key, sep, value = item.partition('=')
    value = value.replace('\\:', ';')
    value = value.replace('\\s', ' ')
    value = value.replace('\\n', '\n')
    value = value.replace('\\r', '\r')
    value = value.replace('\\\\', '\\')
    value = value or None
    return {
        'key': key,
        'value': value,
    }
|
r"""
>>> Tag.parse('x') == {'key': 'x', 'value': None}
True
>>> Tag.parse('x=yes') == {'key': 'x', 'value': 'yes'}
True
>>> Tag.parse('x=3')['value']
'3'
>>> Tag.parse('x=red fox\\:green eggs')['value']
'red fox;green eggs'
>>> Tag.parse('x=red fox:green eggs')['value']
'red fox:green eggs'
>>> Tag.parse('x=a\\nb\\nc')['value']
'a\nb\nc'
|
27,702 |
def list_regions(self):
url = .format(self.host)
return http._get_with_qiniu_mac(url, None, self.auth)
|
Get information about the regions visible to the account.
Lists all regions available to the current user.
Returns:
    A tuple of the form (<result>, <ResponseInfo>):
    - result: the list of regions on success, None on failure
    - ResponseInfo: the response information of the request
|
27,703 |
def listen(self, addr=None):
self.buffer = buffer.LineBuffer()
self.handlers = {}
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.passive = True
default_addr = socket.gethostbyname(socket.gethostname()), 0
try:
self.socket.bind(addr or default_addr)
self.localaddress, self.localport = self.socket.getsockname()
self.socket.listen(10)
except socket.error as x:
raise DCCConnectionError("Couldn't bind socket: %s" % x)
return self
|
Wait for a connection/reconnection from a DCC peer.
Returns the DCCConnection object.
The local IP address and port are available as
self.localaddress and self.localport. After connection from a
peer, the peer address and port are available as
self.peeraddress and self.peerport.
|
27,704 |
def _color_dialog_changed(self, n, top, c):
    self._button_save.setEnabled(True)
    cp = self._colorpoint_list[n]
    # Build the "rgb(r,g,b)" stylesheet string from the chosen QColor.
    rgb_style = ("background-color: rgb(" + str(c.red()) + "," + str(c.green())
                 + "," + str(c.blue()) + "); border-radius: 3px;")
    new_color = [c.red()/255.0, c.green()/255.0, c.blue()/255.0]
    if self._checkboxes[n].isChecked():
        self.modify_colorpoint(n, cp[0], new_color, new_color)
        self._buttons_top_color   [n].setStyleSheet(rgb_style)
        self._buttons_bottom_color[n].setStyleSheet(rgb_style)
    elif top:
        self.modify_colorpoint(n, cp[0], cp[1], new_color)
        self._buttons_top_color   [n].setStyleSheet(rgb_style)
    else:
        self.modify_colorpoint(n, cp[0], new_color, cp[2])
        self._buttons_bottom_color[n].setStyleSheet(rgb_style)
|
Updates the color of the slider.
|
27,705 |
def save_params_to_file(self, fname: str):
if self.aux_params is not None:
utils.save_params(self.params.copy(), fname, self.aux_params.copy())
else:
utils.save_params(self.params.copy(), fname)
    logging.info('Saved model parameters to "%s"', fname)  # message text assumed
|
Saves model parameters to file.
:param fname: Path to save parameters to.
|
27,706 |
def estimateAnomalyLikelihoods(anomalyScores,
averagingWindow=10,
skipRecords=0,
verbosity=0):
if verbosity > 1:
print("In estimateAnomalyLikelihoods.")
print("Number of anomaly scores:", len(anomalyScores))
print("Skip records=", skipRecords)
print("First 20:", anomalyScores[0:min(20, len(anomalyScores))])
if len(anomalyScores) == 0:
raise ValueError("Must have at least one anomalyScore")
aggRecordList, historicalValues, total = _anomalyScoreMovingAverage(
anomalyScores,
windowSize = averagingWindow,
verbosity = verbosity)
s = [r[2] for r in aggRecordList]
dataValues = numpy.array(s)
if len(aggRecordList) <= skipRecords:
distributionParams = nullDistribution(verbosity = verbosity)
else:
distributionParams = estimateNormal(dataValues[skipRecords:])
s = [r[1] for r in aggRecordList]
if all([isinstance(r[1], numbers.Number) for r in aggRecordList]):
metricValues = numpy.array(s)
metricDistribution = estimateNormal(metricValues[skipRecords:],
performLowerBoundCheck=False)
if metricDistribution["variance"] < 1.5e-5:
distributionParams = nullDistribution(verbosity = verbosity)
likelihoods = numpy.array(dataValues, dtype=float)
for i, s in enumerate(dataValues):
likelihoods[i] = tailProbability(s, distributionParams)
filteredLikelihoods = numpy.array(
_filterLikelihoods(likelihoods) )
params = {
"distribution": distributionParams,
"movingAverage": {
"historicalValues": historicalValues,
"total": total,
"windowSize": averagingWindow,
},
"historicalLikelihoods":
list(likelihoods[-min(averagingWindow, len(likelihoods)):]),
}
if verbosity > 1:
print("Discovered params=")
print(params)
print("Number of likelihoods:", len(likelihoods))
print("First 20 likelihoods:", (
filteredLikelihoods[0:min(20, len(filteredLikelihoods))] ))
print("leaving estimateAnomalyLikelihoods")
return (filteredLikelihoods, aggRecordList, params)
|
Given a series of anomaly scores, compute the likelihood for each score. This
function should be called once on a bunch of historical anomaly scores for an
initial estimate of the distribution. It should be called again every so often
(say every 50 records) to update the estimate.
:param anomalyScores: a list of records. Each record is a list with the
following three elements: [timestamp, value, score]
Example::
[datetime.datetime(2013, 8, 10, 23, 0), 6.0, 1.0]
For best results, the list should be between 1000
and 10,000 records
:param averagingWindow: integer number of records to average over
:param skipRecords: integer specifying number of records to skip when
estimating distributions. If skip records are >=
len(anomalyScores), a very broad distribution is returned
that makes everything pretty likely.
:param verbosity: integer controlling extent of printouts for debugging
0 = none
1 = occasional information
2 = print every record
:returns: 3-tuple consisting of:
- likelihoods
numpy array of likelihoods, one for each aggregated point
- avgRecordList
list of averaged input records
- params
a small JSON dict that contains the state of the estimator
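A minimal usage sketch of the call above (the import path is assumed and the records are synthetic):
import datetime
# Hypothetical import; adjust to wherever estimateAnomalyLikelihoods is defined.
# from nupic.algorithms.anomaly_likelihood import estimateAnomalyLikelihoods
# Each record is [timestamp, metric value, anomaly score], as described above.
records = [
    [datetime.datetime(2013, 8, 10, 23, 0) + datetime.timedelta(minutes=5 * i),
     float(i % 7), 0.1]
    for i in range(1000)
]
likelihoods, avgRecordList, params = estimateAnomalyLikelihoods(
    records, averagingWindow=10, skipRecords=100)
print(len(likelihoods), params["movingAverage"]["windowSize"])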
|
27,707 |
def fit(self, X, y=None):
X = check_array(X)
self.tree = BallTree(X, leaf_size=self.leaf_size, metric=self.metric)
dist_train = self.tree.query(X, k=2)[0]
if self.threshold == :
self.threshold_value = 0.5 * sqrt(var(dist_train[:, 1])) + mean(dist_train[:, 1])
elif self.threshold == :
if y is None:
raise ValueError("Y must be specified to find the optimal threshold.")
y = check_array(y, accept_sparse=, ensure_2d=False, dtype=None)
self.threshold_value = 0
score = 0
Y_pred, Y_true, AD = [], [], []
cv = KFold(n_splits=5, random_state=1, shuffle=True)
for train_index, test_index in cv.split(X):
x_train = safe_indexing(X, train_index)
x_test = safe_indexing(X, test_index)
y_train = safe_indexing(y, train_index)
y_test = safe_indexing(y, test_index)
data_test = safe_indexing(dist_train[:, 1], test_index)
if self.reg_model is None:
reg_model = RandomForestRegressor(n_estimators=500, random_state=1).fit(x_train, y_train)
else:
reg_model = clone(self.reg_model).fit(x_train, y_train)
Y_pred.append(reg_model.predict(x_test))
Y_true.append(y_test)
AD.append(data_test)
AD_ = unique(hstack(AD))
for z in AD_:
AD_new = hstack(AD) <= z
if self.score == :
val = balanced_accuracy_score_with_ad(Y_true=hstack(Y_true), Y_pred=hstack(Y_pred), AD=AD_new)
elif self.score == :
val = rmse_score_with_ad(Y_true=hstack(Y_true), Y_pred=hstack(Y_pred), AD=AD_new)
if val >= score:
score = val
self.threshold_value = z
else:
self.threshold_value = self.threshold
return self
|
Fit distance-based AD.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency.
Returns
-------
self : object
Returns self.
|
27,708 |
def extract_stack(f=None, limit=None):
if f is not None:
raise RuntimeError("Timba.utils.extract_stack: f has to be None, don't ask why")
lim = limit
if lim is not None:
lim += 1
tb = traceback.extract_stack(None, lim)
if tb:
return tb[:-1];
return nonportable_extract_stack(f, limit)
|
equivalent to traceback.extract_stack(), but also works with psyco
|
27,709 |
def course_run_detail(self, request, pk, course_id):
enterprise_customer_catalog = self.get_object()
course_run = enterprise_customer_catalog.get_course_run(course_id)
if not course_run:
raise Http404
context = self.get_serializer_context()
    context['enterprise_customer_catalog'] = enterprise_customer_catalog  # key name assumed
serializer = serializers.CourseRunDetailSerializer(course_run, context=context)
return Response(serializer.data)
|
Return the metadata for the specified course run.
The course run needs to be included in the specified EnterpriseCustomerCatalog
in order for metadata to be returned from this endpoint.
|
27,710 |
async def acquire_lease_async(self, lease):
retval = True
new_lease_id = str(uuid.uuid4())
partition_id = lease.partition_id
try:
if asyncio.iscoroutinefunction(lease.state):
state = await lease.state()
else:
state = lease.state()
if state == "leased":
if not lease.token:
retval = False
else:
_logger.info("ChangingLease %r %r", self.host.guid, lease.partition_id)
await self.host.loop.run_in_executor(
self.executor,
functools.partial(
self.storage_client.change_blob_lease,
self.lease_container_name,
partition_id,
lease.token,
new_lease_id))
lease.token = new_lease_id
else:
_logger.info("AcquiringLease %r %r", self.host.guid, lease.partition_id)
lease.token = await self.host.loop.run_in_executor(
self.executor,
functools.partial(
self.storage_client.acquire_blob_lease,
self.lease_container_name,
partition_id,
self.lease_duration,
new_lease_id))
lease.owner = self.host.host_name
lease.increment_epoch()
retval = await self.update_lease_async(lease)
except Exception as err:
_logger.error("Failed to acquire lease %r %r %r", err, partition_id, lease.token)
return False
return retval
|
Acquire the lease on the desired partition for this EventProcessorHost.
Note that it is legal to acquire a lease that is already owned by another host.
Lease-stealing is how partitions are redistributed when additional hosts are started.
:param lease: The stored lease to be acquired.
:type lease: ~azure.eventprocessorhost.lease.Lease
:return: `True` if the lease was acquired successfully, `False` if not.
:rtype: bool
|
27,711 |
def get_backoff_time(self):
consecutive_errors_len = len(list(takewhile(lambda x: x.redirect_location is None,
reversed(self.history))))
if consecutive_errors_len <= 1:
return 0
backoff_value = self.backoff_factor * (2 ** (consecutive_errors_len - 1))
return min(self.BACKOFF_MAX, backoff_value)
|
Formula for computing the current backoff
:rtype: float
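A worked sketch of the formula above, written as a standalone function (the BACKOFF_MAX value is assumed):
BACKOFF_MAX = 120  # assumed cap; mirrors the self.BACKOFF_MAX attribute used above

def backoff_time(consecutive_errors, backoff_factor=0.5):
    # No delay until the second consecutive error, then
    # backoff_factor * 2 ** (n - 1), capped at BACKOFF_MAX.
    if consecutive_errors <= 1:
        return 0
    return min(BACKOFF_MAX, backoff_factor * (2 ** (consecutive_errors - 1)))

print([backoff_time(n) for n in range(1, 6)])  # [0, 1.0, 2.0, 4.0, 8.0]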
|
27,712 |
def showMenu(self, point):
menu = QMenu(self)
acts = {}
acts[] = menu.addAction()
trigger = menu.exec_(self.mapToGlobal(point))
if trigger == acts[]:
text, accepted = XTextEdit.getText(self.window(),
,
,
self.filterFormat(),
wrapped=False)
if accepted:
self.setFilterFormat(text)
|
Displays the menu for this filter widget.
|
27,713 |
def open_unknown_proxy(self, proxy, fullurl, data=None):
type, url = splittype(fullurl)
    raise IOError('url error', 'invalid proxy for %s' % type, proxy)
|
Overridable interface to open unknown URL type.
|
27,714 |
def get_type_string(self, data, type_string):
if type_string is not None:
return type_string
else:
tp = type(data)
try:
return self.type_to_typestring[tp]
except KeyError:
            return self.type_to_typestring[tp.__module__ + '.'
                                           + tp.__name__]
|
Gets type string.
Finds the type string for 'data' contained in
``python_type_strings`` using its ``type``. Non-``None``
'type_string` overrides whatever type string is looked up.
The override makes it easier for subclasses to convert something
that the parent marshaller can write to disk but still put the
right type string in place).
Parameters
----------
data : type to be marshalled
The Python object that is being written to disk.
type_string : str or None
If it is a ``str``, it overrides any looked up type
string. ``None`` means don't override.
Returns
-------
str
The type string associated with 'data'. Will be
'type_string' if it is not ``None``.
Notes
-----
Subclasses probably do not need to override this method.
|
27,715 |
def to_int(argument):
if(argument.startswith("0b")):
return int(argument[2:], 2)
elif(argument.startswith("0x")):
return int(argument[2:], 16)
elif(argument.startswith("0") and argument != "0"):
return int(argument[1:], 8)
    elif(argument[0] == "'"):
        return ord(argument[1])
return int(argument)
|
Converts the ``str`` argument to an integer:
>>> from py_register_machine2.engine_tools.conversions import *
>>> to_int("0x04")
4
>>> to_int("'a'")
97
|
27,716 |
def _nginx_http_spec(port_spec, bridge_ip):
server_string_spec = "\t server {\n"
server_string_spec += "\t \t {}\n".format(_nginx_max_file_size_string())
server_string_spec += "\t \t {}\n".format(_nginx_listen_string(port_spec))
server_string_spec += "\t \t {}\n".format(_nginx_server_name_string(port_spec))
server_string_spec += _nginx_location_spec(port_spec, bridge_ip)
server_string_spec += _custom_502_page()
server_string_spec += "\t }\n"
return server_string_spec
|
This will output the nginx HTTP config string for specific port spec
|
27,717 |
def in6_getha(prefix):
r = in6_and(inet_pton(socket.AF_INET6, prefix), in6_cidr2mask(64))
    r = in6_or(r, inet_pton(socket.AF_INET6, '::fdff:ffff:ffff:fffe'))  # home-agents anycast suffix (assumed)
return inet_ntop(socket.AF_INET6, r)
|
Return the anycast address associated with all home agents on a given
subnet.
|
27,718 |
def update_host_password(host, username, password, new_password, protocol=None, port=None):
service_instance = salt.utils.vmware.get_service_instance(host=host,
username=username,
password=password,
protocol=protocol,
port=port)
account_manager = salt.utils.vmware.get_inventory(service_instance).accountManager
user_account = vim.host.LocalAccountManager.AccountSpecification()
user_account.id = username
user_account.password = new_password
try:
account_manager.UpdateUser(user_account)
except vmodl.fault.SystemError as err:
raise CommandExecutionError(err.msg)
except vim.fault.UserNotFound:
        # Message text assumed; the original string literal was lost.
        raise CommandExecutionError('\'vsphere.update_host_password\' failed for host {0}: '
                                    'User was not found.'.format(host))
except vim.fault.AlreadyExists:
pass
return True
|
Update the password for a given host.
.. note:: Currently only works with connections to ESXi hosts. Does not work with vCenter servers.
host
The location of the ESXi host.
username
The username used to login to the ESXi host, such as ``root``.
password
The password used to login to the ESXi host.
new_password
The new password that will be updated for the provided username on the ESXi host.
protocol
Optionally set to alternate protocol if the host is not using the default
protocol. Default protocol is ``https``.
port
Optionally set to alternate port if the host is not using the default
port. Default port is ``443``.
CLI Example:
.. code-block:: bash
salt '*' vsphere.update_host_password my.esxi.host root original-bad-password new-bad-password
|
27,719 |
def _search_files(self):
for root, _, files in os.walk(self.dirpath):
for filename in files:
location = os.path.join(root, filename)
yield location
|
Retrieve the file paths stored under the base path.
|
27,720 |
def sweFixedStar(star, jd):
sweList = swisseph.fixstar_ut(star, jd)
mag = swisseph.fixstar_mag(star)
    return {
        # Key names assumed; the original string literals were lost.
        'id': star,
        'mag': mag,
        'lon': sweList[0],
        'lat': sweList[1]
    }
|
Returns a fixed star from the Ephemeris.
|
27,721 |
def dag(self) -> Tuple[Dict, Dict]:
from pipelines import dags
operation_runs = self.operation_runs.all().prefetch_related()
def get_downstream(op_run):
        return op_run.downstream_runs.values_list('id', flat=True)  # field name assumed
return dags.get_dag(operation_runs, get_downstream)
|
Construct the DAG of this pipeline run
based on its operation runs and their downstream runs.
|
27,722 |
def search(cls, term=None, page=0, **criteria):
assert (term or criteria and not (term and criteria))
params = {
: int(page) * cls.SEARCH_OFFSET,
}
if term:
params[] = term
if criteria:
for key, value in criteria.items():
params[.format(key)] = value
return fields.ListField(name=cls.ENDPOINT, init_class=cls).decode(
cls.element_from_string(
cls._get_request(
endpoint=cls.ENDPOINT + ,
params=params
).text
)
)
|
Search a list of the model
If you use "term":
- Returns a collection of people that have a name matching the term passed in through the URL.
If you use "criteria":
- returns people who match your search criteria.
Search by any criteria you can on the Contacts tab, including custom fields. Combine criteria to narrow results
:param term: params as string
:type term: str
:param criteria: search for more criteria
:type criteria: dict
:param page: the page
:type page: int
:return: the list of the parsed xml objects
:rtype: list
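A hedged usage sketch of the class-level search described above (the model and field names are made up):
# Search by free-text term, second page of results.
people = Person.search(term="Smith", page=1)
# Or search by criteria, including custom fields; a term and criteria cannot be combined.
managers = Person.search(title="Manager", city="Oslo")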
|
27,723 |
def opt_grid_parallel(params, func, limits, ftol=0.01, disp=0, compute_errors=True):
import multiprocessing
def spawn(f):
def fun(q_in,q_out):
while True:
i,x = q_in.get()
if i == None:
break
q_out.put((i,f(x)))
return fun
def parmap(f, X, nprocs = multiprocessing.cpu_count()):
q_in = multiprocessing.Queue(1)
q_out = multiprocessing.Queue()
proc = [multiprocessing.Process(target=spawn(f),args=(q_in,q_out)) for _ in range(nprocs)]
for p in proc:
p.daemon = True
p.start()
sent = [q_in.put((i,x)) for i,x in enumerate(X)]
[q_in.put((None,None)) for _ in range(nprocs)]
res = [q_out.get() for _ in range(len(sent))]
[p.join() for p in proc]
return [x for i,x in sorted(res)]
nthreads = multiprocessing.cpu_count()
caches = [[] for p in params]
newparams = numpy.copy(params)
errors = [[] for p in params]
indices = range(0, len(params), nthreads)
k = 0
while k < len(params):
j = min(len(params), k + nthreads * 2)
def run1d((i, curparams, curlimits)):
cache = []
def func1(x0):
curparams[i] = x0
v = func(curparams)
cache.append([x0, v])
return v
lo, hi = curlimits
bestval = optimize(func1, x0=p,
cons=[lambda x: x - lo, lambda x: hi - x],
ftol=ftol, disp=disp - 1)
beststat = func1(bestval)
if compute_errors:
errors = cache2errors(func1, cache, disp=disp - 1)
return bestval, beststat, errors, cache
return bestval, beststat, cache
results = parmap(run1d, [(i, numpy.copy(newparams), limits[i]) for i in range(k, j)])
for i, r in enumerate(results):
if compute_errors:
v, s, e, c = r
if disp > 0:
print % (i + k, v, e[0], e[1], s)
else:
v, s, c = r
e = []
if disp > 0:
print % (i + k, v, s)
newparams[i + k] = v
caches[i + k] = c
errors[i + k] = e
k = j
beststat = func(newparams)
if disp > 0:
print % (beststat)
if compute_errors:
return newparams, errors
else:
return newparams
|
parallelized version of :func:`opt_grid`
|
27,724 |
def save_linked_hdds_info(self):
hdd_table = []
if self.linked_clone:
if os.path.exists(self.working_dir):
hdd_files = yield from self._get_all_hdd_files()
vm_info = yield from self._get_vm_info()
for entry, value in vm_info.items():
match = re.search("^([\s\w]+)\-(\d)\-(\d)$", entry)
if match:
controller = match.group(1)
port = match.group(2)
device = match.group(3)
if value in hdd_files and os.path.exists(os.path.join(self.working_dir, self._vmname, "Snapshots", os.path.basename(value))):
log.info("VirtualBox VM [{id}] detaching HDD {controller} {port} {device}".format(name=self.name,
id=self.id,
controller=controller,
port=port,
device=device))
hdd_table.append(
{
"hdd": os.path.basename(value),
"controller": controller,
"port": port,
"device": device,
}
)
if hdd_table:
try:
hdd_info_file = os.path.join(self.working_dir, self._vmname, "hdd_info.json")
with open(hdd_info_file, "w", encoding="utf-8") as f:
json.dump(hdd_table, f, indent=4)
except OSError as e:
log.warning("VirtualBox VM [{id}] could not write HHD info file: {error}".format(name=self.name,
id=self.id,
error=e.strerror))
return hdd_table
|
Save linked cloned hard disks information.
:returns: disk table information
|
27,725 |
def reset(self):
self._variables_shim = {}
self._executable = None
self._bitstrings = None
    self.status = 'connected'  # value assumed; the original string literal was lost
|
Reset the Quantum Abstract Machine to its initial state, which is particularly useful
when it has gotten into an unwanted state. This can happen, for example, if the QAM
is interrupted in the middle of a run.
|
27,726 |
def get_new_client(self, public=True):
return self._get_client(public=public, cached=False)
|
Returns a new instance of the client for this endpoint.
|
27,727 |
def generate_host_keys(hostname: str) -> Iterator[str]:
    labels = hostname.split('.')
    for i in range(2, len(labels) + 1):
        domain = '.'.join(labels[-i:])
        yield domain
        yield '.' + domain
|
Yield Chrome/Chromium keys for `hostname`, from least to most specific.
Given a hostname like foo.example.com, this yields the key sequence:
example.com
.example.com
foo.example.com
.foo.example.com
|
27,728 |
def vectored_io_from_metadata(md):
try:
mdattr = json.loads(
md[JSON_KEY_BLOBXFER_METADATA])[_JSON_KEY_VECTORED_IO]
except (KeyError, TypeError):
pass
else:
if mdattr[_JSON_KEY_VECTORED_IO_MODE] == _JSON_KEY_VECTORED_IO_STRIPE:
mdstripe = mdattr[_JSON_KEY_VECTORED_IO_STRIPE]
try:
nextptr = explode_vectored_io_next_entry(
mdstripe[_JSON_KEY_VECTORED_IO_STRIPE_NEXT])
except (KeyError, AttributeError):
nextptr = None
vio = VectoredStripe(
total_size=mdstripe[_JSON_KEY_VECTORED_IO_STRIPE_TOTAL_SIZE],
offset_start=mdstripe[
_JSON_KEY_VECTORED_IO_STRIPE_OFFSET_START],
total_slices=mdstripe[
_JSON_KEY_VECTORED_IO_STRIPE_TOTAL_SLICES],
slice_id=mdstripe[_JSON_KEY_VECTORED_IO_STRIPE_SLICE_ID],
next=nextptr,
)
return vio
else:
            # Message text assumed; the original string literal was lost.
            raise RuntimeError('unknown vectored_io mode: {}'.format(
                mdattr[_JSON_KEY_VECTORED_IO_MODE]))
return None
|
Convert vectored IO metadata from JSON metadata.
:param dict md: metadata dictionary
:rtype: VectoredStripe or None
:return: vectored io metadata
|
27,729 |
def get_functions_overridden_by(self, function):
candidates = [c.functions_not_inherited for c in self.inheritance]
candidates = [candidate for sublist in candidates for candidate in sublist]
return [f for f in candidates if f.full_name == function.full_name]
|
Return the list of functions overridden by the function
Args:
(core.Function)
Returns:
list(core.Function)
|
27,730 |
def _intermediary_to_markdown(tables, relationships):
    # Separators assumed; the original string literals were lost.
    t = '\n'.join(t.to_markdown() for t in tables)
    r = '\n'.join(r.to_markdown() for r in relationships)
    return '{}\n{}'.format(t, r)
|
Returns the er markup source in a string.
|
27,731 |
async def flush(self) -> None:
async with self._flush_lock:
if self.finished():
if self._exc:
raise self._exc
return
try:
await self._delegate.flush_buf()
except asyncio.CancelledError:
raise
except BaseWriteException as e:
self._finished.set()
if self._exc is None:
self._exc = e
raise
|
Give the writer a chance to flush the pending data
out of the internal buffer.
|
27,732 |
def cudnn_stacked_bi_gru(units,
n_hidden,
seq_lengths=None,
n_stacks=2,
keep_prob=1.0,
concat_stacked_outputs=False,
trainable_initial_states=False,
name=,
reuse=False):
if seq_lengths is None:
seq_lengths = tf.ones([tf.shape(units)[0]], dtype=tf.int32) * tf.shape(units)[1]
outputs = [units]
with tf.variable_scope(name, reuse=reuse):
for n in range(n_stacks):
if n == 0:
inputs = outputs[-1]
else:
inputs = variational_dropout(outputs[-1], keep_prob=keep_prob)
(h_fw, h_bw), _ = cudnn_bi_gru(inputs, n_hidden, seq_lengths,
n_layers=1,
trainable_initial_states=trainable_initial_states,
name=.format(n),
reuse=reuse)
outputs.append(tf.concat([h_fw, h_bw], axis=2))
if concat_stacked_outputs:
return tf.concat(outputs[1:], axis=2)
return outputs[-1]
|
Fast CuDNN Stacked Bi-GRU implementation
Args:
units: tf.Tensor with dimensions [B x T x F], where
B - batch size
T - number of tokens
F - features
n_hidden: dimensionality of hidden state
seq_lengths: number of tokens in each sample in the batch
n_stacks: number of stacked Bi-GRU
keep_prob: dropout keep_prob between Bi-GRUs (intra-layer dropout)
concat_stacked_outputs: return last Bi-GRU output or concat outputs from every Bi-GRU,
trainable_initial_states: whether to create a special trainable variable
to initialize the hidden states of the network or use just zeros
name: name of the variable scope to use
reuse: whether to reuse already initialized variable
Returns:
h - all hidden states along T dimension,
tf.Tensor with dimensionality [B x T x ((n_hidden * 2) * n_stacks)]
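A hedged TensorFlow 1.x usage sketch of the helper above (shapes are illustrative):
import tensorflow as tf

units = tf.placeholder(tf.float32, [None, None, 128])   # [B x T x F]
lengths = tf.placeholder(tf.int32, [None])               # tokens per sample
h = cudnn_stacked_bi_gru(units, n_hidden=64, seq_lengths=lengths, n_stacks=2)
# h: [B x T x 128] for the last stack, or [B x T x 256] if concat_stacked_outputs=True.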
|
27,733 |
def get_minibam_bed(bamfile, bedfile, minibam=None):
pf = op.basename(bedfile).split(".")[0]
minibamfile = minibam or op.basename(bamfile).replace(".bam", ".{}.bam".format(pf))
    minisamfile = minibamfile.replace(".bam", ".sam")
baifile = minibamfile + ".bai"
if op.exists(baifile):
sh("rm {}".format(baifile))
cmd = "samtools view -H {} > {}".format(bamfile, minisamfile)
sh(cmd)
cmd = "cat {}".format(bedfile)
cmd += " | perl -lane "
cmd += " | xargs -n1 -t -I \{\}"
cmd += " samtools view {}".format(bamfile)
cmd += " \{\} >> " + minisamfile
sh(cmd)
cmd = "samtools view {} -b".format(minisamfile)
cmd += " | samtools sort -"
cmd += " -o {0}".format(minibamfile)
sh(cmd)
sh("samtools index {0}".format(minibamfile))
return minibamfile
|
samtools view -L could do the work, but it is NOT random access. Here we
are processing multiple regions sequentially. See also:
https://www.biostars.org/p/49306/
|
27,734 |
def get_data_home(data_home=None):
if data_home is None:
data_home = os.environ.get("ARVIZ_DATA", os.path.join("~", "arviz_data"))
data_home = os.path.expanduser(data_home)
if not os.path.exists(data_home):
os.makedirs(data_home)
return data_home
|
Return the path of the arviz data dir.
This folder is used by some dataset loaders to avoid downloading the
data several times.
By default the data dir is set to a folder named 'arviz_data' in the
user home folder.
Alternatively, it can be set by the 'ARVIZ_DATA' environment
variable or programmatically by giving an explicit folder path. The '~'
symbol is expanded to the user home folder.
If the folder does not already exist, it is automatically created.
Parameters
----------
data_home : str | None
The path to arviz data dir.
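A usage sketch of the resolution order described above (paths are hypothetical):
import os

os.environ["ARVIZ_DATA"] = "/tmp/arviz_cache"
print(get_data_home())             # -> /tmp/arviz_cache, created if missing
print(get_data_home("~/my_data"))  # explicit path wins; '~' is expanded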
|
27,735 |
def directive(apply_globally=False, api=None):
def decorator(directive_method):
if apply_globally:
hug.defaults.directives[underscore(directive_method.__name__)] = directive_method
else:
apply_to_api = hug.API(api) if api else hug.api.from_object(directive_method)
apply_to_api.add_directive(directive_method)
directive_method.directive = True
return directive_method
return decorator
|
A decorator that registers a single hug directive
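A hedged sketch of registering a directive with the decorator above (the directive body is illustrative only):
@directive(apply_globally=True)
def request_host(default=None, request=None, **kwargs):
    # Directives receive the current request via keyword arguments and return a value to inject.
    return getattr(request, "host", default) if request else default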
|
27,736 |
def verify_token(self, token, expiration_in_seconds=None):
from cryptography.fernet import InvalidToken
try:
concatenated_str = self.decrypt_string(token, expiration_in_seconds)
data_items = self.decode_data_items(concatenated_str)
except InvalidToken:
data_items = None
return data_items
|
Verify token signature, verify token expiration, and decrypt token.
| Returns None if token is expired or invalid.
| Returns a list of strings and integers on success.
Implemented as::
concatenated_str = self.decrypt_string(token, expiration_in_seconds)
data_items = self.decode_data_items(concatenated_str)
return data_items
Example:
::
# Verify that a User with ``user_id`` has a password that ends in ``password_ends_with``.
token_is_valid = False
data_items = token_manager.verify(token, expiration_in_seconds)
if data_items:
user_id = data_items[0]
password_ends_with = data_items[1]
user = user_manager.db_manager.get_user_by_id(user_id)
token_is_valid = user and user.password[-8:]==password_ends_with
|
27,737 |
def copytree(src, dst, symlinks=False, ignore=None):
from shutil import copy2, Error, copystat
names = os.listdir(src)
if ignore is not None:
ignored_names = ignore(src, names)
else:
ignored_names = set()
os.makedirs(dst)
errors = []
for name in names:
if name in ignored_names:
continue
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
try:
if symlinks and os.path.islink(srcname):
linkto = os.readlink(srcname)
os.symlink(linkto, dstname)
elif os.path.isdir(srcname):
copytree(srcname, dstname, symlinks, ignore)
else:
copy2(srcname, dstname)
except Error as err:
errors.extend(err.args[0])
except EnvironmentError as why:
errors.append((srcname, dstname, str(why)))
try:
copystat(src, dst)
except OSError as why:
if WindowsError is not None and isinstance(why, WindowsError):
pass
else:
errors.extend((src, dst, str(why)))
if errors:
raise Error(errors)
|
Recursively copy a directory tree using copy2().
The destination directory must not already exist.
If exception(s) occur, an Error is raised with a list of reasons.
If the optional symlinks flag is true, symbolic links in the
source tree result in symbolic links in the destination tree; if
it is false, the contents of the files pointed to by symbolic
links are copied.
The optional ignore argument is a callable. If given, it
is called with the `src` parameter, which is the directory
being visited by copytree(), and `names` which is the list of
`src` contents, as returned by os.listdir():
callable(src, names) -> ignored_names
Since copytree() is called recursively, the callable will be
called once for each directory that is copied. It returns a
list of names relative to the `src` directory that should
not be copied.
XXX Consider this example code rather than the ultimate tool.
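A minimal usage sketch of the ignore-callable contract described above (paths are hypothetical):
def ignore_compiled(src, names):
    # Called once per visited directory; return the names that should be skipped.
    return set(name for name in names if name.endswith(".pyc") or name == "__pycache__")

copytree("project/src", "project/backup", symlinks=False, ignore=ignore_compiled)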
|
27,738 |
def apply(self, args, kwargs):
    _locals = {'func': self._func}
if args is not None:
_locals.update({
"arg{}".format(index): args[index]
for index, value in enumerate(args)})
if kwargs is not None:
kw_list = list(kwargs.keys())
_locals.update({
"kw{}".format(index): kwargs[key]
for index, key in enumerate(kw_list)})
params = []
if args is not None:
params.extend([
"arg{}".format(index)
for index in range(len(args))])
if kwargs is not None:
params.extend([
"{}=kw{}".format(key, index)
for index, key in enumerate(kw_list)])
expr = "func({})".format(", ".join(params))
return eval(expr, globals(), _locals)
|
Replicate a call to the encapsulated function.
Unlike func(*args, **kwargs) the call is deterministic in the order
kwargs are being checked by python. In other words, it behaves exactly
the same as if typed into the repl prompt.
This is usually only a problem when a function is given two invalid
keyword arguments. In such cases func(*args, **kwargs) syntax will
result in random error on either of those invalid keyword arguments.
This is most likely caused by a temporary dictionary created by the
runtime.
For testing, an OrderedDict instance may be passed as kwargs. In
such a case the call, and the error message, is fully deterministic.
This function is implemented with eval()
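A standalone sketch of the eval() trick described above: each positional and keyword argument is bound to a numbered local, and a "func(arg0, key=kw0, ...)" expression is evaluated so keyword arguments are checked in a deterministic, repl-like order.
from collections import OrderedDict

def deterministic_call(func, args=(), kwargs=None):
    kwargs = kwargs if kwargs is not None else {}
    _locals = {"func": func}
    _locals.update({"arg{}".format(i): v for i, v in enumerate(args)})
    kw_list = list(kwargs)
    _locals.update({"kw{}".format(i): kwargs[k] for i, k in enumerate(kw_list)})
    params = ["arg{}".format(i) for i in range(len(args))]
    params += ["{}=kw{}".format(k, i) for i, k in enumerate(kw_list)]
    return eval("func({})".format(", ".join(params)), globals(), _locals)

print(deterministic_call(sorted, ([3, 1, 2],), OrderedDict(reverse=True)))  # [3, 2, 1]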
|
27,739 |
def ssh_accept_sec_context(self, hostname, username, recv_token):
self._gss_host = hostname
self._username = username
targ_name = "host/" + self._gss_host
self._gss_srv_ctxt = sspi.ServerAuth("Kerberos", spn=targ_name)
error, token = self._gss_srv_ctxt.authorize(recv_token)
token = token[0].Buffer
if error == 0:
self._gss_srv_ctxt_status = True
token = None
return token
|
Accept a SSPI context (server mode).
:param str hostname: The server's FQDN
:param str username: The name of the user who attempts to login
:param str recv_token: The SSPI Token received from the server,
if it's not the initial call.
:return: A ``String`` if the SSPI has returned a token or ``None`` if
no token was returned
|
27,740 |
def get_metrics(thing, extra=''):
    thing = thing or ''
    if not isinstance(thing, str):
        # Use the dotted Python path of the class or of the instance's class.
        if type(thing) == type:
            thing = '%s.%s' % (thing.__module__, thing.__name__)
        else:
            thing = '%s.%s' % (
                thing.__class__.__module__, thing.__class__.__name__
            )
    if extra:
        thing = '%s.%s' % (thing, extra)
    return MetricsInterface(thing)
|
Return MetricsInterface instance with specified name.
The name is used as the prefix for all keys generated with this
:py:class:`markus.main.MetricsInterface`.
The :py:class:`markus.main.MetricsInterface` is not tied to metrics
backends. The list of active backends are globally configured. This allows
us to create :py:class:`markus.main.MetricsInterface` classes without
having to worry about bootstrapping order of the app.
:arg class/instance/str thing: The name to use as a key prefix.
If this is a class, it uses the dotted Python path. If this is an
instance, it uses the dotted Python path plus ``str(instance)``.
:arg str extra: Any extra bits to add to the end of the name.
:returns: a ``MetricsInterface`` instance
Examples:
>>> from markus import get_metrics
Create a MetricsInterface with the name "myapp" and generate a count with
stat "myapp.thing1" and value 1:
>>> metrics = get_metrics('myapp')
>>> metrics.incr('thing1', value=1)
Create a MetricsInterface with the prefix of the Python module it's being
called in:
>>> metrics = get_metrics(__name__)
Create a MetricsInterface with the prefix as the qualname of the class:
>>> class Foo:
... def __init__(self):
... self.metrics = get_metrics(self)
Create a prefix of the class path plus some identifying information:
>>> class Foo:
... def __init__(self, myname):
... self.metrics = get_metrics(self, extra=myname)
...
>>> foo = Foo('jim')
Assume that ``Foo`` is defined in the ``myapp`` module. Then this will
generate the name ``myapp.Foo.jim``.
|
27,741 |
def do_commit(self, subcmd, opts, *args):
print " opts: %s" % (subcmd, opts)
print " args: %s" % (subcmd, args)
|
Send changes from your working copy to the repository.
usage:
commit [PATH...]
A log message must be provided, but it can be empty. If it is not
given by a --message or --file option, an editor will be started.
${cmd_option_list}
|
27,742 |
def getMaxDelay(inferences):
maxDelay = 0
for inferenceElement, inference in inferences.iteritems():
if isinstance(inference, dict):
for key in inference.iterkeys():
maxDelay = max(InferenceElement.getTemporalDelay(inferenceElement,
key),
maxDelay)
else:
maxDelay = max(InferenceElement.getTemporalDelay(inferenceElement),
maxDelay)
return maxDelay
|
Returns the maximum delay for the InferenceElements in the inference
dictionary
Parameters:
-----------------------------------------------------------------------
inferences: A dictionary where the keys are InferenceElements
|
27,743 |
def get_operator_statistic(self, name):
opdefs = yield from self.get_operator_definitions()
name = name.lower()
if name not in opdefs:
return None
if "uniqueStatistic" not in opdefs[name] or "pvp" not in opdefs[name]["uniqueStatistic"]:
return None
return opdefs[name]["uniqueStatistic"]["pvp"]["statisticId"]
|
|coro|
Gets the operator unique statistic from the operator definitions dict
Returns
-------
str
the name of the operator unique statistic
|
27,744 |
def get_cancel_operation_by_id(cls, cancel_operation_id, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
return cls._get_cancel_operation_by_id_with_http_info(cancel_operation_id, **kwargs)
else:
(data) = cls._get_cancel_operation_by_id_with_http_info(cancel_operation_id, **kwargs)
return data
|
Find CancelOperation
Return single instance of CancelOperation by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_cancel_operation_by_id(cancel_operation_id, async=True)
>>> result = thread.get()
:param async bool
:param str cancel_operation_id: ID of cancelOperation to return (required)
:return: CancelOperation
If the method is called asynchronously,
returns the request thread.
|
27,745 |
def strip_suffix(string, suffix, regex=False):
    if not isinstance(string, six.string_types) or not isinstance(suffix, six.string_types):
        # Message text assumed; the original string literal was lost.
        msg = 'string and suffix must both be str, got {s} and {p}'\
            .format(s=type(string), p=type(suffix))
        raise TypeError(msg)
    if not regex:
        suffix = re.escape(suffix)
    if not suffix.endswith('$'):
        suffix = '{s}$'.format(s=suffix)
    return _strip(string, suffix)
|
Strip the suffix from the string.
If 'regex' is specified, suffix is understood as a regular expression.
|
27,746 |
def _get_rdfclass(self, class_type, **kwargs):
def select_class(class_name):
try:
return getattr(MODULE.rdfclass, class_name.pyuri)
except AttributeError:
return RdfClassBase
if kwargs.get("def_load"):
return RdfClassBase
if isinstance(class_type[self.omap], list):
bases = [select_class(class_name)
for class_name in class_type[self.omap]]
bases = [base for base in bases if base != RdfClassBase]
if len(bases) == 0:
return RdfClassBase
elif len(bases) == 1:
return bases[0]
else:
bases = remove_parents(bases)
if len(bases) == 1:
return bases[0]
else:
name = "_".join(sorted(class_type[self.omap]))
if hasattr(MODULE.rdfclass, name):
return getattr(MODULE.rdfclass, name)
new_class = type(name,
tuple(bases),
{})
new_class.hierarchy = list_hierarchy(class_type[self.omap][0],
bases)
new_class.class_names = sorted([base.__name__ \
for base in bases \
if base not in [RdfClassBase, dict]])
setattr(MODULE.rdfclass, name, new_class)
return new_class
else:
return select_class(class_type[self.omap])
|
Returns the instantiated class from the class list.
args:
class_type: dictionary with rdf_types
|
27,747 |
def _populate_cparams(self, img_array, mct=None, cratios=None, psnr=None,
cinema2k=None, cinema4k=None, irreversible=None,
cbsize=None, eph=None, grid_offset=None, modesw=None,
numres=None, prog=None, psizes=None, sop=None,
subsam=None, tilesize=None, colorspace=None):
other_args = (mct, cratios, psnr, irreversible, cbsize, eph,
grid_offset, modesw, numres, prog, psizes, sop, subsam)
if (((cinema2k is not None or cinema4k is not None) and
(not all([arg is None for arg in other_args])))):
msg = ("Cannot specify cinema2k/cinema4k along with any other "
"options.")
raise IOError(msg)
if cratios is not None and psnr is not None:
msg = "Cannot specify cratios and psnr options together."
raise IOError(msg)
if version.openjpeg_version_tuple[0] == 1:
cparams = opj.set_default_encoder_parameters()
else:
cparams = opj2.set_default_encoder_parameters()
outfile = self.filename.encode()
num_pad_bytes = opj2.PATH_LEN - len(outfile)
outfile += b * num_pad_bytes
cparams.outfile = outfile
if self.filename[-4:].endswith((, )):
cparams.codec_fmt = opj2.CODEC_JP2
else:
cparams.codec_fmt = opj2.CODEC_J2K
cparams.tcp_rates[0] = 0
cparams.tcp_numlayers = 1
cparams.cp_disto_alloc = 1
cparams.irreversible = 1 if irreversible else 0
if cinema2k is not None:
self._cparams = cparams
self._set_cinema_params(, cinema2k)
return
if cinema4k is not None:
self._cparams = cparams
self._set_cinema_params(, cinema4k)
return
if cbsize is not None:
cparams.cblockw_init = cbsize[1]
cparams.cblockh_init = cbsize[0]
if cratios is not None:
cparams.tcp_numlayers = len(cratios)
for j, cratio in enumerate(cratios):
cparams.tcp_rates[j] = cratio
cparams.cp_disto_alloc = 1
cparams.csty |= 0x02 if sop else 0
cparams.csty |= 0x04 if eph else 0
if grid_offset is not None:
cparams.image_offset_x0 = grid_offset[1]
cparams.image_offset_y0 = grid_offset[0]
if modesw is not None:
for shift in range(6):
power_of_two = 1 << shift
if modesw & power_of_two:
cparams.mode |= power_of_two
if numres is not None:
cparams.numresolution = numres
if prog is not None:
cparams.prog_order = core.PROGRESSION_ORDER[prog.upper()]
if psnr is not None:
cparams.tcp_numlayers = len(psnr)
for j, snr_layer in enumerate(psnr):
cparams.tcp_distoratio[j] = snr_layer
cparams.cp_fixed_quality = 1
if psizes is not None:
for j, (prch, prcw) in enumerate(psizes):
cparams.prcw_init[j] = prcw
cparams.prch_init[j] = prch
cparams.csty |= 0x01
cparams.res_spec = len(psizes)
if subsam is not None:
cparams.subsampling_dy = subsam[0]
cparams.subsampling_dx = subsam[1]
if tilesize is not None:
cparams.cp_tdx = tilesize[1]
cparams.cp_tdy = tilesize[0]
cparams.tile_size_on = opj2.TRUE
if mct is None:
cparams.tcp_mct = 1 if self._colorspace == opj2.CLRSPC_SRGB else 0
else:
if self._colorspace == opj2.CLRSPC_GRAY:
msg = ("Cannot specify usage of the multi component transform "
"if the colorspace is gray.")
raise IOError(msg)
cparams.tcp_mct = 1 if mct else 0
self._validate_compression_params(img_array, cparams, colorspace)
self._cparams = cparams
|
Directs processing of write method arguments.
Parameters
----------
img_array : ndarray
Image data to be written to file.
kwargs : dictionary
Non-image keyword inputs provided to write method.
|
27,748 |
def select_upstream(self, device: devicetools.Device) -> 'Selection':
upstream = self.search_upstream(device)
self.nodes = upstream.nodes
self.elements = upstream.elements
return self
|
Restrict the current selection to the network upstream of the given
starting point, including the starting point itself.
See the documentation on method |Selection.search_upstream| for
additional information.
|
27,749 |
def make_method_names(self):
lst = []
for group in self.all_groups:
for single in group.singles:
name, english = single.name, single.english
if english[1:-1] != name.replace(, ):
lst.extend(self.tokens.make_name_modifier(not group.root, single.identifier, english))
return lst
|
Create tokens for setting __testname__ on functions
|
27,750 |
def allele_support_df(loci, sources):
return pandas.DataFrame(
allele_support_rows(loci, sources),
columns=EXPECTED_COLUMNS)
|
Returns a DataFrame of allele counts for all given loci in the read sources
|
27,751 |
def find_match_command(self, rule):
command_string = rule[]
command_list = command_string.split()
self.logdebug( %
(command_list, self.original_command_list))
if rule.get():
self.logdebug(
)
if (self.original_command_list[:len(command_list)] ==
command_list):
self.logdebug()
return {: self.original_command_list}
else:
self.logdebug()
elif rule.get():
if re.search(command_string, self.original_command_string):
return {: self.original_command_list}
elif command_list == self.original_command_list:
return {: command_list}
|
Return a matching (possibly munged) command, if found in rule.
|
27,752 |
def build_casc(ObsData, hourly=True,level=9,
months=None,
avg_stats=True,
percentile=50):
list_seasonal_casc = list()
if months is None:
months = [np.arange(12) + 1]
for cur_months in months:
vdn = seasonal_subset(ObsData, cur_months)
if len(ObsData.precip[np.isnan(ObsData.precip)]) > 0:
ObsData.precip[np.isnan(ObsData.precip)] = 0
casc_opt = melodist.cascade.CascadeStatistics()
casc_opt.percentile = percentile
list_casc_opt = list()
count = 0
if hourly:
aggre_level = 5
else:
aggre_level = level
thresholds = np.zeros(aggre_level)
for i in range(0, aggre_level):
casc_opt_i, vdn = aggregate_precipitation(vdn, hourly, \
percentile=percentile)
thresholds[i] = casc_opt_i.threshold
copy_of_casc_opt_i = copy.copy(casc_opt_i)
list_casc_opt.append(copy_of_casc_opt_i)
n_vdn = len(vdn)
casc_opt_i * n_vdn
casc_opt + casc_opt_i
count = count + n_vdn
casc_opt * (1. / count)
casc_opt.threshold = thresholds
if avg_stats:
stat_obj = casc_opt
else:
stat_obj = list_casc_opt
list_seasonal_casc.append(stat_obj)
return list_seasonal_casc
|
Builds the cascade statistics of observed data for disaggregation
Parameters
-----------
ObsData : pd.Series
hourly=True -> hourly obs data
else -> 5min data (disaggregation level=9 (default), 10, 11)
months : numpy array of ints
Months for each seasons to be used for statistics (array of
numpy array, default=1-12, e.g., [np.arange(12) + 1])
avg_stats : bool
average statistics for all levels True/False (default=True)
percentile : int, float
percentile for splitting the dataset in small and high
intensities (default=50)
Returns
-------
list_seasonal_casc :
list holding the results
|
27,753 |
def create(self, path, data, **kwargs):
    # WebHDFS two-step create: the first request returns a redirect whose
    # Location header is the datanode URL to upload the data to.
    metadata_response = self._put(
        path, 'CREATE', expected_status=httplib.TEMPORARY_REDIRECT, **kwargs)
    assert not metadata_response.content
    data_response = self._requests_session.put(
        metadata_response.headers['location'], data=data, **self._requests_kwargs)
_check_response(data_response, expected_status=httplib.CREATED)
assert not data_response.content
|
Create a file at the given path.
:param data: ``bytes`` or a ``file``-like object to upload
:param overwrite: If a file already exists, should it be overwritten?
:type overwrite: bool
:param blocksize: The block size of a file.
:type blocksize: long
:param replication: The number of replications of a file.
:type replication: short
:param permission: The permission of a file/directory. Any radix-8 integer (leading zeros
may be omitted.)
:type permission: octal
:param buffersize: The size of the buffer used in transferring data.
:type buffersize: int
|
27,754 |
def _analyze_read_write(self):
write_var = [x.variables_written_as_expression for x in self.nodes]
write_var = [x for x in write_var if x]
write_var = [item for sublist in write_var for item in sublist]
write_var = list(set(write_var))
write_var = [next(obj) for i, obj in groupby(sorted(write_var, key=lambda x: str(x)), lambda x: str(x))]
self._expression_vars_written = write_var
write_var = [x.variables_written for x in self.nodes]
write_var = [x for x in write_var if x]
write_var = [item for sublist in write_var for item in sublist]
write_var = list(set(write_var))
write_var = [next(obj) for i, obj in\
groupby(sorted(write_var, key=lambda x: str(x)), lambda x: str(x))]
self._vars_written = write_var
read_var = [x.variables_read_as_expression for x in self.nodes]
read_var = [x for x in read_var if x]
read_var = [item for sublist in read_var for item in sublist]
read_var = [next(obj) for i, obj in\
groupby(sorted(read_var, key=lambda x: str(x)), lambda x: str(x))]
self._expression_vars_read = read_var
read_var = [x.variables_read for x in self.nodes]
read_var = [x for x in read_var if x]
read_var = [item for sublist in read_var for item in sublist]
read_var = [next(obj) for i, obj in\
groupby(sorted(read_var, key=lambda x: str(x)), lambda x: str(x))]
self._vars_read = read_var
self._state_vars_written = [x for x in self.variables_written if\
isinstance(x, StateVariable)]
self._state_vars_read = [x for x in self.variables_read if\
isinstance(x, (StateVariable))]
self._solidity_vars_read = [x for x in self.variables_read if\
isinstance(x, (SolidityVariable))]
self._vars_read_or_written = self._vars_written + self._vars_read
slithir_variables = [x.slithir_variables for x in self.nodes]
slithir_variables = [x for x in slithir_variables if x]
self._slithir_variables = [item for sublist in slithir_variables for item in sublist]
|
Compute variables read/written/...
|
27,755 |
def value(self):
if self._wrapped is not self.Null:
return self._wrapped
else:
return self.obj
|
Return the wrapped object if one has been set, otherwise the original object.
|
27,756 |
def CEscape(text, as_utf8):
Ord = ord if isinstance(text, six.string_types) else lambda x: x
    if as_utf8:
        return ''.join(_cescape_utf8_to_str[Ord(c)] for c in text)
    return ''.join(_cescape_byte_to_str[Ord(c)] for c in text)
|
Escape a bytes string for use in an ascii protocol buffer.
text.encode('string_escape') does not seem to satisfy our needs as it
encodes unprintable characters using two-digit hex escapes whereas our
C++ unescaping function allows hex escapes to be any length. So,
"\0011".encode('string_escape') ends up being "\\x011", which will be
decoded in C++ as a single-character string with char code 0x11.
Args:
text: A byte string to be escaped
as_utf8: Specifies if result should be returned in UTF-8 encoding
Returns:
Escaped string
|
27,757 |
def deprecated(will_be=None, on_version=None, name=None):
def outer_function(function):
if name is None:
_name = function.__name__
else:
_name = name
        # Base message text assumed; the original string literal was lost.
        warning_msg = '%s is deprecated.' % _name
        if will_be is not None and on_version is not None:
            warning_msg += " It will be %s on version %s" % (
                will_be,
                '.'.join(map(str, on_version)))
@wraps(function)
def inner_function(*args, **kwargs):
warnings.warn(warning_msg,
category=DeprecationWarning,
stacklevel=2)
return function(*args, **kwargs)
return inner_function
return outer_function
|
Function decorator that warns about deprecation upon function invocation.
:param will_be: str representing the target action on the deprecated function
:param on_version: tuple representing a SW version
:param name: name of the entity to be deprecated (useful when decorating
__init__ methods so you can specify the deprecated class name)
:return: callable
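A hypothetical usage of the decorator above:
@deprecated(will_be="removed", on_version=(2, 0, 0))
def old_api(x):
    return x * 2

old_api(3)  # emits a DeprecationWarning pointing at the caller (stacklevel=2)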
|
27,758 |
def flags2text(self):
r = []
for v in self.flags_dict.keys():
if self.flags & v:
r.append(self.flags_dict[v])
return r
|
parse the `self.flags` field and create a list of `CKF_*` strings
corresponding to bits set in flags
:return: a list of strings
:rtype: list
|
27,759 |
def setUp(self, tp):
self._item = tp
assert tp.isComplex() is True and tp.content.isSimple() is True,\
%tp.content.getItemTrace()
simple = tp.content
dv = simple.content
assert dv.isExtension() is True or dv.isRestriction() is True,\
\
%tp.content.getItemTrace()
self.name = tp.getAttribute()
self.ns = tp.getTargetNamespace()
self.content.attributeContent = dv.getAttributeContent()
base = dv.getAttribute()
if base is not None:
self.sKlass = BTI.get_typeclass( base[1], base[0] )
if not self.sKlass:
self.sKlass,self.sKlassNS = base[1], base[0]
self.attrComponents = self._setAttributes(
self.content.attributeContent
)
return
raise Wsdl2PythonError,\
%tp.getItemTrace()
|
tp -- complexType/simpleContent/[Extension,Restriction]
|
27,760 |
def git_merge(base, head, no_ff=False):
    pretend = context.get('pretend', False)
    branch = git.current_branch(refresh=True)
    if branch.name != base and not pretend:
        git_checkout(base)
    args = []
    if no_ff:
        args.append('--no-ff')
    log.info("Merging <33>{}<32> into <33>{}<32>", head, base)
    # Command template assumed; the original string literal was lost.
    shell.run('git merge {args} {branch}'.format(
        args=' '.join(args),
        branch=head,
    ))
    if branch.name != base and not pretend:
        git_checkout(branch.name)
|
Merge *head* into *base*.
Args:
base (str):
The base branch. *head* will be merged into this branch.
head (str):
The branch that will be merged into *base*.
no_ff (bool):
If set to **True** it will force git to create merge commit. If set
to **False** (default) it will do a fast-forward merge if possible.
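A hypothetical call of the helper above, merging a feature branch with a forced merge commit:
git_merge("develop", "feature/login", no_ff=True)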
|
27,761 |
def _linux_os_release():
    pretty_name = ''
    ashtray = {}
    keys = ['NAME', 'VERSION_ID']
    try:
        with open(os.path.join('/etc', 'os-release')) as f:
            for line in f:
                for key in keys:
                    if line.startswith(key):
                        # Lines look like NAME="Ubuntu"; strip the quotes.
                        ashtray[key] = re.sub(r'"', '', line.strip().split('=')[1])
    except (OSError, IOError):
        return pretty_name
    if ashtray:
        if 'NAME' in ashtray:
            pretty_name = ashtray['NAME']
        if 'VERSION_ID' in ashtray:
            pretty_name += ' {}'.format(ashtray['VERSION_ID'])
    return pretty_name
|
Try to determine the name of a Linux distribution.
This function checks for the /etc/os-release file.
It takes the name from the 'NAME' field and the version from 'VERSION_ID'.
An empty string is returned if the above values cannot be determined.
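An illustration of the parsing described above, run against an in-memory sample instead of /etc/os-release (values are made up):
import io
import re

sample = io.StringIO('NAME="Ubuntu"\nVERSION="20.04.6 LTS"\nVERSION_ID="20.04"\n')
ashtray = {}
for line in sample:
    for key in ("NAME", "VERSION_ID"):
        if line.startswith(key):
            ashtray[key] = re.sub(r'"', "", line.strip().split("=")[1])
print("{} {}".format(ashtray["NAME"], ashtray["VERSION_ID"]))  # Ubuntu 20.04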
|
27,762 |
def sampleCellsWithinColumns(numCellPairs, cellsPerColumn, numColumns, seed=42):
np.random.seed(seed)
cellPairs = []
for i in range(numCellPairs):
randCol = np.random.randint(numColumns)
randCells = np.random.choice(np.arange(cellsPerColumn), (2, ), replace=False)
cellsPair = randCol * cellsPerColumn + randCells
cellPairs.append(cellsPair)
return cellPairs
|
Generate indices of cell pairs, each pair of cells are from the same column
@return cellPairs (list) list of cell pairs
|
27,763 |
def add_node(self, node_or_ID, **kwds):
if not isinstance(node_or_ID, Node):
nodeID = str( node_or_ID )
if nodeID in self.nodes:
node = self.nodes[ self.nodes.index(nodeID) ]
else:
if self.default_node is not None:
node = self.default_node.clone_traits(copy="deep")
node.ID = nodeID
else:
node = Node(nodeID)
self.nodes.append( node )
else:
node = node_or_ID
if node in self.nodes:
node = self.nodes[ self.nodes.index(node_or_ID) ]
else:
self.nodes.append( node )
node.set( **kwds )
return node
|
Adds a node to the graph.
|
27,764 |
def _copy_deploy_scripts_for_hosts(self, domains):
with LogTask():
for host_name, host_spec in domains.iteritems():
host_metadata = host_spec.get(, {})
deploy_scripts = self._get_scripts(host_metadata)
new_scripts = self._copy_delpoy_scripts(deploy_scripts)
self._set_scripts(
host_metadata=host_metadata,
scripts=new_scripts,
)
return domains
|
Copy the deploy scripts for all the domains into the prefix scripts dir
Args:
domains(dict): spec with the domains info as when loaded from the
initfile
Returns:
    dict: the domains spec with the copied script paths set
|
27,765 |
def check_my_users(user):
user_data = my_users.get(user[])
if not user_data:
return False
elif user_data.get() == user[]:
return True
return False
|
Check if user exists and its credentials.
Take a look at encrypt_app.py and encrypt_cli.py
to see how to encrypt passwords
|
27,766 |
def reset_permission_factories(self):
for key in (, , , ):
full_key = .format(key)
if full_key in self.__dict__:
del self.__dict__[full_key]
|
Remove cached permission factories.
|
27,767 |
def main(self, standalone=False):
window = _Tkinter.Tk()
sc = _TreeWidget.ScrolledCanvas(window, bg="white",\
highlightthickness=0, takefocus=1)
sc.frame.pack(expand=1, fill="both")
item = _ReferrerTreeItem(window, self.get_tree(), self)
node = _TreeNode(sc.canvas, None, item)
node.expand()
if standalone:
window.mainloop()
|
Create interactive browser window.
keyword arguments
standalone -- Set to true, if the browser is not attached to other
windows
|
27,768 |
def prune_unspecified_categories(modules, categories):
res = {}
for mod_name, mod_info in modules.items():
mod_categories = mod_info.get("categories", all_categories)
for category in categories:
if category in mod_categories:
break
else:
continue
for input_name, input_info in mod_info["inputs"].items():
for c in input_info["categories"]:
if c in categories:
break
else:
del mod_info["inputs"][input_name]
for output_name, output_info in mod_info["outputs"].items():
for c in output_info["categories"]:
if c in categories:
break
else:
del mod_info["outputs"][output_name]
res[mod_name] = mod_info
return res
|
Removes unspecified module categories.
Mutates dictionary and returns it.
|
27,769 |
def get_location(self, location, columns=None, as_dict=False, index=True):
if columns is None:
columns = self._columns
elif not isinstance(columns, list):
c = self._columns.index(columns)
return self._data[c][location]
elif all([isinstance(i, bool) for i in columns]):
if len(columns) != len(self._columns):
raise ValueError()
columns = list(compress(self._columns, columns))
data = dict()
for column in columns:
c = self._columns.index(column)
data[column] = self._data[c][location]
index_value = self._index[location]
if as_dict:
if index:
data[self._index_name] = index_value
return data
else:
data = {k: [data[k]] for k in data}
return DataFrame(data=data, index=[index_value], columns=columns, index_name=self._index_name,
sort=self._sort)
|
For an index location and either (1) list of columns return a DataFrame or dictionary of the values or
(2) single column name and return the value of that cell. This is optimized for speed because it does not need
to lookup the index location with a search. Also can accept relative indexing from the end of the DataFrame
in standard python notation [-3, -2, -1]
:param location: index location in standard python form of positive or negative number
:param columns: list of columns, single column name, or None to include all columns
:param as_dict: if True then return a dictionary
:param index: if True then include the index in the dictionary if as_dict=True
:return: DataFrame or dictionary if columns is a list or value if columns is a single column name
|
27,770 |
def create_widget(self):
d = self.declaration
button_type = UIButton.UIButtonTypeSystem if d.flat else UIButton.UIButtonTypeRoundedRect
self.widget = UIButton(buttonWithType=button_type)
|
Create the toolkit widget for the proxy object.
|
27,771 |
def _from_dict_dict(cls, dic):
return cls({_convert_id(i): v for i, v in dic.items()})
|
Takes a dict {id : dict_attributes}
|
27,772 |
def beam_best_first(problem, beam_size=100, iterations_limit=0, viewer=None):
return _local_search(problem,
_first_expander,
iterations_limit=iterations_limit,
fringe_size=beam_size,
random_initial_states=True,
stop_when_no_better=iterations_limit==0,
viewer=viewer)
|
Beam search best first.
beam_size is the size of the beam.
If iterations_limit is specified, the algorithm will end after that
number of iterations. Else, it will continue until it can't find a
better node than the current one.
Requires: SearchProblem.actions, SearchProblem.result, and
SearchProblem.value.
|
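A sketch of a toy problem, assuming a SearchProblem base class with the interface named in the docstring and a constructor that accepts initial_state (an assumption); generate_random_state is included because the call above passes random_initial_states=True:
import random

class MaximizeNumber(SearchProblem):
    def actions(self, state):
        return [-1, +1]

    def result(self, state, action):
        return state + action

    def value(self, state):
        return -abs(state - 42)   # best value at state == 42

    def generate_random_state(self):
        return random.randint(-100, 100)

best = beam_best_first(MaximizeNumber(initial_state=0),
                       beam_size=10, iterations_limit=100)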
27,773 |
def get_artist_hotttnesss(self, cache=True):
    # string literals below were stripped in extraction and are inferred from
    # the docstring; the cached attribute is keyed by 'artist_hotttnesss'
    if not (cache and ('artist_hotttnesss' in self.cache)):
        response = self.get_attribute('profile', bucket='artist_hotttnesss')
        self.cache['artist_hotttnesss'] = response['songs'][0]['artist_hotttnesss']
    return self.cache['artist_hotttnesss']
|
Get our numerical description of how hottt a song's artist currently is
Args:
Kwargs:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
Returns:
A float representing hotttnesss.
Example:
>>> s = song.Song('SOOLGAZ127F3E1B87C')
>>> s.artist_hotttnesss
0.45645633000000002
>>> s.get_artist_hotttnesss()
0.45645633000000002
>>>
|
27,774 |
def has_parent_logs(self, log_id):
if self._catalog_session is not None:
return self._catalog_session.has_parent_catalogs(catalog_id=log_id)
return self._hierarchy_session.has_parents(id_=log_id)
|
Tests if the ``Log`` has any parents.
arg: log_id (osid.id.Id): the ``Id`` of a log
return: (boolean) - ``true`` if the log has parents, ``false``
otherwise
raise: NotFound - ``log_id`` is not found
raise: NullArgument - ``log_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
|
27,775 |
def setup_venv(self):
venv = self.opts.venv
if not venv:
venv = os.environ.get()
if not venv and self.config[]:
venv = self.config[].get()
if venv:
if not venv.endswith():
add_path = os.path.join(, )
self.logger.debug(f)
venv = os.path.join(venv, add_path)
self.logger.debug(f)
self.cmd = f
|
Setup virtualenv if necessary.
|
27,776 |
def csw_global_dispatch(request, url=None, catalog_id=None):
if request.user.is_authenticated():
settings.REGISTRY_PYCSW[][] =
env = request.META.copy()
if request.method == :
from StringIO import StringIO
env[] = StringIO(request.body)
env.update({: os.path.dirname(__file__),
: request.build_absolute_uri()})
if url is not None:
settings.REGISTRY_PYCSW[][] = url
if catalog_id is not None:
settings.REGISTRY_PYCSW[][] = % catalog_id
csw = server.Csw(settings.REGISTRY_PYCSW, env)
content = csw.dispatch_wsgi()
if isinstance(content, list):
content = content[1]
response = HttpResponse(content, content_type=csw.contenttype)
response[] =
return response
|
pycsw wrapper
|
27,777 |
def check_backup_count_and_state(self, site):
basebackups = self.get_remote_basebackups_info(site)
self.log.debug("Found %r basebackups", basebackups)
if basebackups:
last_backup_time = basebackups[-1]["metadata"]["start-time"]
else:
last_backup_time = None
allowed_basebackup_count = self.config["backup_sites"][site]["basebackup_count"]
if allowed_basebackup_count is None:
allowed_basebackup_count = len(basebackups)
while len(basebackups) > allowed_basebackup_count:
self.log.warning("Too many basebackups: %d > %d, %r, starting to get rid of %r",
len(basebackups), allowed_basebackup_count, basebackups, basebackups[0]["name"])
basebackup_to_be_deleted = basebackups.pop(0)
pg_version = basebackup_to_be_deleted["metadata"].get("pg-version")
last_wal_segment_still_needed = 0
if basebackups:
last_wal_segment_still_needed = basebackups[0]["metadata"]["start-wal-segment"]
if last_wal_segment_still_needed:
self.delete_remote_wal_before(last_wal_segment_still_needed, site, pg_version)
self.delete_remote_basebackup(site, basebackup_to_be_deleted["name"], basebackup_to_be_deleted["metadata"])
self.state["backup_sites"][site]["basebackups"] = basebackups
return last_backup_time
|
Look up basebackups from the object store, prune any extra
backups and return the datetime of the latest backup.
|
27,778 |
def distance_to_edge(self, skydir):
xpix, ypix = skydir.to_pixel(self.wcs, origin=0)
deltax = np.array((xpix - self._pix_center[0]) * self._pix_size[0],
ndmin=1)
deltay = np.array((ypix - self._pix_center[1]) * self._pix_size[1],
ndmin=1)
deltax = np.abs(deltax) - 0.5 * self._width[0]
deltay = np.abs(deltay) - 0.5 * self._width[1]
m0 = (deltax < 0) & (deltay < 0)
m1 = (deltax > 0) & (deltay < 0)
m2 = (deltax < 0) & (deltay > 0)
m3 = (deltax > 0) & (deltay > 0)
mx = np.abs(deltax) <= np.abs(deltay)
my = np.abs(deltay) < np.abs(deltax)
delta = np.zeros(len(deltax))
delta[(m0 & mx) | (m3 & my) | m1] = deltax[(m0 & mx) | (m3 & my) | m1]
delta[(m0 & my) | (m3 & mx) | m2] = deltay[(m0 & my) | (m3 & mx) | m2]
return delta
|
Return the angular distance between the given direction and
the edge of the projection.
|
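A self-contained numpy sketch of the masked-quadrant logic above, for offsets from the center of an axis-aligned rectangle (the sky-to-pixel conversion is omitted); negative values are inside the edge, positive outside:
import numpy as np

def signed_distance_to_rect_edge(dx, dy, width_x, width_y):
    dx = np.abs(np.atleast_1d(dx)) - 0.5 * width_x
    dy = np.abs(np.atleast_1d(dy)) - 0.5 * width_y
    m0 = (dx < 0) & (dy < 0)   # inside the rectangle
    m1 = (dx > 0) & (dy < 0)   # outside in x only
    m2 = (dx < 0) & (dy > 0)   # outside in y only
    m3 = (dx > 0) & (dy > 0)   # outside past a corner
    mx = np.abs(dx) <= np.abs(dy)
    my = np.abs(dy) < np.abs(dx)
    delta = np.zeros(len(dx))
    delta[(m0 & mx) | (m3 & my) | m1] = dx[(m0 & mx) | (m3 & my) | m1]
    delta[(m0 & my) | (m3 & mx) | m2] = dy[(m0 & my) | (m3 & mx) | m2]
    return delta

# The center of a 10 x 4 rectangle is 2 units inside its nearest edge.
print(signed_distance_to_rect_edge(0.0, 0.0, 10.0, 4.0))   # [-2.]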
27,779 |
async def set_analog_latch(self, pin, threshold_type, threshold_value,
cb=None, cb_type=None):
if Constants.LATCH_GT <= threshold_type <= Constants.LATCH_LTE:
        key = 'A' + str(pin)  # 'A' prefix inferred from the docstring's pin designator note
if 0 <= threshold_value <= 1023:
self.latch_map[key] = [Constants.LATCH_ARMED, threshold_type,
threshold_value, 0, 0, cb, cb_type]
return True
else:
return False
|
This method "arms" an analog pin for its data to be latched and saved
in the latching table
If a callback method is provided, when latching criteria is achieved,
the callback function is called with latching data notification.
Data returned in the callback list has the pin number as the
first element,
:param pin: Analog pin number
(value following an 'A' designator, i.e. A5 = 5)
:param threshold_type: ANALOG_LATCH_GT | ANALOG_LATCH_LT |
ANALOG_LATCH_GTE | ANALOG_LATCH_LTE
:param threshold_value: numerical value - between 0 and 1023
:param cb: callback method
:param cb_type: Constants.CB_TYPE_DIRECT = direct call or
Constants.CB_TYPE_ASYNCIO = asyncio coroutine
:returns: True if successful, False if parameter data is invalid
|
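A hedged usage sketch; 'board' is assumed to be an already-connected instance of the class defining set_analog_latch, and the constants are the ones referenced in the code and docstring above:
async def watch_pin_a2(board):
    def on_latch(data):
        # per the docstring, data[0] is the pin number
        print('latch fired on pin', data[0], 'data:', data)

    armed = await board.set_analog_latch(2, Constants.LATCH_GT, 512,
                                          cb=on_latch,
                                          cb_type=Constants.CB_TYPE_DIRECT)
    if not armed:
        print('invalid latch parameters')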
27,780 |
def _ReadLine(self, file_object):
    # the '\n', '\r' and empty-string literals were stripped in extraction and
    # are inferred from the line-splitting logic
    if len(self._buffer) < self._buffer_size:
        content = file_object.read(self._buffer_size)
        content = content.decode(self._encoding)
        self._buffer = ''.join([self._buffer, content])
    line, new_line, self._buffer = self._buffer.partition('\n')
    if not line and not new_line:
        line = self._buffer
        self._buffer = ''
    self._current_offset += len(line)
    if line.endswith('\r'):
        line = line[:-len('\r')]
    if new_line:
        line = ''.join([line, '\n'])
        self._current_offset += len('\n')
    return line
|
Reads a line from the file object.
Args:
file_object (dfvfs.FileIO): file-like object.
Returns:
str: line read from the file-like object.
|
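A standalone sketch of the same buffered line-splitting technique, using an in-memory bytes stream in place of the dfvfs file object:
import io

def read_line(stream, buffer, buffer_size=1024, encoding='utf-8'):
    if len(buffer) < buffer_size:
        buffer += stream.read(buffer_size).decode(encoding)
    line, newline, buffer = buffer.partition('\n')
    if not line and not newline:
        line, buffer = buffer, ''
    if line.endswith('\r'):
        line = line[:-1]
    if newline:
        line += '\n'
    return line, buffer

stream = io.BytesIO(b'first\r\nsecond\n')
line, rest = read_line(stream, '')
print(repr(line))   # 'first\n' -- carriage return stripped, newline kept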
27,781 |
def encrypt_dynamodb_item(item, crypto_config):
if crypto_config.attribute_actions.take_no_actions:
return item.copy()
for reserved_name in ReservedAttributes:
if reserved_name.value in item:
            raise EncryptionError(
                # message text inferred; the original string literal was stripped
                'Reserved attribute name "{}" is not allowed in plaintext item.'.format(reserved_name.value)
            )
encryption_materials = crypto_config.encryption_materials()
inner_material_description = encryption_materials.material_description.copy()
try:
encryption_materials.encryption_key
except AttributeError:
if crypto_config.attribute_actions.contains_action(CryptoAction.ENCRYPT_AND_SIGN):
raise EncryptionError(
"Attribute actions ask for some attributes to be encrypted but no encryption key is available"
)
encrypted_item = item.copy()
else:
encryption_mode = MaterialDescriptionValues.CBC_PKCS5_ATTRIBUTE_ENCRYPTION.value
inner_material_description[MaterialDescriptionKeys.ATTRIBUTE_ENCRYPTION_MODE.value] = encryption_mode
algorithm_descriptor = encryption_materials.encryption_key.algorithm + encryption_mode
encrypted_item = {}
for name, attribute in item.items():
if crypto_config.attribute_actions.action(name) is CryptoAction.ENCRYPT_AND_SIGN:
encrypted_item[name] = encrypt_attribute(
attribute_name=name,
attribute=attribute,
encryption_key=encryption_materials.encryption_key,
algorithm=algorithm_descriptor,
)
else:
encrypted_item[name] = attribute.copy()
signature_attribute = sign_item(encrypted_item, encryption_materials.signing_key, crypto_config)
encrypted_item[ReservedAttributes.SIGNATURE.value] = signature_attribute
try:
inner_material_description[
MaterialDescriptionKeys.SIGNING_KEY_ALGORITHM.value
] = encryption_materials.signing_key.signing_algorithm()
except NotImplementedError:
pass
material_description_attribute = serialize_material_description(inner_material_description)
encrypted_item[ReservedAttributes.MATERIAL_DESCRIPTION.value] = material_description_attribute
return encrypted_item
|
Encrypt a DynamoDB item.
>>> from dynamodb_encryption_sdk.encrypted.item import encrypt_dynamodb_item
>>> plaintext_item = {
... 'some': {'S': 'data'},
... 'more': {'N': '5'}
... }
>>> encrypted_item = encrypt_dynamodb_item(
... item=plaintext_item,
... crypto_config=my_crypto_config
... )
.. note::
This handles DynamoDB-formatted items and is for use with the boto3 DynamoDB client.
:param dict item: Plaintext DynamoDB item
:param CryptoConfig crypto_config: Cryptographic configuration
:returns: Encrypted and signed DynamoDB item
:rtype: dict
|
27,782 |
def _get_server(vm_, volumes, nics):
    vm_size = _override_size(vm_)
    # config keys and dict keys below were stripped in extraction and are
    # inferred from the surrounding variable and keyword names
    availability_zone = config.get_cloud_config_value(
        'availability_zone', vm_, __opts__, default=None,
        search_global=False
    )
    cpu_family = config.get_cloud_config_value(
        'cpu_family', vm_, __opts__, default=None,
        search_global=False
    )
    return Server(
        name=vm_['name'],
        ram=vm_size['ram'],
        availability_zone=availability_zone,
        cores=vm_size['cores'],
        cpu_family=cpu_family,
        create_volumes=volumes,
        nics=nics
    )
|
Construct server instance from cloud profile config
|
27,783 |
def same_disks(self, count=2):
ret = self
if len(self) > 0:
type_counter = Counter(self.drive_type)
drive_type, counts = type_counter.most_common()[0]
self.set_drive_type(drive_type)
if len(self) > 0:
size_counter = Counter(self.capacity)
size, counts = size_counter.most_common()[0]
self.set_capacity(size)
if len(self) >= count:
indices = self.index[:count]
self.set_indices(indices)
else:
self.set_indices()
return ret
|
Filter self down to the required number of disks with the same size and type.
Select the disks with the most common drive type and the most common
capacity. If not enough disks are available, set self to empty.
:param count: number of disks to retrieve
:return: disk list
|
27,784 |
def _shutdown(self):
if self._channel:
_log.info("Halting %r consumer sessions", self._channel.consumer_tags)
self._running = False
if self._connection and self._connection.is_open:
self._connection.close()
for signum in (signal.SIGTERM, signal.SIGINT):
signal.signal(signum, signal.SIG_DFL)
|
Gracefully shut down the consumer and exit.
|
27,785 |
def decrypt(self, k, a, iv, e, t):
cipher = Cipher(algorithms.AES(k), modes.GCM(iv, t),
backend=self.backend)
decryptor = cipher.decryptor()
decryptor.authenticate_additional_data(a)
return decryptor.update(e) + decryptor.finalize()
|
Decrypt according to the selected encryption and hashing
functions.
:param k: Encryption key (optional)
:param a: Additional Authenticated Data
:param iv: Initialization Vector
:param e: Ciphertext
:param t: Authentication Tag
Returns plaintext or raises an error
|
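A self-contained round trip with the cryptography package showing where each parameter of the decrypt method above comes from (k=key, a=AAD, iv=nonce, e=ciphertext, t=tag):
import os
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes

backend = default_backend()
key = os.urandom(32)           # AES-256 key
iv = os.urandom(12)            # 96-bit nonce recommended for GCM
aad = b'header-data'
plaintext = b'attack at dawn'

encryptor = Cipher(algorithms.AES(key), modes.GCM(iv), backend=backend).encryptor()
encryptor.authenticate_additional_data(aad)
ciphertext = encryptor.update(plaintext) + encryptor.finalize()
tag = encryptor.tag

decryptor = Cipher(algorithms.AES(key), modes.GCM(iv, tag), backend=backend).decryptor()
decryptor.authenticate_additional_data(aad)
assert decryptor.update(ciphertext) + decryptor.finalize() == plaintext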
27,786 |
def linOriginRegression(points):
j = sum([ i[0] for i in points ])
k = sum([ i[1] for i in points ])
if j != 0:
return k/j, j, k
return 1, j, k
|
Computes a linear regression through the origin; returns the slope (sum_y / sum_x, or 1 if sum_x is zero) along with sum_x and sum_y.
|
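A quick worked example: for points on the line y = 2x the returned slope is sum(y) / sum(x) = 2.
slope, sum_x, sum_y = linOriginRegression([(1, 2), (2, 4), (3, 6)])
print(slope, sum_x, sum_y)   # 2.0 6 12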
27,787 |
def update_params_for_auth(self, headers, querys, auth_settings):
if self.auth_token_holder.token is not None:
headers[Configuration.AUTH_TOKEN_HEADER_NAME] = self.auth_token_holder.token
else:
        headers['Authorization'] = self.configuration.get_basic_auth_token()  # header name inferred: basic auth credentials go in Authorization
|
Updates header and query params based on authentication setting.
:param headers: Header parameters dict to be updated.
:param querys: Query parameters tuple list to be updated.
:param auth_settings: Authentication setting identifiers list.
|
27,788 |
def search_task_views(self, user, search_string):
magic = self._magic_json(
action=TouchWorksMagicConstants.ACTION_SEARCH_TASK_VIEWS,
parameter1=user,
parameter2=search_string)
response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
result = self._get_results_or_raise_if_magic_invalid(
magic,
response,
TouchWorksMagicConstants.RESULT_SEARCH_TASK_VIEWS)
return result
|
invokes TouchWorksMagicConstants.ACTION_SEARCH_TASK_VIEWS action
:return: JSON response
|
27,789 |
def solar_elevation(self, dateandtime, latitude, longitude):
if latitude > 89.8:
latitude = 89.8
if latitude < -89.8:
latitude = -89.8
if dateandtime.tzinfo is None:
zone = 0
utc_datetime = dateandtime
else:
zone = -dateandtime.utcoffset().total_seconds() / 3600.0
utc_datetime = dateandtime.astimezone(pytz.utc)
timenow = (
utc_datetime.hour
+ (utc_datetime.minute / 60.0)
+ (utc_datetime.second / 3600)
)
JD = self._julianday(dateandtime)
t = self._jday_to_jcentury(JD + timenow / 24.0)
theta = self._sun_declination(t)
eqtime = self._eq_of_time(t)
solarDec = theta
solarTimeFix = eqtime - (4.0 * -longitude) + (60 * zone)
trueSolarTime = (
dateandtime.hour * 60.0
+ dateandtime.minute
+ dateandtime.second / 60.0
+ solarTimeFix
)
while trueSolarTime > 1440:
trueSolarTime = trueSolarTime - 1440
hourangle = trueSolarTime / 4.0 - 180.0
if hourangle < -180:
hourangle = hourangle + 360.0
harad = radians(hourangle)
csz = sin(radians(latitude)) * sin(radians(solarDec)) + cos(
radians(latitude)
) * cos(radians(solarDec)) * cos(harad)
if csz > 1.0:
csz = 1.0
elif csz < -1.0:
csz = -1.0
zenith = degrees(acos(csz))
azDenom = cos(radians(latitude)) * sin(radians(zenith))
if abs(azDenom) > 0.001:
azRad = (
(sin(radians(latitude)) * cos(radians(zenith))) - sin(radians(solarDec))
) / azDenom
if abs(azRad) > 1.0:
if azRad < 0:
azRad = -1.0
else:
azRad = 1.0
azimuth = 180.0 - degrees(acos(azRad))
if hourangle > 0.0:
azimuth = -azimuth
else:
if latitude > 0.0:
azimuth = 180.0
else:
azimuth = 0.0
if azimuth < 0.0:
azimuth = azimuth + 360.0
exoatmElevation = 90.0 - zenith
if exoatmElevation > 85.0:
refractionCorrection = 0.0
else:
te = tan(radians(exoatmElevation))
if exoatmElevation > 5.0:
refractionCorrection = (
58.1 / te
- 0.07 / (te * te * te)
+ 0.000086 / (te * te * te * te * te)
)
elif exoatmElevation > -0.575:
step1 = -12.79 + exoatmElevation * 0.711
step2 = 103.4 + exoatmElevation * (step1)
step3 = -518.2 + exoatmElevation * (step2)
refractionCorrection = 1735.0 + exoatmElevation * (step3)
else:
refractionCorrection = -20.774 / te
refractionCorrection = refractionCorrection / 3600.0
solarzen = zenith - refractionCorrection
solarelevation = 90.0 - solarzen
return solarelevation
|
Calculate the elevation angle of the sun.
:param dateandtime: The date and time for which to calculate
the angle.
:type dateandtime: :class:`~datetime.datetime`
:param latitude: Latitude - Northern latitudes should be positive
:type latitude: float
:param longitude: Longitude - Eastern longitudes should be positive
:type longitude: float
:return: The elevation angle in degrees above the horizon.
:rtype: float
If `dateandtime` is a naive Python datetime then it is assumed to be
in the UTC timezone.
|
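A hedged usage sketch; 'calc' stands for an already-constructed instance of the class that defines solar_elevation (the class itself is not shown above). Naive datetimes are treated as UTC per the docstring, and west longitudes are negative:
import datetime

when = datetime.datetime(2019, 6, 21, 12, 0, 0)            # naive -> treated as UTC
elevation = calc.solar_elevation(when, 51.5074, -0.1278)   # London
print('solar elevation: {:.1f} degrees'.format(elevation))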
27,790 |
def _LeaseFlowProcessingReqests(self, cursor=None):
now = rdfvalue.RDFDatetime.Now()
expiry = now + rdfvalue.Duration("10m")
query =
id_str = utils.ProcessIdString()
args = {
"expiry": mysql_utils.RDFDatetimeToTimestamp(expiry),
"id": id_str,
"limit": 50,
}
updated = cursor.execute(query, args)
if updated == 0:
return []
query =
args = {
"expiry": mysql_utils.RDFDatetimeToTimestamp(expiry),
"id": id_str,
"updated": updated,
}
cursor.execute(query, args)
res = []
for timestamp, request in cursor.fetchall():
req = rdf_flows.FlowProcessingRequest.FromSerializedString(request)
req.timestamp = mysql_utils.TimestampToRDFDatetime(timestamp)
req.leased_until = expiry
req.leased_by = id_str
res.append(req)
return res
|
Leases a number of flow processing requests.
|
27,791 |
def default_validity_start():
start = datetime.now() - timedelta(days=1)
return start.replace(hour=0, minute=0, second=0, microsecond=0)
|
Sets validity_start field to 1 day before the current date
(avoids "certificate not valid yet" edge case).
In some cases, because of timezone differences, when certificates
were just created they were considered valid in a timezone (eg: Europe)
but not yet valid in another timezone (eg: US).
This function intentionally returns naive datetime (not timezone aware),
so that certificates are valid from 00:00 AM in all timezones.
|
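For illustration, the return value is midnight of the previous day as a naive datetime:
start = default_validity_start()
print(start)          # e.g. 2019-05-03 00:00:00 (yesterday, 00:00, no tzinfo)
print(start.tzinfo)   # None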
27,792 |
def dvhat(s1):
    assert len(s1) == 6  # compare length with ==, not an identity check
s1 = stypes.toDoubleVector(s1)
sout = stypes.emptyDoubleVector(6)
libspice.dvhat_c(s1, sout)
return stypes.cVectorToPython(sout)
|
Find the unit vector corresponding to a state vector and the
derivative of the unit vector.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dvhat_c.html
:param s1: State to be normalized.
:type s1: 6-Element Array of floats
:return: Unit vector s1 / abs(s1), and its time derivative.
:rtype: 6-Element Array of floats
|
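A small sketch calling dvhat on a made-up state vector (position plus velocity); it assumes the SPICE toolkit wrapper above is importable. The first three components of the result form a unit vector:
import math

state = [1.0, 2.0, 2.0, 0.1, 0.0, 0.0]   # position (km) + velocity (km/s)
unit_state = dvhat(state)
print(unit_state[:3])                     # unit vector along [1, 2, 2] -> [1/3, 2/3, 2/3]
assert math.isclose(sum(x * x for x in unit_state[:3]), 1.0, rel_tol=1e-9)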
27,793 |
def iscan(self, *, match=None, count=None):
return _ScanIter(lambda cur: self.scan(cur,
match=match, count=count))
|
Incrementally iterate the keys space using async for.
Usage example:
>>> async for key in redis.iscan(match='something*'):
... print('Matched:', key)
|
27,794 |
def download_docs(client, output_filename=None, expanded=False):
    # String literals below were stripped in extraction; the .jsons filenames
    # are inferred from the docstring and the progress message is illustrative.
    if output_filename is None:
        projname = _sanitize_filename(client.get()['name'])
        output_filename = '{}.jsons'.format(projname)
        counter = 0
        while os.access(output_filename, os.F_OK):
            counter += 1
            output_filename = '{}.{}.jsons'.format(projname, counter)
    print('Writing documents to {}'.format(output_filename))
    with open(output_filename, 'w', encoding='utf-8') as out:
        for doc in iterate_docs(client, expanded=expanded, progress=True):
            print(json.dumps(doc, ensure_ascii=False), file=out)
|
Given a LuminosoClient pointing to a project and a filename to write to,
retrieve all its documents in batches, and write them to a JSON lines
(.jsons) file with one document per line.
|
27,795 |
def format_search_results(self, search_results):
formatted_lines = []
for search_result in search_results:
lines = self._format_search_result(search_result)
formatted_lines.extend(lines)
return formatted_lines
|
Format search results.
Args:
search_results (list of `ResourceSearchResult`): Search results to format.
Returns:
List of 2-tuple: Text and color to print in.
|
27,796 |
def _access_control(self, access_control, my_media_group=None):
extension = None
if access_control is AccessControl.Private:
if my_media_group:
my_media_group.private = gdata.media.Private()
elif access_control is AccessControl.Unlisted:
from gdata.media import YOUTUBE_NAMESPACE
from atom import ExtensionElement
        kwargs = {
            # literals inferred: an accessControl element with action='list'
            # and permission='denied' is the standard way to mark a YouTube
            # video as unlisted
            "namespace": YOUTUBE_NAMESPACE,
            "attributes": {'action': 'list', 'permission': 'denied'},
        }
        extension = ([ExtensionElement('accessControl', **kwargs)])
return extension
|
Prepares the extension element for access control
Extension element is the optional parameter for the YouTubeVideoEntry
We use extension element to modify access control settings
Returns:
tuple of extension elements
|
27,797 |
def get_patches_ignore_regex(self):
match = re.search(r,
self.txt)
if not match:
return None
regex_string = match.group(1)
try:
return re.compile(regex_string)
except Exception:
return None
|
Returns a string representing a regex for filtering out patches
This string is parsed from a comment in the specfile that contains the
word patches_ignore followed by an equal sign.
For example, a comment as such:
# patches_ignore=(regex)
would mean this method returns the string '(regex)'
Only a very limited subset of characters are accepted so no fancy stuff
like matching groups etc.
|
27,798 |
def get_changes(self, commit_id, repository_id, project=None, top=None, skip=None):
route_values = {}
if project is not None:
route_values[] = self._serialize.url(, project, )
if commit_id is not None:
route_values[] = self._serialize.url(, commit_id, )
if repository_id is not None:
route_values[] = self._serialize.url(, repository_id, )
query_parameters = {}
if top is not None:
query_parameters[] = self._serialize.query(, top, )
if skip is not None:
query_parameters[] = self._serialize.query(, skip, )
response = self._send(http_method=,
location_id=,
version=,
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize(, response)
|
GetChanges.
Retrieve changes for a particular commit.
:param str commit_id: The id of the commit.
:param str repository_id: The id or friendly name of the repository. To use the friendly name, projectId must also be specified.
:param str project: Project ID or project name
:param int top: The maximum number of changes to return.
:param int skip: The number of changes to skip.
:rtype: :class:`<GitCommitChanges> <azure.devops.v5_0.git.models.GitCommitChanges>`
|
27,799 |
def create(self, doc_details):
title = % self.__class__.__name__
if self.model:
doc_details = self.model.validate(doc_details, path_to_root=, object_title= % title)
from copy import deepcopy
new_record = deepcopy(doc_details)
url = self.bucket_url +
response = requests.post(url, json=new_record)
if response.status_code not in (200, 201):
response = response.json()
raise Exception( % (title, response))
response = response.json()
new_record[] = response[]
new_record[] = response[]
return new_record
|
a method to create a new document in the collection
:param doc_details: dictionary with document details and user id value
:return: dictionary with document details and _id and _rev values
|