Unnamed: 0 (int64, 0–389k) | code (string, 26–79.6k chars) | docstring (string, 1–46.9k chars) |
---|---|---|
6,900 | def make_tophat_ee (lower, upper):
if not np.isfinite (lower):
raise ValueError ('tophat lower limit must be finite; got %r' % lower)
if not np.isfinite (upper):
raise ValueError ('tophat upper limit must be finite; got %r' % upper)
def range_tophat_ee (x):
x = np.asarray (x)
x1 = np.atleast_1d (x)
r = ((lower < x1) & (x1 < upper)).astype (x.dtype)
if x.ndim == 0:
return np.asscalar (r)
return r
range_tophat_ee.__doc__ = (
'Ranged tophat function, left-exclusive and right-exclusive. '
'Returns 1 if %g < x < %g, 0 otherwise.') % (lower, upper)
return range_tophat_ee | Return a ufunc-like tophat function on the defined range, left-exclusive
and right-exclusive. Returns 1 if lower < x < upper, 0 otherwise. |
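A quick doctest-style sketch of how the returned closure behaves (hypothetical; assumes numpy imported as np, an older NumPy where np.asscalar still exists, and the make_tophat_ee definition from this row in scope):
>>> f = make_tophat_ee(0.0, 1.0)
>>> f(np.array([-0.5, 0.5, 1.0]))
array([0., 1., 0.])
>>> f(0.5)
1.0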
6,901 | def _get_type(self, obj):
typever = obj[]
typesplit = typever.split()
return typesplit[0] + + typesplit[1] | Return the type of an object. |
6,902 | def cov_pmrapmdec_to_pmllpmbb(cov_pmradec,ra,dec,degree=False,epoch=2000.0):
if len(cov_pmradec.shape) == 3:
out= sc.zeros(cov_pmradec.shape)
ndata= out.shape[0]
lb = radec_to_lb(ra,dec,degree=degree,epoch=epoch)
for ii in range(ndata):
out[ii,:,:]= cov_pmradec_to_pmllbb_single(cov_pmradec[ii,:,:],
ra[ii],dec[ii],lb[ii,1],
degree,epoch)
return out
else:
l,b = radec_to_lb(ra,dec,degree=degree,epoch=epoch)
return cov_pmradec_to_pmllbb_single(cov_pmradec,ra,dec,b,degree,epoch) | NAME:
cov_pmrapmdec_to_pmllpmbb
PURPOSE:
propagate the proper motions errors through the rotation from (ra,dec) to (l,b)
INPUT:
covar_pmradec - uncertainty covariance matrix of the proper motion in ra (multplied with cos(dec)) and dec [2,2] or [:,2,2]
ra - right ascension
dec - declination
degree - if True, ra and dec are given in degrees (default=False)
epoch - epoch of ra,dec (right now only 2000.0 and 1950.0 are supported when not using astropy's transformations internally; when internally using astropy's coordinate transformations, epoch can be None for ICRS, 'JXXXX' for FK5, and 'BXXXX' for FK4)
OUTPUT:
covar_pmllbb [2,2] or [:,2,2] [pmll here is pmll x cos(b)]
HISTORY:
2010-04-12 - Written - Bovy (NYU) |
6,903 | def getOverlayTexture(self, ulOverlayHandle, pNativeTextureRef):
fn = self.function_table.getOverlayTexture
pNativeTextureHandle = c_void_p()
pWidth = c_uint32()
pHeight = c_uint32()
pNativeFormat = c_uint32()
pAPIType = ETextureType()
pColorSpace = EColorSpace()
pTextureBounds = VRTextureBounds_t()
result = fn(ulOverlayHandle, byref(pNativeTextureHandle), pNativeTextureRef, byref(pWidth), byref(pHeight), byref(pNativeFormat), byref(pAPIType), byref(pColorSpace), byref(pTextureBounds))
return result, pNativeTextureHandle.value, pWidth.value, pHeight.value, pNativeFormat.value, pAPIType, pColorSpace, pTextureBounds | Get the native texture handle/device for an overlay you have created.
On windows this handle will be a ID3D11ShaderResourceView with a ID3D11Texture2D bound.
* The texture will always be sized to match the backing texture you supplied in SetOverlayTexture above.
* You MUST call ReleaseNativeOverlayHandle() with pNativeTextureHandle once you are done with this texture.
* pNativeTextureHandle is an OUTPUT, it will be a pointer to a ID3D11ShaderResourceView *.
pNativeTextureRef is an INPUT and should be a ID3D11Resource *. The device used by pNativeTextureRef will be used to bind pNativeTextureHandle. |
6,904 | def set_block(arr, arr_block):
nr_col = arr.shape[1]
nr_row = arr.shape[0]
nr_col_block = arr_block.shape[1]
nr_row_block = arr_block.shape[0]
if np.mod(nr_row, nr_row_block) or np.mod(nr_col, nr_col_block):
raise ValueError(
)
if nr_row/nr_row_block != nr_col/nr_col_block:
raise ValueError(
)
arr_out = arr.copy()
for row_ind in range(int(nr_row/nr_row_block)):
row_start = row_ind*nr_row_block
row_end = nr_row_block+nr_row_block*row_ind
col_start = row_ind*nr_col_block
col_end = nr_col_block+nr_col_block*row_ind
arr_out[row_start:row_end, col_start:col_end] = arr_block
return arr_out | Sets the diagonal blocks of an array to a given array
Parameters
----------
arr : numpy ndarray
the original array
block_arr : numpy ndarray
the block array for the new diagonal
Returns
-------
numpy ndarray (the modified array) |
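To make the block-diagonal behaviour concrete, a small doctest-style example (hypothetical; assumes numpy as np and the set_block definition above): the 2x2 block is written onto both diagonal blocks of a 4x4 zero array.
>>> arr = np.zeros((4, 4))
>>> block = np.array([[1., 2.], [3., 4.]])
>>> set_block(arr, block)
array([[1., 2., 0., 0.],
       [3., 4., 0., 0.],
       [0., 0., 1., 2.],
       [0., 0., 3., 4.]])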
6,905 | def delete_commit_branches(self, enrich_backend):
fltr = % self.perceval_backend.origin
es_query = % fltr
index = enrich_backend.elastic.index_url
r = self.requests.post(index + "/_update_by_query?refresh", data=es_query, headers=HEADER_JSON, verify=False)
try:
r.raise_for_status()
except requests.exceptions.HTTPError:
logger.error("Error while deleting branches on %s",
self.elastic.anonymize_url(index))
logger.error(r.text)
return
logger.debug("Delete branches %s, index %s", r.text, self.elastic.anonymize_url(index)) | Delete the information about branches from the documents representing
commits in the enriched index.
:param enrich_backend: the enrich backend |
6,906 | def sort_by_decreasing_count(self):
words = [w for w, ct in self._counts.most_common()]
v = self.subset(words)
return v | Return a **new** `Vocab` object that is ordered by decreasing count.
The word at index 1 will be most common, the word at index 2 will be
next most common, and so on.
:return: A new vocabulary sorted by decreasing count.
NOTE: UNK will remain at index 0, regardless of its frequency. |
6,907 | def image_path_from_index(self, index):
assert self.image_set_index is not None, "Dataset not initialized"
pos = self.image_set_index[index]
n_db, n_index = self._locate_index(index)
return self.imdbs[n_db].image_path_from_index(n_index) | given image index, find out full path
Parameters
----------
index: int
index of a specific image
Returns
----------
full path of this image |
6,908 | def enable_tracing(self):
if not self.connected:
raise HardwareError("Cannot enable tracing if we are not in a connected state")
if self._traces is not None:
_clear_queue(self._traces)
return self._traces
self._traces = queue.Queue()
self._loop.run_coroutine(self.adapter.open_interface(0, ))
return self._traces | Open the tracing interface and accumulate traces in a queue.
This method is safe to call multiple times in a single device
connection. There is no way to check if the tracing interface is
opened or to close it once it is opened (apart from disconnecting from
the device).
The first time this method is called, it will open the tracing
interface and return a queue that will be filled asynchronously with
reports as they are received. Subsequent calls will just empty the
queue and return the same queue without interacting with the device at
all.
Returns:
queue.Queue: A queue that will be filled with trace data from the device.
The trace data will be in disjoint bytes objects in the queue |
6,909 | def make_tophat_ie (lower, upper):
if not np.isfinite (lower):
raise ValueError ('tophat lower limit must be finite; got %r' % lower)
if not np.isfinite (upper):
raise ValueError ('tophat upper limit must be finite; got %r' % upper)
def range_tophat_ie (x):
x = np.asarray (x)
x1 = np.atleast_1d (x)
r = ((lower <= x1) & (x1 < upper)).astype (x.dtype)
if x.ndim == 0:
return np.asscalar (r)
return r
range_tophat_ie.__doc__ = (
'Ranged tophat function, left-inclusive and right-exclusive. '
'Returns 1 if %g <= x < %g, 0 otherwise.') % (lower, upper)
return range_tophat_ie | Return a ufunc-like tophat function on the defined range, left-inclusive
and right-exclusive. Returns 1 if lower <= x < upper, 0 otherwise. |
6,910 | def add(
self, job, job_add_options=None, custom_headers=None, raw=False, **operation_config):
timeout = None
if job_add_options is not None:
timeout = job_add_options.timeout
client_request_id = None
if job_add_options is not None:
client_request_id = job_add_options.client_request_id
return_client_request_id = None
if job_add_options is not None:
return_client_request_id = job_add_options.return_client_request_id
ocp_date = None
if job_add_options is not None:
ocp_date = job_add_options.ocp_date
url = self.add.metadata[]
path_format_arguments = {
: self._serialize.url("self.config.batch_url", self.config.batch_url, , skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters[] = self._serialize.query("self.api_version", self.api_version, )
if timeout is not None:
query_parameters[] = self._serialize.query("timeout", timeout, )
header_parameters = {}
header_parameters[] =
if self.config.generate_client_request_id:
header_parameters[] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters[] = self._serialize.header("self.config.accept_language", self.config.accept_language, )
if client_request_id is not None:
header_parameters[] = self._serialize.header("client_request_id", client_request_id, )
if return_client_request_id is not None:
header_parameters[] = self._serialize.header("return_client_request_id", return_client_request_id, )
if ocp_date is not None:
header_parameters[] = self._serialize.header("ocp_date", ocp_date, )
body_content = self._serialize.body(job, )
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [201]:
raise models.BatchErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
client_raw_response.add_headers({
: ,
: ,
: ,
: ,
: ,
})
return client_raw_response | Adds a job to the specified account.
The Batch service supports two ways to control the work done as part of
a job. In the first approach, the user specifies a Job Manager task.
The Batch service launches this task when it is ready to start the job.
The Job Manager task controls all other tasks that run under this job,
by using the Task APIs. In the second approach, the user directly
controls the execution of tasks under an active job, by using the Task
APIs. Also note: when naming jobs, avoid including sensitive
information such as user names or secret project names. This
information may appear in telemetry logs accessible to Microsoft
Support engineers.
:param job: The job to be added.
:type job: ~azure.batch.models.JobAddParameter
:param job_add_options: Additional parameters for the operation
:type job_add_options: ~azure.batch.models.JobAddOptions
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`BatchErrorException<azure.batch.models.BatchErrorException>` |
6,911 | def get_input_list_from_task(task, placeholder_dict):
try:
if not isinstance(task, Task):
raise TypeError(expected_type=Task, actual_type=type(task))
input_data = []
if task.link_input_data:
for path in task.link_input_data:
path = resolve_placeholders(path, placeholder_dict)
if len(path.split()) > 1:
temp = {
: path.split()[0].strip(),
: path.split()[1].strip(),
: rp.LINK
}
else:
temp = {
: path.split()[0].strip(),
: os.path.basename(path.split()[0].strip()),
: rp.LINK
}
input_data.append(temp)
if task.upload_input_data:
for path in task.upload_input_data:
path = resolve_placeholders(path, placeholder_dict)
if len(path.split()) > 1:
temp = {
: path.split()[0].strip(),
: path.split()[1].strip()
}
else:
temp = {
: path.split()[0].strip(),
: os.path.basename(path.split()[0].strip())
}
input_data.append(temp)
if task.copy_input_data:
for path in task.copy_input_data:
path = resolve_placeholders(path, placeholder_dict)
if len(path.split()) > 1:
temp = {
: path.split()[0].strip(),
: path.split()[1].strip(),
: rp.COPY
}
else:
temp = {
: path.split()[0].strip(),
: os.path.basename(path.split()[0].strip()),
: rp.COPY
}
input_data.append(temp)
if task.move_input_data:
for path in task.move_input_data:
path = resolve_placeholders(path, placeholder_dict)
if len(path.split()) > 1:
temp = {
: path.split()[0].strip(),
: path.split()[1].strip(),
: rp.MOVE
}
else:
temp = {
: path.split()[0].strip(),
: os.path.basename(path.split()[0].strip()),
: rp.MOVE
}
input_data.append(temp)
return input_data
except Exception, ex:
logger.exception( % ex)
raise | Purpose: Parse a Task object to extract the files to be staged as the input.
Details: The extracted data is then converted into the appropriate RP directive depending on whether the data
is to be copied/downloaded.
:arguments:
:task: EnTK Task object
:placeholder_dict: dictionary holding the values for placeholders
:return: list of RP directives for the files that need to be staged in |
6,912 | def emit_data_changed(self):
item = self.get_treeitem()
m = item.get_model()
if m:
start = m.index_of_item(item)
parent = start.parent()
end = m.index(start.row(), item.column_count()-1, parent)
m.dataChanged.emit(start, end) | Emit the data changed signal on the model of the treeitem
if the treeitem has a model.
:returns: None
:rtype: None
:raises: None |
6,913 | def log_html(self, log) -> str:
if not self.omit_loglevel(log["status"]):
emoticon = EMOTICON[log["status"]]
status = log["status"]
message = html.escape(log["message"]).replace("\n", "<br/>")
return (
"<li class=>"
f"<span class=>{emoticon} {status}</span>"
f"<span class=>{message}</span>"
"</li>"
)
return "" | Return single check sub-result string as HTML or not if below log
level. |
6,914 | def find_dependencies(self, depslock_file_path, property_validate=True, deps_content=None):
self._raw = [x for x in
self._downloader.common_parser.iter_packages_params(depslock_file_path, deps_content=deps_content)]
self.packages = self._downloader.get_dependency_packages({: self._raw},
property_validate=property_validate) | Find all dependencies by package
:param depslock_file_path:
:param property_validate: for `root` packages we need check property, bad if we find packages from `lock` file,
:param deps_content: HACK for use --dependencies-content and existed dependencies.txt.lock file
we can skip validate part
:return: |
6,915 | def get_element_centroids(self):
centroids = np.vstack((
np.mean(self.grid[], axis=1), np.mean(self.grid[], axis=1)
)).T
return centroids | return the central points of all elements
Returns
-------
Nx2 array
x/z coordinates for all (N) elements |
6,916 | def is_functional(cls):
if not cls._tested:
cls._tested = True
np.random.seed(SEED)
test_problem_dimension = 10
mat = np.random.randn(test_problem_dimension, test_problem_dimension)
posmat = mat.dot(mat.T)
posvar = cvxpy.Variable(test_problem_dimension, test_problem_dimension)
prob = cvxpy.Problem(cvxpy.Minimize((cvxpy.trace(posmat * posvar)
+ cvxpy.norm(posvar))),
[posvar >> 0, cvxpy.trace(posvar) >= 1.])
try:
prob.solve(SOLVER)
cls._functional = True
except cvxpy.SolverError:
_log.warning("No convex SDP solver found. You will not be able to solve"
" tomography problems with matrix positivity constraints.")
return cls._functional | Checks lazily whether a convex solver is installed that handles positivity constraints.
:return: True if a solver supporting positivity constraints is installed.
:rtype: bool |
6,917 | def level_matches(self, level, consumer_level):
if isinstance(level, slice):
start, stop = level.start, level.stop
if start is not None and start > consumer_level:
return False
if stop is not None and stop <= consumer_level:
return False
return True
else:
return level >= consumer_level | >>> l = Logger([])
>>> l.level_matches(3, 4)
False
>>> l.level_matches(3, 2)
True
>>> l.level_matches(slice(None, 3), 3)
False
>>> l.level_matches(slice(None, 3), 2)
True
>>> l.level_matches(slice(1, 3), 1)
True
>>> l.level_matches(slice(2, 3), 1)
False |
6,918 | def timing(function):
@wraps(function)
def wrapped(*args, **kwargs):
start_time = time.time()
ret = function(*args, **salt.utils.args.clean_kwargs(**kwargs))
end_time = time.time()
if function.__module__.startswith():
mod_name = function.__module__[16:]
else:
mod_name = function.__module__
fstr = .format(
sys.float_info.dig
)
log.profile(fstr, mod_name, function.__name__, end_time - start_time)
return ret
return wrapped | Decorator wrapper to log execution time, for profiling purposes |
6,919 | def _get_site_type_dummy_variables(self, sites):
ssa = np.zeros(len(sites.vs30))
ssb = np.zeros(len(sites.vs30))
ssd = np.zeros(len(sites.vs30))
idx = (sites.vs30 < 180.0)
ssd[idx] = 1.0
idx = (sites.vs30 >= 360.0) & (sites.vs30 < 800.0)
ssb[idx] = 1.0
idx = (sites.vs30 >= 800.0)
ssa[idx] = 1.0
for value in sites.vs30:
if 180 <= value < 360:
raise Exception(
)
return ssa, ssb, ssd | Get site type dummy variables, which classified the sites into
different site classes based on the shear wave velocity in the
upper 30 m (Vs30) according to the EC8 (CEN 2003):
class A: Vs30 > 800 m/s
class B: Vs30 = 360 - 800 m/s
class C*: Vs30 = 180 - 360 m/s
class D: Vs30 < 180 m/s
*Not computed by this GMPE |
6,920 | def get_proteins_for_db(fastafn):
objects = {}
for record in parse_fasta(fastafn):
objects[parse_protein_identifier(record)] = record
return (((acc,) for acc in list(objects)),
((acc, str(record.seq)) for acc, record in objects.items()),
((acc, get_uniprot_evidence_level(record.description))
for acc, record in objects.items())) | Runs through fasta file and returns proteins accession nrs, sequences
and evidence levels for storage in lookup DB. Duplicate accessions in
fasta are accepted and removed by keeping only the last one. |
6,921 | def closest(self):
current = self.tag
closest = None
while closest is None and current is not None:
if self.match(current):
closest = current
else:
current = self.get_parent(current)
return closest | Match closest ancestor. |
6,922 | def add_command_formatting(self, command):
if command.description:
self.paginator.add_line(command.description, empty=True)
signature = self.get_command_signature(command)
if command.aliases:
self.paginator.add_line(signature)
self.add_aliases_formatting(command.aliases)
else:
self.paginator.add_line(signature, empty=True)
if command.help:
try:
self.paginator.add_line(command.help, empty=True)
except RuntimeError:
for line in command.help.splitlines():
self.paginator.add_line(line)
self.paginator.add_line() | A utility function to format commands and groups.
Parameters
------------
command: :class:`Command`
The command to format. |
6,923 | def get_urn(self):
urn = self.ecrm_P1_is_identified_by.one
try:
return CTS_URN(urn)
except Exception, e:
raise e | TODO |
6,924 | def search_dashboard_for_facet(self, facet, **kwargs):
kwargs[] = True
if kwargs.get():
return self.search_dashboard_for_facet_with_http_info(facet, **kwargs)
else:
(data) = self.search_dashboard_for_facet_with_http_info(facet, **kwargs)
return data | Lists the values of a specific facet over the customer's non-deleted dashboards # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_dashboard_for_facet(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread. |
6,925 | def chat(self, id):
json = self.skype.conn("GET", "{0}/users/ME/conversations/{1}".format(self.skype.conn.msgsHost, id),
auth=SkypeConnection.Auth.RegToken, params={"view": "msnp24Equivalent"}).json()
cls = SkypeSingleChat
if "threadProperties" in json:
info = self.skype.conn("GET", "{0}/threads/{1}".format(self.skype.conn.msgsHost, json.get("id")),
auth=SkypeConnection.Auth.RegToken, params={"view": "msnp24Equivalent"}).json()
json.update(info)
cls = SkypeGroupChat
return self.merge(cls.fromRaw(self.skype, json)) | Get a single conversation by identifier.
Args:
id (str): single or group chat identifier |
6,926 | def write_pdf_files(args, infilenames, outfilename):
if not outfilename.endswith():
outfilename = outfilename +
outfilename = overwrite_file_check(args, outfilename)
options = {}
try:
if args[]:
infilename = infilenames[0]
if not args[]:
print(.format(outfilename))
else:
options[] = None
if args[]:
html = parse_html(read_files(infilename), args[])
if isinstance(html, list):
if isinstance(html[0], str):
pk.from_string(.join(html), outfilename,
options=options)
else:
pk.from_string(.join(lh.tostring(x) for x in html),
outfilename, options=options)
elif isinstance(html, str):
pk.from_string(html, outfilename, options=options)
else:
pk.from_string(lh.tostring(html), outfilename,
options=options)
else:
pk.from_file(infilename, outfilename, options=options)
elif args[]:
if not args[]:
print(
.format(len(infilenames), outfilename))
else:
options[] = None
if args[]:
html = parse_html(read_files(infilenames), args[])
if isinstance(html, list):
if isinstance(html[0], str):
pk.from_string(.join(html), outfilename,
options=options)
else:
pk.from_string(.join(lh.tostring(x) for x in html),
outfilename, options=options)
elif isinstance(html, str):
pk.from_string(html, outfilename, options=options)
else:
pk.from_string(lh.tostring(html), outfilename,
options=options)
else:
pk.from_file(infilenames, outfilename, options=options)
return True
except (OSError, IOError) as err:
sys.stderr.write(
.format(outfilename, str(err)))
return False | Write pdf file(s) to disk using pdfkit.
Keyword arguments:
args -- program arguments (dict)
infilenames -- names of user-inputted and/or downloaded files (list)
outfilename -- name of output pdf file (str) |
6,927 | def _parse_ISBN_EAN(details):
isbn_ean = _get_td_or_none(
details,
"ctl00_ContentPlaceHolder1_tblRowIsbnEan"
)
if not isbn_ean:
return None, None
ean = None
isbn = None
if "/" in isbn_ean:
isbn, ean = isbn_ean.split("/")
isbn = isbn.strip()
ean = ean.strip()
else:
isbn = isbn_ean.strip()
if not isbn:
isbn = None
return isbn, ean | Parse ISBN and EAN.
Args:
details (obj): HTMLElement containing slice of the page with details.
Returns:
(ISBN, EAN): Tuple with two string or two None. |
6,928 | def _netstat_sunos():
log.warning()
ret = []
for addr_family in (, ):
cmd = .format(addr_family)
out = __salt__[](cmd, python_shell=True)
for line in out.splitlines():
comps = line.split()
ret.append({
: if addr_family == else ,
: comps[5],
: comps[4],
: comps[0],
: comps[1],
: comps[6]})
cmd = .format(addr_family)
out = __salt__[](cmd, python_shell=True)
for line in out.splitlines():
comps = line.split()
ret.append({
: if addr_family == else ,
: comps[0],
: comps[1] if len(comps) > 2 else })
return ret | Return netstat information for SunOS flavors |
6,929 | def get_value(self, **kwargs):
key = tuple(kwargs[group] for group in self.groups)
if key not in self.data:
self.data[key] = 0
return self.data[key] | Return the value for a specific key. |
6,930 | def reread(user=None, conf_file=None, bin_env=None):
ret = __salt__['cmd.run_all'](
_ctl_cmd('reread', None, conf_file, bin_env),
runas=user,
python_shell=False,
)
return _get_return(ret) | Reload the daemon's configuration files
user
user to run supervisorctl as
conf_file
path to supervisord config file
bin_env
path to supervisorctl bin or path to virtualenv with supervisor
installed
CLI Example:
.. code-block:: bash
salt '*' supervisord.reread |
6,931 | def compute_venn2_colors(set_colors):
ccv = ColorConverter()
base_colors = [np.array(ccv.to_rgb(c)) for c in set_colors]
return (base_colors[0], base_colors[1], mix_colors(base_colors[0], base_colors[1])) | Given two base colors, computes combinations of colors corresponding to all regions of the venn diagram.
returns a list of 3 elements, providing colors for regions (10, 01, 11).
>>> compute_venn2_colors(('r', 'g'))
(array([ 1., 0., 0.]), array([ 0. , 0.5, 0. ]), array([ 0.7 , 0.35, 0. ])) |
6,932 | def send_message(self, chat_id, text, **options):
return self.api_call("sendMessage", chat_id=chat_id, text=text, **options) | Send a text message to chat
:param int chat_id: ID of the chat to send the message to
:param str text: Text to send
:param options: Additional sendMessage options
(see https://core.telegram.org/bots/api#sendmessage) |
6,933 | def _read_certificates(self):
stack_pointer = libssl.SSL_get_peer_cert_chain(self._ssl)
if is_null(stack_pointer):
handle_openssl_error(0, TLSError)
if libcrypto_version_info < (1, 1):
number_certs = libssl.sk_num(stack_pointer)
else:
number_certs = libssl.OPENSSL_sk_num(stack_pointer)
self._intermediates = []
for index in range(0, number_certs):
if libcrypto_version_info < (1, 1):
x509_ = libssl.sk_value(stack_pointer, index)
else:
x509_ = libssl.OPENSSL_sk_value(stack_pointer, index)
buffer_size = libcrypto.i2d_X509(x509_, null())
cert_buffer = buffer_from_bytes(buffer_size)
cert_pointer = buffer_pointer(cert_buffer)
cert_length = libcrypto.i2d_X509(x509_, cert_pointer)
handle_openssl_error(cert_length)
cert_data = bytes_from_buffer(cert_buffer, cert_length)
cert = x509.Certificate.load(cert_data)
if index == 0:
self._certificate = cert
else:
self._intermediates.append(cert) | Reads end-entity and intermediate certificate information from the
TLS session |
6,934 | def hardware_info():
try:
if sys.platform == :
out = _mac_hardware_info()
elif sys.platform == :
out = _win_hardware_info()
elif sys.platform in [, ]:
out = _linux_hardware_info()
else:
out = {}
except:
return {}
else:
return out | Returns basic hardware information about the computer.
Gives actual number of CPU's in the machine, even when hyperthreading is
turned on.
Returns
-------
info : dict
Dictionary containing cpu and memory information. |
6,935 | def addDataModels(self, mods):
for modlname, mdef in mods:
for name, ctor, opts, info in mdef.get(, ()):
item = s_dyndeps.tryDynFunc(ctor, self, name, info, opts)
self.types[name] = item
self._modeldef[].append((name, ctor, opts, info))
for modlname, mdef in mods:
for typename, (basename, opts), info in mdef.get(, ()):
base = self.types.get(basename)
if base is None:
raise s_exc.NoSuchType(name=basename)
self.types[typename] = base.extend(typename, opts, info)
self._modeldef[].append((typename, (basename, opts), info))
for modlname, mdef in mods:
for univname, typedef, univinfo in mdef.get(, ()):
self.addUnivProp(univname, typedef, univinfo)
for modlname, mdef in mods:
for formname, forminfo, propdefs in mdef.get(, ()):
if not s_syntax.isFormName(formname):
mesg = f
raise s_exc.BadFormDef(name=formname, mesg=mesg)
_type = self.types.get(formname)
if _type is None:
raise s_exc.NoSuchType(name=formname)
self._modeldef[].append((formname, forminfo, propdefs))
form = Form(self, formname, forminfo)
self.forms[formname] = form
self.props[formname] = form
for univname, typedef, univinfo in self.univs:
self._addFormUniv(form, univname, typedef, univinfo)
for propdef in propdefs:
if len(propdef) != 3:
raise s_exc.BadPropDef(valu=propdef)
propname, typedef, propinfo = propdef
prop = Prop(self, form, propname, typedef, propinfo)
full = f
self.props[full] = prop
self.props[(formname, propname)] = prop
self._modelinfo.addDataModels(mods) | Add a list of (name, mdef) tuples.
A model definition (mdef) is structured as follows::
{
"ctors":(
('name', 'class.path.ctor', {}, {'doc': 'The foo thing.'}),
),
"types":(
('name', ('basetype', {typeopts}), {info}),
),
"forms":(
(formname, (typename, typeopts), {info}, (
(propname, (typename, typeopts), {info}),
)),
),
"univs":(
(propname, (typename, typeopts), {info}),
)
}
Args:
mods (list): The list of tuples.
Returns:
None |
6,936 | def result(self):
self._event.wait()
if self._final_result is not _NOT_SET:
return ResultSet(self, self._final_result)
else:
raise self._final_exception | Return the final result or raise an Exception if errors were
encountered. If the final result or error has not been set
yet, this method will block until it is set, or the timeout
set for the request expires.
Timeout is specified in the Session request execution functions.
If the timeout is exceeded, an :exc:`cassandra.OperationTimedOut` will be raised.
This is a client-side timeout. For more information
about server-side coordinator timeouts, see :class:`.policies.RetryPolicy`.
Example usage::
>>> future = session.execute_async("SELECT * FROM mycf")
>>> # do other stuff...
>>> try:
... rows = future.result()
... for row in rows:
... ... # process results
... except Exception:
... log.exception("Operation failed:") |
6,937 | def default_cy(self):
px_height = self.image.px_height
horz_dpi = self.image.horz_dpi
height_in_emu = 914400 * px_height / horz_dpi
return Emu(height_in_emu) | Native height of this image, calculated from its height in pixels and
vertical dots per inch (dpi). |
6,938 | def _list_resource_descriptors(args, _):
project_id = args[]
pattern = args[] or
descriptors = gcm.ResourceDescriptors(project_id=project_id)
dataframe = descriptors.as_dataframe(pattern=pattern)
return _render_dataframe(dataframe) | Lists the resource descriptors in the project. |
6,939 | def _make_repr(attrs, ns):
attr_names = tuple(a.name for a in attrs if a.repr)
def __repr__(self):
try:
working_set = _already_repring.working_set
except AttributeError:
working_set = set()
_already_repring.working_set = working_set
if id(self) in working_set:
return "..."
real_cls = self.__class__
if ns is None:
qualname = getattr(real_cls, "__qualname__", None)
if qualname is not None:
class_name = qualname.rsplit(">.", 1)[-1]
else:
class_name = real_cls.__name__
else:
class_name = ns + "." + real_cls.__name__
working_set.add(id(self))
try:
result = [class_name, "("]
first = True
for name in attr_names:
if first:
first = False
else:
result.append(", ")
result.extend((name, "=", repr(getattr(self, name, NOTHING))))
return "".join(result) + ")"
finally:
working_set.remove(id(self))
return __repr__ | Make a repr method for *attr_names* adding *ns* to the full name. |
6,940 | def get_machines(self, origin, hostnames):
hostnames = {
hostname: True
for hostname in hostnames
}
machines = origin.Machines.read(hostnames=hostnames)
machines = [
machine
for machine in machines
if hostnames.pop(machine.hostname, False)
]
if len(hostnames) > 0:
raise CommandError(
"Unable to find %s %s." % (
"machines" if len(hostnames) > 1 else "machine",
.join(hostnames)))
return machines | Return a set of machines based on `hostnames`.
Any hostname that is not found will result in an error. |
6,941 | def console_to_str(data):
encoding = locale.getpreferredencoding()
if (not encoding) or codecs.lookup(encoding).name == "ascii":
encoding = "utf-8"
try:
decoded_data = data.decode(encoding)
except UnicodeDecodeError:
logger.warning(
"Subprocess output does not appear to be encoded as %s",
encoding,
)
decoded_data = data.decode(encoding, errors=backslashreplace_decode)
output_encoding = getattr(getattr(sys, "__stderr__", None),
"encoding", None)
if output_encoding:
output_encoded = decoded_data.encode(
output_encoding,
errors="backslashreplace"
)
decoded_data = output_encoded.decode(output_encoding)
return decoded_data | Return a string, safe for output, of subprocess output.
We assume the data is in the locale preferred encoding.
If it won't decode properly, we warn the user but decode as
best we can.
We also ensure that the output can be safely written to
standard output without encoding errors. |
6,942 | def to_json(self):
commands = ",".join(map(lambda x: x.to_json(), self._commands))
return "{\"commands\": [" + commands + "]}" | Returns:
str: Json for commands array object and all of the commands inside the array. |
6,943 | def parse(self, data):
self.validate_packet(data)
id_ = self.dump_hex(data[4:6])
humidity = data[6]
humidity_status = self._extract_humidity_status(data[7])
sensor_specific = {
: id_,
: humidity,
: humidity_status
}
results = self.parse_header_part(data)
results.update(RfxPacketUtils.parse_signal_and_battery(data[8]))
results.update(sensor_specific)
return results | Parse a 9 bytes packet in the Humidity format and return a
dictionary containing the data extracted. An example of a return value
would be:
.. code-block:: python
{
'id': "0x2EB2",
'packet_length': 8,
'packet_type': 81,
'packet_type_name': 'Humidity sensors',
'sequence_number': 0,
'packet_subtype': 1,
'packet_subtype_name': "LaCrosse TX3",
'humidity': 91,
'humidity_status': "Wet"
'signal_level': 9,
'battery_level': 6,
}
:param data: bytearray to be parsed
:type data: bytearray
:return: Data dictionary containing the parsed values
:rtype: dict |
6,944 | def parse_usearch61_failures(seq_path,
failures,
output_fasta_fp):
parsed_out = open(output_fasta_fp, "w")
for label, seq in parse_fasta(open(seq_path), "U"):
curr_label = label.split()[0]
if curr_label in failures:
parsed_out.write(">%s\n%s\n" % (label, seq))
parsed_out.close()
return output_fasta_fp | Parses seq IDs from failures list, writes to output_fasta_fp
seq_path: filepath of original input fasta file.
failures: list/set of failure seq IDs
output_fasta_fp: path to write parsed sequences |
6,945 | def stop(self, reason=None):
self.logger.info()
self.loop.stop(pyev.EVBREAK_ALL) | Shutdown the service with a reason. |
6,946 | def parse_links(self, markup):
links = []
m = re.findall(self.re["link"], markup)
for link in m:
if link.find("{") >= 0:
link = re.sub("\{{1,3}[0-9]{0,2}\|", "", link)
link = link.replace("{", "")
link = link.replace("}", "")
link = link.split("|")
link[0] = link[0].split("
page = link[0][0].strip()
if not page in links:
links.append(page)
links.sort()
return links | Returns a list of internal Wikipedia links in the markup.
# A Wikipedia link looks like:
# [[List of operating systems#Embedded | List of embedded operating systems]]
# It does not contain a colon, this indicates images, users, languages, etc.
The return value is a list containing the first part of the link,
without the anchor. |
6,947 | def DateTimeField(formatter=types.DEFAULT_DATETIME_FORMAT, default=NOTHING,
required=True, repr=True, cmp=True, key=None):
default = _init_fields.init_default(required, default, None)
validator = _init_fields.init_validator(required, datetime)
converter = converters.to_datetime_field(formatter)
return attrib(default=default, converter=converter, validator=validator,
repr=repr, cmp=cmp,
metadata=dict(formatter=formatter, key=key)) | Create new datetime field on a model.
:param formatter: datetime formatter string (default: "ISO_FORMAT")
:param default: any datetime or string that can be converted to a datetime
:param bool required: whether or not the object is invalid if not provided.
:param bool repr: include this field should appear in object's repr.
:param bool cmp: include this field in generated comparison.
:param string key: override name of the value when converted to dict. |
6,948 | def sendMessage(self,chat_id,text,parse_mode=None,disable_web=None,reply_msg_id=None,markup=None):
payload={ : chat_id, : text, : parse_mode , : disable_web , : reply_msg_id}
if(markup):
payload[]=json.dumps(markup)
response_str = self._command(,payload,method=)
return _validate_response_msg(response_str) | On failure returns False
On success returns Message Object |
6,949 | def _fill_disk_filename(vm_name, disk, hypervisor, **kwargs):
base_dir = disk.get(, None)
if hypervisor in [, , ]:
if not base_dir:
base_dir = _get_images_dir()
else:
if not base_dir.startswith():
infos = pool_info(base_dir, **kwargs)
pool = infos[base_dir] if base_dir in infos else None
if not pool or not pool[] or pool[].startswith():
raise CommandExecutionError(
.format(disk[], base_dir))
base_dir = pool[]
if hypervisor == and vm_name:
disk[] = .format(vm_name, disk[])
disk[] = os.path.join(, base_dir or , disk[])
elif vm_name:
disk[] = .format(vm_name, disk[], disk[])
disk[] = os.path.join(base_dir, disk[]) | Compute the disk file name and update it in the disk value. |
6,950 | def is_on(self):
return self.status not in (CONST.STATUS_OFF, CONST.STATUS_OFFLINE,
CONST.STATUS_CLOSED, CONST.STATUS_OPEN) | Get sensor state.
Assume offline or open (worst case). |
6,951 | def evaluate(self, x, y, flux, x_0, y_0, sigma):
return (flux / 4 *
((self._erf((x - x_0 + 0.5) / (np.sqrt(2) * sigma)) -
self._erf((x - x_0 - 0.5) / (np.sqrt(2) * sigma))) *
(self._erf((y - y_0 + 0.5) / (np.sqrt(2) * sigma)) -
self._erf((y - y_0 - 0.5) / (np.sqrt(2) * sigma))))) | Model function Gaussian PSF model. |
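The expression above is the pixel-integrated 2-D Gaussian: the flux landing in a unit pixel centred on (x, y) is the product of two error-function differences. A rough numeric check for the central pixel of a unit-flux, sigma=1 source, assuming self._erf wraps scipy.special.erf:
>>> from scipy.special import erf
>>> import numpy as np
>>> round(float(1.0 / 4 * (erf(0.5 / np.sqrt(2)) - erf(-0.5 / np.sqrt(2))) ** 2), 4)
0.1466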
6,952 | def remove_highdepth_regions(in_file, items):
encode_bed = tz.get_in(["genome_resources", "variation", "encode_blacklist"], items[0])
if encode_bed and os.path.exists(encode_bed):
return _remove_regions(in_file, [encode_bed], "glimit", items[0])
else:
return in_file | Remove high depth regions from a BED file for analyzing a set of calls.
Tries to avoid spurious errors and slow run times in collapsed repeat regions.
Also adds ENCODE blacklist regions which capture additional collapsed repeats
around centromeres. |
6,953 | def hold(self, policy="combine"):
if self._hold is not None and self._hold != policy:
log.warning("hold already active with '%s', ignoring '%s'" % (self._hold, policy))
return
if policy not in HoldPolicy:
raise ValueError("Unknown hold policy %r" % policy)
self._hold = policy | Activate a document hold.
While a hold is active, no model changes will be applied, or trigger
callbacks. Once ``unhold`` is called, the events collected during the
hold will be applied according to the hold policy.
Args:
hold ('combine' or 'collect', optional)
Whether events collected during a hold should attempt to be
combined (default: 'combine')
When set to ``'collect'`` all events will be collected and
replayed in order as-is when ``unhold`` is called.
When set to ``'combine'`` Bokeh will attempt to combine
compatible events together. Typically, different events that
change the same property on the same model can be combined.
For example, if the following sequence occurs:
.. code-block:: python
doc.hold('combine')
slider.value = 10
slider.value = 11
slider.value = 12
Then only *one* callback, for the last ``slider.value = 12``
will be triggered.
Returns:
None
.. note::
``hold`` only applies to document change events, i.e. setting
properties on models. It does not apply to events such as
``ButtonClick``, etc. |
6,954 | def Parse(self, how):
if type(how) == types.ClassType: how = how.typecode
return how.parse(self.body_root, self) | Parse the message. |
6,955 | def position(self):
dLbl = self._dLbl
if dLbl is None:
return None
dLblPos = dLbl.dLblPos
if dLblPos is None:
return None
return dLblPos.val | Read/write :ref:`XlDataLabelPosition` member specifying the position
of this data label with respect to its data point, or |None| if no
position is specified. Assigning |None| causes PowerPoint to choose
the default position, which varies by chart type. |
6,956 | def list_nodes(conn=None, call=None):
if call == :
raise SaltCloudSystemExit(
)
if not conn:
conn = get_conn()
ret = {}
datacenter_id = get_datacenter_id()
try:
nodes = conn.list_servers(datacenter_id=datacenter_id)
except PBNotFoundError:
log.error(
, datacenter_id)
raise
for item in nodes[]:
node = {: item[]}
node.update(item[])
node[] = node.pop()
ret[node[]] = node
return ret | Return a list of VMs that are on the provider |
6,957 | def retrieve_all(self, sids, default_none=False):
failures = set()
hits = {}
for sid in sids:
try:
hits[sid] = self._asset_cache[sid]
except KeyError:
if not default_none:
failures.add(sid)
else:
hits[sid] = None
if len(failures) > 0:
raise SidsNotFound(sids=list(failures))
return [hits[sid] for sid in sids] | Retrieve all assets in `sids`.
Parameters
----------
sids : iterable of string
Assets to retrieve.
default_none : bool
If True, return None for failed lookups.
If False, raise `SidsNotFound`.
Returns
-------
assets : list[Asset or None]
A list of the same length as `sids` containing Assets (or Nones)
corresponding to the requested sids.
Raises
------
SidsNotFound
When a requested sid is not found and default_none=False. |
6,958 | def stretch(self, factor, window=20):
if not is_number(factor) or factor <= 0:
raise ValueError("factor must be a positive number")
if factor < 0.5 or factor > 2:
logger.warning(
"Using an extreme time stretching factor. "
"Quality of results will be poor"
)
if abs(factor - 1.0) > 0.1:
logger.warning(
"For this stretch factor, "
"the tempo effect has better performance."
)
if not is_number(window) or window <= 0:
raise ValueError(
"window must be a positive number."
)
effect_args = ['stretch', '{:f}'.format(factor), '{:f}'.format(window)]
self.effects.extend(effect_args)
self.effects_log.append('stretch')
return self | Change the audio duration (but not its pitch).
**Unless factor is close to 1, use the tempo effect instead.**
This effect is broadly equivalent to the tempo effect with search set
to zero, so in general, its results are comparatively poor; it is
retained as it can sometimes out-perform tempo for small factors.
Parameters
----------
factor : float
The ratio of the new tempo to the old tempo.
For ex. 1.1 speeds up the tempo by 10%; 0.9 slows it down by 10%.
Note - this argument is the inverse of what is passed to the sox
stretch effect for consistency with tempo.
window : float, default=20
Window size in miliseconds
See Also
--------
tempo, speed, pitch |
6,959 | def _set_redistribute_bgp(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=redistribute_bgp.redistribute_bgp, is_container=, presence=True, yang_name="redistribute-bgp", rest_name="bgp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: u}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "container",
: ,
})
self.__redistribute_bgp = t
if hasattr(self, ):
self._set() | Setter method for redistribute_bgp, mapped from YANG variable /rbridge_id/ipv6/router/ospf/redistribute/redistribute_bgp (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_redistribute_bgp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_redistribute_bgp() directly.
YANG Description: BGP routes |
6,960 | def _obj_getattr(obj, fqdn, start=1):
node = obj
for chain in fqdn.split('.')[start:]:
if hasattr(node, chain):
node = getattr(node, chain)
else:
node = None
break
return node | Returns the attribute specified by the fqdn list from obj. |
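A small self-contained sketch of the dotted-attribute walk (hypothetical objects; start=1 means the first segment, the object's own name, is skipped):
>>> import types
>>> obj = types.SimpleNamespace(child=types.SimpleNamespace(value=42))
>>> _obj_getattr(obj, 'obj.child.value')
42
>>> _obj_getattr(obj, 'obj.child.missing') is None
True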
6,961 | def findSnpWithMaf0(freqFileName, prefix):
maf_0_set = set()
na_set = set()
try:
with open(freqFileName, "r") as inputFile:
headerIndex = None
for i, line in enumerate(inputFile):
row = createRowFromPlinkSpacedOutput(line)
if i == 0:
headerIndex = dict([
(row[i], i) for i in xrange(len(row))
])
for columnName in ["SNP", "MAF"]:
if columnName not in headerIndex:
msg = "%(freqFileName)s: no column named " \
"%(columnName)s" % locals()
raise ProgramError(msg)
else:
snpName = row[headerIndex["SNP"]]
snpMAF = row[headerIndex["MAF"]]
if snpMAF == "0":
maf_0_set.add(snpName)
elif snpMAF == "NA":
na_set.add(snpName)
except IOError:
msg = "%(freqFileName)s: no such file" % locals()
raise ProgramError(msg)
if len(maf_0_set) == 0:
logger.info(" - There are no markers with MAF 0")
else:
logger.info(" - There are {} markers with MAF 0".format(
len(maf_0_set),
))
outputFile = None
try:
with open(prefix + ".list", "w") as output_file:
for marker_name in maf_0_set:
print >>output_file, marker_name
except IOError:
msg = "{}.list: cant write file".format(prefix)
raise ProgramError(msg) | Finds SNPs with MAF of 0 and put them in a file.
:param freqFileName: the name of the frequency file.
:param prefix: the prefix of all the files.
:type freqFileName: str
:type prefix: str
Reads a frequency file from Plink, and find markers with a minor allele
frequency of zero. |
6,962 | def tolist(val):
if val is None:
return None
try:
val.extend([])
return val
except AttributeError:
pass
try:
return re.split(r, val)
except TypeError:
return list(val) | Convert a value that may be a list or a (possibly comma-separated)
string into a list. The exception: None is returned as None, not [None].
>>> tolist(["one", "two"])
['one', 'two']
>>> tolist("hello")
['hello']
>>> tolist("separate,values, with, commas, spaces , are ,ok")
['separate', 'values', 'with', 'commas', 'spaces', 'are', 'ok'] |
6,963 | def seal_aes_ctr_legacy(key_service, secret, digest_method=DEFAULT_DIGEST):
key, encoded_key = key_service.generate_key_data(64)
ciphertext, hmac = _seal_aes_ctr(
secret, key, LEGACY_NONCE, digest_method,
)
return {
: b64encode(encoded_key).decode(),
: b64encode(ciphertext).decode(),
: codecs.encode(hmac, "hex_codec"),
: digest_method,
} | Encrypts `secret` using the key service.
You can decrypt with the companion method `open_aes_ctr_legacy`. |
6,964 | def compute_extra_rows(self,
all_dates,
start_date,
end_date,
min_extra_rows):
try:
current_start_pos = all_dates.get_loc(start_date) - min_extra_rows
if current_start_pos < 0:
raise NoFurtherDataError.from_lookback_window(
initial_message="Insufficient data to compute Pipeline:",
first_date=all_dates[0],
lookback_start=start_date,
lookback_length=min_extra_rows,
)
except KeyError:
before, after = nearest_unequal_elements(all_dates, start_date)
raise ValueError(
"Pipeline start_date {start_date} is not in calendar.\n"
"Latest date before start_date is {before}.\n"
"Earliest date after start_date is {after}.".format(
start_date=start_date,
before=before,
after=after,
)
)
candidates = all_dates[:current_start_pos + 1]
choices = select_sampling_indices(candidates, self._frequency)
new_start_date = candidates[choices[-1]]
new_start_pos = all_dates.get_loc(new_start_date)
assert new_start_pos <= current_start_pos, \
"Computed negative extra rows!"
return min_extra_rows + (current_start_pos - new_start_pos) | Ensure that min_extra_rows pushes us back to a computation date.
Parameters
----------
all_dates : pd.DatetimeIndex
The trading sessions against which ``self`` will be computed.
start_date : pd.Timestamp
The first date for which final output is requested.
end_date : pd.Timestamp
The last date for which final output is requested.
min_extra_rows : int
The minimum number of extra rows required of ``self``, as
determined by other terms that depend on ``self``.
Returns
-------
extra_rows : int
The number of extra rows to compute. This will be the minimum
number of rows required to make our computed start_date fall on a
recomputation date. |
6,965 | def user_organization_membership_show(self, user_id, id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/organization_memberships
api_path = "/api/v2/users/{user_id}/organization_memberships/{id}.json"
api_path = api_path.format(user_id=user_id, id=id)
return self.call(api_path, **kwargs) | https://developer.zendesk.com/rest_api/docs/core/organization_memberships#show-membership |
6,966 | def median_depth_img(self, num_img=1, fill_depth=0.0):
depths = []
for _ in range(num_img):
_, depth, _ = self.frames()
depths.append(depth)
median_depth = Image.median_images(depths)
median_depth.data[median_depth.data == 0.0] = fill_depth
return median_depth | Collect a series of depth images and return the median of the set.
Parameters
----------
num_img : int
The number of consecutive frames to process.
Returns
-------
DepthImage
The median DepthImage collected from the frames. |
6,967 | def iterShapes(self):
shp = self.__getFileObj(self.shp)
shp.seek(0,2)
self.shpLength = shp.tell()
shp.seek(100)
while shp.tell() < self.shpLength:
yield self.__shape() | Serves up shapes in a shapefile as an iterator. Useful
for handling large shapefiles. |
6,968 | def action_size(self) -> Sequence[Sequence[int]]:
fluents = self.domain.action_fluents
ordering = self.domain.action_fluent_ordering
return self._fluent_size(fluents, ordering) | The size of each action fluent in canonical order.
Returns:
Sequence[Sequence[int]]: A tuple of tuple of integers
representing the shape and size of each fluent. |
6,969 | def download_data(identifier, outdir):
if use_local_data_repository is not None:
url_base = + request.pathname2url(
use_local_data_repository + os.sep)
else:
url_base = repository_url
print(.format(url_base))
url = url_base + inventory_filename
filename, headers =request.urlretrieve(url)
df = pd.read_csv(
filename,
delim_whitespace=True,
comment=,
header=None,
names=[, ],
)
rel_path_query = df.query(.format(identifier))
if rel_path_query.shape[0] == 0:
raise Exception()
rel_path = rel_path_query[].values[0]
url = url_base + rel_path
print(.format(url))
filename, headers =request.urlretrieve(url)
if not os.path.isdir(outdir):
os.makedirs(outdir)
zip_obj = zipfile.ZipFile(filename)
zip_obj.extractall(outdir) | Download data from a separate data repository for testing.
Parameters
----------
identifier: string
The identifier used to find the data set
outdir: string
unzip the data in this directory |
6,970 | def get_el_from_z(z):
if(type(z)==float):
z=int(z)
if(type(z)==int):
z=str(z)
dict_z={: , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : }
return dict_z[z] | Very simple function that gives the element symbol as a string when given the atomic number.
Uses a predefined dictionary.
Parameter :
z : string or number
For the other way, see get_z_from_el |
6,971 | def mkproject_cmd(argv):
if in argv or in argv:
templates = [t.name[9:] for t in workon_home.glob("template_*")]
print("Available project templates:", *templates, sep=)
return
parser = mkvirtualenv_argparser()
parser.add_argument()
parser.add_argument(
, action=, default=[], dest=, help=)
parser.add_argument(
, , action=, help=)
args, rest = parser.parse_known_args(argv)
projects_home = Path(os.environ.get(, ))
if not projects_home.exists():
sys.exit( % projects_home)
project = (projects_home / args.envname).absolute()
if project.exists():
sys.exit( % args.envname)
mkvirtualenv(args.envname, args.python, args.packages, project.absolute(),
args.requirements, rest)
project.mkdir()
for template_name in args.templates:
template = workon_home / ("template_" + template_name)
inve(args.envname, str(template), args.envname, str(project))
if args.activate:
shell(args.envname, cwd=str(project)) | Create a new project directory and its associated virtualenv. |
6,972 | def _keplerian_circular_to_keplerian(cls, coord, center):
a, ex, ey, i, Ω, u = coord
e = sqrt(ex ** 2 + ey ** 2)
ω = arctan2(ey / e, ex / e)
ν = u - ω
return np.array([a, e, i, Ω, ω, ν], dtype=float) | Conversion from Keplerian near-circular elements to Mean Keplerian |
6,973 | def copy(self):
a, b = it.tee(self._data)
self._data = a
return Stream(b) | Returns a "T" (tee) copy of the given stream, allowing the calling
stream to continue being used. |
6,974 | def build_machine(network=None,
machine_type=None,
preemptible=None,
service_account=None,
boot_disk_size_gb=None,
disks=None,
accelerators=None,
labels=None,
cpu_platform=None,
nvidia_driver_version=None):
return {
: network,
: machine_type,
: preemptible,
: service_account,
: boot_disk_size_gb,
: disks,
: accelerators,
: labels,
: cpu_platform,
: nvidia_driver_version,
} | Build a VirtualMachine object for a Pipeline request.
Args:
network (dict): Network details for the pipeline to run in.
machine_type (str): GCE Machine Type string for the pipeline.
preemptible (bool): Use a preemptible VM for the job.
service_account (dict): Service account configuration for the VM.
boot_disk_size_gb (int): Boot disk size in GB.
disks (list[dict]): List of disks to mount.
accelerators (list[dict]): List of accelerators to attach to the VM.
labels (dict[string, string]): Labels for the VM.
cpu_platform (str): The CPU platform to request.
nvidia_driver_version (str): The NVIDIA driver version to use when attaching
an NVIDIA GPU accelerator.
Returns:
An object representing a VirtualMachine. |
6,975 | def list_to_file(orig_list, file_name, file_location):
file = __os.path.join(file_location, file_name)
def add_line_break(list_line):
list_line = ( % (list_line,))
return list_line
write_file = open(file, "a")
for orig_list_line in orig_list:
write_file.write(add_line_break(str(orig_list_line)))
write_file.close()
return file_name | Function to export a list to a text file
Args:
orig_list: The list you want exported
file_name: The name of the exported file
file_location: The location of the file, derive from the os module
Returns: returns the filename info |
6,976 | def unselect(self, value=None, field=None, **kwargs):
if field:
self.find("select", field, **kwargs).find("option", value, **kwargs).unselect_option()
else:
self.find("option", value, **kwargs).unselect_option() | Find a select box on the page and unselect a particular option from it. If the select box is
a multiple select, ``unselect`` can be called multiple times to unselect more than one
option. The select box can be found via its name, id, or label text. ::
page.unselect("March", field="Month")
Args:
value (str, optional): Which option to unselect.
field (str, optional): The id, name, or label of the select box.
**kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`. |
6,977 | def sort_by_distance(cls, consumer_offsets_metadata):
sorted_offsets = sorted(
list(consumer_offsets_metadata.items()),
key=lambda topic_offsets: sum([o.highmark - o.current for o in topic_offsets[1]])
)
return OrderedDict(sorted_offsets) | Receives a dict of (topic_name: ConsumerPartitionOffset) and returns a
similar dict where the topics are sorted by total offset distance. |
6,978 | def _set_show_ntp(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=show_ntp.show_ntp, is_leaf=True, yang_name="show-ntp", rest_name="show-ntp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u: {u: u, u: u}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "rpc",
: ,
})
self.__show_ntp = t
if hasattr(self, ):
self._set() | Setter method for show_ntp, mapped from YANG variable /brocade_ntp_rpc/show_ntp (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_show_ntp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_show_ntp() directly.
YANG Description: show active ntp server for cluster or specified switchid |
6,979 | def savemap(self, filename, filetype=, papertype="a4"):
self.fig.savefig(filename,
dpi=self.dpi,
format=filetype,
papertype=papertype) | Save the figure |
6,980 | def relative_resources(pathstring, failover=):
if working_dir is None:
return Path(failover, pathstring).resolve()
else:
return Path(devconfig.resources, pathstring).resolve().relative_to(working_dir.resolve()) | relative paths to resources in this repository
`failover` matches the location relative to the
github location (usually for prov purposes) |
6,981 | def get_properties(cls):
property_names = [p for p in dir(cls)
if isinstance(getattr(cls, p), property)]
return property_names | Get all properties of the MessageFlags class. |
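The same property-introspection pattern on a toy class (hypothetical, just to show what the list comprehension picks up):
>>> class Flags:
...     @property
...     def ack(self):
...         return True
...     def not_a_property(self):
...         return False
>>> [p for p in dir(Flags) if isinstance(getattr(Flags, p), property)]
['ack']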
6,982 | def when(self, key):
ctx = Context(key, self)
self.context.append(ctx)
return ctx | Specify context, i.e. condition that must be met.
Arguments:
key (str): Name of the context whose value you want to query.
Returns:
Context: |
6,983 | def heartbeat(self):
super(SchedulerMetricsJob, self).heartbeat()
session = settings.Session()
TI = TaskInstance
successful_tis = (
session
.query(TI)
.filter(TI.dag_id.in_(DAG_IDS))
.filter(TI.state.in_([State.SUCCESS]))
.all()
)
session.commit()
dagbag = DagBag(SUBDIR)
dags = [dagbag.dags[dag_id] for dag_id in DAG_IDS]
num_task_instances = sum([(timezone.utcnow() - task.start_date).days
for dag in dags for task in dag.tasks])
if (len(successful_tis) == num_task_instances or
(timezone.utcnow() - self.start_date).total_seconds() >
MAX_RUNTIME_SECS):
if len(successful_tis) == num_task_instances:
self.log.info("All tasks processed! Printing stats.")
else:
self.log.info("Test timeout reached. Printing available stats.")
self.print_stats()
set_dags_paused_state(True)
sys.exit() | Override the scheduler heartbeat to determine when the test is complete |
6,984 | def _ul_per_mm(self, ul: float, func: str) -> float:
sequence = self.ul_per_mm[func]
return pipette_config.piecewise_volume_conversion(ul, sequence) | :param ul: microliters as a float
:param func: must be one of 'aspirate' or 'dispense'
:return: microliters/mm as a float |
6,985 | def type(self, variant_probe_coverages, variant=None):
if not isinstance(variant_probe_coverages, list):
variant_probe_coverages = [variant_probe_coverages]
calls = []
for variant_probe_coverage in variant_probe_coverages:
calls.append(
self._type_variant_probe_coverages(
variant_probe_coverage, variant))
hom_alt_calls = [c for c in calls if sum(c["genotype"]) > 1]
het_calls = [c for c in calls if sum(c["genotype"]) == 1]
if hom_alt_calls:
hom_alt_calls.sort(key=lambda x: x["info"]["conf"], reverse=True)
return hom_alt_calls[0]
elif het_calls:
het_calls.sort(key=lambda x: x["info"]["conf"], reverse=True)
return het_calls[0]
else:
calls.sort(key=lambda x: x["info"]["conf"], reverse=True)
return calls[0] | Takes a list of VariantProbeCoverages and returns a Call for the Variant.
Note, in the simplest case the list will be of length one. However, we may be typing the
Variant on multiple backgrounds, leading to multiple VariantProbes for a single Variant. |
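The selection logic above (prefer homozygous-alt calls, then heterozygous calls, then whatever remains, each ranked by confidence) can be exercised with plain dictionaries; the call structure below is an assumption based only on the keys the method reads:
calls = [
    {"genotype": [1, 1], "info": {"conf": 30}},  # hom-alt, lower confidence
    {"genotype": [1, 1], "info": {"conf": 80}},  # hom-alt, higher confidence
    {"genotype": [0, 1], "info": {"conf": 99}},  # het, only wins if no hom-alt call exists
]
hom_alt_calls = [c for c in calls if sum(c["genotype"]) > 1]
hom_alt_calls.sort(key=lambda x: x["info"]["conf"], reverse=True)
print(hom_alt_calls[0]["info"]["conf"])  # -> 80: the best hom-alt call wins despite the het's higher confidence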
6,986 | def getSingleVisualPropertyValue(self, networkId, viewId, objectType, objectId, visualProperty, verbose=None):
response=api(url=self.___url+'networks/'+str(networkId)+'/views/'+str(viewId)+'/'+str(objectType)+'/'+str(objectId)+'/'+str(visualProperty)+'', method="H", verbose=verbose, parse_params=False)  # URL path segments were elided in the source dump and are reconstructed here from the documented CyREST route
return response | Gets the Visual Property specificed by the `visualProperty` parameter for the node or edge specified by the `objectId` parameter in the Network View specified by the `viewId` and `networkId` parameters.
Additional details on common Visual Properties can be found in the [Basic Visual Lexicon JavaDoc API](http://chianti.ucsd.edu/cytoscape-3.6.1/API/org/cytoscape/view/presentation/property/BasicVisualLexicon.html)
:param networkId: SUID of the Network
:param viewId: SUID of the Network View
:param objectType: Type of Object
:param objectId: SUID of the Object
:param visualProperty: Name of the Visual Property
:param verbose: print more
:returns: 200: successful operation |
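As a hedged illustration of the REST call this wrapper issues, an equivalent request can be made directly with `requests` against a local CyREST server; the base URL and the exact path layout are assumptions based on the documented parameters:
import requests
base = 'http://localhost:1234/v1'  # default CyREST port; adjust for your setup
network_id, view_id, object_type, object_id, visual_property = 52, 53, 'nodes', 101, 'NODE_FILL_COLOR'
url = f'{base}/networks/{network_id}/views/{view_id}/{object_type}/{object_id}/{visual_property}'
resp = requests.get(url)
print(resp.status_code, resp.json() if resp.ok else resp.text)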
6,987 | def _match_exec(self, i):
self.col_match = self.RE_EXEC.match(self._source[i])
if self.col_match is not None:
if self.col_match.group("codetype") == "function":
self.el_type = Function
else:
self.el_type = Subroutine
self.el_name = self.col_match.group("name")
return True
else:
return False | Looks at line 'i' for a subroutine or function definition. |
6,988 | def DocbookSlidesHtml(env, target, source=None, *args, **kw):
if not SCons.Util.is_List(target):
target = [target]
if not source:
source = target
target = []
elif not SCons.Util.is_List(source):
source = [source]
__init_xsl_stylesheet(kw, env, "$DOCBOOK_DEFAULT_XSL_SLIDESHTML", ['slides','xhtml','plain.xsl'])  # stylesheet variable and path were elided in the source dump; reconstructed from SCons docbook tool conventions
__builder = __select_builder(__lxml_builder, __libxml2_builder, __xsltproc_builder)
base_dir = kw.get('base_dir', '')
if base_dir:
__create_output_dir(base_dir)
result = []
r = __builder.__call__(env, __ensure_suffix(str(target[0]), '.html'), source[0], **kw)
env.Depends(r, kw['DOCBOOK_XSL'])
result.extend(r)
env.Clean(r, [os.path.join(base_dir, 'toc.html')] +
glob.glob(os.path.join(base_dir, 'foil*.html')))
return result | A pseudo-Builder, providing a Docbook toolchain for HTML slides output. |
6,989 | def lev_bounds(self):
try:
for domname, dom in self.domains.items():
try:
thislev = dom.axes['lev'].bounds
except:
pass
return thislev
except:
raise ValueError("Can't resolve a lev axis.") | Pressure levels at grid interfaces (hPa or mb)
:getter: Returns the bounds of axis ``'lev'`` if available in the
process's domains.
:type: array
:raises: :exc:`ValueError`
if no ``'lev'`` axis can be found. |
6,990 | def get_dataset(self, key, info):
res = super(HRITJMAFileHandler, self).get_dataset(key, info)
self._check_sensor_platform_consistency(info['sensor'])
res = self._mask_space(self.calibrate(res, key.calibration))
res.attrs.update(info)
res.attrs['platform_name'] = self.platform
res.attrs['satellite_longitude'] = float(self.mda['projection_parameters']['SSP_longitude'])  # attribute and metadata keys were elided in the source dump; reconstructed
res.attrs['satellite_latitude'] = 0.
res.attrs['satellite_altitude'] = float(self.mda['projection_parameters']['h'])
return res | Get the dataset designated by *key*. |
6,991 | def play_env_problem_randomly(env_problem,
num_steps):
env_problem.reset()
for _ in range(num_steps):
actions = np.stack([env_problem.action_space.sample() for _ in range(
env_problem.batch_size)])
_, _, dones, _ = env_problem.step(actions)
env_problem.reset(indices=done_indices(dones)) | Plays the env problem by randomly sampling actions for `num_steps`. |
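A standalone sketch of the same random-rollout idea using the plain Gym API; the batched EnvProblem interface above is library-specific, so a single-environment analogue is shown, assuming the classic step API that returns a 4-tuple:
import gym
env = gym.make('CartPole-v1')
env.reset()
for _ in range(100):
    action = env.action_space.sample()  # random action, mirroring the batched sampling above
    _, _, done, _ = env.step(action)
    if done:                            # reset finished episodes, like reset(indices=done_indices(dones))
        env.reset()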
6,992 | def update_key(
self, vault_base_url, key_name, key_version, key_ops=None, key_attributes=None, tags=None, custom_headers=None, raw=False, **operation_config):
parameters = models.KeyUpdateParameters(key_ops=key_ops, key_attributes=key_attributes, tags=tags)
url = self.update_key.metadata['url']
path_format_arguments = {
: self._serialize.url("vault_base_url", vault_base_url, , skip_quote=True),
: self._serialize.url("key_name", key_name, ),
: self._serialize.url("key_version", key_version, )
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
body_content = self._serialize.body(parameters, 'KeyUpdateParameters')
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.KeyVaultErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('KeyBundle', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized | The update key operation changes specified attributes of a stored key
and can be applied to any key type and key version stored in Azure Key
Vault.
In order to perform this operation, the key must already exist in the
Key Vault. Note: The cryptographic material of a key itself cannot be
changed. This operation requires the keys/update permission.
:param vault_base_url: The vault name, for example
https://myvault.vault.azure.net.
:type vault_base_url: str
:param key_name: The name of key to update.
:type key_name: str
:param key_version: The version of the key to update.
:type key_version: str
:param key_ops: Json web key operations. For more information on
possible key operations, see JsonWebKeyOperation.
:type key_ops: list[str or
~azure.keyvault.v2016_10_01.models.JsonWebKeyOperation]
:param key_attributes:
:type key_attributes: ~azure.keyvault.v2016_10_01.models.KeyAttributes
:param tags: Application specific metadata in the form of key-value
pairs.
:type tags: dict[str, str]
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: KeyBundle or ClientRawResponse if raw=true
:rtype: ~azure.keyvault.v2016_10_01.models.KeyBundle or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`KeyVaultErrorException<azure.keyvault.v2016_10_01.models.KeyVaultErrorException>` |
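A hedged usage sketch of the operation documented above via the azure-keyvault 1.x client; the vault URL, key name, operations and tags are placeholders, and the credential setup is only sketched (the service principal would need the keys/update permission):
from azure.keyvault import KeyVaultClient
from azure.common.credentials import ServicePrincipalCredentials

# Assumed auth flow: a service principal scoped to the Key Vault resource.
credentials = ServicePrincipalCredentials(
    client_id='...', secret='...', tenant='...', resource='https://vault.azure.net')
client = KeyVaultClient(credentials)
updated = client.update_key(
    'https://myvault.vault.azure.net',  # vault_base_url (placeholder)
    'my-key',                           # key_name (placeholder)    '',                                 # key_version: empty string targets the latest version
    key_ops=['encrypt', 'decrypt'],
    tags={'env': 'test'})
print(updated.key.kid)  # the updated KeyBundle carries the key identifier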
6,993 | def tracking_save(sender, instance, raw, using, update_fields, **kwargs):
if _has_changed(instance):
if instance._original_fields['pk'] is None:  # field key elided in the source dump; the primary key is assumed
_create_create_tracking_event(instance)
else:
_create_update_tracking_event(instance)
if _has_changed_related(instance):
_create_update_tracking_related_event(instance)
if _has_changed(instance) or _has_changed_related(instance):
_set_original_fields(instance) | Post save, detect creation or changes and log them.
We need post_save to have the object for a create. |
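For context, a receiver like this is normally wired up to Django's post_save signal; a hedged sketch of that registration (the model name is a placeholder for whatever models should be tracked):
from django.db.models.signals import post_save
from myapp.models import TrackedModel  # hypothetical model whose changes should be tracked
# Register the receiver defined above so it fires on every save of TrackedModel.
post_save.connect(tracking_save, sender=TrackedModel)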
6,994 | def QA_util_future_to_tradedatetime(real_datetime):
if len(str(real_datetime)) >= 19:
dt = datetime.datetime.strptime(
str(real_datetime)[0:19],
'%Y-%m-%d %H:%M:%S')
return dt if dt.time(
) < datetime.time(21,
0) else QA_util_get_next_datetime(dt,
1)
elif len(str(real_datetime)) == 16:
dt = datetime.datetime.strptime(
str(real_datetime)[0:16],
'%Y-%m-%d %H:%M')
return dt if dt.time(
) < datetime.time(21,
0) else QA_util_get_next_datetime(dt,
1) | Takes a real (wall-clock) trading datetime and returns the datetime as defined by the futures exchange; applies to conversions for TB / WenHua (文华) / BoYi (博弈) data.
Arguments:
real_datetime {datetime.datetime or str} -- the real trading time
Returns:
datetime.datetime -- the exchange-defined trading datetime |
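A quick worked example of the cut-over rule above, assuming the helper rolls any timestamp at or after 21:00 into the next trading day (`QA_util_get_next_datetime` belongs to the same library and is not reproduced here):
import datetime
cutover = datetime.time(21, 0)
for raw in ('2019-01-07 20:59:00', '2019-01-07 21:30:00'):
    dt = datetime.datetime.strptime(raw, '%Y-%m-%d %H:%M:%S')
    rolls_forward = dt.time() >= cutover  # the night session belongs to the next trading day
    print(raw, '-> next trading day' if rolls_forward else '-> same trading day')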
6,995 | def xstatus(self):
return max(node.xstatus for node in self.nodes) if len(self.nodes) else 0 | UNIX-like exit status, only coherent if the context has stopped. |
6,996 | def SetStatus(self, status, message="", backtrace=None):
self.status.status = status
self.status.error_message = utils.SmartUnicode(message)
if backtrace:
self.status.backtrace = utils.SmartUnicode(backtrace) | Set a status to report back to the server. |
6,997 | def add_platform(name, platform_set, server_url):
config = _get_asam_configuration(server_url)
if not config:
return False
platforms = list_platforms(server_url)
if name in platforms[server_url]:
return {name: "Specified platform already exists on {0}".format(server_url)}
platform_sets = list_platform_sets(server_url)
if platform_set not in platform_sets[server_url]:
return {name: "Specified platform set does not exist on {0}".format(server_url)}
url = config[]
data = {
: name,
: platform_set,
: ,
: ,
: ,
:
}
auth = (
config['username'],
config['password']
)
try:
html_content = _make_post_request(url, data, auth, verify=False)
except Exception as exc:
err_msg = "Failed to add platform on {0}".format(server_url)
log.error('%s: %s', err_msg, exc)  # log format string elided in the source dump
return {name: err_msg}
platforms = list_platforms(server_url)
if name in platforms[server_url]:
return {name: "Successfully added platform on {0}".format(server_url)}
else:
return {name: "Failed to add platform on {0}".format(server_url)} | To add an ASAM platform using the specified ASAM platform set on the Novell
Fan-Out Driver
CLI Example:
.. code-block:: bash
salt-run asam.add_platform my-test-vm test-platform-set prov1.domain.com |
6,998 | def team_names_to_ids(self):
d = self.team_ids_to_names()
return {v: k for k, v in d.items()} | Mapping from full team names to 3-letter team IDs.
:returns: Dictionary with team names as keys and team IDs as values. |
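The inversion above assumes the ID-to-name mapping is one-to-one; a tiny hedged example with illustrative teams:
ids_to_names = {'NYY': 'New York Yankees', 'BOS': 'Boston Red Sox'}  # illustrative data only
names_to_ids = {v: k for k, v in ids_to_names.items()}
print(names_to_ids['Boston Red Sox'])  # -> BOS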
6,999 | def UNIFAC_groups(self):
if self.__UNIFAC_groups:
return self.__UNIFAC_groups
else:
load_group_assignments_DDBST()
if self.InChI_Key in DDBST_UNIFAC_assignments:
self.__UNIFAC_groups = DDBST_UNIFAC_assignments[self.InChI_Key]
return self.__UNIFAC_groups
else:
return None | r'''Dictionary of UNIFAC subgroup: count groups for the original
UNIFAC subgroups, as determined by `DDBST's online service <http://www.ddbst.com/unifacga.html>`_.
Examples
--------
>>> pprint(Chemical('Cumene').UNIFAC_groups)
{1: 2, 9: 5, 13: 1} |