Unnamed: 0 (int64, 0–389k) | code (string, 26–79.6k chars) | docstring (string, 1–46.9k chars)
---|---|---|
14,700 | def safe_dump(data, stream=None, **kwargs):
if 'allow_unicode' not in kwargs:
kwargs['allow_unicode'] = True
return yaml.dump(data, stream, Dumper=SafeOrderedDumper, **kwargs) | Use a custom dumper to ensure that defaultdict and OrderedDict are
represented properly. Ensure that unicode strings are encoded unless
explicitly told not to. |
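The row above relies on a custom Dumper with extra representers. A minimal self-contained sketch of that pattern follows; the representer below is an assumption about how such a `SafeOrderedDumper` could be built, not salt's exact code.

```python
# Sketch of the custom-Dumper technique: register a representer so that
# OrderedDict is emitted as a plain YAML mapping with key order preserved.
from collections import OrderedDict

import yaml


class SafeOrderedDumper(yaml.SafeDumper):
    pass


def _represent_ordereddict(dumper, data):
    # Passing data.items() (not the dict itself) skips PyYAML's key sorting.
    return dumper.represent_dict(data.items())


SafeOrderedDumper.add_representer(OrderedDict, _represent_ordereddict)

print(yaml.dump(OrderedDict([('b', 1), ('a', 2)]),
                Dumper=SafeOrderedDumper, allow_unicode=True,
                default_flow_style=False))
# b: 1
# a: 2
```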
14,701 | def load(steps, reload=False):
if reload:
_STEP_CACHE.clear()
if callable(steps):
steps = steps()
if not isinstance(steps, collections.Iterable):
return load([steps])[0]
loaded = []
for s in steps:
digest = s._digest
if digest in _STEP_CACHE:
loaded.append(_STEP_CACHE[digest])
else:
try:
s.load()
_STEP_CACHE[digest] = s
loaded.append(s)
except Exception:
logging.warn('%s' %
util.indent(traceback.format_exc()))
return loaded | safely load steps in place, excluding those that fail
Args:
steps: the steps to load |
14,702 | def resfinderreporter(self):
resistance_classes = ResistanceNotes.classes(self.targetpath)
workbook = xlsxwriter.Workbook(os.path.join(self.reportpath, .format(self.analysistype)))
worksheet = workbook.add_worksheet()
bold = workbook.add_format({: True, : , : 8})
courier = workbook.add_format({: , : 8})
courier.set_align()
row = 0
col = 0
columnwidth = dict()
extended = False
headers = [, , , , , , , ,
]
for sample in self.metadata:
sample[self.analysistype].pipelineresults = list()
sample[self.analysistype].sampledata = list()
try:
blastresults = sample[self.analysistype].blastresults
except AttributeError:
blastresults =
if blastresults != :
for result in sample[self.analysistype].blastresults:
name = result[]
gname, genename, accession, allele = ResistanceNotes.gene_name(name)
data = list()
resistance = ResistanceNotes.resistance(name, resistance_classes)
data.append(genename)
data.append(allele)
data.append(resistance)
percentid = result[]
data.append(percentid)
data.append(result[])
data.append(result[])
data.append(.join([str(result[]), str(result[])]))
try:
sample[self.analysistype].pipelineresults.append(
.format(rgene=genename,
pid=percentid,
rclass=resistance))
if self.align and percentid != 100.00:
self.alignprotein(sample, name)
if not extended:
headers.extend([,
,
,
,
])
extended = True
record = SeqRecord(sample[self.analysistype].dnaseq[name],
id=.format(sample.name, name),
description=)
data.extend([record.format(),
sample[self.analysistype].aaidentity[name],
sample[self.analysistype].aaalign[name],
sample[self.analysistype].aaindex[name],
sample[self.analysistype].ntalign[name],
sample[self.analysistype].ntindex[name]
])
else:
record = SeqRecord(Seq(result[], IUPAC.unambiguous_dna),
id=.format(sample.name, name),
description=)
data.append(record.format())
if self.align:
data.extend([, , , , ])
except (KeyError, TypeError):
data.append()
sample[self.analysistype].sampledata.append(data)
if not in headers:
headers.append()
for header in headers:
worksheet.write(row, col, header, bold)
try:
columnwidth[col] = len(header) if len(header) > columnwidth[col] else columnwidth[
col]
except KeyError:
columnwidth[col] = len(header)
worksheet.set_column(col, col, columnwidth[col])
col += 1
row += 1
col = 0
for sample in self.metadata:
if not sample[self.analysistype].sampledata:
row += 1
col = 0
worksheet.set_row(row)
worksheet.set_column(col, col, columnwidth[col])
for data in sample[self.analysistype].sampledata:
columnwidth[col] = len(sample.name) + 2
worksheet.set_column(col, col, columnwidth[col])
worksheet.write(row, col, sample.name, courier)
col += 1
totallines = list()
for results in data:
worksheet.write(row, col, results, courier)
try:
alignmentcorrect = len(str(results).split()[1])
lines = results.count() if results.count() >= 1 else 1
totallines.append(lines)
except IndexError:
try:
alignmentcorrect = len(str(results).split()[0])
lines = results.count() if results.count() >= 1 else 1
totallines.append(lines)
except AttributeError:
alignmentcorrect = len(str(results))
lines = 1
totallines.append(lines)
try:
columnwidth[col] = alignmentcorrect if alignmentcorrect > columnwidth[col] else \
columnwidth[col]
except KeyError:
columnwidth[col] = alignmentcorrect
worksheet.set_column(col, col, columnwidth[col])
col += 1
worksheet.set_row(row, max(totallines) * 11)
row += 1
col = 0
workbook.close() | Custom reports for ResFinder analyses. These reports link the gene(s) found to their resistance phenotypes |
14,703 | def proxyval(self, visited):
if self.as_address() in visited:
return ProxyAlreadyVisited()
visited.add(self.as_address())
pyop_attr_dict = self.get_attr_dict()
if pyop_attr_dict:
attr_dict = pyop_attr_dict.proxyval(visited)
else:
attr_dict = {}
tp_name = self.safe_tp_name()
return InstanceProxy(tp_name, attr_dict, long(self._gdbval)) | Support for new-style classes.
Currently we just locate the dictionary using a transliteration to
python of _PyObject_GetDictPtr, ignoring descriptors |
14,704 | def get_gatk_annotations(config, include_depth=True, include_baseqranksum=True,
gatk_input=True):
broad_runner = broad.runner_from_config(config)
anns = ["MappingQualityRankSumTest", "MappingQualityZero",
"QualByDepth", "ReadPosRankSumTest", "RMSMappingQuality"]
if include_baseqranksum:
anns += ["BaseQualityRankSumTest"]
if gatk_input or broad_runner.gatk_type() == "gatk4":
anns += ["FisherStrand"]
if broad_runner.gatk_type() == "gatk4":
anns += ["MappingQuality"]
else:
anns += ["GCContent", "HaplotypeScore", "HomopolymerRun"]
if include_depth:
anns += ["DepthPerAlleleBySample"]
if broad_runner.gatk_type() in ["restricted", "gatk4"]:
anns += ["Coverage"]
else:
anns += ["DepthOfCoverage"]
return anns | Retrieve annotations to use for GATK VariantAnnotator.
If include_depth is false, we'll skip annotating DP. Since GATK downsamples,
this will undercount on high-depth sequencing, and the standard outputs
from the original callers may be preferable.
BaseQRankSum can cause issues with some MuTect2 and other runs, so we
provide an option to skip it. |
14,705 | def from_client_secrets_file(cls, client_secrets_file, scopes, **kwargs):
with open(client_secrets_file, 'r') as json_file:
client_config = json.load(json_file)
return cls.from_client_config(client_config, scopes=scopes, **kwargs) | Creates a :class:`Flow` instance from a Google client secrets file.
Args:
client_secrets_file (str): The path to the client secrets .json
file.
scopes (Sequence[str]): The list of scopes to request during the
flow.
kwargs: Any additional parameters passed to
:class:`requests_oauthlib.OAuth2Session`
Returns:
Flow: The constructed Flow instance. |
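A short usage sketch for the method documented above; the file name, scopes and redirect URI below are placeholders, not values from the source.

```python
# Hypothetical usage of Flow.from_client_secrets_file (google_auth_oauthlib).
from google_auth_oauthlib.flow import Flow

flow = Flow.from_client_secrets_file(
    'client_secrets.json',                                    # placeholder path
    scopes=['https://www.googleapis.com/auth/userinfo.email'],
    redirect_uri='https://example.com/oauth2/callback')       # passed to OAuth2Session
auth_url, state = flow.authorization_url(prompt='consent')
```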
14,706 | def xspec_cosmo(H0=None,q0=None,lambda_0=None):
current_settings = _xspec.get_xscosmo()
if (H0 is None) and (q0 is None) and (lambda_0 is None):
return current_settings
else:
user_inputs = [H0, q0, lambda_0]
for i, current_setting in enumerate(current_settings):
if user_inputs[i] is None:
user_inputs[i] = current_setting
_xspec.set_xscosmo(*user_inputs) | Define the Cosmology in use within the XSpec models. See Xspec manual for help:
http://heasarc.nasa.gov/xanadu/xspec/manual/XScosmo.html
All parameters can be modified, or just a single one.
:param H0: the Hubble constant
:param q0:
:param lambda_0:
:return: either None or the current settings (H_0, q_0, lambda_0) |
14,707 | def refresh_token(self, refresh_token):
url =
data = {'client_id': self.client_id,
'grant_type': 'refresh_token',
'refresh_token': refresh_token}
r = requests.post(url, data=data)
check_error(r)
return r.json() | Return the original JSON response. |
14,708 | def readdir(path):
path = os.path.expanduser(path)
if not os.path.isabs(path):
raise SaltInvocationError('Dir path must be absolute.')
if not os.path.isdir(path):
raise SaltInvocationError('A valid directory was not specified.')
dirents = ['.', '..']
dirents.extend(os.listdir(path))
return dirents | .. versionadded:: 2014.1.0
Return a list containing the contents of a directory
CLI Example:
.. code-block:: bash
salt '*' file.readdir /path/to/dir/ |
14,709 | def process(obj):
merged = merge(obj)
if obj.get():
print .format(obj[], len(merged)/1024.0)
_save(obj[], merged)
else:
print .format(len(merged)/1024.0)
if obj.get():
jsMin(merged, obj[])
if obj.get():
cssMin(merged, obj[]) | Process each block of the merger object. |
14,710 | def connection_lost(self, exc):
if exc:
self.log.exception()
else:
self.log.info()
self._closed.set() | Stop when connection is lost. |
14,711 | def print_status(self, indent="", recurse=False):
print ("%s%30s : %15s : %20s" %
(indent, "Linkname", "Link Status", "Jobs Status"))
for link in self._links.values():
if hasattr(link, ):
status_vect = link.check_status(
stream=sys.stdout, no_wait=True, do_print=False)
else:
status_vect = None
key = JobDetails.make_fullkey(link.full_linkname)
link_status = JOB_STATUS_STRINGS[link.check_job_status(key)]
if status_vect is None:
jobs_status = JOB_STATUS_STRINGS[link.check_jobs_status()]
else:
jobs_status = status_vect
print ("%s%30s : %15s : %20s" %
(indent, link.linkname, link_status, jobs_status))
if hasattr(link, ) and recurse:
print ("---------- %30s -----------" % link.linkname)
link.print_status(indent + " ", recurse=True)
print ("------------------------------------------------") | Print a summary of the job status for each `Link` in this `Chain` |
def revoke_access(src, dst='any', port=None, proto=None):
return modify_access(src, dst=dst, port=port, proto=proto, action='delete') | Revoke access to an address or subnet
:param src: address (e.g. 192.168.1.234) or subnet
(e.g. 192.168.1.0/24).
:param dst: destination of the connection; if the machine has multiple IPs
and connections to only one of them should be accepted, this
field has to be set.
:param port: destination port
:param proto: protocol (tcp or udp) |
14,713 | def cut(list_, index=0):
if isinstance(index, int):
cut_ = lambda x: x[index]
else:
cut_ = lambda x: getattr(x, index)
return list(map(cut_, list_)) | Cut a list by index or arg |
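The behavior follows directly from the implementation above: an int index selects by subscript, any other value is treated as an attribute name.

```python
# Usage of cut() per the code above.
from collections import namedtuple

pairs = [(1, 'a'), (2, 'b'), (3, 'c')]
assert cut(pairs, 0) == [1, 2, 3]       # x[0] of each element

Point = namedtuple('Point', 'x y')
points = [Point(1, 2), Point(3, 4)]
assert cut(points, 'x') == [1, 3]       # getattr(x, 'x') of each element
```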
14,714 | async def _build_rr_state_json(self, rr_id: str, timestamp: int) -> (str, int):
LOGGER.debug(, rr_id, timestamp)
if not ok_rev_reg_id(rr_id):
LOGGER.debug(, rr_id)
raise BadIdentifier(.format(rr_id))
rr_json = None
ledger_timestamp = None
get_rr_req_json = await ledger.build_get_revoc_reg_request(self.did, rr_id, timestamp)
resp_json = await self._submit(get_rr_req_json)
resp = json.loads(resp_json)
if resp.get(, {}).get(, None) and resp[][].get(, None):
try:
(_, rr_json, ledger_timestamp) = await ledger.parse_get_revoc_reg_response(resp_json)
except IndyError:
LOGGER.debug(, rr_id)
raise AbsentRevReg(.format(rr_id))
else:
LOGGER.debug(
,
rr_id,
timestamp)
raise BadRevStateTime(.format(rr_id, timestamp))
rv = (rr_json, ledger_timestamp)
LOGGER.debug(, rv)
return rv | Build rev reg state json at a given requested timestamp.
Return rev reg state json and its transaction time on the distributed ledger,
with upper bound at input timestamp of interest.
Raise AbsentRevReg if no revocation registry exists on input rev reg id,
or BadRevStateTime if requested timestamp predates revocation registry creation.
:param rr_id: rev reg id
:param timestamp: timestamp of interest (epoch seconds)
:return: rev reg state json and ledger timestamp (epoch seconds) |
14,715 | def Eps(value=None, loc=None):
@llrule(loc, lambda parser: [])
def rule(parser):
return value
return rule | A rule that accepts no tokens (epsilon) and returns ``value``. |
14,716 | def display(result, stream):
if result is None:
return
elif isinstance(result, basestring):
pass
elif isinstance(result, collections.Mapping):
result = u.join(u % (k, v) for
k, v in result.iteritems() if v is not None)
elif isinstance(result, collections.Iterable):
result = u.join(unicode(x) for x in result if x is not None)
else:
result = unicode(result)
stream.write(result.encode())
stream.write() | Intelligently print the result (or pass if result is None).
:param result:
:return: None |
14,717 | def _update_pop(self, pop_size):
valid_particles = []
invalid_particles = []
for part in self.population:
if any(x > 1 or x < -1 for x in part):
invalid_particles.append(part)
else:
valid_particles.append(part)
self._model_count += len(valid_particles)
for part in valid_particles:
self.update_particle(part)
self.assign_fitnesses(valid_particles)
for part in valid_particles:
if part.fitness > part.best.fitness:
part.best = creator.Particle(part)
part.best.fitness = part.fitness
for part in invalid_particles:
self.update_particle(part)
self.population[:] = valid_particles + invalid_particles
self.population.sort(key=lambda x: x.ident)
return | Assigns fitnesses to particles that are within bounds. |
14,718 | def start(self):
connect_thread = threading.Thread(target=self._connect)
connect_thread.start() | Start the connection to a transport. |
14,719 | def nguHanhNapAm(diaChi, thienCan, xuatBanMenh=False):
banMenh = {
"K1": "HẢI TRUNG KIM",
"T1": "GIÁNG HẠ THỦY",
"H1": "TÍCH LỊCH HỎA",
"O1": "BÍCH THƯỢNG THỔ",
"M1": "TANG ÐỐ MỘC",
"T2": "ÐẠI KHÊ THỦY",
"H2": "LƯ TRUNG HỎA",
"O2": "THÀNH ÐẦU THỔ",
"M2": "TÒNG BÁ MỘC",
"K2": "KIM BẠCH KIM",
"H3": "PHÚ ÐĂNG HỎA",
"O3": "SA TRUNG THỔ",
"M3": "ÐẠI LÂM MỘC",
"K3": "BẠCH LẠP KIM",
"T3": "TRƯỜNG LƯU THỦY",
"K4": "SA TRUNG KIM",
"T4": "THIÊN HÀ THỦY",
"H4": "THIÊN THƯỢNG HỎA",
"O4": "LỘ BÀN THỔ",
"M4": "DƯƠNG LIỄU MỘC",
"T5": "TRUYỀN TRUNG THỦY",
"H5": "SƠN HẠ HỎA",
"O5": "ÐẠI TRẠCH THỔ",
"M5": "THẠCH LỰU MỘC",
"K5": "KIẾM PHONG KIM",
"H6": "SƠN ÐẦU HỎA",
"O6": "ỐC THƯỢNG THỔ",
"M6": "BÌNH ÐỊA MỘC",
"K6": "XOA XUYẾN KIM",
"T6": "ÐẠI HẢI THỦY"}
matranNapAm = [
[0, "G", "Ất", "Bính", "Đinh", "Mậu", "Kỷ", "Canh", "Tân", "N", "Q"],
[1, "K1", False, "T1", False, "H1", False, "O1", False, "M1", False],
[2, False, "K1", False, "T1", False, "H1", False, "O1", False, "M1"],
[3, "T2", False, "H2", False, "O2", False, "M2", False, "K2", False],
[4, False, "T2", False, "H2", False, "O2", False, "M2", False, "K2"],
[5, "H3", False, "O3", False, "M3", False, "K3", False, "T3", False],
[6, False, "H3", False, "O3", False, "M3", False, "K3", False, "T3"],
[7, "K4", False, "T4", False, "H4", False, "O4", False, "M4", False],
[8, False, "K4", False, "T4", False, "H4", False, "O4", False, "M4"],
[9, "T5", False, "H5", False, "O5", False, "M5", False, "K5", False],
[10, False, "T5", False, "H5", False, "O5", False, "M5", False, "K5"],
[11, "H6", False, "O6", False, "M6", False, "K6", False, "T6", False],
[12, False, "H6", False, "O6", False, "M6", False, "K6", False, "T6"]
]
try:
nh = matranNapAm[diaChi][thienCan]
if nh[0] in ["K", "M", "T", "H", "O"]:
if xuatBanMenh is True:
return banMenh[nh]
else:
return nh[0]
except:
raise Exception(nguHanhNapAm.__doc__) | Use the Nạp Âm five-element cycle (Ngũ Hành nạp âm) to compute the element of a year.
Args:
diaChi (integer): ordinal number of the earthly branch (Tý=1, Sửu=2, ...)
thienCan (integer): ordinal number of the heavenly stem (Giáp=1, Ất=2, ...)
Returns:
The abbreviated element of the year (K, T, H, O, M) |
14,720 | def process_internal_commands(self):
with self._main_lock:
self.check_output_redirect()
program_threads_alive = {}
all_threads = threadingEnumerate()
program_threads_dead = []
with self._lock_running_thread_ids:
reset_cache = not self._running_thread_ids
for t in all_threads:
if getattr(t, 'is_pydev_daemon_thread', False):
pass
elif isinstance(t, PyDBDaemonThread):
pydev_log.error_once()
elif is_thread_alive(t):
if reset_cache:
curr_thread_id = get_current_thread_id(threadingCurrentThread())
for thread_id in (curr_thread_id, '*'):
queue = self.get_internal_queue(thread_id)
cmds_to_add_back = []
try:
while True:
int_cmd = queue.get(False)
if not self.mpl_hooks_in_debug_console and isinstance(int_cmd, InternalConsoleExec):
try:
self.init_matplotlib_in_debug_console()
self.mpl_in_use = True
except:
pydev_log.debug("Matplotlib support in debug console failed", traceback.format_exc())
self.mpl_hooks_in_debug_console = True
if int_cmd.can_be_executed_by(curr_thread_id):
pydev_log.verbose("processing internal command ", int_cmd)
int_cmd.do_it(self)
else:
pydev_log.verbose("NOT processing internal command ", int_cmd)
cmds_to_add_back.append(int_cmd)
except _queue.Empty:
for int_cmd in cmds_to_add_back:
queue.put(int_cmd) | This function processes internal commands |
14,721 | def decode(token, key, algorithms=None, options=None, audience=None,
issuer=None, subject=None, access_token=None):
defaults = {
'verify_signature': True,
'verify_aud': True,
'verify_iat': True,
'verify_exp': True,
'verify_nbf': True,
'verify_iss': True,
'verify_sub': True,
'verify_jti': True,
'verify_at_hash': True,
'require_aud': False,
'require_iat': False,
'require_exp': False,
'require_nbf': False,
'require_iss': False,
'require_sub': False,
'require_jti': False,
'require_at_hash': False,
'leeway': 0,
}
if options:
defaults.update(options)
verify_signature = defaults.get('verify_signature', True)
try:
payload = jws.verify(token, key, algorithms, verify=verify_signature)
except JWSError as e:
raise JWTError(e)
algorithm = jws.get_unverified_header(token)['alg']
try:
claims = json.loads(payload.decode('utf-8'))
except ValueError as e:
raise JWTError('Invalid payload string: %s' % e)
if not isinstance(claims, Mapping):
raise JWTError('Invalid payload string: must be a json object')
_validate_claims(claims, audience=audience, issuer=issuer,
subject=subject, algorithm=algorithm,
access_token=access_token,
options=defaults)
return claims | Verifies a JWT string's signature and validates reserved claims.
Args:
token (str): A signed JWS to be verified.
key (str or dict): A key to attempt to verify the payload with. Can be
individual JWK or JWK set.
algorithms (str or list): Valid algorithms that should be used to verify the JWS.
audience (str): The intended audience of the token. If the "aud" claim is
included in the claim set, then the audience must be included and must equal
the provided claim.
issuer (str or iterable): Acceptable value(s) for the issuer of the token.
If the "iss" claim is included in the claim set, then the issuer must be
given and the claim in the token must be among the acceptable values.
subject (str): The subject of the token. If the "sub" claim is
included in the claim set, then the subject must be included and must equal
the provided claim.
access_token (str): An access token string. If the "at_hash" claim is included in the
claim set, then the access_token must be included, and it must match
the "at_hash" claim.
options (dict): A dictionary of options for skipping validation steps.
defaults = {
'verify_signature': True,
'verify_aud': True,
'verify_iat': True,
'verify_exp': True,
'verify_nbf': True,
'verify_iss': True,
'verify_sub': True,
'verify_jti': True,
'verify_at_hash': True,
'require_aud': False,
'require_iat': False,
'require_exp': False,
'require_nbf': False,
'require_iss': False,
'require_sub': False,
'require_jti': False,
'require_at_hash': False,
'leeway': 0,
}
Returns:
dict: The dict representation of the claims set, assuming the signature is valid
and all requested data validation passes.
Raises:
JWTError: If the signature is invalid in any way.
ExpiredSignatureError: If the signature has expired.
JWTClaimsError: If any claim is invalid in any way.
Examples:
>>> payload = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhIjoiYiJ9.jiMyrsmD8AoHWeQgmxZ5yq8z0lXS67_QGs52AzC8Ru8'
>>> jwt.decode(payload, 'secret', algorithms='HS256') |
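A companion example of relaxing validation via the documented options dict; the token and key are the ones from the docstring example above.

```python
# Skip audience checking and allow 60 seconds of clock skew (python-jose).
from jose import jwt

token = ('eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.'
         'eyJhIjoiYiJ9.jiMyrsmD8AoHWeQgmxZ5yq8z0lXS67_QGs52AzC8Ru8')
claims = jwt.decode(token, 'secret', algorithms='HS256',
                    options={'verify_aud': False, 'leeway': 60})
assert claims == {'a': 'b'}
```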
14,722 | def same_page(c):
return all(
[
_to_span(c[i]).sentence.is_visual()
and bbox_from_span(_to_span(c[i])).page
== bbox_from_span(_to_span(c[0])).page
for i in range(len(c))
]
) | Return true if all the components of c are on the same page of the document.
Page numbers are based on the PDF rendering of the document. If a PDF file is
provided, it is used. Otherwise, if only a HTML/XML document is provided, a
PDF is created and then used to determine the page number of a Mention.
:param c: The candidate to evaluate
:rtype: boolean |
14,723 | def add_component(self, entity: int, component_instance: Any) -> None:
component_type = type(component_instance)
if component_type not in self._components:
self._components[component_type] = set()
self._components[component_type].add(entity)
if entity not in self._entities:
self._entities[entity] = {}
self._entities[entity][component_type] = component_instance
self.clear_cache() | Add a new Component instance to an Entity.
Add a Component instance to an Entity. If a Component of the same type
is already assigned to the Entity, it will be replaced.
:param entity: The Entity to associate the Component with.
:param component_instance: A Component instance. |
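A hypothetical usage sketch, assuming the surrounding class is an ECS world in the style of esper's classic `World` API; the `Velocity` component is illustrative.

```python
# Attach a component to an entity and read it back.
from dataclasses import dataclass

import esper


@dataclass
class Velocity:
    x: float = 0.0
    y: float = 0.0


world = esper.World()
player = world.create_entity()
world.add_component(player, Velocity(x=1.0, y=-0.5))
assert world.component_for_entity(player, Velocity).x == 1.0
```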
14,724 | def fetchone(table, cols="*", where=(), group="", order=(), limit=(), **kwargs):
return select(table, cols, where, group, order, limit, **kwargs).fetchone() | Convenience wrapper for database SELECT and fetch one. |
14,725 | def policy_map_clss_set_set_dscp_dscp(self, **kwargs):
config = ET.Element("config")
policy_map = ET.SubElement(config, "policy-map", xmlns="urn:brocade.com:mgmt:brocade-policer")
po_name_key = ET.SubElement(policy_map, "po-name")
po_name_key.text = kwargs.pop('po_name')
clss = ET.SubElement(policy_map, "class")
cl_name_key = ET.SubElement(clss, "cl-name")
cl_name_key.text = kwargs.pop('cl_name')
set = ET.SubElement(clss, "set")
set_dscp = ET.SubElement(set, "set_dscp")
dscp = ET.SubElement(set_dscp, "dscp")
dscp.text = kwargs.pop('dscp')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
14,726 | def _find_lang(langdict, lang, script, region):
full_locale = _full_locale(lang, script, region)
if (full_locale in _LOCALE_NORMALIZATION_MAP and
_LOCALE_NORMALIZATION_MAP[full_locale] in langdict):
return langdict[_LOCALE_NORMALIZATION_MAP[full_locale]]
if full_locale in langdict:
return langdict[full_locale]
if script is not None:
lang_script = "%s_%s" % (lang, script)
if lang_script in langdict:
return langdict[lang_script]
if region is not None:
lang_region = "%s_%s" % (lang, region)
if lang_region in langdict:
return langdict[lang_region]
if lang in langdict:
return langdict[lang]
if _may_fall_back_to_english(lang):
return langdict.get("en", None)
else:
return None | Return the entry in the dictionary for the given language information. |
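A hypothetical walk-through of the fallback order implemented above; the `_full_locale` helper is assumed to build keys like `'pt_BR'` from its parts.

```python
# Lookup order: normalized full locale -> full locale -> lang_script
#               -> lang_region -> bare lang -> optional English fallback.
langdict = {'pt_BR': 'brazilian', 'pt': 'generic',
            'zh_Hant': 'traditional', 'en': 'english'}

# _find_lang(langdict, 'pt', None, 'BR')   -> 'brazilian'   (full locale 'pt_BR')
# _find_lang(langdict, 'pt', None, 'PT')   -> 'generic'     (falls back to bare 'pt')
# _find_lang(langdict, 'zh', 'Hant', None) -> 'traditional' (lang_script 'zh_Hant')
```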
14,727 | def get_node_by_path(self, path):
if path==".":
return self
elif path.lstrip().startswith((".", "./")) or not isinstance(path, str):
logger.warning("%s.get_node_by_path: arg «path»=«%s», not correctly specified." % (self.__class__.__name__, path))
return None
_pathlist = list(filter(None, path.split("/")) )
if path.startswith("/"):
_node = self._root
_pathlist.pop(0)
else:
_node = self
for _nodename in _pathlist:
_node = _node.get_child_by_name(_nodename)
if _node is None:
logger.warning("%s.get_node_by_path: node«%s», arg `path`=«%s», cannot find node." % (self.__class__.__name__, self.name, path))
return None
return _node | Get a node from a node path.
Warning: use of this method assumes that sibling nodes have unique names,
if this is not assured the `get_node_by_coord` method can be used instead.
| Example with absolute node path:
| `node.get_node_by_path('/root.name/child.name/gchild.name')`
| Example with relative node path:
| `node.get_node_by_path('child.name/gchild.name')`
:param path: the absolute node path, or the node path relative
to the current node instance.
:type path: str
:returns: the node corresponding to `path`.
:rtype: Node or None |
14,728 | def enable_inheritance(path, objectType, clear=False):
dc = daclConstants()
objectType = dc.getObjectTypeBit(objectType)
path = dc.processPath(path, objectType)
return _set_dacl_inheritance(path, objectType, True, None, clear) | enable/disable inheritance on an object
Args:
path: The path to the object
objectType: The type of object (FILE, DIRECTORY, REGISTRY)
clear: True will remove non-Inherited ACEs from the ACL
Returns (dict): A dictionary containing the results
CLI Example:
.. code-block:: bash
salt 'minion-id' win_dacl.enable_inheritance c:\temp directory |
14,729 | def print_infos(results):
print('Total transactions: %s' % results.total_transactions)
print('Total timers: %s' % results.total_timers)
print('Total errors: %s' % results.total_errors)
print('Start datetime: %s' % results.start_datetime)
print('Finish datetime: %s' % results.finish_datetime) | Print information on standard output
:param ReportResults results: the report result containing all compiled information |
14,730 | def InitAgeCheck(self):
self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
text =
label = wx.StaticText(self.panel, label=text)
self.items = self.er_magic_data.data_lists[self.er_magic_data.age_type][0]
self.grid_builder = grid_frame2.GridBuilder(self.er_magic_data, ,
self.er_magic_data.headers, self.panel, )
self.age_grid = self.grid_builder.make_grid(incl_pmag=False)
self.age_grid.InitUI()
self.grid_builder.add_data_to_grid(self.age_grid, , incl_pmag=False)
self.grid_builder.add_age_data_to_grid()
self.grid = self.age_grid
for row in range(self.age_grid.GetNumberRows()):
for col in (0, 2):
self.age_grid.SetReadOnly(row, col, True)
self.drop_down_menu = drop_down_menus.Menus("age", self, self.age_grid, None)
self.age_grid.SetColLabelValue(0, )
hbox_one = wx.BoxSizer(wx.HORIZONTAL)
self.helpButton = wx.Button(self.panel, label="Help")
self.Bind(wx.EVT_BUTTON, lambda event: self.on_helpButton(event, "ErMagicAgeHelp.html"), self.helpButton)
hbox_one.Add(self.helpButton)
hboxok = wx.BoxSizer(wx.HORIZONTAL)
self.saveButton = wx.Button(self.panel, id=-1, label=)
self.Bind(wx.EVT_BUTTON, lambda event: self.on_saveButton(event, self.age_grid), self.saveButton)
self.cancelButton = wx.Button(self.panel, wx.ID_CANCEL, )
self.Bind(wx.EVT_BUTTON, self.on_cancelButton, self.cancelButton)
self.continueButton = wx.Button(self.panel, id=-1, label=)
self.Bind(wx.EVT_BUTTON, lambda event: self.on_continueButton(event, self.age_grid, next_dia=None), self.continueButton)
self.backButton = wx.Button(self.panel, wx.ID_ANY, "&Back")
previous_dia = self.InitLocCheck
self.Bind(wx.EVT_BUTTON, lambda event: self.on_backButton(event, previous_dia), self.backButton)
self.panel.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onLeftClickLabel, self.grid)
hboxok.Add(self.saveButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.cancelButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.continueButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.backButton)
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(label, flag=wx.ALIGN_CENTER|wx.TOP|wx.BOTTOM, border=20)
vbox.Add(hbox_one, flag=wx.BOTTOM, border=10)
vbox.Add(hboxok, flag=wx.BOTTOM, border=10)
vbox.Add(self.age_grid, flag=wx.TOP|wx.BOTTOM, border=10)
vbox.AddSpacer(20)
self.hbox_all = wx.BoxSizer(wx.HORIZONTAL)
self.hbox_all.AddSpacer(20)
self.hbox_all.Add(vbox)
self.hbox_all.AddSpacer(20)
self.panel.SetSizer(self.hbox_all)
self.hbox_all.Fit(self)
self.Centre()
self.Show()
self.Hide()
self.Show() | make an interactive grid in which users can edit ages |
14,731 | def select_serial_number_row(self, serial_number):
sheet = self.table
col = self.db_sheet_cols.id
rows = sheet.loc[:, col] == serial_number
return sheet.loc[rows, :] | Select row for identification number serial_number
Args:
serial_number: serial number
Returns:
pandas.DataFrame |
14,732 | def find_library_full_path(name):
from ctypes.util import find_library
if os.name == "posix" and sys.platform == "darwin":
return find_library(name)
def _use_proc_maps(name):
procmap = os.path.join('/proc', str(os.getpid()), 'maps')
if not os.path.isfile(procmap):
return None
with open(procmap, 'r') as f:
for line in f:
line = line.strip().split()
sofile = line[-1]
basename = os.path.basename(sofile)
if 'lib' + name + '.so' in basename:
if os.path.isfile(sofile):
return os.path.realpath(sofile)
def _use_ld(name):
cmd = "ld -t -l{} -o {}".format(name, os.devnull)
ld_lib_path = os.environ.get('LD_LIBRARY_PATH', '')
for d in ld_lib_path.split(':'):
cmd = cmd + " -L " + d
result, ret = subproc_call(cmd + )
expr = r % re.escape(name)
res = re.search(expr, result.decode())
if res:
res = res.group(0)
if not os.path.isfile(res):
return None
return os.path.realpath(res)
def _use_ldconfig(name):
with change_env('LC_ALL', 'C'), change_env('LANG', 'C'):
ldconfig, ret = subproc_call("ldconfig -p")
ldconfig = ldconfig.decode()
if ret != 0:
return None
expr = r % (re.escape(name))
res = re.search(expr, ldconfig)
if not res:
return None
else:
ret = res.group(2)
return os.path.realpath(ret)
if sys.platform.startswith('linux'):
return _use_proc_maps(name) or _use_ld(name) or _use_ldconfig(name) or find_library(name)
return find_library(name) | Similar to `from ctypes.util import find_library`, but try
to return full path if possible. |
14,733 | def load_include_path(paths):
for path in paths:
if not os.path.isdir(path):
continue
if path not in sys.path:
sys.path.insert(1, path)
for f in os.listdir(path):
fpath = os.path.join(path, f)
if os.path.isdir(fpath):
load_include_path([fpath]) | Scan for and add paths to the include path |
14,734 | def _wait_for_disk_threads(self, terminate):
if terminate:
self._upload_terminate = terminate
for thr in self._disk_threads:
thr.join() | Wait for disk threads
:param Uploader self: this
:param bool terminate: terminate threads |
14,735 | def _ctype_key_value(keys, vals):
if isinstance(keys, (tuple, list)):
assert(len(keys) == len(vals))
c_keys = []
c_vals = []
use_str_keys = None
for key, val in zip(keys, vals):
c_key_i, c_val_i, str_keys_i = _ctype_key_value(key, val)
c_keys += c_key_i
c_vals += c_val_i
use_str_keys = str_keys_i if use_str_keys is None else use_str_keys
assert(use_str_keys == str_keys_i), "inconsistent types of keys detected."
c_keys_arr = c_array(ctypes.c_char_p, c_keys) if use_str_keys \
else c_array(ctypes.c_int, c_keys)
c_vals_arr = c_array(ctypes.c_void_p, c_vals)
return (c_keys_arr, c_vals_arr, use_str_keys)
assert(isinstance(keys, (int,) + string_types)), \
"unexpected type for keys: " + str(type(keys))
use_str_keys = isinstance(keys, string_types)
if isinstance(vals, NDArray):
c_keys = c_str_array([keys]) if use_str_keys \
else c_array_buf(ctypes.c_int, array('i', [keys]))
return (c_keys, c_handle_array([vals]), use_str_keys)
else:
for value in vals:
assert(isinstance(value, NDArray))
c_keys = c_str_array([keys] * len(vals)) if use_str_keys \
else c_array_buf(ctypes.c_int, array('i', [keys] * len(vals)))
return (c_keys, c_handle_array(vals), use_str_keys) | Returns ctype arrays for the key-value args, and the whether string keys are used.
For internal use only. |
14,736 | def submitter(self, f):
f = self._wrap_coro_function_with_sem(f)
@wraps(f)
def wrapped(*args, **kwargs):
return self.submit(f(*args, **kwargs))
return wrapped | Decorator to submit a coro-function as a NewTask to self.loop with semaphore control.
Uses the loop's default_callback frequency. |
14,737 | def calculate_size(name, entry_processor):
data_size = 0
data_size += calculate_size_str(name)
data_size += calculate_size_data(entry_processor)
return data_size | Calculates the request payload size |
14,738 | def get_relaxation(self, A_configuration, B_configuration, I):
coefficients = collinsgisin_to_faacets(I)
M, ncIndices = get_faacets_moment_matrix(A_configuration,
B_configuration, coefficients)
self.n_vars = M.max() - 1
bs = len(M)
self.block_struct = [bs]
self.F = lil_matrix((bs**2, self.n_vars + 1))
for i in range(bs):
for j in range(i, bs):
if M[i, j] != 0:
self.F[i*bs+j, abs(M[i, j])-1] = copysign(1, M[i, j])
self.obj_facvar = [0 for _ in range(self.n_vars)]
for i in range(1, len(ncIndices)):
self.obj_facvar[abs(ncIndices[i])-2] += \
copysign(1, ncIndices[i])*coefficients[i] | Get the sparse SDP relaxation of a Bell inequality.
:param A_configuration: The definition of measurements of Alice.
:type A_configuration: list of list of int.
:param B_configuration: The definition of measurements of Bob.
:type B_configuration: list of list of int.
:param I: The matrix describing the Bell inequality in the
Collins-Gisin picture.
:type I: list of list of int. |
14,739 | def from_veto_def(cls, veto):
name = '%s:%s' % (veto.ifo, veto.name)
try:
name += ':%d' % int(veto.version)
except TypeError:
pass
if veto.end_time == 0:
veto.end_time = +inf
known = Segment(veto.start_time, veto.end_time)
pad = (veto.start_pad, veto.end_pad)
return cls(name=name, known=[known], category=veto.category,
description=veto.comment, padding=pad) | Define a `DataQualityFlag` from a `VetoDef`
Parameters
----------
veto : :class:`~ligo.lw.lsctables.VetoDef`
veto definition to convert from |
14,740 | def verify(self, **kwargs):
super(MetadataStatement, self).verify(**kwargs)
if "signing_keys" in self:
if 'signing_keys_uri' in self:
raise VerificationError(
'You can only have one of "signing_keys" and "signing_keys_uri"')
else:
kj = KeyJar()
try:
kj.import_jwks(self['signing_keys'], '')
except Exception:
raise VerificationError('"signing_keys" not a proper JWKS')
if "metadata_statements" in self and "metadata_statement_uris" in self:
s = set(self['metadata_statements'].keys())
t = set(self['metadata_statement_uris'].keys())
if s.intersection(t):
raise VerificationError(
'Overlap between "metadata_statements" and "metadata_statement_uris"')
return True | Verifies that an instance of this class adheres to the given
restrictions.
:param kwargs: A set of keyword arguments
:return: True if it verifies OK otherwise False. |
14,741 | def _add_tag_manifest_file(zip_file, dir_name, tag_info_list):
_add_tag_file(
zip_file, dir_name, tag_info_list, _gen_tag_manifest_file_tup(tag_info_list)
) | Generate the tag manifest file and add it to the zip. |
14,742 | def prepare_all_data(data_dir, block_pct_tokens_thresh=0.1):
gs_blocks_dir = os.path.join(data_dir, GOLD_STANDARD_BLOCKS_DIRNAME)
gs_blocks_filenames = get_filenames(
gs_blocks_dir, full_path=False, match_regex=re.escape(GOLD_STANDARD_BLOCKS_EXT))
gs_blocks_fileroots = (
re.search(r'(.+)' + re.escape(GOLD_STANDARD_BLOCKS_EXT), gs_blocks_filename).group(1)
for gs_blocks_filename in gs_blocks_filenames)
return [prepare_data(data_dir, fileroot, block_pct_tokens_thresh)
for fileroot in gs_blocks_fileroots] | Prepare data for all HTML + gold standard blocks examples in ``data_dir``.
Args:
data_dir (str)
block_pct_tokens_thresh (float): must be in [0.0, 1.0]
Returns:
List[Tuple[str, List[float, int, List[str]], List[float, int, List[str]]]]
See Also:
:func:`prepare_data` |
14,743 | def register(self, perm_func=None, model=None, allow_staff=None, allow_superuser=None,
allow_anonymous=None, unauthenticated_handler=None, request_types=None, name=None,
replace=False, _return_entry=False):
allow_staff = _default(allow_staff, self._allow_staff)
allow_superuser = _default(allow_superuser, self._allow_superuser)
allow_anonymous = _default(allow_anonymous, self._allow_anonymous)
unauthenticated_handler = _default(unauthenticated_handler, self._unauthenticated_handler)
request_types = _default(request_types, self._request_types)
if perm_func is None:
return (
lambda perm_func_:
self.register(
perm_func_, model, allow_staff, allow_superuser, allow_anonymous,
unauthenticated_handler, request_types, name, replace, _return_entry)
)
name = _default(name, perm_func.__name__)
if name == :
raise PermissionsError()
elif name in self._registry and not replace:
raise DuplicatePermissionError(name)
view_decorator = self._make_view_decorator(
name, perm_func, model, allow_staff, allow_superuser, allow_anonymous,
unauthenticated_handler, request_types)
entry = Entry(
name, perm_func, view_decorator, model, allow_staff, allow_superuser, allow_anonymous,
unauthenticated_handler, request_types, set())
self._registry[name] = entry
@wraps(perm_func)
def wrapped_func(user, instance=NO_VALUE):
if user is None:
return False
if not allow_anonymous and user.is_anonymous():
return False
test = lambda: perm_func(user) if instance is NO_VALUE else perm_func(user, instance)
return (
allow_staff and user.is_staff or
allow_superuser and user.is_superuser or
test()
)
register.filter(name, wrapped_func)
log.debug(.format(name))
return entry if _return_entry else wrapped_func | Register permission function & return the original function.
This is typically used as a decorator::
permissions = PermissionsRegistry()
@permissions.register
def can_do_something(user):
...
For internal use only: you can pass ``_return_entry=True`` to
have the registry :class:`.Entry` returned instead of
``perm_func``. |
14,744 | def memoized_ignoreargs(func):
def wrapper(*args, **kwargs):
if func not in _MEMOIZED_NOARGS:
res = func(*args, **kwargs)
_MEMOIZED_NOARGS[func] = res
return res
return _MEMOIZED_NOARGS[func]
return wrapper | A decorator. It performs memoization ignoring the arguments used to call
the function. |
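A self-contained sketch of the pattern above: the cache key is the function object itself, so after the first call every later call returns the cached result regardless of its arguments.

```python
# Memoization that ignores call arguments.
_MEMOIZED_NOARGS = {}


def memoized_ignoreargs(func):
    def wrapper(*args, **kwargs):
        if func not in _MEMOIZED_NOARGS:
            _MEMOIZED_NOARGS[func] = func(*args, **kwargs)
        return _MEMOIZED_NOARGS[func]
    return wrapper


@memoized_ignoreargs
def load_settings(path):
    print('loading from', path)
    return {'path': path}


load_settings('a.cfg')                               # runs once, caches result
assert load_settings('b.cfg') == {'path': 'a.cfg'}   # args ignored; cached value
```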
14,745 | def calc_rate_susceptibility(self, rate_std=None, params=None):
params = params or {}
if rate_std is None:
if rate_std is None:
if not (self.clock_model['valid_confidence'] and 'cov' in self.clock_model):
self.logger("ClockTree.calc_rate_susceptibility: need valid standard deviation of the clock rate to estimate dating error.", 1, warn=True)
return ttconf.ERROR
rate_std = np.sqrt(self.clock_model['cov'][0,0])
current_rate = np.abs(self.clock_model['slope'])
upper_rate = self.clock_model['slope'] + rate_std
lower_rate = max(0.1*current_rate, self.clock_model['slope'] - rate_std)
for n in self.tree.find_clades():
if n.up:
n._orig_gamma = n.branch_length_interpolator.gamma
n.branch_length_interpolator.gamma = n._orig_gamma*upper_rate/current_rate
self.logger("
self.make_time_tree(**params)
self.logger("
for n in self.tree.find_clades():
n.numdate_rate_variation = [(upper_rate, n.numdate)]
if n.up:
n.branch_length_interpolator.gamma = n._orig_gamma*lower_rate/current_rate
self.logger("
self.make_time_tree(**params)
self.logger("
for n in self.tree.find_clades():
n.numdate_rate_variation.append((lower_rate, n.numdate))
if n.up:
n.branch_length_interpolator.gamma = n._orig_gamma
self.logger("
self.make_time_tree(**params)
self.logger("
for n in self.tree.find_clades():
n.numdate_rate_variation.append((current_rate, n.numdate))
n.numdate_rate_variation.sort(key=lambda x:x[1])
return ttconf.SUCCESS | Return the time tree estimation of evolutionary rates +/- one
standard deviation from the ML estimate.
Returns
-------
TreeTime.return_code : str
success or failure |
14,746 | def add_subsegment(self, subsegment):
super(Segment, self).add_subsegment(subsegment)
self.increment() | Add input subsegment as a child subsegment and increment
reference counter and total subsegments counter. |
14,747 | def is_in_plane(self, pp, dist_tolerance):
return np.abs(np.dot(self.normal_vector, pp) + self._coefficients[3]) <= dist_tolerance | Determines if point pp is in the plane within the tolerance dist_tolerance
:param pp: point to be tested
:param dist_tolerance: tolerance on the distance to the plane within which point pp is considered in the plane
:return: True if pp is in the plane, False otherwise |
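A minimal numeric check of the |n·p + d| <= tol test above, assuming the plane x + y + z = 1, i.e. normal (1, 1, 1) and coefficient d = -1 (both values are illustrative).

```python
# Point-to-plane membership test, as in is_in_plane().
import numpy as np

normal = np.array([1.0, 1.0, 1.0])
d = -1.0
pp = np.array([0.5, 0.3, 0.2])            # lies on the plane: coordinates sum to 1
assert np.abs(np.dot(normal, pp) + d) <= 1e-9
```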
14,748 | def _handle_get_application_request(self, app_id, semver, key, logical_id):
get_application = (lambda app_id, semver: self._sar_client.get_application(
ApplicationId=self._sanitize_sar_str_param(app_id),
SemanticVersion=self._sanitize_sar_str_param(semver)))
try:
self._sar_service_call(get_application, logical_id, app_id, semver)
self._applications[key] = {}
except EndpointConnectionError as e:
raise e
| Method that handles the get_application API call to the serverless application repo
This method puts something in the `_applications` dictionary because the plugin expects
something there in a later event.
:param string app_id: ApplicationId
:param string semver: SemanticVersion
:param string key: The dictionary key consisting of (ApplicationId, SemanticVersion)
:param string logical_id: the logical_id of this application resource |
14,749 | def remove_pickle_problems(obj):
if hasattr(obj, "doc_loader"):
obj.doc_loader = None
if hasattr(obj, "embedded_tool"):
obj.embedded_tool = remove_pickle_problems(obj.embedded_tool)
if hasattr(obj, "steps"):
obj.steps = [remove_pickle_problems(s) for s in obj.steps]
return obj | doc_loader does not pickle correctly, causing Toil errors, remove from
objects. |
14,750 | def recv_message(self, debug=False):
if debug:
packet = self.sock.recv(1024)
hexdump(packet)
packet_length_data = self.sock.recv(4)
if len(packet_length_data) < 4:
raise Exception("Nothing in the socket!")
packet_length = struct.unpack("<I", packet_length_data)[0]
packet = self.sock.recv(packet_length - 4)
if not crc32(packet_length_data + packet[0:-4]) == struct.unpack('<I', packet[-4:])[0]:
raise Exception("CRC32 was not correct!")
x = struct.unpack("<I", packet[:4])
auth_key_id = packet[4:12]
if auth_key_id == b'\x00\x00\x00\x00\x00\x00\x00\x00':
(message_id, message_length) = struct.unpack("<QI", packet[12:24])
data = packet[24:24+message_length]
elif auth_key_id == self.auth_key_id:
pass
message_key = packet[12:28]
encrypted_data = packet[28:-4]
aes_key, aes_iv = self.aes_calculate(message_key, direction="from server")
decrypted_data = crypt.ige_decrypt(encrypted_data, aes_key, aes_iv)
assert decrypted_data[0:8] == self.server_salt
assert decrypted_data[8:16] == self.session_id
message_id = decrypted_data[16:24]
seq_no = struct.unpack("<I", decrypted_data[24:28])[0]
message_data_length = struct.unpack("<I", decrypted_data[28:32])[0]
data = decrypted_data[32:32+message_data_length]
else:
raise Exception("Got unknown auth_key id")
return data | Reading socket and receiving message from server. Check the CRC32. |
14,751 | def inverse_kinematics(
self,
target_position_right,
target_orientation_right,
target_position_left,
target_orientation_left,
rest_poses,
):
ndof = 48
ik_solution = list(
p.calculateInverseKinematics(
self.ik_robot,
self.effector_right,
target_position_right,
targetOrientation=target_orientation_right,
restPoses=rest_poses[:7],
lowerLimits=self.lower,
upperLimits=self.upper,
jointRanges=self.ranges,
jointDamping=[0.7] * ndof,
)
)
ik_solution2 = list(
p.calculateInverseKinematics(
self.ik_robot,
self.effector_left,
target_position_left,
targetOrientation=target_orientation_left,
restPoses=rest_poses[7:],
lowerLimits=self.lower,
upperLimits=self.upper,
jointRanges=self.ranges,
jointDamping=[0.7] * ndof,
)
)
for i in range(8, 15):
ik_solution[i] = ik_solution2[i]
return ik_solution[1:] | Helper function to do inverse kinematics for a given target position and
orientation in the PyBullet world frame.
Args:
target_position_{right, left}: A tuple, list, or numpy array of size 3 for position.
target_orientation_{right, left}: A tuple, list, or numpy array of size 4 for
a orientation quaternion.
rest_poses: A list of size @num_joints to favor ik solutions close by.
Returns:
A list of size @num_joints corresponding to the joint angle solution. |
14,752 | def open(self):
if self.conn is not None:
self.close()
self.conn = sqlite3.connect(self.filename)
self.cursor = self.conn.cursor()
c = self.cursor
c.execute("SELECT name FROM sqlite_master WHERE type = 'table'")
if (u'searchIndex',) in c:
c.execute('DROP TABLE searchIndex')
if self.verbose:
print('Dropped existing searchIndex table.', file=sys.stderr)
c.executescript(
'CREATE TABLE searchIndex(id INTEGER PRIMARY KEY, name TEXT, type TEXT, path TEXT); '
'CREATE UNIQUE INDEX anchor ON searchIndex (name, type, path);'
) | Open a connection to the database.
If a connection appears to be open already, transactions are committed
and it is closed before proceeding. After establishing the connection,
the searchIndex table is prepared (and dropped if it already exists). |
14,753 | def getValidCertifications(self):
certs = []
today = date.today()
for c in self.getCertifications():
validfrom = c.getValidFrom() if c else None
validto = c.getValidTo() if validfrom else None
if not validfrom or not validto:
continue
validfrom = validfrom.asdatetime().date()
validto = validto.asdatetime().date()
if (today >= validfrom and today <= validto):
certs.append(c)
return certs | Returns the certifications that are fully valid. |
def choice_input(options=[], prompt='',
showopts=True, qopt=False):
choice = None
if showopts:
prompt = prompt + ' ' + str(options)
if qopt:
prompt = prompt + ' (q to quit)'
while not choice:
try:
choice = string_input(prompt + )
except SyntaxError:
if options == []:
pass
if choice:
if choice in options:
return choice
elif qopt == True and choice == 'q':
choice = None
is_sure = string_input()
if is_sure in (, , ):
exit()
elif options == []:
return 0
else:
print( + str(options) +
)
if options:
choice = None
elif options == []:
return 0
else:
print( + str(options) +
) | Get input from a list of choices (q to quit) |
14,755 | def vectorizable_features(fcs):
is_mapping = lambda obj: isinstance(obj, collections.Mapping)
return sorted(set([name for fc in fcs for name in fc if is_mapping(fc[name])])) | Discovers the ordered set of vectorizable features in ``fcs``.
Returns a list of feature names, sorted lexicographically.
Feature names are only included if the corresponding
features are vectorizable (i.e., they are an instance of
:class:`collections.Mapping`). |
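A worked example for the function above, assuming a Python where `collections.Mapping` resolves (it is `collections.abc.Mapping` in modern versions): only features whose values are Mappings are kept, and names are deduplicated and sorted.

```python
fcs = [
    {'bow': {'hello': 2}, 'length': 11},          # 'length' is not a Mapping
    {'entities': {'NYC': 1}, 'bow': {'hi': 1}},
]
assert vectorizable_features(fcs) == ['bow', 'entities']
```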
14,756 | def get_remote_url(path, remote="origin"):
path = get_path(path)
cmd = ["config", "--get", "remote.%s.url" % remote]
return __run_git(cmd, path)[0] | Run git config --get remote.<remote>.url in path.
:param path: Path where git is to be run
:param remote: Remote name
:return: str or None |
14,757 | def visit_Assign(self, node):
if self._in_class(node):
element_full_name = self._pop_indent_stack(node, "prop")
code_id = (self._fname, node.lineno)
self._processed_line = node.lineno
self._callables_db[element_full_name] = {
"name": element_full_name,
"type": "prop",
"code_id": code_id,
"last_lineno": None,
}
self._reverse_callables_db[code_id] = element_full_name
self.generic_visit(node) | Implement assignment walker.
Parse class properties defined via the property() function |
14,758 | def methodcall(obj, method_name, *args, **kwargs):
this_engine = distob.engine.eid
args = [obj] + list(args)
prefer_local = kwargs.pop('prefer_local', None)
if prefer_local is None:
if isinstance(obj, Remote):
prefer_local = obj.prefer_local
else:
prefer_local = True
block = kwargs.pop('block', True)
execloc, args, kwargs = _process_args(args, kwargs, prefer_local)
if execloc is this_engine:
r = getattr(args[0], method_name)(*args[1:], **kwargs)
else:
if False and prefer_local:
try:
kwtuple = tuple((k, kwargs[k]) for k in sorted(kwargs.keys()))
key = (args[0], method_name, args, kwtuple)
r = _call_cache[key]
except TypeError as te:
if te.args[0][:10] == 'unhashable':
r = _uncached_methodcall(execloc, args[0], method_name,
*args[1:], **kwargs)
else:
raise
except KeyError:
r = _uncached_methodcall(execloc, args[0], method_name,
*args[1:], **kwargs)
if block:
_call_cache[key] = r.r
else:
r = _uncached_methodcall(execloc, args[0], method_name,
*args[1:], **kwargs)
if block:
return convert_result(r)
else:
return r | Call a method of `obj`, either locally or remotely as appropriate.
obj may be an ordinary object, or a Remote object (or Ref or object Id)
If there are multiple remote arguments, they must be on the same engine.
kwargs:
prefer_local (bool, optional): Whether to return cached local results if
available, in preference to returning Remote objects. Default is True.
block (bool, optional): Whether remote calls should be synchronous.
If False, returned results may be AsyncResults and should be converted
by the caller using convert_result() before use. Default is True. |
14,759 | def read_namespaced_pod_disruption_budget_status(self, name, namespace, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_pod_disruption_budget_status_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_pod_disruption_budget_status_with_http_info(name, namespace, **kwargs)
return data | read status of the specified PodDisruptionBudget
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_pod_disruption_budget_status(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the PodDisruptionBudget (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1beta1PodDisruptionBudget
If the method is called asynchronously,
returns the request thread. |
14,760 | def A(self):
return fft(np.dstack([np.eye(self.m), -self.b]),
self.nfft * 2 - 1)[:, :, :self.nfft] | Spectral VAR coefficients.
.. math:: \mathbf{A}(f) = \mathbf{I} - \sum_{k=1}^{p} \mathbf{a}^{(k)}
\mathrm{e}^{-2\pi i f k} |
14,761 | def matches_count(count, options):
if options.get("count") is not None:
return count == int(options["count"])
if options.get("maximum") is not None and int(options["maximum"]) < count:
return False
if options.get("minimum") is not None and int(options["minimum"]) > count:
return False
if options.get("between") is not None and count not in options["between"]:
return False
return True | Returns whether the given count matches the given query options.
If no quantity options are specified, any count is considered acceptable.
Args:
count (int): The count to be validated.
options (Dict[str, int | Iterable[int]]): A dictionary of query options.
Returns:
bool: Whether the count matches the options. |
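The options above compose as straight predicates, which the following calls illustrate directly from the implementation.

```python
# Behavior of matches_count() per the options handled above.
assert matches_count(3, {'count': 3}) is True
assert matches_count(5, {'maximum': 4}) is False
assert matches_count(1, {'minimum': 2}) is False
assert matches_count(2, {'between': range(1, 4)}) is True
assert matches_count(7, {}) is True   # no quantity options: any count is acceptable
```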
14,762 | def accumulate_from_superclasses(cls, propname):
cachename = "__cached_all" + propname
if cachename not in cls.__dict__:
s = set()
for c in inspect.getmro(cls):
if issubclass(c, HasProps) and hasattr(c, propname):
base = getattr(c, propname)
s.update(base)
setattr(cls, cachename, s)
return cls.__dict__[cachename] | Traverse the class hierarchy and accumulate the special sets of names
``MetaHasProps`` stores on classes:
Args:
name (str) : name of the special attribute to collect.
Typically meaningful values are: ``__container_props__``,
``__properties__``, ``__properties_with_refs__`` |
14,763 | def bin_to_edge_slice(s, n):
s = canonify_slice(s, n)
start = s.start
stop = s.stop
if start > stop:
_stop = start + 1
start = stop + 1
stop = _stop
start = max(start - 1, 0)
step = abs(s.step)
if stop <= 1 or start >= n - 1 or stop == start + 1:
return slice(0, None, min(step, n - 2))
s = slice(start, stop, abs(s.step))
if len(range(*s.indices(n - 1))) < 2:
return slice(start, stop, stop - start - 1)
return s | Convert a bin slice into a bin edge slice. |
14,764 | def zip_strip_namespace(zip_src, namespace, logger=None):
namespace_prefix = "{}__".format(namespace)
lightning_namespace = "{}:".format(namespace)
zip_dest = zipfile.ZipFile(io.BytesIO(), "w", zipfile.ZIP_DEFLATED)
for name in zip_src.namelist():
orig_content = zip_src.read(name)
try:
orig_content = orig_content.decode("utf-8")
except UnicodeDecodeError:
new_content = orig_content
else:
new_content = orig_content.replace(namespace_prefix, "")
new_content = new_content.replace(lightning_namespace, "c:")
name = name.replace(namespace_prefix, "")
if orig_content != new_content and logger:
logger.info(
" {file_name}: removed {namespace}".format(
file_name=name, namespace=namespace_prefix
)
)
new_content = new_content.encode("utf-8")
zip_dest.writestr(name, new_content)
return zip_dest | Given a namespace, strips 'namespace__' from all files and filenames
in the zip |
14,765 | def tag(name, tag_name):
with LOCK:
metric(name)
TAGS.setdefault(tag_name, set()).add(name) | Tag the named metric with the given tag. |
14,766 | def _delete_fw(self, tenant_id, data):
LOG.debug("In Delete fw data is %s", data)
in_sub = self.get_in_subnet_id(tenant_id)
out_sub = self.get_out_subnet_id(tenant_id)
arg_dict = self._create_arg_dict(tenant_id, data, in_sub, out_sub)
if arg_dict.get() is None:
LOG.error("Router ID unknown for tenant %s", tenant_id)
return False
if tenant_id not in self.tenant_dict:
self.create_tenant_dict(tenant_id, arg_dict.get())
ret = self.send_in_router_port_msg(tenant_id, arg_dict, )
if not ret:
return False
ret = self.send_out_router_port_msg(tenant_id, arg_dict, )
if not ret:
return False
if not router_ret:
LOG.error("Unable to delete router for tenant %s, error case",
tenant_id)
return router_ret
del self.tenant_dict[tenant_id]
return router_ret | Internal routine called when a FW is deleted. |
14,767 | def enc(data, **kwargs):
if 'keyfile' in kwargs:
salt.utils.versions.warn_until(
'Neon',
'The \'keyfile\' argument has been deprecated and will be removed in the Neon release. Please use \'sk_file\' instead.'
)
kwargs['sk_file'] = kwargs['keyfile']
if 'key' in kwargs:
salt.utils.versions.warn_until(
'Neon',
'The \'key\' argument has been deprecated and will be removed in the Neon release. Please use \'sk\' instead.'
)
kwargs['sk'] = kwargs['key']
box_type = _get_config(**kwargs)['box_type']
if box_type == 'secretbox':
return secretbox_encrypt(data, **kwargs)
return sealedbox_encrypt(data, **kwargs) | Alias to `{box_type}_encrypt`
box_type: secretbox, sealedbox(default) |
14,768 | def GetFormatSpecification(cls):
format_specification = specification.FormatSpecification(cls.NAME)
format_specification.AddNewSignature(b, offset=0)
return format_specification | Retrieves the format specification.
Returns:
FormatSpecification: format specification. |
14,769 | def _load_cell(args, schema):
name = args[]
table = _get_table(name)
if not table:
table = datalab.bigquery.Table(name)
if table.exists():
if args[] == :
raise Exception( % name)
elif schema:
table.create(json.loads(schema))
elif not args[]:
raise Exception(
)
| Implements the BigQuery load magic used to load data from GCS to a table.
The supported syntax is:
%bigquery load -S|--source <source> -D|--destination <table> <other_args>
Args:
args: the arguments following '%bigquery load'.
schema: a JSON schema for the destination table.
Returns:
A message about whether the load succeeded or failed. |
14,770 | def get_run_states(self) -> List[RunState]:
raw_states = self.get_state()
if not raw_states:
_LOGGER.warning("Could not fetch runstates from ZoneMinder")
return []
run_states = []
for i in raw_states['states']:
raw_state = i['State']
_LOGGER.info("Initializing runstate %s", raw_state['Name'])
run_states.append(RunState(self, raw_state))
return run_states | Get a list of RunStates from the ZoneMinder API. |
14,771 | def save_hex(hex_file, path):
if not hex_file:
raise ValueError('Cannot flash an empty .hex file.')
if not path.endswith('.hex'):
raise ValueError('The path to flash must be for a .hex file.')
with open(path, 'wb') as output:
output.write(hex_file.encode('ascii')) | Given a string representation of a hex file, this function copies it to
the specified path thus causing the device mounted at that point to be
flashed.
If the hex_file is empty it will raise a ValueError.
If the filename at the end of the path does not end in '.hex' it will raise
a ValueError. |
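A hypothetical usage of `save_hex`; the mount point below is a placeholder for wherever the micro:bit is mounted on your system.

```python
# Flash a prepared hex file by copying it onto the device's mass-storage mount.
with open('micropython.hex') as f:
    save_hex(f.read(), '/media/user/MICROBIT/micropython.hex')  # placeholder mount
```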
14,772 | def reload_solver(self, constraints=None):
if constraints is None:
constraints = self._solver.constraints
self._stored_solver = None
self._solver.add(constraints) | Reloads the solver. Useful when changing solver options.
:param list constraints: A new list of constraints to use in the reloaded solver instead of the current one |
14,773 | def packet_from_xml_packet(xml_pkt, psml_structure=None):
if not isinstance(xml_pkt, lxml.objectify.ObjectifiedElement):
parser = lxml.objectify.makeparser(huge_tree=True)
xml_pkt = lxml.objectify.fromstring(xml_pkt, parser)
if psml_structure:
return _packet_from_psml_packet(xml_pkt, psml_structure)
return _packet_from_pdml_packet(xml_pkt) | Gets a TShark XML packet object or string, and returns a pyshark Packet object.
:param xml_pkt: str or xml object.
:param psml_structure: a list of the fields in each packet summary in the psml data. If given, packets will
be returned as a PacketSummary object.
:return: Packet object. |
14,774 | def _create_any_group(self, parent_node, name, type_name, instance=None, constructor=None,
args=None, kwargs=None):
if args is None:
args = []
if kwargs is None:
kwargs = {}
full_name = self._make_full_name(parent_node.v_full_name, name)
if instance is None:
if constructor is None:
if type_name == RESULT_GROUP:
constructor = ResultGroup
elif type_name == PARAMETER_GROUP:
constructor = ParameterGroup
elif type_name == CONFIG_GROUP:
constructor = ConfigGroup
elif type_name == DERIVED_PARAMETER_GROUP:
constructor = DerivedParameterGroup
elif type_name == GROUP:
constructor = NNGroupNode
else:
raise RuntimeError()
instance = self._root_instance._construct_instance(constructor, full_name,
*args, **kwargs)
else:
instance._rename(full_name)
if type_name == RESULT_GROUP:
if type(instance) in (NNGroupNode,
ParameterGroup,
ConfigGroup,
DerivedParameterGroup):
raise TypeError('%s' %
str(type(instance)))
elif type_name == PARAMETER_GROUP:
if type(instance) in (NNGroupNode,
ResultGroup,
ConfigGroup,
DerivedParameterGroup):
raise TypeError('%s' %
str(type(instance)))
elif type_name == CONFIG_GROUP:
if type(instance) in (NNGroupNode,
ParameterGroup,
ResultGroup,
DerivedParameterGroup):
raise TypeError('%s' %
str(type(instance)))
elif type_name == DERIVED_PARAMETER_GROUP:
if type(instance) in (NNGroupNode,
ParameterGroup,
ConfigGroup,
ResultGroup):
raise TypeError('%s'
% str(type(instance)))
elif type_name == GROUP:
if type(instance) in (ResultGroup,
ParameterGroup,
ConfigGroup,
DerivedParameterGroup):
raise TypeError('%s' %
str(type(instance)))
else:
raise RuntimeError()
self._set_details_tree_node(parent_node, name, instance)
instance._nn_interface = self
self._root_instance._all_groups[instance.v_full_name] = instance
self._add_to_nodes_and_leaves(instance)
parent_node._children[name] = instance
parent_node._groups[name] = instance
return instance | Generically creates a new group inferring from the `type_name`. |
14,775 | def model_code_key_prefix(code_location_key_prefix, model_name, image):
training_job_name = sagemaker.utils.name_from_image(image)
return '/'.join(filter(None, [code_location_key_prefix, model_name or training_job_name]))
The location returned is a potential concatenation of 2 parts
1. code_location_key_prefix if it exists
2. model_name or a name derived from the image
Args:
code_location_key_prefix (str): the s3 key prefix from code_location
model_name (str): the name of the model
image (str): the image from which a default name can be extracted
Returns:
str: the key prefix to be used in uploading code |
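Illustrative behavior of the join (the calls and expected values below are not from the source): empty parts are dropped by `filter(None, ...)` before joining with `'/'`.

```python
prefix = model_code_key_prefix('code-location', 'my-model', '012345.dkr.ecr/image:tag')
# -> 'code-location/my-model'
prefix = model_code_key_prefix(None, None, '012345.dkr.ecr/image:tag')
# -> a timestamped name derived from the image, e.g. 'image-2019-04-01-12-00-00-000'
```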
14,776 | def parse(readDataInstance):
if len(readDataInstance) == consts.IMAGE_NUMBEROF_DIRECTORY_ENTRIES * 8:
newDataDirectory = DataDirectory()
for i in range(consts.IMAGE_NUMBEROF_DIRECTORY_ENTRIES):
newDataDirectory[i].name.value = dirs[i]
newDataDirectory[i].rva.value = readDataInstance.readDword()
newDataDirectory[i].size.value = readDataInstance.readDword()
else:
raise excep.DirectoryEntriesLengthException("The IMAGE_NUMBEROF_DIRECTORY_ENTRIES does not match with the length of the passed argument.")
return newDataDirectory | Returns a L{DataDirectory}-like object.
@type readDataInstance: L{ReadData}
@param readDataInstance: L{ReadData} object to read from.
@rtype: L{DataDirectory}
@return: The L{DataDirectory} object containing L{consts.IMAGE_NUMBEROF_DIRECTORY_ENTRIES} L{Directory} objects.
@raise DirectoryEntriesLengthException: The L{ReadData} instance has an incorrect number of L{Directory} objects. |
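A hedged call sketch, assuming a ReadData wrapper over raw bytes and the usual 16 directory entries of 8 bytes (RVA dword + size dword) each:
raw = b'\x00' * (16 * 8)      # IMAGE_NUMBEROF_DIRECTORY_ENTRIES is normally 16
rd = ReadData(raw)            # ReadData constructor signature assumed
data_dirs = DataDirectory.parse(rd)
print(data_dirs[0].name.value, data_dirs[0].rva.value, data_dirs[0].size.value)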
14,777 | def submit_cookbook(self, cookbook, params={}, _extra_params={}):
self._check_user_parameters(params)
files = {: cookbook}
return self._submit(params, files, _extra_params=_extra_params) | Submit a cookbook. |
14,778 | def get(self, request):
self.app_list = site.get_app_list(request)
self.apps_dict = self.create_app_list_dict()
items = get_config()
if not items:
voices = self.get_default_voices()
else:
voices = []
for item in items:
self.add_voice(voices, item)
return JsonResponse(voices, safe=False) | Returns JSON representing the menu voices (entries)
in a format consumed by the JS menu.
Raised ImproperlyConfigured exceptions can be viewed
in the browser console |
14,779 | def convert_examples_to_features(examples, seq_length, tokenizer):
features = []
for (ex_index, example) in enumerate(examples):
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
_truncate_seq_pair(tokens_a, tokens_b, seq_length - 3)
else:
if len(tokens_a) > seq_length - 2:
tokens_a = tokens_a[0:(seq_length - 2)]
tokens = []
input_type_ids = []
tokens.append("[CLS]")
input_type_ids.append(0)
for token in tokens_a:
tokens.append(token)
input_type_ids.append(0)
tokens.append("[SEP]")
input_type_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
input_type_ids.append(1)
tokens.append("[SEP]")
input_type_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
while len(input_ids) < seq_length:
input_ids.append(0)
input_mask.append(0)
input_type_ids.append(0)
assert len(input_ids) == seq_length
assert len(input_mask) == seq_length
assert len(input_type_ids) == seq_length
if ex_index < 5:
logger.info("*** Example ***")
logger.info("unique_id: %s" % (example.unique_id))
logger.info("tokens: %s" % " ".join([str(x) for x in tokens]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info(
"input_type_ids: %s" % " ".join([str(x) for x in input_type_ids]))
features.append(
InputFeatures(
unique_id=example.unique_id,
tokens=tokens,
input_ids=input_ids,
input_mask=input_mask,
input_type_ids=input_type_ids))
return features | Loads a data file into a list of `InputFeature`s. |
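A worked layout for a sentence pair with seq_length=10; the WordPiece splits are hypothetical:
# text_a = "hello world", text_b = "hi"
# tokens:         [CLS] hello world [SEP] hi [SEP]  (+ 4 slots of zero padding)
# input_type_ids:   0     0     0     0    1   1      0 0 0 0
# input_mask:       1     1     1     1    1   1      0 0 0 0
# input_ids: the vocabulary indices of the tokens above, zero-padded to seq_length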
14,780 | def add_group(self, groupname, statements):
msg = OmapiMessage.open(b"group")
msg.message.append(("create", struct.pack("!I", 1)))
msg.obj.append(("name", groupname))
msg.obj.append(("statements", statements))
response = self.query_server(msg)
if response.opcode != OMAPI_OP_UPDATE:
raise OmapiError("add group failed") | Adds a group
@type groupname: bytes
@type statements: str |
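A hedged call sketch; the connection arguments follow pypureomapi conventions (host, port, key name, base64 secret are all assumptions) and the statement body uses ordinary dhcpd syntax:
omapi = Omapi('127.0.0.1', 7911, b'defomapi', b'+bFQtBCta6j2vWkjPkNFVgA==')  # args assumed
omapi.add_group(b'routers', 'option routers 10.0.0.254;')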
14,781 | def from_httplib(ResponseCls, r, **response_kw):
headers = HTTPHeaderDict()
for k, v in r.getheaders():
headers.add(k, v)
strict = getattr(r, "strict", 0)
return ResponseCls(body=r,
headers=headers,
status=r.status,
version=r.version,
reason=r.reason,
strict=strict,
original_response=r,
**response_kw) | Given an :class:`httplib.HTTPResponse` instance ``r``, return a
corresponding :class:`urllib3.response.HTTPResponse` object.
Remaining parameters are passed to the HTTPResponse constructor, along
with ``original_response=r``. |
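Hedged usage with the stdlib client (httplib is http.client on Python 3):
import http.client
conn = http.client.HTTPConnection('example.com')
conn.request('GET', '/')
raw = conn.getresponse()
resp = HTTPResponse.from_httplib(raw, preload_content=False)
print(resp.status, resp.headers.get('Content-Type'))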
14,782 | def user_field(user, field, *args):
if not field:
return
User = get_user_model()
try:
field_meta = User._meta.get_field(field)
max_length = field_meta.max_length
except FieldDoesNotExist:
if not hasattr(user, field):
return
max_length = None
if args:
v = args[0]
if v:
v = v[0:max_length]
setattr(user, field, v)
else:
return getattr(user, field) | Gets or sets (optional) user model fields. No-op if fields do not exist. |
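Two hedged calls showing the getter/setter split; a max_length of 30 on first_name is assumed for illustration:
user_field(user, 'first_name', 'A' * 100)  # setter: stored value truncated to 30 chars
name = user_field(user, 'first_name')      # getter: no extra positional args means read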
14,783 | def set2d(self):
glDisable(GL_LIGHTING)
glPolygonMode( GL_FRONT_AND_BACK, GL_FILL)
width, height = self.get_size()
glDisable(GL_DEPTH_TEST)
glViewport(0, 0, width, height)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(0, width, 0, height, -1, 1)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity() | Configures OpenGL to draw in 2D.
Note that wireframe mode is always disabled in 2D-Mode, but can be re-enabled by calling ``glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)``\ . |
14,784 | def __on_presence(self, data):
room_jid = data[].bare
muc_presence = data[]
room = muc_presence[]
nick = muc_presence[]
with self.__lock:
try:
room_data = self.__rooms[room]
if room_data.nick != nick:
return
except KeyError:
return
else:
del self.__rooms[room]
if not self.__rooms:
self.__xmpp.del_event_handler("presence", self.__on_presence)
if data[] == :
self.__safe_errback(room_data, data[][],
data[][])
elif muc_presence[] != :
self.__safe_errback(room_data, ,
)
else:
try:
config = self.__muc.getRoomConfig(room_jid)
except ValueError:
"XMPP room %s", room_jid)
self.__safe_callback(room_data)
else:
custom_values = room_data.configuration or {}
known_fields = config[]
to_remove = [key for key in custom_values
if key not in known_fields]
for key in to_remove:
del custom_values[key]
form = self.__xmpp[].make_form("submit")
form[] = custom_values
self.__muc.setRoomConfig(room_jid, form)
self.__safe_callback(room_data) | Got a presence stanza |
14,785 | def pack_req(cls, code, pl_ratio_min,
pl_ratio_max, trd_env, acc_id, trd_mkt, conn_id):
from futuquant.common.pb.Trd_GetPositionList_pb2 import Request
req = Request()
req.c2s.header.trdEnv = TRD_ENV_MAP[trd_env]
req.c2s.header.accID = acc_id
req.c2s.header.trdMarket = TRD_MKT_MAP[trd_mkt]
if code:
req.c2s.filterConditions.codeList.append(code)
if pl_ratio_min is not None:
req.c2s.filterPLRatioMin = float(pl_ratio_min) / 100.0
if pl_ratio_max is not None:
req.c2s.filterPLRatioMax = float(pl_ratio_max) / 100.0
return pack_pb_req(req, ProtoId.Trd_GetPositionList, conn_id) | Convert a user position-list request (with P/L-ratio filters) into a protobuf request |
14,786 | def log_normalization(self, name="log_normalization"):
with self._name_scope(name):
return (self.df * self.scale_operator.log_abs_determinant() +
0.5 * self.df * self.dimension * math.log(2.) +
self._multi_lgamma(0.5 * self.df, self.dimension)) | Computes the log normalizing constant, log(Z). |
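With ν = df, p = dimension, and scale Σ = L Lᵀ (so ν·log|L| = (ν/2)·log|Σ|, matching the df * log_abs_determinant term), the value computed is the standard Wishart normalizer:
\log Z = \frac{\nu}{2}\log\lvert\Sigma\rvert + \frac{\nu p}{2}\log 2 + \log\Gamma_p\!\left(\frac{\nu}{2}\right)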
14,787 | def _index(self, model):
doc_type = model
if not isinstance(model, str):
doc_type = model.__table__.name
index_name = doc_type
if hasattr(model, "__msearch_index__"):
index_name = model.__msearch_index__
if doc_type not in self._indexs:
self._indexs[doc_type] = Index(self._client, index_name, doc_type)
return self._indexs[doc_type] | Elasticsearch multi-types have been removed;
use one index per model unless __msearch_index__ is set. |
14,788 | def _begin_request(self):
headers = self.m2req.headers
self._request = HTTPRequest(connection=self,
method=headers.get("METHOD"),
uri=self.m2req.path,
version=headers.get("VERSION"),
headers=headers,
remote_ip=headers.get("x-forwarded-for"))
if len(self.m2req.body) > 0:
self._request.body = self.m2req.body
if self.m2req.is_disconnect():
self.finish()
elif headers.get("x-mongrel2-upload-done", None):
expected = headers.get("x-mongrel2-upload-start", "BAD")
upload = headers.get("x-mongrel2-upload-done", None)
if expected == upload:
self.request_callback(self._request)
elif headers.get("x-mongrel2-upload-start", None):
pass
else:
self.request_callback(self._request) | Actually start executing this request. |
14,789 | def alpha3(self, code):
code = self.alpha2(code)
try:
return self.alt_codes[code][0]
except KeyError:
return "" | Return the ISO 3166-1 three letter country code matching the provided
country code.
If no match is found, returns an empty string. |
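Hedged examples (a django-countries style `countries` object is assumed):
countries.alpha3('NZ')   # -> 'NZL'
countries.alpha3('XX')   # -> '' (no match found)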
14,790 | def main():
print_head()
puts("Welcome to the will project generator.")
puts("")
if args.config_dist_only:
print("Generating config.py.dist...")
else:
print("\nGenerating will scaffold...")
current_dir = os.getcwd()
plugins_dir = os.path.join(current_dir, "plugins")
templates_dir = os.path.join(current_dir, "templates")
if not args.config_dist_only:
print(" /plugins")
if not os.path.exists(plugins_dir):
os.makedirs(plugins_dir)
print(" __init__.py")
with open(os.path.join(plugins_dir, "__init__.py"), 'w+') as f:
pass
print(" morning.py")
morning_file_path = os.path.join(plugins_dir, "morning.py")
if not os.path.exists(morning_file_path):
with open(morning_file_path, 'w+') as f:
f.write()
print(" /templates")
if not os.path.exists(templates_dir):
os.makedirs(templates_dir)
print(" blank.html")
with open(os.path.join(templates_dir, "blank.html"), 'w+') as f:
pass
print(" .gitignore")
gitignore_path = os.path.join(current_dir, ".gitignore")
if not os.path.exists(gitignore_path):
with open(gitignore_path, 'w+') as f:
f.write()
else:
append_ignore = False
with open(gitignore_path, "r+") as f:
if "shelf.db" not in f.read():
append_ignore = True
if append_ignore:
with open(gitignore_path, "a") as f:
f.write("\nshelf.db\n")
print(" run_will.py")
run_will_path = os.path.join(current_dir, "run_will.py")
if not os.path.exists(run_will_path):
with open(run_will_path, 'w+') as f:
f.write()
st = os.stat('run_will.py')
os.chmod("run_will.py", st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
print(" config.py.dist")
config_path = os.path.join(current_dir, "config.py.dist")
if not os.path.exists(config_path) or ask_user("! config.py.dist exists. Overwrite it?"):
with open(os.path.join(PROJECT_ROOT, "config.py.dist"), "r") as source_f:
source = source_f.read()
if args.backends:
for backend in SERVICE_BACKENDS:
if backend in args.backends:
_enable_service(backend, source)
else:
__disable_service(backend, source)
else:
print("\nWill supports a few different service backends. Letw+w+w+s new!\n") | Creates the following structure:
/plugins
__init__.py
hello.py
/templates
blank.html
.gitignore
run_will.py
requirements.txt
Procfile
README.md |
14,791 | def push(self, instance, action, success, idxs=_marker):
uid = api.get_uid(instance)
info = self.objects.get(uid, {})
idx = [] if idxs is _marker else idxs
info[action] = {'success': success, 'idx': idx}
self.objects[uid] = info | Adds an instance into the pool, to be reindexed on resume |
14,792 | def extract_jwt_token(self, token):
with InvalidTokenHeader.handle_errors():
data = jwt.decode(
token,
self.encode_key,
algorithms=self.allowed_algorithms,
options={'verify_exp': False},
)
self._validate_jwt_data(data, access_type=AccessType.access)
return data | Extracts a data dictionary from a jwt token |
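A hedged sketch; the guard setup is elided and the claim name shown is an assumption for illustration:
data = guard.extract_jwt_token(token)   # raises InvalidTokenHeader on a malformed token
user_id = data.get('id')                # claim name assumed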
14,793 | def modify_virtual(hostname, username, password, name, destination,
pool=None,
address_status=None,
auto_lasthop=None,
bwc_policy=None,
cmp_enabled=None,
connection_limit=None,
dhcp_relay=None,
description=None,
fallback_persistence=None,
flow_eviction_policy=None,
gtm_score=None,
ip_forward=None,
ip_protocol=None,
internal=None,
twelve_forward=None,
last_hop_pool=None,
mask=None,
mirror=None,
nat64=None,
persist=None,
profiles=None,
policies=None,
rate_class=None,
rate_limit=None,
rate_limit_mode=None,
rate_limit_dst=None,
rate_limit_src=None,
rules=None,
related_rules=None,
reject=None,
source=None,
source_address_translation=None,
source_port=None,
virtual_state=None,
traffic_classes=None,
translate_address=None,
translate_port=None,
vlans=None):
ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
if __opts__['test']:
return _test_output(ret, 'modify', params={
'hostname': hostname,
'username': username,
'password': password,
'name': name,
'destination': destination,
'pool': pool,
'address_status': address_status,
'auto_lasthop': auto_lasthop,
'bwc_policy': bwc_policy,
'cmp_enabled': cmp_enabled,
'connection_limit': connection_limit,
'dhcp_relay': dhcp_relay,
'description': description,
'fallback_persistence': fallback_persistence,
'flow_eviction_policy': flow_eviction_policy,
'gtm_score': gtm_score,
'ip_forward': ip_forward,
'ip_protocol': ip_protocol,
'internal': internal,
'twelve_forward': twelve_forward,
'last_hop_pool': last_hop_pool,
'mask': mask,
'mirror': mirror,
'nat64': nat64,
'persist': persist,
'profiles': profiles,
'policies': policies,
'rate_class': rate_class,
'rate_limit': rate_limit,
'rate_limit_mode': rate_limit_mode,
'rate_limit_dst': rate_limit_dst,
'rate_limit_src': rate_limit_src,
'rules': rules,
'related_rules': related_rules,
'reject': reject,
'source': source,
'source_address_translation': source_address_translation,
'source_port': source_port,
'virtual_state': virtual_state,
'traffic_classes': traffic_classes,
'translate_address': translate_address,
'translate_port': translate_port,
'vlans': vlans
}
)
existing = __salt__['bigip.list_virtual'](hostname, username, password, name)
if existing['code'] == 200:
modified = __salt__['bigip.modify_virtual'](hostname=hostname,
username=username,
password=password,
name=name,
destination=destination,
description=description,
pool=pool,
address_status=address_status,
auto_lasthop=auto_lasthop,
bwc_policy=bwc_policy,
cmp_enabled=cmp_enabled,
connection_limit=connection_limit,
dhcp_relay=dhcp_relay,
fallback_persistence=fallback_persistence,
flow_eviction_policy=flow_eviction_policy,
gtm_score=gtm_score,
ip_forward=ip_forward,
ip_protocol=ip_protocol,
internal=internal,
twelve_forward=twelve_forward,
last_hop_pool=last_hop_pool,
mask=mask,
mirror=mirror,
nat64=nat64,
persist=persist,
profiles=profiles,
policies=policies,
rate_class=rate_class,
rate_limit=rate_limit,
rate_limit_mode=rate_limit_mode,
rate_limit_dst=rate_limit_dst,
rate_limit_src=rate_limit_src,
rules=rules,
related_rules=related_rules,
reject=reject,
source=source,
source_address_translation=source_address_translation,
source_port=source_port,
state=virtual_state,
traffic_classes=traffic_classes,
translate_address=translate_address,
translate_port=translate_port,
vlans=vlans)
if modified['code'] == 200:
relisting = __salt__['bigip.list_virtual'](hostname, username, password, name)
if relisting['code'] == 200:
relisting = _strip_key(relisting, 'generation')
existing = _strip_key(existing, 'generation')
ret = _check_for_changes('Virtual', ret, existing, relisting)
else:
ret = _load_result(relisting, ret)
else:
ret = _load_result(modified, ret)
elif existing['code'] == 404:
ret['comment'] = 'A Virtual with this name was not found.'
else:
ret = _load_result(existing, ret)
return ret | Modify a virtual server. Modifies an existing virtual; only the parameters specified will be enforced.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the virtual to create
destination
[ [virtual_address_name:port] | [ipv4:port] | [ipv6.port] ]
pool
[ [pool_name] | none]
address_status
[yes | no]
auto_lasthop
[default | enabled | disabled ]
bwc_policy
[none | string]
cmp_enabled
[yes | no]
dhcp_relay
[yes | no]
connection_limit
[integer]
description
[string]
state
[disabled | enabled]
fallback_persistence
[none | [profile name] ]
flow_eviction_policy
[none | [eviction policy name] ]
gtm_score
[integer]
ip_forward
[yes | no]
ip_protocol
[any | protocol]
internal
[yes | no]
twelve_forward(12-forward)
[yes | no]
last_hop_pool
[ [pool_name] | none]
mask
{ [ipv4] | [ipv6] }
mirror
{ [disabled | enabled | none] }
nat64
[enabled | disabled]
persist
[list]
profiles
[none | default | list ]
policies
[none | default | list ]
rate_class
[name]
rate_limit
[integer]
rate_limit_mode
[destination | object | object-destination |
object-source | object-source-destination |
source | source-destination]
rate_limit_dst
[integer]
rate_limit_src
[integer]
rules
[none | list ]
related_rules
[none | list ]
reject
[yes | no]
source
{ [ipv4[/prefixlen]] | [ipv6[/prefixlen]] }
source_address_translation
[none | snat:pool_name | lsn | automap | dictionary ]
source_port
[change | preserve | preserve-strict]
state
[enabled | disabled]
traffic_classes
[none | default | list ]
translate_address
[enabled | disabled]
translate_port
[enabled | disabled]
vlans
[none | default | dictionary ]
vlan_ids
[ list]
enabled
[ true | false ] |
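A hedged direct call (in practice the function is driven from an SLS state); only the supplied parameters are enforced, and all values below are illustrative:
ret = modify_virtual('bigip.example.com', 'admin', 'secret',
                     name='web_vs',
                     destination='10.1.1.5:443',
                     pool='web_pool',
                     description='edited by salt')
print(ret['result'], ret['comment'])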
14,794 | def add_hyperedge(self, tail, head, attr_dict=None, **attr):
attr_dict = self._combine_attribute_arguments(attr_dict, attr)
if not tail and not head:
raise ValueError("tail and head arguments \
cannot both be empty.")
frozen_tail = frozenset(tail)
frozen_head = frozenset(head)
if frozen_tail not in self._successors:
self._successors[frozen_tail] = {}
if frozen_head not in self._predecessors:
self._predecessors[frozen_head] = {}
is_new_hyperedge = not self.has_hyperedge(frozen_tail, frozen_head)
if is_new_hyperedge:
self.add_nodes(frozen_head)
self.add_nodes(frozen_tail)
hyperedge_id = self._assign_next_hyperedge_id()
for node in frozen_tail:
self._forward_star[node].add(hyperedge_id)
for node in frozen_head:
self._backward_star[node].add(hyperedge_id)
self._successors[frozen_tail][frozen_head] = hyperedge_id
self._predecessors[frozen_head][frozen_tail] = hyperedge_id
self._hyperedge_attributes[hyperedge_id] = \
{"tail": tail, "__frozen_tail": frozen_tail,
"head": head, "__frozen_head": frozen_head,
"weight": 1}
else:
hyperedge_id = self._successors[frozen_tail][frozen_head]
self._hyperedge_attributes[hyperedge_id].update(attr_dict)
return hyperedge_id | Adds a hyperedge to the hypergraph, along with any related
attributes of the hyperedge.
This method will automatically add any node from the tail and
head that was not in the hypergraph.
A hyperedge without a "weight" attribute specified will be
assigned the default value of 1.
:param tail: iterable container of references to nodes in the
tail of the hyperedge to be added.
:param head: iterable container of references to nodes in the
head of the hyperedge to be added.
:param attr_dict: dictionary of attributes shared by all
the hyperedges.
:param attr: keyword arguments of attributes of the hyperedge;
attr's values will override attr_dict's values
if both are provided.
:returns: str -- the ID of the hyperedge that was added.
:raises: ValueError -- tail and head arguments cannot both be empty.
Examples:
::
>>> H = DirectedHypergraph()
>>> x = H.add_hyperedge(["A", "B"], ["C", "D"])
>>> y = H.add_hyperedge(("A", "C"), ("B",), weight=2)
>>> z = H.add_hyperedge(set(["D"]),
set(["A", "C"]),
{color: "red"}) |
14,795 | def lstm_cell(inputs,
state,
num_units,
use_peepholes=False,
cell_clip=0.0,
initializer=None,
num_proj=None,
num_unit_shards=None,
num_proj_shards=None,
reuse=None,
name=None):
input_shape = common_layers.shape_list(inputs)
cell = tf.nn.rnn_cell.LSTMCell(num_units,
use_peepholes=use_peepholes,
cell_clip=cell_clip,
initializer=initializer,
num_proj=num_proj,
num_unit_shards=num_unit_shards,
num_proj_shards=num_proj_shards,
reuse=reuse,
name=name,
state_is_tuple=False)
if state is None:
state = cell.zero_state(input_shape[0], tf.float32)
outputs, new_state = cell(inputs, state)
return outputs, new_state | Full LSTM cell. |
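Hedged usage (TensorFlow 1.x graph mode assumed); passing state=None makes the cell start from its zero state:
import tensorflow as tf
x = tf.zeros([8, 16])                              # batch 8, input size 16
out, new_state = lstm_cell(x, None, num_units=32)  # out: [8, 32]
# new_state: [8, 64], since state_is_tuple=False concatenates c and h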
14,796 | def setAnimated(self, state):
self._animated = state
self.setAttribute(Qt.WA_TranslucentBackground, state) | Sets whether or not the popup widget should animate its opacity
when it is shown.
:param state | <bool> |
14,797 | def run(self, loopinfo=None, batch_size=1):
logger.info("{}.Starting...".format(self.__class__.__name__))
if loopinfo:
while True:
for topic in self.topics:
self.call_kafka(topic, batch_size)
time.sleep(loopinfo.sleep)
else:
for topic in self.topics:
self.call_kafka(topic, batch_size) | Run consumer |
14,798 | def split(self, X, y=None, groups=None):
X, y, groups = indexable(X, y, groups)
cgrs = [~r for r in X]
condition_structure = defaultdict(set)
for structure, condition in zip(cgrs, groups):
condition_structure[condition].add(structure)
train_data = defaultdict(list)
test_data = []
for n, (structure, condition) in enumerate(zip(cgrs, groups)):
train_data[structure].append(n)
if len(condition_structure[condition]) > 1:
test_data.append(n)
if self.n_splits > len(train_data):
raise ValueError("Cannot have number of splits n_splits=%d greater"
" than the number of transformations: %d."
% (self.n_splits, len(train_data)))
structures_weight = sorted(((x, len(y)) for x, y in train_data.items()), key=lambda x: x[1], reverse=True)
fold_mean_size = len(cgrs) // self.n_splits
if structures_weight[0][1] > fold_mean_size:
warning()
for idx in range(self.n_repeats):
train_folds = [[] for _ in range(self.n_splits)]
for structure, structure_length in structures_weight:
if self.shuffle:
check_random_state(self.random_state).shuffle(train_folds)
for fold in train_folds[:-1]:
if len(fold) + structure_length <= fold_mean_size:
fold.extend(train_data[structure])
break
else:
roulette_param = (structure_length - fold_mean_size + len(fold)) / structure_length
if random() > roulette_param:
fold.extend(train_data[structure])
break
else:
train_folds[-1].extend(train_data[structure])
test_folds = [[] for _ in range(self.n_splits)]
for test, train in zip(test_folds, train_folds):
for index in train:
if index in test_data:
test.append(index)
for i in range(self.n_splits):
train_index = []
for fold in train_folds[:i]:
train_index.extend(fold)
for fold in train_folds[i+1:]:
train_index.extend(fold)
test_index = test_folds[i]
yield array(train_index), array(test_index) | Generate indices to split data into training and test set.
Parameters
----------
X : array-like, of length n_samples
Training data, includes reaction's containers
y : array-like, of length n_samples
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,)
Group labels for the samples used while splitting the dataset into
train/test set.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split. |
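A hedged sketch of driving the splitter; the class name TransformationOut and the variable names are hypothetical:
cv = TransformationOut(n_splits=5, n_repeats=1, shuffle=True, random_state=42)
for train_idx, test_idx in cv.split(reactions, groups=conditions):
    pass  # fit on reactions[train_idx], score on reactions[test_idx]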
14,799 | def fit_overlays(self, text, start=None, end=None, **kw):
for ovl in text.overlays:
if ovl.match(props=self.props_match, rng=(start, end)):
yield ovl | Yield the overlays that fit the range [start, end). |