Unnamed: 0 (int64, 0-389k) | code (string, 26-79.6k chars) | docstring (string, 1-46.9k chars)
---|---|---|
3,000 | def list_taxa(pdb_list, sleep_time=.1):
if len(pdb_list)*sleep_time > 30:
warnings.warn("Because of API limitations, this function\
will take at least " + str(len(pdb_list)*sleep_time) + " seconds to return results.\
If you need greater speed, try modifying the optional argument sleep_time=.1, (although \
this may cause the search to time out)" )
taxa = []
for pdb_id in pdb_list:
all_info = get_all_info(pdb_id)
species_results = walk_nested_dict(all_info, , maxdepth=25,outputs=[])
first_result = walk_nested_dict(species_results,,outputs=[])
if first_result:
taxa.append(first_result[-1])
else:
taxa.append()
time.sleep(sleep_time)
return taxa | Given a list of PDB IDs, look up their associated species
This function digs through the search results returned
by the get_all_info() function and returns any information on
taxonomy included within the description.
The PDB website description of each entry includes the name
of the species (and sometimes details of organ or body part)
for each protein structure sample.
Parameters
----------
pdb_list : list of str
List of PDB IDs
sleep_time : float
Time (in seconds) to wait between requests. If this number is too small,
the API will stop working; the exact limit appears to vary across systems
Returns
-------
taxa : list of str
A list of the names or classifications of species
associated with entries
Examples
--------
>>> crispr_query = make_query('crispr')
>>> crispr_results = do_search(crispr_query)
>>> print(list_taxa(crispr_results[:10]))
['Thermus thermophilus',
'Sulfolobus solfataricus P2',
'Hyperthermus butylicus DSM 5456',
'unidentified phage',
'Sulfolobus solfataricus P2',
'Pseudomonas aeruginosa UCBPP-PA14',
'Pseudomonas aeruginosa UCBPP-PA14',
'Pseudomonas aeruginosa UCBPP-PA14',
'Sulfolobus solfataricus',
'Thermus thermophilus HB8'] |
3,001 | def _get_tmp_gcs_bucket(cls, writer_spec):
if cls.TMP_BUCKET_NAME_PARAM in writer_spec:
return writer_spec[cls.TMP_BUCKET_NAME_PARAM]
return cls._get_gcs_bucket(writer_spec) | Returns bucket used for writing tmp files. |
3,002 | def pow(base, exp):
if isinstance(base, Symbol) and isinstance(exp, Symbol):
return _internal._Power(base, exp)
if isinstance(base, Symbol) and isinstance(exp, Number):
return _internal._PowerScalar(base, scalar=exp)
if isinstance(base, Number) and isinstance(exp, Symbol):
return _internal._RPowerScalar(exp, scalar=base)
if isinstance(base, Number) and isinstance(exp, Number):
return base**exp
else:
raise TypeError( % (str(type(base)), str(type(exp)))) | Returns element-wise result of base element raised to powers from exp element.
Both inputs can be Symbol or scalar number.
Broadcasting is not supported. Use `broadcast_pow` instead.
`sym.pow` is being deprecated, please use `sym.power` instead.
Parameters
---------
base : Symbol or scalar
The base symbol
exp : Symbol or scalar
The exponent symbol
Returns
-------
Symbol or scalar
The bases in x raised to the exponents in y.
Examples
--------
>>> mx.sym.pow(2, 3)
8
>>> x = mx.sym.Variable('x')
>>> y = mx.sym.Variable('y')
>>> z = mx.sym.pow(x, 2)
>>> z.eval(x=mx.nd.array([1,2]))[0].asnumpy()
array([ 1., 4.], dtype=float32)
>>> z = mx.sym.pow(3, y)
>>> z.eval(y=mx.nd.array([2,3]))[0].asnumpy()
array([ 9., 27.], dtype=float32)
>>> z = mx.sym.pow(x, y)
>>> z.eval(x=mx.nd.array([3,4]), y=mx.nd.array([2,3]))[0].asnumpy()
array([ 9., 64.], dtype=float32) |
3,003 | def dropEvent(self, event):
data = event.mimeData()
if data.hasFormat() and \
data.hasFormat():
tableName = self.tableTypeName()
if nstr(data.data()) == tableName:
data = nstr(data.data())
query = Q.fromXmlString(data)
self.setQuery(query)
return
elif self.tableType() and data.hasFormat():
from projexui.widgets.xorbtreewidget import XOrbTreeWidget
records = XOrbTreeWidget.dataRestoreRecords(data)
for record in records:
if isinstance(record, self.tableType()):
self.setCurrentRecord(record)
return
super(XOrbRecordBox, self).dropEvent(event) | Listens for queries being dragged and dropped onto this tree.
:param event | <QDropEvent> |
3,004 | def describe_numeric_1d(series, **kwargs):
_percentile_format = "{:.0%}"
stats = dict()
stats['type'] = base.TYPE_NUM
stats['mean'] = series.mean()
stats['std'] = series.std()
stats['variance'] = series.var()
stats['min'] = series.min()
stats['max'] = series.max()
stats['range'] = stats['max'] - stats['min']
_series_no_na = series.dropna()
for percentile in np.array([0.05, 0.25, 0.5, 0.75, 0.95]):
stats[_percentile_format.format(percentile)] = _series_no_na.quantile(percentile)
stats['iqr'] = stats['75%'] - stats['25%']
stats['kurtosis'] = series.kurt()
stats['skewness'] = series.skew()
stats['sum'] = series.sum()
stats['mad'] = series.mad()
stats['cv'] = stats['std'] / stats['mean'] if stats['mean'] else np.NaN
stats['n_zeros'] = (len(series) - np.count_nonzero(series))
stats['p_zeros'] = stats['n_zeros'] * 1.0 / len(series)
stats['histogram'] = histogram(series, **kwargs)
stats['mini_histogram'] = mini_histogram(series, **kwargs)
return pd.Series(stats, name=series.name) | Compute summary statistics of a numerical (`TYPE_NUM`) variable (a Series).
Also create histograms (mini and full) of its distribution.
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys. |
3,005 | def to_dict(self):
list_data = []
for key, value in list(self.data.items()):
row = list(key)
row.append(value)
list_data.append(row)
return {
'groups': self.groups,
'data': list_data
} | Return a plain Python dict of the groups and data.
:returns: Dictionary of groups and data
:rtype: dict |
3,006 | def mptt_before_update(mapper, connection, instance):
node_id = getattr(instance, instance.get_pk_name())
table = _get_tree_table(mapper)
db_pk = instance.get_pk_column()
default_level = instance.get_default_level()
table_pk = getattr(table.c, db_pk.name)
mptt_move_inside = None
left_sibling = None
left_sibling_tree_id = None
if hasattr(instance, 'mptt_move_inside'):
mptt_move_inside = instance.mptt_move_inside
if hasattr(instance, 'mptt_move_before'):
(
right_sibling_left,
right_sibling_right,
right_sibling_parent,
right_sibling_level,
right_sibling_tree_id
) = connection.execute(
select(
[
table.c.lft,
table.c.rgt,
table.c.parent_id,
table.c.level,
table.c.tree_id
]
).where(
table_pk == instance.mptt_move_before
)
).fetchone()
current_lvl_nodes = connection.execute(
select(
[
table.c.lft,
table.c.rgt,
table.c.parent_id,
table.c.tree_id
]
).where(
and_(
table.c.level == right_sibling_level,
table.c.tree_id == right_sibling_tree_id,
table.c.lft < right_sibling_left
)
)
).fetchall()
if current_lvl_nodes:
(
left_sibling_left,
left_sibling_right,
left_sibling_parent,
left_sibling_tree_id
) = current_lvl_nodes[-1]
instance.parent_id = left_sibling_parent
left_sibling = {
'lft': left_sibling_left,
'rgt': left_sibling_right,
'is_parent': False
}
elif not right_sibling_parent:
left_sibling_tree_id = right_sibling_tree_id - 1
if hasattr(instance, 'mptt_move_after'):
(
left_sibling_left,
left_sibling_right,
left_sibling_parent,
left_sibling_tree_id
) = connection.execute(
select(
[
table.c.lft,
table.c.rgt,
table.c.parent_id,
table.c.tree_id
]
).where(
table_pk == instance.mptt_move_after
)
).fetchone()
instance.parent_id = left_sibling_parent
left_sibling = {
'lft': left_sibling_left,
'rgt': left_sibling_right,
'is_parent': False
}
subtree = connection.execute(
select([table_pk])
.where(
and_(
table.c.lft >= instance.left,
table.c.rgt <= instance.right,
table.c.tree_id == instance.tree_id
)
).order_by(
table.c.lft
)
).fetchall()
subtree = [x[0] for x in subtree]
(
node_pos_left,
node_pos_right,
node_tree_id,
node_parent_id,
node_level
) = connection.execute(
select(
[
table.c.lft,
table.c.rgt,
table.c.tree_id,
table.c.parent_id,
table.c.level
]
).where(
table_pk == node_id
)
).fetchone()
if not left_sibling \
and str(node_parent_id) == str(instance.parent_id) \
and not mptt_move_inside:
if left_sibling_tree_id is None:
return
if instance.parent_id is not None:
(
parent_id,
parent_pos_right,
parent_pos_left,
parent_tree_id,
parent_level
) = connection.execute(
select(
[
table_pk,
table.c.rgt,
table.c.lft,
table.c.tree_id,
table.c.level
]
).where(
table_pk == instance.parent_id
)
).fetchone()
if node_parent_id is None and node_tree_id == parent_tree_id:
instance.parent_id = None
return
mptt_before_delete(mapper, connection, instance, False)
if instance.parent_id is not None:
(
parent_id,
parent_pos_right,
parent_pos_left,
parent_tree_id,
parent_level
) = connection.execute(
select(
[
table_pk,
table.c.rgt,
table.c.lft,
table.c.tree_id,
table.c.level
]
).where(
table_pk == instance.parent_id
)
).fetchone()
node_size = node_pos_right - node_pos_left + 1
instance.tree_id = parent_tree_id
_insert_subtree(
table,
connection,
node_size,
node_pos_left,
node_pos_right,
parent_pos_left,
parent_pos_right,
subtree,
parent_tree_id,
parent_level,
node_level,
left_sibling,
table_pk
)
else:
if left_sibling_tree_id or left_sibling_tree_id == 0:
tree_id = left_sibling_tree_id + 1
connection.execute(
table.update(
table.c.tree_id > left_sibling_tree_id
).values(
tree_id=table.c.tree_id + 1
)
)
else:
tree_id = connection.scalar(
select(
[
func.max(table.c.tree_id) + 1
]
)
)
connection.execute(
table.update(
table_pk.in_(
subtree
)
).values(
lft=table.c.lft - node_pos_left + 1,
rgt=table.c.rgt - node_pos_left + 1,
level=table.c.level - node_level + default_level,
tree_id=tree_id
)
) | Based on this example:
http://stackoverflow.com/questions/889527/move-node-in-nested-set |
3,007 | def pad_sequence_to_length(sequence: List,
desired_length: int,
default_value: Callable[[], Any] = lambda: 0,
padding_on_right: bool = True) -> List:
if padding_on_right:
padded_sequence = sequence[:desired_length]
else:
padded_sequence = sequence[-desired_length:]
for _ in range(desired_length - len(padded_sequence)):
if padding_on_right:
padded_sequence.append(default_value())
else:
padded_sequence.insert(0, default_value())
return padded_sequence | Takes a list of objects and pads it to the desired length, returning the padded list. The
original list is not modified.
Parameters
----------
sequence : List
A list of objects to be padded.
desired_length : int
Maximum length of each sequence. Longer sequences are truncated to this length, and
shorter ones are padded to it.
default_value: Callable, default=lambda: 0
Callable that outputs a default value (of any type) to use as padding values. This is
a lambda to avoid using the same object when the default value is more complex, like a
list.
padding_on_right : bool, default=True
When we add padding tokens (or truncate the sequence), should we do it on the right or
the left?
Returns
-------
padded_sequence : List |
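A quick usage sketch for the helper above; it only assumes the `List`/`Callable`/`Any` imports from `typing` that the signature already implies.

```python
# Pad on the right (default), truncate, or pad on the left.
pad_sequence_to_length([1, 2, 3], 5)                          # [1, 2, 3, 0, 0]
pad_sequence_to_length([1, 2, 3], 2)                          # [1, 2]
pad_sequence_to_length([1, 2, 3], 5, padding_on_right=False)  # [0, 0, 1, 2, 3]
```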
3,008 | def _add_devices_from_config(args):
config = _parse_config(args.config)
for device in config[]:
if args.default:
if device == "default":
raise ValueError()
if config[][device][] == args.default:
raise ValueError()
add(device, config[][device][], config[][device].get(, ),
config[][device].get(, ), config[][device].get(, 5037)) | Add devices from config. |
3,009 | def get_time(self):
if isinstance(self.path, pathlib.Path):
thetime = self.path.stat().st_mtime
else:
thetime = np.nan
return thetime | Time of the TIFF file
Currently, only the file modification time is supported.
Note that the modification time of the TIFF file is
dependent on the file system and may have temporal
resolution as low as 3 seconds. |
def where(self, where: str) -> 'SASdata':
sd = SASdata(self.sas, self.libref, self.table, dsopts=dict(self.dsopts))
sd.HTML = self.HTML
sd.dsopts['where'] = where
return sd | This method returns a clone of the SASdata object, with the where attribute set. The original SASdata object is not affected.
:param where: the where clause to apply
:return: SAS data object |
3,011 | def reassign(self, user_ids, requester):
path = .format(self.collection.name)
assignments = []
if not user_ids:
raise Error()
for user_id in user_ids:
ref = {
"assignee": {
"id": user_id,
"type": "user_reference"
}
}
assignments.append(ref)
data = {
"incidents": [
{
"id": self.id,
"type": "incident_reference",
"assignments": assignments
}
]
}
extra_headers = {"From": requester}
return self.pagerduty.request(, path, data=_json_dumper(data), extra_headers=extra_headers) | Reassign this incident to a user or list of users
:param user_ids: A non-empty list of user ids
:param requester: The email address of individual requesting reassign |
3,012 | def Ctrl_c(self, dl = 0):
self.Delay(dl)
self.keyboard.press_key(self.keyboard.control_key)
self.keyboard.tap_key("c")
self.keyboard.release_key(self.keyboard.control_key) | Ctrl + c (copy) |
3,013 | def API_GET(self, courseid=None):
output = []
if courseid is None:
courses = self.course_factory.get_all_courses()
else:
try:
courses = {courseid: self.course_factory.get_course(courseid)}
except:
raise APINotFound("Course not found")
username = self.user_manager.session_username()
user_info = self.database.users.find_one({"username": username})
for courseid, course in courses.items():
if self.user_manager.course_is_open_to_user(course, username, False) or course.is_registration_possible(user_info):
data = {
"id": courseid,
"name": course.get_name(self.user_manager.session_language()),
"require_password": course.is_password_needed_for_registration(),
"is_registered": self.user_manager.course_is_open_to_user(course, username, False)
}
if self.user_manager.course_is_open_to_user(course, username, False):
data["tasks"] = {taskid: task.get_name(self.user_manager.session_language()) for taskid, task in course.get_tasks().items()}
data["grade"] = self.user_manager.get_course_cache(username, course)["grade"]
output.append(data)
return 200, output | List courses available to the connected client. Returns a dict in the form
::
{
"courseid1":
{
"name": "Name of the course", #the name of the course
"require_password": False, #indicates if this course requires a password or not
"is_registered": False, #indicates if the user is registered to this course or not
"tasks": #only appears if is_registered is True
{
"taskid1": "name of task1",
"taskid2": "name of task2"
#...
},
"grade": 0.0 #the current grade in the course. Only appears if is_registered is True
}
#...
}
If you use the endpoint /api/v0/courses/the_course_id, this dict will contain one entry or the page will return 404 Not Found. |
3,014 | def get_path(self, temp_ver):
if temp_ver not in self:
raise RuntimeError(
.format(temp_ver.name)
)
return self._prefixed(temp_ver.name) | Get the path of the given version in this store
Args:
temp_ver TemplateVersion: version to look for
Returns:
str: The path to the template version inside the store
Raises:
RuntimeError: if the template is not in the store |
3,015 | def get_current_qualification_score(self, name, worker_id):
qtype = self.get_qualification_type_by_name(name)
if qtype is None:
raise QualificationNotFoundException(
.format(name)
)
try:
score = self.get_qualification_score(qtype["id"], worker_id)
except (WorkerLacksQualification, RevokedQualification):
score = None
return {"qtype": qtype, "score": score} | Return the current score for a worker, on a qualification with the
provided name. |
3,016 | def _is_prime(bit_size, n):
r = 0
s = n - 1
while s % 2 == 0:
r += 1
s //= 2
if bit_size >= 1300:
k = 2
elif bit_size >= 850:
k = 3
elif bit_size >= 650:
k = 4
elif bit_size >= 550:
k = 5
elif bit_size >= 450:
k = 6
for _ in range(k):
a = random.randrange(2, n - 1)
x = pow(a, s, n)
if x == 1 or x == n - 1:
continue
for _ in range(r - 1):
x = pow(x, 2, n)
if x == n - 1:
break
else:
return False
return True | An implementation of Miller–Rabin for checking if a number is prime.
:param bit_size:
An integer of the number of bits in the prime number
:param n:
An integer, the prime number
:return:
A boolean |
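A small usage sketch for the Miller–Rabin checker above. Note that, as extracted, `k` is only assigned when `bit_size >= 450` (the branch for smaller sizes appears to have been lost), so the sketch sticks to a large `bit_size`; it also assumes the module-level `import random` the body relies on.

```python
import random  # assumed module-level import used by _is_prime

print(_is_prime(512, 2**521 - 1))                 # True: 2**521 - 1 is a Mersenne prime
print(_is_prime(512, (2**61 - 1) * (2**89 - 1)))  # False (with overwhelming probability)
```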
3,017 | def remove_gaps(A, B):
a_seq, b_seq = [], []
for a, b in zip(list(A), list(B)):
if a == '-' or a == '.' or b == '-' or b == '.':
continue
a_seq.append(a)
b_seq.append(b)
return ''.join(a_seq), ''.join(b_seq) | skip column if either is a gap |
3,018 | def get_table_cache_key(db_alias, table):
cache_key = % (db_alias, table)
return sha1(cache_key.encode()).hexdigest() | Generates a cache key from a SQL table.
:arg db_alias: Alias of the used database
:type db_alias: str or unicode
:arg table: Name of the SQL table
:type table: str or unicode
:return: A cache key
:rtype: str |
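The exact separator in the elided format string is unknown, so the following is only an illustrative sketch of the idea: hash the database alias and table name together and use the SHA-1 hex digest as the cache key.

```python
from hashlib import sha1

def table_cache_key_sketch(db_alias, table):
    cache_key = '%s:%s' % (db_alias, table)  # hypothetical separator, for illustration only
    return sha1(cache_key.encode()).hexdigest()

table_cache_key_sketch('default', 'auth_user')  # 40-character hex string
```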
3,019 | def domain_search(auth=None, **kwargs):
cloud = get_operator_cloud(auth)
kwargs = _clean_kwargs(**kwargs)
return cloud.search_domains(**kwargs) | Search domains
CLI Example:
.. code-block:: bash
salt '*' keystoneng.domain_search
salt '*' keystoneng.domain_search name=domain1 |
3,020 | def add_update_user(self, user, capacity=None):
if isinstance(user, str):
user = hdx.data.user.User.read_from_hdx(user, configuration=self.configuration)
elif isinstance(user, dict):
user = hdx.data.user.User(user, configuration=self.configuration)
if isinstance(user, hdx.data.user.User):
users = self.data.get('users')
if users is None:
users = list()
self.data['users'] = users
if capacity is not None:
user['capacity'] = capacity
self._addupdate_hdxobject(users, , user)
return
raise HDXError( % type(user).__name__) | Add new or update existing user in organization with new metadata. Capacity eg. member, admin
must be supplied either within the User object or dictionary or using the capacity argument (which takes
precedence).
Args:
user (Union[User,Dict,str]): Either a user id or user metadata either from a User object or a dictionary
capacity (Optional[str]): Capacity of user eg. member, admin. Defaults to None.
Returns:
None |
3,021 | def scale_vmss(access_token, subscription_id, resource_group, vmss_name, capacity):
endpoint = .join([get_rm_endpoint(),
, subscription_id,
, resource_group,
, vmss_name,
, COMP_API])
body = + str(capacity) +
return do_patch(endpoint, body, access_token) | Change the instance count of an existing VM Scale Set.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
vmss_name (str): Name of the virtual machine scale set.
capacity (int): New number of VMs.
Returns:
HTTP response. |
3,022 | def parse_env(envlist):
if not isinstance(envlist, list):
envlist = [envlist]
exports = []
for env in envlist:
pieces = re.split("( |\\\".*?\\\"|)", env)
pieces = [p for p in pieces if p.strip()]
while len(pieces) > 0:
current = pieces.pop(0)
if current.endswith('='):
next = ""
if len(pieces) > 0:
next = pieces.pop(0)
exports.append("%s%s" %(current, next))
elif '=' in current:
exports.append(current)
elif current.endswith('\\'):
continue
else:
next = pieces.pop(0)
exports.append("%s=%s" %(current, next))
return exports | parse_env will parse a single line (with prefix like ENV removed) to
a list of commands in the format KEY=VALUE For example:
ENV PYTHONBUFFER 1 --> [PYTHONBUFFER=1]
::Notes
Docker: https://docs.docker.com/engine/reference/builder/#env |
3,023 | def ready(self):
models_config.auto_load_configs()
self.auto_load_app_modules([, ])
app_menu.auto_load_model_menu()
auto_register_search_models()
tabs.auto_generate_missing_tabs() | Auto load Trionyx |
3,024 | def check_key(self, key, raise_error=True, *args, **kwargs):
return check_key(
key, possible_keys=list(self), raise_error=raise_error,
name=, *args, **kwargs) | Checks whether the key is a valid formatoption
Parameters
----------
%(check_key.parameters.no_possible_keys|name)s
Returns
-------
%(check_key.returns)s
Raises
------
%(check_key.raises)s |
3,025 | def relpath_to_modname(relpath):
| Convert relative path to module name
Within a project, a path to the source file is uniquely identified with a
module name. Relative paths of the form 'foo/bar' are *not* converted to
module names 'foo.bar', because (1) they identify directories, not regular
files, and (2) already 'foo/bar/__init__.py' would claim that conversion.
Args:
relpath (str): Relative path from some location on sys.path
Example:
>>> relpath_to_modname('ballet/util/_util.py')
'ballet.util._util' |
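The function body is missing from this extract. A minimal sketch consistent with the docstring (strip the `.py` suffix, treat `__init__.py` as the package itself, join path parts with dots) could look like the following; it is not necessarily the project's actual implementation.

```python
import pathlib

def relpath_to_modname_sketch(relpath):
    parts = list(pathlib.PurePath(relpath).parts)
    if parts[-1] == '__init__.py':
        parts = parts[:-1]               # 'foo/bar/__init__.py' -> 'foo.bar'
    elif parts[-1].endswith('.py'):
        parts[-1] = parts[-1][:-3]       # drop the '.py' suffix
    return '.'.join(parts)

relpath_to_modname_sketch('ballet/util/_util.py')     # 'ballet.util._util'
relpath_to_modname_sketch('ballet/util/__init__.py')  # 'ballet.util'
```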
3,026 | def cosine_similarity(sent1: str, sent2: str) -> float:
WORD = re.compile(r'\w+')
def get_cosine(vec1, vec2):
intersection = set(vec1.keys()) & set(vec2.keys())
numerator = sum([vec1[x] * vec2[x] for x in intersection])
sum1 = sum([vec1[x]**2 for x in vec1.keys()])
sum2 = sum([vec2[x]**2 for x in vec2.keys()])
denominator = math.sqrt(sum1) * math.sqrt(sum2)
if not denominator:
return 0.0
else:
return float(numerator) / denominator
def text_to_vector(text):
words = WORD.findall(text)
return Counter(words)
vector1 = text_to_vector(sent1)
vector2 = text_to_vector(sent2)
cosine = get_cosine(vector1, vector2)
return cosine | Calculates cosine similarity between 2 sentences/documents.
Thanks to @vpekar, see http://goo.gl/ykibJY |
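Usage sketch; it assumes the module-level `import re`, `import math` and `from collections import Counter` that the body relies on, plus the `\w+` token pattern restored above.

```python
cosine_similarity('the cat sat on the mat', 'the cat sat')  # ~0.82
cosine_similarity('abc', 'xyz')                             # 0.0 (no shared words)
```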
3,027 | def _create_tag_lowlevel(self, tag_name, message=None, force=True,
patch=False):
tags = self.get_tags(patch=patch)
old_commit = tags.get(tag_name)
if old_commit is not None:
if not force:
return False
old_rev = old_commit[]
if self.is_ancestor(old_rev, , patch=patch):
altered = self.hg(, , old_rev, , ,
)
if not altered or altered == []:
force = False
if not force:
return False
tag_args = [, tag_name]
if message:
tag_args += [, message]
self.hg(patch=patch, *tag_args)
return True | Create a tag on the toplevel or patch repo
If the tag exists, and force is False, no tag is made. If force is True,
and a tag exists, but it is a direct ancestor of the current commit,
and there is no difference in filestate between the current commit
and the tagged commit, no tag is made. Otherwise, the old tag is
overwritten to point at the current commit.
Returns True or False indicating whether the tag was actually committed |
3,028 | def save_series(self) -> None:
hydpy.pub.sequencemanager.open_netcdf_writer(
flatten=hydpy.pub.options.flattennetcdf,
isolate=hydpy.pub.options.isolatenetcdf)
self.prepare_sequencemanager()
for sequence in self._iterate_sequences():
sequence.save_ext()
hydpy.pub.sequencemanager.close_netcdf_writer() | Save time series data as defined by the actual XML `writer`
element.
>>> from hydpy.core.examples import prepare_full_example_1
>>> prepare_full_example_1()
>>> from hydpy import HydPy, TestIO, XMLInterface
>>> hp = HydPy('LahnH')
>>> with TestIO():
... hp.prepare_network()
... hp.init_models()
... interface = XMLInterface('single_run.xml')
... interface.update_options()
>>> interface.update_timegrids()
>>> series_io = interface.series_io
>>> series_io.prepare_series()
>>> hp.elements.land_dill.model.sequences.fluxes.pc.series[2, 3] = 9.0
>>> hp.nodes.lahn_2.sequences.sim.series[4] = 7.0
>>> with TestIO():
... series_io.save_series()
>>> import numpy
>>> with TestIO():
... os.path.exists(
... 'LahnH/series/output/land_lahn_2_flux_pc.npy')
... os.path.exists(
... 'LahnH/series/output/land_lahn_3_flux_pc.npy')
... numpy.load(
... 'LahnH/series/output/land_dill_flux_pc.npy')[13+2, 3]
... numpy.load(
... 'LahnH/series/output/lahn_2_sim_q_mean.npy')[13+4]
True
False
9.0
7.0 |
3,029 | def endpoint_delete(auth=None, **kwargs):
cloud = get_operator_cloud(auth)
kwargs = _clean_kwargs(**kwargs)
return cloud.delete_endpoint(**kwargs) | Delete an endpoint
CLI Example:
.. code-block:: bash
salt '*' keystoneng.endpoint_delete id=3bee4bd8c2b040ee966adfda1f0bfca9 |
3,030 | def plot_options(cls, obj, percent_size):
from .plot import MPLPlot
factor = percent_size / 100.0
obj = obj.last if isinstance(obj, HoloMap) else obj
options = Store.lookup_options(cls.backend, obj, 'plot').options
fig_size = options.get('fig_size', MPLPlot.fig_size)*factor
return dict({'fig_size': fig_size},
**MPLPlot.lookup_options(obj, 'plot').options) | Given a holoviews object and a percentage size, apply heuristics
to compute a suitable figure size. For instance, scaling layouts
and grids linearly can result in unwieldy figure sizes when there
are a large number of elements. As ad hoc heuristics are used,
this functionality is kept separate from the plotting classes
themselves.
Used by the IPython Notebook display hooks and the save
utility. Note that this can be overridden explicitly per object
using the fig_size and size plot options. |
3,031 | def handle_signature(self, sig, signode):
if self._is_attr_like():
sig_match = chpl_attr_sig_pattern.match(sig)
if sig_match is None:
raise ValueError(.format(sig))
func_prefix, name_prefix, name, retann = sig_match.groups()
arglist = None
else:
sig_match = chpl_sig_pattern.match(sig)
if sig_match is None:
raise ValueError(.format(sig))
func_prefix, name_prefix, name, arglist, retann = \
sig_match.groups()
modname = self.options.get(
, self.env.temp_data.get())
classname = self.env.temp_data.get()
if classname:
if name_prefix and name_prefix.startswith(classname):
fullname = name_prefix + name
name_prefix = name_prefix[len(classname):].lstrip()
elif name_prefix:
return fullname, name_prefix | Parse the signature *sig* into individual nodes and append them to the
*signode*. If ValueError is raised, parsing is aborted and the whole
*sig* string is put into a single desc_name node.
The return value is the value that identifies the object. IOW, it is
the identifier that will be used to reference this object, datum,
attribute, proc, etc. It is a tuple of "fullname" (including module and
class(es)) and the classes. See also :py:meth:`add_target_and_index`. |
3,032 | def reindex_like(self, other, method=None, copy=True, limit=None,
tolerance=None):
d = other._construct_axes_dict(axes=self._AXIS_ORDERS, method=method,
copy=copy, limit=limit,
tolerance=tolerance)
return self.reindex(**d) | Return an object with matching indices as other object.
Conform the object to the same index on all axes. Optional
filling logic, placing NaN in locations having no value
in the previous index. A new object is produced unless the
new index is equivalent to the current one and copy=False.
Parameters
----------
other : Object of the same data type
Its row and column indices are used to define the new indices
of this object.
method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}
Method to use for filling holes in reindexed DataFrame.
Please note: this is only applicable to DataFrames/Series with a
monotonically increasing/decreasing index.
* None (default): don't fill gaps
* pad / ffill: propagate last valid observation forward to next
valid
* backfill / bfill: use next valid observation to fill gap
* nearest: use nearest valid observations to fill gap
copy : bool, default True
Return a new object, even if the passed indexes are the same.
limit : int, default None
Maximum number of consecutive labels to fill for inexact matches.
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Series, and must be
the same size as the index and its dtype must exactly match the
index's type.
.. versionadded:: 0.21.0 (list-like tolerance)
Returns
-------
Series or DataFrame
Same type as caller, but with changed indices on each axis.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
DataFrame.reindex : Change to new indices or expand indices.
Notes
-----
Same as calling
``.reindex(index=other.index, columns=other.columns,...)``.
Examples
--------
>>> df1 = pd.DataFrame([[24.3, 75.7, 'high'],
... [31, 87.8, 'high'],
... [22, 71.6, 'medium'],
... [35, 95, 'medium']],
... columns=['temp_celsius', 'temp_fahrenheit', 'windspeed'],
... index=pd.date_range(start='2014-02-12',
... end='2014-02-15', freq='D'))
>>> df1
temp_celsius temp_fahrenheit windspeed
2014-02-12 24.3 75.7 high
2014-02-13 31.0 87.8 high
2014-02-14 22.0 71.6 medium
2014-02-15 35.0 95.0 medium
>>> df2 = pd.DataFrame([[28, 'low'],
... [30, 'low'],
... [35.1, 'medium']],
... columns=['temp_celsius', 'windspeed'],
... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13',
... '2014-02-15']))
>>> df2
temp_celsius windspeed
2014-02-12 28.0 low
2014-02-13 30.0 low
2014-02-15 35.1 medium
>>> df2.reindex_like(df1)
temp_celsius temp_fahrenheit windspeed
2014-02-12 28.0 NaN low
2014-02-13 30.0 NaN low
2014-02-14 NaN NaN NaN
2014-02-15 35.1 NaN medium |
3,033 | def find_ent_space_price(package, category, size, tier_level):
if category == :
category_code =
elif category == :
category_code =
else:
category_code =
level = ENDURANCE_TIERS.get(tier_level)
for item in package[]:
if int(item[]) != size:
continue
price_id = _find_price_id(item[], category_code, , level)
if price_id:
return price_id
raise ValueError("Could not find price for %s storage space" % category) | Find the space price for the given category, size, and tier
:param package: The Enterprise (Endurance) product package
:param category: The category of space (endurance, replication, snapshot)
:param size: The size for which a price is desired
:param tier_level: The endurance tier for which a price is desired
:return: Returns the matching price, or an error if not found |
3,034 | def fromXml(cls, elem):
if elem is None:
return None
addon = cls.byName(elem.tag)
if not addon:
raise RuntimeError(.format(elem.tag))
return addon.load(elem) | Converts the inputted element to a Python object by looking through
the IO addons for the element's tag.
:param elem | <xml.etree.ElementTree.Element>
:return <variant> |
3,035 | def mtr_tr_dense(sz):
n = 2 ** sz
hparams = mtf_bitransformer_base()
hparams.d_model = 1024
hparams.max_length = 256
hparams.batch_size = 128
hparams.d_ff = int(4096 * n)
hparams.d_kv = 128
hparams.encoder_num_heads = int(8 * n)
hparams.decoder_num_heads = int(8 * n)
hparams.learning_rate_decay_steps = 51400
hparams.layout = "batch:batch;vocab:model;d_ff:model;heads:model"
hparams.mesh_shape = "batch:32"
hparams.label_smoothing = 0.1
hparams.layer_prepostprocess_dropout = 0.1
hparams.attention_dropout = 0.1
hparams.relu_dropout = 0.1
return hparams | Series of machine translation models.
All models are trained on sequences of 256 tokens.
You can use the dataset translate_enfr_wmt32k_packed.
154000 steps = 3 epochs.
Args:
sz: an integer
Returns:
a hparams |
3,036 | def distance_to_closest(self, ps: Union["Units", List["Point2"], Set["Point2"]]) -> Union[int, float]:
assert ps
closest_distance_squared = math.inf
for p2 in ps:
if not isinstance(p2, Point2):
p2 = p2.position
distance = (self[0] - p2[0]) ** 2 + (self[1] - p2[1]) ** 2
if distance < closest_distance_squared:
closest_distance_squared = distance
return closest_distance_squared ** 0.5 | This function assumes the 2d distance is meant |
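A usage sketch; the import path below assumes the python-sc2 package this method appears to come from.

```python
from sc2.position import Point2  # assumed import

start = Point2((10, 10))
targets = [Point2((13, 14)), Point2((40, 2))]
start.distance_to_closest(targets)  # 5.0, since (13, 14) is a 3-4-5 triangle away
```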
3,037 | def metrics(self):
masterThrp, backupThrp = self.getThroughputs(self.instances.masterId)
r = self.instance_throughput_ratio(self.instances.masterId)
m = [
("{} Monitor metrics:".format(self), None),
("Delta", self.Delta),
("Lambda", self.Lambda),
("Omega", self.Omega),
("instances started", self.instances.started),
("ordered request counts",
{i: r[0] for i, r in self.numOrderedRequests.items()}),
("ordered request durations",
{i: r[1] for i, r in self.numOrderedRequests.items()}),
("master request latencies", self.masterReqLatencies),
("client avg request latencies", {i: self.getLatency(i)
for i in self.instances.ids}),
("throughput", {i: self.getThroughput(i)
for i in self.instances.ids}),
("master throughput", masterThrp),
("total requests", self.totalRequests),
("avg backup throughput", backupThrp),
("master throughput ratio", r)]
return m | Calculate and return the metrics. |
3,038 | def set_dash(self, dashes, offset=0):
cairo.cairo_set_dash(
self._pointer, ffi.new('double[]', dashes), len(dashes), offset)
self._check_status() | Sets the dash pattern to be used by :meth:`stroke`.
A dash pattern is specified by dashes, a list of positive values.
Each value provides the length of alternate "on" and "off"
portions of the stroke.
:obj:`offset` specifies an offset into the pattern
at which the stroke begins.
Each "on" segment will have caps applied
as if the segment were a separate sub-path.
In particular, it is valid to use an "on" length of 0
with :obj:`LINE_CAP_ROUND` or :obj:`LINE_CAP_SQUARE`
in order to distribute dots or squares along a path.
Note: The length values are in user-space units
as evaluated at the time of stroking.
This is not necessarily the same as the user space
at the time of :meth:`set_dash`.
If :obj:`dashes` is empty dashing is disabled.
If it is of length 1 a symmetric pattern is assumed
with alternating on and off portions of the size specified
by the single value.
:param dashes:
A list of floats specifying alternate lengths
of on and off stroke portions.
:type offset: float
:param offset:
An offset into the dash pattern at which the stroke should start.
:raises:
:exc:`CairoError`
if any value in dashes is negative,
or if all values are 0.
The context will be put into an error state. |
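A short usage sketch with cairocffi, the library this `Context` wrapper appears to belong to, drawing one dashed line with a 4-on/1-off pattern.

```python
import cairocffi as cairo

surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 100, 100)
ctx = cairo.Context(surface)
ctx.set_dash([4.0, 1.0], 0)  # 4 units on, 1 unit off, no offset
ctx.move_to(10, 50)
ctx.line_to(90, 50)
ctx.stroke()
```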
3,039 | def search_cloud_integration_deleted_for_facet(self, facet, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_cloud_integration_deleted_for_facet_with_http_info(facet, **kwargs)
else:
(data) = self.search_cloud_integration_deleted_for_facet_with_http_info(facet, **kwargs)
return data | Lists the values of a specific facet over the customer's deleted cloud integrations # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_cloud_integration_deleted_for_facet(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread. |
3,040 | def add_permission(self, name):
perm = self.find_permission(name)
if perm is None:
try:
perm = self.permission_model(name=name)
perm.save()
return perm
except Exception as e:
log.error(c.LOGMSG_ERR_SEC_ADD_PERMISSION.format(str(e)))
return perm | Adds a permission to the backend, model permission
:param name:
name of the permission: 'can_add','can_edit' etc... |
3,041 | def zeros_coefs(nmax, mmax, coef_type=scalar):
if(mmax > nmax):
raise ValueError(err_msg[])
if(coef_type == scalar):
L = (nmax + 1) + mmax * (2 * nmax - mmax + 1)
vec = np.zeros(L, dtype=np.complex128)
return ScalarCoefs(vec, nmax, mmax)
elif(coef_type == vector):
L = (nmax + 1) + mmax * (2 * nmax - mmax + 1)
vec1 = np.zeros(L, dtype=np.complex128)
vec2 = np.zeros(L, dtype=np.complex128)
return VectorCoefs(vec1, vec2, nmax, mmax)
else:
raise TypeError(err_msg[]) | Returns a ScalarCoefs object or a VectorCoefs object where each of the
coefficients is set to 0. The structure is such that *nmax* is the largest
*n* can be in c[n, m], and *mmax* is the largest *m* can be for any *n*.
(See *ScalarCoefs* and *VectorCoefs* for details.)
Examples::
>>> c = spherepy.zeros_coefs(5, 3, coef_type = spherepy.scalar)
>>> c = spherepy.zeros_coefs(5, 3) # same as above
>>> vc = spherepy.zeros_coefs(5, 3, coef_type = spherepy.vector)
Args:
nmax (int): Largest *n* value in the set of modes.
mmax (int): Largest abs(*m*) value in the set of modes.
coef_type (int, optional): Set to 0 for scalar, and 1 for vector.
The default option is scalar. If you would like to return a set of
vector spherical hamonic coefficients, the preferred way to do so
is vc = spherepy.zeros_coefs( 10, 12, coef_type = spherepy.vector).
Returns:
coefs: Returns a ScalarCoefs object if coef_type is either blank or
set to 0. Returns a VectorCoefs object if coef_type = 1.
Raises:
TypeError: If coef_type is anything but 0 or 1. |
3,042 | def xmoe_2d():
hparams = xmoe_top_2()
hparams.decoder_layers = ["att", "hmoe"] * 4
hparams.mesh_shape = "b0:2;b1:4"
hparams.outer_batch_size = 4
hparams.layout = "outer_batch:b0;inner_batch:b1,expert_x:b1,expert_y:b0"
hparams.moe_num_experts = [4, 4]
return hparams | Two-dimensional hierarchical mixture of 16 experts. |
3,043 | def constraint_present(name, constraint_id, constraint_type, constraint_options=None, cibname=None):
return _item_present(name=name,
item='constraint',
item_id=constraint_id,
item_type=constraint_type,
create=None,
extra_args=constraint_options,
cibname=cibname) | Ensure that a constraint is created
Should be run on one cluster node only
(there may be races)
Can only be run on a node with a functional pacemaker/corosync
name
Irrelevant, not used (recommended: {{formulaname}}__constraint_present_{{constraint_id}})
constraint_id
name for the constraint (try first to create manually to find out the autocreated name)
constraint_type
constraint type (location, colocation, order)
constraint_options
options for creating the constraint
cibname
use a cached CIB-file named like cibname instead of the live CIB
Example:
.. code-block:: yaml
haproxy_pcs__constraint_present_colocation-vip_galera-haproxy-clone-INFINITY:
pcs.constraint_present:
- constraint_id: colocation-vip_galera-haproxy-clone-INFINITY
- constraint_type: colocation
- constraint_options:
- 'add'
- 'vip_galera'
- 'with'
- 'haproxy-clone'
- cibname: cib_for_haproxy |
3,044 | def bezier(self, points):
coordinates = pgmagick.CoordinateList()
for point in points:
x, y = float(point[0]), float(point[1])
coordinates.append(pgmagick.Coordinate(x, y))
self.drawer.append(pgmagick.DrawableBezier(coordinates)) | Draw a Bezier-curve.
:param points: ex.) ((5, 5), (6, 6), (7, 7))
:type points: list |
3,045 | def delete(self, refobj):
refobjinter = self.get_refobjinter()
reference = refobjinter.get_reference(refobj)
if reference:
fullns = cmds.referenceQuery(reference, namespace=True)
cmds.file(removeReference=True, referenceNode=reference)
else:
parentns = common.get_namespace(refobj)
ns = cmds.getAttr("%s.namespace" % refobj)
fullns = ":".join((parentns.rstrip(":"), ns.lstrip(":")))
cmds.namespace(removeNamespace=fullns, deleteNamespaceContent=True) | Delete the content of the given refobj
:param refobj: the refobj that represents the content that should be deleted
:type refobj: refobj
:returns: None
:rtype: None
:raises: None |
3,046 | def ebrisk(rupgetter, srcfilter, param, monitor):
riskmodel = param[]
E = rupgetter.num_events
L = len(riskmodel.lti)
N = len(srcfilter.sitecol.complete)
e1 = rupgetter.first_event
with monitor(, measuremem=False):
with datastore.read(srcfilter.filename) as dstore:
assetcol = dstore[]
assets_by_site = assetcol.assets_by_site()
A = len(assetcol)
getter = getters.GmfGetter(rupgetter, srcfilter, param[])
with monitor():
getter.init()
hazard = getter.get_hazard()
mon_risk = monitor(, measuremem=False)
mon_agg = monitor(, measuremem=False)
events = rupgetter.get_eid_rlz()
eid2idx = dict(zip(events[], range(e1, e1 + E)))
tagnames = param[]
shape = assetcol.tagcol.agg_shape((E, L), tagnames)
elt_dt = [(, U64), (, U16), (, (F32, shape[1:]))]
if param[]:
alt = numpy.zeros((A, E, L), F32)
acc = numpy.zeros(shape, F32)
if param[]:
losses_by_A = numpy.zeros((A, L), F32)
else:
losses_by_A = 0
times = numpy.zeros(N)
num_events_per_sid = 0
epspath = param[]
for sid, haz in hazard.items():
t0 = time.time()
assets_on_sid = assets_by_site[sid]
if len(assets_on_sid) == 0:
continue
num_events_per_sid += len(haz)
weights = getter.weights[haz[], 0]
assets_by_taxo = get_assets_by_taxo(assets_on_sid, epspath)
eidx = numpy.array([eid2idx[eid] for eid in haz[]]) - e1
haz[] = eidx + e1
with mon_risk:
out = riskmodel.get_output(assets_by_taxo, haz)
with mon_agg:
for a, asset in enumerate(assets_on_sid):
aid = asset[]
tagi = asset[tagnames] if tagnames else ()
tagidxs = tuple(idx - 1 for idx in tagi)
for lti, lt in enumerate(riskmodel.loss_types):
lratios = out[lt][a]
if lt == :
losses = lratios * asset[]
else:
losses = lratios * asset[ + lt]
if param[]:
alt[aid, eidx, lti] = losses
acc[(eidx, lti) + tagidxs] += losses
if param[]:
losses_by_A[aid, lti] += losses @ weights
times[sid] = time.time() - t0
if hazard:
num_events_per_sid /= len(hazard)
with monitor():
elt = numpy.fromiter(
((event[], event[], losses)
for event, losses in zip(events, acc) if losses.sum()), elt_dt)
agg = general.AccumDict(accum=numpy.zeros(shape[1:], F32))
for rec in elt:
agg[rec[]] += rec[] * param[]
res = {: elt, : agg, : times,
: num_events_per_sid}
if param[]:
res[] = losses_by_A * param[]
if param[]:
res[] = alt, events[]
return res | :param rupgetter:
a RuptureGetter instance
:param srcfilter:
a SourceFilter instance
:param param:
a dictionary of parameters
:param monitor:
:class:`openquake.baselib.performance.Monitor` instance
:returns:
an ArrayWrapper with shape (E, L, T, ...) |
3,047 | def _get_seal_key_ntlm2(negotiate_flags, exported_session_key, magic_constant):
if negotiate_flags & NegotiateFlags.NTLMSSP_NEGOTIATE_128:
seal_key = exported_session_key
elif negotiate_flags & NegotiateFlags.NTLMSSP_NEGOTIATE_56:
seal_key = exported_session_key[:7]
else:
seal_key = exported_session_key[:5]
seal_key = hashlib.md5(seal_key + magic_constant).digest()
return seal_key | 3.4.5.3 SEALKEY
Calculates the seal_key used to seal (encrypt) messages. This for authentication where
NTLMSSP_NEGOTIATE_EXTENDED_SESSIONSECURITY has been negotiated. Will weaken the keys
if NTLMSSP_NEGOTIATE_128 is not negotiated, will try NEGOTIATE_56 and then will default
to the 40-bit key
@param negotiate_flags: The negotiate_flags structure sent by the server
@param exported_session_key: A 128-bit session key used to derive signing and sealing keys
@param magic_constant: A constant value set in the MS-NLMP documentation (constants.SignSealConstants)
@return seal_key: Key used to seal messages |
3,048 | def _stop_trial(self, trial, error=False, error_msg=None,
stop_logger=True):
if stop_logger:
trial.close_logger()
if error:
self.set_status(trial, Trial.ERROR)
else:
self.set_status(trial, Trial.TERMINATED)
try:
trial.write_error_log(error_msg)
if hasattr(trial, "runner") and trial.runner:
if (not error and self._reuse_actors
and self._cached_actor is None):
logger.debug("Reusing actor for {}".format(trial.runner))
self._cached_actor = trial.runner
else:
logger.info(
"Destroying actor for trial {}. If your trainable is "
"slow to initialize, consider setting "
"reuse_actors=True to reduce actor creation "
"overheads.".format(trial))
trial.runner.stop.remote()
trial.runner.__ray_terminate__.remote()
except Exception:
logger.exception("Error stopping runner for Trial %s", str(trial))
self.set_status(trial, Trial.ERROR)
finally:
trial.runner = None | Stops this trial.
Stops this trial, releasing all allocating resources. If stopping the
trial fails, the run will be marked as terminated in error, but no
exception will be thrown.
Args:
error (bool): Whether to mark this trial as terminated in error.
error_msg (str): Optional error message.
stop_logger (bool): Whether to shut down the trial logger. |
3,049 | def p_factor_unary_operators(self, p):
p[0] = p[2]
if p[1] == '-':
p[0] = Instruction(, context={: p[0]}) | term : SUB factor
| ADD factor |
3,050 | def prep_cwl(samples, workflow_fn, out_dir, out_file, integrations=None,
add_container_tag=None):
if add_container_tag is None:
container_tags = None
elif add_container_tag.lower() == "quay_lookup":
container_tags = {}
else:
container_tags = collections.defaultdict(lambda: add_container_tag)
step_dir = utils.safe_makedir(os.path.join(out_dir, "steps"))
get_retriever = GetRetriever(integrations, samples)
variables, keyvals = _flatten_samples(samples, out_file, get_retriever)
cur_remotes = _get_cur_remotes(keyvals)
file_estimates = _calc_input_estimates(keyvals, get_retriever)
out = _cwl_workflow_template(variables)
parent_wfs = []
step_parallelism = {}
steps, wfoutputs = workflow_fn(samples)
used_inputs = set([])
for cur in workflow.generate(variables, steps, wfoutputs):
if cur[0] == "step":
_, name, parallel, inputs, outputs, image, programs, disk, cores, no_files = cur
step_file = _write_tool(step_dir, name, inputs, outputs, parallel, image, programs,
file_estimates, disk, cores, samples, cur_remotes, no_files, container_tags)
out["steps"].append(_step_template(name, step_file, inputs, outputs, parallel, step_parallelism))
used_inputs |= set(x["id"] for x in inputs)
elif cur[0] == "expressiontool":
_, name, inputs, outputs, expression, parallel = cur
step_file = _write_expressiontool(step_dir, name, inputs, outputs, expression, parallel)
out["steps"].append(_step_template(name, step_file, inputs, outputs, parallel, step_parallelism))
used_inputs |= set(x["id"] for x in inputs)
elif cur[0] == "upload":
for output in cur[1]:
wf_output = copy.deepcopy(output)
if "outputSource" not in wf_output:
wf_output["outputSource"] = wf_output.pop("source")
wf_output = _clean_record(wf_output)
if wf_output["id"] in used_inputs:
wf_output["id"] = "%s_out" % wf_output["id"]
out["outputs"].append(wf_output)
elif cur[0] == "wf_start":
parent_wfs.append(out)
out = _cwl_workflow_template(cur[1])
elif cur[0] == "wf_finish":
_, name, parallel, inputs, outputs, scatter = cur
wf_out_file = "wf-%s.cwl" % name
with open(os.path.join(out_dir, wf_out_file), "w") as out_handle:
yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False)
out = parent_wfs.pop(-1)
out["steps"].append(_step_template(name, wf_out_file, inputs, outputs, parallel,
step_parallelism, scatter))
used_inputs |= set(x["id"] for x in inputs)
else:
raise ValueError("Unexpected workflow value %s" % str(cur))
step_parallelism[name] = parallel
with open(out_file, "w") as out_handle:
out["inputs"] = [x for x in out["inputs"] if x["id"] in used_inputs]
yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False)
sample_json = "%s-samples.json" % utils.splitext_plus(out_file)[0]
out_clean = _clean_final_outputs(copy.deepcopy({k: v for k, v in keyvals.items() if k in used_inputs}),
get_retriever)
with open(sample_json, "w") as out_handle:
json.dump(out_clean, out_handle, sort_keys=True, indent=4, separators=(',', ': '))
return out_file, sample_json | Output a CWL description with sub-workflows and steps. |
3,051 | def charge_parent(self, mol, skip_standardize=False):
if not skip_standardize:
mol = self.standardize(mol)
fragment = self.fragment_parent(mol, skip_standardize=True)
if fragment:
uncharged = self.uncharge(fragment)
uncharged = self.standardize(uncharged)
return uncharged | Return the charge parent of a given molecule.
The charge parent is the uncharged version of the fragment parent.
:param mol: The input molecule.
:type mol: rdkit.Chem.rdchem.Mol
:param bool skip_standardize: Set to True if mol has already been standardized.
:returns: The charge parent molecule.
:rtype: rdkit.Chem.rdchem.Mol |
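A usage sketch; the call pattern assumes MolVS's `Standardizer` class, which this method appears to belong to. Sodium acetate first loses its counter-ion (fragment parent) and the remaining acetate anion is then neutralised.

```python
from rdkit import Chem
from molvs import Standardizer  # assumed host class of charge_parent

mol = Chem.MolFromSmiles('CC(=O)[O-].[Na+]')
parent = Standardizer().charge_parent(mol)
Chem.MolToSmiles(parent)  # 'CC(=O)O', i.e. acetic acid
```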
3,052 | def check_expected_infos(self, test_method):
f = lambda key, default=[]: getattr(test_method, key, default)
expected_info_messages = f(EXPECTED_INFO_MESSAGES)
allowed_info_messages = f(ALLOWED_INFO_MESSAGES)
self.check_infos(expected_info_messages, allowed_info_messages) | This method is called after each test. It will read decorated
information and check whether the expected infos occurred.
You can set expected infos by decorators :py:func:`.expected_info_messages`
and :py:func:`.allowed_info_messages`. |
3,053 | def server_add(s_name, s_ip, s_state=None, **connection_args):
ret = True
if server_exists(s_name, **connection_args):
return False
nitro = _connect(**connection_args)
if nitro is None:
return False
server = NSServer()
server.set_name(s_name)
server.set_ipaddress(s_ip)
if s_state is not None:
server.set_state(s_state)
try:
NSServer.add(nitro, server)
except NSNitroError as error:
log.debug(, error)
ret = False
_disconnect(nitro)
return ret | Add a server
Note: The default server state is ENABLED
CLI Example:
.. code-block:: bash
salt '*' netscaler.server_add 'serverName' 'serverIpAddress'
salt '*' netscaler.server_add 'serverName' 'serverIpAddress' 'serverState' |
3,054 | def _set_tcp_keepalive(sock, opts):
if hasattr(socket, 'SO_KEEPALIVE'):
if opts.get('tcp_keepalive', False):
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
if hasattr(socket, 'SOL_TCP'):
if hasattr(socket, 'TCP_KEEPIDLE'):
tcp_keepalive_idle = opts.get('tcp_keepalive_idle', -1)
if tcp_keepalive_idle > 0:
sock.setsockopt(
socket.SOL_TCP, socket.TCP_KEEPIDLE,
int(tcp_keepalive_idle))
if hasattr(socket, 'TCP_KEEPCNT'):
tcp_keepalive_cnt = opts.get('tcp_keepalive_cnt', -1)
if tcp_keepalive_cnt > 0:
sock.setsockopt(
socket.SOL_TCP, socket.TCP_KEEPCNT,
int(tcp_keepalive_cnt))
if hasattr(socket, 'TCP_KEEPINTVL'):
tcp_keepalive_intvl = opts.get('tcp_keepalive_intvl', -1)
if tcp_keepalive_intvl > 0:
sock.setsockopt(
socket.SOL_TCP, socket.TCP_KEEPINTVL,
int(tcp_keepalive_intvl))
if hasattr(socket, 'SIO_KEEPALIVE_VALS'):
if tcp_keepalive_idle > 0 or tcp_keepalive_intvl > 0:
if tcp_keepalive_idle <= 0:
tcp_keepalive_idle = 7200
if tcp_keepalive_intvl <= 0:
tcp_keepalive_intvl = 1
sock.ioctl(socket.SIO_KEEPALIVE_VALS, (
1, int(tcp_keepalive_idle * 1000),
int(tcp_keepalive_intvl * 1000)))
else:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 0) | Ensure that TCP keepalives are set for the socket. |
3,055 | def validation_step(self, Xi, yi, **fit_params):
self.module_.eval()
with torch.no_grad():
y_pred = self.infer(Xi, **fit_params)
loss = self.get_loss(y_pred, yi, X=Xi, training=False)
return {
'loss': loss,
'y_pred': y_pred,
} | Perform a forward step using batched data and return the
resulting loss.
The module is set to be in evaluation mode (e.g. dropout is
not applied).
Parameters
----------
Xi : input data
A batch of the input data.
yi : target data
A batch of the target data.
**fit_params : dict
Additional parameters passed to the ``forward`` method of
the module and to the ``self.train_split`` call. |
3,056 | def combine_dictionaries(a, b):
c = {}
for key in list(b.keys()): c[key]=b[key]
for key in list(a.keys()): c[key]=a[key]
return c | returns the combined dictionary. a's values preferentially chosen |
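A one-line illustration of the documented precedence: keys from `a` overwrite keys from `b`.

```python
combine_dictionaries({'x': 1}, {'x': 99, 'y': 2})  # {'x': 1, 'y': 2}
```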
3,057 | def request_generic(self, act, coro, perform, complete):
overlapped = OVERLAPPED()
overlapped.object = act
self.add_token(act, coro, (overlapped, perform, complete))
rc, nbytes = perform(act, overlapped)
completion_key = c_long(0)
if rc == 0:
pass  # completed synchronously; the completion packet is still posted to the IOCP
elif rc != WSA_IO_PENDING:
self.remove_token(act)
raise SocketError(rc, "%s on %r" % (ctypes.FormatError(rc), act)) | Performs an overlapped request (via `perform` callable) and saves
the token and the (`overlapped`, `perform`, `complete`) trio. |
3,058 | def compute_absolute_error(self, predicted_data, record, dataframe_record):
absolute_error = abs(record[] - predicted_data[self.ddg_analysis_type])
dataframe_record[] = absolute_error | Calculate the absolute error for this case. |
3,059 | def teleport(self, agent_name, location=None, rotation=None):
self.agents[agent_name].teleport(location * 100, rotation)
self.tick() | Teleports the target agent to any given location, and applies a specific rotation.
Args:
agent_name (str): The name of the agent to teleport.
location (np.ndarray or list): XYZ coordinates (in meters) for the agent to be teleported to.
If no location is given, it isn't teleported, but may still be rotated. Defaults to None.
rotation (np.ndarray or list): A new rotation target for the agent.
If no rotation is given, it isn't rotated, but may still be teleported. Defaults to None. |
3,060 | def close(self):
project_nodes_id = set([n.id for n in self.nodes])
for module in self.compute():
module_nodes_id = set([n.id for n in module.instance().nodes])
if len(module_nodes_id & project_nodes_id):
yield from module.instance().project_closing(self)
yield from self._close_and_clean(False)
for module in self.compute():
module_nodes_id = set([n.id for n in module.instance().nodes])
if len(module_nodes_id & project_nodes_id):
yield from module.instance().project_closed(self)
try:
if os.path.exists(self.tmp_working_directory()):
shutil.rmtree(self.tmp_working_directory())
except OSError:
pass | Closes the project, but keep information on disk |
3,061 | def __clear_buffer_watch(self, bw):
pid = bw.pid
start = bw.start
end = bw.end
base = MemoryAddresses.align_address_to_page_start(start)
limit = MemoryAddresses.align_address_to_page_end(end)
pages = MemoryAddresses.get_buffer_size_in_pages(start, end - start)
continue
cset.add(condition)
condition.remove(bw)
if condition.count() == 0:
try:
self.erase_page_breakpoint(pid, bp.get_address())
except WindowsError:
msg = "Cannot remove page breakpoint at address %s"
msg = msg % HexDump.address( bp.get_address() )
warnings.warn(msg, BreakpointWarning)
page_addr = page_addr + pageSize | Used by L{dont_watch_buffer} and L{dont_stalk_buffer}.
@type bw: L{BufferWatch}
@param bw: Buffer watch identifier. |
3,062 | def get_roles(self):
if self.role.exist:
return [self.role]
else:
roles = []
if self.role_query_code:
roles = RoleModel.objects.filter(**self.role_query_code)
elif self.unit.exist:
if self.recursive_units:
roles = (RoleModel.objects.get(k) for k in
UnitModel.get_role_keys(self.unit.key))
else:
roles = RoleModel.objects.filter(unit=self.unit)
elif self.get_roles_from:
return ROLE_GETTER_METHODS[self.get_roles_from](RoleModel)
if self.abstract_role.exist and roles:
if isinstance(roles, (list, types.GeneratorType)):
roles = [a for a in roles if a.abstract_role.key == self.abstract_role.key]
else:
roles = roles.filter(abstract_role=self.abstract_role)
else:
roles = RoleModel.objects.filter(abstract_role=self.abstract_role)
return roles | Returns:
Role instances according to task definition. |
3,063 | def list_pr_comments(repo: GithubRepository, pull_id: int
) -> List[Dict[str, Any]]:
url = ("https://api.github.com/repos/{}/{}/issues/{}/comments"
"?access_token={}".format(repo.organization,
repo.name,
pull_id,
repo.access_token))
response = requests.get(url)
if response.status_code != 200:
raise RuntimeError(
.format(
response.status_code, response.content))
payload = json.JSONDecoder().decode(response.content.decode())
return payload | References:
https://developer.github.com/v3/issues/comments/#list-comments-on-an-issue |
3,064 | def get_http_method_arg_name(self):
if self.method == 'get':
arg_name = 'params'
else:
arg_name = 'data'
return getattr(requests, self.method), arg_name | Return the HTTP function to call and the params/data argument name |
3,065 | def dateadd(value: fields.DateTime(),
addend: fields.Int(validate=Range(min=1)),
unit: fields.Str(validate=OneOf(['minutes', 'days']))='days'):
value = value or dt.datetime.utcnow()
if unit == 'minutes':
delta = dt.timedelta(minutes=addend)
else:
delta = dt.timedelta(days=addend)
result = value + delta
return {'result': result} | Add a value to a date. |
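A direct-call sketch that bypasses the web layer; it assumes the restored `'minutes'`/`'days'` choices and `'result'` response key above, plus the module-level `import datetime as dt`.

```python
import datetime as dt

dateadd(value=dt.datetime(2020, 1, 1), addend=2, unit='days')
# {'result': datetime.datetime(2020, 1, 3, 0, 0)}
```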
3,066 | def _profile_module(self):
with open(self._run_object, 'r') as srcfile, _StatProfiler() as prof:
code = compile(srcfile.read(), self._run_object, 'exec')
prof.base_frame = inspect.currentframe()
try:
exec(code, self._globs, None)
except SystemExit:
pass
call_tree = prof.call_tree
return {
: self._object_name,
: _SAMPLE_INTERVAL,
: prof.run_time,
: call_tree,
: call_tree.get(, 0),
: int(time.time())
} | Runs statistical profiler on a module. |
3,067 | def diff(new, old):
if old is not None:
is_update = True
removed = set(new.removed(old))
updated = set(new.updated(old))
else:
is_update = False
updated = new
removed = set()
return updated, removed, is_update | Compute the difference in items of two revisioned collections. If only
`new' is specified, it is assumed it is not an update. If both are set,
the removed items are returned first. Otherwise, the updated and edited
ones are returned.
:param set new: Set of new objects
:param set old: Set of old objects
:return: A tuple consisting of `(added, removed, is_update)`.
:rtype: tuple |
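A minimal sketch of the collection contract that `diff` assumes: the new collection must expose `removed(old)` and `updated(old)`. The class below is a hypothetical stand-in, not the project's real revisioned collection.

```python
class RevisionedSet(set):
    def removed(self, old):
        return old - self      # items present in old but gone from self

    def updated(self, old):
        return self - old      # items new or changed relative to old

new = RevisionedSet({'a', 'b', 'c'})
old = RevisionedSet({'a', 'd'})
diff(new, old)   # ({'b', 'c'}, {'d'}, True)
diff(new, None)  # (new, set(), False)
```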
3,068 | def get_translation_lookup(identifier, field, value):
parts = field.split("__")
transformers = parts[1:] if len(parts) > 1 else None
field_name = parts[0]
language = get_fallback_language()
name_parts = parts[0].split("_")
if len(name_parts) > 1:
supported_languages = get_supported_languages()
last_part = name_parts[-1]
if last_part in supported_languages:
field_name = "_".join(name_parts[:-1])
language = last_part
else:
field_name = "_".join(name_parts)
value_lookup = (
"field_value"
if transformers is None
else "field_value__%s" % "__".join(transformers)
)
lookup = {"field_name": field_name, "identifier": identifier, "language": language}
lookup[value_lookup] = value
return lookup | Mapper that takes a language field, its value and returns the
related lookup for Translation model. |
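An illustrative call; the identifier is hypothetical and the result assumes `'fr'` is among the project's supported languages (otherwise the trailing `_fr` is kept as part of the field name).

```python
get_translation_lookup('myapp.article', 'title_fr__icontains', 'hello')
# {'field_name': 'title',
#  'identifier': 'myapp.article',
#  'language': 'fr',
#  'field_value__icontains': 'hello'}
```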
3,069 | def _augment_url_with_version(auth_url):
if has_in_url_path(auth_url, ["/v2.0", "/v3"]):
return auth_url
if get_keystone_version() >= 3:
return url_path_append(auth_url, "/v3")
else:
return url_path_append(auth_url, "/v2.0") | Optionally augment auth_url path with version suffix.
Check if path component already contains version suffix and if it does
not, append version suffix to the end of path, not erasing the previous
path contents, since keystone web endpoint (like /identity) could be
there. Keystone version needs to be added to endpoint because as of Kilo,
the identity URLs returned by Keystone might no longer contain API
versions, leaving the version choice up to the user. |
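A sketch of the intended behaviour with hypothetical endpoints, assuming a detected Keystone version >= 3:

```python
# Path already carries a version suffix -> returned unchanged.
_augment_url_with_version('http://controller:5000/identity/v3')
# -> 'http://controller:5000/identity/v3'

# No version in the path -> '/v3' is appended, keeping '/identity' intact.
_augment_url_with_version('http://controller:5000/identity')
# -> 'http://controller:5000/identity/v3'
```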
3,070 | def django(line):
data = {}
log = re.findall(r, line)
if len(log) == 1:
data['timestamp'] = datetime.datetime.strptime(re.findall(r,\
log[0])[0],"%d/%b/%Y %H:%M:%S").isoformat()
data[] = re.findall(, log[0])[1]
data[] = re.findall(, log[0])[0]
message = re.findall(, log[0])
try:
if len(message) > 0:
message = json.loads(message[0])
else:
message = re.split(, log[0])
message = .join(message[2:])
except ValueError:
message = re.split(, log[0])
message = .join(message[2:])
data['message'] = message
return dict(
timestamp=data['timestamp'],
level=data['loglevel'],
data=data,
)
else:
return dict(
timestamp=datetime.datetime.isoformat(datetime.datetime.utcnow()),
data={'raw': line}
) | >>> import pprint
>>> input_line1 = '[23/Aug/2017 11:35:25] INFO [app.middleware_log_req:50]View func called:{"exception": null,"processing_time": 0.00011801719665527344, "url": "<url>",host": "localhost", "user": "testing", "post_contents": "", "method": "POST" }'
>>> output_line1 = django(input_line1)
>>> pprint.pprint(output_line1)
{'data': {'loglevel': 'INFO',
'logname': '[app.middleware_log_req:50]',
'message': 'View func called:{"exception": null,"processing_time": 0.00011801719665527344, "url": "<url>",host": "localhost", "user": "testing", "post_contents": "", "method": "POST" }',
'timestamp': '2017-08-23T11:35:25'},
'level': 'INFO',
'timestamp': '2017-08-23T11:35:25'}
>>> input_line2 = '[22/Sep/2017 06:32:15] INFO [app.function:6022] {"UUID": "c47f3530-9f5f-11e7-a559-917d011459f7", "timestamp":1506061932546, "misc": {"status": 200, "ready_state": 4, "end_time_ms": 1506061932546, "url": "/api/function?", "start_time_ms": 1506061932113, "response_length": 31, "status_message": "OK", "request_time_ms": 433}, "user": "root", "host_url": "localhost:8888", "message": "ajax success"}'
>>> output_line2 = django(input_line2)
>>> pprint.pprint(output_line2)
{'data': {'loglevel': 'INFO',
'logname': '[app.function:6022]',
'message': {u'UUID': u'c47f3530-9f5f-11e7-a559-917d011459f7',
u'host_url': u'localhost:8888',
u'message': u'ajax success',
u'misc': {u'end_time_ms': 1506061932546L,
u'ready_state': 4,
u'request_time_ms': 433,
u'response_length': 31,
u'start_time_ms': 1506061932113L,
u'status': 200,
u'status_message': u'OK',
u'url': u'/api/function?'},
u'timestamp': 1506061932546L,
u'user': u'root'},
'timestamp': '2017-09-22T06:32:15'},
'level': 'INFO',
'timestamp': '2017-09-22T06:32:15'}
Case2:
[18/Sep/2017 05:40:36] ERROR [app.apps:78] failed to get the record, collection = Collection(Database(MongoClient(host=['localhost:27017'], document_class=dict, tz_aware=False, connect=True, serverselectiontimeoutms=3000), u'collection_cache'), u'function_dummy_version')
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/mongo_cache/mongocache.py", line 70, in __getitem__
result = self.collection.find_one({"_id": key})
OperationFailure: not authorized on collection_cache to execute command { find: "function", filter: { _id: "zydelig-cosine-20" }, limit: 1, singleBatch: true } |
3,071 | def ekopw(fname):
fname = stypes.stringToCharP(fname)
handle = ctypes.c_int()
libspice.ekopw_c(fname, ctypes.byref(handle))
return handle.value | Open an existing E-kernel file for writing.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekopw_c.html
:param fname: Name of EK file.
:type fname: str
:return: Handle attached to EK file.
:rtype: int |
3,072 | def check_read_inputs(self, sampfrom, sampto, channels, physical,
smooth_frames, return_res):
if not hasattr(sampfrom, '__index__'):
raise TypeError('sampfrom must be an integer')
if not hasattr(sampto, '__index__'):
raise TypeError('sampto must be an integer')
if not isinstance(channels, list):
raise TypeError('channels must be a list of integers')
if sampfrom < 0:
raise ValueError()
if sampfrom > self.sig_len:
raise ValueError()
if sampto < 0:
raise ValueError()
if sampto > self.sig_len:
raise ValueError()
if sampto <= sampfrom:
raise ValueError()
if len(channels):
if min(channels) < 0:
raise ValueError()
if max(channels) > self.n_sig - 1:
raise ValueError()
if return_res not in [64, 32, 16, 8]:
raise ValueError("return_res must be one of the following: 64, 32, 16, 8")
if physical is True and return_res == 8:
raise ValueError("return_res must be one of the following when physical is True: 64, 32, 16")
if isinstance(self, MultiRecord):
if smooth_frames is False:
raise ValueError() | Ensure that input read parameters (from rdsamp) are valid for
the record |
3,073 | def switch_window(self, window_id: int):
if window_id not in self.tmux_available_window_ids:
for i in range(max(self.tmux_available_window_ids)+1, window_id+1):
self._run_raw(f)
tmux_window = self.tmux_session + + str(i)
cmd = shlex.quote(f)
tmux_cmd = f
self._run_raw(tmux_cmd)
self.tmux_available_window_ids.append(i)
self.tmux_window_id = window_id | Switches currently active tmux window for given task. 0 is the default window
Args:
window_id: integer id of tmux window to use |
3,074 | def transfer_config_dict(soap_object, data_dict):
for key, val in data_dict.items():
setattr(soap_object, key, val) | This is a utility function used in the certification modules to transfer
the data dicts above to SOAP objects. This avoids repetition and allows
us to store all of our variable configuration here rather than in
each certification script. |
3,075 | def version_range(guid, version, before=None, app_versions=None):
if app_versions is None:
app_versions = validator.constants.APPROVED_APPLICATIONS
app_key = None
for app_guid, app_name in APPLICATIONS.items():
if app_name == guid:
guid = app_guid
break
for key in app_versions.keys():
if app_versions[key]['guid'] == guid:
app_key = key
break
if not app_key or version not in app_versions[app_key]['versions']:
raise Exception(
'Version %s not found for application' % version)
all_versions = app_versions[app_key]['versions']
version_pos = all_versions.index(version)
before_pos = None
if before is not None and before in all_versions:
before_pos = all_versions.index(before)
return all_versions[version_pos:before_pos] | Returns all values after (and including) `version` for the app `guid` |
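A sketch of how version_range() slices the approved-version list, using a toy app_versions mapping; the GUID and version strings are illustrative and assume the 'guid'/'versions' layout reconstructed above:

```python
app_versions = {
    '1': {'guid': '{ec8030f7-c20a-464f-9b0e-13a3a9e97384}',
          'versions': ['3.6', '4.0', '5.0', '6.0']},
}
print(version_range('{ec8030f7-c20a-464f-9b0e-13a3a9e97384}', '4.0',
                    before='6.0', app_versions=app_versions))
# ['4.0', '5.0']
```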
3,076 | def pip_install(self, reqs):
if not reqs:
return
log.info('Calling pip to install %s', reqs)
check_call([
sys.executable, '-m', 'pip', 'install', '--ignore-installed',
'--prefix', self.path] + list(reqs)) | Install dependencies into this env by calling pip in a subprocess |
3,077 | def slugable(self):
if self.page:
if self.is_leaf_node():
return True
if not self.is_leaf_node() and not self.page.regex:
return True
if not self.is_leaf_node() and self.page.regex and not self.page.show_regex:
return True
if not self.is_leaf_node() and self.page.regex and self.page.show_regex and self.value_regex:
return True
elif not self.is_leaf_node() and not self.hide_in_url:
return True
return False | A node is slugable in following cases:
1 - Node doesn't have children.
2 - Node has children but its page doesn't have a regex.
3 - Node has children, its page has regex but it doesn't show it.
4 - Node has children, its page shows his regex and node has a default value for regex.
5 - Node doesn't have a page but it isn't hidden in the url. |
3,078 | def setup_plugins(extra_plugin_dir=None):
if os.path.isdir(PLUGINS_DIR):
load_plugins([PLUGINS_DIR])
if extra_plugin_dir:
load_plugins(extra_plugin_dir) | Loads any additional plugins. |
3,079 | def authenticate_user(username, password):
user_model = Query()
user = db.get(user_model.username == username)
if not user:
logger.warning("User %s not found", username)
return False
if user['password'] == hash_password(password, user.get('salt')):
return user['username']
return False | Authenticate a username and password against our database
:param username:
:param password:
:return: authenticated username |
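The comparison above follows the usual salted-hash pattern; here is a self-contained sketch of that pattern with a stand-in hash_password (not the project's actual implementation):

```python
import hashlib

def hash_password(password, salt):
    # Stand-in: hash the salt and password together.
    return hashlib.sha256((salt + password).encode()).hexdigest()

salt = 'a3f1c2d4'                                     # stored next to the user record
stored_hash = hash_password('s3cret', salt)
print(hash_password('s3cret', salt) == stored_hash)  # True  -> authenticated
print(hash_password('wrong', salt) == stored_hash)   # False -> rejected
```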
3,080 | def mft_mirror_offset(self):
return self.bpb.bytes_per_sector * \
self.bpb.sectors_per_cluster * self.extended_bpb.mft_mirror_cluster | Returns:
int: Mirror MFT Table offset from the beginning of the partition \
in bytes |
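A worked example of the offset arithmetic with typical (illustrative) NTFS values:

```python
bytes_per_sector = 512
sectors_per_cluster = 8      # 4 KiB clusters
mft_mirror_cluster = 2       # cluster number from the extended BPB
print(bytes_per_sector * sectors_per_cluster * mft_mirror_cluster)  # 8192
```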
3,081 | def __process_by_ccore(self):
ccore_metric = metric_wrapper.create_instance(self.__metric)
self.__score = wrapper.silhoeutte(self.__data, self.__clusters, ccore_metric.get_pointer()) | !
@brief Performs processing using CCORE (C/C++ part of pyclustering library). |
3,082 | def _get_boolean(data, position, dummy0, dummy1):
end = position + 1
return data[position:end] == b"\x01", end | Decode a BSON true/false to python True/False. |
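The byte-level contract, illustrated on a hand-built buffer (the buffer and position are made up):

```python
data = b"\x00\x01\x00"
position = 1
end = position + 1
print(data[position:end] == b"\x01", end)  # True 2
```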
3,083 | def _CollapseStrings(elided):
if _RE_PATTERN_INCLUDE.match(elided):
return elided
collapsed =
while True:
match = Match(r"]*)([\, elided)
if not match:
collapsed += elided
break
head, quote, tail = match.groups()
if quote == :
second_quote = tail.find()
if second_quote >= 0:
collapsed += head +
elided = tail[second_quote + 1:]
else:
elided = match_literal.group(2)
else:
second_quote = tail.find('"
elided = tail[second_quote + 1:]
else:
collapsed += elided
break
return collapsed | Collapses strings and chars on a line to simple "" or '' blocks.
We nix strings first so we're not fooled by text like '"http://"'
Args:
elided: The line being processed.
Returns:
The line with collapsed strings. |
3,084 | def get_variables(self) -> Set[str]:
variables = set()
for cmd in self._cmd:
for var in self.__formatter.parse(cmd):
logger.debug("Checking variable: %s", var)
if var[1] is not None and var[1] not in ["creates", "requires"]:
variables.add(var[1])
return variables | Find all the variables specified in a format string.
This returns a set of all the different variables specified in a format string,
that is the variables inside the braces. |
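The extraction relies on string.Formatter.parse(); the same filtering can be sketched standalone over a hypothetical command template:

```python
from string import Formatter

cmd = "convert {input} {output} --log {creates}"
variables = {field for _, field, _, _ in Formatter().parse(cmd)
             if field is not None and field not in ("creates", "requires")}
print(variables)  # {'input', 'output'}
```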
3,085 | def can_user_access_build(param_name):
build_id = (
request.args.get(param_name, type=int) or
request.form.get(param_name, type=int) or
request.json[param_name])
if not build_id:
logging.debug('Build ID missing in request parameter %r', param_name)
abort(400)
ops = operations.UserOps(current_user.get_id())
build, user_is_owner = ops.owns_build(build_id)
if not build:
logging.debug('Could not find build_id=%r', build_id)
abort(404)
if current_user.is_authenticated() and not user_is_owner:
abort(login.needs_refresh())
return build | Determines if the current user can access the build ID in the request.
Args:
param_name: Parameter name to use for getting the build ID from the
request. Will fetch from GET or POST requests.
Returns:
The build the user has access to. |
3,086 | def to_python(self, value):
value = super(LinkedTZDateTimeField, self).to_python(value)
if not value:
return value
return value.astimezone(self.timezone) | Convert the value to the appropriate timezone. |
3,087 | def _get_metricsmgr_cmd(self, metricsManagerId, sink_config_file, port):
metricsmgr_main_class =
metricsmgr_cmd = [os.path.join(self.heron_java_home, ),
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
self.metrics_manager_classpath,
metricsmgr_main_class,
+ metricsManagerId,
+ str(port),
+ self.topology_name,
+ self.cluster,
+ self.role,
+ self.environment,
+ self.topology_id,
+ self.heron_internals_config_file,
+ self.override_config_file,
+ sink_config_file]
return Command(metricsmgr_cmd, self.shell_env) | get the command to start the metrics manager processes |
3,088 | def generate_ast(path):
if os.path.isfile(path):
with open(path, 'r') as f:
try:
tree = ast.parse(f.read())
return PytTransformer().visit(tree)
except SyntaxError:
global recursive
if not recursive:
_convert_to_3(path)
recursive = True
return generate_ast(path)
else:
raise SyntaxError('The ast module can not parse the file'
' and the python 2 to 3 conversion also failed.')
raise IOError('Input needs to be a file. Path: ' + path) | Generate an Abstract Syntax Tree using the ast module.
Args:
path(str): The path to the file e.g. example/foo/bar.py |
3,089 | def set_params(w, src):
params = extract_source_params(src)
params.update(extract_geometry_params(src))
mfd_pars, rate_pars = extract_mfd_params(src)
params.update(mfd_pars)
params.update(rate_pars)
strikes, dips, rakes, np_weights = extract_source_nodal_planes(src)
params.update(strikes)
params.update(dips)
params.update(rakes)
params.update(np_weights)
hds, hdsw = extract_source_hypocentral_depths(src)
params.update(hds)
params.update(hdsw)
pstrikes, pdips = extract_source_planes_strikes_dips(src)
params.update(pstrikes)
params.update(pdips)
params[] = striptag(src.tag)
w.record(**params) | Set source parameters. |
3,090 | def make_cashed(self):
self._descendance_cash = [dict() for _ in self.graph]
self.descend = self._descend_cashed | Enables caching of descend queries |
3,091 | def verify_file_exists(file_name, file_location):
return __os.path.isfile(__os.path.join(file_location, file_name)) | Function to verify if a file exists
Args:
file_name: The name of file to check
file_location: The location of the file, derive from the os module
Returns: returns boolean True or False |
3,092 | def deactivate_lvm_volume_group(block_device):
vg = list_lvm_volume_group(block_device)
if vg:
cmd = ['vgchange', '-an', vg]
check_call(cmd) | Deactivate any volume group associated with an LVM physical volume.
:param block_device: str: Full path to LVM physical volume |
3,093 | def list_vdirs(site, app=_DEFAULT_APP):
ret = dict()
ps_cmd = [,
, r"".format(site),
, r"".format(app),
, "Select-Object PhysicalPath, @{ Name = ;",
r"Expression = { $_.path.Split()[-1] } }"]
cmd_ret = _srvmgr(cmd=ps_cmd, return_json=True)
try:
items = salt.utils.json.loads(cmd_ret[], strict=False)
except ValueError:
raise CommandExecutionError()
for item in items:
ret[item[]] = {: item[]}
if not ret:
log.warning(, cmd_ret)
return ret | Get all configured IIS virtual directories for the specified site, or for
the combination of site and application.
Args:
site (str): The IIS site name.
app (str): The IIS application.
Returns:
dict: A dictionary of the virtual directory names and properties.
CLI Example:
.. code-block:: bash
salt '*' win_iis.list_vdirs site |
3,094 | def generate_property_names(self):
property_names_definition = self._definition.get(, {})
if property_names_definition is True:
pass
elif property_names_definition is False:
self.create_variable_keys()
with self.l():
self.l()
else:
self.create_variable_is_dict()
with self.l():
self.create_variable_with_length()
with self.l():
self.l()
with self.l():
with self.l():
self.generate_func_code_block(
property_names_definition,
.format(self._variable),
self._variable_name,
clear_variables=True,
)
with self.l():
self.l()
with self.l():
self.l() | Means that keys of object must to follow this definition.
.. code-block:: python
{
'propertyNames': {
'maxLength': 3,
},
}
Valid keys of object for this definition are foo, bar, ... but not foobar for example. |
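For comparison, the same propertyNames constraint can be checked with the generic jsonschema package (an assumption about available tooling, not part of this code generator):

```python
import jsonschema

schema = {'propertyNames': {'maxLength': 3}}
jsonschema.validate({'foo': 1, 'bar': 2}, schema)   # valid: all keys are <= 3 chars
try:
    jsonschema.validate({'foobar': 1}, schema)
except jsonschema.ValidationError as exc:
    print(exc.message)                              # 'foobar' is too long
```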
3,095 | def encode(self, sequence):
polymorphisms = []
defaultSequence = ''
binSequence = array.array(self.forma.typecode)
b = 0
i = 0
trueI = 0
poly = set()
while i < len(sequence)-1:
b = b | self.forma[self.charToBin[sequence[i]]]
if sequence[i+1] == :
poly.add(sequence[i])
i += 2
else :
binSequence.append(b)
if len(poly) > 0 :
poly.add(sequence[i])
polymorphisms.append((trueI, poly))
poly = set()
bb = 0
while b % 2 != 0 :
b = b/2
defaultSequence += sequence[i]
b = 0
i += 1
trueI += 1
if i < len(sequence) :
b = b | self.forma[self.charToBin[sequence[i]]]
binSequence.append(b)
if len(poly) > 0 :
if sequence[i] not in poly :
poly.add(sequence[i])
polymorphisms.append((trueI, poly))
defaultSequence += sequence[i]
return (binSequence, defaultSequence, polymorphisms) | Returns a tuple (binary reprensentation, default sequence, polymorphisms list) |
3,096 | def from_dict(cls, pref, prefix = None):
if prefix is None:
prefix = Prefix()
prefix.id = pref[]
if pref[] is not None:
prefix.vrf = VRF.get(pref[])
prefix.family = pref[]
prefix.prefix = pref[]
prefix.display_prefix = pref[]
prefix.description = pref[]
prefix.comment = pref[]
prefix.node = pref[]
if pref[] is not None:
prefix.pool = Pool.get(pref[])
prefix.type = pref[]
prefix.indent = pref[]
prefix.country = pref[]
prefix.order_id = pref[]
prefix.customer_id = pref[]
prefix.external_key = pref[]
prefix.authoritative_source = pref[]
prefix.alarm_priority = pref[]
prefix.monitor = pref[]
prefix.vlan = pref[]
prefix.added = pref[]
prefix.last_modified = pref[]
prefix.total_addresses = int(pref[])
prefix.used_addresses = int(pref[])
prefix.free_addresses = int(pref[])
prefix.status = pref[]
prefix.avps = pref[]
prefix.expires = pref[]
prefix.inherited_tags = {}
for tag_name in pref[]:
tag = Tag.from_dict({: tag_name })
prefix.inherited_tags[tag_name] = tag
prefix.tags = {}
for tag_name in pref[]:
tag = Tag.from_dict({: tag_name })
prefix.tags[tag_name] = tag
if in pref:
prefix.match = pref[]
if in pref:
prefix.display = pref[]
if in pref:
prefix.children = pref[]
return prefix | Create a Prefix object from a dict.
Suitable for creating Prefix objects from XML-RPC input. |
3,097 | def validate(retval, func, args):
if retval != 0 and not ERROR.details:
return args
err = "{}() failed".format(func.__name__)
details = {"retval": retval, "args": args}
raise ScreenShotError(err, details=details) | Validate the returned value of a Xlib or XRANDR function. |
3,098 | def raw(self, module, method='get', data=None):
request = self.session
url = 'http://%s:%s/%s' % (self.host, self.port, module)
if self.verbose:
print data
if method=='get':
response = request.get(url)
elif method=='post':
response = request.post(url,data)
elif method=='put':
response = request.put(url,data)
elif method=='delete':
response = request.delete(url)
else:
return {'error': 'unsupported method %s' % method}
return response | Submits or requests raw input |
3,099 | def update_subscription(self, update_parameters, subscription_id):
route_values = {}
if subscription_id is not None:
route_values[] = self._serialize.url(, subscription_id, )
content = self._serialize.body(update_parameters, )
response = self._send(http_method=,
location_id=,
version=,
route_values=route_values,
content=content)
return self._deserialize(, response) | UpdateSubscription.
[Preview API] Update an existing subscription. Depending on the type of subscription and permissions, the caller can update the description, filter settings, channel (delivery) settings and more.
:param :class:`<NotificationSubscriptionUpdateParameters> <azure.devops.v5_0.notification.models.NotificationSubscriptionUpdateParameters>` update_parameters:
:param str subscription_id:
:rtype: :class:`<NotificationSubscription> <azure.devops.v5_0.notification.models.NotificationSubscription>` |