Unnamed: 0 (int64, 0–389k) | code (string, lengths 26–79.6k) | docstring (string, lengths 1–46.9k)
---|---|---|
386,400 | def get_es_requirements(es_version):
es_version = es_version.replace(".", " ")
es_version = map(int, es_version.split())
if es_version >= [6]:
return ">=6.0.0, <7.0.0"
elif es_version >= [5]:
return ">=5.0.0, <6.0.0"
elif es_version >= [2]:
return ">=2.0.0, <3.0.0"
elif es_version >= [1]:
return ">=1.0.0, <2.0.0"
else:
return "<1.0.0" | Get the requirements string for elasticsearch-py library
Returns a suitable requirements string for the elasticsearch-py library
according to the elasticsearch version to be supported (es_version) |
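Illustrative behaviour of the entry above, assuming Python 2 semantics where map() returns a list:
>>> get_es_requirements('6.2')
'>=6.0.0, <7.0.0'
>>> get_es_requirements('0.9')
'<1.0.0'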
386,401 | def help_center_article_subscriptions(self, article_id, locale=None, **kwargs):
"https://developer.zendesk.com/rest_api/docs/help_center/subscriptions
api_path = "/api/v2/help_center/articles/{article_id}/subscriptions.json"
api_path = api_path.format(article_id=article_id)
if locale:
api_opt_path = "/api/v2/help_center/{locale}/articles/{article_id}/subscriptions.json"
api_path = api_opt_path.format(article_id=article_id, locale=locale)
return self.call(api_path, **kwargs) | https://developer.zendesk.com/rest_api/docs/help_center/subscriptions#list-article-subscriptions |
386,402 | def on_create_view(self):
d = self.declaration
changed = not d.condition
if changed:
d.condition = True
view = self.get_view()
if changed:
self.ready.set_result(True)
return view | Trigger the click |
386,403 | def required_fields(self):
return {f:v for f, v in self.normal_fields.items() if v.required} | The normal required fields (eg, no magic fields like _id are included) |
386,404 | def updateVocalAuto(self, component, files):
auto_model = self.model().autoParams()
row = auto_model.fileParameter(component)
if len(files) > 1:
clean_component = self.model().data(self.model().indexByComponent(component), AbstractDragView.DragRole)
p = { : ,
: files,
: [clean_component]
}
if row is None:
auto_model.insertItem(auto_model.index(0,0), p)
else:
auto_model.setData(auto_model.index(row,0),p)
elif row is not None:
auto_model.removeRow(row)
self.countChanged.emit() | Updates the auto-parameter with selected *component* to have
*files*. Adds auto-parameter if not already present. The auto-parameter is expected to have only one selected
component (the one given). If length of files < 1, removes the
auto-parameter from the model.
:param component: Component that the auto-parameter is modifying
:type component: :class:`AbstractStimulusComponent<sparkle.stim.abstract_component.AbstractStimulusComponent>`
:param files: list of file names to act as the auto-parameter list
:type files: list<str> |
386,405 | def parse_annotation(code):
module = ast.parse(code)
assert type(module) is ast.Module,
assert len(module.body) == 1,
assert type(module.body[0]) is ast.Expr,
return module.body[0] | Parse an annotation string.
Return an AST Expr node.
code: annotation string (excluding '@') |
386,406 | def value_validate(self, value):
if not isinstance(value, datetime.datetime):
raise tldap.exceptions.ValidationError("is invalid date time") | Converts the input single value into the expected Python data type,
raising django.core.exceptions.ValidationError if the data can't be
converted. Returns the converted value. Subclasses should override
this. |
386,407 | def _write(
df,
filename=None,
schema=,
taxon_col=,
taxon_annotations=[],
node_col=,
node_annotations=[],
branch_lengths=True,
**kwargs
):
tree = _pandas_df_to_dendropy_tree(
df,
taxon_col=taxon_col,
taxon_annotations=taxon_annotations,
node_col=node_col,
node_annotations=node_annotations,
branch_lengths=branch_lengths,
)
print(schema)
if filename is not None:
tree.write(path=filename, schema=schema, suppress_annotations=False, **kwargs)
else:
return tree.as_string(schema=schema) | Write a phylopandas tree DataFrame to various formats.
Parameters
----------
df : DataFrame
DataFrame containing tree data.
filename : str
filepath to write out tree. If None, will return string.
schema : str
tree format to write out.
taxon_col : str (optional)
Column in dataframe to label the taxon. If None, the index will be used.
taxon_annotations : str
List of columns to annotate in the tree taxon.
node_col : str (optional)
Column in dataframe to label the nodes. If None, the index will be used.
node_annotations : str
List of columns to annotate in the node taxon.
branch_lengths : bool
If True, includes branch lengths. |
386,408 | def remove(self, items, working_tree=False, **kwargs):
args = []
if not working_tree:
args.append("--cached")
args.append("--")
paths = self._items_to_rela_paths(items)
removed_paths = self.repo.git.rm(args, paths, **kwargs).splitlines()
return [p[4:-1] for p in removed_paths] | Remove the given items from the index and optionally from
the working tree as well.
:param items:
Multiple types of items are supported which may be freely mixed.
- path string
Remove the given path at all stages. If it is a directory, you must
specify the r=True keyword argument to remove all file entries
below it. If absolute paths are given, they will be converted
to a path relative to the git repository directory containing
the working tree
The path string may include globs, such as *.c.
- Blob Object
Only the path portion is used in this case.
- BaseIndexEntry or compatible type
The only relevant information here is the path. The stage is ignored.
:param working_tree:
If True, the entry will also be removed from the working tree, physically
removing the respective file. This may fail if there are uncommitted changes
in it.
:param kwargs:
Additional keyword arguments to be passed to git-rm, such
as 'r' to allow recursive removal of
:return:
List(path_string, ...) list of repository relative paths that have
been removed effectively.
This is interesting to know in case you have provided a directory or
globs. Paths are relative to the repository. |
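A hypothetical usage sketch for the entry above; the repository object and the returned paths are illustrative, not from the source:
>>> import git
>>> repo = git.Repo('.')
>>> repo.index.remove(['*.pyc'], working_tree=True)
['main.pyc']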
386,409 | def _compute_total_chunks(self, chunk_size):
try:
if self._src_block_list is not None:
blen = len(self._src_block_list)
if blen > 0:
return blen
else:
return 1
else:
return int(math.ceil(self._src_ase.size / chunk_size))
except ZeroDivisionError:
return 1 | Compute total number of chunks for entity
:param Descriptor self: this
:param int chunk_size: chunk size
:rtype: int
:return: num chunks |
386,410 | def persist(self):
if self.app.dry:
return
for proj in self.subprojects.values():
proj.persist() | Banana banana |
386,411 | def locate(cls):
if cls._INSTANCE is None:
bootstrap_path = __file__
module_import_path = __name__.split('.')
for _ in module_import_path:
bootstrap_path = os.path.dirname(bootstrap_path)
cls._INSTANCE = cls(sys_path_entry=bootstrap_path)
return cls._INSTANCE | Locates the active PEX bootstrap.
:rtype: :class:`Bootstrap` |
386,412 | def auto_track_url(track):
hub = track.root(cls=Hub)
if hub is None:
raise ValueError(
"track is not fully connected because the root is %s" % repr(hub))
if hub.url is None:
raise ValueError("hub.url is not set")
if track.source is None:
raise ValueError("track.source is not set") | Automatically sets the bigDataUrl for `track`.
Requirements:
* the track must be fully connected, such that its root is a Hub object
* the root Hub object must have the Hub.url attribute set
* the track must have the `source` attribute set |
386,413 | def btc_tx_witness_strip( tx_serialized ):
if not btc_tx_is_segwit(tx_serialized):
return tx_serialized
tx = btc_tx_deserialize(tx_serialized)
for inp in tx['ins']:
del inp['witness_script']
tx_stripped = btc_tx_serialize(tx)
return tx_stripped | Strip the witness information from a serialized transaction |
386,414 | def delete(self, *args, **kwargs):
count = 0
max_retries=3
while True:
try:
return super(BaseModel, self).delete(*args, **kwargs)
except django.db.utils.OperationalError:
if count >= max_retries:
raise
count += 1 | This method implements retries for object deletion. |
386,415 | def unquoted(self):
key = str(self)
if key.startswith('"') and key.endswith('"'):
return key[1:-1]
return key | Return *key* with one level of double quotes removed.
Redshift stores some identifiers without quotes in internal tables,
even though the name must be quoted elsewhere.
In particular, this happens for tables named as a keyword. |
386,416 | def mutate(self,p_i,func_set,term_set):
self.point_mutate(p_i,func_set,term_set) | point mutation, addition, removal |
386,417 | def createGroups(self, configFiles, dateTimeFormat=None):
groupInfo = None
groupFile = None
iconPath = None
startTime = None
thumbnail = None
result = None
config = None
sciptPath = None
orgTools = None
if dateTimeFormat is None:
dateTimeFormat = '%Y-%m-%d %H:%M'
scriptStartTime = datetime.datetime.now()
try:
print ("********************Create Groups********************")
print ("Script started at %s" % scriptStartTime.strftime(dateTimeFormat))
if self.securityhandler is None or \
self.securityhandler.valid == False:
print ("Login required")
else:
orgTools = orgtools.orgtools(securityinfo=self)
if orgTools is None:
print ("Error creating orgtools")
else:
for configFile in configFiles:
config = common.init_config_json(config_file=configFile)
if config is not None:
startTime = datetime.datetime.now()
print ("Processing config %s, starting at: %s" % (configFile,startTime.strftime(dateTimeFormat)))
groupInfo = config[]
groupFile = groupInfo[]
iconPath = groupInfo[]
if os.path.isfile(groupFile):
with open(groupFile, ) as csvfile:
for row in csv.DictReader(csvfile,dialect=):
if os.path.isfile(os.path.join(iconPath,row[])):
thumbnail = os.path.join(iconPath,row[])
if not os.path.isabs(thumbnail):
sciptPath = os.getcwd()
thumbnail = os.path.join(sciptPath,thumbnail)
result = orgTools.createGroup(title=row[],description=row[],tags=row[],snippet=row[],phone=row[],access=row[],sortField=row[],sortOrder=row[], \
isViewOnly=row[],isInvitationOnly=row[],thumbnail=thumbnail)
else:
result = orgTools.createGroup(title=row[],description=row[],tags=row[],snippet=row[],phone=row[],access=row[],sortField=row[],sortOrder=row[], \
isViewOnly=row[],isInvitationOnly=row[])
if result is None:
pass
else:
print ("Group created: " + result.title)
print ("Config %s completed, time to complete: %s" % (configFile, str(datetime.datetime.now() - startTime)))
else:
print ("Config %s not found" % configFile)
except(TypeError,ValueError,AttributeError) as e:
print (e)
except (common.ArcRestHelperError) as e:
print ("error in function: %s" % e[0][])
print ("error on line: %s" % e[0][])
print ("error in file name: %s" % e[0][])
print ("with error message: %s" % e[0][])
if in e[0]:
print ("with arcpy message: %s" % e[0][])
except Exception as e:
if (reportToolsInstalled):
if isinstance(e,(ReportTools.ReportToolsError,DataPrep.DataPrepError)):
print ("error in function: %s" % e[0][])
print ("error on line: %s" % e[0][])
print ("error in file name: %s" % e[0][])
print ("with error message: %s" % e[0][])
if in e[0]:
print ("with arcpy message: %s" % e[0][])
else:
line, filename, synerror = trace()
print ("error on line: %s" % line)
print ("error in file name: %s" % filename)
print ("with error message: %s" % synerror)
else:
line, filename, synerror = trace()
print ("error on line: %s" % line)
print ("error in file name: %s" % filename)
print ("with error message: %s" % synerror)
finally:
print ("Script complete, time to complete: %s" % str(datetime.datetime.now() - scriptStartTime))
print ("
print ("")
groupInfo = None
groupFile = None
iconPath = None
startTime = None
thumbnail = None
result = None
config = None
sciptPath = None
orgTools = None
del groupInfo
del groupFile
del iconPath
del startTime
del thumbnail
del result
del config
del sciptPath
del orgTools
gc.collect() | Parses a JSON configuration file to create groups.
Args:
configFiles (list): A list of JSON files on disk containing
configuration data for creating groups.
dateTimeFormat (str): A valid date formatting directive, as understood
by :py:meth:`datetime.datetime.strftime`. Defaults to ``None``, i.e.,
``'%Y-%m-%d %H:%M'``. |
386,418 | def get_char_weights(doc_weighted_spans, preserve_density=None):
if preserve_density is None:
preserve_density = doc_weighted_spans.preserve_density
char_weights = np.zeros(len(doc_weighted_spans.document))
feature_counts = Counter(f for f, _, __ in doc_weighted_spans.spans)
for feature, spans, weight in doc_weighted_spans.spans:
for start, end in spans:
start = max(0, start)
if preserve_density:
weight /= (end - start)
weight /= feature_counts[feature]
char_weights[start:end] += weight
return char_weights | Return character weights for a text document with highlighted features.
If preserve_density is True, then color for longer fragments will be
less intensive than for shorter fragments, so that "sum" of intensities
will correspond to feature weight.
If preserve_density is None, then its value is taken from
the corresponding attribute of doc_weighted_spans. |
386,419 | def interactive(plugin):
items = [item for item in once(plugin) if not item.get_played()]
parent_stack = []
selected_item = get_user_choice(items)
while selected_item is not None:
if parent_stack and selected_item == parent_stack[-1]:
parent_stack.pop()
else:
parent_stack.append(ListItem.from_dict(label=,
path=plugin.request.url))
patch_plugin(plugin, selected_item.get_path())
items = [item for item in once(plugin, parent_stack=parent_stack)
if not item.get_played()]
selected_item = get_user_choice(items) | A run mode for the CLI that runs the plugin in a loop based on user
input. |
386,420 | def get_kubernetes_configuration(self, mount_point='kubernetes'):
url = '/v1/auth/{0}/config'.format(mount_point)
return self._adapter.get(url).json() | GET /auth/<mount_point>/config
:param mount_point: The "path" the k8s auth backend was mounted on. Vault currently defaults to "kubernetes".
:type mount_point: str.
:return: Parsed JSON response from the config GET request
:rtype: dict. |
386,421 | def setQuickColor( self, color ):
colorset = XPaletteColorSet()
colorset.setPalette(QPalette(color))
self.setColorSet(colorset) | Sets the quick color for the palette to the given color.
:param color | <QColor> |
386,422 | def batch_write_input(structures, vasp_input_set=MPRelaxSet, output_dir=".",
make_dir_if_not_present=True, subfolder=None,
sanitize=False, include_cif=False, **kwargs):
output_dir = Path(output_dir)
for i, s in enumerate(structures):
formula = re.sub(r"\s+", "", s.formula)
if subfolder is not None:
subdir = subfolder(s)
d = output_dir / subdir
else:
d = output_dir / "{}_{}".format(formula, i)
if sanitize:
s = s.copy(sanitize=True)
v = vasp_input_set(s, **kwargs)
v.write_input(str(d), make_dir_if_not_present=make_dir_if_not_present,
include_cif=include_cif) | Batch write vasp input for a sequence of structures to
output_dir, following the format output_dir/{group}/{formula}_{number}.
Args:
structures ([Structure]): Sequence of Structures.
vasp_input_set (VaspInputSet): VaspInputSet class that creates
vasp input files from structures. Note that a class should be
supplied. Defaults to MPRelaxSet.
output_dir (str): Directory to output files. Defaults to current
directory ".".
make_dir_if_not_present (bool): Create the directory if not present.
Defaults to True.
subfolder (callable): Function to create subdirectory name from
structure. Defaults to simply "formula_count".
sanitize (bool): Boolean indicating whether to sanitize the
structure before writing the VASP input files. Sanitized output
are generally easier for viewing and certain forms of analysis.
Defaults to False.
include_cif (bool): Whether to output a CIF as well. CIF files are
generally better supported in visualization programs.
\\*\\*kwargs: Additional kwargs are passed to the vasp_input_set class
in addition to structure. |
386,423 | def parse_config(self, device=None, profile=None, native=None, attrs=None):
if attrs is None:
attrs = self.elements().values()
for v in attrs:
parser = Parser(
v, device=device, profile=profile, native=native, is_config=True
)
parser.parse() | Parse native configuration and load it into the corresponding models. Only models
that have been added to the root object will be parsed.
If ``native`` is passed to the method that's what we will parse, otherwise, we will use the
``device`` to retrieve it.
Args:
device (NetworkDriver): Device to load the configuration from.
profile (list): Profiles that the device supports. If no ``profile`` is passed it will
be read from ``device``.
native (list of strings): Native configuration to parse.
Examples:
>>> # Load from device
>>> running_config = napalm_yang.base.Root()
>>> running_config.add_model(napalm_yang.models.openconfig_interfaces)
>>> running_config.parse_config(device=d)
>>> # Load from file
>>> with open("junos.config", "r") as f:
>>> config = f.read()
>>>
>>> running_config = napalm_yang.base.Root()
>>> running_config.add_model(napalm_yang.models.openconfig_interfaces)
>>> running_config.parse_config(native=[config], profile="junos") |
386,424 | def read_node(self, name, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_node_with_http_info(name, **kwargs)
else:
(data) = self.read_node_with_http_info(name, **kwargs)
return data | read_node # noqa: E501
read the specified Node # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_node(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Node (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1Node
If the method is called asynchronously,
returns the request thread. |
386,425 | def list_database_names(self, session=None):
return [doc["name"]
for doc in self.list_databases(session, nameOnly=True)] | Get a list of the names of all databases on the connected server.
:Parameters:
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
.. versionadded:: 3.6 |
386,426 | def emitRecordMiddleClicked(self, item):
if isinstance(item, XOrbRecordItem) and not self.signalsBlocked():
self.recordMiddleClicked.emit(item.record()) | Emits the record clicked signal for the given item, provided the
signals are not currently blocked.
:param item | <QTreeWidgetItem> |
386,427 | def toggle_autojump():
if not autojump_enabled():
with open(AUTOJUMP_FILE, 'w') as ajfile:
ajfile.write("enabled")
else:
os.remove(AUTOJUMP_FILE) | Toggles Autojump |
386,428 | def all(self, audience=None, page=None, per_page=None, include_totals=False, client_id=None):
params = {
'audience': audience,
'page': page,
'per_page': per_page,
'include_totals': str(include_totals).lower(),
'client_id': client_id,
}
return self.client.get(self._url(), params=params) | Retrieves all client grants.
Args:
audience (str, optional): URL encoded audience of a Resource Server
to filter
page (int, optional): The result's page number (zero based).
per_page (int, optional): The amount of entries per page.
include_totals (bool, optional): True if the query summary is
to be included in the result, False otherwise.
client_id (string, optional): The id of a client to filter
See: https://auth0.com/docs/api/management/v2#!/Client_Grants/get_client_grants |
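A hypothetical call for the entry above, assuming an initialized Auth0 management client named auth0:
>>> auth0.client_grants.all(audience='https://api.example.com', page=0, per_page=25)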
386,429 | def visit_attribute(self, node):
for pattern in self.config.generated_members:
if re.match(pattern, node.attrname):
return
if re.match(pattern, node.as_string()):
return
try:
inferred = list(node.expr.infer())
except exceptions.InferenceError:
return
missingattr = set()
non_opaque_inference_results = [
owner
for owner in inferred
if owner is not astroid.Uninferable
and not isinstance(owner, astroid.nodes.Unknown)
]
if (
len(non_opaque_inference_results) != len(inferred)
and self.config.ignore_on_opaque_inference
):
return
for owner in non_opaque_inference_results:
name = getattr(owner, "name", None)
if _is_owner_ignored(
owner, name, self.config.ignored_classes, self.config.ignored_modules
):
continue
try:
if not [
n
for n in owner.getattr(node.attrname)
if not isinstance(n.statement(), astroid.AugAssign)
]:
missingattr.add((owner, name))
continue
except AttributeError:
continue
except exceptions.NotFoundError:
if not _emit_no_member(
node,
owner,
name,
ignored_mixins=self.config.ignore_mixin_members,
ignored_none=self.config.ignore_none,
):
continue
missingattr.add((owner, name))
continue
break
else:
done = set()
for owner, name in missingattr:
if isinstance(owner, astroid.Instance):
actual = owner._proxied
else:
actual = owner
if actual in done:
continue
done.add(actual)
msg, hint = self._get_nomember_msgid_hint(node, owner)
self.add_message(
msg,
node=node,
args=(owner.display_type(), name, node.attrname, hint),
confidence=INFERENCE,
) | check that the accessed attribute exists
to avoid too many false positives for now, we'll consider the code as
correct if a single one of the inferred nodes has the accessed attribute.
function/method, super call and metaclasses are ignored |
386,430 | def add_field(self, field_instance_or_string):
if isinstance(field_instance_or_string, basestring):
field_instance = Field(field_instance_or_string)
elif isinstance(field_instance_or_string, Field):
field_instance = field_instance_or_string
else:
raise ValueError()
self.fields.append(field_instance)
return self | Appends a field, can be a :class:`~es_fluent.fields.Field` or string. |
386,431 | def camelize(word):
return ''.join(w[0].upper() + w[1:]
for w in re.sub('_', ' ', word).split())
Args:
word: The string to convert.
Returns:
The modified string. |
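Illustrative behaviour of the entry above:
>>> camelize('hello_world')
'HelloWorld'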
386,432 | def _is_instance(type_to_check, element, condition="any", deep=False):
out = None
if deep is False:
if condition == "any":
out = any(isinstance(el, type_to_check) for el in element)
elif condition == "all":
out = all(isinstance(el, type_to_check) for el in element)
else:
for row in range(0, len(element)):
for column in range(0, len(element[row])):
flag = _is_instance(type_to_check, element[column][row], "all", deep=False)
if flag is False:
out = flag
else:
out = True
return out | -----
Brief
-----
Function that verifies when "all" or "any" elements of the list "element" have the type
specified in "type_to_check" input.
-----------
Description
-----------
In some biosignalsnotebooks functions their implementation is extremely dependent on a specific
criterion, i.e., 'all' list entries should be of a specific data type.
In order to ensure this functionality _is_instance function was implemented.
For example, when plotting data through 'plot' function of 'visualise' module, 'all' entries
of time axis and data samples lists need to be 'Numeric'.
In order to this condition be checked _is_instance should be called with the following input
values:
_is_instance(Number, [1, 2, 3, True, ...], 'all')
Sometimes is also relevant to check if at least one of list entries belongs to a data type, for
cases like this, the argument "condition" should have value equal to "any".
--------
Examples
--------
>>> _is_instance(Number, [1, 2, 3, True], 'all')
False
>>> _is_instance(Number, [1, 1.2, 3, 5], 'all')
True
----------
Parameters
----------
type_to_check : type element
Data type (all or any elements of 'element' list must be of the type specified in the
current input).
element : list
List where condition specified in "condition" will be checked.
condition : str
String with values "any" or "all" verifying when "any" or "all" element entries have the
specified type.
deep : bool
Flag that identifies when element is in a matrix format and each of its elements should be
verified iteratively.
Returns
-------
out : boolean
Returns True when the "condition" is verified for the entries of "element" list. |
386,433 | def lookup_field_class(self, field, obj=None, default=None):
css = ""
if field in self.field_config and 'class' in self.field_config[field]:
css = self.field_config[field]['class']
elif default:
css = default
return css | Looks up any additional class we should include when rendering this field |
386,434 | def validateOneElement(self, doc, elem):
if doc is None: doc__o = None
else: doc__o = doc._o
if elem is None: elem__o = None
else: elem__o = elem._o
ret = libxml2mod.xmlValidateOneElement(self._o, doc__o, elem__o)
return ret | Try to validate a single element and its attributes,
basically it does the following checks as described by the
XML-1.0 recommendation: - [ VC: Element Valid ] - [ VC:
Required Attribute ] Then call xmlValidateOneAttribute()
for each attribute present. The ID/IDREF checkings are
done separately |
386,435 | def set_tempo(self, bpm):
self.bpm = bpm
self.track_data += self.set_tempo_event(self.bpm) | Convert the bpm to a midi event and write it to the track_data. |
386,436 | def check_pre_approval_notification(self, code):
response = self.get(
url=self.config.PRE_APPROVAL_NOTIFICATION_URL % code)
return PagSeguroPreApprovalNotificationResponse(
response.content, self.config) | check a notification by its code |
386,437 | def _CheckStorageFile(self, storage_file_path):
if os.path.exists(storage_file_path):
if not os.path.isfile(storage_file_path):
raise errors.BadConfigOption(
.format(
storage_file_path))
logger.warning()
dirname = os.path.dirname(storage_file_path)
if not dirname:
dirname = '.'
if not os.access(dirname, os.W_OK):
raise errors.BadConfigOption(
.format(storage_file_path)) | Checks if the storage file path is valid.
Args:
storage_file_path (str): path of the storage file.
Raises:
BadConfigOption: if the storage file path is invalid. |
386,438 | def onPublish(self, topic, payload, qos, dup, retain, msgId):
log.debug("msg={payload}", payload=payload) | Callback Receiving messages from publisher |
386,439 | def parse_stdout(self, filelike):
from aiida.orm import Dict
formulae = {}
content = filelike.read().strip()
if not content:
return self.exit_codes.ERROR_EMPTY_OUTPUT_FILE
try:
for line in content.split():
datablock, formula = re.split(r, line.strip(), 1)
formulae[datablock] = formula
except Exception:
self.logger.exception(, traceback.format_exc())
return self.exit_codes.ERROR_PARSING_OUTPUT_DATA
else:
self.out(, Dict(dict=formulae))
return | Parse the formulae from the content written by the script to standard out.
:param filelike: filelike object of stdout
:returns: an exit code in case of an error, None otherwise |
386,440 | def audit_1_15(self):
for policy in resources.iam.policies.all():
self.assertEqual(len(list(policy.attached_users.all())), 0, "{} has users attached to it".format(policy)) | 1.15 Ensure IAM policies are attached only to groups or roles (Scored) |
386,441 | def _integrate_plugins():
import sys
from airflow.plugins_manager import macros_modules
for macros_module in macros_modules:
sys.modules[macros_module.__name__] = macros_module
globals()[macros_module._name] = macros_module | Integrate plugins to the context |
386,442 | def expect_file_line_regex_match_count_to_be_between(self,
regex,
expected_min_count=0,
expected_max_count=None,
skip=None,
mostly=None,
null_lines_regex=r"^\s*$",
result_format=None,
include_config=False,
catch_exceptions=None,
meta=None,
_lines=None):
try:
comp_regex = re.compile(regex)
except:
raise ValueError("Must enter valid regular expression for regex")
if expected_min_count != None:
try:
assert float(expected_min_count).is_integer()
assert float(expected_min_count) >= 0
except:
raise ValueError("expected_min_count must be a non-negative \
integer or None")
if expected_max_count != None:
try:
assert float(expected_max_count).is_integer()
assert float(expected_max_count) >= 0
except:
raise ValueError("expected_max_count must be a non-negative \
integer or None")
if expected_max_count != None and expected_min_count != None:
try:
assert expected_max_count >= expected_min_count
except:
raise ValueError("expected_max_count must be greater than or \
equal to expected_min_count")
if expected_max_count != None and expected_min_count != None:
truth_list = [True if(len(comp_regex.findall(line)) >= expected_min_count and \
len(comp_regex.findall(line)) <= expected_max_count) else False \
for line in _lines]
elif expected_max_count != None:
truth_list = [True if(len(comp_regex.findall(line)) <= expected_max_count) else False \
for line in _lines]
elif expected_min_count != None:
truth_list = [True if(len(comp_regex.findall(line)) >= expected_min_count) else False \
for line in _lines]
else:
truth_list = [True for line in _lines]
return truth_list | Expect the number of times a regular expression appears on each line of
a file to be between a maximum and minimum value.
Args:
regex: \
A string that can be compiled as valid regular expression to match
expected_min_count (None or nonnegative integer): \
Specifies the minimum number of times regex is expected to appear
on each line of the file
expected_max_count (None or nonnegative integer): \
Specifies the maximum number of times regex is expected to appear
on each line of the file
Keyword Args:
skip (None or nonnegative integer): \
Integer specifying the first lines in the file the method should
skip before assessing expectations
mostly (None or number between 0 and 1): \
Specifies an acceptable error for expectations. If the percentage
of unexpected lines is less than mostly, the method still returns
true even if all lines don't match the expectation criteria.
null_lines_regex (valid regular expression or None): \
If not none, a regex to skip lines as null. Defaults to empty or whitespace-only lines.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`,
or `SUMMARY`. For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the
result object. For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the
result object. For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be
included in the output without modification. For more detail,
see :ref:`meta`.
_lines (list): \
The lines over which to operate (provided by the file_lines_map_expectation decorator)
Returns:
A JSON-serializable expectation result object.
Exact fields vary depending on the values passed to
:ref:`result_format <result_format>` and :ref:`include_config`,
:ref:`catch_exceptions`, and :ref:`meta`. |
386,443 | def addGenotype(
self, genotype_id, genotype_label,
genotype_type=None,
genotype_description=None
):
if genotype_type is None:
genotype_type = self.globaltt['intrinsic_genotype']
self.model.addIndividualToGraph(
genotype_id, genotype_label, genotype_type, genotype_description)
return | If a genotype_type is not supplied,
we will default to 'intrinsic_genotype'
:param genotype_id:
:param genotype_label:
:param genotype_type:
:param genotype_description:
:return: |
386,444 | def viewinfo(self, postinfo):
out_json = {
'uid': postinfo.uid,
'time_update': postinfo.time_update,
'title': postinfo.title,
'cnt_html': tornado.escape.xhtml_unescape(postinfo.cnt_html),
}
self.write(json.dumps(out_json)) | View the info |
386,445 | def get_random(self):
import random
Statement = self.get_model()
session = self.Session()
count = self.count()
if count < 1:
raise self.EmptyDatabaseException()
random_index = random.randrange(0, count)
random_statement = session.query(Statement)[random_index]
statement = self.model_to_object(random_statement)
session.close()
return statement | Returns a random statement from the database. |
386,446 | def calc_el_lz_v1(self):
con = self.parameters.control.fastaccess
der = self.parameters.derived.fastaccess
flu = self.sequences.fluxes.fastaccess
sta = self.sequences.states.fastaccess
for k in range(con.nmbzones):
if (con.zonetype[k] == ILAKE) and (flu.tc[k] > con.ttice[k]):
flu.el[k] = flu.epc[k]
sta.lz -= der.relzonearea[k]*flu.el[k]
else:
flu.el[k] = 0. | Calculate lake evaporation.
Required control parameters:
|NmbZones|
|ZoneType|
|TTIce|
Required derived parameters:
|RelZoneArea|
Required fluxes sequences:
|TC|
|EPC|
Updated state sequence:
|LZ|
Basic equations:
:math:`\\frac{dLZ}{dt} = -EL` \n
:math:`EL = \\Bigl \\lbrace
{
{EPC \\ | \\ TC > TTIce}
\\atop
{0 \\ | \\ TC \\leq TTIce}
}`
Examples:
Six zones of the same size are initialized. The first three
zones are no internal lakes, they can not exhibit any lake
evaporation. Of the last three zones, which are internal lakes,
only the last one evaporates water. For zones five and six,
evaporation is suppressed due to an assumed ice layer, whenever
the associated threshold temperature is not exceeded:
>>> from hydpy.models.hland import *
>>> parameterstep('1d')
>>> nmbzones(6)
>>> zonetype(FIELD, FOREST, GLACIER, ILAKE, ILAKE, ILAKE)
>>> ttice(-1.0)
>>> derived.relzonearea = 1.0/6.0
>>> fluxes.epc = 0.6
>>> fluxes.tc = 0.0, 0.0, 0.0, 0.0, -1.0, -2.0
>>> states.lz = 10.0
>>> model.calc_el_lz_v1()
>>> fluxes.el
el(0.0, 0.0, 0.0, 0.6, 0.0, 0.0)
>>> states.lz
lz(9.9)
Note that internal lakes always contain water. Hence, the
HydPy-H-Land model allows for negative values of the lower
zone storage:
>>> states.lz = 0.05
>>> model.calc_el_lz_v1()
>>> fluxes.el
el(0.0, 0.0, 0.0, 0.6, 0.0, 0.0)
>>> states.lz
lz(-0.05) |
386,447 | def _grabix_index(data):
in_file = data["bgzip_file"]
config = data["config"]
grabix = config_utils.get_program("grabix", config)
gbi_file = _get_grabix_index(in_file)
if not gbi_file or _is_partial_index(gbi_file):
if gbi_file:
utils.remove_safe(gbi_file)
else:
gbi_file = in_file + ".gbi"
with file_transaction(data, gbi_file) as tx_gbi_file:
tx_in_file = os.path.splitext(tx_gbi_file)[0]
utils.symlink_plus(in_file, tx_in_file)
do.run([grabix, "index", tx_in_file], "Index input with grabix: %s" % os.path.basename(in_file))
assert utils.file_exists(gbi_file)
return [gbi_file] | Create grabix index of bgzip input file.
grabix does not allow specification of output file, so symlink the original
file into a transactional directory. |
386,448 | def from_proto(cls, repeated_split_infos):
split_dict = cls()
for split_info_proto in repeated_split_infos:
split_info = SplitInfo()
split_info.CopyFrom(split_info_proto)
split_dict.add(split_info)
return split_dict | Returns a new SplitDict initialized from the `repeated_split_infos`. |
386,449 | def mark_clean(self, entity):
state = EntityState.get_state(entity)
state.status = ENTITY_STATUS.CLEAN
state.is_persisted = True | Marks the given entity as CLEAN.
This is done when an entity is loaded fresh from the repository or
after a commit. |
386,450 | def get_smtp_mail(self):
header = self.get_smtp_header()
body = self.get_body().replace('\n', '\r\n')
return header + '\r\n' + body + '\r\n' | Returns the SMTP formatted email, as it may be passed to sendmail.
:rtype: string
:return: The SMTP formatted mail. |
386,451 | def _all_tag(self):
all_tag = self.get_conf_value('all')
if len(all_tag) == 0:
return False
else:
return all_tag[0].lower() == 'true' | Return the all tag of the Glances/Docker configuration file.
# By default, Glances only display running containers
# Set the following key to True to display all containers
all=True |
386,452 | def nifti_copy(filename,prefix=None,gzip=True):
if prefix==None:
prefix = filename
nifti_filename = globals()[](prefix) + ".nii"
if gzip:
nifti_filename += '.gz'
if not os.path.exists(nifti_filename):
try:
subprocess.check_call([,,nifti_filename,str(filename)])
except subprocess.CalledProcessError:
nl.notify( % filename,level=nl.level.error)
return None
return nifti_filename | creates a ``.nii`` copy of the given dataset and returns the filename as a string |
386,453 | def memory_write32(self, addr, data, zone=None):
return self.memory_write(addr, data, zone, 32) | Writes words to memory of a target system.
Args:
self (JLink): the ``JLink`` instance
addr (int): start address to write to
data (list): list of words to write
zone (str): optional memory zone to access
Returns:
Number of words written to target.
Raises:
JLinkException: on memory access error. |
386,454 | def clean(self):
if self.request:
if not self.request.session.test_cookie_worked():
raise forms.ValidationError("Cookies must be enabled.")
return self.cleaned_data | Check user has cookies enabled |
386,455 | def count(self):
if not self.query.store.autocommit:
self.query.store.checkpoint()
target = ', '.join([
tableClass.storeID.getColumnName(self.query.store)
for tableClass in self.query.tableClass ])
sql, args = self.query._sqlAndArgs(
'SELECT DISTINCT',
target)
sql = 'SELECT COUNT(*) FROM (' + sql + ')'
result = self.query.store.querySQL(sql, args)
assert len(result) == 1, '%r' % (result,)
return result[0][0] or 0 | Count the number of distinct results of the wrapped query.
@return: an L{int} representing the number of distinct results. |
386,456 | def GetBatchJobHelper(self, version=sorted(_SERVICE_MAP.keys())[-1],
server=None):
if not server:
server = _DEFAULT_ENDPOINT
request_builder = BatchJobHelper.GetRequestBuilder(
self, version=version, server=server)
response_parser = BatchJobHelper.GetResponseParser()
return BatchJobHelper(request_builder, response_parser) | Returns a BatchJobHelper to work with the BatchJobService.
This is a convenience method. It is functionally identical to calling
BatchJobHelper(adwords_client, version).
Args:
[optional]
version: A string identifying the AdWords version to connect to. This
defaults to what is currently the latest version. This will be updated
in future releases to point to what is then the latest version.
server: A string identifying the webserver hosting the AdWords API.
Returns:
An initialized BatchJobHelper tied to this client. |
386,457 | def set_palette_name(self, palette_name):
combo = self.get_widget()
found = False
log.debug("wanting palette: %r", palette_name)
for i in combo.get_model():
if i[0] == palette_name:
combo.set_active_iter(i.iter)
found = True
break
if not found:
combo.set_active(self.custom_palette_index) | If the given palette matches an existing one, shows it in the
combobox |
386,458 | def _domain_differs(self, href):
target = utils.get_domain(href)
if not target:
return False
origin = utils.get_domain(self.url)
return target != origin | Check that a link is not on the same domain as the source URL |
386,459 | def create(max_kl, cg_iters, line_search_iters, cg_damping, entropy_coef, vf_iters, discount_factor,
gae_lambda=1.0, improvement_acceptance_ratio=0.1, max_grad_norm=0.5):
return TrpoPolicyGradient(
max_kl, int(cg_iters), int(line_search_iters), cg_damping, entropy_coef, vf_iters,
discount_factor=discount_factor,
gae_lambda=gae_lambda,
improvement_acceptance_ratio=improvement_acceptance_ratio,
max_grad_norm=max_grad_norm
) | Vel factory function |
386,460 | def pipe_xpathfetchpage(context=None, _INPUT=None, conf=None, **kwargs):
conf = DotDict(conf)
urls = utils.listize(conf['URL'])
for item in _INPUT:
for item_url in urls:
url = utils.get_value(DotDict(item_url), DotDict(item), **kwargs)
url = utils.get_abspath(url)
f = urlopen(url)
content = unicode(f.read(), )
if context and context.verbose:
print
print content
print
xpath = conf.get('xpath', **kwargs)
html5 = conf.get('html5', **kwargs) == 'true'
use_as_string = conf.get('useAsString', **kwargs) == 'true'
tree = html5parser.parse(f) if html5 else html.parse(f)
root = tree.getroot()
items = root.xpath(xpath)
if context and context.verbose:
print , len(items)
for etree in items:
i = utils.etree_to_dict(etree)
if context and context.verbose:
print
print i
print
if use_as_string:
yield {: unicode(i)}
else:
yield i
if item.get():
break | A source that fetches the content of a given website as DOM nodes or a
string. Loopable.
context : pipe2py.Context object
_INPUT : pipeforever pipe or an iterable of items or fields
conf : dict
URL -- url object contain the URL to download
xpath -- xpath to extract
html5 -- use html5 parser?
useAsString -- emit items as string?
TODOS:
- don't retrieve pages larger than 1.5MB
- don't retrieve if page is not indexable.
Yields
------
_OUTPUT : items |
386,461 | def _to_dict(self):
_dict = {}
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self, 'section_title') and self.section_title is not None:
_dict['section_title'] = self.section_title._to_dict()
if hasattr(self, 'table_headers') and self.table_headers is not None:
_dict['table_headers'] = [x._to_dict() for x in self.table_headers]
if hasattr(self, 'row_headers') and self.row_headers is not None:
_dict['row_headers'] = [x._to_dict() for x in self.row_headers]
if hasattr(self, 'column_headers') and self.column_headers is not None:
_dict['column_headers'] = [x._to_dict() for x in self.column_headers]
if hasattr(self, 'key_value_pairs') and self.key_value_pairs is not None:
_dict['key_value_pairs'] = [x._to_dict() for x in self.key_value_pairs]
if hasattr(self, 'body_cells') and self.body_cells is not None:
_dict['body_cells'] = [x._to_dict() for x in self.body_cells]
return _dict | Return a json dictionary representing this model. |
386,462 | def get_parent_aligned_annotation(self, ref_id):
parentTier = self.tiers[self.annotations[ref_id]]
while "PARENT_REF" in parentTier[2] and len(parentTier[2]) > 0:
ref_id = parentTier[1][ref_id][0]
parentTier = self.tiers[self.annotations[ref_id]]
return parentTier[0][ref_id] | Give the aligment annotation that a reference annotation belongs to directly, or indirectly through other
reference annotations.
:param str ref_id: Id of a reference annotation.
:raises KeyError: If no annotation exists with the id or if it belongs to an alignment annotation.
:returns: The alignment annotation at the end of the reference chain. |
386,463 | def add_jump(self, name, min, max, num, warp=None, var_type=float):
if not isinstance(var_type, type):
if var_type == 'int':
var_type = int
elif var_type == 'float':
var_type = float
else:
raise ValueError(
% (var_type))
min, max = map(var_type, (min, max))
num = int(num)
if not warp:
choices = np.linspace(min, max, num=num, dtype=var_type)
elif (min >= 0) and warp == 'log':
choices = np.logspace(np.log10(min), np.log10(max), num=num,
dtype=var_type)
elif (min <= 0) and warp == 'log':
raise ValueError()
else:
raise ValueError(
% (name, warp))
self.variables[name] = EnumVariable(name, choices.tolist()) | An integer/float-valued enumerable with `num` items, bounded
between [`min`, `max`]. Note that the right endpoint of the interval
includes `max`. This is a wrapper around the add_enum. `jump` can be
a float or int. |
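A hypothetical call for the entry above, assuming a search-space object exposing this method; with these arguments the enumerated choices would be 1e-4, 1e-3, 1e-2 and 1e-1:
>>> space.add_jump('learning_rate', min=1e-4, max=1e-1, num=4, warp='log')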
386,464 | def connect(config_dir=None, optional_config_files=None, cron_cfg="cron"):
from pyrocore.scripts.base import ScriptBase
from pyrocore.util import load_config
ScriptBase.setup(cron_cfg=cron_cfg)
load_config.ConfigLoader(config_dir).load(optional_config_files or [])
from pyrocore import config
config.engine.open()
return config.engine | Initialize everything for interactive use.
Returns a ready-to-use RtorrentEngine object. |
386,465 | def open_fileswitcher_dlg(self):
if not self.tabs.count():
return
if self.fileswitcher_dlg is not None and \
self.fileswitcher_dlg.is_visible:
self.fileswitcher_dlg.hide()
self.fileswitcher_dlg.is_visible = False
return
self.fileswitcher_dlg = FileSwitcher(self, self, self.tabs, self.data,
ima.icon())
self.fileswitcher_dlg.sig_goto_file.connect(self.set_stack_index)
self.fileswitcher_dlg.show()
self.fileswitcher_dlg.is_visible = True | Open file list management dialog box |
386,466 | def modify_order(self, modify_order_op, order_id, qty, price, adjust_limit=0, trd_env=TrdEnv.REAL, acc_id=0, acc_index=0):
return super(OpenHKCCTradeContext, self).modify_order(modify_order_op=modify_order_op,
order_id=order_id,
qty=qty,
price=price,
adjust_limit=adjust_limit,
trd_env=trd_env,
acc_id=acc_id,
acc_index=acc_index) | See the base class for the full description, with these differences: modifying an order is not supported; orders can be cancelled; deleting an order is a local operation.
:param modify_order_op:
:param order_id:
:param qty:
:param price:
:param adjust_limit:
:param trd_env:
:param acc_id:
:return: |
386,467 | def my_main(context):
print()
if context[]:
print()
for k in context:
print(.format(k, context[k]))
print()
return 0 | The starting point for your app. |
386,468 | def generate_random_string(size=6, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size)) | Generate random string.
:param size: Length of the returned string. Default is 6.
:param chars: List of the usable characters. Default is string.ascii_uppercase + string.digits.
:type size: int
:type chars: str
:return: The random string.
:rtype: str |
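Illustrative usage of the entry above; the output is random and shown only as an example:
>>> generate_random_string()
'3KQ8ZT'
>>> generate_random_string(size=4, chars='abc')
'bcab'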
386,469 | def pbkdf2(seed: str or bytes, dk_len: int) -> bytes:
key = b''
index = 1
bytes_seed = str_to_bytes(seed)
while len(key) < dk_len:
key += Digest.sha256(b''.join([bytes_seed, index.to_bytes(4, 'big', signed=True)]))
index += 1
return key[:dk_len] | Derive one key from a seed.
:param seed: the secret pass phrase to generate the keys from.
:param dk_len: the length in bytes of every derived key.
:return: |
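A hypothetical usage sketch for the entry above, assuming the module's Digest.sha256 helper returns a 32-byte digest:
>>> dk = pbkdf2('my pass phrase', 64)
>>> len(dk)
64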
386,470 | def build_url(base_url, partial_url):
if not base_url.endswith('/'):
base_url += '/'
if partial_url.startswith('/'):
partial_url = partial_url[1:]
return urlparse.urljoin(base_url, partial_url) | Makes sure the URL is built properly.
>>> urllib.parse.urljoin('https://test.com/1/', '2/3')
https://test.com/1/2/3
>>> urllib.parse.urljoin('https://test.com/1/', '/2/3')
https://test.com/2/3
>>> urllib.parse.urljoin('https://test.com/1', '2/3')
https://test.com/2/3' |
386,471 | def input_validate_yubikey_secret(data, name=):
if isinstance(data, pyhsm.aead_cmd.YHSM_YubiKeySecret):
data = data.pack()
return input_validate_str(data, name) | Input validation for YHSM_YubiKeySecret or string. |
386,472 | def _clear_surface(self, surface, rect=None):
clear_color = self._rgb_clear_color if self._clear_color is None else self._clear_color
surface.fill(clear_color, rect) | Clear the buffer, taking in account colorkey or alpha
:return: |
386,473 | def random_tickers(
length, n_tickers, endswith=None, letters=None, slicer=itertools.islice
):
if letters is None:
letters = string.ascii_uppercase
if endswith:
length = length - len(endswith)
join = "".join
def yield_ticker(rand=random.choices):
if endswith:
while True:
yield join(rand(letters, k=length)) + endswith
else:
while True:
yield join(rand(letters, k=length))
tickers = itertools.islice(unique_everseen(yield_ticker()), n_tickers)
return list(tickers) | Generate a length-n_tickers list of unique random ticker symbols.
Parameters
----------
length : int
The length of each ticker string.
n_tickers : int
Number of tickers to generate.
endswith : str, default None
Specify the ending element(s) of each ticker (for example, 'X').
letters : sequence, default None
Sequence of possible letters to choose from. If None, defaults to
`string.ascii_uppercase`.
Returns
-------
list of str
Examples
--------
>>> from pyfinance import utils
>>> utils.random_tickers(length=5, n_tickers=4, endswith='X')
['UZTFX', 'ROYAX', 'ZBVIX', 'IUWYX']
>>> utils.random_tickers(3, 8)
['SBW', 'GDF', 'FOG', 'PWO', 'QDH', 'MJJ', 'YZD', 'QST'] |
386,474 | def radius_server_host_protocol(self, **kwargs):
config = ET.Element("config")
radius_server = ET.SubElement(config, "radius-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
host = ET.SubElement(radius_server, "host")
hostname_key = ET.SubElement(host, "hostname")
hostname_key.text = kwargs.pop('hostname')
use_vrf_key = ET.SubElement(host, "use-vrf")
use_vrf_key.text = kwargs.pop('use_vrf')
protocol = ET.SubElement(host, "protocol")
protocol.text = kwargs.pop('protocol')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
386,475 | def _Open(self, path_spec, mode='rb'):
if not path_spec.HasParent():
raise errors.PathSpecError(
)
file_object = resolver.Resolver.OpenFileObject(
path_spec.parent, resolver_context=self._resolver_context)
cpio_archive_file = cpio.CPIOArchiveFile()
try:
cpio_archive_file.Open(file_object)
except:
file_object.close()
raise
self._file_object = file_object
self._cpio_archive_file = cpio_archive_file | Opens the file system defined by path specification.
Args:
path_spec (PathSpec): path specification.
mode (Optional[str]): file access mode. The default is 'rb' which
represents read-only binary.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file system could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid. |
386,476 | def _raw_open(self, flags, mode=0o777):
return self._accessor.open(self, flags, mode) | Open the file pointed by this path and return a file descriptor,
as os.open() does. |
386,477 | def is_cython_function(fn):
if hasattr(fn, "__func__"):
fn = fn.__func__
name = type(fn).__name__
return (
name == "method_descriptor"
or name == "cython_function_or_method"
or name == "builtin_function_or_method"
) | Checks if a function is compiled w/Cython. |
386,478 | def duration(self):
if self._duration:
return self._duration
elif self.end:
return self.end - self.begin
else:
return None | Get or set the duration of the event.
| Will return a timedelta object.
| May be set to anything that timedelta() understands.
| May be set with a dict ({"days":2, "hours":6}).
| If set to a non null value, removes any already
existing end time. |
386,479 | def get_annotation_values(graph, annotation: str) -> Set[str]:
return set(iter_annotation_values(graph, annotation)) | Get all values for the given annotation.
:param pybel.BELGraph graph: A BEL graph
:param annotation: The annotation to summarize
:return: A set of all annotation values |
386,480 | def sanitize(self):
super(MapNotifyMessage, self).sanitize()
if not isinstance(self.xtr_id, numbers.Integral) \
or self.xtr_id < 0 or self.xtr_id >= 2 ** 128:
raise ValueError()
if not isinstance(self.site_id, numbers.Integral) \
or self.site_id < 0 or self.site_id >= 2 ** 64:
raise ValueError()
if len(bytes(self.nonce)) != 8:
raise ValueError()
if self.key_id not in (KEY_ID_NONE, KEY_ID_HMAC_SHA_1_96,
KEY_ID_HMAC_SHA_256_128):
raise ValueError()
if not isinstance(self.authentication_data, bytes):
raise ValueError()
for record in self.records:
if not isinstance(record, MapRegisterRecord):
raise ValueError()
record.sanitize() | Check if the current settings conform to the LISP specifications and
fix them where possible. |
386,481 | def _fast_hit_windows(ref, est, window):
ref = np.asarray(ref)
est = np.asarray(est)
ref_idx = np.argsort(ref)
ref_sorted = ref[ref_idx]
left_idx = np.searchsorted(ref_sorted, est - window, side='left')
right_idx = np.searchsorted(ref_sorted, est + window, side='right')
hit_ref, hit_est = [], []
for j, (start, end) in enumerate(zip(left_idx, right_idx)):
hit_ref.extend(ref_idx[start:end])
hit_est.extend([j] * (end - start))
return hit_ref, hit_est | Fast calculation of windowed hits for time events.
Given two lists of event times ``ref`` and ``est``, and a
tolerance window, computes a list of pairings
``(i, j)`` where ``|ref[i] - est[j]| <= window``.
This is equivalent to, but more efficient than the following:
>>> hit_ref, hit_est = np.where(np.abs(np.subtract.outer(ref, est))
... <= window)
Parameters
----------
ref : np.ndarray, shape=(n,)
Array of reference values
est : np.ndarray, shape=(m,)
Array of estimated values
window : float >= 0
Size of the tolerance window
Returns
-------
hit_ref : np.ndarray
hit_est : np.ndarray
indices such that ``|hit_ref[i] - hit_est[i]| <= window`` |
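A small illustration of the entry above; the values are chosen for this note, not taken from the source:
>>> _fast_hit_windows([1.0, 2.0, 3.0], [1.1, 2.6], window=0.2)
([0], [0])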
386,482 | def updateHistory(self, activeCells, forceOutput=False):
self._activeCellsHistory.append(activeCells)
if len(self._activeCellsHistory) > self._historyLength:
self._activeCellsHistory.pop(0)
self._unionSDR = numpy.zeros(shape=(self._numInputs,))
if (len(self._activeCellsHistory) >= self._minHistory) or forceOutput:
for i in self._activeCellsHistory:
self._unionSDR[i] = 1
return self._unionSDR | Computes one cycle of the Union Pooler algorithm. Return the union SDR
Parameters:
----------------------------
@param activeCells: A list that stores indices of active cells
@param forceOutput: if True, a union will be created without regard to
minHistory |
386,483 | def resurrect(self, force=False):
if self.dead.empty():
return
try:
timeout, connection = self.dead.get(block=False)
except Empty:
return
if not force and timeout > time.time():
self.dead.put((timeout, connection))
return
self.connections.append(connection)
logger.info(, connection, force) | Attempt to resurrect a connection from the dead pool. It will try to
locate one (not all) eligible (it's timeout is over) connection to
return to th live pool.
:arg force: resurrect a connection even if there is none eligible (used
when we have no live connections) |
386,484 | def all(cls):
query = meta.Session.query(SemanticTag)
query = query.distinct().join(TagSemanticTag)
return query | Return all tags that are currently applied to any dataset.
:returns: a list of all tags that are currently applied to any dataset
:rtype: list of ckan.model.tag.Tag objects |
386,485 | def set_instrument(self, channel, instr, bank=1):
self.track_data += self.select_bank(channel, bank)
self.track_data += self.program_change_event(channel, instr) | Add a program change and bank select event to the track_data. |
386,486 | def protected_resource_view(scopes=None):
if scopes is None:
scopes = []
def wrapper(view):
def view_wrapper(request, *args, **kwargs):
access_token = extract_access_token(request)
try:
try:
kwargs['token'] = Token.objects.get(access_token=access_token)
except Token.DoesNotExist:
logger.debug(, access_token)
raise BearerTokenError()
if kwargs['token'].has_expired():
logger.debug(, access_token)
raise BearerTokenError()
if not set(scopes).issubset(set(kwargs['token'].scope)):
logger.debug()
raise BearerTokenError()
except BearerTokenError as error:
response = HttpResponse(status=error.status)
response[] = .format(
error.code, error.description)
return response
return view(request, *args, **kwargs)
return view_wrapper
return wrapper | View decorator. The client accesses protected resources by presenting the
access token to the resource server.
https://tools.ietf.org/html/rfc6749#section-7 |
386,487 | def seek(self, offset, whence=SEEK_SET):
self.data.seek(offset, whence)
new_pos = self.data.tell()
missing_bytes_to_read = new_pos - self._current_lob_length
if missing_bytes_to_read > 0:
self.data.seek(0, SEEK_END)
self.read(missing_bytes_to_read + self.EXTRA_NUM_ITEMS_TO_READ_AFTER_SEEK)
self.data.seek(new_pos)
return new_pos | Seek pointer in lob data buffer to requested position.
Might trigger further loading of data from the database if the pointer is beyond currently read data. |
386,488 | def run_failure_step_group(pipeline, context):
logger.debug("starting")
try:
assert pipeline
context=context)
except Exception as exception:
logger.error("Failure handler also failed. Swallowing.")
logger.error(exception)
logger.debug("done") | Run the on_failure step group if it exists.
This function will swallow all errors, to prevent obfuscating the error
condition that got it here to begin with. |
386,489 | def _provision_vm(name=None, session=None):
if session is None:
session = _get_session()
log.info(, name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.provision(vm)
_run_async_task(task, session) | Provision vm right after clone/copy |
386,490 | def run(self, **kwargs):
for key in kwargs:
setattr(self, key, kwargs[key])
self.command = self.COMMAND_RUN | Drive servo to the position set in the `position_sp` attribute. |
386,491 | def getAll(self):
if not bool(len(self.ATTRIBUTES)):
self.load_attributes()
return eval(str(self.ATTRIBUTES)) | Return a dictionary with all variables |
386,492 | def tokenize(self, path):
assert os.path.exists(path)
with open(path, 'r') as f:
tokens = 0
for line in f:
words = line.split() + ['<eos>']
tokens += len(words)
for word in words:
self.dictionary.add_word(word)
with open(path, 'r') as f:
ids = np.zeros((tokens,), dtype='int32')
token = 0
for line in f:
words = line.split() + ['<eos>']
for word in words:
ids[token] = self.dictionary.word2idx[word]
token += 1
return mx.nd.array(ids, dtype='int32') | Tokenizes a text file. |
386,493 | def is_all_field_none(self):
if self._UserLight is not None:
return False
if self._UserPerson is not None:
return False
if self._UserCompany is not None:
return False
if self._UserApiKey is not None:
return False
return True | :rtype: bool |
386,494 | def AgregarTambo(self, nro_tambo_interno, nro_renspa,
fecha_venc_cert_tuberculosis, fecha_venc_cert_brucelosis,
nro_tambo_provincial=None, **kwargs):
"Agrego los datos del productor a la liq."
tambo = {: nro_tambo_interno,
: nro_tambo_provincial,
: nro_renspa,
: {},
: fecha_venc_cert_tuberculosis,
: fecha_venc_cert_brucelosis}
self.solicitud[] = tambo
return True | Agrego los datos del productor a la liq. |
386,495 | def open_zip(path_or_file, *args, **kwargs):
if not path_or_file:
raise InvalidZipPath(.format(path_or_file))
allowZip64 = kwargs.pop(, True)
try:
zf = zipfile.ZipFile(path_or_file, *args, allowZip64=allowZip64, **kwargs)
except zipfile.BadZipfile as bze:
raise zipfile.BadZipfile("Bad Zipfile {0}: {1}".format(os.path.realpath(path_or_file), bze))
try:
yield zf
finally:
zf.close() | A with-context for zip files.
Passes through *args and **kwargs to zipfile.ZipFile.
:API: public
:param path_or_file: Full path to zip file.
:param args: Any extra args accepted by `zipfile.ZipFile`.
:param kwargs: Any extra keyword args accepted by `zipfile.ZipFile`.
:raises: `InvalidZipPath` if path_or_file is invalid.
:raises: `zipfile.BadZipfile` if zipfile.ZipFile cannot open a zip at path_or_file.
:returns: `class 'contextlib.GeneratorContextManager`. |
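A hypothetical usage sketch of the entry above; the archive name is illustrative:
>>> with open_zip('dist/app.zip') as zf:
...     names = zf.namelist()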
386,496 | def modutf7_encode(data: str) -> bytes:
ret = bytearray()
is_usascii = True
encode_start = None
for i, symbol in enumerate(data):
charpoint = ord(symbol)
if is_usascii:
if charpoint == 0x26:
ret.extend(b'&-')
elif 0x20 <= charpoint <= 0x7e:
ret.append(charpoint)
else:
encode_start = i
is_usascii = False
else:
if 0x20 <= charpoint <= 0x7e:
to_encode = data[encode_start:i]
encoded = _modified_b64encode(to_encode)
ret.append(0x26)
ret.extend(encoded)
ret.extend((0x2d, charpoint))
is_usascii = True
if not is_usascii:
to_encode = data[encode_start:]
encoded = _modified_b64encode(to_encode)
ret.append(0x26)
ret.extend(encoded)
ret.append(0x2d)
return bytes(ret) | Encode the string using modified UTF-7.
Args:
data: The input string to encode. |
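An illustrative encoding for the entry above, assuming the module's _modified_b64encode helper implements the RFC 3501 modified base64:
>>> modutf7_encode('Entwürfe & Notizen')
b'Entw&APw-rfe &- Notizen'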
386,497 | def GetPixelColorsHorizontally(self, x: int, y: int, count: int) -> ctypes.Array:
arrayType = ctypes.c_uint32 * count
values = arrayType()
_DllClient.instance().dll.BitmapGetPixelsHorizontally(ctypes.c_size_t(self._bitmap), x, y, values, count)
return values | x: int.
y: int.
count: int.
Return `ctypes.Array`, an iterable array of int values in argb form point x,y horizontally. |
386,498 | def ignore_whitespace_text_nodes(cls, wrapped_node):
for child in wrapped_node.children:
if child.is_text and child.value.strip() == :
child.delete()
else:
cls.ignore_whitespace_text_nodes(child) | Find and delete any text nodes containing nothing but whitespace in
in the given node and its descendents.
This is useful for cleaning up excess low-value text nodes in a
document DOM after parsing a pretty-printed XML document. |
386,499 | def fit_class1_pan_allele_models(
self,
n_models,
architecture_hyperparameters,
alleles,
peptides,
affinities,
inequalities,
models_dir_for_save=None,
verbose=1,
progress_preamble="",
progress_print_interval=5.0):
alleles = pandas.Series(alleles).map(mhcnames.normalize_allele_name)
allele_encoding = AlleleEncoding(
alleles,
allele_to_fixed_length_sequence=self.allele_to_fixed_length_sequence)
encodable_peptides = EncodableSequences.create(peptides)
models = []
for i in range(n_models):
logging.info("Training model %d / %d" % (i + 1, n_models))
model = Class1NeuralNetwork(**architecture_hyperparameters)
model.fit(
encodable_peptides,
affinities,
inequalities=inequalities,
allele_encoding=allele_encoding,
verbose=verbose,
progress_preamble=progress_preamble,
progress_print_interval=progress_print_interval)
model_name = self.model_name("pan-class1", i)
self.class1_pan_allele_models.append(model)
row = pandas.Series(collections.OrderedDict([
("model_name", model_name),
("allele", "pan-class1"),
("config_json", json.dumps(model.get_config())),
("model", model),
])).to_frame().T
self._manifest_df = pandas.concat(
[self.manifest_df, row], ignore_index=True)
if models_dir_for_save:
self.save(
models_dir_for_save, model_names_to_write=[model_name])
models.append(model)
self.clear_cache()
return models | Fit one or more pan-allele predictors using a single neural network
architecture.
The new predictors are saved in the Class1AffinityPredictor instance
and will be used on subsequent calls to `predict`.
Parameters
----------
n_models : int
Number of neural networks to fit
architecture_hyperparameters : dict
alleles : list of string
Allele names (not sequences) corresponding to each peptide
peptides : `EncodableSequences` or list of string
affinities : list of float
nM affinities
inequalities : list of string, each element one of ">", "<", or "="
See Class1NeuralNetwork.fit for details.
models_dir_for_save : string, optional
If specified, the Class1AffinityPredictor is (incrementally) written
to the given models dir after each neural network is fit.
verbose : int
Keras verbosity
progress_preamble : string
Optional string of information to include in each progress update
progress_print_interval : float
How often (in seconds) to print progress. Set to None to disable.
Returns
-------
list of `Class1NeuralNetwork` |