Unnamed: 0 (int64, 0 to 389k) | code (string, lengths 26 to 79.6k) | docstring (string, lengths 1 to 46.9k)
---|---|---|
376,400 | def set_doc_ids(self, doc_ids):
if isinstance(doc_ids, list):
self.set_documents(dict.fromkeys(doc_ids))
else:
self.set_documents({doc_ids: None}) | Build xml documents from a list of document ids.
Args:
doc_ids -- A document id or a list of those. |
376,401 | def iterGet(self, objectType, *args, **coolArgs) :
for e in self._makeLoadQuery(objectType, *args, **coolArgs).iterRun() :
if issubclass(objectType, pyGenoRabaObjectWrapper) :
yield objectType(wrapped_object_and_bag = (e, self.bagKey))
else :
yield e | Same as get, but returns the elements one by one; much more efficient for large outputs. |
376,402 | def bandit(self, choice_rewards):
total_pulls = max(1, sum(len(r) for r in choice_rewards.values()))
def ucb1(choice):
rewards = choice_rewards[choice]
choice_pulls = max(len(rewards), 1)
average_reward = np.nanmean(rewards) if len(rewards) else 0
error = np.sqrt(2.0 * np.log(total_pulls) / choice_pulls)
return average_reward + error
return max(shuffle(choice_rewards), key=ucb1) | Multi-armed bandit method which chooses the arm for which the upper
confidence bound (UCB) of expected reward is greatest.
If there are multiple arms with the same UCB1 index, then one is chosen
at random.
An explanation is here:
https://www.cs.bham.ac.uk/internal/courses/robotics/lectures/ucb1.pdf |
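As a self-contained sketch of the UCB1 rule this docstring describes (the reward lists below are invented for illustration, and the snippet's `shuffle` tie-breaking is replaced by a plain `max` over the keys):

```python
import math
import numpy as np

choice_rewards = {"a": [0.1, 0.4, 0.3], "b": [0.9], "c": []}  # rewards observed per arm
total_pulls = max(1, sum(len(r) for r in choice_rewards.values()))

def ucb1(choice):
    rewards = choice_rewards[choice]
    pulls = max(len(rewards), 1)
    mean = np.nanmean(rewards) if len(rewards) else 0
    # exploration bonus shrinks as an arm accumulates pulls
    return mean + math.sqrt(2.0 * math.log(total_pulls) / pulls)

best = max(choice_rewards, key=ucb1)  # "b" here: high mean plus a large single-pull bonus
```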
376,403 | def create_storage_policy(policy_name, policy_dict, service_instance=None):
log.trace("create storage policy '%s', dict = %s", policy_name, policy_dict)
profile_manager = salt.utils.pbm.get_profile_manager(service_instance)
policy_create_spec = pbm.profile.CapabilityBasedProfileCreateSpec()
policy_create_spec.resourceType = pbm.profile.ResourceType(
resourceType=pbm.profile.ResourceTypeEnum.STORAGE)
policy_dict['name'] = policy_name
log.trace('Setting policy values in policy_create_spec')
_apply_policy_config(policy_create_spec, policy_dict)
salt.utils.pbm.create_storage_policy(profile_manager, policy_create_spec)
return {'create_storage_policy': True} | Creates a storage policy.
Supported capability types: scalar, set, range.
policy_name
Name of the policy to create.
The value of the argument will override any existing name in
``policy_dict``.
policy_dict
Dictionary containing the changes to apply to the policy.
(example in salt.states.pbm)
service_instance
Service instance (vim.ServiceInstance) of the vCenter.
Default is None.
.. code-block:: bash
salt '*' vsphere.create_storage_policy policy_name='policy name'
policy_dict="$policy_dict" |
376,404 | def newDocPI(self, name, content):
ret = libxml2mod.xmlNewDocPI(self._o, name, content)
if ret is None: raise treeError('xmlNewDocPI() failed')
__tmp = xmlNode(_obj=ret)
return __tmp | Creation of a processing instruction element. |
376,405 | def make_tables(grammar, precedence):
ACTION = {}
GOTO = {}
labels = {}
def get_label(closure):
if closure not in labels:
labels[closure] = len(labels)
return labels[closure]
def resolve_shift_reduce(lookahead, s_action, r_action):
s_assoc, s_level = precedence[lookahead]
r_assoc, r_level = precedence[r_action[1]]
if s_level < r_level:
return r_action
elif s_level == r_level and r_assoc == LEFT:
return r_action
else:
return s_action
initial, closures, goto = grammar.closures()
for closure in closures:
label = get_label(closure)
for rule in closure:
new_action, lookahead = None, rule.lookahead
if not rule.at_end:
symbol = rule.rhs[rule.pos]
is_terminal = symbol in grammar.terminals
has_goto = symbol in goto[closure]
if is_terminal and has_goto:
next_state = get_label(goto[closure][symbol])
new_action, lookahead = ('shift', next_state), symbol
elif rule.production == grammar.start and rule.at_end:
new_action = ('accept',)
elif rule.at_end:
new_action = ('reduce', rule.production)
if new_action is None:
continue
prev_action = ACTION.get((label, lookahead))
if prev_action is None or prev_action == new_action:
ACTION[label, lookahead] = new_action
else:
types = (prev_action[0], new_action[0])
if types == ('shift', 'reduce'):
chosen = resolve_shift_reduce(lookahead,
prev_action,
new_action)
elif types == ('reduce', 'shift'):
chosen = resolve_shift_reduce(lookahead,
new_action,
prev_action)
else:
raise TableConflictError(prev_action, new_action)
ACTION[label, lookahead] = chosen
for symbol in grammar.nonterminals:
if symbol in goto[closure]:
GOTO[label, symbol] = get_label(goto[closure][symbol])
return get_label(initial), ACTION, GOTO | Generates the ACTION and GOTO tables for the grammar.
Returns:
action - dict[state][lookahead] = (action, ...)
goto - dict[state][just_reduced] = new_state |
376,406 | def on_data(self, ws, message, message_type, fin):
try:
if message_type == websocket.ABNF.OPCODE_TEXT:
json_object = json.loads(message)
if 'binary_streams' in json_object:
self.callback.on_content_type(json_object['binary_streams'][0]['content_type'])
elif 'error' in json_object:
self.on_error(ws, json_object.get('error'))
return
else:
self.callback.on_timing_information(json_object)
except Exception:
self.on_error(ws, 'Unable to parse received message.')
if message_type == websocket.ABNF.OPCODE_BINARY:
self.callback.on_audio_stream(message)
self.callback.on_data(message) | Callback executed when message is received from the server.
:param ws: Websocket client
:param message: utf-8 string which we get from the server.
:param message_type: Message type which is either ABNF.OPCODE_TEXT or ABNF.OPCODE_BINARY
:param fin: continue flag. If 0, the data continues. |
376,407 | def extract():
Paragraph.parsers = [CompoundParser(), ChemicalLabelParser(), MpParser()]
Table.parsers = []
patents = []
for root, dirs, files in os.walk():
for filename in files:
if not filename.endswith():
continue
path = os.path.abspath(os.path.join(root, filename))
size = os.path.getsize(path)
patents.append((path, filename, size))
patents = sorted(patents, key=lambda p: p[2])
for path, filename, size in patents:
print(path)
shutil.copyfile(path, % filename)
with open(path) as f:
d = Document.from_file(f)
if os.path.isfile( % filename):
continue
records = [r.serialize() for r in d.records if len(r.melting_points) == 1]
with open( % filename, ) as fout:
fout.write(json.dumps(records, ensure_ascii=False, indent=2).encode()) | Extract melting points from patents. |
376,408 | def from_expr(cls, expr):
return cls(expr.args, expr.kwargs, cls=expr.__class__) | Instantiate proto-expression from the given Expression |
376,409 | async def cluster_reset_all_nodes(self, soft=True):
option = 'SOFT' if soft else 'HARD'
res = list()
for node in await self.cluster_nodes():
res.append(
await self.execute_command(
'CLUSTER RESET', option, node_id=node['id']
))
return res | Send CLUSTER RESET to all nodes in the cluster
If 'soft' is True then it will send 'SOFT' argument
If 'soft' is False then it will send 'HARD' argument
Sends to all nodes in the cluster |
376,410 | def reset_case(self):
for bus in self.market.case.buses:
bus.p_demand = self.pdemand[bus]
for task in self.tasks:
for g in task.env.generators:
g.p = task.env._g0[g]["p"]
g.p_max = task.env._g0[g]["p_max"]
g.p_min = task.env._g0[g]["p_min"]
g.q = task.env._g0[g]["q"]
g.q_max = task.env._g0[g]["q_max"]
g.q_min = task.env._g0[g]["q_min"]
g.p_cost = task.env._g0[g]["p_cost"]
g.pcost_model = task.env._g0[g]["pcost_model"]
g.q_cost = task.env._g0[g]["q_cost"]
g.qcost_model = task.env._g0[g]["qcost_model"]
g.c_startup = task.env._g0[g]["startup"]
g.c_shutdown = task.env._g0[g]["shutdown"] | Returns the case to its original state. |
376,411 | def validate_lv_districts(session, nw):
nw._config = nw.import_config()
nw._pf_config = nw.import_pf_config()
nw._static_data = nw.import_static_data()
nw._orm = nw.import_orm()
lv_ditricts = [dist.id_db for mv in nw.mv_grid_districts()
for la in mv.lv_load_areas()
for dist in la.lv_grid_districts()]
load_input = nw.list_lv_grid_districts(session, lv_ditricts)
load_input = load_input.sum(axis=0).apply(lambda x: np.round(x, 3))
load_input.sort_index(inplace=True)
load_input.index.names = []
load_input[]=load_input[]+load_input[]
lv_dist_idx = 0
lv_dist_dict = {}
lv_load_idx = 0
lv_load_dict = {}
for mv_district in nw.mv_grid_districts():
for LA in mv_district.lv_load_areas():
for lv_district in LA.lv_grid_districts():
lv_dist_idx += 1
lv_dist_dict[lv_dist_idx] = {
:lv_district.id_db,
:lv_district.peak_load_residential,
:lv_district.peak_load_retail,
:lv_district.peak_load_industrial,
:lv_district.peak_load_agricultural,
: lv_district.peak_load_industrial + lv_district.peak_load_retail,
}
for node in lv_district.lv_grid.graph_nodes_sorted():
if isinstance(node,LVLoadDing0):
lv_load_idx +=1
peak_load_agricultural = 0
peak_load_residential = 0
peak_load_retail = 0
peak_load_industrial = 0
peak_load_retind = 0
if in node.consumption:
tipo =
peak_load_agricultural = node.peak_load
elif in node.consumption:
if node.consumption[]==0:
tipo =
peak_load_industrial = node.peak_load
elif node.consumption[]==0:
tipo =
peak_load_retail = node.peak_load
else:
tipo =
peak_load_retind = node.peak_load
elif in node.consumption:
tipo =
peak_load_residential = node.peak_load
else:
tipo =
print(node.consumption)
lv_load_dict[lv_load_idx] = {
:node.id_db,
:peak_load_residential,
:peak_load_retail,
:peak_load_industrial,
:peak_load_agricultural,
:peak_load_retind,
}
for node in mv_district.mv_grid.graph_nodes_sorted():
if isinstance(node,LVLoadAreaCentreDing0):
lv_load_idx +=1
lv_load_dict[lv_load_idx] = {
: node.id_db,
: node.lv_load_area.peak_load_residential,
: node.lv_load_area.peak_load_retail,
: node.lv_load_area.peak_load_industrial,
: node.lv_load_area.peak_load_agricultural,
:0,
}
load_effective_lv_distr = pd.DataFrame.from_dict(lv_dist_dict,orient=).set_index().sum(axis=0).apply(lambda x: np.round(x,3))
load_effective_lv_distr.sort_index(inplace=True)
compare_by_district = pd.concat([load_input,load_effective_lv_distr,load_input==load_effective_lv_distr],axis=1)
compare_by_district.columns = [,,]
compare_by_district.index.names = []
load_effective_lv_load = pd.DataFrame.from_dict(lv_load_dict,orient=).set_index()
load_effective_lv_load = load_effective_lv_load.sum(axis=0).apply(lambda x: np.round(x,3))
load_effective_lv_load.sort_index(inplace=True)
load_effective_lv_load[] = load_effective_lv_load[] + \
load_effective_lv_load[] + \
load_effective_lv_load[]
compare_by_load = pd.concat([load_input,load_effective_lv_load,load_input==load_effective_lv_load],axis=1)
compare_by_load.columns = [,,]
compare_by_load.index.names = []
return compare_by_district, compare_by_load | Validate if total load of a grid in a pkl file is what expected from LV districts
Parameters
----------
session : sqlalchemy.orm.session.Session
Database session
nw:
The network
Returns
-------
DataFrame
compare_by_district
DataFrame
compare_by_loads |
376,412 | def has_index(self, name):
name = self._normalize_identifier(name)
return name in self._indexes | Returns whether this table has an Index with the given name.
:param name: The index name
:type name: str
:rtype: bool |
376,413 | def start_range(self):
if len(self._exons) == 0: return None
return GenomicRange(self._exons[0].chr,
min([x.start for x in self._exons]),
max([x.start for x in self._exons])) | Similar to the junction range, but without needing to check for leftmost or rightmost. |
376,414 | def CNOT(control, target):
return Gate(name="CNOT", params=[], qubits=[unpack_qubit(q) for q in (control, target)]) | Produces a controlled-NOT (controlled-X) gate::
CNOT = [[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 1],
[0, 0, 1, 0]]
This gate applies to two qubit arguments to produce the controlled-not gate instruction.
:param control: The control qubit.
:param target: The target qubit. The target qubit has an X-gate applied to it if the control
qubit is in the ``|1>`` state.
:returns: A Gate object. |
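A short usage sketch, assuming this is the pyQuil-style gate constructor (the `Program` import path follows pyQuil's layout; treat it as illustrative):

```python
from pyquil.quil import Program
from pyquil.gates import X, CNOT

p = Program()
p += X(0)        # flip the control qubit to |1>
p += CNOT(0, 1)  # the target qubit 1 is now flipped as well
print(p)         # prints the instructions "X 0" and "CNOT 0 1"
```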
376,415 | def delete(network):
try:
network.destroy()
except libvirt.libvirtError as error:
raise RuntimeError("Unable to destroy network: {}".format(error)) | libvirt network cleanup.
@raise: libvirt.libvirtError. |
376,416 | def get_and_check_tasks_for(context, task, msg_prefix=):
tasks_for = task[][]
if tasks_for not in context.config[]:
raise ValueError(
.format(msg_prefix, tasks_for)
)
return tasks_for | Given a parent task, return the reason the parent task was spawned.
``.taskcluster.yml`` uses this to know whether to spawn an action,
cron, or decision task definition. ``tasks_for`` must be a valid one defined in the context.
Args:
task (dict): the task definition.
msg_prefix (str): the string prefix to use for an exception.
Raises:
(KeyError, ValueError): on failure to find a valid ``tasks_for``.
Returns:
str: the ``tasks_for`` |
376,417 | def attach(self, file):
try:
result = self.get_one()
if not in result:
raise NoResults()
except MultipleResults:
raise MultipleResults()
except NoResults:
raise NoResults()
if not os.path.isfile(file):
raise InvalidUsage("Attachment must be an existing regular file" % file)
response = self.session.post(
self._get_attachment_url(),
data={
: self.table,
: result[],
: ntpath.basename(file)
},
files={: open(file, )},
headers={: None}
)
return self._get_content(response) | Attaches the queried record with `file` and returns the response after validating the response
:param file: File to attach to the record
:raise:
:NoResults: if query returned no results
:MultipleResults: if query returned more than one result (currently not supported)
:return:
- The attachment record metadata |
376,418 | def removeIndividual(self, individual):
q = models.Individual.delete().where(
models.Individual.id == individual.getId())
q.execute() | Removes the specified individual from this repository. |
376,419 | def connected_channel(self):
if not self.channel_id:
return None
return self._lavalink.bot.get_channel(int(self.channel_id)) | Returns the voice channel the player is connected to. |
376,420 | def fetch_stackexchange(
dataset,
test_set_fraction=0.2,
min_training_interactions=1,
data_home=None,
indicator_features=True,
tag_features=False,
download_if_missing=True,
):
if not (indicator_features or tag_features):
raise ValueError(
"At least one of item_indicator_features " "or tag_features must be True"
)
if dataset not in ("crossvalidated", "stackoverflow"):
raise ValueError("Unknown dataset")
if not (0.0 < test_set_fraction < 1.0):
raise ValueError("Test set fraction must be between 0 and 1")
urls = {
"crossvalidated": (
"https://github.com/maciejkula/lightfm_datasets/releases/"
"download/v0.1.0/stackexchange_crossvalidated.npz"
),
"stackoverflow": (
"https://github.com/maciejkula/lightfm_datasets/releases/"
"download/v0.1.0/stackexchange_stackoverflow.npz"
),
}
path = _common.get_data(
data_home,
urls[dataset],
os.path.join("stackexchange", dataset),
"data.npz",
download_if_missing,
)
data = np.load(path)
interactions = sp.coo_matrix(
(
data["interactions_data"],
(data["interactions_row"], data["interactions_col"]),
),
shape=data["interactions_shape"].flatten(),
)
interactions.sum_duplicates()
tag_features_mat = sp.coo_matrix(
(data["features_data"], (data["features_row"], data["features_col"])),
shape=data["features_shape"].flatten(),
)
tag_labels = data["labels"]
test_cutoff_index = int(len(interactions.data) * (1.0 - test_set_fraction))
test_cutoff_timestamp = np.sort(interactions.data)[test_cutoff_index]
in_train = interactions.data < test_cutoff_timestamp
in_test = np.logical_not(in_train)
train = sp.coo_matrix(
(
np.ones(in_train.sum(), dtype=np.float32),
(interactions.row[in_train], interactions.col[in_train]),
),
shape=interactions.shape,
)
test = sp.coo_matrix(
(
np.ones(in_test.sum(), dtype=np.float32),
(interactions.row[in_test], interactions.col[in_test]),
),
shape=interactions.shape,
)
if min_training_interactions > 0:
include = np.squeeze(np.array(train.getnnz(axis=1))) > min_training_interactions
train = train.tocsr()[include].tocoo()
test = test.tocsr()[include].tocoo()
if indicator_features and not tag_features:
features = sp.identity(train.shape[1], format="csr", dtype=np.float32)
labels = np.array(["question_id:{}".format(x) for x in range(train.shape[1])])
elif not indicator_features and tag_features:
features = tag_features_mat.tocsr()
labels = tag_labels
else:
id_features = sp.identity(train.shape[1], format="csr", dtype=np.float32)
features = sp.hstack([id_features, tag_features_mat]).tocsr()
labels = np.concatenate(
[
np.array(["question_id:{}".format(x) for x in range(train.shape[1])]),
tag_labels,
]
)
return {
"train": train,
"test": test,
"item_features": features,
"item_feature_labels": labels,
} | Fetch a dataset from the `StackExchange network <http://stackexchange.com/>`_.
The datasets contain users answering questions: an interaction is defined as a user
answering a given question.
The following datasets from the StackExchange network are available:
- CrossValidated: From stats.stackexchange.com. Approximately 9000 users, 72000 questions,
and 70000 answers.
- StackOverflow: From stackoverflow.stackexchange.com. Approximately 1.3M users, 11M questions,
and 18M answers.
Parameters
----------
dataset: string, one of ('crossvalidated', 'stackoverflow')
The part of the StackExchange network for which to fetch the dataset.
test_set_fraction: float, optional
The fraction of the dataset used for testing. Splitting into the train and test set is done
in a time-based fashion: all interactions before a certain time are in the train set and
all interactions after that time are in the test set.
min_training_interactions: int, optional
Only include users with this amount of interactions in the training set.
data_home: path, optional
Path to the directory in which the downloaded data should be placed.
Defaults to ``~/lightfm_data/``.
indicator_features: bool, optional
Use an [n_users, n_users] identity matrix for item features. When True with genre_features,
indicator and genre features are concatenated into a single feature matrix of shape
[n_users, n_users + n_genres].
download_if_missing: bool, optional
Download the data if not present. Raises an IOError if False and data is missing.
Notes
-----
The return value is a dictionary containing the following keys:
Returns
-------
train: sp.coo_matrix of shape [n_users, n_items]
Contains training set interactions.
test: sp.coo_matrix of shape [n_users, n_items]
Contains testing set interactions.
item_features: sp.csr_matrix of shape [n_items, n_item_features]
Contains item features.
item_feature_labels: np.array of strings of shape [n_item_features,]
Labels of item features. |
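A hedged end-to-end sketch pairing the loader above with a LightFM model (import paths and the `warp` loss follow the public LightFM API; the epoch count is arbitrary):

```python
from lightfm import LightFM
from lightfm.datasets import fetch_stackexchange

data = fetch_stackexchange("crossvalidated", test_set_fraction=0.2)
model = LightFM(loss="warp")
model.fit(data["train"], item_features=data["item_features"], epochs=5)
# data["test"] can then be scored with lightfm.evaluation metrics such as auc_score.
```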
376,421 | def _GetContents(self):
try:
self._RealGetContents()
except BadZipfile:
if not self._filePassed:
self.fp.close()
self.fp = None
raise | Read the directory, making sure we close the file if the format
is bad. |
376,422 | def _client(self, host, port, unix_socket, auth):
db = int(self.config['db'])
timeout = int(self.config['timeout'])
try:
cli = redis.Redis(host=host, port=port,
db=db, socket_timeout=timeout, password=auth,
unix_socket_path=unix_socket)
cli.ping()
return cli
except Exception as ex:
self.log.error("RedisCollector: failed to connect to %s:%i. %s.",
unix_socket or host, port, ex) | Return a redis client for the configuration.
:param str host: redis host
:param int port: redis port
:rtype: redis.Redis |
376,423 | def _get_lib_modules(self, full):
result = []
if full:
found = {}
from os import path
mypath = path.dirname(self.module.filepath)
self.module.parent.scan_path(mypath, found)
for codefile in found:
self.module.parent.load_dependency(codefile.replace(".f90", ""), True, True, False)
for modname, module in list(self.module.parent.modules.items()):
if path.dirname(module.filepath).lower() == mypath.lower():
result.append(modname)
else:
result.extend(self.module.search_dependencies())
return self._process_module_needs(result) | Returns a list of the modules in the same folder as the one being wrapped for
compilation as a linked library.
:arg full: when True, all the code files in the source file's directory are considered
as dependencies; otherwise only those explicitly needed are kept. |
376,424 | def forall(self, method):
for c in self._all_combos():
method(self[c], c, self.cube) | IT IS EXPECTED THE method ACCEPTS (value, coord, cube), WHERE
value - VALUE FOUND AT ELEMENT
coord - THE COORDINATES OF THE ELEMENT (PLEASE, READ ONLY)
cube - THE WHOLE CUBE, FOR USE IN WINDOW FUNCTIONS |
376,425 | def parse_environment(fields, context, topics):
def _resolve_environment_lists(context):
for key, value in context.copy().iteritems():
if isinstance(value, list):
context[key] = os.pathsep.join(value)
return context
def _resolve_environment_references(fields, context):
def repl(match):
key = pattern[match.start():match.end()].strip("$")
return context.get(key)
pat = re.compile("\$\w+", re.IGNORECASE)
for key, pattern in fields.copy().iteritems():
fields[key] = pat.sub(repl, pattern) \
.strip(os.pathsep)
return fields
def _resolve_environment_fields(fields, context, topics):
source_dict = replacement_fields_from_context(context)
source_dict.update(dict((str(topics.index(topic)), topic)
for topic in topics))
def repl(match):
key = pattern[match.start():match.end()].strip("{}")
try:
return source_dict[key]
except KeyError:
echo("PROJECT ERROR: Unavailable reference \"%s\" "
"in be.yaml" % key)
sys.exit(PROJECT_ERROR)
for key, pattern in fields.copy().iteritems():
fields[key] = re.sub("{[\d\w]+}", repl, pattern)
return fields
fields = _resolve_environment_lists(fields)
fields = _resolve_environment_references(fields, context)
fields = _resolve_environment_fields(fields, context, topics)
return fields | Resolve the be.yaml environment key
Features:
- Lists, e.g. ["/path1", "/path2"]
- Environment variable references, via $
- Replacement field references, e.g. {key}
- Topic references, e.g. {1} |
376,426 | def matches(property_name, regex, *, present_optional=False, message=None):
def check(val):
if not val:
return present_optional
else:
return True if regex.search(val) else False
return Validation(check, property_name, message) | Returns a Validation that checks a property against a regex. |
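A small usage sketch for this factory (the property name and regex below are invented; `Validation` is whatever class the surrounding module defines):

```python
import re

# Hypothetical rule: an optional "email" property must look like an address when present.
email_rule = matches("email", re.compile(r"[^@]+@[^@]+\.[^@]+"), present_optional=True)
# The wrapped check returns True for empty/None values (treated as optional here)
# and True/False for non-empty values depending on whether the regex matches.
```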
376,427 | def greet(event: str):
greetings = "Happy"
if event == "Christmas":
greetings = "Merry"
if event == "Kwanzaa":
greetings = "Joyous"
if event == "wishes":
greetings = "Warm"
return "{greetings} {event}!".format(**locals()) | Greets appropriately (from http://blog.ketchum.com/how-to-write-10-common-holiday-greetings/) |
376,428 | def set_xlimits_widgets(self, set_min=True, set_max=True):
xmin, xmax = self.tab_plot.ax.get_xlim()
if set_min:
self.w.x_lo.set_text(.format(xmin))
if set_max:
self.w.x_hi.set_text(.format(xmax)) | Populate axis limits GUI with current plot values. |
376,429 | def __has_language(self, bundleId, languageId):
return True if self.__get_language_data(bundleId=bundleId,
languageId=languageId) \
else False | Returns ``True`` if the bundle has the language, ``False`` otherwise |
376,430 | def get_form_kwargs(self, **kwargs):
kwargs = super(PrivateLessonStudentInfoView, self).get_form_kwargs(**kwargs)
kwargs['request'] = self.request
kwargs['payAtDoor'] = self.payAtDoor
return kwargs | Pass along the request data to the form |
376,431 | def template_to_dict_find(item, debug=0):
if debug > 1:
print("template_to_dict_find:")
tmpl = item.find().find()
if tmpl is not None:
value = template_to_text(tmpl, debug)
else:
value = text_with_children(item.find(), debug)
if debug:
print(" find: %s" % value)
return value | DEPRECATED: Returns infobox parsetree value using etree.find()
Older template_to_dict() algorithm, uses etree.xpath() to "lookup"
or find specific elements, but fails to include tail text in the
order it is found, and does not _exclude_ <ext> tags (references,
etc.). Compare to template_to_dict_iter(). |
376,432 | def burstColumn(self, column, columnMatchingSegments, prevActiveCells,
prevWinnerCells, learn):
start = self.cellsPerColumn * column
cellsForColumn = [cellIdx
for cellIdx
in xrange(start, start + self.cellsPerColumn)
if cellIdx not in self.deadCells]
return self._burstColumn(
self.connections, self._random, self.lastUsedIterationForSegment, column,
columnMatchingSegments, prevActiveCells, prevWinnerCells, cellsForColumn,
self.numActivePotentialSynapsesForSegment, self.iteration,
self.maxNewSynapseCount, self.initialPermanence, self.permanenceIncrement,
self.permanenceDecrement, self.maxSegmentsPerCell,
self.maxSynapsesPerSegment, learn) | Activates all of the cells in an unpredicted active column, chooses a winner
cell, and, if learning is turned on, learns on one segment, growing a new
segment if necessary.
@param column (int)
Index of bursting column.
@param columnMatchingSegments (iter)
Matching segments in this column, or None if there aren't any.
@param prevActiveCells (list)
Active cells in `t-1`.
@param prevWinnerCells (list)
Winner cells in `t-1`.
@param learn (bool)
Whether or not learning is enabled.
@return (tuple) Contains:
`cells` (iter),
`winnerCell` (int), |
376,433 | def validate(self, value):
len_ = len(value)
if self.minimum_value is not None and len_ < self.minimum_value:
tpl = "Value length is lower than allowed minimum {min}."
raise ValidationError(tpl.format(
val=value, min=self.minimum_value
))
if self.maximum_value is not None and len_ > self.maximum_value:
raise ValidationError(
"Value length is bigger than "
"allowed maximum {max}.".format(
val=value,
max=self.maximum_value,
)) | Validate value. |
376,434 | def _write(self, data):
if '\n' in data:
before, after = data.rsplit('\n', 1)
to_write = self._buffer + [before, '\n']
self._buffer = [after]
def run():
for s in to_write:
if self._raw:
self._cli.output.write_raw(s)
else:
self._cli.output.write(s)
self._do(run)
else:
self._buffer.append(data) | Note: print()-statements cause multiple write calls.
(write('line') and write('\n')). Of course we don't want to call
`run_in_terminal` for every individual call, because that's too
expensive, and as long as the newline hasn't been written, the
text itself is again overwritten by the rendering of the input
command line. Therefore, we have a little buffer which holds the
text until a newline is written to stdout. |
376,435 | def convertDay(self, day, prefix="", weekday=False):
def sameDay(d1, d2):
d = d1.day == d2.day
m = d1.month == d2.month
y = d1.year == d2.year
return d and m and y
tom = self.now + datetime.timedelta(days=1)
if sameDay(day, self.now):
return "today"
elif sameDay(day, tom):
return "tomorrow"
if weekday:
dayString = day.strftime("%A, %B %d")
else:
dayString = day.strftime("%B %d")
if not int(dayString[-2]):
dayString = dayString[:-2] + dayString[-1]
return prefix + " " + dayString | Convert a datetime object representing a day into a human-ready
string that can be read, spoken aloud, etc.
Args:
day (datetime.date): A datetime object to be converted into text.
prefix (str): An optional argument that prefixes the converted
string. For example, if prefix="in", you'd receive "in two
days", rather than "two days", while the method would still
return "tomorrow" (rather than "in tomorrow").
weekday (bool): An optional argument that returns "Monday, Oct. 1"
if True, rather than "Oct. 1".
Returns:
A string representation of the input day, ignoring any time-related
information. |
376,436 | def is_pinyin(s):
re_pattern = ('(?:%(word)s|[ \t%(punctuation)s])+' %
{'word': zhon.pinyin.word,
'punctuation': zhon.pinyin.punctuation})
return _is_pattern_match(re_pattern, s) | Check if *s* consists of valid Pinyin. |
376,437 | def _printDescription(self, hrlinetop=True):
if hrlinetop:
self._print("----------------")
NOTFOUND = "[not found]"
if self.currentEntity:
obj = self.currentEntity[]
label = obj.bestLabel() or NOTFOUND
description = obj.bestDescription() or NOTFOUND
print(Style.BRIGHT + "OBJECT TYPE: " + Style.RESET_ALL +
Fore.BLACK + uri2niceString(obj.rdftype) + Style.RESET_ALL)
print(Style.BRIGHT + "URI : " + Style.RESET_ALL +
Fore.GREEN + "<" + unicode(obj.uri) + ">" + Style.RESET_ALL)
print(Style.BRIGHT + "TITLE : " + Style.RESET_ALL +
Fore.BLACK + label + Style.RESET_ALL)
print(Style.BRIGHT + "DESCRIPTION: " + Style.RESET_ALL +
Fore.BLACK + description + Style.RESET_ALL)
else:
self._clear_screen()
self._print("Graph: <" + self.current[] + ">", )
self._print("----------------", "TIP")
self._printStats(self.current[])
for obj in self.current[].all_ontologies:
print(Style.BRIGHT + "Ontology URI: " + Style.RESET_ALL +
Fore.RED + "<%s>" % str(obj.uri) + Style.RESET_ALL)
label = obj.bestLabel() or NOTFOUND
description = obj.bestDescription() or NOTFOUND
print(Style.BRIGHT + "Title : " + Style.RESET_ALL +
Fore.BLACK + label + Style.RESET_ALL)
print(Style.BRIGHT + "Description : " + Style.RESET_ALL +
Fore.BLACK + description + Style.RESET_ALL)
self._print("----------------", "TIP") | generic method to print out a description |
376,438 | def _string_to_record_type(string):
string = string.upper()
record_type = getattr(RecordType, string)
return record_type | Return a string representation of a DNS record type to a
libcloud RecordType ENUM.
:param string: A record type, e.g. A, TXT, NS
:type string: ``str``
:rtype: :class:`RecordType` |
376,439 | def addBarcodesToIdentifier(read, UMI, cell):
read_id = read.identifier.split(" ")
if cell == "":
read_id[0] = read_id[0] + "_" + UMI
else:
read_id[0] = read_id[0] + "_" + cell + "_" + UMI
identifier = " ".join(read_id)
return identifier | extract the identifier from a read and append the UMI and
cell barcode before the first space |
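A worked illustration of the identifier rewrite described here; the read object is mocked, since only its `.identifier` attribute is used:

```python
from types import SimpleNamespace

read = SimpleNamespace(identifier="READ123 extra metadata")
addBarcodesToIdentifier(read, UMI="ACGT", cell="TTGCA")
# -> "READ123_TTGCA_ACGT extra metadata"
```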
376,440 | def _get_ckptmgr_process(self):
ckptmgr_main_class =
ckptmgr_ram_mb = self.checkpoint_manager_ram / (1024 * 1024)
ckptmgr_cmd = [os.path.join(self.heron_java_home, "bin/java"),
% ckptmgr_ram_mb,
% ckptmgr_ram_mb,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
self.checkpoint_manager_classpath,
ckptmgr_main_class,
+ self.topology_name,
+ self.topology_id,
+ self.ckptmgr_ids[self.shard],
+ self.checkpoint_manager_port,
+ self.stateful_config_file,
+ self.override_config_file,
+ self.heron_internals_config_file]
retval = {}
retval[self.ckptmgr_ids[self.shard]] = Command(ckptmgr_cmd, self.shell_env)
return retval | Get the command to start the checkpoint manager process |
376,441 | def db990(self, value=None):
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError(
.format(value))
self._db990 = value | Corresponds to IDD Field `db990`
Dry-bulb temperature corresponding to 90.0% annual cumulative
frequency of occurrence (cold conditions)
Args:
value (float): value for IDD Field `db990`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value |
376,442 | def _readintbe(self, length, start):
if length % 8:
raise InterpretError("Big-endian integers must be whole-byte. "
"Length = {0} bits.", length)
return self._readint(length, start) | Read bits and interpret as a big-endian signed int. |
376,443 | def get_exception_information(self, index):
if index < 0 or index > win32.EXCEPTION_MAXIMUM_PARAMETERS:
raise IndexError("Array index out of range: %s" % repr(index))
info = self.raw.u.Exception.ExceptionRecord.ExceptionInformation
value = info[index]
if value is None:
value = 0
return value | @type index: int
@param index: Index into the exception information block.
@rtype: int
@return: Exception information DWORD. |
376,444 | def interrupt(self, data=None):
self.interrupted = True
if data is not None:
return self.write(data)
else:
return True | Interrupt handling.
:param data: the data to respond with; if omitted, no response is sent
:return: |
376,445 | def _create_destination(self, server_id, dest_url, owned):
server = self._get_server(server_id)
host, port, ssl = parse_url(dest_url, allow_defaults=False)
schema = 'https' if ssl else 'http'
listener_url = '{0}://{1}:{2}'.format(schema, host, port)
this_host = getfqdn()
ownership = "owned" if owned else "permanent"
dest_path = CIMInstanceName(DESTINATION_CLASSNAME,
namespace=server.interop_ns)
dest_inst = CIMInstance(DESTINATION_CLASSNAME)
dest_inst.path = dest_path
dest_inst['CreationClassName'] = DESTINATION_CLASSNAME
dest_inst['SystemCreationClassName'] = SYSTEM_CREATION_CLASSNAME
dest_inst['SystemName'] = this_host
dest_inst['Name'] = _format(
'pywbemdestination:{0}:{1}:{2}',
ownership, self._subscription_manager_id, uuid.uuid4())
dest_inst['Destination'] = listener_url
if owned:
for i, inst in enumerate(self._owned_destinations[server_id]):
if inst.path == dest_path:
if inst != dest_inst:
server.conn.ModifyInstance(dest_inst)
dest_inst = server.conn.GetInstance(dest_path)
self._owned_destinations[server_id][i] = dest_inst
return dest_inst
dest_path = server.conn.CreateInstance(dest_inst)
dest_inst = server.conn.GetInstance(dest_path)
self._owned_destinations[server_id].append(dest_inst)
else:
dest_path = server.conn.CreateInstance(dest_inst)
dest_inst = server.conn.GetInstance(dest_path)
return dest_inst | Create a listener destination instance in the Interop namespace of a
WBEM server and return that instance.
In order to catch any changes the server applies, the instance is
retrieved again using the instance path returned by instance creation.
Parameters:
server_id (:term:`string`):
The server ID of the WBEM server, returned by
:meth:`~pywbem.WBEMSubscriptionManager.add_server`.
dest_url (:term:`string`):
URL of the listener that is used by the WBEM server to send any
indications to.
The URL scheme (e.g. http/https) determines whether the WBEM server
uses HTTP or HTTPS for sending the indication. Host and port in the
URL specify the target location to be used by the WBEM server.
owned (:class:`py:bool`):
Defines whether or not the created instance is *owned* by the
subscription manager.
Returns:
:class:`~pywbem.CIMInstance`: The created instance, as retrieved
from the server.
Raises:
Exceptions raised by :class:`~pywbem.WBEMConnection`. |
376,446 | def absent(name, profile="splunk"):
ret = {
'name': name,
'changes': {},
'result': True,
'comment': '{0} is absent.'.format(name)
}
target = __salt__['splunk_search.get'](name, profile=profile)
if target:
if __opts__['test']:
ret = {}
ret["name"] = name
ret['comment'] = "Would delete {0}".format(name)
ret['result'] = None
return ret
result = __salt__['splunk_search.delete'](name, profile=profile)
if result:
ret['comment'] = 'Deleted {0}'.format(name)
else:
ret['comment'] = 'Failed to delete {0}'.format(name)
ret['result'] = False
return ret | Ensure a search is absent
.. code-block:: yaml
API Error Search:
splunk_search.absent
The following parameters are required:
name
This is the name of the search in splunk |
376,447 | def strip_tx_attenuation(self, idx):
idx = Radiotap.align(idx, 2)
tx_attenuation, = struct.unpack_from('<H', self._rtap, idx)
return idx + 2, tx_attenuation | strip (2 bytes) tx_attenuation
:idx: int
:return: int
idx
:return: int
tx_attenuation |
376,448 | def GenCatchallState(self):
for c in self.comments:
self._AddToken(".", c, "PushState,EndField", "COMMENT")
for c in self.cont:
self._AddToken(".", c, "PushState", "FWD")
for t in self.term:
self._AddToken(".", t, "EndEntry", None)
for s in self.sep:
self._AddToken(".", s, "EndField", None)
for i, q in enumerate(self.quot):
self._AddToken(".", q, "PushState", "%s_STRING" % i)
self._AddToken(".", ".", "AddToField", None) | Generate string matching state rules.
This sets up initial state handlers that cover both the 'INITIAL' state
and the intermediate content between fields.
The lexer acts on items with precedence:
- continuation characters: use the fast forward state rules.
- field separators: finalize processing the field.
- quotation characters: use the quotation state rules. |
376,449 | def format(self, number, **kwargs):
if check_type(number, ):
return map(lambda val: self.format(val, **kwargs))
number = self.parse(number)
if check_type(kwargs, ):
options = (self.settings[].update(kwargs))
precision = self._change_precision(options[])
negative = (lambda num: "-" if num < 0 else "")(number)
base = str(int(self.to_fixed(abs(number) or 0, precision)), 10)
mod = (lambda num: len(num) % 3 if len(num) > 3 else 0)(base)
num = negative + (lambda num: base[0:num] if num else )(mod)
num += re.sub(, +
options[], base[mod:])
num += (lambda val: options[
] + self.to_fixed(abs(number), precision)
.split()[1] if val else )(precision)
return num | Format a given number.
Format a number, with comma-separated thousands and
custom precision/decimal places
Localise by overriding the precision and thousand / decimal separators
2nd parameter `precision` can be an object matching `settings.number`
Args:
number (TYPE): Description
precision (TYPE): Description
thousand (TYPE): Description
decimal (TYPE): Description
Returns:
name (TYPE): Description |
376,450 | def _write_to_file(self, fileinfo, filename):
txt = to_text_string(fileinfo.editor.get_text_with_eol())
fileinfo.encoding = encoding.write(txt, filename, fileinfo.encoding) | Low-level function for writing text of editor to file.
Args:
fileinfo: FileInfo object associated to editor to be saved
filename: str with filename to save to
This is a low-level function that only saves the text to file in the
correct encoding without doing any error handling. |
376,451 | def validate(self):
if self.implemented_protocol_version != self.protocol_version:
raise OmapiError("protocol mismatch")
if self.implemented_header_size != self.header_size:
raise OmapiError("header size mismatch") | Checks whether this OmapiStartupMessage matches the implementation.
@raises OmapiError: |
376,452 | def K_diaphragm_valve_Crane(D=None, fd=None, style=0):
if D is None and fd is None:
raise ValueError()
if fd is None:
fd = ft_Crane(D)
try:
K = diaphragm_valve_Crane_coeffs[style]*fd
except KeyError:
raise KeyError()
return K | Returns the loss coefficient for a diaphragm valve of either weir
(`style` = 0) or straight-through (`style` = 1) as shown in [1]_.
.. math::
K = K_1 = K_2 = N\cdot f_d
For style 0 (weir), N = 149; for style 1 (straight through), N = 39.
Parameters
----------
D : float, optional
Diameter of the pipe section the valve in mounted in; the
same as the line size [m]
fd : float, optional
Darcy friction factor calculated for the actual pipe flow in clean
steel (roughness = 0.0018 inch) in the fully developed turbulent
region; do not specify this to use the original Crane friction factor!,
[-]
style : int, optional
Either 0 (weir type valve) or 1 (straight through weir valve) [-]
Returns
-------
K : float
Loss coefficient with respect to the pipe inside diameter [-]
Notes
-----
This method is not valid in the laminar regime and the pressure drop will
be underestimated in those conditions.
Examples
--------
>>> K_diaphragm_valve_Crane(D=.1, style=0)
2.4269804835982565
References
----------
.. [1] Crane Co. Flow of Fluids Through Valves, Fittings, and Pipe. Crane,
2009. |
376,453 | def _get_spark_app_ids(self, running_apps, requests_config, tags):
spark_apps = {}
for app_id, (app_name, tracking_url) in iteritems(running_apps):
response = self._rest_request_to_json(
tracking_url, SPARK_APPS_PATH, SPARK_SERVICE_CHECK, requests_config, tags
)
for app in response:
app_id = app.get('id')
app_name = app.get('name')
if app_id and app_name:
spark_apps[app_id] = (app_name, tracking_url)
return spark_apps | Traverses the Spark application master in YARN to get a Spark application ID.
Return a dictionary of {app_id: (app_name, tracking_url)} for Spark applications |
376,454 | def get_house_conn_gen_load(graph, node):
generation = 0
peak_load = 0
for cus_1 in graph.successors(node):
for cus_2 in graph.successors(cus_1):
if not isinstance(cus_2, list):
cus_2 = [cus_2]
generation += sum([gen.capacity for gen in cus_2
if isinstance(gen, GeneratorDing0)])
peak_load += sum([load.peak_load for load in cus_2
if isinstance(load, LVLoadDing0)])
return [peak_load, generation] | Get generation capacity/ peak load of neighboring house connected to main
branch
Parameters
----------
graph : :networkx:`NetworkX Graph Obj< >`
Directed graph
node : graph node
Node of the main branch of LV grid
Returns
-------
:any:`list`
A list containing two items
# peak load of connected house branch
# generation capacity of connected generators |
376,455 | def obj_assd(result, reference, voxelspacing=None, connectivity=1):
assd = numpy.mean( (obj_asd(result, reference, voxelspacing, connectivity), obj_asd(reference, result, voxelspacing, connectivity)) )
return assd | Average symmetric surface distance.
Computes the average symmetric surface distance (ASSD) between the binary objects in
two images.
Parameters
----------
result : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
reference : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
voxelspacing : float or sequence of floats, optional
The voxelspacing in a distance unit i.e. spacing of elements
along each dimension. If a sequence, must be of length equal to
the input rank; if a single number, this is used for all axes. If
not specified, a grid spacing of unity is implied.
connectivity : int
The neighbourhood/connectivity considered when determining what accounts
for a distinct binary object as well as when determining the surface
of the binary objects. This value is passed to
`scipy.ndimage.morphology.generate_binary_structure` and should usually be :math:`> 1`.
The decision on the connectivity is important, as it can influence the results
strongly. If in doubt, leave it as it is.
Returns
-------
assd : float
The average symmetric surface distance between all mutually existing distinct
binary object(s) in ``result`` and ``reference``. The distance unit is the same as for
the spacing of elements along each dimension, which is usually given in mm.
See also
--------
:func:`obj_asd`
Notes
-----
This is a real metric, obtained by calling and averaging
>>> obj_asd(result, reference)
and
>>> obj_asd(reference, result)
The binary images can therefore be supplied in any order. |
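A minimal call sketch, assuming the medpy-style behaviour described above (binary numpy arrays in, a scalar distance out):

```python
import numpy as np

result = np.zeros((8, 8), dtype=bool)
reference = np.zeros((8, 8), dtype=bool)
result[2:5, 2:5] = True      # a 3x3 object
reference[3:6, 3:6] = True   # the same object shifted by one pixel
d = obj_assd(result, reference, voxelspacing=(1.0, 1.0))
# d is the average symmetric surface distance between the two objects, here in pixel units.
```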
376,456 | def _wait_non_ressources(self, callback):
self.trigger = threading.Lock()
self.was_ended = False
self.trigger.acquire()
self.trigger.acquire()
if not self.was_ended:
callback(self) | This gets started as a thread; it waits for the data lock to be freed, then advertises itself to the SelectableSelector using the callback. |
376,457 | def get_service_display_name(name):
with win32.OpenSCManager(
dwDesiredAccess = win32.SC_MANAGER_ENUMERATE_SERVICE
) as hSCManager:
return win32.GetServiceDisplayName(hSCManager, name) | Get the service display name for the given service name.
@see: L{get_service}
@type name: str
@param name: Service unique name. You can get this value from the
C{ServiceName} member of the service descriptors returned by
L{get_services} or L{get_active_services}.
@rtype: str
@return: Service display name. |
376,458 | def convertLatLngToPixelXY(self, lat, lng, level):
mapSize = self.getMapDimensionsByZoomLevel(level)
lat = self.clipValue(lat, self.min_lat, self.max_lat)
lng = self.clipValue(lng, self.min_lng, self.max_lng)
x = (lng + 180) / 360
sinlat = math.sin(lat * math.pi / 180)
y = 0.5 - math.log((1 + sinlat) / (1 - sinlat)) / (4 * math.pi)
pixelX = int(self.clipValue(x * mapSize + 0.5, 0, mapSize - 1))
pixelY = int(self.clipValue(y * mapSize + 0.5, 0, mapSize - 1))
return (pixelX, pixelY) | returns the x and y values of the pixel corresponding to a latitude
and longitude. |
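This is the standard Web Mercator pixel math; a worked check for latitude 0, longitude 0, assuming the map is 512 pixels wide at the chosen level (the map size returned by `getMapDimensionsByZoomLevel` is an assumption here):

```python
import math

map_size = 512
x = (0 + 180) / 360                                              # 0.5
sinlat = math.sin(0 * math.pi / 180)                             # 0.0
y = 0.5 - math.log((1 + sinlat) / (1 - sinlat)) / (4 * math.pi)  # 0.5
pixel_x = int(min(max(x * map_size + 0.5, 0), map_size - 1))     # 256
pixel_y = int(min(max(y * map_size + 0.5, 0), map_size - 1))     # 256
```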
376,459 | def teardown_socket(s):
try:
s.shutdown(socket.SHUT_WR)
except socket.error:
pass
finally:
s.close() | Shuts down and closes a socket. |
376,460 | def frames(
self,
*,
callers: Optional[Union[str, List[str]]] = None,
callees: Optional[Union[str, List[str]]] = None,
kind: Optional[TraceKind] = None,
limit: Optional[int] = 10,
):
with self.db.make_session() as session:
query = (
session.query(
TraceFrame.id,
CallerText.contents.label("caller"),
TraceFrame.caller_port,
CalleeText.contents.label("callee"),
TraceFrame.callee_port,
)
.filter(TraceFrame.run_id == self.current_run_id)
.join(CallerText, CallerText.id == TraceFrame.caller_id)
.join(CalleeText, CalleeText.id == TraceFrame.callee_id)
)
if callers is not None:
query = self._add_list_or_string_filter_to_query(
callers, query, CallerText.contents, "callers"
)
if callees is not None:
query = self._add_list_or_string_filter_to_query(
callees, query, CalleeText.contents, "callees"
)
if kind is not None:
if kind not in {TraceKind.PRECONDITION, TraceKind.POSTCONDITION}:
raise UserError(
"Try 'kind=precondition' "
"or 'kind=postcondition'."
)
query = query.filter(TraceFrame.kind == kind)
if limit is not None and not isinstance(limit, int):
raise UserError("'limit' should be an int or None.")
trace_frames = query.group_by(TraceFrame.id).order_by(
CallerText.contents, CalleeText.contents
)
total_trace_frames = trace_frames.count()
limit = limit or total_trace_frames
self._output_trace_frames(
self._group_trace_frames(trace_frames, limit), limit, total_trace_frames
) | Display trace frames independent of the current issue.
Parameters (all optional):
callers: str or list[str] filter traces by this caller name
callees: str or list[str] filter traces by this callee name
kind: precondition|postcondition the type of trace frames to show
limit: int (default: 10) how many trace frames to display
(specify limit=None for all)
Sample usage:
frames callers="module.function", kind=postcondition
String filters support LIKE wildcards (%, _) from SQL:
% matches anything (like .* in regex)
_ matches 1 character (like . in regex) |
376,461 | def search_dashboard_entities(self, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_dashboard_entities_with_http_info(**kwargs)
else:
(data) = self.search_dashboard_entities_with_http_info(**kwargs)
return data | Search over a customer's non-deleted dashboards # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_dashboard_entities(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedDashboard
If the method is called asynchronously,
returns the request thread. |
376,462 | async def insert_news(self, **params):
logging.debug("\n\n [+] -- Setting news debugging. ")
if params.get("message"):
params = json.loads(params.get("message", "{}"))
if not params:
return {"error":400, "reason":"Missed required fields"}
logging.debug(" *** Params")
event_type = params.get("event_type")
cid = params.get("cid")
access_string = params.get("access_string")
buyer_pubkey = params.get("buyer_pubkey")
buyer_address = params.get("buyer_address")
owneraddr = params.get("owneraddr")
price = params.get("price")
offer_type = int(params.get("offer_type", -1))
coinid = params.get("coinid").upper()
try:
coinid = coinid.replace("TEST", "")
except:
pass
logging.debug("\n ** Coinid")
logging.debug(coinid)
if coinid in settings.bridges.keys():
self.account.blockchain.setendpoint(settings.bridges[coinid])
else:
return {"error":400, "reason": "Invalid coin ID"}
owneraddr = await self.account.blockchain.ownerbycid(cid=cid)
seller = await getaccountbywallet(wallet=owneraddr)
if "error" in seller.keys():
return seller
news_collection = self.database[settings.NEWS]
self.account.blockchain.setendpoint(settings.bridges[coinid])
if offer_type == 1:
seller_price = await self.account.blockchain.getwriteprice(cid=cid)
elif offer_type == 0:
seller_price = await self.account.blockchain.getreadprice(cid=cid)
row = {"offer_type": self.account.ident_offer[offer_type],
"buyer_address":buyer_address,
"cid":cid,
"access_string":access_string,
"buyer_pubkey": buyer_pubkey,
"seller_price": seller_price,
"buyer_price": price,
"account_id": seller["id"],
"event_type": event_type,
"coinid":coinid}
logging.debug("\n ** Inserting row")
logging.debug(row)
database = client[settings.DBNAME]
collection = database[settings.ACCOUNTS]
await collection.find_one_and_update(
{"id": int(seller["id"])},
{"$inc": {"news_count": 1}})
await collection.find_one({"id":int(seller["id"])})
await news_collection.insert_one(row)
logging.debug("\n ** Fresh news")
fresh = await collection.find_one({"buyer_address":buyer_address,
"cid":cid})
logging.debug(fresh)
return {"result":"ok"} | Inserts news for account
Accepts:
- event_type
- cid
- access_string (of buyer)
- buyer_pubkey
- buyer address
- owner address
- price
- offer type
- coin ID
Returns:
- dict with result |
376,463 | def visit_named_list(self, _, children):
filters, resource = children
resource.name = filters[0].name
resource.filters = filters
return resource | Manage a list, represented by a ``.resources.List`` instance.
This list is populated with data from the result of the ``FILTERS``.
Arguments
---------
_ (node) : parsimonious.nodes.Node.
children : list
- 0: for ``FILTERS``: list of instances of ``.resources.Field``.
- 1: for ``LIST``: a ``List`` resource
Example
-------
>>> DataQLParser(r'foo(1)[name]', default_rule='NAMED_LIST').data
<List[foo] .foo(1)>
<Field[name] />
</List[foo]> |
376,464 | def is_empty(self):
return not bool(self.title or self.subtitle or self.part_number \
or self.part_name or self.non_sort or self.type) | Returns True if all titleInfo subfields are not set or
empty; returns False if any of the fields are not empty. |
376,465 | def output(data, **kwargs):
color = salt.utils.color.get_colors(
__opts__.get(),
__opts__.get())
strip_colors = __opts__.get(, True)
ident = 0
if __opts__.get():
ident = 4
if __opts__[] in (, ):
acc =
pend =
den =
rej =
cmap = {pend: color[],
acc: color[],
den: color[],
rej: color[],
: color[]}
trans = {pend: u.format(
* ident,
color[],
color[]),
acc: u.format(
* ident,
color[],
color[]),
den: u.format(
* ident,
color[],
color[]),
rej: u.format(
* ident,
color[],
color[]),
: u.format(
* ident,
color[],
color[])}
else:
acc =
pend =
rej =
cmap = {pend: color[],
acc: color[],
rej: color[],
: color[]}
trans = {pend: u.format(
* ident,
color[],
color[]),
acc: u.format(
* ident,
color[],
color[]),
rej: u.format(
* ident,
color[],
color[]),
: u.format(
* ident,
color[],
color[])}
ret =
for status in sorted(data):
ret += u.format(trans[status])
for key in sorted(data[status]):
key = salt.utils.data.decode(key)
skey = salt.output.strip_esc_sequence(key) if strip_colors else key
if isinstance(data[status], list):
ret += u.format(
* ident,
cmap[status],
skey,
color[])
if isinstance(data[status], dict):
ret += u.format(
* ident,
cmap[status],
skey,
data[status][key],
color[])
return ret | Read in the dict structure generated by the salt key API methods and
print the structure. |
376,466 | def use_plenary_grade_entry_view(self):
self._object_views[] = PLENARY
for session in self._get_provider_sessions():
try:
session.use_plenary_grade_entry_view()
except AttributeError:
pass | Pass through to provider GradeEntryLookupSession.use_plenary_grade_entry_view |
376,467 | def avl_new_top(t1, t2, top, direction=0):
top.parent = None
assert top.parent is None, str(top.parent.value)
top.set_child(direction, t1)
top.set_child(1 - direction, t2)
top.balance = max(height(t1), height(t2)) + 1
return top | if direction == 0:
(t1, t2) is (left, right)
if direction == 1:
(t1, t2) is (right, left) |
376,468 | def terminate_processes(pid_list):
for proc in psutil.process_iter():
if proc.pid in pid_list:
proc.terminate() | Terminate a list of processes by sending to each of them a SIGTERM signal,
pre-emptively checking if its PID might have been reused.
Parameters
----------
pid_list : list
A list of process identifiers identifying active processes. |
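Usage is straightforward; a hedged sketch in which the PID list comes from the current process's children (just one way to obtain such a list):

```python
import psutil

children = [p.pid for p in psutil.Process().children(recursive=True)]
terminate_processes(children)  # sends SIGTERM to each child that is still running
```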
376,469 | def to_(self, off_pts):
off_pts = np.asarray(off_pts, dtype=np.float)
has_z = (off_pts.shape[-1] > 2)
scale_pt = [self.viewer._org_scale_x, self.viewer._org_scale_y]
if has_z:
scale_pt.append(self.viewer._org_scale_z)
off_pts = np.multiply(off_pts, scale_pt)
return off_pts | Reverse of :meth:`from_`. |
376,470 | def refactor_string(self, data, name):
features = _detect_future_features(data)
if "print_function" in features:
self.driver.grammar = pygram.python_grammar_no_print_statement
try:
tree = self.driver.parse_string(data)
except Exception as err:
self.log_error("Can't parse %s: %s: %s",
name, err.__class__.__name__, err)
return
finally:
self.driver.grammar = self.grammar
tree.future_features = features
self.log_debug("Refactoring %s", name)
self.refactor_tree(tree, name)
return tree | Refactor a given input string.
Args:
data: a string holding the code to be refactored.
name: a human-readable name for use in error/log messages.
Returns:
An AST corresponding to the refactored input stream; None if
there were errors during the parse. |
376,471 | def select_delay_factor(self, delay_factor):
if self.fast_cli:
if delay_factor <= self.global_delay_factor:
return delay_factor
else:
return self.global_delay_factor
else:
if delay_factor >= self.global_delay_factor:
return delay_factor
else:
return self.global_delay_factor | Choose the greater of delay_factor or self.global_delay_factor (default).
In fast_cli choose the lesser of delay_factor of self.global_delay_factor.
:param delay_factor: See __init__: global_delay_factor
:type delay_factor: int |
376,472 | def validate(self):
if not isinstance(self.fold_scope_location, FoldScopeLocation):
raise TypeError(u.format(
type(self.fold_scope_location), self.fold_scope_location))
allowed_block_types = (GremlinFoldedFilter, GremlinFoldedTraverse, Backtrack)
for block in self.folded_ir_blocks:
if not isinstance(block, allowed_block_types):
raise AssertionError(
u
u
.format(type(block), self.folded_ir_blocks, allowed_block_types))
if not isinstance(self.field_type, GraphQLList):
raise ValueError(u
u.format(self.field_type))
inner_type = strip_non_null_from_type(self.field_type.of_type)
if isinstance(inner_type, GraphQLList):
raise GraphQLCompilationError(
u
u.format(self.fold_scope_location, self.field_type.of_type)) | Validate that the GremlinFoldedContextField is correctly representable. |
376,473 | def create_server(initialize=True):
with provider() as p:
host_string = p.create_server()
if initialize:
env.host_string = host_string
initialize_server() | Create a server |
376,474 | def create_script_fact(self):
self.ddl_text +=
self.ddl_text += + self.fact_table +
self.ddl_text +=
self.ddl_text += + self.fact_table +
self.ddl_text += + self.fact_table +
self.ddl_text += .join([col + for col in self.col_list])
self.ddl_text += + self.date_updated_col +
self.ddl_text += | appends the CREATE TABLE, index etc to self.ddl_text |
376,475 | def result(self, *args, **kwargs):
prettify = kwargs.get(, False)
sql = % (self._type, self._class)
if prettify:
sql +=
else:
sql +=
if self._type.lower() == :
sql += " FROM %s TO %s " % (self._from, self._to)
if self._cluster:
sql += % self._cluster
if prettify:
sql +=
else:
sql +=
if self.data:
sql += + json.dumps(self.data)
return sql | Construye la consulta SQL |
376,476 | def get_bonds(input_group):
out_list = []
for i in range(len(input_group.bond_order_list)):
out_list.append((input_group.bond_atom_list[i * 2], input_group.bond_atom_list[i * 2 + 1],))
return out_list | Utility function to get indices (in pairs) of the bonds. |
376,477 | def format_file_node(import_graph, node, indent):
f = import_graph.provenance[node]
if isinstance(f, resolve.Direct):
out = + f.short_path
elif isinstance(f, resolve.Local):
out = + f.short_path
elif isinstance(f, resolve.System):
out = + f.short_path
elif isinstance(f, resolve.Builtin):
out = % f.module_name
else:
out = % node
return *indent + out | Prettyprint nodes based on their provenance. |
376,478 | def merge_lists(*args):
out = {}
for contacts in filter(None, args):
for contact in contacts:
out[contact.value] = contact
return list(out.values()) | Merge an arbitrary number of lists into a single list and dedupe it
Args:
*args: Two or more lists
Returns:
A deduped merged list of all the provided lists as a single list |
376,479 | def check_existens_of_staging_tag_in_remote_repo():
staging_tag = Git.create_git_version_tag(APISettings.GIT_STAGING_PRE_TAG)
command_git =
command_awk = {print $2}\
command_cut_1 = /\
command_cut_2 = ^\
command_sort =
command_uniq =
command = command_git + + command_awk + + command_cut_1 + + \
command_cut_2 + + command_sort + + command_uniq
list_of_tags = str(check_output(command, shell=True))
if staging_tag in list_of_tags:
return True
return False | This method will check, if the given tag exists as a staging tag in the remote repository.
The intention is that every tag which should be deployed to a production environment
has to be deployed on a staging environment before. |
376,480 | def bind_to_uniform_block(self, binding=0, *, offset=0, size=-1) -> None:
self.mglo.bind_to_uniform_block(binding, offset, size) | Bind the buffer to a uniform block.
Args:
binding (int): The uniform block binding.
Keyword Args:
offset (int): The offset.
size (int): The size. Value ``-1`` means all. |
376,481 | def p_BIT_ix(p):
bit = p[2].eval()
if bit < 0 or bit > 7:
error(p.lineno(3), % bit)
p[0] = None
return
p[0] = Asm(p.lineno(3), % (p[1], bit, p[4][0]), p[4][1]) | asm : bitop expr COMMA reg8_I
| bitop pexpr COMMA reg8_I |
376,482 | def create_chunker(self, chunk_size):
rolling_hash = _rabinkarprh.RabinKarpHash(self.window_size, self._seed)
rolling_hash.set_threshold(1.0 / chunk_size)
return RabinKarpCDC._Chunker(rolling_hash) | Create a chunker performing content-defined chunking (CDC) using Rabin Karp's rolling hash scheme with a
specific, expected chunk size.
Args:
chunk_size (int): (Expected) target chunk size.
Returns:
BaseChunker: A chunker object. |
376,483 | def specify_data_set(self, x_input, y_input):
self.x = x_input
self.y = y_input | Define input to ACE.
Parameters
----------
x_input : list
list of iterables, one for each independent variable
y_input : array
the dependent observations |
376,484 | def _edge_opposite_point(self, tri, i):
ind = tri.index(i)
return (tri[(ind+1) % 3], tri[(ind+2) % 3]) | Given a triangle, return the edge that is opposite point i.
Vertexes are returned in the same orientation as in tri. |
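A standalone check of the opposite-edge rule: for triangle (4, 7, 9) the edge opposite vertex 7 is (9, 4), preserving the cyclic orientation:

def edge_opposite_point(tri, i):
    ind = tri.index(i)
    # The two remaining vertices, in the same cyclic orientation as tri.
    return (tri[(ind + 1) % 3], tri[(ind + 2) % 3])

assert edge_opposite_point((4, 7, 9), 7) == (9, 4)
assert edge_opposite_point((4, 7, 9), 4) == (7, 9)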
376,485 | def vperp(a, b):
a = stypes.toDoubleVector(a)
b = stypes.toDoubleVector(b)
vout = stypes.emptyDoubleVector(3)
libspice.vperp_c(a, b, vout)
return stypes.cVectorToPython(vout) | Find the component of a vector that is perpendicular to a second
vector. All vectors are 3-dimensional.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/vperp_c.html
:param a: The vector whose orthogonal component is sought.
:type a: 3-Element Array of floats
:param b: The vector used as the orthogonal reference.
:type b: 3-Element Array of floats
:return: The component of a orthogonal to b.
:rtype: 3-Element Array of floats |
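The quantity vperp returns is simply a minus its projection onto b. A NumPy sketch of the same vector algebra (this does not call CSPICE and is only meant to show the math):

import numpy as np

def vperp(a, b):
    a, b = np.asarray(a, dtype=float), np.asarray(b, dtype=float)
    bb = b.dot(b)
    if bb == 0.0:                       # b is the zero vector: nothing to project onto
        return a.copy()
    return a - (a.dot(b) / bb) * b      # a minus its projection onto b

out = vperp([6.0, 6.0, 6.0], [2.0, 0.0, 0.0])
assert np.allclose(out, [0.0, 6.0, 6.0])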
376,486 | def aveknt(t, k):
t = np.atleast_1d(t)
if t.ndim > 1:
raise ValueError("t must be a list or a rank-1 array")
n = t.shape[0]
u = max(0, n - (k-1))
out = np.empty( (u,), dtype=t.dtype )
for j in range(u):
out[j] = sum( t[j:(j+k)] ) / k
return out | Compute the running average of `k` successive elements of `t`. Return the averaged array.
Parameters:
t:
Python list or rank-1 array
k:
int, >= 2, how many successive elements to average
Returns:
rank-1 array, averaged data. If k > len(t), returns a zero-length array.
Caveat:
This is slightly different from MATLAB's aveknt, which returns the running average
of `k`-1 successive elements of ``t[1:-1]`` (and the empty vector if ``len(t) - 2 < k - 1``). |
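A condensed re-expression of aveknt with a worked example, averaging 3 successive knots of a clamped knot vector:

import numpy as np

def aveknt(t, k):
    t = np.atleast_1d(t)
    u = max(0, t.shape[0] - (k - 1))                 # number of length-k windows
    return np.array([t[j:j + k].mean() for j in range(u)])

print(aveknt([0, 0, 0, 1, 2, 3, 3, 3], 3))
# -> approximately [0., 0.3333, 1., 2., 2.6667, 3.]  (running 3-averages)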
376,487 | def hs_mux(sel, ls_hsi, hso):
N = len(ls_hsi)
ls_hsi_rdy, ls_hsi_vld = zip(*ls_hsi)
ls_hsi_rdy, ls_hsi_vld = list(ls_hsi_rdy), list(ls_hsi_vld)
hso_rdy, hso_vld = hso
@always_comb
def _hsmux():
hso_vld.next = 0
for i in range(N):
ls_hsi_rdy[i].next = 0
if i == sel:
hso_vld.next = ls_hsi_vld[i]
ls_hsi_rdy[i].next = hso_rdy
return _hsmux | [Many-to-one] Multiplexes a list of input handshake interfaces
sel - (i) selects an input handshake interface to be connected to the output
ls_hsi - (i) list of input handshake tuples (ready, valid)
hso - (o) output handshake tuple (ready, valid) |
376,488 | def get_info(self):
request = urllib.request.Request(self.server_data.url + "/rest/v1/jobs/" + self.uri)
if self.server_data.authorization_header() is not None:
request.add_header(, self.server_data.authorization_header())
request.add_header(, )
response = urllib.request.urlopen(request)
self.json = response.read().decode()
self.info = json.loads(self.json)
self.load_info() | Query the GenePattern server for metadata regarding this job and assign
that metadata to the properties on this GPJob object. Including:
* Task Name
* LSID
* User ID
* Job Number
* Status
* Date Submitted
* URL of Log Files
* URL of Output Files
* Number of Output Files |
376,489 | def DeleteMessageHandlerRequests(self, requests, cursor=None):
query = "DELETE FROM message_handler_requests WHERE request_id IN ({})"
request_ids = set([r.request_id for r in requests])
query = query.format(",".join(["%s"] * len(request_ids)))
cursor.execute(query, request_ids) | Deletes a list of message handler requests from the database. |
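The entry above builds an SQL IN (...) clause by repeating a %s placeholder once per id and letting the driver bind the values. A standalone sketch of that pattern (table and column names are carried over only for illustration):

def delete_requests(cursor, request_ids):
    ids = sorted(set(request_ids))                   # dedupe before binding
    placeholders = ",".join(["%s"] * len(ids))
    query = "DELETE FROM message_handler_requests WHERE request_id IN ({})".format(placeholders)
    cursor.execute(query, ids)                       # the driver escapes each id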
376,490 | def favorites_add(photo_id):
method =
_dopost(method, auth=True, photo_id=photo_id)
return True | Add a photo to the user's favorites. |
376,491 | def GetFormatSpecification(cls):
format_specification = specification.FormatSpecification(cls.NAME)
format_specification.AddNewSignature(b, offset=4)
format_specification.AddNewSignature(b, offset=0)
return format_specification | Retrieves the format specification.
Returns:
FormatSpecification: format specification. |
376,492 | def get_gc_book(self):
if not self.gc_book:
gc_db = self.config.get(ConfigKeys.gnucash_book_path)
if not gc_db:
raise AttributeError("GnuCash book path not configured.")
if not os.path.isabs(gc_db):
gc_db = resource_filename(
Requirement.parse("Asset-Allocation"), gc_db)
if not os.path.exists(gc_db):
raise ValueError(f"Invalid GnuCash book path {gc_db}")
self.gc_book = open_book(gc_db, open_if_lock=True)
return self.gc_book | Returns the GnuCash db session |
376,493 | def execute_prebuild_script(self):
(pb_mod_path, pb_func) = self.prebuild_script.rsplit(, 1)
try:
if pb_mod_path.count() >= 1:
(mod_folder_path, mod_name) = pb_mod_path.rsplit(, 1)
mod_folder_path_fragments = mod_folder_path.split()
working_dir = os.path.join(os.getcwd(), *mod_folder_path_fragments)
else:
mod_name = pb_mod_path
working_dir = os.getcwd()
working_dir_importer = pkgutil.get_importer(working_dir)
module_ = working_dir_importer.find_module(mod_name).load_module(mod_name)
except (ImportError, AttributeError):
try:
module_ = importlib.import_module(pb_mod_path)
except ImportError:
raise ClickException(click.style("Failed ", fg="red") + + click.style(
"import prebuild script ", bold=True) + .format(
pb_mod_path=click.style(pb_mod_path, bold=True)))
if not hasattr(module_, pb_func):
raise ClickException(click.style("Failed ", fg="red") + + click.style(
"find prebuild script ", bold=True) + .format(
pb_func=click.style(pb_func, bold=True)) + .format(
pb_mod_path=pb_mod_path))
prebuild_function = getattr(module_, pb_func)
prebuild_function() | Parse and execute the prebuild_script from the zappa_settings. |
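The heart of the entry above is turning a dotted "module.function" setting into a callable. A much-reduced sketch using only importlib (the working-directory import path and the Click error handling are omitted):

import importlib

def resolve_dotted_callable(dotted_path):
    # Split "pkg.module.func" into module path and attribute, import, return the callable.
    module_path, func_name = dotted_path.rsplit(".", 1)
    module = importlib.import_module(module_path)
    return getattr(module, func_name)

dumps = resolve_dotted_callable("json.dumps")
assert dumps({"a": 1}) == '{"a": 1}'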
376,494 | def genl_ctrl_resolve_grp(sk, family_name, grp_name):
family = genl_ctrl_probe_by_name(sk, family_name)
if family is None:
return -NLE_OBJ_NOTFOUND
return genl_ctrl_grp_by_name(family, grp_name) | Resolve Generic Netlink family group name.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/ctrl.c#L471
Looks up the family object and resolves the group name to the numeric group identifier.
Positional arguments:
sk -- Generic Netlink socket (nl_sock class instance).
family_name -- name of Generic Netlink family (bytes).
grp_name -- name of group to resolve (bytes).
Returns:
The numeric group identifier or a negative error code. |
376,495 | def replace_termcodes(self, string, from_part=False, do_lt=True,
special=True):
return self.request(, string,
from_part, do_lt, special) | Replace any terminal code strings by byte sequences.
The returned sequences are Nvim's internal representation of keys,
for example:
<esc> -> '\x1b'
<cr> -> '\r'
<c-l> -> '\x0c'
<up> -> '\x80ku'
The returned sequences can be used as input to `feedkeys`. |
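A hedged usage sketch with the pynvim client: translate a key string once, then feed the translated bytes. The socket path is illustrative and a running Nvim instance is assumed:

import pynvim

# Assumes an Nvim instance is listening on this (illustrative) socket path.
nvim = pynvim.attach("socket", path="/tmp/nvim.sock")

keys = nvim.replace_termcodes("<Esc>:wq<CR>", True, True, True)
print(repr(keys))            # Nvim's internal representation, e.g. '\x1b:wq\r'
nvim.feedkeys(keys, "n")     # "n": feed without remapping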
376,496 | def list_nodes_min(call=None):
if call == :
raise SaltCloudSystemExit(
)
ret = {}
nodes = _query(, )[]
for node in nodes:
name = node[]
this_node = {
: six.text_type(node[]),
: _get_status_descr_by_id(int(node[]))
}
ret[name] = this_node
return ret | Return a list of the VMs that are on the provider. Only a list of VM names and
their state is returned. This is the minimum amount of information needed to
check for existing VMs.
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt-cloud -f list_nodes_min my-linode-config
salt-cloud --function list_nodes_min my-linode-config |
376,497 | def get_all_current_trains(self, train_type=None, direction=None):
params = None
if train_type:
url = self.api_base_url +
params = {
: STATION_TYPE_TO_CODE_DICT[train_type]
}
else:
url = self.api_base_url +
response = requests.get(
url, params=params, timeout=10)
if response.status_code != 200:
return []
trains = self._parse_all_train_data(response.content)
if direction is not None:
return self._prune_trains(trains, direction=direction)
return trains | Returns all trains that are due to start in the next 10 minutes
@param train_type: ['mainline', 'suburban', 'dart'] |
376,498 | def create_role(self, role_name, mount_point=, **kwargs):
return self._adapter.post(.format(mount_point, role_name), json=kwargs) | POST /auth/<mount_point>/role/<role name>
:param role_name:
:type role_name:
:param mount_point:
:type mount_point:
:param kwargs:
:type kwargs:
:return:
:rtype: |
376,499 | async def getRecentErrors(self, *args, **kwargs):
return await self._makeApiCall(self.funcinfo["getRecentErrors"], *args, **kwargs) | Look up the most recent errors in the provisioner across all worker types
Return a list of recent errors encountered
This method gives output: ``v1/errors.json#``
This method is ``experimental`` |