Unnamed: 0 (int64, 0–389k) | code (string, lengths 26–79.6k) | docstring (string, lengths 1–46.9k)
---|---|---|
16,000 | def _OpenCollectionPath(coll_path):
hunt_collection = results.HuntResultCollection(coll_path)
if hunt_collection and hunt_collection[0].payload:
return hunt_collection
indexed_collection = sequential_collection.GeneralIndexedCollection(coll_path)
if indexed_collection:
return indexed_collection | Tries to open various types of collections at the given path. |
16,001 | def unpack_rsp(cls, rsp_pb):
ret_type = rsp_pb.retType
ret_msg = rsp_pb.retMsg
if ret_type != RET_OK:
return RET_ERROR, ret_msg, None
res = {}
if rsp_pb.HasField('s2c'):
res['server_version'] = rsp_pb.s2c.serverVer
res['login_user_id'] = rsp_pb.s2c.loginUserID
res['conn_id'] = rsp_pb.s2c.connID
res['conn_aes_key'] = rsp_pb.s2c.connAESKey
res['keep_alive_interval'] = rsp_pb.s2c.keepAliveInterval
else:
return RET_ERROR, "rsp_pb error", None
return RET_OK, "", res | Unpack the init connect response |
16,002 | def setup(applicationName,
applicationType=None,
style=,
splash=,
splashType=None,
splashTextColor=,
splashTextAlign=None,
theme=):
import_qt(globals())
output = {}
if not QtGui.QApplication.instance():
if applicationType is None:
applicationType = QtGui.QApplication
app = applicationType([applicationName])
app.setApplicationName(applicationName)
app.setQuitOnLastWindowClosed(True)
stylize(app, style=style, theme=theme)
app.setProperty(, wrapVariant(True))
output[] = app
if splash:
if not splashType:
splashType = XLoggerSplashScreen
pixmap = QtGui.QPixmap(splash)
screen = splashType(pixmap)
if splashTextAlign is None:
splashTextAlign = QtCore.Qt.AlignLeft | QtCore.Qt.AlignBottom
screen.setTextColor(QtGui.QColor(splashTextColor))
screen.setTextAlignment(splashTextAlign)
screen.show()
QtGui.QApplication.instance().processEvents()
output[] = screen
return output | Wrapper system for the QApplication creation process to handle all proper
pre-application setup. This method will verify that there is no application
running, creating one if necessary. If no application is created, a None
value is returned - signaling that there is already an app running. If you
need to specify your own QApplication subclass, you can do so through the
applicationType parameter.
:note This method should always be used with the exec_ method to
handle the post setup process.
:param applicationName | <str>
applicationType | <subclass of QApplication> || None
style | <str> || <QStyle> | style to use for the new app
splash | <str> | filepath to use for a splash screen
splashType | <subclass of QSplashScreen> || None
splashTextColor | <str> || <QColor>
splashTextAlign | <Qt.Alignment>
:usage |import projexui
|
|def main(argv):
| # initialize the application
| data = projexui.setup()
|
| # do some initialization code
| window = MyWindow()
| window.show()
|
| # execute the application
| projexui.exec_(window, data)
:return { <str> key: <variant> value, .. } |
16,003 | def _call(self, method, params):
url = self.base_url % {: self.bot_token, : method}
logger.debug("Telegram bot calls method: %s params: %s",
method, str(params))
r = self.fetch(url, payload=params)
return r.text | Retrieve the given resource.
:param resource: resource to retrieve
:param params: dict with the HTTP parameters needed to retrieve
the given resource |
16,004 | def _export_to2marc(self, key, value):
def _is_for_cds(value):
return 'CDS' in value
def _is_for_hal(value):
return 'HAL' in value and value['HAL']
def _is_not_for_hal(value):
return 'HAL' in value and not value['HAL']
result = []
if _is_for_cds(value):
result.append({: })
if _is_for_hal(value):
result.append({: })
elif _is_not_for_hal(value):
result.append({: })
return result | Populate the ``595`` MARC field. |
16,005 | def pubkey(self, identity, ecdh=False):
curve_name = identity.get_curve_name(ecdh=ecdh)
log.debug(,
identity.to_string(), curve_name, self)
addr = identity.get_bip32_address(ecdh=ecdh)
result = self._defs.get_public_node(
self.conn,
n=addr,
ecdsa_curve_name=curve_name)
log.debug(, result)
return bytes(result.node.public_key) | Return public key. |
16,006 | def set_seed(seed: int):
random.seed(seed)
np.random.seed(seed)
torch.random.manual_seed(seed) | Set random seed for python, numpy and pytorch RNGs |
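A minimal reproducibility sketch built on the function above (assuming numpy and torch are installed): seeding before each run makes the draws from all three RNGs identical.
import random
import numpy as np
import torch

def set_seed(seed: int):
    # mirror of the function above: seed python, numpy and pytorch RNGs
    random.seed(seed)
    np.random.seed(seed)
    torch.random.manual_seed(seed)

set_seed(42)
first = (random.random(), float(np.random.rand()), float(torch.rand(1)))
set_seed(42)
second = (random.random(), float(np.random.rand()), float(torch.rand(1)))
assert first == second  # identical draws after re-seeding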
16,007 | def loadFromCheckpoint(savedModelDir, newSerialization=False):
if newSerialization:
return HTMPredictionModel.readFromCheckpoint(savedModelDir)
else:
return Model.load(savedModelDir) | Load saved model.
:param savedModelDir: (string)
Directory of where the experiment is to be or was saved
:returns: (:class:`nupic.frameworks.opf.model.Model`) The loaded model
instance. |
16,008 | def baseline(y_true, y_score=None):
if len(y_true) > 0:
return np.nansum(y_true)/count(y_true, countna=False)
else:
return 0.0 | Number of positive labels divided by number of labels,
or zero if there are no labels |
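A worked example of the same ratio in plain numpy; the `count(..., countna=False)` helper above is assumed to count non-NaN labels.
import numpy as np

y_true = np.array([1, 0, 1, np.nan, 1])
# 3 positive labels over 4 non-NaN labels -> 0.75
rate = np.nansum(y_true) / np.sum(~np.isnan(y_true))
print(rate)  # 0.75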
16,009 | def create_entity(self):
self._highest_id_seen += 1
entity = Entity(self._highest_id_seen, self)
self._entities.append(entity)
return entity | Create a new entity.
The entity will have a higher UID than any previously associated
with this world.
:return: the new entity
:rtype: :class:`essence.Entity` |
16,010 | async def _connect_and_read(self):
while not self._stopped:
try:
self._connection_attempts += 1
async with aiohttp.ClientSession(
loop=self._event_loop,
timeout=aiohttp.ClientTimeout(total=self.timeout),
) as session:
self._session = session
url, data = await self._retreive_websocket_info()
async with session.ws_connect(
url,
heartbeat=self.ping_interval,
ssl=self.ssl,
proxy=self.proxy,
) as websocket:
self._logger.debug("The Websocket connection has been opened.")
self._websocket = websocket
self._dispatch_event(event="open", data=data)
await self._read_messages()
except (
client_err.SlackClientNotConnectedError,
client_err.SlackApiError,
) as exception:
self._logger.debug(str(exception))
self._dispatch_event(event="error", data=exception)
if self.auto_reconnect and not self._stopped:
await self._wait_exponentially(exception)
continue
self._logger.exception(
"The Websocket encountered an error. Closing the connection..."
)
self._close_websocket()
raise | Retrieves and connects to Slack's RTM API.
Makes an authenticated call to Slack's RTM API to retrieve
a websocket URL. Then connects to the message server and
reads event messages as they come in.
If 'auto_reconnect' is specified we
retrieve a new url and reconnect any time the connection
is lost unintentionally or an exception is thrown.
Raises:
SlackApiError: Unable to retrieve RTM URL from Slack.
websockets.exceptions: Errors thrown by the 'websockets' library. |
16,011 | def compile_compiler_bridge(self, context):
bridge_jar_name =
bridge_jar = os.path.join(self._compiler_bridge_cache_dir, bridge_jar_name)
global_bridge_cache_dir = os.path.join(self._zinc_factory.get_options().pants_bootstrapdir, fast_relpath(self._compiler_bridge_cache_dir, self._workdir()))
globally_cached_bridge_jar = os.path.join(global_bridge_cache_dir, bridge_jar_name)
if os.path.exists(globally_cached_bridge_jar):
safe_mkdir(self._relative_to_buildroot(self._compiler_bridge_cache_dir))
safe_hardlink_or_copy(globally_cached_bridge_jar, bridge_jar)
if not os.path.exists(bridge_jar):
res = self._run_bootstrapper(bridge_jar, context)
context._scheduler.materialize_directories((
DirectoryToMaterialize(get_buildroot(), res.output_directory_digest),
))
safe_mkdir(global_bridge_cache_dir)
safe_hardlink_or_copy(bridge_jar, globally_cached_bridge_jar)
return ClasspathEntry(bridge_jar, res.output_directory_digest)
else:
bridge_jar_snapshot = context._scheduler.capture_snapshots((PathGlobsAndRoot(
PathGlobs((self._relative_to_buildroot(bridge_jar),)),
text_type(get_buildroot())
),))[0]
bridge_jar_digest = bridge_jar_snapshot.directory_digest
return ClasspathEntry(bridge_jar, bridge_jar_digest) | Compile the compiler bridge to be used by zinc, using our scala bootstrapper.
It will compile and cache the jar, and materialize it if not already there.
:param context: The context of the task trying to compile the bridge.
This is mostly needed to use its scheduler to create digests of the relevant jars.
:return: The absolute path to the compiled scala-compiler-bridge jar. |
16,012 | def from_int(cls, integer):
bin_string = bin(integer)
return cls(
text=len(bin_string) >= 1 and bin_string[-1] == "1",
comment=len(bin_string) >= 2 and bin_string[-2] == "1",
user=len(bin_string) >= 3 and bin_string[-3] == "1",
restricted=len(bin_string) >= 4 and bin_string[-4] == "1"
) | Constructs a `Deleted` using the `tinyint` value of the `rev_deleted`
column of the `revision` MariaDB table.
* DELETED_TEXT = 1
* DELETED_COMMENT = 2
* DELETED_USER = 4
* DELETED_RESTRICTED = 8 |
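The same decoding can be written with bit masks; a minimal stand-alone sketch (hypothetical helper name) that reproduces the flag values the constructor above derives from the binary string.
# rev_deleted bit field: 1 = text, 2 = comment, 4 = user, 8 = restricted
def decode_rev_deleted(value: int) -> dict:
    return {
        "text": bool(value & 1),
        "comment": bool(value & 2),
        "user": bool(value & 4),
        "restricted": bool(value & 8),
    }

print(decode_rev_deleted(5))  # text and user deleted, comment and restricted not
print(decode_rev_deleted(8))  # only restricted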
16,013 | def normalize(pw):
pw_lower = pw.lower()
return ''.join(helper.L33T.get(c, c) for c in pw_lower) | Lower case, and change the symbols to closest characters |
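A self-contained sketch of the substitution with a small, assumed leet table (the real `helper.L33T` mapping is larger):
L33T = {"4": "a", "@": "a", "3": "e", "1": "i", "0": "o", "$": "s", "5": "s", "7": "t"}

def normalize(pw: str) -> str:
    # lower-case, then map each leet character to its closest letter
    return "".join(L33T.get(c, c) for c in pw.lower())

print(normalize("P@ssW0rd!"))  # password!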
16,014 | def _MergeTaskStorage(self, storage_writer):
if self._processing_profiler:
self._processing_profiler.StartTiming()
for task_identifier in storage_writer.GetProcessedTaskIdentifiers():
try:
task = self._task_manager.GetProcessedTaskByIdentifier(task_identifier)
self._task_manager.SampleTaskStatus(task, )
to_merge = self._task_manager.CheckTaskToMerge(task)
if not to_merge:
storage_writer.RemoveProcessedTaskStorage(task)
self._task_manager.RemoveTask(task)
self._task_manager.SampleTaskStatus(task, )
else:
storage_writer.PrepareMergeTaskStorage(task)
self._task_manager.UpdateTaskAsPendingMerge(task)
except KeyError:
logger.error(
.format(
task_identifier))
continue
if self._processing_profiler:
self._processing_profiler.StopTiming()
task = None
if not self._storage_merge_reader_on_hold:
task = self._task_manager.GetTaskPendingMerge(self._merge_task)
if task or self._storage_merge_reader:
self._status = definitions.STATUS_INDICATOR_MERGING
if self._processing_profiler:
self._processing_profiler.StartTiming()
if task:
if self._storage_merge_reader:
self._merge_task_on_hold = self._merge_task
self._storage_merge_reader_on_hold = self._storage_merge_reader
self._task_manager.SampleTaskStatus(
self._merge_task_on_hold, )
self._merge_task = task
try:
self._storage_merge_reader = storage_writer.StartMergeTaskStorage(
task)
self._task_manager.SampleTaskStatus(task, )
except IOError as exception:
logger.error((
).format(task.identifier, exception))
self._storage_merge_reader = None
if self._storage_merge_reader:
fully_merged = self._storage_merge_reader.MergeAttributeContainers(
maximum_number_of_containers=self._MAXIMUM_NUMBER_OF_CONTAINERS)
else:
self._status = definitions.STATUS_INDICATOR_RUNNING
self._number_of_produced_events = storage_writer.number_of_events
self._number_of_produced_sources = storage_writer.number_of_event_sources
self._number_of_produced_warnings = storage_writer.number_of_warnings | Merges a task storage with the session storage.
This function checks all task stores that are ready to merge and updates
the scheduled tasks. Note that to prevent this function holding up
the task scheduling loop only the first available task storage is merged.
Args:
storage_writer (StorageWriter): storage writer for a session storage used
to merge task storage. |
16,015 | def run_task_class(self, class_path, **options):
logger.console("\n")
task_class, task_config = self._init_task(class_path, options, TaskConfig())
return self._run_task(task_class, task_config) | Runs a CumulusCI task class with task options via kwargs.
Use this keyword to run logic from CumulusCI tasks which have not
been configured in the project's cumulusci.yml file. This is
most useful in cases where a test needs to use task logic for
logic unique to the test and thus not worth making into a named
task for the project
Examples:
| =Keyword= | =task_class= | =task_options= |
| Run Task Class | cumulusci.task.utils.DownloadZip | url=http://test.com/test.zip dir=test_zip | |
16,016 | def pyquil_to_tk(prog: Program) -> Circuit:
reg_name = None
qubits = prog.get_qubits()
n_qubits = max(qubits) + 1
tkc = Circuit(n_qubits)
for i in prog.instructions:
if isinstance(i, Gate):
name = i.name
try:
optype = _known_quil_gate[name]
except KeyError as error:
raise NotImplementedError("Operation not supported by tket: " + str(i)) from error
if len(i.params) == 0:
tkc.add_operation(optype, [q.index for q in i.qubits])
else:
params = [p/PI for p in i.params]
op = tkc._get_op(optype,len(i.qubits),len(i.qubits),params)
tkc._add_operation(op, [q.index for q in i.qubits])
elif isinstance(i, Measurement):
if not i.classical_reg:
raise NotImplementedError("Program has no defined classical register for measurement on qubit: ", i.qubits[0])
reg = i.classical_reg
if reg_name and reg_name != reg.name:
raise NotImplementedError("Program has multiple classical registers: ", reg_name, reg.name)
reg_name = reg.name
op = tkc._get_op(OpType.Measure,1,1,str(reg.offset))
tkc._add_operation(op, [i.qubit.index])
elif isinstance(i, Declare):
continue
elif isinstance(i, Pragma):
continue
elif isinstance(i, Halt):
return tkc
else:
raise NotImplementedError("Pyquil instruction is not a gate: " + str(i))
return tkc | Convert a :py:class:`pyquil.Program` to a :math:`\\mathrm{t|ket}\\rangle` :py:class:`Circuit` .
Note that not all pyQuil operations are currently supported by pytket.
:param prog: A circuit to be converted
:return: The converted circuit |
16,017 | def disconnect_node(node, target_obj_result, graph, debug):
branch_kind = graph.adj[node][target_obj_result]['branch'].kind
branch_type = graph.adj[node][target_obj_result]['branch'].type
branch_ring = graph.adj[node][target_obj_result]['branch'].ring
graph.remove_edge(node, target_obj_result)
if isinstance(target_obj_result, MVCableDistributorDing0):
neighbor_nodes = list(graph.neighbors(target_obj_result))
if len(neighbor_nodes) == 2:
node.grid.remove_cable_distributor(target_obj_result)
branch_length = calc_geo_dist_vincenty(neighbor_nodes[0], neighbor_nodes[1])
graph.add_edge(neighbor_nodes[0], neighbor_nodes[1], branch=BranchDing0(length=branch_length,
kind=branch_kind,
type=branch_type,
ring=branch_ring))
if debug:
logger.debug(.format(node, target_obj_result)) | Disconnects `node` from `target_obj`
Args
----
node: LVLoadAreaCentreDing0, i.e.
Origin node - Ding0 graph object (e.g. LVLoadAreaCentreDing0)
target_obj_result: LVLoadAreaCentreDing0, i.e.
Origin node - Ding0 graph object (e.g. LVLoadAreaCentreDing0)
graph: :networkx:`NetworkX Graph Obj< >`
NetworkX graph object with nodes and newly created branches
debug: bool
If True, information is printed during process |
16,018 | def set_card_standard(self, title, text, smallImageUrl=None,
largeImageUrl=None):
self.response.card.type = 'Standard'
self.response.card.title = title
self.response.card.text = text
if smallImageUrl:
self.response.card.image.smallImageUrl = smallImageUrl
if largeImageUrl:
self.response.card.image.largeImageUrl = largeImageUrl | Set response card as standard type.
title, text, and image cannot exceed 8,000 characters.
Args:
title: str. Title of Simple or Standard type card.
text: str. Content of Standard type card.
smallImageUrl: str. URL of small image. Cannot exceed 2,000
characters. Recommended pixel size: 720w x 480h.
largeImageUrl: str. URL of large image. Cannot exceed 2,000
characters. Recommended pixel size: 1200w x 800h. |
16,019 | def posthoc_mackwolfe(a, val_col, group_col, p=None, n_perm=100, sort=False, p_adjust=None):
x = __convert_to_df(a, val_col, group_col)
if not sort:
x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
x.sort_values(by=[group_col], ascending=True, inplace=True)
k = x[group_col].unique().size
if p:
if p > k:
print("Selected > number of groups:", str(p), " > ", str(k))
return False
elif p < 1:
print("Selected < 1: ", str(p))
return False
Rij = x[val_col].rank()
n = x.groupby(group_col)[val_col].count()
def _fn(Ri, Rj):
return np.sum(Ri.apply(lambda x: Rj[Rj > x].size))
def _ustat(Rij, g, k):
levels = np.unique(g)
U = np.identity(k)
for i in range(k):
for j in range(i):
U[i,j] = _fn(Rij[x[group_col] == levels[i]], Rij[x[group_col] == levels[j]])
U[j,i] = _fn(Rij[x[group_col] == levels[j]], Rij[x[group_col] == levels[i]])
return U
def _ap(p, U):
tmp1 = 0
if p > 0:
for i in range(p):
for j in range(i+1, p+1):
tmp1 += U[i,j]
tmp2 = 0
if p < k:
for i in range(p, k):
for j in range(i+1, k):
tmp2 += U[j,i]
return tmp1 + tmp2
def _n1(p, n):
return np.sum(n[:p+1])
def _n2(p, n):
return np.sum(n[p:k])
def _mean_at(p, n):
N1 = _n1(p, n)
N2 = _n2(p, n)
return (N1**2 + N2**2 - np.sum(n**2) - n.iloc[p]**2)/4
def _var_at(p, n):
N1 = _n1(p, n)
N2 = _n2(p, n)
N = np.sum(n)
var = (2 * (N1**3 + N2**3) + 3 * (N1**2 + N2**2) -\
np.sum(n**2 * (2*n + 3)) - n.iloc[p]**2 * (2 * n.iloc[p] + 3) +\
12. * n.iloc[p] * N1 * N2 - 12. * n.iloc[p] ** 2 * N) / 72.
return var
if p:
if (x.groupby(val_col).count() > 1).any().any():
print("Ties are present")
U = _ustat(Rij, x[group_col], k)
est = _ap(p, U)
mean = _mean_at(p, n)
sd = np.sqrt(_var_at(p, n))
stat = (est - mean)/sd
p_value = ss.norm.sf(stat)
else:
U = _ustat(Rij, x[group_col], k)
Ap = np.array([_ap(i, U) for i in range(k)]).ravel()
mean = np.array([_mean_at(i, n) for i in range(k)]).ravel()
var = np.array([_var_at(i, n) for i in range(k)]).ravel()
A = (Ap - mean) / np.sqrt(var)
stat = np.max(A)
p = A == stat
est = None
mt = []
for i in range(n_perm):
ix = Series(np.random.permutation(Rij))
Uix = _ustat(ix, x[group_col], k)
Apix = np.array([_ap(i, Uix) for i in range(k)])
Astarix = (Apix - mean) / np.sqrt(var)
mt.append(np.max(Astarix))
mt = np.array(mt)
p_value = mt[mt > stat] / n_perm
return p_value, stat | Mack-Wolfe Test for Umbrella Alternatives.
In dose-finding studies one may assume an increasing treatment effect with
increasing dose level. However, the test subject may actually succumb to
toxic effects at high doses, which leads to decreasing treatment
effects [1]_, [2]_.
The scope of the Mack-Wolfe Test is to test for umbrella alternatives for
either a known or unknown point P (i.e. dose-level), where the peak
(umbrella point) is present.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
p : int, optional
The a-priori known peak as an ordinal number of the treatment group
including the zero dose level, i.e. p = {1, ..., k}. Defaults to None.
sort : bool, optional
If True, sort data by block and group columns.
Returns
-------
p : float
P value.
stat : float
Statistic.
References
----------
.. [1] Chen, I.Y. (1991) Notes on the Mack-Wolfe and Chen-Wolfe Tests for
Umbrella Alternatives. Biom. J., 33, 281-290.
.. [2] Mack, G.A., Wolfe, D. A. (1981) K-sample rank tests for umbrella
alternatives. J. Amer. Statist. Assoc., 76, 175-181.
Examples
--------
>>> x = np.array([[10,'a'], [59,'a'], [76,'b'], [10, 'b']])
>>> sp.posthoc_mackwolfe(x, val_col = 0, group_col = 1) |
16,020 | def _read_dataset_metadata(self):
blob = self.storage_client.get_blob(
+ self.dataset_name + )
buf = BytesIO()
blob.download_to_file(buf)
buf.seek(0)
return eval_lib.DatasetMetadata(buf) | Reads dataset metadata.
Returns:
instance of DatasetMetadata |
16,021 | def _create_key_manager(self, get_match_fuzzy, set_match_fuzzy,
get_enable_vi_bindings, set_enable_vi_bindings,
get_show_completion_columns,
set_show_completion_columns,
get_show_help, set_show_help,
stop_input_and_refresh_cli):
assert callable(get_match_fuzzy)
assert callable(set_match_fuzzy)
assert callable(get_enable_vi_bindings)
assert callable(set_enable_vi_bindings)
assert callable(get_show_completion_columns)
assert callable(set_show_completion_columns)
assert callable(get_show_help)
assert callable(set_show_help)
assert callable(stop_input_and_refresh_cli)
self.manager = KeyBindingManager(
enable_search=True,
enable_abort_and_exit_bindings=True,
enable_system_bindings=True,
enable_auto_suggest_bindings=True,
enable_open_in_editor=False)
@self.manager.registry.add_binding(Keys.F2)
def handle_f2(_):
set_match_fuzzy(not get_match_fuzzy())
@self.manager.registry.add_binding(Keys.F3)
def handle_f3(_):
set_enable_vi_bindings(not get_enable_vi_bindings())
stop_input_and_refresh_cli()
@self.manager.registry.add_binding(Keys.F4)
def handle_f4(_):
set_show_completion_columns(not get_show_completion_columns())
stop_input_and_refresh_cli()
@self.manager.registry.add_binding(Keys.F5)
def handle_f5(_):
set_show_help(not get_show_help())
stop_input_and_refresh_cli()
@self.manager.registry.add_binding(Keys.F9)
def handle_f9(event):
if event.cli.current_buffer_name == u:
event.cli.focus(u)
else:
event.cli.focus(u)
@self.manager.registry.add_binding(Keys.F10)
def handle_f10(event):
event.cli.set_exit() | Create and initialize the keybinding manager.
:type get_fuzzy_match: callable
:param get_fuzzy_match: Gets the fuzzy matching config.
:type set_fuzzy_match: callable
:param set_fuzzy_match: Sets the fuzzy matching config.
:type get_enable_vi_bindings: callable
:param get_enable_vi_bindings: Gets the vi (or emacs) key bindings
config.
:type set_enable_vi_bindings: callable
:param set_enable_vi_bindings: Sets the vi (or emacs) key bindings
config.
:type get_show_completion_columns: callable
:param get_show_completion_columns: Gets the show completions in
multiple or single columns config.
:type set_show_completion_columns: callable
:param set_show_completion_columns: Sets the show completions in
multiple or single columns config.
:type get_show_help: callable
:param get_show_help: Gets the show help pane config.
:type set_show_help: callable
:param set_show_help: Sets the show help pane config.
:type stop_input_and_refresh_cli: callable
:param stop_input_and_refresh_cli: Stops input by raising an
`InputInterrupt`, forces a cli refresh to ensure certain
options take effect within the current session.
:rtype: :class:`prompt_toolkit.KeyBindingManager`
:return: A custom `KeyBindingManager`. |
16,022 | def install_config_kibana(self):
if self.prompt_check("Download and install kibana"):
self.kibana_install()
if self.prompt_check("Configure and autostart kibana"):
self.kibana_config() | install and config kibana
:return: |
16,023 | def sudoku(G):
global N, N2, N4
if len(G) == 16:
N, N2, N4 = 4, 16, 256
e = 4 * N4
universe = e + 1
S = [[rc(a), rv(a), cv(a), bv(a)] for a in range(N4 * N2)]
A = [e]
for r in range(N2):
for c in range(N2):
if G[r][c] != 0:
a = assignation(r, c, G[r][c] - 1)
A += S[a]
sol = dancing_links(universe, S + [A])
if sol:
for a in sol:
if a < len(S):
G[row(a)][col(a)] = val(a) + 1
return True
else:
return False | Solving Sudoku
:param G: integer matrix with 0 at empty cells
:returns bool: True if grid could be solved
:modifies: G will contain the solution
:complexity: huge, but linear for usual published 9x9 grids |
16,024 | def result(self):
if not self.is_done():
raise ValueError("Cannot get a result for a program that isnstatusCANCELLEDresultstatusERRORQVMresultQPUresultQUILCresultresultprogramtypewavefunctionresultprogramaddressesprogramtypemultishotmultishot-measureexpectationresultresult'] | The result of the job if available
throws ValueError is result is not available yet
throws ApiError if server returned an error indicating program execution was not successful
or if the job was cancelled |
16,025 | def skip(self, regex):
return self.scan_full(regex, return_string=False, advance_pointer=True) | Like :meth:`scan`, but return the number of characters matched.
>>> s = Scanner("test string")
>>> s.skip('test ')
5 |
def write_csvs(self, dirname: PathLike, skip_data: bool = True, sep: str = ','):
from .readwrite.write import write_csvs
write_csvs(dirname, self, skip_data=skip_data, sep=sep) | Write annotation to ``.csv`` files.
It is not possible to recover the full :class:`~anndata.AnnData` from the
output of this function. Use :meth:`~anndata.AnnData.write` for this.
Parameters
----------
dirname
Name of directory to which to export.
skip_data
Skip the data matrix :attr:`X`.
sep
Separator for the data. |
16,027 | def detx(self, det_id, t0set=None, calibration=None):
url = .format(det_id)
detx = self._get_content(url)
return detx | Retrieve the detector file for given detector id
If t0set is given, append the calibration data. |
16,028 | def flag(self, key, env=None):
env = env or self.ENVVAR_PREFIX_FOR_DYNACONF or "DYNACONF"
with self.using_env(env):
value = self.get_fresh(key)
return value is True or value in true_values | Feature flagging system
write flags to redis
$ dynaconf write redis -s DASHBOARD=1 -e premiumuser
meaning: Any premium user has DASHBOARD feature enabled
In your program do::
# premium user has access to dashboard?
>>> if settings.flag('dashboard', 'premiumuser'):
... activate_dashboard()
The value is ensured to be loaded fresh from redis server
It also works with file settings but the recommended is redis
as the data can be loaded once it is updated.
:param key: The flag name
:param env: The env to look for |
16,029 | def __create_dir_property(self, dir_name, docstring):
property_name = "{}_dir".format(dir_name)
private_name = "_" + property_name
setattr(self, private_name, None)
def fget(self):
return getattr(self, private_name)
def fset(self, path):
verify_dir(path, dir_name)
setattr(self, private_name, path)
p = property(fget=fget, fset=fset, doc=docstring)
setattr(self.__class__, property_name, p) | Generate getter and setter for a directory property. |
16,030 | def get_resource_search_session_for_bin(self, bin_id):
if not self.supports_resource_search():
raise errors.Unimplemented()
return sessions.ResourceSearchSession(bin_id, runtime=self._runtime) | Gets a resource search session for the given bin.
arg: bin_id (osid.id.Id): the ``Id`` of the bin
return: (osid.resource.ResourceSearchSession) - ``a
ResourceSearchSession``
raise: NotFound - ``bin_id`` not found
raise: NullArgument - ``bin_id`` is ``null``
raise: OperationFailed - ``unable to complete request``
raise: Unimplemented - ``supports_resource_search()`` or
``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_resource_search()`` and
``supports_visible_federation()`` are ``true``.* |
16,031 | def select_inputs(self, address: str, amount: int) -> dict:
utxos = []
utxo_sum = Decimal(0)
for tx in sorted(self.listunspent(address=address), key=itemgetter()):
if tx["address"] not in (self.pa_parameters.P2TH_addr,
self.pa_parameters.test_P2TH_addr):
utxos.append(
MutableTxIn(txid=tx[],
txout=tx[],
sequence=Sequence.max(),
script_sig=ScriptSig.empty())
)
utxo_sum += Decimal(tx["amount"])
if utxo_sum >= amount:
return {: utxos, : utxo_sum}
if utxo_sum < amount:
raise InsufficientFunds("Insufficient funds.")
raise Exception("undefined behavior :.(") | finds apropriate utxo's to include in rawtx, while being careful
to never spend old transactions with a lot of coin age.
Argument is intiger, returns list of apropriate UTXO's |
16,032 | def lookup_users(self, user_ids=None, screen_names=None, include_entities=None, tweet_mode=None):
post_data = {}
if include_entities is not None:
include_entities = 'true' if include_entities else 'false'
post_data['include_entities'] = include_entities
if user_ids:
post_data['user_id'] = list_to_csv(user_ids)
if screen_names:
post_data['screen_name'] = list_to_csv(screen_names)
if tweet_mode:
post_data['tweet_mode'] = tweet_mode
return self._lookup_users(post_data=post_data) | Perform bulk look up of users from user ID or screen_name |
16,033 | def register(self, name, obj):
if name in self.all:
log.debug(, name, obj.name)
raise DuplicateDefinitionException(
% (name, obj.name))
log.debug(, name)
self.all[name] = obj
return obj | Registers a unique type description |
16,034 | def replace_cluster_role(self, name, body, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_cluster_role_with_http_info(name, body, **kwargs)
else:
(data) = self.replace_cluster_role_with_http_info(name, body, **kwargs)
return data | replace the specified ClusterRole
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_cluster_role(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ClusterRole (required)
:param V1ClusterRole body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1ClusterRole
If the method is called asynchronously,
returns the request thread. |
16,035 | def create(
name,
attributes=None,
region=None,
key=None,
keyid=None,
profile=None,
):
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if attributes is None:
attributes = {}
attributes = _preprocess_attributes(attributes)
try:
conn.create_queue(QueueName=name, Attributes=attributes)
except botocore.exceptions.ClientError as e:
return {: __utils__[](e)}
return {: True} | Create an SQS queue.
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.create myqueue region=us-east-1 |
16,036 | def run(self, *, delay=None):
self.broker.enqueue(self.messages[0], delay=delay)
return self | Run this pipeline.
Parameters:
delay(int): The minimum amount of time, in milliseconds, the
pipeline should be delayed by.
Returns:
pipeline: Itself. |
16,037 | def find_max_label_length(labels):
length = 0
for i in range(len(labels)):
if len(labels[i]) > length:
length = len(labels[i])
return length | Return the maximum length for the labels. |
16,038 | def index_of_reports(self, report, account_id):
path = {}
data = {}
params = {}
path["account_id"] = account_id
path["report"] = report
self.logger.debug("GET /api/v1/accounts/{account_id}/reports/{report} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/accounts/{account_id}/reports/{report}".format(**path), data=data, params=params, all_pages=True) | Index of Reports.
Shows all reports that have been run for the account of a specific type. |
16,039 | def __get_translation(self, surah, ayah, lang):
url = .format(
base=self.BASE_API, lang=lang, surah=int(surah)
)
try:
response = urlopen(url)
data = json.loads(response.read().decode())
translation = data[][ayah]
except ODOAException:
return None
else:
return translation | Perform http request to get translation from given surah, ayah and
language.
Parameter:
:surah -- Surah index from API pages.
:ayat -- Ayat key.
:lang -- Language code.
Return:
:string -- Translation from given surah and ayat. |
16,040 | def convertPixelXYToLngLat(self, pixelX, pixelY, level):
mapSize = self.getMapDimensionsByZoomLevel(level)
x = (self.clipValue(pixelX, 0, mapSize - 1) / mapSize) - 0.5
y = 0.5 - (self.clipValue(pixelY, 0, mapSize - 1) / mapSize)
lat = 90 - 360 * math.atan(math.exp(-y * 2 * math.pi)) / math.pi
lng = 360 * x
return (lng, lat) | converts a pixel x, y to a latitude and longitude. |
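A stand-alone check of the inverse Web Mercator math, assuming the usual 256 * 2**level pixels per side and omitting the clipping step:
import math

def pixel_to_lnglat(pixel_x, pixel_y, level, tile_size=256):
    map_size = tile_size * 2 ** level
    x = pixel_x / map_size - 0.5
    y = 0.5 - pixel_y / map_size
    lat = 90 - 360 * math.atan(math.exp(-y * 2 * math.pi)) / math.pi
    lng = 360 * x
    return lng, lat

# the centre pixel of a level-2 map lands on (0, 0)
print(pixel_to_lnglat(512, 512, 2))  # approximately (0.0, 0.0)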
16,041 | def onRefreshPluginData(self, plugin_name, data):
logger.info(u"onRefreshPluginData: {}".format(plugin_name))
if not plugin_name:
logger.error("Missing plugin name")
return
reactor.callFromThread(self._sendJSON, {
: "plugin_data_get",
: plugin_name
}) | Frontend requests a data refresh
:param plugin_name: Name of plugin that changed
:type plugin_name: str
:param data: Additional data
:type data: None
:rtype: None |
16,042 | def roundness(im):
perimeter = im.shape[0]*2 +im.shape[1]*2 -4
area = im.size
return 4*np.pi*area/perimeter**2 | from astropy.io import fits as pyfits
data=pyfits.getdata('j94f05bgq_flt.fits',ext=1)
star0=data[403:412,423:432]
star=data[396:432,3522:3558]
In [53]: findobj.roundness(star0)
Out[53]: 0.99401955054989544
In [54]: findobj.roundness(star)
Out[54]: 0.83091919980660645 |
16,043 | def _namify_arguments(mapping):
result = []
for name, parameter in mapping.iteritems():
parameter.name = name
result.append(parameter)
return result | Ensure that a mapping of names to parameters has the parameters set to the
correct name. |
16,044 | def copy_path_to_clipboard(i):
import os
p=os.getcwd()
if i.get('add_quotes','')=='yes':
p='"'+p+'"'
rx=copy_to_clipboard({:p})
return {'return':0} | Input: {
(add_quotes) - if 'yes', add quotes
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
} |
16,045 | def has_delete_permission(self, request, obj=None):
if settings.TREE_EDITOR_OBJECT_PERMISSIONS:
opts = self.opts
r = request.user.has_perm(opts.app_label + '.' + opts.get_delete_permission(), obj)
else:
r = True
return r and super(TreeEditor, self).has_delete_permission(request, obj) | Implement a lookup for object level permissions. Basically the same as
ModelAdmin.has_delete_permission, but also passes the obj parameter in. |
16,046 | def get_graphviz_dirtree(self, engine="automatic", **kwargs):
if engine == "automatic":
engine = "fdp"
return Dirviz(self.workdir).get_cluster_graph(engine=engine, **kwargs) | Generate directory graph in the DOT language. The graph show the files and directories
in the node workdir.
Returns: graphviz.Digraph <https://graphviz.readthedocs.io/en/stable/api.html#digraph> |
16,047 | def list_vm_images_sub(access_token, subscription_id):
endpoint = .join([get_rm_endpoint(),
, subscription_id,
,
, COMP_API])
return do_get_next(endpoint, access_token) | List VM images in a subscription.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
Returns:
HTTP response. JSON body of a list of VM images. |
16,048 | def build_and_start(query, directory):
Async(target=grep, args=[query, directory]).start() | This function will create and then start a new Async task with the
default callbacks argument defined in the decorator. |
16,049 | def _get_pos(self):
if self._canvas.height >= self._max_height:
return 0
else:
return self._canvas.start_line / (self._max_height - self._canvas.height + 1) | Get current position for scroll bar. |
16,050 | def proto_01_13_steps025dual(abf=exampleABF):
swhlab.ap.detect(abf)
standard_groupingForInj(abf,200)
for feature in [,]:
swhlab.ap.plot_values(abf,feature,continuous=False)
swhlab.plot.save(abf,tag=+feature)
f1=swhlab.ap.getAvgBySweep(abf,,None,1)
f2=swhlab.ap.getAvgBySweep(abf,,1,None)
f1=np.nan_to_num(f1)
f2=np.nan_to_num(f2)
Xs=abf.clampValues(abf.dataX[int(abf.protoSeqX[1]+.01)])
swhlab.plot.new(abf,title="gain function",xlabel="command current (pA)",
ylabel="average inst. freq. (Hz)")
pylab.plot(Xs,f1,,ms=20,alpha=.5,label="step 1",color=)
pylab.plot(Xs,f2,,ms=20,alpha=.5,label="step 2",color=)
pylab.legend(loc=)
pylab.axis([Xs[0],Xs[-1],None,None])
swhlab.plot.save(abf,tag=) | IC steps. See how hyperpol. step affects things. |
16,051 | def discard_between(
self,
min_rank=None,
max_rank=None,
min_score=None,
max_score=None,
):
no_ranks = (min_rank is None) and (max_rank is None)
no_scores = (min_score is None) and (max_score is None)
if no_ranks and no_scores:
return
if no_ranks and (not no_scores):
return self.discard_by_score(min_score, max_score)
if (not no_ranks) and no_scores:
return self.discard_by_rank(min_rank, max_rank)
with self.redis.pipeline() as pipe:
self.discard_by_score(min_score, max_score, pipe)
self.discard_by_rank(min_rank, max_rank, pipe)
pipe.execute() | Remove members whose ranking is between *min_rank* and *max_rank*
OR whose score is between *min_score* and *max_score* (both ranges
inclusive). If no bounds are specified, no members will be removed. |
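The two removal paths map onto Redis sorted-set commands; a hedged redis-py sketch of what the `discard_by_rank` / `discard_by_score` helpers presumably issue (key name illustrative, requires a running Redis):
import redis

r = redis.Redis()
key = "leaderboard"
# drop members ranked 0..9 (lowest scores first), inclusive
r.zremrangebyrank(key, 0, 9)
# drop members whose score falls within [0, 100], inclusive
r.zremrangebyscore(key, 0, 100)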
16,052 | def handle_register_or_upload(post_data, files, user, repository):
name = post_data.get('name')
version = post_data.get('version')
if settings.LOCALSHOP_VERSIONING_TYPE:
scheme = get_versio_versioning_scheme(settings.LOCALSHOP_VERSIONING_TYPE)
try:
Version(version, scheme=scheme)
except AttributeError:
response = HttpResponseBadRequest(
reason="Invalid version supplied {!r} for scheme {!r}.".format(
version, settings.LOCALSHOP_VERSIONING_TYPE))
return response
if not name or not version:
logger.info("Missing name or version for package")
return HttpResponseBadRequest()
try:
condition = Q()
for search_name in get_search_names(name):
condition |= Q(name__iexact=search_name)
package = repository.packages.get(condition)
release_file = form_file.save(commit=False)
release_file.save()
return HttpResponse() | Process a `register` or `upload` command issued via distutils.
This method is called with the authenticated user. |
16,053 | def _hexify(data, chunksize=None):
if chunksize is None:
chunksize = _hex_chunksize
hex = data.encode()
l = len(hex)
if l > chunksize:
chunks = []
i = 0
while i < l:
chunks.append(hex[i : i + chunksize])
i += chunksize
hex = ' '.join(chunks)
return hex | Convert a binary string into its hex encoding, broken up into chunks
of I{chunksize} characters separated by a space.
@param data: the binary string
@type data: string
@param chunksize: the chunk size. Default is L{dns.rdata._hex_chunksize}
@rtype: string |
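A Python 3 sketch of the same chunking with binascii (the original presumably relied on the Python 2 `hex_codec` in `data.encode()`):
import binascii

def hexify(data: bytes, chunksize: int = 32) -> str:
    # hex-encode, then break into space-separated chunks of `chunksize` characters
    hex_str = binascii.hexlify(data).decode("ascii")
    return " ".join(hex_str[i:i + chunksize] for i in range(0, len(hex_str), chunksize))

print(hexify(b"hello world, hello world", chunksize=16))
# 68656c6c6f20776f 726c642c2068656c 6c6f20776f726c64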
16,054 | def _update_scsi_devices(scsis_old_new, current_disks):
device_config_specs = []
if scsis_old_new:
devs = [scsi[][] for scsi in scsis_old_new]
log.trace(, devs)
for item in scsis_old_new:
next_scsi = item[]
current_scsi = item[]
difference = recursive_diff(current_scsi, next_scsi)
difference.ignore_unset_values = False
if difference.changed():
log.trace(
,
current_scsi[],
current_scsi[],
next_scsi[],
next_scsi[])
if next_scsi[] != current_scsi[]:
device_config_specs.append(
_delete_device(current_scsi[]))
device_config_specs.append(_apply_scsi_controller(
current_scsi[],
next_scsi[],
next_scsi[],
current_scsi[],
current_scsi[], ))
disks_to_update = []
for disk_key in current_scsi[]:
disk_objects = \
[disk[] for disk in current_disks]
disks_to_update.append(
_get_device_by_key(disk_objects, disk_key))
for current_disk in disks_to_update:
disk_spec = vim.vm.device.VirtualDeviceSpec()
disk_spec.device = current_disk
disk_spec.operation =
device_config_specs.append(disk_spec)
else:
device_config_specs.append(_apply_scsi_controller(
current_scsi[],
current_scsi[],
next_scsi[],
current_scsi[],
current_scsi[], ))
return device_config_specs | Returns a list of vim.vm.device.VirtualDeviceSpec specifying the SCSI
properties, taking as input the old and new configs defined in a dictionary.
scsi_diffs
List of old and new scsi properties |
16,055 | def _get_subcats(self, recurse=False):
if recurse:
return sorted([Category(e) for e in self._subcats_recursive],
key=lambda c: c.sort_breadcrumb)
parts = len(self.path.split()) + 1 if self.path else 1
subcats = [c.split()[:parts] for c in self._subcats_recursive]
subcats = {.join(c) for c in subcats}
return sorted([Category(c) for c in subcats], key=lambda c: c.sort_name or c.name) | Get the subcategories of this category
recurse -- whether to include their subcategories as well |
16,056 | def reindex_model_on_save(sender, document, **kwargs):
if current_app.config.get():
reindex.delay(document) | (Re/Un)Index Mongo document on post_save |
16,057 | def dataframe_to_smp(dataframe,smp_filename,name_col="name",
datetime_col="datetime",value_col="value",
datetime_format="dd/mm/yyyy",
value_format="{0:15.6E}",
max_name_len=12):
formatters = {"name":lambda x:"{0:<20s}".format(str(x)[:max_name_len]),
"value":lambda x:value_format.format(x)}
if datetime_format.lower().startswith("d"):
dt_fmt = "%d/%m/%Y %H:%M:%S"
elif datetime_format.lower().startswith("m"):
dt_fmt = "%m/%d/%Y %H:%M:%S"
else:
raise Exception("unrecognized datetime_format: " +\
"{0}".format(str(datetime_format)))
for col in [name_col,datetime_col,value_col]:
assert col in dataframe.columns
dataframe.loc[:,"datetime_str"] = dataframe.loc[:,"datetime"].\
apply(lambda x:x.strftime(dt_fmt))
if isinstance(smp_filename,str):
smp_filename = open(smp_filename,'w')
s = dataframe.loc[:,[name_col,"datetime_str",value_col]].\
to_string(col_space=0,
formatters=formatters,
justify=None,
header=False,
index=False)
for ss in s.split('\n'):
smp_filename.write("{0:<s}\n".format(ss.strip()))
dataframe.pop("datetime_str") | write a dataframe as an smp file
Parameters
----------
dataframe : pandas.DataFrame
smp_filename : str
smp file to write
name_col: str
the column in the dataframe the marks the site namne
datetime_col: str
the column in the dataframe that is a datetime instance
value_col: str
the column in the dataframe that is the values
datetime_format: str
either 'dd/mm/yyyy' or 'mm/dd/yyy'
value_format: str
a python float-compatible format |
16,058 | def copyKeyMultipart(srcBucketName, srcKeyName, srcKeyVersion, dstBucketName, dstKeyName, sseAlgorithm=None, sseKey=None,
copySourceSseAlgorithm=None, copySourceSseKey=None):
s3 = boto3.resource('s3')
dstBucket = s3.Bucket(oldstr(dstBucketName))
dstObject = dstBucket.Object(oldstr(dstKeyName))
copySource = {: oldstr(srcBucketName), : oldstr(srcKeyName)}
if srcKeyVersion is not None:
copySource[] = oldstr(srcKeyVersion)
destEncryptionArgs = {}
if sseKey is not None:
destEncryptionArgs.update({: sseAlgorithm,
: sseKey})
copyEncryptionArgs = {}
if copySourceSseKey is not None:
copyEncryptionArgs.update({: copySourceSseAlgorithm,
: copySourceSseKey})
copyEncryptionArgs.update(destEncryptionArgs)
dstObject.copy(copySource, ExtraArgs=copyEncryptionArgs)
| Copies a key from a source key to a destination key in multiple parts. Note that if the
destination key exists it will be overwritten implicitly, and if it does not exist a new
key will be created. If the destination bucket does not exist an error will be raised.
:param str srcBucketName: The name of the bucket to be copied from.
:param str srcKeyName: The name of the key to be copied from.
:param str srcKeyVersion: The version of the key to be copied from.
:param str dstBucketName: The name of the destination bucket for the copy.
:param str dstKeyName: The name of the destination key that will be created or overwritten.
:param str sseAlgorithm: Server-side encryption algorithm for the destination.
:param str sseKey: Server-side encryption key for the destination.
:param str copySourceSseAlgorithm: Server-side encryption algorithm for the source.
:param str copySourceSseKey: Server-side encryption key for the source.
:rtype: str
:return: The version of the copied file (or None if versioning is not enabled for dstBucket). |
16,059 | def _set_tieBreaking(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u: {: 2}, u: {: 0}, u: {: 1}},), is_leaf=True, yang_name="tieBreaking", rest_name="tieBreaking", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "brocade-mpls:tie-breaking",
: ,
})
self.__tieBreaking = t
if hasattr(self, ):
self._set() | Setter method for tieBreaking, mapped from YANG variable /brocade_mpls_rpc/show_mpls_te_path/input/tieBreaking (tie-breaking)
If this variable is read-only (config: false) in the
source YANG file, then _set_tieBreaking is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_tieBreaking() directly.
YANG Description: Tie breaking mode for CSPF when multiple paths to destination exists |
16,060 | def fit(self, Xs=None, ys=None, Xt=None, yt=None):
if check_params(Xs=Xs, Xt=Xt):
self.cost_ = dist(Xs, Xt, metric=self.metric)
self.cost_ = cost_normalization(self.cost_, self.norm)
if (ys is not None) and (yt is not None):
if self.limit_max != np.infty:
self.limit_max = self.limit_max * np.max(self.cost_)
classes = [c for c in np.unique(ys) if c != -1]
for c in classes:
idx_s = np.where((ys != c) & (ys != -1))
idx_t = np.where(yt == c)
for j in idx_t[0]:
self.cost_[idx_s[0], j] = self.limit_max
self.mu_s = self.distribution_estimation(Xs)
self.mu_t = self.distribution_estimation(Xt)
self.xs_ = Xs
self.xt_ = Xt
return self | Build a coupling matrix from source and target sets of samples
(Xs, ys) and (Xt, yt)
Parameters
----------
Xs : array-like, shape (n_source_samples, n_features)
The training input samples.
ys : array-like, shape (n_source_samples,)
The class labels
Xt : array-like, shape (n_target_samples, n_features)
The training input samples.
yt : array-like, shape (n_target_samples,)
The class labels. If some target samples are unlabeled, fill the
yt's elements with -1.
Warning: Note that, due to this convention -1 cannot be used as a
class label
Returns
-------
self : object
Returns self. |
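A hedged usage sketch with the POT (Python Optimal Transport) library, whose transport classes expose this fit/transform API; class and parameter names may vary between versions.
import numpy as np
import ot.da

rng = np.random.RandomState(0)
Xs = rng.randn(50, 2)        # source samples
Xt = rng.randn(60, 2) + 2.0  # shifted target samples

transport = ot.da.SinkhornTransport(reg_e=1e-1)
transport.fit(Xs=Xs, Xt=Xt)              # builds the cost matrix and coupling
Xs_mapped = transport.transform(Xs=Xs)   # barycentric mapping onto the target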
16,061 | def process_json(filename):
logger.debug(, filename)
doecode_json = json.load(open(filename))
for record in doecode_json[]:
yield record | Converts a DOE CODE .json file into DOE CODE projects
Yields DOE CODE records from a DOE CODE .json file |
16,062 | def verify(password_hash, password):
ensure(len(password_hash) == PWHASH_SIZE,
"The password hash must be exactly %s bytes long" %
nacl.bindings.crypto_pwhash_scryptsalsa208sha256_STRBYTES,
raising=exc.ValueError)
return nacl.bindings.crypto_pwhash_scryptsalsa208sha256_str_verify(
password_hash, password
) | Takes the output of scryptsalsa208sha256 and compares it against
a user provided password to see if they are the same
:param password_hash: bytes
:param password: bytes
:rtype: boolean
.. versionadded:: 1.2 |
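The hash-then-verify pattern this wraps, shown as a stand-in sketch with the standard library's scrypt (not the libsodium string format used above):
import hashlib
import hmac
import os

def hash_password(password: bytes) -> bytes:
    salt = os.urandom(16)
    digest = hashlib.scrypt(password, salt=salt, n=2**14, r=8, p=1)
    return salt + digest  # store salt alongside the digest

def verify_password(stored: bytes, password: bytes) -> bool:
    salt, digest = stored[:16], stored[16:]
    candidate = hashlib.scrypt(password, salt=salt, n=2**14, r=8, p=1)
    return hmac.compare_digest(candidate, digest)

stored = hash_password(b"correct horse")
print(verify_password(stored, b"correct horse"))  # True
print(verify_password(stored, b"wrong"))          # False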
16,063 | def ogr2ogr(src, dst, options):
out = gdal.VectorTranslate(dst, src, options=gdal.VectorTranslateOptions(**options))
out = None | a simple wrapper for gdal.VectorTranslate aka `ogr2ogr <https://www.gdal.org/ogr2ogr.html>`_
Parameters
----------
src: str or :osgeo:class:`ogr.DataSource`
the input data set
dst: str
the output data set
options: dict
additional parameters passed to gdal.VectorTranslate;
see `gdal.VectorTranslateOptions <http://gdal.org/python/osgeo.gdal-module.html#VectorTranslateOptions>`_
Returns
------- |
16,064 | def create_waveform_generator(variable_params, data,
recalibration=None, gates=None,
**static_params):
try:
approximant = static_params['approximant']
except KeyError:
raise ValueError("no approximant provided in the static args")
generator_function = generator.select_waveform_generator(approximant)
delta_f = None
for d in data.values():
if delta_f is None:
delta_f = d.delta_f
delta_t = d.delta_t
start_time = d.start_time
else:
if not all([d.delta_f == delta_f, d.delta_t == delta_t,
d.start_time == start_time]):
raise ValueError("data must all have the same delta_t, "
"delta_f, and start_time")
waveform_generator = generator.FDomainDetFrameGenerator(
generator_function, epoch=start_time,
variable_args=variable_params, detectors=list(data.keys()),
delta_f=delta_f, delta_t=delta_t,
recalib=recalibration, gates=gates,
**static_params)
return waveform_generator | Creates a waveform generator for use with a model.
Parameters
----------
variable_params : list of str
The names of the parameters varied.
data : dict
Dictionary mapping detector names to either a
:py:class:`<pycbc.types.TimeSeries TimeSeries>` or
:py:class:`<pycbc.types.FrequencySeries FrequencySeries>`.
recalibration : dict, optional
Dictionary mapping detector names to
:py:class:`<pycbc.calibration.Recalibrate>` instances for
recalibrating data.
gates : dict of tuples, optional
Dictionary of detectors -> tuples of specifying gate times. The
sort of thing returned by :py:func:`pycbc.gate.gates_from_cli`.
Returns
-------
pycbc.waveform.FDomainDetFrameGenerator
A waveform generator for frequency domain generation. |
16,065 | def network_profile_name_list(self, obj):
profile_list = pointer(WLAN_PROFILE_INFO_LIST())
self._wlan_get_profile_list(self._handle,
byref(obj[]),
byref(profile_list))
profiles = cast(profile_list.contents.ProfileInfo,
POINTER(WLAN_PROFILE_INFO))
profile_name_list = []
for i in range(profile_list.contents.dwNumberOfItems):
profile_name =
for j in range(len(profiles[i].strProfileName)):
profile_name += profiles[i].strProfileName[j]
profile_name_list.append(profile_name)
return profile_name_list | Get AP profile names. |
16,066 | def _all_recall_native_type(self, data, ptitem, prefix):
typestr = self._all_get_from_attrs(ptitem, prefix + HDF5StorageService.SCALAR_TYPE)
colltype = self._all_get_from_attrs(ptitem, prefix + HDF5StorageService.COLL_TYPE)
type_changed = False
if colltype == HDF5StorageService.COLL_SCALAR:
if isinstance(data, np.ndarray):
data = np.array([data])[0]
type_changed = True
if not typestr is None:
if typestr != type(data).__name__:
if typestr == str.__name__:
data = data.decode(self._encoding)
else:
try:
data = pypetconstants.PARAMETERTYPEDICT[typestr](data)
except KeyError:
data = pypetconstants.COMPATPARAMETERTYPEDICT[typestr](data)
type_changed = True
elif (colltype == HDF5StorageService.COLL_TUPLE or
colltype == HDF5StorageService.COLL_LIST):
if type(data) is not list and type(data) is not tuple:
type_changed = True
data = list(data)
if len(data) > 0:
first_item = data[0]
if not typestr == type(first_item).__name__:
if not isinstance(data, list):
data = list(data)
for idx, item in enumerate(data):
if typestr == str.__name__:
data[idx] = data[idx].decode(self._encoding)
else:
try:
data[idx] = pypetconstants.PARAMETERTYPEDICT[typestr](item)
except KeyError:
data[idx] = pypetconstants.COMPATPARAMETERTYPEDICT[typestr](item)
type_changed = True
if colltype == HDF5StorageService.COLL_TUPLE:
if type(data) is not tuple:
data = tuple(data)
type_changed = True
elif colltype == HDF5StorageService.COLL_EMPTY_DICT:
data = {}
type_changed = True
elif isinstance(data, np.ndarray):
if typestr == str.__name__:
data = np.core.defchararray.decode(data, self._encoding)
type_changed = True
if colltype == HDF5StorageService.COLL_MATRIX:
data = np.matrix(data)
type_changed = True
return data, type_changed | Checks if loaded data has the type it was stored in. If not converts it.
:param data: Data item to be checked and converted
:param ptitem: HDf5 Node or Leaf from where data was loaded
:param prefix: Prefix for recalling the data type from the hdf5 node attributes
:return:
Tuple, first item is the (converted) `data` item, second boolean whether
item was converted or not. |
16,067 | def DictReader(ltsvfile, labels=None, dict_type=dict):
for rec in reader(ltsvfile, labels):
yield dict_type(rec) | Make LTSV Reader for reading selected labels.
:param ltsvfile: iterable of lines.
:param labels: sequence of labels.
:return: generator of record in {label: value, ...} form. |
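A self-contained sketch of the LTSV record format the reader yields: one record per line, tab-separated `label:value` fields.
lines = [
    "host:127.0.0.1\treq:GET /\tstatus:200",
    "host:10.0.0.2\treq:GET /about\tstatus:404",
]

def parse_ltsv(lines):
    for line in lines:
        # split fields on tabs, then each field on the first colon only
        yield dict(field.split(":", 1) for field in line.rstrip("\n").split("\t"))

for rec in parse_ltsv(lines):
    print(rec["host"], rec["status"])
# 127.0.0.1 200
# 10.0.0.2 404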
16,068 | def get_feature_variable_boolean(self, feature_key, variable_key, user_id, attributes=None):
variable_type = entities.Variable.Type.BOOLEAN
return self._get_feature_variable_for_type(feature_key, variable_key, variable_type, user_id, attributes) | Returns value for a certain boolean variable attached to a feature flag.
Args:
feature_key: Key of the feature whose variable's value is being accessed.
variable_key: Key of the variable whose value is to be accessed.
user_id: ID for user.
attributes: Dict representing user attributes.
Returns:
Boolean value of the variable. None if:
- Feature key is invalid.
- Variable key is invalid.
- Mismatch with type of variable. |
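A hedged usage sketch against the Optimizely Python SDK this method appears to come from; the feature key, variable key and datafile path are illustrative.
from optimizely import optimizely

client = optimizely.Optimizely(datafile=open("datafile.json").read())
show_coupon = client.get_feature_variable_boolean(
    "checkout_flow",       # feature key (illustrative)
    "show_coupon_field",   # boolean variable key (illustrative)
    "user-123",
    attributes={"plan": "premium"},
)
if show_coupon:
    pass  # render the coupon field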
16,069 | def __try_parse_number(self, string):
try:
return int(string)
except ValueError:
try:
return float(string)
except ValueError:
return False | Try to parse a string to a number, else return False. |
16,070 | def block_compute(x_start, x_stop,
y_start, y_stop,
z_start, z_stop,
origin=(0, 0, 0),
block_size=(512, 512, 16)):
x_bounds = range(origin[0], x_stop + block_size[0], block_size[0])
x_bounds = [x for x in x_bounds if (x > x_start and x < x_stop)]
if len(x_bounds) is 0:
x_slices = [(x_start, x_stop)]
else:
x_slices = []
for start_x in x_bounds[:-1]:
x_slices.append((start_x, start_x + block_size[0]))
x_slices.append((x_start, x_bounds[0]))
x_slices.append((x_bounds[-1], x_stop))
y_bounds = range(origin[1], y_stop + block_size[1], block_size[1])
y_bounds = [y for y in y_bounds if (y > y_start and y < y_stop)]
if len(y_bounds) is 0:
y_slices = [(y_start, y_stop)]
else:
y_slices = []
for start_y in y_bounds[:-1]:
y_slices.append((start_y, start_y + block_size[1]))
y_slices.append((y_start, y_bounds[0]))
y_slices.append((y_bounds[-1], y_stop))
z_bounds = range(origin[2], z_stop + block_size[2], block_size[2])
z_bounds = [z for z in z_bounds if (z > z_start and z < z_stop)]
if len(z_bounds) is 0:
z_slices = [(z_start, z_stop)]
else:
z_slices = []
for start_z in z_bounds[:-1]:
z_slices.append((start_z, start_z + block_size[2]))
z_slices.append((z_start, z_bounds[0]))
z_slices.append((z_bounds[-1], z_stop))
chunks = []
for x in x_slices:
for y in y_slices:
for z in z_slices:
chunks.append((x, y, z))
return chunks | Get bounding box coordinates (in 3D) of small cutouts to request in
order to reconstitute a larger cutout.
Arguments:
x_start (int): The lower bound of dimension x
x_stop (int): The upper bound of dimension x
y_start (int): The lower bound of dimension y
y_stop (int): The upper bound of dimension y
z_start (int): The lower bound of dimension z
z_stop (int): The upper bound of dimension z
Returns:
[((x_start, x_stop), (y_start, y_stop), (z_start, z_stop)), ... ] |
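A stand-alone sketch of the per-axis splitting idea (hypothetical helper, one axis only): interior boundaries snap to multiples of the block size and the two edge pieces absorb the remainder.
def split_axis(start, stop, origin=0, block=512):
    # block boundaries strictly inside (start, stop)
    bounds = [b for b in range(origin, stop + block, block) if start < b < stop]
    if not bounds:
        return [(start, stop)]
    pieces = [(start, bounds[0])]                    # leading edge piece
    pieces += [(b, b + block) for b in bounds[:-1]]  # aligned interior blocks
    pieces.append((bounds[-1], stop))                # trailing edge piece
    return pieces

print(split_axis(100, 1200, block=512))
# [(100, 512), (512, 1024), (1024, 1200)]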
16,071 | def d3logpdf_dlink3(self, link_f, y, Y_metadata=None):
N = y.shape[0]
D = link_f.shape[1]
d3logpdf_dlink3 = np.zeros((N,D))
return d3logpdf_dlink3 | Third order derivative log-likelihood function at y given link(f) w.r.t link(f)
.. math::
\\frac{d^{3} \\ln p(y_{i}|\\lambda(f_{i}))}{d^{3}\\lambda(f)} = 0
:param link_f: latent variables link(f)
:type link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata not used in gaussian
:returns: third derivative of log likelihood evaluated at points link(f)
:rtype: Nx1 array |
16,072 | def loc(lexer: Lexer, start_token: Token) -> Optional[Location]:
if not lexer.no_location:
end_token = lexer.last_token
source = lexer.source
return Location(
start_token.start, end_token.end, start_token, end_token, source
)
return None | Return a location object.
Used to identify the place in the source that created a given parsed object. |
16,073 | def union(self, sig: Scope) -> Scope:
new = Scope(sig=self._hsig.values(), state=self.state)
new |= sig
return new | Create a new Set produced by the union of 2 Sets |
16,074 | def filer(filelist):
filedict = dict()
for seqfile in filelist:
strainname = os.path.splitext(os.path.basename(seqfile))[0]
filedict[strainname] = seqfile
return filedict | Helper script that creates a dictionary of strain name: /sequencepath/strain_name.extension
:param filelist: list of files to parse
:return filedict: dictionary of strain name: /sequencepath/strain_name.extension |
16,075 | def initialize_tracer(self, io_loop=None):
with Config._initialized_lock:
if Config._initialized:
logger.warn()
return
Config._initialized = True
tracer = self.new_tracer(io_loop)
self._initialize_global_tracer(tracer=tracer)
return tracer | Initialize Jaeger Tracer based on the passed `jaeger_client.Config`.
Save it to `opentracing.tracer` global variable.
Only the first call to this method has any effect. |
16,076 | def runDia(diagram):
ifname = .format(diagram)
ofname = .format(diagram)
cmd = .format(ofname, ifname)
print(.format(cmd))
subprocess.call(cmd, shell=True)
return True | Generate the diagrams using Dia. |
16,077 | def edit(self, obj_id, parameters, create_if_not_exists=False):
if create_if_not_exists:
response = self._client.session.put(
.format(
url=self.endpoint_url, id=obj_id
),
data=parameters
)
else:
response = self._client.session.patch(
.format(
url=self.endpoint_url, id=obj_id
),
data=parameters
)
return self.process_response(response) | Edit an item with option to create if it doesn't exist
:param obj_id: int
:param create_if_not_exists: bool
:param parameters: dict
:return: dict|str |
16,078 | def ns(self):
ret = libxml2mod.xmlNodeGetNs(self._o)
if ret is None:return None
__tmp = xmlNs(_obj=ret)
return __tmp | Get the namespace of a node |
16,079 | def minOpar(self,dangle,tdisrupt=None,_return_raw=False):
if tdisrupt is None: tdisrupt= self._tdisrupt
Oparb= (dangle-self._kick_interpdOpar_poly.x[:-1])/self._timpact
lowx= ((Oparb-self._kick_interpdOpar_poly.c[-1])\
*(tdisrupt-self._timpact)+Oparb*self._timpact-dangle)\
/((tdisrupt-self._timpact)\
*(1.+self._kick_interpdOpar_poly.c[-2]*self._timpact)\
+self._timpact)
lowx[lowx < 0.]= numpy.inf
lowbindx= numpy.argmin(lowx)
if _return_raw:
return (lowbindx,lowx[lowbindx])
else:
return Oparb[lowbindx]-lowx[lowbindx] | NAME:
minOpar
PURPOSE:
return the approximate minimum parallel frequency at a given angle
INPUT:
dangle - parallel angle
OUTPUT:
minimum frequency that gets to this parallel angle
HISTORY:
2015-12-28 - Written - Bovy (UofT) |
16,080 | def request(self, method, url, erc, **kwargs):
abs_url = self.abs_url(url)
kwargs.setdefault('timeout', self.single_request_timeout)
while True:
response = self._req_session.request(method, abs_url, **kwargs)
try:
check_response_code(response, erc)
except RateLimitError as e:
if self.wait_on_rate_limit:
warnings.warn(RateLimitWarning(response))
time.sleep(e.retry_after)
continue
else:
raise
else:
return response | Abstract base method for making requests to the Webex Teams APIs.
This base method:
* Expands the API endpoint URL to an absolute URL
* Makes the actual HTTP request to the API endpoint
* Provides support for Webex Teams rate-limiting
* Inspects response codes and raises exceptions as appropriate
Args:
method(basestring): The request-method type ('GET', 'POST', etc.).
url(basestring): The URL of the API endpoint to be called.
erc(int): The expected response code that should be returned by the
Webex Teams API endpoint to indicate success.
**kwargs: Passed on to the requests package.
Raises:
ApiError: If anything other than the expected response code is
returned by the Webex Teams API endpoint. |
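
The retry loop above follows a common rate-limit pattern; the sketch below shows the same idea with plain requests against a generic endpoint (the 429 status code and Retry-After header are generic HTTP conventions, not details specific to this SDK):

    import time
    import requests

    def get_with_rate_limit(url, max_retries=5):
        """Retry a GET request while the server answers 429 Too Many Requests."""
        for _ in range(max_retries):
            response = requests.get(url)
            if response.status_code != 429:
                return response
            # Honor the server-suggested back-off, defaulting to 1 second.
            time.sleep(int(response.headers.get('Retry-After', 1)))
        raise RuntimeError('rate limit retries exhausted')
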
16,081 | def compose(*funcs):
if len(funcs) == 2: f0,f1=funcs; return lambda *a,**kw: f0(f1(*a,**kw))
elif len(funcs) == 3: f0,f1,f2=funcs; return lambda *a,**kw: f0(f1(f2(*a,**kw)))
elif len(funcs) == 0: return lambda x:x
elif len(funcs) == 1: return funcs[0]
else:
def composed(*args,**kwargs):
y = funcs[-1](*args,**kwargs)
for f in funcs[-2::-1]: y = f(y)
return y
return composed | Compose `funcs` to a single function.
>>> compose(operator.abs, operator.add)(-2,-3)
5
>>> compose()('nada')
'nada'
>>> compose(sorted, set, partial(filter, None))(range(3)[::-1]*2)
[1, 2] |
16,082 | def _finalize(self):
container = {}
try:
for name in self._traces:
container[name] = self._traces[name]._trace
container['_state_'] = self._state_
file = open(self.filename, )
std_pickle.dump(container, file)
file.close()
except AttributeError:
pass | Dump traces using cPickle. |
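
A minimal standalone sketch of the same dump-and-restore step with the standard pickle module (the trace names and values are illustrative, not taken from the source):

    import pickle

    traces = {'mu': [0.1, 0.2, 0.3], '_state_': {'sampler': 'MCMC'}}
    with open('traces.pickle', 'wb') as fh:
        pickle.dump(traces, fh)

    with open('traces.pickle', 'rb') as fh:
        restored = pickle.load(fh)
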
16,083 | def generate(ast_tree: ast.Tree, model_name: str):
component_ref = ast.ComponentRef.from_string(model_name)
ast_tree_new = copy.deepcopy(ast_tree)
ast_walker = TreeWalker()
flat_tree = flatten(ast_tree_new, component_ref)
sympy_gen = SympyGenerator()
ast_walker.walk(sympy_gen, flat_tree)
return sympy_gen.src[flat_tree] | :param ast_tree: AST to generate from
:param model_name: class to generate
:return: sympy source code for model |
16,084 | def pop_object(self, element):
redacted_text = "Redacted. Object contained TLP value higher than allowed."
element[] =
element[] =
element[] =
element[] = []
element[] = None
element[] = redacted_text
element[] = element[]
element[] =
element[] =
element[] = redacted_text
element[] = []
element[][] =
element[][] =
element[][] = redacted_text
element[][] = redacted_text
return element | Pop the object element if the object contains a higher TLP than allowed. |
16,085 | async def find_deleted(self, seq_set: SequenceSet,
selected: SelectedMailbox) -> Sequence[int]:
session_flags = selected.session_flags
return [msg.uid async for _, msg in self.find(seq_set, selected)
if Deleted in msg.get_flags(session_flags)] | Return all the active message UIDs that have the ``\\Deleted`` flag.
Args:
seq_set: The sequence set of the possible messages.
selected: The selected mailbox session. |
16,086 | def summary(self):
if self.hasSummary:
return GeneralizedLinearRegressionTrainingSummary(
super(GeneralizedLinearRegressionModel, self).summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__) | Gets summary (e.g. residuals, deviance, pValues) of model on
training set. An exception is thrown if
`trainingSummary is None`. |
16,087 | def load_commodities(self):
if isinstance(self.fee, Amount):
self.fee = Amount("{0:.8f} {1}".format(self.fee.to_double(), self.currency))
else:
self.fee = Amount("{0:.8f} {1}".format(self.fee, self.currency))
if isinstance(self.amount, Amount):
self.amount = Amount("{0:.8f} {1}".format(self.amount.to_double(), self.currency))
else:
self.amount = Amount("{0:.8f} {1}".format(self.amount, self.currency)) | Load the commodities for Amounts in this object. |
16,088 | def messageRemote(self, cmdObj, consequence=None, **args):
messageBox = cmdObj.makeArguments(args, self)
messageBox[COMMAND] = cmdObj.commandName
messageData = messageBox.serialize()
self.queue.queueMessage(self.sender, self.target,
Value(AMP_MESSAGE_TYPE, messageData),
consequence) | Send a message to the peer identified by the target, via the
given L{Command} object and arguments.
@param cmdObj: a L{twisted.protocols.amp.Command}, whose serialized
form will be the message.
@param consequence: an L{IDeliveryConsequence} provider which will
handle the result of this message (or None, if no response processing
is desired).
@param args: keyword arguments which match the C{cmdObj}'s arguments
list.
@return: L{None} |
16,089 | def get_compression_filter(byte_counts):
assert isinstance(byte_counts, numbers.Integral) and byte_counts > 0
if 2 * byte_counts > 1000 * memory()[]:
try:
FILTERS = tables.filters(complevel = 5, complib = ,
shuffle = True, least_significant_digit = 6)
except tables.FiltersWarning:
FILTERS = tables.filters(complevel = 5, complib = ,
shuffle = True, least_significant_digit = 6)
else:
FILTERS = None
return FILTERS | Determine whether or not to use compression on the array stored in
a hierarchical data format, and which compression library to use for that purpose.
Compression reduces the HDF5 file size and also helps improve I/O efficiency
for large datasets.
Parameters
----------
byte_counts : int
Returns
-------
FILTERS : instance of the tables.Filters class |
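
For reference, a hedged sketch of building and applying such a filter with PyTables; the 'zlib' compression library and the random array are assumptions (the original's complib argument was stripped):

    import numpy as np
    import tables

    filters = tables.Filters(complevel=5, complib='zlib',
                             shuffle=True, least_significant_digit=6)
    data = np.random.rand(1000, 1000)
    with tables.open_file('compressed.h5', mode='w') as h5file:
        # Store the array as a chunked, compressed CArray.
        h5file.create_carray(h5file.root, 'data', obj=data, filters=filters)
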
16,090 | def to_filespec(cls, args, root=, exclude=None):
result = {'globs': [os.path.join(root, arg) for arg in args]}
if exclude:
result['exclude'] = []
for exclude in exclude:
if hasattr(exclude, 'filespec'):
result['exclude'].append(exclude.filespec)
else:
result['exclude'].append({'globs': [os.path.join(root, x) for x in exclude]})
return result | Return a dict representation of this glob list, relative to the buildroot.
The format of the dict is {'globs': [ 'list', 'of' , 'strings' ]
(optional) 'exclude' : [{'globs' : ... }, ...] }
The globs are in zglobs format. |
16,091 | def split_iter(src, sep=None, maxsplit=None):
if not is_iterable(src):
raise TypeError()
if maxsplit is not None:
maxsplit = int(maxsplit)
if maxsplit == 0:
yield [src]
return
if callable(sep):
sep_func = sep
elif not is_scalar(sep):
sep = frozenset(sep)
sep_func = lambda x: x in sep
else:
sep_func = lambda x: x == sep
cur_group = []
split_count = 0
for s in src:
if maxsplit is not None and split_count >= maxsplit:
sep_func = lambda x: False
if sep_func(s):
if sep is None and not cur_group:
continue
split_count += 1
yield cur_group
cur_group = []
else:
cur_group.append(s)
if cur_group or sep is not None:
yield cur_group
return | Splits an iterable based on a separator, *sep*, a max of
*maxsplit* times (no max by default). *sep* can be:
* a single value
* an iterable of separators
* a single-argument callable that returns True when a separator is
encountered
``split_iter()`` yields lists of non-separator values. A separator will
never appear in the output.
>>> list(split_iter(['hi', 'hello', None, None, 'sup', None, 'soap', None]))
[['hi', 'hello'], ['sup'], ['soap']]
Note that ``split_iter`` is based on :func:`str.split`, so if
*sep* is ``None``, ``split()`` **groups** separators. If empty lists
are desired between two contiguous ``None`` values, simply use
``sep=[None]``:
>>> list(split_iter(['hi', 'hello', None, None, 'sup', None]))
[['hi', 'hello'], ['sup']]
>>> list(split_iter(['hi', 'hello', None, None, 'sup', None], sep=[None]))
[['hi', 'hello'], [], ['sup'], []]
Using a callable separator:
>>> falsy_sep = lambda x: not x
>>> list(split_iter(['hi', 'hello', None, '', 'sup', False], falsy_sep))
[['hi', 'hello'], [], ['sup'], []]
See :func:`split` for a list-returning version. |
16,092 | def _try_redeem_disposable_app(file, client):
redeemedClient = client.redeem_onetime_code(None)
if redeemedClient is None:
return None
else:
return _BlotreDisposableApp(file,
redeemedClient.client,
creds = redeemedClient.creds,
config = redeemedClient.config) | Attempt to redeem a one-time code registered on the client. |
16,093 | def daily_pr_intensity(pr, thresh=, freq=):
t = utils.convert_units_to(thresh, pr, )
pr_wd = xr.where(pr >= t, pr, 0)
pr_wd.attrs['units'] = pr.units
s = pr_wd.resample(time=freq).sum(dim='time', keep_attrs=True)
sd = utils.pint_multiply(s, 1 * units.day, )
wd = wetdays(pr, thresh=thresh, freq=freq)
return sd / wd | Average daily precipitation intensity
Return the average precipitation over wet days.
Parameters
----------
pr : xarray.DataArray
Daily precipitation [mm/d or kg/m²/s]
thresh : str
precipitation value over which a day is considered wet. Default : '1 mm/day'
freq : str, optional
Resampling frequency defining the periods
defined in http://pandas.pydata.org/pandas-docs/stable/timeseries.html#resampling.
Returns
-------
xarray.DataArray
The average precipitation over wet days for each period
Notes
-----
Let :math:`\mathbf{p} = p_0, p_1, \ldots, p_n` be the daily precipitation and :math:`thresh` be the precipitation
threshold defining wet days. Then the daily precipitation intensity is defined as
.. math::
\frac{\sum_{i=0}^n p_i [p_i \geq thresh]}{\sum_{i=0}^n [p_i \geq thresh]}
where :math:`[P]` is 1 if :math:`P` is true, and 0 if false.
Examples
--------
The following would compute for each grid cell of file `pr.day.nc` the average
precipitation fallen over days with precipitation >= 5 mm at seasonal
frequency, ie DJF, MAM, JJA, SON, DJF, etc.:
>>> pr = xr.open_dataset('pr.day.nc')
>>> daily_int = daily_pr_intensity(pr, thresh='5 mm/day', freq="QS-DEC") |
16,094 | def path_exists(value,
allow_empty = False,
**kwargs):
if not value and not allow_empty:
raise errors.EmptyValueError( % value)
elif not value:
return None
value = path(value, force_run = True)
if not os.path.exists(value):
raise errors.PathExistsError( % value)
return value | Validate that ``value`` is a path-like object that exists on the local
filesystem.
:param value: The value to validate.
:param allow_empty: If ``True``, returns :obj:`None <python:None>` if
``value`` is empty. If ``False``, raises a
:class:`EmptyValueError <validator_collection.errors.EmptyValueError>`
if ``value`` is empty. Defaults to ``False``.
:type allow_empty: :class:`bool <python:bool>`
:returns: The file name represented by ``value``.
:rtype: Path-like object / :obj:`None <python:None>`
:raises EmptyValueError: if ``allow_empty`` is ``False`` and ``value``
is empty
:raises NotPathlikeError: if ``value`` is not a path-like object
:raises PathExistsError: if ``value`` does not exist |
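
A hedged usage sketch of this validator; the import layout assumes the validator_collection package exposes validators and errors modules, and '/tmp' is only meaningful on Unix-like systems:

    from validator_collection import validators, errors

    print(validators.path_exists('/tmp'))  # returns the path when it exists

    try:
        validators.path_exists('/no/such/path/hopefully')
    except errors.PathExistsError:
        print('path does not exist')

    # Empty values return None only when explicitly allowed.
    assert validators.path_exists(None, allow_empty=True) is None
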
16,095 | async def _init_writer(self):
async with self._initialization_lock:
if not self.initialized:
self.stream = await aiofiles.open(
file=self.absolute_file_path,
mode=self.mode,
encoding=self.encoding,
loop=self.loop,
) | Open the current base file with the (original) mode and encoding.
Return the resulting stream. |
16,096 | def aligned_covariance(fit, type=):
cov = fit._covariance_matrix(type)
cov /= N.linalg.norm(cov)
return dot(fit.axes,cov) | Covariance rescaled so that eigenvectors sum to 1
and rotated into data coordinates from PCA space |
16,097 | def getRemoteObject(self, busName, objectPath, interfaces=None,
replaceKnownInterfaces=False):
weak_id = (busName, objectPath, interfaces)
need_introspection = False
required_interfaces = set()
if interfaces is not None:
ifl = []
if not isinstance(interfaces, list):
interfaces = [interfaces]
for i in interfaces:
if isinstance(i, interface.DBusInterface):
ifl.append(i)
required_interfaces.add(i.name)
else:
required_interfaces.add(i)
if i in interface.DBusInterface.knownInterfaces:
ifl.append(interface.DBusInterface.knownInterfaces[i])
else:
need_introspection = True
if not need_introspection:
return defer.succeed(
RemoteDBusObject(self, busName, objectPath, ifl)
)
d = self.conn.introspectRemoteObject(
busName,
objectPath,
replaceKnownInterfaces,
)
def ok(ifaces):
missing = required_interfaces - {q.name for q in ifaces}
if missing:
raise error.IntrospectionFailed(
+ .join(missing)
)
prox = RemoteDBusObject(self, busName, objectPath, ifaces)
self._weakProxies[weak_id] = prox
return prox
d.addCallback(ok)
return d | Creates a L{RemoteDBusObject} instance to represent the
specified DBus object. If explicit interfaces are not
supplied, DBus object introspection will be used to obtain
them automatically.
@type busName: C{string}
@param busName: Name of the bus exporting the desired object
@type objectPath: C{string}
@param objectPath: DBus path of the desired object
@type interfaces: None, C{string} or L{interface.DBusInterface} or a
list of C{string}/L{interface.DBusInterface}
@param interfaces: May be None, a single value, or a list of string
interface names and/or instances of
L{interface.DBusInterface}. If None or any of the
specified interface names are unknown, full
introspection will be attempted. If interfaces
consists solely of L{interface.DBusInterface}
instances and/or known interface names, no
introspection will be performed.
@type replaceKnownInterfaces: C{bool}
@param replaceKnownInterfaces: If True (defaults to False), any
interfaces discovered during the
introspection process will override any
previous, cached values.
@rtype: L{twisted.internet.defer.Deferred}
@returns: A Deferred to the L{RemoteDBusObject} instance |
16,098 | def generate_readme(catalog, export_path=None):
if isinstance(catalog, string_types):
catalog_path_or_url = catalog
else:
catalog_path_or_url = None
catalog = read_catalog(catalog)
validation = validate_catalog(catalog)
indicators = generate_catalogs_indicators(
catalog, CENTRAL_CATALOG)[0][0]
with io.open(os.path.join(TEMPLATES_PATH, ), ,
encoding=) as template_file:
readme_template = template_file.read()
not_federated_datasets_list = "\n".join([
"- [{}]({})".format(dataset[0], dataset[1])
for dataset in indicators["datasets_no_federados"]
])
federated_removed_datasets_list = "\n".join([
"- [{}]({})".format(dataset[0], dataset[1])
for dataset in indicators["datasets_federados_eliminados"]
])
federated_datasets_list = "\n".join([
"- [{}]({})".format(dataset[0], dataset[1])
for dataset in indicators["datasets_federados"]
])
non_federated_pct = 1.0 - indicators["datasets_federados_pct"] if \
indicators["datasets_federados_pct"] is not None else \
indicators["datasets_federados_pct"]
content = {
"title": catalog.get("title"),
"publisher_name": traverse_dict(
catalog, ["publisher", "name"]),
"publisher_mbox": traverse_dict(
catalog, ["publisher", "mbox"]),
"catalog_path_or_url": catalog_path_or_url,
"description": catalog.get("description"),
"global_status": validation["status"],
"catalog_status": validation["error"]["catalog"]["status"],
"no_of_datasets": len(catalog["dataset"]),
"no_of_distributions": sum([len(dataset["distribution"]) for
dataset in catalog["dataset"]]),
"federated_datasets": indicators["datasets_federados_cant"],
"not_federated_datasets": indicators["datasets_no_federados_cant"],
"not_federated_datasets_pct": non_federated_pct,
"not_federated_datasets_list": not_federated_datasets_list,
"federated_removed_datasets_list": federated_removed_datasets_list,
"federated_datasets_list": federated_datasets_list,
}
catalog_readme = readme_template.format(**content)
if export_path:
with io.open(export_path, , encoding=) as target:
target.write(catalog_readme)
else:
return catalog_readme | Generates a textual description in Markdown format of a catalog's
general metadata (title, publisher, publication date, and so on),
together with:
- the status of the catalog-level metadata,
- the overall metadata status,
- the number of federated and non-federated datasets,
- details of the non-federated datasets,
- the number of datasets and distributions included
It is used by the daily routine of `libreria-catalogos` to generate
a README with basic information about the maintained catalogs.
Args:
catalog (str or dict): Path to a catalog in any format
(JSON, XLSX) or a Python dictionary.
export_path (str): Path to export the generated text to (in
Markdown format). If specified, the method returns
nothing.
Returns:
str: Text of the generated description. |
16,099 | def identity_factor(self):
return DiscreteFactor(self.variables, self.cardinality, np.ones(self.values.size)) | Returns the identity factor.
Def: The identity factor of a factor has the same scope and cardinality as the original factor,
but the values for all the assignments is 1. When the identity factor is multiplied with
the factor it returns the factor itself.
Returns
-------
DiscreteFactor: The identity factor.
Examples
--------
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi_identity = phi.identity_factor()
>>> phi_identity.variables
['x1', 'x2', 'x3']
>>> phi_identity.values
array([[[ 1., 1.],
[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.],
[ 1., 1.]]]) |