Unnamed: 0 (int64, 0 to 389k) | code (string, lengths 26 to 79.6k) | docstring (string, lengths 1 to 46.9k) |
---|---|---|
18,000 | def safe_print(ustring, errors=, **kwargs):
encoding = sys.stdout.encoding or
if sys.version_info[0] == 3:
print(ustring, **kwargs)
else:
bytestr = ustring.encode(encoding, errors=errors)
print(bytestr, **kwargs) | Safely print a unicode string |
18,001 | def summarize(self, **kwargs):
import pandas as pd
colnames = ["fname", "wall_time", "cpu_time", "mpi_nprocs", "omp_nthreads", "mpi_rank"]
frame = pd.DataFrame(columns=colnames)
for i, timer in enumerate(self.timers()):
frame = frame.append({k: getattr(timer, k) for k in colnames}, ignore_index=True)
frame["tot_ncpus"] = frame["mpi_nprocs"] * frame["omp_nthreads"]
i = frame["tot_ncpus"].values.argmin()
ref_wtime = frame.ix[i]["wall_time"]
ref_ncpus = frame.ix[i]["tot_ncpus"]
frame["peff"] = (ref_ncpus * ref_wtime) / (frame["wall_time"] * frame["tot_ncpus"])
return frame | Return pandas DataFrame with the most important results stored in the timers. |
18,002 | def cholesky(A, sparse=True, verbose=True):
if SKSPIMPORT:
A = sp.sparse.csc_matrix(A)
try:
F = spcholesky(A)
P = sp.sparse.lil_matrix(A.shape)
p = F.P()
P[np.arange(len(p)), p] = 1
L = F.L()
L = P.T.dot(L)
except CholmodNotPositiveDefiniteError as e:
raise NotPositiveDefiniteError()
if sparse:
return L.T
return L.T.A
else:
msg = \
\
\
\
if verbose:
warnings.warn(msg)
if sp.sparse.issparse(A):
A = A.A
try:
L = sp.linalg.cholesky(A, lower=False)
except LinAlgError as e:
raise NotPositiveDefiniteError()
if sparse:
return sp.sparse.csc_matrix(L)
return L | Choose the best possible Cholesky factorizer.
If possible, import the Scikit-Sparse sparse Cholesky method.
Permutes the output L to ensure A = L.H . L
Otherwise defaults to numpy's non-sparse version
Parameters
----------
A : array-like
array to decompose
sparse : boolean, default: True
whether to return a sparse array
verbose : bool, default: True
whether to print warnings |
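The row above picks between a sparse CHOLMOD factorization and scipy's dense Cholesky, permuting the sparse factor so that A = L.H . L. Below is a minimal sketch of only the dense branch, with a made-up positive-definite matrix, verifying the A = U.T @ U identity for the upper factor; it is illustrative and not part of the dataset row.

```python
import numpy as np
import scipy.linalg

# Small symmetric positive-definite matrix (illustrative values only).
A = np.array([[4.0, 1.0, 0.0],
              [1.0, 3.0, 1.0],
              [0.0, 1.0, 2.0]])

# Upper-triangular factor, mirroring the lower=False call in the dense branch.
U = scipy.linalg.cholesky(A, lower=False)

assert np.allclose(U.T @ U, A)   # reconstruction check
print(U)
```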
18,003 | def invalidate_cache(self, obj=None, queryset=None,
extra=None, force_all=False):
if self.cache_manager:
if queryset != None:
force_all = True
self.cache_manager.invalidate_cache(self.model, instance=obj,
extra=extra,
force_all=force_all) | Method that should be called by all triggers to invalidate the
cache for an item(s).
Should be overridden by inheriting classes to customize behavior. |
18,004 | def solveConsRepAgent(solution_next,DiscFac,CRRA,IncomeDstn,CapShare,DeprFac,PermGroFac,aXtraGrid):
vPfuncNext = solution_next.vPfunc
ShkPrbsNext = IncomeDstn[0]
PermShkValsNext = IncomeDstn[1]
TranShkValsNext = IncomeDstn[2]
aNrmNow = aXtraGrid
aNrmCount = aNrmNow.size
ShkCount = ShkPrbsNext.size
aNrm_tiled = np.tile(np.reshape(aNrmNow,(aNrmCount,1)),(1,ShkCount))
PermShkVals_tiled = np.tile(np.reshape(PermShkValsNext,(1,ShkCount)),(aNrmCount,1))
TranShkVals_tiled = np.tile(np.reshape(TranShkValsNext,(1,ShkCount)),(aNrmCount,1))
ShkPrbs_tiled = np.tile(np.reshape(ShkPrbsNext,(1,ShkCount)),(aNrmCount,1))
KtoLnext = kNrmNext/TranShkVals_tiled
RfreeNext = 1. - DeprFac + CapShare*KtoLnext**(CapShare-1.)
wRteNext = (1.-CapShare)*KtoLnext**CapShare
mNrmNext = RfreeNext*kNrmNext + wRteNext*TranShkVals_tiled
vPnext = vPfuncNext(mNrmNext)
EndOfPrdvP = DiscFac*np.sum(RfreeNext*(PermGroFac*PermShkVals_tiled)**(-CRRA)*vPnext*ShkPrbs_tiled,axis=1)
cNrmNow = EndOfPrdvP**(-1./CRRA)
mNrmNow = aNrmNow + cNrmNow
cFuncNow = LinearInterp(np.insert(mNrmNow,0,0.0),np.insert(cNrmNow,0,0.0))
vPfuncNow = MargValueFunc(cFuncNow,CRRA)
solution_now = ConsumerSolution(cFunc=cFuncNow,vPfunc=vPfuncNow)
return solution_now | Solve one period of the simple representative agent consumption-saving model.
Parameters
----------
solution_next : ConsumerSolution
Solution to the next period's problem (i.e. previous iteration).
DiscFac : float
Intertemporal discount factor for future utility.
CRRA : float
Coefficient of relative risk aversion.
IncomeDstn : [np.array]
A list containing three arrays of floats, representing a discrete
approximation to the income process between the period being solved
and the one immediately following (in solution_next). Order: event
probabilities, permanent shocks, transitory shocks.
CapShare : float
Capital's share of income in Cobb-Douglas production function.
DeprFac : float
Depreciation rate of capital.
PermGroFac : float
Expected permanent income growth factor at the end of this period.
aXtraGrid : np.array
Array of "extra" end-of-period asset values-- assets above the
absolute minimum acceptable level. In this model, the minimum acceptable
level is always zero.
Returns
-------
solution_now : ConsumerSolution
Solution to this period's problem (new iteration). |
18,005 | def get_by(self, field, value):
firmwares = self.get_all()
matches = []
for item in firmwares:
if item.get(field) == value:
matches.append(item)
return matches | Gets the list of firmware baseline resources managed by the appliance. Optional parameters can be used to
filter the list of resources returned.
The search is case-insensitive.
Args:
field: Field name to filter.
value: Value to filter.
Returns:
list: List of firmware baseline resources. |
18,006 | def _buildTraitCovar(self, trait_covar_type=, rank=1, fixed_trait_covar=None, jitter=1e-4):
assert trait_covar_type in [, , , , , , , , ],
if trait_covar_type==:
cov = FreeFormCov(self.P, jitter=jitter)
elif trait_covar_type==:
assert fixed_trait_covar is not None,
assert fixed_trait_covar.shape[0]==self.P,
assert fixed_trait_covar.shape[1]==self.P,
cov = FixedCov(fixed_trait_covar)
elif trait_covar_type==:
cov = DiagonalCov(self.P)
elif trait_covar_type==:
cov = LowRankCov(self.P, rank=rank)
elif trait_covar_type==:
cov = SumCov(LowRankCov(self.P, rank=rank), FixedCov(sp.eye(self.P)))
elif trait_covar_type==:
cov = SumCov(LowRankCov(self.P, rank=rank), DiagonalCov(self.P))
elif trait_covar_type==:
cov = FixedCov(sp.ones([self.P, self.P]))
elif trait_covar_type==:
cov1 = FixedCov(sp.ones([self.P, self.P]))
cov2 = FixedCov(sp.eye(self.P))
cov = SumCov(cov1, cov2)
elif trait_covar_type==:
cov1 = FixedCov(sp.ones([self.P, self.P]))
cov2 = FixedCov(sp.eye(self.P))
cov = SumCov(cov1, cov2)
return cov | Internal function that builds the trait covariance matrix using the LIMIX framework
Args:
trait_covar_type: type of covariance to use. Default 'freeform'. Possible values are
rank: rank of a possible lowrank component (default 1)
fixed_trait_covar: PxP matrix for the (predefined) trait-to-trait covariance matrix if fixed type is used
jitter: diagonal contribution added to freeform covariance matrices for regularization
Returns:
LIMIX::Covariance for Trait covariance matrix |
18,007 | def normalize_val(val):
if is_unicode(val) and val.isdigit():
return int(val)
elif isinstance(val, list):
return .join(val)
elif val is None:
return
return val | Normalize JSON/YAML derived values as they pertain
to Vault resources and comparison operations |
18,008 | def message_user(self, username, domain, subject, message):
kwargs = {
: message,
: domain,
: % (username, domain),
}
if self.api_version <= (14, 7):
| Currently use send_message_chat and discard subject, because headline messages are not
stored by mod_offline. |
18,009 | def text_list_to_colors(names):
Dnames = np.zeros( (len(names), len(names)) )
for i in range(len(names)):
for j in range(len(names)):
Dnames[i,j] = 1 - 2.0 * levenshtein(names[i], names[j]) / float(len(names[i]+names[j]))
pca = sklearn.decomposition.PCA(n_components = 1)
pca.fit(Dnames)
textToColor = pca.transform(Dnames)
textToColor = 255 * (textToColor - textToColor.min()) / (textToColor.max() - textToColor.min())
textmaps = generateColorMap();
colors = [textmaps[int(c)] for c in textToColor]
return colors | Generates a list of colors based on a list of names (strings). Similar strings correspond to similar colors. |
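The row above builds a pairwise string-dissimilarity matrix (from a Levenshtein ratio), projects it to one dimension with PCA, and rescales the projection to 0-255 so it can index a colour map. A hedged sketch of the same idea follows; it substitutes difflib.SequenceMatcher for the levenshtein helper and prints the scaled values instead of mapping them to colours, so it approximates the technique rather than reproducing the original code.

```python
import difflib
import numpy as np
from sklearn.decomposition import PCA

names = ["rock", "rocknroll", "jazz", "jazzy", "classical"]

# Pairwise dissimilarity: 1 - similarity ratio (stand-in for the
# Levenshtein-based distance used in the dataset row above).
n = len(names)
D = np.zeros((n, n))
for i in range(n):
    for j in range(n):
        D[i, j] = 1.0 - difflib.SequenceMatcher(None, names[i], names[j]).ratio()

# Project the distance matrix onto one component and rescale to 0..255.
proj = PCA(n_components=1).fit_transform(D)
scaled = 255 * (proj - proj.min()) / (proj.max() - proj.min())
print(dict(zip(names, scaled.ravel().round(1))))
```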
18,010 | def remove_task_db(self, fs_id):
sql =
self.cursor.execute(sql, [fs_id, ])
self.check_commit() | Remove the task from the database. |
18,011 | def eqy(ql, qs, ns=None,):
return CONN.ExecQuery(QueryLanguage=ql,
Query=qs,
namespace=ns) | *New in pywbem 0.12*
This function is a wrapper for :meth:`~pywbem.WBEMConnection.ExecQuery`.
Execute a query in a namespace.
Parameters:
ql (:term:`string`):
Name of the query language used in the `qs` parameter, e.g.
"DMTF:CQL" for CIM Query Language, and "WQL" for WBEM Query
Language. Because this is not a filter query, "DMTF:FQL" is not a
valid query language for this request.
qs (:term:`string`):
Query string in the query language specified in the `ql` parameter.
ns (:term:`string`):
Name of the CIM namespace to be used (case independent).
If `None`, defaults to the default namespace of the connection.
Returns:
A list of :class:`~pywbem.CIMInstance` objects that represents
the query result.
These instances have their `path` attribute set to identify
their creation class and the target namespace of the query, but
they are not addressable instances. |
18,012 | def dpsi2_dtheta(self, dL_dpsi2, Z, mu, S, target):
self._psi_computations(Z, mu, S)
d_var = 2.*self._psi2 / self.variance
d_length = -2.*self._psi2[:, :, :, None] * (self._psi2_Zdist_sq * self._psi2_denom + self._psi2_mudist_sq + S[:, None, None, :] * self.inv_lengthscale2) / (self.inv_lengthscale * self._psi2_denom)
target[0] += np.sum(dL_dpsi2 * d_var)
dpsi2_dlength = d_length * dL_dpsi2[:, :, :, None]
if not self.ARD:
target[1] += dpsi2_dlength.sum()
else:
target[1:] += dpsi2_dlength.sum(0).sum(0).sum(0) | Shape N,num_inducing,num_inducing,Ntheta |
18,013 | def set_pixel_spacing(hdr, spacing):
warnings.warn(, category=DeprecationWarning)
set_voxel_spacing(hdr, spacing) | r"""Deprecated synonym of `~medpy.io.header.set_voxel_spacing`. |
18,014 | def function(self):
if not hasattr(self,):
function = self.model.lambdify(self.expression, self.all_variables, **self.lambdify_options)
self._function = lambda *x: function(*(x + self.fixed_values))
return self._function | The function passed to the `fit_function` specified in `scipy_data_fitting.Fit.options`,
and used by `scipy_data_fitting.Fit.pointspace` to generate plots, etc.
Its number of arguments and their order is determined by items 1, 2, and 3
as listed in `scipy_data_fitting.Fit.all_variables`.
All parameter values will be multiplied by their corresponding prefix before being passed to this function.
By default, it is a functional form of `scipy_data_fitting.Fit.expression` converted
using `scipy_data_fitting.Model.lambdify`.
See also `scipy_data_fitting.Fit.lambdify_options`. |
18,015 | def _remove_hlink(self):
hlink = self._hlink
if hlink is None:
return
rId = hlink.rId
if rId:
self.part.drop_rel(rId)
self._element.remove(hlink) | Remove the a:hlinkClick or a:hlinkHover element, including dropping
any relationship it might have. |
18,016 | def is_none(entity, prop, name):
"bool: True if the value of a property is None."
return is_not_empty(entity, prop, name) and getattr(entity, name) is None | bool: True if the value of a property is None. |
18,017 | def compute_column_width_and_height(self):
if not self.rows:
return
for row in self.rows:
max_row_height = max((len(cell.get_cell_lines()) for cell in row.columns)) if row.columns else 1
for cell in row.columns:
cell.height = max_row_height
max_columns = max([len(row.columns) for row in self.rows])
for column_idx in range(max_columns):
row_cell_lines = [row.get_cell_lines(column_idx) for row in self.rows]
max_column_width = max((len(line) for line in chain(*row_cell_lines)))
for row in self.rows:
if len(row.columns) > column_idx:
row.columns[column_idx].width = max_column_width | compute and set the column width for all columns in the table |
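The method above sets each row's height to its tallest cell and each column's width to its widest cell across all rows. A minimal, self-contained sketch of the column-width half of that logic, using plain lists of strings instead of the cell/row objects above, is shown below.

```python
# Rows of plain strings standing in for the cell objects used above.
rows = [
    ["name", "count", "description"],
    ["alpha", "3", "first entry"],
    ["beta", "12", "second, slightly longer entry"],
]

# Column width = widest cell in that column; short rows are tolerated.
num_cols = max(len(row) for row in rows)
widths = [
    max((len(row[c]) for row in rows if len(row) > c), default=0)
    for c in range(num_cols)
]
print(widths)  # [5, 5, 29]
```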
18,018 | def justify(clr, argd):
methodmap = {
: clr.ljust,
: clr.rjust,
: clr.center,
}
for flag in methodmap:
if argd[flag]:
if argd[flag] in (, ):
val = get_terminal_size(default=(80, 35))[0]
else:
val = try_int(argd[flag], minimum=None)
if val < 0:
val = get_terminal_size(default=(80, 35))[0] + val
return methodmap[flag](val)
return clr | Justify str/Colr based on user args. |
18,019 | def build_html():
source = AjaxDataSource(data_url=,
polling_interval=INTERVAL,
method=)
p = figure(plot_height=400,
title=,
sizing_mode=,
tools="xpan,xwheel_zoom,xbox_zoom,reset",
x_axis_type=None,
y_axis_location="right",
y_axis_label="Price ($)")
p.x_range.follow = "end"
p.x_range.follow_interval = 100
p.x_range.range_padding = 0
p.line(x=, y=, alpha=0.25, line_width=3, color=,
source=source)
p.line(x=, y=, alpha=0.8, line_width=2, color=,
source=source)
p.segment(x0=, y0=, x1=, y1=, line_width=2,
color=, source=source)
p.segment(x0=, y0=, x1=, y1=, line_width=8,
color=, source=source, alpha=0.8)
p2 = figure(plot_height=200,
title=,
sizing_mode=,
x_range=p.x_range,
x_axis_label=,
tools="xpan,xwheel_zoom,xbox_zoom,reset",
y_axis_location="right")
p2.line(x=, y=, color=, line_width=2, source=source)
p2.line(x=, y=, color=, line_width=2, source=source)
p2.segment(x0=, y0=0, x1=, y1=, line_width=6, color=,
alpha=0.5, source=source)
plot = gridplot([[p], [p2]], toolbar_location="left", plot_width=1000)
script, div = components(plot, theme=theme)
html = template.render(resources=CDN.render(), script=script, div=div)
return html | Build the html, to be served by IndexHandler |
18,020 | def main(self, function):
captured = self.command(function)
self.default_command = captured.__name__
return captured | Decorator to define the main function of the experiment.
The main function of an experiment is the default command that is being
run when no command is specified, or when calling the run() method.
Usually it is more convenient to use ``automain`` instead. |
18,021 | def retrieve_import_alias_mapping(names_list):
import_alias_names = dict()
for alias in names_list:
if alias.asname:
import_alias_names[alias.asname] = alias.name
return import_alias_names | Creates a dictionary mapping aliases to their respective name.
import_alias_names is used in module_definitions.py and visit_Call |
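The helper above walks an AST import node's names list and records alias-to-original-name pairs. A small usage sketch with the standard ast module follows; the import statement being parsed is just an example.

```python
import ast

def retrieve_import_alias_mapping(names_list):
    """Map each alias to the name it refers to (mirrors the row above)."""
    return {alias.asname: alias.name for alias in names_list if alias.asname}

tree = ast.parse("import numpy as np, os")
import_node = tree.body[0]           # the ast.Import node
print(retrieve_import_alias_mapping(import_node.names))  # {'np': 'numpy'}
```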
18,022 | def primary_keys_full(cls):
mapper = cls.__mapper__
return [
mapper.get_property_by_column(column)
for column in mapper.primary_key
] | Get primary key properties for a SQLAlchemy cls.
Taken from marshmallow_sqlalchemy |
18,023 | def extract_all(zipfile, dest_folder):
z = ZipFile(zipfile)
print(z)
z.extractall(dest_folder) | reads the zip file, determines compression
and unzips recursively until source files
are extracted |
18,024 | def rename_tier(self, id_from, id_to):
childs = self.get_child_tiers_for(id_from)
self.tiers[id_to] = self.tiers.pop(id_from)
self.tiers[id_to][2][] = id_to
for child in childs:
self.tiers[child][2][] = id_to | Rename a tier. Note that this also renames the child tiers that have
the tier as a parent.
:param str id_from: Original name of the tier.
:param str id_to: Target name of the tier.
:throws KeyError: If the tier doesn't exist. |
18,025 | def update_iscsi_settings(self, iscsi_data):
self._conn.patch(self.path, data=iscsi_data) | Update iscsi data
:param data: default iscsi config data |
18,026 | def groups_kick(self, room_id, user_id, **kwargs):
return self.__call_api_post(, roomId=room_id, userId=user_id, kwargs=kwargs) | Removes a user from the private group. |
18,027 | def mac_address_table_aging_time_conversational_time_out(self, **kwargs):
config = ET.Element("config")
mac_address_table = ET.SubElement(config, "mac-address-table", xmlns="urn:brocade.com:mgmt:brocade-mac-address-table")
aging_time = ET.SubElement(mac_address_table, "aging-time")
conversational_time_out = ET.SubElement(aging_time, "conversational-time-out")
conversational_time_out.text = kwargs.pop()
callback = kwargs.pop(, self._callback)
return callback(config) | Auto Generated Code |
18,028 | def get(self, request, *args, **kwargs):
try:
context = self.get_context_data(**kwargs)
except exceptions.NotAvailable:
exceptions.handle(request)
self.set_workflow_step_errors(context)
return self.render_to_response(context) | Handler for HTTP GET requests. |
18,029 | def instruction_list_to_easm(instruction_list: list) -> str:
result = ""
for instruction in instruction_list:
result += "{} {}".format(instruction["address"], instruction["opcode"])
if "argument" in instruction:
result += " " + instruction["argument"]
result += "\n"
return result | Convert a list of instructions into an easm op code string.
:param instruction_list:
:return: |
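A short usage sketch of the converter above, with a hand-made (hypothetical) instruction list in the dict shape the function expects.

```python
def instruction_list_to_easm(instruction_list):
    # Same logic as the dataset row above.
    result = ""
    for instruction in instruction_list:
        result += "{} {}".format(instruction["address"], instruction["opcode"])
        if "argument" in instruction:
            result += " " + instruction["argument"]
        result += "\n"
    return result

# Hypothetical disassembly fragment.
instructions = [
    {"address": 0, "opcode": "PUSH1", "argument": "0x60"},
    {"address": 2, "opcode": "MSTORE"},
]
print(instruction_list_to_easm(instructions))
# 0 PUSH1 0x60
# 2 MSTORE
```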
18,030 | def schedule_to_array(schedule, events, slots):
array = np.zeros((len(events), len(slots)), dtype=np.int8)
for item in schedule:
array[events.index(item.event), slots.index(item.slot)] = 1
return array | Convert a schedule from schedule to array form
Parameters
----------
schedule : list or tuple
of instances of :py:class:`resources.ScheduledItem`
events : list or tuple
of :py:class:`resources.Event` instances
slots : list or tuple
of :py:class:`resources.Slot` instances
Returns
-------
np.array
An E by S array (X) where E is the number of events and S the
number of slots. Xij is 1 if event i is scheduled in slot j and
zero otherwise |
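The function above marks a 1 in an events-by-slots matrix for every scheduled (event, slot) pair. A self-contained sketch using plain strings and a namedtuple in place of the scheduler's resource classes follows; the names are stand-ins, not the library's own types.

```python
from collections import namedtuple
import numpy as np

ScheduledItem = namedtuple("ScheduledItem", ["event", "slot"])

events = ["talk A", "talk B"]
slots = ["Mon 10:00", "Mon 11:00", "Tue 10:00"]
schedule = [ScheduledItem("talk A", "Mon 11:00"),
            ScheduledItem("talk B", "Tue 10:00")]

array = np.zeros((len(events), len(slots)), dtype=np.int8)
for item in schedule:
    array[events.index(item.event), slots.index(item.slot)] = 1
print(array)
# [[0 1 0]
#  [0 0 1]]
```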
18,031 | def calculate_integral(self, T1, T2):
if T2 < T1:
flipped = True
T1, T2 = T2, T1
else:
flipped = False
if self.n == 1:
dH = (Zabransky_cubic_integral(T2, *self.coeff_sets[0])
- Zabransky_cubic_integral(T1, *self.coeff_sets[0]))
else:
ind_T1, ind_T2 = self._coeff_ind_from_T(T1), self._coeff_ind_from_T(T2)
if ind_T1 == ind_T2:
dH = (Zabransky_cubic_integral(T2, *self.coeff_sets[ind_T2])
- Zabransky_cubic_integral(T1, *self.coeff_sets[ind_T1]))
else:
dH = (Zabransky_cubic_integral(self.Ts[ind_T1], *self.coeff_sets[ind_T1])
- Zabransky_cubic_integral(T1, *self.coeff_sets[ind_T1]))
for i in range(ind_T1, ind_T2):
diff =(Zabransky_cubic_integral(self.Ts[i+1], *self.coeff_sets[i])
- Zabransky_cubic_integral(self.Ts[i], *self.coeff_sets[i]))
dH += diff
end = (Zabransky_cubic_integral(T2, *self.coeff_sets[ind_T2])
- Zabransky_cubic_integral(self.Ts[ind_T2], *self.coeff_sets[ind_T2]))
dH += end
return -dH if flipped else dH | r'''Method to compute the enthalpy integral of heat capacity from
`T1` to `T2`. Analytically integrates across the piecewise spline
as necessary.
Parameters
----------
T1 : float
Initial temperature, [K]
T2 : float
Final temperature, [K]
Returns
-------
dH : float
Enthalpy difference between `T1` and `T2`, [J/mol] |
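The method above integrates a piecewise-defined heat-capacity expression by summing the analytic integral over each temperature segment between T1 and T2. Below is a generic sketch of that piecewise idea with a made-up antiderivative per segment, not the Zabransky cubic form used in the row.

```python
import bisect

# Hypothetical piecewise property: segment boundaries and one
# antiderivative F_i(T) per segment (coefficients are made up).
bounds = [200.0, 300.0, 400.0, 500.0]          # 3 segments
F = [lambda T: 10.0 * T, lambda T: 12.0 * T, lambda T: 15.0 * T]

def seg_index(T):
    """Index of the segment containing temperature T."""
    return min(max(bisect.bisect_right(bounds, T) - 1, 0), len(F) - 1)

def piecewise_integral(T1, T2):
    """Sum analytic per-segment integrals from T1 to T2 (T1 <= T2)."""
    i1, i2 = seg_index(T1), seg_index(T2)
    if i1 == i2:
        return F[i1](T2) - F[i1](T1)
    total = F[i1](bounds[i1 + 1]) - F[i1](T1)          # finish first segment
    for i in range(i1 + 1, i2):                        # whole middle segments
        total += F[i](bounds[i + 1]) - F[i](bounds[i])
    total += F[i2](T2) - F[i2](bounds[i2])             # start of last segment
    return total

print(piecewise_integral(250.0, 450.0))   # 10*50 + 12*100 + 15*50 = 2450.0
```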
18,032 | def file_data(self):
return {
: self._file_content,
: self._group_data.get(),
: self._group_data.get(),
} | Return Group file (only supported for Document and Report). |
18,033 | def bool(cls, must=None, should=None, must_not=None, minimum_number_should_match=None, boost=None):
instance = cls(bool={})
if must is not None:
instance[][] = must
if should is not None:
instance[][] = should
if must_not is not None:
instance[][] = must_not
if minimum_number_should_match is not None:
instance[][] = minimum_number_should_match
if boost is not None:
instance[][] = boost
return instance | http://www.elasticsearch.org/guide/reference/query-dsl/bool-query.html
A query that matches documents matching boolean combinations of other queris. The bool query maps to Lucene BooleanQuery. It is built using one of more boolean clauses, each clause with a typed occurrence. The occurrence types are:
'must' - The clause(query) must appear in matching documents.
'should' - The clause(query) should appear in the matching document. A boolean query with no 'must' clauses, one or more 'should' clauses must match a document. The minimum number of 'should' clauses to match can be set using 'minimum_number_should_match' parameter.
'must_not' - The clause(query) must not appear in the matching documents. Note that it is not possible to search on documents that only consists of a 'must_not' clause(s).
'minimum_number_should_match' - Minimum number of documents that should match
'boost' - boost value
> term = ElasticQuery()
> term.term(user='kimchy')
> query = ElasticQuery()
> query.bool(should=term)
> query.query()
{ 'bool' : { 'should' : { 'term' : {'user':'kimchy'}}}} |
18,034 | def configure_profile(msg_type, profile_name, data, auth):
with jsonconfig.Config("messages", indent=4) as cfg:
write_data(msg_type, profile_name, data, cfg)
write_auth(msg_type, profile_name, auth, cfg)
print("[+] Configuration entry for <" + profile_name + "> created.")
print("[+] Configuration file location: " + cfg.filename) | Create the profile entry.
Args:
:msg_type: (str) message type to create config entry.
:profile_name: (str) name of the profile entry
:data: (dict) dict values for the 'settings'
:auth: (dict) auth parameters |
18,035 | def base64url_decode(input):
rem = len(input) % 4
if rem > 0:
input += b'=' * (4 - rem)
return base64.urlsafe_b64decode(input) | Helper method to base64url_decode a string.
Args:
input (str): A base64url_encoded string to decode. |
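The helper above re-adds the '=' padding that base64url strings usually drop, since the input length must be a multiple of 4 before decoding. A short usage sketch:

```python
import base64

def base64url_decode(data: bytes) -> bytes:
    rem = len(data) % 4
    if rem > 0:
        data += b"=" * (4 - rem)   # restore stripped padding
    return base64.urlsafe_b64decode(data)

print(base64url_decode(b"aGVsbG8"))    # b'hello'  (one '=' re-added)
print(base64url_decode(b"aGVsbG8h"))   # b'hello!' (already a multiple of 4)
```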
18,036 | def delete(self, uid):
try:
record = resource_db[uid].copy()
except KeyError:
return self.response_factory.not_found(errors=[])
del resource_db[uid]
return self.response_factory.ok(data=record) | Example DELETE method. |
18,037 | def load(controller=None, filename="", name=None, rsrc=None):
"Create the GUI objects defined in the resource (filename or python struct)"
if not filename and not rsrc:
if isinstance(controller, types.ClassType):
mod_dict = util.get_class_module_dict(controller)
elif isinstance(controller, types.ModuleType):
mod_dict = controller.__dict__
elif isinstance(controller, Controller):
mod_dict = util.get_class_module_dict(controller)
else:
mod_dict = util.get_caller_module_dict()
if controller is None:
controller = mod_dict
if util.main_is_frozen():
if in mod_dict:
filename = os.path.split(mod_dict[])[1]
else:
filename = os.path.split(sys.argv[0])[-1]
filename = os.path.join(util.get_app_dir(), filename)
else:
filename = mod_dict[]
base, ext = os.path.splitext(filename)
filename = base + ".rsrc.py"
if isinstance(filename, basestring):
rsrc = parse(filename)
ret = []
for win in rsrc:
if not name or win[] == name:
ret.append(build_window(win))
if ret and controller:
connect(ret[0], controller)
return ret[0]
else:
return ret | Create the GUI objects defined in the resource (filename or python struct) |
18,038 | def _save_namepaths_bids_derivatives(self, f, tag, save_directory, suffix=None):
file_name = f.split()[-1].split()[0]
if tag != :
tag = + tag
if suffix:
file_name, _ = drop_bids_suffix(file_name)
save_name = file_name + tag
save_name += + suffix
else:
save_name = file_name + tag
paths_post_pipeline = f.split(self.pipeline)
if self.pipeline_subdir:
paths_post_pipeline = paths_post_pipeline[1].split(self.pipeline_subdir)[
0]
else:
paths_post_pipeline = paths_post_pipeline[1].split(file_name)[0]
base_dir = self.BIDS_dir + + + \
teneto.__version__ + + paths_post_pipeline +
save_dir = base_dir + + save_directory +
if not os.path.exists(save_dir):
try:
os.makedirs(save_dir)
except:
time.sleep(2)
if not os.path.exists(self.BIDS_dir + + + teneto.__version__ + ):
try:
with open(self.BIDS_dir + + + teneto.__version__ + , ) as fs:
json.dump(self.tenetoinfo, fs)
except:
time.sleep(2)
return save_name, save_dir, base_dir | Creates output directory and output name
Parameters
---------
f : str
input files, includes the file bids_suffix
tag : str
what should be added to f in the output file.
save_directory : str
additional directory that the output file should go in
suffix : str
add new suffix to data
Returns
-------
save_name : str
previous filename with new tag
save_dir : str
directory where it will be saved
base_dir : str
subjective base directory (i.e. derivatives/teneto/func[/anythingelse/]) |
18,039 | def _get_config_type(cla55: type) -> Optional[str]:
if cla55 == torch.nn.RNN:
return "rnn"
elif cla55 == torch.nn.LSTM:
return "lstm"
elif cla55 == torch.nn.GRU:
return "gru"
for subclass_dict in Registrable._registry.values():
for name, subclass in subclass_dict.items():
if subclass == cla55:
return name
if hasattr(subclass, ):
sif = subclass()._init_function
if sif == cla55:
return sif.__name__.rstrip("_")
return None | Find the name (if any) that a subclass was registered under.
We do this simply by iterating through the registry until we
find it. |
18,040 | def _set_policy(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("policyname",policy.policy, yang_name="policy", rest_name="policy", parent=self, is_container=, user_ordered=False, path_helper=self._path_helper, yang_keys=, extensions={u: {u: u, u: u}}), is_container=, yang_name="policy", rest_name="policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: u}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "list",
: ,
})
self.__policy = t
if hasattr(self, ):
self._set() | Setter method for policy, mapped from YANG variable /rbridge_id/maps/policy (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_policy is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_policy() directly. |
18,041 | def _get_benchmark_handler(self, last_trade, freq=):
return LiveBenchmark(
last_trade, frequency=freq).surcharge_market_data \
if utils.is_live(last_trade) else None | Setup a custom benchmark handler or let zipline manage it |
18,042 | def browseprofile(profilelog):
print()
try:
browser = ProfileBrowser(profilelog)
print >> browser.stream, "Welcome to the profile statistics browser. Type help to get started."
browser.cmdloop()
print >> browser.stream, "Goodbye."
except KeyboardInterrupt:
pass | Browse interactively a profile log in console |
18,043 | def _init_metadata(self):
self._choices_metadata = {
: Id(self.my_osid_object_form._authority,
self.my_osid_object_form._namespace,
),
: ,
: ,
: True,
: False,
: False,
: True,
: [],
: ,
: []
}
self._choice_name_metadata = {
: Id(self.my_osid_object_form._authority,
self.my_osid_object_form._namespace,
),
: ,
: ,
: False,
: False,
: False,
: False,
: [],
: ,
: 0,
: 1024,
: []
}
self._multi_answer_metadata = {
: Id(self.my_osid_object_form._authority,
self.my_osid_object_form._namespace,
),
: ,
: ,
: True,
: False,
: True,
: False,
: [],
: ,
: []
} | stub |
18,044 | def fmt_text(text, bg = None, fg = None, attr = None, plain = False):
if not plain:
if fg is not None:
text = TEXT_FORMATING[][fg] + text
if bg is not None:
text = TEXT_FORMATING[][bg] + text
if attr is not None:
text = TEXT_FORMATING[][attr] + text
if (fg is not None) or (bg is not None) or (attr is not None):
text += TEXT_FORMATING[]
return text | Apply given console formating around given text. |
18,045 | def find_root(self):
node = self
while node.parent is not None:
node = node.parent
return node | Finds the outermost context. |
18,046 | async def get(self, request):
ticket = await self.get_ticket(request)
if ticket is None:
return None
try:
now = time.time()
fields = self._ticket.validate(ticket, self._get_ip(request), now)
if (self._reissue_time is not None and
now >= (fields.valid_until - self._reissue_time)):
request[_REISSUE_KEY] = self._new_ticket(request, fields.user_id)
return fields.user_id
except TicketError as e:
return None | Gets the user_id for the request.
Gets the ticket for the request using the get_ticket() function, and
authenticates the ticket.
Args:
request: aiohttp Request object.
Returns:
The userid for the request, or None if the ticket is not
authenticated. |
18,047 | def spare_disk(self, disk_xml=None):
spare_disk = {}
disk_types = set()
for filer_disk in disk_xml:
disk_types.add(filer_disk.find().text)
if not filer_disk.find().text == :
continue
disk_type = filer_disk.find().text
if disk_type in spare_disk:
spare_disk[disk_type] += 1
else:
spare_disk[disk_type] = 1
for disk_type in disk_types:
if disk_type in spare_disk:
self.push( + disk_type, , spare_disk[disk_type])
else:
self.push( + disk_type, , 0) | Number of spare disk per type.
For example: storage.ontap.filer201.disk.SATA |
18,048 | def get_resource(self, path):
response = self._http_request(path)
try:
return response.json()
except ValueError:
raise exception.ServiceException("Invalid service response.") | Getting the required information from the API. |
18,049 | def scan_full(self, regex, return_string=True, advance_pointer=True):
regex = get_regex(regex)
self.match = regex.match(self.string, self.pos)
if not self.match:
return
if advance_pointer:
self.pos = self.match.end()
if return_string:
return self.match.group(0)
return len(self.match.group(0)) | Match from the current position.
If `return_string` is false and a match is found, returns the number of
characters matched.
>>> s = Scanner("test string")
>>> s.scan_full(r' ')
>>> s.scan_full(r'test ')
'test '
>>> s.pos
5
>>> s.scan_full(r'stri', advance_pointer=False)
'stri'
>>> s.pos
5
>>> s.scan_full(r'stri', return_string=False, advance_pointer=False)
4
>>> s.pos
5 |
18,050 | def _lambert_ticks(ax, ticks, tick_location, line_constructor, tick_extractor):
outline_patch = sgeom.LineString(ax.outline_patch.get_path().vertices.tolist())
axis = find_side(outline_patch, tick_location)
n_steps = 30
extent = ax.get_extent(ccrs.PlateCarree())
_ticks = []
for t in ticks:
xy = line_constructor(t, n_steps, extent)
proj_xyz = ax.projection.transform_points(ccrs.Geodetic(), xy[:, 0], xy[:, 1])
xyt = proj_xyz[..., :2]
ls = sgeom.LineString(xyt.tolist())
locs = axis.intersection(ls)
if not locs:
tick = [None]
else:
tick = tick_extractor(locs.xy)
_ticks.append(tick[0])
ticklabels = copy(ticks)
while True:
try:
index = _ticks.index(None)
except ValueError:
break
_ticks.pop(index)
ticklabels.pop(index)
return _ticks, ticklabels | Get the tick locations and labels for an axis of a Lambert Conformal projection. |
18,051 | def rpc_reply(id: Union[str, int], result: Optional[object],
warnings: Optional[List[Warning]] = None) -> rpcq.messages.RPCReply:
warnings = warnings or []
return rpcq.messages.RPCReply(
jsonrpc=,
id=id,
result=result,
warnings=[rpc_warning(warning) for warning in warnings]
) | Create RPC reply
:param str|int id: Request ID
:param result: Result
:param warnings: List of warnings to attach to the message
:return: JSON RPC formatted dict |
18,052 | def _combine_qc_samples(samples):
by_bam = collections.defaultdict(list)
for data in [utils.to_single_data(x) for x in samples]:
batch = dd.get_batch(data) or dd.get_sample_name(data)
if not isinstance(batch, (list, tuple)):
batch = [batch]
batch = tuple(batch)
by_bam[(dd.get_align_bam(data) or dd.get_work_bam(data), batch)].append(data)
out = []
for data_group in by_bam.values():
data = data_group[0]
alg_qc = []
qc = {}
metrics = {}
for d in data_group:
qc.update(dd.get_summary_qc(d))
metrics.update(dd.get_summary_metrics(d))
alg_qc.extend(dd.get_algorithm_qc(d))
data["config"]["algorithm"]["qc"] = alg_qc
data["summary"]["qc"] = qc
data["summary"]["metrics"] = metrics
out.append([data])
return out | Combine split QC analyses into single samples based on BAM files. |
18,053 | def fetch_from(self, year: int, month: int):
self.raw_data = []
self.data = []
today = datetime.datetime.today()
for year, month in self._month_year_iter(month, year, today.month, today.year):
self.raw_data.append(self.fetcher.fetch(year, month, self.sid))
self.data.extend(self.raw_data[-1][])
return self.data | Fetch data from year, month to current year month data |
18,054 | def read_until_eof(self) -> bool:
if self.read_eof():
return True
self._stream.save_context()
while not self.read_eof():
self._stream.incpos()
return self._stream.validate_context() | Consume all the stream. Same as EOF in BNF. |
18,055 | def _get_9q_square_qvm(name: str, noisy: bool,
connection: ForestConnection = None,
qvm_type: str = ) -> QuantumComputer:
topology = nx.convert_node_labels_to_integers(nx.grid_2d_graph(3, 3))
return _get_qvm_with_topology(name=name, connection=connection,
topology=topology,
noisy=noisy,
requires_executable=True,
qvm_type=qvm_type) | A nine-qubit 3x3 square lattice.
This uses a "generic" lattice not tied to any specific device. 9 qubits is large enough
to do vaguely interesting algorithms and small enough to simulate quickly.
:param name: The name of this QVM
:param connection: The connection to use to talk to external services
:param noisy: Whether to construct a noisy quantum computer
:param qvm_type: The type of QVM. Either 'qvm' or 'pyqvm'.
:return: A pre-configured QuantumComputer |
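The function above builds its qubit connectivity from a 3x3 grid graph with integer-relabelled nodes. The snippet below just shows what that topology looks like with networkx; it does not contact any QVM service.

```python
import networkx as nx

topology = nx.convert_node_labels_to_integers(nx.grid_2d_graph(3, 3))
print(sorted(topology.nodes()))    # [0, 1, 2, 3, 4, 5, 6, 7, 8]
print(topology.number_of_edges())  # 12 nearest-neighbour couplings
print(sorted(topology.edges()))
```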
18,056 | def add_atoms_linearly(self, start_atom, end_atom, new_atoms, jitterbug = 0.2):
atom_name_map = {
: ,
: ,
: ,
: ,
}
assert(start_atom.residue.chain == end_atom.residue.chain)
chain_id = start_atom.residue.chain
num_new_atoms = float(len(new_atoms))
X, Y, Z = start_atom.x, start_atom.y, start_atom.z
x_step = (end_atom.x - X) / (num_new_atoms + 1.0)
y_step = (end_atom.y - Y) / (num_new_atoms + 1.0)
z_step = (end_atom.z - Z) / (num_new_atoms + 1.0)
D = math.sqrt(x_step * x_step + y_step * y_step + z_step * z_step)
jitter = 0
if jitterbug:
jitter = (((x_step + y_step + z_step) / 3.0) * jitterbug) / D
new_lines = []
next_serial_number = max(sorted(self.atoms.keys())) + 1
round = 0
for new_atom in new_atoms:
X, Y, Z = X + x_step, Y + y_step, Z + z_step
if jitter:
if round % 3 == 0:
X, Y = X + jitter, Y - jitter
elif round % 3 == 1:
Y, Z = Y + jitter, Z - jitter
elif round % 3 == 2:
Z, X = Z + jitter, X - jitter
round += 1
residue_id, residue_type, atom_name = new_atom
assert(len(residue_type) == 3)
assert(len(residue_id) == 6)
new_lines.append(.format(str(next_serial_number).rjust(5), atom_name_map[atom_name], residue_type, residue_id, X, Y, Z))
next_serial_number += 1
new_pdb = []
in_start_residue = False
for l in self.indexed_lines:
if l[0] and l[3].serial_number == start_atom.serial_number:
in_start_residue = True
if in_start_residue and l[3].serial_number != start_atom.serial_number:
new_pdb.extend(new_lines)
in_start_residue = False
if l[0]:
new_pdb.append(l[2])
else:
new_pdb.append(l[1])
return .join(new_pdb) | A low-level function which adds new_atoms between start_atom and end_atom. This function does not validate the
input i.e. the calling functions are responsible for ensuring that the insertion makes sense.
Returns the PDB file content with the new atoms added. These atoms are given fresh serial numbers, starting
from the first serial number larger than the current serial numbers i.e. the ATOM serial numbers do not now
necessarily increase in document order.
The jitter adds some X, Y, Z variability to the new atoms. This is important in the Rosetta software suite when
placing backbone atoms as colinearly placed atoms will break the dihedral angle calculations (the dihedral angle
over 4 colinear atoms is undefined). |
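The method above places the new atoms at evenly spaced points on the segment between the start and end atoms, then nudges them slightly ("jitter") so that no four backbone atoms end up exactly collinear. A minimal numpy sketch of the even-spacing step, with made-up coordinates, is shown below.

```python
import numpy as np

start = np.array([0.0, 0.0, 0.0])      # hypothetical start-atom coordinates
end = np.array([4.0, 0.0, 2.0])        # hypothetical end-atom coordinates
num_new_atoms = 3

# Evenly spaced interior points: step = (end - start) / (n + 1).
steps = np.arange(1, num_new_atoms + 1)[:, None] / (num_new_atoms + 1.0)
points = start + steps * (end - start)
print(points)
# [[1.  0.  0.5]
#  [2.  0.  1. ]
#  [3.  0.  1.5]]
```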
18,057 | def _inplace_subset_var(self, index):
adata_subset = self[:, index].copy()
self._init_as_actual(adata_subset, dtype=self._X.dtype) | Inplace subsetting along variables dimension.
Same as ``adata = adata[:, index]``, but inplace. |
18,058 | def add_result(self, result):
if self._active_jobs == 0:
return
self._results.add(result)
self._active_jobs -= 1
if self._active_jobs == 0:
self._done() | Adds the result of a completed job to the result list, then decrements
the active job count. If the job set is already complete, the result is
simply discarded instead. |
18,059 | def get_gene_modification_language(identifier_qualified: ParserElement) -> ParserElement:
gmod_identifier = MatchFirst([
identifier_qualified,
gmod_default_ns,
])
return gmod_tag + nest(
Group(gmod_identifier)(IDENTIFIER)
) | Build a gene modification parser. |
18,060 | def add_ordered_combo_item(
combo, text, data=None, count_selected_features=None, icon=None):
if count_selected_features is not None:
text += + tr().format(
count=count_selected_features) +
size = combo.count()
for combo_index in range(0, size):
item_text = combo.itemText(combo_index)
if cmp(text.lower(), item_text.lower()) < 0:
if icon:
combo.insertItem(combo_index, icon, text, data)
else:
combo.insertItem(combo_index, text, data)
return
if icon:
combo.insertItem(size, icon, text, data)
else:
combo.insertItem(size, text, data) | Add a combo item ensuring that all items are listed alphabetically.
Although QComboBox allows you to set an InsertAlphabetically enum
this only has effect when a user interactively adds combo items to
an editable combo. This we have this little function to ensure that
combos are always sorted alphabetically.
:param combo: Combo box receiving the new item.
:type combo: QComboBox
:param text: Display text for the combo.
:type text: str
:param data: Optional UserRole data to be associated with the item.
:type data: QVariant, str
:param count_selected_features: A count to display if the layer has some
selected features. Default to None, nothing will be displayed.
:type count_selected_features: None, int
:param icon: Icon to display in the combobox.
:type icon: QIcon |
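The helper above scans existing combo items and inserts the new text before the first item that sorts after it case-insensitively, falling back to appending at the end. The same ordering logic on a plain list, using the standard bisect module, looks like this:

```python
import bisect

# Existing combo texts, already in case-insensitive order.
items = ["earthquake", "Flood", "Tsunami"]

def insert_ordered(items, text):
    """Insert text keeping the list sorted case-insensitively."""
    keys = [s.lower() for s in items]
    index = bisect.bisect_left(keys, text.lower())
    items.insert(index, text)

insert_ordered(items, "Fire")
print(items)   # ['earthquake', 'Fire', 'Flood', 'Tsunami']
```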
18,061 | def parse_eprocess(self, eprocess_data):
Name = eprocess_data[][][]
PID = eprocess_data[][][]
PPID = eprocess_data[][][]
return {: Name, : PID, : PPID} | Parse the EProcess object we get from some rekall output |
18,062 | def multilingual(request):
codes = sorted(get_language_code_list())
return {: codes,
: [(c, LANG_DICT.get(c, c)) for c in codes],
: get_default_language_code(),
: settings.ADMIN_MEDIA_PREFIX} | Returns context variables containing information about available languages. |
18,063 | def config(data_folder=settings.data_folder,
logs_folder=settings.logs_folder,
imgs_folder=settings.imgs_folder,
cache_folder=settings.cache_folder,
use_cache=settings.use_cache,
log_file=settings.log_file,
log_console=settings.log_console,
log_level=settings.log_level,
log_name=settings.log_name,
log_filename=settings.log_filename,
useful_tags_node=settings.useful_tags_node,
useful_tags_path=settings.useful_tags_path,
osm_xml_node_attrs=settings.osm_xml_node_attrs,
osm_xml_node_tags=settings.osm_xml_node_tags,
osm_xml_way_attrs=settings.osm_xml_way_attrs,
osm_xml_way_tags=settings.osm_xml_way_tags,
default_access=settings.default_access,
default_crs=settings.default_crs,
default_user_agent=settings.default_user_agent,
default_referer=settings.default_referer,
default_accept_language=settings.default_accept_language):
settings.use_cache = use_cache
settings.cache_folder = cache_folder
settings.data_folder = data_folder
settings.imgs_folder = imgs_folder
settings.logs_folder = logs_folder
settings.log_console = log_console
settings.log_file = log_file
settings.log_level = log_level
settings.log_name = log_name
settings.log_filename = log_filename
settings.useful_tags_node = useful_tags_node
settings.useful_tags_path = useful_tags_path
settings.useful_tags_node = list(set(
useful_tags_node + osm_xml_node_attrs + osm_xml_node_tags))
settings.useful_tags_path = list(set(
useful_tags_path + osm_xml_way_attrs + osm_xml_way_tags))
settings.osm_xml_node_attrs = osm_xml_node_attrs
settings.osm_xml_node_tags = osm_xml_node_tags
settings.osm_xml_way_attrs = osm_xml_way_attrs
settings.osm_xml_way_tags = osm_xml_way_tags
settings.default_access = default_access
settings.default_crs = default_crs
settings.default_user_agent = default_user_agent
settings.default_referer = default_referer
settings.default_accept_language = default_accept_language
if settings.log_file or settings.log_console:
log() | Configure osmnx by setting the default global vars to desired values.
Parameters
---------
data_folder : string
where to save and load data files
logs_folder : string
where to write the log files
imgs_folder : string
where to save figures
cache_folder : string
where to save the http response cache
use_cache : bool
if True, use a local cache to save/retrieve http responses instead of
calling API repetitively for the same request URL
log_file : bool
if true, save log output to a log file in logs_folder
log_console : bool
if true, print log output to the console
log_level : int
one of the logger.level constants
log_name : string
name of the logger
useful_tags_node : list
a list of useful OSM tags to attempt to save from node elements
useful_tags_path : list
a list of useful OSM tags to attempt to save from path elements
default_access : string
default filter for OSM "access" key
default_crs : string
default CRS to set when creating graphs
default_user_agent : string
HTTP header user-agent
default_referer : string
HTTP header referer
default_accept_language : string
HTTP header accept-language
Returns
-------
None |
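The function above simply copies each keyword argument onto the package-level settings module and optionally re-initialises logging. A hedged usage sketch, assuming the package is imported under a short alias and passing only the options being changed, might look roughly like this:

```python
import osmnx as ox

# Hypothetical call: unspecified options keep their defaults from settings.
ox.config(use_cache=True,            # cache HTTP responses locally
          log_console=True,          # echo log messages to the console
          data_folder="data",        # where downloaded data files go
          default_crs="epsg:4326")   # CRS assigned to newly created graphs
```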
18,064 | def _create_user(
self, username, email, short_name, full_name,
institute, password, is_admin, **extra_fields):
person = self.model(
username=username, email=email,
short_name=short_name, full_name=full_name,
is_admin=is_admin,
institute=institute,
**extra_fields
)
person.set_password(password)
person.save()
return person | Creates a new active person. |
18,065 | def _iterparse(xmlfile):
try:
return ET.iterparse(xmlfile, events=("start-ns", ))
except TypeError:
return ET.iterparse(xmlfile, events=(b"start-ns", )) | Avoid bug in python 3.{2,3}. See http://bugs.python.org/issue9257.
:param xmlfile: XML file or file-like object |
18,066 | def graph_to_laplacian(G, normalized=True):
try:
import networkx as nx
if isinstance(G, nx.Graph):
if normalized:
return nx.normalized_laplacian_matrix(G)
else:
return nx.laplacian_matrix(G)
except ImportError:
pass
try:
import graph_tool.all as gt
if isinstance(G, gt.Graph):
if normalized:
return gt.laplacian_type(G, normalized=True)
else:
return gt.laplacian(G)
except ImportError:
pass
try:
import igraph as ig
if isinstance(G, ig.Graph):
if normalized:
return np.array(G.laplacian(normalized=True))
else:
return np.array(G.laplacian())
except ImportError:
pass | Converts a graph from popular Python packages to Laplacian representation.
Currently support NetworkX, graph_tool and igraph.
Parameters
----------
G : obj
Input graph
normalized : bool
Whether to use normalized Laplacian.
Normalized and unnormalized Laplacians capture different properties of graphs, e.g. normalized Laplacian spectrum can determine whether a graph is bipartite, but not the number of its edges. We recommend using normalized Laplacian.
Returns
-------
scipy.sparse
Laplacian matrix of the input graph
Examples
--------
>>> graph_to_laplacian(nx.complete_graph(3), 'unnormalized').todense()
[[ 2, -1, -1], [-1, 2, -1], [-1, -1, 2]]
>>> graph_to_laplacian('test')
None |
18,067 | def _state_invalid(self):
for statemanager, conditions in self.statetransition.transitions.items():
current_state = getattr(self.obj, statemanager.propname)
if conditions[] is None:
state_valid = True
else:
mstate = conditions[].get(current_state)
state_valid = mstate and mstate(self.obj)
if state_valid and conditions[]:
state_valid = all(v(self.obj) for v in conditions[])
if not state_valid:
return statemanager, current_state, statemanager.lenum.get(current_state) | If the state is invalid for the transition, return details on what didn't match
:return: Tuple of (state manager, current state, label for current state) |
18,068 | def _find_scc(self):
self._num_scc, self._scc_proj = \
csgraph.connected_components(self.csgraph, connection=) | Set ``self._num_scc`` and ``self._scc_proj``
by calling ``scipy.sparse.csgraph.connected_components``:
* docs.scipy.org/doc/scipy/reference/sparse.csgraph.html
* github.com/scipy/scipy/blob/master/scipy/sparse/csgraph/_traversal.pyx
``self._scc_proj`` is a list of length `n` that assigns to each node
the label of the strongly connected component to which it belongs. |
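The method above delegates to scipy's connected_components with a strong-connectivity setting and stores both the component count and the per-node labels. A standalone example on a tiny directed graph:

```python
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse import csgraph

# Directed graph on 4 nodes: 0 -> 1 -> 0 form a cycle; 2 -> 3 do not.
adjacency = csr_matrix(np.array([
    [0, 1, 0, 0],
    [1, 0, 0, 0],
    [0, 0, 0, 1],
    [0, 0, 0, 0],
]))

num_scc, labels = csgraph.connected_components(adjacency, connection="strong")
print(num_scc)   # 3  -> {0, 1}, {2}, {3}
print(labels)    # component label per node (exact label ids may vary)
```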
18,069 | def reset(cls):
cls.stats = StatContainer()
cls.parentMap = {}
cls.containerMap = {}
cls.subId = 0
for stat in gc.get_objects():
if isinstance(stat, Stat):
stat._aggregators = {} | Resets the static state. Should only be called by tests. |
18,070 | def off_datastream(self, datastream):
url = + str(datastream) +
response = self.http.post(url,"")
return response | To turn off datastream
:param datastream: string |
18,071 | def drop_indexes(self):
self.__database.client._purge_index(self.__database.name, self.__name)
self.drop_index("*") | Drops all indexes on this collection.
Can be used on non-existent collections or collections with no indexes.
Raises OperationFailure on an error.
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation when using
MongoDB >= 3.4.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4. |
18,072 | def load(text, match=None):
if text is None: return None
text = text.strip()
if len(text) == 0: return None
nametable = {
: [],
: {}
}
if(sys.version_info < (3, 0, 0) and isinstance(text, unicode)):
text = text.encode()
root = XML(text)
items = [root] if match is None else root.findall(match)
count = len(items)
if count == 0:
return None
elif count == 1:
return load_root(items[0], nametable)
else:
return [load_root(item, nametable) for item in items] | This function reads a string that contains the XML of an Atom Feed, then
returns the
data in a native Python structure (a ``dict`` or ``list``). If you also
provide a tag name or path to match, only the matching sub-elements are
loaded.
:param text: The XML text to load.
:type text: ``string``
:param match: A tag name or path to match (optional).
:type match: ``string`` |
18,073 | def add_attribute(self, ont_id: str, ctrl_acct: Account, attributes: Attribute, payer: Account, gas_limit: int,
gas_price: int) -> str:
if not isinstance(ctrl_acct, Account) or not isinstance(payer, Account):
raise SDKException(ErrorCode.require_acct_params)
pub_key = ctrl_acct.get_public_key_bytes()
b58_payer_address = payer.get_address_base58()
tx = self.new_add_attribute_transaction(ont_id, pub_key, attributes, b58_payer_address, gas_limit, gas_price)
tx.sign_transaction(ctrl_acct)
tx.add_sign_transaction(payer)
tx_hash = self.__sdk.get_network().send_raw_transaction(tx)
return tx_hash | This interface is used to send a Transaction object which is used to add attribute.
:param ont_id: OntId.
:param ctrl_acct: an Account object which indicate who will sign for the transaction.
:param attributes: a list of attributes we want to add.
:param payer: an Account object which indicate who will pay for the transaction.
:param gas_limit: an int value that indicate the gas limit.
:param gas_price: an int value that indicate the gas price.
:return: a hexadecimal transaction hash value. |
18,074 | def simplify_basic(drawing, process=False, **kwargs):
if any(i.__class__.__name__ !=
for i in drawing.entities):
log.debug()
return drawing
cache = copy.deepcopy(drawing._cache)
vertices_new = collections.deque()
entities_new = collections.deque()
scale = drawing.scale
for discrete in drawing.discrete:
circle = is_circle(discrete,
scale=scale)
if circle is not None:
entities_new.append(entities.Arc(points=np.arange(3) +
len(vertices_new),
closed=True))
vertices_new.extend(circle)
else:
points = merge_colinear(discrete, scale=scale)
indexes = np.arange(len(points)) + len(vertices_new)
indexes[-1] = indexes[0]
entities_new.append(entities.Line(points=indexes))
vertices_new.extend(points)
simplified = type(drawing)(
entities=entities_new,
vertices=vertices_new,
metadata=copy.deepcopy(drawing.metadata),
process=process)
cache.cache.update({
: np.arange(len(entities_new)).reshape((-1, 1)),
: np.ones(len(entities_new), dtype=np.bool),
: np.array([])})
if in cache.cache:
cache.cache.pop()
simplified._cache = cache
simplified._cache.id_set()
return simplified | Merge colinear segments and fit circles.
Parameters
-----------
drawing: Path2D object, will not be modified.
Returns
-----------
simplified: Path2D with circles. |
18,075 | def genotypesPhenotypesGenerator(self, request):
compoundId = datamodel.PhenotypeAssociationSetCompoundId.parse(
request.phenotype_association_set_id)
dataset = self.getDataRepository().getDataset(compoundId.dataset_id)
phenotypeAssociationSet = dataset.getPhenotypeAssociationSet(
compoundId.phenotypeAssociationSetId)
featureSets = dataset.getFeatureSets()
annotationList = phenotypeAssociationSet.getAssociations(
request, featureSets)
return self._protocolListGenerator(request, annotationList) | Returns a generator over the (phenotypes, nextPageToken) pairs
defined by the (JSON string) request |
18,076 | def compare(s1, s2, **kwargs):
if s1 is None or s2 is None:
if s1 == s2:
return 1.0
return 0.0
try:
return NGram([s1], **kwargs).search(s2)[0][1]
except IndexError:
return 0.0 | Compares two strings and returns their similarity.
:param s1: first string
:param s2: second string
:param kwargs: additional keyword arguments passed to __init__.
:return: similarity between 0.0 and 1.0.
>>> from ngram import NGram
>>> NGram.compare('spa', 'spam')
0.375
>>> NGram.compare('ham', 'bam')
0.25
>>> NGram.compare('spam', 'pam') #N=2
0.375
>>> NGram.compare('ham', 'ams', N=1)
0.5 |
18,077 | def main( gpu:Param("GPU to run on", str)=None ):
gpu = setup_distrib(gpu)
n_gpus = num_distrib()
path = url2path(URLs.CIFAR)
ds_tfms = ([*rand_pad(4, 32), flip_lr(p=0.5)], [])
workers = min(16, num_cpus()//n_gpus)
data = ImageDataBunch.from_folder(path, valid=, ds_tfms=ds_tfms, bs=512//n_gpus,
num_workers=workers).normalize(cifar_stats)
learn = Learner(data, wrn_22(), metrics=accuracy)
if gpu is None: learn.model = nn.DataParallel(learn.model)
else: learn.to_distributed(gpu)
learn.to_fp16()
learn.fit_one_cycle(35, 3e-3, wd=0.4) | Distributed training of CIFAR-10.
Fastest speed is if you run as follows:
python -m fastai.launch train_cifar.py |
18,078 | def __getListMetaInfo(self, inferenceElement):
fieldMetaInfo = []
inferenceLabel = InferenceElement.getLabel(inferenceElement)
for inputFieldMeta in self.__inputFieldsMeta:
if InferenceElement.getInputElement(inferenceElement):
outputFieldMeta = FieldMetaInfo(
name=inputFieldMeta.name + ".actual",
type=inputFieldMeta.type,
special=inputFieldMeta.special
)
predictionField = FieldMetaInfo(
name=inputFieldMeta.name + "." + inferenceLabel,
type=inputFieldMeta.type,
special=inputFieldMeta.special
)
fieldMetaInfo.append(outputFieldMeta)
fieldMetaInfo.append(predictionField)
return fieldMetaInfo | Get field metadata information for inferences that are of list type
TODO: Right now we assume list inferences are associated with the input field
metadata |
18,079 | def refetch_fields(self, missing_fields):
db_fields = self.mongokat_collection.find_one({"_id": self["_id"]}, fields={k: 1 for k in missing_fields})
self._fetched_fields += tuple(missing_fields)
if not db_fields:
return
for k, v in db_fields.items():
self[k] = v | Refetches a list of fields from the DB |
18,080 | def loads(s, cls=BinaryQuadraticModel, vartype=None):
return load(s.split(), cls=cls, vartype=vartype) | Load a COOrdinate formatted binary quadratic model from a string. |
18,081 | def underscores_to_camelcase(argument):
result =
previous_was_underscore = False
for char in argument:
if char != :
if previous_was_underscore:
result += char.upper()
else:
result += char
previous_was_underscore = char ==
return result | Converts a camelcase param like the_new_attribute to the equivalent
camelcase version like theNewAttribute. Note that the first letter is
NOT capitalized by this function |
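A quick usage check of the converter above (note that, as the docstring says, the first letter is left as-is):

```python
def underscores_to_camelcase(argument):
    # Same logic as the row above, with the separator made explicit.
    result = ""
    previous_was_underscore = False
    for char in argument:
        if char != "_":
            result += char.upper() if previous_was_underscore else char
        previous_was_underscore = char == "_"
    return result

print(underscores_to_camelcase("the_new_attribute"))   # theNewAttribute
```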
18,082 | def get_fn(elev, name=None):
gcs = elev.grid_coordinates
coords = [gcs.LLC.lat, gcs.LLC.lon, gcs.URC.lat, gcs.URC.lon]
return get_fn_from_coords(coords, name) | Determines the standard filename for a given GeoTIFF Layer.
Parameters
-----------
elev : GdalReader.raster_layer
A raster layer from the GdalReader object.
name : str (optional)
An optional suffix to the filename.
Returns
-------
fn : str
The standard <filename>_<name>.tif with suffix (if supplied) |
18,083 | async def _verkey_for(self, target: str) -> str:
LOGGER.debug(, target)
rv = target
if rv is None or not ok_did(rv):
return rv | Given a DID, retrieve its verification key, looking in wallet, then pool.
Given a verification key or None, return input.
Raise WalletState if the wallet is closed. Given a recipient DID not in the wallet,
raise AbsentPool if the instance has no pool or ClosedPool if its pool is closed.
If no such verification key is on the ledger, raise AbsentNym.
:param target: verification key, or DID to resolve to such
:return: verification key |
18,084 | def parse(content, *args, **kwargs):
global MECAB_PYTHON3
if not in kwargs and MECAB_PYTHON3 and in globals():
return MeCab.Tagger(*args).parse(content)
else:
return run_mecab_process(content, *args, **kwargs) | Use mecab-python3 by default to parse JP text. Fall back to mecab binary app if needed |
18,085 | def get_posix(self, i):
index = i.index
value = []
try:
c = next(i)
if c != :
raise ValueError()
else:
value.append(c)
c = next(i)
if c == :
value.append(c)
c = next(i)
while c != :
if c not in _PROPERTY:
raise ValueError()
if c not in _PROPERTY_STRIP:
value.append(c)
c = next(i)
value.append(c)
c = next(i)
if c != or not value:
raise ValueError()
value.append(c)
except Exception:
i.rewind(i.index - index)
value = []
return .join(value) if value else None | Get POSIX. |
18,086 | def repeat(self, count=2):
try:
return self.__class__(.join((
str(self),
self.last_code() * (count - 1),
)))
except TypeError as ex:
raise TypeError(
.format(count)
) from ex | Repeat the last control code a number of times.
Returns a new Control with this one's data and the repeated code. |
18,087 | def shell():
"Open a shell"
from gui.tools.debug import Shell
shell = Shell()
shell.show()
return shell | Open a shell |
18,088 | def filter(self, table, cg_snapshots, filter_string):
query = filter_string.lower()
return [cg_snapshot for cg_snapshot in cg_snapshots
if query in cg_snapshot.name.lower()] | Naive case-insensitive search. |
18,089 | def put (self, ch):
if isinstance(ch, bytes):
ch = self._decode(ch)
self.put_abs (self.cur_r, self.cur_c, ch) | This puts a characters at the current cursor position. |
18,090 | def print_summary(graph, tails, node_id_map):
heads = get_heads(tails)
heights = get_heights(tails)
max_height = max(heights)
common_height, block_ids_at_common_height = get_common_height(tails)
lags = get_lags(heights, max_height)
common_ancestor = graph.root
divergences = get_divergences(heights, graph.root)
col_1 = 8
col_n = 8
format_str = + str(col_1) + + ( + str(col_n) + ) * 2
header = format_str.format("COMMON", "HEIGHT", "BLOCKS")
print(header)
print("-" * len(header))
print(format_str.format(
"ANCESTOR", common_ancestor.num, common_ancestor.ident[:col_n]))
print(format_str.format(
"HEIGHT", common_height, str(block_ids_at_common_height)))
print()
node_col_width = get_col_width_for_num(len(tails), len("NODE"))
num_col_width = get_col_width_for_num(max_height, len("HEIGHT"))
lag_col_width = get_col_width_for_num(max(lags), len("LAG"))
diverg_col_width = get_col_width_for_num(max(divergences), len("DIVERG"))
format_str = (
+ str(node_col_width) +
+ str(num_col_width) +
+ str(lag_col_width) +
+ str(diverg_col_width) +
)
header = format_str.format("NODE", "HEAD", "HEIGHT", "LAG", "DIVERG")
print(header)
print( * len(header))
for i, _ in enumerate(tails):
print(format_str.format(
node_id_map[i],
heads[i],
heights[i],
lags[i],
divergences[i],
))
print() | Print out summary and per-node comparison data. |
18,091 | def sendSMS_multi(self, CorpNum, Sender, Contents, Messages, reserveDT, adsYN=False, UserID=None, RequestNum=None):
return self.sendMessage("SMS", CorpNum, Sender, , , Contents, Messages, reserveDT, adsYN, UserID,
RequestNum) | Send multiple short (SMS) text messages in bulk.
args
CorpNum : Popbill member business registration number
Sender : sender number (used for broadcast sending)
Contents : message content (used for broadcast sending)
Messages : array of per-recipient sending information
reserveDT : reserved sending time (format: yyyyMMddHHmmss)
UserID : Popbill member user ID
RequestNum : sending request number
return
receipt number (receiptNum)
raise
PopbillException |
18,092 | def get_bounce_dump(bounce_id, api_key=None, secure=None, test=None,
**request_args):
s id. Get the id with :func:`get_bounces`.
:param api_key: Your Postmark API key. Required, if `test` is not `True`.
:param secure: Use the https scheme for the Postmark API.
Defaults to `True`
:param test: Use the Postmark Test API. Defaults to `False`.
:param \*\*request_args: Keyword arguments to pass to
:func:`requests.request`.
:rtype: :class:`BounceDumpResponse`
'
return _default_bounce_dump.get(bounce_id, api_key=api_key, secure=secure,
test=test, **request_args) | Get the raw email dump for a single bounce.
:param bounce_id: The bounce's id. Get the id with :func:`get_bounces`.
:param api_key: Your Postmark API key. Required, if `test` is not `True`.
:param secure: Use the https scheme for the Postmark API.
Defaults to `True`
:param test: Use the Postmark Test API. Defaults to `False`.
:param \*\*request_args: Keyword arguments to pass to
:func:`requests.request`.
:rtype: :class:`BounceDumpResponse` |
18,093 | def enable_events(self):
if self.annot is not None and self.parent.channels.groups:
self.action[].setEnabled(True)
self.action[].setEnabled(True)
self.action[].setEnabled(True)
else:
self.action[].setEnabled(False)
self.action[].setEnabled(False)
self.action[].setEnabled(False) | enable slow wave and spindle detection if both
annotations and channels are active. |
18,094 | def tie_weights(self):
if self.sample_softmax > 0:
if self.config.tie_weight:
self.out_layer.weight = self.transformer.word_emb.weight
else:
if self.config.tie_weight:
for i in range(len(self.crit.out_layers)):
self.crit.out_layers[i].weight = self.transformer.word_emb.emb_layers[i].weight
if self.config.tie_projs:
for i, tie_proj in enumerate(self.config.tie_projs):
if tie_proj and self.config.div_val == 1 and self.config.d_model != self.config.d_embed:
self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[0]
elif tie_proj and self.config.div_val != 1:
self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[i] | Run this to be sure output and input (adaptive) softmax weights are tied |
18,095 | def get_status(self):
url = self.base_url +
try:
r = requests.get(url, timeout=10)
return r.json()
except RequestException as err:
raise Client.ClientError(err) | Query the device status. Returns JSON of the device internal state |
18,096 | def decode_body(cls, header, f):
assert header.packet_type == MqttControlPacketType.unsuback
decoder = mqtt_io.FileDecoder(mqtt_io.LimitReader(f, header.remaining_len))
packet_id, = decoder.unpack(mqtt_io.FIELD_PACKET_ID)
if header.remaining_len != decoder.num_bytes_consumed:
raise DecodeError()
return decoder.num_bytes_consumed, MqttUnsuback(packet_id) | Generates a `MqttUnsuback` packet given a
`MqttFixedHeader`. This method asserts that header.packet_type
is `unsuback`.
Parameters
----------
header: MqttFixedHeader
f: file
Object with a read method.
Raises
------
DecodeError
When there are extra bytes at the end of the packet.
Returns
-------
int
Number of bytes consumed from ``f``.
MqttUnsuback
Object extracted from ``f``. |
18,097 | def get_bucket_lifecycle(self, bucket):
details = self._details(
method=b"GET",
url_context=self._url_context(bucket=bucket, object_name="?lifecycle"),
)
d = self._submit(self._query_factory(details))
d.addCallback(self._parse_lifecycle_config)
return d | Get the lifecycle configuration of a bucket.
@param bucket: The name of the bucket.
@return: A C{Deferred} that will fire with the bucket's lifecycle
configuration. |
18,098 | def open(self, path, mode=):
entry = self.find(path)
if entry is None:
if mode == :
raise ValueError("stream does not exists: %s" % path)
entry = self.create_dir_entry(path, , None)
else:
if not entry.isfile():
raise ValueError("can only open stream type DirEntrywrw':
pass
s = Stream(self, entry, mode)
return s | Open stream, returning ``Stream`` object |
18,099 | def value_to_string(self, obj):
statefield = self.to_python(self.value_from_object(obj))
return statefield.state.name | Convert a field value to a string.
Returns the state name. |