Unnamed: 0 (int64, 0–389k) | code (string, 26–79.6k chars) | docstring (string, 1–46.9k chars) |
---|---|---|
19,200 | def calculate_deltat(year, month):
plw = ('Deltat is unknown for years before -1999 and after 3000. '
'Delta values will be calculated, but the calculations '
'are not intended to be used for these years.')  # warning text inferred; original literal lost in extraction
try:
if np.any((year > 3000) | (year < -1999)):
warnings.warn(plw)
except ValueError:
if (year > 3000) | (year < -1999):
warnings.warn(plw)
except TypeError:
return 0
y = year + (month - 0.5)/12
deltat = np.where(year < -500,
-20+32*((y-1820)/100)**2, 0)
deltat = np.where((-500 <= year) & (year < 500),
10583.6-1014.41*(y/100)
+ 33.78311*(y/100)**2
- 5.952053*(y/100)**3
- 0.1798452*(y/100)**4
+ 0.022174192*(y/100)**5
+ 0.0090316521*(y/100)**6, deltat)
deltat = np.where((500 <= year) & (year < 1600),
1574.2-556.01*((y-1000)/100)
+ 71.23472*((y-1000)/100)**2
+ 0.319781*((y-1000)/100)**3
- 0.8503463*((y-1000)/100)**4
- 0.005050998*((y-1000)/100)**5
+ 0.0083572073*((y-1000)/100)**6, deltat)
deltat = np.where((1600 <= year) & (year < 1700),
120-0.9808*(y-1600)
- 0.01532*(y-1600)**2
+ (y-1600)**3/7129, deltat)
deltat = np.where((1700 <= year) & (year < 1800),
8.83+0.1603*(y-1700)
- 0.0059285*(y-1700)**2
+ 0.00013336*(y-1700)**3
- (y-1700)**4/1174000, deltat)
deltat = np.where((1800 <= year) & (year < 1860),
13.72-0.332447*(y-1800)
+ 0.0068612*(y-1800)**2
+ 0.0041116*(y-1800)**3
- 0.00037436*(y-1800)**4
+ 0.0000121272*(y-1800)**5
- 0.0000001699*(y-1800)**6
+ 0.000000000875*(y-1800)**7, deltat)
deltat = np.where((1860 <= year) & (year < 1900),
7.62+0.5737*(y-1860)
- 0.251754*(y-1860)**2
+ 0.01680668*(y-1860)**3
- 0.0004473624*(y-1860)**4
+ (y-1860)**5/233174, deltat)
deltat = np.where((1900 <= year) & (year < 1920),
-2.79+1.494119*(y-1900)
- 0.0598939*(y-1900)**2
+ 0.0061966*(y-1900)**3
- 0.000197*(y-1900)**4, deltat)
deltat = np.where((1920 <= year) & (year < 1941),
21.20+0.84493*(y-1920)
- 0.076100*(y-1920)**2
+ 0.0020936*(y-1920)**3, deltat)
deltat = np.where((1941 <= year) & (year < 1961),
29.07+0.407*(y-1950)
- (y-1950)**2/233
+ (y-1950)**3/2547, deltat)
deltat = np.where((1961 <= year) & (year < 1986),
45.45+1.067*(y-1975)
- (y-1975)**2/260
- (y-1975)**3/718, deltat)
deltat = np.where((1986 <= year) & (year < 2005),
63.86+0.3345*(y-2000)
- 0.060374*(y-2000)**2
+ 0.0017275*(y-2000)**3
+ 0.000651814*(y-2000)**4
+ 0.00002373599*(y-2000)**5, deltat)
deltat = np.where((2005 <= year) & (year < 2050),
62.92+0.32217*(y-2000)
+ 0.005589*(y-2000)**2, deltat)
deltat = np.where((2050 <= year) & (year < 2150),
-20+32*((y-1820)/100)**2
- 0.5628*(2150-y), deltat)
deltat = np.where(year >= 2150,
-20+32*((y-1820)/100)**2, deltat)
deltat = deltat.item() if np.isscalar(year) & np.isscalar(month)\
else deltat
return deltat | Calculate the difference between Terrestrial Dynamical Time (TD)
and Universal Time (UT).
Note: This function is not yet compatible with Numba.
Equations taken from http://eclipse.gsfc.nasa.gov/SEcat5/deltatpoly.html |
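A minimal usage sketch, assuming `calculate_deltat` (and numpy/warnings) are available in the current namespace; it simply evaluates the NASA polynomial fits linked above:

```python
import numpy as np

# Scalar inputs return a plain float; array inputs return an ndarray of the same shape.
dt_2020 = calculate_deltat(2020, 6)                        # roughly 72 seconds for mid-2020
dt_range = calculate_deltat(np.array([1900, 1950, 2000]), 1)
print(dt_2020, dt_range)
```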
19,201 | def createSegment(self, cell):
return self._createSegment(
self.connections, self.lastUsedIterationForSegment, cell, self.iteration,
self.maxSegmentsPerCell) | Create a :class:`~nupic.algorithms.connections.Segment` on the specified
cell. This method calls
:meth:`~nupic.algorithms.connections.Connections.createSegment` on the
underlying :class:`~nupic.algorithms.connections.Connections`, and it does
some extra bookkeeping. Unit tests should call this method, and not
:meth:`~nupic.algorithms.connections.Connections.createSegment`.
:param cell: (int) Index of cell to create a segment on.
:returns: (:class:`~nupic.algorithms.connections.Segment`) The created
segment. |
19,202 | def add_metadata(self, metadata_matrix, meta_index_store):
assert isinstance(meta_index_store, IndexStore)
assert len(metadata_matrix.shape) == 2
assert metadata_matrix.shape[0] == self.get_num_docs()
return self._make_new_term_doc_matrix(new_X=self._X,
new_y=None,
new_category_idx_store=None,
new_y_mask=np.ones(self.get_num_docs()).astype(bool),
new_mX=metadata_matrix,
new_term_idx_store=self._term_idx_store,
new_metadata_idx_store=meta_index_store) | Returns a new corpus with the metadata matrix and index store integrated.
:param metadata_matrix: scipy.sparse matrix (# docs, # metadata)
:param meta_index_store: IndexStore of metadata values
:return: TermDocMatrixWithoutCategories |
19,203 | def standardize_input_data(data):
if type(data) == bytes:
data = data.decode()
if type(data) == list:
data = [
el.decode() if type(el) == bytes else el
for el in data
]
return data | Ensure utf-8 encoded strings are passed to the indico API |
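A short, self-contained sketch of the same normalization (with the element-wise check fixed to inspect each list element rather than the outer list):

```python
def standardize_input_data(data):
    """Ensure utf-8 encoded strings are passed on (bytes are decoded)."""
    if type(data) == bytes:
        data = data.decode()
    if type(data) == list:
        data = [el.decode() if type(el) == bytes else el for el in data]
    return data

print(standardize_input_data(b"hello"))            # -> 'hello'
print(standardize_input_data([b"a", "b", b"c"]))   # -> ['a', 'b', 'c']
```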
19,204 | def _construct_columns(self, column_map):
from sqlalchemy import Column, String, Boolean, Integer, Float, Binary
column_args = []
for key, value in column_map.items():
record_key = value[0]
datatype = value[1]
max_length = value[2]
if record_key == 'id':  # key/datatype literals below are inferred; the originals were lost in extraction
if datatype in ('string', 'float', 'integer'):
if datatype == 'string':
if max_length:
column_args.insert(0, Column(record_key, String(max_length), primary_key=True))
else:
column_args.insert(0, Column(record_key, String, primary_key=True))
elif datatype == 'float':
column_args.insert(0, Column(record_key, Float, primary_key=True))
elif datatype == 'integer':
column_args.insert(0, Column(record_key, Integer, primary_key=True))
else:
raise ValueError('the id field must be a string, float or integer datatype')
else:
if datatype == 'boolean':
column_args.append(Column(record_key, Boolean))
elif datatype == 'string':
if max_length:
column_args.append(Column(record_key, String(max_length)))
else:
column_args.append(Column(record_key, String))
elif datatype == 'float':
column_args.append(Column(record_key, Float))
elif datatype == 'integer':
column_args.append(Column(record_key, Integer))
elif datatype == 'bytes':
column_args.append(Column(record_key, Binary))
return column_args | a helper method for constructing the column objects for a table object |
19,205 | def dependencies(self) -> List[Dependency]:
dependencies_str = DB.get_hash_value(self.key, 'dependencies')  # field name inferred from context
dependencies = []
for dependency in ast.literal_eval(dependencies_str):
dependencies.append(Dependency(dependency))
return dependencies | Return the PB dependencies. |
19,206 | def node_style(self, node, **kwargs):
if node not in self.edges:
self.edges[node] = {}
self.nodes[node] = kwargs | Modifies a node's style in the dot representation. |
19,207 | def extend_reservation(request, user_id, days=7):
user = User.objects.get(id=int(user_id))
cart = CartController.for_user(user)
cart.extend_reservation(datetime.timedelta(days=days))
return redirect(request.META["HTTP_REFERER"]) | Allows staff to extend the reservation on a given user's cart. |
19,208 | def field_values(self):
if self._field_values is None:
self._field_values = FieldValueList(
self._version,
assistant_sid=self._solution['assistant_sid'],
field_type_sid=self._solution['field_type_sid'],
)
return self._field_values | Access the field_values
:returns: twilio.rest.autopilot.v1.assistant.field_type.field_value.FieldValueList
:rtype: twilio.rest.autopilot.v1.assistant.field_type.field_value.FieldValueList |
19,209 | def _parse_pool_transaction_file(
ledger, nodeReg, cliNodeReg, nodeKeys, activeValidators,
ledger_size=None):
for _, txn in ledger.getAllTxn(to=ledger_size):
if get_type(txn) == NODE:
txn_data = get_payload_data(txn)
nodeName = txn_data[DATA][ALIAS]
clientStackName = nodeName + CLIENT_STACK_SUFFIX
nHa = (txn_data[DATA][NODE_IP], txn_data[DATA][NODE_PORT]) \
if (NODE_IP in txn_data[DATA] and NODE_PORT in txn_data[DATA]) \
else None
cHa = (txn_data[DATA][CLIENT_IP], txn_data[DATA][CLIENT_PORT]) \
if (CLIENT_IP in txn_data[DATA] and CLIENT_PORT in txn_data[DATA]) \
else None
if nHa:
nodeReg[nodeName] = HA(*nHa)
if cHa:
cliNodeReg[clientStackName] = HA(*cHa)
try:
key_type = 'verkey'
verkey = cryptonymToHex(str(txn_data[TARGET_NYM]))
key_type = 'identifier'
cryptonymToHex(get_from(txn))
except ValueError:
logger.exception(
'Invalid {}. Rebuild pool transactions.'.format(key_type))  # message text inferred; original literal lost
exit('Invalid {}. Rebuild pool transactions.'.format(key_type))
nodeKeys[nodeName] = verkey
services = txn_data[DATA].get(SERVICES)
if isinstance(services, list):
if VALIDATOR in services:
activeValidators.add(nodeName)
else:
activeValidators.discard(nodeName) | helper function for parseLedgerForHaAndKeys |
19,210 | def cis(x: float) -> complex:
return np.cos(x) + 1.0j * np.sin(x) | r"""
Implements Euler's formula
:math:`\text{cis}(x) = e^{i x} = \cos(x) + i \sin(x)` |
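A quick numerical check of the identity, with `cis` defined as above:

```python
import numpy as np

def cis(x: float) -> complex:
    return np.cos(x) + 1.0j * np.sin(x)

# cis(pi/2) should be i, and cis(pi) should be -1 (up to floating-point error).
print(cis(np.pi / 2))   # ~ (6.1e-17+1j)
print(cis(np.pi))       # ~ (-1+1.2e-16j)
```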
19,211 | def create_datastream(self, datastream):
raw_datastream = self.http.post(, datastream)
return Schemas.Datastream(datastream=raw_datastream) | To create Datastream
:param datastream: Datastream
:param options: dict |
19,212 | def run_once(self):
packet = _parse_irc_packet(next(self.lines))
for event_handler in list(self.on_packet_received):
event_handler(self, packet)
if packet.command == "PRIVMSG":
if packet.arguments[0].startswith("
for event_handler in list(self.on_public_message):
event_handler(self, packet.arguments[0], packet.prefix.split("!")[0], packet.arguments[1])
else:
for event_handler in list(self.on_private_message):
event_handler(self, packet.prefix.split("!")[0], packet.arguments[1])
elif packet.command == "PING":
self.send_line("PONG :{}".format(packet.arguments[0]))
for event_handler in list(self.on_ping):
event_handler(self)
elif packet.command == "433" or packet.command == "437":
self.set_nick("{}_".format(self.nick))
elif packet.command == "001":
for event_handler in list(self.on_welcome):
event_handler(self)
elif packet.command == "JOIN":
for event_handler in list(self.on_join):
event_handler(self, packet.arguments[0], packet.prefix.split("!")[0])
elif packet.command == "PART":
for event_handler in list(self.on_leave):
event_handler(self, packet.arguments[0], packet.prefix.split("!")[0]) | This function runs one iteration of the IRC client. This is called in a loop
by the run_loop function. It can be called separately, but most of the
time there is no need to do this. |
19,213 | def devserver_cmd(argv=sys.argv[1:]):
arguments = docopt(devserver_cmd.__doc__, argv=argv)
initialize_config()
app.run(
host=arguments['--host'],
port=int(arguments['--port']),
debug=int(arguments['--debug']),
) | \
Serve the web API for development.
Usage:
pld-devserver [options]
Options:
-h --help Show this screen.
--host=<host> The host to use [default: 0.0.0.0].
--port=<port> The port to use [default: 5000].
--debug=<debug> Whether or not to use debug mode [default: 0]. |
19,214 | def delete(self, path, data=None):
assert path is not None
assert data is None or isinstance(data, dict)
response = self.conn.request('DELETE', path, data,
self._get_headers())
self._last_status = response_status = response.status
response_content = response.data.decode()
return Result(status=response_status, json=response_content) | Executes a DELETE.
'path' may not be None. Should include the full path to the
resoure.
'data' may be None or a dictionary.
Returns a named tuple that includes:
status: the HTTP status code
json: the returned JSON-HAL
If the key was not set, throws an APIConfigurationException. |
19,215 | def lcm(*numbers):
n = 1
for i in numbers:
n = (i * n) // gcd(i, n)
return n | Return lowest common multiple of a sequence of numbers.
Args:
\*numbers: Sequence of numbers.
Returns:
(int) Lowest common multiple of numbers. |
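A self-contained version of the same routine with a usage example:

```python
from math import gcd

def lcm(*numbers):
    """Return lowest common multiple of a sequence of numbers."""
    n = 1
    for i in numbers:
        n = (i * n) // gcd(i, n)
    return n

print(lcm(4, 6))        # 12
print(lcm(4, 6, 10))    # 60
```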
19,216 | def listBlockSummaries(self, block_name="", dataset="", detail=False):
if bool(dataset)+bool(block_name)!=1:
dbsExceptionHandler("dbsException-invalid-input2",
dbsExceptionCode["dbsException-invalid-input2"],
self.logger.exception,
"Dataset or block_names must be specified at a time.")
if block_name and isinstance(block_name, basestring):
try:
block_name = [str(block_name)]
except:
dbsExceptionHandler("dbsException-invalid-input", "Invalid block_name for listBlockSummaries. ")
for this_block_name in block_name:
if re.search("[*, %]", this_block_name):
dbsExceptionHandler("dbsException-invalid-input2",
dbsExceptionCode["dbsException-invalid-input2"],
self.logger.exception,
"No wildcards are allowed in block_name list")
if re.search("[*, %]", dataset):
dbsExceptionHandler("dbsException-invalid-input2",
dbsExceptionCode["dbsException-invalid-input2"],
self.logger.exception,
"No wildcards are allowed in dataset")
data = []
try:
with self.dbi.connection() as conn:
data = self.dbsBlockSummaryListDAO.execute(conn, block_name, dataset, detail)
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except Exception as ex:
sError = "DBSReaderModel/listBlockSummaries. %s\n. Exception trace: \n %s" % (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error',
dbsExceptionCode['dbsException-server-error'],
self.logger.exception,
sError)
for item in data:
yield item | API that returns summary information like total size and total number of events in a dataset or a list of blocks
:param block_name: list block summaries for block_name(s)
:type block_name: str, list
:param dataset: list block summaries for all blocks in dataset
:type dataset: str
:param detail: list summary by block names if detail=True, default=False
:type detail: str, bool
:returns: list of dicts containing total block_sizes, file_counts and event_counts of dataset or blocks provided |
19,217 | def emit(self):
i = self.options.rand.get_weighted_random_index(self._weights)
return self._transcriptome.transcripts[i] | Get a mapping from a transcript
:return: One random Transcript sequence
:rtype: sequence |
def merge_entity(self, entity, if_match='*'):
request = _merge_entity(entity, if_match, self._require_encryption,
self._key_encryption_key)
self._add_to_batch(entity['PartitionKey'], entity['RowKey'], request) | Adds a merge entity operation to the batch. See
:func:`~azure.storage.table.tableservice.TableService.merge_entity` for more
information on merges.
The operation will not be executed until the batch is committed.
:param entity:
The entity to merge. Could be a dict or an entity object.
Must contain a PartitionKey and a RowKey.
:type entity: dict or :class:`~azure.storage.table.models.Entity`
:param str if_match:
The client may specify the ETag for the entity on the
request in order to compare to the ETag maintained by the service
for the purpose of optimistic concurrency. The merge operation
will be performed only if the ETag sent by the client matches the
value maintained by the server, indicating that the entity has
not been modified since it was retrieved by the client. To force
an unconditional merge, set If-Match to the wildcard character (*). |
19,219 | def save_df_output(
df_output: pd.DataFrame,
freq_s: int = 3600,
site: str = '',
path_dir_save: Path = Path('.'),) -> list:
list_path_save = []
list_group = df_output.columns.get_level_values('group').unique()  # level names inferred from the variable names
list_grid = df_output.index.get_level_values('grid').unique()
for grid in list_grid:
for group in list_group:
df_output_grid_group = df_output\
.loc[grid, group]\
.dropna(how='all', axis=0)
path_save = save_df_grid_group(
df_output_grid_group, grid, group,
site=site, dir_save=path_dir_save)
list_path_save.append(path_save)
freq_save = pd.Timedelta(freq_s, 's')
df_rsmp = resample_output(df_output, freq_save)
df_rsmp = df_rsmp.drop(columns=)
list_group = df_rsmp.columns.get_level_values('group').unique()
list_grid = df_rsmp.index.get_level_values('grid').unique()
for grid in list_grid:
for group in list_group:
df_output_grid_group = df_rsmp.loc[grid, group]
path_save = save_df_grid_group(
df_output_grid_group, grid, group,
site=site, dir_save=path_dir_save)
list_path_save.append(path_save)
return list_path_save | save supy output dataframe to txt files
Parameters
----------
df_output : pd.DataFrame
output dataframe of supy simulation
freq_s : int, optional
output frequency in second (the default is 3600, which indicates the a txt with hourly values)
path_dir_save : Path, optional
directory to save txt files (the default is '.', which the current working directory)
site : str, optional
site code used for filename (the default is '', which indicates no site name prepended to the filename)
path_runcontrol : str or anything that can be parsed as `Path`, optional
path to SUEWS 'RunControl.nml' file (the default is None, which indicates necessary saving options should be specified via other parameters)
Returns
-------
list
a list of `Path` objects for saved txt files |
19,220 | def setup(self, in_name=None, out_name=None, required=None, hidden=None,
multiple=None, defaults=None):
if in_name is not None:
self.in_name = in_name if isinstance(in_name, list) else [in_name]
if out_name is not None:
self.out_name = out_name
if required is not None:
self.required = required
if hidden is not None:
self.hidden = hidden
if multiple is not None:
self.multiple = multiple
if defaults is not None:
self.defaults = defaults | Set the options of the block.
Only the not None given options are set
.. note:: a block may have multiple inputs but have only one output
:param in_name: name(s) of the block input data
:type in_name: str or list of str
:param out_name: name of the block output data
:type out_name: str
:param required: whether the block will be required or not
:type required: bool
:param hidden: whether the block will be hidden to the user or not
:type hidden: bool
:param multiple: if True, more than one component may be selected / run
:type multiple: bool
:param defaults: names of the selected components
:type defaults: list of str, or str |
19,221 | def _protected_division(x1, x2):
with np.errstate(divide='ignore', invalid='ignore'):
return np.where(np.abs(x2) > 0.001, np.divide(x1, x2), 1.) | Closure of division (x1/x2) for zero denominator. |
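A self-contained check of the closure behaviour (near-zero denominators map to 1.0):

```python
import numpy as np

def _protected_division(x1, x2):
    """Closure of division (x1/x2) for zero denominator."""
    with np.errstate(divide='ignore', invalid='ignore'):
        return np.where(np.abs(x2) > 0.001, np.divide(x1, x2), 1.)

print(_protected_division(np.array([10., 5., 3.]), np.array([2., 0., 0.0001])))
# -> [5. 1. 1.]
```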
19,222 | def init_logger(self):
if not self.result_logger:
if not os.path.exists(self.local_dir):
os.makedirs(self.local_dir)
if not self.logdir:
self.logdir = tempfile.mkdtemp(
prefix="{}_{}".format(
str(self)[:MAX_LEN_IDENTIFIER], date_str()),
dir=self.local_dir)
elif not os.path.exists(self.logdir):
os.makedirs(self.logdir)
self.result_logger = UnifiedLogger(
self.config,
self.logdir,
upload_uri=self.upload_dir,
loggers=self.loggers,
sync_function=self.sync_function) | Init logger. |
19,223 | def ionic_strength(mis, zis):
return 0.5*sum([mi*zi*zi for mi, zi in zip(mis, zis)]) | r'''Calculate the ionic strength of a solution in one of two ways,
depending on the inputs only. For Pitzer and Bromley models,
`mis` should be molalities of each component. For eNRTL models,
`mis` should be mole fractions of each electrolyte in the solution.
This will sum to be much less than 1.
.. math::
I = \frac{1}{2} \sum M_i z_i^2
I = \frac{1}{2} \sum x_i z_i^2
Parameters
----------
mis : list
Molalities of each ion, or mole fractions of each ion [mol/kg or -]
zis : list
Charges of each ion [-]
Returns
-------
I : float
ionic strength, [?]
Examples
--------
>>> ionic_strength([0.1393, 0.1393], [1, -1])
0.1393
References
----------
.. [1] Chen, Chau-Chyun, H. I. Britt, J. F. Boston, and L. B. Evans. "Local
Composition Model for Excess Gibbs Energy of Electrolyte Systems.
Part I: Single Solvent, Single Completely Dissociated Electrolyte
Systems." AIChE Journal 28, no. 4 (July 1, 1982): 588-96.
doi:10.1002/aic.690280410
.. [2] Gmehling, Jurgen. Chemical Thermodynamics: For Process Simulation.
Weinheim, Germany: Wiley-VCH, 2012. |
19,224 | def __compute_evolution(
df,
id_cols,
value_col,
date_col=None,
freq=1,
compare_to=None,
method='abs',
format='column',
offseted_suffix='_offseted',
evolution_col_name='evolution_computed',
how='left',
fillna=None,
raise_duplicate_error=True
):
if date_col is not None:
is_date_to_format = isinstance(date_col, dict) or (df[date_col].dtype == np.object)
if is_date_to_format:
if isinstance(date_col, dict):
date_format = date_col.get(, None)
date_col = date_col[]
else:
date_format = None
df[+date_col + ] = pd.to_datetime(df[date_col], format=date_format)
date_col = +date_col +
is_freq_dict = isinstance(freq, dict)
if is_freq_dict:
freq = pd.DateOffset(**{k: int(v) for k, v in freq.items()})
check_params_columns_duplicate(id_cols + [value_col, date_col])
group_cols = id_cols + [date_col]
df_offseted = df[group_cols + [value_col]].copy()
df_offseted[date_col] += freq
df_with_offseted_values = apply_merge(
df, df_offseted, group_cols, how, offseted_suffix,
raise_duplicate_error
)
if is_date_to_format:
del df_with_offseted_values[date_col]
elif compare_to is not None:
check_params_columns_duplicate(id_cols + [value_col])
group_cols = id_cols
df_offseted = df.query(compare_to).copy()
df_offseted = df_offseted[group_cols + [value_col]]
df_with_offseted_values = apply_merge(
df, df_offseted, group_cols, how, offseted_suffix,
raise_duplicate_error
)
apply_fillna(df_with_offseted_values, value_col, offseted_suffix, fillna)
apply_method(df_with_offseted_values, evolution_col_name, value_col, offseted_suffix, method)
return apply_format(df_with_offseted_values, evolution_col_name, format) | Compute an evolution column :
- against a period distant from a fixed frequency.
- against a part of the df
Unfortunately, pandas doesn't allow .change() and .pct_change() to be
executed with a MultiIndex.
Args:
df (pd.DataFrame):
id_cols (list(str)):
value_col (str):
date_col (str/dict): default None
freq (int/pd.DateOffset/pd.Serie): default 1
compare_to (str): default None
method (str): default ``'abs'`` can be also ``'pct'``
format(str): default 'column' can be also 'df'
offseted_suffix(str): default '_offseted'
evolution_col_name(str): default 'evolution_computed'
how(str): default 'left'
fillna(str/int): default None |
19,225 | def license_present(name):
ret = {'name': name,  # standard Salt state return dict; string literals below (module names, messages) are inferred
'changes': {},
'result': False,
'comment': ''}
if not __salt__['powerpath.has_powerpath']():
ret['result'] = False
ret['comment'] = 'PowerPath is not installed.'
return ret
licenses = [l['key'] for l in __salt__['powerpath.list_licenses']()]
if name in licenses:
ret['result'] = True
ret['comment'] = 'License key {0} already present'.format(name)
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'License key {0} is set to be added'.format(name)
return ret
data = __salt__['powerpath.add_license'](name)
if data['result']:
ret['changes'] = {name: 'added'}
ret['result'] = True
ret['comment'] = data['output']
return ret
else:
ret['result'] = False
ret['comment'] = data['output']
return ret | Ensures that the specified PowerPath license key is present
on the host.
name
The license key to ensure is present |
19,226 | def output(self, output, accepts, set_http_code, set_content_type):
graph = Decorator._get_graph(output)
if graph is not None:
output_mimetype, output_format = self.format_selector.decide(accepts, graph.context_aware)
serialized = graph.serialize(format=output_format)
set_content_type(output_mimetype)
return [serialized]
else:
return output | Formats a response from a WSGI app to handle any RDF graphs
If a view function returns a single RDF graph, serialize it based on Accept header
If it's not an RDF graph, return it without any special handling |
19,227 | def centroid_2dg(data, error=None, mask=None):
gfit = fit_2dgaussian(data, error=error, mask=mask)
return np.array([gfit.x_mean.value, gfit.y_mean.value]) | Calculate the centroid of a 2D array by fitting a 2D Gaussian (plus
a constant) to the array.
Invalid values (e.g. NaNs or infs) in the ``data`` or ``error``
arrays are automatically masked. The mask for invalid values
represents the combination of the invalid-value masks for the
``data`` and ``error`` arrays.
Parameters
----------
data : array_like
The 2D data array.
error : array_like, optional
The 2D array of the 1-sigma errors of the input ``data``.
mask : array_like (bool), optional
A boolean mask, with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Returns
-------
centroid : `~numpy.ndarray`
The ``x, y`` coordinates of the centroid. |
19,228 | def statement(self):
return (self.assignment ^ self.expression) + Suppress(
self.syntax.terminator) | A terminated relational algebra statement. |
19,229 | def parse_option(self, option, block_name, *values):
_extra_subs = ('www2', 'm', 'wap')  # placeholder subdomain prefixes; the original literal values were lost
if len(values) == 0:
raise ValueError
for value in values:
value = value.lower()
domain = _RE_WWW_SUB.sub('', value)  # strip a leading www. prefix; operand inferred (domain was otherwise undefined)
if len(domain.split('.')) == 2:
for sub in _extra_subs:
self.domains.add('{}.{}'.format(sub, domain))
self.domains.add(domain)
if not self.domains:
raise ValueError | Parse domain values for option. |
19,230 | def state(self, new_state):
with self.lock:
self._state.exit()
self._state = new_state
self._state.enter() | Set the state. |
19,231 | def list_all_customer_groups(cls, **kwargs):
kwargs['_return_http_data_only'] = True  # kwarg name follows the usual swagger-codegen pattern; inferred
if kwargs.get('async'):
return cls._list_all_customer_groups_with_http_info(**kwargs)
else:
(data) = cls._list_all_customer_groups_with_http_info(**kwargs)
return data | List CustomerGroups
Return a list of CustomerGroups
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_customer_groups(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[CustomerGroup]
If the method is called asynchronously,
returns the request thread. |
19,232 | def AddShadow(self, fileset):
shadow = fileset.get("/etc/shadow")
if shadow:
self._ParseFile(shadow, self.ParseShadowEntry)
else:
logging.debug("No /etc/shadow file.") | Add the shadow entries to the shadow store. |
19,233 | def get_content_slug_by_slug(self, slug):
content = self.filter(type='slug', body=slug)  # content type literal inferred from the method's purpose
if settings.PAGE_USE_SITE_ID:
content = content.filter(page__sites__id=global_settings.SITE_ID)
try:
content = content.latest()
except self.model.DoesNotExist:
return None
else:
return content | Returns the latest :class:`Content <pages.models.Content>`
slug object that match the given slug for the current site domain.
:param slug: the wanted slug. |
19,234 | def build(self):
keys = self._param_grid.keys()
grid_values = self._param_grid.values()
def to_key_value_pairs(keys, values):
return [(key, key.typeConverter(value)) for key, value in zip(keys, values)]
return [dict(to_key_value_pairs(keys, prod)) for prod in itertools.product(*grid_values)] | Builds and returns all combinations of parameters specified
by the param grid. |
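A standalone illustration (plain Python, no Spark) of the cartesian-product expansion the builder performs; the param names and values here are hypothetical stand-ins for `self._param_grid`:

```python
import itertools

# Hypothetical param grid: param name -> candidate values.
param_grid = {'regParam': [0.01, 0.1], 'maxIter': [10, 50, 100]}

combos = [dict(zip(param_grid.keys(), prod))
          for prod in itertools.product(*param_grid.values())]
print(len(combos))   # 6 combinations
print(combos[0])     # {'regParam': 0.01, 'maxIter': 10}
```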
19,235 | def luminosity_integral(self, x, axis_ratio):
r = x * axis_ratio
return 2 * np.pi * r * self.intensities_from_grid_radii(x) | Routine to integrate the luminosity of an elliptical light profile.
The axis ratio is set to 1.0 for computing the luminosity within a circle |
19,236 | def insertions_from_masked(seq):
insertions = []
prev = True
for i, base in enumerate(seq):
if base.isupper() and prev is True:
insertions.append([])
prev = False
elif base.islower():
insertions[-1].append(i)
prev = True
return [[min(i), max(i)] for i in insertions if i != []] | get coordinates of insertions from insertion-masked sequence |
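A self-contained run on a small masked sequence (lowercase bases mark insertions):

```python
def insertions_from_masked(seq):
    """Get coordinates of insertions from an insertion-masked sequence."""
    insertions = []
    prev = True
    for i, base in enumerate(seq):
        if base.isupper() and prev is True:
            insertions.append([])
            prev = False
        elif base.islower():
            insertions[-1].append(i)
            prev = True
    return [[min(i), max(i)] for i in insertions if i != []]

print(insertions_from_masked("ACGtaGCatG"))   # -> [[3, 4], [7, 8]]
```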
19,237 | def writeCleanup(self, varBind, **context):
name, val = varBind
(debug.logger & debug.FLAG_INS and
debug.logger('%s: writeCleanup(%s, %r)' % (self, name, val)))  # log format string inferred; original literal lost
cbFun = context['cbFun']
self.branchVersionId += 1
instances = context['instances'].setdefault(self.name, {self.ST_CREATE: {}, self.ST_DESTROY: {}})
idx = context['idx']
if idx in instances[self.ST_CREATE]:
self.createCleanup(varBind, **context)
return
if idx in instances[self.ST_DESTROY]:
self.destroyCleanup(varBind, **context)
return
try:
node = self.getBranch(name, **context)
except (error.NoSuchInstanceError, error.NoSuchObjectError) as exc:
cbFun(varBind, **dict(context, error=exc))
else:
node.writeCleanup(varBind, **context) | Finalize Managed Object Instance modification.
Implements the successful third step of the multi-step workflow of the
SNMP SET command processing (:RFC:`1905#section-4.2.5`).
The goal of the third (successful) phase is to seal the new state of the
requested Managed Object Instance. Once the system transition into the
*cleanup* state, no roll back to the previous Managed Object Instance
state is possible.
The role of this object in the MIB tree is non-terminal. It does not
access the actual Managed Object Instance, but just traverses one level
down the MIB tree and hands off the query to the underlying objects.
Parameters
----------
varBind: :py:class:`~pysnmp.smi.rfc1902.ObjectType` object representing
new Managed Object Instance value to set
Other Parameters
----------------
\*\*context:
Query parameters:
* `cbFun` (callable) - user-supplied callable that is invoked to
pass the new value of the Managed Object Instance or an error.
Notes
-----
The callback functions (e.g. `cbFun`) have the same signature as this
method where `varBind` contains the new Managed Object Instance value.
In case of an error, the `error` key in the `context` dict will contain
an exception object. |
19,238 | def should_stop(self, result):
if result.get(DONE):
return True
for criteria, stop_value in self.stopping_criterion.items():
if criteria not in result:
raise TuneError(
"Stopping criteria {} not provided in result {}.".format(
criteria, result))
if result[criteria] >= stop_value:
return True
return False | Whether the given result meets this trial's stopping criteria. |
19,239 | def set_genre(self, genre):
self._set_attr(TCON(encoding=3, text=str(genre))) | Sets song's genre
:param genre: genre |
19,240 | def drop_if(df, fun):
def _filter_f(col):
try:
return fun(df[col])
except:
return False
cols = list(filter(_filter_f, df.columns))
return df.drop(cols, axis=1) | Drops columns where fun(ction) is true
Args:
fun: a function that will be applied to columns |
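A self-contained example dropping all-numeric columns from a small DataFrame:

```python
import pandas as pd
from pandas.api.types import is_numeric_dtype

def drop_if(df, fun):
    """Drop columns for which fun(column) is true."""
    def _filter_f(col):
        try:
            return fun(df[col])
        except Exception:
            return False
    cols = list(filter(_filter_f, df.columns))
    return df.drop(cols, axis=1)

df = pd.DataFrame({'a': [1, 2], 'b': ['x', 'y'], 'c': [0.5, 1.5]})
print(drop_if(df, is_numeric_dtype).columns.tolist())   # -> ['b']
```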
19,241 | def resolve_orm_path(model, orm_path):
bits = orm_path.split('__')
endpoint_model = reduce(get_model_at_related_field, [model] + bits[:-1])
if bits[-1] == 'pk':
field = endpoint_model._meta.pk
else:
field = endpoint_model._meta.get_field(bits[-1])
return field | Follows the queryset-style query path of ``orm_path`` starting from ``model`` class. If the
path ends up referring to a bad field name, ``django.db.models.fields.FieldDoesNotExist`` will
be raised. |
19,242 | def major_flux(self, fraction=0.9):
(paths, pathfluxes) = self.pathways(fraction=fraction)
return self._pathways_to_flux(paths, pathfluxes, n=self.nstates) | r"""Returns the main pathway part of the net flux comprising
at most the requested fraction of the full flux. |
def pprint(self, index=False, delimiter='-'):
lines = _build_tree_string(self, 0, index, delimiter)[0]
print('\n' + '\n'.join((line.rstrip() for line in lines))) | Pretty-print the binary tree.
:param index: If set to True (default: False), display level-order_
indexes using the format: ``{index}{delimiter}{value}``.
:type index: bool
:param delimiter: Delimiter character between the node index and
the node value (default: '-').
:type delimiter: str | unicode
**Example**:
.. doctest::
>>> from binarytree import Node
>>>
>>> root = Node(1) # index: 0, value: 1
>>> root.left = Node(2) # index: 1, value: 2
>>> root.right = Node(3) # index: 2, value: 3
>>> root.left.right = Node(4) # index: 4, value: 4
>>>
>>> root.pprint()
<BLANKLINE>
__1
/ \\
2 3
\\
4
<BLANKLINE>
>>> root.pprint(index=True) # Format: {index}-{value}
<BLANKLINE>
_____0-1_
/ \\
1-2_ 2-3
\\
4-4
<BLANKLINE>
.. note::
If you do not need level-order_ indexes in the output string, use
:func:`binarytree.Node.__str__` instead.
.. _level-order:
https://en.wikipedia.org/wiki/Tree_traversal#Breadth-first_search |
19,244 | def get(self, id, seq, line):
schema = HighlightSchema()
resp = self.service.get_id(self._base(id, seq), line)
return self.service.decode(schema, resp) | Get a highlight.
:param id: Result ID as an int.
:param seq: TestResult sequence ID as an int.
:param line: Line number in TestResult's logfile as an int.
:return: :class:`highlights.Highlight <highlights.Highlight>` object |
19,245 | def deleteMember(self, address, id, headers=None, query_params=None, content_type="application/json"):
uri = self.client.base_url + "/network/"+id+"/member/"+address
return self.client.delete(uri, None, headers, query_params, content_type) | Delete member from network
It is method for DELETE /network/{id}/member/{address} |
19,246 | def request(key, features, query, timeout=5):
data = {}
data['key'] = key  # dict key names and the 'json' format value are inferred; original literals lost
data['features'] = '/'.join([f for f in features if f in FEATURES])
data['query'] = quote(query)
data['format'] = 'json'
r = requests.get(API_URL.format(**data), timeout=timeout)
results = json.loads(_unicode(r.content))
return results | Make an API request
:param string key: API key to use
:param list features: features to request. It must be a subset of :data:`FEATURES`
:param string query: query to send
:param integer timeout: timeout of the request
:returns: result of the API request
:rtype: dict |
19,247 | def _ztanh(Np: int, gridmin: float, gridmax: float) -> np.ndarray:
x0 = np.linspace(0, 3.14, Np)
return np.tanh(x0)*gridmax+gridmin | typically call via setupz instead |
19,248 | def predecesors_pattern(element, root):
def is_root_container(el):
return el.parent.parent.getTagName() == ""
if not element.parent or not element.parent.parent or \
is_root_container(element):
return []
trail = [
[
element.parent.parent.getTagName(),
_params_or_none(element.parent.parent.params)
],
[
element.parent.getTagName(),
_params_or_none(element.parent.params)
],
[element.getTagName(), _params_or_none(element.params)],
]
match = root.match(*trail)
if element in match:
return [
PathCall("match", match.index(element), trail)
] | Look for `element` by its predecessors.
Args:
element (obj): HTMLElement instance of the object you are looking for.
root (obj): Root of the `DOM`.
Returns:
list: ``[PathCall()]`` - list with one :class:`PathCall` object (to \
allow use with ``.extend(predecesors_pattern())``). |
19,249 | def _append_to_scalar_dict(self, tag, scalar_value, global_step, timestamp):
if tag not in self._scalar_dict.keys():
self._scalar_dict[tag] = []
self._scalar_dict[tag].append([timestamp, global_step, float(scalar_value)]) | Adds a list [timestamp, step, value] to the value of `self._scalar_dict[tag]`.
This allows users to store scalars in memory and dump them to a json file later. |
19,250 | def on_mouse_wheel(self, event):
state = self.state
if not state.can_zoom:
return
mousepos = self.image_coordinates(event.GetPosition())
rotation = event.GetWheelRotation() / event.GetWheelDelta()
oldzoom = self.zoom
if rotation > 0:
self.zoom /= 1.0/(1.1 * rotation)
elif rotation < 0:
self.zoom /= 1.1 * (-rotation)
if self.zoom > 10:
self.zoom = 10
elif self.zoom < 0.1:
self.zoom = 0.1
if oldzoom < 1 and self.zoom > 1:
self.zoom = 1
if oldzoom > 1 and self.zoom < 1:
self.zoom = 1
self.need_redraw = True
new = self.image_coordinates(event.GetPosition())
self.dragpos = wx.Point(self.dragpos.x - (new.x-mousepos.x), self.dragpos.y - (new.y-mousepos.y))
self.limit_dragpos() | handle mouse wheel zoom changes |
19,251 | def _bfs(root_node, process_node):
from collections import deque
seen_nodes = set()
next_nodes = deque()
seen_nodes.add(root_node)
next_nodes.append(root_node)
while next_nodes:
current_node = next_nodes.popleft()
process_node(current_node)
for child_node in current_node.children:
if child_node not in seen_nodes:
seen_nodes.add(child_node)
next_nodes.append(child_node) | Implementation of Breadth-first search (BFS) on caffe network DAG
:param root_node: root node of caffe network DAG
:param process_node: function to run on each node |
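A self-contained sketch of the traversal with a minimal node type; the `children` attribute mirrors what the caffe DAG nodes are assumed to expose:

```python
from collections import deque

class Node:
    def __init__(self, name, children=()):
        self.name = name
        self.children = list(children)

def _bfs(root_node, process_node):
    """Breadth-first traversal, calling process_node on each node exactly once."""
    seen_nodes = {root_node}
    next_nodes = deque([root_node])
    while next_nodes:
        current_node = next_nodes.popleft()
        process_node(current_node)
        for child_node in current_node.children:
            if child_node not in seen_nodes:
                seen_nodes.add(child_node)
                next_nodes.append(child_node)

leaf = Node('relu1')
root = Node('data', [Node('conv1', [leaf]), Node('conv2', [leaf])])
_bfs(root, lambda n: print(n.name))   # data, conv1, conv2, relu1 (relu1 visited once)
```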
19,252 | def Copier(source, destination):
if source.type == 'local' and destination.type == 'local':  # type literal values inferred from the copier class names
return LocalCopier(source, destination)
elif source.type == 'local' and destination.type == 'google_storage':
return Local2GoogleStorageCopier(source, destination)
elif source.type == 'google_storage' and destination.type == 'local':
return GoogleStorage2LocalCopier(source, destination)
elif source.type == 'google_storage' and destination.type == 'google_storage':
return GoogleStorageCopier(source, destination)
else:
raise FileUtilsError('Could not find a copier for source "%s" and destination "%s"'\
% (source, destination)) | Factory method to select the right copier for a given source and destination. |
19,253 | def handle(self):
mapreduce_name = self._get_required_param("name")
mapper_input_reader_spec = self._get_required_param("mapper_input_reader")
mapper_handler_spec = self._get_required_param("mapper_handler")
mapper_output_writer_spec = self.request.get("mapper_output_writer")
mapper_params = self._get_params(
"mapper_params_validator", "mapper_params.")
params = self._get_params(
"params_validator", "params.")
mr_params = map_job.JobConfig._get_default_mr_params()
mr_params.update(params)
if "queue_name" in mapper_params:
mr_params["queue_name"] = mapper_params["queue_name"]
mapper_params["processing_rate"] = int(mapper_params.get(
"processing_rate") or parameters.config.PROCESSING_RATE_PER_SEC)
mapper_spec = model.MapperSpec(
mapper_handler_spec,
mapper_input_reader_spec,
mapper_params,
int(mapper_params.get("shard_count", parameters.config.SHARD_COUNT)),
output_writer_spec=mapper_output_writer_spec)
mapreduce_id = self._start_map(
mapreduce_name,
mapper_spec,
mr_params,
queue_name=mr_params["queue_name"],
_app=mapper_params.get("_app"))
self.json_response["mapreduce_id"] = mapreduce_id | Handles start request. |
19,254 | def download_photo_async(photo):
photo_id = photo['id']
photo_title = photo['title']
download_url = get_photo_url(photo_id)
photo_format = download_url.split('.')[-1]
photo_title = photo_title + '.' + photo_format
file_path = directory + os.sep + photo_title
logger.info('Downloading %s', photo_title.encode())  # log message text inferred; original literal lost
req = [grequests.get(download_url)]
counter_lock = multiprocessing.Lock()
for resp in grequests.map(req):
with open(file_path, 'wb') as f:
f.write(resp.content)
with counter_lock:
global counter
counter -= 1
logger.info(
'%s photos remaining', counter  # log message text inferred; original literal lost
) | Download a photo to the the path(global varialbe `directory`)
:param photo: The photo information include id and title
:type photo: dict |
19,255 | def project_data(self):
from pyny3d.utils import sort_numpy
proj = self.light_vor.astype(float)
map_ = np.vstack((self.t2vor_map, self.integral)).T
map_sorted = sort_numpy(map_)
n_points = map_sorted.shape[0]
for i in range(proj.shape[0]):
a, b = np.searchsorted(map_sorted[:, 0], (i, i+1))
if b == n_points:
b = -1
proj[i, :] *= np.sum(map_sorted[a:b, 1])
self.proj_vor = np.sum(proj, axis=1)
self.proj_points = np.sum(proj, axis=0) | Assign the sum of ``.integral``\* to each sensible point in the
``pyny.Space`` for the intervals that the points are visible to
the Sun.
The generated information is stored in:
* **.proj_vor** (*ndarray*): ``.integral`` projected to the
Voronoi diagram.
* **.proj_points** (*ndarray*): ``.integral`` projected to
the sensible points in the ``pyny.Space``.
:returns: None
.. note:: \* Trapezoidal data (``.arg_data``) integration over
time (``.arg_t``). |
19,256 | def ignore_path(path, ignore_list=None, whitelist=None):
if ignore_list is None:
return True
should_ignore = matches_glob_list(path, ignore_list)
if whitelist is None:
return should_ignore
return should_ignore and not matches_glob_list(path, whitelist) | Returns a boolean indicating if a path should be ignored given an
ignore_list and a whitelist of glob patterns. |
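A self-contained sketch, assuming the helper `matches_glob_list` (not shown in the source) is a thin wrapper around `fnmatch`:

```python
from fnmatch import fnmatch

def matches_glob_list(path, glob_list):
    """Assumed helper: True if path matches any glob in glob_list."""
    return any(fnmatch(path, g) for g in glob_list)

def ignore_path(path, ignore_list=None, whitelist=None):
    if ignore_list is None:
        return True
    should_ignore = matches_glob_list(path, ignore_list)
    if whitelist is None:
        return should_ignore
    return should_ignore and not matches_glob_list(path, whitelist)

print(ignore_path('build/app.js', ignore_list=['build/*']))                       # True
print(ignore_path('build/app.js', ignore_list=['build/*'], whitelist=['*.js']))   # False
```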
19,257 | def register(self, reg_data, retry=True, interval=1, timeout=3):
if len(reg_data["resources"]) == 0:
_logger.debug("%s no need to register due to no resources" %
(reg_data["name"]))
return
def _register():
try:
resp = self.publish.direct.post(
"/controller/registration", reg_data)
if resp.code == 200:
return resp
except TimeoutError:
_logger.debug("Register message is timeout")
return False
resp = _register()
while resp is False:
_logger.debug("Register failed.")
self.deregister(reg_data)
resp = _register()
if resp is None:
_logger.error("Can\'t not register to controller")
self.stop()
return False
self._conn.set_tunnel(
reg_data["role"], resp.data["tunnel"], self.on_sanji_message)
self.bundle.profile["currentTunnels"] = [
tunnel for tunnel, callback in self._conn.tunnels.items()]
self.bundle.profile["regCount"] = \
self.bundle.profile.get("reg_count", 0) + 1
_logger.debug("Register successfully %s tunnel: %s"
% (reg_data["name"], resp.data["tunnel"],)) | register function
retry
True, infinity retries
False, no retries
Number, retries times
interval
time period for retry
return
False if no success
Tunnel if success |
19,258 | def _split_python(python):
python = _preprocess(python)
if not python:
return []
lexer = PythonSplitLexer()
lexer.read(python)
return lexer.chunks | Split Python source into chunks.
Chunks are separated by at least two return lines. The break must not
be followed by a space. Also, long Python strings spanning several lines
are not split. |
19,259 | def info(gandi, resource, id, value):
output_keys = ['name', 'fingerprint']  # default output fields inferred; original literals lost
if id:
output_keys.append('id')
if value:
output_keys.append('value')
ret = []
for item in resource:
sshkey = gandi.sshkey.info(item)
ret.append(output_sshkey(gandi, sshkey, output_keys))
return ret | Display information about an SSH key.
Resource can be a name or an ID |
19,260 | def run(self, messages):
if self.args.local:
return
if self.assignment.endpoint not in self.SUPPORTED_ASSIGNMENTS:
message = "{0} does not support hinting".format(self.assignment.endpoint)
log.info(message)
if self.args.hint:
print(message)
return
if not in messages:
log.info()
return
if not in messages:
log.info()
return
if self.args.no_experiments:
messages[] = {: }
return
messages[] = {}
history = messages[].get(, {})
questions = history.get(, [])
current_q = history.get(, {})
messages[][] = self.args.hint
for question in current_q:
if question not in questions:
continue
stats = questions[question]
is_solved = stats['solved'] == True
messages[][question] = {: {}, : {}}
hint_info = messages[][question]
if is_solved:
hint_info[] = False
hint_info[] =
if self.args.hint:
print("This question has already been solved.")
continue
elif stats['attempts'] < self.SMALL_EFFORT:
log.info("Question %s is not eligible: Attempts: %s, Solved: %s",
question, stats['attempts'], is_solved)
hint_info[] = False
if self.args.hint:
hint_info[] =
print("You need to make a few more attempts before the hint system is enabled")
continue
else:
if stats['attempts'] % self.WAIT_ATTEMPTS != 0:
hint_info[] =
hint_info[] = False
log.info(,
stats[] % self.WAIT_ATTEMPTS)
else:
hint_info[] = not is_solved
if not self.args.hint:
if hint_info[]:
with format.block("-"):
print("To get hints, try using python3 ok --hint -q {}".format(question))
hint_info[] = True
continue
hint_info[] = True
with format.block("-"):
print(("Thinking of a hint for {}".format(question) +
"... (This could take up to 30 seconds)"))
pre_hint = random.choice(PRE_HINT_MESSAGES)
print("In the meantime, consider: \n{}".format(pre_hint))
hint_info[] = pre_hint
log.info(, question)
try:
response = self.query_server(messages, question)
except (requests.exceptions.RequestException, requests.exceptions.BaseHTTPError):
log.debug("Network error while fetching hint", exc_info=True)
hint_info[] = True
print("\r\nNetwork Error while generating hint. Try again later")
response = None
continue
if response:
hint_info[] = response
hint = response.get()
pre_prompt = response.get()
post_prompt = response.get()
system_error = response.get()
log.info("Hint server response: {}".format(response))
if not hint:
if system_error:
print("{}".format(system_error))
else:
print("Sorry. No hints found for the current code. Try again making after some changes")
continue
print("\n{}".format(hint.rstrip()))
if post_prompt:
results[][query] = prompt.explanation_msg(post_prompt) | Determine if a student is elgible to recieve a hint. Based on their
state, poses reflection questions.
After more attempts, ask if students would like hints. If so, query
the server. |
19,261 | def rows_max(self, size=None, focus=False):
if size is not None:
ow = self._original_widget
ow_size = self._get_original_widget_size(size)
sizing = ow.sizing()
if FIXED in sizing:
self._rows_max_cached = ow.pack(ow_size, focus)[1]
elif FLOW in sizing:
self._rows_max_cached = ow.rows(ow_size, focus)
else:
raise RuntimeError('Not a fixed or flow widget: %r' % self._original_widget)  # message text inferred; original literal lost
return self._rows_max_cached | Return the number of rows for `size`
If `size` is not given, the currently rendered number of rows is returned. |
19,262 | def legal_status(CASRN, Method=None, AvailableMethods=False, CASi=None):
load_law_data()
if not CASi:
CASi = CAS2int(CASRN)
methods = [COMBINED, DSL, TSCA, EINECS, NLP, SPIN]
if AvailableMethods:
return methods
if not Method:
Method = methods[0]
if Method == DSL:
if CASi in DSL_data.index:
status = CAN_DSL_flags[DSL_data.at[CASi, ]]
else:
status = UNLISTED
elif Method == TSCA:
if CASi in TSCA_data.index:
data = TSCA_data.loc[CASi].to_dict()
if any(data.values()):
status = sorted([TSCA_flags[i] for i in data.keys() if data[i]])
else:
status = LISTED
else:
status = UNLISTED
elif Method == EINECS:
if CASi in EINECS_data.index:
status = LISTED
else:
status = UNLISTED
elif Method == NLP:
if CASi in NLP_data.index:
status = LISTED
else:
status = UNLISTED
elif Method == SPIN:
if CASi in SPIN_data.index:
status = LISTED
else:
status = UNLISTED
elif Method == COMBINED:
status = {}
for method in methods[1:]:
status[method] = legal_status(CASRN, Method=method, CASi=CASi)
else:
raise Exception()
return status | r'''Looks up the legal status of a chemical according to either a specifc
method or with all methods.
Returns either the status as a string for a specified method, or the
status of the chemical in all available data sources, in the format
{source: status}.
Parameters
----------
CASRN : string
CASRN [-]
Returns
-------
status : str or dict
Legal status information [-]
methods : list, only returned if AvailableMethods == True
List of methods which can be used to obtain legal status with the
given inputs
Other Parameters
----------------
Method : string, optional
A string for the method name to use, as defined by constants in
legal_status_methods
AvailableMethods : bool, optional
If True, function will determine which methods can be used to obtain
the legal status for the desired chemical, and will return methods
instead of the status
CASi : int, optional
CASRN as an integer, used internally [-]
Notes
-----
Supported methods are:
* **DSL**: Canada Domestic Substance List, [1]_. As extracted on Feb 11, 2015
from a html list. This list is updated continuously, so this version
will always be somewhat old. Strictly speaking, there are multiple
lists but they are all bundled together here. A chemical may be
'Listed', or be on the 'Non-Domestic Substances List (NDSL)',
or be on the list of substances with 'Significant New Activity (SNAc)',
or be on the DSL but with a 'Ministerial Condition pertaining to this
substance', or have been removed from the DSL, or have had a
Ministerial prohibition for the substance.
* **TSCA**: USA EPA Toxic Substances Control Act Chemical Inventory, [2]_.
This list is as extracted on 2016-01. It is believed this list is
updated on a periodic basis (> 6 month). A chemical may simply be
'Listed', or may have certain flags attached to it. All these flags
are described in the dict TSCA_flags.
* **EINECS**: European INventory of Existing Commercial chemical
Substances, [3]_. As extracted from a spreadsheet dynamically
generated at [1]_. This list was obtained March 2015; a more recent
revision already exists.
* **NLP**: No Longer Polymers, a list of chemicals with special
regulatory exemptions in EINECS. Also described at [3]_.
* **SPIN**: Substances Prepared in Nordic Countries. Also a boolean
data type. Retrieved 2015-03 from [4]_.
Other methods which could be added are:
* Australia: AICS Australian Inventory of Chemical Substances
* China: Inventory of Existing Chemical Substances Produced or Imported
in China (IECSC)
* Europe: REACH List of Registered Substances
* India: List of Hazardous Chemicals
* Japan: ENCS: Inventory of existing and new chemical substances
* Korea: Existing Chemicals Inventory (KECI)
* Mexico: INSQ National Inventory of Chemical Substances in Mexico
* New Zealand: Inventory of Chemicals (NZIoC)
* Philippines: PICCS Philippines Inventory of Chemicals and Chemical
Substances
Examples
--------
>>> pprint(legal_status('64-17-5'))
{'DSL': 'LISTED',
'EINECS': 'LISTED',
'NLP': 'UNLISTED',
'SPIN': 'LISTED',
'TSCA': 'LISTED'}
References
----------
.. [1] Government of Canada.. "Substances Lists" Feb 11, 2015.
https://www.ec.gc.ca/subsnouvelles-newsubs/default.asp?n=47F768FE-1.
.. [2] US EPA. "TSCA Chemical Substance Inventory." Accessed April 2016.
https://www.epa.gov/tsca-inventory.
.. [3] ECHA. "EC Inventory". Accessed March 2015.
http://echa.europa.eu/information-on-chemicals/ec-inventory.
.. [4] SPIN. "SPIN Substances in Products In Nordic Countries." Accessed
March 2015. http://195.215.202.233/DotNetNuke/default.aspx. |
19,263 | def share_matrix(locifile, tree=None, nameorder=None):
with open(locifile, 'r') as locidata:
loci = locidata.read().split("|\n")[:-1]
if tree:
tree = ete.Tree(tree)
tree.ladderize()
snames = tree.get_leaf_names()
lxs, names = _getarray(loci, snames)
elif nameorder:
lxs, names = _getarray(loci, nameorder)
else:
raise IOError("must provide either tree or nameorder argument")
share = _countmatrix(lxs)
return share | returns a matrix of shared RAD-seq data
Parameters:
-----------
locifile (str):
Path to a ipyrad .loci file.
tree (str):
Path to Newick file or a Newick string representation of
a tree. If used, names will be ordered by the ladderized
tip order.
nameorder (list):
If a tree is not provided you can alternatively enter
the sample order as a list here. The tree argument will
override this argument.
Returns:
--------
matrix (numpy.array):
A uint64 numpy array of the number of shared loci between
all pairs of samples. |
19,264 | async def add_unknown_id(self, unknown_id, timeout=OTGW_DEFAULT_TIMEOUT):
cmd = OTGW_CMD_UNKNOWN_ID
unknown_id = int(unknown_id)
if unknown_id < 1 or unknown_id > 255:
return None
ret = await self._wait_for_cmd(cmd, unknown_id, timeout)
if ret is not None:
return int(ret) | Inform the gateway that the boiler doesn't support the
specified Data-ID, even if the boiler doesn't indicate that
by returning an Unknown-DataId response. Using this command
allows the gateway to send an alternative Data-ID to the boiler
instead.
Return the added ID, or None on failure.
This method is a coroutine |
19,265 | def get_repos(self, visibility=github.GithubObject.NotSet, affiliation=github.GithubObject.NotSet, type=github.GithubObject.NotSet, sort=github.GithubObject.NotSet, direction=github.GithubObject.NotSet):
assert visibility is github.GithubObject.NotSet or isinstance(visibility, (str, unicode)), visibility
assert affiliation is github.GithubObject.NotSet or isinstance(affiliation, (str, unicode)), affiliation
assert type is github.GithubObject.NotSet or isinstance(type, (str, unicode)), type
assert sort is github.GithubObject.NotSet or isinstance(sort, (str, unicode)), sort
assert direction is github.GithubObject.NotSet or isinstance(direction, (str, unicode)), direction
url_parameters = dict()
if visibility is not github.GithubObject.NotSet:
url_parameters["visibility"] = visibility
if affiliation is not github.GithubObject.NotSet:
url_parameters["affiliation"] = affiliation
if type is not github.GithubObject.NotSet:
url_parameters["type"] = type
if sort is not github.GithubObject.NotSet:
url_parameters["sort"] = sort
if direction is not github.GithubObject.NotSet:
url_parameters["direction"] = direction
return github.PaginatedList.PaginatedList(
github.Repository.Repository,
self._requester,
"/user/repos",
url_parameters
) | :calls: `GET /user/repos <http://developer.github.com/v3/repos>`
:param visibility: string
:param affiliation: string
:param type: string
:param sort: string
:param direction: string
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Repository.Repository` |
19,266 | def create(self, _attributes=None, **attributes):
if _attributes is not None:
attributes.update(_attributes)
instance = self._related.new_instance(attributes)
instance.set_attribute(self.get_plain_foreign_key(), self.get_parent_key())
instance.save()
return instance | Create a new instance of the related model.
:param attributes: The attributes
:type attributes: dict
:rtype: Model |
19,267 | def find_usage(self):
logger.debug("Checking usage for service %s", self.service_name)
self.connect()
for lim in self.limits.values():
lim._reset_usage()
self._find_usage_nodes()
self._find_usage_subnet_groups()
self._find_usage_parameter_groups()
self._find_usage_security_groups()
self._have_usage = True
logger.debug("Done checking usage.") | Determine the current usage for each limit of this service,
and update corresponding Limit via
:py:meth:`~.AwsLimit._add_current_usage`. |
19,268 | def post_loader(*decorator_args, serializer):
def wrapped(fn):
@wraps(fn)
def decorated(*args, **kwargs):
return fn(*serializer.load(request.get_json()))
return decorated
if decorator_args and callable(decorator_args[0]):
return wrapped(decorator_args[0])
return wrapped | Decorator to automatically instantiate a model from json request data
:param serializer: The ModelSerializer to use to load data from the request |
19,269 | def undeploy_lambda_alb(self, lambda_name):
print("Undeploying ALB infrastructure...")
try:
self.lambda_client.remove_permission(
FunctionName=lambda_name,
StatementId=lambda_name
)
except botocore.exceptions.ClientError as e:
if "ResourceNotFoundException" in e.response["Error"]["Code"]:
pass
else:
raise e
try:
response = self.elbv2_client.describe_load_balancers(
Names=[lambda_name]
)
if not(response["LoadBalancers"]) or len(response["LoadBalancers"]) > 1:
raise EnvironmentError("Failure to locate/delete ALB named [{}]. Response was: {}".format(lambda_name, repr(response)))
load_balancer_arn = response["LoadBalancers"][0]["LoadBalancerArn"]
response = self.elbv2_client.describe_listeners(LoadBalancerArn=load_balancer_arn)
if not(response["Listeners"]):
print("No listeners found.")  # message text inferred; original literal lost
elif len(response["Listeners"]) > 1:
raise EnvironmentError("Failure to locate/delete listener for ALB named [{}]. Response was: {}".format(lambda_name, repr(response)))
else:
listener_arn = response["Listeners"][0]["ListenerArn"]
response = self.elbv2_client.delete_listener(ListenerArn=listener_arn)
response = self.elbv2_client.delete_load_balancer(LoadBalancerArn=load_balancer_arn)
waiter = self.elbv2_client.get_waiter('load_balancers_deleted')
print('Waiting for load balancer [{}] to be deleted..'.format(lambda_name))  # message text inferred
waiter.wait(LoadBalancerArns=[load_balancer_arn], WaiterConfig={"Delay": 3})
except botocore.exceptions.ClientError as e:
print(e.response["Error"]["Code"])
if "LoadBalancerNotFound" in e.response["Error"]["Code"]:
pass
else:
raise e
try:
response = self.lambda_client.get_function(FunctionName=lambda_name)
lambda_arn = response["Configuration"]["FunctionArn"]
response = self.elbv2_client.describe_target_groups(Names=[lambda_name])
if not(response["TargetGroups"]) or len(response["TargetGroups"]) > 1:
raise EnvironmentError("Failure to locate/delete ALB target group named [{}]. Response was: {}".format(lambda_name, repr(response)))
target_group_arn = response["TargetGroups"][0]["TargetGroupArn"]
self.elbv2_client.deregister_targets(
TargetGroupArn=target_group_arn,
Targets=[{"Id": lambda_arn}]
)
waiter = self.elbv2_client.get_waiter('target_deregistered')
print('Waiting for target [{}] to be deregistered...'.format(lambda_name))  # message text inferred
waiter.wait(
TargetGroupArn=target_group_arn,
Targets=[{"Id": lambda_arn}],
WaiterConfig={"Delay": 3}
)
self.elbv2_client.delete_target_group(TargetGroupArn=target_group_arn)
except botocore.exceptions.ClientError as e:
print(e.response["Error"]["Code"])
if "TargetGroupNotFound" in e.response["Error"]["Code"]:
pass
else:
raise e | The `zappa undeploy` functionality for ALB infrastructure. |
19,270 | def purge(self, session, checksum):
C = session.query(model.Calculation).get(checksum)
if not C:
return
if C.siblings_count:
C_meta = session.query(model.Metadata).get(checksum)
higher_lookup = {}
more = C.parent
distance = 0
while True:
distance += 1
higher, more = more, []
if not higher:
break
for item in higher:
try:
higher_lookup[distance].add(item)
except KeyError:
higher_lookup[distance] = set([item])
if item.parent:
more += item.parent
for distance, members in higher_lookup.items():
for member in members:
if distance == 1:
member.siblings_count -= 1
if not member.siblings_count:
return
member.meta_data.download_size -= C_meta.download_size
session.add(member)
else:
session.execute( model.delete( model.Spectra ).where( model.Spectra.checksum == checksum) )
session.execute( model.delete( model.Electrons ).where( model.Electrons.checksum == checksum ) )
session.execute( model.delete( model.Phonons ).where( model.Phonons.checksum == checksum ) )
session.execute( model.delete( model.Recipinteg ).where( model.Recipinteg.checksum == checksum ) )
session.execute( model.delete( model.Basis ).where( model.Basis.checksum == checksum ) )
session.execute( model.delete( model.Energy ).where( model.Energy.checksum == checksum ) )
session.execute( model.delete( model.Spacegroup ).where( model.Spacegroup.checksum == checksum ) )
session.execute( model.delete( model.Struct_ratios ).where( model.Struct_ratios.checksum == checksum ) )
session.execute( model.delete( model.Struct_optimisation ).where( model.Struct_optimisation.checksum == checksum ) )
struct_ids = [ int(i[0]) for i in session.query(model.Structure.struct_id).filter(model.Structure.checksum == checksum).all() ]
for struct_id in struct_ids:
session.execute( model.delete( model.Atom ).where( model.Atom.struct_id == struct_id ) )
session.execute( model.delete( model.Lattice ).where( model.Lattice.struct_id == struct_id ) )
session.execute( model.delete( model.Structure ).where( model.Structure.checksum == checksum ) )
if len(C.references):
left_references = [ int(i[0]) for i in session.query(model.Reference.reference_id).join(model.metadata_references, model.Reference.reference_id == model.metadata_references.c.reference_id).filter(model.metadata_references.c.checksum == checksum).all() ]
session.execute( model.delete( model.metadata_references ).where( model.metadata_references.c.checksum == checksum ) )
for lc in left_references:
if not (session.query(model.metadata_references.c.checksum).filter(model.metadata_references.c.reference_id == lc).count()):
session.execute( model.delete( model.Reference ).where(model.Reference.reference_id == lc) )
session.execute( model.delete( model.Metadata ).where( model.Metadata.checksum == checksum ) )
session.execute( model.delete( model.Grid ).where( model.Grid.checksum == checksum ) )
session.execute( model.delete( model.tags ).where( model.tags.c.checksum == checksum ) )
session.execute( model.delete( model.calcsets ).where( model.calcsets.c.children_checksum == checksum ) )
session.execute( model.delete( model.calcsets ).where( model.calcsets.c.parent_checksum == checksum ) )
session.execute( model.delete( model.Calculation ).where( model.Calculation.checksum == checksum ) )
session.commit()
return False | Deletes calc entry by checksum entirely from the database
NB source files on disk are not deleted
NB: this is the PUBLIC method
@returns error |
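A minimal usage sketch for the method above, assuming a SQLAlchemy session factory and a data-provider object from the same project (both names here are hypothetical):

# Hypothetical names: `Session` is the project's sessionmaker, `provider` exposes purge().
session = Session()
error = provider.purge(session, checksum="a1b2c3d4")   # checksum of the calc entry to delete
if error:
    print("Purge failed:", error)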
19,271 | def __convert_key(expression):
if type(expression) is str and len(expression) > 2 and expression[1] == :
expression = eval(expression[2:-1])
return expression | Converts keys in YAML that reference other keys. |
19,272 | def request(self, request):
url = "{}{}".format(self._base_url, request.path)
timeout = self.poll_timeout
if request.stream is True:
timeout = self.stream_timeout
try:
http_response = self._session.request(
request.method,
url,
headers=self._headers,
params=request.params,
data=request.body,
stream=request.stream,
timeout=timeout
)
except requests.exceptions.ConnectionError:
raise V20ConnectionError(url)
except requests.exceptions.ConnectTimeout:
raise V20Timeout(url, "connect")
except requests.exceptions.ReadTimeout:
raise V20Timeout(url, "read")
request.headers = http_response.request.headers
response = Response(
request,
request.method,
http_response.url,
http_response.status_code,
http_response.reason,
http_response.headers
)
if request.stream:
response.set_line_parser(
request.line_parser
)
response.set_lines(
http_response.iter_lines(
self.stream_chunk_size
)
)
else:
response.set_raw_body(http_response.text)
return response | Perform an HTTP request through the context
Args:
request: A v20.request.Request object
Returns:
A v20.response.Response object |
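A hedged sketch of how this context method gets exercised in practice, assuming the public oanda v20 package entry points; the hostname and token below are placeholders:

import v20

ctx = v20.Context("api-fxpractice.oanda.com", 443, token="MY_TOKEN")
# Endpoint wrappers such as ctx.account.list() build a Request and hand it to ctx.request()
response = ctx.account.list()
print(response.status, response.reason)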
19,273 | def qorts_general_stats (self):
headers = OrderedDict()
headers[] = {
: ,
: ,
: 100,
: 0,
: ,
:
}
headers[] = {
: ,
: ,
:
}
self.general_stats_addcols(self.qorts_data, headers) | Add columns to the General Statistics table |
19,274 | def sql(self):
from fasta.indexed import DatabaseFASTA, fasta_to_sql
db = DatabaseFASTA(self.prefix_path + ".db")
if not db.exists: fasta_to_sql(self.path, db.path)
return db | If you access this attribute, we will build an SQLite database
out of the FASTA file and you will be able to access everything in an
indexed fashion, and use the blaze library via sql.frame |
19,275 | def GET_AUTH(self, courseid, aggregationid=''):
course, __ = self.get_course_and_check_rights(courseid, allow_all_staff=True)
if course.is_lti():
raise web.notfound()
    return self.display_page(course, aggregationid) | Edit an aggregation |
19,276 | def create_from_str(name_and_zone: str):
    if _NAME_ZONE_SEGREGATOR not in name_and_zone:
        raise ValueError("String must be in the form `name#zone`")
    name, zone = name_and_zone.split(_NAME_ZONE_SEGREGATOR)
    if len(name) == 0:
        raise ValueError("User's name cannot be blank")
    if len(zone) == 0:
        raise ValueError("User's zone cannot be blank")
return User(name, zone) | Factory method for creating a user from a string in the form `name#zone`.
:param name_and_zone: the user's name followed by hash followed by the user's zone
:return: the created user |
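A short illustrative call, assuming the method is exposed as a static factory on the `User` class and that the resulting object keeps `name` and `zone` attributes:

user = User.create_from_str("alice#tempZone")   # illustrative name and zone
print(user.name, user.zone)                     # -> alice tempZone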
19,277 | def _versioned_lib_name(env, libnode, version, prefix, suffix, prefix_generator, suffix_generator, **kw):
Verbose = False
if Verbose:
print("_versioned_lib_name: libnode={:r}".format(libnode.get_path()))
print("_versioned_lib_name: version={:r}".format(version))
print("_versioned_lib_name: prefix={:r}".format(prefix))
print("_versioned_lib_name: suffix={:r}".format(suffix))
print("_versioned_lib_name: suffix_generator={:r}".format(suffix_generator))
versioned_name = os.path.basename(libnode.get_path())
if Verbose:
print("_versioned_lib_name: versioned_name={:r}".format(versioned_name))
versioned_prefix = prefix_generator(env, **kw)
versioned_suffix = suffix_generator(env, **kw)
if Verbose:
print("_versioned_lib_name: versioned_prefix={:r}".format(versioned_prefix))
print("_versioned_lib_name: versioned_suffix={:r}".format(versioned_suffix))
versioned_prefix_re = + re.escape(versioned_prefix)
versioned_suffix_re = re.escape(versioned_suffix) +
name = re.sub(versioned_prefix_re, prefix, versioned_name)
name = re.sub(versioned_suffix_re, suffix, name)
if Verbose:
print("_versioned_lib_name: name={:r}".format(name))
return name | For libnode='/optional/dir/libfoo.so.X.Y.Z' it returns 'libfoo.so |
19,278 | def split_by_files(self, valid_names:'ItemList')->'ItemLists':
"Split the data by using the names in `valid_names` for validation."
if isinstance(self.items[0], Path): return self.split_by_valid_func(lambda o: o.name in valid_names)
else: return self.split_by_valid_func(lambda o: os.path.basename(o) in valid_names) | Split the data by using the names in `valid_names` for validation. |
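A hedged fastai v1-style sketch of driving this splitter; the dataset root and valid.txt listing are hypothetical:

from pathlib import Path
from fastai.vision import ImageList

path = Path("data/images")                                   # hypothetical dataset root
valid_names = [l.strip() for l in open(path/"valid.txt")]    # hypothetical list of held-out file names
src = ImageList.from_folder(path).split_by_files(valid_names)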
19,279 | def scheme_specification(cls):
return WSchemeSpecification(
,
WURIComponentVerifier(WURI.Component.path, WURIComponentVerifier.Requirement.optional)
) | :meth:`.WSchemeHandler.scheme_specification` method implementation |
19,280 | def run_script(self, filename, start_opts=None, globals_=None,
locals_=None):
self.mainpyfile = self.core.canonic(filename)
return retval | Run debugger on Python script `filename'. The script may
inspect sys.argv for command arguments. `globals_' and
`locals_' are the dictionaries to use for local and global
variables. If `globals' is not given, globals() (the current
global variables) is used. If `locals_' is not given, it
becomes a copy of `globals_'.
True is returned if the program terminated normally and False
if the debugger initiated a quit or the program did not normally
terminate.
See also `run_call' if what you want to debug is a function call, `run_eval' if you
`run_eval' if you want to debug an expression, and `run' if you
want to debug general Python statements not inside a file. |
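A minimal sketch of calling this entry point, assuming `dbg` is an instance of the debugger class this method belongs to and the script path is illustrative:

normal_exit = dbg.run_script("/tmp/example.py")
if not normal_exit:
    print("debugger quit or the program did not terminate normally")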
19,281 | def merge_requests_data_to(to, food={}):
if not to:
to.update(food)
to[][] += food[][]
to[][] += food[][]
to[] += food[]
for group_name, urls in food[].items():
if group_name not in to[]:
to[][group_name] = urls
else:
to_urls = to[][group_name]
to_urls[] = to_urls[].merge_with(
urls[])
    merge_urls_data_to(to_urls[], urls[]) | Merge a small analyzed result into a big one; this function will modify the
original ``to`` |
19,282 | def from_raw(self, raw: RawScalar) -> Optional[bytes]:
try:
return base64.b64decode(raw, validate=True)
except TypeError:
return None | Override superclass method. |
19,283 | def get_out_ip_addr(cls, tenant_id):
if tenant_id not in cls.serv_obj_dict:
LOG.error("Fabric not prepared for tenant %s", tenant_id)
return
tenant_obj = cls.serv_obj_dict.get(tenant_id)
return tenant_obj.get_out_ip_addr() | Retrieves the 'out' service subnet attributes. |
19,284 | def __add_text(self, text):
if text is not None and not isinstance(text, six.text_type):
            raise TypeError('Expected Unicode text: %r' % text)
sid = self.__new_sid()
location = None
if self.table_type.is_shared:
location = self.__import_location(sid)
token = SymbolToken(text, sid, location)
self.__add(token)
return token | Adds the given Unicode text as a locally defined symbol. |
19,285 | def Dadgostar_Shaw_integral_over_T(T, similarity_variable):
r
a = similarity_variable
a2 = a*a
a11 = -0.3416
a12 = 2.2671
a21 = 0.1064
a22 = -0.3874
a31 = -9.8231E-05
a32 = 4.182E-04
constant = 24.5
S = T*T*0.5*(a2*a32 + a*a31) + T*(a2*a22 + a*a21) + a*constant*(a*a12 + a11)*log(T)
return S*1000. | r'''Calculate the integral of liquid constant-pressure heat capacitiy
with the similarity variable concept and method as shown in [1]_.
Parameters
----------
T : float
Temperature of gas [K]
similarity_variable : float
similarity variable as defined in [1]_, [mol/g]
Returns
-------
S : float
Difference in entropy from 0 K, [J/kg/K]
Notes
-----
Original model is in terms of J/g/K. Note that the model is for predicting
mass heat capacity, not molar heat capacity like most other methods!
Integral was computed with SymPy.
See Also
--------
Dadgostar_Shaw
Dadgostar_Shaw_integral
Examples
--------
>>> Dadgostar_Shaw_integral_over_T(300.0, 0.1333)
1201.1409113147927
References
----------
.. [1] Dadgostar, Nafiseh, and John M. Shaw. "A Predictive Correlation for
the Constant-Pressure Specific Heat Capacity of Pure and Ill-Defined
Liquid Hydrocarbons." Fluid Phase Equilibria 313 (January 15, 2012):
211-226. doi:10.1016/j.fluid.2011.09.015. |
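The closed form coded above can be re-derived symbolically; this sketch integrates the Dadgostar-Shaw mass heat capacity divided by T with SymPy, reusing the coefficients from the function body, and should reproduce the log, linear and quadratic terms:

import sympy as sp

T, a = sp.symbols("T a", positive=True)
a11, a12 = -0.3416, 2.2671
a21, a22 = 0.1064, -0.3874
a31, a32 = -9.8231E-05, 4.182E-04
constant = 24.5

first = a11*a + a12*a**2
second = a21*a + a22*a**2
third = a31*a + a32*a**2
Cp = constant*first + second*T + third*T**2      # mass heat capacity, J/g/K

S = sp.integrate(Cp / T, T)                      # constant*first*log(T) + second*T + third*T**2/2
print(sp.simplify(S))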
19,286 | def find_files(path, patterns):
if not isinstance(patterns, (list, tuple)):
patterns = [patterns]
matches = []
for root, dirnames, filenames in os.walk(path):
for pattern in patterns:
for filename in fnmatch.filter(filenames, pattern):
matches.append(os.path.join(root, filename))
    return matches | Returns all files from a given path that match the pattern or list
of patterns
@type path: str
@param path: A path to traverse
@type patterns: str|list
@param patterns: A pattern or a list of patterns to match
@rtype: list[str]:
@return: A list of matched files |
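A small usage example with made-up paths and patterns:

configs = find_files("/etc/myapp", ["*.yaml", "*.json"])   # hypothetical directory and patterns
for path in configs:
    print(path)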
19,287 | def _execute_pillar(pillar_name, run_type):
    groups = __salt__['pillar.get'](pillar_name)
data = {}
for group in groups:
data[group] = {}
commands = groups[group]
for command in commands:
if isinstance(command, dict):
plugin = next(six.iterkeys(command))
args = command[plugin]
else:
plugin = command
                args = ''
command_key = _format_dict_key(args, plugin)
data[group][command_key] = run_type(plugin, args)
return data | Run one or more nagios plugins from pillar data and get the result of run_type
The pillar have to be in this format:
------
webserver:
Ping_google:
- check_icmp: 8.8.8.8
- check_icmp: google.com
Load:
- check_load: -w 0.8 -c 1
APT:
- check_apt
------- |
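For the pillar above, the returned structure is a dict keyed by group and then by a formatted plugin/arguments key; the exact key text depends on _format_dict_key, so this shape sketch is only indicative:

data = {
    "webserver": {
        "check_icmp 8.8.8.8": "<run_type result>",
        "check_icmp google.com": "<run_type result>",
    },
    "Load": {
        "check_load -w 0.8 -c 1": "<run_type result>",
    },
    "APT": {
        "check_apt": "<run_type result>",
    },
}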
19,288 | def capability_info(self, name=None):
for r in self.resources:
if (r.capability == name):
return(r)
return(None) | Return information about the requested capability from this list.
Will return None if there is no information about the requested capability. |
19,289 | def msg_name(code):
ids = {v: k for k, v in COMMANDS.items()}
return ids[code] | Convert integer message code into a string name. |
19,290 | def normalize_parameters(params):
key_values = [(utils.escape(k), utils.escape(v)) for k, v in params]
key_values.sort()
    parameter_parts = ['{0}={1}'.format(k, v) for k, v in key_values]
    return '&'.join(parameter_parts)
Per `section 3.4.1.3.2`_ of the spec.
For example, the list of parameters from the previous section would
be normalized as follows:
Encoded::
+------------------------+------------------+
| Name | Value |
+------------------------+------------------+
| b5 | %3D%253D |
| a3 | a |
| c%40 | |
| a2 | r%20b |
| oauth_consumer_key | 9djdj82h48djs9d2 |
| oauth_token | kkk9d7dh3k39sjv7 |
| oauth_signature_method | HMAC-SHA1 |
| oauth_timestamp | 137131201 |
| oauth_nonce | 7d8f3e4a |
| c2 | |
| a3 | 2%20q |
+------------------------+------------------+
Sorted::
+------------------------+------------------+
| Name | Value |
+------------------------+------------------+
| a2 | r%20b |
| a3 | 2%20q |
| a3 | a |
| b5 | %3D%253D |
| c%40 | |
| c2 | |
| oauth_consumer_key | 9djdj82h48djs9d2 |
| oauth_nonce | 7d8f3e4a |
| oauth_signature_method | HMAC-SHA1 |
| oauth_timestamp | 137131201 |
| oauth_token | kkk9d7dh3k39sjv7 |
+------------------------+------------------+
Concatenated Pairs::
+-------------------------------------+
| Name=Value |
+-------------------------------------+
| a2=r%20b |
| a3=2%20q |
| a3=a |
| b5=%3D%253D |
| c%40= |
| c2= |
| oauth_consumer_key=9djdj82h48djs9d2 |
| oauth_nonce=7d8f3e4a |
| oauth_signature_method=HMAC-SHA1 |
| oauth_timestamp=137131201 |
| oauth_token=kkk9d7dh3k39sjv7 |
+-------------------------------------+
and concatenated together into a single string (line breaks are for
display purposes only)::
a2=r%20b&a3=2%20q&a3=a&b5=%3D%253D&c%40=&c2=&oauth_consumer_key=9dj
dj82h48djs9d2&oauth_nonce=7d8f3e4a&oauth_signature_method=HMAC-SHA1
&oauth_timestamp=137131201&oauth_token=kkk9d7dh3k39sjv7
.. _`section 3.4.1.3.2`: https://tools.ietf.org/html/rfc5849#section-3.4.1.3.2 |
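A short check against the worked example above, using a truncated parameter list; the values are passed unescaped because the function escapes each pair itself:

params = [
    ("a2", "r b"),
    ("a3", "2 q"),
    ("a3", "a"),
    ("oauth_consumer_key", "9djdj82h48djs9d2"),
]
print(normalize_parameters(params))
# -> a2=r%20b&a3=2%20q&a3=a&oauth_consumer_key=9djdj82h48djs9d2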
19,291 | def manifest_repr(self, p_num):
prefix = "p" + str(p_num) + "_"
manifest = prefix + "MODE=" + ("IN" if self.type == Type.FILE else "") + "\n"
manifest += prefix + "TYPE=" + str(self.type.value) + "\n"
if self.type == Type.FILE and len(self.choices) > 0:
manifest += prefix + "choices=" + self._choices() + "\n"
manifest += prefix + "default_value=" + self.default_value + "\n"
manifest += prefix + "description=" + GPTaskSpec.manifest_escape(self.description) + "\n"
manifest += prefix + "fileFormat=" + .join(self.file_format) + "\n"
manifest += prefix + "flag=" + self.flag + "\n"
manifest += prefix + "name=" + self.name + "\n"
manifest += prefix + "numValues=" + self._num_values() + "\n"
manifest += prefix + "optional=" + str(self.optional.value) + "\n"
manifest += prefix + "prefix=" + (self.flag if self.prefix_when_specified else "") + "\n"
manifest += prefix + "prefix_when_specified=" + (self.flag if self.prefix_when_specified else "") + "\n"
manifest += prefix + "type=" + self._java_type() + "\n"
manifest += prefix + "value=" + (self._choices() if self.type != Type.FILE and len(self.choices) > 0 else "") + "\n"
return manifest | Builds a manifest string representation of the parameters and returns it
:param p_num: int
:return: string |
19,292 | def clear_dir(path):
for f in os.listdir(path):
f_path = os.path.join(path, f)
if os.path.isfile(f_path) or os.path.islink(f_path):
os.unlink(f_path) | Empty out the image directory. |
19,293 | def autogen_explicit_injectable_metaclass(classname, regen_command=None,
conditional_imports=None):
r
import utool as ut
vals_list = []
def make_redirect(func):
src_fmt = r
from utool._internal import meta_util_six
orig_docstr = meta_util_six.get_funcdoc(func)
funcname = meta_util_six.get_funcname(func)
orig_funcname = modname.split()[-1] + + funcname
orig_docstr = if orig_docstr is None else orig_docstr
import textwrap
import inspect
argspec = inspect.getargspec(func)
(args, varargs, varkw, defaults) = argspec
defsig = inspect.formatargspec(*argspec)
callsig = inspect.formatargspec(*argspec[0:3])
src_fmtdict = dict(funcname=funcname, orig_funcname=orig_funcname,
defsig=defsig, callsig=callsig,
orig_docstr=orig_docstr)
src = textwrap.dedent(src_fmt).format(**src_fmtdict)
return src
src_list = []
for classkey, vals in __CLASSTYPE_ATTRIBUTES__.items():
modname = classkey[1]
if classkey[0] == classname:
vals_list.append(vals)
for func in vals:
src = make_redirect(func)
src = ut.indent(src)
src = .join([_.rstrip() for _ in src.split()])
src_list.append(src)
if regen_command is None:
regen_command =
module_header = ut.codeblock(
+ ut.TRIPLE_DOUBLE_QUOTE + + ut.TRIPLE_DOUBLE_QUOTE + ).format(
autogen_time=ut.get_timestamp(),
regen_command=regen_command,
classname=classname)
depends_module_block = autogen_import_list(classname, conditional_imports)
inject_statement_fmt = ("print, rrr, profile = "
"ut.inject2(__name__, )")
inject_statement = inject_statement_fmt.format(classname=classname)
source_block_lines = [
module_header,
depends_module_block,
inject_statement,
,
+ classname + ,
] + src_list
source_block = .join(source_block_lines)
source_block = ut.autoformat_pep8(source_block, aggressive=2)
return source_block | r"""
Args:
classname (?):
Returns:
?:
CommandLine:
python -m utool.util_class --exec-autogen_explicit_injectable_metaclass
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_class import * # NOQA
>>> from utool.util_class import __CLASSTYPE_ATTRIBUTES__ # NOQA
>>> import ibeis
>>> import ibeis.control.IBEISControl
>>> classname = ibeis.control.controller_inject.CONTROLLER_CLASSNAME
>>> result = autogen_explicit_injectable_metaclass(classname)
>>> print(result) |
19,294 | def _sanitize_inputs(self):
ret = {}
if self.inputs is None:
return
    if isinstance(self.inputs, dict):
        # inputs given as a map of <HeronComponentSpec/GlobalStreamId -> Grouping>
        for key, grouping in self.inputs.items():
            if not Grouping.is_grouping_sane(grouping):
                raise ValueError('A given grouping is not supported')
            if isinstance(key, HeronComponentSpec):
                if key.name is None:
                    raise RuntimeError("In _sanitize_inputs(): HeronComponentSpec doesnt have a name")
                global_streamid = GlobalStreamId(key.name, Stream.DEFAULT_STREAM_ID)
                ret[global_streamid] = grouping
            elif isinstance(key, GlobalStreamId):
                ret[key] = grouping
            else:
                raise ValueError("%s is not supported as a key to inputs" % str(key))
    elif isinstance(self.inputs, (list, tuple)):
        # inputs given as a list of upstream components; default to SHUFFLE grouping
        for input_obj in self.inputs:
            if isinstance(input_obj, HeronComponentSpec):
                if input_obj.name is None:
                    raise RuntimeError("In _sanitize_inputs(): HeronComponentSpec doesnt have a name")
                global_streamid = GlobalStreamId(input_obj.name, Stream.DEFAULT_STREAM_ID)
                ret[global_streamid] = Grouping.SHUFFLE
            elif isinstance(input_obj, GlobalStreamId):
                ret[input_obj] = Grouping.SHUFFLE
            else:
                raise ValueError("%s is not supported as an input" % str(input_obj))
    else:
        raise TypeError("Inputs must be a list, dict, or None, given: %s" % str(self.inputs))
return ret | Sanitizes input fields and returns a map <GlobalStreamId -> Grouping> |
19,295 | def machine_to_machine(self):
if self._machine_to_machine is None:
self._machine_to_machine = MachineToMachineList(
self._version,
                account_sid=self._solution['account_sid'],
                country_code=self._solution['country_code'],
)
return self._machine_to_machine | Access the machine_to_machine
:returns: twilio.rest.api.v2010.account.available_phone_number.machine_to_machine.MachineToMachineList
:rtype: twilio.rest.api.v2010.account.available_phone_number.machine_to_machine.MachineToMachineList |
19,296 | def relaxation_matvec(P, p0, obs, times=[1]):
r
times = np.asarray(times)
ind = np.argsort(times)
times = times[ind]
if times[0] < 0:
raise ValueError("Times can not be negative")
dt = times[1:] - times[0:-1]
nt = len(times)
relaxations = np.zeros(nt)
obs_t = 1.0 * obs
obs_t = propagate(P, obs_t, times[0])
relaxations[0] = np.dot(p0, obs_t)
for i in range(nt - 1):
obs_t = propagate(P, obs_t, dt[i])
relaxations[i + 1] = np.dot(p0, obs_t)
relaxations = relaxations[ind]
return relaxations | r"""Relaxation experiment.
The relaxation experiment describes the time-evolution
of an expectation value starting in a non-equilibrium
situation.
Parameters
----------
P : (M, M) ndarray
Transition matrix
p0 : (M,) ndarray (optional)
Initial distribution for a relaxation experiment
obs : (M,) ndarray
Observable, represented as vector on state space
times : list of int (optional)
List of times at which to compute expectation
Returns
-------
res : ndarray
Array of expectation value at given times |
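A tiny worked example on a two-state chain with illustrative numbers:

import numpy as np

P = np.array([[0.9, 0.1],
              [0.2, 0.8]])     # row-stochastic transition matrix
p0 = np.array([1.0, 0.0])      # start fully in state 0
obs = np.array([0.0, 1.0])     # indicator observable for state 1

# The expectation of obs relaxes from 0.1 at t=1 toward its stationary value of 1/3.
print(relaxation_matvec(P, p0, obs, times=[1, 10, 100]))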
19,297 | def expand(obj, relation, seen):
    if hasattr(relation, 'all'):
relation = relation.all()
    if hasattr(relation, '__iter__'):
return [expand(obj, item, seen) for item in relation]
if type(relation) not in seen:
return to_json(relation, seen + [type(obj)])
else:
return relation.id | Return the to_json or id of a sqlalchemy relationship. |
19,298 | def triggers_update_many(self, data, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/triggers
api_path = "/api/v2/triggers/update_many.json"
return self.call(api_path, method="PUT", data=data, **kwargs) | https://developer.zendesk.com/rest_api/docs/core/triggers#update-many-triggers |
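A hedged sketch of the payload shape the Zendesk bulk-update endpoint expects; the trigger ids and positions are invented:

data = {
    "triggers": [
        {"id": 25, "position": 3},
        {"id": 23, "position": 5},
    ]
}
client.triggers_update_many(data)   # `client` is a hypothetical instance of this API wrapper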
19,299 | def dotter(self):
if self.globalcount <= 80:
        sys.stdout.write('.')
self.globalcount += 1
else:
sys.stdout.write()
self.globalcount = 1 | Prints formatted time to stdout at the start of a line, as well as a "."
whenever the length of the line is less than or equal to 80 "." characters |