Unnamed: 0 | code | docstring
---|---|---|
384,700 | def _expectation(p, mean, none, kern, feat, nghp=None):
return tf.matrix_transpose(expectation(p, (kern, feat), mean)) | Compute the expectation:
expectation[n] = <x_n K_{x_n, Z}>_p(x_n)
- K_{.,.} :: Linear kernel
or the equivalent for MarkovGaussian
:return: NxDxM |
384,701 | def load(self, typedef, value, **kwargs):
try:
bound_type = self.bound_types[typedef]
except KeyError:
raise DeclareException(
"Can't load unknown type {}".format(typedef))
return bound_type["load"](value, **kwargs) | Return the result of the bound load method for a typedef
Looks up the load function that was bound to the engine for a typedef,
and returns the result of passing the given `value` and any `context`
to that function.
Parameters
----------
typedef : :class:`~TypeDefinition`
The typedef whose bound load method should be used
value : object
The value to be passed into the bound load method
**kwargs : kwargs
Context for the value being loaded
Returns
-------
loaded_value : object
The return value of the load function for the input value
Raises
------
exc : :class:`KeyError`
If the input typedef is not bound to this engine
Example
-------
.. code-block:: python
class Account(TypeDefinition):
prefix = "::account"
def load(self, value, **context):
return value + Account.prefix
def dump(self, value, **context):
return value[:-len(Account.prefix)]
typedef = Account()
engine = TypeEngine("accounts")
engine.register(typedef)
engine.bind()
assert engine.dump(typedef, "Jill::account") == "Jill" |
384,702 | def dispatch(self, *args, **kwargs):
return super(GenList, self).dispatch(*args, **kwargs) | Entry point for this class, here we decide basic stuff |
384,703 | def get(self):
self.log.info()
from operator import itemgetter
results = list(self.squareResults)
results = sorted(
results, key=itemgetter(), reverse=True)
headers = ["sdss_name", "type", "ra", "dec", "specz", "specz_err", "photoz",
"photoz_err", "separation_arcsec", "separation_north_arcsec", "separation_east_arcsec"]
import collections
orderDict = collections.OrderedDict(sorted({}.items()))
filteredResults = []
for row in results:
if float(row["separation_arcsec"]) < self.searchRadius:
orderDict = collections.OrderedDict(sorted({}.items()))
for h in headers:
if h in row.keys():
orderDict[h] = row[h]
filteredResults.append(orderDict)
else:
pass
if self.nearest and len(filteredResults):
orderDict = collections.OrderedDict(sorted({}.items()))
for h in headers:
if h in filteredResults[0].keys():
orderDict[h] = row[h]
filteredResults = [orderDict]
if not len(filteredResults):
orderDict = collections.OrderedDict(sorted({}.items()))
for h in headers:
if self.galaxyType == "all" or self.galaxyType == False or (self.galaxyType == "specz" and h not in ["photoz_err", "photoz"]) or (self.galaxyType == "photoz" and h not in ["specz", "specz_err"]):
orderDict[h] = ""
filteredResults = [orderDict]
dataSet = list_of_dictionaries(
log=self.log,
listOfDictionaries=list(reversed(filteredResults))
)
if self.outputFormat == "csv":
results = dataSet.csv()
else:
results = dataSet.table()
sleep(1)
self.log.info()
return results | *get the cone_search object*
**Return:**
- ``results`` -- the results of the conesearch |
384,704 | def __find_index(alig_file_pth, idx_extensions):
if idx_extensions is None:
return None
base, _ = os.path.splitext(alig_file_pth)
for idx_ext in idx_extensions:
candidate = base + os.extsep + idx_ext
if os.path.isfile(candidate):
return candidate
return None | Find an index file for a genome alignment file in the same directory.
:param alig_file_pth: path to the alignment file.
:param idx_extensions: check for index files with these extensions
:return: path to first index file that matches the name of the alignment file
and has one of the specified extensions. |
384,705 | def tabulate(self, restricted_predicted_column_indices = [], restricted_predicted_column_names = [], dataset_name = None):
self._analyze()
data_series = self.get_series_names(column_indices = restricted_predicted_column_indices, column_names = restricted_predicted_column_names)
group_names = []
for l in self.index_layers:
group_names.append(l)
headers = [] + group_names + [, , , , , , , ]
table_rows = []
for dseries in data_series:
if isinstance(dseries, tuple):
dseries_l = list(dseries)
else:
assert(isinstance(dseries, basestring))
dseries_l = [dseries]
results = []
assert (len(self.index_layers) == len(dseries))
if self.analysis.get(dseries, {}).get() and self.analysis.get(dseries, {}).get():
results.append((dseries_l[:-1] + [dseries_l[-1] + ], self.analysis[dseries][]))
results.append((dseries_l[:-1] + [dseries_l[-1]], self.analysis[dseries][]))
elif (self.analysis.get(dseries, {}).get()):
results.append((dseries_l[:-1] + [dseries_l[-1] + ], self.analysis[dseries][]))
elif (self.analysis.get(dseries, {}).get()):
results = [(dseries, self.analysis[dseries][])]
for result in results:
n = result[1][][]
R = result[1][][][0]
rho = result[1][][][0]
mae = result[1][][]
fraction_correct = result[1][][]
accuracy = result[1][][]
SBSensitivity = .format(result[1][][][0], result[1][][][1])
SBSpecificity = .format(result[1][][][0], result[1][][][1])
method = result[0]
if isinstance(method, tuple):
method = list(method)
table_rows.append([dataset_name or self.reference_dataset_name] + method +
[n, R, rho, mae, fraction_correct, accuracy, SBSensitivity, SBSpecificity])
return DataTable(pandas.DataFrame(table_rows, columns = headers), self.index_layers) | Returns summary analysis from the dataframe as a DataTable object.
DataTables are wrapped pandas dataframes which can be combined if they have the same width. This is useful for combining multiple analyses.
DataTables can be printed to terminal as a tabular string using their representation function (i.e. print(data_table)).
This function (tabulate) looks at specific analysis; this class (DatasetDataFrame) can be subclassed for custom tabulation. |
384,706 | def validate_config(config):
fields = [f for f in get_fields(config)]
if len(fields) != len(set(fields)):
raise InvalidConfigException(
"Invalid configuration file - %d duplicate field names" % len(fields) - len(set(fields))
)
return True | Validates the extractor configuration file. Ensures that there are no duplicate field names, etc.
:param config: The configuration file that contains the specification of the extractor
:return: True if config is valid, else raises an exception that specifies the correction to be made |
384,707 | def run_download_media(filename=None):
if not filename:
filename = settings.MEDIA_DUMP_FILENAME
if env.key_filename:
ssh = settings.PROJECT_NAME
else:
ssh = .format(env.user, env.host_string)
local(.format(
ssh, settings.FAB_SETTING(), filename)) | Downloads the media dump from the server into your local machine.
In order to import the downloaded media dump, run ``fab import_media``
Usage::
fab prod run_download_media
fab prod run_download_media:filename=foobar.tar.gz |
384,708 | def parse(self,tolerance=0,downsample=None,evidence=2,use_gene_names=False):
g = Graph()
nodes = [Node(x) for x in self._transcripts]
for n in nodes: g.add_node(n)
for i in range(0,len(nodes)):
for j in range(0,len(nodes)):
if i == j: continue
jov = nodes[i].payload.junction_overlap(nodes[j].payload,tolerance)
sub = jov.is_subset()
if not sub: continue
if sub == 1:
g.add_edge(Edge(nodes[i],nodes[j]))
g.add_edge(Edge(nodes[j],nodes[i]))
if sub == 2:
g.add_edge(Edge(nodes[i],nodes[j]))
g.merge_cycles()
roots = g.roots
groups = []
for r in roots:
g2 = g.get_root_graph(r)
c = CompatibleGraph(g2,tolerance,downsample,evidence,use_gene_names=use_gene_names)
groups.append(c)
return groups | Divide out the transcripts. Allow junction tolerance if wanted |
384,709 | def compute_bin_edges(features, num_bins, edge_range, trim_outliers, trim_percentile, use_orig_distr=False):
"Compute the edges for the histogram bins to keep it the same for all nodes."
if use_orig_distr:
print()
edges=None
return edges
if edge_range is None:
if trim_outliers:
edges_of_edges = np.array([np.percentile(features, trim_percentile),
np.percentile(features, 100 - trim_percentile)])
else:
edges_of_edges = np.array([np.min(features), np.max(features)])
else:
edges_of_edges = edge_range
edges = np.linspace(edges_of_edges[0], edges_of_edges[1], num=num_bins, endpoint=True)
return edges | Compute the edges for the histogram bins to keep it the same for all nodes. |
384,710 | def iter_prio_dict(prio_dict):
for _prio, objs in sorted(prio_dict.items(), key=lambda x: x[0]):
for obj in objs:
yield obj | Iterate over a priority dictionary. A priority dictionary is a
dictionary keyed by integer priority, with the values being lists
of objects. This generator will iterate over the dictionary in
priority order (from lowest integer value to highest integer
value), yielding each object in the lists in turn.
:param prio_dict: A priority dictionary, as described above.
:returns: An iterator that yields each object in the correct
order, based on priority and ordering within the
priority values. |
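A small usage sketch for iter_prio_dict (assuming the generator defined above, with made-up priorities): objects come out ordered by their integer priority key, lowest first, and list order is preserved within each priority.
prio_dict = {10: ["last"], 1: ["first", "second"], 5: ["middle"]}
print(list(iter_prio_dict(prio_dict)))  # ['first', 'second', 'middle', 'last']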
384,711 | def save(self, filething=None, padding=None):
try:
self.tags._inject(filething.fileobj, padding)
except (IOError, error) as e:
reraise(self._Error, e, sys.exc_info()[2])
except EOFError:
raise self._Error("no appropriate stream found") | save(filething=None, padding=None)
Save a tag to a file.
If no filename is given, the one most recently loaded is used.
Args:
filething (filething)
padding (:obj:`mutagen.PaddingFunction`)
Raises:
mutagen.MutagenError |
384,712 | def features_node_edge_graph(obj):
points = {}
features = obj["features"]
for feature in tqdm(obj["features"]):
for (lon, lat) in geojson.utils.coords(feature):
points.setdefault((lon, lat), 0)
points[(lon, lat)] += 1
points = [p for (p, c) in points.items() if c > 1]
features = [geojson.Point(p) for p in points]
for f in tqdm(obj["features"]):
seqs = []
seq = []
for point in geojson.utils.coords(f):
if len(seq) > 0:
seq.append(point)
if point in points:
seq.append(point)
if len(seq) > 1 and seq[0] in points:
seqs.append(seq)
seq = [point]
for seq in seqs:
features.append(geojson.Feature(geometry={"coordinates": seq, "type": f["geometry"]["type"]}, properties=f["properties"], type=f["type"]))
obj["features"] = features
return obj | Transform the features into a more graph-like structure by
appropriately splitting LineString features into two-point
"edges" that connect Point "nodes". |
384,713 | def most_by_mask(self, mask, y, mult):
idxs = np.where(mask)[0]
cnt = min(4, len(idxs))
return idxs[np.argsort(mult * self.probs[idxs,y])[:cnt]] | Extracts the first 4 most correct/incorrect indexes from the ordered list of probabilities
Arguments:
mask (numpy.ndarray): the mask of probabilities specific to the selected class; a boolean array with shape (num_of_samples,) which contains True where class==selected_class, and False everywhere else
y (int): the selected class
mult (int): sets the ordering; -1 descending, 1 ascending
Returns:
idxs (ndarray): An array of indexes of length 4 |
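A minimal NumPy sketch of the selection logic above, using made-up probabilities: restrict to the masked indexes, order them by mult * probability, and keep at most four.
import numpy as np

probs = np.array([0.9, 0.1, 0.8, 0.3, 0.7])
mask = np.array([True, False, True, True, True])  # e.g. predictions == selected class
idxs = np.where(mask)[0]
cnt = min(4, len(idxs))
print(idxs[np.argsort(-1 * probs[idxs])[:cnt]])  # mult=-1 -> descending: [0 2 4 3]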
384,714 | def get_form(self, request, obj=None, **kwargs):
parent_id = request.GET.get("parent_id", None)
if not parent_id:
parent_id = request.POST.get("parent_id", None)
if parent_id:
return AddFolderPopupForm
else:
folder_form = super(FolderAdmin, self).get_form(
request, obj=None, **kwargs)
def folder_form_clean(form_obj):
cleaned_data = form_obj.cleaned_data
folders_with_same_name = self.get_queryset(request).filter(
parent=form_obj.instance.parent,
name=cleaned_data["name"])
if form_obj.instance.pk:
folders_with_same_name = folders_with_same_name.exclude(
pk=form_obj.instance.pk)
if folders_with_same_name.exists():
raise ValidationError(
)
return cleaned_data
folder_form.clean = folder_form_clean
return folder_form | Returns a Form class for use in the admin add view. This is used by
add_view and change_view. |
384,715 | def reset(self):
self.solved = False
self.niter = 0
self.iter_mis = []
self.F = None
self.system.dae.factorize = True | Reset all internal storage to initial status
Returns
-------
None |
384,716 | def gfrepi(window, begmss, endmss):
begmss = stypes.stringToCharP(begmss)
endmss = stypes.stringToCharP(endmss)
if not isinstance(window, ctypes.POINTER(stypes.SpiceCell)):
assert isinstance(window, stypes.SpiceCell)
assert window.is_double()
window = ctypes.byref(window)
libspice.gfrepi_c(window, begmss, endmss) | This entry point initializes a search progress report.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/gfrepi_c.html
:param window: A window over which a job is to be performed.
:type window: spiceypy.utils.support_types.SpiceCell
:param begmss: Beginning of the text portion of the output message.
:type begmss: str
:param endmss: End of the text portion of the output message.
:type endmss: str |
384,717 | def parse_value(named_reg_value):
name, value, value_type = named_reg_value
value_class = REG_VALUE_TYPE_MAP[value_type]
return name, value_class(value) | Convert the value returned from EnumValue to a (name, value) tuple using the value classes. |
384,718 | def _getModelPosterior(self,min):
Sigma = self._getLaplaceCovar(min)
n_params = self.vd.getNumberScales()
ModCompl = 0.5*n_params*sp.log(2*sp.pi)+0.5*sp.log(sp.linalg.det(Sigma))
RV = min[]+ModCompl
return RV | USES LAPLACE APPROXIMATION TO CALCULATE THE BAYESIAN MODEL POSTERIOR |
384,719 | def row_wise_rescale(matrix):
if matrix.shape[0] <= matrix.shape[1]:
raise ValueError(
)
min_ = matrix.min(axis=1)
range_ = matrix.ptp(axis=1)
min_tile = np.tile(min_, (matrix.shape[1], 1)).T
range_tile = np.tile(range_, (matrix.shape[1], 1)).T
range_tile[range_tile < np.finfo(np.float).eps] = 1.0
normed = (matrix - min_tile) / range_tile
del min_, range_, min_tile, range_tile
return normed | Row-wise rescale of a given matrix.
For fMRI data (num_voxels x num_time_points), this would translate to voxel-wise normalization over time.
Parameters
----------
matrix : ndarray
Input rectangular matrix, typically a carpet of size num_voxels x num_4th_dim, 4th_dim could be time points or gradients or other appropriate
Returns
-------
normed : ndarray
normalized matrix |
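A minimal NumPy sketch of the same row-wise rescaling on a toy matrix (the rows-vs-columns shape check in the function above is skipped here): each row is shifted by its minimum and divided by its range so it lands in [0, 1].
import numpy as np

matrix = np.array([[1.0, 2.0, 3.0],
                   [10.0, 20.0, 40.0]])
min_ = matrix.min(axis=1, keepdims=True)
range_ = np.ptp(matrix, axis=1, keepdims=True)
range_[range_ < np.finfo(float).eps] = 1.0  # guard against constant rows
normed = (matrix - min_) / range_
print(normed)  # rows become [0, 0.5, 1] and [0, 1/3, 1]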
384,720 | def batch_insert(self, records, typecast=False):
return self._batch_request(self.insert, records) | Calls :any:`insert` repetitively, following set API Rate Limit (5/sec)
To change the rate limit use ``airtable.API_LIMIT = 0.2``
(5 per second)
>>> records = [{'Name': 'John'}, {'Name': 'Marc'}]
>>> airtable.batch_insert(records)
Args:
records(``list``): Records to insert
typecast(``boolean``): Automatic data conversion from string values.
Returns:
records (``list``): list of added records |
384,721 | def to_graph_decomposition(H):
if not isinstance(H, UndirectedHypergraph):
raise TypeError("Transformation only applicable to \
undirected Hs")
G = UndirectedHypergraph()
nodes = [(node, H.get_node_attributes(node))
for node in H.node_iterator()]
G.add_nodes(nodes)
edges = [(node_a, node_b)
for hyperedge_id in H.hyperedge_id_iterator()
for node_a in H.get_hyperedge_nodes(hyperedge_id)
for node_b in H.get_hyperedge_nodes(hyperedge_id)
if node_a != node_b]
G.add_hyperedges(edges)
return G | Returns an UndirectedHypergraph object that has the same nodes (and
corresponding attributes) as the given H, except that for all
hyperedges in the given H, each node in the hyperedge is pairwise
connected to every other node also in that hyperedge in the new H.
Said another way, each of the original hyperedges are decomposed in the
new H into cliques (aka the "2-section" or "clique graph").
:param H: the H to decompose into a graph.
:returns: UndirectedHypergraph -- the decomposed H.
:raises: TypeError -- Transformation only applicable to
undirected Hs |
384,722 | def saved(name,
source="running",
user=None,
group=None,
mode=None,
attrs=None,
makedirs=False,
dir_mode=None,
replace=True,
backup=,
show_changes=True,
create=True,
tmp_dir=,
tmp_ext=,
encoding=None,
encoding_errors="strict",
allow_empty=False,
follow_symlinks=True,
check_cmd=None,
win_owner=None,
win_perms=None,
win_deny_perms=None,
win_inheritance=True,
win_perms_reset=False,
**kwargs):
ret = __salt__[](source=source)
if not ret["result"]:
return {
"name": name,
"changes": {},
"result": False,
"comment": ret["comment"]
}
return __states__[](name,
user=user,
group=group,
mode=mode,
attrs=attrs,
makedirs=makedirs,
dir_mode=dir_mode,
replace=replace,
backup=backup,
show_changes=show_changes,
create=create,
contents=ret[][source],
tmp_dir=tmp_dir,
tmp_ext=tmp_ext,
encoding=encoding,
encoding_errors=encoding_errors,
allow_empty=allow_empty,
follow_symlinks=follow_symlinks,
check_cmd=check_cmd,
win_owner=win_owner,
win_perms=win_perms,
win_deny_perms=win_deny_perms,
win_inheritance=win_inheritance,
win_perms_reset=win_perms_reset,
**kwargs) | .. versionadded:: 2019.2.0
Save the configuration to a file on the local file system.
name
Absolute path to file where to save the configuration.
To push the files to the Master, use
:mod:`cp.push <salt.modules.cp.push>` Execution function.
source: ``running``
The configuration source. Choose from: ``running``, ``candidate``,
``startup``. Default: ``running``.
user
The user to own the file, this defaults to the user salt is running as
on the minion
group
The group ownership set for the file, this defaults to the group salt
is running as on the minion. On Windows, this is ignored
mode
The permissions to set on this file, e.g. ``644``, ``0775``, or
``4664``.
The default mode for new files and directories corresponds to the
umask of the salt process. The mode of existing files and directories
will only be changed if ``mode`` is specified.
.. note::
This option is **not** supported on Windows.
attrs
The attributes to have on this file, e.g. ``a``, ``i``. The attributes
can be any or a combination of the following characters:
``aAcCdDeijPsStTu``.
.. note::
This option is **not** supported on Windows.
makedirs: ``False``
If set to ``True``, then the parent directories will be created to
facilitate the creation of the named file. If ``False``, and the parent
directory of the destination file doesn't exist, the state will fail.
dir_mode
If directories are to be created, passing this option specifies the
permissions for those directories. If this is not set, directories
will be assigned permissions by adding the execute bit to the mode of
the files.
The default mode for new files and directories corresponds to the umask of the salt
process. For existing files and directories it's not enforced.
replace: ``True``
If set to ``False`` and the file already exists, the file will not be
modified even if changes would otherwise be made. Permissions and
ownership will still be enforced, however.
backup
Overrides the default backup mode for this specific file. See
:ref:`backup_mode documentation <file-state-backups>` for more details.
show_changes: ``True``
Output a unified diff of the old file and the new file. If ``False``
return a boolean if any changes were made.
create: ``True``
If set to ``False``, then the file will only be managed if the file
already exists on the system.
encoding
If specified, then the specified encoding will be used. Otherwise, the
file will be encoded using the system locale (usually UTF-8). See
https://docs.python.org/3/library/codecs.html#standard-encodings for
the list of available encodings.
encoding_errors: ``'strict'``
Error encoding scheme. Default is ``'strict'``.
See https://docs.python.org/2/library/codecs.html#codec-base-classes
for the list of available schemes.
allow_empty: ``True``
If set to ``False``, then the state will fail if the contents specified
by ``contents_pillar`` or ``contents_grains`` are empty.
follow_symlinks: ``True``
If the desired path is a symlink follow it and make changes to the
file to which the symlink points.
check_cmd
The specified command will be run with an appended argument of a
*temporary* file containing the new managed contents. If the command
exits with a zero status the new managed contents will be written to
the managed destination. If the command exits with a nonzero exit
code, the state will fail and no changes will be made to the file.
tmp_dir
Directory for temp file created by ``check_cmd``. Useful for checkers
dependent on config file location (e.g. daemons restricted to their
own config directories by an apparmor profile).
tmp_ext
Suffix for temp file created by ``check_cmd``. Useful for checkers
dependent on config file extension (e.g. the init-checkconf upstart
config checker).
win_owner: ``None``
The owner of the directory. If this is not passed, user will be used. If
user is not passed, the account under which Salt is running will be
used.
win_perms: ``None``
A dictionary containing permissions to grant and their propagation. For
example: ``{'Administrators': {'perms': 'full_control'}}`` Can be a
single basic perm or a list of advanced perms. ``perms`` must be
specified. ``applies_to`` does not apply to file objects.
win_deny_perms: ``None``
A dictionary containing permissions to deny and their propagation. For
example: ``{'Administrators': {'perms': 'full_control'}}`` Can be a
single basic perm or a list of advanced perms. ``perms`` must be
specified. ``applies_to`` does not apply to file objects.
win_inheritance: ``True``
True to inherit permissions from the parent directory, False not to
inherit permission.
win_perms_reset: ``False``
If ``True`` the existing DACL will be cleared and replaced with the
settings defined in this function. If ``False``, new entries will be
appended to the existing DACL. Default is ``False``.
State SLS Example:
.. code-block:: yaml
/var/backups/{{ opts.id }}/{{ salt.status.time('%s') }}.cfg:
netconfig.saved:
- source: running
- makedirs: true
The state SLS above would create a backup config grouping the files by the
Minion ID, in chronological files. For example, if the state is executed
on the 3rd of August 2018, at 5:15PM, on the Minion ``core1.lon01``, the
configuration would be saved in the file:
``/var/backups/core01.lon01/1533316558.cfg`` |
384,723 | def CI_calc(mean, SE, CV=1.96):
try:
CI_down = mean - CV * SE
CI_up = mean + CV * SE
return (CI_down, CI_up)
except Exception:
return ("None", "None") | Calculate confidence interval.
:param mean: mean of data
:type mean : float
:param SE: standard error of data
:type SE : float
:param CV: critical value
:type CV:float
:return: confidence interval as tuple |
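A worked example for CI_calc with made-up numbers: a 95% interval (critical value 1.96) around a mean of 10.0 with standard error 2.0.
mean, SE, CV = 10.0, 2.0, 1.96
CI_down = mean - CV * SE  # roughly 6.08
CI_up = mean + CV * SE    # roughly 13.92
print((CI_down, CI_up))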
384,724 | def initSchd_1_to_4(self):
self.m_schd_1_to_4["reserved_40"] = [6, FieldType.Hex, ScaleType.No, "", 0, False, False]
self.m_schd_1_to_4["Schedule_1_Period_1_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_1_Period_1_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_1_Period_1_Tariff"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_1_Period_2_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_1_Period_2_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_1_Period_2_Tariff"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_1_Period_3_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_1_Period_3_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_1_Period_3_Tariff"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_1_Period_4_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_1_Period_4_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_1_Period_4_Tariff"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["reserved_41"] = [24, FieldType.Hex, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_2_Period_1_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_2_Period_1_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_2_Period_1_Tariff"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_2_Period_2_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_2_Period_2_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_2_Period_2_Tariff"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_2_Period_3_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_2_Period_3_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_2_Period_3_Tariff"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_2_Period_4_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_2_Period_4_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_2_Period_4_Tariff"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["reserved_42"] = [24, FieldType.Hex, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_3_Period_1_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_3_Period_1_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_3_Period_1_Tariff"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_3_Period_2_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_3_Period_2_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_3_Period_2_Tariff"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_3_Period_3_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_3_Period_3_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_3_Period_3_Tariff"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_3_Period_4_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_3_Period_4_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_3_Period_4_Tariff"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["reserved_43"] = [24, FieldType.Hex, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_4_Period_1_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_4_Period_1_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_4_Period_1_Tariff"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_4_Period_2_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_4_Period_2_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_4_Period_2_Tariff"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_4_Period_3_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_4_Period_3_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_4_Period_3_Tariff"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_4_Period_4_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_4_Period_4_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_4_Period_4_Tariff"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["reserved_44"] = [79, FieldType.Hex, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["crc16"] = [2, FieldType.Hex, ScaleType.No, "", 0, False, False]
pass | Initialize first tariff schedule :class:`~ekmmeters.SerialBlock`. |
384,725 | def get_current_future_chain(self, continuous_future, dt):
rf = self._roll_finders[continuous_future.roll_style]
session = self.trading_calendar.minute_to_session_label(dt)
contract_center = rf.get_contract_center(
continuous_future.root_symbol, session,
continuous_future.offset)
oc = self.asset_finder.get_ordered_contracts(
continuous_future.root_symbol)
chain = oc.active_chain(contract_center, session.value)
return self.asset_finder.retrieve_all(chain) | Retrieves the future chain for the contract at the given `dt` according
to the `continuous_future` specification.
Returns
-------
future_chain : list[Future]
A list of active futures, where the first index is the current
contract specified by the continuous future definition, the second
is the next upcoming contract and so on. |
384,726 | def line(x_fn, y_fn, *, options={}, **interact_params):
fig = options.get(, False) or _create_fig(options=options)
[line] = (_create_marks(fig=fig, marks=[bq.Lines], options=options))
_add_marks(fig, [line])
def wrapped(**interact_params):
x_data = util.maybe_call(x_fn, interact_params, prefix="x__")
line.x = x_data
y_bound = util.maybe_curry(y_fn, x_data)
line.y = util.maybe_call(y_bound, interact_params, prefix="y__")
controls = widgets.interactive(wrapped, **interact_params)
return widgets.VBox([controls, fig]) | Generates an interactive line chart that allows users to change the
parameters of the inputs x_fn and y_fn.
Args:
x_fn (Array | (*args -> Array str | Array int | Array float)):
If array, uses array values for x-coordinates.
If function, must take parameters to interact with and return an
array of strings or numbers. These will become the x-coordinates
of the line plot.
y_fn (Array | (Array, *args -> Array int | Array float)):
If array, uses array values for y-coordinates.
If function, must take in the output of x_fn as its first parameter
and optionally other parameters to interact with. Must return an
array of numbers. These will become the y-coordinates of the line
plot.
Kwargs:
{options}
interact_params (dict): Keyword arguments in the same format as
`ipywidgets.interact`. One argument is required for each argument
of both `x_fn` and `y_fn`. If `x_fn` and `y_fn` have conflicting
parameter names, prefix the corresponding kwargs with `x__` and
`y__`.
Returns:
VBox with two children: the interactive controls and the figure.
>>> line([1, 2, 3], [4, 7, 10])
VBox(...)
>>> def x_values(max): return np.arange(0, max)
>>> def y_values(xs, sd):
... return xs + np.random.normal(len(xs), scale=sd)
>>> line(x_values, y_values, max=(10, 50), sd=(1, 10))
VBox(...) |
384,727 | def _apply_theme(self):
self.theme.apply_axs(self.axs)
self.theme.apply_figure(self.figure) | Apply theme attributes to Matplotlib objects |
384,728 | def unblock_username(username, pipe=None):
do_commit = False
if not pipe:
pipe = REDIS_SERVER.pipeline()
do_commit = True
if username:
pipe.delete(get_username_attempt_cache_key(username))
pipe.delete(get_username_blocked_cache_key(username))
if do_commit:
pipe.execute() | unblock the given Username |
384,729 | def sources_remove(source_uri, ruby=None, runas=None, gem_bin=None):
return _gem(["sources", "--remove", source_uri],
ruby,
gem_bin=gem_bin,
runas=runas) | Remove a gem source.
:param source_uri: string
The source URI to remove.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.sources_remove http://rubygems.org/ |
384,730 | def map_lazy(
self,
target: Callable,
map_iter: Sequence[Any] = None,
*,
map_args: Sequence[Sequence[Any]] = None,
args: Sequence = None,
map_kwargs: Sequence[Mapping[str, Any]] = None,
kwargs: Mapping = None,
pass_state: bool = False,
num_chunks: int = None,
) -> SequenceTaskResult:
if num_chunks is None:
num_chunks = multiprocessing.cpu_count()
lengths = [len(i) for i in (map_iter, map_args, map_kwargs) if i is not None]
assert (
lengths
), "At least one of `map_iter`, `map_args`, or `map_kwargs` must be provided as a non-empty Sequence."
length = min(lengths)
assert (
length > num_chunks
), "`length`(%d) cannot be less than `num_chunks`(%d)" % (length, num_chunks)
chunk_length, extra = divmod(length, num_chunks)
if extra:
chunk_length += 1
task_id = util.generate_task_id((chunk_length, length, num_chunks))
iter_chunks = util.make_chunks(map_iter, chunk_length, num_chunks)
args_chunks = util.make_chunks(map_args, chunk_length, num_chunks)
kwargs_chunks = util.make_chunks(map_kwargs, chunk_length, num_chunks)
target_bytes = serializer.dumps_fn(target)
for index in range(num_chunks):
params = (
iter_chunks[index],
args_chunks[index],
args,
kwargs_chunks[index],
kwargs,
)
task = (params, pass_state, self.namespace)
self._task_push.send_multipart(
[
util.encode_chunk_id(task_id, index),
target_bytes,
serializer.dumps(task),
]
)
return SequenceTaskResult(self.server_address, task_id) |
Functional equivalent of ``map()`` in-built function,
but executed in a parallel fashion.
Distributes the iterables,
provided in the ``map_*`` arguments to ``num_chunks`` no of worker nodes.
The idea is to:
1. Split the the iterables provided in the ``map_*`` arguments into ``num_chunks`` no of equally sized chunks.
2. Send these chunks to ``num_chunks`` number of worker nodes.
3. Wait for all these worker nodes to finish their task(s).
4. Combine the acquired results in the same sequence as provided in the ``map_*`` arguments.
5. Return the combined results.
*Steps 3-5 can be done lazily, on the fly with the help of an iterator*
:param target:
The ``Callable`` to be invoked inside a :py:class:`Process`.
*It is invoked with the following signature:*
``target(map_iter[i], *map_args[i], *args, **map_kwargs[i], **kwargs)``
*Where:*
- ``i`` is the index of n\ :sup:`th` element of the Iterable(s) provided in the ``map_*`` arguments.
- ``args`` and ``kwargs`` are passed from the ``**process_kwargs``.
The ``pass_state`` keyword argument allows you to include the ``state`` arg.
:param map_iter:
A sequence whose elements are supplied as the *first* positional argument to the ``target``.
:param map_args:
A sequence whose elements are supplied as positional arguments (``*args``) to the ``target``.
:param map_kwargs:
A sequence whose elements are supplied as keyword arguments (``**kwargs``) to the ``target``.
:param args:
The argument tuple for ``target``, supplied after ``map_iter`` and ``map_args``.
By default, it is an empty ``tuple``.
:param kwargs:
A dictionary of keyword arguments for ``target``.
By default, it is an empty ``dict``.
:param pass_state:
Whether this process needs to access the state.
If this is set to ``False``,
then the ``state`` argument won't be provided to the ``target``.
If this is set to ``True``,
then a :py:class:`State` object is provided as the first Argument to the ``target``.
Unlike :py:class:`Process` it is set to ``False`` by default.
(To retain a similar API to in-built ``map()``)
:param num_chunks:
The number of worker nodes to use.
By default, it is set to ``multiprocessing.cpu_count()``
(The number of CPU cores on your system)
:param lazy:
Whether to return immediately.
:return:
The result is quite similar to ``map()`` in-built function.
It returns a :py:class:`Iterable` which contains,
the return values of the ``target`` function,
when applied to every item of the Iterables provided in the ``map_*`` arguments.
The actual "processing" starts as soon as you call this function.
The returned :py:class:`Iterable` only fetches the results from the worker processes.
.. note::
- If ``len(map_iter) != len(map_args) != len(map_kwargs)``,
then the results will be cut-off at the shortest Sequence.
See :ref:`worker_map` for Examples. |
384,731 | def bin(args):
p = OptionParser(bin.__doc__)
p.add_option("--dtype", choices=("float32", "int32"),
help="dtype of the matrix")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
tsvfile, = args
dtype = opts.dtype
if dtype is None:
dtype = np.int32 if "data" in tsvfile else np.float32
else:
dtype = np.int32 if dtype == "int32" else np.float32
print("dtype: {}".format(dtype), file=sys.stderr)
fp = open(tsvfile)
next(fp)
arrays = []
for i, row in enumerate(fp):
a = np.fromstring(row, sep="\t", dtype=dtype)
a = a[1:]
arrays.append(a)
print(i, a, file=sys.stderr)
print("Merging", file=sys.stderr)
b = np.concatenate(arrays)
print("Binary shape: {}".format(b.shape), file=sys.stderr)
binfile = tsvfile.rsplit(".", 1)[0] + ".bin"
b.tofile(binfile) | %prog bin data.tsv
Convert tsv to binary format. |
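A minimal sketch of the same tsv-to-binary idea (not the author's CLI; the rows and file name here are made up): parse each tab-separated row, drop the leading index column, concatenate, and dump the raw values with tofile().
import numpy as np

rows = ["0\t1.5\t2.5", "1\t3.0\t4.0"]  # stand-in for lines read from data.tsv
arrays = [np.array(r.split("\t"), dtype=np.float32)[1:] for r in rows]
b = np.concatenate(arrays)
b.tofile("data.bin")  # read back with np.fromfile("data.bin", dtype=np.float32)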
384,732 | def remove_bucket(self, bucket_name):
is_valid_bucket_name(bucket_name)
self._url_open("DELETE", bucket_name=bucket_name)
self._delete_bucket_region(bucket_name) | Remove a bucket.
:param bucket_name: Bucket to remove |
384,733 | def raise_301(instance, location):
_set_location(instance, location)
instance.response.status = 301
raise ResponseException(instance.response) | Abort the current request with a 301 (Moved Permanently) response code.
Sets the Location header correctly. If the location does not start with a
slash, the path of the current request is prepended.
:param instance: Resource instance (used to access the response)
:type instance: :class:`webob.resource.Resource`
:raises: :class:`webob.exceptions.ResponseException` of status 301 |
384,734 | def startup_config_content(self, startup_config):
try:
startup_config_path = os.path.join(self.working_dir, "startup-config.cfg")
if startup_config is None:
startup_config = ""
if len(startup_config) == 0 and os.path.exists(startup_config_path):
return
with open(startup_config_path, "w+", encoding="utf-8") as f:
if len(startup_config) == 0:
f.write("")
else:
startup_config = startup_config.replace("%h", self._name)
f.write(startup_config)
vlan_file = os.path.join(self.working_dir, "vlan.dat-{:05d}".format(self.application_id))
if os.path.exists(vlan_file):
try:
os.remove(vlan_file)
except OSError as e:
log.error("Could not delete VLAN file : {}".format(vlan_file, e))
except OSError as e:
raise IOUError("Can{}': {}".format(startup_config_path, e)) | Update the startup config
:param startup_config: content of the startup configuration file |
384,735 | def querytime(self, value):
self._querytime = value
self.query.querytime = value | Sets self._querytime as well as self.query.querytime.
:param value: None or datetime
:return: |
384,736 | def weighted_axioms(self, x, y, xg):
scope_pairs = [
(, , 0.0, 0.0, 3.0,-0.8),
(, , 0.0, 0.0, 2.5,-0.5),
(, , -1.0, 1.0, 0.0, 0.0),
(, , 1.0,-1.0, 0.0, 0.0),
(, , 0.0, 0.0, 0.0, 0.0),
(, , 0.0, 0.0, 2.5,-0.5),
(, , -1.0, 1.0, 0.0, 0.0),
(, , 1.0,-1.0, 0.0, 0.0),
(, , 0.0, 0.0, 0.0, 0.0),
(, , -0.5, 0.5, 0.0, 0.0),
(, , 0.5,-0.5, 0.0, 0.0),
(, , 0.0, 0.0, 0.0, 0.0),
(, , 0.0, 0.0, 0.0, 1.0),
(, , -0.5, 0.5, 0.0, 0.2),
(, , 0.0, 0.0, 0.0, 0.0)
]
scope_map = defaultdict(dict)
for (l,r,w1,w2,w3,w4) in scope_pairs:
l = l.upper()
r = r.upper()
scope_map[l][r] = np.array((w1,w2,w3,w4))
scope_map[r][l] = np.array((w2,w1,w3,w4))
WS = None
pfx1 = self._id_to_ontology(x)
pfx2 = self._id_to_ontology(y)
for mw in self.config.get(, []):
mpfx1 = mw.get(,)
mpfx2 = mw.get(,)
X = np.array(mw[])
if mpfx1 == pfx1 and mpfx2 == pfx2:
WS = X
elif mpfx2 == pfx1 and mpfx1 == pfx2:
WS = self._flipweights(X)
elif mpfx1 == pfx1 and mpfx2 == and WS is None:
WS = X
elif mpfx2 == pfx1 and mpfx1 == and WS is None:
WS = self._flipweights(X)
if WS is None:
WS = np.array((0.0, 0.0, 0.0, 0.0))
WS += np.array(self.config.get(, [0.0, 0.0, 1.5, -0.1]))
logging.info(.format(WS))
for xw in self.config.get(, []):
left = xw.get(,)
right = xw.get(,)
X = np.array(xw[])
if x == left and y == right:
WS += X
logging.info(.format(X, x, y))
elif y == left and x == right:
WS += self._flipweights(X)
logging.info(.format(X))
smap = self.smap
WT = np.array((0.0, 0.0, 0.0, 0.0))
WBESTMAX = np.array((0.0, 0.0, 0.0, 0.0))
n = 0
for sx in smap[x]:
WBEST, _ = self._best_match_syn(sx, smap[y], scope_map)
if WBEST is not None:
WT += WBEST
n += 1
if max(abs(WBEST)) > max(abs(WBESTMAX)):
WBESTMAX = WBEST
for sy in smap[y]:
WBEST, _ = self._best_match_syn(sy, smap[x], scope_map)
if WBEST is not None:
WT += WBEST
n += 1
if n > 0:
logging.info(.format(WBESTMAX))
WS += WBESTMAX
WS += self._graph_weights(x, y, xg)
logging.info(.format(WS))
(ss1,ss2) = xg[x][y][self.SIMSCORES]
WS[3] += ((1-ss1) + (1-ss2)) / 2
rs = xg[x][y][]
if rs == 4:
WS[2] += 0.5
if rs == 0:
WS[2] -= 0.2
P = 1/(1+np.exp(-WS))
logging.info(.format(WS, P))
P = P / np.sum(P)
return P | return a tuple (sub,sup,equiv,other) indicating estimated prior probabilities for an interpretation of a mapping
between x and y.
See kboom paper |
384,737 | def obfn_fvar(self, i):
return self.X[..., i] if self.opt["fEvalX"] else self.Y | Variable to be evaluated in computing :math:`f_i(\cdot)`,
depending on the ``fEvalX`` option value. |
384,738 | def set_node_as_int(self, dst, src):
dst.value = self.value(src)
return True | Set a node to a value captured from another node
example::
R = [
In : node #setcapture(_, node)
] |
384,739 | def citation_count(doi, url = "http://www.crossref.org/openurl/",
key = "[email protected]", **kwargs):
args = {"id": "doi:" + doi, "pid": key, "noredirect": True}
args = dict((k, v) for k, v in args.items() if v)
res = requests.get(url, params = args, headers = make_ua(), **kwargs)
xmldoc = minidom.parseString(res.content)
val = xmldoc.getElementsByTagName()[0].attributes[].value
return int(str(val)) | Get a citation count with a DOI
:param doi: [String] DOI, digital object identifier
:param url: [String] the API url for the function (should be left to default)
:param key: [String] your API key
See http://labs.crossref.org/openurl/ for more info on this Crossref API service.
Usage::
from habanero import counts
counts.citation_count(doi = "10.1371/journal.pone.0042793")
counts.citation_count(doi = "10.1016/j.fbr.2012.01.001")
# DOI not found
## FIXME
counts.citation_count(doi = "10.1016/j.fbr.2012") |
384,740 | def readline(self, size=-1):
"Ignore the `size` since a complete line must be processed."
while True:
try:
record = next(self.reader)
except StopIteration:
break
if checks.record_is_valid(record):
if self.use_cache:
self.variants.ensure_cache(record)
md5 = calculate_md5(record)
if not self.use_cache or md5 not in self.variants:
cleaned = self.process_line(record)
cleaned.append(md5)
return self.outdel.join(cleaned) + "\n"
return "" | Ignore the `size` since a complete line must be processed. |
384,741 | def inline(args):
trusted = args.trusted
args = load_config(args)
print("Args:")
pprint.pprint(args)
ret_code = 0
url = args.url
if args.repo_slug:
owner = args.repo_slug.split("/")[0]
repo = args.repo_slug.split("/")[1]
else:
owner = args.owner
repo = args.repo
if args.url:
try:
url_to_parse = args.url
if not url_to_parse.endswith(".git"):
url_to_parse += ".git"
parsed = giturlparse.parse(str(url_to_parse))
url = parsed.resource
if not url.startswith("https://"):
url = "https://" + url
if parsed.owner:
owner = parsed.owner
if parsed.name:
repo = parsed.name
except giturlparse.parser.ParserError:
pass
if not args.dryrun and args.interface not in interfaces.INTERFACES:
print("Valid inline-plz config not found")
return 1
print("Using interface: {0}".format(args.interface))
my_interface = None
filenames = None
if not args.dryrun:
my_interface = interfaces.INTERFACES[args.interface](
owner,
repo,
args.pull_request,
args.branch,
args.token,
url,
args.commit,
args.ignore_paths,
args.prefix,
args.autofix,
args.set_status,
)
if not my_interface.is_valid():
print("Invalid review. Exiting.")
return 0
filenames = my_interface.filenames
my_interface.start_review()
try:
linter_runner = LinterRunner(
args.install,
args.autorun,
args.ignore_paths,
args.config_dir,
args.enabled_linters,
args.disabled_linters,
args.autofix,
trusted,
filenames,
)
messages = linter_runner.run_linters()
except Exception:
print("Linting failed:\n{}".format(traceback.format_exc()))
print("inline-plz version: {}".format(__version__))
print("Python version: {}".format(sys.version))
ret_code = 1
if my_interface:
my_interface.finish_review(error=True)
return ret_code
print("{} lint messages found".format(len(messages)))
print("inline-plz version: {}".format(__version__))
print("Python version: {}".format(sys.version))
if args.dryrun:
print_messages(messages)
write_messages_to_json(messages)
return ret_code
try:
if my_interface.post_messages(messages, args.max_comments):
if not args.zero_exit:
ret_code = 1
if args.delete_outdated:
my_interface.clear_outdated_messages()
my_interface.finish_review(success=False)
write_messages_to_json(messages)
return ret_code
if args.delete_outdated:
my_interface.clear_outdated_messages()
my_interface.finish_review(success=True)
except KeyError:
print("Interface not found: {}".format(args.interface))
traceback.print_exc()
write_messages_to_json(messages)
return ret_code | Parse input file with the specified parser and post messages based on lint output
:param args: Contains the following
interface: How are we going to post comments?
owner: Username of repo owner
repo: Repository name
pr: Pull request ID
token: Authentication for repository
url: Root URL of repository (not your project) Default: https://github.com
dryrun: Prints instead of posting comments.
zero_exit: If true: always return a 0 exit code.
install: If true: install linters.
max_comments: Maximum comments to write
:return: Exit code. 1 if there are any comments, 0 if there are none. |
384,742 | def _run_pre_command(self, pre_cmd):
logger.debug(, pre_cmd)
try:
pre_proc = Popen(pre_cmd, stdout=PIPE, stderr=STDOUT, shell=True)
except OSError as err:
if err.errno == errno.ENOENT:
logger.debug(, pre_cmd)
return
stdout, stderr = pre_proc.communicate()
the_return_code = pre_proc.poll()
logger.debug("Pre-command results:")
logger.debug("STDOUT: %s", stdout)
logger.debug("STDERR: %s", stderr)
logger.debug("Return Code: %s", the_return_code)
if the_return_code != 0:
return []
if six.PY3:
stdout = stdout.decode()
return stdout.splitlines() | Run a pre command to get external args for a command |
384,743 | def list(self, log=values.unset, message_date_before=values.unset,
message_date=values.unset, message_date_after=values.unset, limit=None,
page_size=None):
return list(self.stream(
log=log,
message_date_before=message_date_before,
message_date=message_date,
message_date_after=message_date_after,
limit=limit,
page_size=page_size,
)) | Lists NotificationInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param unicode log: Filter by log level
:param date message_date_before: Filter by date
:param date message_date: Filter by date
:param date message_date_after: Filter by date
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.api.v2010.account.call.notification.NotificationInstance] |
384,744 | def main():
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument(, , required=True,
action=, help=)
arg_parser.add_argument(, , required=True,
dest=, action=, help=)
arg_parser.add_argument(, , dest=,
action=, help=)
arg_parser.add_argument(, , dest=,
action=, help=)
arg_parser.add_argument(, , dest=,
action=, type=int, help=)
arg_parser.add_argument(, , dest=,
action=, type=int, help=)
arg_parser.add_argument(, , dest=,
action=, help=)
arg_parser.add_argument(, , action=,
default=False, help=)
arg_parser.add_argument(, , action=,
default=False, help=)
arg_parser.add_argument(, dest=, action=,
default=False, help=)
args = arg_parser.parse_args()
noprompt = args.noprompt
nowait = args.nowait | main routine |
384,745 | def list(self, request, *args, **kwargs):
return super(SshKeyViewSet, self).list(request, *args, **kwargs) | To get a list of SSH keys, run **GET** against */api/keys/* as authenticated user.
A new SSH key can be created by any active users. Example of a valid request:
.. code-block:: http
POST /api/keys/ HTTP/1.1
Content-Type: application/json
Accept: application/json
Authorization: Token c84d653b9ec92c6cbac41c706593e66f567a7fa4
Host: example.com
{
"name": "ssh_public_key1",
"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDDURXDP5YhOQUYoDuTxJ84DuzqMJYJqJ8+SZT28
TtLm5yBDRLKAERqtlbH2gkrQ3US58gd2r8H9jAmQOydfvgwauxuJUE4eDpaMWupqquMYsYLB5f+vVGhdZbbzfc6DTQ2rY
dknWoMoArlG7MvRMA/xQ0ye1muTv+mYMipnd7Z+WH0uVArYI9QBpqC/gpZRRIouQ4VIQIVWGoT6M4Kat5ZBXEa9yP+9du
D2C05GX3gumoSAVyAcDHn/xgej9pYRXGha4l+LKkFdGwAoXdV1z79EG1+9ns7wXuqMJFHM2KDpxAizV0GkZcojISvDwuh
vEAFdOJcqjyyH4FOGYa8usP1 [email protected]",
} |
384,746 | def moderate_model(ParentModel, publication_date_field=None, enable_comments_field=None):
attrs = {
: publication_date_field,
: publication_date_field,
: enable_comments_field,
}
ModerationClass = type(ParentModel.__name__ + "Moderator", (FluentCommentsModerator,), attrs)
moderator.register(ParentModel, ModerationClass) | Register a parent model (e.g. ``Blog`` or ``Article``) that should receive comment moderation.
:param ParentModel: The parent model, e.g. a ``Blog`` or ``Article`` model.
:param publication_date_field: The field name of a :class:`~django.db.models.DateTimeField` in the parent model which stores the publication date.
:type publication_date_field: str
:param enable_comments_field: The field name of a :class:`~django.db.models.BooleanField` in the parent model which stores the whether comments are enabled.
:type enable_comments_field: str |
384,747 | def sign(self, msg, key):
if not isinstance(key, rsa.RSAPrivateKey):
raise TypeError(
"The key must be an instance of rsa.RSAPrivateKey")
sig = key.sign(msg, self.padding, self.hash)
return sig | Create a signature over a message as defined in RFC7515 using an
RSA key
:param msg: the message.
:type msg: bytes
:returns: bytes, the signature of data.
:rtype: bytes |
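A minimal signing sketch with the cryptography library, assuming PKCS#1 v1.5 padding and SHA-256 (an RS256-style setup; the actual padding and hash objects used by the class above are configured elsewhere):
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding, rsa

key = rsa.generate_private_key(public_exponent=65537, key_size=2048, backend=default_backend())
signature = key.sign(b"payload", padding.PKCS1v15(), hashes.SHA256())
# verify() raises InvalidSignature if the message or signature was tampered with
key.public_key().verify(signature, b"payload", padding.PKCS1v15(), hashes.SHA256())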
384,748 | def ngettext(*args, **kwargs):
is_plural = args[2] > 1
if not is_plural:
key = args[0]
key_match = TRANSLATION_KEY_RE.match(key)
else:
key = args[1]
key_match = PLURAL_TRANSLATION_KEY_RE.match(key)
translation = _ngettext(*args, **kwargs)
if not key_match or translation != key:
return translation
return _get_domain(key_match).ngettext(*args, **kwargs) | Like :func:`gettext`, except it supports pluralization. |
384,749 | def _get_module_filename(module):
module = module.split(".")
package = ".".join(module[:-1])
module = module[-1]
try:
if not package:
module = __import__(module)
else:
package = __import__(package, fromlist=[module])
module = getattr(package, module, None)
filename = getattr(module, , None)
if not filename:
return Unparseable()
if filename.endswith():
filename = filename[:-1]
if not os.path.exists(filename) and os.path.isfile(filename):
filename = filename[:-11]
return filename
except ImportError:
return | Return the filename of `module` if it can be imported.
If `module` is a package, its directory will be returned.
If it cannot be imported ``None`` is returned.
If the ``__file__`` attribute is missing, or the module or package is a
compiled egg, then an :class:`Unparseable` instance is returned, since the
source can't be retrieved.
:param module: A module name, such as ``'test.test_config'``
:type module: str |
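A rough equivalent using importlib ("json.decoder" is only an illustrative module name): import the module, read its __file__, and trim a compiled .pyc path back to the .py source.
import importlib
import os

mod = importlib.import_module("json.decoder")
filename = getattr(mod, "__file__", None)
if filename and filename.endswith(".pyc"):
    filename = filename[:-1]  # point at the .py source instead
print(filename, filename and os.path.exists(filename))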
384,750 | def mask_plane(data, wcs, region, negate=False):
indexes = np.empty((data.shape[0]*data.shape[1], 2), dtype=int)
idx = np.array([(j, 0) for j in range(data.shape[1])])
j = data.shape[1]
for i in range(data.shape[0]):
idx[:, 1] = i
indexes[i*j:(i+1)*j] = idx
ra, dec = wcs.wcs_pix2world(indexes, 1).transpose()
bigmask = region.sky_within(ra, dec, degin=True)
if not negate:
bigmask = np.bitwise_not(bigmask)
bigmask = bigmask.reshape(data.shape)
data[bigmask] = np.nan
return data | Mask a 2d image (data) such that pixels within 'region' are set to nan.
Parameters
----------
data : 2d-array
Image array.
wcs : astropy.wcs.WCS
WCS for the image in question.
region : :class:`AegeanTools.regions.Region`
A region within which the image pixels will be masked.
negate : bool
If True then pixels *outside* the region are masked.
Default = False.
Returns
-------
masked : 2d-array
The original array, but masked as required. |
384,751 | def _dimension_keys(self):
return [tuple(zip([d.name for d in self.kdims], [k] if self.ndims == 1 else k))
for k in self.keys()] | Helper for __mul__ that returns the list of keys together with
the dimension labels. |
384,752 | def validate_query_params(self, strict=True):
if not (self.api_key or default_api_key):
raise ValueError()
if strict and self.query_params_mode not in (None, , ):
raise ValueError()
if not self.person.is_searchable:
raise ValueError()
if strict and self.person.unsearchable_fields:
raise ValueError(
% self.person.unsearchable_fields) | Check if the request is valid and can be sent, raise ValueError if
not.
`strict` is a boolean argument that defaults to True which means an
exception is raised on every invalid query parameter, if set to False
an exception is raised only when the search request cannot be performed
because required query params are missing. |
384,753 | def prepare_state_m_for_insert_as(state_m_to_insert, previous_state_size):
if isinstance(state_m_to_insert, AbstractStateModel) and \
not gui_helper_meta_data.model_has_empty_meta(state_m_to_insert):
if isinstance(state_m_to_insert, ContainerStateModel):
models_dict = {: state_m_to_insert}
for state_element_key in state_m_to_insert.state.state_element_attrs:
state_element_list = getattr(state_m_to_insert, state_element_key)
if hasattr(state_element_list, ):
state_element_list = state_element_list.values()
models_dict[state_element_key] = {elem.core_element.core_element_id: elem for elem in state_element_list}
resize_factor = gui_helper_meta_data.scale_meta_data_according_state(models_dict, as_template=True)
gui_helper_meta_data.resize_income_of_state_m(state_m_to_insert, resize_factor)
elif isinstance(state_m_to_insert, StateModel):
if previous_state_size:
current_size = state_m_to_insert.get_meta_data_editor()[]
factor = gui_helper_meta_data.divide_two_vectors(current_size, previous_state_size)
state_m_to_insert.set_meta_data_editor(, previous_state_size)
factor = (min(*factor), min(*factor))
gui_helper_meta_data.resize_state_meta(state_m_to_insert, factor)
else:
logger.debug("For insert as template of {0} no resize of state meta data is performed because "
"the meta data has empty fields.".format(state_m_to_insert))
elif not isinstance(state_m_to_insert, LibraryStateModel):
raise TypeError("For insert as template of {0} no resize of state meta data is performed because "
"state model type is not ContainerStateModel or StateModel".format(state_m_to_insert))
else:
logger.info("For insert as template of {0} no resize of state meta data is performed because the meta data has "
"empty fields.".format(state_m_to_insert)) | Prepares and scales the meta data to fit into actual size of the state. |
384,754 | def from_xdr_object(cls, op_xdr_object):
if not op_xdr_object.sourceAccount:
source = None
else:
source = encode_check(
, op_xdr_object.sourceAccount[0].ed25519).decode()
destination = encode_check(
,
op_xdr_object.body.paymentOp.destination.ed25519).decode()
asset = Asset.from_xdr_object(op_xdr_object.body.paymentOp.asset)
amount = Operation.from_xdr_amount(op_xdr_object.body.paymentOp.amount)
return cls(
source=source,
destination=destination,
asset=asset,
amount=amount,
) | Creates a :class:`Payment` object from an XDR Operation
object. |
384,755 | def _unrecognised(achr):
if options[] == UNRECOGNISED_ECHO:
return achr
elif options[] == UNRECOGNISED_SUBSTITUTE:
return options[]
else:
raise KeyError(achr) | Handle unrecognised characters. |
384,756 | def execute_pool_txns(self, three_pc_batch) -> List:
committed_txns = self.default_executer(three_pc_batch)
for txn in committed_txns:
self.poolManager.onPoolMembershipChange(txn)
return committed_txns | Execute a transaction that involves consensus pool management, like
adding a node, client or a steward.
:param ppTime: PrePrepare request time
:param reqs_keys: requests keys to be committed |
384,757 | def _get_app_auth_headers(self):
now = datetime.now(timezone.utc)
expiry = now + timedelta(minutes=5)
data = {"iat": now, "exp": expiry, "iss": self.app_id}
app_token = jwt.encode(data, self.app_key, algorithm="RS256").decode("utf-8")
headers = {
"Accept": PREVIEW_JSON_ACCEPT,
"Authorization": "Bearer {}".format(app_token),
}
return headers | Set the correct auth headers to authenticate against GitHub. |
384,758 | def _resolve_model(obj):
if isinstance(obj, six.string_types) and len(obj.split('.')) == 2:
app_name, model_name = obj.split('.')
resolved_model = apps.get_model(app_name, model_name)
if resolved_model is None:
msg = "Django did not return a model for {0}.{1}"
raise ImproperlyConfigured(msg.format(app_name, model_name))
return resolved_model
elif inspect.isclass(obj) and issubclass(obj, models.Model):
return obj
raise ValueError("{0} is not a Django model".format(obj)) | Resolve supplied `obj` to a Django model class.
`obj` must be a Django model class itself, or a string
representation of one. Useful in situations like GH #1225 where
Django may not have resolved a string-based reference to a model in
another model's foreign key definition.
String representations should have the format:
'appname.ModelName' |
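A brief usage sketch (hypothetical example; assumes a configured Django project with the stock auth app and django.setup() already called):

from django.contrib.auth.models import User

assert _resolve_model('auth.User') is User    # 'appname.ModelName' string form
assert _resolve_model(User) is User           # a model class passes straight through
# anything else, e.g. _resolve_model('not.a.model'), raises ValueError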
384,759 | def delete(self, using=None):
if self.is_alone:
self.topic.delete()
else:
super(AbstractPost, self).delete(using)
self.topic.update_trackers() | Deletes the post instance. |
384,760 | def dir(self, filetype, **kwargs):
full = kwargs.get('full', None)
if not full:
full = self.full(filetype, **kwargs)
return os.path.dirname(full) | Return the directory containing a file of a given type.
Parameters
----------
filetype : str
File type parameter.
Returns
-------
dir : str
Directory containing the file. |
384,761 | def get_objective(self, objective_id=None):
if objective_id is None:
raise NullArgument()
url_path = construct_url(, obj_id=objective_id)
return objects.Objective(self._get_request(url_path)) | Gets the Objective specified by its Id.
In plenary mode, the exact Id is found or a NotFound results.
Otherwise, the returned Objective may have a different Id than
requested, such as the case where a duplicate Id was assigned to
an Objective and retained for compatibility.
arg: objectiveId (osid.id.Id): Id of the Objective
return: (osid.learning.Objective) - the objective
raise: NotFound - objectiveId not found
raise: NullArgument - objectiveId is null
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
compliance: mandatory - This method is must be implemented. |
384,762 | def cast_to_subclass(self):
self.import_lib()
self.load_requirements()
try:
self.commit()
b = clz(self._dataset, self._library, self._source_url, self._build_url)
b.limited_run = self.limited_run
b.capture_exceptions = self.capture_exceptions
b.multi = self.multi
return b | Load the bundle file from the database to get the derived bundle class,
then return a new bundle built on that class
:return: |
384,763 | def cloud_front_origin_access_identity_exists(Id, region=None, key=None, keyid=None, profile=None):
authargs = {'region': region, 'key': key, 'keyid': keyid, 'profile': profile}
oais = list_cloud_front_origin_access_identities(**authargs) or []
return bool([i['Id'] for i in oais if i['Id'] == Id]) | Return True if a CloudFront origin access identity exists with the given Resource ID or False
otherwise.
Id
Resource ID of the CloudFront origin access identity.
region
Region to connect to.
key
Secret key to use.
keyid
Access key to use.
profile
Dict, or pillar key pointing to a dict, containing AWS region/key/keyid.
CLI Example:
.. code-block:: bash
salt myminion boto_cloudfront.cloud_front_origin_access_identity_exists Id=E30RBTSABCDEF0 |
384,764 | def makeFigFromFile(filename,*args,**kwargs):
import matplotlib.pyplot as plt
img = plt.imread(filename)
fig,ax = plt.subplots(*args,**kwargs)
ax.axis('off')
ax.imshow(img)
return fig | Renders an image in a matplotlib figure, so it can be added to reports
args and kwargs are passed to plt.subplots |
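A short usage sketch (the file name is hypothetical); positional and keyword arguments are forwarded to plt.subplots:

fig = makeFigFromFile('plot.png', figsize=(4, 3))   # 4x3 inch figure showing plot.png
fig.savefig('report_page.pdf')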
384,765 | def t_ID(self, t):
r'[a-z][a-zA-Z0-9_]*'
t.type = self.reserved.get(t.value, 'ID')
if t.type == 'ID':
t.value = node.Id(t.value, self.lineno, self.filename)
return t | r'[a-z][a-zA-Z0-9_]* |
384,766 | def resource(resource_id):
resource_obj = app.db.resource(resource_id)
if in request.args:
return send_from_directory(os.path.dirname(resource_obj.path),
os.path.basename(resource_obj.path))
return render_template(, resource=resource_obj) | Show a resource. |
384,767 | def global_set_option(self, opt, value):
self._all_options[opt].set_option(opt, value) | set option on the correct option provider |
384,768 | def get_urls(self):
not_clone_url = [url(r,
admin.site.admin_view(self.will_not_clone))]
restore_url = [
url(r, admin.site.admin_view(self.restore))]
return not_clone_url + restore_url + super(VersionedAdmin,
self).get_urls() | Appends the custom will_not_clone url to the admin site |
384,769 | def get_color_label(self):
if self.args.norm:
return .format(self.args.norm)
if len(self.units) == 1 and self.usetex:
return r.format(
self.units[0].to_string().strip())
elif len(self.units) == 1:
return .format(self.units[0].to_string())
return super(Spectrogram, self).get_color_label() | Text for colorbar label |
384,770 | def load_plug_in(self, name):
if not isinstance(name, basestring):
raise TypeError("name can only be an instance of type basestring")
plug_in_name = self._call("loadPlugIn",
in_p=[name])
return plug_in_name | Loads a DBGF plug-in.
in name of type str
The plug-in name or DLL. Special name 'all' loads all installed plug-ins.
return plug_in_name of type str
The name of the loaded plug-in. |
384,771 | def represent_datetime(self, d):
fixed_format = self.get('timestamp_format')
if fixed_format:
rep = string_decode(d.strftime(fixed_format), )
else:
format_hook = self.get_hook('timestamp_format')
if format_hook:
rep = string_decode(format_hook(d), )
else:
rep = pretty_datetime(d)
return rep | turns a given datetime obj into a string representation.
This will:
1) look if a fixed 'timestamp_format' is given in the config
2) check if a 'timestamp_format' hook is defined
3) use :func:`~alot.helper.pretty_datetime` as fallback |
384,772 | def _command(self, event, command, *args, **kwargs):
self._assert_transition(event)
self.trigger('pre_%s' % event, **kwargs)
self._execute_command(command, *args)
self.trigger('post_%s' % event, **kwargs) | Context state controller.
Check whether the transition is possible or not, it executes it and
triggers the Hooks with the pre_* and post_* events.
@param event: (str) event generated by the command.
@param command: (virDomain.method) state transition to impose.
@raise: RuntimeError. |
384,773 | def encipher(self,string):
string = self.remove_punctuation(string,filter='[^'+self.key+']')
ctext = ""
for c in string:
ctext += ''.join([str(i) for i in L2IND[c]])
return ctext | Encipher string using Delastelle cipher according to initialised key.
Example::
ciphertext = Delastelle('APCZ WRLFBDKOTYUQGENHXMIVS').encipher(plaintext)
:param string: The string to encipher.
:returns: The enciphered string. The ciphertext will be 3 times the length of the plaintext. |
384,774 | def _do_connection(self, wgt, sig, func):
if hasattr(self, wgt):
wgtobj = getattr(self, wgt)
if hasattr(wgtobj, sig):
sigobj = getattr(wgtobj, sig)
if isinstance(sigobj, Signal):
sigobj.connect(func)
return 0
return 1 | Make a connection between a GUI widget and a callable.
wgt and sig are strings with widget and signal name
func is a callable for that signal |
384,775 | def set_child_value(self, name, value):
return XMLElement(lib.lsl_set_child_value(self.e,
str.encode(name),
str.encode(value))) | Set the text value of the (nameless) plain-text child of a named
child node. |
384,776 | def predict(self, X):
K = pairwise_kernels(self.X, X, metric=self.kernel, gamma=self.gamma)
return (K * self.y[:, None]).sum(axis=0) / K.sum(axis=0) | Predict target values for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples]
The predicted target value. |
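The returned value is the kernel-weighted (Nadaraya-Watson style) average of the stored targets. A standalone sketch of the same computation with an RBF kernel and hypothetical training data:

import numpy as np
from sklearn.metrics.pairwise import pairwise_kernels

X_train = np.array([[0.0], [1.0], [2.0]])
y_train = np.array([0.0, 1.0, 4.0])
X_query = np.array([[1.5]])

K = pairwise_kernels(X_train, X_query, metric='rbf', gamma=1.0)   # shape (3, 1)
y_hat = (K * y_train[:, None]).sum(axis=0) / K.sum(axis=0)        # weighted average of y_train
print(y_hat)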
384,777 | def put(self, key):
self.client.write(self._key_path(key[]), **key)
return self._key_path(key[]) | Put and return the only unique identifier possible, its path |
384,778 | def vtrees(self):
self.connection._check_login()
response = self.connection._do_get("{}/{}".format(self.connection._api_url, "types/VTree/instances")).json()
all_vtrees = []
for vtree in response:
all_vtrees.append(
SIO_Vtree.from_dict(vtree)
)
return all_vtrees | Get list of VTrees from ScaleIO cluster
:return: List of VTree objects - Can be empty of no VTrees exist
:rtype: VTree object |
384,779 | def nodes(self):
return np.column_stack((self.points,
self.points)).reshape(
-1)[1:-1].reshape((-1, 2)) | Returns an (n,2) list of nodes, or vertices on the path.
Note that this generic class function assumes that all of the
reference points are on the path which is true for lines and
three point arcs.
If you were to define another class where that wasn't the case
(for example, the control points of a bezier curve),
you would need to implement an entity- specific version of this
function.
The purpose of having a list of nodes is so that they can then be
added as edges to a graph so we can use functions to check
connectivity, extract paths, etc.
The slicing on this function is essentially just tiling points
so the first and last vertices aren't repeated. Example:
self.points = [0,1,2]
returns: [[0,1], [1,2]] |
384,780 | def line_segment(X0, X1):
r
X0 = sp.around(X0).astype(int)
X1 = sp.around(X1).astype(int)
if len(X0) == 3:
L = sp.amax(sp.absolute([[X1[0]-X0[0]], [X1[1]-X0[1]], [X1[2]-X0[2]]])) + 1
x = sp.rint(sp.linspace(X0[0], X1[0], L)).astype(int)
y = sp.rint(sp.linspace(X0[1], X1[1], L)).astype(int)
z = sp.rint(sp.linspace(X0[2], X1[2], L)).astype(int)
return [x, y, z]
else:
L = sp.amax(sp.absolute([[X1[0]-X0[0]], [X1[1]-X0[1]]])) + 1
x = sp.rint(sp.linspace(X0[0], X1[0], L)).astype(int)
y = sp.rint(sp.linspace(X0[1], X1[1], L)).astype(int)
return [x, y] | r"""
Calculate the voxel coordinates of a straight line between the two given
end points
Parameters
----------
X0 and X1 : array_like
The [x, y] or [x, y, z] coordinates of the start and end points of
the line.
Returns
-------
coords : list of lists
A list of lists containing the X, Y, and Z coordinates of all voxels
that should be drawn between the start and end points to create a solid
line. |
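A small worked 2-D example (scipy imported as sp, as the function requires); the number of interpolated points is max(|dx|, |dy|) + 1:

x, y = line_segment([0, 0], [3, 1])
# L = max(3, 1) + 1 = 4 points, so:
# x == [0, 1, 2, 3]
# y == [0, 0, 1, 1]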
384,781 | def selenol_params(**kwargs):
def params_decorator(func):
def service_function_wrapper(service, message):
params = {k: f(service, message) for k, f in kwargs.items()}
return func(service, **params)
return service_function_wrapper
return params_decorator | Decorate request parameters to transform them into Selenol objects. |
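A hedged usage sketch: each keyword argument is an extractor callable receiving (service, message), and its result is passed to the wrapped function under the same name (the parameter name below is hypothetical):

@selenol_params(user_id=lambda service, message: message['user_id'])
def handle_request(service, user_id):
    return user_id

# handle_request(service, message) now pulls user_id out of the message before calling the body.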
384,782 | def load_grammar(self, path):
if not os.path.exists(path):
raise Exception("path does not exist: {!r}".format(path))
grammar_path = os.path.dirname(path)
if grammar_path not in sys.path:
sys.path.append(grammar_path)
with open(path, "r") as f:
data = f.read()
code = compile(data, path, "exec")
locals_ = {"GRAMFUZZER": self, "__file__": path}
exec(code, locals_)
if "TOP_CAT" in locals_:
cat_group = os.path.basename(path).replace(".py", "")
self.set_cat_group_top_level_cat(cat_group, locals_["TOP_CAT"]) | Load a grammar file (python file containing grammar definitions) by
file path. When loaded, the global variable ``GRAMFUZZER`` will be set
within the module. This is not always needed, but can be useful.
:param str path: The path to the grammar file |
384,783 | def get_text(node, strategy):
textEquivs = node.get_TextEquiv()
if not textEquivs:
log.debug("No text results on %s %s", node, node.id)
return
else:
if len(textEquivs) > 1:
index1 = [x for x in textEquivs if x.index == 1]
if index1:
return index1[0].get_Unicode().strip()
return textEquivs[0].get_Unicode().strip() | Get the most confident text results, either those with @index = 1 or the first text results or empty string. |
384,784 | def get_file_to_path(self, share_name, directory_name, file_name, file_path,
open_mode='wb', start_range=None, end_range=None,
range_get_content_md5=None, progress_callback=None,
max_connections=1, max_retries=5, retry_wait=1.0, timeout=None):
_validate_not_none('share_name', share_name)
_validate_not_none('file_name', file_name)
_validate_not_none('file_path', file_path)
_validate_not_none('open_mode', open_mode)
with open(file_path, open_mode) as stream:
file = self.get_file_to_stream(
share_name, directory_name, file_name, stream,
start_range, end_range, range_get_content_md5,
progress_callback, max_connections, max_retries,
retry_wait, timeout)
return file | Downloads a file to a file path, with automatic chunking and progress
notifications. Returns an instance of File with properties and metadata.
:param str share_name:
Name of existing share.
:param str directory_name:
The path to the directory.
:param str file_name:
Name of existing file.
:param str file_path:
Path of file to write to.
:param str open_mode:
Mode to use when opening the file.
:param int start_range:
Start of byte range to use for downloading a section of the file.
If no end_range is given, all bytes after the start_range will be downloaded.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will download first 512 bytes of file.
:param int end_range:
End of byte range to use for downloading a section of the file.
If end_range is given, start_range must be provided.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will download first 512 bytes of file.
:param bool range_get_content_md5:
When this header is set to True and specified together
with the Range header, the service returns the MD5 hash for the
range, as long as the range is less than or equal to 4 MB in size.
:param progress_callback:
Callback for progress with signature function(current, total)
where current is the number of bytes transfered so far, and total is
the size of the file if known.
:type progress_callback: callback function in format of func(current, total)
:param int max_connections:
Set to 1 to download the file sequentially.
Set to 2 or greater if you want to download a file larger than 64MB in chunks.
If the file size does not exceed 64MB it will be downloaded in one chunk.
:param int max_retries:
Number of times to retry download of file chunk if an error occurs.
:param int retry_wait:
Sleep time in secs between retries.
:param int timeout:
The timeout parameter is expressed in seconds. This method may make
multiple calls to the Azure service and the timeout will apply to
each call individually.
:return: A File with properties and metadata.
:rtype: :class:`~azure.storage.file.models.File` |
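A hedged usage sketch (share, file and local path names are hypothetical, and file_service is assumed to be an authenticated FileService instance): download only the first 512 bytes with progress reporting.

def show_progress(current, total):
    print('{0}/{1} bytes'.format(current, total))

f = file_service.get_file_to_path(
    'myshare', None, 'report.bin', '/tmp/report.bin',
    start_range=0, end_range=511, progress_callback=show_progress)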
384,785 | def do_prune(self):
if self.prune_table and self.prune_column and self.prune_date:
return True
elif self.prune_table or self.prune_column or self.prune_date:
raise Exception()
else:
return False | Return True if prune_table, prune_column, and prune_date are implemented.
If only a subset of prune variables are override, an exception is raised to remind the user to implement all or none.
Prune (data newer than prune_date deleted) before copying new data in. |
384,786 | async def get_source_list(self, scheme: str = "") -> List[Source]:
res = await self.services["avContent"]["getSourceList"](scheme=scheme)
return [Source.make(**x) for x in res] | Return available sources for playback. |
384,787 | def normalize_docroot(app, root):
srcdir = app.env.srcdir
default_version = app.config.javalink_default_version
if isinstance(root, basestring):
(url, base) = _parse_docroot_str(srcdir, root)
return {: url, : base, : default_version}
else:
normalized = {}
normalized[] = _parse_docroot_str(srcdir, root[])[0]
if in root:
normalized[] = _parse_docroot_str(srcdir, root[])[1]
else:
normalized[] = _parse_docroot_str(srcdir, root[])[1]
if in root:
normalized[] = root[]
else:
normalized[] = default_version
return normalized | Creates a package-list URL and a link base from a docroot element.
Args:
app: the global app object
root: the docroot element [string or dictionary] |
384,788 | def lock(self):
if self.cache.get(self.lock_name):
return False
else:
self.cache.set(self.lock_name, timezone.now(), self.timeout)
return True | This method sets a cache variable to mark current job as "already running". |
384,789 | def _eval_target_jumptable(state, ip, limit):
if ip.symbolic is False:
return [ (claripy.ast.bool.true, ip) ]
cond_and_targets = [ ]
ip_ = ip
outer_reverse = False
if ip_.op == "Reverse":
ip_ = ip_.args[0]
outer_reverse = True
fallback = False
target_variable = None
concretes = set()
reached_sentinel = False
for cond, target in claripy.reverse_ite_cases(ip_):
if reached_sentinel:
fallback = True
break
if target.symbolic is False and state.solver.eval(target) == DUMMY_SYMBOLIC_READ_VALUE:
reached_sentinel = True
continue
if cond.op != "__eq__":
fallback = True
break
if cond.args[0].symbolic is True and cond.args[1].symbolic is False:
variable, value = cond.args
elif cond.args[0].symbolic is False and cond.args[1].symbolic is True:
value, variable = cond.args
else:
fallback = True
break
if target_variable is None:
target_variable = variable
elif target_variable is not variable:
fallback = True
break
value_concrete = state.solver.eval(value)
if value_concrete in concretes:
fallback = True
break
concretes.add(value_concrete)
if target.symbolic is True:
fallback = True
break
cond_and_targets.append((cond, target if not outer_reverse else state.solver.Reverse(target)))
if reached_sentinel is False:
fallback = True
if fallback:
return None
else:
return cond_and_targets[ : limit] | A *very* fast method to evaluate symbolic jump targets if they are a) concrete targets, or b) targets coming
from jump tables.
:param state: A SimState instance.
:param ip: The AST of the instruction pointer to evaluate.
:param limit: The maximum number of concrete IPs.
:return: A list of conditions and the corresponding concrete IPs, or None which indicates fallback is
necessary.
:rtype: list or None |
384,790 | def hook_point(self, hook_name, handle=None):
full_hook_name = 'hook_' + hook_name
for module in self.modules_manager.instances:
_ts = time.time()
if not hasattr(module, full_hook_name):
continue
fun = getattr(module, full_hook_name)
try:
fun(handle if handle is not None else self)
except Exception as exp:
logger.warning(
, module.name, str(exp))
logger.exception(, exp)
self.modules_manager.set_to_restart(module)
else:
statsmgr.timer( % (hook_name, module.name), time.time() - _ts) | Used to call module function that may define a hook function for hook_name
Available hook points:
- `tick`, called on each daemon loop turn
- `save_retention`; called by the scheduler when live state
saving is to be done
- `load_retention`; called by the scheduler when live state
restoring is necessary (on restart)
- `get_new_actions`; called by the scheduler before adding the actions to be executed
- `early_configuration`; called by the arbiter when it begins parsing the configuration
- `read_configuration`; called by the arbiter when it read the configuration
- `late_configuration`; called by the arbiter when it finishes parsing the configuration
As a default, the `handle` parameter provided to the hooked function is the
caller Daemon object. The scheduler will provide its own instance when it call this
function.
:param hook_name: function name we may hook in module
:type hook_name: str
:param handle: parameter to provide to the hook function
:type: handle: alignak.Satellite
:return: None |
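A minimal sketch of the module side of this contract, based on the hook points listed above and the attribute lookup performed by the code (the hook_ method-name prefix follows that lookup):

class MyModule(object):
    name = 'my-module'

    def hook_tick(self, daemon):
        # called on each daemon loop turn; daemon is the handle described above
        pass

    def hook_save_retention(self, scheduler):
        # called by the scheduler when live state should be saved
        pass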
384,791 | def eval(self, code, mode=):
if isinstance(code, str):
if isinstance(code, str):
code = UTF8_COOKIE + code.encode()
code = compile(code, , mode)
if mode != :
return eval(code, self.globals, self.locals)
exec(code, self.globals, self.locals) | Evaluate code in the context of the frame. |
384,792 | def imported_targets(self):
libs = []
for spec in self.imported_target_specs(payload=self.payload):
resolved_target = self._build_graph.get_target_from_spec(spec,
relative_to=self.address.spec_path)
if not resolved_target:
raise self.UnresolvedImportError(
.format(spec=spec, relative_to=self.address.spec))
try:
libs.append(self.expected_target_constraint.validate_satisfied_by(resolved_target))
except TypeConstraintError as e:
raise self.WrongTargetTypeError(
.format(spec=spec, relative_to=self.address.spec, err=str(e)),
e)
return libs | :returns: target instances for specs referenced by imported_target_specs.
:rtype: list of Target |
384,793 | def add_fields(self, field_dict):
for key, field in field_dict.items():
self.add_field(key, field) | Add a mapping of field names to PayloadField instances.
:API: public |
384,794 | def opens_platforms(self, tag=None, fromdate=None, todate=None):
return self.call("GET", "/stats/outbound/opens/platforms", tag=tag, fromdate=fromdate, todate=todate) | Gets an overview of the platforms used to open your emails.
This is only recorded when open tracking is enabled for that email. |
384,795 | def ps_ball(radius):
r
rad = int(sp.ceil(radius))
other = sp.ones((2 * rad + 1, 2 * rad + 1, 2 * rad + 1), dtype=bool)
other[rad, rad, rad] = False
ball = spim.distance_transform_edt(other) < radius
return ball | r"""
Creates spherical ball structuring element for morphological operations
Parameters
----------
radius : float or int
The desired radius of the structuring element
Returns
-------
strel : 3D-array
A 3D numpy array of the structuring element |
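A quick usage check (assuming scipy is imported as sp and scipy.ndimage as spim, as the function requires):

ball = ps_ball(3)
print(ball.shape, ball[3, 3, 3])   # (7, 7, 7) True; True voxels lie within distance 3 of the centre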
384,796 | def global_config(cls, key, *args):
if args:
cls.settings[key] = args[0]
else:
return cls.settings[key] | This reads or sets the global settings stored in class.settings. |
384,797 | def decode_base64(data):
data = bytes(data, encoding="ascii")
missing_padding = len(data) % 4
if missing_padding != 0:
data += b'=' * (4 - missing_padding)
return base64.b64decode(data) | Decodes a base64 string, with padding being optional
Args:
data: A base64 encoded string
Returns:
bytes: The decoded bytes |
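A worked example: "hello" encodes to "aGVsbG8=", so the unpadded form decodes once the '=' padding is restored.

assert decode_base64("aGVsbG8") == b"hello"
assert decode_base64("aGVsbG8=") == b"hello"   # already-padded input also works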
384,798 | def get_teams():
LOGGER.debug("TeamService.get_teams")
args = {: , : }
response = TeamService.requester.call(args)
ret = None
if response.rc == 0:
ret = []
for team in response.response_content[]:
ret.append(Team.json_2_team(team))
elif response.rc != 404:
err_msg = \
+ str(response.response_content) + + str(response.error_message) + \
" (" + str(response.rc) + ")"
LOGGER.warning(err_msg)
return ret | :return: all knows teams |
384,799 | def CountFlowLogEntries(self, client_id, flow_id):
return len(self.ReadFlowLogEntries(client_id, flow_id, 0, sys.maxsize)) | Returns number of flow log entries of a given flow. |