Unnamed: 0 (int64, 0-389k) | code (string, 26-79.6k chars) | docstring (string, 1-46.9k chars) |
---|---|---|
27,900 |
def _maybe_pandas_data(data, feature_names, feature_types):
if not isinstance(data, DataFrame):
return data, feature_names, feature_types
data_dtypes = data.dtypes
if not all(dtype.name in PANDAS_DTYPE_MAPPER for dtype in data_dtypes):
bad_fields = [data.columns[i] for i, dtype in
enumerate(data_dtypes) if dtype.name not in PANDAS_DTYPE_MAPPER]
msg = 'DataFrame.dtypes for data must be int, float or bool; unsupported types in fields: '
raise ValueError(msg + ', '.join(bad_fields))
if feature_names is None:
if isinstance(data.columns, MultiIndex):
feature_names = [
' '.join([str(x) for x in i])
for i in data.columns
]
else:
feature_names = data.columns.format()
if feature_types is None:
feature_types = [PANDAS_DTYPE_MAPPER[dtype.name] for dtype in data_dtypes]
data = data.values.astype('float')
return data, feature_names, feature_types
|
Extract internal data from pd.DataFrame for DMatrix data
|
27,901 |
def _next_token(self, skipws=True):
self._token = next(self._tokens).group(0)
return self._next_token() if skipws and self._token.isspace() else self._token
|
Increment _token to the next token and return it.
|
27,902 |
def list(cls, state=None, page=None, per_page=None):
conn = Qubole.agent()
params = {}
if page:
params['page'] = page
if per_page:
params['per_page'] = per_page
if (params.get('page') or params.get('per_page')) and Qubole.version == 'v1.2':
log.warn("Pagination is not supported with API v1.2. Fetching all clusters.")
params = None if not params else params
cluster_list = conn.get(cls.rest_entity_path, params=params)
if state is None:
return cluster_list
elif state is not None:
result = []
if Qubole.version == 'v1.2':
for cluster in cluster_list:
if state.lower() == cluster['cluster']['state'].lower():
result.append(cluster)
elif Qubole.version in ['v1.3', 'v2']:
cluster_list = cluster_list['clusters']
for cluster in cluster_list:
if state.lower() == cluster['state'].lower():
result.append(cluster)
return result
|
List existing clusters present in your account.
Kwargs:
`state`: list only those clusters which are in this state
Returns:
List of clusters satisfying the given criteria
|
27,903 |
def _import(func):
func_name = func.__name__
if func_name in globals():
return func_name
module_name = func.__module__
submodules = module_name.split('.')
if submodules[0] in globals():
return module_name + '.' + func_name
for i in range(len(submodules)):
m = submodules[i]
if m in globals():
return '.'.join(submodules[i:]) + '.' + func_name
module_ref = sys.modules[func.__module__]
all_globals = globals()
for n in all_globals:
if all_globals[n] == module_ref:
return n + '.' + func_name
return func_name
|
Return the namespace path to the function
|
27,904 |
def search(self, index, query, **params):
if index is None:
index = 'search'
options = {}
if 'op' in params:
op = params.pop('op')
options['q.op'] = op
options.update(params)
url = self.solr_select_path(index, query, **options)
status, headers, data = self._request('GET', url)
self.check_http_code(status, [200])
if 'json' in headers['content-type']:
results = json.loads(bytes_to_str(data))
return self._normalize_json_search_response(results)
elif 'xml' in headers['content-type']:
return self._normalize_xml_search_response(data)
else:
raise ValueError("Could not decode search response")
|
Performs a search query.
|
27,905 |
def event_types(self):
try:
events = self.rater.find('events')
except AttributeError:
raise IndexError('There is no selected rater')
return [x.get('type') for x in events]
|
Raises
------
IndexError
When there is no selected rater
|
27,906 |
def insert_paraphrase_information(germanet_db, wiktionary_files):
num_paraphrases = 0
lexunits = {}
for filename in wiktionary_files:
paraphrases = read_paraphrase_file(filename)
num_paraphrases += len(paraphrases)
for paraphrase in paraphrases:
if paraphrase['lexUnitId'] not in lexunits:  # key name inferred from the GermaNet wiktionary data
lexunits[paraphrase['lexUnitId']] = \
germanet_db.lexunits.find_one(
{'id': paraphrase['lexUnitId']})
lexunit = lexunits[paraphrase['lexUnitId']]
if 'paraphrases' not in lexunit:
lexunit['paraphrases'] = []
lexunit['paraphrases'].append(paraphrase)
for lexunit in lexunits.values():
germanet_db.lexunits.save(lexunit)
print('Inserted {0} paraphrases'.format(num_paraphrases))
|
Reads in the given GermaNet Wiktionary paraphrase files and inserts
their contents into the given MongoDB database.
Arguments:
- `germanet_db`: a pymongo.database.Database object
- `wiktionary_files`:
|
27,907 |
def feed(self, can):
if not isinstance(can, CAN):
raise Scapy_Exception("argument is not a CAN frame")
identifier = can.identifier
data = bytes(can.data)
if len(data) > 1 and self.use_ext_addr is not True:
self._try_feed(identifier, None, data)
if len(data) > 2 and self.use_ext_addr is not False:
ea = six.indexbytes(data, 0)
self._try_feed(identifier, ea, data[1:])
|
Attempt to feed an incoming CAN frame into the state machine
|
27,908 |
def create(self):
client = self._instance._client
cluster_pb = self._to_pb()
return client.instance_admin_client.create_cluster(
self._instance.name, self.cluster_id, cluster_pb
)
|
Create this cluster.
For example:
.. literalinclude:: snippets.py
:start-after: [START bigtable_create_cluster]
:end-before: [END bigtable_create_cluster]
.. note::
Uses the ``project``, ``instance`` and ``cluster_id`` on the
current :class:`Cluster` in addition to the ``serve_nodes``.
To change them before creating, reset the values via
.. code:: python
cluster.serve_nodes = 8
cluster.cluster_id = 'i-changed-my-mind'
before calling :meth:`create`.
:rtype: :class:`~google.api_core.operation.Operation`
:returns: The long-running operation corresponding to the
create operation.
|
27,909 |
def delete(self, kwargs):
self._to_delete.append(kwargs)
if self.should_flush():
self.flush()
|
Delete an item
Parameters
----------
kwargs : dict
The primary key of the item to delete
|
27,910 |
def confidence(self):
choices = self.choices
if len(choices) >= 2:
csq = chi_squared(*choices)
confident = is_confident(csq, len(choices)) if len(choices) <= 10 else None
else:
csq = None
confident = False
return (csq, confident)
|
Returns a tuple (chi squared, confident) of the experiment. Confident
is simply a boolean specifying whether we're > 95% sure that the
results are statistically significant.
|
27,911 |
def add_triple(self, subj, pred, obj):
subj_data, pred_data, obj_data = self.are_ilx([subj, pred, obj])
if subj_data.get('id') and pred_data.get('id') and obj_data.get('id'):  # 'id' key inferred
if pred_data['type'] != 'relationship':
return self.test_check()
return self.add_relationship(term1=subj_data,
relationship=pred_data,
term2=obj_data)
elif subj_data.get('id') and pred_data.get('id'):
if pred_data['type'] != 'annotation':
return self.test_check()
return self.add_annotation(entity=subj_data,
annotation=pred_data,
value=obj)
elif subj_data.get('id'):
data = subj_data
_pred = self.ttl2sci_map.get(pred)
if not _pred:
error = pred + " does not have a correct RDF format or is not an option"
return self.test_check(error)
data = self.custom_update(data, _pred, obj)
if data == :
return data
data = superclasses_bug_fix(data)
url_base = self.base_path +
url = url_base.format(id=data['id'])
return self.post(url, data)
else:
return self.test_check()
|
Adds an entity property to an existing entity
|
27,912 |
def set(self, key, val):
data = self.get_data(True)
if data is not None:
data[key] = val
else:
raise RuntimeError("No task is currently running")
|
Set value stored for current running task.
|
27,913 |
def retrieve_layers(self, *args, **options):
queryset = Q()
if len(args) < 1:
all_layers = Layer.objects.published().external()
if options.get('exclude'):
exclude_list = options['exclude'].replace(' ', '').split(',')
return all_layers.exclude(slug__in=exclude_list)
else:
self.verbose()
return all_layers
for layer_slug in args:
queryset = queryset | Q(slug=layer_slug)
try:
layer = Layer.objects.get(slug=layer_slug)
if not layer.is_external:
raise CommandError('Layer "%s" is not external' % layer_slug)
if not layer.is_published:
raise CommandError('Layer "%s" is not published' % layer_slug)
except Layer.DoesNotExist:
raise CommandError('Layer "%s" does not exist' % layer_slug)
return Layer.objects.published().external().select_related().filter(queryset)
|
Retrieve specified layers or all external layers if no layer specified.
|
27,914 |
def _initialize_tableaux_ig(X, Y, tableaux, bases):
m = X.shape[0]
min_ = np.zeros(m)
for i in range(m):
for j in range(2*m):
if j == i or j == i + m:
tableaux[0][i, j] = 1
else:
tableaux[0][i, j] = 0
tableaux[0][i, 2*m] = 1
for i in range(m):
for j in range(m):
if j == i:
tableaux[1][i, j] = 1
else:
tableaux[1][i, j] = 0
for j in range(m):
d = X[i] - Y[j]
tableaux[1][i, m+j] = _square_sum(d) * (-1)
if tableaux[1][i, m+j] < min_[j]:
min_[j] = tableaux[1][i, m+j]
tableaux[1][i, 2*m] = 1
for i in range(m):
for j in range(m):
tableaux[1][i, m+j] -= min_[j]
tableaux[1][i, m+j] += 1
for pl, start in enumerate([m, 0]):
for i in range(m):
bases[pl][i] = start + i
return tableaux, bases
|
Given sequences `X` and `Y` of ndarrays, initialize the tableau and
basis arrays in place for the "geometric" imitation game as defined
in McLennan and Tourky (2006), to be passed to `_lemke_howson_tbl`.
Parameters
----------
X, Y : ndarray(float)
Arrays of the same shape (m, n).
tableaux : tuple(ndarray(float, ndim=2))
Tuple of two arrays to be used to store the tableaux, of shape
(2m, 2m). Modified in place.
bases : tuple(ndarray(int, ndim=1))
Tuple of two arrays to be used to store the bases, of shape
(m,). Modified in place.
Returns
-------
tableaux : tuple(ndarray(float, ndim=2))
View to `tableaux`.
bases : tuple(ndarray(int, ndim=1))
View to `bases`.
|
27,915 |
def get_statements(self):
stmt_lists = [v for k, v in self.stmts.items()]
stmts = []
for s in stmt_lists:
stmts += s
return stmts
|
Return a list of all Statements in a single list.
Returns
-------
stmts : list[indra.statements.Statement]
A list of all the INDRA Statements in the model.
|
27,916 |
def build(self, build_dir, **kwargs):
del kwargs
args = ["cmake", "--build", build_dir]
args.extend(self._get_build_flags())
return [{"args": args}]
|
This function builds the cmake build command.
|
27,917 |
def write_to_file(self, output_file_path, intervals, template):
msg = [template % (interval) for interval in intervals]
if output_file_path is None:
self.print_info(u"Intervals detected:")
for line in msg:
self.print_generic(line)
else:
with io.open(output_file_path, "w", encoding="utf-8") as output_file:
output_file.write(u"\n".join(msg))
self.print_success(u"Created file " % output_file_path)
|
Write intervals to file.
:param output_file_path: path of the output file to be written;
if ``None``, print to stdout
:type output_file_path: string (path)
:param intervals: a list of tuples, each representing an interval
:type intervals: list of tuples
|
27,918 |
def from_api_repr(cls, resource):
config = cls()
config._properties = copy.deepcopy(resource)
return config
|
Factory: construct a job configuration given its API representation
:type resource: dict
:param resource:
An extract job configuration in the same representation as is
returned from the API.
:rtype: :class:`google.cloud.bigquery.job._JobConfig`
:returns: Configuration parsed from ``resource``.
|
27,919 |
def _find_intervals(bundles, duration, step):
segments = []
for bund in bundles:
beg, end = bund['times'][0][0], bund['times'][-1][1]
if end - beg >= duration:
new_begs = arange(beg, end - duration, step)
for t in new_begs:
seg = bund.copy()
seg['times'] = [(t, t + duration)]
segments.append(seg)
return segments
|
Divide bundles into segments of a certain duration and a certain step,
discarding any remainder.
|
27,920 |
def set_default_moe_hparams(hparams):
hparams.moe_num_experts = 16
hparams.moe_loss_coef = 1e-2
hparams.add_hparam("moe_gating", "top_2")
hparams.add_hparam("moe_capacity_factor_train", 1.25)
hparams.add_hparam("moe_capacity_factor_eval", 2.0)
hparams.add_hparam("moe_capacity_factor_second_level", 1.0)
hparams.add_hparam("moe_hidden_size", 4096)
hparams.add_hparam("moe_group_size", 1024)
hparams.add_hparam("moe_use_second_place_loss", 0)
hparams.add_hparam("moe_second_policy_train", "random")
hparams.add_hparam("moe_second_policy_eval", "random")
hparams.add_hparam("moe_second_threshold_train", 0.2)
hparams.add_hparam("moe_second_threshold_eval", 0.2)
|
Add necessary hyperparameters for mixture-of-experts.
|
27,921 |
def _get_forecast(api_result: dict) -> List[SmhiForecast]:
forecasts = []
forecasts_ordered = OrderedDict()
forecasts_ordered = _get_all_forecast_from_api(api_result)
day_nr = 1
for day in forecasts_ordered:
forecasts_day = forecasts_ordered[day]
if day_nr == 1:
forecasts.append(copy.deepcopy(forecasts_day[0]))
total_precipitation = float(0.0)
forecast_temp_max = -100.0
forecast_temp_min = 100.0
forecast = None
for forcast_day in forecasts_day:
temperature = forcast_day.temperature
if forecast_temp_min > temperature:
forecast_temp_min = temperature
if forecast_temp_max < temperature:
forecast_temp_max = temperature
if forcast_day.valid_time.hour == 12:
forecast = copy.deepcopy(forcast_day)
total_precipitation = total_precipitation + \
forcast_day._total_precipitation
if forecast is None:
forecast = forecasts_day[0]
forecast._temperature_max = forecast_temp_max
forecast._temperature_min = forecast_temp_min
forecast._total_precipitation = total_precipitation
forecast._mean_precipitation = total_precipitation/24
forecasts.append(forecast)
day_nr = day_nr + 1
return forecasts
|
Converts results from the API to a SmhiForecast list.
|
27,922 |
def find_gromacs_command(commands):
commands = utilities.asiterable(commands)
for command in commands:
try:
driver, name = command.split()
except ValueError:
driver, name = None, command
executable = driver if driver else name
if utilities.which(executable):
break
else:
raise OSError(errno.ENOENT, "No Gromacs executable found in", ", ".join(commands))
return driver, name
|
Return *driver* and *name* of the first command that can be found on :envvar:`PATH`
|
27,923 |
def manhattan(src, tar, qval=2, normalized=False, alphabet=None):
return Manhattan().dist_abs(src, tar, qval, normalized, alphabet)
|
Return the Manhattan distance between two strings.
This is a wrapper for :py:meth:`Manhattan.dist_abs`.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
qval : int
The length of each q-gram; 0 for non-q-gram version
normalized : bool
Normalizes to [0, 1] if True
alphabet : collection or int
The values or size of the alphabet
Returns
-------
float
The Manhattan distance
Examples
--------
>>> manhattan('cat', 'hat')
4.0
>>> manhattan('Niall', 'Neil')
7.0
>>> manhattan('Colin', 'Cuilen')
9.0
>>> manhattan('ATCG', 'TAGC')
10.0
|
27,924 |
def find_files(data_path, brokers, minutes, start_time, end_time):
command = find_files_cmd(data_path, minutes, start_time, end_time)
pool = Pool(len(brokers))
result = pool.map(
partial(get_output_lines_from_command, command=command),
[host for broker, host in brokers])
return [(broker, host, files)
for (broker, host), files
in zip(brokers, result)]
|
Find all the Kafka log files on the broker that have been modified
in the specified time range.
start_time and end_time should be in the format specified
by TIME_FORMAT_REGEX.
:param data_path: the path to the log files on the broker
:type data_path: str
:param brokers: the brokers
:type brokers: list of (broker_id, host) pairs
:param minutes: check the files modified in the last N minutes
:type minutes: int
:param start_time: check the files modified after start_time
:type start_time: str
:param end_time: check the files modified before end_time
:type end_time: str
:returns: the files
:rtype: list of (broker, host, file_path) tuples
|
27,925 |
def _errmsg(self, error: "Err", tb: bool=False, i: int=None,
msgformat: str="terminal") -> str:
if msgformat == "terminal":
msg = self._headline(error, i)
if error.ex is not None:
msg += "\n" + "line " + colors.bold(str(error.line))
msg += ": " + colors.yellow(error.code)
msg += "\n" + str(error.file)
if self.errs_traceback is True or tb is True:
if error.tb is not None:
msg += "\n" + error.tb
elif msgformat == "csv":
sep = ","
msg = error.msg + sep
msg += str(error.line) + sep + error.code + sep
msg += str(error.file)
elif msgformat == "text":
sep = ","
msg = error.msg
if error.ex is not None:
msg += sep + str(error.line) + sep + error.code + sep
msg += str(error.file) + sep
if self.errs_traceback is True or tb is True:
if error.tb is not None:
msg += sep + error.tb
elif msgformat == "dict":
msg = {"date": datetime.now()}
if error.ex is not None:
msg["msg"] = error.msg
msg["line"] = error.line
msg["code"] = error.code
msg["file"] = error.file
if self.errs_traceback is True or tb is True:
if error.tb is not None:
msg["traceback"] = error.tb
return msg
|
Get the error message
|
27,926 |
def _merge_two_dicts(x, y):
z = x.copy()
z.update(y)
return z
|
Given two dicts, merge them into a new dict as a shallow copy.
Once Python 3.6+ only is supported, replace method with ``z = {**x, **y}``
|
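A minimal usage sketch of the two merge styles the docstring above mentions (plain Python, values are illustrative):

defaults = {'host': 'localhost', 'port': 8080}
overrides = {'port': 9090}
merged = _merge_two_dicts(defaults, overrides)  # {'host': 'localhost', 'port': 9090}
merged_36 = {**defaults, **overrides}           # the Python 3.6+ form noted above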
27,927 |
def addAggregators(cols, aggrnames):
for aggrname in aggrnames:
aggrs = aggregators.get(aggrname)
aggrs = aggrs if isinstance(aggrs, list) else [aggrs]
for aggr in aggrs:
for c in cols:
if not hasattr(c, 'aggregators'):
c.aggregators = []
if aggr and aggr not in c.aggregators:
c.aggregators += [aggr]
|
add aggregator for each aggrname to each of cols
|
27,928 |
def pack_into(self, buf, offset, *args, **kwargs):
if len(args) < self._number_of_arguments:
raise Error(
"pack expected {} item(s) for packing (got {})".format(
self._number_of_arguments,
len(args)))
self.pack_into_any(buf, offset, args, **kwargs)
|
See :func:`~bitstruct.pack_into()`.
|
27,929 |
def _generate_examples(self, archive, validation_labels=None):
if validation_labels:
for example in self._generate_examples_validation(archive,
validation_labels):
yield example
for fname, fobj in archive:
label = fname[:-4]  # image filename without its extension
yield fname, {  # feature keys inferred from the dataset schema
'file_name': fname,
'image': fobj,
'label': label,
}
|
Yields examples.
|
27,930 |
def change_directory(self, path, *args, **kwargs):
previous_path = self.session_path()
self.session_path(path)
if os.path.isdir(self.full_path()) is False:
self.session_path(previous_path)
raise ValueError()
|
:meth:`.WNetworkClientProto.change_directory` method implementation
|
27,931 |
def get_all_lines(self):
output = []
line = []
lineno = 1
for char in self.string:
line.append(char)
if char == '\n':
output.append(SourceLine(''.join(line), lineno))
line = []
lineno += 1
if line:
output.append(SourceLine(''.join(line), lineno))
return output
|
Return all lines of the SourceString as a list of SourceLine's.
|
27,932 |
def on_key_pressed(self, event):
return
if event.keysym == "Up":
self.manager.set_joystick(0.0, -1.0, 0)
elif event.keysym == "Down":
self.manager.set_joystick(0.0, 1.0, 0)
elif event.keysym == "Left":
self.manager.set_joystick(-1.0, 0.0, 0)
elif event.keysym == "Right":
self.manager.set_joystick(1.0, 0.0, 0)
elif event.char == " ":
mode = self.manager.get_mode()
if mode == self.manager.MODE_DISABLED:
self.manager.set_mode(self.manager.MODE_OPERATOR_CONTROL)
else:
self.manager.set_mode(self.manager.MODE_DISABLED)
|
likely to take in a set of parameters to treat as up, down, left,
right, likely to actually be based on a joystick event... not sure
yet
|
27,933 |
def setVisible( self, state ):
super(XWizardBrowserDialog, self).setVisible(state)
if ( state ):
mwidth = self.uiPluginTREE.minimumWidth()
self.uiMainSPLT.setSizes([mwidth,
self.uiMainSPLT.width() - mwidth])
|
Overloads the setVisible method for the dialog to resize the contents \
of the splitter properly.
:param state | <bool>
|
27,934 |
def check(cls, status):
assert cls.trigger is not None, 'trigger not set'
assert cls.error is not None, 'error not set'
if status == cls.trigger:
raise cls.error()
|
Checks if a status enum matches the trigger originally set, and
if so, raises the appropriate error.
Args:
status (int, enum): A protobuf enum response status to check.
Raises:
AssertionError: If trigger or error were not set.
_ApiError: If the statuses don't match. Do not catch. Will be
caught automatically and sent back to the client.
|
27,935 |
def _list_files(path, suffix=""):
if os.path.isdir(path):
incomplete = os.listdir(path)
complete = [os.path.join(path, entry) for entry in incomplete]
lists = [_list_files(subpath, suffix) for subpath in complete]
flattened = []
for one_list in lists:
for elem in one_list:
flattened.append(elem)
return flattened
else:
assert os.path.exists(path), "couldn't find file '%s'" % path
if path.endswith(suffix):
return [path]
return []
|
Returns a list of all files ending in `suffix` contained within `path`.
Parameters
----------
path : str
a filepath
suffix : str
Returns
-------
l : list
A list of all files ending in `suffix` contained within `path`.
(If `path` is a file rather than a directory, it is considered
to "contain" itself)
|
27,936 |
def send_keys(self, keyserver, *keyids):
result = self._result_map['list'](self)
log.debug('send_keys: %r', keyids)
data = _util._make_binary_stream("", self._encoding)
args = ['--keyserver', keyserver, '--send-keys']
args.extend(keyids)
self._handle_io(args, data, result, binary=True)
log.debug('send_keys result: %r', result.__dict__)
data.close()
return result
|
Send keys to a keyserver.
|
27,937 |
def generate(self):
logger.info()
self._generate_graph(
,
,
self._stats.per_version_data,
)
self._generate_graph(
,
,
self._stats.per_file_type_data,
)
self._generate_graph(
,
,
self._stats.per_installer_data,
)
self._generate_graph(
,
,
self._stats.per_implementation_data,
)
self._generate_graph(
,
,
self._stats.per_system_data,
)
self._generate_graph(
,
,
self._stats.per_country_data,
)
self._generate_graph(
,
,
self._stats.per_distro_data,
)
self._generate_badges()
logger.info('Generating HTML')
html = self._generate_html()
html_path = os.path.join(self.output_dir, 'index.html')
with open(html_path, 'wb') as fh:
fh.write(html.encode())
logger.info('HTML report written to %s', html_path)
logger.info('Writing badges')
for name, svg in self._badges.items():
path = os.path.join(self.output_dir, '%s.svg' % name)
with open(path, 'w') as fh:
fh.write(svg)
logger.info('Badge %s written to %s', name, path)
|
Generate all output types and write to disk.
|
27,938 |
def reset_network(roles, extra_vars=None):
logger.debug('Resetting the network constraints')
if not extra_vars:
extra_vars = {}
tmpdir = os.path.join(os.getcwd(), TMP_DIRNAME)
_check_tmpdir(tmpdir)
utils_playbook = os.path.join(ANSIBLE_DIR, 'utils.yml')
options = {'enos_action': 'tc_reset',  # keys inferred from the playbook interface
'tc_output_dir': tmpdir}
options.update(extra_vars)
run_ansible([utils_playbook], roles=roles, extra_vars=options)
|
Reset the network constraints (latency, bandwidth ...)
Remove any filter that have been applied to shape the traffic.
Args:
roles (dict): role->hosts mapping as returned by
:py:meth:`enoslib.infra.provider.Provider.init`
inventory (str): path to the inventory
|
27,939 |
def psturng(q, r, v):
if all(map(_isfloat, [q, r, v])):
return _psturng(q, r, v)
return _vpsturng(q, r, v)
|
Evaluates the probability from 0 to q for a studentized
range having v degrees of freedom and r samples.
Parameters
----------
q : (scalar, array_like)
quantile value of Studentized Range
q >= 0.
r : (scalar, array_like)
The number of samples
r >= 2 and r <= 200
(values over 200 are permitted but not recommended)
v : (scalar, array_like)
The sample degrees of freedom
if p >= .9:
v >= 1 and v <= inf
else:
v >= 2 and v <= inf
Returns
-------
p : (scalar, array_like)
1. - area from zero to q under the Studentized Range
distribution. When v == 1, p is bound between .001
and .1, when v > 1, p is bound between .001 and .9.
Values between .5 and .9 are 1st order approximations.
|
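A short, hedged usage sketch for psturng; the numbers are illustrative, not taken from the source:

# q: observed studentized range statistic, r: number of samples, v: degrees of freedom
p = psturng(3.5, 4, 20)              # scalar inputs -> scalar probability
p_vec = psturng([3.2, 4.1], 4, 20)   # array-like input dispatches to the vectorized path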
27,940 |
def _match(self, query):
response = self._match_dialog(query)
if response is not None:
return response
response = self._match_getters(query)
if response is not None:
return response
response = self._match_registers(query)
if response is not None:
return response
response = self._match_errors_queues(query)
if response is not None:
return response
response = self._match_setters(query)
if response is not None:
return response
if response is None:
for channel in self._channels.values():
response = channel.match(query)
if response:
return response
return None
|
Tries to match in dialogues, getters and setters and subcomponents
:param query: message tuple
:type query: Tuple[bytes]
:return: response if found or None
:rtype: Tuple[bytes] | None
|
27,941 |
def set_rf_samples(n):
forest._generate_sample_indices = (lambda rs, n_samples:
forest.check_random_state(rs).randint(0, n_samples, n))
|
Changes Scikit learn's random forests to give each tree a random sample of
n random rows.
|
27,942 |
def get_labels(cls, path=None):
if path is None:
path = _get_config_file_path(
cls._xdg_config_dir,
cls._xdg_config_file
)
with open(path) as config_file:
return tuple(json.load(config_file).keys())
|
Get all server configuration labels.
:param path: A string. The configuration file to be manipulated.
Defaults to what is returned by
:func:`nailgun.config._get_config_file_path`.
:returns: Server configuration labels, where each label is a string.
|
27,943 |
def transform_non_affine(self, values):
scale = self.scale or 1
epoch = self.epoch or 0
values = numpy.asarray(values)
if self._parents or (
epoch == 0 and
scale == 1
):
return self._transform(values, float(epoch), float(scale))
flat = values.flatten()
def _trans(x):
return self._transform_decimal(x, epoch, scale)
return numpy.asarray(list(map(_trans, flat))).reshape(values.shape)
|
Transform an array of GPS times.
This method is designed to filter out transformations that will
generate text elements that require exact precision, and use
`Decimal` objects to do the transformation, and simple `float`
otherwise.
|
27,944 |
def djfrontend_twbs_css(version=None):
if version is None:
if not getattr(settings, 'TEMPLATE_DEBUG', False):
version = getattr(settings, 'DJFRONTEND_TWBS_VERSION', DJFRONTEND_TWBS_VERSION_DEFAULT)
else:
version = getattr(settings, 'DJFRONTEND_TWBS_VERSION', DJFRONTEND_TWBS_VERSION_DEFAULT)
return format_html(
'<link rel="stylesheet" href="{static}djfrontend/css/twbs/{v}/bootstrap{min}.css">',  # markup inferred
static=_static_url, v=version, min=_min)
|
Returns Twitter Bootstrap CSS file.
TEMPLATE_DEBUG returns full file, otherwise returns minified file.
|
27,945 |
def profile_prior_model_dict(self):
return {key: value for key, value in
filter(lambda t: isinstance(t[1], pm.PriorModel) and is_profile_class(t[1].cls),
self.__dict__.items())}
|
Returns
-------
profile_prior_model_dict: {str: PriorModel}
A dictionary mapping_matrix instance variable names to variable profiles.
|
27,946 |
def reduce_sum(x,
disable_positional_args=None,
output_shape=None,
reduced_dim=None,
name=None):
output_shape = convert_to_shape(output_shape)
reduced_dim = convert_to_dimension(reduced_dim)
assert disable_positional_args is None
output_shape = _reduction_output_shape(x, output_shape, reduced_dim)
if output_shape == x.shape:
return x
return ReduceOperation(x, output_shape, "SUM", name=name).outputs[0]
|
Reduction on 1 or more axes.
If reduced_dim is present, then only that dimension is reduced out.
Alternatively, specify output_shape.
Do not specify both reduced_dim and output_shape.
If neither is specified, then all dimensions are reduced out.
Args:
x: a Tensor
disable_positional_args: None
output_shape: an optional Shape. Must be a subsequence of x.shape.
reduced_dim: a mtf.Dimension
name: an optional string
Returns:
a Tensor
|
27,947 |
def prefix_all(self, prefix, *lines):
for line in lines:
if isinstance(line, (tuple, list)):
self.prefix(prefix, *line)
elif line:
self.prefix(prefix, line)
else:
self.blank()
|
Same as :func:`~prefix`, for multiple lines.
:param prefix: Dockerfile command to use, e.g. ``ENV`` or ``RUN``.
:type prefix: unicode | str
:param lines: Lines with arguments to be prefixed.
:type lines: collections.Iterable[unicode | str]
|
27,948 |
def get_last_date(self, field, filters_=[]):
last_date = self.get_last_item_field(field, filters_=filters_)
return last_date
|
:field: field with the data
:filters_: additional filters to find the date
|
27,949 |
def _items_to_es(self, json_items):
if len(json_items) == 0:
return
logger.info("Adding items to Ocean for %s (%i items)" %
(self, len(json_items)))
field_id = self.get_field_unique_id()
inserted = self.elastic.bulk_upload(json_items, field_id)
if len(json_items) != inserted:
missing = len(json_items) - inserted
info = json_items[0]
name = info['backend_name']
version = info['backend_version']
origin = info['origin']
logger.warning("%s/%s missing JSON items for backend %s [ver. %s], origin %s",
str(missing),
str(len(json_items)),
name, version, origin)
return inserted
|
Append items JSON to ES (data source state)
|
27,950 |
def get_fullname(module):
bits = [str(module.name)]
while module.parent:
bits.append(str(module.parent.name))
module = module.parent
return '.'.join(reversed(bits))
|
Reconstruct a Module's canonical path by recursing through its parents.
|
27,951 |
def new_file(self, vd, length, isoname, parent, seqnum, rock_ridge, rr_name,
xa, file_mode):
if self._initialized:
raise pycdlibexception.PyCdlibInternalError()
self._new(vd, isoname, parent, seqnum, False, length, xa)
if rock_ridge:
self._rr_new(rock_ridge, rr_name, b'', False, False, False,
file_mode)
|
Create a new file Directory Record.
Parameters:
vd - The Volume Descriptor this record is part of.
length - The length of the data.
isoname - The name for this directory record.
parent - The parent of this directory record.
seqnum - The sequence number for this directory record.
rock_ridge - Whether to make this a Rock Ridge directory record.
rr_name - The Rock Ridge name for this directory record.
xa - True if this is an Extended Attribute record.
file_mode - The POSIX file mode for this entry.
Returns:
Nothing.
|
27,952 |
def _select_broker_pair(self, rg_destination, victim_partition):
broker_source = self._elect_source_broker(victim_partition)
broker_destination = rg_destination._elect_dest_broker(victim_partition)
return broker_source, broker_destination
|
Select best-fit source and destination brokers based on partition
count and presence of partition over the broker.
* Get overloaded and underloaded brokers
Best-fit Selection Criteria:
Source broker: Select broker containing the victim-partition with
maximum partitions.
Destination broker: NOT containing the victim-partition with minimum
partitions. If no such broker found, return first broker.
This helps in ensuring:-
* Topic-partitions are distributed across brokers.
* Partition-count is balanced across replication-groups.
|
27,953 |
def preserve_attr_data(A, B):
for attr, B_data in B.items():
if getattr(B_data, , True):
continue
if attr in A:
A_data = A[attr]
for _attr in getattr(A_data, , []):
if hasattr(A_data, _attr):
if getattr(B_data, _attr, None) is not None:
if _attr in getattr(B_data, , []):
setattr(B_data, _attr, getattr(A_data, _attr))
else:
setattr(B_data, _attr, getattr(A_data, _attr))
|
Preserve attr data for combining B into A.
|
27,954 |
def xml(self, attribs = None,elements = None, skipchildren = False):
if not attribs: attribs = {}
if self.idref:
attribs['id'] = self.idref
return super(AbstractTextMarkup,self).xml(attribs,elements, skipchildren)
|
See :meth:`AbstractElement.xml`
|
27,955 |
def _trimSegmentsInCell(self, colIdx, cellIdx, segList, minPermanence,
minNumSyns):
if minPermanence is None:
minPermanence = self.connectedPerm
if minNumSyns is None:
minNumSyns = self.activationThreshold
nSegsRemoved, nSynsRemoved = 0, 0
segsToDel = []
for segment in segList:
synsToDel = [syn for syn in segment.syns if syn[2] < minPermanence]
if len(synsToDel) == len(segment.syns):
segsToDel.append(segment)
else:
if len(synsToDel) > 0:
for syn in synsToDel:
segment.syns.remove(syn)
nSynsRemoved += 1
if len(segment.syns) < minNumSyns:
segsToDel.append(segment)
nSegsRemoved += len(segsToDel)
for seg in segsToDel:
self._cleanUpdatesList(colIdx, cellIdx, seg)
self.cells[colIdx][cellIdx].remove(seg)
nSynsRemoved += len(seg.syns)
return nSegsRemoved, nSynsRemoved
|
This method goes through a list of segments for a given cell and
deletes all synapses whose permanence is less than minPermanence and deletes
any segments that have less than minNumSyns synapses remaining.
:param colIdx Column index
:param cellIdx Cell index within the column
:param segList List of segment references
:param minPermanence Any syn whose permamence is 0 or < minPermanence will
be deleted.
:param minNumSyns Any segment with less than minNumSyns synapses remaining
in it will be deleted.
:returns: tuple (numSegsRemoved, numSynsRemoved)
|
27,956 |
def make_temp_path(path, new_ext=None):
root, ext = os.path.splitext(path)
if new_ext is None:
new_ext = ext
temp_path = root + TEMP_EXTENSION + new_ext
return temp_path
|
Arguments:
new_ext: the new file extension, including the leading dot.
Defaults to preserving the existing file extension.
|
27,957 |
def determine_output_name(self, sources):
assert is_iterable_typed(sources, virtual_target.VirtualTarget)
name = os.path.splitext(sources[0].name())[0]
for s in sources[1:]:
n2 = os.path.splitext(s.name())[0]
if n2 != name:
get_manager().errors()(
"%s: source targets have different names: cannot determine target name"
% (self.id_))
return self.determine_target_name(sources[0].name())
|
Determine the name of the produced target from the
names of the sources.
|
27,958 |
def device_info(self):
resp = self._session.get_software_information(format=)
hostname = resp.xpath()[0].text
model = resp.xpath()[0].text
version =
if resp.xpath():
try:
version = resp.xpath()[0].text
except IndexError:
pass
elif resp.xpath("//package-information[name = ]"):
try:
version = (resp.xpath(
"//package-information[name = ]/comment"
)[0].text).split()[1]
except IndexError:
pass
else:
try:
version = ((resp.xpath(
)[0].text.split()[1].split()[0]))
except IndexError:
pass
resp = self._session.get_system_uptime_information(format=)
try:
current_time = resp.xpath()[0].text
except IndexError:
current_time =
try:
uptime = resp.xpath()[0].text
except IndexError:
uptime =
show_hardware = self._session.get_chassis_inventory(format=)
(hostname, model, version, serial_num, current_time, uptime))
|
Pull basic device information.
Purpose: This function grabs the hostname, model, running version, and
| serial number of the device.
@returns: The output that should be shown to the user.
@rtype: str
|
27,959 |
def tarball_files(tar_name, file_paths, output_dir='.', prefix=''):
with tarfile.open(os.path.join(output_dir, tar_name), 'w:gz') as f_out:
for file_path in file_paths:
if not file_path.startswith('/'):
raise ValueError('The provided file paths must be absolute.')
arcname = prefix + os.path.basename(file_path)
f_out.add(file_path, arcname=arcname)
|
Creates a tarball from a group of files
:param str tar_name: Name of tarball
:param list[str] file_paths: Absolute file paths to include in the tarball
:param str output_dir: Output destination for tarball
:param str prefix: Optional prefix for files in tarball
|
27,960 |
def stochrsi(data, period):
rsi = relative_strength_index(data, period)[period:]
stochrsi = [100 * ((rsi[idx] - np.min(rsi[idx+1-period:idx+1])) / (np.max(rsi[idx+1-period:idx+1]) - np.min(rsi[idx+1-period:idx+1]))) for idx in range(period-1, len(rsi))]
stochrsi = fill_for_noncomputable_vals(data, stochrsi)
return stochrsi
|
StochRSI.
Formula:
SRSI = ((RSIt - RSI LOW) / (RSI HIGH - LOW RSI)) * 100
|
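A small worked sketch of the StochRSI formula above over a hypothetical RSI window (NumPy only; values are illustrative):

import numpy as np

rsi = np.array([55.0, 60.0, 42.0, 48.0, 70.0])  # hypothetical RSI values
period = 3
window = rsi[-period:]                          # trailing RSI window
srsi = 100 * (rsi[-1] - window.min()) / (window.max() - window.min())
# here rsi[-1] == window.max(), so srsi == 100.0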
27,961 |
def basen_to_integer(self, X, cols, base):
out_cols = X.columns.values.tolist()
for col in cols:
col_list = [col0 for col0 in out_cols if str(col0).startswith(str(col))]
insert_at = out_cols.index(col_list[0])
if base == 1:
value_array = np.array([int(col0.split('_')[-1]) for col0 in col_list])
else:
len0 = len(col_list)
value_array = np.array([base ** (len0 - 1 - i) for i in range(len0)])
X.insert(insert_at, col, np.dot(X[col_list].values, value_array.T))
X.drop(col_list, axis=1, inplace=True)
out_cols = X.columns.values.tolist()
return X
|
Convert basen code as integers.
Parameters
----------
X : DataFrame
encoded data
cols : list-like
Column names in the DataFrame that be encoded
base : int
The base of transform
Returns
-------
numerical: DataFrame
|
27,962 |
def field_dict_from_row(row, model,
field_names=None, ignore_fields=(, ),
strip=True,
blank_none=True,
ignore_related=True,
ignore_values=(None,),
ignore_errors=True,
verbosity=0):
errors = collections.Counter()
if not field_names:
field_classes = [f for f in model._meta._fields() if (not ignore_fields or (f.name not in ignore_fields))]
field_names = [f.name for f in field_classes]
else:
field_classes = [f for f in model._meta._fields() if (f.name in field_names and (not ignore_fields or (f.name not in ignore_fields)))]
field_dict = {}
if isinstance(row, collections.Mapping):
row = [row.get(field_name, None) for field_name in field_names]
elif sum(hasattr(row, field_name) for field_name in field_names) / (len(field_names) / 2. + 1):
row = [getattr(row, field_name, None) for field_name in field_names]
for field_name, field_class, value in zip(field_names, field_classes, row):
clean_value = None
if verbosity >= 3:
print field_name, field_class, value
if isinstance(field_class, related.RelatedField):
if not ignore_related:
try:
clean_value = field_class.related.parent_model.objects.get(value)
except:
try:
clean_value = field_class.related.parent_model.objects.get_by_natural_key(value)
except:
errors += collections.Counter([])
if verbosity > 1:
print % (field_class, value)
if isinstance(value, basestring) and not value:
if verbosity >= 3:
print % (field_class, value)
value = None
if blank_none and (
not isinstance(field_class, related.RelatedField) or field_class.blank or not field_class.null):
try:
if isinstance(field_class.to_python(), basestring):
value = ''
else:
value = None
except:
value = None
else:
value = None
if not clean_value:
try:
clean_value = field_class.to_python(value)
except:
try:
clean_value = str(field_class.to_python(util.clean_wiki_datetime(value)))
except:
try:
clean_value = field_class.to_python(util.make_float(value))
except:
try:
clean_value = field_class.to_python(value)
except:
if verbosity > 0:
print
print "The row below has a value (%r) that cannum_uncoerciblet forget to decode the utf8 before doing a max_length truncation!
clean_value = clean_utf8(clean_value, verbosity=verbosity).decode()
max_length = getattr(field_class, )
if max_length:
try:
assert(len(clean_value) <= field_class.max_length)
except:
if verbosity > 0:
print
print "The row below has a string (%r) that is too long (> %d):" % (clean_value, max_length)
print row
print_exc()
errors += collections.Counter([])
clean_value = clean_value[:max_length]
if not ignore_errors:
raise
if not ignore_values or clean_value not in ignore_values:
field_dict[field_name] = clean_value
return field_dict, errors
|
Construct a Mapping (dict) from field names to values from a row of data
Args:
row (list or dict): Data (values) to be assigned to field_names in the dict.
If `row` is a list, then the column names (header row) can be provided in `field_names`.
If `row` is a list and no field_names are provided, then `field_names` will be taken from the
Django model class field names, in the order they appear within the class definition.
model (django.db.models.Model): The model class to be constructed with data from `row`
field_names (list or tuple of str): The field names to place the row values in.
Defaults to the keys of the dict of `row` (if `row` is a `dict`) or the names of the fields
in the Django model being constructed.
ignore_fields (list or tuple of str): The field names to ignore when placing the row values.
Returns:
dict: Mapping from fields to values compatible with a Django model constructor kwargs, `model(**kwargs)`
|
27,963 |
async def del_aldb(self, addr, mem_addr: int):
dev_addr = Address(addr)
device = self.plm.devices[dev_addr.id]
if device:
_LOGGING.debug('Deleting ALDB record')
device.del_aldb(mem_addr)
await asyncio.sleep(1, loop=self.loop)
while device.aldb.status == ALDBStatus.LOADING:
await asyncio.sleep(1, loop=self.loop)
self.print_device_aldb(addr)
|
Delete a device All-Link record.
|
27,964 |
def reencrypt_single_user(engine, user_id, old_crypto, new_crypto, logger):
crypto = FallbackCrypto([new_crypto, old_crypto])
reencrypt_user_content(
engine=engine,
user_id=user_id,
old_decrypt_func=crypto.decrypt,
new_encrypt_func=crypto.encrypt,
logger=logger,
)
|
Re-encrypt all files and checkpoints for a single user.
|
27,965 |
def _parse_response(self, result_page):
resultline = result_page.splitlines()[0]
if resultline.startswith():
raise Exception(resultline.replace(, ))
patt = re.compile(r, re.IGNORECASE)
m = patt.match(resultline)
if m:
return (m.group(1), m.group(2), m.group(3))
return None
|
Takes a result page of sending the sms, returns an extracted tuple:
('numeric_err_code', '<sent_queued_message_id>', '<smsglobalmsgid>')
Returns None if unable to extract info from result_page, it should be
safe to assume that it was either a failed result or worse, the interface
contract has changed.
|
27,966 |
def _lookup(cls: str) -> LdapObjectClass:
if isinstance(cls, str):
module_name, _, name = cls.rpartition(".")
module = importlib.import_module(module_name)
try:
cls = getattr(module, name)
except AttributeError:
raise AttributeError("%s reference cannot be found" % cls)
return cls
|
Lookup module.class.
|
27,967 |
def parse_date(value):
match = Definitions.DATE_RE.match(value) if value else None
if not match:
return None
data = match.groupdict()  # named groups: year, month, day, hour, minute, second (inferred)
year = data['year']
if len(year) == 2:
if int(year) < 70:
year = "20" + year
else:
year = "19" + year
year = int(year)
data['year'] = max(1900, min(year, 9999))
for field in ['day', 'hour', 'minute', 'second']:
if data[field] is None:
data[field] = 0
data[field] = int(data[field])
data['month'] = Definitions.month_numbers[data['month'].lower()]
return datetime.datetime(**data)
|
Parse an RFC 1123 or asctime-like format date string to produce
a Python datetime object (without a timezone).
|
27,968 |
def reset_all_metadata(self):
self.topics_to_brokers.clear()
self.topic_partitions.clear()
self.topic_errors.clear()
self.consumer_group_to_brokers.clear()
|
Clear all cached metadata
Metadata will be re-fetched as required to satisfy requests.
|
27,969 |
def get_rr_queue(self):
return Queue(self.inbox_rr.name + , self.inbox_rr,
auto_delete=True)
|
Returns a :class: `kombu.Queue` instance for receiving round-robin
commands for this actor type.
|
27,970 |
def parse_and_normalize_url_date(date_str):
if date_str is None:
return None
try:
return d1_common.date_time.dt_from_iso8601_str(date_str)
except d1_common.date_time.iso8601.ParseError as e:
raise d1_common.types.exceptions.InvalidRequest(
0,
'Invalid ISO 8601 date-time: "{}". Error: {}'.format(
date_str, str(e)
),
)
|
Parse a ISO 8601 date-time with optional timezone.
- Return as datetime with timezone adjusted to UTC.
- Return naive date-time set to UTC.
|
27,971 |
def _get_schema(self):
d={}
layout_kwargs=dict((_,) for _ in get_layout_kwargs())
for _ in (,,,):
d[_]={}
for __ in eval(.format(_.upper())):
layout_kwargs.pop(__,None)
d[_][__]=None
d[].update(annotations=dict(values=[],
params=utils.make_dict_from_list(get_annotation_kwargs())))
d[].update(shapes=utils.make_dict_from_list(get_shapes_kwargs()))
[layout_kwargs.pop(_,None) for _ in get_annotation_kwargs()+get_shapes_kwargs()]
d[].update(**layout_kwargs)
return d
|
Returns a dictionary with the schema for a QuantFigure
|
27,972 |
def attach_subdivision(times):
t = timer()
if not isinstance(times, Times):
raise TypeError("Expected Times object for param .")
assert times.total > 0., "Attached subdivision has total time 0, appears empty."
name = times.name
f.r.self_agg += times.self_agg
if name not in f.t.subdvsn_awaiting:
times_copy = copy.deepcopy(times)
times_copy.parent = f.r
f.t.subdvsn_awaiting[name] = times_copy
else:
merge.merge_times(f.t.subdvsn_awaiting[name], times)
f.t.self_cut += timer() - t
|
Manual assignment of a (stopped) times object as a subdivision of running
timer. Use cases are expected to be very limited (mainly provided as a
one-Times variant of attach_par_subdivision).
Notes:
As with any subdivision, the interval in the receiving timer is assumed to
totally subsume the time accumulated within the attached object--the total
in the receiver is not adjusted!
Args:
times (Times): Individual Times data object.
Raises:
TypeError: If times not a Times data object.
|
27,973 |
def open_mask(fn:PathOrStr, div=False, convert_mode='L', after_open:Callable=None)->ImageSegment:
"Return `ImageSegment` object create from mask in file `fn`. If `div`, divides pixel values by 255."
return open_image(fn, div=div, convert_mode=convert_mode, cls=ImageSegment, after_open=after_open)
|
Return `ImageSegment` object create from mask in file `fn`. If `div`, divides pixel values by 255.
|
27,974 |
def user_view_events(self) -> List[str]:
return [event_type for event_type, event in self.items if event.get_event_action()
== event_actions.VIEWED]
|
Return event types where the user viewed a main object.
|
27,975 |
def ancestors(self, full_table_name):
nodes = self.subgraph(
nx.algorithms.dag.ancestors(self, full_table_name))
return [full_table_name] + list(reversed(list(
nx.algorithms.dag.topological_sort(nodes))))
|
:param full_table_name: In form `schema`.`table_name`
:return: all dependent tables sorted in topological order. Self is included.
|
27,976 |
def restore(self):
if os.path.exists(self.backup_path):
for file in glob.glob(self.backup_path + "/*"):
shutil.copy(file, self.path)
|
Copy files from the backup folder to the sym-linked optimizer folder.
|
27,977 |
def resolve(hostname, family=AF_UNSPEC):
af_ok = (AF_INET, AF_INET6)
if family != AF_UNSPEC and family not in af_ok:
raise ValueError("Invalid family " % family)
ips = ()
try:
addrinfo = socket.getaddrinfo(hostname, None, family)
except socket.gaierror as exc:
if exc.errno not in (socket.EAI_NODATA, socket.EAI_NONAME):
LOG.debug("socket.getaddrinfo() raised an exception", exc_info=exc)
else:
if family == AF_UNSPEC:
ips = tuple({item[4][0] for item in addrinfo if item[0] in af_ok})
else:
ips = tuple({item[4][0] for item in addrinfo})
return ips
|
Resolve hostname to one or more IP addresses through the operating system.
Resolution is carried out for the given address family. If no
address family is specified, only IPv4 and IPv6 addresses are returned. If
multiple IP addresses are found, all are returned.
:param family: AF_INET or AF_INET6 or AF_UNSPEC (default)
:return: tuple of unique IP addresses
|
27,978 |
def compile(code: list, consts: list, names: list, varnames: list,
func_name: str = "<unknown, compiled>",
arg_count: int = 0, kwarg_defaults: Tuple[Any] = (), use_safety_wrapper: bool = True):
varnames = tuple(varnames)
consts = tuple(consts)
names = tuple(names)
code = util.flatten(code)
if arg_count > len(varnames):
raise CompileError("arg_count > len(varnames)")
if len(kwarg_defaults) > len(varnames):
raise CompileError("len(kwarg_defaults) > len(varnames)")
bc = compile_bytecode(code)
dis.dis(bc)
if PY36:
pass
else:
if bc[-1] != tokens.RETURN_VALUE:
raise CompileError(
"No default RETURN_VALUE. Add a `pyte.tokens.RETURN_VALUE` to the end of your "
"bytecode if you don't use these."
)
f_globals = frame_data[0].f_globals
f = types.FunctionType(obb, f_globals)
f.__name__ = func_name
f.__defaults__ = kwarg_defaults
if use_safety_wrapper:
def __safety_wrapper(*args, **kwargs):
try:
return f(*args, **kwargs)
except SystemError as e:
if 'unknown opcode' not in ''.join(e.args):
raise
msg = "Bytecode exception!" \
"\nFunction {} returned an invalid opcode." \
"\nFunction dissection:\n\n".format(f.__name__)
file = io.StringIO()
with contextlib.redirect_stdout(file):
dis.dis(f)
msg += file.getvalue()
raise SystemError(msg) from e
returned_func = __safety_wrapper
returned_func.wrapped = f
else:
returned_func = f
return returned_func
|
Compiles a set of bytecode instructions into a working function, using Python's bytecode
compiler.
:param code: A list of bytecode instructions.
:param consts: A list of constants to compile into the function.
:param names: A list of names to compile into the function.
:param varnames: A list of ``varnames`` to compile into the function.
:param func_name: The name of the function to use.
:param arg_count: The number of arguments this function takes. Must be ``<= len(varnames)``.
:param kwarg_defaults: A tuple of defaults for kwargs.
:param use_safety_wrapper: Use the safety wrapper? This hijacks SystemError to print better \
stack traces.
|
27,979 |
def gen_challenge(self, state):
state.checksig(self.key)
if (state.index >= state.n):
raise HeartbeatError("Out of challenges.")
state.seed = MerkleHelper.get_next_seed(self.key, state.seed)
chal = Challenge(state.seed, state.index)
state.index += 1
state.sign(self.key)
return chal
|
returns the next challenge and increments the seed and index
in the state.
:param state: the state to use for generating the challenge. will
verify the integrity of the state object before using it to generate
a challenge. it will then modify the state by incrementing the seed
and index and resign the state for passing back to the server for
storage
|
27,980 |
async def CharmArchiveSha256(self, urls):
_params = dict()
msg = dict(type='Client',  # facade name inferred
request='CharmArchiveSha256',
version=5,
params=_params)
_params['urls'] = urls
reply = await self.rpc(msg)
return reply
|
urls : typing.Sequence[~CharmURL]
Returns -> typing.Sequence[~StringResult]
|
27,981 |
def split_unescaped(char, string, include_empty_strings=False):
words = []
pos = len(string)
lastpos = pos
while pos >= 0:
pos = get_last_pos_of_char(char, string[:lastpos])
if pos >= 0:
if pos + 1 != lastpos or include_empty_strings:
words.append(string[pos + 1: lastpos])
lastpos = pos
if lastpos != 0 or include_empty_strings:
words.append(string[:lastpos])
words.reverse()
return words
|
:param char: The character on which to split the string
:type char: string
:param string: The string to split
:type string: string
:returns: List of substrings of *string*
:rtype: list of strings
Splits *string* whenever *char* appears without an odd number of
backslashes ('\\') preceding it, discarding any empty string
elements.
|
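Expected behaviour per the docstring above, shown as a hedged sketch (results inferred from the description, not run against the source):

split_unescaped(',', 'a,b\\,c')                            # -> ['a', 'b\\,c']  (escaped comma is kept)
split_unescaped(',', 'a,,b', include_empty_strings=True)   # -> ['a', '', 'b']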
27,982 |
def do_rotation(self, mirror, rot):
if (mirror):
raise IIIFError(code=501, parameter="rotation",
text="Null manipulator does not support mirroring.")
if (rot != 0.0):
raise IIIFError(code=501, parameter="rotation",
text="Null manipulator supports only rotation=(0|360).")
|
Null implementation of rotate and/or mirror.
|
27,983 |
def list_distributions(region=None, key=None, keyid=None, profile=None):
retries = 10
sleep = 6
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
Items = []
while retries:
try:
log.debug('Listing CloudFront distributions')
Marker = ''
while Marker is not None:
ret = conn.list_distributions(Marker=Marker)
Items += ret.get('DistributionList', {}).get('Items', [])
Marker = ret.get('DistributionList', {}).get('NextMarker')
return Items
except botocore.exceptions.ParamValidationError as err:
raise SaltInvocationError(str(err))
except botocore.exceptions.ClientError as err:
if retries and err.response.get('Error', {}).get('Code') == 'Throttling':
retries -= 1
log.debug('Throttled by AWS API, will retry in %s seconds', sleep)
time.sleep(sleep)
continue
log.error('Failed to list CloudFront distributions: %s', err.message)
return None
|
List, with moderate information, all CloudFront distributions in the bound account.
region
Region to connect to.
key
Secret key to use.
keyid
Access key to use.
profile
Dict, or pillar key pointing to a dict, containing AWS region/key/keyid.
CLI Example:
.. code-block:: bash
salt myminion boto_cloudfront.list_distributions
|
27,984 |
def start_time(self):
resource_list = self.traffic_incident()
start_time = namedtuple(, )
if len(resource_list) == 1 and resource_list[0] is None:
return None
else:
try:
return [start_time(resource[])
for resource in resource_list]
except (KeyError, TypeError):
return [start_time(resource[])
for resource in resource_list]
|
Retrieves the start time of the incident/incidents from the output
response
Returns:
start_time(namedtuple): List of named tuples of start time of the
incident/incidents
|
27,985 |
def nl_error_handler_verbose(_, err, arg):
ofd = arg or _LOGGER.debug
ofd('-- Error received: ' + strerror(-err.error))
ofd('-- Original message: ' + print_header_content(err.msg))
return -nl_syserr2nlerr(err.error)
|
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/handlers.c#L78.
|
27,986 |
def _curvature_range(self):
self._curv_win = tf.get_variable("curv_win",
dtype=tf.float32,
trainable=False,
shape=[self.curvature_window_width,],
initializer=tf.zeros_initializer)
self._curv_win = tf.scatter_update(self._curv_win,
self._step % self.curvature_window_width,
tf.log(self._grad_norm_squared))
valid_window = tf.slice(self._curv_win,
tf.constant([0,]),
tf.expand_dims(
tf.minimum(
tf.constant(self.curvature_window_width),
self._step + 1), dim=0))
self._h_min_t = tf.reduce_min(valid_window)
self._h_max_t = tf.reduce_max(valid_window)
curv_range_ops = []
with tf.control_dependencies([self._h_min_t, self._h_max_t]):
avg_op = self._moving_averager.apply([self._h_min_t, self._h_max_t])
with tf.control_dependencies([avg_op]):
self._h_min = tf.exp(
tf.identity(self._moving_averager.average(self._h_min_t)))
self._h_max = tf.exp(
tf.identity(self._moving_averager.average(self._h_max_t)))
if self._sparsity_debias:
self._h_min *= self._sparsity_avg
self._h_max *= self._sparsity_avg
curv_range_ops.append(avg_op)
return curv_range_ops
|
Curvature range.
Returns:
h_max_t, h_min_t ops
|
27,987 |
def airborne_position_with_ref(msg, lat_ref, lon_ref):
mb = common.hex2bin(msg)[32:]
cprlat = common.bin2int(mb[22:39]) / 131072.0
cprlon = common.bin2int(mb[39:56]) / 131072.0
i = int(mb[21])
d_lat = 360.0/59 if i else 360.0/60
j = common.floor(lat_ref / d_lat) \
+ common.floor(0.5 + ((lat_ref % d_lat) / d_lat) - cprlat)
lat = d_lat * (j + cprlat)
ni = common.cprNL(lat) - i
if ni > 0:
d_lon = 360.0 / ni
else:
d_lon = 360.0
m = common.floor(lon_ref / d_lon) \
+ common.floor(0.5 + ((lon_ref % d_lon) / d_lon) - cprlon)
lon = d_lon * (m + cprlon)
return round(lat, 5), round(lon, 5)
|
Decode airborne position with only one message,
knowing reference nearby location, such as previously calculated location,
ground station, or airport location, etc. The reference position shall
be with in 180NM of the true position.
Args:
msg (string): even message (28 bytes hexadecimal string)
lat_ref: previous known latitude
lon_ref: previous known longitude
Returns:
(float, float): (latitude, longitude) of the aircraft
|
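A hedged usage sketch; the message below is a commonly used sample even-frame position message and the reference point is arbitrary:

msg = "8D40621D58C382D690C8AC2863A7"  # sample even position message (illustrative)
lat, lon = airborne_position_with_ref(msg, lat_ref=49.0, lon_ref=6.0)
# lat/lon are rounded to 5 decimal places by the function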
27,988 |
def HandleVersion(self, payload):
self.Version = IOHelper.AsSerializableWithType(payload, "neo.Network.Payloads.VersionPayload.VersionPayload")
if not self.Version:
return
if self.incoming_client:
if self.Version.Nonce == self.nodeid:
self.Disconnect()
self.SendVerack()
else:
self.nodeid = self.Version.Nonce
self.SendVersion()
|
Process the response of `self.RequestVersion`.
|
27,989 |
def metric(self):
if self._metric is None:
errMsg = "The metric eigenvectors have not been set in the "
errMsg += "metricParameters instance."
raise ValueError(errMsg)
return self._metric
|
The metric of the parameter space.
This is a Dictionary of numpy.matrix
Each entry in the dictionary is as described under evals.
Each numpy.matrix contains the metric of the parameter space in the
Lambda_i coordinate system.
|
27,990 |
def _convert_odict_to_classes(self,
data,
clean=False,
merge=True,
pop_schema=True,
compare_to_existing=True,
filter_on={}):
self._log.debug("_convert_odict_to_classes(): {}".format(self.name()))
self._log.debug("This should be a temporary fix. Dont be lazy.")
fkeys = list(filter_on.keys())
name_key = self._KEYS.NAME
if name_key in data:
self[name_key] = data.pop(name_key)
schema_key = self._KEYS.SCHEMA
if schema_key in data:
if pop_schema:
data.pop(schema_key)
else:
self[schema_key] = data.pop(schema_key)
if clean:
data = self.clean_internal(data)
src_key = self._KEYS.SOURCES
if src_key in data:
sources = data.pop(src_key)
self._log.debug("Found {} entries".format(
len(sources), src_key))
self._log.debug("{}: {}".format(src_key, sources))
for src in sources:
self.add_source(allow_alias=True, **src)
photo_key = self._KEYS.PHOTOMETRY
if photo_key in data:
photoms = data.pop(photo_key)
self._log.debug("Found {} entries".format(
len(photoms), photo_key))
phcount = 0
for photo in photoms:
skip = False
for fkey in fkeys:
if fkey in photo and photo[fkey] not in filter_on[fkey]:
skip = True
if skip:
continue
self._add_cat_dict(
Photometry,
self._KEYS.PHOTOMETRY,
compare_to_existing=compare_to_existing,
**photo)
phcount += 1
self._log.debug("Added {} entries".format(
phcount, photo_key))
model_key = self._KEYS.MODELS
if model_key in data:
model = data.pop(model_key)
self._log.debug("Found {} '{}' entries".format(
len(model), model_key))
for mod in model:
self._add_cat_dict(
Model,
self._KEYS.MODELS,
compare_to_existing=compare_to_existing,
**mod)
if len(data):
self._log.debug("{} remaining entries, assuming `Quantity`".format(
len(data)))
for key in list(data.keys()):
vals = data.pop(key)
if not isinstance(vals, list):
vals = [vals]
self._log.debug("{}: {}".format(key, vals))
for vv in vals:
self._add_cat_dict(
Quantity,
key,
check_for_dupes=merge,
compare_to_existing=compare_to_existing,
**vv)
if merge and self.dupe_of:
self.merge_dupes()
return
|
Convert `OrderedDict` into `Entry` or its derivative classes.
|
27,991 |
def rmdir(path):
logger.debug("DEBUG** Window rmdir sys.platform: {}".format(sys.platform))
if sys.platform == 'win32':
onerror = _windows_rmdir_readonly
else:
onerror = None
return shutil.rmtree(path, onerror=onerror)
|
Recursively deletes a directory. Includes an error handler to retry with
different permissions on Windows. Otherwise, removing directories (eg.
cloned via git) can cause rmtree to throw a PermissionError exception
|
27,992 |
def add_column_healpix(self, name="healpix", longitude="ra", latitude="dec", degrees=True, healpix_order=12, nest=True):
import healpy as hp
if degrees:
scale = "*pi/180"
else:
scale = ""
phi = self.evaluate("(%s)%s" % (longitude, scale))
theta = self.evaluate("pi/2-(%s)%s" % (latitude, scale))
hp_index = hp.ang2pix(hp.order2nside(healpix_order), theta, phi, nest=nest)
self.add_column("healpix", hp_index)
|
Add a healpix (in memory) column based on a longitude and latitude
:param name: Name of column
:param longitude: longitude expression
:param latitude: latitude expression (astronomical convenction latitude=90 is north pole)
:param degrees: If lon/lat are in degrees (default) or radians.
:param healpix_order: healpix order, >= 0
:param nest: Nested healpix (default) or ring.
|
27,993 |
def restrict_to_version(self, version):
cairo.cairo_svg_surface_restrict_to_version(self._pointer, version)
self._check_status()
|
Restricts the generated SVG file to :obj:`version`.
See :meth:`get_versions` for a list of available version values
that can be used here.
This method should only be called
before any drawing operations have been performed on the given surface.
The simplest way to do this is to call this method
immediately after creating the surface.
:param version: A :ref:`SVG_VERSION` string.
|
27,994 |
def generate_grid_coords(gx, gy):
return np.vstack([gx.ravel(), gy.ravel()]).T
|
r"""Calculate x,y coordinates of each grid cell.
Parameters
----------
gx: numeric
x coordinates in meshgrid
gy: numeric
y coordinates in meshgrid
Returns
-------
(X, Y) ndarray
List of coordinates in meshgrid
|
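A minimal sketch of what the ravel/vstack transform above produces for a small meshgrid:

import numpy as np

gx, gy = np.meshgrid(np.arange(3), np.arange(2))  # 2x3 grid
coords = np.vstack([gx.ravel(), gy.ravel()]).T
# coords has shape (6, 2): [[0 0] [1 0] [2 0] [0 1] [1 1] [2 1]]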
27,995 |
def nl_list_entry(ptr, type_, member):
if ptr.container_of:
return ptr.container_of
null_data = type_()
setattr(null_data, member, ptr)
return null_data
|
https://github.com/thom311/libnl/blob/libnl3_2_25/include/netlink/list.h#L64.
|
27,996 |
def get_qout_index(self,
river_index_array=None,
date_search_start=None,
date_search_end=None,
time_index_start=None,
time_index_end=None,
time_index=None,
time_index_array=None,
daily=False,
pd_filter=None,
filter_mode="mean",
as_dataframe=False):
if river_index_array is not None:
if hasattr(river_index_array, "__len__"):
if len(river_index_array) == 1:
river_index_array = river_index_array[0]
if time_index_array is None:
time_index_array = self.get_time_index_range(date_search_start,
date_search_end,
time_index_start,
time_index_end,
time_index)
qout_variable = self.qout_nc.variables[self.q_var_name]
qout_dimensions = qout_variable.dimensions
if qout_dimensions[0].lower() == 'time' and \
qout_dimensions[1].lower() == self.river_id_dimension.lower():
if time_index_array is not None and river_index_array is not None:
streamflow_array = qout_variable[time_index_array,
river_index_array].transpose()
elif time_index_array is not None:
streamflow_array = qout_variable[time_index_array, :] \
.transpose()
elif river_index_array is not None:
streamflow_array = qout_variable[:, river_index_array] \
.transpose()
else:
streamflow_array = qout_variable[:].transpose()
elif qout_dimensions[1].lower() == 'time' and \
qout_dimensions[0].lower() == self.river_id_dimension.lower():
if time_index_array is not None and river_index_array is not None:
streamflow_array = qout_variable[river_index_array,
time_index_array]
elif time_index_array is not None:
streamflow_array = qout_variable[:, time_index_array]
elif river_index_array is not None:
streamflow_array = qout_variable[river_index_array, :]
else:
streamflow_array = qout_variable[:]
else:
raise Exception("Invalid RAPID Qout file dimensions ...")
if daily:
pd_filter = "D"
if pd_filter is not None or as_dataframe:
time_array = self.get_time_array(return_datetime=True,
time_index_array=time_index_array)
qout_df = pd.DataFrame(streamflow_array.T, index=time_array)
if pd_filter is not None:
qout_df = qout_df.resample(pd_filter)
if filter_mode == "mean":
qout_df = qout_df.mean()
elif filter_mode == "max":
qout_df = qout_df.max()
else:
raise Exception("Invalid filter_mode ...")
if as_dataframe:
return qout_df
streamflow_array = qout_df.as_matrix().T
if streamflow_array.ndim > 0 and streamflow_array.shape[0] == 1:
streamflow_array = streamflow_array[0]
return streamflow_array
|
This method extracts streamflow data by river index.
It allows for extracting single or multiple river streamflow arrays
It has options to extract by date or by date index.
See: :meth:`RAPIDpy.RAPIDDataset.get_qout`
|
27,997 |
def from_csv(self, csv_source, delimiter=","):
import pytablereader as ptr
loader = ptr.CsvTableTextLoader(csv_source, quoting_flags=self._quoting_flags)
loader.delimiter = delimiter
try:
for table_data in loader.load():
self.from_tabledata(table_data, is_overwrite_table_name=False)
return
except ptr.DataError:
pass
loader = ptr.CsvTableFileLoader(csv_source, quoting_flags=self._quoting_flags)
loader.delimiter = delimiter
for table_data in loader.load():
self.from_tabledata(table_data)
|
Set tabular attributes to the writer from a character-separated values (CSV) data source.
Following attributes are set to the writer by the method:
- :py:attr:`~.headers`.
- :py:attr:`~.value_matrix`.
:py:attr:`~.table_name` also be set if the CSV data source is a file.
In that case, :py:attr:`~.table_name` is as same as the filename.
:param str csv_source:
Input CSV data source either can be designated CSV text or
CSV file path.
:Examples:
:ref:`example-from-csv`
:Dependency Packages:
- `pytablereader <https://github.com/thombashi/pytablereader>`__
|
27,998 |
def form(**kwargs: Question):
return Form(*(FormField(k, q) for k, q in kwargs.items()))
|
Create a form with multiple questions.
The parameter name of a question will be the key for the answer in
the returned dict.
|
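A hypothetical usage sketch, assuming questionary-style question factories `text` and `confirm` (names not confirmed by the source):

answers = form(
    name=text("What's your name?"),
    ship_it=confirm("Submit the release?"),
).ask()
# -> {'name': '...', 'ship_it': True}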
27,999 |
def subscribe_sqs_queue(self, topic, queue):
t = queue.id.split('/')
q_arn = 'arn:aws:sqs:%s:%s:%s' % (queue.connection.region.name,
t[1], t[2])
resp = self.subscribe(topic, 'sqs', q_arn)
policy = queue.get_attributes('Policy')
if 'Version' not in policy:
policy['Version'] = '2008-10-17'
if 'Statement' not in policy:
policy['Statement'] = []
statement = {'Action': 'SQS:SendMessage',
'Effect': 'Allow',
'Principal': {'AWS': '*'},
'Resource': q_arn,
'Sid': str(uuid.uuid4()),
'Condition': {'StringLike': {'aws:SourceArn': topic}}}
policy['Statement'].append(statement)
queue.set_attribute('Policy', json.dumps(policy))
return resp
|
Subscribe an SQS queue to a topic.
This is convenience method that handles most of the complexity involved
in using an SQS queue as an endpoint for an SNS topic. To achieve this
the following operations are performed:
* The correct ARN is constructed for the SQS queue and that ARN is
then subscribed to the topic.
* A JSON policy document is constructed that grants permission to
the SNS topic to send messages to the SQS queue.
* This JSON policy is then associated with the SQS queue using
the queue's set_attribute method. If the queue already has
a policy associated with it, this process will add a Statement to
that policy. If no policy exists, a new policy will be created.
:type topic: string
:param topic: The name of the new topic.
:type queue: A boto Queue object
:param queue: The queue you wish to subscribe to the SNS Topic.
|