Unnamed: 0 (int64, 0–389k) | code (string, 26–79.6k chars) | docstring (string, 1–46.9k chars)
---|---|---
378,900 | def printClassTree(self, element=None, showids=False, labels=False, showtype=False):
TYPE_MARGIN = 11
if not element:
for x in self.toplayer_classes:
printGenericTree(x, 0, showids, labels, showtype, TYPE_MARGIN)
else:
printGenericTree(element, 0, showids, labels, showtype, TYPE_MARGIN) | Print the class tree of an ontology nicely to stdout.
Note: indentation is sized so that ids of up to 3 digits fit, plus a space.
[123]1--
[1]123--
[12]12-- |
378,901 | def _priority_key(pep8_result):
priority = [
,
,
, ,
,
]
middle_index = 10000
lowest_priority = [
]
key = pep8_result[].lower()
try:
return priority.index(key)
except ValueError:
try:
return middle_index + lowest_priority.index(key) + 1
except ValueError:
return middle_index | Key for sorting PEP8 results.
Global fixes should be done first. This is important for things like
indentation. |
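A quick sketch of how a priority key like this is meant to be used with `sorted()`. The specific result codes and the first/last groupings below are illustrative assumptions, since the priority lists in the row above were stripped during extraction.

```python
# Hypothetical illustration of sorting pycodestyle-style result dicts with a
# priority key; the code lists here are assumptions, not the original values.
results = [
    {'id': 'e501', 'line': 12},   # line too long
    {'id': 'e101', 'line': 3},    # indentation contains mixed spaces/tabs
    {'id': 'w291', 'line': 7},    # trailing whitespace
]

def priority_key(result):
    # Indentation-related fixes first, cosmetic whitespace last,
    # everything else in the middle -- mirroring the idea above.
    first = ['e101', 'e111']
    last = ['w291', 'w293']
    code = result['id'].lower()
    if code in first:
        return first.index(code)
    if code in last:
        return 10000 + last.index(code) + 1
    return 10000

print([r['id'] for r in sorted(results, key=priority_key)])
# -> ['e101', 'e501', 'w291']
```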
378,902 | def create(self, unique_name=values.unset, friendly_name=values.unset,
identity=values.unset, deployment_sid=values.unset,
enabled=values.unset):
data = values.of({
: unique_name,
: friendly_name,
: identity,
: deployment_sid,
: enabled,
})
payload = self._version.create(
,
self._uri,
data=data,
)
return DeviceInstance(self._version, payload, fleet_sid=self._solution[], ) | Create a new DeviceInstance
:param unicode unique_name: A unique, addressable name of this Device.
:param unicode friendly_name: A human readable description for this Device.
:param unicode identity: An identifier of the Device user.
:param unicode deployment_sid: The unique SID of the Deployment group.
:param bool enabled: Whether the Device is enabled.
:returns: Newly created DeviceInstance
:rtype: twilio.rest.preview.deployed_devices.fleet.device.DeviceInstance |
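A hedged usage sketch for this endpoint. The account SID, auth token and fleet SID are placeholders, and the exact client traversal is an assumption based on the `:rtype:` path above rather than verified library code.

```python
# Hypothetical usage sketch -- SIDs and credentials below are placeholders.
from twilio.rest import Client

client = Client("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", "your_auth_token")
device = (client.preview.deployed_devices
          .fleets("FLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
          .devices
          .create(unique_name="sensor-001",
                  friendly_name="Lobby sensor",
                  enabled=True))
print(device.sid)
```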
378,903 | def load_key_bindings_for_prompt(**kw):
kw.setdefault('enable_abort_and_exit_bindings', True)
kw.setdefault('enable_search', True)
kw.setdefault('enable_auto_suggest_bindings', True)
return load_key_bindings(**kw) | Create a ``Registry`` object with the defaults key bindings for an input
prompt.
This activates the key bindings for abort/exit (Ctrl-C/Ctrl-D),
incremental search and auto suggestions.
(Not for full screen applications.) |
378,904 | def run_nested_groups():
test = htf.Test(
htf.PhaseGroup(
main=[
main_phase,
htf.PhaseGroup.with_teardown(inner_teardown_phase)(
inner_main_phase),
],
teardown=[teardown_phase]
)
)
test.execute() | Run the nested groups example.
This example shows a PhaseGroup in a PhaseGroup. No phase is terminal, so all
are run in the following order:
main_phase
inner_main_phase
inner_teardown_phase
teardown_phase |
378,905 | def delete_webhook(self, scaling_group, policy, webhook):
uri = "/%s/%s/policies/%s/webhooks/%s" % (self.uri_base,
utils.get_id(scaling_group), utils.get_id(policy),
utils.get_id(webhook))
resp, resp_body = self.api.method_delete(uri)
return None | Deletes the specified webhook from the specified policy. |
378,906 | def validate_overwrite_different_input_output(opts):
if opts.overwrite or path.abspath(opts.input) != path.abspath(opts.output):
return True
else:
raise ValidationException("Input and output directories are the same, "
"but --overwrite / -X flag is not provided.\n"
"Do you want to overwrite your input files? "
"If so, use the following command:\n"
"\tanchorhub -X " + opts.input) | Make sure that if overwrite is set to False, the input and output folders
are not set to the same location.
:param opts: a namespace containing the attributes 'overwrite', 'input',
and 'output'
:raises ValidationException: if 'input' and 'output' point to the same
directory and 'overwrite' is set to False
:return: True if 'overwrite' is set to True, or 'input'/'output' are
separate directories |
378,907 | def api_call(method, end_point, params=None, client_id=None, access_token=None):
if bool(client_id) == bool(access_token):
raise ValueError()
url = .format(end_point)
if not params:
params = {}
if client_id:
params[] = client_id
headers = {: .format(access_token)} if access_token else None
response = requests.request(method, url, params=params, headers=headers)
if int(response.status_code / 100) != 2:
error_title =
try:
error_title += + response.json()[]
except ValueError:
pass
except KeyError:
pass
raise ValueError(
.format(url, response.status_code, error_title)
)
try:
return response.json()
except json.decoder.JSONDecodeError:
pass | Call given API end_point with API keys.
:param method: HTTP method (e.g. 'get', 'delete').
:param end_point: API endpoint (e.g. 'users/john/sets').
:param params: Dictionary to be sent in the query string (e.g. {'myparam': 'myval'})
:param client_id: Quizlet client ID as string.
:param access_token: Quizlet access token as string.
client_id and access_token are mutually exclusive, but exactly one of them must be provided. |
378,908 | def pformat(self, prefix=()):
nan = float("nan")
def sformat(segment, stat):
FMT = "n={0}, mean={1}, p50/95={2}/{3}, max={4}"
line_segs = [segment]
for s in [stat]:
p = s.get_percentiles()
p50, p95 = p.get(0.50, nan), p.get(0.95, nan)
line_segs.append(FMT.format(s.n, s.mean, p50, p95, s.max))
return .format(*line_segs)
lines = []
for path in sorted(self.path_stats.keys()):
lines.append()
for seg, stat in zip(path, self.path_stats[path]):
lines.append(sformat(seg, stat))
return lines | Makes a pretty ASCII format of the data, suitable for
displaying in a console or saving to a text file.
Returns a list of lines. |
378,909 | def connect_to_region(region_name):
logging.debug("Connecting to AWS region " % region_name)
con = boto.vpc.connect_to_region(region_name)
if not con:
raise VpcRouteSetError("Could not establish connection to "
"region ." % region_name)
return con | Establish connection to AWS API. |
378,910 | async def post_heartbeat(self, msg, _context):
name = msg.get('name')
await self.service_manager.send_heartbeat(name) | Update the status of a service. |
378,911 | def get_published_courses_in_account(self, account_id, params={}):
params["published"] = True
return self.get_courses_in_account(account_id, params) | Return a list of published courses for the passed account ID. |
378,912 | def log(self, msg, level=INFO):
logger.log(level, .format(self._name) + msg) | Record a line of log in logger
:param str msg: content of the message
:param level: logging level
:return: None |
378,913 | def noisy_layer(self, prefix, action_in, out_size, sigma0,
non_linear=True):
in_size = int(action_in.shape[1])
epsilon_in = tf.random_normal(shape=[in_size])
epsilon_out = tf.random_normal(shape=[out_size])
epsilon_in = self.f_epsilon(epsilon_in)
epsilon_out = self.f_epsilon(epsilon_out)
epsilon_w = tf.matmul(
a=tf.expand_dims(epsilon_in, -1), b=tf.expand_dims(epsilon_out, 0))
epsilon_b = epsilon_out
sigma_w = tf.get_variable(
name=prefix + "_sigma_w",
shape=[in_size, out_size],
dtype=tf.float32,
initializer=tf.random_uniform_initializer(
minval=-1.0 / np.sqrt(float(in_size)),
maxval=1.0 / np.sqrt(float(in_size))))
sigma_b = tf.get_variable(
name=prefix + "_sigma_b",
shape=[out_size],
dtype=tf.float32,
initializer=tf.constant_initializer(
sigma0 / np.sqrt(float(in_size))))
w = tf.get_variable(
name=prefix + "_fc_w",
shape=[in_size, out_size],
dtype=tf.float32,
initializer=layers.xavier_initializer())
b = tf.get_variable(
name=prefix + "_fc_b",
shape=[out_size],
dtype=tf.float32,
initializer=tf.zeros_initializer())
action_activation = tf.nn.xw_plus_b(action_in, w + sigma_w * epsilon_w,
b + sigma_b * epsilon_b)
if not non_linear:
return action_activation
return tf.nn.relu(action_activation) | a common dense layer: y = w^{T}x + b
a noisy layer: y = (w + \epsilon_w*\sigma_w)^{T}x +
(b+\epsilon_b*\sigma_b)
where \epsilon are random variables sampled from factorized normal
distributions and \sigma are trainable variables which are expected to
vanish along the training procedure |
378,914 | def printhtml(csvdiffs):
soup = BeautifulSoup()
html = Tag(soup, name="html")
para1 = Tag(soup, name="p")
para1.append(csvdiffs[0][0])
para2 = Tag(soup, name="p")
para2.append(csvdiffs[1][0])
table = Tag(soup, name="table")
table.attrs.update(dict(border="1"))
soup.append(html)
html.append(para1)
html.append(para2)
html.append(table)
heading2table(soup, table, csvdiffs[3])
for row in csvdiffs[4:]:
row = [str(cell) for cell in row]
row2table(soup, table, row)
print(soup) | print the html |
378,915 | def entropy(string):
entropy = 0
for number in range(256):
result = float(string.encode().count(
chr(number))) / len(string.encode())
if result != 0:
entropy = entropy - result * math.log(result, 2)
return entropy | Calculate the entropy of a string. |
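The loop above iterates all 256 byte values under Python 2 string semantics. As a rough Python 3 sketch of the same Shannon-entropy computation (an assumption, not the row's original code):

```python
import math

def shannon_entropy(s):
    # Same idea as the row above, written against Python 3 bytes semantics:
    # sum -p*log2(p) over the byte frequencies of the encoded string.
    data = s.encode()
    entropy = 0.0
    for byte in set(data):
        p = data.count(byte) / len(data)
        entropy -= p * math.log(p, 2)
    return entropy

print(shannon_entropy("aabb"))   # 1.0  (two symbols, equally likely)
print(shannon_entropy("aaaa"))   # 0.0  (no uncertainty)
```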
378,916 | def quic_graph_lasso_cv(X, metric):
print("QuicGraphicalLassoCV with:")
print(" metric: {}".format(metric))
model = QuicGraphicalLassoCV(
cv=2,
n_refinements=6,
n_jobs=1,
init_method="cov",
score_metric=metric,
)
model.fit(X)
print(" len(cv_lams): {}".format(len(model.cv_lams_)))
print(" lam_scale_: {}".format(model.lam_scale_))
print(" lam_: {}".format(model.lam_))
return model.covariance_, model.precision_, model.lam_ | Run QuicGraphicalLassoCV on data with metric of choice.
Compare results with GridSearchCV + quic_graph_lasso. The number of
lambdas tested should be much lower with similar final lam_ selected. |
378,917 | def param_errors(self, pnames=None):
l = self.get_params(pnames)
v = [p.errors for p in l]
return np.array(v) | Return an array with the parameter errors
Parameters
----------
pnames : list of str or None
If a list of strings, get the Parameter objects with those names.
If None, get all the Parameter objects.
Returns
-------
~numpy.array of parameter errors
Note that this is a N x 2 array. |
378,918 | def _get_error_context(input_, token):
try:
line = input_[token.lexpos: input_.index('\n', token.lexpos)]
except ValueError:
line = input_[token.lexpos:]
i = input_.rfind('\n', 0, token.lexpos)
if i < 0:
i = 0
line = input_[i:token.lexpos] + line
lines = [line.strip()]
col = token.lexpos - i
while len(lines) < 5 and i > 0:
end = i
i = input_.rfind('\n', 0, i)
if i < 0:
i = 0
lines.insert(0, input_[i:end].strip())
pointer = ''
for dummy_ch in str(token.value):
    pointer += '^'
pointline = ''
i = 0
while i < col - 1:
if lines[-1][i].isspace():
pointline += lines[-1][i]
else:
pointline += ' '
i += 1
lines.append(pointline + pointer)
return lines | Build a context string that defines where on the line the defined
error occurs. This consists of the characters ^ at the position
and for the length defined by the lexer position and token length |
378,919 | def get_worksheet_keys(data_dict, result_info_key):
keys = set(data_dict.keys())
keys.remove(result_info_key)
if 'meta' in keys:
    keys.remove('meta')
return sorted(keys) | Gets sorted keys from the dict, ignoring result_info_key and 'meta' key
Args:
data_dict: dict to pull keys from
Returns:
list of keys in the dict other than the result_info_key |
378,920 | def start():
from . import app
root, apiopts, conf = app.get_app(__opts__)
if not apiopts.get('disable_ssl', False):
    if 'ssl_crt' not in apiopts or 'ssl_key' not in apiopts:
        logger.error("Not starting '%s'. Options 'ssl_crt' and "
                     "'ssl_key' are required if SSL is not disabled.",
                     __name__)
        return None
    verify_certs(apiopts['ssl_crt'], apiopts['ssl_key'])
    cherrypy.server.ssl_module = 'builtin'
    cherrypy.server.ssl_certificate = apiopts['ssl_crt']
    cherrypy.server.ssl_private_key = apiopts['ssl_key']
    if 'ssl_chain' in apiopts.keys():
        cherrypy.server.ssl_certificate_chain = apiopts['ssl_chain']
cherrypy.quickstart(root, apiopts.get('root_prefix', '/'), conf) | Start the server loop
378,921 | def compute_group_count(self, pattern):
group_count = self.group_count
pattern_repeated = 1
if self.is_many():
pattern_repeated = 2
return group_count + pattern_repeated * pattern_group_count(pattern) | Compute the number of regexp match groups when the pattern is provided
to the :func:`Cardinality.make_pattern()` method.
:param pattern: Item regexp pattern (as string).
:return: Number of regexp match groups in the cardinality pattern. |
378,922 | def create_network(self, name, driver=None, options=None, ipam=None,
check_duplicate=None, internal=False, labels=None,
enable_ipv6=False, attachable=None, scope=None,
ingress=None):
if options is not None and not isinstance(options, dict):
raise TypeError()
data = {
    'Name': name,
    'Driver': driver,
    'Options': options,
    'IPAM': ipam,
    'CheckDuplicate': check_duplicate,
}
if labels is not None:
if version_lt(self._version, ):
raise InvalidVersion(
)
if not isinstance(labels, dict):
raise TypeError()
data["Labels"] = labels
if enable_ipv6:
if version_lt(self._version, ):
raise InvalidVersion(
)
data[] = True
if internal:
if version_lt(self._version, ):
raise InvalidVersion(
)
data[] = True
if attachable is not None:
if version_lt(self._version, ):
raise InvalidVersion(
)
data[] = attachable
if ingress is not None:
if version_lt(self._version, ):
raise InvalidVersion(
)
data[] = ingress
if scope is not None:
if version_lt(self._version, ):
raise InvalidVersion(
)
data[] = scope
url = self._url("/networks/create")
res = self._post_json(url, data=data)
return self._result(res, json=True) | Create a network. Similar to the ``docker network create``.
Args:
name (str): Name of the network
driver (str): Name of the driver used to create the network
options (dict): Driver options as a key-value dictionary
ipam (IPAMConfig): Optional custom IP scheme for the network.
check_duplicate (bool): Request daemon to check for networks with
same name. Default: ``None``.
internal (bool): Restrict external access to the network. Default
``False``.
labels (dict): Map of labels to set on the network. Default
``None``.
enable_ipv6 (bool): Enable IPv6 on the network. Default ``False``.
attachable (bool): If enabled, and the network is in the global
scope, non-service containers on worker nodes will be able to
connect to the network.
scope (str): Specify the network's scope (``local``, ``global`` or
``swarm``)
ingress (bool): If set, create an ingress network which provides
the routing-mesh in swarm mode.
Returns:
(dict): The created network reference object
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
A network using the bridge driver:
>>> client.create_network("network1", driver="bridge")
You can also create more advanced networks with custom IPAM
configurations. For example, setting the subnet to
``192.168.52.0/24`` and gateway address to ``192.168.52.254``.
.. code-block:: python
>>> ipam_pool = docker.types.IPAMPool(
subnet='192.168.52.0/24',
gateway='192.168.52.254'
)
>>> ipam_config = docker.types.IPAMConfig(
pool_configs=[ipam_pool]
)
>>> docker_client.create_network("network1", driver="bridge",
ipam=ipam_config) |
378,923 | def starting_expression(source_code, offset):
word_finder = worder.Worder(source_code, True)
expression, starting, starting_offset = \
word_finder.get_splitted_primary_before(offset)
if expression:
return expression + '.' + starting
return starting | Return the expression to complete |
378,924 | def get_avg_price_stat(self) -> Decimal:
avg_price = Decimal(0)
price_total = Decimal(0)
price_count = 0
for account in self.security.accounts:
if account.type == AccountType.TRADING.name:
continue
for split in account.splits:
if split.quantity == 0:
continue
price = split.value / split.quantity
price_count += 1
price_total += price
if price_count:
avg_price = price_total / price_count
return avg_price | Calculates the statistical average price for the security,
by averaging only the prices paid. Very simple first implementation. |
378,925 | def get_current_instruction(self) -> Dict:
instructions = self.environment.code.instruction_list
return instructions[self.mstate.pc] | Gets the current instruction for this GlobalState.
:return: The current instruction as a dict. |
378,926 | def insert_contribution_entries(database, entries):
entries = map(clean_entry, entries)
database.contributions.insert(entries, continue_on_error=True) | Insert a set of records of a contribution report in the provided database.
Insert a set of new records into the provided database without checking
for conflicting entries.
@param database: The MongoDB database to operate on. The contributions
collection will be used from this database.
@type database: pymongo.database.Database
@param entries: The entries to insert into the database.
@type entries: dict |
378,927 | def read_rle(file_obj, header, bit_width, debug_logging):
count = header >> 1
zero_data = b"\x00\x00\x00\x00"
width = (bit_width + 7) // 8
data = file_obj.read(width)
data = data + zero_data[len(data):]
value = struct.unpack(b"<i", data)[0]
if debug_logging:
logger.debug("Read RLE group with value %s of byte-width %s and count %s",
value, width, count)
for _ in range(count):
yield value | Read a run-length encoded run from the given fo with the given header and bit_width.
The count is determined from the header and the width is used to grab the
value that's repeated. Yields the value repeated count times. |
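A small self-contained sketch of the decoding arithmetic described above; the sample header and bit width are made-up values. The count comes from `header >> 1` and the value is read as a little-endian int padded to 4 bytes.

```python
import io
import struct

# Assumed example inputs: header 10 -> count = 10 >> 1 = 5 repeats;
# bit_width 3 -> (3 + 7) // 8 = 1 byte holds the repeated value.
buf = io.BytesIO(b"\x04")             # the single value byte: 4
header, bit_width = 10, 3
count = header >> 1                    # 5
width = (bit_width + 7) // 8           # 1
data = buf.read(width) + b"\x00\x00\x00\x00"[width:]   # pad to 4 bytes
value = struct.unpack(b"<i", data)[0]
print([value] * count)                 # [4, 4, 4, 4, 4]
```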
378,928 | def read(self):
def warn(msg, elapsed_time, current_time):
desc = self._cache_id_desc()
self._warnings(
"{0} {1}: {2}s < {3}s", msg, desc, elapsed_time, current_time)
file_time = get_time()
out = self._out
if out is None:
if self.verbose:
self._warnings("reading {0} from disk", self._cache_id_desc())
with open(self._cache_file, 'rb') as f_in:
out = None
while True:
t_out = f_in.read(CHUNK_SIZE)
if not len(t_out):
break
if out is not None:
out += t_out
else:
out = t_out
self._out = out
(cache_id_obj, elapsed_time, res) = self._read(out)
self.ensure_cache_id(cache_id_obj)
real_time = get_time() - file_time
if elapsed_time is not None and real_time > elapsed_time:
warn("reading cache from disk takes longer than computing!",
elapsed_time, real_time)
elif self._start_time is not None and elapsed_time is not None:
current_time = get_time() - self._start_time
if elapsed_time < current_time:
warn("reading cache takes longer than computing!",
elapsed_time, current_time)
self._last_access = get_time()
return res | Reads the cache file as pickle file. |
378,929 | def discard_observer(self, observer):
discarded = False
key = self.make_key(observer)
if key in self.observers:
del self.observers[key]
discarded = True
return discarded | Un-register an observer.
Args:
observer: The observer to un-register.
Returns true if an observer was removed, otherwise False. |
378,930 | def visit_Call(self, nodeCall):
super(PatternFinder, self).generic_visit(nodeCall)
if hasattr(nodeCall.func, "func"):
nodeCall = nodeCall.func
nodeArgument = nodeCall.args[1]
if not isinstance(nodeArgument, ast.BinOp):
return
operation = nodeArgument.op
if type(operation) not in [ast.Mod, ast.Add]:
return
nodePattern = nodeArgument.left
if not isinstance(nodePattern, ast.Str):
return
pattern = nodePattern.s
if not ((type(operation) == ast.Add and pattern.endswith("_")) or
(pattern.count("%s") == 1 and pattern.endswith("_%s"))):
return
pattern = pattern.replace("%s", "")
if pattern[:1].isalpha() and not pattern[:1].islower():
self.patternsClass.add(pattern)
else:
self.patternsFunc.add(pattern) | Invoked when visiting a function call node.
@param nodeCall: the node currently being visited |
378,931 | def clear(self):
self._push_all_models_freeze()
try:
while len(self._roots) > 0:
r = next(iter(self._roots))
self.remove_root(r)
finally:
self._pop_all_models_freeze() | Remove all content from the document but do not reset title.
Returns:
None |
378,932 | def get_creators(self, *args, **kwargs):
from .creator import Creator, CreatorDataWrapper
return self.get_related_resource(Creator, CreatorDataWrapper, args, kwargs) | Returns a full CreatorDataWrapper object for this story.
/stories/{storyId}/creators
:returns: CreatorDataWrapper -- A new request to API. Contains full results set. |
378,933 | def get_matlab_value(val):
import numpy as np
if isinstance(val, list):
return [get_matlab_value(v) for v in val]
if not isinstance(val, np.ndarray):
return val
if hasattr(val, 'classname'):
out = dict()
for name in val.dtype.names:
out[name] = get_matlab_value(val[name].squeeze().tolist())
cls = type(val.classname, (object,), out)
return cls()
elif val.dtype.names:
out = MatlabStruct()
for name in val.dtype.names:
out[name] = get_matlab_value(val[name].squeeze().tolist())
val = out
elif val.dtype.kind == :
val = val.squeeze().tolist()
if not isinstance(val, list):
val = [val]
val = get_matlab_value(val)
elif val.size == 1:
val = val.item()
elif val.size == 0:
if val.dtype.kind in :
val =
else:
val = []
return val | Extract a value from a Matlab file
From the oct2py project, see
https://pythonhosted.org/oct2py/conversions.html |
378,934 | def weld_variance(array, weld_type):
weld_obj_mean = weld_mean(array, weld_type)
obj_id, weld_obj = create_weld_object(array)
weld_obj_mean_id = get_weld_obj_id(weld_obj, weld_obj_mean)
weld_template = _weld_variance_code
weld_obj.weld_code = weld_template.format(array=obj_id,
type=weld_type,
mean=weld_obj_mean_id)
return weld_obj | Returns the variance of the array.
Parameters
----------
array : numpy.ndarray or WeldObject
Input array.
weld_type : WeldType
Type of each element in the input array.
Returns
-------
WeldObject
Representation of this computation. |
378,935 | def new_output_file_opt(self, opt, name):
fil = File(name)
self.add_output_opt(opt, fil)
return fil | Add an option and return a new file handle |
378,936 | def walk_directory_directories_relative_path(self, relativePath=""):
errorMessage = ""
relativePath = os.path.normpath(relativePath)
dirInfoDict, errorMessage = self.get_directory_info(relativePath)
assert dirInfoDict is not None, errorMessage
for dname in dict.__getitem__(dirInfoDict, "directories"):
yield os.path.join(relativePath, dname) | Walk a given directory in the repository and yield the relative path of every directory found.
:parameters:
#. relativePath (str): The relative path of the directory. |
378,937 | def bow(self, tokens, remove_oov=False):
if remove_oov:
tokens = [x for x in tokens if x in self.items]
for t in tokens:
try:
yield self.items[t]
except KeyError:
if self.unk_index is None:
raise ValueError("You supplied OOV items but didn't "
"provide the index of the replacement "
"glyph. Either set remove_oov to True, "
"or set unk_index to the index of the "
"item which replaces any OOV items.")
yield self.unk_index | Create a bow representation of a list of tokens.
Parameters
----------
tokens : list.
The list of items to change into a bag of words representation.
remove_oov : bool.
Whether to remove OOV items from the input.
If this is True, the length of the returned BOW representation
might not be the length of the original representation.
Returns
-------
bow : generator
A BOW representation of the list of items. |
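A minimal stand-in class to illustrate the generator's behaviour; the class name and the simplified OOV handling are assumptions, not the library's real API.

```python
# Minimal stand-in illustrating the bow() generator above.
class TinyVocab:
    def __init__(self, items, unk_index=None):
        self.items = items          # token -> index mapping
        self.unk_index = unk_index  # index used for out-of-vocabulary tokens

    def bow(self, tokens, remove_oov=False):
        if remove_oov:
            tokens = [t for t in tokens if t in self.items]
        for t in tokens:
            yield self.items.get(t, self.unk_index)

vocab = TinyVocab({"the": 0, "cat": 1, "sat": 2}, unk_index=3)
print(list(vocab.bow(["the", "dog", "sat"])))                   # [0, 3, 2]
print(list(vocab.bow(["the", "dog", "sat"], remove_oov=True)))  # [0, 2]
```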
378,938 | def _set_properties(self):
self.SetTitle(_("About pyspread"))
label = _("pyspread {version}\nCopyright Martin Manns")
label = label.format(version=VERSION)
self.about_label.SetLabel(label) | Setup title and label |
378,939 | def draw_no_data(self):
no_data = self.node(
self.graph.nodes[],
,
x=self.graph.view.width / 2,
y=self.graph.view.height / 2,
class_=
)
no_data.text = self.graph.no_data_text | Write the no data text to the svg |
378,940 | def Validate(self, problems=default_problem_reporter):
found_problem = False
found_problem = ((not util.ValidateRequiredFieldsAreNotEmpty(
self, self._REQUIRED_FIELD_NAMES, problems))
or found_problem)
found_problem = self.ValidateAgencyUrl(problems) or found_problem
found_problem = self.ValidateAgencyLang(problems) or found_problem
found_problem = self.ValidateAgencyTimezone(problems) or found_problem
found_problem = self.ValidateAgencyFareUrl(problems) or found_problem
found_problem = self.ValidateAgencyEmail(problems) or found_problem
return not found_problem | Validate attribute values and this object's internal consistency.
Returns:
True iff all validation checks passed. |
378,941 | def get_child_by_name(self, childname):
_childs = [_child for _child in self.childs if _child.name==childname]
if len(_childs)>1:
logger.warning("%s.get_child_by_name: node:«%s» has more than 1 childnode with name=«%s»." % (self.__class__.__name__, self.name, childname))
if len(_childs)==0:
_childnode = None
else:
_childnode = _childs[0]
return _childnode | Get a child node of the current instance by its name.
:param childname: the name of the required child node.
:type childname: str
:returns: the first child node found with name `childname`.
:rtype: Node or None |
378,942 | def get_output_margin(self, status=None):
margin = self.get_reserved_space() + self.get_prompt(self.prompt).count('\n') + 1
if special.is_timing_enabled():
margin += 1
if status:
margin += 1 + status.count('\n')
return margin | Get the output margin (number of rows for the prompt, footer and
timing message). |
378,943 | def _growth_curve_pooling_group(self, distr=, as_rural=False):
if not self.donor_catchments:
self.find_donor_catchments()
gc = GrowthCurve(distr, *self._var_and_skew(self.donor_catchments))
self.results_log[] = distr.upper()
self.results_log[] = gc.params
return gc | Return flood growth curve function based on `amax_records` from a pooling group.
:return: Inverse cumulative distribution function with one parameter `aep` (annual exceedance probability)
:type: :class:`.GrowthCurve`
:param as_rural: assume catchment is fully rural. Default: false.
:type as_rural: bool |
378,944 | def _get_connection(self, handle, expect_state=None):
conndata = self._connections.get(handle)
if conndata and expect_state is not None and conndata[] != expect_state:
self._logger.error("Connection in unexpected state, wanted=%s, got=%s", expect_state,
conndata[])
return conndata | Get a connection object, logging an error if it's in an unexpected state |
378,945 | def _get_rescale_factors(self, reference_shape, meta_info):
height, width = reference_shape
service_type = ServiceType(meta_info[])
rescale = None
if service_type == ServiceType.WMS:
if (self.cm_size_x is None) and (self.cm_size_y is not None):
rescale = (self.cm_size_y / height, self.cm_size_y / height)
elif (self.cm_size_x is not None) and (self.cm_size_y is None):
rescale = (self.cm_size_x / width, self.cm_size_x / width)
else:
rescale = (self.cm_size_y / height, self.cm_size_x / width)
elif service_type == ServiceType.WCS:
if self.cm_size_y is None:
self.cm_size_y = self.cm_size_x
elif self.cm_size_x is None:
self.cm_size_x = self.cm_size_y
hr_res_x, hr_res_y = int(meta_info[].strip()), int(meta_info[].strip())
lr_res_x, lr_res_y = int(self.cm_size_x.strip()), int(self.cm_size_y.strip())
rescale = (hr_res_y / lr_res_y, hr_res_x / lr_res_x)
return rescale | Compute the resampling factor for height and width of the input array
:param reference_shape: Tuple specifying height and width in pixels of high-resolution array
:type reference_shape: tuple of ints
:param meta_info: Meta-info dictionary of input eopatch. Defines OGC request and parameters used to create the
eopatch
:return: Rescale factor for rows and columns
:rtype: tuple of floats |
378,946 | def logprob(self, actions, action_logits):
neg_log_prob = F.nll_loss(action_logits, actions, reduction=)
return -neg_log_prob | Logarithm of probability of given sample |
378,947 | def create_token(key, payload):
token = hmac.new(key)
token.update(json.dumps(payload))
return token.hexdigest() | Auth token generator
The payload should be a JSON-encodable data structure. |
378,948 | def setting(key, default=None, expected_type=None, qsettings=None):
if default is None:
default = inasafe_default_settings.get(key, None)
full_key = % (APPLICATION_NAME, key)
return general_setting(full_key, default, expected_type, qsettings) | Helper function to get a value from settings under InaSAFE scope.
:param key: Unique key for setting.
:type key: basestring
:param default: The default value in case of the key is not found or there
is an error.
:type default: basestring, None, boolean, int, float
:param expected_type: The type of object expected.
:type expected_type: type
:param qsettings: A custom QSettings to use. If it's not defined, it will
use the default one.
:type qsettings: qgis.PyQt.QtCore.QSettings
:returns: The value of the key in the setting.
:rtype: object |
378,949 | def complete(self):
is_complete = super(ORMWrapperTask, self).complete()
for req in self.requires():
is_complete &= req.complete()
return is_complete | Task is complete if completion marker is set and all requirements are complete |
378,950 | def read_pid_constants(self):
p = self._read_as_int(Addr.PConstant, 4)
i = self._read_as_int(Addr.IConstant, 4)
d = self._read_as_int(Addr.DConstant, 4)
return map(lambda x: x / (2 ** 16), (p, i, d)) | Reads back the PID constants stored on the Grizzly. |
378,951 | def get_game_logs(self):
logs = self.response.json()[][0][]
headers = self.response.json()[][0][]
df = pd.DataFrame(logs, columns=headers)
df.GAME_DATE = pd.to_datetime(df.GAME_DATE)
return df | Returns team game logs as a pandas DataFrame |
378,952 | def read_azimuth_noise_array(elts):
y = []
x = []
data = []
for elt in elts:
first_pixel = int(elt.find().text)
last_pixel = int(elt.find().text)
lines = elt.find().text.split()
lut = elt.find().text.split()
pixels = [first_pixel, last_pixel]
swath = elt.find().text
corr = 1
if swath == :
corr = 1.5
if swath == :
corr = 1.2
if swath == :
corr = 1.5
for pixel in pixels:
y += [int(val) for val in lines]
x += [pixel] * len(lines)
data += [float(val) * corr for val in lut]
return np.asarray(data), (x, y) | Read the azimuth noise vectors.
The azimuth noise is normalized per swath to account for gain
differences between the swaths in EW mode.
This is based on the this reference:
J. Park, A. A. Korosov, M. Babiker, S. Sandven and J. Won,
"Efficient Thermal Noise Removal for Sentinel-1 TOPSAR Cross-Polarization Channel,"
in IEEE Transactions on Geoscience and Remote Sensing, vol. 56, no. 3,
pp. 1555-1565, March 2018.
doi: 10.1109/TGRS.2017.2765248 |
378,953 | def cookie_dump(key, value=, max_age=None, expires=None, path=,
domain=None, secure=False, httponly=False):
cookie = SimpleCookie()
cookie[key] = value
for attr in ('max_age', 'expires', 'path', 'domain',
             'secure', 'httponly'):
    attr_key = attr.replace('_', '-')
attr_value = locals()[attr]
if attr_value:
cookie[key][attr_key] = attr_value
return cookie | :rtype: ``Cookie.SimpleCookie`` |
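A usage sketch, assuming the stripped attribute tuple in the helper covers the keyword parameters (`max_age`, `expires`, `path`, `domain`, `secure`, `httponly`) as its signature suggests.

```python
# Usage sketch for the cookie_dump helper above (attribute tuple assumed).
cookie = cookie_dump("session", "abc123", max_age=3600, path="/", httponly=True)
print(cookie.output())
# Something like: Set-Cookie: session=abc123; HttpOnly; Max-Age=3600; Path=/
```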
378,954 | def tas2eas(Vtas, H):
rho = density(H)
Veas = Vtas * np.sqrt(rho/rho0)
return Veas | True Airspeed to Equivalent Airspeed |
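A worked example of the conversion `Veas = Vtas * sqrt(rho / rho0)`. The sea-level and 5,000 m ISA densities below are standard-atmosphere approximations supplied here as assumptions (the row's `density()` helper is not shown).

```python
import numpy as np

rho0 = 1.225           # sea-level ISA density, kg/m^3 (assumed constant)
rho = 0.7364           # approximate ISA density at ~5,000 m, kg/m^3
Vtas = 150.0           # true airspeed, m/s

Veas = Vtas * np.sqrt(rho / rho0)   # equivalent airspeed
print(round(Veas, 1))               # ~116.3 m/s
```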
378,955 | def lstm_seq2seq_internal_bid_encoder(inputs, targets, hparams, train):
with tf.variable_scope("lstm_seq2seq_bid_encoder"):
if inputs is not None:
inputs_length = common_layers.length_from_embedding(inputs)
inputs = common_layers.flatten4d3d(inputs)
_, final_encoder_state = lstm_bid_encoder(
inputs, inputs_length, hparams, train, "encoder")
else:
inputs_length = None
final_encoder_state = None
shifted_targets = common_layers.shift_right(targets)
targets_length = common_layers.length_from_embedding(shifted_targets) + 1
hparams_decoder = copy.copy(hparams)
hparams_decoder.hidden_size = 2 * hparams.hidden_size
decoder_outputs, _ = lstm(
common_layers.flatten4d3d(shifted_targets),
targets_length,
hparams_decoder,
train,
"decoder",
initial_state=final_encoder_state)
return tf.expand_dims(decoder_outputs, axis=2) | The basic LSTM seq2seq model with bidirectional encoder. |
378,956 | def fragment6(pkt, fragSize):
pkt = pkt.copy()
if IPv6ExtHdrFragment not in pkt:
return [pkt]
return []
s = raw(pkt)
if len(s) <= fragSize:
return [pkt]
fragPart = pkt[IPv6ExtHdrFragment].payload
tmp = raw(IPv6(src="::1", dst="::1") / fragPart)
fragPartLen = len(tmp) - 40
fragPartStr = s[-fragPartLen:]
nh = pkt[IPv6ExtHdrFragment].nh
fragHeader = pkt[IPv6ExtHdrFragment]
del fragHeader.payload
unfragPartLen = len(s) - fragPartLen - 8
unfragPart = pkt
del pkt[IPv6ExtHdrFragment].underlayer.payload
lastFragSize = fragSize - unfragPartLen - 8
innerFragSize = lastFragSize - (lastFragSize % 8)
if lastFragSize <= 0 or innerFragSize == 0:
warning("Provided fragment size value is too low. " +
"Should be more than %d" % (unfragPartLen + 8))
return [unfragPart / fragHeader / fragPart]
remain = fragPartStr
res = []
fragOffset = 0
fragId = random.randint(0, 0xffffffff)
if fragHeader.id is not None:
fragId = fragHeader.id
fragHeader.m = 1
fragHeader.id = fragId
fragHeader.nh = nh
while True:
if (len(remain) > lastFragSize):
tmp = remain[:innerFragSize]
remain = remain[innerFragSize:]
fragHeader.offset = fragOffset
fragOffset += (innerFragSize // 8)
if IPv6 in unfragPart:
unfragPart[IPv6].plen = None
tempo = unfragPart / fragHeader / conf.raw_layer(load=tmp)
res.append(tempo)
else:
fragHeader.offset = fragOffset
fragHeader.m = 0
if IPv6 in unfragPart:
unfragPart[IPv6].plen = None
tempo = unfragPart / fragHeader / conf.raw_layer(load=remain)
res.append(tempo)
break
return res | Performs fragmentation of an IPv6 packet. Provided packet ('pkt') must
already contain an IPv6ExtHdrFragment() class. 'fragSize' argument is the
expected maximum size of fragments (MTU). The list of packets is returned.
If packet does not contain an IPv6ExtHdrFragment class, it is returned in
result list. |
378,957 | def prepare_files(self):
files = {}
for f in self.data[]:
k = f[]
if k not in files:
files[k] = []
files[k].append(f)
for k in files.keys():
files[k].sort(key=lambda x: x[])
self.files = files | Get files from data dump. |
378,958 | def get_task_param_string(task):
param_dict = task.to_str_params()
items = []
for key in sorted(param_dict.keys()):
items.append(": ".format(key, param_dict[key]))
return "{" + ", ".join(items) + "}" | Get all parameters of a task as one string
Returns:
str: task parameter string |
378,959 | def get_all_invoice_payments(self, params=None):
if not params:
params = {}
return self._iterate_through_pages(
get_function=self.get_invoice_payments_per_page,
resource=INVOICE_PAYMENTS,
**{'params': params}
) | Get all invoice payments
This will iterate over all pages until it gets all elements.
So if the rate limit is exceeded it will throw an Exception and you will get nothing.
:param params: search params
:return: list |
378,960 | def xlsx_to_csv(self, infile, worksheet=0, delimiter=","):
wb = load_workbook(self.getInputFile())
sheet = wb.worksheets[worksheet]
buffer = StringIO()
for n, row in enumerate(sheet.rows):
line = []
for cell in row:
value = cell.value
if type(value) in types.StringTypes:
value = value.encode("utf8")
if value is None:
value = ""
line.append(str(value))
print >>buffer, delimiter.join(line)
buffer.seek(0)
return buffer | Convert xlsx to an easier format first, since we want to use the
convenience of the CSV library |
378,961 | def _set_callables(modules):
def _set_function(cmd_name, doc):
def _cmd(*args, **kw):
kwargs = {}
if kw.get():
for _kw in kw.get(, []):
if isinstance(_kw, dict):
kwargs = _kw
break
return _caller.call(cmd_name, *args, **kwargs)
_cmd.__doc__ = doc
return _cmd
for mod in modules:
setattr(sys.modules[__name__], mod, _set_function(mod, )) | Set all Ansible modules callables
:return: |
378,962 | def split_obj (obj, prefix = None):
new = obj.copy() if prefix is None else { .format(prefix, k): v for k, v in obj.items() }
for k, v in new.items():
if isinstance(v, list):
if not isinstance(v[0], dict):
new[k] = .join(v)
return new, None, None
del new[k]
return new, k, v
elif isinstance(v, dict):
del new[k]
return new, k, [v]
return new, None, None | Split the object, returning a 3-tuple with the flat object, optionally
followed by the key for the subobjects and a list of those subobjects. |
378,963 | def _sum_wrapper(fn):
def wrapper(*args, **kwargs):
v = 0
new_args = _cast_args_to_list(args)
for arg in zip(*new_args):
v += fn(*arg, **kwargs)
return v
return wrapper | Wrapper to perform row-wise aggregation of list arguments and pass
them to a function. The return value of the function is summed
over the argument groups. Non-list arguments will be
automatically cast to a list. |
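A self-contained re-sketch of the same row-wise aggregation idea; the broadcasting helper is written inline here because the original `_cast_args_to_list` is not shown in the row, so its exact behaviour is an assumption.

```python
def sum_wrapper(fn):
    # Row-wise aggregation: zip list arguments together, apply fn per row,
    # and sum the results. Scalars are broadcast to the list length (assumed).
    def wrapper(*args, **kwargs):
        n = max((len(a) for a in args if isinstance(a, list)), default=1)
        rows = [a if isinstance(a, list) else [a] * n for a in args]
        return sum(fn(*row, **kwargs) for row in zip(*rows))
    return wrapper

area = sum_wrapper(lambda w, h: w * h)
print(area([2, 3, 4], [5, 5, 5]))   # 2*5 + 3*5 + 4*5 = 45
print(area([2, 3, 4], 10))          # 20 + 30 + 40 = 90
```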
378,964 | def rank(self,
method=,
ascending=True,
mask=NotSpecified,
groupby=NotSpecified):
if groupby is NotSpecified:
return Rank(self, method=method, ascending=ascending, mask=mask)
return GroupedRowTransform(
transform=rankdata if ascending else rankdata_1d_descending,
transform_args=(method,),
factor=self,
groupby=groupby,
dtype=float64_dtype,
missing_value=nan,
mask=mask,
window_safe=True,
) | Construct a new Factor representing the sorted rank of each column
within each row.
Parameters
----------
method : str, {'ordinal', 'min', 'max', 'dense', 'average'}
The method used to assign ranks to tied elements. See
`scipy.stats.rankdata` for a full description of the semantics for
each ranking method. Default is 'ordinal'.
ascending : bool, optional
Whether to return sorted rank in ascending or descending order.
Default is True.
mask : zipline.pipeline.Filter, optional
A Filter representing assets to consider when computing ranks.
If mask is supplied, ranks are computed ignoring any asset/date
pairs for which `mask` produces a value of False.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to perform ranking.
Returns
-------
ranks : zipline.pipeline.factors.Rank
A new factor that will compute the ranking of the data produced by
`self`.
Notes
-----
The default value for `method` is different from the default for
`scipy.stats.rankdata`. See that function's documentation for a full
description of the valid inputs to `method`.
Missing or non-existent data on a given day will cause an asset to be
given a rank of NaN for that day.
See Also
--------
:func:`scipy.stats.rankdata`
:class:`zipline.pipeline.factors.factor.Rank` |
378,965 | def get_defining_component(pe_pe):
if pe_pe is None:
return None
if type(pe_pe).__name__ != :
pe_pe = one(pe_pe).PE_PE[8001]()
ep_pkg = one(pe_pe).EP_PKG[8000]()
if ep_pkg:
return get_defining_component(ep_pkg)
return one(pe_pe).C_C[8003]() | Get the BridgePoint component (C_C) that defines the packageable element
*pe_pe*. |
378,966 | def translate_config(self, profile, merge=None, replace=None):
result = []
for k, v in self:
other_merge = getattr(merge, k) if merge else None
other_replace = getattr(replace, k) if replace else None
translator = Translator(
v, profile, merge=other_merge, replace=other_replace
)
result.append(translator.translate())
return "\n".join(result) | Translate the object to native configuration.
In this context, merge and replace means the following:
* **Merge** - Elements that exist in both ``self`` and ``merge`` will use by default the
values in ``merge`` unless ``self`` specifies a new one. Elements that exist only
in ``self`` will be translated as they are and elements present only in ``merge``
will be removed.
* **Replace** - All the elements in ``replace`` will either be removed or replaced by
elements in ``self``.
You can specify one of ``merge``, ``replace`` or none of them. If none of them are set we
will just translate configuration.
Args:
profile (list): Which profiles to use.
merge (Root): Object we want to merge with.
replace (Root): Object we want to replace. |
378,967 | def Logger(name, **kargs):
path_dirs = PathDirs(**kargs)
logging.captureWarnings(True)
logger = logging.getLogger(name)
logger.setLevel(logging.INFO)
handler = logging.handlers.WatchedFileHandler(os.path.join(
path_dirs.meta_dir, ))
handler.setLevel(logging.INFO)
formatter = logging.Formatter(
)
handler.setFormatter(formatter)
if not len(logger.handlers):
logger.addHandler(handler)
return logger | Create and return logger |
378,968 | def avg(self):
if len(self.values) > 0:
return sum(self.values) / float(len(self.values))
else:
return None | return the mean value |
378,969 | def is_translocated(graph: BELGraph, node: BaseEntity) -> bool:
return _node_has_modifier(graph, node, TRANSLOCATION) | Return true if over any of the node's edges, it is translocated. |
378,970 | def annual_reading_counts(kind=):
if kind == :
kinds = [, ]
else:
kinds = [kind]
counts = OrderedDict()
for k in kinds:
qs = Reading.objects.exclude(end_date__isnull=True) \
.filter(publication__kind=k) \
.annotate(year=TruncYear()) \
.values() \
.annotate(count=Count()) \
.order_by()
for year_data in qs:
year_str = year_data[].strftime()
if not year_str in counts:
counts[year_str] = {
: year_data[],
}
counts[year_str][k] = year_data[]
counts_list = []
for year_str, data in counts.items():
year_data = {
: data[],
}
if kind == :
year_data[] = 0
for k in kinds:
if k in data:
year_data[k] = data[k]
if kind == :
year_data[] += data[k]
else:
year_data[k] = 0
counts_list.append(year_data)
return counts_list | Returns a list of dicts, one per year of reading. In year order.
Each dict is like this (if kind is 'all'):
{'year': datetime.date(2003, 1, 1),
'book': 12, # only included if kind is 'all' or 'book'
'periodical': 18, # only included if kind is 'all' or 'periodical'
'total': 30, # only included if kind is 'all'
}
We use the end_date of a Reading to count when that thing was read.
kind is one of 'book', 'periodical' or 'all', for both. |
378,971 | def get_embedded_tweet(tweet):
if tweet.retweeted_tweet is not None:
return tweet.retweeted_tweet
elif tweet.quoted_tweet is not None:
return tweet.quoted_tweet
else:
return None | Get the retweeted Tweet OR the quoted Tweet and return it as a dictionary
Args:
tweet (Tweet): A Tweet object (not simply a dict)
Returns:
dict (or None, if the Tweet is neither a quote tweet or a Retweet):
a dictionary representing the quote Tweet or the Retweet |
378,972 | def get_initial_states(self, input_var, init_state=None):
initial_states = {}
for state in self.state_names:
if state != "state" or not init_state:
if self._input_type == and input_var.ndim == 2:
init_state = T.alloc(np.cast[env.FLOATX](0.), self.hidden_size)
else:
init_state = T.alloc(np.cast[env.FLOATX](0.), input_var.shape[0], self.hidden_size)
initial_states[state] = init_state
return initial_states | :type input_var: T.var
:rtype: dict |
378,973 | def serialize_array(array, domain=(0, 1), fmt=, quality=70):
normalized = _normalize_array(array, domain=domain)
return _serialize_normalized_array(normalized, fmt=fmt, quality=quality) | Given an arbitrary rank-3 NumPy array,
returns the byte representation of the encoded image.
Args:
array: NumPy array of dtype uint8 and range 0 to 255
domain: expected range of values in array, see `_normalize_array()`
fmt: string describing desired file format, defaults to 'png'
quality: specifies compression quality from 0 to 100 for lossy formats
Returns:
image data as BytesIO buffer |
378,974 | def generate_strings(project_base_dir, localization_bundle_path, tmp_directory, exclude_dirs, include_strings_file,
special_ui_components_prefix):
localization_directory = os.path.join(localization_bundle_path, DEFAULT_LANGUAGE_DIRECTORY_NAME)
if not os.path.exists(localization_directory):
os.makedirs(localization_directory)
localization_file = os.path.join(localization_directory, LOCALIZATION_FILENAME)
tmp_localization_directory = os.path.join(tmp_directory, DEFAULT_LANGUAGE_DIRECTORY_NAME)
tmp_localization_file = os.path.join(tmp_localization_directory, LOCALIZATION_FILENAME)
if os.path.isdir(tmp_localization_directory):
shutil.rmtree(tmp_localization_directory)
os.mkdir(tmp_localization_directory)
logging.info("Running genstrings")
source_files = extract_source_files(project_base_dir, exclude_dirs)
genstrings_cmd = % (tmp_localization_directory, " ".join(
[ % (source_file,) for source_file in source_files]))
genstrings_process = subprocess.Popen(genstrings_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
stdin=subprocess.PIPE, shell=True)
genstrings_out, genstrings_err = genstrings_process.communicate()
remove_empty_comments_from_file(tmp_localization_file)
add_genstrings_comments_to_file(tmp_localization_file, genstrings_err)
genstrings_rc = genstrings_process.returncode
if genstrings_rc != 0:
logging.fatal("genstrings returned %d, aborting run!", genstrings_rc)
sys.exit(genstrings_rc)
create_localized_strings_from_ib_files(project_base_dir, exclude_dirs, tmp_localization_file,
special_ui_components_prefix)
if include_strings_file:
target = open_strings_file(tmp_localization_file, "a")
source = open_strings_file(include_strings_file, "r")
target.write(source.read())
source.close()
target.close()
handle_duplications(tmp_localization_file)
if os.path.isfile(localization_file):
logging.info("Merging old localizable with new one...")
merge_strings_files(localization_file, tmp_localization_file)
else:
logging.info("No Localizable yet, moving the created file...")
shutil.move(tmp_localization_file, localization_file) | Calls the builtin 'genstrings' command with JTLocalizedString as the string to search for,
and adds strings extracted from UI elements internationalized with 'JTL' + removes duplications. |
378,975 | def get_satellites_list(self, sat_type):
satellites_list = []
if sat_type in [, , ,
, , ]:
for satellite in getattr(self, sat_type):
satellites_list.append(satellite)
satellites_list = master_then_spare(satellites_list)
return satellites_list | Get a sorted satellite list: master then spare
:param sat_type: type of the required satellites (arbiters, schedulers, ...)
:type sat_type: str
:return: sorted satellites list
:rtype: list[alignak.objects.satellitelink.SatelliteLink] |
378,976 | def field2parameter(self, field, name="body", default_in="body"):
location = field.metadata.get("location", None)
prop = self.field2property(field)
return self.property2parameter(
prop,
name=name,
required=field.required,
multiple=isinstance(field, marshmallow.fields.List),
location=location,
default_in=default_in,
) | Return an OpenAPI parameter as a `dict`, given a marshmallow
:class:`Field <marshmallow.Field>`.
https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.2.md#parameterObject |
378,977 | def _evaluate(self, indices, norm_distances, out=None):
idx_res = []
for i, yi in zip(indices, norm_distances):
if self.variant == :
idx_res.append(np.where(yi <= .5, i, i + 1))
else:
idx_res.append(np.where(yi < .5, i, i + 1))
idx_res = tuple(idx_res)
if out is not None:
out[:] = self.values[idx_res]
return out
else:
return self.values[idx_res] | Evaluate nearest interpolation. |
378,978 | def get_argflag(argstr_, default=False, help_=, return_specified=None,
need_prefix=True, return_was_specified=False, argv=None,
debug=None,
**kwargs):
if argv is None:
argv = sys.argv
assert isinstance(default, bool),
argstr_list = meta_util_iter.ensure_iterable(argstr_)
_register_arg(argstr_list, bool, default, help_)
parsed_val = default
was_specified = False
if debug is None:
debug = DEBUG
import os
for key, val in os.environ.items():
key = key.upper()
sentinal =
if key.startswith(sentinal):
flag = + key[len(sentinal):].lower().replace(, )
if val.upper() in [, ]:
pass
elif val.upper() in [, ]:
continue
else:
continue
new_argv = [flag]
argv = argv[:] + new_argv
if debug:
print()
print( % (new_argv,))
for argstr in argstr_list:
if not (argstr.find() == 0 or (argstr.find() == 0 and len(argstr) == 2)):
raise AssertionError( % (argstr,))
if not need_prefix:
noprefix = argstr.replace(, )
if noprefix in argv:
parsed_val = True
was_specified = True
break
noarg = argstr.replace(, )
if argstr in argv:
parsed_val = True
was_specified = True
break
elif noarg in argv:
parsed_val = False
was_specified = True
break
elif argstr + in argv:
parsed_val = True
was_specified = True
break
elif argstr + in argv:
parsed_val = False
was_specified = True
break
if return_specified is None:
return_specified = return_was_specified
if return_specified:
return parsed_val, was_specified
else:
return parsed_val | Checks if the commandline has a flag or a corresponding noflag
Args:
argstr_ (str, list, or tuple): the flag to look for
default (bool): dont use this (default = False)
help_ (str): a help string (default = '')
return_specified (bool): returns if flag was specified or not (default = False)
Returns:
tuple: (parsed_val, was_specified)
TODO:
depricate return_was_specified
CommandLine:
python -m utool.util_arg --exec-get_argflag --noface --exec-mode
python -m utool.util_arg --exec-get_argflag --foo --exec-mode
python -m utool.util_arg --exec-get_argflag --no-foo --exec-mode
python -m utool.util_arg --exec-get_argflag --foo=True --exec-mode
python -m utool.util_arg --exec-get_argflag --foo=False --exec-mode
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_arg import * # NOQA
>>> argstr_ = '--foo'
>>> default = False
>>> help_ = ''
>>> return_specified = True
>>> (parsed_val, was_specified) = get_argflag(argstr_, default, help_, return_specified)
>>> result = ('(parsed_val, was_specified) = %s' % (str((parsed_val, was_specified)),))
>>> print(result) |
378,979 | def get_col_width(self, col, tab):
try:
return self.col_widths[(col, tab)]
except KeyError:
return config["default_col_width"] | Returns column width |
378,980 | def callRemote(self, objectPath, methodName,
interface=None,
destination=None,
signature=None,
body=None,
expectReply=True,
autoStart=True,
timeout=None,
returnSignature=_NO_CHECK_RETURN):
try:
mcall = message.MethodCallMessage(
objectPath,
methodName,
interface=interface,
destination=destination,
signature=signature,
body=body,
expectReply=expectReply,
autoStart=autoStart,
oobFDs=self._toBeSentFDs,
)
d = self.callRemoteMessage(mcall, timeout)
d.addCallback(self._cbCvtReply, returnSignature)
return d
except Exception:
return defer.fail() | Calls a method on a remote DBus object and returns a deferred to the
result.
@type objectPath: C{string}
@param objectPath: Path of the remote object
@type methodName: C{string}
@param methodName: Name of the method to call
@type interface: None or C{string}
@param interface: If specified, this specifies the interface containing
the desired method
@type destination: None or C{string}
@param destination: If specified, this specifies the bus name
containing the remote object
@type signature: None or C{string}
@param signature: If specified, this specifies the DBus signature of
the body of the DBus MethodCall message. This string must be a
valid Signature string as defined by the DBus specification. If
arguments are supplied to the method call, this parameter must be
provided.
@type body: C{list}
@param body: A C{list} of Python objects to encode. The list content
must match the content of the signature parameter
@type expectReply: C{bool}
@param expectReply: If True (defaults to True) the returned deferred
will be called back with the eventual result of the remote call. If
False, the deferred will be immediately called back with None.
@type autoStart: C{bool}
@param autoStart: If True (defaults to True) DBus will attempt to
automatically start a service to handle the method call if a
service matching the target object is registered but not yet
started.
@type timeout: None or C{float}
@param timeout: If specified and the remote call does not return a
value before the timeout expires, the returned Deferred will be
errbacked with a L{error.TimeOut} instance.
@type returnSignature: C{string}
@param returnSignature: If specified, the return values will be
validated against the signature string. If the returned values do
not match, the returned Deferred will be errbacked with a
L{error.RemoteError} instance.
@rtype: L{twisted.internet.defer.Deferred}
@returns: a Deferred to the result. If expectReply is False, the
deferred will be immediately called back with None. |
378,981 | def load_delimited(filename, converters, delimiter=r):
n_columns = len(converters)
columns = tuple(list() for _ in range(n_columns))
splitter = re.compile(delimiter)
with open(filename, 'r') as input_file:
    for row, line in enumerate(input_file, 1):
        # Split the line into at most n_columns fields and convert each one.
        data = splitter.split(line.strip(), n_columns - 1)
        for value, column, converter in zip(data, columns, converters):
            try:
                converted_value = converter(value)
            except ValueError:
                raise ValueError("Could not convert {} using {}, "
                                 "found at {}:{:d}:\n\t{}".format(
                                     value, converter.__name__, filename,
                                     row, line))
            column.append(converted_value)
if n_columns == 1:
return columns[0]
else:
return columns | r"""Utility function for loading in data from an annotation file where columns
are delimited. The number of columns is inferred from the length of
the provided converters list.
Examples
--------
>>> # Load in a one-column list of event times (floats)
>>> load_delimited('events.txt', [float])
>>> # Load in a list of labeled events, separated by commas
>>> load_delimited('labeled_events.csv', [float, str], ',')
Parameters
----------
filename : str
Path to the annotation file
converters : list of functions
Each entry in column ``n`` of the file will be cast by the function
``converters[n]``.
delimiter : str
Separator regular expression.
By default, lines will be split by any amount of whitespace.
Returns
-------
columns : tuple of lists
Each list in this tuple corresponds to values in one of the columns
in the file. |
378,982 | def install_documentation(path="./Litho1pt0-Notebooks"):
Notebooks_Path = _pkg_resources.resource_filename(, )
ct = _dir_util.copy_tree(Notebooks_Path, path, preserve_mode=1, preserve_times=1, preserve_symlinks=1, update=0, verbose=1, dry_run=0)
return | Install the example notebooks for litho1pt0 in the given location
WARNING: If the path exists, the Notebook files will be written into the path
and will overwrite any existing files with which they collide. The default
path ("./Litho1pt0-Notebooks") is chosen to make collision less likely / problematic
The documentation for litho1pt0 is in the form of jupyter notebooks.
Some dependencies exist for the notebooks to be useful:
- matplotlib: for some diagrams
- cartopy: for plotting map examples
litho1pt0 dependencies are explicitly imported into the notebooks including:
- stripy (for interpolating on the sphere)
- numpy
- scipy (for k-d tree point location) |
378,983 | def InsertFloatArg(self, string="", **_):
try:
float_value = float(string)
return self.InsertArg(float_value)
except (TypeError, ValueError):
raise ParseError("%s is not a valid float." % string) | Inserts a Float argument. |
378,984 | def _delete_json(self, instance, space=None, rel_path=None, extra_params=None, id_field=None, append_to_path=None):
model = type(instance)
if space is None and model not in (Space, Event):
raise Exception(
)
if not extra_params:
extra_params = {}
if not id_field:
id_field =
if not instance.get(id_field, None):
raise AttributeError(
%s\ % (
instance.__class__.__name__,
id_field
)
)
url = .format(
settings.API_ROOT_PATH,
settings.API_VERSION,
rel_path or model.rel_path,
instance[id_field],
append_to_path or ,
urllib.urlencode(extra_params),
)
response = requests.delete(
url=url,
headers={
: self.key,
: self.secret,
: "application/json",
},
)
if response.status_code == 204:
return True
else:
raise Exception(
.format(
response.status_code,
url,
response.text
)
) | Base level method for removing data from the API |
378,985 | def emit_reset(self):
for name in self.layout.axes:
params = self.layout.axes_options.get(name, DEFAULT_AXIS_OPTIONS)
self.write_event(ecodes.EV_ABS, name, int(sum(params[1:3]) / 2))
for name in self.layout.buttons:
self.write_event(ecodes.EV_KEY, name, False)
for name in self.layout.hats:
self.write_event(ecodes.EV_ABS, name, 0)
self.device.syn() | Resets the device to a blank state. |
378,986 | def setup(app) -> Dict[str, Any]:
app.connect("doctree-read", on_doctree_read)
app.connect("builder-inited", on_builder_inited)
app.add_css_file("uqbar.css")
app.add_node(
nodes.classifier, override=True, html=(visit_classifier, depart_classifier)
)
app.add_node(
nodes.definition, override=True, html=(visit_definition, depart_definition)
)
app.add_node(nodes.term, override=True, html=(visit_term, depart_term))
return {
"version": uqbar.__version__,
"parallel_read_safe": True,
"parallel_write_safe": True,
} | Sets up Sphinx extension. |
378,987 | def payments_for_address(self, address):
"return an array of (TX ids, net_payment)"
URL = self.api_domain + ("/address/%s?format=json" % address)
d = urlopen(URL).read()
json_response = json.loads(d.decode("utf8"))
response = []
for tx in json_response.get("txs", []):
total_out = 0
for tx_out in tx.get("out", []):
if tx_out.get("addr") == address:
total_out += tx_out.get("value", 0)
if total_out > 0:
response.append((tx.get("hash"), total_out))
return response | return an array of (TX ids, net_payment) |
378,988 | def quadvgk(feval, fmin, fmax, tol1=1e-5, tol2=1e-5):
XK = np.array([-0.991455371120813, -0.949107912342759, -0.864864423359769, -0.741531185599394,
-0.586087235467691, -0.405845151377397, -0.207784955007898, 0.,
0.207784955007898, 0.405845151377397, 0.586087235467691,
0.741531185599394, 0.864864423359769, 0.949107912342759, 0.991455371120813])
WK = np.array([0.022935322010529, 0.063092092629979, 0.104790010322250, 0.140653259715525,
0.169004726639267, 0.190350578064785, 0.204432940075298, 0.209482141084728,
0.204432940075298, 0.190350578064785, 0.169004726639267,
0.140653259715525, 0.104790010322250, 0.063092092629979, 0.022935322010529])
WG = np.array([0.129484966168870, 0.279705391489277, 0.381830050505119, 0.417959183673469,
0.381830050505119, 0.279705391489277, 0.129484966168870])
NK = WK.size
G = np.arange(2,NK,2)
tol1 = 1e-4
tol2 = 1e-4
Subs = np.array([[fmin],[fmax]])
NF = feval(np.zeros(1)).size
Q = np.zeros(NF)
neval = 0
while Subs.size > 0:
Subs = getSubs(Subs,XK)
M = (Subs[1,:] - Subs[0,:]) / 2
C = (Subs[1,:] + Subs[0,:]) / 2
NM = M.size
x = XK[:,None]*M + C
x = x.flatten()
FV = feval(x)
Q1 = np.zeros((NF, NM))
Q2 = np.zeros((NF, NM))
Q1 = np.dot(FV.reshape(NF, NK, NM).swapaxes(2,1),WK)*M
Q2 = np.dot(FV.reshape(NF, NK, NM).swapaxes(2,1)[:,:,1::2],WG)*M
ind = np.nonzero(np.logical_or(np.max(np.abs((Q1-Q2)), 0) < tol1 , (Subs[1,:] - Subs[0,:]) < tol2))[0]
Q = Q + np.sum(Q1[:,ind], axis=1)
Subs = np.delete(Subs, ind, axis=1)
return Q | Vectorised Gauss-Kronrod quadrature. This numpy implementation makes use of the code here: http://se.mathworks.com/matlabcentral/fileexchange/18801-quadvgk
It uses the Gauss-Kronrod integration already used in GPstuff for evaluating one-dimensional integrals.
The quadrature is vectorised, which means that several functions can be evaluated at the same time over a grid of points.
:param feval: vectorised integrand; given a flat array x of evaluation points it returns an array of shape (n_funcs, x.size)
:param fmin: lower integration limit
:param fmax: upper integration limit
:param tol1: tolerance on the absolute difference between the Kronrod and Gauss estimates of a subinterval
:param tol2: minimum subinterval width; narrower subintervals are accepted as converged
:return: array of length n_funcs containing the integral of each function |
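As a sketch of the calling convention (feval returns an array of shape (n_funcs, x.size) for a flat grid x), two functions can be integrated at once; this assumes numpy and the getSubs helper used by quadvgk are available, and the expected values are standard closed forms rather than results from the source:

    import numpy as np

    def feval(x):
        # two integrands evaluated on the same grid of points
        return np.vstack([x ** 2, np.sin(x)])

    Q = quadvgk(feval, 0.0, 1.0)
    # Q[0] is approximately 1/3, Q[1] is approximately 1 - cos(1)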
378,989 | def get_source_url(obj):
source_env_prefix = obj.context.config['source_env_prefix']
task = obj.task
log.debug("Getting source url for {} {}...".format(obj.name, obj.task_id))
repo = get_repo(obj.task, source_env_prefix=source_env_prefix)
source = task['metadata']['source']
if repo and not verify_repo_matches_url(repo, source):
raise CoTError("{name} {task_id}: {source_env_prefix} {repo} doesn't match source {source}!".format(
name=obj.name, task_id=obj.task_id, source_env_prefix=source_env_prefix, repo=repo, source=source
))
log.info("{} {}: found {}".format(obj.name, obj.task_id, source))
return source | Get the source url for a Trust object.
Args:
obj (ChainOfTrust or LinkOfTrust): the trust object to inspect
Raises:
CoTError: if repo and source are defined and don't match
Returns:
str: the source url. |
378,990 | def build(self, region=None, profile=None):
with self.lock:
key = "{}-{}".format(profile, region)
try:
provider = self.providers[key]
except KeyError:
msg = "Missed memoized lookup ({}), creating new AWS Provider."
logger.debug(msg.format(key))
if not region:
region = self.region
self.providers[key] = Provider(
get_session(region=region, profile=profile),
region=region,
**self.kwargs
)
provider = self.providers[key]
return provider | Get or create the provider for the given region and profile. |
378,991 | def get_provider_name(driver):
kls = driver.__class__.__name__
for d, prop in DRIVERS.items():
if prop[1] == kls:
return d
return None | Return the provider name from the driver class
:param driver: obj
:return: str |
378,992 | def arange_col(n, dtype=int):
return np.reshape(np.arange(n, dtype=dtype), (n, 1)) | Returns ``np.arange`` in a column form.
:param n: Length of the array.
:type n: int
:param dtype: Type of the array.
:type dtype: type
:returns: ``np.arange`` in a column form.
:rtype: ndarray |
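For example (a sketch; the values follow directly from np.arange and np.reshape):

    col = arange_col(3)
    # array([[0],
    #        [1],
    #        [2]])
    assert col.shape == (3, 1)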
378,993 | def delete_priority_class(self, name, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_priority_class_with_http_info(name, **kwargs)
else:
(data) = self.delete_priority_class_with_http_info(name, **kwargs)
return data | delete a PriorityClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_priority_class(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the PriorityClass (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread. |
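A hedged usage sketch with the standard kubernetes Python client; the API class version (SchedulingV1beta1Api) and the PriorityClass name "high-priority" are assumptions:

    from kubernetes import client, config

    config.load_kube_config()
    api = client.SchedulingV1beta1Api()

    # synchronous call
    status = api.delete_priority_class("high-priority", body=client.V1DeleteOptions())

    # asynchronous call
    thread = api.delete_priority_class("high-priority", async_req=True)
    status = thread.get()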
378,994 | def shade_jar(self, shading_rules, jar_path):
self.context.log.debug('Shading {}.'.format(jar_path))
with temporary_dir() as tempdir:
output_jar = os.path.join(tempdir, os.path.basename(jar_path))
with self.shader.binary_shader_for_rules(output_jar, jar_path, shading_rules) as shade_runner:
result = execute_runner(shade_runner, workunit_factory=self.context.new_workunit,
workunit_name='shade-jar')  # workunit name assumed; original literal was stripped
if result != 0:
raise TaskError('Shading jar {} failed with exit status {}.'.format(jar_path,
result))
if not os.path.exists(output_jar):
raise TaskError(
'Shading jar {} did not produce the expected output jar at {}.'.format(jar_path, output_jar))
atomic_copy(output_jar, jar_path)
return jar_path | Shades a jar using the shading rules from the given jvm_binary.
This *overwrites* the existing jar file at ``jar_path``.
:param shading_rules: predefined rules for shading
:param jar_path: The filepath to the jar that should be shaded. |
378,995 | def get_outputs(sym, params, in_shape, in_label):
inputs = {n: tuple(s) for n, s in zip([n for n in sym.list_inputs() if n not in params and n != in_label],
in_shape)}
inputs.update({n: v.shape for n, v in params.items() if n in sym.list_inputs()})
_, out_shapes, _ = sym.infer_shape(**inputs)
out_names = list()
for name in sym.list_outputs():
if name.endswith('_output'):
out_names.append(name[:-len('_output')])
else:
logging.info("output '%s' does not end with '_output'", name)
out_names.append(name)
assert len(out_shapes) == len(out_names)
graph_outputs = {n: s for n, s in zip(out_names, out_shapes)}
return graph_outputs | Infer output shapes and return dictionary of output name to shape
:param :class:`~mxnet.symbol.Symbol` sym: symbol to perform infer shape on
:param dict of (str, nd.NDArray) params: dictionary of parameter names to bound NDArray values
:param list of tuple(int, ...) in_shape: list of all input shapes
:param in_label: name of label typically used in loss that may be left in graph. This name is
removed from list of inputs required by symbol
:return: dictionary of output name to shape
:rtype: dict of (str, tuple(int, ...)) |
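A minimal sketch of the expected inputs using a toy mxnet symbol; the shapes and the label name are illustrative assumptions:

    import mxnet as mx

    data = mx.sym.Variable('data')
    fc = mx.sym.FullyConnected(data, num_hidden=10, name='fc1')
    net = mx.sym.SoftmaxOutput(fc, name='softmax')

    graph_outputs = get_outputs(net, params={}, in_shape=[(1, 20)], in_label='softmax_label')
    # e.g. {'softmax': (1, 10)}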
378,996 | def transfer(self, data):
settings = self.transfer_settings
settings.spi_tx_size = len(data)
self.transfer_settings = settings
response = ''
for i in range(0, len(data), 60):
response += self.sendCommand(commands.SPITransferCommand(data[i:i + 60])).data
time.sleep(0.01)
while len(response) < len(data):
response += self.sendCommand(commands.SPITransferCommand('')).data
return ''.join(response)
Arguments:
data: The data to transfer.
Returns:
The data returned by the SPI device. |
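A usage sketch; the device constructor and its arguments are assumptions, only transfer() itself comes from the code above:

    # Full-duplex SPI exchange: clock three bytes out, read three bytes back.
    dev = MCP2210(vendor_id=0x04d8, product_id=0x00de)   # assumed constructor
    reply = dev.transfer('\x01\x02\x03')
    print([hex(ord(b)) for b in reply])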
378,997 | def _detectEncoding(self, xml_data, isHTML=False):
xml_encoding = sniffed_xml_encoding = None
try:
if xml_data[:4] == '\x4c\x6f\xa7\x94':
# EBCDIC
xml_data = self._ebcdic_to_ascii(xml_data)
elif xml_data[:4] == '\x00\x3c\x00\x3f':
# UTF-16BE
sniffed_xml_encoding = 'utf-16be'
xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') \
and (xml_data[2:4] != '\x00\x00'):
# UTF-16BE with BOM
sniffed_xml_encoding = 'utf-16be'
xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
elif xml_data[:4] == '\x3c\x00\x3f\x00':
# UTF-16LE
sniffed_xml_encoding = 'utf-16le'
xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and \
(xml_data[2:4] != '\x00\x00'):
# UTF-16LE with BOM
sniffed_xml_encoding = 'utf-16le'
xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
elif xml_data[:4] == '\x00\x00\x00\x3c':
# UTF-32BE
sniffed_xml_encoding = 'utf-32be'
xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
elif xml_data[:4] == '\x3c\x00\x00\x00':
# UTF-32LE
sniffed_xml_encoding = 'utf-32le'
xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
elif xml_data[:4] == '\x00\x00\xfe\xff':
# UTF-32BE with BOM
sniffed_xml_encoding = 'utf-32be'
xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
elif xml_data[:4] == '\xff\xfe\x00\x00':
# UTF-32LE with BOM
sniffed_xml_encoding = 'utf-32le'
xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
elif xml_data[:3] == '\xef\xbb\xbf':
# UTF-8 with BOM
sniffed_xml_encoding = 'utf-8'
xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
else:
sniffed_xml_encoding = 'ascii'
pass
except:
xml_encoding_match = None
xml_encoding_match = re.compile(
'^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data)
if not xml_encoding_match and isHTML:
regexp = re.compile('<\s*meta[^>]+charset=([^>]*?)[;\'">]', re.I)
xml_encoding_match = regexp.search(xml_data)
if xml_encoding_match is not None:
xml_encoding = xml_encoding_match.groups()[0].lower()
if isHTML:
self.declaredHTMLEncoding = xml_encoding
if sniffed_xml_encoding and \
(xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode',
'iso-10646-ucs-4', 'ucs-4', 'csucs4',
'utf-16', 'utf-32', 'utf_16', 'utf_32',
'utf16', 'u16')):
xml_encoding = sniffed_xml_encoding
return xml_data, xml_encoding, sniffed_xml_encoding | Given a document, tries to detect its XML encoding. |
378,998 | def check_publication_state(publication_id):
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
# SQL text reconstructed; table/column names are assumed from the unpacked fields
cursor.execute("SELECT state, state_messages FROM publications WHERE id = %s", (publication_id,))
publication_state, publication_messages = cursor.fetchone()
return publication_state, publication_messages | Check the publication's current state. |
378,999 | def by_period(self, field=None, period=None, timezone=None, start=None, end=None):
hist_period = period if period else self.interval_
time_zone = timezone if timezone else "UTC"
start_ = start if start else self.start_date
end_ = end if end else self.end_date
bounds = self.get_bounds(start_, end_)
date_field = field if field else "grimoire_creation_date"
agg_key = "date_histogram_" + date_field
if agg_key in self.aggregations.keys():
agg = self.aggregations[agg_key]
else:
agg = A("date_histogram", field=date_field, interval=hist_period,
time_zone=time_zone, min_doc_count=0, **bounds)
child_agg_counter = self.child_agg_counter_dict[agg_key]
child_name, child_agg = self.aggregations.popitem()
agg.metric(child_agg_counter, child_agg)
self.aggregations[agg_key] = agg
self.child_agg_counter_dict[agg_key] += 1
return self | Create a date histogram aggregation using the last added aggregation for the
current object. Add this date_histogram aggregation into self.aggregations
:param field: the index field to create the histogram from
:param period: the interval which elasticsearch supports, ex: "month", "week" and such
:param timezone: custom timezone
:param start: custom start date for the date histogram, default: start date under range
:param end: custom end date for the date histogram, default: end date under range
:returns: self, which allows the method to be chainable with the other methods |
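A chaining sketch; the surrounding query-builder methods and constructor are assumptions, only by_period() is taken from the code above:

    # Count distinct authors per month (illustrative method names).
    q = Query(index="git")                  # assumed constructor
    q = q.get_cardinality("author_uuid")    # assumed helper that adds an aggregation
    q = q.by_period(field="grimoire_creation_date", period="month")
    result = q.fetch_aggregation_results()  # assumed fetch helper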