index | code | docstring
---|---|---|
23,900 |
def apply_config(self, config):
        self.hash_name = config['hash_name']
        self.dim = config['dim']
        self.projection_count = config['projection_count']
        self.normals = config['normals']
        self.tree_root = config['tree_root']
        self.minimum_result_size = config['minimum_result_size']
|
Applies config
|
23,901 |
def scroll_up(self, n, pre_dl=None, post_dl=None):
self.delay(pre_dl)
self.m.scroll(vertical=n)
self.delay(post_dl)
|
Scroll up ``n`` times.
**Chinese documentation**
Scroll the mouse wheel up n times.
|
23,902 |
def install_cub(mb_inc_path):
    # The URL, hash, version and filename literals were elided in extraction;
    # `...` below marks where they belong (path literals 'cub', 'cub.cuh' and
    # 'README.md' are restored per the standard cub archive layout).
    cub_url = ...          # e.g. a github.com/NVlabs/cub release zip
    cub_sha_hash = ...     # expected SHA hash of the downloaded zip
    cub_version_str = ...  # version string checked inside the README
    cub_zip_file = ...     # local filename for the downloaded zip
    cub_zip_dir = ...      # directory name inside the zip
    cub_unzipped_path = os.path.join(mb_inc_path, cub_zip_dir)
    cub_new_unzipped_path = os.path.join(mb_inc_path, 'cub')
    cub_header = os.path.join(cub_new_unzipped_path, 'cub', 'cub.cuh')
    cub_readme = os.path.join(cub_new_unzipped_path, 'README.md')
    cub_installed, _ = is_cub_installed(cub_readme, cub_header, cub_version_str)
    if cub_installed:
        log.info("NVIDIA cub installation found "
                 "at {}".format(cub_new_unzipped_path))
        return
    log.info("No NVIDIA cub installation found")
    have_valid_cub_file = (os.path.exists(cub_zip_file) and
                           os.path.isfile(cub_zip_file) and
                           sha_hash_file(cub_zip_file) == cub_sha_hash)
    if have_valid_cub_file:
        log.info("Valid NVIDIA cub archive found {}".format(cub_zip_file))
    # (the download-and-extract step that moves cub_unzipped_path to
    # cub_new_unzipped_path was lost in extraction)
    there, reason = is_cub_installed(cub_readme, cub_header, cub_version_str)
    if not there:
        raise InstallCubException(reason)
|
Downloads and installs cub into mb_inc_path
|
23,903 |
def confirm(message="", title="", default=False, ok=False, cancel=False,
parent=None):
"Ask for confirmation (yes/no or ok and cancel), returns True or False"
style = wx.CENTRE
if ok:
style |= wx.OK
else:
style |= wx.YES | wx.NO
if default:
style |= wx.YES_DEFAULT
else:
style |= wx.NO_DEFAULT
if cancel:
style |= wx.CANCEL
result = dialogs.messageDialog(parent, message, title, style)
if cancel and result.returned == wx.ID_CANCEL:
return None
return result.accepted
|
Ask for confirmation (yes/no or ok and cancel), returns True or False
|
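A hedged usage sketch of the helper above (not from the source; a wx.App must exist before any dialog is shown):

import wx

app = wx.App(False)
answer = confirm("Overwrite the existing file?", title="Confirm",
                 default=False, cancel=True)
if answer is None:
    print("cancelled")   # with cancel=True, pressing Cancel returns None
elif answer:
    print("confirmed")   # Yes (or OK when ok=True)
else:
    print("declined")    # No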
23,904 |
def transfer_data_from_mongo(self,
index,
doc_type,
use_mongo_id=False,
                                 indexed_flag_field_name='',
mongo_query_params={},
mongo_host=default.MONGO_HOST,
mongo_port=default.MONGO_PORT,
mongo_db=default.MONGO_DB,
mongo_collection=default.MONGO_COLLECTION):
mongo_client = MongoClient(host=mongo_host, port=int(mongo_port))
try:
collection = mongo_client[mongo_db][mongo_collection]
            if indexed_flag_field_name != '':
mongo_query_params.update({indexed_flag_field_name: False})
mongo_docs = collection.find(mongo_query_params)
finally:
mongo_client.close()
actions = []
id_array = []
for doc in mongo_docs:
            action = {
                '_op_type': 'index',  # bulk action keys were elided; the
                '_index': index,      # standard elasticsearch.helpers keys
                '_type': doc_type     # are restored here
            }
            id_array.append(doc['_id'])
            if not use_mongo_id:
                doc.pop('_id')
            else:
                action['_id'] = str(doc['_id'])  # reuse the Mongo id (key elided)
                doc.pop('_id')
            action['_source'] = doc
actions.append(action)
success, failed = es_helpers.bulk(self.client, actions, request_timeout=60 * 60)
        logger.info(
            'transfer from mongo(%s:%s) into %s done, success: %s, failed: %s' % (
                mongo_host, mongo_port, self.client, success, failed))  # message elided; paraphrased
        if indexed_flag_field_name != '':
            t = threading.Thread(target=ElasticsearchClient._back_update_mongo,
                                 args=(self, mongo_host, mongo_port, mongo_db,
                                       mongo_collection, id_array,
                                       {indexed_flag_field_name: True}),
                                 name='back-update-mongo')  # thread name elided; placeholder
t.start()
return success, failed
|
Transfer data from MongoDB into Elasticsearch. The MongoDB hostname, port, database and
collection names default to the values loaded from default.py.
:param index: The name of the index
:param doc_type: The type of the document
:param use_mongo_id: If true, reuse the MongoDB id as the Elasticsearch document id; otherwise ids are generated automatically
:param indexed_flag_field_name: the name of the document field used as an indexed flag;
documents whose value for that field is False will be synchronized
:param mongo_query_params: The dictionary for query params of MongoDB
:param mongo_host: The name of the hostname from MongoDB
:param mongo_port: The number of the port from MongoDB
:param mongo_db: The name of the database from MongoDB
:param mongo_collection: The name of the collection from MongoDB
:return: tuple of (success, failed) counts from the bulk indexing call
|
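A hypothetical call to the method above (assumes an ElasticsearchClient instance `es` from the surrounding class; host and database names are illustrative):

success, failed = es.transfer_data_from_mongo(
    index="articles",
    doc_type="article",
    use_mongo_id=True,
    mongo_host="localhost",
    mongo_port=27017,
    mongo_db="newsdb",
    mongo_collection="articles",
)
print("indexed: %s, failed: %s" % (success, failed))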
23,905 |
def identify(self, access_token, leeway=10.0):
return identify(self.mw_uri, self.consumer_token, access_token,
leeway=leeway, user_agent=self.user_agent)
|
Gather identifying information about a user via an authorized token.
:Parameters:
access_token : `AccessToken`
A token representing an authorized user. Obtained from
`complete()`.
leeway : `int` | `float`
The number of seconds of leeway to account for when examining a
tokens "issued at" timestamp.
:Returns:
A dictionary containing identity information.
|
23,906 |
def format_coord(self, x, y):
p, b = stereonet_math.geographic2plunge_bearing(x, y)
s, d = stereonet_math.geographic2pole(x, y)
        # the u'...' format strings were elided in extraction; plausible
        # plunge/bearing and strike/dip formats restored
        pb = u'P/B={:0.0f}\u00b0/{:03.0f}\u00b0'.format(p[0], b[0])
        sd = u'S/D={:03.0f}\u00b0/{:0.0f}\u00b0'.format(s[0], d[0])
        return u'{}, {}'.format(pb, sd)
|
Format displayed coordinates during mouseover of axes.
|
23,907 |
def goBack(self):
if not self.canGoBack():
return
self._blockStack = True
self._index -= 1
self.emitCurrentChanged()
self._blockStack = False
return self.currentUrl()
|
Goes up one level if possible and returns the url at the current level.
If it cannot go up, then a blank string will be returned.
:return <str>
|
23,908 |
def ProduceExtractionWarning(self, message, path_spec=None):
if not self._storage_writer:
            raise RuntimeError('Storage writer not set.')
if not path_spec and self._file_entry:
path_spec = self._file_entry.path_spec
parser_chain = self.GetParserChain()
warning = warnings.ExtractionWarning(
message=message, parser_chain=parser_chain, path_spec=path_spec)
self._storage_writer.AddWarning(warning)
self._number_of_warnings += 1
self.last_activity_timestamp = time.time()
|
Produces an extraction warning.
Args:
message (str): message of the warning.
path_spec (Optional[dfvfs.PathSpec]): path specification, where None
will use the path specification of current file entry set in
the mediator.
Raises:
RuntimeError: when storage writer is not set.
|
23,909 |
def with_item(self, context, as_opt):
if as_opt:
as_loc, optional_vars = as_opt
return ast.withitem(context_expr=context, optional_vars=optional_vars,
as_loc=as_loc, loc=context.loc.join(optional_vars.loc))
else:
return ast.withitem(context_expr=context, optional_vars=None,
as_loc=None, loc=context.loc)
|
(2.7, 3.1-) with_item: test ['as' expr]
|
23,910 |
def add(self, count, timestamp=None):
if timestamp is None:
timestamp = time.time()
if self.last_data >= timestamp:
raise ValueError("Time {} >= {} in load average calculation".format(self.last_data, timestamp))
self.last_data = timestamp
for meta in self.intervals.values():
meta.push(count, timestamp)
|
Add a value at the specified time to the series.
:param count: The number of work items ready at the specified
time.
:param timestamp: The timestamp to add. Defaults to None,
meaning current time. It should be strictly greater (newer)
than the last added timestamp.
|
23,911 |
def groups(self):
object_paths = (
path_components(path)
for path in self.objects)
group_names = (path[0] for path in object_paths if len(path) > 0)
groups_set = OrderedDict()
for group in group_names:
groups_set[group] = None
return list(groups_set)
|
Return the names of groups in the file
Note that there is not necessarily a TDMS object associated with
each group name.
:rtype: List of strings.
|
23,912 |
def handle(self, request):
request.id = str(uuid4())
deferred = maybeDeferred(self._validate, request)
deferred.addCallback(self.execute)
def write_response(response):
request.setHeader("Content-Length", str(len(response)))
request.setHeader("Content-Type", self.content_type)
request.setHeader("X-Content-Type-Options", "nosniff")
request.write(response)
request.finish()
return response
        def write_error(failure):
            if failure.check(APIError):
                status = failure.value.status
                body = failure.value.response  # assumed; this branch's body was elided
            else:
                log.err(failure)
                body = "Server error"
                status = 500
            request.setResponseCode(status)
            write_response(body)
deferred.addCallback(write_response)
deferred.addErrback(write_error)
return deferred
|
Handle an HTTP request for executing an API call.
This method authenticates the request checking its signature, and then
calls the C{execute} method, passing it a L{Call} object set with the
principal for the authenticated user and the generic parameters
extracted from the request.
@param request: The L{HTTPRequest} to handle.
|
23,913 |
def summary( self ):
child_text = []
for c in range(self.childCount()):
child = self.child(c)
text = [child.text(0), child.text(1), child.text(2), child.text(3)]
text = map(str, text)
            while '' in text:
                text.remove('')
            child_text.append(' '.join(text))  # separators elided; space and newline assumed
        return '\n'.join(child_text)
|
Creates a text string representing the current query and its children
for this item.
:return <str>
|
23,914 |
def queryMore(self, queryLocator):
self._setHeaders()
return self._sforce.service.queryMore(queryLocator)
|
Retrieves the next batch of objects from a query.
|
23,915 |
def axis_angle_to_rotation_matrix(v, theta):
if np.abs(theta) < np.spacing(1):
return np.eye(3)
else:
        v = v.reshape(3, 1)
        np.testing.assert_almost_equal(np.linalg.norm(v), 1.)
        # build the skew-symmetric cross-product matrix from scalar
        # components so vx stays a plain (3, 3) array
        vx = np.array([[0, -v[2, 0], v[1, 0]],
                       [v[2, 0], 0, -v[0, 0]],
                       [-v[1, 0], v[0, 0], 0]])
vvt = np.dot(v, v.T)
R = np.eye(3)*np.cos(theta) + (1 - np.cos(theta))*vvt + vx * np.sin(theta)
return R
|
Convert rotation from axis-angle to rotation matrix
Parameters
---------------
v : (3,) ndarray
Rotation axis (normalized)
theta : float
Rotation angle (radians)
Returns
----------------
R : (3,3) ndarray
Rotation matrix
|
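An illustrative sanity check of the Rodrigues construction above (not from the source): a 90-degree rotation about the z axis should map the x axis onto the y axis.

import numpy as np

R = axis_angle_to_rotation_matrix(np.array([0.0, 0.0, 1.0]), np.pi / 2)
# x axis -> y axis under a quarter turn about z
np.testing.assert_allclose(R.dot([1.0, 0.0, 0.0]), [0.0, 1.0, 0.0], atol=1e-12)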
23,916 |
def substitute_any_type(type_: Type, basic_types: Set[BasicType]) -> List[Type]:
if type_ == ANY_TYPE:
return list(basic_types)
if isinstance(type_, BasicType):
return [type_]
return type_.substitute_any_type(basic_types)
|
Takes a type and a set of basic types, and substitutes all instances of ANY_TYPE with all
possible basic types and returns a list with all possible combinations. Note that this
substitution is unconstrained. That is, if you have a type with placeholders, <#1,#1> for
example, this may substitute the placeholders with different basic types. In that case, you'd
want to use ``_substitute_placeholder_type`` instead.
|
23,917 |
def get_chacra_repo(shaman_url):
shaman_response = get_request(shaman_url)
chacra_url = shaman_response.geturl()
chacra_response = get_request(chacra_url)
return chacra_response.read()
|
From a Shaman URL, get the chacra url for a repository, read the
contents that point to the repo and return it as a string.
|
23,918 |
def _enable(name, started, result=True, skip_verify=False, **kwargs):
    ret = {}
    if not skip_verify:
        try:
            if not _available(name, ret):
                return ret
        except CommandExecutionError as exc:
            ret['result'] = False
            ret['comment'] = exc.strerror
            return ret
    ret['result'] = result
    # dict keys and comment strings below were elided in extraction and are
    # restored approximately from salt.states.service conventions
    if 'service.enable' not in __salt__ or 'service.enabled' not in __salt__:
        if started is True:
            ret['comment'] = ('Enable is not available on this minion,'
                              ' service {0} started').format(name)
        elif started is None:
            ret['comment'] = ('Enable is not available on this minion,'
                              ' service {0} is in the desired state'
                              ).format(name)
        else:
            ret['comment'] = ('Enable is not available on this minion,'
                              ' service {0} is dead').format(name)
        return ret
    before_toggle_enable_status = __salt__['service.enabled'](name, **kwargs)
    if before_toggle_enable_status:
        if started is True:
            ret['comment'] = ('Service {0} is already enabled,'
                              ' and is running').format(name)
        elif started is None:
            ret['changes'] = {}
            ret['comment'] = ('Service {0} is already enabled,'
                              ' and is in the desired state').format(name)
        else:
            ret['comment'] = ('Service {0} is already enabled,'
                              ' and is dead').format(name)
        return ret
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Service {0} set to be enabled'.format(name)
        return ret
    try:
        if __salt__['service.enable'](name, **kwargs):
            ret['changes'] = {}
            after_toggle_enable_status = __salt__['service.enabled'](
                name,
                **kwargs)
            if before_toggle_enable_status != after_toggle_enable_status:
                ret['changes'][name] = True
            if started is True:
                ret['comment'] = ('Service {0} has been enabled,'
                                  ' and is running').format(name)
            elif started is None:
                ret['comment'] = ('Service {0} has been enabled,'
                                  ' and is in the desired state').format(name)
            else:
                ret['comment'] = ('Service {0} has been enabled,'
                                  ' and is dead').format(name)
            return ret
    except CommandExecutionError as exc:
        enable_error = exc.strerror
    else:
        enable_error = False
    ret['result'] = False
    if started is True:
        ret['comment'] = ('Failed when setting service {0} to start at boot,'
                          ' but the service is running').format(name)
    elif started is None:
        ret['comment'] = ('Failed when setting service {0} to start at boot,'
                          ' but the service was already running').format(name)
    else:
        ret['comment'] = ('Failed when setting service {0} to start at boot,'
                          ' and the service is dead').format(name)
    if enable_error:
        ret['comment'] += '. Additional information follows:\n\n{0}'.format(
            enable_error
        )
    return ret
|
Enable the service
|
23,919 |
def parse_version(version):
release_type_found = False
    release_type_found = False
    # the regex literal was elided; split on dots and runs of letters
    version_infos = re.split(r'(\.|[a-z]+)', version)
    version = []
    for info in version_infos:
        if info == '.' or len(info) == 0:
            continue
        try:
            info = int(info)
            version.append("%06d" % (info,))
        except ValueError:
            if len(version) == 1:
                version.append("00000")
            if len(version) == 2:
                version.append("000000")
            if info == 'dev':
                info = 'c'  # remapped literal elided; ordering of 'dev' assumed
            version.append(info)
release_type_found = True
if release_type_found is False:
if len(version) == 1:
version.append("00000")
if len(version) == 2:
version.append("000000")
version.append("final")
return tuple(version)
|
Return a comparable tuple from a version string. We try to force the tuple to semver for versions like 1.2.0.
Replaces pkg_resources.parse_version, which now displays a warning when used to compare versions with tuples.
:returns: Version string as comparable tuple
|
23,920 |
def p_definition_list(p):
if len(p) == 3:
p[0] = p[1] + p[2]
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules ")
|
definition_list : definition definition_list
| definition
|
23,921 |
def main(
req_files,
verbose=False,
outdated=False,
latest=False,
verbatim=False,
repo=None,
        path='requirements.txt',
        token=None,
        branch='master',
url=None,
delay=None,
):
requirements = []
if repo:
github_url = build_github_url(repo, branch, path, token)
req_file = get_requirements_file_from_url(github_url)
requirements.extend(parse_req_file(req_file))
elif url:
req_file = get_requirements_file_from_url(url)
requirements.extend(parse_req_file(req_file))
else:
for req_file in req_files:
requirements.extend(parse_req_file(req_file, verbatim=verbatim))
req_file.close()
total_time_delta = 0
max_outdated_time = 0
session = FuturesSession()
results = []
for req, version, ignore in requirements:
if verbatim and not req:
results.append(version)
elif req:
            results.append({
                'req': req,
                'version': version,
                'ignore': ignore,
                'latest': session.get(get_pypi_url(req)),
                'specified': session.get(get_pypi_url(req, version))
            })
for result in results:
if isinstance(result, str):
            print(result.replace('\n', ''))  # replace args elided; newline-strip assumed
continue
        if result['ignore']:
            if verbatim:
                print('{}=={}'.format(result['req'], result['version']))
            else:
                print('Ignoring updates for {}'.format(result['req']))  # message paraphrased
            continue
        req = result['req']
        version = result['version']
        latest_version, latest_release_date = get_version_and_release_date(
            req, verbose=verbose, response=result['latest'].result()
        )
        specified_version, specified_release_date = \
            get_version_and_release_date(
                req, version, response=result['specified'].result()
            )
if latest_release_date and specified_release_date:
time_delta = (latest_release_date - specified_release_date).days
total_time_delta = total_time_delta + time_delta
max_outdated_time = max(time_delta, max_outdated_time)
            if verbose:
                # output format strings elided in extraction; paraphrased
                if time_delta > 0:
                    print(
                        '{} ({}) is {} days out of date. Latest is {}'.format(
                            req, version, time_delta, latest_version))
                elif version != latest_version:
                    print(
                        '{} ({}) is out of date. Latest is {}'.format(
                            req, version, latest_version))
                elif not outdated:
                    print('{} ({}) is up to date'.format(req, version))
            if latest and latest_version != specified_version:
                print('{}=={}  # Updated from {}'.format(req, latest_version,
                                                         specified_version))
            elif verbatim and latest_version != specified_version:
                print('{}=={}  # Latest {}'.format(req, specified_version,
                                                   latest_version))
            elif verbatim:
                print('{}=={}'.format(req, specified_version))
        elif verbatim:
            print(
                '{}=={}  # Error checking latest version'.format(req, version)
            )
verbatim_str = ""
if verbatim:
verbatim_str = "
if total_time_delta > 0 and delay is None:
print("{}Your requirements are {} "
"days out of date".format(verbatim_str, total_time_delta))
sys.exit(1)
elif delay is not None and max_outdated_time > int(delay):
print("{}At least one of your dependancies is {} "
"days out of date which is more than the allowed"
"{} days.".format(verbatim_str, max_outdated_time, delay))
sys.exit(1)
elif delay is not None and max_outdated_time <= int(delay):
print("{}All of your dependancies are at most {} "
"days out of date.".format(verbatim_str, delay))
sys.exit(1)
else:
print("{}Looks like you've been keeping up to date, "
"time for a delicious beverage!".format(verbatim_str))
|
Given a list of requirements files reports which requirements are out
of date.
    Everything is rather self-explanatory:
    - verbose makes things a little louder
    - outdated forces piprot to only report out-of-date packages
    - latest outputs the requirements line with the latest version
    - verbatim outputs the requirements file as-is - with comments showing the
    latest versions (can be used with latest to output the latest with the
    old version in the comment)
    - delay specifies a time range during which an outdated package is allowed
|
23,922 |
def calculate_within_class_scatter_matrix(X, y):
mean_vectors = calculate_mean_vectors(X, y)
n_features = X.shape[1]
Sw = np.zeros((n_features, n_features))
for cl, m in zip(np.unique(y), mean_vectors):
Si = np.zeros((n_features, n_features))
m = m.reshape(n_features, 1)
for x in X[y == cl, :]:
v = x.reshape(n_features, 1) - m
Si += v @ v.T
Sw += Si
return Sw
|
Calculates the Within-Class Scatter matrix
Parameters:
-----------
X : array-like, shape (m, n) - the samples
y : array-like, shape (m, ) - the class labels
Returns:
--------
within_class_scatter_matrix : array-like, shape (n, n)
|
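A tiny worked example of the within-class scatter computation; the `calculate_mean_vectors` helper it relies on is re-sketched inline here as an assumption, not the source's definition:

import numpy as np

def calculate_mean_vectors(X, y):
    # one mean vector per class, in np.unique(y) order
    return [X[y == cl].mean(axis=0) for cl in np.unique(y)]

X = np.array([[1.0, 2.0], [2.0, 3.0], [8.0, 8.0], [9.0, 10.0]])
y = np.array([0, 0, 1, 1])
print(calculate_within_class_scatter_matrix(X, y))
# sums (x - m_c)(x - m_c)^T over the samples of each class -> a (2, 2) matrix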
23,923 |
def read(cls, data):
if isinstance(data, OrderedDict):
return cls(data)
elif isinstance(data, basestring)\
and data.startswith(("http://", "https://", "ftp://", "ftps://")):
return cls(request(data))
elif isinstance(data, basestring):
try:
json_dict = json.loads(data, object_pairs_hook=OrderedDict)
return cls(json_dict)
except ValueError:
raise
else:
try:
json_dict = json.load(data, object_pairs_hook=OrderedDict)
return cls(json_dict)
except ValueError:
raise
|
Reads data from URL or OrderedDict.
Args:
data: can be a URL pointing to a JSONstat file, a JSON string
or an OrderedDict.
Returns:
An object of class Collection populated with data.
|
23,924 |
def _filter_attributes(self, keyset):
filtered = self._filter_keys(self.to_dict(), keyset)
return Language.make(**filtered)
|
Return a copy of this object with a subset of its attributes set.
|
23,925 |
def _time_show(self):
if not self._time_visible:
self._time_visible = True
self._time_window = tk.Toplevel(self)
self._time_window.attributes("-topmost", True)
self._time_window.overrideredirect(True)
self._time_label = ttk.Label(self._time_window)
self._time_label.grid()
self._time_window.lift()
x, y = self.master.winfo_pointerxy()
geometry = "{0}x{1}+{2}+{3}".format(
self._time_label.winfo_width(),
self._time_label.winfo_height(),
x - 15,
self._canvas_ticks.winfo_rooty() - 10)
self._time_window.wm_geometry(geometry)
self._time_label.config(text=TimeLine.get_time_string(self.time, self._unit))
|
Show the time marker window
|
23,926 |
def status(name, runas=None):
    return prlctl('status', salt.utils.data.decode(name), runas=runas)
|
Status of a VM
:param str name:
Name/ID of VM whose status will be returned
:param str runas:
The user that the prlctl command will be run as
Example:
.. code-block:: bash
salt '*' parallels.status macvm runas=macdev
|
23,927 |
def match(self, *command_tokens, **command_env):
mutated_command_tokens = self.mutate_command_tokens(*command_tokens)
if mutated_command_tokens is None:
return False
return self.selector().select(*mutated_command_tokens, **command_env) is not None
|
:meth:`.WCommandProto.match` implementation
|
23,928 |
def scroll_to(self, selector, by=By.CSS_SELECTOR,
timeout=settings.SMALL_TIMEOUT):
if self.demo_mode:
self.slow_scroll_to(selector, by=by, timeout=timeout)
return
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
element = self.wait_for_element_visible(
selector, by=by, timeout=timeout)
try:
self.__scroll_to_element(element)
except (StaleElementReferenceException, ENI_Exception):
self.wait_for_ready_state_complete()
time.sleep(0.05)
element = self.wait_for_element_visible(
selector, by=by, timeout=timeout)
self.__scroll_to_element(element)
|
Fast scroll to destination
|
23,929 |
def cli(context, host, username, password):
context.obj = FritzBox(host, username, password)
|
FritzBox SmartHome Tool
\b
Provides the following functions:
    - An easy-to-use library for querying SmartHome actors
    - This CLI tool for testing
    - A carbon client for piping data into graphite
|
23,930 |
def disable_host_svc_notifications(self, host):
for service_id in host.services:
if service_id in self.daemon.services:
service = self.daemon.services[service_id]
self.disable_svc_notifications(service)
self.send_an_element(service.get_update_status_brok())
|
Disable services notifications for a host
Format of the line that triggers function call::
DISABLE_HOST_SVC_NOTIFICATIONS;<host_name>
:param host: host to edit
:type host: alignak.objects.host.Host
:return: None
|
23,931 |
def update_pypsa_bus_timeseries(network, timesteps=None):
if timesteps is None:
timesteps = network.pypsa.buses_t.v_mag_pu_set.index
if not hasattr(timesteps, "__len__"):
timesteps = [timesteps]
buses = network.pypsa.buses.index
v_mag_pu_set = _pypsa_bus_timeseries(network, buses, timesteps)
network.pypsa.buses_t.v_mag_pu_set = v_mag_pu_set
|
Updates buses voltage time series in pypsa representation.
This function overwrites v_mag_pu_set of buses_t attribute of
pypsa network.
Be aware that if you call this function with `timesteps` and thus overwrite
current time steps it may lead to inconsistencies in the pypsa network
since only bus time series are updated but none of the other time
series or the snapshots attribute of the pypsa network. Use the function
:func:`update_pypsa_timeseries` to change the time steps you want to
analyse in the power flow analysis.
Parameters
----------
network : Network
The eDisGo grid topology model overall container
timesteps : :pandas:`pandas.DatetimeIndex<datetimeindex>` or :pandas:`pandas.Timestamp<timestamp>`
Timesteps specifies which time steps of the time series to
export to pypsa representation. If None all time steps currently
existing in pypsa representation are updated. If not None current
time steps are overwritten by given time steps. Default: None.
|
23,932 |
def set_autocut_params(self, method, **params):
self.logger.debug("Setting autocut params method=%s params=%s" % (
method, str(params)))
params = list(params.items())
self.t_.set(autocut_method=method, autocut_params=params)
|
Set auto-cut parameters.
Parameters
----------
method : str
Auto-cut algorithm. A list of acceptable options can
be obtained by :meth:`get_autocut_methods`.
params : dict
Algorithm-specific keywords and values.
|
23,933 |
def connectSubsystem(connection, protocol, subsystem):
deferred = connectSession(connection, protocol)
@deferred.addCallback
def requestSubsystem(session):
return session.requestSubsystem(subsystem)
return deferred
|
Connect a Protocol to an SSH subsystem channel
|
23,934 |
def Images(vent=True):
images = []
return images
|
Get images that are built; by default, limit to vent images
|
23,935 |
def reference(self):
if self.__reference is None:
self.__reference = _ConstructReference(self.__class__,
pairs=self.__pairs,
app=self.__app,
namespace=self.__namespace)
return self.__reference
|
Return the Reference object for this Key.
This is an entity_pb.Reference instance -- a protocol buffer class
used by the lower-level API to the datastore.
NOTE: The caller should not mutate the return value.
|
23,936 |
def text_editor(file='', background=False, return_cmd=False):
    # The platform-detection block (ftype on Windows, LaunchServices defaults
    # on macOS, `xdg-mime query default text/plain` on Linux) was garbled in
    # extraction; only the Linux .desktop handling below survives. Can't use
    # desktopfile.execute() in order to have working background/return_cmd
    # handling (comment recovered from extraction residue).
    editor_cmd_str = ...  # .desktop entry of the preferred editor (lookup elided)
    editor_cmd_str = desktopfile.parse(
        desktopfile.locate(editor_cmd_str)[0])['Exec']
    for i in editor_cmd_str.split():
        if i.startswith('%'):
            # strip .desktop Exec field codes such as %f or %u
            editor_cmd_str = editor_cmd_str.replace(i, '')
        if i == '':  # comparison literal elided in extraction
            editor_cmd_str = editor_cmd_str.replace(i, '')
    if file:
        editor_cmd_str += ' {}'.format(shlex.quote(file))
    if return_cmd:
        return editor_cmd_str
    text_editor_proc = sp.Popen([editor_cmd_str], shell=True)
    if not background:
        text_editor_proc.wait()
|
Starts the default graphical text editor.
Start the user's preferred graphical text editor, optionally with a file.
Args:
file (str) : The file to be opened with the editor. Defaults to an empty string (i.e. no file).
background (bool): Runs the editor in the background, instead of waiting for completion. Defaults to ``False``.
return_cmd (bool): Returns the command (str) to run the editor instead of running it. Defaults to ``False``.
Returns:
str: Only if ``return_cmd``, the command to run the editor is returned. Else returns nothing.
|
23,937 |
def create_scan(self, host_ips):
now = datetime.datetime.now()
data = {
"uuid": self.get_template_uuid(),
"settings": {
"name": "jackal-" + now.strftime("%Y-%m-%d %H:%M"),
"text_targets": host_ips
}
}
        response = requests.post(self.url + 'scans', data=json.dumps(data),
                                 verify=False, headers=self.headers)
        if response:
            result = json.loads(response.text)
            return result['scan']['id']  # keys elided; Nessus-style response assumed
|
Creates a scan with the given host ips
Returns the scan id of the created object.
|
23,938 |
def filter_rep_set(inF, otuSet):
seqs = []
for record in SeqIO.parse(inF, "fasta"):
if record.id in otuSet:
seqs.append(record)
return seqs
|
Parse the rep set file and remove all sequences not associated with unique
OTUs.
:@type inF: file
:@param inF: The representative sequence set
:@rtype: list
:@return: The set of sequences associated with unique OTUs
|
23,939 |
def netHours(self):
if self.specifiedHours is not None:
return self.specifiedHours
elif self.category in [getConstant(),getConstant()]:
return self.event.duration - sum([sub.netHours for sub in self.replacementFor.all()])
else:
return sum([x.duration for x in self.occurrences.filter(cancelled=False)])
|
For regular event staff, this is the net hours worked for financial purposes.
For Instructors, netHours is calculated net of any substitutes.
|
23,940 |
def find_common_root(elements):
if not elements:
raise UserWarning("Can't find common root - no elements suplied.")
root_path = el_to_path_vector(elements.pop())
for el in elements:
el_path = el_to_path_vector(el)
root_path = common_vector_root(root_path, el_path)
if not root_path:
raise UserWarning(
"Vectors without common root:\n%s" % str(el_path)
)
return root_path
|
Find root which is common for all `elements`.
Args:
elements (list): List of double-linked HTMLElement objects.
Returns:
list: Vector of HTMLElement containing path to common root.
|
23,941 |
def appendNullPadding(str, blocksize=AES_blocksize):
pad_len = paddingLength(len(str), blocksize)
    padding = '\0' * pad_len
return str + padding
|
Pad with null bytes
|
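An illustrative usage, with the `paddingLength` helper it depends on re-sketched under the assumption that it returns the byte count needed to reach the next block boundary:

AES_blocksize = 16  # assumed module constant

def paddingLength(data_len, blocksize=AES_blocksize):
    # bytes needed to reach the next multiple of blocksize
    return (blocksize - data_len % blocksize) % blocksize

print(len(appendNullPadding("ABC")))  # 16: "ABC" plus 13 null bytes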
23,942 |
def _field_name_from_uri(self, uri):
uri = str(uri)
        parts = uri.split('#')
        if len(parts) == 1:
            return uri.split('/')[-1] or uri
return parts[-1]
|
helper, returns the name of an attribute (without namespace prefix)
|
23,943 |
def import_results(log, pathToYamlFile):
import yaml
fileName = pathToYamlFile
    stream = file(fileName, 'r')
    yamlContent = yaml.load(stream)
    # the yaml key literals were elided; keys assumed to match the
    # variable names below
    snSurveyDiscoveryTimes = yamlContent['snSurveyDiscoveryTimes']
    lightCurveDiscoveryTimes = yamlContent[
        'lightCurveDiscoveryTimes']
    snTypes = yamlContent['snTypes']
    redshifts = yamlContent['redshifts']
    cadenceDictionary = yamlContent['cadenceDictionary']
    peakAppMagList = yamlContent['peakAppMagList']
    snCampaignLengthList = yamlContent['snCampaignLengthList']
stream.close()
return snSurveyDiscoveryTimes, lightCurveDiscoveryTimes, snTypes, redshifts, cadenceDictionary, peakAppMagList, snCampaignLengthList
|
*Import the results of the simulation (the filename is an argument of this model)*
**Key Arguments:**
- ``log`` -- logger
- ``pathToYamlFile`` -- the path to the yaml file to be imported
**Return:**
- snSurveyDiscoveryTimes, lightCurveDiscoveryTimes, snTypes, redshifts, cadenceDictionary, peakAppMagList, snCampaignLengthList
|
23,944 |
def get_arguments(self):
PluginBase.get_arguments(self)
if self.args.organizationName is not None:
self.organizationName = self.args.organizationName
if self.args.repositoryName is not None:
self.repositoryName = self.args.repositoryName
self.path = "v1/plugins/private/{0}/{1}/{2}".format(self.pluginName, self.organizationName, self.repositoryName)
|
Extracts the specific arguments of this CLI
|
23,945 |
def sequence_equal(self, second_iterable, equality_comparer=operator.eq):
if self.closed():
raise ValueError("Attempt to call to_tuple() on a closed Queryable.")
if not is_iterable(second_iterable):
raise TypeError("Cannot compute sequence_equal() with second_iterable of non-iterable {type}".format(
type=str(type(second_iterable))[7: -1]))
if not is_callable(equality_comparer):
raise TypeError("aggregate() parameter equality_comparer={equality_comparer} is not callable".format(
equality_comparer=repr(equality_comparer)))
try:
if len(self._iterable) != len(second_iterable):
return False
except TypeError:
pass
sentinel = object()
for first, second in izip_longest(self, second_iterable, fillvalue=sentinel):
if first is sentinel or second is sentinel:
return False
if not equality_comparer(first, second):
return False
return True
|
Determine whether two sequences are equal by elementwise comparison.
Sequence equality is defined as the two sequences being equal length
and corresponding elements being equal as determined by the equality
comparer.
Note: This method uses immediate execution.
Args:
second_iterable: The sequence which will be compared with the
source sequence.
equality_comparer: An optional binary predicate function which is
used to compare corresponding elements. Should return True if
the elements are equal, otherwise False. The default equality
comparer is operator.eq which calls __eq__ on elements of the
source sequence with the corresponding element of the second
sequence as a parameter.
Returns:
True if the sequences are equal, otherwise False.
Raises:
ValueError: If the Queryable is closed.
TypeError: If second_iterable is not in fact iterable.
TypeError: If equality_comparer is not callable.
|
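A hedged usage sketch, assuming this method belongs to an asq-style Queryable built via `query`:

from asq.initiators import query

print(query([1, 2, 3]).sequence_equal([1, 2, 3]))   # True
print(query([1, 2]).sequence_equal([1, 2, 3]))      # False (length differs)
print(query(["a", "B"]).sequence_equal(
    ["A", "b"], equality_comparer=lambda x, y: x.lower() == y.lower()))  # True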
23,946 |
def weighted_choice(self, probabilities, key):
try:
choice = self.values[key].lower()
except KeyError:
return super(RecordingParameters, self)\
.weighted_choice(probabilities, key)
for probability, option in probabilities:
if str(option).lower() == choice:
return option
for probability, option in probabilities:
if option.__name__.lower() == choice:
return option
assert False, "Invalid value provided"
|
Makes a weighted choice between several options.
Probabilities is a list of 2-tuples, (probability, option). The
probabilities don't need to add up to anything, they are automatically
scaled.
|
23,947 |
def initial(self, request, *args, **kwargs):
super(NodeLinkList, self).initial(request, *args, **kwargs)
try:
            self.node = Node.objects.published()\
                            .accessible_to(request.user)\
                            .get(slug=self.kwargs.get('slug', None))
        except Node.DoesNotExist:
            raise Http404(_('Node not found'))  # message elided; paraphrased
        self.check_object_permissions(request, self.node)
        self.queryset = Link.objects.select_related('node_a', 'node_b')\
.accessible_to(self.request.user)\
.filter(Q(node_a_id=self.node.id) |
Q(node_b_id=self.node.id))
|
Custom initial method:
* ensure node exists and store it in an instance attribute
* change queryset to return only links of current node
|
23,948 |
def to_element(self, root_name=None):
if not root_name:
root_name = self.nodename
elem = ElementTreeBuilder.Element(root_name)
for attrname in self.serializable_attributes():
try:
value = self.__dict__[attrname]
except KeyError:
continue
if attrname in self.xml_attribute_attributes:
elem.attrib[attrname] = six.text_type(value)
else:
sub_elem = self.element_for_value(attrname, value)
elem.append(sub_elem)
return elem
|
Serialize this `Resource` instance to an XML element.
|
23,949 |
def get_all_assignable_users_for_project(self, project_key, start=0, limit=50):
        url = 'rest/api/2/user/assignable/search?project={project_key}&startAt={start}&maxResults={limit}'.format(
project_key=project_key,
start=start,
limit=limit)
return self.get(url)
|
Provide assignable users for project
:param project_key:
:param start: OPTIONAL: The start point of the collection to return. Default: 0.
:param limit: OPTIONAL: The limit of the number of users to return, this may be restricted by
fixed system limits. Default by built-in method: 50
:return:
|
23,950 |
def _bash_completion(self):
commands = set()
options = set()
for option, _action in self.parser._option_string_actions.items():
options.add(option)
for _name, _command in self.command_manager:
commands.add(_name)
cmd_factory = _command.load()
cmd = cmd_factory(self, None)
cmd_parser = cmd.get_parser()
for option, _action in cmd_parser._option_string_actions.items():
options.add(option)
        print(' '.join(commands | options))
|
Prints all of the commands and options for bash-completion.
|
23,951 |
def _factory(slice_, axis, weighted):
if slice_.dim_types[0] == DT.MR_SUBVAR:
return _MrXCatPairwiseSignificance(slice_, axis, weighted)
return _CatXCatPairwiseSignificance(slice_, axis, weighted)
|
Return the subclass for PairwiseSignificance, based on slice dimension types.
|
23,952 |
def sign_data(self, data, expires_in=None, url_safe=True):
if url_safe:
return utils.sign_url_safe(data,
secret_key=self.secret_key,
salt=self.user_salt,
expires_in=expires_in)
else:
return utils.sign_data(data,
secret_key=self.secret_key,
salt=self.user_salt,
expires_in=expires_in)
|
Safely sign user data. It will be signed with the user key
:param data: mixed
:param expires_in: The time for it to expire
:param url_safe: bool. If true it will allow it to be passed in URL
:return: str - the token/signed data
|
23,953 |
def linestring_to_utm(linestring: LineString) -> LineString:
proj = lambda x, y: utm.from_latlon(y, x)[:2]
return transform(proj, linestring)
|
Given a Shapely LineString in WGS84 coordinates,
convert it to the appropriate UTM coordinates.
|
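Illustrative usage (depends on the third-party `shapely` and `utm` packages the function imports; coordinates are arbitrary):

from shapely.geometry import LineString

line = LineString([(13.3777, 52.5163), (13.4050, 52.5200)])  # lon/lat, WGS84
print(linestring_to_utm(line))  # the same line in UTM easting/northing (metres)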
23,954 |
def delete_resource(self, resource, resource_id,
msg="resource", max_wait=120):
        # log message literals were elided in extraction; paraphrased below
        self.log.debug(
            'Deleting resource {} ({})'.format(resource_id, msg))
        num_before = len(list(resource.list()))
        resource.delete(resource_id)
        tries = 0
        num_after = len(list(resource.list()))
        while num_after != (num_before - 1) and tries < (max_wait / 4):
            self.log.debug(
                '{} delete check: {} [{}:{}] {}'.format(msg, tries,
                                                        num_before,
                                                        num_after,
                                                        resource_id))
            time.sleep(4)
            num_after = len(list(resource.list()))
            tries += 1
        self.log.debug(
            '{}: expected, actual count = {}, {}'.format(msg, num_before - 1,
                                                         num_after))
        if num_after == (num_before - 1):
            return True
        else:
            self.log.error('{} delete timed out'.format(msg))
            return False
|
Delete one openstack resource, such as one instance, keypair,
image, volume, stack, etc., and confirm deletion within max wait time.
:param resource: pointer to os resource type, ex:glance_client.images
:param resource_id: unique name or id for the openstack resource
:param msg: text to identify purpose in logging
:param max_wait: maximum wait time in seconds
:returns: True if successful, otherwise False
|
23,955 |
def shorten(text):
if len(text) >= MAX_DISPLAY_LEN:
text = text[:MAX_DISPLAY_LEN//2]+"..."+text[-MAX_DISPLAY_LEN//2:]
return text
|
Reduce text length for displaying / logging purposes.
|
23,956 |
def now(cls, Name = None):
self = cls()
if Name is not None:
self.Name = Name
self.pcdata = datetime.datetime.utcnow()
return self
|
Instantiate a Time element initialized to the current UTC
time in the default format (ISO-8601). The Name attribute
will be set to the value of the Name parameter if given.
|
23,957 |
def fcoe_get_login_output_fcoe_login_list_interface_name(self, **kwargs):
config = ET.Element("config")
fcoe_get_login = ET.Element("fcoe_get_login")
config = fcoe_get_login
output = ET.SubElement(fcoe_get_login, "output")
fcoe_login_list = ET.SubElement(output, "fcoe-login-list")
fcoe_login_session_mac_key = ET.SubElement(fcoe_login_list, "fcoe-login-session-mac")
        fcoe_login_session_mac_key.text = kwargs.pop('fcoe_login_session_mac')
        interface_name = ET.SubElement(fcoe_login_list, "interface-name")
        interface_name.text = kwargs.pop('interface_name')
        callback = kwargs.pop('callback', self._callback)
return callback(config)
|
Auto Generated Code
|
23,958 |
def wsgi_app(self, environ, start_response):
@_LOCAL_MANAGER.middleware
def _wrapped_app(environ, start_response):
request = Request(environ)
setattr(_local, _CURRENT_REQUEST_KEY, request)
response = self._dispatch_request(request)
return response(environ, start_response)
return _wrapped_app(environ, start_response)
|
A basic WSGI app
|
23,959 |
def shell_call(self, shellcmd):
return(subprocess.call(self.shellsetup + shellcmd, shell=True))
|
Shell call with necessary setup first.
|
23,960 |
def edit(self, name, config, events=github.GithubObject.NotSet, add_events=github.GithubObject.NotSet, remove_events=github.GithubObject.NotSet, active=github.GithubObject.NotSet):
assert isinstance(name, (str, unicode)), name
assert isinstance(config, dict), config
assert events is github.GithubObject.NotSet or all(isinstance(element, (str, unicode)) for element in events), events
assert add_events is github.GithubObject.NotSet or all(isinstance(element, (str, unicode)) for element in add_events), add_events
assert remove_events is github.GithubObject.NotSet or all(isinstance(element, (str, unicode)) for element in remove_events), remove_events
assert active is github.GithubObject.NotSet or isinstance(active, bool), active
post_parameters = {
"name": name,
"config": config,
}
if events is not github.GithubObject.NotSet:
post_parameters["events"] = events
if add_events is not github.GithubObject.NotSet:
post_parameters["add_events"] = add_events
if remove_events is not github.GithubObject.NotSet:
post_parameters["remove_events"] = remove_events
if active is not github.GithubObject.NotSet:
post_parameters["active"] = active
headers, data = self._requester.requestJsonAndCheck(
"PATCH",
self.url,
input=post_parameters
)
self._useAttributes(data)
|
:calls: `PATCH /repos/:owner/:repo/hooks/:id <http://developer.github.com/v3/repos/hooks>`_
:param name: string
:param config: dict
:param events: list of string
:param add_events: list of string
:param remove_events: list of string
:param active: bool
:rtype: None
|
23,961 |
def get_authorization_url(self, client_id=None, instance_id=None,
redirect_uri=None, region=None, scope=None,
state=None):
client_id = client_id or self.client_id
instance_id = instance_id or self.instance_id
redirect_uri = redirect_uri or self.redirect_uri
region = region or self.region
scope = scope or self.scope
state = state or str(uuid.uuid4())
self.state = state
        return Request(
            'GET',
            self.auth_base_url,
            params={
                'client_id': client_id,
                'instance_id': instance_id,
                'redirect_uri': redirect_uri,
                'region': region,
                'response_type': 'code',  # standard OAuth2 param; literal elided
                'scope': scope,
                'state': state
            }
        ).prepare().url, state
|
Generate authorization URL.
Args:
client_id (str): OAuth2 client ID. Defaults to ``None``.
instance_id (str): App Instance ID. Defaults to ``None``.
redirect_uri (str): Redirect URI. Defaults to ``None``.
region (str): App Region. Defaults to ``None``.
scope (str): Permissions. Defaults to ``None``.
state (str): UUID to detect CSRF. Defaults to ``None``.
Returns:
str, str: Auth URL, state
|
23,962 |
def AddTableColumn(self, table, column):
if column not in self._table_columns[table]:
self._table_columns[table].append(column)
|
Add column to table if it is not already there.
|
23,963 |
def update_metric(self, metric, labels, pre_sliced=False):
for current_exec, (texec, islice) in enumerate(zip(self.train_execs, self.slices)):
if not pre_sliced:
labels_slice = [label[islice] for label in labels]
else:
labels_slice = labels[current_exec]
metric.update(labels_slice, texec.outputs)
|
Update evaluation metric with label and current outputs.
|
23,964 |
def _paths_from_env(variable: str, default: List[Path]) -> List[Path]:
value = os.environ.get(variable)
if value:
return [Path(path) for path in value.split(":")]
return default
|
Read an environment variable as a list of paths.
The environment variable with the specified name is read, and its
value split on colons and returned as a list of paths. If the
environment variable is not set, or set to the empty string, the
default value is returned.
Parameters
----------
variable : str
Name of the environment variable.
default : List[Path]
Default value.
Returns
-------
List[Path]
Value from environment or default.
|
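A self-contained usage example (the variable name is illustrative only):

import os
from pathlib import Path

os.environ["MYAPP_PATH"] = "/usr/share/myapp:/opt/myapp"
print(_paths_from_env("MYAPP_PATH", [Path("/etc/myapp")]))
# [PosixPath('/usr/share/myapp'), PosixPath('/opt/myapp')]

del os.environ["MYAPP_PATH"]
print(_paths_from_env("MYAPP_PATH", [Path("/etc/myapp")]))
# [PosixPath('/etc/myapp')]  (default, since the variable is unset)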
23,965 |
def dist(
self,
src,
tar,
word_approx_min=0.3,
char_approx_min=0.73,
tests=2 ** 12 - 1,
):
return (
synoname(src, tar, word_approx_min, char_approx_min, tests, False)
/ 14
)
|
Return the normalized Synoname distance between two words.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
word_approx_min : float
The minimum word approximation value to signal a 'word_approx'
match
char_approx_min : float
The minimum character approximation value to signal a 'char_approx'
match
tests : int or Iterable
Either an integer indicating tests to perform or a list of test
names to perform (defaults to performing all tests)
Returns
-------
float
Normalized Synoname distance
|
23,966 |
def read_data(self, size):
result = list()
while size > 0:
count = min(size, 8)
buf = self.hid.read(count)
if len(buf) < count:
                raise IOError(
                    'failed to read data from HID device')  # message elided; paraphrased
result += buf
size -= count
return result
|
Receive data from the device.
If the read fails for any reason, an :obj:`IOError` exception
is raised.
:param size: the number of bytes to read.
:type size: int
:return: the data received.
:rtype: list(int)
|
23,967 |
def _trim_xpath(self, xpath, prop):
xroot = self._get_xroot_for(prop)
if xroot is None and isinstance(xpath, string_types):
xtags = xpath.split(XPATH_DELIM)
if xtags[-1] in _iso_tag_primitives:
xroot = XPATH_DELIM.join(xtags[:-1])
return xroot
|
Removes primitive type tags from an XPATH
|
23,968 |
def close_other_windows(self):
main_window_handle = self.current_window_handle
for window_handle in self.window_handles:
if window_handle == main_window_handle:
continue
self.switch_to_window(window_handle)
self.close()
self.switch_to_window(main_window_handle)
|
Closes all not current windows. Useful for tests - after each test you
can automatically close all windows.
|
23,969 |
def configure(self, config_file):
        cfg = configparser.RawConfigParser()
        try:
            cfg.readfp(open(config_file))
        except IOError as err:
            # the string literals in this method were elided in extraction;
            # messages, section and option names below are paraphrased/assumed
            logger.critical(
                'Could not read configuration file {}: {}'.format(
                    config_file, err.strerror))
            sys.exit(1)
        logger.info('Reading configuration from ' + config_file)
        if cfg.has_option('milter', 'loglevel'):
            loglevel = cfg.get('milter', 'loglevel')
            loglevel_numeric = getattr(logging, loglevel.upper(), None)
            if not isinstance(loglevel_numeric, int):
                logger.critical(
                    'Invalid loglevel: ' + loglevel)
                exit(1)
            rl = logging.getLogger()
            rl.setLevel(loglevel_numeric)
            logger.debug(
                'Logging at level {}'.format(loglevel))
        section_class_map = {
            'daemon': self,
            'dspam': DspamClient,
            'milter': DspamMilter,
        }
        for section in cfg.sections():
            try:
                class_ = section_class_map[section]
            except KeyError:
                logger.warning('Ignoring unknown config section: ' + section)
                continue
            logger.debug('Configuring section: ' + section)
            dict_options = [
                # the dict-valued option names were elided in extraction;
                # they are parsed into dicts via utils.config_str2dict
            ]
            for option in cfg.options(section):
                if section == 'milter' and option == 'static_user':
                    value = cfg.get('milter', 'static_user')
                    DspamMilter.static_user = value
                    logger.debug(
                        'milter.static_user set to {}'.format(
                            value))
                    continue
                if not hasattr(class_, option):
                    logger.warning(
                        'Ignoring unknown option {}.{}'.format(
                            section, option))
                    continue
                value = cfg.get(section, option)
                if option in dict_options:
                    value = utils.config_str2dict(value)
                elif value.lower() in ['false', 'no']:
                    value = False
                elif value.lower() in ['true', 'yes']:
                    value = True
                setattr(class_, option, value)
                logger.debug(
                    '{}.{} set to {}'.format(
                        section, option, value))
        logger.debug('Configuration finished')
|
Parse configuration, and setup objects to use it.
|
23,970 |
async def peers(self):
response = await self._api.get("/v1/status/peers")
if response.status == 200:
return set(response.body)
|
Returns the current Raft peer set
Returns:
Collection: addresses of peers
This endpoint retrieves the Raft peers for the datacenter in which
the agent is running. It returns a collection of addresses, such as::
[
"10.1.10.12:8300",
"10.1.10.11:8300",
"10.1.10.10:8300"
]
This list of peers is strongly consistent and can be useful in
determining when a given server has successfully joined the cluster.
|
23,971 |
async def set_agent_neighbors(self):
for i in range(len(self.grid)):
for j in range(len(self.grid[0])):
agent = self.grid[i][j]
xy = (self.origin[0] + i, self.origin[1] + j)
                # direction keys were elided in extraction; cardinal names assumed
                nxy = _get_neighbor_xy('N', xy)
                exy = _get_neighbor_xy('E', xy)
                sxy = _get_neighbor_xy('S', xy)
                wxy = _get_neighbor_xy('W', xy)
                if j == 0:
                    naddr = await self._get_xy_address_from_neighbor('N', nxy)
                else:
                    naddr = self.get_xy(nxy, addr=True)
                if i == 0:
                    waddr = await self._get_xy_address_from_neighbor('W', wxy)
                else:
                    waddr = self.get_xy(wxy, addr=True)
                if j == len(self.grid[0]) - 1:
                    saddr = await self._get_xy_address_from_neighbor('S', sxy)
                else:
                    saddr = self.get_xy(sxy, addr=True)
                if i == len(self.grid) - 1:
                    eaddr = await self._get_xy_address_from_neighbor('E', exy)
                else:
                    eaddr = self.get_xy(exy, addr=True)
                agent.neighbors['N'] = naddr
                agent.neighbors['E'] = eaddr
                agent.neighbors['S'] = saddr
                agent.neighbors['W'] = waddr
|
Set neighbors for each agent in each cardinal direction.
This method assumes that the neighboring :class:`GridEnvironment` of
this grid environment have already been set.
|
23,972 |
def page(self, friendly_name=values.unset,
evaluate_worker_attributes=values.unset, worker_sid=values.unset,
page_token=values.unset, page_number=values.unset,
page_size=values.unset):
        params = values.of({
            'FriendlyName': friendly_name,
            'EvaluateWorkerAttributes': evaluate_worker_attributes,
            'WorkerSid': worker_sid,
            'PageToken': page_token,
            'Page': page_number,
            'PageSize': page_size,
        })
        response = self._version.page(
            'GET',
            self._uri,
            params=params,
        )
return TaskQueuePage(self._version, response, self._solution)
|
Retrieve a single page of TaskQueueInstance records from the API.
Request is executed immediately
:param unicode friendly_name: Filter by a human readable description of a TaskQueue
:param unicode evaluate_worker_attributes: Provide a Worker attributes expression, and this will return the list of TaskQueues that would distribute tasks to a worker with these attributes.
:param unicode worker_sid: The worker_sid
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of TaskQueueInstance
:rtype: twilio.rest.taskrouter.v1.workspace.task_queue.TaskQueuePage
|
23,973 |
def get_aligned_adjacent_coords(x, y):
return [(x-1, y), (x-1, y-1), (x, y-1), (x+1, y-1), (x+1, y), (x+1, y+1), (x, y+1), (x-1, y+1)]
|
returns the eight clockwise adjacent coordinates on a keypad, where each row is vertically aligned.
|
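For example, the eight neighbours of (1, 1), starting at the left and moving clockwise:

print(get_aligned_adjacent_coords(1, 1))
# [(0, 1), (0, 0), (1, 0), (2, 0), (2, 1), (2, 2), (1, 2), (0, 2)]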
23,974 |
def _get_host_only_mac_address():
vm_config = _get_vm_config()
for line in vm_config:
        if line.startswith('hostonlyadapter'):
            adapter_number = int(line[15:16])
            break
    else:
        raise ValueError('No host-only adapter found')  # message elided; paraphrased
    for line in vm_config:
        if line.startswith('macaddress{0}'.format(adapter_number)):
            return line.split('=')[1].strip('"').lower()
    raise ValueError('No MAC address found for adapter {0}'.format(adapter_number))
|
Returns the MAC address assigned to the host-only adapter,
using output from VBoxManage. Returned MAC address has no colons
and is lower-cased.
|
23,975 |
def _get_attach_id(self, key, value, attributes):
if isinstance(value, dict):
key = list(value.keys())[0]
attributes.update(value[key])
return [key, attributes]
return value, attributes
|
Get the attach record ID and extra attributes.
|
23,976 |
def color(colors, export_type, output_file=None):
all_colors = flatten_colors(colors)
template_name = get_export_type(export_type)
template_file = os.path.join(MODULE_DIR, "templates", template_name)
output_file = output_file or os.path.join(CACHE_DIR, template_name)
if os.path.isfile(template_file):
template(all_colors, template_file, output_file)
logging.info("Exported %s.", export_type)
else:
logging.warning("Template doesn't exist.", export_type)
|
Export a single template file.
|
23,977 |
def _determine_namespaces(self):
ns_insts = None
ns_classname = None
interop_ns = self.interop_ns
for classname in self.NAMESPACE_CLASSNAMES:
try:
ns_insts = self._conn.EnumerateInstances(
classname, namespace=interop_ns)
except CIMError as exc:
if exc.status_code in (CIM_ERR_INVALID_CLASS,
CIM_ERR_NOT_FOUND):
continue
else:
raise
else:
ns_classname = classname
break
if ns_insts is None:
raise CIMError(
CIM_ERR_NOT_FOUND,
_format("Namespace class could not be determined "
"(tried {0!A})", self.NAMESPACE_CLASSNAMES),
conn_id=self.conn.conn_id)
self._namespace_classname = ns_classname
        self._namespaces = [inst['Name'] for inst in ns_insts]
self._namespace_paths = [inst.path for inst in ns_insts]
namespaces_lower = [ns.lower() for ns in self._namespaces]
if interop_ns.lower() not in namespaces_lower:
warnings.warn(
_format("Server at {0} has an Interop namespace {1!A}, but "
"does not return it when enumerating class {2!A} "
"- adding it to the property",
self.conn.url, interop_ns, ns_classname),
ToleratedServerIssueWarning, stacklevel=2)
self._namespaces.append(interop_ns)
|
Determine the names of all namespaces of the WBEM server, by
communicating with it and enumerating the instances of a number of
possible CIM classes that typically represent CIM namespaces. Their
class names are defined in the :attr:`NAMESPACE_CLASSNAMES`
class variable.
If the namespaces could be determined, this method sets the following
properties of this object:
* :attr:`namespace_classname`
* :attr:`namespaces`
* :attr:`namespace_paths`
Otherwise, it raises an exception.
Note that there is at least one WBEM server that implements an Interop
namespace but does not represent that with a CIM instance. In that
case, the :attr:`namespaces` property will include the Interop
namespace, but the :attr:`namespace_paths` property will not.
Raises:
Exceptions raised by :class:`~pywbem.WBEMConnection`.
ModelError: An issue with the model implemented by the WBEM server.
CIMError: CIM_ERR_NOT_FOUND, Interop namespace could not be
determined.
CIMError: CIM_ERR_NOT_FOUND, Namespace class could not be
determined.
|
23,978 |
def wrap_json_response(func=None, *, encoder=json.JSONEncoder):
if func is None:
return functools.partial(wrap_json_response, encoder=encoder)
@functools.wraps(func)
def wrapper(request, *args, **kwargs):
response = func(request, *args, **kwargs)
if "Content-Type" in response.headers and response.headers[] is not None:
ctype, pdict = parse_header(response.headers.get(, ))
if ctype == "application/json" and (isinstance(response.body, dict) or isinstance(response.body, list)):
response.body = json.dumps(response.body, cls=encoder)
return response
return wrapper
|
A middleware that JSON-encodes the response body when the
"Content-Type" header is "application/json".
This middleware accepts an optional `encoder` parameter that
allows the user to specify their own json encoder class.
|
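A hedged usage sketch: the wrapped handler returns a response object exposing `.headers` and `.body`, per the middleware above (`Response` here is a hypothetical stand-in for the framework's response class):

@wrap_json_response
def get_user(request):
    response = Response()
    response.headers["Content-Type"] = "application/json"
    response.body = {"id": 1, "name": "alice"}  # dict -> JSON string on return
    return response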
23,979 |
def _call_in_reactor_thread(self, f, *args, **kwargs):
self._reactor.callFromThread(f, *args, **kwargs)
|
Call the given function with args in the reactor thread.
|
23,980 |
def process_terminals(self, word):
length = len(word)
        if word[length - 1] == 'e':
            if self.r2 <= (length - 1):
                word = word[:-1]
            elif self.r1 <= (length - 1):
                if not self.is_short(word[:-1]):
                    word = word[:-1]
        elif word[length - 1] == 'l':
            if self.r2 <= (length - 1) and word[length - 2] == 'l':
                word = word[:-1]
        char_list = [x if x != 'Y' else 'y' for x in word]
        word = ''.join(char_list)
return word
|
Deal with terminal Es and Ls and
convert any uppercase Ys back to lowercase.
|
23,981 |
def _optimize(self, maxIter=1000, c1=1.193, c2=1.193, lookback=0.25, standard_dev=None):
gBests = []
for swarm in self._sample(maxIter, c1, c2, lookback, standard_dev):
gBests.append(self._gbest.copy())
return gBests
|
:param maxIter: maximum number of swarm iterations
:param c1: social weight
:param c2: personal weight
:param lookback: how many particles to assess when considering convergence
:param standard_dev: the standard deviation of the last lookback # of particles used to determine convergence
:return:
|
23,982 |
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
SparkSession._activeSession = self
self._jvm.SparkSession.setActiveSession(self._jsparkSession)
if isinstance(data, DataFrame):
raise TypeError("data is already a DataFrame")
if isinstance(schema, basestring):
schema = _parse_datatype_string(schema)
elif isinstance(schema, (list, tuple)):
            schema = [x.encode('utf-8') if not isinstance(x, str) else x for x in schema]
try:
import pandas
has_pandas = True
except Exception:
has_pandas = False
if has_pandas and isinstance(data, pandas.DataFrame):
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
if self._wrapped._conf.pandasRespectSessionTimeZone():
timezone = self._wrapped._conf.sessionLocalTimeZone()
else:
timezone = None
if schema is None:
                schema = [str(x) if not isinstance(x, basestring) else
                          (x.encode('utf-8') if not isinstance(x, str) else x)
                          for x in data.columns]
if self._wrapped._conf.arrowEnabled() and len(data) > 0:
try:
return self._create_from_pandas_with_arrow(data, schema, timezone)
except Exception as e:
from pyspark.util import _exception_message
if self._wrapped._conf.arrowFallbackEnabled():
                        msg = (
                            "createDataFrame attempted Arrow optimization because "
                            "'spark.sql.execution.arrow.enabled' is set to "
                            "true; however, "
                            "failed by the reason below:\n  %s\n"
                            "Attempting non-optimization as "
                            "'spark.sql.execution.arrow.fallback.enabled' is set to "
                            "true." % _exception_message(e))
                        warnings.warn(msg)
                    else:
                        msg = (
                            "createDataFrame attempted Arrow optimization because "
                            "'spark.sql.execution.arrow.enabled' is set to true, but has "
                            "reached the error below and will not continue because "
                            "automatic fallback with "
                            "'spark.sql.execution.arrow.fallback.enabled' has been set to "
                            "false.\n  %s" % _exception_message(e))
warnings.warn(msg)
raise
data = self._convert_from_pandas(data, schema, timezone)
if isinstance(schema, StructType):
verify_func = _make_type_verifier(schema) if verifySchema else lambda _: True
def prepare(obj):
verify_func(obj)
return obj
elif isinstance(schema, DataType):
dataType = schema
schema = StructType().add("value", schema)
verify_func = _make_type_verifier(
dataType, name="field value") if verifySchema else lambda _: True
def prepare(obj):
verify_func(obj)
return obj,
else:
prepare = lambda obj: obj
if isinstance(data, RDD):
rdd, schema = self._createFromRDD(data.map(prepare), schema, samplingRatio)
else:
rdd, schema = self._createFromLocal(map(prepare, data), schema)
jrdd = self._jvm.SerDeUtil.toJavaArray(rdd._to_java_object_rdd())
jdf = self._jsparkSession.applySchemaToPythonRDD(jrdd.rdd(), schema.json())
df = DataFrame(jdf, self._wrapped)
df._schema = schema
return df
|
Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
When ``schema`` is a list of column names, the type of each column
will be inferred from ``data``.
When ``schema`` is ``None``, it will try to infer the schema (column names and types)
from ``data``, which should be an RDD of :class:`Row`,
or :class:`namedtuple`, or :class:`dict`.
When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string, it must match
the real data, or an exception will be thrown at runtime. If the given schema is not
:class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value",
each record will also be wrapped into a tuple, which can be converted to row later.
If schema inference is needed, ``samplingRatio`` is used to determined the ratio of
rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
:param data: an RDD of any kind of SQL data representation(e.g. row, tuple, int, boolean,
etc.), or :class:`list`, or :class:`pandas.DataFrame`.
:param schema: a :class:`pyspark.sql.types.DataType` or a datatype string or a list of
column names, default is ``None``. The data type string format equals to
:class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`. We can also use
``int`` as a short name for ``IntegerType``.
:param samplingRatio: the sample ratio of rows used for inferring
:param verifySchema: verify data types of every row against schema.
:return: :class:`DataFrame`
.. versionchanged:: 2.1
Added verifySchema.
.. note:: Usage with spark.sql.execution.arrow.enabled=True is experimental.
>>> l = [('Alice', 1)]
>>> spark.createDataFrame(l).collect()
[Row(_1=u'Alice', _2=1)]
>>> spark.createDataFrame(l, ['name', 'age']).collect()
[Row(name=u'Alice', age=1)]
>>> d = [{'name': 'Alice', 'age': 1}]
>>> spark.createDataFrame(d).collect()
[Row(age=1, name=u'Alice')]
>>> rdd = sc.parallelize(l)
>>> spark.createDataFrame(rdd).collect()
[Row(_1=u'Alice', _2=1)]
>>> df = spark.createDataFrame(rdd, ['name', 'age'])
>>> df.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql import Row
>>> Person = Row('name', 'age')
>>> person = rdd.map(lambda r: Person(*r))
>>> df2 = spark.createDataFrame(person)
>>> df2.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql.types import *
>>> schema = StructType([
... StructField("name", StringType(), True),
... StructField("age", IntegerType(), True)])
>>> df3 = spark.createDataFrame(rdd, schema)
>>> df3.collect()
[Row(name=u'Alice', age=1)]
>>> spark.createDataFrame(df.toPandas()).collect() # doctest: +SKIP
[Row(name=u'Alice', age=1)]
>>> spark.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP
[Row(0=1, 1=2)]
>>> spark.createDataFrame(rdd, "a: string, b: int").collect()
[Row(a=u'Alice', b=1)]
>>> rdd = rdd.map(lambda row: row[1])
>>> spark.createDataFrame(rdd, "int").collect()
[Row(value=1)]
>>> spark.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError: ...
|
23,983 |
def fit(self, blocks, y=None):
feature_array = self.feature.fit_transform(blocks)
self.scaler = self.scaler.fit(feature_array)
return self
|
Args:
blocks (List[Block]): as output by :class:`Blockifier.blockify`
y (None): This isn't used, it's only here for API consistency.
Returns:
:class:`StandardizedFeature`: an instance of this class with the
``self.scaler`` attribute fit to the ``blocks`` data
Note:
When fitting the :class:`StandardScaler` object, you'll probably
want to determine the mean and/or std of *multiple* HTML files'
blocks, rather than just a single observation. To do that, just
concatenate all of the blocks together in a single iterable.
In contrast, you'll typically apply :meth:`transform` to a *single*
HTML file's blocks at a time.
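For illustration, a hedged sketch of that multi-document pattern (``html1``/``html2``
and the ``Blockifier`` call are hypothetical stand-ins; this assumes ``transform``
behaves like a scikit-learn transformer):
from itertools import chain
# pool blocks from several pages so the scaler sees representative statistics
blocks1 = Blockifier.blockify(html1)
blocks2 = Blockifier.blockify(html2)
feature = feature.fit(list(chain(blocks1, blocks2)))
# then standardize one page's blocks at a time
standardized = feature.transform(blocks1)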
|
23,984 |
def generate_targets(self, local_go_targets=None):
go_roots_by_category = defaultdict(list)
for sr in self.context.source_roots.all_roots():
if 'go' in sr.langs:
go_roots_by_category[sr.category].append(sr.path)
if go_roots_by_category[SourceRootCategories.TEST]:
raise self.InvalidLocalRootsError('Go source roots of category "test" are not supported.')
if go_roots_by_category[SourceRootCategories.UNKNOWN]:
raise self.InvalidLocalRootsError('Go source roots of unknown category are not supported.')
local_roots = go_roots_by_category[SourceRootCategories.SOURCE]
if not local_roots:
raise self.NoLocalRootsError('No Go local source roots are defined.')
if len(local_roots) > 1:
raise self.InvalidLocalRootsError('Expected exactly one Go local source root, found: {}'.format(', '.join(sorted(local_roots))))
local_root = local_roots.pop()
if local_go_targets:
unrooted_locals = {t for t in local_go_targets if t.target_base != local_root}
if unrooted_locals:
raise self.UnrootedLocalSourceError('Local Go targets must live under the source root {}; found: {}'.format(local_root, ', '.join(sorted(t.address.reference() for t in unrooted_locals))))
else:
root = os.path.join(get_buildroot(), local_root)
local_go_targets = self.context.scan(root=root).targets(self.is_local_src)
if not local_go_targets:
return None
remote_roots = go_roots_by_category[SourceRootCategories.THIRDPARTY]
if len(remote_roots) > 1:
raise self.InvalidRemoteRootsError('Expected at most one Go remote source root, found: {}'.format(', '.join(sorted(remote_roots))))
remote_root = remote_roots.pop() if remote_roots else None
generator = GoTargetGenerator(self.import_oracle,
self.context.build_graph,
local_root,
self.get_fetcher_factory(),
generate_remotes=self.get_options().remote,
remote_root=remote_root)
with self.context.new_workunit('go-buildgen', labels=[WorkUnitLabel.MULTITOOL]):
try:
generated = generator.generate(local_go_targets)
return self.GenerationResult(generated=generated,
local_root=local_root,
remote_root=remote_root)
except generator.GenerationError as e:
raise self.GenerationError(e)
|
Generate Go targets in memory to form a complete Go graph.
:param local_go_targets: The local Go targets to fill in a complete target graph for. If
`None`, then all local Go targets under the Go source root are used.
:type local_go_targets: :class:`collections.Iterable` of
:class:`pants.contrib.go.targets.go_local_source.GoLocalSource`
:returns: A generation result if targets were generated, else `None`.
:rtype: :class:`GoBuildgen.GenerationResult`
|
23,985 |
def withColumnRenamed(self, existing, new):
return DataFrame(self._jdf.withColumnRenamed(existing, new), self.sql_ctx)
|
Returns a new :class:`DataFrame` by renaming an existing column.
This is a no-op if schema doesn't contain the given column name.
:param existing: string, name of the existing column to rename.
:param new: string, new name of the column.
>>> df.withColumnRenamed('age', 'age2').collect()
[Row(age2=2, name=u'Alice'), Row(age2=5, name=u'Bob')]
|
23,986 |
def create_settings(sender, **kwargs):
created = kwargs['created']
user = kwargs['instance']
if created:
UserWebNotificationSettings.objects.create(user=user)
UserEmailNotificationSettings.objects.create(user=user)
|
Create web and email notification settings when a new user is created.
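As a hedged sketch, such a handler is typically registered with Django's
``post_save`` signal (the concrete sender model used by this project is an
assumption here):
from django.contrib.auth.models import User
from django.db.models.signals import post_save
# fire create_settings(sender, created=..., instance=...) after each User save
post_save.connect(create_settings, sender=User)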
|
23,987 |
def save(self, prepend_vault_id=''):
assert self.is_valid()
cc_details_map = {
'number': self.cleaned_data['number'],
'cardholder_name': self.cleaned_data['cardholder_name'],
'expiration_date': '%s/%s' %\
(self.cleaned_data['expiration_month'], self.cleaned_data['expiration_year']),
'cvv': self.cleaned_data['cvv'],
'billing_address': {
'postal_code': self.cleaned_data['zip_code'],
}
}
if self.__user_vault:
try:
response = Customer.find(self.__user_vault.vault_id)
cc_info = response.credit_cards[0]
return CreditCard.update(cc_info.token, params=cc_details_map)
except Exception, e:
logging.error('Failed to update stored credit card: %s' % e)
self.__user_vault.delete()
new_customer_vault_id = '%s%s' % (prepend_vault_id, md5_hash()[:24])
response = Customer.create({
'id': new_customer_vault_id,
'credit_card': cc_details_map
})
if response.is_success:
UserVault.objects.create(user=self.__user, vault_id=new_customer_vault_id)
return response
|
Adds or updates a user's credit card in the vault.
@prepend_vault_id: any string to prepend to all vault ids, in case the same Braintree account is used by
multiple projects/apps.
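A hedged call-site sketch (``form`` and the 'myapp_' prefix are hypothetical):
if form.is_valid():
    # namespace vault ids so several apps can share one Braintree account
    result = form.save(prepend_vault_id='myapp_')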
|
23,988 |
def message(self, category, subject, msg_file):
users = getattr(self.sub, category)
if not users:
print('No {0} users found on {1}.'.format(category, self.sub))
return
if msg_file:
try:
msg = open(msg_file).read()
except IOError as error:
print(str(error))
return
else:
print()
msg = sys.stdin.read()
print(
.format(.join([str(x) for x in users])))
print(.format(msg))
if input().lower() not in [, ]:
print()
return
for user in users:
user.send_message(subject, msg)
print(.format(user))
|
Send message to all users in `category`.
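A hedged usage sketch (``tool`` is a hypothetical instance of this class; the
file path is a placeholder):
# message every moderator, reading the body from a file
tool.message('moderators', 'Policy update', 'announcement.txt')
# or pass no file to compose the body interactively on stdin
tool.message('contributors', 'Hello', None)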
|
23,989 |
def setBottomLeft(self, loc):
offset = self.getBottomLeft().getOffset(loc)
return self.setLocation(self.getTopLeft().offset(offset))
|
Move this region so its bottom left corner is on ``loc``
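A small sketch of the geometry (assuming a Sikuli-style ``Region``/``Location``
API, as the method names suggest):
# move a 100x50 region so its bottom-left corner lands at (10, 200)
r = Region(0, 0, 100, 50)
r = r.setBottomLeft(Location(10, 200))
# the top-left corner is now (10, 150): bottom-left shifted up by the height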
|
23,990 |
def merge_env(self, env):
current_env = dict(item.split('=', 1) for item in self._env)
self.env = env
new_env = dict(item.split('=', 1) for item in self._env)
current_env.update(new_env)
self.env = current_env
|
:param env: mapping of environment variable names to values to merge into the current environment; on key collisions the new values win.
:return: None; ``self.env`` is updated in place.
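A hedged sketch of the merge semantics (assuming ``_env`` stores ``KEY=VALUE``
strings, as the ``split('=', 1)`` parsing implies; ``runner`` is hypothetical):
runner.env = {'PATH': '/usr/bin', 'LANG': 'C'}
runner.merge_env({'LANG': 'en_US.UTF-8', 'DEBUG': '1'})
# resulting env: PATH=/usr/bin, LANG=en_US.UTF-8, DEBUG=1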
|
23,991 |
def annotate_snapshot(self, snapshot):
if hasattr(snapshot, ):
return
snapshot.classes = {}
for classname in list(self.index.keys()):
total = 0
active = 0
merged = Asized(0, 0)
for tobj in self.index[classname]:
_merge_objects(snapshot.timestamp, merged, tobj)
total += tobj.get_size_at_time(snapshot.timestamp)
if tobj.birth < snapshot.timestamp and \
(tobj.death is None or tobj.death > snapshot.timestamp):
active += 1
try:
pct = total * 100.0 / snapshot.total
except ZeroDivisionError:
pct = 0
try:
avg = total / active
except ZeroDivisionError:
avg = 0
snapshot.classes[classname] = dict(sum=total,
avg=avg,
pct=pct,
active=active)
snapshot.classes[classname][] = merged
|
Store additional statistical data in snapshot.
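A hedged sketch of consuming the annotation (the ``sum``/``active``/``pct`` keys
come from the code above; ``tracker`` and ``snapshot`` are hypothetical instances):
tracker.annotate_snapshot(snapshot)
# report classes by total footprint at this snapshot
for name, stats in sorted(snapshot.classes.items(),
                          key=lambda kv: kv[1]['sum'], reverse=True):
    print(name, stats['sum'], stats['active'], '%.1f%%' % stats['pct'])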
|
23,992 |
def _check_link_completion(self, link, fail_pending=False, fail_running=False):
status_vect = JobStatusVector()
for job_key, job_details in link.jobs.items():
if job_key.find(JobDetails.topkey) >= 0:
continue
job_details.status = self._interface.check_job(job_details)
if job_details.status == JobStatus.pending:
if fail_pending:
job_details.status = JobStatus.failed
elif job_details.status == JobStatus.running:
if fail_running:
job_details.status = JobStatus.failed
status_vect[job_details.status] += 1
link.jobs[job_key] = job_details
link._set_status_self(job_details.jobkey, job_details.status)
return status_vect
|
Internal function to check the completion of all the dispatched jobs
Returns
-------
status_vect : `JobStatusVector`
Vector that summarizes the number of jobs in various states.
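For example, a caller might poll until nothing is pending or running (a hedged
sketch; the ``JobStatus`` members are taken from the code above, ``self`` stands
for the dispatching object):
status_vect = self._check_link_completion(link)
unfinished = status_vect[JobStatus.pending] + status_vect[JobStatus.running]
if unfinished == 0:
    print('link done; %d job(s) failed' % status_vect[JobStatus.failed])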
|
23,993 |
def validate_VALUERANGE(in_value, restriction):
if len(restriction) != 2:
raise ValidationError("Template ERROR: Only two values can be specified in a value range.")
value = _get_val(in_value)
if type(value) is list:
for subval in value:
if type(subval) is tuple:
subval = subval[1]
validate_VALUERANGE(subval, restriction)
else:
min_val = Decimal(restriction[0])
max_val = Decimal(restriction[1])
val = Decimal(value)
if val < min_val or val > max_val:
raise ValidationError("VALUERANGE: %s, %s"%(min_val, max_val))
|
Test to ensure that a value sits between a lower and upper bound.
Parameters: A Decimal value and a tuple, containing a lower and upper bound,
both as Decimal values.
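A hedged sketch of the bounds check (this assumes ``_get_val`` passes plain
scalars through unchanged):
from decimal import Decimal
validate_VALUERANGE('5.5', (Decimal('0'), Decimal('10')))  # in range, returns quietly
validate_VALUERANGE('42', (Decimal('0'), Decimal('10')))   # raises ValidationError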
|
23,994 |
def query(self, sql_query, return_as="dataframe"):
if isinstance(sql_query, str):
pass
elif isinstance(sql_query, unicode):
sql_query = str(sql_query)
else:
raise QueryDbError("query() requires a str or unicode input.")
query = sqlalchemy.sql.text(sql_query)
if return_as.upper() in ["DF", "DATAFRAME"]:
return self._to_df(query, self._engine)
elif return_as.upper() in ["RESULT", "RESULTPROXY"]:
with self._engine.connect() as conn:
result = conn.execute(query)
return result
else:
raise QueryDbError("Other return types not implemented.")
|
Execute a raw SQL query against the SQL DB.
Args:
sql_query (str): A raw SQL query to execute.
Kwargs:
return_as (str): Specify what type of object should be
returned. The following are acceptable types:
- "dataframe": pandas.DataFrame or None if no matching query
- "result": sqlalchemy.engine.result.ResultProxy
Returns:
result (pandas.DataFrame or sqlalchemy ResultProxy): Query result
as a DataFrame (default) or sqlalchemy result (specified with
return_as="result")
Raises:
QueryDbError
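A hedged usage sketch (``db`` is a hypothetical instance of this class, already
constructed with a SQLAlchemy engine):
df = db.query("SELECT * FROM users LIMIT 10")  # pandas.DataFrame by default
result = db.query("SELECT COUNT(*) FROM users", return_as="result")
print(result.fetchone())  # sqlalchemy ResultProxy row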
|
23,995 |
async def nodes(self, *,
dc=None, near=None, watch=None, consistency=None):
params = {"dc": dc, "near": near}
response = await self._api.get("/v1/catalog/nodes",
params=params,
watch=watch,
consistency=consistency)
return consul(response)
|
Lists nodes in a given DC
Parameters:
dc (str): Specify datacenter that will be used.
Defaults to the agent's local datacenter.
near (str): Sort the node list in ascending order based on the
estimated round trip time from that node.
watch (Blocking): Do a blocking query
consistency (Consistency): Force consistency
Returns:
CollectionMeta: where value is a list
It returns a body like this::
[
{
"Node": "baz",
"Address": "10.1.10.11",
"TaggedAddresses": {
"lan": "10.1.10.11",
"wan": "10.1.10.11"
}
},
{
"Node": "foobar",
"Address": "10.1.10.12",
"TaggedAddresses": {
"lan": "10.1.10.11",
"wan": "10.1.10.12"
}
}
]
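A minimal async sketch (``client.catalog`` exposing this coroutine is an
assumption based on the signature above):
import asyncio

async def main():
    # sort nodes by estimated round-trip time from node "foobar"
    nodes = await client.catalog.nodes(dc='dc1', near='foobar')
    for node in nodes:
        print(node['Node'], node['Address'])

asyncio.get_event_loop().run_until_complete(main())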
|
23,996 |
def _get_importer(path_name):
cache = sys.path_importer_cache
try:
importer = cache[path_name]
except KeyError:
cache[path_name] = None
for hook in sys.path_hooks:
try:
importer = hook(path_name)
break
except ImportError:
pass
else:
try:
importer = imp.NullImporter(path_name)
except ImportError:
return None
cache[path_name] = importer
return importer
|
Python version of PyImport_GetImporter C API function
|
23,997 |
def nworker(data, smpchunk, tests):
with h5py.File(data.database.input, ) as io5:
seqview = io5["bootsarr"][:]
maparr = io5["bootsmap"][:]
nall_mask = seqview[:] == 78
rquartets = np.zeros((smpchunk.shape[0], 4), dtype=np.uint16)
rweights = None
rdstats = np.zeros((smpchunk.shape[0], 4), dtype=np.uint32)
for idx in xrange(smpchunk.shape[0]):
sidx = smpchunk[idx]
seqchunk = seqview[sidx]
nmask = np.any(nall_mask[sidx], axis=0)
nmask += np.all(seqchunk == seqchunk[0], axis=0)
bidx, qstats = calculate(seqchunk, maparr[:, 0], nmask, tests)
rdstats[idx] = qstats
rquartets[idx] = smpchunk[idx][bidx]
return rquartets, rweights, rdstats
|
The workhorse function. Not numba.
|
23,998 |
def numericalize(self, arr, device=None):
if self.include_lengths and not isinstance(arr, tuple):
raise ValueError("Field has include_lengths set to True, but "
"input data is not a tuple of "
"(data batch, batch lengths).")
if isinstance(arr, tuple):
arr, lengths = arr
lengths = torch.tensor(lengths, dtype=self.dtype, device=device)
if self.use_vocab:
if self.sequential:
arr = [[self.vocab.stoi[x] for x in ex] for ex in arr]
else:
arr = [self.vocab.stoi[x] for x in arr]
if self.postprocessing is not None:
arr = self.postprocessing(arr, self.vocab)
else:
if self.dtype not in self.dtypes:
raise ValueError(
"Specified Field dtype {} can not be used with "
"use_vocab=False because we do not know how to numericalize it. "
"Please raise an issue at "
"https://github.com/pytorch/text/issues".format(self.dtype))
numericalization_func = self.dtypes[self.dtype]
if not self.sequential:
arr = [numericalization_func(x) if isinstance(x, six.string_types)
else x for x in arr]
if self.postprocessing is not None:
arr = self.postprocessing(arr, None)
var = torch.tensor(arr, dtype=self.dtype, device=device)
if self.sequential and not self.batch_first:
var.t_()
if self.sequential:
var = var.contiguous()
if self.include_lengths:
return var, lengths
return var
|
Turn a batch of examples that use this field into a Variable.
If the field has include_lengths=True, a tensor of lengths will be
included in the return value.
Arguments:
arr (List[List[str]], or tuple of (List[List[str]], List[int])):
List of tokenized and padded examples, or tuple of List of
tokenized and padded examples and List of lengths of each
example if self.include_lengths is True.
device (str or torch.device): A string or instance of `torch.device`
specifying which device the Variables are going to be created on.
If left as default, the tensors will be created on cpu. Default: None.
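A hedged input/output sketch (assuming a torchtext-style ``Field`` with
``include_lengths=True`` and an already-built vocab; ``field`` is hypothetical):
# a padded batch of token lists plus each example's true length
batch = ([['hello', 'world', '<pad>'], ['hi', '<pad>', '<pad>']], [2, 1])
var, lengths = field.numericalize(batch, device='cpu')
# var is (seq_len, batch) unless field.batch_first is True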
|
23,999 |
def setRpms(self, package, build, build_ts, rpms):
self._builds[package] = {"build": build, "build_ts": build_ts, "rpms": rpms}
|
Add/Update package rpm
|