filename | text
---|---
the-stack_0_24822
|
from flask import render_template, url_for, flash, redirect, request, Blueprint, jsonify, abort, Response, current_app
from flask_jwt_extended import (
JWTManager, jwt_required, get_jwt_identity,
create_access_token, create_refresh_token,
jwt_refresh_token_required, get_raw_jwt
)
from backend import db, bcrypt
from backend.models import Users, UsersSchema, Follow_Relationship
from backend.auth.utils import session_cache_path, db_write_spotify_token, is_users_spotify_linked, send_reset_email, validate_register, validate_login, validate_profile_edit, get_user_info_from_token
from backend.errors.handlers import InvalidAPIUsage
from flask_restful import Resource, Api, reqparse
import os
import datetime
import jwt
import spotipy
auth = Blueprint('auth', __name__)
# Token blacklist used by the logout endpoints below. Kept as an in-memory set
# here (an assumption, since no store is defined elsewhere in this module).
blacklist = set()
@auth.route("/api/auth/register", methods=['GET', 'POST'])
def register():
data = request.json
users_schema = UsersSchema()
validate_register(data)
hashed_password = bcrypt.generate_password_hash(data['password']).decode('utf-8')
user = Users(username=data['username'], email=data['email'], password=hashed_password, firstname=data['firstname'], lastname=data['lastname'])
db.session.add(user)
db.session.commit()
return jsonify(users_schema.dump(user)), 200
@auth.route("/api/auth/login", methods=['GET', 'POST'])
def login():
data = request.json
validate_login(data)
user = Users.query.filter_by(email=data['email']).first()
try:
authorized = bcrypt.check_password_hash(user.password, data['password'])
if not authorized:
raise InvalidAPIUsage('Login unsuccessful', status_code=410) # Incorrect password
except AttributeError: # user is None when no account matches the given email
raise InvalidAPIUsage('Login unsuccessful', status_code=410) # Incorrect email
expires = datetime.timedelta(days=7)
users_schema = UsersSchema()
spotify_linked = is_users_spotify_linked(user.username)
ret = users_schema.dump(user)
ret['access_token'] = create_access_token(identity=users_schema.dump(user), expires_delta=expires)
ret['refresh_token'] = create_refresh_token(identity=users_schema.dump(user), expires_delta=expires)
ret['spotify_linked'] = spotify_linked
return jsonify(ret), 200
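# Illustrative request against the login endpoint above (host and port are
# assumptions, not part of this module):
#   curl -X POST http://localhost:5000/api/auth/login \
#        -H "Content-Type: application/json" \
#        -d '{"email": "user@example.com", "password": "secret"}'
# On success the JSON response carries the serialized user plus access_token,
# refresh_token and spotify_linked.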
# Standard refresh endpoint. A blacklisted refresh token
# will not be able to access this endpoint
@auth.route('/api/auth/refresh', methods=['POST'])
@jwt_refresh_token_required
def refresh():
current_user = get_jwt_identity()
ret = {
'access_token': create_access_token(identity=current_user)
}
return jsonify(ret), 200
# Endpoint for revoking the current user's access token
@auth.route('/api/auth/logout', methods=['DELETE'])
@jwt_required
def logout():
jti = get_raw_jwt()['jti']
blacklist.add(jti)
return jsonify({"msg": "Successfully logged out"}), 200
# Endpoint for revoking the current user's refresh token
@auth.route('/api/auth/logout2', methods=['DELETE'])
@jwt_refresh_token_required
def logout2():
jti = get_raw_jwt()['jti']
blacklist.add(jti)
return jsonify({"msg": "Successfully logged out"}), 200
# This will now prevent users with blacklisted tokens from
# accessing this endpoint
@auth.route('/api/auth/protected', methods=['GET'])
@jwt_required
def protected():
return jsonify({'hello': 'world'})
@auth.route("/api/auth/get_user_info/<username>", methods=['GET'])
@jwt_required
def getUserInfo(username):
users_schema = UsersSchema()
current_user = get_user_info_from_token(request)
current_username = current_user['username']
current_user_id = current_user['user_id']
user = Users.query.filter_by(username=username).first()
# Is user linked to Spotify
spotify_linked = is_users_spotify_linked(user.username)
# Does requesting user follow user
if current_username != username:
# NOTE: the join conditions must be combined with a SQL AND expression;
# Python's "and" operator does not work on SQLAlchemy clauses.
follows = db.session.query(Follow_Relationship).join(Users, db.and_(Users.user_id == Follow_Relationship.followed_id, Follow_Relationship.follower_id == current_user_id))\
.filter(Users.username == username).first()
if follows is not None:
user_followed = True
else:
user_followed = False
else:
user_followed = None
# Follower count
follower_count = db.session.query(Follow_Relationship).join(Users, (Users.user_id == Follow_Relationship.followed_id))\
.filter(Users.username == username).count()
# Following count
following_count = db.session.query(Follow_Relationship).join(Users, (Users.user_id == Follow_Relationship.follower_id))\
.filter(Users.username == username).count()
userInfo = users_schema.dump(user)
userInfo['spotify_linked'] = spotify_linked
userInfo['user_followed'] = user_followed
userInfo['follower_count'] = follower_count
userInfo['following_count'] = following_count
return jsonify({
'status': 'success',
'userInfo': userInfo
}), 200
@auth.route("/api/auth/reset_password", methods=['GET', 'POST'])
def reset_request():
data = request.json
user = Users.query.filter_by(email=data['email']).first()
send_reset_email(user)
return jsonify({
'status': 'success'
}), 200
@auth.route("/api/auth/reset_password_token", methods=['GET', 'POST'])
def reset_token():
data = request.json
user = Users.verify_reset_token(data['token'])
hashed_password = bcrypt.generate_password_hash(data['password']).decode('utf-8')
user.password = hashed_password
db.session.commit()
return jsonify({
'status': 'success'
}), 200
@auth.route("/api/auth/update_profile", methods=['GET', 'POST'])
@jwt_required
def update_profile():
data = request.json
validate_profile_edit(data)
current_user = get_user_info_from_token(request)
current_username = current_user['username']
user = Users.query.filter_by(username=current_username).first()
user.firstname = data['firstname']
user.lastname = data['lastname']
user.username = data['username']
user.email = data['email']
user.bio = data['bio']
db.session.commit()
expires = datetime.timedelta(days=7)
users_schema = UsersSchema()
spotify_linked = is_users_spotify_linked(user.username)
ret = users_schema.dump(user)
ret['access_token'] = create_access_token(identity=users_schema.dump(user), expires_delta=expires)
ret['refresh_token'] = create_refresh_token(identity=users_schema.dump(user), expires_delta=expires)
ret['spotify_linked'] = spotify_linked
return jsonify(ret), 200
@auth.route("/api/auth/update_appcolor", methods=['GET', 'POST'])
@jwt_required
def update_appcolor():
data = request.json
current_user = get_user_info_from_token(request)
current_username = current_user['username']
user = Users.query.filter_by(username=current_username).first()
user.appcolor = data['appcolor']
db.session.commit()
users_schema = UsersSchema()
spotify_linked = is_users_spotify_linked(user.username)
ret = users_schema.dump(user)
ret['spotify_linked'] = spotify_linked
return jsonify(ret), 200
@auth.route("/api/auth/link_spotify", methods=['GET', 'POST'])
@jwt_required
def link_spotify():
data = request.json
current_user = get_user_info_from_token(request)
current_username = current_user['username']
user = Users.query.filter_by(username=current_username).first()
user.spotify_account = data['spotify_account']
db.session.commit()
expires = datetime.timedelta(days=7)
users_schema = UsersSchema()
cache_file = str(data['spotify_account'])
auth_manager = spotipy.oauth2.SpotifyOAuth( client_id=current_app.config['SPOTIFY_CLIENT_ID'],
client_secret=current_app.config['SPOTIFY_SECRET_ID'],
redirect_uri=current_app.config['SPOTIFY_REDIRECT_URI'],
show_dialog=True,
cache_path=session_cache_path(cache_file),
scope=current_app.config['SCOPE'] )
auth_url = None
if not auth_manager.get_cached_token():
auth_url = auth_manager.get_authorize_url()
expires = datetime.timedelta(days=7)
users_schema = UsersSchema()
spotify_linked = is_users_spotify_linked(user.username)
token = users_schema.dump(user)
token['access_token'] = create_access_token(identity=users_schema.dump(user), expires_delta=expires)
token['refresh_token'] = create_refresh_token(identity=users_schema.dump(user), expires_delta=expires)
ret = {}
ret['token'] = token
ret['spotify_linked'] = spotify_linked
ret['auth_url'] = auth_url
return jsonify(ret), 200
@auth.route("/api/auth/link_spotify_callback", methods=['GET', 'POST'])
def link_spotify_callback():
cache_file = 'temp'
auth_manager = spotipy.oauth2.SpotifyOAuth( client_id=current_app.config['SPOTIFY_CLIENT_ID'],
client_secret=current_app.config['SPOTIFY_SECRET_ID'],
redirect_uri=current_app.config['SPOTIFY_REDIRECT_URI'],
show_dialog=True,
cache_path=session_cache_path(cache_file),
scope=current_app.config['SCOPE'] )
if request.args.get('code'):
auth_manager.get_access_token(request.args.get("code"))
redirect('/api/auth/link_spotify_callback')
spotify = spotipy.Spotify(auth_manager=auth_manager)
sp_user = spotify.current_user()
from_file = open(session_cache_path(cache_file), 'r')
token_data = from_file.read()
from_file.close()
os.remove(session_cache_path(cache_file))
to_file = open(session_cache_path(str(sp_user['id'])), 'w')
to_file.write(token_data)
to_file.close()
db_write_spotify_token(token_data, str(sp_user['id']))
user = Users.query.filter_by(spotify_account=sp_user['id']).first()
return redirect(current_app.config['FRONTEND_URL'] + 'findyourtune/#/u/' + user.username)
|
the-stack_0_24823
|
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Polyaxon SDKs and REST API specification.
Polyaxon SDKs and REST API specification. # noqa: E501
The version of the OpenAPI document: 1.0.79
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from polyaxon_sdk.configuration import Configuration
class V1Termination(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {"max_retries": "int", "ttl": "int", "timeout": "int"}
attribute_map = {"max_retries": "max_retries", "ttl": "ttl", "timeout": "timeout"}
def __init__(
self, max_retries=None, ttl=None, timeout=None, local_vars_configuration=None
): # noqa: E501
"""V1Termination - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._max_retries = None
self._ttl = None
self._timeout = None
self.discriminator = None
if max_retries is not None:
self.max_retries = max_retries
if ttl is not None:
self.ttl = ttl
if timeout is not None:
self.timeout = timeout
@property
def max_retries(self):
"""Gets the max_retries of this V1Termination. # noqa: E501
:return: The max_retries of this V1Termination. # noqa: E501
:rtype: int
"""
return self._max_retries
@max_retries.setter
def max_retries(self, max_retries):
"""Sets the max_retries of this V1Termination.
:param max_retries: The max_retries of this V1Termination. # noqa: E501
:type: int
"""
self._max_retries = max_retries
@property
def ttl(self):
"""Gets the ttl of this V1Termination. # noqa: E501
:return: The ttl of this V1Termination. # noqa: E501
:rtype: int
"""
return self._ttl
@ttl.setter
def ttl(self, ttl):
"""Sets the ttl of this V1Termination.
:param ttl: The ttl of this V1Termination. # noqa: E501
:type: int
"""
self._ttl = ttl
@property
def timeout(self):
"""Gets the timeout of this V1Termination. # noqa: E501
:return: The timeout of this V1Termination. # noqa: E501
:rtype: int
"""
return self._timeout
@timeout.setter
def timeout(self, timeout):
"""Sets the timeout of this V1Termination.
:param timeout: The timeout of this V1Termination. # noqa: E501
:type: int
"""
self._timeout = timeout
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
)
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict")
else item,
value.items(),
)
)
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1Termination):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1Termination):
return True
return self.to_dict() != other.to_dict()
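# Illustrative usage of the generated model above (not part of the generated file):
#   term = V1Termination(max_retries=3, ttl=3600, timeout=600)
#   term.to_dict()  # -> {'max_retries': 3, 'ttl': 3600, 'timeout': 600}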
|
the-stack_0_24824
|
import numpy as np
# from terminaltables import AsciiTable
from .bbox_overlaps import bbox_overlaps
def _recalls(all_ious, proposal_nums, thrs):
img_num = all_ious.shape[0]
total_gt_num = sum([ious.shape[0] for ious in all_ious])
_ious = np.zeros((proposal_nums.size, total_gt_num), dtype=np.float32)
for k, proposal_num in enumerate(proposal_nums):
tmp_ious = np.zeros(0)
for i in range(img_num):
ious = all_ious[i][:, :proposal_num].copy()
gt_ious = np.zeros((ious.shape[0]))
if ious.size == 0:
tmp_ious = np.hstack((tmp_ious, gt_ious))
continue
for j in range(ious.shape[0]):
gt_max_overlaps = ious.argmax(axis=1)
max_ious = ious[np.arange(0, ious.shape[0]), gt_max_overlaps]
gt_idx = max_ious.argmax()
gt_ious[j] = max_ious[gt_idx]
box_idx = gt_max_overlaps[gt_idx]
ious[gt_idx, :] = -1
ious[:, box_idx] = -1
tmp_ious = np.hstack((tmp_ious, gt_ious))
_ious[k, :] = tmp_ious
_ious = np.fliplr(np.sort(_ious, axis=1))
recalls = np.zeros((proposal_nums.size, thrs.size))
for i, thr in enumerate(thrs):
recalls[:, i] = (_ious >= thr).sum(axis=1) / float(total_gt_num)
return recalls
def set_recall_param(proposal_nums, iou_thrs):
"""Check proposal_nums and iou_thrs and set correct format.
"""
if isinstance(proposal_nums, list):
_proposal_nums = np.array(proposal_nums)
elif isinstance(proposal_nums, int):
_proposal_nums = np.array([proposal_nums])
else:
_proposal_nums = proposal_nums
if iou_thrs is None:
_iou_thrs = np.array([0.5])
elif isinstance(iou_thrs, list):
_iou_thrs = np.array(iou_thrs)
elif isinstance(iou_thrs, float):
_iou_thrs = np.array([iou_thrs])
else:
_iou_thrs = iou_thrs
return _proposal_nums, _iou_thrs
def eval_recalls(gts,
proposals,
proposal_nums=None,
iou_thrs=None,
print_summary=True):
"""Calculate recalls.
Args:
gts(list or ndarray): a list of arrays of shape (n, 4)
proposals(list or ndarray): a list of arrays of shape (k, 4) or (k, 5)
proposal_nums(int or list of int or ndarray): top N proposals
iou_thrs(float or list or ndarray): iou thresholds
Returns:
ndarray: recalls of different ious and proposal nums
"""
img_num = len(gts)
assert img_num == len(proposals)
proposal_nums, iou_thrs = set_recall_param(proposal_nums, iou_thrs)
all_ious = []
for i in range(img_num):
if proposals[i].ndim == 2 and proposals[i].shape[1] == 5:
scores = proposals[i][:, 4]
sort_idx = np.argsort(scores)[::-1]
img_proposal = proposals[i][sort_idx, :]
else:
img_proposal = proposals[i]
prop_num = min(img_proposal.shape[0], proposal_nums[-1])
if gts[i] is None or gts[i].shape[0] == 0:
ious = np.zeros((0, img_proposal.shape[0]), dtype=np.float32)
else:
ious = bbox_overlaps(gts[i], img_proposal[:prop_num, :4])
all_ious.append(ious)
all_ious = np.array(all_ious)
recalls = _recalls(all_ious, proposal_nums, iou_thrs)
if print_summary:
print_recall_summary(recalls, proposal_nums, iou_thrs)
return recalls
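# Minimal usage sketch (synthetic boxes; print_summary is kept off so the
# optional terminaltables dependency is not needed):
#   gts = [np.array([[0, 0, 10, 10]], dtype=np.float32)]
#   proposals = [np.array([[1, 1, 9, 9, 0.9]], dtype=np.float32)]
#   eval_recalls(gts, proposals, proposal_nums=[1], iou_thrs=[0.5],
#                print_summary=False)  # -> recall of 1.0 at IoU 0.5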
def print_recall_summary(recalls,
proposal_nums,
iou_thrs,
row_idxs=None,
col_idxs=None):
"""Print recalls in a table.
Args:
recalls(ndarray): calculated from `bbox_recalls`
proposal_nums(ndarray or list): top N proposals
iou_thrs(ndarray or list): iou thresholds
row_idxs(ndarray): which rows(proposal nums) to print
col_idxs(ndarray): which cols(iou thresholds) to print
"""
proposal_nums = np.array(proposal_nums, dtype=np.int32)
iou_thrs = np.array(iou_thrs)
if row_idxs is None:
row_idxs = np.arange(proposal_nums.size)
if col_idxs is None:
col_idxs = np.arange(iou_thrs.size)
row_header = [''] + iou_thrs[col_idxs].tolist()
table_data = [row_header]
for i, num in enumerate(proposal_nums[row_idxs]):
row = [
'{:.3f}'.format(val)
for val in recalls[row_idxs[i], col_idxs].tolist()
]
row.insert(0, num)
table_data.append(row)
from terminaltables import AsciiTable # imported lazily so the dependency stays optional
table = AsciiTable(table_data)
print(table.table)
def plot_num_recall(recalls, proposal_nums):
"""Plot Proposal_num-Recalls curve.
Args:
recalls(ndarray or list): shape (k,)
proposal_nums(ndarray or list): same shape as `recalls`
"""
if isinstance(proposal_nums, np.ndarray):
_proposal_nums = proposal_nums.tolist()
else:
_proposal_nums = proposal_nums
if isinstance(recalls, np.ndarray):
_recalls = recalls.tolist()
else:
_recalls = recalls
import matplotlib.pyplot as plt
f = plt.figure()
plt.plot([0] + _proposal_nums, [0] + _recalls)
plt.xlabel('Proposal num')
plt.ylabel('Recall')
plt.axis([0, proposal_nums.max(), 0, 1])
f.show()
def plot_iou_recall(recalls, iou_thrs):
"""Plot IoU-Recalls curve.
Args:
recalls(ndarray or list): shape (k,)
iou_thrs(ndarray or list): same shape as `recalls`
"""
if isinstance(iou_thrs, np.ndarray):
_iou_thrs = iou_thrs.tolist()
else:
_iou_thrs = iou_thrs
if isinstance(recalls, np.ndarray):
_recalls = recalls.tolist()
else:
_recalls = recalls
import matplotlib.pyplot as plt
f = plt.figure()
plt.plot(_iou_thrs + [1.0], _recalls + [0.])
plt.xlabel('IoU')
plt.ylabel('Recall')
plt.axis([iou_thrs.min(), 1, 0, 1])
f.show()
|
the-stack_0_24825
|
"""
Limits
======
Implemented according to the PhD thesis
http://www.cybertester.com/data/gruntz.pdf, which contains very thorough
descriptions of the algorithm including many examples. We summarize here
the gist of it.
All functions are sorted according to how rapidly varying they are at
infinity using the following rules. Any two functions f and g can be
compared using the properties of L:
L=lim log|f(x)| / log|g(x)| (for x -> oo)
We define >, <, ~ according to::
1. f > g .... L=+-oo
we say that:
- f is greater than any power of g
- f is more rapidly varying than g
- f goes to infinity/zero faster than g
2. f < g .... L=0
we say that:
- f is lower than any power of g
3. f ~ g .... L!=0, +-oo
we say that:
- both f and g are bounded from above and below by suitable integral
powers of the other
Examples
========
::
2 < x < exp(x) < exp(x**2) < exp(exp(x))
2 ~ 3 ~ -5
x ~ x**2 ~ x**3 ~ 1/x ~ x**m ~ -x
exp(x) ~ exp(-x) ~ exp(2x) ~ exp(x)**2 ~ exp(x+exp(-x))
f ~ 1/f
So we can divide all the functions into comparability classes (x and x^2
belong to one class, exp(x) and exp(-x) belong to some other class). In
principle, we could compare any two functions, but in our algorithm, we
don't compare anything below the class 2~3~-5 (for example log(x) is
below this), so we set 2~3~-5 as the lowest comparability class.
Given the function f, we find the list of most rapidly varying (mrv set)
subexpressions of it. This list belongs to the same comparability class.
Let's say it is {exp(x), exp(2x)}. Using the rule f ~ 1/f we find an
element "w" (either from the list or a new one) from the same
comparability class which goes to zero at infinity. In our example we
set w=exp(-x) (but we could also set w=exp(-2x) or w=exp(-3x) ...). We
rewrite the mrv set using w, in our case {1/w, 1/w^2}, and substitute it
into f. Then we expand f into a series in w::
f = c0*w^e0 + c1*w^e1 + ... + O(w^en), where e0<e1<...<en, c0!=0
but for x->oo, lim f = lim c0*w^e0, because all the other terms go to zero,
because w goes to zero faster than the ci and ei. So::
for e0>0, lim f = 0
for e0<0, lim f = +-oo (the sign depends on the sign of c0)
for e0=0, lim f = lim c0
We need to recursively compute limits at several places of the algorithm, but
as is shown in the PhD thesis, it always finishes.
Important functions from the implementation:
compare(a, b, x) compares "a" and "b" by computing the limit L.
mrv(e, x) returns list of most rapidly varying (mrv) subexpressions of "e"
rewrite(e, Omega, x, wsym) rewrites "e" in terms of w
leadterm(f, x) returns the lowest power term in the series of f
mrv_leadterm(e, x) returns the lead term (c0, e0) for e
limitinf(e, x) computes lim e (for x->oo)
limit(e, z, z0) computes any limit by converting it to the case x->oo
All the functions are really simple and straightforward except
rewrite(), which is the most difficult/complex part of the algorithm.
When the algorithm fails, the bugs are usually in the series expansion
(i.e. in SymPy) or in rewrite.
This code is almost an exact rewrite of the Maple code inside the Gruntz
thesis.
Debugging
---------
Because the gruntz algorithm is highly recursive, it's difficult to
figure out what went wrong inside a debugger. Instead, turn on nice
debug prints by defining the environment variable SYMPY_DEBUG. For
example:
[user@localhost]: SYMPY_DEBUG=True ./bin/isympy
In [1]: limit(sin(x)/x, x, 0)
limitinf(_x*sin(1/_x), _x) = 1
+-mrv_leadterm(_x*sin(1/_x), _x) = (1, 0)
| +-mrv(_x*sin(1/_x), _x) = set([_x])
| | +-mrv(_x, _x) = set([_x])
| | +-mrv(sin(1/_x), _x) = set([_x])
| | +-mrv(1/_x, _x) = set([_x])
| | +-mrv(_x, _x) = set([_x])
| +-mrv_leadterm(exp(_x)*sin(exp(-_x)), _x, set([exp(_x)])) = (1, 0)
| +-rewrite(exp(_x)*sin(exp(-_x)), set([exp(_x)]), _x, _w) = (1/_w*sin(_w), -_x)
| +-sign(_x, _x) = 1
| +-mrv_leadterm(1, _x) = (1, 0)
+-sign(0, _x) = 0
+-limitinf(1, _x) = 1
And check manually which line is wrong. Then go to the source code and
debug this function to figure out the exact problem.
"""
from __future__ import print_function, division
from sympy.core import Basic, S, oo, Symbol, I, Dummy, Wild
from sympy.functions import log, exp
from sympy.series.order import Order
from sympy.simplify import powsimp
from sympy import cacheit
from sympy.core.compatibility import reduce
from sympy.utilities.timeutils import timethis
timeit = timethis('gruntz')
from sympy.utilities.misc import debug_decorator as debug
def compare(a, b, x):
"""Returns "<" if a<b, "=" for a == b, ">" for a>b"""
# log(exp(...)) must always be simplified here for termination
la, lb = log(a), log(b)
if isinstance(a, Basic) and a.func is exp:
la = a.args[0]
if isinstance(b, Basic) and b.func is exp:
lb = b.args[0]
c = limitinf(la/lb, x)
if c == 0:
return "<"
elif c.is_unbounded:
return ">"
else:
return "="
class SubsSet(dict):
"""
Stores (expr, dummy) pairs, and how to rewrite expr-s.
The gruntz algorithm needs to rewrite certain expressions in terms of a new
variable w. We cannot use subs, because it is just too smart for us. For
example::
> Omega=[exp(exp(_p - exp(-_p))/(1 - 1/_p)), exp(exp(_p))]
> O2=[exp(-exp(_p) + exp(-exp(-_p))*exp(_p)/(1 - 1/_p))/_w, 1/_w]
> e = exp(exp(_p - exp(-_p))/(1 - 1/_p)) - exp(exp(_p))
> e.subs(Omega[0],O2[0]).subs(Omega[1],O2[1])
-1/w + exp(exp(p)*exp(-exp(-p))/(1 - 1/p))
is really not what we want!
So we do it the hard way and keep track of all the things we potentially
want to substitute by dummy variables. Consider the expression::
exp(x - exp(-x)) + exp(x) + x.
The mrv set is {exp(x), exp(-x), exp(x - exp(-x))}.
We introduce corresponding dummy variables d1, d2, d3 and rewrite::
d3 + d1 + x.
This class first of all keeps track of the mapping expr->variable, i.e.
will at this stage be a dictionary::
{exp(x): d1, exp(-x): d2, exp(x - exp(-x)): d3}.
[It turns out to be more convenient this way round.]
But sometimes expressions in the mrv set have other expressions from the
mrv set as subexpressions, and we need to keep track of that as well. In
this case, d3 is really exp(x - d2), so rewrites at this stage is::
{d3: exp(x-d2)}.
The function rewrite uses all this information to correctly rewrite our
expression in terms of w. In this case w can be chosen to be exp(-x),
i.e. d2. The correct rewriting then is::
exp(-w)/w + 1/w + x.
"""
def __init__(self):
self.rewrites = {}
def __repr__(self):
return super(SubsSet, self).__repr__() + ', ' + self.rewrites.__repr__()
def __getitem__(self, key):
if not key in self:
self[key] = Dummy()
return dict.__getitem__(self, key)
def do_subs(self, e):
for expr, var in self.items():
e = e.subs(var, expr)
return e
def meets(self, s2):
"""Tell whether or not self and s2 have non-empty intersection"""
return set(self.keys()).intersection(list(s2.keys())) != set()
def union(self, s2, exps=None):
"""Compute the union of self and s2, adjusting exps"""
res = self.copy()
tr = {}
for expr, var in s2.items():
if expr in self:
if exps:
exps = exps.subs(var, res[expr])
tr[var] = res[expr]
else:
res[expr] = var
for var, rewr in s2.rewrites.items():
res.rewrites[var] = rewr.subs(tr)
return res, exps
def copy(self):
r = SubsSet()
r.rewrites = self.rewrites.copy()
for expr, var in self.items():
r[expr] = var
return r
@debug
def mrv(e, x):
"""Returns a SubsSet of most rapidly varying (mrv) subexpressions of 'e',
and e rewritten in terms of these"""
e = powsimp(e, deep=True, combine='exp')
assert isinstance(e, Basic)
if not e.has(x):
return SubsSet(), e
elif e == x:
s = SubsSet()
return s, s[x]
elif e.is_Mul or e.is_Add:
i, d = e.as_independent(x) # throw away x-independent terms
if d.func != e.func:
s, expr = mrv(d, x)
return s, e.func(i, expr)
a, b = d.as_two_terms()
s1, e1 = mrv(a, x)
s2, e2 = mrv(b, x)
return mrv_max1(s1, s2, e.func(i, e1, e2), x)
elif e.is_Pow:
b, e = e.as_base_exp()
if e.has(x):
return mrv(exp(e * log(b)), x)
else:
s, expr = mrv(b, x)
return s, expr**e
elif e.func is log:
s, expr = mrv(e.args[0], x)
return s, log(expr)
elif e.func is exp:
# We know from the theory of this algorithm that exp(log(...)) may always
# be simplified here, and doing so is vital for termination.
if e.args[0].func is log:
return mrv(e.args[0].args[0], x)
if limitinf(e.args[0], x).is_unbounded:
s1 = SubsSet()
e1 = s1[e]
s2, e2 = mrv(e.args[0], x)
su = s1.union(s2)[0]
su.rewrites[e1] = exp(e2)
return mrv_max3(s1, e1, s2, exp(e2), su, e1, x)
else:
s, expr = mrv(e.args[0], x)
return s, exp(expr)
elif e.is_Function:
l = [mrv(a, x) for a in e.args]
l2 = [s for (s, _) in l if s != SubsSet()]
if len(l2) != 1:
# e.g. something like BesselJ(x, x)
raise NotImplementedError("MRV set computation for functions in"
" several variables not implemented.")
s, ss = l2[0], SubsSet()
args = [ss.do_subs(x[1]) for x in l]
return s, e.func(*args)
elif e.is_Derivative:
raise NotImplementedError("MRV set computation for derviatives"
" not implemented yet.")
return mrv(e.args[0], x)
raise NotImplementedError(
"Don't know how to calculate the mrv of '%s'" % e)
def mrv_max3(f, expsf, g, expsg, union, expsboth, x):
"""Computes the maximum of two sets of expressions f and g, which
are in the same comparability class, i.e. max() compares (two elements of)
f and g and returns either (f, expsf) [if f is larger], (g, expsg)
[if g is larger] or (union, expsboth) [if f, g are of the same class].
"""
assert isinstance(f, SubsSet)
assert isinstance(g, SubsSet)
if f == SubsSet():
return g, expsg
elif g == SubsSet():
return f, expsf
elif f.meets(g):
return union, expsboth
c = compare(list(f.keys())[0], list(g.keys())[0], x)
if c == ">":
return f, expsf
elif c == "<":
return g, expsg
else:
assert c == "="
return union, expsboth
def mrv_max1(f, g, exps, x):
"""Computes the maximum of two sets of expressions f and g, which
are in the same comparability class, i.e. mrv_max1() compares (two elements of)
f and g and returns the set, which is in the higher comparability class
of the union of both, if they have the same order of variation.
Also returns exps, with the appropriate substitutions made.
"""
u, b = f.union(g, exps)
return mrv_max3(f, g.do_subs(exps), g, f.do_subs(exps),
u, b, x)
@debug
@cacheit
@timeit
def sign(e, x):
"""
Returns a sign of an expression e(x) for x->oo.
::
e > 0 for x sufficiently large ... 1
e == 0 for x sufficiently large ... 0
e < 0 for x sufficiently large ... -1
The result of this function is currently undefined if e changes sign
arbitrarily often for arbitrarily large x (e.g. sin(x)).
Note that this returns zero only if e is *constantly* zero
for x sufficiently large. [If e is constant, of course, this is just
the same thing as the sign of e.]
"""
from sympy import sign as _sign
assert isinstance(e, Basic)
if e.is_positive:
return 1
elif e.is_negative:
return -1
elif e.is_zero:
return 0
elif not e.has(x):
return _sign(e)
elif e == x:
return 1
elif e.is_Mul:
a, b = e.as_two_terms()
sa = sign(a, x)
if not sa:
return 0
return sa * sign(b, x)
elif e.func is exp:
return 1
elif e.is_Pow:
s = sign(e.base, x)
if s == 1:
return 1
if e.exp.is_Integer:
return s**e.exp
elif e.func is log:
return sign(e.args[0] - 1, x)
# if all else fails, do it the hard way
c0, e0 = mrv_leadterm(e, x)
return sign(c0, x)
@debug
@timeit
@cacheit
def limitinf(e, x):
"""Limit e(x) for x-> oo"""
#rewrite e in terms of tractable functions only
e = e.rewrite('tractable', deep=True)
if not e.has(x):
return e # e is a constant
if e.has(Order):
e = e.expand().removeO()
if not x.is_positive:
# We make sure that x.is_positive is True so we
# get all the correct mathematical behavior from the expression.
# We need a fresh variable.
p = Dummy('p', positive=True, bounded=True)
e = e.subs(x, p)
x = p
c0, e0 = mrv_leadterm(e, x)
sig = sign(e0, x)
if sig == 1:
return S.Zero # e0>0: lim f = 0
elif sig == -1: # e0<0: lim f = +-oo (the sign depends on the sign of c0)
if c0.match(I*Wild("a", exclude=[I])):
return c0*oo
s = sign(c0, x)
#the leading term shouldn't be 0:
assert s != 0
return s*oo
elif sig == 0:
return limitinf(c0, x) # e0=0: lim f = lim c0
def moveup2(s, x):
r = SubsSet()
for expr, var in s.items():
r[expr.subs(x, exp(x))] = var
for var, expr in s.rewrites.items():
r.rewrites[var] = s.rewrites[var].subs(x, exp(x))
return r
def moveup(l, x):
return [e.subs(x, exp(x)) for e in l]
@debug
@timeit
def calculate_series(e, x, skip_abs=False, logx=None):
""" Calculates at least one term of the series of "e" in "x".
This is a place that fails most often, so it is in its own function.
"""
from sympy.core.exprtools import factor_terms
n = 1
while 1:
series = e.nseries(x, n=n, logx=logx)
if not series.has(Order):
# The series expansion is locally exact.
return series
series = series.removeO()
series = factor_terms(series, fraction=True)
if series and ((not skip_abs) or series.has(x)):
return series
n *= 2
@debug
@timeit
@cacheit
def mrv_leadterm(e, x):
"""Returns (c0, e0) for e."""
Omega = SubsSet()
if not e.has(x):
return (e, S.Zero)
if Omega == SubsSet():
Omega, exps = mrv(e, x)
if not Omega:
# e really does not depend on x after simplification
series = calculate_series(e, x)
c0, e0 = series.leadterm(x)
assert e0 == 0
return c0, e0
if x in Omega:
#move the whole omega up (exponentiate each term):
Omega_up = moveup2(Omega, x)
e_up = moveup([e], x)[0]
exps_up = moveup([exps], x)[0]
# NOTE: there is no need to move this down!
e = e_up
Omega = Omega_up
exps = exps_up
#
# The positive dummy, w, is used here so log(w*2) etc. will expand;
# a unique dummy is needed in this algorithm
#
# For limits of complex functions, the algorithm would have to be
# improved, or just find limits of Re and Im components separately.
#
w = Dummy("w", real=True, positive=True, bounded=True)
f, logw = rewrite(exps, Omega, x, w)
series = calculate_series(f, w, logx=logw)
series = series.subs(log(w), logw) # this should not be necessary
return series.leadterm(w)
def build_expression_tree(Omega, rewrites):
r""" Helper function for rewrite.
We need to sort Omega (mrv set) so that we replace an expression before
we replace any expression in terms of which it has to be rewritten::
e1 ---> e2 ---> e3
\
-> e4
Here we can do e1, e2, e3, e4 or e1, e2, e4, e3.
To do this we assemble the nodes into a tree, and sort them by height.
This function only builds the tree; rewrite() then sorts the nodes by height.
"""
class Node:
def ht(self):
return reduce(lambda x, y: x + y,
[x.ht() for x in self.before], 1)
nodes = {}
for expr, v in Omega:
n = Node()
n.before = []
n.var = v
n.expr = expr
nodes[v] = n
for _, v in Omega:
if v in rewrites:
n = nodes[v]
r = rewrites[v]
for _, v2 in Omega:
if r.has(v2):
n.before.append(nodes[v2])
return nodes
@debug
@timeit
def rewrite(e, Omega, x, wsym):
"""e(x) ... the function
Omega ... the mrv set
wsym ... the symbol which is going to be used for w
Returns the rewritten e in terms of w and log(w). See test_rewrite1()
for examples and correct results.
"""
from sympy import ilcm
assert isinstance(Omega, SubsSet)
assert len(Omega) != 0
#all items in Omega must be exponentials
for t in Omega.keys():
assert t.func is exp
rewrites = Omega.rewrites
Omega = list(Omega.items())
nodes = build_expression_tree(Omega, rewrites)
Omega.sort(key=lambda x: nodes[x[1]].ht(), reverse=True)
g, _ = Omega[-1]
# g is going to be the "w" - the simplest one in the mrv set
sig = sign(g.args[0], x)
if sig == 1:
wsym = 1/wsym # if g goes to oo, substitute 1/w
elif sig != -1:
raise NotImplementedError('Result depends on the sign of %s' % sig)
#O2 is a list, which results by rewriting each item in Omega using "w"
O2 = []
denominators = []
for f, var in Omega:
c = limitinf(f.args[0]/g.args[0], x)
if c.is_Rational:
denominators.append(c.q)
arg = f.args[0]
if var in rewrites:
assert rewrites[var].func is exp
arg = rewrites[var].args[0]
O2.append((var, exp((arg - c*g.args[0]).expand())*wsym**c))
#Remember that Omega contains subexpressions of "e". So now we find
#them in "e" and substitute them for our rewriting, stored in O2
# the following powsimp is necessary to automatically combine exponentials,
# so that the .subs() below succeeds:
# TODO this should not be necessary
f = powsimp(e, deep=True, combine='exp')
for a, b in O2:
f = f.subs(a, b)
for _, var in Omega:
assert not f.has(var)
#finally compute the logarithm of w (logw).
logw = g.args[0]
if sig == 1:
logw = -logw # log(w)->log(1/w)=-log(w)
# Some parts of sympy have difficulty computing series expansions with
# non-integral exponents. The following heuristic improves the situation:
exponent = reduce(ilcm, denominators, 1)
f = f.subs(wsym, wsym**exponent)
logw /= exponent
return f, logw
def gruntz(e, z, z0, dir="+"):
"""
Compute the limit of e(z) at the point z0 using the Gruntz algorithm.
z0 can be any expression, including oo and -oo.
For dir="+" (default) it calculates the limit from the right
(z->z0+) and for dir="-" the limit from the left (z->z0-). For infinite z0
(oo or -oo), the dir argument doesn't matter.
This algorithm is fully described in the module docstring in the gruntz.py
file. It relies heavily on the series expansion. Most frequently, gruntz()
is only used if the faster limit() function (which uses heuristics) fails.
"""
if not isinstance(z, Symbol):
raise NotImplementedError("Second argument must be a Symbol")
#convert all limits to the limit z->oo; sign of z is handled in limitinf
r = None
if z0 == oo:
r = limitinf(e, z)
elif z0 == -oo:
r = limitinf(e.subs(z, -z), z)
else:
if dir == "-":
e0 = e.subs(z, z0 - 1/z)
elif dir == "+":
e0 = e.subs(z, z0 + 1/z)
else:
raise NotImplementedError("dir must be '+' or '-'")
r = limitinf(e0, z)
# This is a bit of a heuristic for nice results... we always rewrite
# tractable functions in terms of familiar intractable ones.
# It might be nicer to rewrite the exactly to what they were initially,
# but that would take some work to implement.
return r.rewrite('intractable', deep=True)
|
the-stack_0_24827
|
# Copyright 2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import os
from collections import defaultdict
import pandas as pd
from wa import Workload, Parameter, ConfigError, TargetError, WorkloadError
from wa.utils.exec_control import once
class Mongoperf(Workload):
name = 'mongoperf'
description = """
A utility to check disk I/O performance independently of MongoDB.
It times tests of random disk I/O and presents the results. You can use
mongoperf for any case apart from MongoDB. The mmf true mode is completely
generic.
.. note:: ``mongoperf`` seems to ramp up threads in powers of two over a
period of tens of seconds (there doesn't appear to be a way to
change that). Bear this in mind when setting the ``duration``.
"""
parameters = [
Parameter('duration', kind=int, default=300,
description="""
Duration of the workload.
"""),
Parameter('threads', kind=int, default=16,
description="""
Defines the number of threads mongoperf will use in the test.
To saturate the system storage system you will need
multiple threads.
"""),
Parameter('file_size_mb', kind=int, default=1,
description="""
Test file size in MB.
"""),
Parameter('sleep_micros', kind=int, default=0,
description="""
mongoperf will pause for this number of microseconds divided
by the number of threads between each operation.
"""),
Parameter('mmf', kind=bool, default=True,
description="""
When ``True``, use memory mapped files for the tests.
Generally:
- when mmf is ``False``, mongoperf tests direct, physical, I/O,
without caching. Use a large file size to test heavy random
I/O load and to avoid I/O coalescing.
- when mmf is ``True``, mongoperf runs tests of the caching
system, and can use normal file system cache. Use mmf in
this mode to test file system cache behavior with memory
mapped files.
"""),
Parameter('read', kind=bool, default=True,
aliases=['r'],
description="""
When ``True``, perform reads as part of the test. Either
``read`` or ``write`` must be ``True``.
"""),
Parameter('write', kind=bool, default=True,
aliases=['w'],
description="""
When ``True``, perform writes as part of the test. Either
``read`` or ``write`` must be ``True``.
"""),
Parameter('rec_size_kb', kind=int, default=4,
description="""
The size of each write operation
"""),
Parameter('sync_delay', kind=int, default=0,
description="""
Seconds between disk flushes. Only use this if ``mmf`` is set
to ``True``.
"""),
]
def validate(self):
if not self.read and not self.write:
raise ConfigError('Either "read" or "write" must be True.')
if not self.mmf and self.sync_delay:
raise ConfigError('sync_delay can only be set if mmf is True')
@once
def initialize(self, context):
try:
self.target.execute('mongoperf -h')
except TargetError:
raise WorkloadError('Mongoperf must be installed and in $PATH on the target.')
def setup(self, context):
config = {}
config['nThreads'] = self.threads
config['fileSizeMB'] = self.file_size_mb
config['sleepMicros'] = self.sleep_micros
config['mmf'] = self.mmf
config['r'] = self.read
config['w'] = self.write
config['recSizeKB'] = self.rec_size_kb
config['syncDelay'] = self.sync_delay
config_text = json.dumps(config)
self.outfile = self.target.get_workpath('mongoperf.out')
self.command = 'echo "{}" | mongoperf > {}'.format(config_text, self.outfile)
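# With the default parameters above, setup() pipes a JSON document like the
# following into mongoperf (a sketch; key names mirror the config dict built
# in setup()):
#   {"nThreads": 16, "fileSizeMB": 1, "sleepMicros": 0, "mmf": true,
#    "r": true, "w": true, "recSizeKB": 4, "syncDelay": 0}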
def run(self, context):
self.target.kick_off(self.command)
self.target.sleep(self.duration)
self.target.killall('mongoperf', signal='SIGTERM')
def extract_results(self, context):
host_outfile = os.path.join(context.output_directory, 'mongoperf.out')
self.target.pull(self.outfile, host_outfile)
context.add_artifact('mongoperf-output', host_outfile, kind='raw')
def update_output(self, context):
host_file = context.get_artifact_path('mongoperf-output')
results = defaultdict(list)
threads = None
with open(host_file) as fh:
for line in fh:
if 'new thread,' in line:
threads = int(line.split()[-1])
elif 'ops/sec' in line:
results[threads].append(int(line.split()[0]))
if not results:
raise WorkloadError('No mongoperf results found in the output.')
for threads, values in results.items():
rs = pd.Series(values)
context.add_metric('ops_per_sec', rs.mean(),
classifiers={'threads': threads})
context.add_metric('ops_per_sec_std', rs.std(), lower_is_better=True,
classifiers={'threads': threads})
def teardown(self, context):
if self.cleanup_assets:
self.target.remove(self.outfile)
|
the-stack_0_24829
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com [email protected]
import asyncio
from concurrent.futures import ThreadPoolExecutor
from thumbor.utils import logger
class ThreadPool:
@classmethod
def instance(cls, size):
"""
Cache threadpool since context is
recreated for each request
"""
if not getattr(cls, "_instance", None):
cls._instance = {}
if size not in cls._instance:
cls._instance[size] = ThreadPool(size)
return cls._instance[size]
@classmethod
def reset(cls):
if cls._instance is not None:
for size in cls._instance:
cls._instance[size].cleanup()
cls._instance = None
def __init__(self, thread_pool_size):
if thread_pool_size:
self.pool = ThreadPoolExecutor(thread_pool_size)
else:
self.pool = None
async def _execute_in_foreground(self, operation, *args):
return operation(*args)
async def _execute_in_pool(self, operation, *args):
loop = asyncio.get_running_loop()
return await loop.run_in_executor(self.pool, operation, *args)
async def queue(self, operation, *args):
if not self.pool:
return await self._execute_in_foreground(operation, *args)
return await self._execute_in_pool(operation, *args)
def cleanup(self):
if self.pool:
logger.info("Shutting down threads")
self.pool.shutdown()
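# Usage sketch (illustrative; must be awaited from inside a running event loop,
# and the function/arguments shown are placeholders):
#   pool = ThreadPool.instance(4)
#   result = await pool.queue(blocking_function, arg1, arg2)
#   ThreadPool.reset()  # shuts the cached executors down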
|
the-stack_0_24830
|
import pytest
import time
import slug
from slug import QuickConnect, Pipe
def test_through():
pin = Pipe()
pout = Pipe()
qc = QuickConnect( # noqa
side_in=pin.side_out,
side_out=pout.side_in,
keepopen=False
)
pin.side_in.write(b'spameggs')
pin.side_in.close()
roundtrip = pout.side_out.read()
assert roundtrip == b'spameggs'
# Broken on windows, see https://github.com/xonsh/slug/issues/7
@pytest.mark.skipif(slug.QuickConnect is slug.base.QuickConnect,
reason="Base QuickConnect is broken this way")
def test_change_input():
p1 = Pipe()
p2 = Pipe()
pout = Pipe()
qc = QuickConnect(
side_in=p1.side_out,
side_out=pout.side_in,
keepopen=False
)
p1.side_in.write(b'spam')
p2.side_in.write(b'foo')
time.sleep(1.0)
qc.side_in = p2.side_out
p1.side_in.write(b'eggs')
p2.side_in.write(b'bar')
p1.side_in.close()
p2.side_in.close()
roundtrip = pout.side_out.read()
assert roundtrip == b'spamfoobar'
def test_change_output():
pin = Pipe()
p1 = Pipe()
p2 = Pipe()
qc = QuickConnect(
side_in=pin.side_out,
side_out=p1.side_in,
)
pin.side_in.write(b'spam')
time.sleep(1.0)
qc.side_out = p2.side_in
pin.side_in.write(b'eggs')
out1 = p1.side_out.read(4000)
out2 = p2.side_out.read(4000)
assert out1 == b'spam'
assert out2 == b'eggs'
|
the-stack_0_24832
|
import gym
import numpy as np
from typing import Dict, Tuple
class NoopResetEnv(gym.Wrapper):
def __init__(self,
env: gym.Wrapper,
no_op_max=30,
):
"""
Samples initial states by performing a random number of no operations on reset.
Slightly modified from OpenAI baselines AtariWrappers. As detailed in Mnih et al. (2015) -- aka Nature paper.
:param env: the inner environment
:param no_op_max: maximum number of no operations
"""
super().__init__(env)
self.no_op_max = no_op_max
self.no_op_action = 0
assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
def reset(self,
**kwargs,
) -> np.ndarray:
"""
Resets the environment
:param kwargs: keyword arguments of the OpenAI core
:return: state
"""
self.env.reset(**kwargs)
no_ops = np.random.randint(1, self.no_op_max + 1)
state = None
for _ in range(no_ops):
state, _, done, _ = self.env.step(self.no_op_action)
if done:
state = self.env.reset(**kwargs)
return state
def step(self,
action: int,
) -> Tuple[np.ndarray, float, bool, Dict]:
"""
Performs the provided action
:param action: the action taken
:return: state, reward, done, information dictionary
"""
return self.env.step(action)
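# Illustrative usage (the environment id is an assumption; requires an Atari
# build of gym so that get_action_meanings() exposes 'NOOP'):
#   env = NoopResetEnv(gym.make('BreakoutNoFrameskip-v4'), no_op_max=30)
#   state = env.reset()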
|
the-stack_0_24834
|
import os
import json
from flask import Flask, render_template, abort
from dotenv import load_dotenv
load_dotenv()
app = Flask(__name__)
app.config["SECRET_KEY"] = os.environ["SECRET_KEY"]
def render_page(root="templates/example"):
return render_template("index.html", examples=load_examples())
@app.route("/")
def index():
return render_page()
@app.route("/example/<name>/")
def example(name):
with open("templates/example/{}.json".format(name), "r") as fd:
code = fd.read()
return code
def load_examples():
path = os.path.join("templates", "example")
for f in os.listdir(path):
filename, _ = os.path.splitext(f)
with open(os.path.join(path, f), "r") as fd:
example_data = json.load(fd)
if not "name" in example_data:
name = filename.split()
else:
name = str(example_data["name"])
yield {"name": name, "filename": filename}
if __name__ == "__main__":
app.run(debug=True, host="0.0.0.0")
|
the-stack_0_24835
|
# -*- coding: utf-8 -*-
"""Tests for single-instance prediction"""
from __future__ import print_function
import os
import unittest
from nose.tools import nottest
from sklearn.datasets import load_svmlight_file
import numpy as np
import treelite
import treelite.runtime
from util import load_txt, os_compatible_toolchains, libname, make_annotation,\
assert_almost_equal
dpath = os.path.abspath(os.path.join(os.getcwd(), 'tests/examples/'))
class TestSingleInst(unittest.TestCase):
@nottest
def run_pipeline_test(self, model, dtest_path, libname_fmt,
expected_prob_path, expected_margin_path,
multiclass, use_annotation, use_quantize):
dpath = os.path.abspath(os.path.join(os.getcwd(), 'tests/examples/'))
dtest_path = os.path.join(dpath, dtest_path)
libpath = libname(libname_fmt)
X_test, _ = load_svmlight_file(dtest_path, zero_based=True)
expected_prob_path = os.path.join(dpath, expected_prob_path)
expected_margin_path = os.path.join(dpath, expected_margin_path)
expected_prob = load_txt(expected_prob_path)
expected_margin = load_txt(expected_margin_path)
if multiclass:
nrow = X_test.shape[0]
expected_prob = expected_prob.reshape((nrow, -1))
expected_margin = expected_margin.reshape((nrow, -1))
params = {}
if use_annotation is not None:
params['annotate_in'] = use_annotation
if use_quantize:
params['quantize'] = 1
for toolchain in os_compatible_toolchains():
model.export_lib(toolchain=toolchain, libpath=libpath,
params=params, verbose=True)
predictor = treelite.runtime.Predictor(libpath=libpath, verbose=True)
for i in range(X_test.shape[0]):
x = X_test[i,:]
# Scipy CSR matrix
out_prob = predictor.predict_instance(x)
out_margin = predictor.predict_instance(x, pred_margin=True)
assert_almost_equal(out_prob, expected_prob[i])
assert_almost_equal(out_margin, expected_margin[i])
# NumPy 1D array with 0 as missing value
x = x.toarray().flatten()
out_prob = predictor.predict_instance(x, missing=0.0)
out_margin = predictor.predict_instance(x, missing=0.0, pred_margin=True)
assert_almost_equal(out_prob, expected_prob[i])
assert_almost_equal(out_margin, expected_margin[i])
# NumPy 1D array with np.nan as missing value
np.place(x, x == 0.0, [np.nan])
out_prob = predictor.predict_instance(x, missing=np.nan)
out_margin = predictor.predict_instance(x, missing=np.nan, pred_margin=True)
assert_almost_equal(out_prob, expected_prob[i])
assert_almost_equal(out_margin, expected_margin[i])
# NumPy 1D array with np.nan as missing value
# (default when `missing` parameter is unspecified)
out_prob = predictor.predict_instance(x)
out_margin = predictor.predict_instance(x, pred_margin=True)
assert_almost_equal(out_prob, expected_prob[i])
assert_almost_equal(out_margin, expected_margin[i])
def test_single_inst(self):
for model_path, dtrain_path, dtest_path, libname_fmt, \
expected_prob_path, expected_margin_path, multiclass in \
[('mushroom/mushroom.model', 'mushroom/agaricus.train',
'mushroom/agaricus.test', './agaricus{}',
'mushroom/agaricus.test.prob',
'mushroom/agaricus.test.margin', False),
('dermatology/dermatology.model', 'dermatology/dermatology.train',
'dermatology/dermatology.test', './dermatology{}',
'dermatology/dermatology.test.prob',
'dermatology/dermatology.test.margin', True)]:
model_path = os.path.join(dpath, model_path)
model = treelite.Model.load(model_path, model_format='xgboost')
make_annotation(model=model, dtrain_path=dtrain_path,
annotation_path='./annotation.json')
for use_annotation in ['./annotation.json', None]:
for use_quantize in [True, False]:
self.run_pipeline_test(model=model, dtest_path=dtest_path,
libname_fmt=libname_fmt,
expected_prob_path=expected_prob_path,
expected_margin_path=expected_margin_path,
multiclass=multiclass,
use_annotation=use_annotation,
use_quantize=use_quantize)
|
the-stack_0_24838
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from flask import Flask
from flask import request
from flask import Response
from flask import logging
from celery import Celery
import json
import sys
import os
sys.path.append(''.join([os.getcwd() ]))
app = Flask(__name__)
logger = logging.getLogger(__name__)
from dao.DB import WebScannerDB
from utils.IPv4Address import is_ipv4
celery_app = Celery('SheepScanner', include=['sheepscan.tasks'])
celery_app.config_from_object('config.celery')
db = WebScannerDB('192.168.233.1', 27017)
@app.route('/')
def main():
index = u"""
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<title>管理平台</title>
</head>
<body>
<p>REST API 返回JSON结果</p>
<ul>
<li>/tasks/all 显示所有任务ID</li>
<li>/tasks/add?ip=ip_address 添加IP地址任务</li>
<li>/tasks/status?task_id=task_id 查看任务状态</li>
</ul>
</body>
</html>
"""
return index#Response(index.encode("utf-8"), status=200, content_type='application/json' )
@app.route('/scan/add')
def add_scan():
iprange = request.args['hosts']
portrange = request.args.get('ports', 80, type=int)
result = celery_app.send_task("sheepscan.tasks.scan_host", args=[iprange, portrange]) or None
if result:
return json.dumps(dict(status=0, task_id=result.id))
else:
return json.dumps(dict(status=1))
@app.route('/tasks/add')
def add_task():
ip_address = request.args['ip']
if not is_ipv4(ip_address, skip_local=True):
return Response("""{status=1, info="invalid ip address!"}""", status=200, content_type='application/json' )
# Add the task
#celery_app.test.task(ip_address)
result = celery_app.send_task("sheepscan.tasks.http", args=[ip_address]) or None
if result:
return json.dumps(dict(status=0, task_id=result.id))
else:
return json.dumps(dict(status=1))
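# Illustrative call (host and port come from config.settings.web and are an
# assumption here):
#   curl "http://localhost:5000/tasks/add?ip=8.8.8.8"
#   -> {"status": 0, "task_id": "<celery task uuid>"} when the task was queued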
@app.route('/socks/add')
def add_socks():
ip_address = request.args['ip']
port = request.args.get('port', 1080, type=int)
if not is_ipv4(ip_address, skip_local=True):
return Response("""{status=1, info="invalid ip address!"}""", status=200, content_type='application/json' )
result = celery_app.send_task("sheepscan.tasks.socksv5", args=[ip_address, port]) or None
if result:
return json.dumps(dict(status=0, task_id=result.id))
else:
return json.dumps(dict(status=1))
@app.route('/tasks/status')
def show_result():
task_uuid = request.args['task_id']
res = db.query_result(task_uuid)
count = res.count()
if count < 1:
return Response("""{status=1, info="no founded!"}""", status=200, content_type='application/json' )
elif count == 1:
rep = dict(status=0)
rep.update(**res[0])
rep['date_done'] = rep.get('date_done').isoformat(' ')
return Response(json.dumps(rep), status=200, content_type='application/json' )
else:
return Response("""{status=1, info="no founded!"}""", status=200, content_type='application/json' )
@app.route('/tasks/all')
def show_all_task():
res = db.find()
uids = []
for r in res:
uids.append(r['_id'])
rep = dict(status=0, tasks=uids)
return Response(json.dumps(rep), status=200, content_type='application/json' )
if __name__ == '__main__':
from config.settings import web
app.run(web['host'], web['port'], debug=web['debug'], threaded=True)
|
the-stack_0_24839
|
# -*- coding: utf-8 -*-
import web , datetime , os, time, re, cgi , json
from web import form
import conf
def parse_config_variables(text:str, conf):
""" Parses and replace the variables in the text by their values from config.
Parameters
----------
text: str
The input string representing the config
conf
The config module
Returns
-------
str
The same text with the replaced wildcards
"""
params = {
'$myEndpoint': conf.myEndpoint,
'$myPublicEndpoint': conf.myPublicEndpoint
}
for k, v in params.items():
text = text.replace(k, v)
return text
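# Illustrative call (assumes conf.myEndpoint == "http://localhost:3000/sparql"):
#   parse_config_variables('{"endpoint": "$myEndpoint"}', conf)
#   -> '{"endpoint": "http://localhost:3000/sparql"}'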
def get_form(json_form):
""" read config in 'myform.json' and return a webpy form """
import io
with open(json_form) as config_form:
# The StringIO wrapper was used to re-use the json.load function
# without any other change.
text = config_form.read()
text = parse_config_variables(text, conf)
buf = io.StringIO(text)
buf.seek(0)
fields = json.load(buf)
params = ()
for field in fields:
# all
myid = field['id']
description = field['label'] if 'label' in field and len(field['label']) > 0 else 'input'
pre_a = "<span class='tip' data-toggle='tooltip' data-placement='bottom' title='"
pre_b = "'><i class='fas fa-info-circle'></i></span>"
prepend = pre_a+field['prepend']+pre_b if 'prepend' in field and len(field['prepend']) > 0 else ''
disabled = 'disabled' if 'disabled' in field and field['disabled'] == "True" else ''
classes = field['class'] if 'class' in field and len(field['class']) > 0 else ''
classes = classes+' searchWikidata' if 'searchWikidata' in field and field['searchWikidata'] == 'True' else classes
classes = classes+' disambiguate' if "disambiguate" in field and field["disambiguate"] == 'True' else classes
classes = classes+' ('+conf.main_entity+') '+disabled
autocomplete = field['cache_autocomplete'] if 'cache_autocomplete' in field and len(field['cache_autocomplete']) > 0 else ''
# text box
placeholder = field['placeholder'] if 'placeholder' in field else None
default = field['defaultvalue'] if 'defaultvalue' in field else ''
# dropdown
dropdown_values = [(k,v) for k,v in field['values'].items()] if 'values' in field else None
# Text box
if field['type'] == 'Textbox':
if "disambiguate" in field and field["disambiguate"] == 'True':
vpass = form.regexp(r".{1,200}$", 'must be between 1 and 200 characters')
params = params + (form.Textbox(myid, vpass,
description = description,
id=myid,
placeholder=placeholder,
pre = prepend,
class_= classes,
value=default) , )
else:
params = params + (form.Textbox(myid,
description = description,
id=myid,
placeholder=placeholder,
pre = prepend,
class_= classes,
value=default), )
if field['type'] == 'Dropdown':
params = params + (form.Dropdown(myid,
description = description,
args=dropdown_values,
placeholder=placeholder,
id=myid,
pre = prepend,
class_= classes), )
if field['type'] == 'Checkbox':
prepend_title = '<section class="checkbox_group_label label col-12">'+description+'</section>'
i = 0
params = params + (form.Checkbox(myid+'-'+str(i),
value=dropdown_values[0][0]+','+dropdown_values[0][1],
description = dropdown_values[0][1],
id=myid,
pre = prepend_title+prepend,
class_= classes+' checkbox_group',
checked=False), )
for value in dropdown_values[1:]:
i += 1
params = params + (form.Checkbox(myid+'-'+str(i),
value=value[0]+','+value[1],
description = value[1],
id=myid,
pre = '',
class_= classes+' checkbox_group following_checkbox',
checked=False), )
myform = form.Form(*params)
return myform
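# Illustrative myform.json fragment handled by get_form() above (key names follow
# the parsing code; the concrete values are made up):
#   [
#     {"id": "title", "type": "Textbox", "label": "Title", "disambiguate": "True"},
#     {"id": "genre", "type": "Dropdown", "label": "Genre",
#      "values": {"q1": "poem", "q2": "novel"}}
#   ]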
searchRecord = form.Form(
form.Textbox("search",
class_="searchWikidata col-md-11",
description="Search",
autocomplete="off")
)
#searchGeneral = form.Form(
#form.Textbox("search",
#class_="searchGeneral",
#description="search",
#autocomplete="off")
#)
|
the-stack_0_24840
|
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class ConversationCallEventTopicScoredAgent(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
ConversationCallEventTopicScoredAgent - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'agent': 'ConversationCallEventTopicUriReference',
'score': 'int'
}
self.attribute_map = {
'agent': 'agent',
'score': 'score'
}
self._agent = None
self._score = None
@property
def agent(self):
"""
Gets the agent of this ConversationCallEventTopicScoredAgent.
:return: The agent of this ConversationCallEventTopicScoredAgent.
:rtype: ConversationCallEventTopicUriReference
"""
return self._agent
@agent.setter
def agent(self, agent):
"""
Sets the agent of this ConversationCallEventTopicScoredAgent.
:param agent: The agent of this ConversationCallEventTopicScoredAgent.
:type: ConversationCallEventTopicUriReference
"""
self._agent = agent
@property
def score(self):
"""
Gets the score of this ConversationCallEventTopicScoredAgent.
:return: The score of this ConversationCallEventTopicScoredAgent.
:rtype: int
"""
return self._score
@score.setter
def score(self, score):
"""
Sets the score of this ConversationCallEventTopicScoredAgent.
:param score: The score of this ConversationCallEventTopicScoredAgent.
:type: int
"""
self._score = score
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_json(self):
"""
Returns the model as raw JSON
"""
return json.dumps(sanitize_for_serialization(self.to_dict()))
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
the-stack_0_24841
|
from math import sqrt
import pygame
class Shape:
movement_speed = 10
def __init__(self, x, y):
self.x, self.y = x, y
def move_up(self):
self.y -= self.movement_speed
def move_down(self):
self.y += self.movement_speed
def move_left(self):
self.x -= self.movement_speed
def move_right(self):
self.x += self.movement_speed
class Circle(Shape):
def __init__(self, center_x, center_y, radius):
super().__init__(center_x, center_y)
self.radius = radius
class Rectangle(Shape):
def __init__(self, top_left_x, top_left_y, width, height):
super().__init__(top_left_x, top_left_y)
self.width, self.height = width, height
class Line(Shape):
def __init__(self, x1, y1, x2, y2):
super().__init__(x1, y1)
self.x2, self.y2 = x2, y2
def check_collision(rectangle, circle):
rectangle_box = (rectangle.x,
rectangle.y,
rectangle.x + rectangle.width,
rectangle.y + rectangle.height)
circle_box = (circle.x - circle.radius,
circle.y - circle.radius,
circle.x + circle.radius,
circle.y + circle.radius)
if rectangle_box[0] > circle_box[2] \
or rectangle_box[1] > circle_box[3] \
or rectangle_box[2] < circle_box[0] \
or rectangle_box[3] < circle_box[1]:
return False
for x in (rectangle_box[0], rectangle_box[2]):
for y in (rectangle_box[1], rectangle_box[3]):
            # Corner test: a rectangle corner lies inside the circle when its
            # distance to the circle centre is at most the radius.
            if sqrt((x - circle.x) ** 2 + (y - circle.y) ** 2) <= circle.radius:
                print(sqrt((x - circle.x) ** 2 + (y - circle.y) ** 2), circle.radius)
                return True
# Screen parameters
BOARD_WID = 500
BOARD_HGT = 500
SIDEBAR_WID = 200
PADD = 15
WINDOW_WID = BOARD_WID + PADD * 2 + SIDEBAR_WID
WINDOW_HGT = BOARD_HGT + PADD * 2
WINDOW_CAPTION = "Collision"
FPS = 60
# Board borders
TOP_Y = PADD
BOT_Y = PADD + BOARD_HGT
LFT_X = PADD
RGT_X = PADD + BOARD_WID
# Colors
BOARD_COLOR = (240, 240, 240)
BACKGROUND_COLOR = (255, 255, 255)
OBJECTS_COLOR = (85, 85, 85)
TEXT_COLOR = (0, 0, 0)
pygame.init()
FONT = pygame.font.Font(None, 24)
screen = pygame.display.set_mode((WINDOW_WID, WINDOW_HGT))
pygame.display.set_caption(WINDOW_CAPTION)
CLOCK = pygame.time.Clock()
running = True
collision = False
objects = [Circle(100, 100, 50), Rectangle(200, 200, 100, 100)]
selected_index = 0
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
running = False
elif event.key == pygame.K_1:
selected_index = 0
elif event.key == pygame.K_2:
selected_index = 1
elif event.key == pygame.K_UP:
objects[selected_index].move_up()
elif event.key == pygame.K_DOWN:
objects[selected_index].move_down()
elif event.key == pygame.K_LEFT:
objects[selected_index].move_left()
elif event.key == pygame.K_RIGHT:
objects[selected_index].move_right()
screen.fill(BACKGROUND_COLOR)
pygame.draw.rect(screen, BOARD_COLOR, [PADD, PADD, BOARD_WID, BOARD_HGT], 0)
for obj in objects:
if type(obj) is Circle:
pygame.draw.circle(screen, OBJECTS_COLOR, (obj.x, obj.y), obj.radius, 0)
elif type(obj) is Rectangle:
pygame.draw.rect(screen, OBJECTS_COLOR, [obj.x, obj.y, obj.width, obj.height], 0)
collision = check_collision(objects[1], objects[0])
instr_text = FONT.render('Selected object %d' % (selected_index + 1), 1, TEXT_COLOR)
instr_text_pos = instr_text.get_rect()
instr_text_pos.centerx = BOARD_WID + PADD * 2 + SIDEBAR_WID / 2
instr_text_pos.centery = WINDOW_HGT - PADD - instr_text.get_height() / 2
screen.blit(instr_text, instr_text_pos)
coll_text = FONT.render('COLLIDING!' if collision else 'Not colliding', 1, TEXT_COLOR)
coll_text_pos = coll_text.get_rect()
coll_text_pos.centerx = BOARD_WID + PADD * 2 + SIDEBAR_WID / 2
coll_text_pos.centery = WINDOW_HGT / 2
screen.blit(coll_text, coll_text_pos)
pygame.display.flip()
CLOCK.tick(FPS)
|
the-stack_0_24843
|
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=too-many-lines
def attestation_create_provider(client,
resource_group_name,
provider_name,
location,
tags=None,
attestation_policy=None,
keys=None):
creation_params = {}
creation_params['location'] = location
creation_params['tags'] = tags
creation_params['properties'] = {}
creation_params['properties']['attestation_policy'] = attestation_policy
creation_params['properties']['policy_signing_certificates'] = {}
creation_params['properties']['policy_signing_certificates']['keys'] = keys
return client.create(resource_group_name=resource_group_name,
offer_type="virtualmachine",
provider_name=provider_name,
creation_params=creation_params)
def attestation_list_operation(client):
return client.list()
def attestation_attestation_provider_provider_list(client,
resource_group_name=None):
if resource_group_name:
return client.list_by_resource_group(resource_group_name=resource_group_name)
return client.list()
def attestation_attestation_provider_show(client,
resource_group_name,
provider_name):
return client.get(resource_group_name=resource_group_name,
offer_type="virtualmachine",
provider_name=provider_name)
def attestation_attestation_provider_update(client,
resource_group_name,
provider_name,
tags=None):
update_params = {}
update_params['tags'] = tags
return client.update(resource_group_name=resource_group_name,
offer_type="virtualmachine",
provider_name=provider_name,
update_params=update_params)
def attestation_attestation_provider_delete(client,
resource_group_name,
provider_name):
return client.delete(resource_group_name=resource_group_name,
offer_type="virtualmachine",
provider_name=provider_name)
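# Hypothetical invocation sketch (not part of the generated module); `client`
# and every argument value below are placeholders.
def _example_create_provider(client):
    return attestation_create_provider(client,
                                       resource_group_name='my-rg',
                                       provider_name='my-provider',
                                       location='westus',
                                       tags={'env': 'test'})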
|
the-stack_0_24845
|
"""
Kafka Consumer
"""
import json
from confluent_kafka import Consumer
from app_name.api.services import purchase_process
class KafkaConsumer:
"""
Kafka Consumer class
"""
def __init__(self, group_id, offset, topics, kafka_broker="localhost:9092", config=None):
"""
Kafka consumer's constructor
:param group_id:
:param offset:
:param topics:
:param kafka_broker:
:param config:
"""
self.group_id = group_id
self.offset = offset
self.topics = topics
self.kafka_broker = kafka_broker
if config is None:
config = self.properties
self.consumer = Consumer(config)
@property
def properties(self):
"""
Properties of consumer
:return: dictionary with config to consumer
"""
properties = {
"bootstrap.servers": self.kafka_broker,
"group.id": self.group_id,
"auto.offset.reset": self.offset,
}
return properties
def subscribe(self, topics):
"""
Subscribe in topics
:param topics: list of topics
"""
self.consumer.subscribe(topics)
def close(self):
"""
Close application
"""
self.consumer.close()
def run(self):
"""
Start application
"""
self.subscribe(self.topics)
while True:
msg = self.consumer.poll(1.0)
if msg is None:
continue
if msg.error():
print("Consumer error: {}".format(msg.error()))
continue
if msg.topic() == "PURCHASE_BOOK":
print(
f'Received key {msg.key().decode("utf-8")} message: {msg.value().decode("utf-8")}'
)
purchase_data = json.loads(msg.value())
purchase_process.purchase_process(purchase_data)
self.close()
if __name__ == "__main__":
consumer = KafkaConsumer(group_id=0, offset="earliest", topics=["PURCHASE_BOOK"])
consumer.run()
|
the-stack_0_24846
|
# qubit number=4
# total number=35
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3]) # number=15
prog.cz(input_qubit[0],input_qubit[3]) # number=16
prog.h(input_qubit[3]) # number=17
prog.x(input_qubit[3]) # number=13
prog.h(input_qubit[3]) # number=20
prog.cz(input_qubit[0],input_qubit[3]) # number=21
prog.h(input_qubit[3]) # number=22
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.h(input_qubit[0]) # number=5
prog.x(input_qubit[3]) # number=32
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[1]) # number=29
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
prog.h(input_qubit[0]) # number=23
prog.cz(input_qubit[2],input_qubit[0]) # number=24
prog.h(input_qubit[0]) # number=25
prog.y(input_qubit[2]) # number=30
prog.cx(input_qubit[2],input_qubit[0]) # number=11
prog.cx(input_qubit[2],input_qubit[0]) # number=18
prog.h(input_qubit[0]) # number=26
prog.x(input_qubit[2]) # number=31
prog.cz(input_qubit[2],input_qubit[0]) # number=27
prog.h(input_qubit[0]) # number=28
prog.y(input_qubit[3]) # number=33
prog.y(input_qubit[3]) # number=34
# circuit end
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = BasicAer.get_backend('statevector_simulator')
sample_shot =8000
info = execute(prog, backend=backend).result().get_statevector()
qubits = round(log2(len(info)))
info = {
np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
for i in range(2 ** qubits)
}
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_Class2347.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
|
the-stack_0_24847
|
# Copyright 2020 Filippo Aleotti, Fabio Tosi, Li Zhang, Matteo Poggi, Stefano Mattoccia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
class Loss(nn.modules.Module):
def __init__(self, loss_weights):
super(Loss, self).__init__()
self.loss_weights = loss_weights
def _expand_dims(self, x):
if len(x.shape) == 3:
x = x.unsqueeze(1)
return x
def _get_valid(self, target):
target = torch.squeeze(target, 1)
mask = target > 0
mask = self._expand_dims(mask)
target = self._expand_dims(target)
mask.detach()
valid = target[mask].size()[0]
return mask, valid
def forward(self, predictions, target):
mask, valid = self._get_valid(target)
if valid > 0:
loss = [
F.smooth_l1_loss(predictions[i][mask], target[mask])
* self.loss_weights[i]
for i in range(len(predictions))
]
loss = torch.stack(loss)
return torch.mean(loss)
else:
return 0.0
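# Illustrative usage sketch (not part of the original file): the loss expects a
# list of prediction maps (e.g. multi-scale outputs) and a target map in which
# invalid pixels are encoded as 0; shapes and weights below are made up.
def _example_loss_usage():
    predictions = [torch.rand(2, 1, 8, 8) for _ in range(3)]
    target = torch.rand(2, 1, 8, 8)
    target[:, :, :2, :] = 0  # mark some pixels as invalid
    criterion = Loss(loss_weights=[1.0, 0.7, 0.5])
    return criterion(predictions, target)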
|
the-stack_0_24850
|
from ..controller import PontoVerificacaoController, UsuarioController
from ..util.authorization import Authorization
from flask_restful import reqparse, request, Resource
class PontoVerificacaoResource(Resource):
ENDPOINT = 'ponto_verificacao'
ROUTE = '/pontos_verificacao/ponto_verificacao'
@Authorization.token_required(with_usuario=True)
def get(self, usuario):
return PontoVerificacaoController.get_by_usuario(usuario.id_usuario)
@Authorization.token_required()
def post(self):
res = request.json
ponto_verificacao_body = res.get("ponto_verificacao")
usuario_body = res.get("usuario")
usuario = UsuarioController.create_usuario(usuario_body)
return PontoVerificacaoController.post(ponto_verificacao_body, usuario=usuario)
@Authorization.token_required()
def put(self):
body = request.json
return PontoVerificacaoController.put(body)
@Authorization.token_required()
def delete(self):
parser = reqparse.RequestParser()
parser.add_argument('id_ponto_verificacao', type=int, required=True, help='Required query string id_ponto_verificacao.')
args = parser.parse_args()
id_ponto_verificacao = args.get('id_ponto_verificacao')
return PontoVerificacaoController.delete(id_ponto_verificacao)
class ListaPontoVerificacaoResource(Resource):
ENDPOINT = 'pontos_verificacao'
ROUTE = '/pontos_verificacao'
@Authorization.token_required()
def get(self):
return PontoVerificacaoController.get_list()
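# Illustrative sketch (assumption, not from the original project): the ENDPOINT
# and ROUTE class attributes are typically wired into a flask_restful Api
# instance along these lines.
def register_ponto_verificacao_resources(api):
    api.add_resource(PontoVerificacaoResource,
                     PontoVerificacaoResource.ROUTE,
                     endpoint=PontoVerificacaoResource.ENDPOINT)
    api.add_resource(ListaPontoVerificacaoResource,
                     ListaPontoVerificacaoResource.ROUTE,
                     endpoint=ListaPontoVerificacaoResource.ENDPOINT)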
|
the-stack_0_24852
|
from typing import Callable, Dict, List
from goji.harvester.harvester import Harvester
from goji.util.ws_message import WsRpcMessage, create_payload_dict
class HarvesterRpcApi:
def __init__(self, harvester: Harvester):
self.service = harvester
self.service_name = "goji_harvester"
def get_routes(self) -> Dict[str, Callable]:
return {
"/get_plots": self.get_plots,
"/refresh_plots": self.refresh_plots,
"/delete_plot": self.delete_plot,
"/add_plot_directory": self.add_plot_directory,
"/get_plot_directories": self.get_plot_directories,
"/remove_plot_directory": self.remove_plot_directory,
}
async def _state_changed(self, change: str) -> List[WsRpcMessage]:
if change == "plots":
data = await self.get_plots({})
payload = create_payload_dict("get_plots", data, self.service_name, "wallet_ui")
return [payload]
return []
async def get_plots(self, request: Dict) -> Dict:
plots, failed_to_open, not_found = self.service.get_plots()
return {
"plots": plots,
"failed_to_open_filenames": failed_to_open,
"not_found_filenames": not_found,
}
async def refresh_plots(self, request: Dict) -> Dict:
await self.service.refresh_plots()
return {}
async def delete_plot(self, request: Dict) -> Dict:
filename = request["filename"]
if self.service.delete_plot(filename):
return {}
raise ValueError(f"Not able to delete file {filename}")
async def add_plot_directory(self, request: Dict) -> Dict:
directory_name = request["dirname"]
if await self.service.add_plot_directory(directory_name):
return {}
raise ValueError(f"Did not add plot directory {directory_name}")
async def get_plot_directories(self, request: Dict) -> Dict:
plot_dirs = await self.service.get_plot_directories()
return {"directories": plot_dirs}
async def remove_plot_directory(self, request: Dict) -> Dict:
directory_name = request["dirname"]
if await self.service.remove_plot_directory(directory_name):
return {}
raise ValueError(f"Did not remove plot directory {directory_name}")
|
the-stack_0_24854
|
import numpy as np
from hyperspectral import HyperspectralCube as Cube
def read_hyperspectral_cube(cube):
if isinstance(cube, basestring):
cube = Cube.from_fits(cube)
if not isinstance(cube, Cube):
raise TypeError("Provided cube is not a HyperspectralCube")
if cube.is_empty():
raise ValueError("Provided cube is empty")
return cube
def above_percentile(cube, percentile=30):
"""
Will create a mask that will only allow the pixels *above* the `percentile`.
"""
cube = read_hyperspectral_cube(cube)
img = np.nansum(cube, axis=0) # flatten along spectral axis
p = np.nanpercentile(img, percentile)
mask = np.copy(img)
mask[img >= p] = 1
mask[img < p] = 0
return mask
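# Illustrative sketch (not part of the original module): the percentile mask in
# isolation, using a plain numpy image that stands in for nansum(cube, axis=0).
def _example_percentile_mask(percentile=30):
    img = np.random.rand(16, 16)
    p = np.nanpercentile(img, percentile)
    mask = np.where(img >= p, 1, 0)
    return mask  # 1 above the percentile, 0 below, mirroring above_percentile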
|
the-stack_0_24856
|
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Set of converters between db models, Python and JSON dictionaries, etc."""
__author__ = 'Pavel Simakov ([email protected])'
import base64
import datetime
import json
from google.appengine.ext import db
JSON_DATE_FORMAT = '%Y/%m/%d'
JSON_TYPES = ['string', 'date', 'text', 'html', 'boolean', 'integer', 'array']
# Prefix to add to all JSON responses to guard against XSSI. Must be kept in
# sync with modules/oeditor/oeditor.html.
_JSON_XSSI_PREFIX = ")]}'\n"
SIMPLE_TYPES = (int, long, float, bool, dict, basestring, list)
SUPPORTED_TYPES = (db.GeoPt, datetime.date)
def dict_to_json(source_dict, unused_schema):
"""Converts Python dictionary into JSON dictionary using schema."""
output = {}
for key, value in source_dict.items():
if value is None or isinstance(value, SIMPLE_TYPES):
output[key] = value
elif isinstance(value, datetime.date):
output[key] = value.strftime(JSON_DATE_FORMAT)
elif isinstance(value, db.GeoPt):
output[key] = {'lat': value.lat, 'lon': value.lon}
else:
raise ValueError(
'Failed to encode key \'%s\' with value \'%s\'.' % (key, value))
return output
def dumps(*args, **kwargs):
"""Wrapper around json.dumps.
No additional behavior; present here so this module is a drop-in replacement
for json.dumps|loads. Clients should never use json.dumps|loads directly.
See usage docs at http://docs.python.org/2/library/json.html.
Args:
*args: positional arguments delegated to json.dumps.
**kwargs: keyword arguments delegated to json.dumps.
Returns:
string. The converted JSON.
"""
return json.dumps(*args, **kwargs)
def loads(s, prefix=_JSON_XSSI_PREFIX, **kwargs):
"""Wrapper around json.loads that handles XSSI-protected responses.
To prevent XSSI we insert a prefix before our JSON responses during server-
side rendering. This loads() removes the prefix and should always be used in
place of json.loads. See usage docs at
http://docs.python.org/2/library/json.html.
Args:
s: str or unicode. JSON contents to convert.
prefix: string. The XSSI prefix we remove before conversion.
**kwargs: keyword arguments delegated to json.loads.
Returns:
object. Python object reconstituted from the given JSON string.
"""
    if s.startswith(prefix):
        s = s[len(prefix):]  # strip the literal prefix, not a character set
return json.loads(s, **kwargs)
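# Illustrative sketch (not part of the original module): round-tripping a
# response that carries the XSSI guard prefix; the payload is made up.
def _example_xssi_round_trip():
    guarded = _JSON_XSSI_PREFIX + dumps({'status': 200, 'message': 'OK'})
    return loads(guarded)  # back to {'status': 200, 'message': 'OK'}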
def json_to_dict(source_dict, schema):
"""Converts JSON dictionary into Python dictionary using schema."""
output = {}
for key, attr in schema['properties'].items():
# Skip schema elements that don't exist in source.
if not key in source_dict:
continue
attr_type = attr['type']
if not attr_type in JSON_TYPES:
raise ValueError('Unsupported JSON type: %s' % attr_type)
if attr_type == 'date':
output[key] = datetime.datetime.strptime(
source_dict[key], JSON_DATE_FORMAT).date()
elif attr_type == 'array':
subschema = attr['items']
array = []
for item in source_dict[key]:
array.append(json_to_dict(item, subschema))
output[key] = array
else:
output[key] = source_dict[key]
return output
def entity_to_dict(entity, force_utf_8_encoding=False):
"""Puts model object attributes into a Python dictionary."""
output = {}
for key, prop in entity.properties().iteritems():
value = getattr(entity, key)
if value is None or isinstance(value, SIMPLE_TYPES) or isinstance(
value, SUPPORTED_TYPES):
output[key] = value
# some values are raw bytes; force utf-8 or base64 encoding
if force_utf_8_encoding and isinstance(value, basestring):
try:
output[key] = value.encode('utf-8')
except UnicodeDecodeError:
output[key] = {
'type': 'binary',
'encoding': 'base64',
'content': base64.urlsafe_b64encode(value)}
else:
raise ValueError('Failed to encode: %s' % prop)
# explicitly add entity key as a 'string' attribute
output['key'] = str(entity.key())
return output
def dict_to_entity(entity, source_dict):
"""Sets model object attributes from a Python dictionary."""
for key, value in source_dict.items():
if value is None or isinstance(value, SIMPLE_TYPES) or isinstance(
value, SUPPORTED_TYPES):
setattr(entity, key, value)
else:
raise ValueError('Failed to encode: %s' % value)
return entity
def string_to_value(string, value_type):
"""Converts string representation to a value."""
if value_type == str:
if not string:
return ''
else:
return string
elif value_type == bool:
if string == '1' or string == 'True' or string == 1:
return True
else:
return False
elif value_type == int or value_type == long:
if not string:
return 0
else:
return long(string)
else:
raise ValueError('Unknown type: %s' % value_type)
def value_to_string(value, value_type):
"""Converts value to a string representation."""
if value_type == str:
return value
elif value_type == bool:
if value:
return 'True'
else:
return 'False'
elif value_type == int or value_type == long:
return str(value)
else:
raise ValueError('Unknown type: %s' % value_type)
def dict_to_instance(adict, instance, defaults=None):
"""Populates instance attributes using data dictionary."""
for key, unused_value in instance.__dict__.iteritems():
if not key.startswith('_'):
if key in adict:
setattr(instance, key, adict[key])
elif defaults and key in defaults:
setattr(instance, key, defaults[key])
else:
raise KeyError(key)
def instance_to_dict(instance):
"""Populates data dictionary from instance attrs."""
adict = {}
for key, unused_value in instance.__dict__.iteritems():
if not key.startswith('_'):
adict[key] = getattr(instance, key)
return adict
def send_json_response(
handler, status_code, message, payload_dict=None, xsrf_token=None):
"""Formats and sends out a JSON REST response envelope and body."""
handler.response.headers[
'Content-Type'] = 'application/javascript; charset=utf-8'
handler.response.headers['X-Content-Type-Options'] = 'nosniff'
handler.response.headers['Content-Disposition'] = 'attachment'
response = {}
response['status'] = status_code
response['message'] = message
if payload_dict:
response['payload'] = dumps(payload_dict)
if xsrf_token:
response['xsrf_token'] = xsrf_token
handler.response.write(_JSON_XSSI_PREFIX + dumps(response))
def send_json_file_upload_response(handler, status_code, message):
"""Formats and sends out a JSON REST response envelope and body.
NOTE: This method has lowered protections against XSSI (compared to
send_json_response) and so it MUST NOT be used with dynamic data. Use ONLY
constant data originating entirely on the server as arguments.
Args:
handler: the request handler.
status_code: the HTTP status code for the response.
message: the text of the message - must not be dynamic data.
"""
# The correct MIME type for JSON is application/json but there are issues
# with our AJAX file uploader in MSIE which require text/plain instead.
    if 'MSIE' in handler.request.headers.get('user-agent', ''):
content_type = 'text/plain; charset=utf-8'
else:
content_type = 'application/javascript; charset=utf-8'
handler.response.headers['Content-Type'] = content_type
handler.response.headers['X-Content-Type-Options'] = 'nosniff'
response = {}
response['status'] = status_code
response['message'] = message
handler.response.write(_JSON_XSSI_PREFIX + dumps(response))
class JsonFile(object):
"""A streaming file-ish interface for JSON content.
Usage:
writer = JsonFile('path')
writer.open('w')
writer.write(json_serializable_python_object) # We serialize for you.
writer.write(another_json_serializable_python_object)
writer.close() # Must close before read.
reader = JsonFile('path')
reader.open('r') # Only 'r' and 'w' are supported.
for entity in reader:
do_something_with(entity) # We deserialize back to Python for you.
self.reader.reset() # Reset read pointer to head.
contents = self.reader.read() # Returns {'rows': [...]}.
for entity in contents['rows']:
do_something_with(entity) # Again, deserialized back to Python.
reader.close()
with syntax is not supported. Cannot be used inside the App Engine
container where the filesystem is read-only.
Internally, each call to write will take a Python object, serialize it, and
write the contents as one line to the json file. On __iter__ we deserialize
one line at a time, generator-style, to avoid OOM unless serialization/de-
serialization of one object exhausts memory.
"""
# When writing to files use \n instead of os.linesep; see
# http://docs.python.org/2/library/os.html.
_LINE_TEMPLATE = ',\n %s'
_MODE_READ = 'r'
_MODE_WRITE = 'w'
_MODES = frozenset([_MODE_READ, _MODE_WRITE])
_PREFIX = '{"rows": ['
_SUFFIX = ']}'
def __init__(self, path):
self._first = True
self._file = None
self._path = path
def __iter__(self):
assert self._file
return self
def close(self):
"""Closes the file; must close before read."""
assert self._file
if not self._file.closed: # Like file, allow multiple close calls.
if self.mode == self._MODE_WRITE:
self._file.write('\n' + self._SUFFIX)
self._file.close()
@property
def mode(self):
"""Returns the mode the file was opened in."""
assert self._file
return self._file.mode
@property
def name(self):
"""Returns string name of the file."""
assert self._file
return self._file.name
def next(self):
"""Retrieves the next line and deserializes it into a Python object."""
assert self._file
line = self._file.readline()
if line.startswith(self._PREFIX):
line = self._file.readline()
if line.endswith(self._SUFFIX):
raise StopIteration()
line = line.strip()
if line.endswith(','):
line = line[:-1]
return loads(line)
def open(self, mode):
"""Opens the file in the given mode string ('r, 'w' only)."""
assert not self._file
assert mode in self._MODES
self._file = open(self._path, mode)
if self.mode == self._MODE_WRITE:
self._file.write(self._PREFIX)
def read(self):
"""Reads the file into a single Python object; may exhaust memory.
Returns:
dict. Format: {'rows': [...]} where the value is a list of de-
serialized objects passed to write.
"""
assert self._file
return loads(self._file.read())
def reset(self):
"""Resets file's position to head."""
assert self._file
self._file.seek(0)
def write(self, python_object):
"""Writes serialized JSON representation of python_object to file.
Args:
python_object: object. Contents to write. Must be JSON-serializable.
Raises:
ValueError: if python_object cannot be JSON-serialized.
"""
assert self._file
template = self._LINE_TEMPLATE
if self._first:
template = template[1:]
self._first = False
self._file.write(template % dumps(python_object))
def run_all_unit_tests():
"""Runs all unit tests."""
assert value_to_string(True, bool) == 'True'
assert value_to_string(False, bool) == 'False'
assert value_to_string(None, bool) == 'False'
assert string_to_value('True', bool)
assert string_to_value('1', bool)
assert string_to_value(1, bool)
assert not string_to_value('False', bool)
assert not string_to_value('0', bool)
assert not string_to_value('5', bool)
assert not string_to_value(0, bool)
assert not string_to_value(5, bool)
assert not string_to_value(None, bool)
assert string_to_value('15', int) == 15
assert string_to_value(15, int) == 15
assert string_to_value(None, int) == 0
assert string_to_value('foo', str) == 'foo'
assert string_to_value(None, str) == str('')
if __name__ == '__main__':
run_all_unit_tests()
|
the-stack_0_24858
|
# coding=utf8
'''
this file is designed to join pytorch profile's log with nccl's log
'''
import json
import argparse
import os
from collections import OrderedDict
from dataclasses import dataclass
import nccl_log_miner as ncl_miner
def _load_py_prof(f_dir):
f_list = os.listdir(f_dir)
f_list = [os.path.join(f_dir, x) for x in f_list if x.endswith(".json")]
f_list.sort(key=lambda x: os.path.getmtime(x))
f_path = f_list[-1]
res = []
with open(f_path) as r:
obj = json.load(r)
evt_obj_arr = obj['traceEvents']
for evt_obj in evt_obj_arr:
if 'cat' in evt_obj and 'name' in evt_obj:
cat = evt_obj['cat']
name = evt_obj['name'].lower()
if 'Kernel' == cat:
if 'nccl' in name:
res.append(evt_obj)
return res
def _join_coarsely(nccl_event_arr, ncl_obj_arr, ncl_obj_cnt):
res = []
for event in nccl_event_arr:
ncl_obj_sub_arr = _filter_sub_arr(event, ncl_obj_arr)
ncl_obj_sub_arr = [x for x in ncl_obj_sub_arr if str(x) in ncl_obj_cnt]
res.append([event, ncl_obj_sub_arr])
return res
def _filter_sub_arr(event, ncl_obj_arr):
res = []
for obj in ncl_obj_arr:
if _is_match(obj, event):
res.append(obj)
return res
def _is_match(obj, event):
name = event['name'].lower()
device = int(event['args']['device'])
if obj.device != device:
return False
if obj.op_name.lower() not in name:
return False
if "_f32" in name and obj.dtype!=7:
return False
if "_f16" in name and obj.dtype!=6:
return False
return True
def _cons_stream_map(coarse_joined_res):
# first stream no to ncl_address map
no_2_addr_map_list = []
for evt, ncl_obj_subs in coarse_joined_res:
tmp_dict = dict()
key = evt['args']['stream']
tmp_dict[key] = []
for ncl_obj in ncl_obj_subs:
val = ncl_obj.stream
if val not in tmp_dict[key]:
tmp_dict[key].append(val)
no_2_addr_map_list.append(tmp_dict)
# then merge the intersection of the map
joined_key2map_list = _find_intersection(no_2_addr_map_list)
# thirdly select out one to one map if possible
res = OrderedDict()
taken = set()
for key, val in joined_key2map_list.items():
if len(val) == 1 and val[0] not in taken:
res[key] = val
taken.add(val[0])
for key, val in joined_key2map_list.items():
if key not in res:
new_val = [x for x in val if x not in taken]
res[key] = new_val
return res
def _find_intersection(map_list):
tmp_key_arr_map = OrderedDict()
for tmp_dict in map_list:
for key, val in tmp_dict.items():
if key not in tmp_key_arr_map:
tmp_key_arr_map[key] = []
tmp_key_arr_map[key].append(val)
res = OrderedDict()
for key, val in tmp_key_arr_map.items():
tmp_list = []
is_first = True
for cur_list in val:
tmp_list = _list_intersect(tmp_list, cur_list, is_first)
is_first = False
res[key] = tmp_list
return res
def _list_intersect(list1, list2, is_first=False):
if is_first:
return list2
res = []
for elem in list1:
if elem in list2:
res.append(elem)
return res
def _filter_with_stream(coarse_joined_res, stream_map):
res = []
for evt, sub_arr in coarse_joined_res:
evt_stream = evt['args']['stream']
obj_streams = stream_map.get(evt_stream, [])
tmp_arr = []
for ncl_obj in sub_arr:
if ncl_obj not in tmp_arr and ncl_obj.stream in obj_streams:
tmp_arr.append(ncl_obj)
res.append([evt, tmp_arr])
return res
def main(args):
f_py_prof = args.f_py_prof
nccl_events = _load_py_prof(f_py_prof)
print(f"number of events is {len(nccl_events)}")
for evt in nccl_events:
print("new evt: ")
print(evt)
ncl_obj_arr, ncl_obj_cnt = ncl_miner.parse_and_mine(args)
# step 1 coarsely join
coarse_joined_res = _join_coarsely(nccl_events, ncl_obj_arr, ncl_obj_cnt)
# step 2 construct stream map
stream_map = _cons_stream_map(coarse_joined_res)
for key, val in stream_map.items():
print(key)
print(val)
# step3 filter joined_res by stream map
filter_joined_res = _filter_with_stream(coarse_joined_res, stream_map)
# save to output
with open(args.output, "w") as out_:
for evt, sub_arr in filter_joined_res:
extra_str = [str(x) for x in sub_arr]
evt['extra_nccl'] = extra_str
out_str = json.dumps(evt)
out_.write(out_str + "\n")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--f_py_prof', type=str, help="file path to the pytorch profiler")
parser.add_argument('-n', '--nccl', type=str, help='file path to log with nccl\'s log')
parser.add_argument('--iters', type=int, help='minimum iterations log required from nccl')
parser.add_argument('-o', '--output', type=str, help='path to output path about the joined files')
args = parser.parse_args()
main(args)
|
the-stack_0_24859
|
import os
import re
from setuptools import setup, find_packages
from io import open
with open(os.path.join(os.path.dirname(__file__), 'cloudscraper', '__init__.py')) as fp:
VERSION = re.match(r'.*__version__ = \'(.*?)\'', fp.read(), re.S).group(1)
with open('README.md', 'r', encoding='utf-8') as fp:
readme = fp.read()
setup(
name = 'cloudscraper',
author = 'VeNoMouS',
author_email = '[email protected]',
version=VERSION,
packages = find_packages(exclude=['tests*']),
description = 'A Python module to bypass Cloudflare\'s anti-bot page.',
long_description=readme,
long_description_content_type='text/markdown',
url = 'https://github.com/venomous/cloudscraper',
keywords = [
'cloudflare',
'scraping',
'ddos',
'scrape',
'webscraper',
'anti-bot',
'waf',
'iuam',
'bypass',
'challenge'
],
include_package_data = True,
install_requires = [
'requests >= 2.9.2',
'requests_toolbelt >= 0.9.1',
'pyparsing >= 2.4.7'
],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
|
the-stack_0_24861
|
import kol.Error as Error
from GenericRequest import GenericRequest
from kol.manager import PatternManager
class AutoSellRequest(GenericRequest):
"Sells items via the autosell system"
ALL = 1
ALL_BUT_ONE = 2
QUANTITY = 3
def __init__(self, session, itemList, howMany, amount=1):
super(AutoSellRequest, self).__init__(session)
self.url = session.serverURL + "sellstuff_ugly.php"
self.requestData["pwd"] = session.pwd
self.requestData["action"] = "sell"
if howMany == self.ALL:
self.requestData["mode"] = self.ALL
elif howMany == self.ALL_BUT_ONE:
self.requestData["mode"] = self.ALL_BUT_ONE
elif howMany == self.QUANTITY:
self.requestData["mode"] = self.QUANTITY
self.requestData["quantity"] = amount
else:
raise Error.Error("Invalid AutoSell Mode Selected.", Error.REQUEST_GENERIC)
for item in itemList:
self.requestData[("item"+str(item["id"]))] = str(item["id"])
def parseResponse(self):
salePattern = PatternManager.getOrCompilePattern("autosellResponse")
saleMatch = salePattern.search(self.responseText)
if saleMatch:
multiItemPattern = PatternManager.getOrCompilePattern("autosellItems")
finalTwoPattern = PatternManager.getOrCompilePattern("autosellLastTwo")
finalOnePattern = PatternManager.getOrCompilePattern("autosellOne")
salesTotal = int(saleMatch.group(2).replace(',',''))
soldItems = []
lastItemIndex = None
for itemMatch in multiItemPattern.finditer(saleMatch.group(1)):
# We cannot look up the item because the names are likely pluralized
name = itemMatch.group(2)
if itemMatch.group(1) == "":
quantity = 1
else:
quantity = int(itemMatch.group(1).replace(',',''))
soldItems.append({"quantity":quantity, "name":name})
lastItemIndex = itemMatch.end(2)
if lastItemIndex != None:
finalMatch = finalTwoPattern.search(saleMatch.group(1)[lastItemIndex+1:])
else:
finalMatch = finalTwoPattern.search(saleMatch.group(1))
if finalMatch:
if finalMatch.group(2) != "":
name = finalMatch.group(2)
if finalMatch.group(1) == "":
quantity = 1
else:
quantity = int(finalMatch.group(1).replace(',',''))
soldItems.append({"quantity":quantity, "name":name})
name = finalMatch.group(4)
if finalMatch.group(3) == "":
quantity = 1
else:
quantity = int(finalMatch.group(3).replace(',',''))
soldItems.append({"quantity":quantity, "name":name})
else:
singleItem = finalOnePattern.search(saleMatch.group(1))
name = singleItem.group(2)
if singleItem.group(1) == "":
quantity = 1
else:
quantity = int(singleItem.group(1).replace(',',''))
soldItems.append({"quantity":quantity, "name":name})
else:
salesTotal = 0
soldItems = []
self.responseData["meatGained"] = salesTotal
self.responseData["itemsSold"] = soldItems
|
the-stack_0_24862
|
def profileEntity(profile) -> dict:
return {
"eob_id": str(profile["_id"]),
"user_id": profile["user_id"],
"name": profile["name"],
"screen_name": profile["screen_name"],
"location": profile["location"],
"description": profile["description"],
"followers_count": profile["followers_count"],
"friends_count": profile["friends_count"],
"created_at": profile["created_at"],
"verified": profile["verified"],
"profile_image_url": profile["profile_image_url"],
"profile_banner_url": profile["profile_banner_url"]
}
def messageEntity(msg) -> dict:
return {
"id": str(msg['_id']),
"msg_id": msg["msg_id"],
"full_text":msg["full_text"],
"created_at":msg["created_at"],
"retweet_count":msg["retweet_count"],
"favorite_count":msg["favorite_count"],
"retweeted": msg["retweeted"],
"lang": msg["lang"]
}
|
the-stack_0_24863
|
# coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image Transformer model with model and data parallelism using MTF.
Integration of Mesh tensorflow with Image Transformer to do model parallelism.
Currently, this supports unconditional image generation. Specify a particular
architecture layout in the hparams that specifies how different dimensions are
split or replicated along the mesh dimensions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import mesh_tensorflow as mtf
from tensor2tensor.layers import common_hparams
from tensor2tensor.layers import common_layers
from tensor2tensor.utils import mtf_model
from tensor2tensor.utils import registry
import tensorflow.compat.v1 as tf
@registry.register_model
class MtfImageTransformer(mtf_model.MtfModel):
"""Image Transformer in mesh_tensorflow."""
@property
def inputs_vocab_dim(self):
assert self.has_input
return mtf.Dimension("inputs_vocab", self._hparams.num_classes)
@property
def targets_vocab_dim(self):
vocab_size = self._problem_hparams.vocab_size["targets"]
if hasattr(self._hparams, "vocab_divisor"):
vocab_size += (-vocab_size) % self._hparams.vocab_divisor
return mtf.Dimension("vocab", vocab_size)
@property
def outputs_vocab_dim(self):
return mtf.Dimension("output_vocab", 256)
@property
def pos_dim(self):
return mtf.Dimension("pos", self._hparams.img_len)
@property
def rows_dim(self):
return mtf.Dimension("rows", self._hparams.img_len)
@property
def cols_dim(self):
return mtf.Dimension(
"cols", self._hparams.img_len*self._hparams.num_channels)
@property
def orig_cols_dim(self):
return mtf.Dimension("orig_cols", self._hparams.img_len)
@property
def channels_dim(self):
return mtf.Dimension("channels", self._hparams.num_channels)
@property
def model_dim(self):
return mtf.Dimension("d_model", self._hparams.hidden_size)
@property
def max_length_dim(self):
return mtf.Dimension(
"max_length",
self._hparams.img_len*self._hparams.img_len*self._hparams.num_channels)
@property
def length_dim(self):
return mtf.Dimension(
"length",
self._hparams.img_len*self._hparams.img_len*self._hparams.num_channels)
@property
def heads_dim(self):
return mtf.Dimension("heads", self._hparams.num_heads)
@property
def kv_dim(self):
return mtf.Dimension("d_kv", self._hparams.d_kv)
@property
def feedforward_dim(self):
return mtf.Dimension("d_ff", self._hparams.d_ff)
@property
def activation_type(self):
hparams = self._hparams
if hparams.activation_dtype == "float32":
activation_dtype = tf.float32
elif hparams.activation_dtype == "float16":
activation_dtype = tf.float16
elif hparams.activation_dtype == "bfloat16":
activation_dtype = tf.bfloat16
else:
raise ValueError(
"unknown hparams.activation_dtype %s" % hparams.activation_dtype)
return activation_dtype
def create_positional_emb_2d(self, targets):
"""Learned 2d positional embedding for images."""
mesh = targets.mesh
positional_emb_rows_var = mtf.get_variable(
mesh, "positional_emb_rows",
mtf.Shape([self.pos_dim, self.model_dim]),
initializer=tf.random_normal_initializer(),
activation_dtype=self.activation_type)
positional_emb_cols_var = mtf.get_variable(
mesh, "positional_emb_cols",
mtf.Shape([self.pos_dim, self.model_dim]),
initializer=tf.random_normal_initializer(),
activation_dtype=self.activation_type)
targets_position_x = mtf.range(mesh, self.rows_dim, dtype=tf.int32)
targets_position_y = mtf.range(mesh, self.cols_dim, dtype=tf.int32)
position_x = mtf.broadcast(
mtf.gather(positional_emb_rows_var, targets_position_x,
self.pos_dim),
mtf.Shape([self.rows_dim, self.cols_dim, self.model_dim]))
position_y = mtf.broadcast(
mtf.gather(positional_emb_cols_var, targets_position_y,
self.pos_dim),
mtf.Shape([self.rows_dim, self.cols_dim, self.model_dim]))
return position_x + position_y
def mtf_model_fn(self, features, mesh):
features = copy.copy(features)
tf.logging.info("features = %s" % features)
hparams = self._hparams
activation_dtype = self.activation_type
# We assume fixed vocab size for targets
targets = tf.to_int32(features["targets"])
# Image preprocessing, reshape into a 1D sequence and shift right.
length = hparams.img_len*hparams.img_len*hparams.num_channels
targets = tf.reshape(targets, [hparams.batch_size, length])
shifted_targets = common_layers.shift_right_2d(targets)
# Declare all the dimensions
batch_dim = mtf.Dimension("batch", hparams.batch_size)
def import_to_batch_by_length(x, name):
return mtf.import_tf_tensor(
mesh, x, mtf.Shape([batch_dim, self.length_dim]), name=name)
targets = import_to_batch_by_length(targets, "targets")
shifted_targets = import_to_batch_by_length(
shifted_targets, "shifted_targets")
extra_losses = []
# Create targets content and position embeddings.
# Create embedding var for targets and positions and do a gather.
targets_embedding_var = mtf.get_variable(
mesh, "targets_embedding",
mtf.Shape([self.targets_vocab_dim, self.model_dim]),
initializer=tf.random_normal_initializer(),
activation_dtype=activation_dtype)
x = mtf.gather(targets_embedding_var,
shifted_targets, self.targets_vocab_dim)
# Add positional embeddings
x += mtf.reshape(self.create_positional_emb_2d(targets),
[self.length_dim, self.model_dim])
# If conditional and input is given, add the input embedding to the target.
# TODO(nikip): Verify conditional.
if self.has_input and not hparams.unconditional:
inputs = tf.squeeze(tf.to_int32(features["inputs"]), [2, 3])
inputs = import_to_batch_by_length(inputs, "inputs")
# Input embeddings
inputs_embedding_var = mtf.layers.embedding(
mesh, "input_embedding",
mtf.Shape([self.inputs_vocab_dim, self.model_dim]),
activation_dtype=activation_dtype)
inputs_emb = mtf.gather(
inputs_embedding_var, inputs, self.inputs_vocab_dim)
x += inputs_emb
# Image Transformer Decoder
# [ self attention - ffn - residual + dropout] x n
if hparams.attention_type == "local1d_spatial":
decoder_output = local_attention1d_spatial_decoder(
x, self.kv_dim, self.heads_dim, self.feedforward_dim, hparams)
elif hparams.attention_type == "local2d_spatial":
decoder_output = local_attention2d_spatial_decoder(
x, self.kv_dim, self.heads_dim, self.feedforward_dim, hparams)
elif hparams.attention_type == "local1d":
decoder_output = local_attention1d_masked_decoder(
x, self.kv_dim, self.heads_dim, self.feedforward_dim, hparams)
else:
raise ValueError("Invalid attention type.")
# Calculate the logits and loss.
logits = mtf.layers.dense(
decoder_output, self.outputs_vocab_dim, name="logits")
# Need a reshape for logits
logits = mtf.reshape(
logits, mtf.Shape([batch_dim, self.length_dim, self.outputs_vocab_dim]))
soft_targets = mtf.one_hot(
targets, self.outputs_vocab_dim, dtype=activation_dtype)
loss = mtf.layers.softmax_cross_entropy_with_logits(
logits, soft_targets, self.outputs_vocab_dim)
loss = mtf.reduce_mean(loss)
for l in extra_losses:
loss += l
# Reshape logits to original target shape.
logits = mtf.reshape(
logits,
mtf.Shape([batch_dim, self.rows_dim, self.orig_cols_dim,
self.channels_dim, self.outputs_vocab_dim]))
return logits, loss
def layer_prepostprocess_dropout(x, hparams):
batch_dim = x.shape.dims[0]
model_dim = x.shape.dims[-1]
return mtf.dropout(
x,
keep_prob=1.0 - hparams.layer_prepostprocess_dropout,
noise_shape=mtf.Shape([batch_dim, model_dim]))
def local_attention1d_spatial_decoder(x, kv_dim, heads_dim,
feedforward_dim, hparams):
"""Image Transformer decoder with local1D spatial layers."""
batch_dim, length_dim, model_dim = x.shape.dims
blocks_w_dim = mtf.Dimension("blocksw", hparams.block_length)
num_w_blocks_dim = mtf.Dimension("num_wblocks",
length_dim.size // blocks_w_dim.size)
x = mtf.reshape(
x, mtf.Shape([batch_dim, num_w_blocks_dim, blocks_w_dim, model_dim]))
# [ self attention - ffn - residual + dropout] x n
for layer in range(hparams.num_decoder_layers):
layer_name = "decoder_layer_%d" % layer
with tf.variable_scope(layer_name):
# Self attention layer
x += layer_prepostprocess_dropout(
mtf.layers.local_self_attention_spatial_blocks(
mtf.layers.layer_norm(x, model_dim, name="layer_norm_att"),
kv_dim,
heads_dim,
memory_w_dim=blocks_w_dim,
mask_right=True,
name="self_att"), hparams)
# ffn layer
x += layer_prepostprocess_dropout(
mtf.layers.dense_relu_dense(
mtf.layers.layer_norm(x, model_dim, name="layer_norm_ffn"),
feedforward_dim,
hparams.dropout,
dropout_broadcast_dims=[length_dim]), hparams)
output = mtf.layers.layer_norm(x, model_dim, name="final_layer_norm")
return output
def local_attention2d_spatial_decoder(x, kv_dim, heads_dim,
feedforward_dim, hparams):
"""Image Transformer decoder with local2D spatial layers."""
batch_dim, length_dim, model_dim = x.shape.dims
blocks_h_dim = mtf.Dimension("blocksh", hparams.block_height)
blocks_w_dim = mtf.Dimension("blocksw", hparams.block_width)
num_h_blocks_dim = mtf.Dimension("num_h_blocks",
hparams.img_len // hparams.block_height)
num_w_blocks_dim = mtf.Dimension(
"num_w_blocks",
hparams.img_len * hparams.num_channels // hparams.block_width)
x = mtf.transpose(
mtf.reshape(
x,
mtf.Shape([
batch_dim, num_h_blocks_dim, blocks_h_dim,
num_w_blocks_dim, blocks_w_dim, model_dim
])),
mtf.Shape([
batch_dim, num_h_blocks_dim, num_w_blocks_dim,
blocks_h_dim, blocks_w_dim, model_dim
]))
# Image Transformer Decoder
# [ self attention - ffn - residual + dropout] x n
for layer in range(hparams.num_decoder_layers):
layer_name = "decoder_layer_%d" % layer
with tf.variable_scope(layer_name):
# Self attention layer
x += layer_prepostprocess_dropout(
mtf.layers.local_2d_self_attention_spatial_blocks(
mtf.layers.layer_norm(x, model_dim, name="layer_norm_att"),
kv_dim,
heads_dim,
memory_h_dim=num_h_blocks_dim,
memory_w_dim=num_w_blocks_dim,
name="self_att"), hparams)
# ffn layer
x += layer_prepostprocess_dropout(
mtf.layers.dense_relu_dense(
mtf.layers.layer_norm(x, model_dim, name="layer_norm_ffn"),
feedforward_dim,
hparams.dropout,
dropout_broadcast_dims=[length_dim]), hparams)
output = mtf.layers.layer_norm(x, model_dim, name="final_layer_norm")
return output
def local_attention1d_masked_decoder(x, kv_dim, heads_dim,
feedforward_dim, hparams):
"""Image Transformer decoder with local1D masked layers."""
print(x)
_, length_dim, model_dim = x.shape.dims
for layer in range(hparams.num_decoder_layers):
layer_name = "decoder_layer_%d" % layer
with tf.variable_scope(layer_name):
# Self attention layer
length_per_split = mtf.tensor_dim_to_size_per_split(
hparams.layout, hparams.mesh_shape, length_dim)
x += layer_prepostprocess_dropout(
mtf.layers.masked_local_attention_1d(
mtf.layers.layer_norm(x, model_dim, name="layer_norm_att"),
kv_dim,
heads_dim,
window_size=hparams.block_length,
length_per_split=length_per_split,
name="self_att"), hparams)
# ffn layer
x += layer_prepostprocess_dropout(
mtf.layers.dense_relu_dense(
mtf.layers.layer_norm(x, model_dim, name="layer_norm_ffn"),
feedforward_dim,
hparams.dropout,
dropout_broadcast_dims=[length_dim]), hparams)
output = mtf.layers.layer_norm(x, model_dim, name="final_layer_norm")
return output
@registry.register_hparams
def mtf_image_transformer_base():
"""Set of hyperparameters."""
hparams = common_hparams.basic_params1()
hparams.no_data_parallelism = True
hparams.use_fixed_batch_size = True
hparams.batch_size = 1
hparams.max_length = 3072
hparams.hidden_size = 256
hparams.label_smoothing = 0.0
# 8-way model-parallelism
hparams.add_hparam("mesh_shape", "batch:8")
hparams.add_hparam("layout", "batch:batch")
hparams.add_hparam("mtf_mode", True)
hparams.add_hparam("num_heads", 8)
hparams.add_hparam("filter_size", 1024)
hparams.add_hparam("num_encoder_layers", 0)
hparams.add_hparam("num_decoder_layers", 6)
hparams.add_hparam("attention_key_size", 256)
hparams.add_hparam("attention_value_size", 256)
# Share weights between input and target embeddings
hparams.shared_embedding = True
# mixture of experts hparams
hparams.add_hparam("ffn_layer", "dense_relu_dense")
hparams.add_hparam("moe_overhead_train", 1.0)
hparams.add_hparam("moe_overhead_eval", 2.0)
hparams.moe_num_experts = 16
hparams.moe_loss_coef = 1e-3
hparams.shared_embedding_and_softmax_weights = True
hparams.optimizer = "Adafactor"
hparams.learning_rate_schedule = "rsqrt_decay"
hparams.learning_rate_warmup_steps = 10000
hparams.add_hparam("d_kv", 64)
hparams.add_hparam("d_ff", 2048)
# Image related hparams
hparams.add_hparam("img_len", 32)
hparams.add_hparam("num_channels", 3)
hparams.add_hparam("unconditional", True)
# Local Attention related params
hparams.add_hparam("block_length", 128)
hparams.add_hparam("block_height", 16)
hparams.add_hparam("block_width", 16)
hparams.add_hparam("attention_type", "local1d")
return hparams
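# Note (assumption, not part of the original file): in Mesh TensorFlow,
# `mesh_shape` names the processor-mesh axes and their sizes (e.g. "batch:8" is
# eight devices along an axis called "batch"), while `layout` maps tensor
# dimensions onto mesh axes (e.g. "batch:batch" splits the batch dimension
# across that axis; unlisted tensor dimensions are replicated).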
@registry.register_hparams
def mtf_image_transformer_tiny():
"""Catch bugs locally..."""
hparams = mtf_image_transformer_base()
hparams.hidden_size = 128
hparams.d_ff = 256
hparams.batch_size = 4
hparams.num_encoder_layers = 1
hparams.num_decoder_layers = 4
hparams.num_heads = 4
hparams.attention_key_size = 128
hparams.attention_value_size = 128
hparams.block_length = 32
# data parallelism and model-parallelism
hparams.mesh_shape = "batch:2"
hparams.layout = "batch:batch"
return hparams
@registry.register_hparams
def mtf_image_transformer_single():
"""Small single parameters."""
hparams = mtf_image_transformer_tiny()
hparams.mesh_shape = ""
hparams.layout = ""
hparams.hidden_size = 32
hparams.filter_size = 32
hparams.batch_size = 1
hparams.num_encoder_layers = 1
hparams.num_decoder_layers = 1
hparams.num_heads = 2
hparams.attention_key_size = 32
hparams.attention_value_size = 32
hparams.block_length = 16
return hparams
@registry.register_hparams
def mtf_image_transformer_base_single():
"""Small single parameters."""
hparams = mtf_image_transformer_base()
hparams.num_decoder_layers = 6
hparams.filter_size = 256
hparams.block_length = 128
hparams.mesh_shape = ""
hparams.layout = ""
return hparams
@registry.register_hparams
def mtf_image_transformer_tiny_spatial1d():
"""Small single parameters."""
hparams = mtf_image_transformer_tiny()
hparams.num_decoder_layers = 6
hparams.filter_size = 128
hparams.block_height = 8
hparams.block_width = 8
hparams.attention_type = "local1d_spatial"
hparams.mesh_shape = ""
hparams.layout = ""
return hparams
@registry.register_hparams
def mtf_image_transformer_tiny_spatial2d():
"""Small single parameters."""
hparams = mtf_image_transformer_tiny()
hparams.num_decoder_layers = 6
hparams.filter_size = 128
hparams.block_height = 8
hparams.block_width = 8
hparams.attention_type = "local2d_spatial"
hparams.mesh_shape = "b1:2,b2:2"
hparams.layout = "num_h_blocks:b1,num_wblocks:b2"
return hparams
@registry.register_hparams
def mtf_image_transformer_base_cifar():
"""Data parallel CIFAR parameters."""
hparams = mtf_image_transformer_base()
hparams.mesh_shape = "batch:8"
hparams.layout = "batch:batch"
hparams.learning_rate_decay_steps = 13600 # one epoch
hparams.batch_size = 32
hparams.num_heads = 4
hparams.num_decoder_layers = 12
hparams.block_length = 256
hparams.hidden_size = 512
hparams.d_ff = 2048
hparams.learning_rate = 0.5
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.layer_prepostprocess_dropout = 0.3
hparams.unconditional = True
return hparams
@registry.register_hparams
def mtf_image_transformer_cifar_4x():
"""Data parallel CIFAR parameters."""
hparams = mtf_image_transformer_base_cifar()
hparams.mesh_shape = "batch:32"
hparams.layout = "batch:batch"
hparams.batch_size = 128
return hparams
@registry.register_hparams
def mtf_image_transformer_cifar_mp_4x():
"""Data parallel CIFAR parameters."""
hparams = mtf_image_transformer_base_cifar()
hparams.mesh_shape = "model:4;batch:8"
hparams.layout = "batch:batch;d_ff:model;heads:model"
hparams.batch_size = 32
hparams.num_heads = 8
hparams.d_ff = 8192
return hparams
@registry.register_hparams
def mtf_image_transformer_base_imagenet():
"""Data parallel CIFAR parameters."""
hparams = mtf_image_transformer_base_cifar()
hparams.mesh_shape = "batch:32"
hparams.layout = "batch:batch"
hparams.batch_size = 128
hparams.d_ff = 2048
hparams.hidden_size = 512
hparams.num_decoder_layers = 12
hparams.learning_rate = 0.5
hparams.learning_rate_warmup_steps = 31250
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.layer_prepostprocess_dropout = 0.1
hparams.unconditional = True
return hparams
@registry.register_hparams
def mtf_image_transformer_base_imagenet_mp():
"""Model parallel ImageNet parameters."""
hparams = mtf_image_transformer_base_imagenet()
hparams.mesh_shape = "model:4;batch:8"
hparams.layout = "batch:batch;d_ff:model;heads:model"
hparams.batch_size = 32
hparams.num_heads = 8
hparams.d_ff = 8192
hparams.learning_rate_warmup_steps = 31250
hparams.unconditional = True
return hparams
@registry.register_hparams
def mtf_image_transformer_base_imagenet_mp128():
"""Model parallel ImageNet parameters."""
hparams = mtf_image_transformer_base_imagenet()
hparams.mesh_shape = "model:8;batch:4"
hparams.layout = "batch:batch;d_ff:model;heads:model"
hparams.batch_size = 8
hparams.img_len = 128
hparams.block_length = 128
hparams.num_heads = 8
hparams.num_decoder_layers = 4
hparams.d_ff = 4096
hparams.learning_rate_warmup_steps = 31250
hparams.unconditional = True
hparams.max_length = 256*256*3
return hparams
@registry.register_hparams
def mtf_image_transformer_base_imagenet_mp_sp():
"""Model parallel ImageNet parameters."""
hparams = mtf_image_transformer_base_imagenet_mp128()
hparams.mesh_shape = "model:8;batch:4"
hparams.layout = "batch:batch;d_ff:model;num_wblocks:model"
hparams.batch_size = 8
hparams.img_len = 128
hparams.block_length = 128
hparams.attention_type = "local1d_spatial"
return hparams
@registry.register_hparams
def mtf_image_transformer_base_imagenet_mp64():
"""Model parallel ImageNet parameters."""
hparams = mtf_image_transformer_base_imagenet()
hparams.mesh_shape = "model:8;batch:4"
hparams.layout = "batch:batch;d_ff:model;heads:model"
hparams.batch_size = 8
hparams.img_len = 64
hparams.num_decoder_layers = 8
return hparams
@registry.register_hparams
def mtf_image_transformer_tiny_8gpu():
hparams = mtf_image_transformer_tiny()
hparams.mesh_shape = "all:8"
hparams.layout = "vocab:all;filter_size:all;heads:all"
return hparams
@registry.register_hparams
def mtf_image_transformer_length_sharded():
hparams = mtf_image_transformer_tiny()
hparams.mesh_shape = "all:2"
hparams.layout = "length:all"
return hparams
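# Illustrative usage sketch (an assumption, not part of the original registry):
# the registered hparams functions above can also be called directly and have
# individual fields overridden before training.
def _example_override_cifar_hparams():
  """Return the CIFAR hparams with a smaller, single-host setup (illustrative)."""
  hparams = mtf_image_transformer_base_cifar()
  hparams.batch_size = 8
  hparams.mesh_shape = "batch:1"
  return hparams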
|
the-stack_0_24865
|
#!/usr/bin/env python
#
# Copyright 2019 Google LLC
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Check the DEPS file for correctness."""
import os
import re
import subprocess
import sys
import utils
INFRA_BOTS_DIR = os.path.dirname(os.path.realpath(__file__))
SKIA_DIR = os.path.abspath(os.path.join(INFRA_BOTS_DIR, os.pardir, os.pardir))
def main():
"""Load the DEPS file and verify that all entries are valid."""
# Find gclient.py and run that instead of simply "gclient", which calls into
# update_depot_tools.
gclient = subprocess.check_output([utils.WHICH, utils.GCLIENT])
gclient_py = os.path.join(os.path.dirname(gclient), 'gclient.py')
python = sys.executable or 'python'
# Obtain the DEPS mapping.
output = subprocess.check_output(
[python, gclient_py, 'revinfo'], cwd=SKIA_DIR)
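  # Each line of `gclient revinfo` output is expected to look roughly like
  # "third_party/externals/zlib: https://chromium.googlesource.com/chromium/src/third_party/zlib@<40-hex-sha>"
  # (illustrative example; actual paths and hosts vary).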
# Check each entry.
errs = []
for e in output.rstrip().splitlines():
split = e.split(': ')
    if len(split) != 2:
      errs.append(
          'Failed to parse `gclient revinfo` output; invalid format: %s' % e)
      continue
if split[0] == 'skia':
continue
split = split[1].split('@')
    if len(split) != 2:
      errs.append(
          'Failed to parse `gclient revinfo` output; invalid format: %s' % e)
      continue
repo = split[0]
rev = split[1]
if 'chrome-infra-packages' in repo:
continue
if not 'googlesource.com' in repo:
errs.append(
'DEPS must be hosted on googlesource.com; %s is not allowed. '
'See http://go/new-skia-git-mirror' % repo)
if not re.match(r'^[a-z0-9]{40}$', rev):
errs.append('%s: "%s" does not look like a commit hash.' % (repo, rev))
if errs:
print >> sys.stderr, 'Found problems in DEPS:'
for err in errs:
print >> sys.stderr, err
sys.exit(1)
if __name__ == '__main__':
main()
|
the-stack_0_24866
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import os
from setuptools import find_packages, setup
import sys
this_directory = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(this_directory, "README.md")) as f:
long_description = f.read()
deps = ["typing", "future"] if sys.version_info[0] == 2 else None
setup(
name='hdlConvertorAst',
version='1.0',
description='A library of AST nodes for HDL languages (Verilog, VHDL, ...) and transpiler/compiler utilities',
long_description=long_description,
long_description_content_type="text/markdown",
url='https://github.com/Nic30/hdlConvertorAst',
author='Michal Orsak',
author_email='[email protected]',
keywords=['hdl', 'vhdl', 'verilog', 'systemverilog',
'parser', 'preprocessor', 'antlr4', 'ast', 'code-generator'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Operating System :: OS Independent',
'Topic :: Software Development :: Build Tools',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)',
],
license="MIT",
packages=find_packages(exclude=["tests", ]),
test_suite='tests.all.suite',
install_requires=deps,
    tests_require=deps,
)
|
the-stack_0_24867
|
import math
from .vector2d import Vector2d
from .config import RAY_COUNT
from .config import FOV_ANGLE
from .ray import Ray
class FieldOfView():
def __init__(self, angle: float):
self.__ang_abs_min = (FOV_ANGLE * -0.5 + math.pi * 2) % (math.pi * 2)
self.__ang_abs_max = (FOV_ANGLE * 0.5 + math.pi * 2) % (math.pi * 2)
self.__vector2d_ang_min = Vector2d(1, 0) ** self.__ang_abs_min
self.__vector2d_ang_max = Vector2d(1, 0) ** self.__ang_abs_max
self.__vector2d_ang = Vector2d(1, 0)
self.__dist = 0
ray_count = RAY_COUNT - RAY_COUNT % 2
ray_angle = FOV_ANGLE / (RAY_COUNT - 1)
self.__rays = [Ray((c * ray_angle) - (FOV_ANGLE / 2)) for c in range(ray_count)]
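        # Rays are spread evenly across FOV_ANGLE and centred on the view
        # direction: ray i gets angular offset (i * ray_angle) - FOV_ANGLE / 2.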
self.rot(angle)
@property
def ang(self):
return self.__vector2d_ang.ang
@property
def ang_min(self):
return self.__vector2d_ang_min.ang
@property
def ang_max(self):
return self.__vector2d_ang_max.ang
@property
def rays(self):
return tuple(self.__rays)
def rot(self, rad: float):
self.__vector2d_ang = self.__vector2d_ang ** rad
self.__vector2d_ang_min = self.__vector2d_ang_min ** rad
self.__vector2d_ang_max = self.__vector2d_ang_max ** rad
for ray in self.__rays:
ray.rot(rad)
def cast(self, player, grid):
pos = Vector2d(player.x, player.y)
for ray in self.__rays:
ray.cast_wall(pos, grid)
if ray.dist > self.__dist:
self.__dist = ray.dist
dist = max([ray.dist for ray in self.__rays])
doors = grid.get_doors_by_fov(pos, self.ang, FOV_ANGLE, dist)
for ray in self.__rays:
ray.cast_doors(pos, doors)
if ray.dist > self.__dist:
self.__dist = ray.dist
dist = max([ray.dist for ray in self.__rays])
items = grid.get_items_by_fov(pos, self.ang, FOV_ANGLE, dist)
for ray in self.__rays:
ray.cast_items(player, items)
|
the-stack_0_24869
|
# coding=utf-8
""" improve_S.py
"""
from __future__ import print_function
import sys, re,codecs
class Entry(object):
def __init__(self,lines):
self.lines = lines
if lines[0] != '<entry>':
print('Entry start error:','\n'.join(lines))
exit(1)
if lines[-1] != '</entry>':
print('Entry end error:','\n'.join(lines))
exit(1)
self.parse_info()
self.parse_groups()
return
def parse_info(self):
if len(self.lines) < 2:
print('entry parse_info Error 1','\n'.join(self.lines))
exit(1)
infoline = self.lines[1]
m = re.search(r'<info L="(.*?)" page="(.*?)" gtypes="(.*?)"/>',infoline)
if m == None:
print('entry parse_info Error 2','\n'.join(self.lines))
exit(1)
self.L = m.group(1)
self.page = m.group(2)
self.gtypes = m.group(3).split(',')
self.info = infoline
def parse_groups(self):
# groupelts agrees with boesp.dtd
groupelts = 'HS,S,D,F,V1,V2,V3,V4,V5'.split(',')
groupbegs = ['<%s' % groupelt for groupelt in groupelts]
groupends = ['</%s>' % groupelt for groupelt in groupelts]
groups = []
ngroup = -1
groupelt = None
for iline,line in enumerate(self.lines):
if groupelt == None:
for i,groupbeg in enumerate(groupbegs):
if line.startswith(groupbeg):
groupelt = groupelts[i]
groupend = groupends[i]
group = [line]
break
elif line.startswith(groupend):
group.append(line)
groups.append(group)
ngroup = ngroup + 1
# check agreement with gtypes
if groupelt != self.gtypes[ngroup]:
print('parse_groups error 1',groupelt,self.gtypes[ngroup])
print('info=',self.info)
exit(1)
groupelt = None
group = []
else:
group.append(line)
self.groups = groups
if len(groups) != len(self.gtypes):
print('parse_groups error 2')
print('gtypes=',self.gtypes)
print('#groups=',len(groups))
print('info=',self.info)
exit(1)
def xml_header(xmlroot):
# write header lines
text = """
<?xml version="1.3" encoding="UTF-8"?>
<!DOCTYPE %s SYSTEM "%s.dtd">
<!-- <H> Boehtlingk, Indische Sprüche, 2. Auflage, St. Petersburg 1870 -->
<%s>
""" % (xmlroot,xmlroot,xmlroot)
lines = text.splitlines()
lines = [x.strip() for x in lines if x.strip()!='']
return lines
def generate_entries(lines):
# lines of xml file
# yield array of entries
inentry = False
elines = []
for iline,line in enumerate(lines):
if not inentry:
line = line.strip() # remove extra spaces
if line != '<entry>':
continue
# start entry
inentry = True
elines = [line]
else:
# looking for closing </entry>
line1 = line.strip()
if line1 == '</entry>':
elines.append(line1)
entry = Entry(elines)
yield entry
inentry = False
elines = []
else:
elines.append(line)
return
def xml_body(entries,tranin):
# generate xml header lines
body = []
nentry = 0
for entry in entries:
outarr = entrylines(entry,tranin)
nentry = nentry + 1
for out in outarr:
body.append(out)
print(nentry,'entries found')
return body
def entry_from_group(group,L,page,gtype):
# construct new entry
lines = ['<entry>']
info = '<info L="%s" page="%s" gtypes="%s"/>' % (L,page,gtype)
lines.append(info)
for line in group:
lines.append(line)
lines.append('</entry>')
entry = Entry(lines)
return entry
def S_adjust(entry):
""" Return a list of entries.
Add n attribute to S element, and for multiple D,
return adjusted entry (as a singleton list of entries)
"""
dbg = False
newentries = [] #
newgroups = [] #
newgtypes = []
L = entry.L
page = entry.page
changed = False # no changes to this entry
# get the single S group: It is the first group
# If the first group is instead 'HS', nothing to change
info = entry.info
if entry.gtypes == ['HS']:
return False,[entry]
isgroup = 0
sgroup = entry.groups[isgroup]
#
# get list of verse numbers from info
m = re.search(r'<info L="(.*?)"',info)
Lstring = m.group(1)
versenums = Lstring.split(',')
# get list of verses, identified as sequence of lines with
# last line ending in '||' (double danda)
verselines = sgroup[1:-1] # drop the <S> and </S> of the S-group
verses = []
verse = []
pb = None
for iline,line in enumerate(verselines):
if re.search(r'<pb n=".*?"/>',line):
pb = line
if (iline+1) != len(verselines):
print ('odd pb',info) # there are none
continue
verse.append(line)
if re.search(r'\|\| *$',line):
# last line of verse. Assume danda is coded as '||'
verses.append(verse)
verse = []
if len(verses) != len(versenums):
print(info,' numverse = %s, len(verses) = %s' %(len(versenums),len(verses)))
        return False,[entry]
# construct list of new 'S' groups, one for each verse
sgroups = []
for iverse,verse in enumerate(verses):
versenum = versenums[iverse]
group = []
group.append('<S n="%s">' %versenum)
for line in verse:
# enclose in 's' (Sanskrit) tag
newline = '<s>%s</s>' % line
group.append(newline)
if pb != None:
group.append(pb)
group.append('</S>')
sgroups.append(group)
# construct new groups
# from sgroups and the other groups from entry
newgroups = sgroups + entry.groups[1:]
# construct new lines
newlines = ['<entry>']
# revise gtypes attribute info
gtypes2 = entry.gtypes[1:] # original list of types,
gtypes1 = ['S' for i in range(len(verses))]
newgtypes = gtypes1 + gtypes2
newgtypestr = ','.join(newgtypes)
newinfo = re.sub(r'gtypes=".*?"', 'gtypes="%s"' % newgtypestr,info)
newlines.append(newinfo)
for group in newgroups:
for line in group:
newlines.append(line)
newlines.append('</entry>')
try:
newentry = Entry(newlines)
except:
print('bad newlines')
for i,line in enumerate(newlines):
print('newlines[%s]: %s' %(i,line))
exit(1)
newentries.append(newentry)
changed = True
return changed,newentries
def read_lines(filein):
with codecs.open(filein,encoding='utf-8',mode='r') as f:
nprob = 0
lines = []
for iline,line in enumerate(f):
line = line.rstrip('\r\n')
lines.append(line)
print(len(lines),"lines read from",filein)
if nprob != 0:
print('read_and_clean_lines:',nprob,'problems need to be fixed')
exit(1)
return lines
def write_entries(entries,xmlroot,fileout):
head = xml_header(xmlroot)
head.append('')
body = []
for entry in entries:
lines = entry.lines
for line in lines:
body.append(line)
body.append('')
tail = ['</%s>'%xmlroot]
linesout = head + body + tail
with codecs.open(fileout,"w","utf-8") as f:
for line in linesout:
f.write(line+'\n')
print(len(linesout),"lines written to",fileout)
if __name__=="__main__":
tranin = 'hk'
filein = sys.argv[1] # boesp_utf8.xml
fileout = sys.argv[2] # boesp_utf8_revised.xml
xmlroot = 'boesp'
lines = read_lines(filein)
entries0 = list(generate_entries(lines))
print(len(entries0),'entries found')
entries = []
nadj = 0
for entry0 in entries0:
flag,newentries = S_adjust(entry0)
if flag:
nadj = nadj + 1
for entry in newentries:
entries.append(entry)
print(nadj,"entries adjusted for S")
write_entries(entries,xmlroot,fileout)
|
the-stack_0_24871
|
# Note: chat_id's are stored as strings because the int is too large to be stored in a PSQL database.
import threading
from sqlalchemy import Column, String, Boolean, UnicodeText, Integer, func, distinct
from makibot.modules.helper_funcs.msg_types import Types
from makibot.modules.sql import SESSION, BASE
class Notes(BASE):
__tablename__ = "notes"
chat_id = Column(String(14), primary_key=True)
name = Column(UnicodeText, primary_key=True)
value = Column(UnicodeText, nullable=False)
file = Column(UnicodeText)
is_reply = Column(Boolean, default=False)
has_buttons = Column(Boolean, default=False)
msgtype = Column(Integer, default=Types.BUTTON_TEXT.value)
def __init__(self, chat_id, name, value, msgtype, file=None):
self.chat_id = str(chat_id) # ensure string
self.name = name
self.value = value
self.msgtype = msgtype
self.file = file
def __repr__(self):
return "<Note %s>" % self.name
class Buttons(BASE):
__tablename__ = "note_urls"
id = Column(Integer, primary_key=True, autoincrement=True)
chat_id = Column(String(14), primary_key=True)
note_name = Column(UnicodeText, primary_key=True)
name = Column(UnicodeText, nullable=False)
url = Column(UnicodeText, nullable=False)
same_line = Column(Boolean, default=False)
def __init__(self, chat_id, note_name, name, url, same_line=False):
self.chat_id = str(chat_id)
self.note_name = note_name
self.name = name
self.url = url
self.same_line = same_line
Notes.__table__.create(checkfirst=True)
Buttons.__table__.create(checkfirst=True)
NOTES_INSERTION_LOCK = threading.RLock()
BUTTONS_INSERTION_LOCK = threading.RLock()
def add_note_to_db(chat_id, note_name, note_data, msgtype, buttons=None, file=None):
if not buttons:
buttons = []
with NOTES_INSERTION_LOCK:
prev = SESSION.query(Notes).get((str(chat_id), note_name))
if prev:
with BUTTONS_INSERTION_LOCK:
prev_buttons = SESSION.query(Buttons).filter(Buttons.chat_id == str(chat_id),
Buttons.note_name == note_name).all()
for btn in prev_buttons:
SESSION.delete(btn)
SESSION.delete(prev)
note = Notes(str(chat_id), note_name, note_data or "", msgtype=msgtype.value, file=file)
SESSION.add(note)
SESSION.commit()
for b_name, url, same_line in buttons:
add_note_button_to_db(chat_id, note_name, b_name, url, same_line)
def get_note(chat_id, note_name):
try:
return SESSION.query(Notes).get((str(chat_id), note_name))
finally:
SESSION.close()
def rm_note(chat_id, note_name):
with NOTES_INSERTION_LOCK:
note = SESSION.query(Notes).get((str(chat_id), note_name))
if note:
with BUTTONS_INSERTION_LOCK:
buttons = SESSION.query(Buttons).filter(Buttons.chat_id == str(chat_id),
Buttons.note_name == note_name).all()
for btn in buttons:
SESSION.delete(btn)
SESSION.delete(note)
SESSION.commit()
return True
else:
SESSION.close()
return False
def get_all_chat_notes(chat_id):
try:
return SESSION.query(Notes).filter(Notes.chat_id == str(chat_id)).order_by(Notes.name.asc()).all()
finally:
SESSION.close()
def add_note_button_to_db(chat_id, note_name, b_name, url, same_line):
with BUTTONS_INSERTION_LOCK:
button = Buttons(chat_id, note_name, b_name, url, same_line)
SESSION.add(button)
SESSION.commit()
def get_buttons(chat_id, note_name):
try:
return SESSION.query(Buttons).filter(Buttons.chat_id == str(chat_id), Buttons.note_name == note_name).order_by(
Buttons.id).all()
finally:
SESSION.close()
def num_notes():
try:
return SESSION.query(Notes).count()
finally:
SESSION.close()
def num_chats():
try:
return SESSION.query(func.count(distinct(Notes.chat_id))).scalar()
finally:
SESSION.close()
def migrate_chat(old_chat_id, new_chat_id):
with NOTES_INSERTION_LOCK:
chat_notes = SESSION.query(Notes).filter(Notes.chat_id == str(old_chat_id)).all()
for note in chat_notes:
note.chat_id = str(new_chat_id)
with BUTTONS_INSERTION_LOCK:
chat_buttons = SESSION.query(Buttons).filter(Buttons.chat_id == str(old_chat_id)).all()
for btn in chat_buttons:
btn.chat_id = str(new_chat_id)
SESSION.commit()
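# Minimal usage sketch (hypothetical chat id and note values; assumes the
# SQLAlchemy session above is bound to a live database):
#
#   add_note_to_db(-1001234567890, "rules", "Be nice", Types.BUTTON_TEXT,
#                  buttons=[("Wiki", "https://example.org", False)])
#   note = get_note(-1001234567890, "rules")
#   rm_note(-1001234567890, "rules")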
|
the-stack_0_24872
|
# coding: utf-8
# Public Domain (-) 2018-present, The Elko Website Authors.
# See the Elko Website UNLICENSE file for details.
import release
from weblite import app, handle, read
INSTALL_SCRIPT = read('install.sh') % {
'darwin': release.darwin,
'linux': release.linux,
}
@handle('/')
def root(ctx):
if ctx.host == 'get.elko.io':
ctx.response_headers['Content-Type'] = 'text/plain; charset=utf-8'
return INSTALL_SCRIPT
return ctx.render_mako_template(
'site', content=ctx.render_mako_template('home')
)
@handle('install.sh')
def install_sh(ctx):
ctx.response_headers['Content-Type'] = 'text/plain; charset=utf-8'
return INSTALL_SCRIPT
_ = app
|
the-stack_0_24874
|
from rlberry.envs.bandits import NormalBandit, BernoulliBandit
from rlberry.agents.bandits import (
IndexAgent,
RandomizedAgent,
TSAgent,
BanditWithSimplePolicy,
makeBetaPrior,
makeBoundedIMEDIndex,
makeBoundedMOSSIndex,
makeBoundedNPTSIndex,
makeBoundedUCBIndex,
makeETCIndex,
makeGaussianPrior,
makeEXP3Index,
makeSubgaussianMOSSIndex,
makeSubgaussianUCBIndex,
)
from rlberry.utils import check_bandit_agent
TEST_SEED = 42
def test_base_bandit():
assert check_bandit_agent(BanditWithSimplePolicy, NormalBandit, seed=TEST_SEED)
bounded_indices = {
"IMED": makeBoundedIMEDIndex,
"MOSS": makeBoundedMOSSIndex,
"NPTS": makeBoundedNPTSIndex,
"UCB": makeBoundedUCBIndex,
}
subgaussian_indices = {
"UCB": makeSubgaussianUCBIndex,
"MOSS": makeSubgaussianMOSSIndex,
}
misc_indices = {
"ETC": makeETCIndex,
}
def test_bounded_indices():
for agent_name, makeIndex in bounded_indices.items():
class Agent(IndexAgent):
name = agent_name
def __init__(self, env, **kwargs):
index, tracker_params = makeIndex()
IndexAgent.__init__(
self, env, index, tracker_params=tracker_params, **kwargs
)
assert check_bandit_agent(Agent, BernoulliBandit, seed=TEST_SEED)
def test_subgaussian_indices():
for agent_name, makeIndex in subgaussian_indices.items():
class Agent(IndexAgent):
name = agent_name
def __init__(self, env, **kwargs):
index, tracker_params = makeIndex()
IndexAgent.__init__(
self, env, index, tracker_params=tracker_params, **kwargs
)
assert check_bandit_agent(Agent, NormalBandit, seed=TEST_SEED)
def test_misc_indices():
for agent_name, makeIndex in misc_indices.items():
class Agent(IndexAgent):
name = agent_name
def __init__(self, env, **kwargs):
index, tracker_params = makeIndex()
IndexAgent.__init__(
self, env, index, tracker_params=tracker_params, **kwargs
)
assert check_bandit_agent(Agent, BernoulliBandit, seed=TEST_SEED)
def test_randomized_bandits():
class EXP3Agent(RandomizedAgent):
name = "EXP3"
def __init__(self, env, **kwargs):
prob, tracker_params = makeEXP3Index()
RandomizedAgent.__init__(
self, env, prob, tracker_params=tracker_params, **kwargs
)
assert check_bandit_agent(EXP3Agent, BernoulliBandit, seed=TEST_SEED)
priors = {
"Beta": (makeBetaPrior, BernoulliBandit),
"Gaussian": (makeGaussianPrior, NormalBandit),
}
def test_TS():
for agent_name, (makePrior, Bandit) in priors.items():
class Agent(TSAgent):
name = agent_name
def __init__(self, env, **kwargs):
prior_info, tracker_params = makePrior()
TSAgent.__init__(
self, env, prior_info, tracker_params=tracker_params, **kwargs
)
assert check_bandit_agent(Agent, Bandit, seed=TEST_SEED)
|
the-stack_0_24875
|
import logging
import re
from urllib.parse import unquote, urlparse
from streamlink.plugin import Plugin
from streamlink.plugin.api import validate
from streamlink.stream import HLSStream
from streamlink.utils import parse_json, update_scheme
log = logging.getLogger(__name__)
class Mediaklikk(Plugin):
PLAYER_URL = "https://player.mediaklikk.hu/playernew/player.php"
_url_re = re.compile(r"https?://(?:www\.)?(?:mediaklikk|m4sport|hirado|petofilive)\.hu/")
_re_player_manager = re.compile(r"""
mtva_player_manager\.player\s*\(\s*
document\.getElementById\(\s*"\w+"\s*\)\s*,\s*
(?P<json>{.*?})\s*
\)\s*;
""", re.VERBOSE | re.DOTALL)
_re_player_json = re.compile(r"pl\.setup\s*\(\s*(?P<json>{.*?})\s*\)\s*;", re.DOTALL)
@classmethod
def can_handle_url(cls, url):
return cls._url_re.match(url) is not None
def _get_streams(self):
params = self.session.http.get(self.url, schema=validate.Schema(
validate.transform(self._re_player_manager.search),
validate.any(None, validate.all(
validate.get("json"),
validate.transform(parse_json),
{
"contentId": validate.any(str, int),
validate.optional("streamId"): str,
validate.optional("idec"): str,
validate.optional("token"): str
}
))
))
if not params:
log.error("Could not find player manager data")
return
params.update({
"video": (unquote(params.pop("token"))
if params.get("token") is not None else
params.pop("streamId")),
"noflash": "yes",
"embedded": "0",
})
url_parsed = urlparse(self.url)
skip_vods = url_parsed.netloc.endswith("m4sport.hu") and url_parsed.path.startswith("/elo")
self.session.http.headers.update({"Referer": self.url})
playlists = self.session.http.get(self.PLAYER_URL, params=params, schema=validate.Schema(
validate.transform(self._re_player_json.search),
validate.any(None, validate.all(
validate.get("json"),
validate.transform(parse_json),
{"playlist": [{
"file": validate.url(),
"type": str
}]},
validate.get("playlist"),
validate.filter(lambda p: p["type"] == "hls"),
validate.filter(lambda p: not skip_vods or "vod" not in p["file"]),
validate.map(lambda p: update_scheme(self.url, p["file"]))
))
))
for url in playlists or []:
yield from HLSStream.parse_variant_playlist(self.session, url).items()
__plugin__ = Mediaklikk
|
the-stack_0_24877
|
import enum
import octoprint.plugin
from . import actions, keypad, menu
class Tentacles(octoprint.plugin.StartupPlugin,
octoprint.plugin.SettingsPlugin,
octoprint.plugin.EventHandlerPlugin,
octoprint.plugin.TemplatePlugin):
def __init__(self):
self._tentacles = {}
self._mode = menu.Mode.UNINITIALIZED
def on_after_startup(self, *args, **kwargs):
# Load available actions for tentacles to use (just print them out now)
self._logger.debug('Loading actions...')
for action in actions.ACTIONS.keys():
self._logger.debug(f'Action: {action}')
# This should be dynamic
action_init_args = dict(printer=self._printer, tentacles=self)
# Load saved tentacle settings from octoprint instance
# and map actions to keycodes
self._logger.info('Loading tentacles...')
tentacles_settings = self._settings.get(['tentacles'])
menu_start_key = None
for tentacle_code, tentacle_config in tentacles_settings.items():
self._logger.debug(f"Loading tentacle: {tentacle_code}")
self._tentacles[tentacle_code] = {}
for mode, mode_action in tentacle_config.items():
self._logger.debug(f"Loading action: {mode_action['action']} for mode: {mode}")
if ('action' in mode_action) and (mode_action['action'] in actions.ACTIONS) and (menu.Mode[mode.upper()] in menu.Mode):
# Find the menu_start key to make it easy to reference later
if mode_action['action'] == 'menu_start':
menu_start_key = tentacle_code
# Init action passing it all arguments for now
key_action = actions.ACTIONS[mode_action['action']](**action_init_args)
# Configure action with defined args, or use defaults if none exist
if 'args' in mode_action:
key_action.configure(**mode_action['args'])
else:
key_action.configure()
# Configure name if defined
if 'name' in mode_action:
action_name = mode_action['name']
else:
action_name = mode_action['action'].title()
self._tentacles[tentacle_code][menu.Mode[mode.upper()]] = {
'action': key_action,
'name': action_name
}
else:
self._logger.warn(f"Invalid tentacle config: {mode} - {mode_action}")
if menu_start_key is not None:
# Setup our menu to switch modes
menu_start = menu.Mode(menu.Mode.MENU + 1)
menu_end = menu.Mode.MAX()
# Setup menu button for every mode
for mode in range(menu_start, menu_end+1):
self._tentacles[menu_start_key][menu.Mode(mode)] = \
self._tentacles[menu_start_key][menu.Mode.MENU]
# Clear menu_start button while in MENU mode
del self._tentacles[menu_start_key][menu.Mode.MENU]
# Init Menu object
self._menu = menu.Menu(self._printer, menu_start, menu_end)
else:
self._logger.warn('No menu key defined! You will not be able to change modes!')
# Configure serial port for our tentacle device
serial_conf = self._settings.get(['serial'], merged=True)
self._logger.info('Starting keypad listening thread...')
self._keypad_listener = keypad.KeypadListener(serial_conf['port'], serial_conf['baud'], self._logger, self._event_bus).start()
def on_event(self, event, payload, *args, **kwargs):
#Listen for key press events that get emitted from our KeypadListener
# and execute the action mapped to the keycode
if event == 'plugin_tentacle_key_press':
keycode = payload['keycode'][0]
self._logger.debug(f'Got keypress: {keycode}')
try:
tentacle_action = self._tentacles[keycode][self._mode]['action']
except KeyError:
self._logger.debug(f'Keycode {keycode} is not attached to an action!')
return
if isinstance(tentacle_action, actions.BaseAction):
tentacle_action._run()
else:
self._logger.err(f"Unknown action: {tentacle_action}")
if event == 'plugin_tentacle_key_release':
keycode = payload['keycode'][0] - 128
try:
tentacle_action = self._tentacles[keycode][self._mode]['action']
except KeyError:
self._logger.debug(f'Keycode {keycode} is not attached to an action!')
return
if isinstance(tentacle_action, actions.BaseAction):
if tentacle_action._running:
tentacle_action.stop()
else:
self._logger.err(f"Unknown action: {tentacle_action}")
# Change modes on printer state change
if event == 'PrinterStateChanged':
if payload['state_id'] == 'PRINTING':
self._mode = menu.Mode.PRINTING
elif payload['state_id'] == 'OPERATIONAL':
self._mode = menu.Mode.CONTROL
def get_template_vars(self):
tentacle_vars = {}
for tentacle_code, tentacle_config in self._tentacles.items():
tentacle_vars[tentacle_code] = {}
for mode, action in tentacle_config.items():
if isinstance(action['action'], actions.BaseAction):
action_name = action['action'].name
action_desc = action['name']
else:
action_name = action['action']
action_desc = action_name
tentacle_vars[tentacle_code][mode.name] = {
'action': action_name,
'name': action_desc
}
return {
'tentacles': tentacle_vars
}
    # Default settings: no tentacles, and serial0 (the default Raspberry Pi UART)
def get_settings_defaults(self):
return {
'serial':
{
'port': '/dev/serial0',
'baud': 9600
}
}
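    # Illustrative layout of the 'tentacles' settings expected by
    # on_after_startup (keycode, action names and args below are hypothetical
    # examples; 'menu_start' is the special action used to switch modes):
    #
    #   tentacles:
    #     "49":
    #       control:
    #         action: jog
    #         name: "Jog X+"
    #         args: {axis: x, amount: 10}
    #       printing:
    #         action: pause
    #     "50":
    #       control:
    #         action: menu_start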
def register_tentacle_events(*args, **kwargs):
return ['key_press', 'key_release']
|
the-stack_0_24878
|
import sys
import subprocess as sp
from typing import Optional
from pprint import pprint
from peo.util import format_message
from peo.disasm.comment import Comment
from peo.disasm.arrow import flow_arrow
from peo.disasm.setcolor import setcolor, arrow_clr
from peo.disasm.indent import organize, indent, combine
def disasm(filepath: str, fcn: Optional[str]=None):
proc = sp.run(
["objdump", "-d", "-M", "intel", filepath],
encoding="utf-8",
stdout=sp.PIPE,
stderr=sp.PIPE
)
    # Bail out if objdump reported an error
if proc.returncode != 0:
print(proc.stderr)
sys.exit(1)
msgs = format_message(proc.stdout)
    # If a function name was given, extract only that function
if fcn is not None:
tmp = []
        fcn_flag = False  # True while inside the requested function
for msg in msgs:
if len(msg) == 1:
if f"<{fcn}>" in msg[0]:
fcn_flag = True
else:
fcn_flag = False
if fcn_flag:
tmp.append(msg)
msgs = tmp
msgs = Comment(filepath, msgs).add()
msgs = organize(msgs)
arrows, arrowcolors = flow_arrow(msgs)
indent(arrows, msgs)
clr_arrows = arrow_clr(arrows, arrowcolors)
msgs = setcolor(msgs)
perf_msgs = combine(clr_arrows, msgs)
for i in range(len(perf_msgs)):
print(" ".join(msgs[i]))
if len(msgs[i]) != 1 and i+1 != len(msgs):
if len(msgs[i+1]) == 1:
print()
|
the-stack_0_24880
|
# Copyright (c) Microsoft Corporation and Fairlearn contributors.
# Licensed under the MIT License.
"""Build Fairlearn documentation
The static landing page is no longer under the control of Sphinx,
since we only want one copy of it as we enable multiple documentation
versions.
This makes the documentation build a three-stage process:
1. Copy the static pages into the output directory
2. Do the sphinx build
3. Make a duplicate copy of a single SVG in the output directory (a logo)
This ordering is in part because shutil.copytree() only acquired the
dirs_exist_ok argument in Python 3.8
"""
import argparse
import logging
import os
import shutil
import subprocess
import sys
from _utils import _ensure_cwd_is_fairlearn_root_dir, _LogWrapper
_logger = logging.getLogger(__file__)
logging.basicConfig(level=logging.INFO)
landing_page_directory = "static_landing_page"
extra_png_src_path = os.path.join("_static", "images", "fairlearn_full_color.svg")
def _build_argument_parser():
desc = "Build documentation for Fairlearn"
parser = argparse.ArgumentParser(description=desc)
parser.add_argument("--documentation-path",
help="The path to the documentation sources (conf.py directory)",
required=True)
parser.add_argument("--output-path",
help="The directory for the output files (will be created)",
required=True)
return parser
def _pip_backwards_compatibility():
"""Install extra pip packages for backwards compatibility
This is specifically targeted at tempeh for v0.4.6.
"""
extra_packages = ['tempeh']
with _LogWrapper("Running pip install"):
subprocess.check_call(["pip", "install"] + extra_packages)
def main(argv):
_ensure_cwd_is_fairlearn_root_dir()
parser = _build_argument_parser()
args = parser.parse_args(argv)
_pip_backwards_compatibility()
with _LogWrapper("copying static files"):
shutil.copytree(os.path.join(args.documentation_path, landing_page_directory),
args.output_path)
with _LogWrapper("running Sphinx-Multiversion"):
subprocess.check_call(["sphinx-multiversion",
args.documentation_path,
args.output_path])
with _LogWrapper("copy of individual PNG"):
shutil.copy2(os.path.join(args.documentation_path, extra_png_src_path),
os.path.join(args.output_path, "images"))
if __name__ == "__main__":
main(sys.argv[1:])
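# Example invocation (illustrative; the script name and paths are assumptions):
#   python build_documentation.py --documentation-path docs --output-path build/html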
|
the-stack_0_24882
|
import argparse
import asyncio
import os
import signal
import sys
import threading
from typing import Any, Set
from .client import connect
from .exceptions import ConnectionClosed, format_close
if sys.platform == "win32":
def win_enable_vt100() -> None:
"""
Enable VT-100 for console output on Windows.
See also https://bugs.python.org/issue29059.
"""
import ctypes
STD_OUTPUT_HANDLE = ctypes.c_uint(-11)
INVALID_HANDLE_VALUE = ctypes.c_uint(-1)
ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x004
handle = ctypes.windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
if handle == INVALID_HANDLE_VALUE:
raise RuntimeError("unable to obtain stdout handle")
cur_mode = ctypes.c_uint()
if ctypes.windll.kernel32.GetConsoleMode(handle, ctypes.byref(cur_mode)) == 0:
raise RuntimeError("unable to query current console mode")
# ctypes ints lack support for the required bit-OR operation.
# Temporarily convert to Py int, do the OR and convert back.
py_int_mode = int.from_bytes(cur_mode, sys.byteorder)
new_mode = ctypes.c_uint(py_int_mode | ENABLE_VIRTUAL_TERMINAL_PROCESSING)
if ctypes.windll.kernel32.SetConsoleMode(handle, new_mode) == 0:
raise RuntimeError("unable to set console mode")
def exit_from_event_loop_thread(
loop: asyncio.AbstractEventLoop, stop: "asyncio.Future[None]"
) -> None:
loop.stop()
if not stop.done():
# When exiting the thread that runs the event loop, raise
# KeyboardInterrupt in the main thread to exit the program.
if sys.platform == "win32":
ctrl_c = signal.CTRL_C_EVENT
else:
ctrl_c = signal.SIGINT
os.kill(os.getpid(), ctrl_c)
def print_during_input(string: str) -> None:
sys.stdout.write(
# Save cursor position
"\N{ESC}7"
# Add a new line
"\N{LINE FEED}"
# Move cursor up
"\N{ESC}[A"
# Insert blank line, scroll last line down
"\N{ESC}[L"
# Print string in the inserted blank line
f"{string}\N{LINE FEED}"
# Restore cursor position
"\N{ESC}8"
# Move cursor down
"\N{ESC}[B"
)
sys.stdout.flush()
def print_over_input(string: str) -> None:
sys.stdout.write(
# Move cursor to beginning of line
"\N{CARRIAGE RETURN}"
# Delete current line
"\N{ESC}[K"
# Print string
f"{string}\N{LINE FEED}"
)
sys.stdout.flush()
async def run_client(
uri: str,
loop: asyncio.AbstractEventLoop,
inputs: "asyncio.Queue[str]",
stop: "asyncio.Future[None]",
) -> None:
try:
websocket = await connect(uri)
except Exception as exc:
print_over_input(f"Failed to connect to {uri}: {exc}.")
exit_from_event_loop_thread(loop, stop)
return
else:
print_during_input(f"Connected to {uri}.")
try:
while True:
incoming: asyncio.Future[Any] = asyncio.ensure_future(websocket.recv())
outgoing: asyncio.Future[Any] = asyncio.ensure_future(inputs.get())
done: Set[asyncio.Future[Any]]
pending: Set[asyncio.Future[Any]]
done, pending = await asyncio.wait(
[incoming, outgoing, stop], return_when=asyncio.FIRST_COMPLETED
)
# Cancel pending tasks to avoid leaking them.
if incoming in pending:
incoming.cancel()
if outgoing in pending:
outgoing.cancel()
if incoming in done:
try:
message = incoming.result()
except ConnectionClosed:
break
else:
if isinstance(message, str):
print_during_input("< " + message)
else:
print_during_input("< (binary) " + message.hex())
if outgoing in done:
message = outgoing.result()
await websocket.send(message)
if stop in done:
break
finally:
await websocket.close()
close_status = format_close(websocket.close_code, websocket.close_reason)
print_over_input(f"Connection closed: {close_status}.")
exit_from_event_loop_thread(loop, stop)
def main() -> None:
# If we're on Windows, enable VT100 terminal support.
if sys.platform == "win32":
try:
win_enable_vt100()
except RuntimeError as exc:
sys.stderr.write(
f"Unable to set terminal to VT100 mode. This is only "
f"supported since Win10 anniversary update. Expect "
f"weird symbols on the terminal.\nError: {exc}\n"
)
sys.stderr.flush()
try:
import readline # noqa
except ImportError: # Windows has no `readline` normally
pass
# Parse command line arguments.
parser = argparse.ArgumentParser(
prog="python -m websockets",
description="Interactive WebSocket client.",
add_help=False,
)
parser.add_argument("uri", metavar="<uri>")
args = parser.parse_args()
# Create an event loop that will run in a background thread.
loop = asyncio.new_event_loop()
# Create a queue of user inputs. There's no need to limit its size.
inputs: asyncio.Queue[str] = asyncio.Queue(loop=loop)
# Create a stop condition when receiving SIGINT or SIGTERM.
stop: asyncio.Future[None] = loop.create_future()
# Schedule the task that will manage the connection.
asyncio.ensure_future(run_client(args.uri, loop, inputs, stop), loop=loop)
# Start the event loop in a background thread.
thread = threading.Thread(target=loop.run_forever)
thread.start()
# Read from stdin in the main thread in order to receive signals.
try:
while True:
# Since there's no size limit, put_nowait is identical to put.
message = input("> ")
loop.call_soon_threadsafe(inputs.put_nowait, message)
except (KeyboardInterrupt, EOFError): # ^C, ^D
loop.call_soon_threadsafe(stop.set_result, None)
# Wait for the event loop to terminate.
thread.join()
if __name__ == "__main__":
main()
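# Example invocation (illustrative URI):
#   python -m websockets ws://localhost:8765/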
|
the-stack_0_24883
|
from io import StringIO
import pytest
from graphql_relay import to_global_id
from django.core import management
from db.helper.forms import convert_date
from db.models import JobPostingState, ProfileState, JobPostingLanguageRelation, UserLanguageRelation, Match
# pylint: disable=R0913
# pylint: disable=R0915
@pytest.mark.django_db
def test_job_posting_matching(job_posting_object, job_posting_object_2, skill_objects,
branch_objects, job_type_objects_date_range, user_employee,
soft_skill_objects, cultural_fit_objects, user_student,
job_posting_matching, login, language_objects, language_level_objects,
user_employee_2, company_fallback_images):
branch = branch_objects[0]
job_type = job_type_objects_date_range[0]
language = language_objects[0]
language_level = language_level_objects[0]
user_student.student.branch = branch
user_student.student.job_type = job_type
user_student.student.job_from_date = convert_date('2021-08-01', '%Y-%m-%d')
user_student.student.job_to_date = convert_date('2022-07-31', '%Y-%m-%d')
user_student.student.state = ProfileState.PUBLIC
user_student.student.save()
user_student.student.skills.set(skill_objects[:2])
user_student.student.soft_skills.set(soft_skill_objects[:6])
user_student.student.cultural_fits.set(cultural_fit_objects[:6])
user_student.student.save()
UserLanguageRelation.objects.create(language=language,
language_level=language_level,
student=user_student.student)
user_employee.company.street = 'street'
user_employee.company.zip = '1337'
user_employee.company.city = 'nowhere'
user_employee.company.save()
user_employee.company.soft_skills.set(soft_skill_objects[:6])
user_employee.company.cultural_fits.set(cultural_fit_objects[:6])
user_employee.company.save()
# a 100% match
job_posting_object.state = JobPostingState.PUBLIC
job_posting_object.title = 'title'
job_posting_object.slug = 'title'
job_posting_object.job_type = job_type
job_posting_object.workload = 100
job_posting_object.company = user_employee.company
job_posting_object.job_from_date = user_student.student.job_from_date
job_posting_object.job_to_date = user_student.student.job_to_date
job_posting_object.employee = user_employee.employee
job_posting_object.save()
job_posting_object.skills.set(skill_objects[:2])
job_posting_object.save()
job_posting_object.branches.set([branch])
JobPostingLanguageRelation.objects.create(language=language,
language_level=language_level,
job_posting=job_posting_object)
user_employee_2.company.street = 'street'
user_employee_2.company.zip = '1337'
user_employee_2.company.city = 'nowhere'
user_employee_2.company.save()
user_employee_2.company.soft_skills.set(soft_skill_objects[-6:])
user_employee_2.company.cultural_fits.set(cultural_fit_objects[-6:])
user_employee_2.company.save()
# a bad match
job_posting_object_2.state = JobPostingState.PUBLIC
job_posting_object_2.title = 'title2'
job_posting_object_2.slug = 'title2'
job_posting_object_2.job_type = job_type_objects_date_range[1]
job_posting_object_2.workload = 10
job_posting_object_2.company = user_employee_2.company
job_posting_object_2.job_from_date = convert_date('2022-08-01', '%Y-%m-%d')
job_posting_object_2.job_to_date = convert_date('2023-07-31', '%Y-%m-%d')
job_posting_object_2.employee = user_employee_2.employee
job_posting_object_2.save()
job_posting_object_2.skills.set(skill_objects[-2:])
job_posting_object_2.save()
job_posting_object_2.branches.set([branch])
JobPostingLanguageRelation.objects.create(language=language_objects[1],
language_level=language_level_objects[1],
job_posting=job_posting_object_2)
management.call_command('update_index', stdout=StringIO())
Match.objects.create(student=user_student.student,
job_posting=job_posting_object,
initiator=user_student.type,
student_confirmed=True)
login(user_student)
data, errors = job_posting_matching(user_student, user_student.student.branch,
user_student.student.job_type)
assert data is not None
assert errors is None
matches = data.get('matches')
assert matches is not None
assert len(matches) == 2
    # max raw score for a job posting is 20 (see db/search/calculators/student.py)
    # job_posting_object is a perfect match --> normalized score = 1
    # job_posting_object_2 matches only with branch --> score = 0
best_match = matches[0]
assert best_match.get('id') == to_global_id('JobPosting', job_posting_object.id)
assert float(best_match.get('score')) == 1
assert float(best_match.get('rawScore')) == 1
match_status = best_match.get('matchStatus')
assert match_status is not None
assert match_status.get('confirmed') is False
assert match_status.get('initiator') == user_student.type.upper()
worst_match = matches[1]
assert worst_match.get('id') == to_global_id('JobPosting', job_posting_object_2.id)
assert float(worst_match.get('score')) == 0
assert float(worst_match.get('rawScore')) == 0
match_status = worst_match.get('matchStatus')
assert match_status is None
|
the-stack_0_24885
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from typing import DefaultDict, Sequence
from unittest import mock
import pytest
from pants.engine.fs import EMPTY_DIGEST
from pants.jvm.resolve.common import Coordinate, Coordinates
from pants.jvm.resolve.coursier_fetch import CoursierLockfileEntry, CoursierResolvedLockfile
from pants.jvm.resolve.key import CoursierResolveKey
coord1 = Coordinate("test", "art1", "1.0.0")
coord2 = Coordinate("test", "art2", "1.0.0")
coord3 = Coordinate("test", "art3", "1.0.0")
coord4 = Coordinate("test", "art4", "1.0.0")
coord5 = Coordinate("test", "art5", "1.0.0")
# No dependencies (coord1)
# 1 direct dependency, more transitive dependencies (coord2)
# 1 where direct dependencies provide no transitive dependencies (coord 4)
# 1 where direct dependencies provide repeated dependencies (coord5)
direct: dict[Coordinate, set[Coordinate]] = {
coord1: set(),
coord2: {
coord3,
}, # 1, 2, 3, 4, 5
coord3: {coord1, coord4, coord5}, # 1, 3, 4, 5
coord4: {
coord1,
}, # 1, 4
coord5: {coord1, coord4}, # 1, 4, 5
}
@pytest.fixture
def lockfile() -> CoursierResolvedLockfile:
# Calculate transitive deps
transitive_ = {(i, k) for i, j in direct.items() for k in j}
while True:
old_len = len(transitive_)
transitive_ |= {(i, k) for i, j in transitive_ for k in direct[j]}
if old_len == len(transitive_):
break
transitive = DefaultDict(set)
for (i, j) in transitive_:
transitive[i].add(j)
entries = (
CoursierLockfileEntry(
coord=coord,
file_name=f"{coord.artifact}.jar",
direct_dependencies=Coordinates(direct[coord]),
dependencies=Coordinates(transitive[coord]),
file_digest=mock.Mock(),
)
for coord in direct
)
return CoursierResolvedLockfile(entries=tuple(entries))
def test_no_deps(lockfile: CoursierResolvedLockfile) -> None:
filtered = filter(coord1, lockfile, False)
assert filtered == [coord1]
def test_filter_non_transitive_includes_direct_deps(lockfile: CoursierResolvedLockfile) -> None:
filtered = filter(coord2, lockfile, False)
assert filtered == [coord2, coord3]
def test_filter_transitive_includes_transitive_deps(lockfile: CoursierResolvedLockfile) -> None:
filtered = filter(coord2, lockfile, True)
assert set(filtered) == {coord1, coord2, coord3, coord4, coord5}
# Entries should only appear once.
assert len(filtered) == 5
def filter(coordinate, lockfile, transitive) -> Sequence[Coordinate]:
key = CoursierResolveKey("example", "example.json", EMPTY_DIGEST)
root, deps = (
lockfile.dependencies(key, coordinate)
if transitive
else lockfile.direct_dependencies(key, coordinate)
)
return [i.coord for i in (root, *deps)]
|
the-stack_0_24887
|
# -*- coding: utf-8 -*-
### basic modules
import numpy as np
import time, pickle, os, sys, json, PIL, tempfile, warnings, importlib, math, copy, shutil
### torch modules
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR, MultiStepLR
import torch.nn.functional as F
from torch import autograd
from torch.utils.data import Dataset, DataLoader, TensorDataset
from torch.optim.lr_scheduler import StepLR, MultiStepLR
import argparse
def argparser(data='cifar10', model='large',
batch_size=128, epochs=200, warmup=10, rampup=121,
augmentation=True,
seed=0, verbose=200,
epsilon=36/255, epsilon_infty=8/255, epsilon_train=36/255, epsilon_train_infty=8/255, starting_epsilon=0.0,
opt='adam', lr=0.001, momentum=0.9, weight_decay=0.0, step_size=10, gamma=0.5, lr_scheduler='step', wd_list=None,
starting_kappa=1.0, kappa=0.0,
niter=100,
opt_iter=1, sniter=1, test_opt_iter=1000, test_sniter=1000000):
parser = argparse.ArgumentParser()
# main settings
parser.add_argument('--method', default='BCP')
parser.add_argument('--rampup', type=int, default=rampup) ## rampup
parser.add_argument('--warmup', type=int, default=warmup)
parser.add_argument('--sniter', type=int, default=sniter) ###
parser.add_argument('--opt_iter', type=int, default=opt_iter)
parser.add_argument('--linfty', action='store_true')
parser.add_argument('--no_save', action='store_true')
parser.add_argument('--test_pth', default=None)
parser.add_argument('--print', action='store_true')
parser.add_argument('--bce', action='store_true')
parser.add_argument('--pgd', action='store_true')
# optimizer settings
parser.add_argument('--opt', default='adam')
parser.add_argument('--momentum', type=float, default=momentum)
parser.add_argument('--weight_decay', type=float, default=weight_decay)
parser.add_argument('--epochs', type=int, default=epochs)
parser.add_argument("--lr", type=float, default=lr)
parser.add_argument("--step_size", type=int, default=step_size)
parser.add_argument("--gamma", type=float, default=gamma)
parser.add_argument("--wd_list", nargs='*', type=int, default=wd_list)
parser.add_argument("--lr_scheduler", default=lr_scheduler)
# test settings during training
parser.add_argument('--train_method', default='BCP')
parser.add_argument('--test_sniter', type=int, default=test_sniter)
parser.add_argument('--test_opt_iter', type=int, default=test_opt_iter)
# pgd settings
parser.add_argument("--epsilon_pgd", type=float, default=epsilon)
parser.add_argument("--alpha", type=float, default=epsilon/4)
parser.add_argument("--niter", type=float, default=niter)
# epsilon settings
parser.add_argument("--epsilon", type=float, default=epsilon)
parser.add_argument("--epsilon_infty", type=float, default=epsilon_infty)
parser.add_argument("--epsilon_train", type=float, default=epsilon_train)
parser.add_argument("--epsilon_train_infty", type=float, default=epsilon_train_infty)
parser.add_argument("--starting_epsilon", type=float, default=starting_epsilon)
parser.add_argument('--schedule_length', type=int, default=rampup) ## rampup
# kappa settings
parser.add_argument("--kappa", type=float, default=kappa)
parser.add_argument("--starting_kappa", type=float, default=starting_kappa)
parser.add_argument('--kappa_schedule_length', type=int, default=rampup) ## rampup
# model arguments
parser.add_argument('--model', default='large')
parser.add_argument('--model_factor', type=int, default=8)
parser.add_argument('--resnet_N', type=int, default=1)
parser.add_argument('--resnet_factor', type=int, default=1)
# other arguments
parser.add_argument('--prefix')
parser.add_argument('--data', default=data)
parser.add_argument('--real_time', action='store_true')
parser.add_argument('--seed', type=int, default=2019)
parser.add_argument('--verbose', type=int, default=200)
parser.add_argument('--cuda_ids', type=int, default=0)
# loader arguments
parser.add_argument('--batch_size', type=int, default=batch_size)
parser.add_argument('--test_batch_size', type=int, default=batch_size)
parser.add_argument('--normalization', action='store_true')
parser.add_argument('--no_augmentation', action='store_true', default=not(augmentation))
parser.add_argument('--drop_last', action='store_true')
parser.add_argument('--no_shuffle', action='store_true')
args = parser.parse_args()
args.augmentation = not(args.no_augmentation)
args.shuffle = not(args.no_shuffle)
args.save = not(args.no_save)
if args.rampup:
args.schedule_length = args.rampup
args.kappa_schedule_length = args.rampup
if args.epsilon_train is None:
args.epsilon_train = args.epsilon
if args.epsilon_train_infty is None:
args.epsilon_train_infty = args.epsilon_infty
if args.linfty:
print('LINFTY TRAINING')
args.epsilon = args.epsilon_infty
args.epsilon_train = args.epsilon_train_infty
args.epsilon_pgd = args.epsilon
args.alpha = args.epsilon/4
if args.starting_epsilon is None:
args.starting_epsilon = args.epsilon
if args.prefix:
args.prefix = 'models/'+args.data+'/'+args.prefix
if args.model is not None:
args.prefix += '_'+args.model
if args.method is not None:
args.prefix += '_'+args.method
banned = ['verbose', 'prefix',
'resume', 'baseline', 'eval',
'method', 'model', 'cuda_ids', 'load', 'real_time',
'test_batch_size', 'augmentation','batch_size','drop_last','normalization',
'print','save','step_size','epsilon','gamma','linfty','lr_scheduler',
'seed','shuffle','starting_epsilon','kappa','kappa_schedule_length',
'test_sniter','test_opt_iter', 'niter','epsilon_pgd','alpha','schedule_length',
'epsilon_infty','epsilon_train_infty','test_pth','wd_list','momentum', 'weight_decay',
'resnet_N', 'resnet_factor','bce','no_augmentation','no_shuffle','no_save','pgd']
if args.method == 'baseline':
banned += ['epsilon', 'starting_epsilon', 'schedule_length',
'l1_test', 'l1_train', 'm', 'l1_proj']
# if not using a model that uses model_factor,
# ignore model_factor
if args.model not in ['wide', 'deep']:
banned += ['model_factor']
for arg in sorted(vars(args)):
if arg not in banned and getattr(args,arg) is not None:
args.prefix += '_' + arg + '_' +str(getattr(args, arg))
if args.schedule_length > args.epochs:
raise ValueError('Schedule length for epsilon ({}) is greater than '
'number of epochs ({})'.format(args.schedule_length, args.epochs))
else:
args.prefix = 'models/'+args.data+'/temporary'
if args.cuda_ids is not None:
print('Setting CUDA_VISIBLE_DEVICES to {}'.format(args.cuda_ids))
# os.environ['CUDA_VISIBLE_DEVICES'] = args.cuda_ids
torch.cuda.set_device(args.cuda_ids)
return args
def select_model(data, m):
if data=='mnist':
if m == 'large': ### Wong et al. large
model = mnist_model_large().cuda()
elif m == 'large2': ### Wong et al. large
model = mnist_model_large2().cuda()
else: ### Wong et al. small
model = mnist_model().cuda()
elif data=='cifar10':
if m == 'large': ### Wong et al. large
model = cifar_model_large().cuda()
elif m == 'M': ### CROWN-IBP M
model = cifar_model_M().cuda()
elif m == 'CIBP': ### CROWN-IBP
print('CIBP model')
model = model_cnn_4layer(3,32,8,512).cuda()
elif m == 'CIBP_noinit': ### CROWN-IBP
print('CIBP model no init')
model = model_cnn_4layer_noinit(3,32,8,512).cuda()
elif m == 'c6f2':
model = c6f2().cuda()
elif m == 'c6f2_':
model = c6f2_().cuda()
else: ### Wong et al. small
model = cifar_model().cuda()
elif data=='tinyimagenet':
model = tinyimagenet().cuda()
return model
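# Illustrative usage (assumes a CUDA device is available, since the models are
# moved to the GPU with .cuda()):
#   model = select_model('cifar10', 'large')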
def mnist_model():
model = nn.Sequential(
nn.Conv2d(1, 16, 4, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(16, 32, 4, stride=2, padding=1),
nn.ReLU(),
Flatten(),
nn.Linear(32*7*7,100),
nn.ReLU(),
nn.Linear(100, 10)
)
return model
def mnist_model_large():
model = nn.Sequential(
nn.Conv2d(1, 32, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(32, 32, 4, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(32, 64, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(64, 64, 4, stride=2, padding=1),
nn.ReLU(),
Flatten(),
nn.Linear(64*7*7,512),
nn.ReLU(),
nn.Linear(512,512),
nn.ReLU(),
nn.Linear(512,10)
)
return model
def mnist_model_large2():
model = nn.Sequential(
nn.Conv2d(1, 32, 3, stride=1),
nn.ReLU(),
nn.Conv2d(32, 32, 4, stride=2),
nn.ReLU(),
nn.Conv2d(32, 64, 3, stride=1),
nn.ReLU(),
nn.Conv2d(64, 64, 4, stride=2),
nn.ReLU(),
Flatten(),
nn.Linear(1024,512),
nn.ReLU(),
nn.Linear(512,512),
nn.ReLU(),
nn.Linear(512,10)
)
return model
def cifar_model():
model = nn.Sequential(
nn.Conv2d(3, 16, 4, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(16, 32, 4, stride=2, padding=1),
nn.ReLU(),
Flatten(),
nn.Linear(32*8*8,100),
nn.ReLU(),
nn.Linear(100, 10)
)
for m in model.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
m.bias.data.zero_()
return model
def cifar_model_large():
model = nn.Sequential(
nn.Conv2d(3, 32, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(32, 32, 4, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(32, 64, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(64, 64, 4, stride=2, padding=1),
nn.ReLU(),
Flatten(),
nn.Linear(64*8*8,512),
nn.ReLU(),
nn.Linear(512,512),
nn.ReLU(),
nn.Linear(512,10)
)
for m in model.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
m.bias.data.zero_()
return model
def model_cnn_4layer(in_ch, in_dim, width, linear_size):
model = nn.Sequential(
nn.Conv2d(in_ch, 4*width, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(4*width, 4*width, 4, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(4*width, 8*width, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(8*width, 8*width, 4, stride=2, padding=1),
nn.ReLU(),
Flatten(),
nn.Linear(8*width*(in_dim // 4)*(in_dim // 4),linear_size),
nn.ReLU(),
nn.Linear(linear_size,linear_size),
nn.ReLU(),
nn.Linear(linear_size,10)
)
for m in model.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
m.bias.data.zero_()
return model
def model_cnn_4layer_noinit(in_ch, in_dim, width, linear_size):
model = nn.Sequential(
nn.Conv2d(in_ch, 4*width, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(4*width, 4*width, 4, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(4*width, 8*width, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(8*width, 8*width, 4, stride=2, padding=1),
nn.ReLU(),
Flatten(),
nn.Linear(8*width*(in_dim // 4)*(in_dim // 4),linear_size),
nn.ReLU(),
nn.Linear(linear_size,linear_size),
nn.ReLU(),
nn.Linear(linear_size,10)
)
# for m in model.modules():
# if isinstance(m, nn.Conv2d):
# n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
# m.weight.data.normal_(0, math.sqrt(2. / n))
# m.bias.data.zero_()
return model
def cifar_model_M():
model = nn.Sequential(
nn.Conv2d(3, 32, 3, stride=1),
nn.ReLU(),
nn.Conv2d(32, 32, 4, stride=2),
nn.ReLU(),
nn.Conv2d(32, 64, 3, stride=1),
nn.ReLU(),
nn.Conv2d(64, 64, 4, stride=2),
nn.ReLU(),
Flatten(),
nn.Linear(64*8*8,512),
nn.ReLU(),
nn.Linear(512,512),
nn.ReLU(),
nn.Linear(512,10)
)
for m in model.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
m.bias.data.zero_()
return model
def c5f2():
model = nn.Sequential(
nn.Conv2d(3, 64, 3, stride=1),
nn.ReLU(),
nn.Conv2d(64, 64, 3, stride=2),
nn.ReLU(),
nn.Conv2d(64, 128, 3, stride=1),
nn.ReLU(),
nn.Conv2d(128, 128, 3, stride=2),
nn.ReLU(),
nn.Conv2d(128, 128, 3, stride=2),
nn.ReLU(),
Flatten(),
nn.Linear(512,512),
nn.ReLU(),
nn.Linear(512,10)
)
for m in model.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
m.bias.data.zero_()
return model
# def c6f2():
# model = nn.Sequential(
# nn.Conv2d(3, 32, 3, stride=1, padding=1),
# nn.ReLU(),
# nn.Conv2d(32, 32, 3, stride=1, padding=1),
# nn.ReLU(),
# nn.Conv2d(32, 32, 4, stride=2, padding=1),
# nn.ReLU(),
# nn.Conv2d(32, 64, 3, stride=1, padding=1),
# nn.ReLU(),
# nn.Conv2d(64, 64, 3, stride=1, padding=1),
# nn.ReLU(),
# nn.Conv2d(64, 64, 4, stride=2),
# nn.ReLU(),
# Flatten(),
# nn.Linear(3136,512),
# nn.ReLU(),
# nn.Linear(512,10)
# )
# for m in model.modules():
# if isinstance(m, nn.Conv2d):
# n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
# m.weight.data.normal_(0, math.sqrt(2. / n))
# m.bias.data.zero_()
# return model
def c6f2_():
model = nn.Sequential(
nn.Conv2d(3, 32, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(32, 32, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(32, 32, 4, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(32, 64, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(64, 64, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(64, 64, 4, stride=2, padding=1),
nn.ReLU(),
Flatten(),
nn.Linear(4096,512),
nn.ReLU(),
nn.Linear(512,10)
)
for m in model.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
m.bias.data.zero_()
return model
def tinyimagenet():
model = nn.Sequential(
nn.Conv2d(3, 64, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(64, 64, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(64, 64, 4, stride=2),
nn.ReLU(),
nn.Conv2d(64, 128, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(128, 128, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(128, 128, 4, stride=2),
nn.ReLU(),
nn.Conv2d(128, 256, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(256, 256, 4, stride=2),
nn.ReLU(),
Flatten(),
nn.Linear(9216,256),
nn.ReLU(),
nn.Linear(256,200)
)
for m in model.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
m.bias.data.zero_()
return model
############################## Flatten / one_hot
class Flatten(nn.Module): ## =nn.Flatten()
def forward(self, x):
return x.view(x.size()[0], -1)
def one_hot(batch,depth=10):
ones = torch.eye(depth).cuda()
return ones.index_select(0,batch)
##############################
def train(loader, model, opt, epoch, log, verbose):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
errors = AverageMeter()
model.train()
end = time.time()
for i, (X,y) in enumerate(loader):
X,y = X.cuda(), y.cuda()
data_time.update(time.time() - end)
out = model(Variable(X))
ce = nn.CrossEntropyLoss()(out, Variable(y))
err = (out.max(1)[1] != y).float().sum() / X.size(0)
loss = ce
opt.zero_grad()
loss.backward()
opt.step()
# measure accuracy and record loss
losses.update(ce.item(), X.size(0))
errors.update(err.item(), X.size(0))
# measure elapsed time
batch_time.update(time.time()-end)
end = time.time()
print(epoch, i, ce.item(), file=log) ########
if verbose and (i==0 or i==len(loader)-1 or (i+1) % verbose == 0):
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.4f} ({batch_time.avg:.4f})\t'
'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Error {errors.val:.4f} ({errors.avg:.4f})'.format(
epoch, i+1, len(loader), batch_time=batch_time,
data_time=data_time, loss=losses, errors=errors))
log.flush()
def evaluate(loader, model, epoch, log, verbose):
batch_time = AverageMeter()
losses = AverageMeter()
errors = AverageMeter()
model.eval()
end = time.time()
for i, (X,y) in enumerate(loader):
X,y = X.cuda(), y.cuda()
out = model(Variable(X))
ce = nn.CrossEntropyLoss()(out, Variable(y))
err = (out.data.max(1)[1] != y).float().sum() / X.size(0)
# print to logfile
print(epoch, i, ce.item(), err.item(), file=log)
# measure accuracy and record loss
losses.update(ce.data, X.size(0))
errors.update(err, X.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if verbose and (i==0 or i==len(loader)-1 or (i+1) % verbose == 0):
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.4f} ({batch_time.avg:.4f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Error {error.val:.4f} ({error.avg:.4f})'.format(
i+1, len(loader), batch_time=batch_time, loss=losses,
error=errors))
log.flush()
print(' * Error {error.avg:.4f}'
.format(error=errors))
return errors.avg
def pgd_l2(model_eval, X, y, epsilon=36/255, niters=100, alpha=9/255):
EPS = 1e-24
X_pgd = Variable(X.data, requires_grad=True)
for i in range(niters):
opt = optim.Adam([X_pgd], lr=1.)
opt.zero_grad()
loss = nn.CrossEntropyLoss()(model_eval(X_pgd), y)
loss.backward()
grad = 1e10*X_pgd.grad.data
grad_norm = grad.view(grad.shape[0],-1).norm(2, dim=-1, keepdim=True)
grad_norm = grad_norm.view(grad_norm.shape[0],grad_norm.shape[1],1,1)
eta = alpha*grad/(grad_norm+EPS)
eta_norm = eta.view(eta.shape[0],-1).norm(2,dim=-1)
X_pgd = Variable(X_pgd.data + eta, requires_grad=True)
eta = X_pgd.data-X.data
mask = eta.view(eta.shape[0], -1).norm(2, dim=1) <= epsilon
scaling_factor = eta.view(eta.shape[0],-1).norm(2,dim=-1)+EPS
scaling_factor[mask] = epsilon
eta *= epsilon / (scaling_factor.view(-1, 1, 1, 1))
X_pgd = torch.clamp(X.data + eta, 0, 1)
X_pgd = Variable(X_pgd.data, requires_grad=True)
return X_pgd.data
def pgd(model_eval, X, y, epsilon=8/255, niters=100, alpha=2/255):
X_pgd = Variable(X.data, requires_grad=True)
for i in range(niters):
opt = optim.Adam([X_pgd], lr=1.)
opt.zero_grad()
loss = nn.CrossEntropyLoss()(model_eval(X_pgd), y)
loss.backward()
eta = alpha*X_pgd.grad.data.sign()
X_pgd = Variable(X_pgd.data + eta, requires_grad=True)
eta = torch.clamp(X_pgd.data - X.data, -epsilon, epsilon)
X_pgd = torch.clamp(X.data + eta, 0, 1)
X_pgd = Variable(X_pgd, requires_grad=True)
return X_pgd.data
def evaluate_pgd(loader, model, args):
losses = AverageMeter()
errors = AverageMeter()
model.eval()
end = time.time()
for i, (X,y) in enumerate(loader):
X,y = X.cuda(), y.cuda()
if args.linfty:
X_pgd = pgd(model, X, y, args.epsilon, args.niter, args.alpha)
else:
X_pgd = pgd_l2(model, X, y, args.epsilon, args.niter, args.alpha)
out = model(Variable(X_pgd))
ce = nn.CrossEntropyLoss()(out, Variable(y))
err = (out.data.max(1)[1] != y).float().sum() / X.size(0)
losses.update(ce.data, X.size(0))
errors.update(err, X.size(0))
print(' * Error {error.avg:.4f}'
.format(error=errors))
return errors.avg
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
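# Quick worked example of AverageMeter (illustrative values): m = AverageMeter();
# m.update(0.5, n=10); m.update(1.0, n=30) leaves m.val == 1.0 and
# m.avg == (0.5*10 + 1.0*30) / 40 == 0.875, i.e. a batch-size-weighted running mean.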
def test(net_eval, test_data_loader, imagenet=0):
st = time.time()
n_test = len(test_data_loader.dataset)
err = 0
n_done = 0
for j, (batch_images, batch_labels) in enumerate(test_data_loader):
X = Variable(batch_images.cuda())
Y = Variable(batch_labels.cuda())
out = net_eval(X)
err += (out.max(1)[1].data != (batch_labels-imagenet).cuda()).float().sum()
b_size = len(Y)
n_done += b_size
acc = 100*(1-err/n_done)
if j % 10 == 0:
print('%.2f %%'%(100*(n_done/n_test)), end='\r')
print('test accuracy: %.4f%%'%(acc))
def test_topk(net_eval, test_data_loader, k=5, imagenet=1):
st = time.time()
n_test = len(test_data_loader.dataset)
err = 0
n_done = 0
res = 0
for j, (batch_images, batch_labels) in enumerate(test_data_loader):
X = Variable(batch_images.cuda())
Y = Variable(batch_labels.cuda())
out = net_eval(X)
b_size = len(Y)
n_done += b_size
        _, pred = out.topk(k, 1, True, True)
        aa = (batch_labels-imagenet).view(-1, 1).expand_as(pred).cuda()
        correct = pred.eq(aa)
        correct_k = correct[:, :k].view(-1).float().sum(0)
        res += correct_k
if j % 10 == 0:
print('%.2f %%'%(100*(n_done/n_test)), end='\r')
print('test accuracy: %.4f%%'%(100*res/n_done))
|
the-stack_0_24888
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 20 09:01:51 2014
@author: perez
"""
from pylab import *
# Learning coefficient (the higher it is, the faster the system learns, but the more nervous and unstable it becomes)
coeffApprentissage = 0.5
# Temperature gap between Tint and Tconsigne above which the restart-time (temps de relance) learning is performed
relanceDeltaTint = 2.0
class FuzzyMethodes:
""" Méthodes de calcul de logique floue """
##
# Renvoie un vecteur indiquant quelle est l'appartenance en logique floue de l'élément
# Exemple : tableau = ([1., 2., 3., 4.])
# si element = 2.25, renvoie ([0., 0.75, 0.25, 0.])
# car 2.25 est compris entre 2 et 3 et plus proche de 2 que de 3
# si element = 4., renvoie ([0., 0., 0., 1.])
@staticmethod
def fuzzyAppartenance1D(element, tableau):
taille = size(tableau)
resultat=zeros(taille)
if element < tableau[0]:
resultat[0] = 1.
return resultat
for i in range(taille-1):
if (tableau[i] <= element and element <= tableau[i+1]):
distance1 = element - tableau[i]
distance2 = tableau[i+1] - element
distance_total = distance1 + distance2
resultat[i] = distance2 / distance_total
resultat[i+1] = distance1 / distance_total
return resultat
        # If we reach this point, element is greater than the last entry of tableau
resultat[taille-1] = 1.
return resultat
    # Same as fuzzyAppartenance1D, but in 2D (tableau1 = rows, tableau2 = columns)
@staticmethod
def fuzzyMatriceVerite(element1, tableau1, element2, tableau2):
        # Compute the truth matrix of Ti and Te
verite1 = FuzzyMethodes.fuzzyAppartenance1D(element1, tableau1)
verite1.shape = (1, size(verite1)) # Transformation 1D->2D
verite2 = FuzzyMethodes.fuzzyAppartenance1D(element2, tableau2)
verite2.shape = (1, size(verite2)) # Transformation 1D->2D
veriteMatrice = dot(verite1.T, verite2)
return veriteMatrice
    # Corrects matriceRegles after observing a gap between the estimated value and the actual value
@staticmethod
def fuzzyCorrection(matriceRegles, matriceVerite, valeurEstimee, valeurReelle, coeffApprentissage):
erreur = valeurReelle - valeurEstimee
matriceCorrection = coeffApprentissage * erreur * matriceVerite
matriceRegles += matriceCorrection
return matriceRegles
    ##
    # Estimates the restart (warm-up) time
@staticmethod
def estimeTempsRelance(tInt, tExt, tCible, tempsRelanceMatrice, tIntSteps, tExtSteps):
        # Shift the problem relative to the target temperature to reach (tCible)
tIntPrime = tInt - (tCible - 20)
tExtPrime = tExt - (tCible - 20)
veriteMatrice = FuzzyMethodes.fuzzyMatriceVerite(tIntPrime, tIntSteps, tExtPrime, tExtSteps)
tempsRelanceEstime = sum(veriteMatrice * tempsRelanceMatrice)
return tempsRelanceEstime
##
    # Corrects the restart-time matrix after the actual restart time has been measured
    # @param tIntInitial Tint used when the restart time was estimated
    # @param tExtInitial Text used when the restart time was estimated
    # @param tempsRelanceReel Measured actual restart time
    # @param coeffApprentissage Learning coefficient
    # @return tempsRelanceMatrice New restart-time matrix
@staticmethod
def corrigeMatrice(tIntInitial, tExtInitial, tCible, tempsRelanceReel, coeffApprentissage, tempsRelanceMatrice, tIntSteps, tExtSteps):
        # Shift the problem relative to the target temperature to reach (tCible)
tIntPrime = tIntInitial - (tCible - 20)
tExtPrime = tExtInitial - (tCible - 20)
veriteMatrice = FuzzyMethodes.fuzzyMatriceVerite(tIntPrime, tIntSteps, tExtPrime, tExtSteps)
tempsRelanceEstime = sum(veriteMatrice * tempsRelanceMatrice)
tempsRelanceMatrice = FuzzyMethodes.fuzzyCorrection(tempsRelanceMatrice, veriteMatrice, tempsRelanceEstime, tempsRelanceReel, coeffApprentissage)
return tempsRelanceMatrice
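if __name__ == '__main__':
    # Minimal usage sketch of the membership helpers (illustrative grid values only).
    grid = array([1., 2., 3., 4.])
    # 2.25 lies between 2 and 3, closer to 2 -> [0., 0.75, 0.25, 0.]
    print(FuzzyMethodes.fuzzyAppartenance1D(2.25, grid))
    # Outer product of the two 1D memberships gives the 2D truth matrix
    print(FuzzyMethodes.fuzzyMatriceVerite(2.25, grid, 1.5, grid))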
|
the-stack_0_24889
|
# -*- coding: utf-8 -*-
"""
Name: BurpScripter Plus
Version: 0.1
Author: Ganapati - @G4N4P4T1
Github: https://github.com/Acceis/BurpScripterPlus
Description: This extension provide a Python panel for writing custom proxy script.
License : THE BEER-WARE LICENSE" (Revision 42): ganapati (@G4N4P4T1) wrote this file. As long as you retain this notice you can do whatever you want with this stuff. If we meet some day, and you think this stuff is worth it, you can buy me a beer in return.
"""
from java.awt import Font
from java.io import PrintWriter
from javax.swing import JScrollPane, JTextPane
from javax.swing.text import SimpleAttributeSet
from burp import (
IBurpExtender,
IExtensionStateListener,
IHttpListener,
ITab,
)
import base64
import urllib
import traceback
VERSION = "0.1"
def get_message(
message_info, helpers, message_is_request, callbacks
):
""" Return Message or Request according to message
:param message_info: message_info from Burp extender
:param helpers: helpers from Burp extender
:param message_is_request: message_is_request from Burp extender
:param callbacks: callbacks from Burp extender
:return: Request or Response instance
:rtype: Message
"""
if message_is_request:
return Request(message_info, helpers, callbacks)
else:
return Response(message_info, helpers, callbacks)
class Message(object):
""" Generic Class for Request and Response
Only used as parent class
"""
def __init__(self, message_info, helpers, callbacks):
""" Message constructor
:param message_info: message_info from Burp extender
:param helpers: helpers from Burp extender
:param callbacks: callbacks from Burp extender
:return: Message instance (not usable, just a parent class for Request and Response)
:rtype: Message
"""
self.message_info = message_info
self.callbacks = callbacks
self.helpers = helpers
self.is_request = False
self.is_response = False
self.is_in_scope = callbacks.isInScope(
message_info.getUrl()
)
def parse_message(self):
""" Parse message input from Burp extender
        Do not use this one; it only exists to force children to implement the method
"""
raise NotImplementedError
def build_message(self):
""" Build a string message from parsed message (created by parse_message)
Do not use this one, it's only for forcing childs to implement method
"""
raise NotImplementedError
def update_content_length(self, data):
""" Recalculate body length and set it in Content-Length header
:param data: data is the body string used for re-calculating Content-Length header
:type data: string
"""
if data is not None:
self.headers["Content-Length"] = len(data)
class Response(Message):
""" Response class
Map the entire Response into an object for easier manipulation
"""
def __init__(self, message_info, helpers, callbacks):
""" Response constructor
:param message_info: message_info from Burp extender
:param helpers: helpers from Burp extender
:param callbacks: callbacks from Burp extender
:return: Response instance
:rtype: Response
"""
Message.__init__(
self, message_info, helpers, callbacks
)
self.is_request = False
self.is_response = True
self.http_version = ""
self.response_code = 200
self.response_value = ""
self.headers = {}
self.body = ""
self.parse_message()
def parse_message(self):
""" Parse message input from Burp extender
        This method populates the Response object with parsed data
"""
message = self.message_info.getResponse()
parsed_message = self.helpers.analyzeResponse(
message
)
# Parse headers
headers_dict = {}
headers = parsed_message.getHeaders()
# Reconstruct the headers as dict
headers_list = list(headers)
self.http_version, self.response_code, self.response_value = headers_list[
0
].split(
" ", 2
)
for header in headers_list[1:]:
k, v = header.split(": ")
headers_dict[str(k)] = str(v)
self.headers = headers_dict
self.body = message[
(parsed_message.getBodyOffset()) :
].tostring()
def build_message(self):
""" Build Complete message as string from attributes
        This method takes all Response attributes and builds a response string.
        It is called automatically by the extension, so you don't have to call build_message yourself.
"""
self.update_content_length(self.body)
message = ""
message = "%s %s %s\r\n" % (
self.http_version,
self.response_code,
self.response_value,
)
# Add headers
for k, v in self.headers.items():
message = message + "%s: %s\r\n" % (k, v)
message = message + "\r\n"
# Add body
message = message + self.body.decode(
"utf-8", "replace"
)
self.message_info.setResponse(message)
class Request(Message):
""" Request class
Map the entire Request into an object for easier manipulation
"""
def __init__(self, message_info, helpers, callbacks):
""" Request constructor
:param message_info: message_info from Burp extender
:param helpers: helpers from Burp extender
:param callbacks: callbacks from Burp extender
:return: Request instance
:rtype: Request
"""
Message.__init__(
self, message_info, helpers, callbacks
)
self.is_request = True
self.is_response = False
self.method = ""
self.path = ""
self.http_version = ""
self.params = {}
self.headers = {}
self.body = {}
self.body_str = ""
self.parse_message()
def parse_parameters(self, line):
""" Parse params string to dict
        This method takes the GET parameters as a string and creates a dictionary in the params attribute
:param line: First line of request as string (ex: GET /foo?bar=baz HTTP/1.1)
:type line: string
"""
self.method, path_params, self.http_version = line.split(
" "
)
path_params_array = path_params.split("?")
self.path = path_params_array[0]
if len(path_params_array) > 1:
params = path_params_array[1]
for _ in params.split("&"):
try:
k, v = _.split("=")
self.params[k] = v
except ValueError:
k = _.split("=")[0]
self.params[k] = ""
def build_parameters(self):
""" From params dict to string
        This method takes all key:value pairs from the GET parameters and builds a string
:return: GET parameters as string
:rtype: string
"""
params = ""
for k, v in self.params.items():
params = params + "%s=%s&" % (
k.strip(),
v.strip(),
)
if len(params) > 0:
params = "?%s" % params[:-1]
return params
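    # Illustrative example: with self.params == {'q': 'test', 'page': '2'},
    # build_parameters() returns "?q=test&page=2"; it returns "" when there are no parameters.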
def parse_message(self):
""" Parse message input from Burp extender
        This method populates the Request object with parsed data
"""
message = self.message_info.getRequest()
parsed_message = self.helpers.analyzeRequest(
message
)
# Parse headers
headers_dict = {}
headers = parsed_message.getHeaders()
# Reconstruct the headers as dict
headers_list = list(headers)
for header in headers_list[1:]:
k, v = header.split(": ")
headers_dict[str(k)] = str(v)
self.headers = headers_dict
self.parse_parameters(headers_list[0])
# Extract body from message
body = message[(parsed_message.getBodyOffset()) :]
self.body_str = "".join(chr(_) for _ in body)
body_dict = {}
if "Content-Length" in self.headers.keys():
try:
if int(self.headers["Content-Length"]) > 0:
for arg in self.body_str.split("&"):
k, v = arg.split("=")
body_dict[k] = v
self.body = body_dict
except:
self.body = None
def build_body(self):
""" Transform dict body to string
        This method takes all key:value items from the body and constructs a body string
:return: full body string
:rtype: string
"""
body = ""
for k, v in self.body.items():
body = body + "%s=%s&" % (k.strip(), v.strip())
return body[:-1]
def build_message(self):
""" Build Complete message as string from attributes
        This method takes all Request attributes and builds a request string.
        It is called automatically by the extension, so you don't have to call build_message yourself.
"""
if isinstance(self.body, dict):
self.body_str = self.build_body()
else:
if self.body is not None:
self.body_str = self.body
self.update_content_length(self.body_str)
message = ""
# Add method, path and params
message = "%s %s%s %s\r\n" % (
self.method,
self.path,
self.build_parameters(),
self.http_version,
)
# Add headers
for k, v in self.headers.items():
message = message + "%s: %s\r\n" % (k, v)
message = message + "\r\n"
# Add body
message = message + self.body_str.decode(
"utf-8", "replace"
)
self.message_info.setRequest(message)
class BurpExtender(
IBurpExtender,
IExtensionStateListener,
IHttpListener,
ITab,
):
def registerExtenderCallbacks(self, callbacks):
self.callbacks = callbacks
self.helpers = callbacks.helpers
callbacks.setExtensionName("Burp Scripter Plus")
stdout = PrintWriter(callbacks.getStdout(), True)
stdout.println(
"""Successfully loaded Burp Scripter Plus v"""
+ VERSION
+ """\n
Repository @ https://github.com/Acceis/BurpScripterPlus
Send feedback or bug reports on twitter @G4N4P4T1"""
)
self.scriptpane = JTextPane()
self.scriptpane.setFont(
Font("Monospaced", Font.PLAIN, 12)
)
self.scrollpane = JScrollPane()
self.scrollpane.setViewportView(self.scriptpane)
self._code = compile("", "<string>", "exec")
self._script = ""
script = callbacks.loadExtensionSetting("script")
if script:
script = base64.b64decode(script)
self.scriptpane.document.insertString(
self.scriptpane.document.length,
script,
SimpleAttributeSet(),
)
self._script = script
try:
self._code = compile(
script, "<string>", "exec"
)
except Exception as e:
traceback.print_exc(
file=self.callbacks.getStderr()
)
callbacks.registerExtensionStateListener(self)
callbacks.registerHttpListener(self)
callbacks.customizeUiComponent(
self.getUiComponent()
)
callbacks.addSuiteTab(self)
self.scriptpane.requestFocus()
def extensionUnloaded(self):
try:
self.callbacks.saveExtensionSetting(
"script",
base64.b64encode(
self._script.replace(
"\nmessage.build_message()", ""
)
),
)
except Exception:
traceback.print_exc(
file=self.callbacks.getStderr()
)
return
def processHttpMessage(
self, toolFlag, messageIsRequest, messageInfo
):
try:
globals_ = {}
locals_ = {
"extender": self,
"toolFlag": toolFlag,
"messageInfo": messageInfo,
"message": get_message(
messageInfo,
self.helpers,
messageIsRequest,
self.callbacks,
),
}
exec(self.script, globals_, locals_)
except Exception:
traceback.print_exc(
file=self.callbacks.getStderr()
)
return
def getTabCaption(self):
return "Script+"
def getUiComponent(self):
return self.scrollpane
@property
def script(self):
end = self.scriptpane.document.length
_script = (
self.scriptpane.document.getText(0, end)
+ "\nmessage.build_message()"
)
if _script == self._script:
return self._code
self._script = _script
self._code = compile(_script, "<string>", "exec")
return self._code
|
the-stack_0_24890
|
"""
logutils
~~~~~~~~
Various logging utilities
"""
import logging
import inspect
def create_logger(*args, **kwargs):
"""Return a logger with a nicely formatted name, depending on the caller.
This works by using inspect to grab the frame containing information
about the caller of this function. This frame contains the module name,
and also the class, function or method names, if any.
"""
frame_caller = inspect.stack()[1][0]
location = module = inspect.getmodule(frame_caller).__name__
_, _, _, loc = inspect.getargvalues(frame_caller)
obj = loc.get('self', None) # check if called inside a class
if obj is not None:
location = '.'.join([module, obj.__class__.__name__])
del frame_caller # to avoid leak
logger = logging.getLogger(location)
return logger
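if __name__ == '__main__':
    # Minimal usage sketch: called at module level the logger is simply named '__main__';
    # inside a class method it would be named '<module>.<ClassName>'.
    logging.basicConfig(level=logging.INFO)
    log = create_logger()
    log.info('logger name: %s', log.name)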
|
the-stack_0_24891
|
# pylint: disable=missing-docstring
from resolwe.flow.models import Data, DescriptorSchema, Process
from resolwe.test import TestCase
class DescriptorTestCase(TestCase):
def setUp(self):
super().setUp()
self.process = Process.objects.create(name="Dummy process", contributor=self.contributor)
self.descriptor_schema = DescriptorSchema.objects.create(
name='Descriptor schema',
contributor=self.contributor,
schema=[
{'name': 'test_field', 'type': 'basic:string:', 'default': 'default value'}
]
)
def test_default_values(self):
data = Data.objects.create(
name='Data object',
contributor=self.contributor,
process=self.process,
descriptor_schema=self.descriptor_schema,
)
self.assertEqual(data.descriptor['test_field'], 'default value')
data = Data.objects.create(
name='Data object 2',
contributor=self.contributor,
process=self.process,
descriptor_schema=self.descriptor_schema,
descriptor={'test_field': 'changed value'}
)
self.assertEqual(data.descriptor['test_field'], 'changed value')
|
the-stack_0_24893
|
"""This module contains functions for handling sid's internal period.
sid's internal period is similar to Unix time, but the reference date is 2019-01-01
instead of 1970-01-01 and it is not measured in seconds but days. This allows to use a
int16 instead of int32 for Unix time.
The internal period is used to store dates more efficiently as a int16 instead of the
normal datetime64.
The advantage of this approach over enumerating periods passed via the ``duration``
argument of :func:`~sid.simulate.get_simulate_func` is that there is still information
on the exact dates in the states even if the ``"date"`` column is removed during
estimation to reduce memory consumption.
"""
from functools import partial
import pandas as pd
from sid.config import DTYPE_SID_PERIOD
from sid.config import SID_TIME_START
def period_to_timestamp(period, relative_to):
return pd.to_datetime(relative_to) + pd.to_timedelta(period, unit="d")
def timestamp_to_period(timestamp, relative_to):
return DTYPE_SID_PERIOD(
(pd.to_datetime(timestamp) - pd.to_datetime(relative_to)).days
)
sid_period_to_timestamp = partial(period_to_timestamp, relative_to=SID_TIME_START)
timestamp_to_sid_period = partial(timestamp_to_period, relative_to=SID_TIME_START)
def get_date(states):
"""Get date from states."""
if "date" in states.columns:
out = states["date"].iloc[0]
elif "period" in states.columns:
out = sid_period_to_timestamp(states["period"].iloc[0])
else:
raise ValueError("'states' does not contain 'date' or 'period'.")
return out
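if __name__ == "__main__":
    # Minimal usage sketch: round-trip a date through the compact sid period encoding.
    # With the 2019-01-01 reference date described above, 2020-03-01 maps to period 425.
    period = timestamp_to_sid_period("2020-03-01")
    print(period, sid_period_to_timestamp(period))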
|
the-stack_0_24895
|
import time
import re
import pandas as pd
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException
chrome_path = '/Users/amy/Downloads/chromedriver'
def scrollDown(driver, n_scroll):
body = driver.find_element_by_tag_name('body')
while n_scroll >=0:
body.send_keys(Keys.PAGE_DOWN)
n_scroll -=1
return driver
driver = webdriver.Chrome(executable_path = chrome_path)
url ='https://www.sephora.com'
driver.get(url)
time.sleep(5)
df = pd.DataFrame(columns=['Label', 'URL'])
tickers = ['face-wash-for-men','moisturizer-men','mens-grooming-kits-mens-shaving','mens-fragrance','eye-cream-men','deodorant-for-men-body-spray','mens-hair-products']
for ticker in tickers:
url = 'https://www.sephora.com/shop/' + ticker + '?pageSize=300'
driver.get(url)
# xpath = '/html/body/div[5]/div/div/div[1]/div/div/button'
# btn = driver.find_element_by_xpath(xpath)
# btn.click()
# time.sleep(20)
browser = scrollDown(driver,20)
time.sleep(10)
browser = scrollDown(driver,20)
time.sleep(10)
browser = scrollDown(driver,20)
time.sleep(10)
browser = scrollDown(driver, 10)
element = driver.find_elements_by_class_name('css-ix8km1')
subpageURL =[]
for a in element:
subURL = a.get_attribute('href')
subpageURL.append(subURL)
dic = {'Label':ticker, 'URL': subpageURL}
df =df.append(pd.DataFrame(dic), ignore_index = True)
df2 = pd.DataFrame(columns = ['brand_name','product_name','description','price','score','skin_type','ingredients','image_url'])
df = pd.concat([df, df2],axis =1)
for i in range(len(df)):
url = df.URL[i]
driver.get(url)
time.sleep(5)
#brand name
xpath ='/html/body/div[1]/div[2]/div/main/div/div[1]/div/div[2]/div[1]/div[1]/h1/a/span'
df.brand_name[i] = driver.find_element_by_xpath(xpath).text
#price
xpath1 ='/html/body/div[1]/div[2]/div/main/div/div[1]/div/div[2]/div[1]/div[2]/div[1]/span'
df.price[i] = driver.find_element_by_xpath(xpath1).text
# product name
xpath2 = '/html/body/div[1]/div[2]/div/main/div/div[1]/div/div[2]/div[1]/div[1]/h1/span'
df.product_name[i] = driver.find_element_by_xpath(xpath2).text
#image url
try:
img = driver.find_element_by_class_name('css-1rovmyu')
df.image_url[i] = img.get_attribute('src')
except NoSuchElementException:
df.image_url[i] = ' '
# #skin type
# xpath = '//*[@id="tabpanel0"]/div/b[2]'
# skin_type = driver.find_element_by_xpath(xpath)
#product description
df.description[i] = driver.find_element_by_class_name('css-1rny024').text
# ingredients
xpath3 = '//*[@id="tab2"]'
btn = driver.find_element_by_xpath(xpath3)
btn.click()
try:
df.ingredients[i] = driver.find_element_by_xpath('//*[@id="tabpanel2"]/div').text
except NoSuchElementException:
df.ingredients[i] = "No Info"
browser = scrollDown(driver,1)
time.sleep(5)
browser = scrollDown(driver,1)
time.sleep(5)
# score
try:
a = driver.find_element_by_class_name('css-1k3n1un').text
score = re.match('\d.\d', a).group()
df['score'][i] = str(score)
except NoSuchElementException:
df['score'][i] = 0
df.to_csv('sephora_products.csv', encoding = 'utf-8')
|
the-stack_0_24896
|
dir_path = './results/results_semhash_nomissingtag_10runs/'
runs = 10
for perc in [0.1, 0.2, 0.3, 0.4, 0.5, 0.8]:
str_write = ''
for dataset_name in ['snips']:
str_write += dataset_name + '\n'
acc_avg = 0
acc_max = 0
for run in range(1, runs + 1):
acc = 0
subdir_path = dir_path + 'comp_inc_{}_run{}/'.format(perc, run)
filename = subdir_path + '{}_f1.txt.txt'.format(dataset_name)
f = open(filename, 'r')
lines = f.read().split('\n')
# Get max acc from 1 run
for l in lines:
if l is not "":
l_split = l.split(': ')
l_acc = float(l_split[1])
print(l_acc)
if l_acc >= acc:
acc = l_acc
# Get average acc
acc_avg += acc
# Get max acc
if acc >= acc_max:
acc_max = acc
acc_avg /= runs
str_write += ' Avg-{}: {:.2f}\n'.format(runs, acc_avg * 100)
str_write += ' Best-{}: {:.2f}\n\n'.format(runs, acc_max * 100)
print("Saving to: {}".format(dir_path + 'comp_inc_{}'.format(perc)))
f_out = open(dir_path + 'comp_inc_{}'.format(perc), 'w')
f_out.write(str_write)
|
the-stack_0_24897
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Script Language Operators."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
# Used by py_util.cc to get tracebacks.
import traceback # pylint: disable=unused-import
import weakref
import numpy as np
import six
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_script_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
# Map from EagerPyFunc token to tuple (tape, eager args, eager outputs);
# used for differentiation.
tape_cache = {}
def _maybe_copy_to_context_device(tensor, device_name):
"""Copy an EagerTensor to the current device if it's not on `device_name`."""
in_device = tensor.backing_device
if device_name == in_device:
return tensor
else:
# Note that EagerTensor._copy bypasses the placer and copies to the context
# device, which means e.g. int32 Tensors which would normally be forced onto
# the CPU can instead be placed on the GPU. This is necessary so that the
# PyFunc kernel always returns Tensors on the device it's executing on.
return tensor._copy() # pylint: disable=protected-access
class EagerFunc(object):
"""A wrapper for a function owned by an EagerPyFunc."""
def __init__(self, func, Tout, is_grad_func):
"""Constructs an EagerFunc.
Args:
func: The function to wrap.
Tout: A list of datatypes for the output; an empty list if the output is
None.
is_grad_func: Whether this EagerFunc is the gradient of another
EagerPyFunc.
"""
self._func = func
self._out_dtypes = Tout
self._is_grad_func = is_grad_func
def _convert(self, value, dtype):
"""Converts `value` to a tensor of type `dtype`, with error checking.
Args:
value: The tensor to convert.
dtype: The desired dtype.
Returns:
A tensor of type `dtype`, or a zeros tensor if value is None and
      this function is in fact a gradient function.
Raises:
RuntimeError: if `value` is a variable.
"""
if isinstance(value, resource_variable_ops.ResourceVariable):
raise RuntimeError(
"Attempting to return a variable from an eagerly executed py_func. "
"Only numeric data structures like Tensors or NumPy arrays should "
"be returned; to return the value of a variable, make sure to obtain "
"the Tensor backing it by calling `.read_value()` on the variable in "
"question: %s" % value)
if value is None and self._is_grad_func:
# Gradient functions may legitimately return a list that contains
      # both Tensors and Python Nones. Unfortunately this breaks the
# OpKernel, so for now we replace None objects with zeros, which is
# mathematically correct but will prevent short-circuiting gradient
# computations.
#
# TODO(akshayka): Make it possible to return a list of both Tensors and
# Nones from an EagerPyFunc.
return constant_op.constant(0.0, dtype=dtype)
return ops.convert_to_tensor(value, dtype=dtype)
def __call__(self, device, token, args):
"""Passes `args` to `self._func`, which is executed eagerly."""
with context.eager_mode(), backprop.GradientTape() as tape:
# Only watch tensors with a floating dtype.
for tensor in args:
for t in nest.flatten(tensor):
if t.dtype.is_floating:
tape.watch(t)
ret = self._func(*args)
# copy the returned tensors to the PyFunc op's device if necessary.
device_name = device
if device_name is None:
# "None" here means "CPU", from the nullptr convention with C++ device
# pointers.
device_name = "/job:localhost/replica:0/task:0/device:CPU:0"
with ops.device(device):
if isinstance(ret, (tuple, list)):
outputs = [
_maybe_copy_to_context_device(self._convert(x, dtype=dtype),
device_name)
for (x, dtype) in zip(ret, self._out_dtypes)
]
elif ret is None:
outputs = None
else:
outputs = _maybe_copy_to_context_device(
self._convert(ret, dtype=self._out_dtypes[0]), device_name)
tape_cache[compat.as_bytes(token)] = (tape, args, outputs)
return outputs
class FuncRegistry(object):
"""A helper class to keep track of registered py functions.
FuncRegistry keeps a map from unique tokens (string) to python
functions, which takes numpy arrays and outputs numpy arrays.
"""
def __init__(self):
self._lock = threading.Lock()
self._unique_id = 0 # GUARDED_BY(self._lock)
# Only store weakrefs to the functions. The strong reference is stored in
# the graph.
self._funcs = weakref.WeakValueDictionary()
@property
def _ctx(self):
# N.B. This is needed to support calling py_func with GPU tensors,
# which must be transferred to CPU if used in any of the NumPy APIs.
context.ensure_initialized()
return context.context()._handle # pylint: disable=protected-access
def insert(self, func):
"""Registers `func` and returns a unique token for this entry."""
token = self._next_unique_token()
# Store a weakref to the function
self._funcs[token] = func
return token
def remove(self, token):
"""Removes the registered function corresponding to `token`."""
self._funcs.pop(token, None)
@staticmethod
def _convert(value, dtype=None):
"""Converts an arg to numpy, avoiding dangerous string and unicode dtypes.
Numpy pads with zeros when using string and unicode dtypes if different
components of a tensor have different lengths. This is bad: ignoring the
padding is wrong for text data, and removing the padding is wrong for binary
data. To avoid this bug, we redo the conversion using an object dtype.
Additionally, we convert unicode strings to (byte-)strings for
compatibility.
Args:
value: Value to convert to a numpy array.
dtype: (Optional.) Desired NumPy type for the returned value.
Returns:
A numpy array.
"""
result = np.asarray(value, dtype=dtype, order="C")
if result.dtype.char == "S" and result is not value:
return np.asarray(value, order="C", dtype=object)
elif result.dtype.char == "U" and result is not value:
value = np.vectorize(lambda x: x.encode("utf8"))(value)
return np.asarray(value, order="C", dtype=object)
elif result.dtype.char == "U":
return result.astype(np.bytes_)
else:
return result
def __call__(self, token, device, args):
"""Calls the registered function for `token` with args.
Args:
token: A key into this `FuncRegistry` identifying which function to call.
device: Name of the device on which outputs of `token`'s corresponding
operation should be placed. Used iff the function registered for `token`
is an EagerPyFunc.
args: The arguments to pass to the function registered for `token`.
Returns:
The output of the function registered for `token`.
Raises:
ValueError: if no function is registered for `token`.
"""
func = self._funcs.get(token, None)
if func is None:
raise ValueError("callback %s is not found" % token)
if isinstance(func, EagerFunc):
# NB: Different invocations of the same py_func will share the same
# token, and the entries they stash in the tape_cache will collide.
# In practice, when executing a graph, this should only happen if
# the py_func is in a while_loop whose iterations are run in parallel
# or if the graph is being driven by concurrent session.run() calls.
#
# TODO(akshayka): Key the tape cache in a thread-safe way.
return func(device, token, args)
else:
ret = func(*args)
# Strings seem to lead to a memory leak here if they're not wrapped in a
# list.
if isinstance(ret, six.binary_type):
ret = [ret]
# Ensures that we return either a single numpy array or a list of numpy
# arrays.
if isinstance(ret, (tuple, list)):
return [self._convert(x) for x in ret]
else:
return self._convert(ret)
def size(self):
"""Returns how many functions are currently registered."""
return len(self._funcs)
def _next_unique_token(self):
"""Returns a unique token."""
with self._lock:
uid = self._unique_id
self._unique_id += 1
return "pyfunc_%d" % uid
# Global registry for py functions.
_py_funcs = FuncRegistry()
pywrap_tensorflow.InitializePyTrampoline(_py_funcs)
def _internal_py_func(func,
inp,
Tout,
stateful=None,
eager=False,
is_grad_func=False,
name=None):
"""See documentation for py_func and eager_py_func."""
if not callable(func):
raise ValueError("Expected func to be callable, got func of type {}".format(
type(func)))
is_list_or_tuple = False
if isinstance(Tout, (list, tuple)):
is_list_or_tuple = True
else:
Tout = [Tout]
if eager:
func = EagerFunc(func, Tout, is_grad_func)
token = _py_funcs.insert(func)
# We tie the registered function's lifetime with the current default graph,
# i.e., when the current graph is destroyed, we remove its py funcs.
graph = ops.get_default_graph()
# pylint: disable=protected-access
while isinstance(graph, function._FuncGraph):
# If the py_func was declared inside a _FuncGraph, its lifetime should be
# bound to that of the outer graph instead.
graph = graph._outer_graph
# TODO(zhifengc): Consider adding a Graph method to collect
# `cleanup` objects in one of its member.
if not hasattr(graph, "_py_funcs_used_in_graph"):
graph._py_funcs_used_in_graph = []
# Store a reference to the function in the graph to ensure it stays alive
# as long as the graph lives. When the graph is destroyed, the function
# is left to the garbage collector for destruction as well.
graph._py_funcs_used_in_graph.append(func)
# pylint: enable=protected-access
if eager:
result = gen_script_ops.eager_py_func(
input=inp,
token=token,
is_async=context.is_async(),
Tout=Tout,
name=name)
else:
if stateful:
result = gen_script_ops.py_func(
input=inp, token=token, Tout=Tout, name=name)
else:
result = gen_script_ops.py_func_stateless(
input=inp, token=token, Tout=Tout, name=name)
return result if is_list_or_tuple else result[0]
# TODO(akshayka): Implement higher-order derivatives.
@ops.RegisterGradient("EagerPyFunc")
def _EagerPyFuncGrad(op, *dy):
"""Computes the gradient of an EagerPyFunc."""
token = op.get_attr("token")
def eagerly_executed_grad(*dy):
tape, eager_inputs, eager_outputs = tape_cache.pop(compat.as_bytes(token))
return tape.gradient(eager_outputs, eager_inputs, output_gradients=dy)
with ops.control_dependencies(op.outputs):
return _internal_py_func(
func=eagerly_executed_grad,
inp=dy,
Tout=[tensor.dtype for tensor in op.inputs],
eager=True,
is_grad_func=True)
@tf_export("py_function")
def eager_py_func(func, inp, Tout, name=None):
"""Wraps a python function into a TensorFlow op that executes it eagerly.
This function allows expressing computations in a TensorFlow graph as
Python functions. In particular, it wraps a Python function `func`
in a once-differentiable TensorFlow operation that executes it with eager
execution enabled. As a consequence, `tf.py_function` makes it
possible to express control flow using Python constructs (`if`, `while`,
`for`, etc.), instead of TensorFlow control flow constructs (`tf.cond`,
`tf.while_loop`). For example, you might use `tf.py_function` to
implement the log huber function:
```python
def log_huber(x, m):
if tf.abs(x) <= m:
return x**2
else:
return m**2 * (1 - 2 * tf.math.log(m) + tf.math.log(x**2))
x = tf.compat.v1.placeholder(tf.float32)
m = tf.compat.v1.placeholder(tf.float32)
y = tf.py_function(func=log_huber, inp=[x, m], Tout=tf.float32)
dy_dx = tf.gradients(y, x)[0]
with tf.compat.v1.Session() as sess:
# The session executes `log_huber` eagerly. Given the feed values below,
# it will take the first branch, so `y` evaluates to 1.0 and
# `dy_dx` evaluates to 2.0.
y, dy_dx = sess.run([y, dy_dx], feed_dict={x: 1.0, m: 2.0})
```
You can also use `tf.py_function` to debug your models at runtime
using Python tools, i.e., you can isolate portions of your code that
you want to debug, wrap them in Python functions and insert `pdb` tracepoints
or print statements as desired, and wrap those functions in
`tf.py_function`.
For more information on eager execution, see the
[Eager guide](https://tensorflow.org/guide/eager).
`tf.py_function` is similar in spirit to `tf.compat.v1.py_func`, but unlike
the latter, the former lets you use TensorFlow operations in the wrapped
Python function. In particular, while `tf.compat.v1.py_func` only runs on CPUs
and
wraps functions that take NumPy arrays as inputs and return NumPy arrays as
outputs, `tf.py_function` can be placed on GPUs and wraps functions
that take Tensors as inputs, execute TensorFlow operations in their bodies,
and return Tensors as outputs.
Like `tf.compat.v1.py_func`, `tf.py_function` has the following limitations
with respect to serialization and distribution:
* The body of the function (i.e. `func`) will not be serialized in a
`GraphDef`. Therefore, you should not use this function if you need to
serialize your model and restore it in a different environment.
* The operation must run in the same address space as the Python program
that calls `tf.py_function()`. If you are using distributed
TensorFlow, you must run a `tf.distribute.Server` in the same process as the
program that calls `tf.py_function()` and you must pin the created
operation to a device in that server (e.g. using `with tf.device():`).
Args:
func: A Python function which accepts a list of `Tensor` objects having
element types that match the corresponding `tf.Tensor` objects in `inp`
and returns a list of `Tensor` objects (or a single `Tensor`, or `None`)
having element types that match the corresponding values in `Tout`.
inp: A list of `Tensor` objects.
Tout: A list or tuple of tensorflow data types or a single tensorflow data
type if there is only one, indicating what `func` returns; an empty list
if no value is returned (i.e., if the return value is `None`).
name: A name for the operation (optional).
Returns:
A list of `Tensor` or a single `Tensor` which `func` computes; an empty list
if `func` returns None.
"""
return _internal_py_func(func=func, inp=inp, Tout=Tout, eager=True, name=name)
def py_func_common(func, inp, Tout, stateful=True, name=None):
"""Wraps a python function and uses it as a TensorFlow op.
Given a python function `func`, which takes numpy arrays as its
arguments and returns numpy arrays as its outputs, wrap this function as an
operation in a TensorFlow graph. The following snippet constructs a simple
TensorFlow graph that invokes the `np.sinh()` NumPy function as a operation
in the graph:
```python
def my_func(x):
# x will be a numpy array with the contents of the placeholder below
return np.sinh(x)
input = tf.compat.v1.placeholder(tf.float32)
y = tf.compat.v1.py_func(my_func, [input], tf.float32)
```
**N.B.** The `tf.compat.v1.py_func()` operation has the following known
limitations:
* The body of the function (i.e. `func`) will not be serialized in a
`GraphDef`. Therefore, you should not use this function if you need to
serialize your model and restore it in a different environment.
* The operation must run in the same address space as the Python program
that calls `tf.compat.v1.py_func()`. If you are using distributed
TensorFlow, you
must run a `tf.distribute.Server` in the same process as the program that
calls
`tf.compat.v1.py_func()` and you must pin the created operation to a device
in that
server (e.g. using `with tf.device():`).
Args:
func: A Python function, which accepts `ndarray` objects as arguments and
returns a list of `ndarray` objects (or a single `ndarray`). This function
must accept as many arguments as there are tensors in `inp`, and these
argument types will match the corresponding `tf.Tensor` objects in `inp`.
The returns `ndarray`s must match the number and types defined `Tout`.
Important Note: Input and output numpy `ndarray`s of `func` are not
guaranteed to be copies. In some cases their underlying memory will be
shared with the corresponding TensorFlow tensors. In-place modification
or storing `func` input or return values in python datastructures
without explicit (np.)copy can have non-deterministic consequences.
inp: A list of `Tensor` objects.
Tout: A list or tuple of tensorflow data types or a single tensorflow data
type if there is only one, indicating what `func` returns.
stateful: (Boolean.) If True, the function should be considered stateful. If
a function is stateless, when given the same input it will return the same
output and have no observable side effects. Optimizations such as common
subexpression elimination are only performed on stateless operations.
name: A name for the operation (optional).
Returns:
A list of `Tensor` or a single `Tensor` which `func` computes.
"""
if context.executing_eagerly():
result = func(*[x.numpy() for x in inp])
result = nest.flatten(result)
result = [x if x is None else ops.convert_to_tensor(x) for x in result]
if len(result) == 1:
# Mimic the automatic unwrapping in graph-mode py_func
result, = result
return result
return _internal_py_func(
func=func, inp=inp, Tout=Tout, stateful=stateful, eager=False, name=name)
@deprecation.deprecated(
date=None,
instructions="""tf.py_func is deprecated in TF V2. Instead, there are two
options available in V2.
- tf.py_function takes a python function which manipulates tf eager
tensors instead of numpy arrays. It's easy to convert a tf eager tensor to
an ndarray (just call tensor.numpy()) but having access to eager tensors
means `tf.py_function`s can use accelerators such as GPUs as well as
being differentiable using a gradient tape.
- tf.numpy_function maintains the semantics of the deprecated tf.py_func
(it is not differentiable, and manipulates numpy arrays). It drops the
stateful argument making all functions stateful.
""")
@tf_export(v1=["py_func"])
def py_func(func, inp, Tout, stateful=True, name=None):
return py_func_common(func, inp, Tout, stateful, name=name)
py_func.__doc__ = "%s" % py_func_common.__doc__
@tf_export("numpy_function")
def numpy_function(func, inp, Tout, name=None):
"""Wraps a python function and uses it as a TensorFlow op.
Given a python function `func` wrap this function as an operation in a
TensorFlow function. `func` must take numpy arrays as its arguments and
return numpy arrays as its outputs.
The following example creates a TensorFlow graph with `np.sinh()` as an
operation in the graph:
>>> def my_numpy_func(x):
... # x will be a numpy array with the contents of the input to the
... # tf.function
... return np.sinh(x)
>>> @tf.function(input_signature=[tf.TensorSpec(None, tf.float32)])
... def tf_function(input):
... y = tf.numpy_function(my_numpy_func, [input], tf.float32)
... return y * y
>>> tf_function(tf.constant(1.))
<tf.Tensor: id=..., shape=(), dtype=float32, numpy=1.3810978>
Comparison to `tf.py_function`:
`tf.py_function` and `tf.numpy_function` are very similar, except that
`tf.numpy_function` takes numpy arrays, and not `tf.Tensor`s. If you want the
function to contain `tf.Tensors`, and have any TensorFlow operations executed
in the function be differentiable, please use `tf.py_function`.
Note: The `tf.numpy_function` operation has the following known
limitations:
* The body of the function (i.e. `func`) will not be serialized in a
`tf.SavedModel`. Therefore, you should not use this function if you need to
serialize your model and restore it in a different environment.
* The operation must run in the same address space as the Python program
that calls `tf.numpy_function()`. If you are using distributed
TensorFlow, you must run a `tf.distribute.Server` in the same process as the
program that calls `tf.numpy_function` you must pin the created
operation to a device in that server (e.g. using `with tf.device():`).
* Since the function takes numpy arrays, you cannot take gradients
through a numpy_function. If you require something that is differentiable,
please consider using tf.py_function.
Args:
func: A Python function, which accepts `numpy.ndarray` objects as arguments
and returns a list of `numpy.ndarray` objects (or a single
`numpy.ndarray`). This function must accept as many arguments as there are
tensors in `inp`, and these argument types will match the corresponding
`tf.Tensor` objects in `inp`. The returns `numpy.ndarray`s must match the
number and types defined `Tout`.
Important Note: Input and output `numpy.ndarray`s of `func` are not
guaranteed to be copies. In some cases their underlying memory will be
shared with the corresponding TensorFlow tensors. In-place modification
or storing `func` input or return values in python datastructures
without explicit (np.)copy can have non-deterministic consequences.
inp: A list of `tf.Tensor` objects.
Tout: A list or tuple of tensorflow data types or a single tensorflow data
type if there is only one, indicating what `func` returns.
stateful (bool): If True, the function should be considered stateful. If
a function is stateless, when given the same input it will return the same
output and have no observable side effects. Optimizations such as common
subexpression elimination are only performed on stateless operations.
name: (Optional) A name for the operation.
Returns:
Single or list of `tf.Tensor` which `func` computes.
"""
return py_func_common(func, inp, Tout, stateful=True, name=name)
ops.NotDifferentiable("PyFunc")
ops.NotDifferentiable("PyFuncStateless")
|
the-stack_0_24900
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
"""Provides utilities useful for multiprocessing."""
from multiprocessing import Lock, RawArray
from collections.abc import MutableMapping
import ctypes
import sys
class SharedTable(MutableMapping):
"""Provides a simple shared-memory table of integers, floats, or strings.
Use this class as follows:
.. code-block:: python
tbl = SharedTable({'cnt': 0})
with tbl.get_lock():
tbl['startTime'] = time.time()
for i in range(10):
with tbl.get_lock():
tbl['cnt'] += 1
"""
types = {
int: ctypes.c_int,
float: ctypes.c_float,
bool: ctypes.c_bool,
}
def __init__(self, init_dict=None):
"""Create a shared memory version of each element of the initial
        dictionary. Creates empty arrays otherwise. Note that new keys cannot
        be added after construction, because the shared arrays are fixed-size.
Each different type (all supported types listed in the ``types`` array
above) has its own array. For each key we store an index into the
appropriate array as well as the type of value stored for that key.
"""
# idx is dict of {key: (array_idx, value_type)}
self.idx = {}
# arrays is dict of {value_type: array_of_ctype}
self.arrays = {}
self.tensors = {}
if init_dict:
sizes = {typ: 0 for typ in self.types.keys()}
for k, v in init_dict.items():
if 'Tensor' in str(type(v)):
# add tensor to tensor dict--don't try to put in rawarray
self.tensors[k] = v
continue
elif type(v) not in sizes:
raise TypeError('SharedTable does not support values of ' +
'type ' + str(type(v)))
sizes[type(v)] += 1
# pop tensors from init_dict
for k in self.tensors.keys():
init_dict.pop(k)
# create raw arrays for each type
for typ, sz in sizes.items():
self.arrays[typ] = RawArray(self.types[typ], sz)
# track indices for each key, assign them to their typed rawarray
idxs = {typ: 0 for typ in self.types.keys()}
for k, v in init_dict.items():
val_type = type(v)
self.idx[k] = (idxs[val_type], val_type)
if val_type == str:
v = sys.intern(v)
self.arrays[val_type][idxs[val_type]] = v
idxs[val_type] += 1
# initialize any needed empty arrays
for typ, ctyp in self.types.items():
if typ not in self.arrays:
self.arrays[typ] = RawArray(ctyp, 0)
self.lock = Lock()
def __len__(self):
return len(self.idx) + len(self.tensors)
def __iter__(self):
return iter([k for k in self.idx] + [k for k in self.tensors])
def __contains__(self, key):
return key in self.idx or key in self.tensors
def __getitem__(self, key):
"""Returns shared value if key is available."""
if key in self.tensors:
return self.tensors[key]
elif key in self.idx:
idx, typ = self.idx[key]
return self.arrays[typ][idx]
else:
raise KeyError('Key "{}" not found in SharedTable'.format(key))
def __setitem__(self, key, value):
"""If key is in table, update it. Otherwise, extend the array to make
room. This uses additive resizing not multiplicative, since the number
of keys is not likely to change frequently during a run, so do not abuse
it.
Raises an error if you try to change the type of the value stored for
that key--if you need to do this, you must delete the key first.
"""
val_type = type(value)
if 'Tensor' in str(val_type):
self.tensors[key] = value
return
if val_type not in self.types:
raise TypeError('SharedTable does not support type ' + str(type(value)))
if val_type == str:
value = sys.intern(value)
if key in self.idx:
idx, typ = self.idx[key]
if typ != val_type:
raise TypeError(('Cannot change stored type for {key} from ' +
'{v1} to {v2}. You need to del the key first' +
' if you need to change value types.'
).format(key=key, v1=typ, v2=val_type))
self.arrays[typ][idx] = value
else:
raise KeyError('Cannot add more keys to the shared table as '
'they will not be synced across processes.')
def __delitem__(self, key):
if key in self.tensors:
del self.tensors[key]
elif key in self.idx:
del self.idx[key]
else:
raise KeyError('Key "{}" not found in SharedTable'.format(key))
def __str__(self):
"""Returns simple dict representation of the mapping."""
return '{{{}}}'.format(
', '.join(
['{k}: {v}'.format(k=key, v=self.arrays[typ][idx])
for key, (idx, typ) in self.idx.items()] +
['{k}: {v}'.format(k=k, v=v) for k, v in self.tensors.items()]
)
)
def __repr__(self):
"""Returns the object type and memory location with the mapping."""
representation = super().__repr__()
return representation.replace('>', ': {}>'.format(str(self)))
def get_lock(self):
return self.lock
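if __name__ == '__main__':
    # Minimal single-process sketch: values live in shared memory, so in real use
    # every read-modify-write from worker processes should happen under get_lock().
    tbl = SharedTable({'cnt': 0, 'best': 0.0, 'done': False})
    with tbl.get_lock():
        tbl['cnt'] += 1
        tbl['best'] = 0.5
    print(tbl)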
|
the-stack_0_24901
|
import requests
from bs4 import BeautifulSoup
import webbrowser as web
import urllib.parse as urlparse
import keyboard
from collections import OrderedDict
import re
import subprocess
def copy2clip(txt):
return subprocess.check_call(
'echo "'+txt.strip()+'" |clip',
shell=True)
class Question:
def __init__(self,question,options) -> None:
self.question = question
self.options = options
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
class lms_quiz():
def __init__(self,sessionid,url):
self.sessionid = sessionid
self.url = url
self.hot_hey = "ctrl+shift+'"
self.headers = {
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36',
'Cookie': f"MoodleSession={self.sessionid}"
}
self.questions = OrderedDict()
self.url_parts = list(urlparse.urlparse(self.url))
self.query = dict(urlparse.parse_qsl(self.url_parts[4]))
# GET TOTAL QUESTION AND PAGES
html_soup = self.get_page(self.url)
questions = self.get_total_questins(html_soup)
self.question_count = len(questions)
# pages are 0 - nth-1
# if more page exits
try:
self.pages = urlparse.parse_qs(urlparse.urlparse(questions[-1]).query).get('page')[0]
except :
self.pages = 0
# get question from first page
for que in self.get_question(html_soup) :
self.questions[que] = None
def get_updated_url(self,params):
self.query.update(params)
self.url_parts[4] = urlparse.urlencode(self.query)
return urlparse.urlunparse(self.url_parts)
def get_all_question(self):
count = 1
print(f"\n{bcolors.OKGREEN}Exploring new Pages.")
while self.question_count != len(self.questions):
params = {'page':f'{count}'}
page_url = self.get_updated_url(params)
print(f"{bcolors.OKCYAN} {count} : {page_url}{bcolors.ENDC}")
# get the page
page_html_soul = self.get_page(page_url)
# get the questions and update list of questions
for i in self.get_question(page_html_soul): ### need to use it
self.questions[i] = None
count += 1
if count >= self.question_count:
break
print()
def get_page(self,url):
"""get the page from url and cookies then return parse resutls"""
response = requests.get(
url,
headers = self.headers
)
if response.status_code == 404:
print(bcolors.FAIL, "Error in opening page - ", url, bcolors.ENDC)
exit()
return BeautifulSoup(response.content, 'html.parser')
def get_total_questins(self,soup):
"""take page and return and count total questions"""
return [ href for href in [link_['href'] for link_ in \
soup.select("div.qn_buttons.clearfix.multipages a") \
if link_] if href]
def get_question(self,soup):
"""get questions from the soup/ from html page"""
content = soup.select(".content .formulation.clearfix")
# fix it for the text-area adn file upload
questions = []
for question in content:
question_ = BeautifulSoup(f"<html>{question}</html>", 'html.parser').text
question_ = re.sub(r"[A-Z]{3}[0-9]{2}[A-Z][0-9]{4}[_A-Z0-9:]*","",question_)
question_ = question_.replace('\r\n',' ')
question_ = question_.replace('Clear my choice','')
question_ = question_.replace('Select one:','\n')
question_ = question_.replace('Loading...','')
question_ = question_.replace('You can drag and drop files here to add them.Drop files here to upload','\n')
question_ = question_.replace('Maximum file size: 1MB, ','')
question_ = question_.replace('- drag and drop not supported','')
question_ = question_.replace('You can drag and drop files here to add them.','')
question_ = question_.replace('Drop files here to upload','')
question_ = re.sub(r"maximum number of files: \d","",question_)
question_ = re.sub(r"Accepted file typesDocument file.*","",question_)
question_ = re.sub(r"\nPDF.*","",question_)
question_ = question_.replace('\n\n','')
            # separate the question text from its options
only_question = re.search(r"^Question text(.*)",question_).group(1)
options = [ x.group().strip() \
for x in re.finditer(r"(\n([a-z]\. )?.*)",question_) ] or []
# for x in re.finditer(r"(([a-z]\. |\n).*)",question_) ] or []
# print(question_)
# print(only_question)
# print(options)
# print()
questions.append(
Question(only_question,options)
)
return questions
    def get_content_urls(self,question):
        """Return the list of search URLs for the given question."""
        question = urlparse.quote(question)
        # uncomment the entries below to include more sources in the search
return [
f"https://scholar.google.com/scholar?hl=en&as_sdt=0%2C5&q={question}&btnG=",# google scholar
f"https://www.google.com/search?q={question}",# google
# f"https://en.wikipedia.org/w/index.php?search={question}&title=Special%3ASearch&profile=advanced",# wiki-pedia
# f"https://www.quora.com/search?q={question}",# quora
f"https://www.answers.com/search?q={question}",# answer
f"https://www.toppr.com/ask/search/?query={question}"# toppr
]
    def open_searches(self,question):
        """Open the search result pages for the question in the default web browser."""
        # can be updated to open whichever sources you prefer
for url in self.get_content_urls(question) :
web.open(url)
    def show_answers(self):
        """Show each question, copy it to the clipboard, and open web searches on demand."""
for index ,ques in enumerate(self.questions.keys()):
print(f"{bcolors.WARNING}[Question:{index+1}] > \n{bcolors.OKBLUE}{ques.question}{bcolors.ENDC}")
for op in ques.options:
print(f"{bcolors.OKBLUE}{op}{bcolors.ENDC}")
copy2clip(ques.question)
            print(f"{bcolors.OKCYAN}\tQuestion copied to the clipboard.{bcolors.ENDC}")
            print(f"{bcolors.HEADER}{bcolors.UNDERLINE}Press the hotkey <{self.hot_key}> to open search results for this question.{bcolors.ENDC}")
            print(f"{bcolors.HEADER}{bcolors.UNDERLINE}\t\tor press Ctrl+C to skip to the next question.{bcolors.ENDC}")
try :
# on key press event
                keyboard.wait(f"{self.hot_key}")
except KeyboardInterrupt:
                print("next question")
continue
print(f"{bcolors.OKGREEN}showing pages{bcolors.ENDC}")
self.open_searches(ques.question)
@classmethod
def runner(cls,sessionid,url):
cl = cls(sessionid,url)
cl.get_all_question()
cl.show_answers()
if __name__ == "__main__":
    # update the sessionid (copied from your browser cookies) and the quiz url
sessionid = "" # example - '5gk1ti0t2e65qmgb0772snsdfl7'
url = "" # example "https://lms.galgotiasuniversity.edu.in/mod/quiz/attempt.php?attempt=6314102&cmid=484157"
# testing
sessionid = "lev15q85j50mn8viqlks0ert85" # example - '5gk1ti0t2e65qmgb0772snsdfl7'
url = "https://lms.galgotiasuniversity.edu.in/mod/quiz/attempt.php?attempt=6404785&cmid=500516" # example "https://lms.galgotiasuniversity.edu.in/mod/quiz/attempt.php?attempt=6314102&cmid=484157"
# url = "http://127.0.0.1:5500/testing10que.html?attempt=6349252&cmid=498254"
if not sessionid or not url :
raise Exception(f"{bcolors.FAIL}Update the value to use the program {bcolors.ENDC}")
lms_quiz.runner(
sessionid,url
)
|
the-stack_0_24903
|
from common.numpy_fast import clip, interp
from selfdrive.boardd.boardd import can_list_to_can_capnp
from selfdrive.car.toyota.toyotacan import make_can_msg, create_video_target,\
create_steer_command, create_ui_command, \
create_ipas_steer_command, create_accel_command, \
create_fcw_command
from selfdrive.car.toyota.values import ECU, STATIC_MSGS
from selfdrive.can.packer import CANPacker
# Accel limits
ACCEL_HYST_GAP = 0.02 # don't change accel command for small oscillations within this value
ACCEL_MAX = 1.5 # 1.5 m/s2
ACCEL_MIN = -3.0 # -3.0 m/s2
ACCEL_SCALE = max(ACCEL_MAX, -ACCEL_MIN)
# Steer torque limits
STEER_MAX = 1000 #1500
STEER_DELTA_UP = 10 # 1.5s time to peak torque
STEER_DELTA_DOWN = 25 # always lower than 45 otherwise the Rav4 faults (Prius seems ok with 50)
STEER_ERROR_MAX = 350 # max delta between torque cmd and torque motor
# Steer angle limits (tested at the Crows Landing track and considered ok)
ANGLE_MAX_BP = [0., 5.]
ANGLE_MAX_V = [510., 300.]
ANGLE_DELTA_BP = [0., 5., 15.]
ANGLE_DELTA_V = [5., .8, .15] # windup limit
ANGLE_DELTA_VU = [5., 3.5, 0.4] # unwind limit
TARGET_IDS = [0x340, 0x341, 0x342, 0x343, 0x344, 0x345,
0x363, 0x364, 0x365, 0x370, 0x371, 0x372,
0x373, 0x374, 0x375, 0x380, 0x381, 0x382,
0x383]
def accel_hysteresis(accel, accel_steady, enabled):
# for small accel oscillations within ACCEL_HYST_GAP, don't change the accel command
if not enabled:
# send 0 when disabled, otherwise acc faults
accel_steady = 0.
elif accel > accel_steady + ACCEL_HYST_GAP:
accel_steady = accel - ACCEL_HYST_GAP
elif accel < accel_steady - ACCEL_HYST_GAP:
accel_steady = accel + ACCEL_HYST_GAP
accel = accel_steady
return accel, accel_steady
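# Editorial worked example (values are illustrative only) of how the hysteresis band behaves,
# assuming ACCEL_HYST_GAP = 0.02 as defined above:
#   accel_hysteresis(0.50, 0.00, True)  -> (0.50, 0.48)  # request above the band: steady trails by the gap
#   accel_hysteresis(0.49, 0.48, True)  -> (0.48, 0.48)  # inside the band: command is held at the steady value
#   accel_hysteresis(0.45, 0.48, True)  -> (0.45, 0.47)  # below the band: steady follows the request back down
#   accel_hysteresis(0.50, 0.48, False) -> (0.50, 0.00)  # disabled: only the steady value is reset to zero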
def process_hud_alert(hud_alert, audible_alert):
# initialize to no alert
steer = 0
fcw = 0
sound1 = 0
sound2 = 0
if hud_alert == 'fcw':
fcw = 1
elif hud_alert == 'steerRequired':
steer = 1
if audible_alert == 'chimeRepeated':
sound1 = 1
elif audible_alert in ['beepSingle', 'chimeSingle', 'chimeDouble']:
# TODO: find a way to send single chimes
sound2 = 1
return steer, fcw, sound1, sound2
def ipas_state_transition(steer_angle_enabled, enabled, ipas_active, ipas_reset_counter):
if enabled and not steer_angle_enabled:
#ipas_reset_counter = max(0, ipas_reset_counter - 1)
#if ipas_reset_counter == 0:
# steer_angle_enabled = True
#else:
# steer_angle_enabled = False
#return steer_angle_enabled, ipas_reset_counter
return True, 0
elif enabled and steer_angle_enabled:
if steer_angle_enabled and not ipas_active:
ipas_reset_counter += 1
else:
ipas_reset_counter = 0
if ipas_reset_counter > 10: # try every 0.1s
steer_angle_enabled = False
return steer_angle_enabled, ipas_reset_counter
else:
return False, 0
class CarController(object):
def __init__(self, dbc_name, car_fingerprint, enable_camera, enable_dsu, enable_apg):
self.braking = False
# redundant safety check with the board
self.controls_allowed = True
self.last_steer = 0
self.last_angle = 0
self.accel_steady = 0.
self.car_fingerprint = car_fingerprint
self.alert_active = False
self.last_standstill = False
self.standstill_req = False
self.angle_control = False
self.steer_angle_enabled = False
self.ipas_reset_counter = 0
self.fake_ecus = set()
if enable_camera: self.fake_ecus.add(ECU.CAM)
if enable_dsu: self.fake_ecus.add(ECU.DSU)
if enable_apg: self.fake_ecus.add(ECU.APGS)
self.packer = CANPacker(dbc_name)
def update(self, sendcan, enabled, CS, frame, actuators,
pcm_cancel_cmd, hud_alert, audible_alert):
# *** compute control surfaces ***
# gas and brake
apply_accel = actuators.gas - actuators.brake
apply_accel, self.accel_steady = accel_hysteresis(apply_accel, self.accel_steady, enabled)
apply_accel = clip(apply_accel * ACCEL_SCALE, ACCEL_MIN, ACCEL_MAX)
# steer torque
apply_steer = int(round(actuators.steer * STEER_MAX))
# max_lim = min(max(CS.steer_torque_motor + STEER_ERROR_MAX, STEER_ERROR_MAX), STEER_MAX)
# min_lim = max(min(CS.steer_torque_motor - STEER_ERROR_MAX, -STEER_ERROR_MAX), -STEER_MAX)
# apply_steer = clip(apply_steer, min_lim, max_lim)
# slow rate if steer torque increases in magnitude
if self.last_steer > 0:
apply_steer = clip(apply_steer, max(self.last_steer - STEER_DELTA_DOWN, - STEER_DELTA_UP), self.last_steer + STEER_DELTA_UP)
else:
apply_steer = clip(apply_steer, self.last_steer - STEER_DELTA_UP, min(self.last_steer + STEER_DELTA_DOWN, STEER_DELTA_UP))
    # dropping torque immediately might cause the EPS to temporarily fault. On the other hand,
    # safety_toyota cuts steer torque immediately anyway. TODO: monitor if this is a real issue
# only cut torque when steer state is a known fault
if not enabled or CS.steer_state in [9, 25]:
apply_steer = 0
self.steer_angle_enabled, self.ipas_reset_counter = \
ipas_state_transition(self.steer_angle_enabled, enabled, CS.ipas_active, self.ipas_reset_counter)
#print self.steer_angle_enabled, self.ipas_reset_counter, CS.ipas_active
# steer angle
if self.steer_angle_enabled and CS.ipas_active:
apply_angle = actuators.steerAngle
angle_lim = interp(CS.v_ego, ANGLE_MAX_BP, ANGLE_MAX_V)
apply_angle = clip(apply_angle, -angle_lim, angle_lim)
# windup slower
if self.last_angle * apply_angle > 0. and abs(apply_angle) > abs(self.last_angle):
angle_rate_lim = interp(CS.v_ego, ANGLE_DELTA_BP, ANGLE_DELTA_V)
else:
angle_rate_lim = interp(CS.v_ego, ANGLE_DELTA_BP, ANGLE_DELTA_VU)
apply_angle = clip(apply_angle, self.last_angle - angle_rate_lim, self.last_angle + angle_rate_lim)
else:
apply_angle = CS.angle_steers
if not enabled and CS.pcm_acc_status:
# send pcm acc cancel cmd if drive is disabled but pcm is still on, or if the system can't be activated
pcm_cancel_cmd = 1
# on entering standstill, send standstill request
if CS.standstill and not self.last_standstill:
self.standstill_req = True
if CS.pcm_acc_status != 8:
# pcm entered standstill or it's disabled
self.standstill_req = False
self.last_steer = apply_steer
self.last_angle = apply_angle
self.last_accel = apply_accel
self.last_standstill = CS.standstill
can_sends = []
#*** control msgs ***
#print "steer", apply_steer, min_lim, max_lim, CS.steer_torque_motor
    # the toyota CAN trace shows this message at 42Hz, with the counter alternately adding 1 and 2;
    # sending it at 100Hz seems to allow a higher rate limit, as the rate limit appears to be imposed
    # on consecutive messages
if ECU.CAM in self.fake_ecus:
if self.angle_control:
can_sends.append(create_steer_command(self.packer, 0., frame))
else:
can_sends.append(create_steer_command(self.packer, apply_steer, frame))
if self.angle_control:
can_sends.append(create_ipas_steer_command(self.packer, apply_angle, self.steer_angle_enabled,
ECU.APGS in self.fake_ecus))
elif ECU.APGS in self.fake_ecus:
can_sends.append(create_ipas_steer_command(self.packer, 0, 0, True))
# accel cmd comes from DSU, but we can spam can to cancel the system even if we are using lat only control
if (frame % 3 == 0 and ECU.DSU in self.fake_ecus) or (pcm_cancel_cmd and ECU.CAM in self.fake_ecus):
if ECU.DSU in self.fake_ecus:
can_sends.append(create_accel_command(self.packer, apply_accel, pcm_cancel_cmd, self.standstill_req))
else:
can_sends.append(create_accel_command(self.packer, 0, pcm_cancel_cmd, False))
if frame % 10 == 0 and ECU.CAM in self.fake_ecus:
for addr in TARGET_IDS:
can_sends.append(create_video_target(frame/10, addr))
    # ui msg is sent at 100Hz, but we send it asap if:
# - there is something to display
# - there is something to stop displaying
alert_out = process_hud_alert(hud_alert, audible_alert)
steer, fcw, sound1, sound2 = alert_out
if (any(alert_out) and not self.alert_active) or \
(not any(alert_out) and self.alert_active):
send_ui = True
self.alert_active = not self.alert_active
else:
send_ui = False
if (frame % 100 == 0 or send_ui) and ECU.CAM in self.fake_ecus:
can_sends.append(create_ui_command(self.packer, steer, sound1, sound2))
can_sends.append(create_fcw_command(self.packer, fcw))
#*** static msgs ***
for (addr, ecu, cars, bus, fr_step, vl) in STATIC_MSGS:
if frame % fr_step == 0 and ecu in self.fake_ecus and self.car_fingerprint in cars:
# special cases
if fr_step == 5 and ecu == ECU.CAM and bus == 1:
cnt = (((frame / 5) % 7) + 1) << 5
vl = chr(cnt) + vl
elif addr in (0x489, 0x48a) and bus == 0:
# add counter for those 2 messages (last 4 bits)
cnt = ((frame/100)%0xf) + 1
if addr == 0x48a:
# 0x48a has a 8 preceding the counter
cnt += 1 << 7
vl += chr(cnt)
can_sends.append(make_can_msg(addr, vl, bus, False))
sendcan.send(can_list_to_can_capnp(can_sends, msgtype='sendcan').to_bytes())
|
the-stack_0_24904
|
import collections
from datetime import datetime, timedelta
from typing import Optional
from flask import abort, redirect, request
from google.appengine.ext import ndb
from pyre_extensions import none_throws
from werkzeug.wrappers import Response
from backend.common.consts import comp_level, playoff_type
from backend.common.decorators import cached_public
from backend.common.flask_cache import make_cached_response
from backend.common.helpers.award_helper import AwardHelper
from backend.common.helpers.event_helper import EventHelper
from backend.common.helpers.match_helper import MatchHelper
from backend.common.helpers.media_helper import MediaHelper
from backend.common.helpers.playlist_helper import PlaylistHelper
from backend.common.helpers.playoff_advancement_helper import PlayoffAdvancementHelper
from backend.common.helpers.season_helper import SeasonHelper
from backend.common.helpers.team_helper import TeamHelper
from backend.common.models.event import Event
from backend.common.models.keys import EventKey, Year
from backend.common.queries import district_query, event_query, media_query
from backend.web.profiled_render import render_template
@cached_public
def event_list(year: Optional[Year] = None) -> Response:
explicit_year = year is not None
if year is None:
year = SeasonHelper.get_current_season()
valid_years = list(reversed(SeasonHelper.get_valid_years()))
if year not in valid_years:
abort(404)
state_prov = request.args.get("state_prov", None)
districts_future = district_query.DistrictsInYearQuery(year).fetch_async()
all_events_future = event_query.EventListQuery(
year
).fetch_async() # Needed for state_prov
if state_prov:
events_future = Event.query(
Event.year == year, Event.state_prov == state_prov
).fetch_async()
else:
events_future = all_events_future
events = EventHelper.sorted_events(events_future.get_result())
if state_prov == "" or (state_prov and not events):
return redirect(request.path)
week_events = EventHelper.group_by_week(events)
    districts = []  # list of (district abbreviation, district name) tuples
for district in districts_future.get_result():
districts.append((district.abbreviation, district.display_name))
districts = sorted(districts, key=lambda d: d[1])
valid_state_provs = set()
for event in all_events_future.get_result():
if event.state_prov:
valid_state_provs.add(event.state_prov)
valid_state_provs = sorted(valid_state_provs)
template_values = {
"events": events,
"explicit_year": explicit_year,
"selected_year": year,
"valid_years": valid_years,
"week_events": week_events,
"districts": districts,
"state_prov": state_prov,
"valid_state_provs": valid_state_provs,
}
return make_cached_response(
render_template("event_list.html", template_values),
ttl=timedelta(minutes=5) if year == datetime.now().year else timedelta(days=1),
)
@cached_public
def event_detail(event_key: EventKey) -> Response:
event: Optional[Event] = event_query.EventQuery(event_key).fetch()
if not event:
abort(404)
event.prep_awards_matches_teams()
event.prep_details()
medias_future = media_query.EventTeamsPreferredMediasQuery(event_key).fetch_async()
district_future = (
district_query.DistrictQuery(
none_throws(none_throws(event.district_key).string_id())
).fetch_async()
if event.district_key
else None
)
event_medias_future = media_query.EventMediasQuery(event_key).fetch_async()
# status_sitevar_future = Sitevar.get_by_id_async('apistatus.down_events')
event_divisions_future = None
event_codivisions_future = None
parent_event_future = None
if event.divisions:
event_divisions_future = ndb.get_multi_async(event.divisions)
elif event.parent_event:
parent_event_future = none_throws(event.parent_event).get_async()
event_codivisions_future = event_query.EventDivisionsQuery(
none_throws(none_throws(event.parent_event).string_id())
).fetch_async()
awards = AwardHelper.organize_awards(event.awards)
cleaned_matches = event.matches
# MatchHelper.delete_invalid_matches(event.matches, event)
match_count, matches = MatchHelper.organized_matches(cleaned_matches)
teams = TeamHelper.sort_teams(event.teams) # pyre-ignore[6]
# Organize medias by team
image_medias = MediaHelper.get_images(
[media for media in medias_future.get_result()]
)
team_medias = collections.defaultdict(list)
for image_media in image_medias:
for reference in image_media.references:
team_medias[reference].append(image_media)
team_and_medias = []
for team in teams:
team_and_medias.append((team, team_medias.get(team.key, [])))
num_teams = len(team_and_medias)
middle_value = num_teams // 2
if num_teams % 2 != 0:
middle_value += 1
teams_a, teams_b = team_and_medias[:middle_value], team_and_medias[middle_value:]
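    # Editorial example (illustrative): with 7 teams, middle_value becomes 4, so the first
    # column gets 4 teams and the second gets 3, keeping the longer column first.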
oprs = (
[i for i in event.matchstats["oprs"].items()]
if (event.matchstats is not None and "oprs" in event.matchstats)
else []
)
oprs = sorted(oprs, key=lambda t: t[1], reverse=True) # sort by OPR
oprs = oprs[:15] # get the top 15 OPRs
if event.now:
matches_recent = MatchHelper.recent_matches(cleaned_matches)
matches_upcoming = MatchHelper.upcoming_matches(cleaned_matches)
else:
matches_recent = None
matches_upcoming = None
bracket_table = event.playoff_bracket
playoff_advancement = event.playoff_advancement
double_elim_matches = PlayoffAdvancementHelper.double_elim_matches(event, matches)
playoff_template = PlayoffAdvancementHelper.playoff_template(event)
    # Lazily handle the case where we haven't backfilled the event details
if not bracket_table or not playoff_advancement:
(
bracket_table2,
playoff_advancement2,
_,
_,
) = PlayoffAdvancementHelper.generate_playoff_advancement(event, matches)
bracket_table = bracket_table or bracket_table2
playoff_advancement = playoff_advancement or playoff_advancement2
district_points_sorted = None
if event.district_key and event.district_points:
district_points_sorted = sorted(
none_throws(event.district_points)["points"].items(),
key=lambda team_and_points: -team_and_points[1]["total"],
)
event_insights = event.details.insights if event.details else None
event_insights_template = None
if event_insights:
event_insights_template = "event_partials/event_insights_{}.html".format(
event.year
)
district = district_future.get_result() if district_future else None
event_divisions = None
if event_divisions_future:
event_divisions = [e.get_result() for e in event_divisions_future]
elif event_codivisions_future:
event_divisions = event_codivisions_future.get_result()
medias_by_slugname = MediaHelper.group_by_slugname(
[media for media in event_medias_future.get_result()]
)
has_time_predictions = matches_upcoming and any(
match.predicted_time for match in matches_upcoming
)
# status_sitevar = status_sitevar_future.get_result()
qual_playlist = PlaylistHelper.generate_playlist_link(
matches_organized=matches,
title=f"{event.year} {event.name} Qualifications",
allow_levels=[comp_level.CompLevel.QM],
)
elim_playlist = PlaylistHelper.generate_playlist_link(
matches_organized=matches,
title=f"{event.year} {event.name} Playoffs",
allow_levels=comp_level.ELIM_LEVELS,
)
template_values = {
"event": event,
"event_down": False, # status_sitevar and event_key in status_sitevar.contents,
"district_name": district.display_name if district else None,
"district_abbrev": district.abbreviation if district else None,
"matches": matches,
"match_count": match_count,
"matches_recent": matches_recent,
"matches_upcoming": matches_upcoming,
"has_time_predictions": has_time_predictions,
"awards": awards,
"teams_a": teams_a,
"teams_b": teams_b,
"num_teams": num_teams,
"oprs": oprs,
"bracket_table": bracket_table,
"playoff_advancement": playoff_advancement,
"playoff_template": playoff_template,
"playoff_advancement_tiebreakers": PlayoffAdvancementHelper.ROUND_ROBIN_TIEBREAKERS.get(
event.year, []
),
"district_points_sorted": district_points_sorted,
"event_insights_qual": event_insights["qual"] if event_insights else None,
"event_insights_playoff": event_insights["playoff"] if event_insights else None,
"event_insights_template": event_insights_template,
"medias_by_slugname": medias_by_slugname,
"event_divisions": event_divisions,
"parent_event": parent_event_future.get_result()
if parent_event_future
else None,
"double_elim_matches": double_elim_matches,
"double_elim_playoff_types": playoff_type.DOUBLE_ELIM_TYPES,
"qual_playlist": qual_playlist,
"elim_playlist": elim_playlist,
}
return make_cached_response(
render_template("event_details.html", template_values),
ttl=timedelta(seconds=61) if event.within_a_day else timedelta(days=1),
)
|
the-stack_0_24906
|
from __future__ import division
import imp
import os
import sys
import numpy as np
from pyrl import utils
from pyrl.figtools import Figure
#=========================================================================================
# Files
#=========================================================================================
here = utils.get_here(__file__)
parent = utils.get_parent(here)
# Paths
scratchpath = os.environ.get('SCRATCH')
if scratchpath is None:
scratchpath = os.path.join(os.environ['HOME'], 'scratch')
trialspath = os.path.join(scratchpath, 'work', 'pyrl', 'examples')
analysispath = os.path.join(parent, 'examples', 'analysis')
modelspath = os.path.join(parent, 'examples', 'models')
# analysis
analysisfile = os.path.join(analysispath, 'postdecisionwager.py')
analysis = imp.load_source('postdecisionwager_analysis', analysisfile)
# model
modelfile = os.path.join(modelspath, 'postdecisionwager.py')
model = imp.load_source('model', modelfile)
trialsfile_b = os.path.join(trialspath, 'postdecisionwager', 'trials_behavior.pkl')
trialsfile_a = os.path.join(trialspath, 'postdecisionwager', 'trials_activity.pkl')
#=========================================================================================
# Figure
#=========================================================================================
w = utils.mm_to_inch(114)
r = 0.9
thickness = 0.4
axislabelsize = 6
fig = Figure(w=w, r=r, thickness=thickness, ticksize=3, ticklabelsize=5,
axislabelsize=axislabelsize, labelpadx=3, labelpady=4.5)
x0 = 0.16
y0 = 0.67
DX = 0.1
w_task = 0.82
h_task = 0.3
dy0 = 0.09
dy = 0.12
w_behavior = 0.24
h_behavior = 0.2
y1 = y0-dy0-h_behavior
dx = 0.03
w_fr = 0.12
h_fr = 0.18
y2 = y1-dy-h_fr
fig.add('task', [x0, y0, w_task, h_task]),
fig.add('sure-stimulus-duration', [x0, y1, w_behavior, h_behavior]),
fig.add('correct-stimulus-duration', [fig[-1].right+DX, y1, w_behavior, h_behavior]),
fig.add('noTs-stimulus', [x0, y2, w_fr, h_fr]),
fig.add('noTs-choice', [fig[-1].right+dx, y2, 5/8*w_fr, h_fr]),
fig.add('Ts-stimulus', [fig[-1].right+1.1*DX, y2, w_fr, h_fr]),
fig.add('Ts-sure', [fig[-1].right+dx, y2, w_fr, h_fr]),
fig.add('Ts-choice', [fig[-1].right+dx, y2, 5/8*w_fr, h_fr])
pl_x0 = 0.025
pl_y0 = 0.945
pl_y1 = 0.595
pl_y2 = 0.28
plotlabels = {
'A': (pl_x0, pl_y0),
'B': (pl_x0, pl_y1),
'C': (pl_x0, pl_y2)
}
fig.plotlabels(plotlabels, fontsize=9)
#=========================================================================================
# Task
#=========================================================================================
rng = np.random.RandomState(1)
plot = fig['task']
plot.axis_off('left')
plot.axis_off('bottom')
ms = 2.5
dx_circ = 0.14
dy_above = +0.08
dy_below = -0.1
fontsize = 4.5
def circle(x, y, color):
if color is None:
plot.plot(x, y, 'o', ms=ms, mew=0.5, mfc='none', mec='k')
else:
plot.plot(x, y, 'o', ms=ms, mew=0.5, mfc=color, mec=color)
def display_actions(x, y, rewards, colors):
rF, rL, rR, rS = rewards
cF, cL, cR, cS = colors
circle(x-1.5*dx_circ, y, cF)
plot.text(x-1.5*dx_circ, y+dy_above, 'F', ha='center', va='bottom', fontsize=fontsize)
plot.text(x-1.5*dx_circ, y+dy_below, rF, ha='center', va='top', fontsize=fontsize)
circle(x-0.5*dx_circ, y, cL)
plot.text(x-0.5*dx_circ, y+dy_above, 'L', ha='center', va='bottom', fontsize=fontsize)
plot.text(x-0.5*dx_circ, y+dy_below, rL, ha='center', va='top', fontsize=fontsize)
circle(x+0.5*dx_circ, y, cR)
plot.text(x+0.5*dx_circ, y+dy_above, 'R', ha='center', va='bottom', fontsize=fontsize)
plot.text(x+0.5*dx_circ, y+dy_below, rR, ha='center', va='top', fontsize=fontsize)
circle(x+1.5*dx_circ, y, cS)
plot.text(x+1.5*dx_circ, y+dy_above, 'S', ha='center', va='bottom', fontsize=fontsize)
plot.text(x+1.5*dx_circ, y+dy_below, rS, ha='center', va='top', fontsize=fontsize)
# Durations
durations = {}
durations['fixation'] = (0, 1)
durations['stimulus'] = (1, 2)
durations['delay'] = (2, 4)
durations['sure'] = (3, 4)
durations['decision'] = (4, 5)
trial_t = np.linspace(0, durations['decision'][1], 501)[1:]
lw = 0.6
# Baselines
y_sure_Ts = 1.75
y_fixation = 1.3
y_stimulus = 0.65
y_sure_noTs = 0.05
def fake_left_axis(ybot, ytop, thickness, ticklabels=None, axislabel=None):
plot.plot(np.zeros(2), [ybot, ytop], color='k', lw=thickness)
plot.plot([-0.03, 0], ybot*np.ones(2), color='k', lw=thickness)
plot.plot([-0.03, 0], ytop*np.ones(2), color='k', lw=thickness)
if ticklabels is not None:
plot.text(-0.06, ytop, ticklabels[1], ha='right', va='center', fontsize=3.5)
plot.text(-0.06, ybot, ticklabels[0], ha='right', va='center', fontsize=3.5)
if axislabel is not None:
text = plot.text(-0.25, (ybot+ytop)/2, axislabel,
ha='right', va='center', fontsize=4.5)
#-----------------------------------------------------------------------------------------
# Plot fixation
#-----------------------------------------------------------------------------------------
fixation = np.zeros_like(trial_t)
w, = np.where((0 <= trial_t) & (trial_t <= durations['decision'][0]))
fixation[w] = 1
def rescale(y):
return y_fixation + 0.2*y
fake_left_axis(rescale(0), rescale(1), thickness, ticklabels=['OFF', 'ON'],
axislabel='Fixation cue')
plot.plot(trial_t, rescale(fixation), color=Figure.colors('darkgreen'), lw=lw)
#-----------------------------------------------------------------------------------------
# Plot stimulus
#-----------------------------------------------------------------------------------------
coh = 25.6
stimulus_L = np.zeros_like(trial_t)
stimulus_R = np.zeros_like(trial_t)
w, = np.where((durations['stimulus'][0] < trial_t) & (trial_t <= durations['stimulus'][1]))
stimulus_L[w] = model.scale(-coh) + rng.normal(scale=0.15, size=len(w))
stimulus_R[w] = model.scale(+coh) + rng.normal(scale=0.15, size=len(w))
def rescale(y):
return y_stimulus + 0.3*y
fake_left_axis(rescale(0), rescale(1), thickness, ticklabels=[0, 1],
axislabel='Evidence L/R')
plot.plot(trial_t, rescale(stimulus_L), color=Figure.colors('red'), lw=lw)
plot.plot(trial_t, rescale(stimulus_R), color=Figure.colors('blue'), lw=lw)
#-----------------------------------------------------------------------------------------
# Plot sure target
#-----------------------------------------------------------------------------------------
sure = np.zeros_like(trial_t)
w, = np.where((durations['sure'][0] < trial_t))
sure[w] = 1
def rescale(y):
return y_sure_Ts + 0.2*y
fake_left_axis(rescale(0), rescale(1), thickness, ticklabels=['OFF', 'ON'],
axislabel='Sure target\n(Wager trials)')
plot.plot(trial_t, rescale(sure), color=Figure.colors('magenta'), lw=lw)
def rescale(y):
return y_sure_noTs + 0.2*y
sure = np.zeros_like(trial_t)
fake_left_axis(rescale(0), rescale(1), thickness, ticklabels=['OFF', 'ON'],
axislabel='Sure target\n(Non-wager trials)')
plot.plot(trial_t, rescale(sure), color=Figure.colors('magenta'), lw=lw)
#-----------------------------------------------------------------------------------------
# Display actions (all trials)
#-----------------------------------------------------------------------------------------
shift = 0.55
rewards = ['0', '-1', '-1', '-1']
colors = ['k', None, None, None]
display_actions(np.mean(durations['fixation']), y_stimulus+shift, rewards, colors)
rewards = ['0', '-1', '-1', '-1']
colors = ['k', None, None, None]
display_actions(np.mean(durations['stimulus']), y_stimulus+shift, rewards, colors)
rewards = ['0', '-1', '-1', '-1']
colors = ['k', None, None, None]
display_actions(np.mean(durations['delay']), y_stimulus+shift, rewards, colors)
#-----------------------------------------------------------------------------------------
# Display actions (Ts)
#-----------------------------------------------------------------------------------------
shift = 0.5
rewards = ['0', '0', '1', '0.7']
colors = [None, None, Figure.colors('darkblue'), Figure.colors('blue')]
display_actions(np.mean(durations['decision']), y_sure_Ts+shift, rewards, colors)
plot.text(durations['sure'][0], y_sure_Ts+shift, 'Sure bet offered',
ha='left', va='center', fontsize=fontsize+0.5)
#-----------------------------------------------------------------------------------------
# Display actions (no Ts)
#-----------------------------------------------------------------------------------------
shift = 0.3
rewards = ['0', '0', '1', '-1']
colors = [None, None, Figure.colors('darkblue'), None]
display_actions(np.mean(durations['decision']), y_sure_noTs+shift, rewards, colors)
plot.text(durations['sure'][0], y_sure_noTs+shift, 'Sure bet not offered',
ha='left', va='center', fontsize=fontsize+0.5)
#-----------------------------------------------------------------------------------------
y_timeline = -0.35
# Task timeline
plot.plot([0, durations['decision'][1]], y_timeline*np.ones(2), 'k', lw=0.75)
for t in [0] + [durations[e][1] for e in ['fixation', 'stimulus', 'delay', 'decision']]:
plot.plot(t*np.ones(2), [y_timeline-0.04, y_timeline+0.04], 'k', lw=0.75)
# Epoch labels
for e, label in zip(['fixation', 'stimulus', 'delay', 'decision'],
['Fixation', 'Stimulus', 'Delay/Sure target', 'Decision']):
plot.text(np.mean(durations[e]), y_timeline+0.08, label, ha='center', va='bottom',
fontsize=fontsize+0.5)
# Epoch durations
for e, label in zip(['fixation', 'stimulus', 'delay', 'decision'],
['750 ms', '100-900 ms',
'1200-1800 ms\n(Sure target onset 500-750 ms)', '500 ms']):
plot.text(np.mean(durations[e]), y_timeline-0.11, label, ha='center', va='top',
fontsize=fontsize+0.5)
# Limits
plot.xlim(0, durations['decision'][1])
plot.ylim(y_timeline, y_sure_Ts+0.2+0.35)
#=========================================================================================
plot = fig['sure-stimulus-duration']
savefile = os.path.join(here, 'work', 'data', 'sure_stimulus_duration.pkl')
if 'fast' in sys.argv and os.path.isfile(savefile):
print("Plotting data in {}".format(savefile))
saved = utils.load(savefile)
else:
saved = None
kwargs = dict(ms=3, lw=0.7)
saved = analysis.sure_stimulus_duration(trialsfile_b, plot, saved=saved,
nbins=10, **kwargs)
utils.save(savefile, saved)
plot.xticks([100, 300, 500, 700])
plot.yticks([0, 0.2, 0.4, 0.6, 0.8])
plot.xlim(100, 700)
plot.ylim(0, 0.8)
plot.xlabel('Stimulus duration (ms)')
plot.ylabel('Probability sure target')
# Legend
props = {'prop': {'size': 5}, 'handlelength': 1.2,
'handletextpad': 1.1, 'labelspacing': 0.7}
plot.legend(bbox_to_anchor=(3, 0.98), **props)
#=========================================================================================
plot = fig['correct-stimulus-duration']
savefile = os.path.join(here, 'work', 'data', 'correct_stimulus_duration.pkl')
if 'fast' in sys.argv and os.path.isfile(savefile):
print("Plotting data in {}".format(savefile))
saved = utils.load(savefile)
else:
saved = None
kwargs = dict(ms=2.5, mew=0.5, lw=0.7, dashes=[3, 1])
saved = analysis.correct_stimulus_duration(trialsfile_b, plot,
saved=saved, nbins=10, **kwargs)
utils.save(savefile, saved)
plot.xticks([100, 300, 500, 700])
plot.xlim(100, 700)
plot.ylim(0.5, 1)
plot.xlabel('Stimulus duration (ms)')
plot.ylabel('Probability correct')
#=========================================================================================
# Plot unit
#=========================================================================================
unit = 63
plots = {name: fig[name]
for name in ['noTs-stimulus', 'noTs-choice',
'Ts-stimulus', 'Ts-sure', 'Ts-choice']}
kwargs = dict(lw=0.8, lw_vline=0.4, dashes=[3, 1.5], dashes_vline=[5, 4])
y = analysis.sort(trialsfile_a, plots, unit=unit, **kwargs)
for plot in plots.values():
ylim = plot.lim('y', y, lower=0)
plot.vline(0, lw=thickness, linestyle='--', dashes=[3.5, 2])
fig['noTs-choice'].xticks([-400, 0])
fig['Ts-choice'].xticks([-400, 0])
#=========================================================================================
fontsize_epoch = 5
y_epoch = 1.03*ylim[1]
plot = fig['noTs-stimulus']
plot.xlabel('Time (ms)')
plot.ylabel('Firing rate (a.u.)')
plot.text(0, y_epoch , 'stimulus', fontsize=fontsize_epoch, ha='left', va='bottom')
plot = fig['noTs-choice']
plot.axis_off('left')
plot.text(0, y_epoch , 'choice', fontsize=fontsize_epoch, ha='right', va='bottom')
plot = fig['Ts-stimulus']
plot.xlabel('Time (ms)')
plot.ylabel('Firing rate (a.u.)')
plot.text(0, y_epoch , 'stimulus', fontsize=fontsize_epoch, ha='left', va='bottom')
plot = fig['Ts-sure']
plot.axis_off('left')
plot.text(0, y_epoch , 'sure target', fontsize=fontsize_epoch, ha='left', va='bottom')
plot = fig['Ts-choice']
plot.axis_off('left')
plot.text(0, y_epoch , 'choice', fontsize=fontsize_epoch, ha='right', va='bottom')
#=========================================================================================
fig.save()
|
the-stack_0_24908
|
import functools
import operator
import sys
import time
VECTORS = frozenset([
(1, 1),
(3, 1),
(5, 1),
(7, 1),
(1, 2),
])
def main(terrain_data):
    tree_counts = []
    for right, down in VECTORS:
        width = len(terrain_data[0])
        height = len(terrain_data)
        row = col = 0
        tree_counts.append(0)
        while True:
            row += down
            col += right
            # wrap horizontally; width - 1 because each parsed row still ends with its '\n' cell
            col = col % (width - 1)
            tree_counts[-1] += terrain_data[row][col]
            if row + 1 == height:
                break
    return functools.reduce(operator.mul, tree_counts, 1)
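# Editorial worked example (illustrative, not part of the original solution): with a map row of
# "..##.......\n" the parsed row has length 12, so the usable map width is 11 and the column index
# wraps modulo 11. Following the (right=3, down=1) slope from column 0 visits columns
# 3, 6, 9, 12 % 11 = 1, 4, ... and the trailing newline cell (index 11) is never sampled.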
def reader(fh):
for l in fh:
yield list(map(lambda c: c != '.', l))
if __name__ == '__main__':
fname = sys.argv[1]
with open(fname, 'r') as fh:
inputs = list(reader(fh))
start = time.monotonic_ns()
result = main(inputs)
end = time.monotonic_ns()
print(result)
print(f'Result calculated in {(end - start) / 1e3:0.3f} microseconds.', file=sys.stderr)
|
the-stack_0_24910
|
# IMPORTANT: the same tests are run from "test_xml_etree_c" in order
# to ensure consistency between the C implementation and the Python
# implementation.
#
# For this purpose, the module-level "ET" symbol is temporarily
# monkey-patched when running the "test_xml_etree_c" test suite.
import copy
import functools
import html
import io
import itertools
import locale
import operator
import os
import pickle
import sys
import textwrap
import types
import unittest
import warnings
import weakref
from functools import partial
from itertools import product, islice
from test import support
from test.support import findfile, gc_collect, swap_attr, os_helper
from test.support.os_helper import TESTFN
from test.support.import_helper import import_fresh_module
# pyET is the pure-Python implementation.
#
# ET is pyET in test_xml_etree and is the C accelerated version in
# test_xml_etree_c.
pyET = None
ET = None
SIMPLE_XMLFILE = findfile("simple.xml", subdir="xmltestdata")
try:
SIMPLE_XMLFILE.encode("utf-8")
except UnicodeEncodeError:
raise unittest.SkipTest("filename is not encodable to utf8")
SIMPLE_NS_XMLFILE = findfile("simple-ns.xml", subdir="xmltestdata")
UTF8_BUG_XMLFILE = findfile("expat224_utf8_bug.xml", subdir="xmltestdata")
SAMPLE_XML = """\
<body>
<tag class='a'>text</tag>
<tag class='b' />
<section>
<tag class='b' id='inner'>subtext</tag>
</section>
</body>
"""
SAMPLE_SECTION = """\
<section>
<tag class='b' id='inner'>subtext</tag>
<nexttag />
<nextsection>
<tag />
</nextsection>
</section>
"""
SAMPLE_XML_NS = """
<body xmlns="http://effbot.org/ns">
<tag>text</tag>
<tag />
<section>
<tag>subtext</tag>
</section>
</body>
"""
SAMPLE_XML_NS_ELEMS = """
<root>
<h:table xmlns:h="hello">
<h:tr>
<h:td>Apples</h:td>
<h:td>Bananas</h:td>
</h:tr>
</h:table>
<f:table xmlns:f="foo">
<f:name>African Coffee Table</f:name>
<f:width>80</f:width>
<f:length>120</f:length>
</f:table>
</root>
"""
ENTITY_XML = """\
<!DOCTYPE points [
<!ENTITY % user-entities SYSTEM 'user-entities.xml'>
%user-entities;
]>
<document>&entity;</document>
"""
EXTERNAL_ENTITY_XML = """\
<!DOCTYPE points [
<!ENTITY entity SYSTEM "file:///non-existing-file.xml">
]>
<document>&entity;</document>
"""
def checkwarnings(*filters, quiet=False):
def decorator(test):
def newtest(*args, **kwargs):
with support.check_warnings(*filters, quiet=quiet):
test(*args, **kwargs)
functools.update_wrapper(newtest, test)
return newtest
return decorator
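# Editorial usage sketch (illustrative): checkwarnings() wraps a test so that the listed warning
# filters are asserted via support.check_warnings() while the test body runs, e.g.
#
#   @checkwarnings(("This method will be removed in future versions.*", DeprecationWarning))
#   def test_some_deprecated_api(self):
#       ...
#
# The test name above is a placeholder; test_getchildren() further below uses this same pattern.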
class ModuleTest(unittest.TestCase):
def test_sanity(self):
# Import sanity.
from xml.etree import ElementTree
from xml.etree import ElementInclude
from xml.etree import ElementPath
def test_all(self):
names = ("xml.etree.ElementTree", "_elementtree")
support.check__all__(self, ET, names, not_exported=("HTML_EMPTY",))
def serialize(elem, to_string=True, encoding='unicode', **options):
if encoding != 'unicode':
file = io.BytesIO()
else:
file = io.StringIO()
tree = ET.ElementTree(elem)
tree.write(file, encoding=encoding, **options)
if to_string:
return file.getvalue()
else:
file.seek(0)
return file
def summarize_list(seq):
return [elem.tag for elem in seq]
class ElementTestCase:
@classmethod
def setUpClass(cls):
cls.modules = {pyET, ET}
def pickleRoundTrip(self, obj, name, dumper, loader, proto):
save_m = sys.modules[name]
try:
sys.modules[name] = dumper
temp = pickle.dumps(obj, proto)
sys.modules[name] = loader
result = pickle.loads(temp)
except pickle.PicklingError as pe:
# pyET must be second, because pyET may be (equal to) ET.
human = dict([(ET, "cET"), (pyET, "pyET")])
raise support.TestFailed("Failed to round-trip %r from %r to %r"
% (obj,
human.get(dumper, dumper),
human.get(loader, loader))) from pe
finally:
sys.modules[name] = save_m
return result
def assertEqualElements(self, alice, bob):
self.assertIsInstance(alice, (ET.Element, pyET.Element))
self.assertIsInstance(bob, (ET.Element, pyET.Element))
self.assertEqual(len(list(alice)), len(list(bob)))
for x, y in zip(alice, bob):
self.assertEqualElements(x, y)
properties = operator.attrgetter('tag', 'tail', 'text', 'attrib')
self.assertEqual(properties(alice), properties(bob))
# --------------------------------------------------------------------
# element tree tests
class ElementTreeTest(unittest.TestCase):
def serialize_check(self, elem, expected):
self.assertEqual(serialize(elem), expected)
def test_interface(self):
# Test element tree interface.
def check_string(string):
len(string)
for char in string:
self.assertEqual(len(char), 1,
msg="expected one-character string, got %r" % char)
new_string = string + ""
new_string = string + " "
string[:0]
def check_mapping(mapping):
len(mapping)
keys = mapping.keys()
items = mapping.items()
for key in keys:
item = mapping[key]
mapping["key"] = "value"
self.assertEqual(mapping["key"], "value",
msg="expected value string, got %r" % mapping["key"])
def check_element(element):
self.assertTrue(ET.iselement(element), msg="not an element")
direlem = dir(element)
for attr in 'tag', 'attrib', 'text', 'tail':
self.assertTrue(hasattr(element, attr),
msg='no %s member' % attr)
self.assertIn(attr, direlem,
msg='no %s visible by dir' % attr)
check_string(element.tag)
check_mapping(element.attrib)
if element.text is not None:
check_string(element.text)
if element.tail is not None:
check_string(element.tail)
for elem in element:
check_element(elem)
element = ET.Element("tag")
check_element(element)
tree = ET.ElementTree(element)
check_element(tree.getroot())
element = ET.Element("t\xe4g", key="value")
tree = ET.ElementTree(element)
self.assertRegex(repr(element), r"^<Element 't\xe4g' at 0x.*>$")
element = ET.Element("tag", key="value")
# Make sure all standard element methods exist.
def check_method(method):
self.assertTrue(hasattr(method, '__call__'),
msg="%s not callable" % method)
check_method(element.append)
check_method(element.extend)
check_method(element.insert)
check_method(element.remove)
check_method(element.getchildren)
check_method(element.find)
check_method(element.iterfind)
check_method(element.findall)
check_method(element.findtext)
check_method(element.clear)
check_method(element.get)
check_method(element.set)
check_method(element.keys)
check_method(element.items)
check_method(element.iter)
check_method(element.itertext)
check_method(element.getiterator)
# These methods return an iterable. See bug 6472.
def check_iter(it):
check_method(it.__next__)
check_iter(element.iterfind("tag"))
check_iter(element.iterfind("*"))
check_iter(tree.iterfind("tag"))
check_iter(tree.iterfind("*"))
# These aliases are provided:
self.assertEqual(ET.XML, ET.fromstring)
self.assertEqual(ET.PI, ET.ProcessingInstruction)
def test_set_attribute(self):
element = ET.Element('tag')
self.assertEqual(element.tag, 'tag')
element.tag = 'Tag'
self.assertEqual(element.tag, 'Tag')
element.tag = 'TAG'
self.assertEqual(element.tag, 'TAG')
self.assertIsNone(element.text)
element.text = 'Text'
self.assertEqual(element.text, 'Text')
element.text = 'TEXT'
self.assertEqual(element.text, 'TEXT')
self.assertIsNone(element.tail)
element.tail = 'Tail'
self.assertEqual(element.tail, 'Tail')
element.tail = 'TAIL'
self.assertEqual(element.tail, 'TAIL')
self.assertEqual(element.attrib, {})
element.attrib = {'a': 'b', 'c': 'd'}
self.assertEqual(element.attrib, {'a': 'b', 'c': 'd'})
element.attrib = {'A': 'B', 'C': 'D'}
self.assertEqual(element.attrib, {'A': 'B', 'C': 'D'})
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_simpleops(self):
# Basic method sanity checks.
elem = ET.XML("<body><tag/></body>")
self.serialize_check(elem, '<body><tag /></body>')
e = ET.Element("tag2")
elem.append(e)
self.serialize_check(elem, '<body><tag /><tag2 /></body>')
elem.remove(e)
self.serialize_check(elem, '<body><tag /></body>')
elem.insert(0, e)
self.serialize_check(elem, '<body><tag2 /><tag /></body>')
elem.remove(e)
elem.extend([e])
self.serialize_check(elem, '<body><tag /><tag2 /></body>')
elem.remove(e)
elem.extend(iter([e]))
self.serialize_check(elem, '<body><tag /><tag2 /></body>')
elem.remove(e)
element = ET.Element("tag", key="value")
self.serialize_check(element, '<tag key="value" />') # 1
subelement = ET.Element("subtag")
element.append(subelement)
self.serialize_check(element, '<tag key="value"><subtag /></tag>') # 2
element.insert(0, subelement)
self.serialize_check(element,
'<tag key="value"><subtag /><subtag /></tag>') # 3
element.remove(subelement)
self.serialize_check(element, '<tag key="value"><subtag /></tag>') # 4
element.remove(subelement)
self.serialize_check(element, '<tag key="value" />') # 5
with self.assertRaises(ValueError) as cm:
element.remove(subelement)
self.assertEqual(str(cm.exception), 'list.remove(x): x not in list')
self.serialize_check(element, '<tag key="value" />') # 6
element[0:0] = [subelement, subelement, subelement]
self.serialize_check(element[1], '<subtag />')
self.assertEqual(element[1:9], [element[1], element[2]])
self.assertEqual(element[:9:2], [element[0], element[2]])
del element[1:2]
self.serialize_check(element,
'<tag key="value"><subtag /><subtag /></tag>')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_cdata(self):
# Test CDATA handling (etc).
self.serialize_check(ET.XML("<tag>hello</tag>"),
'<tag>hello</tag>')
self.serialize_check(ET.XML("<tag>hello</tag>"),
'<tag>hello</tag>')
self.serialize_check(ET.XML("<tag><![CDATA[hello]]></tag>"),
'<tag>hello</tag>')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_file_init(self):
stringfile = io.BytesIO(SAMPLE_XML.encode("utf-8"))
tree = ET.ElementTree(file=stringfile)
self.assertEqual(tree.find("tag").tag, 'tag')
self.assertEqual(tree.find("section/tag").tag, 'tag')
tree = ET.ElementTree(file=SIMPLE_XMLFILE)
self.assertEqual(tree.find("element").tag, 'element')
self.assertEqual(tree.find("element/../empty-element").tag,
'empty-element')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_path_cache(self):
# Check that the path cache behaves sanely.
from xml.etree import ElementPath
elem = ET.XML(SAMPLE_XML)
for i in range(10): ET.ElementTree(elem).find('./'+str(i))
cache_len_10 = len(ElementPath._cache)
for i in range(10): ET.ElementTree(elem).find('./'+str(i))
self.assertEqual(len(ElementPath._cache), cache_len_10)
for i in range(20): ET.ElementTree(elem).find('./'+str(i))
self.assertGreater(len(ElementPath._cache), cache_len_10)
for i in range(600): ET.ElementTree(elem).find('./'+str(i))
self.assertLess(len(ElementPath._cache), 500)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_copy(self):
# Test copy handling (etc).
import copy
e1 = ET.XML("<tag>hello<foo/></tag>")
e2 = copy.copy(e1)
e3 = copy.deepcopy(e1)
e1.find("foo").tag = "bar"
self.serialize_check(e1, '<tag>hello<bar /></tag>')
self.serialize_check(e2, '<tag>hello<bar /></tag>')
self.serialize_check(e3, '<tag>hello<foo /></tag>')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_attrib(self):
# Test attribute handling.
elem = ET.Element("tag")
elem.get("key") # 1.1
self.assertEqual(elem.get("key", "default"), 'default') # 1.2
elem.set("key", "value")
self.assertEqual(elem.get("key"), 'value') # 1.3
elem = ET.Element("tag", key="value")
self.assertEqual(elem.get("key"), 'value') # 2.1
self.assertEqual(elem.attrib, {'key': 'value'}) # 2.2
attrib = {"key": "value"}
elem = ET.Element("tag", attrib)
attrib.clear() # check for aliasing issues
self.assertEqual(elem.get("key"), 'value') # 3.1
self.assertEqual(elem.attrib, {'key': 'value'}) # 3.2
attrib = {"key": "value"}
elem = ET.Element("tag", **attrib)
attrib.clear() # check for aliasing issues
self.assertEqual(elem.get("key"), 'value') # 4.1
self.assertEqual(elem.attrib, {'key': 'value'}) # 4.2
elem = ET.Element("tag", {"key": "other"}, key="value")
self.assertEqual(elem.get("key"), 'value') # 5.1
self.assertEqual(elem.attrib, {'key': 'value'}) # 5.2
elem = ET.Element('test')
elem.text = "aa"
elem.set('testa', 'testval')
elem.set('testb', 'test2')
self.assertEqual(ET.tostring(elem),
b'<test testa="testval" testb="test2">aa</test>')
self.assertEqual(sorted(elem.keys()), ['testa', 'testb'])
self.assertEqual(sorted(elem.items()),
[('testa', 'testval'), ('testb', 'test2')])
self.assertEqual(elem.attrib['testb'], 'test2')
elem.attrib['testb'] = 'test1'
elem.attrib['testc'] = 'test2'
self.assertEqual(ET.tostring(elem),
b'<test testa="testval" testb="test1" testc="test2">aa</test>')
elem = ET.Element('test')
elem.set('a', '\r')
elem.set('b', '\r\n')
elem.set('c', '\t\n\r ')
elem.set('d', '\n\n')
self.assertEqual(ET.tostring(elem),
b'<test a=" " b=" " c="	 " d=" " />')
def test_makeelement(self):
# Test makeelement handling.
elem = ET.Element("tag")
attrib = {"key": "value"}
subelem = elem.makeelement("subtag", attrib)
self.assertIsNot(subelem.attrib, attrib, msg="attrib aliasing")
elem.append(subelem)
self.serialize_check(elem, '<tag><subtag key="value" /></tag>')
elem.clear()
self.serialize_check(elem, '<tag />')
elem.append(subelem)
self.serialize_check(elem, '<tag><subtag key="value" /></tag>')
elem.extend([subelem, subelem])
self.serialize_check(elem,
'<tag><subtag key="value" /><subtag key="value" /><subtag key="value" /></tag>')
elem[:] = [subelem]
self.serialize_check(elem, '<tag><subtag key="value" /></tag>')
elem[:] = tuple([subelem])
self.serialize_check(elem, '<tag><subtag key="value" /></tag>')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_parsefile(self):
# Test parsing from file.
tree = ET.parse(SIMPLE_XMLFILE)
stream = io.StringIO()
tree.write(stream, encoding='unicode')
self.assertEqual(stream.getvalue(),
'<root>\n'
' <element key="value">text</element>\n'
' <element>text</element>tail\n'
' <empty-element />\n'
'</root>')
tree = ET.parse(SIMPLE_NS_XMLFILE)
stream = io.StringIO()
tree.write(stream, encoding='unicode')
self.assertEqual(stream.getvalue(),
'<ns0:root xmlns:ns0="namespace">\n'
' <ns0:element key="value">text</ns0:element>\n'
' <ns0:element>text</ns0:element>tail\n'
' <ns0:empty-element />\n'
'</ns0:root>')
with open(SIMPLE_XMLFILE) as f:
data = f.read()
parser = ET.XMLParser()
self.assertRegex(parser.version, r'^Expat ')
parser.feed(data)
self.serialize_check(parser.close(),
'<root>\n'
' <element key="value">text</element>\n'
' <element>text</element>tail\n'
' <empty-element />\n'
'</root>')
target = ET.TreeBuilder()
parser = ET.XMLParser(target=target)
parser.feed(data)
self.serialize_check(parser.close(),
'<root>\n'
' <element key="value">text</element>\n'
' <element>text</element>tail\n'
' <empty-element />\n'
'</root>')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_parseliteral(self):
element = ET.XML("<html><body>text</body></html>")
self.assertEqual(ET.tostring(element, encoding='unicode'),
'<html><body>text</body></html>')
element = ET.fromstring("<html><body>text</body></html>")
self.assertEqual(ET.tostring(element, encoding='unicode'),
'<html><body>text</body></html>')
sequence = ["<html><body>", "text</bo", "dy></html>"]
element = ET.fromstringlist(sequence)
self.assertEqual(ET.tostring(element),
b'<html><body>text</body></html>')
self.assertEqual(b"".join(ET.tostringlist(element)),
b'<html><body>text</body></html>')
self.assertEqual(ET.tostring(element, "ascii"),
b"<?xml version='1.0' encoding='ascii'?>\n"
b"<html><body>text</body></html>")
_, ids = ET.XMLID("<html><body>text</body></html>")
self.assertEqual(len(ids), 0)
_, ids = ET.XMLID("<html><body id='body'>text</body></html>")
self.assertEqual(len(ids), 1)
self.assertEqual(ids["body"].tag, 'body')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_iterparse(self):
# Test iterparse interface.
iterparse = ET.iterparse
context = iterparse(SIMPLE_XMLFILE)
action, elem = next(context)
self.assertEqual((action, elem.tag), ('end', 'element'))
self.assertEqual([(action, elem.tag) for action, elem in context], [
('end', 'element'),
('end', 'empty-element'),
('end', 'root'),
])
self.assertEqual(context.root.tag, 'root')
context = iterparse(SIMPLE_NS_XMLFILE)
self.assertEqual([(action, elem.tag) for action, elem in context], [
('end', '{namespace}element'),
('end', '{namespace}element'),
('end', '{namespace}empty-element'),
('end', '{namespace}root'),
])
events = ()
context = iterparse(SIMPLE_XMLFILE, events)
self.assertEqual([(action, elem.tag) for action, elem in context], [])
events = ()
context = iterparse(SIMPLE_XMLFILE, events=events)
self.assertEqual([(action, elem.tag) for action, elem in context], [])
events = ("start", "end")
context = iterparse(SIMPLE_XMLFILE, events)
self.assertEqual([(action, elem.tag) for action, elem in context], [
('start', 'root'),
('start', 'element'),
('end', 'element'),
('start', 'element'),
('end', 'element'),
('start', 'empty-element'),
('end', 'empty-element'),
('end', 'root'),
])
events = ("start", "end", "start-ns", "end-ns")
context = iterparse(SIMPLE_NS_XMLFILE, events)
self.assertEqual([(action, elem.tag) if action in ("start", "end")
else (action, elem)
for action, elem in context], [
('start-ns', ('', 'namespace')),
('start', '{namespace}root'),
('start', '{namespace}element'),
('end', '{namespace}element'),
('start', '{namespace}element'),
('end', '{namespace}element'),
('start', '{namespace}empty-element'),
('end', '{namespace}empty-element'),
('end', '{namespace}root'),
('end-ns', None),
])
events = ('start-ns', 'end-ns')
context = iterparse(io.StringIO(r"<root xmlns=''/>"), events)
res = [action for action, elem in context]
self.assertEqual(res, ['start-ns', 'end-ns'])
events = ("start", "end", "bogus")
with open(SIMPLE_XMLFILE, "rb") as f:
with self.assertRaises(ValueError) as cm:
iterparse(f, events)
self.assertFalse(f.closed)
self.assertEqual(str(cm.exception), "unknown event 'bogus'")
with support.check_no_resource_warning(self):
with self.assertRaises(ValueError) as cm:
iterparse(SIMPLE_XMLFILE, events)
self.assertEqual(str(cm.exception), "unknown event 'bogus'")
del cm
source = io.BytesIO(
b"<?xml version='1.0' encoding='iso-8859-1'?>\n"
b"<body xmlns='http://éffbot.org/ns'\n"
b" xmlns:cl\xe9='http://effbot.org/ns'>text</body>\n")
events = ("start-ns",)
context = iterparse(source, events)
self.assertEqual([(action, elem) for action, elem in context], [
('start-ns', ('', 'http://\xe9ffbot.org/ns')),
('start-ns', ('cl\xe9', 'http://effbot.org/ns')),
])
source = io.StringIO("<document />junk")
it = iterparse(source)
action, elem = next(it)
self.assertEqual((action, elem.tag), ('end', 'document'))
with self.assertRaises(ET.ParseError) as cm:
next(it)
self.assertEqual(str(cm.exception),
'junk after document element: line 1, column 12')
self.addCleanup(os_helper.unlink, TESTFN)
with open(TESTFN, "wb") as f:
f.write(b"<document />junk")
it = iterparse(TESTFN)
action, elem = next(it)
self.assertEqual((action, elem.tag), ('end', 'document'))
with support.check_no_resource_warning(self):
with self.assertRaises(ET.ParseError) as cm:
next(it)
self.assertEqual(str(cm.exception),
'junk after document element: line 1, column 12')
del cm, it
def test_writefile(self):
elem = ET.Element("tag")
elem.text = "text"
self.serialize_check(elem, '<tag>text</tag>')
ET.SubElement(elem, "subtag").text = "subtext"
self.serialize_check(elem, '<tag>text<subtag>subtext</subtag></tag>')
# Test tag suppression
elem.tag = None
self.serialize_check(elem, 'text<subtag>subtext</subtag>')
elem.insert(0, ET.Comment("comment"))
self.serialize_check(elem,
'text<!--comment--><subtag>subtext</subtag>') # assumes 1.3
elem[0] = ET.PI("key", "value")
self.serialize_check(elem, 'text<?key value?><subtag>subtext</subtag>')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_custom_builder(self):
# Test parser w. custom builder.
with open(SIMPLE_XMLFILE) as f:
data = f.read()
class Builder(list):
def start(self, tag, attrib):
self.append(("start", tag))
def end(self, tag):
self.append(("end", tag))
def data(self, text):
pass
builder = Builder()
parser = ET.XMLParser(target=builder)
parser.feed(data)
self.assertEqual(builder, [
('start', 'root'),
('start', 'element'),
('end', 'element'),
('start', 'element'),
('end', 'element'),
('start', 'empty-element'),
('end', 'empty-element'),
('end', 'root'),
])
with open(SIMPLE_NS_XMLFILE) as f:
data = f.read()
class Builder(list):
def start(self, tag, attrib):
self.append(("start", tag))
def end(self, tag):
self.append(("end", tag))
def data(self, text):
pass
def pi(self, target, data):
self.append(("pi", target, data))
def comment(self, data):
self.append(("comment", data))
def start_ns(self, prefix, uri):
self.append(("start-ns", prefix, uri))
def end_ns(self, prefix):
self.append(("end-ns", prefix))
builder = Builder()
parser = ET.XMLParser(target=builder)
parser.feed(data)
self.assertEqual(builder, [
('pi', 'pi', 'data'),
('comment', ' comment '),
('start-ns', '', 'namespace'),
('start', '{namespace}root'),
('start', '{namespace}element'),
('end', '{namespace}element'),
('start', '{namespace}element'),
('end', '{namespace}element'),
('start', '{namespace}empty-element'),
('end', '{namespace}empty-element'),
('end', '{namespace}root'),
('end-ns', ''),
])
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_custom_builder_only_end_ns(self):
class Builder(list):
def end_ns(self, prefix):
self.append(("end-ns", prefix))
builder = Builder()
parser = ET.XMLParser(target=builder)
parser.feed(textwrap.dedent("""\
<?pi data?>
<!-- comment -->
<root xmlns='namespace' xmlns:p='pns' xmlns:a='ans'>
<a:element key='value'>text</a:element>
<p:element>text</p:element>tail
<empty-element/>
</root>
"""))
self.assertEqual(builder, [
('end-ns', 'a'),
('end-ns', 'p'),
('end-ns', ''),
])
# TODO: RUSTPYTHON
@unittest.expectedFailure
# Element.getchildren() and ElementTree.getiterator() are deprecated.
@checkwarnings(("This method will be removed in future versions. "
"Use .+ instead.",
DeprecationWarning))
def test_getchildren(self):
# Test Element.getchildren()
with open(SIMPLE_XMLFILE, "rb") as f:
tree = ET.parse(f)
self.assertEqual([summarize_list(elem.getchildren())
for elem in tree.getroot().iter()], [
['element', 'element', 'empty-element'],
[],
[],
[],
])
self.assertEqual([summarize_list(elem.getchildren())
for elem in tree.getiterator()], [
['element', 'element', 'empty-element'],
[],
[],
[],
])
elem = ET.XML(SAMPLE_XML)
self.assertEqual(len(elem.getchildren()), 3)
self.assertEqual(len(elem[2].getchildren()), 1)
self.assertEqual(elem[:], elem.getchildren())
child1 = elem[0]
child2 = elem[2]
del elem[1:2]
self.assertEqual(len(elem.getchildren()), 2)
self.assertEqual(child1, elem[0])
self.assertEqual(child2, elem[1])
elem[0:2] = [child2, child1]
self.assertEqual(child2, elem[0])
self.assertEqual(child1, elem[1])
self.assertNotEqual(child1, elem[0])
elem.clear()
self.assertEqual(elem.getchildren(), [])
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_writestring(self):
elem = ET.XML("<html><body>text</body></html>")
self.assertEqual(ET.tostring(elem), b'<html><body>text</body></html>')
elem = ET.fromstring("<html><body>text</body></html>")
self.assertEqual(ET.tostring(elem), b'<html><body>text</body></html>')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_tostring_default_namespace(self):
elem = ET.XML('<body xmlns="http://effbot.org/ns"><tag/></body>')
self.assertEqual(
ET.tostring(elem, encoding='unicode'),
'<ns0:body xmlns:ns0="http://effbot.org/ns"><ns0:tag /></ns0:body>'
)
self.assertEqual(
ET.tostring(elem, encoding='unicode', default_namespace='http://effbot.org/ns'),
'<body xmlns="http://effbot.org/ns"><tag /></body>'
)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_tostring_default_namespace_different_namespace(self):
elem = ET.XML('<body xmlns="http://effbot.org/ns"><tag/></body>')
self.assertEqual(
ET.tostring(elem, encoding='unicode', default_namespace='foobar'),
'<ns1:body xmlns="foobar" xmlns:ns1="http://effbot.org/ns"><ns1:tag /></ns1:body>'
)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_tostring_default_namespace_original_no_namespace(self):
elem = ET.XML('<body><tag/></body>')
EXPECTED_MSG = '^cannot use non-qualified names with default_namespace option$'
with self.assertRaisesRegex(ValueError, EXPECTED_MSG):
ET.tostring(elem, encoding='unicode', default_namespace='foobar')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_tostring_no_xml_declaration(self):
elem = ET.XML('<body><tag/></body>')
self.assertEqual(
ET.tostring(elem, encoding='unicode'),
'<body><tag /></body>'
)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_tostring_xml_declaration(self):
elem = ET.XML('<body><tag/></body>')
self.assertEqual(
ET.tostring(elem, encoding='utf8', xml_declaration=True),
b"<?xml version='1.0' encoding='utf8'?>\n<body><tag /></body>"
)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_tostring_xml_declaration_unicode_encoding(self):
elem = ET.XML('<body><tag/></body>')
preferredencoding = locale.getpreferredencoding()
self.assertEqual(
f"<?xml version='1.0' encoding='{preferredencoding}'?>\n<body><tag /></body>",
ET.tostring(elem, encoding='unicode', xml_declaration=True)
)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_tostring_xml_declaration_cases(self):
elem = ET.XML('<body><tag>ø</tag></body>')
preferredencoding = locale.getpreferredencoding()
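# Each case serializes the same tree; characters the target encoding
# cannot represent (the 'ø' below, for ASCII-based encodings) come back
# as numeric character references such as &#248;.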
TESTCASES = [
# (expected_retval, encoding, xml_declaration)
# ... xml_declaration = None
(b'<body><tag>&#248;</tag></body>', None, None),
(b'<body><tag>\xc3\xb8</tag></body>', 'UTF-8', None),
(b'<body><tag>&#248;</tag></body>', 'US-ASCII', None),
(b"<?xml version='1.0' encoding='ISO-8859-1'?>\n"
b"<body><tag>\xf8</tag></body>", 'ISO-8859-1', None),
('<body><tag>ø</tag></body>', 'unicode', None),
# ... xml_declaration = False
(b"<body><tag>&#248;</tag></body>", None, False),
(b"<body><tag>\xc3\xb8</tag></body>", 'UTF-8', False),
(b"<body><tag>&#248;</tag></body>", 'US-ASCII', False),
(b"<body><tag>\xf8</tag></body>", 'ISO-8859-1', False),
("<body><tag>ø</tag></body>", 'unicode', False),
# ... xml_declaration = True
(b"<?xml version='1.0' encoding='us-ascii'?>\n"
b"<body><tag>&#248;</tag></body>", None, True),
(b"<?xml version='1.0' encoding='UTF-8'?>\n"
b"<body><tag>\xc3\xb8</tag></body>", 'UTF-8', True),
(b"<?xml version='1.0' encoding='US-ASCII'?>\n"
b"<body><tag>&#248;</tag></body>", 'US-ASCII', True),
(b"<?xml version='1.0' encoding='ISO-8859-1'?>\n"
b"<body><tag>\xf8</tag></body>", 'ISO-8859-1', True),
(f"<?xml version='1.0' encoding='{preferredencoding}'?>\n"
"<body><tag>ø</tag></body>", 'unicode', True),
]
for expected_retval, encoding, xml_declaration in TESTCASES:
with self.subTest(f'encoding={encoding} '
f'xml_declaration={xml_declaration}'):
self.assertEqual(
ET.tostring(
elem,
encoding=encoding,
xml_declaration=xml_declaration
),
expected_retval
)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_tostringlist_default_namespace(self):
elem = ET.XML('<body xmlns="http://effbot.org/ns"><tag/></body>')
self.assertEqual(
''.join(ET.tostringlist(elem, encoding='unicode')),
'<ns0:body xmlns:ns0="http://effbot.org/ns"><ns0:tag /></ns0:body>'
)
self.assertEqual(
''.join(ET.tostringlist(elem, encoding='unicode', default_namespace='http://effbot.org/ns')),
'<body xmlns="http://effbot.org/ns"><tag /></body>'
)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_tostringlist_xml_declaration(self):
elem = ET.XML('<body><tag/></body>')
self.assertEqual(
''.join(ET.tostringlist(elem, encoding='unicode')),
'<body><tag /></body>'
)
self.assertEqual(
b''.join(ET.tostringlist(elem, xml_declaration=True)),
b"<?xml version='1.0' encoding='us-ascii'?>\n<body><tag /></body>"
)
preferredencoding = locale.getpreferredencoding()
stringlist = ET.tostringlist(elem, encoding='unicode', xml_declaration=True)
self.assertEqual(
''.join(stringlist),
f"<?xml version='1.0' encoding='{preferredencoding}'?>\n<body><tag /></body>"
)
self.assertRegex(stringlist[0], r"^<\?xml version='1.0' encoding='.+'?>")
self.assertEqual(['<body', '>', '<tag', ' />', '</body>'], stringlist[1:])
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_encoding(self):
def check(encoding, body=''):
xml = ("<?xml version='1.0' encoding='%s'?><xml>%s</xml>" %
(encoding, body))
self.assertEqual(ET.XML(xml.encode(encoding)).text, body)
self.assertEqual(ET.XML(xml).text, body)
check("ascii", 'a')
check("us-ascii", 'a')
check("iso-8859-1", '\xbd')
check("iso-8859-15", '\u20ac')
check("cp437", '\u221a')
check("mac-roman", '\u02da')
def xml(encoding):
return "<?xml version='1.0' encoding='%s'?><xml />" % encoding
def bxml(encoding):
return xml(encoding).encode(encoding)
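# Rough grouping (an observation about the expat-backed parser, not a
# normative list): ASCII-compatible single-byte encodings are handled
# via expat's unknown-encoding hook, multi-byte ASCII-compatible
# encodings are rejected with ValueError, and declarations that are not
# even ASCII-readable fail to parse at all.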
supported_encodings = [
'ascii', 'utf-8', 'utf-8-sig', 'utf-16', 'utf-16be', 'utf-16le',
'iso8859-1', 'iso8859-2', 'iso8859-3', 'iso8859-4', 'iso8859-5',
'iso8859-6', 'iso8859-7', 'iso8859-8', 'iso8859-9', 'iso8859-10',
'iso8859-13', 'iso8859-14', 'iso8859-15', 'iso8859-16',
'cp437', 'cp720', 'cp737', 'cp775', 'cp850', 'cp852',
'cp855', 'cp856', 'cp857', 'cp858', 'cp860', 'cp861', 'cp862',
'cp863', 'cp865', 'cp866', 'cp869', 'cp874', 'cp1006', 'cp1125',
'cp1250', 'cp1251', 'cp1252', 'cp1253', 'cp1254', 'cp1255',
'cp1256', 'cp1257', 'cp1258',
'mac-cyrillic', 'mac-greek', 'mac-iceland', 'mac-latin2',
'mac-roman', 'mac-turkish',
'iso2022-jp', 'iso2022-jp-1', 'iso2022-jp-2', 'iso2022-jp-2004',
'iso2022-jp-3', 'iso2022-jp-ext',
'koi8-r', 'koi8-t', 'koi8-u', 'kz1048',
'hz', 'ptcp154',
]
for encoding in supported_encodings:
self.assertEqual(ET.tostring(ET.XML(bxml(encoding))), b'<xml />')
unsupported_ascii_compatible_encodings = [
'big5', 'big5hkscs',
'cp932', 'cp949', 'cp950',
'euc-jp', 'euc-jis-2004', 'euc-jisx0213', 'euc-kr',
'gb2312', 'gbk', 'gb18030',
'iso2022-kr', 'johab',
'shift-jis', 'shift-jis-2004', 'shift-jisx0213',
'utf-7',
]
for encoding in unsupported_ascii_compatible_encodings:
self.assertRaises(ValueError, ET.XML, bxml(encoding))
unsupported_ascii_incompatible_encodings = [
'cp037', 'cp424', 'cp500', 'cp864', 'cp875', 'cp1026', 'cp1140',
'utf_32', 'utf_32_be', 'utf_32_le',
]
for encoding in unsupported_ascii_incompatible_encodings:
self.assertRaises(ET.ParseError, ET.XML, bxml(encoding))
self.assertRaises(ValueError, ET.XML, xml('undefined').encode('ascii'))
self.assertRaises(LookupError, ET.XML, xml('xxx').encode('ascii'))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_methods(self):
# Test serialization methods.
e = ET.XML("<html><link/><script>1 < 2</script></html>")
e.tail = "\n"
self.assertEqual(serialize(e),
'<html><link /><script>1 < 2</script></html>\n')
self.assertEqual(serialize(e, method=None),
'<html><link /><script>1 < 2</script></html>\n')
self.assertEqual(serialize(e, method="xml"),
'<html><link /><script>1 < 2</script></html>\n')
self.assertEqual(serialize(e, method="html"),
'<html><link><script>1 < 2</script></html>\n')
self.assertEqual(serialize(e, method="text"), '1 < 2\n')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_issue18347(self):
e = ET.XML('<html><CamelCase>text</CamelCase></html>')
self.assertEqual(serialize(e),
'<html><CamelCase>text</CamelCase></html>')
self.assertEqual(serialize(e, method="html"),
'<html><CamelCase>text</CamelCase></html>')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_entity(self):
# Test entity handling.
# 1) good entities
e = ET.XML("<document title='舰'>test</document>")
self.assertEqual(serialize(e, encoding="us-ascii"),
b'<document title="舰">test</document>')
self.serialize_check(e, '<document title="\u8230">test</document>')
# 2) bad entities
with self.assertRaises(ET.ParseError) as cm:
ET.XML("<document>&entity;</document>")
self.assertEqual(str(cm.exception),
'undefined entity: line 1, column 10')
with self.assertRaises(ET.ParseError) as cm:
ET.XML(ENTITY_XML)
self.assertEqual(str(cm.exception),
'undefined entity &entity;: line 5, column 10')
# 3) custom entity
parser = ET.XMLParser()
parser.entity["entity"] = "text"
parser.feed(ENTITY_XML)
root = parser.close()
self.serialize_check(root, '<document>text</document>')
# 4) external (SYSTEM) entity
with self.assertRaises(ET.ParseError) as cm:
ET.XML(EXTERNAL_ENTITY_XML)
self.assertEqual(str(cm.exception),
'undefined entity &entity;: line 4, column 10')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_namespace(self):
# Test namespace issues.
# 1) xml namespace
elem = ET.XML("<tag xml:lang='en' />")
self.serialize_check(elem, '<tag xml:lang="en" />') # 1.1
# 2) other "well-known" namespaces
elem = ET.XML("<rdf:RDF xmlns:rdf='http://www.w3.org/1999/02/22-rdf-syntax-ns#' />")
self.serialize_check(elem,
'<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" />') # 2.1
elem = ET.XML("<html:html xmlns:html='http://www.w3.org/1999/xhtml' />")
self.serialize_check(elem,
'<html:html xmlns:html="http://www.w3.org/1999/xhtml" />') # 2.2
elem = ET.XML("<soap:Envelope xmlns:soap='http://schemas.xmlsoap.org/soap/envelope' />")
self.serialize_check(elem,
'<ns0:Envelope xmlns:ns0="http://schemas.xmlsoap.org/soap/envelope" />') # 2.3
# 3) unknown namespaces
elem = ET.XML(SAMPLE_XML_NS)
self.serialize_check(elem,
'<ns0:body xmlns:ns0="http://effbot.org/ns">\n'
' <ns0:tag>text</ns0:tag>\n'
' <ns0:tag />\n'
' <ns0:section>\n'
' <ns0:tag>subtext</ns0:tag>\n'
' </ns0:section>\n'
'</ns0:body>')
def test_qname(self):
# Test QName handling.
# 1) decorated tags
elem = ET.Element("{uri}tag")
self.serialize_check(elem, '<ns0:tag xmlns:ns0="uri" />') # 1.1
elem = ET.Element(ET.QName("{uri}tag"))
self.serialize_check(elem, '<ns0:tag xmlns:ns0="uri" />') # 1.2
elem = ET.Element(ET.QName("uri", "tag"))
self.serialize_check(elem, '<ns0:tag xmlns:ns0="uri" />') # 1.3
elem = ET.Element(ET.QName("uri", "tag"))
subelem = ET.SubElement(elem, ET.QName("uri", "tag1"))
subelem = ET.SubElement(elem, ET.QName("uri", "tag2"))
self.serialize_check(elem,
'<ns0:tag xmlns:ns0="uri"><ns0:tag1 /><ns0:tag2 /></ns0:tag>') # 1.4
# 2) decorated attributes
elem.clear()
elem.attrib["{uri}key"] = "value"
self.serialize_check(elem,
'<ns0:tag xmlns:ns0="uri" ns0:key="value" />') # 2.1
elem.clear()
elem.attrib[ET.QName("{uri}key")] = "value"
self.serialize_check(elem,
'<ns0:tag xmlns:ns0="uri" ns0:key="value" />') # 2.2
# 3) decorated values are not converted by default, but the
# QName wrapper can be used for values
elem.clear()
elem.attrib["{uri}key"] = "{uri}value"
self.serialize_check(elem,
'<ns0:tag xmlns:ns0="uri" ns0:key="{uri}value" />') # 3.1
elem.clear()
elem.attrib["{uri}key"] = ET.QName("{uri}value")
self.serialize_check(elem,
'<ns0:tag xmlns:ns0="uri" ns0:key="ns0:value" />') # 3.2
elem.clear()
subelem = ET.Element("tag")
subelem.attrib["{uri1}key"] = ET.QName("{uri2}value")
elem.append(subelem)
elem.append(subelem)
self.serialize_check(elem,
'<ns0:tag xmlns:ns0="uri" xmlns:ns1="uri1" xmlns:ns2="uri2">'
'<tag ns1:key="ns2:value" />'
'<tag ns1:key="ns2:value" />'
'</ns0:tag>') # 3.3
# 4) Direct QName tests
self.assertEqual(str(ET.QName('ns', 'tag')), '{ns}tag')
self.assertEqual(str(ET.QName('{ns}tag')), '{ns}tag')
q1 = ET.QName('ns', 'tag')
q2 = ET.QName('ns', 'tag')
self.assertEqual(q1, q2)
q2 = ET.QName('ns', 'other-tag')
self.assertNotEqual(q1, q2)
self.assertNotEqual(q1, 'ns:tag')
self.assertEqual(q1, '{ns}tag')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_doctype_public(self):
# Test PUBLIC doctype.
elem = ET.XML('<!DOCTYPE html PUBLIC'
' "-//W3C//DTD XHTML 1.0 Transitional//EN"'
' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">'
'<html>text</html>')
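# ElementPath.xpath_tokenizer() yields (op, tag) pairs; check() below
# keeps whichever half of each pair is non-empty (whitespace between
# tokens shows up as an empty string, as in the 'and' case further down).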
def test_xpath_tokenizer(self):
# Test the XPath tokenizer.
from xml.etree import ElementPath
def check(p, expected, namespaces=None):
self.assertEqual([op or tag
for op, tag in ElementPath.xpath_tokenizer(p, namespaces)],
expected)
# tests from the xml specification
check("*", ['*'])
check("text()", ['text', '()'])
check("@name", ['@', 'name'])
check("@*", ['@', '*'])
check("para[1]", ['para', '[', '1', ']'])
check("para[last()]", ['para', '[', 'last', '()', ']'])
check("*/para", ['*', '/', 'para'])
check("/doc/chapter[5]/section[2]",
['/', 'doc', '/', 'chapter', '[', '5', ']',
'/', 'section', '[', '2', ']'])
check("chapter//para", ['chapter', '//', 'para'])
check("//para", ['//', 'para'])
check("//olist/item", ['//', 'olist', '/', 'item'])
check(".", ['.'])
check(".//para", ['.', '//', 'para'])
check("..", ['..'])
check("../@lang", ['..', '/', '@', 'lang'])
check("chapter[title]", ['chapter', '[', 'title', ']'])
check("employee[@secretary and @assistant]", ['employee',
'[', '@', 'secretary', '', 'and', '', '@', 'assistant', ']'])
# additional tests
check("@{ns}attr", ['@', '{ns}attr'])
check("{http://spam}egg", ['{http://spam}egg'])
check("./spam.egg", ['.', '/', 'spam.egg'])
check(".//{http://spam}egg", ['.', '//', '{http://spam}egg'])
# wildcard tags
check("{ns}*", ['{ns}*'])
check("{}*", ['{}*'])
check("{*}tag", ['{*}tag'])
check("{*}*", ['{*}*'])
check(".//{*}tag", ['.', '//', '{*}tag'])
# namespace prefix resolution
check("./xsd:type", ['.', '/', '{http://www.w3.org/2001/XMLSchema}type'],
{'xsd': 'http://www.w3.org/2001/XMLSchema'})
check("type", ['{http://www.w3.org/2001/XMLSchema}type'],
{'': 'http://www.w3.org/2001/XMLSchema'})
check("@xsd:type", ['@', '{http://www.w3.org/2001/XMLSchema}type'],
{'xsd': 'http://www.w3.org/2001/XMLSchema'})
check("@type", ['@', 'type'],
{'': 'http://www.w3.org/2001/XMLSchema'})
check("@{*}type", ['@', '{*}type'],
{'': 'http://www.w3.org/2001/XMLSchema'})
check("@{ns}attr", ['@', '{ns}attr'],
{'': 'http://www.w3.org/2001/XMLSchema',
'ns': 'http://www.w3.org/2001/XMLSchema'})
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_processinginstruction(self):
# Test ProcessingInstruction directly
self.assertEqual(ET.tostring(ET.ProcessingInstruction('test', 'instruction')),
b'<?test instruction?>')
self.assertEqual(ET.tostring(ET.PI('test', 'instruction')),
b'<?test instruction?>')
# Issue #2746
self.assertEqual(ET.tostring(ET.PI('test', '<testing&>')),
b'<?test <testing&>?>')
self.assertEqual(ET.tostring(ET.PI('test', '<testing&>\xe3'), 'latin-1'),
b"<?xml version='1.0' encoding='latin-1'?>\n"
b"<?test <testing&>\xe3?>")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_html_empty_elems_serialization(self):
# issue 15970
# from http://www.w3.org/TR/html401/index/elements.html
for element in ['AREA', 'BASE', 'BASEFONT', 'BR', 'COL', 'FRAME', 'HR',
'IMG', 'INPUT', 'ISINDEX', 'LINK', 'META', 'PARAM']:
for elem in [element, element.lower()]:
expected = '<%s>' % elem
serialized = serialize(ET.XML('<%s />' % elem), method='html')
self.assertEqual(serialized, expected)
serialized = serialize(ET.XML('<%s></%s>' % (elem,elem)),
method='html')
self.assertEqual(serialized, expected)
def test_dump_attribute_order(self):
# See BPO 34160
e = ET.Element('cirriculum', status='public', company='example')
with support.captured_stdout() as stdout:
ET.dump(e)
self.assertEqual(stdout.getvalue(),
'<cirriculum status="public" company="example" />\n')
def test_tree_write_attribute_order(self):
# See BPO 34160
root = ET.Element('cirriculum', status='public', company='example')
self.assertEqual(serialize(root),
'<cirriculum status="public" company="example" />')
self.assertEqual(serialize(root, method='html'),
'<cirriculum status="public" company="example"></cirriculum>')
class XMLPullParserTest(unittest.TestCase):
def _feed(self, parser, data, chunk_size=None):
if chunk_size is None:
parser.feed(data)
else:
for i in range(0, len(data), chunk_size):
parser.feed(data[i:i+chunk_size])
def assert_events(self, parser, expected, max_events=None):
self.assertEqual(
[(event, (elem.tag, elem.text))
for event, elem in islice(parser.read_events(), max_events)],
expected)
def assert_event_tuples(self, parser, expected, max_events=None):
self.assertEqual(
list(islice(parser.read_events(), max_events)),
expected)
def assert_event_tags(self, parser, expected, max_events=None):
events = islice(parser.read_events(), max_events)
self.assertEqual([(action, elem.tag) for action, elem in events],
expected)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_simple_xml(self):
for chunk_size in (None, 1, 5):
with self.subTest(chunk_size=chunk_size):
parser = ET.XMLPullParser()
self.assert_event_tags(parser, [])
self._feed(parser, "<!-- comment -->\n", chunk_size)
self.assert_event_tags(parser, [])
self._feed(parser,
"<root>\n <element key='value'>text</element",
chunk_size)
self.assert_event_tags(parser, [])
self._feed(parser, ">\n", chunk_size)
self.assert_event_tags(parser, [('end', 'element')])
self._feed(parser, "<element>text</element>tail\n", chunk_size)
self._feed(parser, "<empty-element/>\n", chunk_size)
self.assert_event_tags(parser, [
('end', 'element'),
('end', 'empty-element'),
])
self._feed(parser, "</root>\n", chunk_size)
self.assert_event_tags(parser, [('end', 'root')])
self.assertIsNone(parser.close())
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_feed_while_iterating(self):
parser = ET.XMLPullParser()
it = parser.read_events()
self._feed(parser, "<root>\n <element key='value'>text</element>\n")
action, elem = next(it)
self.assertEqual((action, elem.tag), ('end', 'element'))
self._feed(parser, "</root>\n")
action, elem = next(it)
self.assertEqual((action, elem.tag), ('end', 'root'))
with self.assertRaises(StopIteration):
next(it)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_simple_xml_with_ns(self):
parser = ET.XMLPullParser()
self.assert_event_tags(parser, [])
self._feed(parser, "<!-- comment -->\n")
self.assert_event_tags(parser, [])
self._feed(parser, "<root xmlns='namespace'>\n")
self.assert_event_tags(parser, [])
self._feed(parser, "<element key='value'>text</element")
self.assert_event_tags(parser, [])
self._feed(parser, ">\n")
self.assert_event_tags(parser, [('end', '{namespace}element')])
self._feed(parser, "<element>text</element>tail\n")
self._feed(parser, "<empty-element/>\n")
self.assert_event_tags(parser, [
('end', '{namespace}element'),
('end', '{namespace}empty-element'),
])
self._feed(parser, "</root>\n")
self.assert_event_tags(parser, [('end', '{namespace}root')])
self.assertIsNone(parser.close())
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_ns_events(self):
parser = ET.XMLPullParser(events=('start-ns', 'end-ns'))
self._feed(parser, "<!-- comment -->\n")
self._feed(parser, "<root xmlns='namespace'>\n")
self.assertEqual(
list(parser.read_events()),
[('start-ns', ('', 'namespace'))])
self._feed(parser, "<element key='value'>text</element")
self._feed(parser, ">\n")
self._feed(parser, "<element>text</element>tail\n")
self._feed(parser, "<empty-element/>\n")
self._feed(parser, "</root>\n")
self.assertEqual(list(parser.read_events()), [('end-ns', None)])
self.assertIsNone(parser.close())
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_ns_events_start(self):
parser = ET.XMLPullParser(events=('start-ns', 'start', 'end'))
self._feed(parser, "<tag xmlns='abc' xmlns:p='xyz'>\n")
self.assert_event_tuples(parser, [
('start-ns', ('', 'abc')),
('start-ns', ('p', 'xyz')),
], max_events=2)
self.assert_event_tags(parser, [
('start', '{abc}tag'),
], max_events=1)
self._feed(parser, "<child />\n")
self.assert_event_tags(parser, [
('start', '{abc}child'),
('end', '{abc}child'),
])
self._feed(parser, "</tag>\n")
parser.close()
self.assert_event_tags(parser, [
('end', '{abc}tag'),
])
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_ns_events_start_end(self):
parser = ET.XMLPullParser(events=('start-ns', 'start', 'end', 'end-ns'))
self._feed(parser, "<tag xmlns='abc' xmlns:p='xyz'>\n")
self.assert_event_tuples(parser, [
('start-ns', ('', 'abc')),
('start-ns', ('p', 'xyz')),
], max_events=2)
self.assert_event_tags(parser, [
('start', '{abc}tag'),
], max_events=1)
self._feed(parser, "<child />\n")
self.assert_event_tags(parser, [
('start', '{abc}child'),
('end', '{abc}child'),
])
self._feed(parser, "</tag>\n")
parser.close()
self.assert_event_tags(parser, [
('end', '{abc}tag'),
], max_events=1)
self.assert_event_tuples(parser, [
('end-ns', None),
('end-ns', None),
])
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_events(self):
parser = ET.XMLPullParser(events=())
self._feed(parser, "<root/>\n")
self.assert_event_tags(parser, [])
parser = ET.XMLPullParser(events=('start', 'end'))
self._feed(parser, "<!-- text here -->\n")
self.assert_events(parser, [])
parser = ET.XMLPullParser(events=('start', 'end'))
self._feed(parser, "<root>\n")
self.assert_event_tags(parser, [('start', 'root')])
self._feed(parser, "<element key='value'>text</element")
self.assert_event_tags(parser, [('start', 'element')])
self._feed(parser, ">\n")
self.assert_event_tags(parser, [('end', 'element')])
self._feed(parser,
"<element xmlns='foo'>text<empty-element/></element>tail\n")
self.assert_event_tags(parser, [
('start', '{foo}element'),
('start', '{foo}empty-element'),
('end', '{foo}empty-element'),
('end', '{foo}element'),
])
self._feed(parser, "</root>")
self.assertIsNone(parser.close())
self.assert_event_tags(parser, [('end', 'root')])
parser = ET.XMLPullParser(events=('start',))
self._feed(parser, "<!-- comment -->\n")
self.assert_event_tags(parser, [])
self._feed(parser, "<root>\n")
self.assert_event_tags(parser, [('start', 'root')])
self._feed(parser, "<element key='value'>text</element")
self.assert_event_tags(parser, [('start', 'element')])
self._feed(parser, ">\n")
self.assert_event_tags(parser, [])
self._feed(parser,
"<element xmlns='foo'>text<empty-element/></element>tail\n")
self.assert_event_tags(parser, [
('start', '{foo}element'),
('start', '{foo}empty-element'),
])
self._feed(parser, "</root>")
self.assertIsNone(parser.close())
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_events_comment(self):
parser = ET.XMLPullParser(events=('start', 'comment', 'end'))
self._feed(parser, "<!-- text here -->\n")
self.assert_events(parser, [('comment', (ET.Comment, ' text here '))])
self._feed(parser, "<!-- more text here -->\n")
self.assert_events(parser, [('comment', (ET.Comment, ' more text here '))])
self._feed(parser, "<root-tag>text")
self.assert_event_tags(parser, [('start', 'root-tag')])
self._feed(parser, "<!-- inner comment-->\n")
self.assert_events(parser, [('comment', (ET.Comment, ' inner comment'))])
self._feed(parser, "</root-tag>\n")
self.assert_event_tags(parser, [('end', 'root-tag')])
self._feed(parser, "<!-- outer comment -->\n")
self.assert_events(parser, [('comment', (ET.Comment, ' outer comment '))])
parser = ET.XMLPullParser(events=('comment',))
self._feed(parser, "<!-- text here -->\n")
self.assert_events(parser, [('comment', (ET.Comment, ' text here '))])
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_events_pi(self):
parser = ET.XMLPullParser(events=('start', 'pi', 'end'))
self._feed(parser, "<?pitarget?>\n")
self.assert_events(parser, [('pi', (ET.PI, 'pitarget'))])
parser = ET.XMLPullParser(events=('pi',))
self._feed(parser, "<?pitarget some text ?>\n")
self.assert_events(parser, [('pi', (ET.PI, 'pitarget some text '))])
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_events_sequence(self):
# Test that events can be some sequence that's not just a tuple or list
eventset = {'end', 'start'}
parser = ET.XMLPullParser(events=eventset)
self._feed(parser, "<foo>bar</foo>")
self.assert_event_tags(parser, [('start', 'foo'), ('end', 'foo')])
class DummyIter:
def __init__(self):
self.events = iter(['start', 'end', 'start-ns'])
def __iter__(self):
return self
def __next__(self):
return next(self.events)
parser = ET.XMLPullParser(events=DummyIter())
self._feed(parser, "<foo>bar</foo>")
self.assert_event_tags(parser, [('start', 'foo'), ('end', 'foo')])
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_unknown_event(self):
with self.assertRaises(ValueError):
ET.XMLPullParser(events=('start', 'end', 'bogus'))
#
# xinclude tests (samples from appendix C of the xinclude specification)
XINCLUDE = {}
XINCLUDE["C1.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>120 Mz is adequate for an average home user.</p>
<xi:include href="disclaimer.xml"/>
</document>
"""
XINCLUDE["disclaimer.xml"] = """\
<?xml version='1.0'?>
<disclaimer>
<p>The opinions represented herein represent those of the individual
and should not be interpreted as official policy endorsed by this
organization.</p>
</disclaimer>
"""
XINCLUDE["C2.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>This document has been accessed
<xi:include href="count.txt" parse="text"/> times.</p>
</document>
"""
XINCLUDE["count.txt"] = "324387"
XINCLUDE["C2b.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>This document has been <em>accessed</em>
<xi:include href="count.txt" parse="text"/> times.</p>
</document>
"""
XINCLUDE["C3.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>The following is the source of the "data.xml" resource:</p>
<example><xi:include href="data.xml" parse="text"/></example>
</document>
"""
XINCLUDE["data.xml"] = """\
<?xml version='1.0'?>
<data>
<item><![CDATA[Brooks & Shields]]></item>
</data>
"""
XINCLUDE["C5.xml"] = """\
<?xml version='1.0'?>
<div xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="example.txt" parse="text">
<xi:fallback>
<xi:include href="fallback-example.txt" parse="text">
<xi:fallback><a href="mailto:[email protected]">Report error</a></xi:fallback>
</xi:include>
</xi:fallback>
</xi:include>
</div>
"""
XINCLUDE["default.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>Example.</p>
<xi:include href="{}"/>
</document>
""".format(html.escape(SIMPLE_XMLFILE, True))
#
# badly formatted xi:include tags
XINCLUDE_BAD = {}
XINCLUDE_BAD["B1.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>120 Mz is adequate for an average home user.</p>
<xi:include href="disclaimer.xml" parse="BAD_TYPE"/>
</document>
"""
XINCLUDE_BAD["B2.xml"] = """\
<?xml version='1.0'?>
<div xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:fallback></xi:fallback>
</div>
"""
class XIncludeTest(unittest.TestCase):
def xinclude_loader(self, href, parse="xml", encoding=None):
try:
data = XINCLUDE[href]
except KeyError:
raise OSError("resource not found")
if parse == "xml":
data = ET.XML(data)
return data
def none_loader(self, href, parser, encoding=None):
return None
def _my_loader(self, href, parse):
# Used to avoid a test-dependency problem where the default loader
# of ElementInclude uses the pyET parser for cET tests.
if parse == 'xml':
with open(href, 'rb') as f:
return ET.parse(f).getroot()
else:
return None
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_xinclude_default(self):
from xml.etree import ElementInclude
doc = self.xinclude_loader('default.xml')
ElementInclude.include(doc, self._my_loader)
self.assertEqual(serialize(doc),
'<document>\n'
' <p>Example.</p>\n'
' <root>\n'
' <element key="value">text</element>\n'
' <element>text</element>tail\n'
' <empty-element />\n'
'</root>\n'
'</document>')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_xinclude(self):
from xml.etree import ElementInclude
# Basic inclusion example (XInclude C.1)
document = self.xinclude_loader("C1.xml")
ElementInclude.include(document, self.xinclude_loader)
self.assertEqual(serialize(document),
'<document>\n'
' <p>120 Mz is adequate for an average home user.</p>\n'
' <disclaimer>\n'
' <p>The opinions represented herein represent those of the individual\n'
' and should not be interpreted as official policy endorsed by this\n'
' organization.</p>\n'
'</disclaimer>\n'
'</document>') # C1
# Textual inclusion example (XInclude C.2)
document = self.xinclude_loader("C2.xml")
ElementInclude.include(document, self.xinclude_loader)
self.assertEqual(serialize(document),
'<document>\n'
' <p>This document has been accessed\n'
' 324387 times.</p>\n'
'</document>') # C2
# Textual inclusion after sibling element (based on modified XInclude C.2)
document = self.xinclude_loader("C2b.xml")
ElementInclude.include(document, self.xinclude_loader)
self.assertEqual(serialize(document),
'<document>\n'
' <p>This document has been <em>accessed</em>\n'
' 324387 times.</p>\n'
'</document>') # C2b
# Textual inclusion of XML example (XInclude C.3)
document = self.xinclude_loader("C3.xml")
ElementInclude.include(document, self.xinclude_loader)
self.assertEqual(serialize(document),
'<document>\n'
' <p>The following is the source of the "data.xml" resource:</p>\n'
" <example><?xml version='1.0'?>\n"
'<data>\n'
' <item><![CDATA[Brooks & Shields]]></item>\n'
'</data>\n'
'</example>\n'
'</document>') # C3
# Fallback example (XInclude C.5)
# Note! Fallback support is not yet implemented
document = self.xinclude_loader("C5.xml")
with self.assertRaises(OSError) as cm:
ElementInclude.include(document, self.xinclude_loader)
self.assertEqual(str(cm.exception), 'resource not found')
self.assertEqual(serialize(document),
'<div xmlns:ns0="http://www.w3.org/2001/XInclude">\n'
' <ns0:include href="example.txt" parse="text">\n'
' <ns0:fallback>\n'
' <ns0:include href="fallback-example.txt" parse="text">\n'
' <ns0:fallback><a href="mailto:[email protected]">Report error</a></ns0:fallback>\n'
' </ns0:include>\n'
' </ns0:fallback>\n'
' </ns0:include>\n'
'</div>') # C5
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_xinclude_failures(self):
from xml.etree import ElementInclude
# Test failure to locate included XML file.
document = ET.XML(XINCLUDE["C1.xml"])
with self.assertRaises(ElementInclude.FatalIncludeError) as cm:
ElementInclude.include(document, loader=self.none_loader)
self.assertEqual(str(cm.exception),
"cannot load 'disclaimer.xml' as 'xml'")
# Test failure to locate included text file.
document = ET.XML(XINCLUDE["C2.xml"])
with self.assertRaises(ElementInclude.FatalIncludeError) as cm:
ElementInclude.include(document, loader=self.none_loader)
self.assertEqual(str(cm.exception),
"cannot load 'count.txt' as 'text'")
# Test bad parse type.
document = ET.XML(XINCLUDE_BAD["B1.xml"])
with self.assertRaises(ElementInclude.FatalIncludeError) as cm:
ElementInclude.include(document, loader=self.none_loader)
self.assertEqual(str(cm.exception),
"unknown parse type in xi:include tag ('BAD_TYPE')")
# Test xi:fallback outside xi:include.
document = ET.XML(XINCLUDE_BAD["B2.xml"])
with self.assertRaises(ElementInclude.FatalIncludeError) as cm:
ElementInclude.include(document, loader=self.none_loader)
self.assertEqual(str(cm.exception),
"xi:fallback tag must be child of xi:include "
"('{http://www.w3.org/2001/XInclude}fallback')")
# --------------------------------------------------------------------
# reported bugs
class BugsTest(unittest.TestCase):
def test_bug_xmltoolkit21(self):
# marshaller gives obscure errors for non-string values
def check(elem):
with self.assertRaises(TypeError) as cm:
serialize(elem)
self.assertEqual(str(cm.exception),
'cannot serialize 123 (type int)')
elem = ET.Element(123)
check(elem) # tag
elem = ET.Element("elem")
elem.text = 123
check(elem) # text
elem = ET.Element("elem")
elem.tail = 123
check(elem) # tail
elem = ET.Element("elem")
elem.set(123, "123")
check(elem) # attribute key
elem = ET.Element("elem")
elem.set("123", 123)
check(elem) # attribute value
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_bug_xmltoolkit25(self):
# typo in ElementTree.findtext
elem = ET.XML(SAMPLE_XML)
tree = ET.ElementTree(elem)
self.assertEqual(tree.findtext("tag"), 'text')
self.assertEqual(tree.findtext("section/tag"), 'subtext')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_bug_xmltoolkit28(self):
# .//tag causes exceptions
tree = ET.XML("<doc><table><tbody/></table></doc>")
self.assertEqual(summarize_list(tree.findall(".//thead")), [])
self.assertEqual(summarize_list(tree.findall(".//tbody")), ['tbody'])
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_bug_xmltoolkitX1(self):
# dump() doesn't flush the output buffer
tree = ET.XML("<doc><table><tbody/></table></doc>")
with support.captured_stdout() as stdout:
ET.dump(tree)
self.assertEqual(stdout.getvalue(), '<doc><table><tbody /></table></doc>\n')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_bug_xmltoolkit39(self):
# non-ASCII element and attribute names don't work
tree = ET.XML(b"<?xml version='1.0' encoding='iso-8859-1'?><t\xe4g />")
self.assertEqual(ET.tostring(tree, "utf-8"), b'<t\xc3\xa4g />')
tree = ET.XML(b"<?xml version='1.0' encoding='iso-8859-1'?>"
b"<tag \xe4ttr='välue' />")
self.assertEqual(tree.attrib, {'\xe4ttr': 'v\xe4lue'})
self.assertEqual(ET.tostring(tree, "utf-8"),
b'<tag \xc3\xa4ttr="v\xc3\xa4lue" />')
tree = ET.XML(b"<?xml version='1.0' encoding='iso-8859-1'?>"
b'<t\xe4g>text</t\xe4g>')
self.assertEqual(ET.tostring(tree, "utf-8"),
b'<t\xc3\xa4g>text</t\xc3\xa4g>')
tree = ET.Element("t\u00e4g")
self.assertEqual(ET.tostring(tree, "utf-8"), b'<t\xc3\xa4g />')
tree = ET.Element("tag")
tree.set("\u00e4ttr", "v\u00e4lue")
self.assertEqual(ET.tostring(tree, "utf-8"),
b'<tag \xc3\xa4ttr="v\xc3\xa4lue" />')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_bug_xmltoolkit54(self):
# problems handling internally defined entities
e = ET.XML("<!DOCTYPE doc [<!ENTITY ldots '舰'>]>"
'<doc>&ldots;</doc>')
self.assertEqual(serialize(e, encoding="us-ascii"),
b'<doc>舰</doc>')
self.assertEqual(serialize(e), '<doc>\u8230</doc>')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_bug_xmltoolkit55(self):
# make sure we're reporting the first error, not the last
with self.assertRaises(ET.ParseError) as cm:
ET.XML(b"<!DOCTYPE doc SYSTEM 'doc.dtd'>"
b'<doc>&ldots;&ndots;&rdots;</doc>')
self.assertEqual(str(cm.exception),
'undefined entity &ldots;: line 1, column 36')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_bug_xmltoolkit60(self):
# Handle crash in stream source.
class ExceptionFile:
def read(self, x):
raise OSError
self.assertRaises(OSError, ET.parse, ExceptionFile())
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_bug_xmltoolkit62(self):
# Don't crash when using custom entities.
ENTITIES = {'rsquo': '\u2019', 'lsquo': '\u2018'}
parser = ET.XMLParser()
parser.entity.update(ENTITIES)
parser.feed("""<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE patent-application-publication SYSTEM "pap-v15-2001-01-31.dtd" []>
<patent-application-publication>
<subdoc-abstract>
<paragraph id="A-0001" lvl="0">A new cultivar of Begonia plant named ‘BCT9801BEG’.</paragraph>
</subdoc-abstract>
</patent-application-publication>""")
t = parser.close()
self.assertEqual(t.find('.//paragraph').text,
'A new cultivar of Begonia plant named \u2018BCT9801BEG\u2019.')
@unittest.skipIf(sys.gettrace(), "Skips under coverage.")
def test_bug_xmltoolkit63(self):
# Check reference leak.
def xmltoolkit63():
tree = ET.TreeBuilder()
tree.start("tag", {})
tree.data("text")
tree.end("tag")
xmltoolkit63()
count = sys.getrefcount(None)
for i in range(1000):
xmltoolkit63()
self.assertEqual(sys.getrefcount(None), count)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_bug_200708_newline(self):
# Preserve newlines in attributes.
e = ET.Element('SomeTag', text="def _f():\n return 3\n")
self.assertEqual(ET.tostring(e),
b'<SomeTag text="def _f():&#10; return 3&#10;" />')
self.assertEqual(ET.XML(ET.tostring(e)).get("text"),
'def _f():\n return 3\n')
self.assertEqual(ET.tostring(ET.XML(ET.tostring(e))),
b'<SomeTag text="def _f():&#10; return 3&#10;" />')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_bug_200708_close(self):
# Test default builder.
parser = ET.XMLParser() # default
parser.feed("<element>some text</element>")
self.assertEqual(parser.close().tag, 'element')
# Test custom builder.
class EchoTarget:
def close(self):
return ET.Element("element") # simulate root
parser = ET.XMLParser(target=EchoTarget())
parser.feed("<element>some text</element>")
self.assertEqual(parser.close().tag, 'element')
def test_bug_200709_default_namespace(self):
e = ET.Element("{default}elem")
s = ET.SubElement(e, "{default}elem")
self.assertEqual(serialize(e, default_namespace="default"), # 1
'<elem xmlns="default"><elem /></elem>')
e = ET.Element("{default}elem")
s = ET.SubElement(e, "{default}elem")
s = ET.SubElement(e, "{not-default}elem")
self.assertEqual(serialize(e, default_namespace="default"), # 2
'<elem xmlns="default" xmlns:ns1="not-default">'
'<elem />'
'<ns1:elem />'
'</elem>')
e = ET.Element("{default}elem")
s = ET.SubElement(e, "{default}elem")
s = ET.SubElement(e, "elem") # unprefixed name
with self.assertRaises(ValueError) as cm:
serialize(e, default_namespace="default") # 3
self.assertEqual(str(cm.exception),
'cannot use non-qualified names with default_namespace option')
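# ET.register_namespace(prefix, uri) updates the global prefix map used
# by the serializer, so a registered URI is written with its chosen
# prefix instead of an auto-generated ns0, ns1, ... name (exercised in
# the next test).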
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_bug_200709_register_namespace(self):
e = ET.Element("{http://namespace.invalid/does/not/exist/}title")
self.assertEqual(ET.tostring(e),
b'<ns0:title xmlns:ns0="http://namespace.invalid/does/not/exist/" />')
ET.register_namespace("foo", "http://namespace.invalid/does/not/exist/")
e = ET.Element("{http://namespace.invalid/does/not/exist/}title")
self.assertEqual(ET.tostring(e),
b'<foo:title xmlns:foo="http://namespace.invalid/does/not/exist/" />')
# And the Dublin Core namespace is in the default list:
e = ET.Element("{http://purl.org/dc/elements/1.1/}title")
self.assertEqual(ET.tostring(e),
b'<dc:title xmlns:dc="http://purl.org/dc/elements/1.1/" />')
def test_bug_200709_element_comment(self):
# Not sure if this can be fixed, really (since the serializer needs
# ET.Comment, not cET.comment).
a = ET.Element('a')
a.append(ET.Comment('foo'))
self.assertEqual(a[0].tag, ET.Comment)
a = ET.Element('a')
a.append(ET.PI('foo'))
self.assertEqual(a[0].tag, ET.PI)
def test_bug_200709_element_insert(self):
a = ET.Element('a')
b = ET.SubElement(a, 'b')
c = ET.SubElement(a, 'c')
d = ET.Element('d')
a.insert(0, d)
self.assertEqual(summarize_list(a), ['d', 'b', 'c'])
a.insert(-1, d)
self.assertEqual(summarize_list(a), ['d', 'b', 'd', 'c'])
def test_bug_200709_iter_comment(self):
a = ET.Element('a')
b = ET.SubElement(a, 'b')
comment_b = ET.Comment("TEST-b")
b.append(comment_b)
self.assertEqual(summarize_list(a.iter(ET.Comment)), [ET.Comment])
# --------------------------------------------------------------------
# reported on bugs.python.org
def test_bug_1534630(self):
bob = ET.TreeBuilder()
e = bob.data("data")
e = bob.start("tag", {})
e = bob.end("tag")
e = bob.close()
self.assertEqual(serialize(e), '<tag />')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_issue6233(self):
e = ET.XML(b"<?xml version='1.0' encoding='utf-8'?>"
b'<body>t\xc3\xa3g</body>')
self.assertEqual(ET.tostring(e, 'ascii'),
b"<?xml version='1.0' encoding='ascii'?>\n"
b'<body>t&#227;g</body>')
e = ET.XML(b"<?xml version='1.0' encoding='iso-8859-1'?>"
b'<body>t\xe3g</body>')
self.assertEqual(ET.tostring(e, 'ascii'),
b"<?xml version='1.0' encoding='ascii'?>\n"
b'<body>t&#227;g</body>')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_issue3151(self):
e = ET.XML('<prefix:localname xmlns:prefix="${stuff}"/>')
self.assertEqual(e.tag, '{${stuff}}localname')
t = ET.ElementTree(e)
self.assertEqual(ET.tostring(e), b'<ns0:localname xmlns:ns0="${stuff}" />')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_issue6565(self):
elem = ET.XML("<body><tag/></body>")
self.assertEqual(summarize_list(elem), ['tag'])
newelem = ET.XML(SAMPLE_XML)
elem[:] = newelem[:]
self.assertEqual(summarize_list(elem), ['tag', 'tag', 'section'])
def test_issue10777(self):
# Registering a namespace twice caused a "dictionary changed size during
# iteration" bug.
ET.register_namespace('test10777', 'http://myuri/')
ET.register_namespace('test10777', 'http://myuri/')
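# The tests below target a C-accelerator hazard (issue #25902): the
# iteration machinery must not keep a borrowed reference to .text/.tail
# while arbitrary Python code (the __bool__/__eq__ hooks) can replace them.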
def test_lost_text(self):
# Issue #25902: Borrowed text can disappear
class Text:
def __bool__(self):
e.text = 'changed'
return True
e = ET.Element('tag')
e.text = Text()
i = e.itertext()
t = next(i)
self.assertIsInstance(t, Text)
self.assertIsInstance(e.text, str)
self.assertEqual(e.text, 'changed')
def test_lost_tail(self):
# Issue #25902: Borrowed tail can disappear
class Text:
def __bool__(self):
e[0].tail = 'changed'
return True
e = ET.Element('root')
e.append(ET.Element('tag'))
e[0].tail = Text()
i = e.itertext()
t = next(i)
self.assertIsInstance(t, Text)
self.assertIsInstance(e[0].tail, str)
self.assertEqual(e[0].tail, 'changed')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_lost_elem(self):
# Issue #25902: Borrowed element can disappear
class Tag:
def __eq__(self, other):
e[0] = ET.Element('changed')
next(i)
return True
e = ET.Element('root')
e.append(ET.Element(Tag()))
e.append(ET.Element('tag'))
i = e.iter('tag')
try:
t = next(i)
except ValueError:
self.skipTest('generators are not reentrant')
self.assertIsInstance(t.tag, Tag)
self.assertIsInstance(e[0].tag, str)
self.assertEqual(e[0].tag, 'changed')
def check_expat224_utf8_bug(self, text):
xml = b'<a b="%s"/>' % text
root = ET.XML(xml)
self.assertEqual(root.get('b'), text.decode('utf-8'))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_expat224_utf8_bug(self):
# bpo-31170: Expat 2.2.3 had a bug in its UTF-8 decoder.
# Check that Expat 2.2.4 fixed the bug.
#
# Test buffer bounds at odd and even positions.
text = b'\xc3\xa0' * 1024
self.check_expat224_utf8_bug(text)
text = b'x' + b'\xc3\xa0' * 1024
self.check_expat224_utf8_bug(text)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_expat224_utf8_bug_file(self):
with open(UTF8_BUG_XMLFILE, 'rb') as fp:
raw = fp.read()
root = ET.fromstring(raw)
xmlattr = root.get('b')
# "Parse" manually the XML file to extract the value of the 'b'
# attribute of the <a b='xxx' /> XML element
text = raw.decode('utf-8').strip()
text = text.replace('\r\n', ' ')
text = text[6:-4]
self.assertEqual(root.get('b'), text)
# --------------------------------------------------------------------
class BasicElementTest(ElementTestCase, unittest.TestCase):
def test___init__(self):
tag = "foo"
attrib = { "zix": "wyp" }
element_foo = ET.Element(tag, attrib)
# traits of an element
self.assertIsInstance(element_foo, ET.Element)
self.assertIn("tag", dir(element_foo))
self.assertIn("attrib", dir(element_foo))
self.assertIn("text", dir(element_foo))
self.assertIn("tail", dir(element_foo))
# string attributes have expected values
self.assertEqual(element_foo.tag, tag)
self.assertIsNone(element_foo.text)
self.assertIsNone(element_foo.tail)
# attrib is a copy
self.assertIsNot(element_foo.attrib, attrib)
self.assertEqual(element_foo.attrib, attrib)
# attrib isn't linked
attrib["bar"] = "baz"
self.assertIsNot(element_foo.attrib, attrib)
self.assertNotEqual(element_foo.attrib, attrib)
def test___copy__(self):
element_foo = ET.Element("foo", { "zix": "wyp" })
element_foo.append(ET.Element("bar", { "baz": "qix" }))
element_foo2 = copy.copy(element_foo)
# elements are not the same
self.assertIsNot(element_foo2, element_foo)
# string attributes are equal
self.assertEqual(element_foo2.tag, element_foo.tag)
self.assertEqual(element_foo2.text, element_foo.text)
self.assertEqual(element_foo2.tail, element_foo.tail)
# number of children is the same
self.assertEqual(len(element_foo2), len(element_foo))
# children are the same
for (child1, child2) in itertools.zip_longest(element_foo, element_foo2):
self.assertIs(child1, child2)
# attrib is a copy
self.assertEqual(element_foo2.attrib, element_foo.attrib)
def test___deepcopy__(self):
element_foo = ET.Element("foo", { "zix": "wyp" })
element_foo.append(ET.Element("bar", { "baz": "qix" }))
element_foo2 = copy.deepcopy(element_foo)
# elements are not the same
self.assertIsNot(element_foo2, element_foo)
# string attributes are equal
self.assertEqual(element_foo2.tag, element_foo.tag)
self.assertEqual(element_foo2.text, element_foo.text)
self.assertEqual(element_foo2.tail, element_foo.tail)
# number of children is the same
self.assertEqual(len(element_foo2), len(element_foo))
# children are not the same
for (child1, child2) in itertools.zip_longest(element_foo, element_foo2):
self.assertIsNot(child1, child2)
# attrib is a copy
self.assertIsNot(element_foo2.attrib, element_foo.attrib)
self.assertEqual(element_foo2.attrib, element_foo.attrib)
# attrib isn't linked
element_foo.attrib["bar"] = "baz"
self.assertIsNot(element_foo2.attrib, element_foo.attrib)
self.assertNotEqual(element_foo2.attrib, element_foo.attrib)
def test_augmentation_type_errors(self):
e = ET.Element('joe')
self.assertRaises(TypeError, e.append, 'b')
self.assertRaises(TypeError, e.extend, [ET.Element('bar'), 'foo'])
self.assertRaises(TypeError, e.insert, 0, 'foo')
e[:] = [ET.Element('bar')]
with self.assertRaises(TypeError):
e[0] = 'foo'
with self.assertRaises(TypeError):
e[:] = [ET.Element('bar'), 'foo']
if hasattr(e, '__setstate__'):
state = {
'tag': 'tag',
'_children': [None], # non-Element
'attrib': 'attr',
'tail': 'tail',
'text': 'text',
}
self.assertRaises(TypeError, e.__setstate__, state)
if hasattr(e, '__deepcopy__'):
class E(ET.Element):
def __deepcopy__(self, memo):
return None # non-Element
e[:] = [E('bar')]
self.assertRaises(TypeError, copy.deepcopy, e)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_cyclic_gc(self):
class Dummy:
pass
# Test the shortest cycle: d->element->d
d = Dummy()
d.dummyref = ET.Element('joe', attr=d)
wref = weakref.ref(d)
del d
gc_collect()
self.assertIsNone(wref())
# A longer cycle: d->e->e2->d
e = ET.Element('joe')
d = Dummy()
d.dummyref = e
wref = weakref.ref(d)
e2 = ET.SubElement(e, 'foo', attr=d)
del d, e, e2
gc_collect()
self.assertIsNone(wref())
# A cycle between Element objects as children of one another
# e1->e2->e3->e1
e1 = ET.Element('e1')
e2 = ET.Element('e2')
e3 = ET.Element('e3')
e3.append(e1)
e2.append(e3)
e1.append(e2)
wref = weakref.ref(e1)
del e1, e2, e3
gc_collect()
self.assertIsNone(wref())
def test_weakref(self):
flag = False
def wref_cb(w):
nonlocal flag
flag = True
e = ET.Element('e')
wref = weakref.ref(e, wref_cb)
self.assertEqual(wref().tag, 'e')
del e
self.assertEqual(flag, True)
self.assertEqual(wref(), None)
def test_get_keyword_args(self):
e1 = ET.Element('foo', x=1, y=2, z=3)
self.assertEqual(e1.get('x', default=7), 1)
self.assertEqual(e1.get('w', default=7), 7)
def test_pickle(self):
# issue #16076: the C implementation wasn't pickleable.
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
for dumper, loader in product(self.modules, repeat=2):
e = dumper.Element('foo', bar=42)
e.text = "text goes here"
e.tail = "opposite of head"
dumper.SubElement(e, 'child').append(dumper.Element('grandchild'))
e.append(dumper.Element('child'))
e.findall('.//grandchild')[0].set('attr', 'other value')
e2 = self.pickleRoundTrip(e, 'xml.etree.ElementTree',
dumper, loader, proto)
self.assertEqual(e2.tag, 'foo')
self.assertEqual(e2.attrib['bar'], 42)
self.assertEqual(len(e2), 2)
self.assertEqualElements(e, e2)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_pickle_issue18997(self):
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
for dumper, loader in product(self.modules, repeat=2):
XMLTEXT = """<?xml version="1.0"?>
<group><dogs>4</dogs>
</group>"""
e1 = dumper.fromstring(XMLTEXT)
if hasattr(e1, '__getstate__'):
self.assertEqual(e1.__getstate__()['tag'], 'group')
e2 = self.pickleRoundTrip(e1, 'xml.etree.ElementTree',
dumper, loader, proto)
self.assertEqual(e2.tag, 'group')
self.assertEqual(e2[0].tag, 'dogs')
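# The tests in this class hand the Element implementation objects whose
# comparison/indexing hooks mutate the tree mid-operation; such mutation
# may raise, but must never crash the interpreter or corrupt memory.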
class BadElementTest(ElementTestCase, unittest.TestCase):
def test_extend_mutable_list(self):
class X:
@property
def __class__(self):
L[:] = [ET.Element('baz')]
return ET.Element
L = [X()]
e = ET.Element('foo')
try:
e.extend(L)
except TypeError:
pass
class Y(X, ET.Element):
pass
L = [Y('x')]
e = ET.Element('foo')
e.extend(L)
def test_extend_mutable_list2(self):
class X:
@property
def __class__(self):
del L[:]
return ET.Element
L = [X(), ET.Element('baz')]
e = ET.Element('foo')
try:
e.extend(L)
except TypeError:
pass
class Y(X, ET.Element):
pass
L = [Y('bar'), ET.Element('baz')]
e = ET.Element('foo')
e.extend(L)
@unittest.skip("TODO: RUSTPYTHON, hangs")
def test_remove_with_mutating(self):
class X(ET.Element):
def __eq__(self, o):
del e[:]
return False
e = ET.Element('foo')
e.extend([X('bar')])
self.assertRaises(ValueError, e.remove, ET.Element('baz'))
e = ET.Element('foo')
e.extend([ET.Element('bar')])
self.assertRaises(ValueError, e.remove, X('baz'))
@unittest.skipIf(sys.platform == "win32", "TODO: RUSTPYTHON, thread 'main' has overflowed its stack")
def test_recursive_repr(self):
# Issue #25455
e = ET.Element('foo')
with swap_attr(e, 'tag', e):
with self.assertRaises(RuntimeError):
repr(e) # Should not crash
def test_element_get_text(self):
# Issue #27863
class X(str):
def __del__(self):
try:
elem.text
except NameError:
pass
b = ET.TreeBuilder()
b.start('tag', {})
b.data('ABCD')
b.data(X('EFGH'))
b.data('IJKL')
b.end('tag')
elem = b.close()
self.assertEqual(elem.text, 'ABCDEFGHIJKL')
def test_element_get_tail(self):
# Issue #27863
class X(str):
def __del__(self):
try:
elem[0].tail
except NameError:
pass
b = ET.TreeBuilder()
b.start('root', {})
b.start('tag', {})
b.end('tag')
b.data('ABCD')
b.data(X('EFGH'))
b.data('IJKL')
b.end('root')
elem = b.close()
self.assertEqual(elem[0].tail, 'ABCDEFGHIJKL')
@unittest.skip("TODO: RUSTPYTHON, hangs")
def test_subscr(self):
# Issue #27863
class X:
def __index__(self):
del e[:]
return 1
e = ET.Element('elem')
e.append(ET.Element('child'))
e[:X()] # shouldn't crash
e.append(ET.Element('child'))
e[0:10:X()] # shouldn't crash
def test_ass_subscr(self):
# Issue #27863
class X:
def __index__(self):
e[:] = []
return 1
e = ET.Element('elem')
for _ in range(10):
e.insert(0, ET.Element('child'))
e[0:10:X()] = [] # shouldn't crash
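# TreeBuilder(element_factory=...) lets the caller supply the callable
# used to create each element from (tag, attrib); returning an object
# without Element's interface (a plain list here) should surface as an
# AttributeError rather than a crash.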
def test_treebuilder_start(self):
# Issue #27863
def element_factory(x, y):
return []
b = ET.TreeBuilder(element_factory=element_factory)
b.start('tag', {})
b.data('ABCD')
self.assertRaises(AttributeError, b.start, 'tag2', {})
del b
gc_collect()
def test_treebuilder_end(self):
# Issue #27863
def element_factory(x, y):
return []
b = ET.TreeBuilder(element_factory=element_factory)
b.start('tag', {})
b.data('ABCD')
self.assertRaises(AttributeError, b.end, 'tag')
del b
gc_collect()
class MutatingElementPath(str):
def __new__(cls, elem, *args):
self = str.__new__(cls, *args)
self.elem = elem
return self
def __eq__(self, o):
del self.elem[:]
return True
MutatingElementPath.__hash__ = str.__hash__
class BadElementPath(str):
def __eq__(self, o):
raise 1/0
BadElementPath.__hash__ = str.__hash__
class BadElementPathTest(ElementTestCase, unittest.TestCase):
def setUp(self):
super().setUp()
from xml.etree import ElementPath
self.path_cache = ElementPath._cache
ElementPath._cache = {}
def tearDown(self):
from xml.etree import ElementPath
ElementPath._cache = self.path_cache
super().tearDown()
def test_find_with_mutating(self):
e = ET.Element('foo')
e.extend([ET.Element('bar')])
e.find(MutatingElementPath(e, 'x'))
def test_find_with_error(self):
e = ET.Element('foo')
e.extend([ET.Element('bar')])
try:
e.find(BadElementPath('x'))
except ZeroDivisionError:
pass
def test_findtext_with_mutating(self):
e = ET.Element('foo')
e.extend([ET.Element('bar')])
e.findtext(MutatingElementPath(e, 'x'))
def test_findtext_with_error(self):
e = ET.Element('foo')
e.extend([ET.Element('bar')])
try:
e.findtext(BadElementPath('x'))
except ZeroDivisionError:
pass
def test_findall_with_mutating(self):
e = ET.Element('foo')
e.extend([ET.Element('bar')])
e.findall(MutatingElementPath(e, 'x'))
def test_findall_with_error(self):
e = ET.Element('foo')
e.extend([ET.Element('bar')])
try:
e.findall(BadElementPath('x'))
except ZeroDivisionError:
pass
class ElementTreeTypeTest(unittest.TestCase):
def test_istype(self):
self.assertIsInstance(ET.ParseError, type)
self.assertIsInstance(ET.QName, type)
self.assertIsInstance(ET.ElementTree, type)
self.assertIsInstance(ET.Element, type)
self.assertIsInstance(ET.TreeBuilder, type)
self.assertIsInstance(ET.XMLParser, type)
def test_Element_subclass_trivial(self):
class MyElement(ET.Element):
pass
mye = MyElement('foo')
self.assertIsInstance(mye, ET.Element)
self.assertIsInstance(mye, MyElement)
self.assertEqual(mye.tag, 'foo')
# test that attribute assignment works (issue 14849)
mye.text = "joe"
self.assertEqual(mye.text, "joe")
def test_Element_subclass_constructor(self):
class MyElement(ET.Element):
def __init__(self, tag, attrib={}, **extra):
super(MyElement, self).__init__(tag + '__', attrib, **extra)
mye = MyElement('foo', {'a': 1, 'b': 2}, c=3, d=4)
self.assertEqual(mye.tag, 'foo__')
self.assertEqual(sorted(mye.items()),
[('a', 1), ('b', 2), ('c', 3), ('d', 4)])
def test_Element_subclass_new_method(self):
class MyElement(ET.Element):
def newmethod(self):
return self.tag
mye = MyElement('joe')
self.assertEqual(mye.newmethod(), 'joe')
def test_Element_subclass_find(self):
class MyElement(ET.Element):
pass
e = ET.Element('foo')
e.text = 'text'
sub = MyElement('bar')
sub.text = 'subtext'
e.append(sub)
self.assertEqual(e.findtext('bar'), 'subtext')
self.assertEqual(e.find('bar').tag, 'bar')
found = list(e.findall('bar'))
self.assertEqual(len(found), 1, found)
self.assertEqual(found[0].tag, 'bar')
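# The find*/findall tests below exercise ElementTree's limited XPath
# subset: child/descendant steps, '.', '..', positional predicates such
# as [1] and [last()], and attribute/text predicates such as
# [@class='a'] or [tag='subtext'].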
class ElementFindTest(unittest.TestCase):
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_find_simple(self):
e = ET.XML(SAMPLE_XML)
self.assertEqual(e.find('tag').tag, 'tag')
self.assertEqual(e.find('section/tag').tag, 'tag')
self.assertEqual(e.find('./tag').tag, 'tag')
e[2] = ET.XML(SAMPLE_SECTION)
self.assertEqual(e.find('section/nexttag').tag, 'nexttag')
self.assertEqual(e.findtext('./tag'), 'text')
self.assertEqual(e.findtext('section/tag'), 'subtext')
# section/nexttag is found but has no text
self.assertEqual(e.findtext('section/nexttag'), '')
self.assertEqual(e.findtext('section/nexttag', 'default'), '')
# tog doesn't exist and 'default' kicks in
self.assertIsNone(e.findtext('tog'))
self.assertEqual(e.findtext('tog', 'default'), 'default')
# Issue #16922
self.assertEqual(ET.XML('<tag><empty /></tag>').findtext('empty'), '')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_find_xpath(self):
LINEAR_XML = '''
<body>
<tag class='a'/>
<tag class='b'/>
<tag class='c'/>
<tag class='d'/>
</body>'''
e = ET.XML(LINEAR_XML)
# Test for numeric indexing and last()
self.assertEqual(e.find('./tag[1]').attrib['class'], 'a')
self.assertEqual(e.find('./tag[2]').attrib['class'], 'b')
self.assertEqual(e.find('./tag[last()]').attrib['class'], 'd')
self.assertEqual(e.find('./tag[last()-1]').attrib['class'], 'c')
self.assertEqual(e.find('./tag[last()-2]').attrib['class'], 'b')
self.assertRaisesRegex(SyntaxError, 'XPath', e.find, './tag[0]')
self.assertRaisesRegex(SyntaxError, 'XPath', e.find, './tag[-1]')
self.assertRaisesRegex(SyntaxError, 'XPath', e.find, './tag[last()-0]')
self.assertRaisesRegex(SyntaxError, 'XPath', e.find, './tag[last()+1]')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_findall(self):
e = ET.XML(SAMPLE_XML)
e[2] = ET.XML(SAMPLE_SECTION)
self.assertEqual(summarize_list(e.findall('.')), ['body'])
self.assertEqual(summarize_list(e.findall('tag')), ['tag', 'tag'])
self.assertEqual(summarize_list(e.findall('tog')), [])
self.assertEqual(summarize_list(e.findall('tog/foo')), [])
self.assertEqual(summarize_list(e.findall('*')),
['tag', 'tag', 'section'])
self.assertEqual(summarize_list(e.findall('.//tag')),
['tag'] * 4)
self.assertEqual(summarize_list(e.findall('section/tag')), ['tag'])
self.assertEqual(summarize_list(e.findall('section//tag')), ['tag'] * 2)
self.assertEqual(summarize_list(e.findall('section/*')),
['tag', 'nexttag', 'nextsection'])
self.assertEqual(summarize_list(e.findall('section//*')),
['tag', 'nexttag', 'nextsection', 'tag'])
self.assertEqual(summarize_list(e.findall('section/.//*')),
['tag', 'nexttag', 'nextsection', 'tag'])
self.assertEqual(summarize_list(e.findall('*/*')),
['tag', 'nexttag', 'nextsection'])
self.assertEqual(summarize_list(e.findall('*//*')),
['tag', 'nexttag', 'nextsection', 'tag'])
self.assertEqual(summarize_list(e.findall('*/tag')), ['tag'])
self.assertEqual(summarize_list(e.findall('*/./tag')), ['tag'])
self.assertEqual(summarize_list(e.findall('./tag')), ['tag'] * 2)
self.assertEqual(summarize_list(e.findall('././tag')), ['tag'] * 2)
self.assertEqual(summarize_list(e.findall('.//tag[@class]')),
['tag'] * 3)
self.assertEqual(summarize_list(e.findall('.//tag[@class="a"]')),
['tag'])
self.assertEqual(summarize_list(e.findall('.//tag[@class="b"]')),
['tag'] * 2)
self.assertEqual(summarize_list(e.findall('.//tag[@id]')),
['tag'])
self.assertEqual(summarize_list(e.findall('.//section[tag]')),
['section'])
self.assertEqual(summarize_list(e.findall('.//section[element]')), [])
self.assertEqual(summarize_list(e.findall('../tag')), [])
self.assertEqual(summarize_list(e.findall('section/../tag')),
['tag'] * 2)
self.assertEqual(e.findall('section//'), e.findall('section//*'))
self.assertEqual(summarize_list(e.findall(".//section[tag='subtext']")),
['section'])
self.assertEqual(summarize_list(e.findall(".//section[tag ='subtext']")),
['section'])
self.assertEqual(summarize_list(e.findall(".//section[tag= 'subtext']")),
['section'])
self.assertEqual(summarize_list(e.findall(".//section[tag = 'subtext']")),
['section'])
self.assertEqual(summarize_list(e.findall(".//section[ tag = 'subtext' ]")),
['section'])
self.assertEqual(summarize_list(e.findall(".//tag[.='subtext']")),
['tag'])
self.assertEqual(summarize_list(e.findall(".//tag[. ='subtext']")),
['tag'])
self.assertEqual(summarize_list(e.findall('.//tag[.= "subtext"]')),
['tag'])
self.assertEqual(summarize_list(e.findall('.//tag[ . = "subtext" ]')),
['tag'])
self.assertEqual(summarize_list(e.findall(".//tag[. = 'subtext']")),
['tag'])
self.assertEqual(summarize_list(e.findall(".//tag[. = 'subtext ']")),
[])
self.assertEqual(summarize_list(e.findall(".//tag[.= ' subtext']")),
[])
# duplicate section => 2x tag matches
e[1] = e[2]
self.assertEqual(summarize_list(e.findall(".//section[tag = 'subtext']")),
['section', 'section'])
self.assertEqual(summarize_list(e.findall(".//tag[. = 'subtext']")),
['tag', 'tag'])
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_test_find_with_ns(self):
e = ET.XML(SAMPLE_XML_NS)
self.assertEqual(summarize_list(e.findall('tag')), [])
self.assertEqual(
summarize_list(e.findall("{http://effbot.org/ns}tag")),
['{http://effbot.org/ns}tag'] * 2)
self.assertEqual(
summarize_list(e.findall(".//{http://effbot.org/ns}tag")),
['{http://effbot.org/ns}tag'] * 3)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_findall_different_nsmaps(self):
root = ET.XML('''
<a xmlns:x="X" xmlns:y="Y">
<x:b><c/></x:b>
<b/>
<c><x:b/><b/></c><y:b/>
</a>''')
nsmap = {'xx': 'X'}
self.assertEqual(len(root.findall(".//xx:b", namespaces=nsmap)), 2)
self.assertEqual(len(root.findall(".//b", namespaces=nsmap)), 2)
nsmap = {'xx': 'Y'}
self.assertEqual(len(root.findall(".//xx:b", namespaces=nsmap)), 1)
self.assertEqual(len(root.findall(".//b", namespaces=nsmap)), 2)
nsmap = {'xx': 'X', '': 'Y'}
self.assertEqual(len(root.findall(".//xx:b", namespaces=nsmap)), 2)
self.assertEqual(len(root.findall(".//b", namespaces=nsmap)), 1)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_findall_wildcard(self):
root = ET.XML('''
<a xmlns:x="X" xmlns:y="Y">
<x:b><c/></x:b>
<b/>
<c><x:b/><b/></c><y:b/>
</a>''')
root.append(ET.Comment('test'))
self.assertEqual(summarize_list(root.findall("{*}b")),
['{X}b', 'b', '{Y}b'])
self.assertEqual(summarize_list(root.findall("{*}c")),
['c'])
self.assertEqual(summarize_list(root.findall("{X}*")),
['{X}b'])
self.assertEqual(summarize_list(root.findall("{Y}*")),
['{Y}b'])
self.assertEqual(summarize_list(root.findall("{}*")),
['b', 'c'])
self.assertEqual(summarize_list(root.findall("{}b")), # only for consistency
['b'])
self.assertEqual(summarize_list(root.findall("{}b")),
summarize_list(root.findall("b")))
self.assertEqual(summarize_list(root.findall("{*}*")),
['{X}b', 'b', 'c', '{Y}b'])
# This is an unfortunate difference, but that's how find('*') works.
self.assertEqual(summarize_list(root.findall("{*}*") + [root[-1]]),
summarize_list(root.findall("*")))
self.assertEqual(summarize_list(root.findall(".//{*}b")),
['{X}b', 'b', '{X}b', 'b', '{Y}b'])
self.assertEqual(summarize_list(root.findall(".//{*}c")),
['c', 'c'])
self.assertEqual(summarize_list(root.findall(".//{X}*")),
['{X}b', '{X}b'])
self.assertEqual(summarize_list(root.findall(".//{Y}*")),
['{Y}b'])
self.assertEqual(summarize_list(root.findall(".//{}*")),
['c', 'b', 'c', 'b'])
self.assertEqual(summarize_list(root.findall(".//{}b")), # only for consistency
['b', 'b'])
self.assertEqual(summarize_list(root.findall(".//{}b")),
summarize_list(root.findall(".//b")))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_bad_find(self):
e = ET.XML(SAMPLE_XML)
with self.assertRaisesRegex(SyntaxError, 'cannot use absolute path'):
e.findall('/tag')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_find_through_ElementTree(self):
e = ET.XML(SAMPLE_XML)
self.assertEqual(ET.ElementTree(e).find('tag').tag, 'tag')
self.assertEqual(ET.ElementTree(e).findtext('tag'), 'text')
self.assertEqual(summarize_list(ET.ElementTree(e).findall('tag')),
['tag'] * 2)
# this produces a warning
msg = ("This search is broken in 1.3 and earlier, and will be fixed "
"in a future version. If you rely on the current behaviour, "
"change it to '.+'")
with self.assertWarnsRegex(FutureWarning, msg):
it = ET.ElementTree(e).findall('//tag')
self.assertEqual(summarize_list(it), ['tag'] * 3)
class ElementIterTest(unittest.TestCase):
def _ilist(self, elem, tag=None):
return summarize_list(elem.iter(tag))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_basic(self):
doc = ET.XML("<html><body>this is a <i>paragraph</i>.</body>..</html>")
self.assertEqual(self._ilist(doc), ['html', 'body', 'i'])
self.assertEqual(self._ilist(doc.find('body')), ['body', 'i'])
self.assertEqual(next(doc.iter()).tag, 'html')
self.assertEqual(''.join(doc.itertext()), 'this is a paragraph...')
self.assertEqual(''.join(doc.find('body').itertext()),
'this is a paragraph.')
self.assertEqual(next(doc.itertext()), 'this is a ')
# iterparse should return an iterator
sourcefile = serialize(doc, to_string=False)
self.assertEqual(next(ET.iterparse(sourcefile))[0], 'end')
# With an explicit parser too (issue #9708)
sourcefile = serialize(doc, to_string=False)
parser = ET.XMLParser(target=ET.TreeBuilder())
self.assertEqual(next(ET.iterparse(sourcefile, parser=parser))[0],
'end')
tree = ET.ElementTree(None)
self.assertRaises(AttributeError, tree.iter)
# Issue #16913
doc = ET.XML("<root>a&<sub>b&</sub>c&</root>")
self.assertEqual(''.join(doc.itertext()), 'a&b&c&')
def test_corners(self):
# single root, no subelements
a = ET.Element('a')
self.assertEqual(self._ilist(a), ['a'])
# one child
b = ET.SubElement(a, 'b')
self.assertEqual(self._ilist(a), ['a', 'b'])
# one child and one grandchild
c = ET.SubElement(b, 'c')
self.assertEqual(self._ilist(a), ['a', 'b', 'c'])
# two children, only first with grandchild
d = ET.SubElement(a, 'd')
self.assertEqual(self._ilist(a), ['a', 'b', 'c', 'd'])
# replace first child by second
a[0] = a[1]
del a[1]
self.assertEqual(self._ilist(a), ['a', 'd'])
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_iter_by_tag(self):
doc = ET.XML('''
<document>
<house>
<room>bedroom1</room>
<room>bedroom2</room>
</house>
<shed>nothing here
</shed>
<house>
<room>bedroom8</room>
</house>
</document>''')
self.assertEqual(self._ilist(doc, 'room'), ['room'] * 3)
self.assertEqual(self._ilist(doc, 'house'), ['house'] * 2)
# test that iter also accepts 'tag' as a keyword arg
self.assertEqual(
summarize_list(doc.iter(tag='room')),
['room'] * 3)
# make sure both tag=None and tag='*' return all tags
all_tags = ['document', 'house', 'room', 'room',
'shed', 'house', 'room']
self.assertEqual(summarize_list(doc.iter()), all_tags)
self.assertEqual(self._ilist(doc), all_tags)
self.assertEqual(self._ilist(doc, '*'), all_tags)
# TODO: RUSTPYTHON
@unittest.expectedFailure
# Element.getiterator() is deprecated.
@checkwarnings(("This method will be removed in future versions. "
"Use .+ instead.", DeprecationWarning))
def test_getiterator(self):
doc = ET.XML('''
<document>
<house>
<room>bedroom1</room>
<room>bedroom2</room>
</house>
<shed>nothing here
</shed>
<house>
<room>bedroom8</room>
</house>
</document>''')
self.assertEqual(summarize_list(doc.getiterator('room')),
['room'] * 3)
self.assertEqual(summarize_list(doc.getiterator('house')),
['house'] * 2)
# test that getiterator also accepts 'tag' as a keyword arg
self.assertEqual(
summarize_list(doc.getiterator(tag='room')),
['room'] * 3)
# make sure both tag=None and tag='*' return all tags
all_tags = ['document', 'house', 'room', 'room',
'shed', 'house', 'room']
self.assertEqual(summarize_list(doc.getiterator()), all_tags)
self.assertEqual(summarize_list(doc.getiterator(None)), all_tags)
self.assertEqual(summarize_list(doc.getiterator('*')), all_tags)
def test_copy(self):
a = ET.Element('a')
it = a.iter()
with self.assertRaises(TypeError):
copy.copy(it)
def test_pickle(self):
a = ET.Element('a')
it = a.iter()
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises((TypeError, pickle.PicklingError)):
pickle.dumps(it, proto)
class TreeBuilderTest(unittest.TestCase):
sample1 = ('<!DOCTYPE html PUBLIC'
' "-//W3C//DTD XHTML 1.0 Transitional//EN"'
' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">'
'<html>text<div>subtext</div>tail</html>')
sample2 = '''<toplevel>sometext</toplevel>'''
def _check_sample1_element(self, e):
self.assertEqual(e.tag, 'html')
self.assertEqual(e.text, 'text')
self.assertEqual(e.tail, None)
self.assertEqual(e.attrib, {})
children = list(e)
self.assertEqual(len(children), 1)
child = children[0]
self.assertEqual(child.tag, 'div')
self.assertEqual(child.text, 'subtext')
self.assertEqual(child.tail, 'tail')
self.assertEqual(child.attrib, {})
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_dummy_builder(self):
class BaseDummyBuilder:
def close(self):
return 42
class DummyBuilder(BaseDummyBuilder):
data = start = end = lambda *a: None
parser = ET.XMLParser(target=DummyBuilder())
parser.feed(self.sample1)
self.assertEqual(parser.close(), 42)
parser = ET.XMLParser(target=BaseDummyBuilder())
parser.feed(self.sample1)
self.assertEqual(parser.close(), 42)
parser = ET.XMLParser(target=object())
parser.feed(self.sample1)
self.assertIsNone(parser.close())
def test_treebuilder_comment(self):
b = ET.TreeBuilder()
self.assertEqual(b.comment('ctext').tag, ET.Comment)
self.assertEqual(b.comment('ctext').text, 'ctext')
b = ET.TreeBuilder(comment_factory=ET.Comment)
self.assertEqual(b.comment('ctext').tag, ET.Comment)
self.assertEqual(b.comment('ctext').text, 'ctext')
b = ET.TreeBuilder(comment_factory=len)
self.assertEqual(b.comment('ctext'), len('ctext'))
def test_treebuilder_pi(self):
b = ET.TreeBuilder()
self.assertEqual(b.pi('target', None).tag, ET.PI)
self.assertEqual(b.pi('target', None).text, 'target')
b = ET.TreeBuilder(pi_factory=ET.PI)
self.assertEqual(b.pi('target').tag, ET.PI)
self.assertEqual(b.pi('target').text, "target")
self.assertEqual(b.pi('pitarget', ' text ').tag, ET.PI)
self.assertEqual(b.pi('pitarget', ' text ').text, "pitarget text ")
b = ET.TreeBuilder(pi_factory=lambda target, text: (len(target), text))
self.assertEqual(b.pi('target'), (len('target'), None))
self.assertEqual(b.pi('pitarget', ' text '), (len('pitarget'), ' text '))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_late_tail(self):
# Issue #37399: The tail of an ignored comment could overwrite the text before it.
class TreeBuilderSubclass(ET.TreeBuilder):
pass
xml = "<a>text<!-- comment -->tail</a>"
a = ET.fromstring(xml)
self.assertEqual(a.text, "texttail")
parser = ET.XMLParser(target=TreeBuilderSubclass())
parser.feed(xml)
a = parser.close()
self.assertEqual(a.text, "texttail")
xml = "<a>text<?pi data?>tail</a>"
a = ET.fromstring(xml)
self.assertEqual(a.text, "texttail")
xml = "<a>text<?pi data?>tail</a>"
parser = ET.XMLParser(target=TreeBuilderSubclass())
parser.feed(xml)
a = parser.close()
self.assertEqual(a.text, "texttail")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_late_tail_mix_pi_comments(self):
# Issue #37399: The tail of an ignored comment could overwrite the text before it.
# Test appending tails to comments/pis.
class TreeBuilderSubclass(ET.TreeBuilder):
pass
xml = "<a>text<?pi1?> <!-- comment -->\n<?pi2?>tail</a>"
parser = ET.XMLParser(target=ET.TreeBuilder(insert_comments=True))
parser.feed(xml)
a = parser.close()
self.assertEqual(a[0].text, ' comment ')
self.assertEqual(a[0].tail, '\ntail')
self.assertEqual(a.text, "text ")
parser = ET.XMLParser(target=TreeBuilderSubclass(insert_comments=True))
parser.feed(xml)
a = parser.close()
self.assertEqual(a[0].text, ' comment ')
self.assertEqual(a[0].tail, '\ntail')
self.assertEqual(a.text, "text ")
xml = "<a>text<!-- comment -->\n<?pi data?>tail</a>"
parser = ET.XMLParser(target=ET.TreeBuilder(insert_pis=True))
parser.feed(xml)
a = parser.close()
self.assertEqual(a[0].text, 'pi data')
self.assertEqual(a[0].tail, 'tail')
self.assertEqual(a.text, "text\n")
parser = ET.XMLParser(target=TreeBuilderSubclass(insert_pis=True))
parser.feed(xml)
a = parser.close()
self.assertEqual(a[0].text, 'pi data')
self.assertEqual(a[0].tail, 'tail')
self.assertEqual(a.text, "text\n")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_treebuilder_elementfactory_none(self):
parser = ET.XMLParser(target=ET.TreeBuilder(element_factory=None))
parser.feed(self.sample1)
e = parser.close()
self._check_sample1_element(e)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_subclass(self):
class MyTreeBuilder(ET.TreeBuilder):
def foobar(self, x):
return x * 2
tb = MyTreeBuilder()
self.assertEqual(tb.foobar(10), 20)
parser = ET.XMLParser(target=tb)
parser.feed(self.sample1)
e = parser.close()
self._check_sample1_element(e)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_subclass_comment_pi(self):
class MyTreeBuilder(ET.TreeBuilder):
def foobar(self, x):
return x * 2
tb = MyTreeBuilder(comment_factory=ET.Comment, pi_factory=ET.PI)
self.assertEqual(tb.foobar(10), 20)
parser = ET.XMLParser(target=tb)
parser.feed(self.sample1)
parser.feed('<!-- a comment--><?and a pi?>')
e = parser.close()
self._check_sample1_element(e)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_element_factory(self):
lst = []
def myfactory(tag, attrib):
nonlocal lst
lst.append(tag)
return ET.Element(tag, attrib)
tb = ET.TreeBuilder(element_factory=myfactory)
parser = ET.XMLParser(target=tb)
parser.feed(self.sample2)
parser.close()
self.assertEqual(lst, ['toplevel'])
def _check_element_factory_class(self, cls):
tb = ET.TreeBuilder(element_factory=cls)
parser = ET.XMLParser(target=tb)
parser.feed(self.sample1)
e = parser.close()
self.assertIsInstance(e, cls)
self._check_sample1_element(e)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_element_factory_subclass(self):
class MyElement(ET.Element):
pass
self._check_element_factory_class(MyElement)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_element_factory_pure_python_subclass(self):
# Mimic SimpleTAL's behaviour (issue #16089): both versions of
# TreeBuilder should be able to cope with a subclass of the
# pure Python Element class.
base = ET._Element_Py
# Not from a C extension
self.assertEqual(base.__module__, 'xml.etree.ElementTree')
# Force some multiple inheritance with a C class to make things
# more interesting.
class MyElement(base, ValueError):
pass
self._check_element_factory_class(MyElement)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_doctype(self):
class DoctypeParser:
_doctype = None
def doctype(self, name, pubid, system):
self._doctype = (name, pubid, system)
def close(self):
return self._doctype
parser = ET.XMLParser(target=DoctypeParser())
parser.feed(self.sample1)
self.assertEqual(parser.close(),
('html', '-//W3C//DTD XHTML 1.0 Transitional//EN',
'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd'))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_builder_lookup_errors(self):
class RaisingBuilder:
def __init__(self, raise_in=None, what=ValueError):
self.raise_in = raise_in
self.what = what
def __getattr__(self, name):
if name == self.raise_in:
raise self.what(self.raise_in)
def handle(*args):
pass
return handle
ET.XMLParser(target=RaisingBuilder())
# cET also checks for 'close' and 'doctype', PyET does it only at need
for event in ('start', 'data', 'end', 'comment', 'pi'):
with self.assertRaisesRegex(ValueError, event):
ET.XMLParser(target=RaisingBuilder(event))
ET.XMLParser(target=RaisingBuilder(what=AttributeError))
for event in ('start', 'data', 'end', 'comment', 'pi'):
parser = ET.XMLParser(target=RaisingBuilder(event, what=AttributeError))
parser.feed(self.sample1)
self.assertIsNone(parser.close())
class XMLParserTest(unittest.TestCase):
sample1 = b'<file><line>22</line></file>'
sample2 = (b'<!DOCTYPE html PUBLIC'
b' "-//W3C//DTD XHTML 1.0 Transitional//EN"'
b' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">'
b'<html>text</html>')
sample3 = ('<?xml version="1.0" encoding="iso-8859-1"?>\n'
'<money value="$\xa3\u20ac\U0001017b">$\xa3\u20ac\U0001017b</money>')
def _check_sample_element(self, e):
self.assertEqual(e.tag, 'file')
self.assertEqual(e[0].tag, 'line')
self.assertEqual(e[0].text, '22')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_constructor_args(self):
parser2 = ET.XMLParser(encoding='utf-8',
target=ET.TreeBuilder())
parser2.feed(self.sample1)
self._check_sample_element(parser2.close())
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_subclass(self):
class MyParser(ET.XMLParser):
pass
parser = MyParser()
parser.feed(self.sample1)
self._check_sample_element(parser.close())
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_doctype_warning(self):
with warnings.catch_warnings():
warnings.simplefilter('error', DeprecationWarning)
parser = ET.XMLParser()
parser.feed(self.sample2)
parser.close()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_subclass_doctype(self):
_doctype = None
class MyParserWithDoctype(ET.XMLParser):
def doctype(self, *args, **kwargs):
nonlocal _doctype
_doctype = (args, kwargs)
parser = MyParserWithDoctype()
with self.assertWarnsRegex(RuntimeWarning, 'doctype'):
parser.feed(self.sample2)
parser.close()
self.assertIsNone(_doctype)
_doctype = _doctype2 = None
with warnings.catch_warnings():
warnings.simplefilter('error', DeprecationWarning)
warnings.simplefilter('error', RuntimeWarning)
class DoctypeParser:
def doctype(self, name, pubid, system):
nonlocal _doctype2
_doctype2 = (name, pubid, system)
parser = MyParserWithDoctype(target=DoctypeParser())
parser.feed(self.sample2)
parser.close()
self.assertIsNone(_doctype)
self.assertEqual(_doctype2,
('html', '-//W3C//DTD XHTML 1.0 Transitional//EN',
'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd'))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_inherited_doctype(self):
'''Ensure that ordinary usage is not deprecated (Issue 19176)'''
with warnings.catch_warnings():
warnings.simplefilter('error', DeprecationWarning)
warnings.simplefilter('error', RuntimeWarning)
class MyParserWithoutDoctype(ET.XMLParser):
pass
parser = MyParserWithoutDoctype()
parser.feed(self.sample2)
parser.close()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_parse_string(self):
parser = ET.XMLParser(target=ET.TreeBuilder())
parser.feed(self.sample3)
e = parser.close()
self.assertEqual(e.tag, 'money')
self.assertEqual(e.attrib['value'], '$\xa3\u20ac\U0001017b')
self.assertEqual(e.text, '$\xa3\u20ac\U0001017b')
class NamespaceParseTest(unittest.TestCase):
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_find_with_namespace(self):
nsmap = {'h': 'hello', 'f': 'foo'}
doc = ET.fromstring(SAMPLE_XML_NS_ELEMS)
self.assertEqual(len(doc.findall('{hello}table', nsmap)), 1)
self.assertEqual(len(doc.findall('.//{hello}td', nsmap)), 2)
self.assertEqual(len(doc.findall('.//{foo}name', nsmap)), 1)
class ElementSlicingTest(unittest.TestCase):
def _elem_tags(self, elemlist):
return [e.tag for e in elemlist]
def _subelem_tags(self, elem):
return self._elem_tags(list(elem))
def _make_elem_with_children(self, numchildren):
"""Create an Element with a tag 'a', with the given amount of children
named 'a0', 'a1' ... and so on.
"""
e = ET.Element('a')
for i in range(numchildren):
ET.SubElement(e, 'a%s' % i)
return e
def test_getslice_single_index(self):
e = self._make_elem_with_children(10)
self.assertEqual(e[1].tag, 'a1')
self.assertEqual(e[-2].tag, 'a8')
self.assertRaises(IndexError, lambda: e[12])
self.assertRaises(IndexError, lambda: e[-12])
def test_getslice_range(self):
e = self._make_elem_with_children(6)
self.assertEqual(self._elem_tags(e[3:]), ['a3', 'a4', 'a5'])
self.assertEqual(self._elem_tags(e[3:6]), ['a3', 'a4', 'a5'])
self.assertEqual(self._elem_tags(e[3:16]), ['a3', 'a4', 'a5'])
self.assertEqual(self._elem_tags(e[3:5]), ['a3', 'a4'])
self.assertEqual(self._elem_tags(e[3:-1]), ['a3', 'a4'])
self.assertEqual(self._elem_tags(e[:2]), ['a0', 'a1'])
def test_getslice_steps(self):
e = self._make_elem_with_children(10)
self.assertEqual(self._elem_tags(e[8:10:1]), ['a8', 'a9'])
self.assertEqual(self._elem_tags(e[::3]), ['a0', 'a3', 'a6', 'a9'])
self.assertEqual(self._elem_tags(e[::8]), ['a0', 'a8'])
self.assertEqual(self._elem_tags(e[1::8]), ['a1', 'a9'])
self.assertEqual(self._elem_tags(e[3::sys.maxsize]), ['a3'])
self.assertEqual(self._elem_tags(e[3::sys.maxsize<<64]), ['a3'])
def test_getslice_negative_steps(self):
e = self._make_elem_with_children(4)
self.assertEqual(self._elem_tags(e[::-1]), ['a3', 'a2', 'a1', 'a0'])
self.assertEqual(self._elem_tags(e[::-2]), ['a3', 'a1'])
self.assertEqual(self._elem_tags(e[3::-sys.maxsize]), ['a3'])
self.assertEqual(self._elem_tags(e[3::-sys.maxsize-1]), ['a3'])
self.assertEqual(self._elem_tags(e[3::-sys.maxsize<<64]), ['a3'])
def test_delslice(self):
e = self._make_elem_with_children(4)
del e[0:2]
self.assertEqual(self._subelem_tags(e), ['a2', 'a3'])
e = self._make_elem_with_children(4)
del e[0:]
self.assertEqual(self._subelem_tags(e), [])
e = self._make_elem_with_children(4)
del e[::-1]
self.assertEqual(self._subelem_tags(e), [])
e = self._make_elem_with_children(4)
del e[::-2]
self.assertEqual(self._subelem_tags(e), ['a0', 'a2'])
e = self._make_elem_with_children(4)
del e[1::2]
self.assertEqual(self._subelem_tags(e), ['a0', 'a2'])
e = self._make_elem_with_children(2)
del e[::2]
self.assertEqual(self._subelem_tags(e), ['a1'])
def test_setslice_single_index(self):
e = self._make_elem_with_children(4)
e[1] = ET.Element('b')
self.assertEqual(self._subelem_tags(e), ['a0', 'b', 'a2', 'a3'])
e[-2] = ET.Element('c')
self.assertEqual(self._subelem_tags(e), ['a0', 'b', 'c', 'a3'])
with self.assertRaises(IndexError):
e[5] = ET.Element('d')
with self.assertRaises(IndexError):
e[-5] = ET.Element('d')
self.assertEqual(self._subelem_tags(e), ['a0', 'b', 'c', 'a3'])
def test_setslice_range(self):
e = self._make_elem_with_children(4)
e[1:3] = [ET.Element('b%s' % i) for i in range(2)]
self.assertEqual(self._subelem_tags(e), ['a0', 'b0', 'b1', 'a3'])
e = self._make_elem_with_children(4)
e[1:3] = [ET.Element('b')]
self.assertEqual(self._subelem_tags(e), ['a0', 'b', 'a3'])
e = self._make_elem_with_children(4)
e[1:3] = [ET.Element('b%s' % i) for i in range(3)]
self.assertEqual(self._subelem_tags(e), ['a0', 'b0', 'b1', 'b2', 'a3'])
def test_setslice_steps(self):
e = self._make_elem_with_children(6)
e[1:5:2] = [ET.Element('b%s' % i) for i in range(2)]
self.assertEqual(self._subelem_tags(e), ['a0', 'b0', 'a2', 'b1', 'a4', 'a5'])
e = self._make_elem_with_children(6)
with self.assertRaises(ValueError):
e[1:5:2] = [ET.Element('b')]
with self.assertRaises(ValueError):
e[1:5:2] = [ET.Element('b%s' % i) for i in range(3)]
with self.assertRaises(ValueError):
e[1:5:2] = []
self.assertEqual(self._subelem_tags(e), ['a0', 'a1', 'a2', 'a3', 'a4', 'a5'])
e = self._make_elem_with_children(4)
e[1::sys.maxsize] = [ET.Element('b')]
self.assertEqual(self._subelem_tags(e), ['a0', 'b', 'a2', 'a3'])
e[1::sys.maxsize<<64] = [ET.Element('c')]
self.assertEqual(self._subelem_tags(e), ['a0', 'c', 'a2', 'a3'])
def test_setslice_negative_steps(self):
e = self._make_elem_with_children(4)
e[2:0:-1] = [ET.Element('b%s' % i) for i in range(2)]
self.assertEqual(self._subelem_tags(e), ['a0', 'b1', 'b0', 'a3'])
e = self._make_elem_with_children(4)
with self.assertRaises(ValueError):
e[2:0:-1] = [ET.Element('b')]
with self.assertRaises(ValueError):
e[2:0:-1] = [ET.Element('b%s' % i) for i in range(3)]
with self.assertRaises(ValueError):
e[2:0:-1] = []
self.assertEqual(self._subelem_tags(e), ['a0', 'a1', 'a2', 'a3'])
e = self._make_elem_with_children(4)
e[1::-sys.maxsize] = [ET.Element('b')]
self.assertEqual(self._subelem_tags(e), ['a0', 'b', 'a2', 'a3'])
e[1::-sys.maxsize-1] = [ET.Element('c')]
self.assertEqual(self._subelem_tags(e), ['a0', 'c', 'a2', 'a3'])
e[1::-sys.maxsize<<64] = [ET.Element('d')]
self.assertEqual(self._subelem_tags(e), ['a0', 'd', 'a2', 'a3'])
class IOTest(unittest.TestCase):
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_encoding(self):
# Test encoding issues.
elem = ET.Element("tag")
elem.text = "abc"
self.assertEqual(serialize(elem), '<tag>abc</tag>')
for enc in ("utf-8", "us-ascii"):
with self.subTest(enc):
self.assertEqual(serialize(elem, encoding=enc),
b'<tag>abc</tag>')
self.assertEqual(serialize(elem, encoding=enc.upper()),
b'<tag>abc</tag>')
for enc in ("iso-8859-1", "utf-16", "utf-32"):
with self.subTest(enc):
self.assertEqual(serialize(elem, encoding=enc),
("<?xml version='1.0' encoding='%s'?>\n"
"<tag>abc</tag>" % enc).encode(enc))
upper = enc.upper()
self.assertEqual(serialize(elem, encoding=upper),
("<?xml version='1.0' encoding='%s'?>\n"
"<tag>abc</tag>" % upper).encode(enc))
elem = ET.Element("tag")
elem.text = "<&\"\'>"
self.assertEqual(serialize(elem), '<tag>&lt;&amp;"\'&gt;</tag>')
self.assertEqual(serialize(elem, encoding="utf-8"),
b'<tag>&lt;&amp;"\'&gt;</tag>')
self.assertEqual(serialize(elem, encoding="us-ascii"),
b'<tag>&lt;&amp;"\'&gt;</tag>')
for enc in ("iso-8859-1", "utf-16", "utf-32"):
self.assertEqual(serialize(elem, encoding=enc),
("<?xml version='1.0' encoding='%s'?>\n"
"<tag>&lt;&amp;\"'&gt;</tag>" % enc).encode(enc))
elem = ET.Element("tag")
elem.attrib["key"] = "<&\"\'>"
self.assertEqual(serialize(elem), '<tag key="&lt;&amp;&quot;\'&gt;" />')
self.assertEqual(serialize(elem, encoding="utf-8"),
b'<tag key="&lt;&amp;&quot;\'&gt;" />')
self.assertEqual(serialize(elem, encoding="us-ascii"),
b'<tag key="&lt;&amp;&quot;\'&gt;" />')
for enc in ("iso-8859-1", "utf-16", "utf-32"):
self.assertEqual(serialize(elem, encoding=enc),
("<?xml version='1.0' encoding='%s'?>\n"
"<tag key=\"&lt;&amp;&quot;'&gt;\" />" % enc).encode(enc))
elem = ET.Element("tag")
elem.text = '\xe5\xf6\xf6<>'
self.assertEqual(serialize(elem), '<tag>\xe5\xf6\xf6&lt;&gt;</tag>')
self.assertEqual(serialize(elem, encoding="utf-8"),
b'<tag>\xc3\xa5\xc3\xb6\xc3\xb6&lt;&gt;</tag>')
self.assertEqual(serialize(elem, encoding="us-ascii"),
b'<tag>&#229;&#246;&#246;&lt;&gt;</tag>')
for enc in ("iso-8859-1", "utf-16", "utf-32"):
self.assertEqual(serialize(elem, encoding=enc),
("<?xml version='1.0' encoding='%s'?>\n"
"<tag>åöö&lt;&gt;</tag>" % enc).encode(enc))
elem = ET.Element("tag")
elem.attrib["key"] = '\xe5\xf6\xf6<>'
self.assertEqual(serialize(elem), '<tag key="\xe5\xf6\xf6&lt;&gt;" />')
self.assertEqual(serialize(elem, encoding="utf-8"),
b'<tag key="\xc3\xa5\xc3\xb6\xc3\xb6&lt;&gt;" />')
self.assertEqual(serialize(elem, encoding="us-ascii"),
b'<tag key="&#229;&#246;&#246;&lt;&gt;" />')
for enc in ("iso-8859-1", "utf-16", "utf-16le", "utf-16be", "utf-32"):
self.assertEqual(serialize(elem, encoding=enc),
("<?xml version='1.0' encoding='%s'?>\n"
"<tag key=\"åöö&lt;&gt;\" />" % enc).encode(enc))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_write_to_filename(self):
self.addCleanup(os_helper.unlink, TESTFN)
tree = ET.ElementTree(ET.XML('''<site />'''))
tree.write(TESTFN)
with open(TESTFN, 'rb') as f:
self.assertEqual(f.read(), b'''<site />''')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_write_to_text_file(self):
self.addCleanup(os_helper.unlink, TESTFN)
tree = ET.ElementTree(ET.XML('''<site />'''))
with open(TESTFN, 'w', encoding='utf-8') as f:
tree.write(f, encoding='unicode')
self.assertFalse(f.closed)
with open(TESTFN, 'rb') as f:
self.assertEqual(f.read(), b'''<site />''')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_write_to_binary_file(self):
self.addCleanup(os_helper.unlink, TESTFN)
tree = ET.ElementTree(ET.XML('''<site />'''))
with open(TESTFN, 'wb') as f:
tree.write(f)
self.assertFalse(f.closed)
with open(TESTFN, 'rb') as f:
self.assertEqual(f.read(), b'''<site />''')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_write_to_binary_file_with_bom(self):
self.addCleanup(os_helper.unlink, TESTFN)
tree = ET.ElementTree(ET.XML('''<site />'''))
# test BOM writing to buffered file
with open(TESTFN, 'wb') as f:
tree.write(f, encoding='utf-16')
self.assertFalse(f.closed)
with open(TESTFN, 'rb') as f:
self.assertEqual(f.read(),
'''<?xml version='1.0' encoding='utf-16'?>\n'''
'''<site />'''.encode("utf-16"))
# test BOM writing to non-buffered file
with open(TESTFN, 'wb', buffering=0) as f:
tree.write(f, encoding='utf-16')
self.assertFalse(f.closed)
with open(TESTFN, 'rb') as f:
self.assertEqual(f.read(),
'''<?xml version='1.0' encoding='utf-16'?>\n'''
'''<site />'''.encode("utf-16"))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_read_from_stringio(self):
tree = ET.ElementTree()
stream = io.StringIO('''<?xml version="1.0"?><site></site>''')
tree.parse(stream)
self.assertEqual(tree.getroot().tag, 'site')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_write_to_stringio(self):
tree = ET.ElementTree(ET.XML('''<site />'''))
stream = io.StringIO()
tree.write(stream, encoding='unicode')
self.assertEqual(stream.getvalue(), '''<site />''')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_read_from_bytesio(self):
tree = ET.ElementTree()
raw = io.BytesIO(b'''<?xml version="1.0"?><site></site>''')
tree.parse(raw)
self.assertEqual(tree.getroot().tag, 'site')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_write_to_bytesio(self):
tree = ET.ElementTree(ET.XML('''<site />'''))
raw = io.BytesIO()
tree.write(raw)
self.assertEqual(raw.getvalue(), b'''<site />''')
class dummy:
pass
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_read_from_user_text_reader(self):
stream = io.StringIO('''<?xml version="1.0"?><site></site>''')
reader = self.dummy()
reader.read = stream.read
tree = ET.ElementTree()
tree.parse(reader)
self.assertEqual(tree.getroot().tag, 'site')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_write_to_user_text_writer(self):
tree = ET.ElementTree(ET.XML('''<site />'''))
stream = io.StringIO()
writer = self.dummy()
writer.write = stream.write
tree.write(writer, encoding='unicode')
self.assertEqual(stream.getvalue(), '''<site />''')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_read_from_user_binary_reader(self):
raw = io.BytesIO(b'''<?xml version="1.0"?><site></site>''')
reader = self.dummy()
reader.read = raw.read
tree = ET.ElementTree()
tree.parse(reader)
self.assertEqual(tree.getroot().tag, 'site')
tree = ET.ElementTree()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_write_to_user_binary_writer(self):
tree = ET.ElementTree(ET.XML('''<site />'''))
raw = io.BytesIO()
writer = self.dummy()
writer.write = raw.write
tree.write(writer)
self.assertEqual(raw.getvalue(), b'''<site />''')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_write_to_user_binary_writer_with_bom(self):
tree = ET.ElementTree(ET.XML('''<site />'''))
raw = io.BytesIO()
writer = self.dummy()
writer.write = raw.write
writer.seekable = lambda: True
writer.tell = raw.tell
tree.write(writer, encoding="utf-16")
self.assertEqual(raw.getvalue(),
'''<?xml version='1.0' encoding='utf-16'?>\n'''
'''<site />'''.encode("utf-16"))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_tostringlist_invariant(self):
root = ET.fromstring('<tag>foo</tag>')
self.assertEqual(
ET.tostring(root, 'unicode'),
''.join(ET.tostringlist(root, 'unicode')))
self.assertEqual(
ET.tostring(root, 'utf-16'),
b''.join(ET.tostringlist(root, 'utf-16')))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_short_empty_elements(self):
root = ET.fromstring('<tag>a<x />b<y></y>c</tag>')
self.assertEqual(
ET.tostring(root, 'unicode'),
'<tag>a<x />b<y />c</tag>')
self.assertEqual(
ET.tostring(root, 'unicode', short_empty_elements=True),
'<tag>a<x />b<y />c</tag>')
self.assertEqual(
ET.tostring(root, 'unicode', short_empty_elements=False),
'<tag>a<x></x>b<y></y>c</tag>')
class ParseErrorTest(unittest.TestCase):
def test_subclass(self):
self.assertIsInstance(ET.ParseError(), SyntaxError)
def _get_error(self, s):
try:
ET.fromstring(s)
except ET.ParseError as e:
return e
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_error_position(self):
self.assertEqual(self._get_error('foo').position, (1, 0))
self.assertEqual(self._get_error('<tag>&foo;</tag>').position, (1, 5))
self.assertEqual(self._get_error('foobar<').position, (1, 6))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_error_code(self):
import xml.parsers.expat.errors as ERRORS
self.assertEqual(self._get_error('foo').code,
ERRORS.codes[ERRORS.XML_ERROR_SYNTAX])
class KeywordArgsTest(unittest.TestCase):
# TODO: RUSTPYTHON
@unittest.expectedFailure
# Test various issues with keyword arguments passed to ET.Element
# constructor and methods
def test_issue14818(self):
x = ET.XML("<a>foo</a>")
self.assertEqual(x.find('a', None),
x.find(path='a', namespaces=None))
self.assertEqual(x.findtext('a', None, None),
x.findtext(path='a', default=None, namespaces=None))
self.assertEqual(x.findall('a', None),
x.findall(path='a', namespaces=None))
self.assertEqual(list(x.iterfind('a', None)),
list(x.iterfind(path='a', namespaces=None)))
self.assertEqual(ET.Element('a').attrib, {})
elements = [
ET.Element('a', dict(href="#", id="foo")),
ET.Element('a', attrib=dict(href="#", id="foo")),
ET.Element('a', dict(href="#"), id="foo"),
ET.Element('a', href="#", id="foo"),
ET.Element('a', dict(href="#", id="foo"), href="#", id="foo"),
]
for e in elements:
self.assertEqual(e.tag, 'a')
self.assertEqual(e.attrib, dict(href="#", id="foo"))
e2 = ET.SubElement(elements[0], 'foobar', attrib={'key1': 'value1'})
self.assertEqual(e2.attrib['key1'], 'value1')
with self.assertRaisesRegex(TypeError, 'must be dict, not str'):
ET.Element('a', "I'm not a dict")
with self.assertRaisesRegex(TypeError, 'must be dict, not str'):
ET.Element('a', attrib="I'm not a dict")
# --------------------------------------------------------------------
class NoAcceleratorTest(unittest.TestCase):
def setUp(self):
if not pyET:
raise unittest.SkipTest('only for the Python version')
# TODO: RUSTPYTHON
@unittest.expectedFailure
# Test that the C accelerator was not imported for pyET
def test_correct_import_pyET(self):
# The type of methods defined in Python code is types.FunctionType,
# while the type of methods defined inside _elementtree is
# <class 'wrapper_descriptor'>
self.assertIsInstance(pyET.Element.__init__, types.FunctionType)
self.assertIsInstance(pyET.XMLParser.__init__, types.FunctionType)
# --------------------------------------------------------------------
def c14n_roundtrip(xml, **options):
return pyET.canonicalize(xml, **options)
class C14NTest(unittest.TestCase):
maxDiff = None
#
# simple roundtrip tests (from c14n.py)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_simple_roundtrip(self):
# Basics
self.assertEqual(c14n_roundtrip("<doc/>"), '<doc></doc>')
self.assertEqual(c14n_roundtrip("<doc xmlns='uri'/>"), # FIXME
'<doc xmlns="uri"></doc>')
self.assertEqual(c14n_roundtrip("<prefix:doc xmlns:prefix='uri'/>"),
'<prefix:doc xmlns:prefix="uri"></prefix:doc>')
self.assertEqual(c14n_roundtrip("<doc xmlns:prefix='uri'><prefix:bar/></doc>"),
'<doc><prefix:bar xmlns:prefix="uri"></prefix:bar></doc>')
self.assertEqual(c14n_roundtrip("<elem xmlns:wsu='http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd' xmlns:SOAP-ENV='http://schemas.xmlsoap.org/soap/envelope/' />"),
'<elem></elem>')
# C14N spec
self.assertEqual(c14n_roundtrip("<doc>Hello, world!<!-- Comment 1 --></doc>"),
'<doc>Hello, world!</doc>')
self.assertEqual(c14n_roundtrip("<value>2</value>"),
'<value>2</value>')
self.assertEqual(c14n_roundtrip('<compute><![CDATA[value>"0" && value<"10" ?"valid":"error"]]></compute>'),
'<compute>value&gt;"0" &amp;&amp; value&lt;"10" ?"valid":"error"</compute>')
self.assertEqual(c14n_roundtrip('''<compute expr='value>"0" && value<"10" ?"valid":"error"'>valid</compute>'''),
'<compute expr="value>"0" && value<"10" ?"valid":"error"">valid</compute>')
self.assertEqual(c14n_roundtrip("<norm attr=' '   
	 ' '/>"),
'<norm attr=" \' 
	 \' "></norm>')
self.assertEqual(c14n_roundtrip("<normNames attr=' A   
	 B '/>"),
'<normNames attr=" A 
	 B "></normNames>')
self.assertEqual(c14n_roundtrip("<normId id=' '   
	 ' '/>"),
'<normId id=" \' 
	 \' "></normId>')
# fragments from PJ's tests
#self.assertEqual(c14n_roundtrip("<doc xmlns:x='http://example.com/x' xmlns='http://example.com/default'><b y:a1='1' xmlns='http://example.com/default' a3='3' xmlns:y='http://example.com/y' y:a2='2'/></doc>"),
#'<doc xmlns:x="http://example.com/x"><b xmlns:y="http://example.com/y" a3="3" y:a1="1" y:a2="2"></b></doc>')
# Namespace issues
xml = '<X xmlns="http://nps/a"><Y targets="abc,xyz"></Y></X>'
self.assertEqual(c14n_roundtrip(xml), xml)
xml = '<X xmlns="http://nps/a"><Y xmlns="http://nsp/b" targets="abc,xyz"></Y></X>'
self.assertEqual(c14n_roundtrip(xml), xml)
xml = '<X xmlns="http://nps/a"><Y xmlns:b="http://nsp/b" b:targets="abc,xyz"></Y></X>'
self.assertEqual(c14n_roundtrip(xml), xml)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_c14n_exclusion(self):
xml = textwrap.dedent("""\
<root xmlns:x="http://example.com/x">
<a x:attr="attrx">
<b>abtext</b>
</a>
<b>btext</b>
<c>
<x:d>dtext</x:d>
</c>
</root>
""")
self.assertEqual(
c14n_roundtrip(xml, strip_text=True),
'<root>'
'<a xmlns:x="http://example.com/x" x:attr="attrx"><b>abtext</b></a>'
'<b>btext</b>'
'<c><x:d xmlns:x="http://example.com/x">dtext</x:d></c>'
'</root>')
self.assertEqual(
c14n_roundtrip(xml, strip_text=True, exclude_attrs=['{http://example.com/x}attr']),
'<root>'
'<a><b>abtext</b></a>'
'<b>btext</b>'
'<c><x:d xmlns:x="http://example.com/x">dtext</x:d></c>'
'</root>')
self.assertEqual(
c14n_roundtrip(xml, strip_text=True, exclude_tags=['{http://example.com/x}d']),
'<root>'
'<a xmlns:x="http://example.com/x" x:attr="attrx"><b>abtext</b></a>'
'<b>btext</b>'
'<c></c>'
'</root>')
self.assertEqual(
c14n_roundtrip(xml, strip_text=True, exclude_attrs=['{http://example.com/x}attr'],
exclude_tags=['{http://example.com/x}d']),
'<root>'
'<a><b>abtext</b></a>'
'<b>btext</b>'
'<c></c>'
'</root>')
self.assertEqual(
c14n_roundtrip(xml, strip_text=True, exclude_tags=['a', 'b']),
'<root>'
'<c><x:d xmlns:x="http://example.com/x">dtext</x:d></c>'
'</root>')
self.assertEqual(
c14n_roundtrip(xml, exclude_tags=['a', 'b']),
'<root>\n'
' \n'
' \n'
' <c>\n'
' <x:d xmlns:x="http://example.com/x">dtext</x:d>\n'
' </c>\n'
'</root>')
self.assertEqual(
c14n_roundtrip(xml, strip_text=True, exclude_tags=['{http://example.com/x}d', 'b']),
'<root>'
'<a xmlns:x="http://example.com/x" x:attr="attrx"></a>'
'<c></c>'
'</root>')
self.assertEqual(
c14n_roundtrip(xml, exclude_tags=['{http://example.com/x}d', 'b']),
'<root>\n'
' <a xmlns:x="http://example.com/x" x:attr="attrx">\n'
' \n'
' </a>\n'
' \n'
' <c>\n'
' \n'
' </c>\n'
'</root>')
#
# basic method=c14n tests from the c14n 2.0 specification. uses
# test files under xmltestdata/c14n-20.
# note that this uses generated C14N versions of the standard ET.write
# output, not roundtripped C14N (see above).
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_xml_c14n2(self):
datadir = findfile("c14n-20", subdir="xmltestdata")
full_path = partial(os.path.join, datadir)
files = [filename[:-4] for filename in sorted(os.listdir(datadir))
if filename.endswith('.xml')]
input_files = [
filename for filename in files
if filename.startswith('in')
]
configs = {
filename: {
# <c14n2:PrefixRewrite>sequential</c14n2:PrefixRewrite>
option.tag.split('}')[-1]: ((option.text or '').strip(), option)
for option in ET.parse(full_path(filename) + ".xml").getroot()
}
for filename in files
if filename.startswith('c14n')
}
tests = {
input_file: [
(filename, configs[filename.rsplit('_', 1)[-1]])
for filename in files
if filename.startswith(f'out_{input_file}_')
and filename.rsplit('_', 1)[-1] in configs
]
for input_file in input_files
}
# Make sure we found all test cases.
self.assertEqual(30, len([
output_file for output_files in tests.values()
for output_file in output_files]))
def get_option(config, option_name, default=None):
return config.get(option_name, (default, ()))[0]
for input_file, output_files in tests.items():
for output_file, config in output_files:
keep_comments = get_option(
config, 'IgnoreComments') == 'true' # no, it's right :)
strip_text = get_option(
config, 'TrimTextNodes') == 'true'
rewrite_prefixes = get_option(
config, 'PrefixRewrite') == 'sequential'
if 'QNameAware' in config:
qattrs = [
f"{{{el.get('NS')}}}{el.get('Name')}"
for el in config['QNameAware'][1].findall(
'{http://www.w3.org/2010/xml-c14n2}QualifiedAttr')
]
qtags = [
f"{{{el.get('NS')}}}{el.get('Name')}"
for el in config['QNameAware'][1].findall(
'{http://www.w3.org/2010/xml-c14n2}Element')
]
else:
qtags = qattrs = None
# Build subtest description from config.
config_descr = ','.join(
f"{name}={value or ','.join(c.tag.split('}')[-1] for c in children)}"
for name, (value, children) in sorted(config.items())
)
with self.subTest(f"{output_file}({config_descr})"):
if input_file == 'inNsRedecl' and not rewrite_prefixes:
self.skipTest(
f"Redeclared namespace handling is not supported in {output_file}")
if input_file == 'inNsSuperfluous' and not rewrite_prefixes:
self.skipTest(
f"Redeclared namespace handling is not supported in {output_file}")
if 'QNameAware' in config and config['QNameAware'][1].find(
'{http://www.w3.org/2010/xml-c14n2}XPathElement') is not None:
self.skipTest(
f"QName rewriting in XPath text is not supported in {output_file}")
f = full_path(input_file + ".xml")
if input_file == 'inC14N5':
# Hack: avoid setting up external entity resolution in the parser.
with open(full_path('world.txt'), 'rb') as entity_file:
with open(f, 'rb') as f:
f = io.BytesIO(f.read().replace(b'&ent2;', entity_file.read()))
text = ET.canonicalize(
from_file=f,
with_comments=keep_comments,
strip_text=strip_text,
rewrite_prefixes=rewrite_prefixes,
qname_aware_tags=qtags, qname_aware_attrs=qattrs)
with open(full_path(output_file + ".xml"), 'r', encoding='utf8') as f:
expected = f.read()
if input_file == 'inC14N3':
# FIXME: cET resolves default attributes but ET does not!
expected = expected.replace(' attr="default"', '')
text = text.replace(' attr="default"', '')
self.assertEqual(expected, text)
# --------------------------------------------------------------------
def test_main(module=None):
# When invoked without a module, runs the Python ET tests by loading pyET.
# Otherwise, uses the given module as the ET.
global pyET
pyET = import_fresh_module('xml.etree.ElementTree',
blocked=['_elementtree'])
if module is None:
module = pyET
global ET
ET = module
test_classes = [
ModuleTest,
ElementSlicingTest,
BasicElementTest,
BadElementTest,
BadElementPathTest,
ElementTreeTest,
IOTest,
ParseErrorTest,
XIncludeTest,
ElementTreeTypeTest,
ElementFindTest,
ElementIterTest,
TreeBuilderTest,
XMLParserTest,
XMLPullParserTest,
BugsTest,
KeywordArgsTest,
C14NTest,
]
# These tests will only run for the pure-Python version that doesn't import
# _elementtree. We can't use skipUnless here, because pyET is filled in only
# after the module is loaded.
if pyET is not ET:
test_classes.extend([
NoAcceleratorTest,
])
# Provide default namespace mapping and path cache.
from xml.etree import ElementPath
nsmap = ET.register_namespace._namespace_map
# Copy the default namespace mapping
nsmap_copy = nsmap.copy()
# Copy the path cache (should be empty)
path_cache = ElementPath._cache
ElementPath._cache = path_cache.copy()
# Align the Comment/PI factories.
if hasattr(ET, '_set_factories'):
old_factories = ET._set_factories(ET.Comment, ET.PI)
else:
old_factories = None
try:
support.run_unittest(*test_classes)
finally:
from xml.etree import ElementPath
# Restore mapping and path cache
nsmap.clear()
nsmap.update(nsmap_copy)
ElementPath._cache = path_cache
if old_factories is not None:
ET._set_factories(*old_factories)
# don't interfere with subsequent tests
ET = pyET = None
if __name__ == '__main__':
test_main()
the-stack_0_24911
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import csv
import itertools
from operator import attrgetter
from flask import current_app, json, session
from qrcode import QRCode, constants
from sqlalchemy import and_, or_
from sqlalchemy.orm import contains_eager, joinedload, load_only, undefer
from werkzeug.urls import url_parse
from wtforms import BooleanField, ValidationError
from indico.core import signals
from indico.core.config import config
from indico.core.db import db
from indico.core.db.sqlalchemy.util.session import no_autoflush
from indico.core.errors import UserValueError
from indico.modules.events import EventLogKind, EventLogRealm
from indico.modules.events.models.events import Event
from indico.modules.events.models.persons import EventPerson
from indico.modules.events.payment.models.transactions import TransactionStatus
from indico.modules.events.registration import logger
from indico.modules.events.registration.badges import RegistrantsListToBadgesPDF, RegistrantsListToBadgesPDFFoldable
from indico.modules.events.registration.fields.choices import (AccommodationField, ChoiceBaseField,
get_field_merged_options)
from indico.modules.events.registration.models.form_fields import (RegistrationFormFieldData,
RegistrationFormPersonalDataField)
from indico.modules.events.registration.models.forms import RegistrationForm
from indico.modules.events.registration.models.invitations import InvitationState, RegistrationInvitation
from indico.modules.events.registration.models.items import (PersonalDataType, RegistrationFormItemType,
RegistrationFormPersonalDataSection)
from indico.modules.events.registration.models.registrations import Registration, RegistrationData, RegistrationState
from indico.modules.events.registration.notifications import (notify_registration_creation,
notify_registration_modification)
from indico.modules.users.util import get_user_by_email
from indico.util.date_time import format_date
from indico.util.i18n import _
from indico.util.spreadsheets import csv_text_io_wrapper, unique_col
from indico.util.string import validate_email, validate_email_verbose
from indico.web.forms.base import IndicoForm
from indico.web.forms.widgets import SwitchWidget
def get_title_uuid(regform, title):
"""Convert a string title to its UUID value.
If the title does not exist in the title PD field, it will be
ignored and returned as ``None``.
"""
if not title:
return None
title_field = next((x
for x in regform.active_fields
if (x.type == RegistrationFormItemType.field_pd and
x.personal_data_type == PersonalDataType.title)), None)
if title_field is None: # should never happen
return None
valid_choices = {x['id'] for x in title_field.current_data.versioned_data['choices']}
uuid = next((k for k, v in title_field.data['captions'].items() if v == title), None)
return {uuid: 1} if uuid in valid_choices else None
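# Hedged usage sketch (``regform`` and the captions are hypothetical): for a
# title field offering 'Mr'/'Mrs', the helper returns the choice payload
# expected by the personal-data title field, e.g.
#   get_title_uuid(regform, 'Mr')  # -> {'<uuid-of-Mr>': 1}
#   get_title_uuid(regform, '')    # -> None
#   get_title_uuid(regform, 'Dr')  # -> None if 'Dr' is not a known caption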
def get_event_section_data(regform, management=False, registration=None):
data = []
if not registration:
return [s.view_data for s in regform.sections if not s.is_deleted and (management or not s.is_manager_only)]
registration_data = {r.field_data.field.id: r for r in registration.data}
for section in regform.sections:
if section.is_deleted or (not management and section.is_manager_only):
continue
section_data = section.own_data
section_data['items'] = []
for child in section.children:
if child.is_deleted:
continue
if child.is_field and isinstance(child.field_impl, (ChoiceBaseField, AccommodationField)):
field_data = get_field_merged_options(child, registration_data)
else:
field_data = child.view_data
section_data['items'].append(field_data)
data.append(section_data)
return data
def check_registration_email(regform, email, registration=None, management=False):
"""Check whether an email address is suitable for registration.
:param regform: The registration form
:param email: The email address
:param registration: The existing registration (in case of
modification)
:param management: If it's a manager adding a new registration
"""
email = email.lower().strip()
user = get_user_by_email(email)
email_registration = regform.get_registration(email=email)
user_registration = regform.get_registration(user=user) if user else None
if registration is not None:
if email_registration and email_registration != registration:
return dict(status='error', conflict='email-already-registered')
elif user_registration and user_registration != registration:
return dict(status='error', conflict='user-already-registered')
elif user and registration.user and registration.user != user:
return dict(status='warning' if management else 'error', conflict='email-other-user', user=user.full_name)
elif not user and registration.user:
return dict(status='warning' if management else 'error', conflict='email-no-user',
user=registration.user.full_name)
elif user:
return dict(status='ok', user=user.full_name, self=(not management and user == session.user),
same=(user == registration.user))
email_err = validate_email_verbose(email)
if email_err:
return dict(status='error', conflict='email-invalid', email_error=email_err)
if regform.require_user and (management or email != registration.email):
return dict(status='warning' if management else 'error', conflict='no-user')
else:
return dict(status='ok', user=None)
else:
if email_registration:
return dict(status='error', conflict='email-already-registered')
elif user_registration:
return dict(status='error', conflict='user-already-registered')
elif user:
return dict(status='ok', user=user.full_name, self=(not management and user == session.user), same=False)
email_err = validate_email_verbose(email)
if email_err:
return dict(status='error', conflict='email-invalid', email_error=email_err)
if regform.require_user:
return dict(status='warning' if management else 'error', conflict='no-user')
else:
return dict(status='ok', user=None)
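# The dicts returned above always carry a ``status`` of 'ok', 'warning' or
# 'error'; non-ok results add a ``conflict`` code such as
# 'email-already-registered', 'user-already-registered' or 'no-user'.
# Hedged caller sketch (the surrounding view is hypothetical):
#   result = check_registration_email(regform, 'someone@example.com')
#   if result['status'] == 'error':
#       raise UserValueError(result['conflict'])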
def make_registration_form(regform, management=False, registration=None):
"""Create a WTForm based on registration form fields."""
class RegistrationFormWTF(IndicoForm):
if management:
notify_user = BooleanField(_('Send email'), widget=SwitchWidget())
def validate_email(self, field):
status = check_registration_email(regform, field.data, registration, management=management)
if status['status'] == 'error':
raise ValidationError('Email validation failed: ' + status['conflict'])
for form_item in regform.active_fields:
if not management and form_item.parent.is_manager_only:
continue
field_impl = form_item.field_impl
setattr(RegistrationFormWTF, form_item.html_field_name, field_impl.create_wtf_field())
signals.event.registration_form_wtform_created.send(regform, registration=registration, management=management,
wtform_cls=RegistrationFormWTF)
RegistrationFormWTF.modified_registration = registration
return RegistrationFormWTF
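# Hedged usage sketch: the returned class is instantiated per request like any
# other WTForms form (``request_data`` below is hypothetical):
#   form_cls = make_registration_form(regform, management=False)
#   form = form_cls(formdata=request_data)
#   if form.validate():
#       create_registration(regform, form.data)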
def create_personal_data_fields(regform):
"""Create the special section/fields for personal data."""
section = next((s for s in regform.sections if s.type == RegistrationFormItemType.section_pd), None)
if section is None:
section = RegistrationFormPersonalDataSection(registration_form=regform, title='Personal Data')
missing = set(PersonalDataType)
else:
existing = {x.personal_data_type for x in section.children if x.type == RegistrationFormItemType.field_pd}
missing = set(PersonalDataType) - existing
for pd_type, data in PersonalDataType.FIELD_DATA:
if pd_type not in missing:
continue
field = RegistrationFormPersonalDataField(registration_form=regform, personal_data_type=pd_type,
is_required=pd_type.is_required)
if not data.get('is_enabled', True):
field.position = data['position']
for key, value in data.items():
setattr(field, key, value)
field.data, versioned_data = field.field_impl.process_field_data(data.pop('data', {}))
field.current_data = RegistrationFormFieldData(versioned_data=versioned_data)
section.children.append(field)
def url_rule_to_angular(endpoint):
"""Convert a flask-style rule to angular style."""
mapping = {
'event_id': 'eventId',
'reg_form_id': 'confFormId',
'section_id': 'sectionId',
'field_id': 'fieldId',
}
rules = list(current_app.url_map.iter_rules(endpoint))
assert len(rules) == 1
rule = rules[0]
assert not rule.defaults
segments = [':' + mapping.get(data, data) if is_dynamic else data
for is_dynamic, data in rule._trace]
prefix = url_parse(config.BASE_URL).path
return prefix + ''.join(segments).split('|', 1)[-1]
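# Hedged example (the rule below is hypothetical): a Flask rule such as
# '/event/<int:event_id>/registration/<int:reg_form_id>/sections/<int:section_id>'
# would come out as '/event/:eventId/registration/:confFormId/sections/:sectionId',
# prefixed with the path portion of ``config.BASE_URL``.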
@no_autoflush
def create_registration(regform, data, invitation=None, management=False, notify_user=True, skip_moderation=None):
user = session.user if session else None
registration = Registration(registration_form=regform, user=get_user_by_email(data['email']),
base_price=regform.base_price, currency=regform.currency)
if skip_moderation is None:
skip_moderation = management
for form_item in regform.active_fields:
if form_item.parent.is_manager_only:
value = form_item.field_impl.default_value
else:
value = data.get(form_item.html_field_name)
data_entry = RegistrationData()
registration.data.append(data_entry)
for attr, value in form_item.field_impl.process_form_data(registration, value).items():
setattr(data_entry, attr, value)
if form_item.type == RegistrationFormItemType.field_pd and form_item.personal_data_type.column:
setattr(registration, form_item.personal_data_type.column, value)
if invitation is None:
# Associate invitation based on email in case the user did not use the link
invitation = (RegistrationInvitation.query
.filter_by(email=data['email'], registration_id=None)
.with_parent(regform)
.first())
if invitation:
invitation.state = InvitationState.accepted
invitation.registration = registration
registration.sync_state(_skip_moderation=skip_moderation)
db.session.flush()
signals.event.registration_created.send(registration, management=management, data=data)
notify_registration_creation(registration, notify_user)
logger.info('New registration %s by %s', registration, user)
registration.log(EventLogRealm.management if management else EventLogRealm.participants,
EventLogKind.positive, 'Registration',
f'New registration: {registration.full_name}', user, data={'Email': registration.email})
return registration
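# Note: when ``skip_moderation`` is left as None it defaults to ``management``,
# so manager-created registrations bypass the moderation step. Hedged usage
# sketch (``form`` is a hypothetical, already validated registration form):
#   registration = create_registration(regform, form.data, management=False)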
@no_autoflush
def modify_registration(registration, data, management=False, notify_user=True):
old_price = registration.price
personal_data_changes = {}
regform = registration.registration_form
data_by_field = registration.data_by_field
if management or not registration.user:
registration.user = get_user_by_email(data['email'])
billable_items_locked = not management and registration.is_paid
for form_item in regform.active_fields:
field_impl = form_item.field_impl
if management or not form_item.parent.is_manager_only:
value = data.get(form_item.html_field_name)
elif form_item.id not in data_by_field:
# set default value for manager-only field if it didn't have one before
value = field_impl.default_value
else:
# manager-only field that has data which should be preserved
continue
if form_item.id not in data_by_field:
data_by_field[form_item.id] = RegistrationData(registration=registration,
field_data=form_item.current_data)
attrs = field_impl.process_form_data(registration, value, data_by_field[form_item.id],
billable_items_locked=billable_items_locked)
for key, val in attrs.items():
setattr(data_by_field[form_item.id], key, val)
if form_item.type == RegistrationFormItemType.field_pd and form_item.personal_data_type.column:
key = form_item.personal_data_type.column
if getattr(registration, key) != value:
personal_data_changes[key] = value
setattr(registration, key, value)
registration.sync_state()
db.session.flush()
# sanity check
if billable_items_locked and old_price != registration.price:
raise Exception("There was an error while modifying your registration (price mismatch: %s / %s)",
old_price, registration.price)
if personal_data_changes:
signals.event.registration_personal_data_modified.send(registration, change=personal_data_changes)
signals.event.registration_updated.send(registration, management=management, data=data)
notify_registration_modification(registration, notify_user)
logger.info('Registration %s modified by %s', registration, session.user)
registration.log(EventLogRealm.management if management else EventLogRealm.participants,
EventLogKind.change, 'Registration',
f'Registration modified: {registration.full_name}',
session.user, data={'Email': registration.email})
def generate_spreadsheet_from_registrations(registrations, regform_items, static_items):
"""Generate a spreadsheet data from a given registration list.
:param registrations: The list of registrations to include in the file
:param regform_items: The registration form items to be used as columns
:param static_items: Registration form information as extra columns
"""
field_names = ['ID', 'Name']
special_item_mapping = {
'reg_date': ('Registration date', lambda x: x.submitted_dt),
'state': ('Registration state', lambda x: x.state.title),
'price': ('Price', lambda x: x.render_price()),
'checked_in': ('Checked in', lambda x: x.checked_in),
'checked_in_date': ('Check-in date', lambda x: x.checked_in_dt if x.checked_in else ''),
'payment_date': ('Payment date', lambda x: (x.transaction.timestamp
if (x.transaction is not None and
x.transaction.status == TransactionStatus.successful)
else '')),
}
for item in regform_items:
field_names.append(unique_col(item.title, item.id))
if item.input_type == 'accommodation':
field_names.append(unique_col('{} ({})'.format(item.title, 'Arrival'), item.id))
field_names.append(unique_col('{} ({})'.format(item.title, 'Departure'), item.id))
field_names.extend(title for name, (title, fn) in special_item_mapping.items() if name in static_items)
rows = []
for registration in registrations:
data = registration.data_by_field
registration_dict = {
'ID': registration.friendly_id,
'Name': f"{registration.first_name} {registration.last_name}"
}
for item in regform_items:
key = unique_col(item.title, item.id)
if item.input_type == 'accommodation':
registration_dict[key] = data[item.id].friendly_data.get('choice') if item.id in data else ''
key = unique_col('{} ({})'.format(item.title, 'Arrival'), item.id)
arrival_date = data[item.id].friendly_data.get('arrival_date') if item.id in data else None
registration_dict[key] = format_date(arrival_date) if arrival_date else ''
key = unique_col('{} ({})'.format(item.title, 'Departure'), item.id)
departure_date = data[item.id].friendly_data.get('departure_date') if item.id in data else None
registration_dict[key] = format_date(departure_date) if departure_date else ''
else:
registration_dict[key] = data[item.id].friendly_data if item.id in data else ''
for name, (title, fn) in special_item_mapping.items():
if name not in static_items:
continue
value = fn(registration)
registration_dict[title] = value
rows.append(registration_dict)
return field_names, rows
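# Illustrative usage (not part of Indico; variable names are placeholders): the
# (field_names, rows) pair returned above maps directly onto csv.DictWriter, e.g.
#
#     import csv
#     field_names, rows = generate_spreadsheet_from_registrations(regs, regform_items, {'state', 'price'})
#     with open('registrations.csv', 'w', newline='') as f:
#         writer = csv.DictWriter(f, fieldnames=field_names)
#         writer.writeheader()
#         writer.writerows(rows)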
def get_registrations_with_tickets(user, event):
query = (Registration.query.with_parent(event)
.filter(Registration.user == user,
Registration.state == RegistrationState.complete,
RegistrationForm.tickets_enabled,
RegistrationForm.ticket_on_event_page,
~RegistrationForm.is_deleted,
~Registration.is_deleted)
.join(Registration.registration_form))
return [r for r in query if not r.is_ticket_blocked]
def get_published_registrations(event):
"""Get a list of published registrations for an event.
:param event: the `Event` to get registrations for
:return: list of `Registration` objects
"""
return (Registration.query
.filter(Registration.is_publishable,
~RegistrationForm.is_deleted,
RegistrationForm.event_id == event.id,
RegistrationForm.publish_registrations_enabled)
.join(Registration.registration_form)
.options(contains_eager(Registration.registration_form))
.order_by(db.func.lower(Registration.first_name),
db.func.lower(Registration.last_name),
Registration.friendly_id)
.all())
def get_events_registered(user, dt=None):
"""Get the IDs of events where the user is registered.
:param user: A `User`
:param dt: Only include events taking place on/after that date
:return: A set of event ids
"""
query = (user.registrations
.options(load_only('event_id'))
.options(joinedload(Registration.registration_form).load_only('event_id'))
.join(Registration.registration_form)
.join(RegistrationForm.event)
.filter(Registration.is_active, ~RegistrationForm.is_deleted, ~Event.is_deleted,
Event.ends_after(dt)))
return {registration.event_id for registration in query}
def build_registrations_api_data(event):
api_data = []
query = (RegistrationForm.query.with_parent(event)
.options(joinedload('registrations').joinedload('data').joinedload('field_data')))
for regform in query:
for registration in regform.active_registrations:
registration_info = _build_base_registration_info(registration)
registration_info['checkin_secret'] = registration.ticket_uuid
api_data.append(registration_info)
return api_data
def _build_base_registration_info(registration):
personal_data = _build_personal_data(registration)
return {
'registrant_id': str(registration.id),
'checked_in': registration.checked_in,
'checkin_secret': registration.ticket_uuid,
'full_name': '{} {}'.format(personal_data.get('title', ''), registration.full_name).strip(),
'personal_data': personal_data
}
def _build_personal_data(registration):
personal_data = registration.get_personal_data()
personal_data['firstName'] = personal_data.pop('first_name')
personal_data['surname'] = personal_data.pop('last_name')
personal_data['country'] = personal_data.pop('country', '')
personal_data['phone'] = personal_data.pop('phone', '')
return personal_data
def build_registration_api_data(registration):
registration_info = _build_base_registration_info(registration)
registration_info['amount_paid'] = registration.price if registration.is_paid else 0
registration_info['ticket_price'] = registration.price
registration_info['registration_date'] = registration.submitted_dt.isoformat()
registration_info['paid'] = registration.is_paid
registration_info['checkin_date'] = registration.checked_in_dt.isoformat() if registration.checked_in_dt else ''
registration_info['event_id'] = registration.event_id
return registration_info
def generate_ticket_qr_code(registration):
"""Generate a Pillow `Image` with a QR Code encoding a check-in ticket.
:param registration: corresponding `Registration` object
"""
qr = QRCode(
version=17,
error_correction=constants.ERROR_CORRECT_Q,
box_size=3,
border=1
)
qr_data = {
"registrant_id": registration.id,
"checkin_secret": registration.ticket_uuid,
"event_id": str(registration.event.id),
"server_url": config.BASE_URL,
"version": 1
}
signals.event.registration.generate_ticket_qr_code.send(registration, ticket_data=qr_data)
json_qr_data = json.dumps(qr_data)
qr.add_data(json_qr_data)
qr.make(fit=True)
return qr.make_image()._img
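# Illustrative only: whatever scans the ticket gets back the JSON payload built
# above; a check-in client could unpack it roughly like this (`decoded_text` is
# assumed to come from a QR reader):
#
#     payload = json.loads(decoded_text)
#     registrant_id = payload['registrant_id']
#     checkin_secret = payload['checkin_secret']
#     server_url = payload['server_url']  # base URL of the server that issued the ticket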
def get_event_regforms(event, user, with_registrations=False, only_in_acl=False):
"""Get registration forms with information about user registrations.
:param event: the `Event` to get registration forms for
:param user: A `User`
:param with_registrations: Whether to return the user's
registration instead of just
whether they have one
:param only_in_acl: Whether to include only registration forms
that are in the event's ACL
"""
if not user:
registered_user = db.literal(None if with_registrations else False)
elif with_registrations:
registered_user = Registration
else:
registered_user = RegistrationForm.registrations.any((Registration.user == user) & ~Registration.is_deleted)
query = (RegistrationForm.query.with_parent(event)
.with_entities(RegistrationForm, registered_user)
.options(undefer('active_registration_count'))
.order_by(db.func.lower(RegistrationForm.title)))
if only_in_acl:
query = query.filter(RegistrationForm.in_event_acls.any(event=event))
if with_registrations:
query = query.outerjoin(Registration, db.and_(Registration.registration_form_id == RegistrationForm.id,
Registration.user == user,
~Registration.is_deleted))
return query.all()
def get_event_regforms_registrations(event, user, include_scheduled=True, only_in_acl=False):
"""Get regforms and the associated registrations for an event+user.
:param event: the `Event` to get registration forms for
:param user: A `User`
:param include_scheduled: Whether to include scheduled
but not open registration forms
:param only_in_acl: Whether to include only registration forms
that are in the event's ACL
:return: A tuple, which includes:
- All registration forms which are scheduled, open or registered.
- A dict mapping all registration forms to the user's registration if they have one.
"""
all_regforms = get_event_regforms(event, user, with_registrations=True, only_in_acl=only_in_acl)
if include_scheduled:
displayed_regforms = [regform for regform, registration in all_regforms
if regform.is_scheduled or registration]
else:
displayed_regforms = [regform for regform, registration in all_regforms
if regform.is_open or registration]
return displayed_regforms, dict(all_regforms)
def generate_ticket(registration):
from indico.modules.designer.util import get_default_ticket_on_category
from indico.modules.events.registration.controllers.management.tickets import DEFAULT_TICKET_PRINTING_SETTINGS
template = (registration.registration_form.ticket_template or
get_default_ticket_on_category(registration.event.category))
registrations = [registration]
signals.event.designer.print_badge_template.send(template, regform=registration.registration_form,
registrations=registrations)
pdf_class = RegistrantsListToBadgesPDFFoldable if template.backside_template else RegistrantsListToBadgesPDF
pdf = pdf_class(template, DEFAULT_TICKET_PRINTING_SETTINGS, registration.event, registrations)
return pdf.get_pdf()
def get_ticket_attachments(registration):
return [('Ticket.pdf', generate_ticket(registration).getvalue())]
def update_regform_item_positions(regform):
"""Update positions when deleting/disabling an item in order to prevent gaps."""
section_positions = itertools.count(1)
disabled_section_positions = itertools.count(1000)
for section in sorted(regform.sections, key=attrgetter('position')):
section_active = section.is_enabled and not section.is_deleted
section.position = next(section_positions if section_active else disabled_section_positions)
# ensure consistent field ordering
positions = itertools.count(1)
disabled_positions = itertools.count(1000)
for child in section.children:
child_active = child.is_enabled and not child.is_deleted
child.position = next(positions if child_active else disabled_positions)
def import_registrations_from_csv(regform, fileobj, skip_moderation=True, notify_users=False):
"""Import event registrants from a CSV file into a form."""
with csv_text_io_wrapper(fileobj) as ftxt:
reader = csv.reader(ftxt.read().splitlines())
reg_data = (db.session.query(Registration.user_id, Registration.email)
.with_parent(regform)
.filter(Registration.is_active)
.all())
registered_user_ids = {rd.user_id for rd in reg_data if rd.user_id is not None}
registered_emails = {rd.email for rd in reg_data}
used_emails = set()
email_row_map = {}
todo = []
for row_num, row in enumerate(reader, 1):
try:
first_name, last_name, affiliation, position, phone, email = [value.strip() for value in row]
email = email.lower()
except ValueError:
raise UserValueError(_('Row {}: malformed CSV data - please check that the number of columns is correct')
.format(row_num))
if not email:
raise UserValueError(_('Row {}: missing e-mail address').format(row_num))
if not validate_email(email):
raise UserValueError(_('Row {}: invalid e-mail address').format(row_num))
if not first_name or not last_name:
raise UserValueError(_('Row {}: missing first or last name').format(row_num))
if email in registered_emails:
raise UserValueError(_('Row {}: a registration with this email already exists').format(row_num))
user = get_user_by_email(email)
if user and user.id in registered_user_ids:
raise UserValueError(_('Row {}: a registration for this user already exists').format(row_num))
if email in used_emails:
raise UserValueError(_('Row {}: email address is not unique').format(row_num))
if conflict_row_num := email_row_map.get(email):
raise UserValueError(_('Row {}: email address belongs to the same user as in row {}')
.format(row_num, conflict_row_num))
used_emails.add(email)
if user:
email_row_map.update((e, row_num) for e in user.all_emails)
todo.append({
'email': email,
'first_name': first_name.title(),
'last_name': last_name.title(),
'affiliation': affiliation,
'phone': phone,
'position': position
})
return [create_registration(regform, data, notify_user=notify_users, skip_moderation=skip_moderation)
for data in todo]
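# Expected CSV layout for the importer above (illustration only, no header row);
# the six columns match the unpacking order: first name, last name, affiliation,
# position, phone, email:
#
#     Jane,Doe,ACME Corp,Researcher,+41 22 000 00 00,jane.doe@example.com
#     John,Smith,Example University,,,john.smith@example.com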
def get_registered_event_persons(event):
"""Get all registered EventPersons of an event."""
query = event.persons.join(Registration, and_(Registration.event_id == EventPerson.event_id,
Registration.is_active,
or_(Registration.user_id == EventPerson.user_id,
Registration.email == EventPerson.email)))
return set(query)
def serialize_registration_form(regform):
"""Serialize registration form to JSON-like object."""
return {
'id': regform.id,
'name': regform.title,
'identifier': f'RegistrationForm:{regform.id}',
'_type': 'RegistrationForm'
}
|
the-stack_0_24912
|
# -*- coding: utf-8 -*-
import datetime
import itertools
import logging
import os
import tempfile
import time
from collections import Counter
import torch
from fvcore.common.checkpoint import PeriodicCheckpointer as _PeriodicCheckpointer
from fvcore.common.file_io import PathManager
from fvcore.common.timer import Timer
from fvcore.nn.precise_bn import get_bn_modules, update_bn_stats
import FasterRCNN.utils.comm as comm
from FasterRCNN.evaluation.testing import flatten_results_dict
from FasterRCNN.utils.events import EventStorage, EventWriter
from .train_loop import HookBase
__all__ = [
"CallbackHook",
"IterationTimer",
"PeriodicWriter",
"PeriodicCheckpointer",
"LRScheduler",
"AutogradProfiler",
"EvalHook",
"PreciseBN",
]
"""
Implement some common hooks.
"""
class CallbackHook(HookBase):
"""
Create a hook using callback functions provided by the user.
"""
def __init__(self, *, before_train=None, after_train=None, before_step=None, after_step=None):
"""
Each argument is a function that takes one argument: the trainer.
"""
self._before_train = before_train
self._before_step = before_step
self._after_step = after_step
self._after_train = after_train
def before_train(self):
if self._before_train:
self._before_train(self.trainer)
def after_train(self):
if self._after_train:
self._after_train(self.trainer)
# The functions may be closures that hold reference to the trainer
# Therefore, delete them to avoid circular reference.
del self._before_train, self._after_train
del self._before_step, self._after_step
def before_step(self):
if self._before_step:
self._before_step(self.trainer)
def after_step(self):
if self._after_step:
self._after_step(self.trainer)
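# Usage sketch (illustrative): attach ad-hoc callbacks to a trainer without
# writing a dedicated HookBase subclass. `trainer.register_hooks` is assumed to
# exist on the surrounding trainer, as in detectron2-style training loops.
#
#     hook = CallbackHook(
#         before_step=lambda trainer: print("starting iter", trainer.iter),
#         after_train=lambda trainer: print("finished at iter", trainer.iter),
#     )
#     trainer.register_hooks([hook])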
class IterationTimer(HookBase):
"""
Track the time spent for each iteration (each run_step call in the trainer).
    Print a summary at the end of training.
This hook uses the time between the call to its :meth:`before_step`
and :meth:`after_step` methods.
Under the convention that :meth:`before_step` of all hooks should only
    take a negligible amount of time, the :class:`IterationTimer` hook should be
placed at the beginning of the list of hooks to obtain accurate timing.
"""
def __init__(self, warmup_iter=3):
"""
Args:
warmup_iter (int): the number of iterations at the beginning to exclude
from timing.
"""
self._warmup_iter = warmup_iter
self._step_timer = Timer()
def before_train(self):
self._start_time = time.perf_counter()
self._total_timer = Timer()
self._total_timer.pause()
def after_train(self):
logger = logging.getLogger(__name__)
total_time = time.perf_counter() - self._start_time
total_time_minus_hooks = self._total_timer.seconds()
hook_time = total_time - total_time_minus_hooks
num_iter = self.trainer.iter + 1 - self.trainer.start_iter - self._warmup_iter
if num_iter > 0 and total_time_minus_hooks > 0:
# Speed is meaningful only after warmup
# NOTE this format is parsed by grep in some scripts
logger.info(
"Overall training speed: {} iterations in {} ({:.4f} s / it)".format(
num_iter,
str(datetime.timedelta(seconds=int(total_time_minus_hooks))),
total_time_minus_hooks / num_iter,
)
)
logger.info(
"Total training time: {} ({} on hooks)".format(
str(datetime.timedelta(seconds=int(total_time))),
str(datetime.timedelta(seconds=int(hook_time))),
)
)
def before_step(self):
self._step_timer.reset()
self._total_timer.resume()
def after_step(self):
# +1 because we're in after_step
iter_done = self.trainer.iter - self.trainer.start_iter + 1
if iter_done >= self._warmup_iter:
sec = self._step_timer.seconds()
self.trainer.storage.put_scalars(time=sec)
else:
self._start_time = time.perf_counter()
self._total_timer.reset()
self._total_timer.pause()
class PeriodicWriter(HookBase):
"""
Write events to EventStorage periodically.
It is executed every ``period`` iterations and after the last iteration.
"""
def __init__(self, writers, period=20):
"""
Args:
writers (list[EventWriter]): a list of EventWriter objects
            period (int): the interval (in iterations) at which events are written out.
"""
self._writers = writers
for w in writers:
assert isinstance(w, EventWriter), w
self._period = period
def after_step(self):
if (self.trainer.iter + 1) % self._period == 0 or (
self.trainer.iter == self.trainer.max_iter - 1
):
for writer in self._writers:
writer.write()
def after_train(self):
for writer in self._writers:
writer.close()
class PeriodicCheckpointer(_PeriodicCheckpointer, HookBase):
"""
Same as :class:`detectron2.checkpoint.PeriodicCheckpointer`, but as a hook.
Note that when used as a hook,
it is unable to save additional data other than what's defined
by the given `checkpointer`.
It is executed every ``period`` iterations and after the last iteration.
"""
def before_train(self):
self.max_iter = self.trainer.max_iter
def after_step(self):
# No way to use **kwargs
self.step(self.trainer.iter)
class LRScheduler(HookBase):
"""
A hook which executes a torch builtin LR scheduler and summarizes the LR.
It is executed after every iteration.
"""
def __init__(self, optimizer, scheduler):
"""
Args:
optimizer (torch.optim.Optimizer):
scheduler (torch.optim._LRScheduler)
"""
self._optimizer = optimizer
self._scheduler = scheduler
# NOTE: some heuristics on what LR to summarize
# summarize the param group with most parameters
largest_group = max(len(g["params"]) for g in optimizer.param_groups)
if largest_group == 1:
# If all groups have one parameter,
# then find the most common initial LR, and use it for summary
lr_count = Counter([g["lr"] for g in optimizer.param_groups])
lr = lr_count.most_common()[0][0]
for i, g in enumerate(optimizer.param_groups):
if g["lr"] == lr:
self._best_param_group_id = i
break
else:
for i, g in enumerate(optimizer.param_groups):
if len(g["params"]) == largest_group:
self._best_param_group_id = i
break
def after_step(self):
lr = self._optimizer.param_groups[self._best_param_group_id]["lr"]
self.trainer.storage.put_scalar("lr", lr, smoothing_hint=False)
self._scheduler.step()
class AutogradProfiler(HookBase):
"""
A hook which runs `torch.autograd.profiler.profile`.
Examples:
.. code-block:: python
hooks.AutogradProfiler(
lambda trainer: trainer.iter > 10 and trainer.iter < 20, self.cfg.OUTPUT_DIR
)
The above example will run the profiler for iteration 10~20 and dump
results to ``OUTPUT_DIR``. We did not profile the first few iterations
because they are typically slower than the rest.
The result files can be loaded in the ``chrome://tracing`` page in chrome browser.
Note:
When used together with NCCL on older version of GPUs,
autograd profiler may cause deadlock because it unnecessarily allocates
memory on every device it sees. The memory management calls, if
interleaved with NCCL calls, lead to deadlock on GPUs that do not
support `cudaLaunchCooperativeKernelMultiDevice`.
"""
def __init__(self, enable_predicate, output_dir, *, use_cuda=True):
"""
Args:
enable_predicate (callable[trainer -> bool]): a function which takes a trainer,
and returns whether to enable the profiler.
It will be called once every step, and can be used to select which steps to profile.
output_dir (str): the output directory to dump tracing files.
use_cuda (bool): same as in `torch.autograd.profiler.profile`.
"""
self._enable_predicate = enable_predicate
self._use_cuda = use_cuda
self._output_dir = output_dir
def before_step(self):
if self._enable_predicate(self.trainer):
self._profiler = torch.autograd.profiler.profile(use_cuda=self._use_cuda)
self._profiler.__enter__()
else:
self._profiler = None
def after_step(self):
if self._profiler is None:
return
self._profiler.__exit__(None, None, None)
out_file = os.path.join(
self._output_dir, "profiler-trace-iter{}.json".format(self.trainer.iter)
)
if "://" not in out_file:
self._profiler.export_chrome_trace(out_file)
else:
# Support non-posix filesystems
with tempfile.TemporaryDirectory(prefix="detectron2_profiler") as d:
tmp_file = os.path.join(d, "tmp.json")
self._profiler.export_chrome_trace(tmp_file)
with open(tmp_file) as f:
content = f.read()
with PathManager.open(out_file, "w") as f:
f.write(content)
class EvalHook(HookBase):
"""
Run an evaluation function periodically, and at the end of training.
It is executed every ``eval_period`` iterations and after the last iteration.
"""
def __init__(self, eval_period, eval_function):
"""
Args:
eval_period (int): the period to run `eval_function`.
eval_function (callable): a function which takes no arguments, and
returns a nested dict of evaluation metrics.
Note:
            This hook must be enabled either in all workers or in none of them.
If you would like only certain workers to perform evaluation,
give other workers a no-op function (`eval_function=lambda: None`).
"""
self._period = eval_period
self._func = eval_function
self._done_eval_at_last = False
def _do_eval(self):
results = self._func()
if results:
assert isinstance(
results, dict
), "Eval function must return a dict. Got {} instead.".format(results)
flattened_results = flatten_results_dict(results)
for k, v in flattened_results.items():
try:
v = float(v)
except Exception:
raise ValueError(
"[EvalHook] eval_function should return a nested dict of float. "
"Got '{}: {}' instead.".format(k, v)
)
self.trainer.storage.put_scalars(**flattened_results, smoothing_hint=False)
# Evaluation may take different time among workers.
# A barrier make them start the next iteration together.
comm.synchronize()
def after_step(self):
next_iter = self.trainer.iter + 1
is_final = next_iter == self.trainer.max_iter
if is_final or (self._period > 0 and next_iter % self._period == 0):
self._do_eval()
if is_final:
self._done_eval_at_last = True
def after_train(self):
if not self._done_eval_at_last:
self._do_eval()
# func is likely a closure that holds reference to the trainer
# therefore we clean it to avoid circular reference in the end
del self._func
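# Usage sketch (illustrative): evaluate every 5000 iterations and once more at
# the end of training. The callable must return a (possibly nested) dict of
# scalar metrics, as checked in `_do_eval` above.
#
#     def eval_fn():
#         return {"bbox": {"AP": 0.42, "AP50": 0.61}}
#
#     eval_hook = EvalHook(eval_period=5000, eval_function=eval_fn)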
class PreciseBN(HookBase):
"""
The standard implementation of BatchNorm uses EMA in inference, which is
sometimes suboptimal.
This class computes the true average of statistics rather than the moving average,
    and puts the true averages into every BN layer in the given model.
It is executed every ``period`` iterations and after the last iteration.
"""
def __init__(self, period, model, data_loader, num_iter):
"""
Args:
period (int): the period this hook is run, or 0 to not run during training.
The hook will always run in the end of training.
model (nn.Module): a module whose all BN layers in training mode will be
updated by precise BN.
Note that user is responsible for ensuring the BN layers to be
updated are in training mode when this hook is triggered.
data_loader (iterable): it will produce data to be run by `model(data)`.
num_iter (int): number of iterations used to compute the precise
statistics.
"""
self._logger = logging.getLogger(__name__)
if len(get_bn_modules(model)) == 0:
self._logger.info(
"PreciseBN is disabled because model does not contain BN layers in training mode."
)
self._disabled = True
return
self._model = model
self._data_loader = data_loader
self._num_iter = num_iter
self._period = period
self._disabled = False
self._data_iter = None
def after_step(self):
next_iter = self.trainer.iter + 1
is_final = next_iter == self.trainer.max_iter
if is_final or (self._period > 0 and next_iter % self._period == 0):
self.update_stats()
def update_stats(self):
"""
Update the model with precise statistics. Users can manually call this method.
"""
if self._disabled:
return
if self._data_iter is None:
self._data_iter = iter(self._data_loader)
def data_loader():
for num_iter in itertools.count(1):
if num_iter % 100 == 0:
self._logger.info(
"Running precise-BN ... {}/{} iterations.".format(num_iter, self._num_iter)
)
# This way we can reuse the same iterator
yield next(self._data_iter)
with EventStorage(): # capture events in a new storage to discard them
self._logger.info(
"Running precise-BN for {} iterations... ".format(self._num_iter)
+ "Note that this could produce different statistics every time."
)
update_bn_stats(self._model, data_loader(), self._num_iter)
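# Putting the hooks together (illustrative sketch; the trainer, optimizer,
# scheduler, checkpointer, writers and eval_fn are assumed to be defined by the
# surrounding training script, and `register_hooks` follows the detectron2-style
# trainer API):
#
#     trainer.register_hooks([
#         IterationTimer(),
#         LRScheduler(optimizer, scheduler),
#         PeriodicCheckpointer(checkpointer, period=5000),
#         EvalHook(5000, eval_fn),
#         PeriodicWriter(writers),  # keep writers last so they see all scalars
#     ])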
|
the-stack_0_24913
|
if '__file__' in globals():
import os, sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import numpy as np
from dezero import Variable, Parameter
import dezero.functions as F
import dezero.layers as L
if __name__ == '__main__':
x = Variable(np.array(1.0))
p = Parameter(np.array(2.0))
y = x * p
print(isinstance(p, Parameter))
print(isinstance(x, Parameter))
print(isinstance(y, Parameter))
# class Linear(layers.Layer):
# def __init__(self, in_size, out_size, nobias=False, dtype=np.float32):
# super().__init__()
# I, O = in_size, out_size
# W_data = np.random.randn(I, O).astype(dtype) * np.sqrt(1 / I)
# self.W = Parameter(W_data, name='W')
# if nobias:
# self.b = None
# else:
# self.b = Parameter(np.zeros(0, dtype=dtype), name='b')
# def forward(self, x):
# y = F.linear(x, self.W, self.b)
# return y
# generate nonlinear data samples
np.random.seed(0)
x = np.random.rand(100, 1)
y = np.sin(2 * np.pi * x) + np.random.rand(100, 1)
l1 = L.Linear(10)  # specify the output size
l2 = L.Linear(1)
def predict(x):
y = l1(x)
y = F.sigmoid(y)
y = l2(y)
return y
lr = 0.2
iters = 10000
for i in range(iters):
y_pred = predict(x)
loss = F.mean_squared_error(y, y_pred)
l1.cleargrads()
l2.cleargrads()
loss.backward()
for l in [l1, l2]:
for p in l.params():
p.data -= lr * p.grad.data
if i % 1000 == 0:
print(loss)
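# Optional visual check of the fit (sketch only; assumes matplotlib is available):
#
#     import matplotlib.pyplot as plt
#     t = np.arange(0, 1, .01)[:, np.newaxis]
#     y_plot = predict(t)
#     plt.scatter(x, y, s=10)
#     plt.plot(t, y_plot.data, color='r')
#     plt.show()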
|
the-stack_0_24915
|
#!/usr/bin/python
import subprocess
import os
import sys
import time
import ConfigParser
def started_as_root():
if subprocess.check_output('whoami').strip() == 'root':
return True
return False
def execc(cmds):
return subprocess.check_output(cmds, stderr=subprocess.STDOUT)
def findAP(name, services):
    theLine = None
    for line in services.split("\n"):
        if name in line:
            theLine = line.strip()
            break
    if theLine is None:
        return None
    split = theLine.split(" ")
    return split[-1]
def createConfig(cmSSID, SSID, PSK):
    split = cmSSID.split("_")
    ssid = split[2]
    template = """[service_{1}]
Type = wifi
Name = {1}
Passphrase = {2}
"""
    cmPrefix = "/var/lib/connman"
    path = "{0}/{1}.config".format(cmPrefix, SSID)
    configFile = open(path, "w")
    configFile.write(template.format(cmSSID, SSID, PSK))
    configFile.close()
    print("SSID {0} configured.".format(SSID))
    time.sleep(2)
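# For reference, the generated /var/lib/connman/<SSID>.config ends up looking
# like this (values are examples only):
#
#   [service_MyHomeAP]
#   Type = wifi
#   Name = MyHomeAP
#   Passphrase = secret123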
def configureWifi():
### Try to read the config
try:
config = ConfigParser.ConfigParser()
config.readfp(open('/boot/flash/network.cfg'))
SSID = config.get("default", "SSID")
PSK = config.get("default", "PSK")
except IOError:
print("No /boot/flash/network.cfg. Ignoring")
time.sleep(2)
sys.exit(0)
### Scan wifi
print("");
print("");
print("##############");
print("Scanning wifi");
execc(['connmanctl','scan','wifi'])
output = execc(['connmanctl','services'])
print("Locating SSID {0}".format(SSID));
cmSSID = findAP(SSID, output)
if cmSSID == None:
print("Unable to locate SSID {0}. Could not configure".format(SSID))
time.sleep(2)
sys.exit(0)
createConfig(cmSSID, SSID, PSK)
def main():
if started_as_root():
configureWifi()
else:
current_script = os.path.realpath(__file__)
os.system('sudo -S /usr/bin/python %s' % (current_script))
if __name__ == '__main__':
main()
|
the-stack_0_24916
|
"""
You are given a data structure of employee information, which includes the employee's unique id, his importance value and his direct subordinates' id.
For example, employee 1 is the leader of employee 2, and employee 2 is the leader of employee 3. They have importance value 15, 10 and 5, respectively. Then employee 1 has a data structure like [1, 15, [2]], and employee 2 has [2, 10, [3]], and employee 3 has [3, 5, []]. Note that although employee 3 is also a subordinate of employee 1, the relationship is not direct.
Now given the employee information of a company, and an employee id, you need to return the total importance value of this employee and all his subordinates.
Example 1:
Input: [[1, 5, [2, 3]], [2, 3, []], [3, 3, []]], 1
Output: 11
Explanation:
Employee 1 has importance value 5, and he has two direct subordinates: employee 2 and employee 3. They both have importance value 3. So the total importance value of employee 1 is 5 + 3 + 3 = 11.
Note:
One employee has at most one direct leader and may have several subordinates.
The maximum number of employees won't exceed 2000.
"""
"""
# Employee info
class Employee(object):
def __init__(self, id, importance, subordinates):
# It's the unique id of each node.
# unique id of this employee
self.id = id
# the importance value of this employee
self.importance = importance
# the id of direct subordinates
self.subordinates = subordinates
"""
class Solution(object):
def getImportance(self, employees, id):
"""
:type employees: Employee
:type id: int
:rtype: int
"""
r= 0
if not employees:
return r
tr = {}
for em in employees:
tr[em.id] = (em.importance, em.subordinates)
q = [id]
while q:
c = q[0]
r += tr[c][0]
del q[0]
q.extend(tr[c][1])
return r
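# Quick self-check (illustrative; the real Employee class is supplied by the judge,
# so a minimal stand-in is defined here just to exercise the BFS above):
#
#     class Employee(object):
#         def __init__(self, id, importance, subordinates):
#             self.id = id
#             self.importance = importance
#             self.subordinates = subordinates
#
#     employees = [Employee(1, 5, [2, 3]), Employee(2, 3, []), Employee(3, 3, [])]
#     print(Solution().getImportance(employees, 1))  # -> 11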
|
the-stack_0_24917
|
# Global configuration information used across all the
# translations of documentation.
#
# Import the base theme configuration
from cakephpsphinx.config.all import *
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = '2.x'
# The search index version.
search_version = 'authentication-2'
# The marketing display name for the book.
version_name = ''
# Project name shown in the black header bar
project = 'CakePHP Authentication'
# Other versions that display in the version picker menu.
version_list = [
{'name': '1.x', 'number': '/authentication/1', 'title': '1.x'},
{'name': '2.x', 'number': '/authentication/2', 'title': '2.x', 'current': True},
]
# Languages available.
languages = ['en', 'fr', 'ja']
# The GitHub branch name for this version of the docs
# for edit links to point at.
branch = 'master'
# Current version being built
version = '2.x'
show_root_link = True
repository = 'cakephp/authentication'
source_path = 'docs/'
hide_page_contents = ('search', '404', 'contents')
|
the-stack_0_24918
|
# vim: sw=4:ts=4:et
import atexit
import datetime
import logging
import os, os.path
import secrets
import shutil
import signal
import sys
import threading
import time
import uuid
from multiprocessing import Manager, RLock, Pipe, Process
from unittest import TestCase
from subprocess import Popen, PIPE
import saq
import saq.engine
from saq.analysis import RootAnalysis, _enable_io_tracker, _disable_io_tracker
from saq.constants import *
from saq.crypto import get_aes_key
from saq.database import initialize_database, get_db_connection, use_db, Alert
from saq.engine import Engine
from saq.error import report_exception
from saq.util import storage_dir_from_uuid, workload_storage_dir, abs_path
from saq.splunk import SplunkQueryObject
test_dir = None
UNITTEST_USER_NAME = 'unittest'
UNITTEST_USER_ID = None
# decorators
#
def track_io(target_function):
def wrapper(*args, **kwargs):
try:
_enable_io_tracker()
return target_function(*args, **kwargs)
finally:
_disable_io_tracker()
return wrapper
def force_alerts(target_function):
"""Alerts will be forced ON for the duration of this function."""
def wrapper(*args, **kwargs):
try:
saq.FORCED_ALERTS = True
return target_function(*args, **kwargs)
finally:
saq.FORCED_ALERTS = False
return wrapper
def reset_alerts(target_function):
"""Deletes all alerts in the database."""
def wrapper(*args, **kwargs):
if saq.INSTANCE_TYPE != INSTANCE_TYPE_UNITTEST:
raise RuntimeError("invalid instance type")
with get_db_connection() as db:
c = db.cursor()
c.execute("""DELETE FROM alerts""")
db.commit()
return target_function(*args, **kwargs)
return wrapper
def verify_instance_type(target_function):
"""Raises a RuntimeError if the current INSTANCE_TYPE is not UNITTEST."""
def wrapper(*args, **kwargs):
if saq.INSTANCE_TYPE != INSTANCE_TYPE_UNITTEST:
raise RuntimeError(f"invalid instance type {saq.INSTANCE_TYPE}")
return target_function(*args, **kwargs)
return wrapper
#
# utility functions
def enable_module(engine_name, module_name):
"""Adds a module to be enabled."""
saq.CONFIG[module_name]['enabled'] = 'yes'
saq.CONFIG[engine_name][module_name] = 'yes'
def wait_for(condition, interval=1, timeout=8):
"""Wait for condition to return True, checking every interval seconds until timeout seconds have elapsed.
Return True if condition returned True before timeout was exceeded, False otherwise."""
timeout = datetime.datetime.now() + datetime.timedelta(seconds=timeout)
while datetime.datetime.now() < timeout:
if condition():
return True
time.sleep(interval)
return False
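# Example (illustrative): block for up to 10 seconds until a flag file shows up.
#
#     assert wait_for(lambda: os.path.exists('/tmp/unittest.flag'), interval=0.5, timeout=10)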
# test comms pipe is used to communicate between test process and child processes
test_comms_p = None
test_comms_pid = None
test_comms_c = None
def open_test_comms():
global test_comms_p
global test_comms_pid
global test_comms_c
test_comms_p, test_comms_c = Pipe()
test_comms_pid = os.getpid()
def close_test_comms():
test_comms_p.close()
test_comms_c.close()
def get_test_comm_pipe():
# if we are the original process then we use the "parent" pipe
# otherwise we use the "child" pipe
if os.getpid() == test_comms_pid:
return test_comms_p
return test_comms_c
def send_test_message(message):
get_test_comm_pipe().send(message)
def recv_test_message():
return get_test_comm_pipe().recv()
test_log_manager = None
test_log_sync = None
test_log_messages = None
memory_log_handler = None
class WaitTimedOutError(Exception):
pass
#
# custom logging
class MemoryLogHandler(logging.Handler):
def acquire(self):
test_log_sync.acquire()
def release(self):
test_log_sync.release()
def createLock(self):
pass
def emit(self, record):
try:
test_log_messages.append(record)
except:
sys.stderr.write(str(record) + "\n")
def clear(self):
with test_log_sync:
del test_log_messages[:]
def search(self, condition):
"""Searches and returns all log records for which condition(record) was True. Returns the list of LogRecord that matched."""
result = []
with test_log_sync:
for message in test_log_messages:
if condition(message):
result.append(message)
return result
def wait_for_log_entry(self, callback, timeout=5, count=1):
"""Waits for callback to return True count times before timeout seconds expire.
callback takes a single LogRecord object as the parameter and returns a boolean."""
# XXX this is a hack but on slower machines the tests are timing out because the system is slow
if timeout < 30:
timeout = 30
time_limit = datetime.datetime.now() + datetime.timedelta(seconds=timeout)
current_index = 0
current_count = 0
while True:
with test_log_sync:
while current_index < len(test_log_messages):
if callback(test_log_messages[current_index]):
current_count += 1
if current_count == count:
return True
current_index += 1
if datetime.datetime.now() >= time_limit:
raise WaitTimedOutError()
time.sleep(0.1)
def _atexit_callback():
global test_log_manager
if test_log_manager:
try:
test_log_manager.shutdown()
except Exception as e:
print("ERROR: unable to shutdown test log manager: {}".format(e))
def initialize_unittest_logging():
# ACE is multi-process multi-threaded
# so we use this special logging mechanism to keep a central repository of the log events generated
# that the original process can access
global test_log_manager
global test_log_sync
global test_log_messages
global memory_log_handler
test_log_manager = Manager()
#atexit.register(_atexit_callback)
test_log_sync = RLock()
test_log_messages = test_log_manager.list()
log_format = logging.Formatter(datefmt='%(asctime)s')
if memory_log_handler is not None:
logging.getLogger().removeHandler(memory_log_handler)
memory_log_handler = MemoryLogHandler()
memory_log_handler.setLevel(logging.DEBUG)
memory_log_handler.setFormatter(log_format)
logging.getLogger().addHandler(memory_log_handler)
def wait_for_log_entry(*args, **kwargs):
return memory_log_handler.wait_for_log_entry(*args, **kwargs)
def log_count(text):
"""Returns the number of times the given text is seen in the logs."""
with test_log_sync:
return len([x for x in test_log_messages if text in x.getMessage()])
def wait_for_log_count(text, count, timeout=5):
"""Waits for text to occur count times in the logs before timeout seconds elapse."""
def condition(e):
return text in e.getMessage()
return memory_log_handler.wait_for_log_entry(condition, timeout, count)
def search_log(text):
return memory_log_handler.search(lambda log_record: text in log_record.getMessage())
def search_log_regex(regex):
return memory_log_handler.search(lambda log_record: regex.search(log_record.getMessage()))
def search_log_condition(func):
return memory_log_handler.search(func)
def splunk_query(search_string, *args, **kwargs):
config = saq.CONFIG['splunk']
q = SplunkQueryObject(
uri=config['uri'],
username=config['username'],
password=config['password'],
*args, **kwargs)
result = q.query(search_string)
return q, result
def initialize_test_environment():
global test_dir
# there is no reason to run anything as root
if os.geteuid() == 0:
print("do not run ace as root please")
sys.exit(1)
# where is ACE?
saq_home = '/opt/saq'
if 'SAQ_HOME' in os.environ:
saq_home = os.environ['SAQ_HOME']
# initialize saq
import saq
saq.initialize(
saq_home=saq_home,
config_paths=[],
logging_config_path=os.path.join(saq_home, 'etc', 'unittest_logging.ini'),
args=None,
relative_dir=None)
# set the encryption password to 'ace' for the purposes of testing
from saq.crypto import set_encryption_password
set_encryption_password('ace')
if saq.CONFIG['global']['instance_type'] != INSTANCE_TYPE_UNITTEST:
sys.stderr.write('\n\n *** CRITICAL ERROR *** \n\ninvalid instance_type setting in configuration\n')
sys.exit(1)
# additional logging required for testing
initialize_unittest_logging()
# create a temporary storage directory
test_dir = os.path.join(saq.SAQ_HOME, 'var', 'test')
if os.path.exists(test_dir):
try:
shutil.rmtree(test_dir)
except Exception as e:
logging.error("unable to delete {}: {}".format(test_dir, e))
sys.exit(1)
try:
os.makedirs(test_dir)
except Exception as e:
logging.error("unable to create temp dir {}: {}".format(test_dir, e))
# expected values
EV_TEST_DATE = datetime.datetime(2017, 11, 11, hour=7, minute=36, second=1, microsecond=1)
EV_ROOT_ANALYSIS_TOOL = 'test_tool'
EV_ROOT_ANALYSIS_TOOL_INSTANCE = 'test_tool_instance'
EV_ROOT_ANALYSIS_ALERT_TYPE = 'test_alert'
EV_ROOT_ANALYSIS_DESCRIPTION = 'This is only a test.'
EV_ROOT_ANALYSIS_EVENT_TIME = EV_TEST_DATE
EV_ROOT_ANALYSIS_NAME = 'test'
EV_ROOT_ANALYSIS_UUID = '14ca0ff2-ff7e-4fa1-a375-160dc072ab02'
def create_root_analysis(tool=None, tool_instance=None, alert_type=None, desc=None, event_time=None,
action_counts=None, details=None, name=None, remediation=None, state=None,
uuid=None, location=None, storage_dir=None, company_name=None, company_id=None,
analysis_mode=None, queue=None, instructions=None):
"""Returns a default RootAnalysis object with expected values for testing."""
return RootAnalysis(tool=tool if tool else EV_ROOT_ANALYSIS_TOOL,
tool_instance=tool_instance if tool_instance else EV_ROOT_ANALYSIS_TOOL_INSTANCE,
alert_type=alert_type if alert_type else EV_ROOT_ANALYSIS_ALERT_TYPE,
desc=desc if desc else EV_ROOT_ANALYSIS_DESCRIPTION,
event_time=event_time if event_time else EV_TEST_DATE,
action_counters=action_counts if action_counts else None,
details=details if details else None,
name=name if name else EV_ROOT_ANALYSIS_NAME,
remediation=remediation if remediation else None,
state=state if state else None,
uuid=uuid if uuid else EV_ROOT_ANALYSIS_UUID,
location=location if location else None,
storage_dir=storage_dir if storage_dir else os.path.relpath(
workload_storage_dir(uuid if uuid else EV_ROOT_ANALYSIS_UUID),
start=saq.SAQ_HOME),
company_name=company_name if company_name else None,
company_id=company_id if company_id else None,
analysis_mode=analysis_mode if analysis_mode else 'test_groups',
queue=queue if queue else None,
instructions=instructions if instructions else None)
def add_fp_alert():
root = create_root_analysis(uuid=str(uuid.uuid4()))
root.initialize_storage()
root.add_observable(F_FQDN, 'microsoft.com')
root.add_observable(F_URL, 'https://google.com')
root.add_observable(F_FILE_NAME, 'calc.exe')
root.add_observable(F_HOSTNAME, 'localhost')
root.save()
alert = Alert(storage_dir=root.storage_dir)
alert.load()
alert.disposition = DISPOSITION_FALSE_POSITIVE
alert.disposition_time = datetime.datetime.now()
alert.sync()
class ServerProcess(object):
def __init__(self, args):
self.args = args
self.process = None
self.stdout_reader = None
self.stderr_reader = None
def start(self):
self.process = Popen(self.args, stdout=PIPE, stderr=PIPE, universal_newlines=True)
logging.debug("started process for {} with pid {} args {}".format(
type(self), self.process.pid, ','.join(self.args)))
self.stdout_reader = threading.Thread(target=self.pipe_reader, args=(self.process.stderr, self.handle_stdout))
self.stdout_reader.daemon = True
self.stdout_reader.start()
self.stderr_reader = threading.Thread(target=self.pipe_reader, args=(self.process.stdout, self.handle_stderr))
self.stderr_reader.daemon = True
self.stderr_reader.start()
logging.debug("waiting for {} to start...".format(type(self)))
wait_for(self.startup_condition)
logging.debug("{} started".format(type(self)))
def stop(self):
if self.process is None:
return
logging.debug("stopping process {} with pid {}".format(type(self), self.process.pid))
self.process.terminate()
self.process.wait()
self.process = None
logging.debug("stopping process output readers...")
self.stdout_reader.join()
self.stdout_reader = None
self.stderr_reader.join()
self.stderr_reader = None
def handle_stdout(self, line):
#print("STDOUT {}\t{}".format(type(self), line.strip()))
pass
def handle_stderr(self, line):
if '[ERROR]' in line:
print("detected error in subprocess: {}".format(line.strip()))
#print("STDERR {}\t{}".format(type(self), line.strip()))
def pipe_reader(self, pipe, callback):
for line in pipe:
callback(line.strip())
def started(self):
"""Returns True if this process has actually started."""
return True
class EngineProcess(ServerProcess):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.engine_started = False
def startup_condition(self):
return self.engine_started
def handle_stderr(self, line):
if 'engine started' in line:
self.engine_started = True
ServerProcess.handle_stderr(self, line)
class GUIServer(ServerProcess):
def __init__(self):
super().__init__(['python3', 'saq', '-L', 'etc/console_debug_logging.ini', 'start-gui'])
self.saq_init = 0
def handle_stderr(self, line):
if 'SAQ initialized' in line:
self.saq_init += 1
ServerProcess.handle_stderr(self, line)
def startup_condition(self):
return self.saq_init > 1
class ACEBasicTestCase(TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.api_server_process = None
# a list of temporary test files we've created using self.create_test_file
self.tracked_test_files = []
def setUp(self):
#saq.DUMP_TRACEBACKS = True
self.starting_thread_count = threading.active_count()
self.save_signal_handlers()
initialize_test_environment()
logging.info("TEST: {}".format(self.id()))
self.reset()
import saq
saq.db.remove()
open_test_comms()
memory_log_handler.clear()
self.initialize_test_client()
def initialize_test_client(self):
from aceapi import create_app
self.app = create_app(testing=True)
self.app_context = self.app.test_request_context()
self.app_context.push()
self.client = self.app.test_client()
# Hopefully temporary hack to ensure session is cleared after each test
import aceapi
aceapi.db.session.close()
def tearDown(self):
close_test_comms()
# anything logged at CRITICAL log level will cause the test the fail
#self.assertFalse(memory_log_handler.search(lambda e: e.levelno == logging.CRITICAL))
import saq
saq.DUMP_TRACEBACKS = False
self.stop_api_server()
if saq.engine.CURRENT_ENGINE is not None:
try:
saq.engine.CURRENT_ENGINE.stop()
except:
pass
for file_path in self.tracked_test_files:
if os.path.exists(file_path):
os.remove(file_path)
# clear the database session this test used
saq.db.remove()
self.restore_signal_handlers()
# clear all the registered services
import saq.service
saq.service._registered_services = []
thread_count_difference = threading.active_count() - self.starting_thread_count
if thread_count_difference != 0:
logging.warning(f"thread count difference after {self.id()} is {thread_count_difference}")
for t in threading.enumerate():
logging.warning(f"running thread: {t}")
import saq.database
saq.database.reset_pools()
test_log_manager.shutdown()
def create_test_file(self, file_path='.unittest_test_data', file_content=None, root_analysis=None):
"""Creates a test file and returns the path to the newly created file.
Any file created this way is automatically deleted after the test runs.
If file_path is relative then the file is created relative to SAQ_HOME.
        If root_analysis is a RootAnalysis object then file_path is created relative to the storage_dir of this analysis.
If file_content is not None then it is used as the content of the file.
Otherwise, 1024 random bytes are used."""
        if not os.path.isabs(file_path):
            if root_analysis:
                target_file_path = os.path.join(root_analysis.storage_dir, file_path)
            else:
                target_file_path = abs_path(file_path)
        else:
            # absolute paths are used as-is
            target_file_path = file_path
mode = 'wb'
if isinstance(file_content, str):
mode = 'w'
with open(target_file_path, mode) as fp:
if file_content:
fp.write(file_content)
else:
fp.write(secrets.token_bytes(1024))
self.tracked_test_files.append(target_file_path)
return file_path
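    # Example (illustrative) from inside a test method:
    #
    #     def test_something(self):
    #         path = self.create_test_file('test_data.txt', file_content='hello')
    #         ...  # the file is cleaned up automatically in tearDown()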
def clear_error_reports(self):
"""Clears out any error reports generated by the test."""
try:
shutil.rmtree(os.path.join(saq.DATA_DIR, 'error_reports'))
os.makedirs(os.path.join(saq.DATA_DIR, 'error_reports'))
except Exception as e:
sys.stderr.write("unable to clear error_reports: {}\n".format(e))
def wait_for_log_entry(self, *args, **kwargs):
try:
return wait_for_log_entry(*args, **kwargs)
except WaitTimedOutError:
return False
def wait_for_condition(self, condition, timeout=5, delay=1):
"""Waits for condition to return True.
condition is checked every delay seconds until it return True or timeout seconds have elapsed."""
time_limit = datetime.datetime.now() + datetime.timedelta(seconds=timeout)
while True:
if condition():
return True
if datetime.datetime.now() > time_limit:
raise WaitTimedOutError()
time.sleep(delay)
@verify_instance_type
def reset(self):
"""Resets everything back to the default state."""
self.reset_config()
self.reset_brocess()
self.reset_cloudphish()
self.reset_correlation()
self.reset_email_archive()
self.reset_crawlphish()
self.reset_log_exports()
self.reset_var_dir()
self.clear_error_reports()
# re-enable encryption in case we disabled it
#saq.ENCRYPTION_PASSWORD = get_aes_key('password')
@verify_instance_type
def reset_var_dir(self):
# clears out the var directory
shutil.rmtree(os.path.join(saq.DATA_DIR, 'var'))
os.mkdir(os.path.join(saq.DATA_DIR, 'var'))
os.mkdir(os.path.join(saq.DATA_DIR, 'var', 'tmp'))
os.mkdir(os.path.join(saq.DATA_DIR, 'var', 'daemon'))
os.mkdir(os.path.join(saq.DATA_DIR, 'var', 'services'))
@verify_instance_type
def reset_log_exports(self):
# reset splunk export logs
splunk_log_dir = os.path.join(saq.DATA_DIR, saq.CONFIG['splunk_logging']['splunk_log_dir'])
if os.path.isdir(splunk_log_dir):
shutil.rmtree(splunk_log_dir)
os.mkdir(splunk_log_dir)
# reset es export logs
es_log_dir = os.path.join(saq.DATA_DIR, saq.CONFIG['elk_logging']['elk_log_dir'])
if os.path.isdir(es_log_dir):
shutil.rmtree(es_log_dir)
os.mkdir(es_log_dir)
@verify_instance_type
def reset_crawlphish(self):
self.whitelist_path = saq.CONFIG['analysis_module_crawlphish']['whitelist_path'] \
= os.path.join('etc', 'crawlphish.unittest.whitelist')
self.regex_path = saq.CONFIG['analysis_module_crawlphish']['regex_path'] \
= os.path.join('etc', 'crawlphish.unittest.regex')
self.blacklist_path = saq.CONFIG['analysis_module_crawlphish']['blacklist_path'] \
= os.path.join('etc', 'crawlphish.unittest.blacklist')
if os.path.exists(self.whitelist_path):
os.remove(self.whitelist_path)
if os.path.exists(self.regex_path):
os.remove(self.regex_path)
if os.path.exists(self.blacklist_path):
os.remove(self.blacklist_path)
with open(self.blacklist_path, 'w') as fp:
fp.write('10.0.0.0/8\n')
fp.write('127.0.0.1\n')
fp.write('localhost.local\n')
with open(self.regex_path, 'w') as fp:
fp.write('\.(pdf|zip|scr|js|cmd|bat|ps1|doc|docx|xls|xlsx|ppt|pptx|exe|vbs|vbe|jse|wsh|cpl|rar|ace|hta)$\n')
with open(self.whitelist_path, 'w') as fp:
fp.write('anonfile.xyz\n')
@verify_instance_type
def reset_config(self):
"""Resets saq.CONFIG."""
saq.load_configuration()
@verify_instance_type
@use_db(name='hal9000')
def reset_hal9000(self, db, c):
c.execute("DELETE FROM observables")
db.commit()
@verify_instance_type
@use_db(name='brocess')
def reset_brocess(self, db, c):
# clear the brocess db
c.execute("""DELETE FROM httplog""")
c.execute("""DELETE FROM smtplog""")
db.commit()
        # TODO instead of using hard-coded values pull the limits from the config
c.execute("""INSERT INTO httplog ( host, numconnections, firstconnectdate )
VALUES ( 'local', 1000, UNIX_TIMESTAMP(NOW()) ),
( 'xyz', 1000, UNIX_TIMESTAMP(NOW()) ),
( 'test1.local', 70, UNIX_TIMESTAMP(NOW()) ),
( 'test2.local', 69, UNIX_TIMESTAMP(NOW()) )""")
db.commit()
@verify_instance_type
@use_db
def reset_cloudphish(self, db, c):
# clear cloudphish db
c.execute("""DELETE FROM cloudphish_analysis_results""")
c.execute("""DELETE FROM cloudphish_content_metadata""")
db.commit()
# clear cloudphish engine and module cache
for cache_dir in [ saq.CONFIG['cloudphish']['cache_dir'] ]:
if os.path.isdir(cache_dir):
shutil.rmtree(cache_dir)
os.makedirs(cache_dir)
@verify_instance_type
@use_db
def reset_correlation(self, db, c):
global UNITTEST_USER_ID
import saq
data_subdir = os.path.join(saq.CONFIG['global']['data_dir'], saq.SAQ_NODE)
failed_alert_subdir = os.path.join(saq.SAQ_HOME, '.saq_alerts')
subdirs = [ data_subdir, failed_alert_subdir ]
if saq.CONFIG['service_engine']['work_dir']:
subdirs.append(saq.CONFIG['service_engine']['work_dir'])
for subdir in subdirs:
if os.path.isdir(subdir):
try:
shutil.rmtree(subdir)
os.mkdir(subdir)
except Exception as e:
logging.error(f"unable to clear {subdir}: {e}")
c.execute("DELETE FROM alerts")
c.execute("DELETE FROM workload")
c.execute("DELETE FROM observables")
c.execute("DELETE FROM tags")
c.execute("INSERT INTO tags ( `id`, `name` ) VALUES ( 1, 'whitelisted' )")
c.execute("DELETE FROM events")
c.execute("DELETE FROM remediation")
c.execute("DELETE FROM messages")
c.execute("DELETE FROM persistence")
c.execute("DELETE FROM persistence_source")
c.execute("DELETE FROM company WHERE name != 'default'")
c.execute("DELETE FROM nodes WHERE is_local = 1")
c.execute("UPDATE nodes SET is_primary = 0")
c.execute("DELETE FROM locks")
c.execute("DELETE FROM delayed_analysis")
c.execute("DELETE FROM users")
c.execute("DELETE FROM malware")
c.execute("DELETE FROM `config`")
c.execute("DELETE FROM incoming_workload")
c.execute("DELETE FROM work_distribution")
from app.models import User
u = User()
u.username = 'unittest'
u.email = 'unittest@localhost'
u.password = 'unittest'
c.execute("""
INSERT INTO users ( username, email, password_hash ) VALUES ( %s, %s, %s )""",
(u.username, u.email, u.password_hash))
UNITTEST_USER_ID = c.lastrowid
logging.debug(f"got user id {UNITTEST_USER_ID} for unittest user")
db.commit()
import saq.database
saq.database.initialize_automation_user()
@verify_instance_type
def reset_email_archive(self):
import socket
archive_subdir = os.path.join(saq.DATA_DIR, saq.CONFIG['analysis_module_email_archiver']['archive_dir'],
socket.gethostname().lower())
if os.path.exists(archive_subdir):
try:
shutil.rmtree(archive_subdir)
os.mkdir(archive_subdir)
except Exception as e:
logging.error("unable to clear {}: {}".format(archive_subdir, e))
with get_db_connection('email_archive') as db:
c = db.cursor()
c.execute("DELETE FROM archive")
db.commit()
def start_api_server(self, remote_host=None, ssl_verification=None, listen_address=None, listen_port=None, ssl_cert=None, ssl_key=None):
"""Starts the API server as a separate process."""
self.api_server_process = Process(target=self.execute_api_server, args=(listen_address, listen_port, ssl_cert, ssl_key))
self.api_server_process.start()
if remote_host is None:
remote_host = saq.API_PREFIX
if ssl_verification is None:
ssl_verification = saq.CONFIG['SSL']['ca_chain_path']
import ace_api
result = None
errors = []
for x in range(5):
try:
result = ace_api.ping(remote_host=remote_host, ssl_verification=ssl_verification)
break
except Exception as e:
errors.append(str(e))
time.sleep(1)
if result is None:
for error in errors:
logging.error(error)
self.fail("unable to start api server")
def execute_api_server(self, listen_address=None, listen_port=None, ssl_cert=None, ssl_key=None):
# https://gist.github.com/rduplain/1705072
        # this is a bit weird because I want the URLs to be the same as they
        # are configured for apache, where they all start with /api
import aceapi
from saq.database import initialize_database
app = aceapi.create_app(testing=True)
from werkzeug.serving import run_simple
from werkzeug.wsgi import DispatcherMiddleware
from flask import Flask
app.config['DEBUG'] = True
app.config['APPLICATION_ROOT'] = '/api'
application = DispatcherMiddleware(Flask('dummy_app'), {
app.config['APPLICATION_ROOT']: app,
})
if listen_address is None:
listen_address = saq.CONFIG.get('api', 'listen_address')
if listen_port is None:
listen_port = saq.CONFIG.getint('api', 'listen_port')
ssl_context = (
saq.CONFIG.get('api', 'ssl_cert') if ssl_cert is None else ssl_cert,
saq.CONFIG.get('api', 'ssl_key') if ssl_key is None else ssl_key )
initialize_database()
saq.db = aceapi.db.session
logging.info(f"starting api server on {listen_address} port {listen_port}")
run_simple(listen_address, listen_port, application, ssl_context=ssl_context, use_reloader=False)
def stop_api_server(self):
"""Stops the API server if it's running."""
if self.api_server_process is None:
return
import signal
os.kill(self.api_server_process.pid, signal.SIGKILL)
self.api_server_process.join()
self.api_server_process = None
def save_signal_handlers(self):
self.sigterm_handler = signal.getsignal(signal.SIGTERM)
self.sigint_handler = signal.getsignal(signal.SIGINT)
self.sighup_handler = signal.getsignal(signal.SIGHUP)
def restore_signal_handlers(self):
signal.signal(signal.SIGTERM, self.sigterm_handler)
signal.signal(signal.SIGINT, self.sigint_handler)
signal.signal(signal.SIGHUP, self.sighup_handler)
class ACEEngineTestCase(ACEBasicTestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# if we create an engine using self.create_engine() then we track it here
self.tracked_engine = None
self.server_processes = {} # key = name, value ServerProcess
def start_gui_server(self):
self.server_processes['gui'] = GUIServer()
self.server_processes['gui'].start()
def start_cloudphish_server(self):
self.server_processes['cloudphish'] = CloudphishServer()
self.server_processes['cloudphish'].start()
def stop_tracked_engine(self):
if self.tracked_engine:
try:
self.tracked_engine.stop()
self.wait_engine(self.tracked_engine)
except Exception as e:
logging.error("unable to stop tracked engine {}: {}".format(self.tracked_engine, e))
report_exception()
finally:
self.tracked_engine = None
def setUp(self, *args, **kwargs):
super().setUp(*args, **kwargs)
self.disable_all_modules()
def tearDown(self):
ACEBasicTestCase.tearDown(self)
self.stop_tracked_engine()
for key in self.server_processes.keys():
self.server_processes[key].stop()
#if saq.engine.CURRENT_ENGINE:
#try:
#saq.engine.CURRENT_ENGINE.stop()
#except:
#pass
def execute_engine_test(self, engine):
try:
engine.start()
engine.wait()
except KeyboardInterrupt:
engine.stop()
engine.wait()
def create_engine(self, cls, *args, **kwargs):
try:
self.tracked_engine = cls(*args, **kwargs)
return self.tracked_engine
except Exception as e:
logging.error("unable to create engine {}: {}".format(cls, e))
report_exception()
self.fail("unable to create engine {}: {}".format(cls, e))
def start_engine(self, engine):
try:
engine.start()
except Exception as e:
engine.stop()
engine.wait()
self.fail("engine failure: {}".format(e))
def wait_engine(self, engine):
try:
engine.wait()
except Exception as e:
engine.controlled_stop()
engine.wait()
self.fail("engine failure: {}".format(e))
def kill_engine(self, engine):
try:
engine.stop()
engine.wait()
except Exception as e:
self.fail("engine failure: {}".format(e))
def disable_all_modules(self):
"""Disables all the modules specified in the configuration file. Requires a @reset_config."""
for key in saq.CONFIG.keys():
if key.startswith('analysis_module_'):
saq.CONFIG[key]['enabled'] = 'no'
#if key.startswith('analysis_mode_'):
#delete_list = []
#for value in saq.CONFIG[key].keys():
#if value.startswith('analysis_module_'):
#delete_list.append(value)
#for analysis_module in delete_list:
#logging.debug(f"deleting {analysis_module} from {key}")
#del saq.CONFIG[key][analysis_module]
#saq.CONFIG[key]['module_groups'] = ''
logging.debug("disabled all modules")
class CloudphishServer(EngineProcess):
def __init__(self):
super().__init__(['python3', 'saq', '-L', 'etc/console_debug_logging.ini', '--start', 'cloudphish'])
class ACEModuleTestCase(ACEEngineTestCase):
pass
class TestEngine(Engine):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.disable_alerting()
def set_cleanup(self, mode, value):
saq.CONFIG[f'analysis_mode_{mode}']['cleanup'] = 'yes' if value else 'no'
logging.debug(f"set cleanup to {value} for analysis mode {mode}")
|
the-stack_0_24920
|
# Copyright 2016-2022 Swiss National Supercomputing Centre (CSCS/ETH Zurich)
# ReFrame Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
import inspect
import itertools
import json
import os
import shlex
import socket
import sys
import time
import traceback
import reframe
import reframe.core.config as config
import reframe.core.exceptions as errors
import reframe.core.logging as logging
import reframe.core.runtime as runtime
import reframe.core.warnings as warnings
import reframe.frontend.argparse as argparse
import reframe.frontend.autodetect as autodetect
import reframe.frontend.ci as ci
import reframe.frontend.dependencies as dependencies
import reframe.frontend.filters as filters
import reframe.frontend.runreport as runreport
import reframe.utility.jsonext as jsonext
import reframe.utility.osext as osext
import reframe.utility.typecheck as typ
from reframe.frontend.distribute import distribute_tests, getallnodes
from reframe.frontend.executors.policies import (SerialExecutionPolicy,
AsynchronousExecutionPolicy)
from reframe.frontend.executors import Runner, generate_testcases
from reframe.frontend.loader import RegressionCheckLoader
from reframe.frontend.printer import PrettyPrinter
def format_env(envvars):
ret = '[ReFrame Environment]\n'
notset = '<not set>'
envvars = [*envvars, 'RFM_INSTALL_PREFIX']
ret += '\n'.join(sorted(f' {e}={os.getenv(e, notset)}' for e in envvars))
return ret
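# Usage sketch (added illustration, not part of the original module; values
# depend on the caller's environment):
#
#     print(format_env(['RFM_COLORIZE', 'RFM_VERBOSE']))
#
# prints a '[ReFrame Environment]' header followed by one sorted
# '  NAME=value' line per variable (plus RFM_INSTALL_PREFIX), with
# '<not set>' for anything missing from the environment.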
def list_checks(testcases, printer, detailed=False, concretized=False):
printer.info('[List of matched checks]')
unique_checks = set()
def dep_lines(u, *, prefix, depth=0, lines=None, printed=None):
if lines is None:
lines = []
if printed is None:
printed = set(unique_checks)
adj = u.deps
for v in adj:
if concretized or (not concretized and
v.check.unique_name not in printed):
dep_lines(v, prefix=prefix + 2*' ', depth=depth+1,
lines=lines, printed=printed)
printed.add(v.check.unique_name)
if not v.check.is_fixture():
unique_checks.add(v.check.unique_name)
if depth:
tc_info = ''
details = ''
if concretized:
tc_info = f' @{u.partition.fullname}+{u.environ.name}'
location = inspect.getfile(type(u.check))
if detailed:
details = f' [id: {u.check.unique_name}, file: {location!r}]'
lines.append(
f'{prefix}^{u.check.display_name}{tc_info}{details}'
)
return lines
# We need the leaf test cases to be printed at the leftmost
leaf_testcases = list(t for t in testcases if t.in_degree == 0)
for t in leaf_testcases:
tc_info = ''
details = ''
if concretized:
tc_info = f' @{t.partition.fullname}+{t.environ.name}'
location = inspect.getfile(type(t.check))
if detailed:
details = f' [id: {t.check.unique_name}, file: {location!r}]'
# if not concretized and t.check.name not in unique_checks:
if concretized or (not concretized and
t.check.unique_name not in unique_checks):
printer.info(f'- {t.check.display_name}{tc_info}{details}')
if not t.check.is_fixture():
unique_checks.add(t.check.unique_name)
for l in reversed(dep_lines(t, prefix=' ')):
printer.info(l)
if concretized:
printer.info(f'Concretized {len(testcases)} test case(s)\n')
else:
printer.info(f'Found {len(unique_checks)} check(s)\n')
def describe_checks(testcases, printer):
records = []
unique_names = set()
for tc in testcases:
if tc.check.is_fixture():
continue
if tc.check.name not in unique_names:
unique_names.add(tc.check.name)
rec = json.loads(jsonext.dumps(tc.check))
# Now manipulate the record to be more user-friendly
#
# 1. Add other fields that are relevant for users
# 2. Remove all private fields
rec['unique_name'] = tc.check.unique_name
rec['display_name'] = tc.check.display_name
rec['pipeline_hooks'] = {}
rec['perf_variables'] = list(rec['perf_variables'].keys())
rec['prefix'] = tc.check.prefix
rec['variant_num'] = tc.check.variant_num
for stage, hooks in tc.check.pipeline_hooks().items():
for hk in hooks:
rec['pipeline_hooks'].setdefault(stage, [])
rec['pipeline_hooks'][stage].append(hk.__name__)
for attr in list(rec.keys()):
if attr == '__rfm_class__':
rec['@class'] = rec[attr]
del rec[attr]
elif attr == '__rfm_file__':
rec['@file'] = rec[attr]
del rec[attr]
elif attr.startswith('_'):
del rec[attr]
# List all required variables
required = []
for var in tc.check._rfm_var_space:
if not tc.check._rfm_var_space[var].is_defined():
required.append(var)
rec['@required'] = required
records.append(dict(sorted(rec.items())))
printer.info(jsonext.dumps(records, indent=2))
def list_tags(testcases, printer):
printer.info('[List of unique tags]')
tags = set()
tags = tags.union(*(t.check.tags for t in testcases))
printer.info(', '.join(f'{t!r}' for t in sorted(tags)))
printer.info(f'Found {len(tags)} tag(s)\n')
def logfiles_message():
log_files = logging.log_files()
msg = 'Log file(s) saved in '
if not log_files:
msg += '<no log file was generated>'
else:
msg += f'{", ".join(repr(f) for f in log_files)}'
return msg
def calc_verbosity(site_config, quiesce):
curr_verbosity = site_config.get('general/0/verbose')
return curr_verbosity - quiesce
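# Added note: the effective verbosity is the configured 'general/verbose'
# level minus the number of '-q' flags, so e.g. a configured level of 1 and
# two '-q' flags yield calc_verbosity(site_config, 2) == -1, i.e. quieter
# than the default output.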
def main():
# Setup command line options
argparser = argparse.ArgumentParser()
output_options = argparser.add_argument_group(
'Options controlling ReFrame output'
)
locate_options = argparser.add_argument_group(
'Options for discovering checks'
)
select_options = argparser.add_argument_group(
'Options for selecting checks'
)
action_options = argparser.add_argument_group(
'Options controlling actions'
)
run_options = argparser.add_argument_group(
'Options controlling the execution of checks'
)
env_options = argparser.add_argument_group(
'Options controlling the ReFrame environment'
)
misc_options = argparser.add_argument_group('Miscellaneous options')
# Output directory options
output_options.add_argument(
'--dont-restage', action='store_false', dest='clean_stagedir',
help='Reuse the test stage directory',
envvar='RFM_CLEAN_STAGEDIR', configvar='general/clean_stagedir'
)
output_options.add_argument(
'--keep-stage-files', action='store_true',
help='Keep stage directories even for successful checks',
envvar='RFM_KEEP_STAGE_FILES', configvar='general/keep_stage_files'
)
output_options.add_argument(
'-o', '--output', action='store', metavar='DIR',
help='Set output directory prefix to DIR',
envvar='RFM_OUTPUT_DIR', configvar='systems/outputdir'
)
output_options.add_argument(
'--perflogdir', action='store', metavar='DIR',
help=('Set performance log data directory prefix '
'(relevant only to the filelog log handler)'),
envvar='RFM_PERFLOG_DIR',
configvar='logging/handlers_perflog/filelog_basedir'
)
output_options.add_argument(
'--prefix', action='store', metavar='DIR',
help='Set general directory prefix to DIR',
envvar='RFM_PREFIX', configvar='systems/prefix'
)
output_options.add_argument(
'--report-file', action='store', metavar='FILE',
help="Store JSON run report in FILE",
envvar='RFM_REPORT_FILE',
configvar='general/report_file'
)
output_options.add_argument(
'--report-junit', action='store', metavar='FILE',
help="Store a JUnit report in FILE",
envvar='RFM_REPORT_JUNIT',
configvar='general/report_junit'
)
output_options.add_argument(
'-s', '--stage', action='store', metavar='DIR',
help='Set stage directory prefix to DIR',
envvar='RFM_STAGE_DIR', configvar='systems/stagedir'
)
output_options.add_argument(
'--save-log-files', action='store_true', default=False,
help='Save ReFrame log files to the output directory',
envvar='RFM_SAVE_LOG_FILES', configvar='general/save_log_files'
)
output_options.add_argument(
'--timestamp', action='store', nargs='?', const='%FT%T',
metavar='TIMEFMT',
help=('Append a timestamp to the output and stage directory prefixes '
'(default: "%%FT%%T")'),
envvar='RFM_TIMESTAMP_DIRS', configvar='general/timestamp_dirs'
)
# Check discovery options
locate_options.add_argument(
'-c', '--checkpath', action='append', metavar='PATH',
help="Add PATH to the check search path list",
envvar='RFM_CHECK_SEARCH_PATH :', configvar='general/check_search_path'
)
locate_options.add_argument(
'--ignore-check-conflicts', action='store_true',
help=('Skip checks with conflicting names '
'(this option is deprecated and has no effect)'),
envvar='RFM_IGNORE_CHECK_CONFLICTS',
configvar='general/ignore_check_conflicts'
)
locate_options.add_argument(
'-R', '--recursive', action='store_true',
help='Search for checks in the search path recursively',
envvar='RFM_CHECK_SEARCH_RECURSIVE',
configvar='general/check_search_recursive'
)
# Select options
select_options.add_argument(
'--cpu-only', action='store_true',
help='Select only CPU checks'
)
select_options.add_argument(
'--failed', action='store_true',
help="Select failed test cases (only when '--restore-session' is used)"
)
select_options.add_argument(
'--gpu-only', action='store_true',
help='Select only GPU checks'
)
select_options.add_argument(
'--maintainer', action='append', dest='maintainers', default=[],
metavar='PATTERN',
help='Select checks with at least one maintainer matching PATTERN'
)
select_options.add_argument(
'-n', '--name', action='append', dest='names', default=[],
metavar='PATTERN', help='Select checks whose name matches PATTERN'
)
# FIXME: The following is the only selection option that has an associated
# (undocumented) configuration variable. This is to support pruning of the
# partition environments as the runtime is created, similarly to how the
# system partitions are treated. Currently, this facilitates the
# implementation of fixtures, but we should reconsider it: see discussion
# in https://github.com/eth-cscs/reframe/issues/2245
select_options.add_argument(
'-p', '--prgenv', action='append', default=[r'.*'], metavar='PATTERN',
configvar='general/valid_env_names',
help=('Select checks with at least one '
'programming environment matching PATTERN')
)
select_options.add_argument(
'-T', '--exclude-tag', action='append', dest='exclude_tags',
metavar='PATTERN', default=[],
help='Exclude checks whose tag matches PATTERN'
)
select_options.add_argument(
'-t', '--tag', action='append', dest='tags', metavar='PATTERN',
default=[],
help='Select checks with at least one tag matching PATTERN'
)
select_options.add_argument(
'-x', '--exclude', action='append', dest='exclude_names',
metavar='PATTERN', default=[],
help='Exclude checks whose name matches PATTERN'
)
# Action options
action_options.add_argument(
'--ci-generate', action='store', metavar='FILE',
help=('Generate into FILE a Gitlab CI pipeline '
'for the selected tests and exit'),
)
action_options.add_argument(
'--describe', action='store_true',
help='Give full details on the selected tests'
)
action_options.add_argument(
'-L', '--list-detailed', nargs='?', const='T', choices=['C', 'T'],
help=('List the selected tests (T) or the concretized test cases (C) '
'providing more details')
)
action_options.add_argument(
'-l', '--list', nargs='?', const='T', choices=['C', 'T'],
help='List the selected tests (T) or the concretized test cases (C)'
)
action_options.add_argument(
'--list-tags', action='store_true',
help='List the unique tags found in the selected tests and exit'
)
action_options.add_argument(
'-r', '--run', action='store_true',
help='Run the selected checks'
)
# Run options
run_options.add_argument(
'--disable-hook', action='append', metavar='NAME', dest='hooks',
default=[], help='Disable a pipeline hook for this run'
)
run_options.add_argument(
'--distribute', action='store', metavar='{all|STATE}',
nargs='?', const='idle',
        help=('Distribute the selected single-node jobs on every node '
              'that is in STATE (default: "idle")')
)
run_options.add_argument(
'--exec-policy', metavar='POLICY', action='store',
choices=['async', 'serial'], default='async',
help='Set the execution policy of ReFrame (default: "async")'
)
run_options.add_argument(
'--flex-alloc-nodes', action='store',
dest='flex_alloc_nodes', metavar='{all|STATE|NUM}', default=None,
help='Set strategy for the flexible node allocation (default: "idle").'
)
run_options.add_argument(
'--force-local', action='store_true',
help='Force local execution of checks'
)
run_options.add_argument(
'-J', '--job-option', action='append', metavar='OPT',
dest='job_options', default=[],
help='Pass option OPT to job scheduler'
)
run_options.add_argument(
'--max-retries', metavar='NUM', action='store', default=0,
help='Set the maximum number of times a failed regression test '
'may be retried (default: 0)', type=int
)
run_options.add_argument(
'--maxfail', metavar='NUM', action='store', default=sys.maxsize,
help='Exit after first NUM failures', type=int
)
run_options.add_argument(
'--mode', action='store', help='Execution mode to use'
)
run_options.add_argument(
'--restore-session', action='store', nargs='?', const='',
metavar='REPORT',
help='Restore a testing session from REPORT file'
)
run_options.add_argument(
'-S', '--setvar', action='append', metavar='[TEST.]VAR=VAL',
dest='vars', default=[],
help=('Set test variable VAR to VAL in all tests '
'or optionally in TEST only')
)
run_options.add_argument(
'--skip-performance-check', action='store_true',
help='Skip performance checking'
)
run_options.add_argument(
'--skip-prgenv-check', action='store_true',
help='Skip programming environment check'
)
run_options.add_argument(
'--skip-sanity-check', action='store_true',
help='Skip sanity checking'
)
run_options.add_argument(
'--skip-system-check', action='store_true',
help='Skip system check'
)
run_options.add_argument(
'--strict', action='store_true',
help='Enforce strict performance checking'
)
# Environment options
env_options.add_argument(
'-M', '--map-module', action='append', metavar='MAPPING',
dest='module_mappings', default=[],
help='Add a module mapping',
envvar='RFM_MODULE_MAPPINGS ,', configvar='general/module_mappings'
)
env_options.add_argument(
'-m', '--module', action='append', default=[],
metavar='MOD', dest='user_modules',
help='Load module MOD before running any regression check',
envvar='RFM_USER_MODULES ,', configvar='general/user_modules'
)
env_options.add_argument(
'--module-mappings', action='store', metavar='FILE',
dest='module_map_file',
help='Load module mappings from FILE',
envvar='RFM_MODULE_MAP_FILE', configvar='general/module_map_file'
)
env_options.add_argument(
'--module-path', action='append', metavar='PATH',
dest='module_paths', default=[],
help='(Un)use module path PATH before running any regression check',
)
env_options.add_argument(
'--non-default-craype', action='store_true',
help='Test a non-default Cray Programming Environment',
envvar='RFM_NON_DEFAULT_CRAYPE', configvar='general/non_default_craype'
)
env_options.add_argument(
'--purge-env', action='store_true', dest='purge_env', default=False,
help='Unload all modules before running any regression check',
envvar='RFM_PURGE_ENVIRONMENT', configvar='general/purge_environment'
)
env_options.add_argument(
'-u', '--unload-module', action='append', metavar='MOD',
dest='unload_modules', default=[],
help='Unload module MOD before running any regression check',
envvar='RFM_UNLOAD_MODULES ,', configvar='general/unload_modules'
)
# Miscellaneous options
misc_options.add_argument(
'-C', '--config-file', action='store',
dest='config_file', metavar='FILE',
help='Set configuration file',
envvar='RFM_CONFIG_FILE'
)
misc_options.add_argument(
'--detect-host-topology', action='store', nargs='?', const='-',
help='Detect the local host topology and exit'
)
misc_options.add_argument(
'--failure-stats', action='store_true', help='Print failure statistics'
)
misc_options.add_argument(
'--nocolor', action='store_false', dest='colorize',
help='Disable coloring of output',
envvar='RFM_COLORIZE', configvar='general/colorize'
)
misc_options.add_argument(
'--performance-report', action='store_true',
help='Print a report for performance tests'
)
misc_options.add_argument(
'--show-config', action='store', nargs='?', const='all',
metavar='PARAM',
help='Print the value of configuration parameter PARAM and exit'
)
misc_options.add_argument(
'--system', action='store', help='Load configuration for SYSTEM',
envvar='RFM_SYSTEM'
)
misc_options.add_argument(
'--upgrade-config-file', action='store', metavar='OLD[:NEW]',
help='Upgrade ReFrame 2.x configuration file to ReFrame 3.x syntax'
)
misc_options.add_argument(
'-V', '--version', action='version', version=osext.reframe_version()
)
misc_options.add_argument(
'-v', '--verbose', action='count',
help='Increase verbosity level of output',
envvar='RFM_VERBOSE', configvar='general/verbose'
)
misc_options.add_argument(
'-q', '--quiet', action='count', default=0,
help='Decrease verbosity level of output',
)
# Options not associated with command-line arguments
argparser.add_argument(
dest='autodetect_fqdn',
envvar='RFM_AUTODETECT_FQDN',
action='store',
default=True,
type=typ.Bool,
help='Use FQDN as host name'
)
argparser.add_argument(
dest='autodetect_method',
envvar='RFM_AUTODETECT_METHOD',
action='store',
default='hostname',
help='Method to detect the system'
)
argparser.add_argument(
dest='autodetect_xthostname',
envvar='RFM_AUTODETECT_XTHOSTNAME',
action='store',
default=True,
type=typ.Bool,
help="Use Cray's xthostname file to find the host name"
)
argparser.add_argument(
dest='git_timeout',
envvar='RFM_GIT_TIMEOUT',
configvar='general/git_timeout',
action='store',
help=('Timeout in seconds when checking if the url is a '
'valid repository.'),
type=float
)
argparser.add_argument(
dest='graylog_server',
envvar='RFM_GRAYLOG_ADDRESS',
configvar='logging/handlers_perflog/graylog_address',
help='Graylog server address'
)
argparser.add_argument(
dest='httpjson_url',
envvar='RFM_HTTPJSON_URL',
configvar='logging/handlers_perflog/httpjson_url',
help='URL of HTTP server accepting JSON logs'
)
argparser.add_argument(
dest='ignore_reqnodenotavail',
envvar='RFM_IGNORE_REQNODENOTAVAIL',
configvar='schedulers/ignore_reqnodenotavail',
action='store_true',
        help='Ignore the ReqNodeNotAvail Slurm state'
)
argparser.add_argument(
dest='compact_test_names',
envvar='RFM_COMPACT_TEST_NAMES',
configvar='general/compact_test_names',
action='store_true',
help='Use a compact test naming scheme'
)
argparser.add_argument(
dest='dump_pipeline_progress',
envvar='RFM_DUMP_PIPELINE_PROGRESS',
configvar='general/dump_pipeline_progress',
action='store_true',
help='Dump progress information for the async execution'
)
argparser.add_argument(
dest='pipeline_timeout',
envvar='RFM_PIPELINE_TIMEOUT',
configvar='general/pipeline_timeout',
action='store',
help='Timeout for advancing the pipeline',
type=float
)
argparser.add_argument(
dest='remote_detect',
envvar='RFM_REMOTE_DETECT',
configvar='general/remote_detect',
action='store_true',
help='Detect remote system topology'
)
argparser.add_argument(
dest='remote_workdir',
envvar='RFM_REMOTE_WORKDIR',
configvar='general/remote_workdir',
action='store',
help='Working directory for launching ReFrame remotely'
)
argparser.add_argument(
dest='resolve_module_conflicts',
envvar='RFM_RESOLVE_MODULE_CONFLICTS',
configvar='general/resolve_module_conflicts',
action='store_true',
help='Resolve module conflicts automatically'
)
argparser.add_argument(
dest='syslog_address',
envvar='RFM_SYSLOG_ADDRESS',
configvar='logging/handlers_perflog/syslog_address',
help='Syslog server address'
)
argparser.add_argument(
dest='trap_job_errors',
envvar='RFM_TRAP_JOB_ERRORS',
configvar='general/trap_job_errors',
action='store_true',
help='Trap job errors in job scripts and fail tests automatically'
)
argparser.add_argument(
dest='use_login_shell',
envvar='RFM_USE_LOGIN_SHELL',
configvar='general/use_login_shell',
action='store_true',
help='Use a login shell for job scripts'
)
def restrict_logging():
'''Restrict logging to errors only.
        This is done when options that generate JSON output are passed, so
        that the JSON output is not polluted with other logging output.
:returns: :obj:`True` if the logging was restricted, :obj:`False`
otherwise.
'''
if (options.show_config or
options.detect_host_topology or options.describe):
logging.getlogger().setLevel(logging.ERROR)
return True
else:
return False
# Parse command line
options = argparser.parse_args()
if len(sys.argv) == 1:
argparser.print_help()
sys.exit(1)
# First configure logging with our generic configuration so as to be able
# to print pretty messages; logging will be reconfigured by user's
# configuration later
site_config = config.load_config(
os.path.join(reframe.INSTALL_PREFIX, 'reframe/core/settings.py')
)
site_config.select_subconfig('generic')
options.update_config(site_config)
logging.configure_logging(site_config)
printer = PrettyPrinter()
printer.colorize = site_config.get('general/0/colorize')
if not restrict_logging():
printer.adjust_verbosity(calc_verbosity(site_config, options.quiet))
if os.getenv('RFM_GRAYLOG_SERVER'):
printer.warning(
'RFM_GRAYLOG_SERVER environment variable is deprecated; '
'please use RFM_GRAYLOG_ADDRESS instead'
)
os.environ['RFM_GRAYLOG_ADDRESS'] = os.getenv('RFM_GRAYLOG_SERVER')
if options.upgrade_config_file is not None:
old_config, *new_config = options.upgrade_config_file.split(
':', maxsplit=1
)
new_config = new_config[0] if new_config else None
try:
new_config = config.convert_old_config(old_config, new_config)
except Exception as e:
printer.error(f'could not convert file: {e}')
sys.exit(1)
printer.info(
f'Conversion successful! '
f'The converted file can be found at {new_config!r}.'
)
sys.exit(0)
# Now configure ReFrame according to the user configuration file
try:
try:
printer.debug('Loading user configuration')
site_config = config.load_config(options.config_file)
except warnings.ReframeDeprecationWarning as e:
printer.warning(e)
converted = config.convert_old_config(options.config_file)
printer.warning(
f"configuration file has been converted "
f"to the new syntax here: '{converted}'"
)
site_config = config.load_config(converted)
site_config.validate()
site_config.set_autodetect_meth(
options.autodetect_method,
use_fqdn=options.autodetect_fqdn,
use_xthostname=options.autodetect_xthostname
)
        # We ignore errors about unresolved sections or configuration
        # parameters here, because they might be defined at the individual
        # partition level and will be caught when we instantiate the system
        # and partitions internally later on.
site_config.select_subconfig(options.system,
ignore_resolve_errors=True)
for err in options.update_config(site_config):
printer.warning(str(err))
# Update options from the selected execution mode
if options.mode:
mode_args = site_config.get(f'modes/@{options.mode}/options')
# We lexically split the mode options, because otherwise spaces
# will be treated as part of the option argument; see GH bug #1554
mode_args = list(itertools.chain.from_iterable(shlex.split(m)
for m in mode_args))
# Parse the mode's options and reparse the command-line
options = argparser.parse_args(mode_args)
options = argparser.parse_args(namespace=options.cmd_options)
options.update_config(site_config)
logging.configure_logging(site_config)
except (OSError, errors.ConfigError) as e:
printer.error(f'failed to load configuration: {e}')
printer.error(logfiles_message())
sys.exit(1)
printer.colorize = site_config.get('general/0/colorize')
if not restrict_logging():
printer.adjust_verbosity(calc_verbosity(site_config, options.quiet))
try:
printer.debug('Initializing runtime')
runtime.init_runtime(site_config)
except errors.ConfigError as e:
printer.error(f'failed to initialize runtime: {e}')
printer.error(logfiles_message())
sys.exit(1)
if site_config.get('general/0/ignore_check_conflicts'):
logging.getlogger().warning(
"the 'ignore_check_conflicts' option is deprecated "
"and will be removed in the future"
)
rt = runtime.runtime()
try:
if site_config.get('general/0/module_map_file'):
rt.modules_system.load_mapping_from_file(
site_config.get('general/0/module_map_file')
)
if site_config.get('general/0/module_mappings'):
for m in site_config.get('general/0/module_mappings'):
rt.modules_system.load_mapping(m)
except (errors.ConfigError, OSError) as e:
printer.error('could not load module mappings: %s' % e)
sys.exit(1)
if (osext.samefile(rt.stage_prefix, rt.output_prefix) and
not site_config.get('general/0/keep_stage_files')):
printer.error("stage and output refer to the same directory; "
"if this is on purpose, please use the "
"'--keep-stage-files' option.")
printer.error(logfiles_message())
sys.exit(1)
# Show configuration after everything is set up
if options.show_config:
# Restore logging level
printer.setLevel(logging.INFO)
config_param = options.show_config
if config_param == 'all':
printer.info(str(rt.site_config))
else:
# Create a unique value to differentiate between configuration
# parameters with value `None` and invalid ones
default = {'token'}
value = rt.get_option(config_param, default)
if value is default:
printer.error(
f'no such configuration parameter found: {config_param}'
)
else:
printer.info(json.dumps(value, indent=2))
sys.exit(0)
if options.detect_host_topology:
from reframe.utility.cpuinfo import cpuinfo
s_cpuinfo = cpuinfo()
# Restore logging level
printer.setLevel(logging.INFO)
topofile = options.detect_host_topology
if topofile == '-':
printer.info(json.dumps(s_cpuinfo, indent=2))
else:
try:
with open(topofile, 'w') as fp:
json.dump(s_cpuinfo, fp, indent=2)
fp.write('\n')
            except OSError as e:
                logging.getlogger().error(
                    f'could not write topology file {topofile!r}: {e}'
                )
sys.exit(1)
sys.exit(0)
autodetect.detect_topology()
printer.debug(format_env(options.env_vars))
# Setup the check loader
if options.restore_session is not None:
# We need to load the failed checks only from a list of reports
if options.restore_session:
filenames = options.restore_session.split(',')
else:
filenames = [runreport.next_report_filename(
osext.expandvars(site_config.get('general/0/report_file')),
new=False
)]
report = runreport.load_report(*filenames)
check_search_path = list(report.slice('filename', unique=True))
check_search_recursive = False
# If `-c` or `-R` are passed explicitly outside the configuration
# file, override the values set from the report file
if site_config.is_sticky_option('general/check_search_path'):
printer.warning(
'Ignoring check search path set in the report file: '
'search path set explicitly in the command-line or '
'the environment'
)
check_search_path = site_config.get(
'general/0/check_search_path'
)
if site_config.is_sticky_option('general/check_search_recursive'):
printer.warning(
'Ignoring check search recursive option from the report file: '
'option set explicitly in the command-line or the environment'
)
check_search_recursive = site_config.get(
'general/0/check_search_recursive'
)
else:
check_search_recursive = site_config.get(
'general/0/check_search_recursive'
)
check_search_path = site_config.get('general/0/check_search_path')
# Collect any variables set from the command line
external_vars = {}
for expr in options.vars:
try:
lhs, rhs = expr.split('=', maxsplit=1)
except ValueError:
printer.warning(
f'invalid test variable assignment: {expr!r}; skipping'
)
else:
external_vars[lhs] = rhs
loader = RegressionCheckLoader(check_search_path,
check_search_recursive,
external_vars,
options.skip_system_check,
options.skip_prgenv_check)
def print_infoline(param, value):
param = param + ':'
printer.info(f" {param.ljust(18)} {value}")
session_info = {
'cmdline': ' '.join(sys.argv),
'config_file': rt.site_config.filename,
'data_version': runreport.DATA_VERSION,
'hostname': socket.gethostname(),
'prefix_output': rt.output_prefix,
'prefix_stage': rt.stage_prefix,
'user': osext.osuser(),
'version': osext.reframe_version(),
'workdir': os.getcwd(),
}
# Print command line
printer.info(f"[ReFrame Setup]")
print_infoline('version', session_info['version'])
print_infoline('command', repr(session_info['cmdline']))
print_infoline(
f"launched by",
f"{session_info['user'] or '<unknown>'}@{session_info['hostname']}"
)
print_infoline('working directory', repr(session_info['workdir']))
print_infoline('settings file', f"{session_info['config_file']!r}")
print_infoline('check search path',
f"{'(R) ' if loader.recurse else ''}"
f"{':'.join(loader.load_path)!r}")
print_infoline('stage directory', repr(session_info['prefix_stage']))
print_infoline('output directory', repr(session_info['prefix_output']))
printer.info('')
try:
# Need to parse the cli options before loading the tests
parsed_job_options = []
for opt in options.job_options:
opt_split = opt.split('=', maxsplit=1)
optstr = opt_split[0]
valstr = opt_split[1] if len(opt_split) > 1 else ''
if opt.startswith('-') or opt.startswith('#'):
parsed_job_options.append(opt)
elif len(optstr) == 1:
parsed_job_options.append(f'-{optstr} {valstr}')
else:
parsed_job_options.append(f'--{optstr} {valstr}')
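        # Added note: e.g. 'mem=4G' becomes '--mem 4G', 'c=8' becomes
        # '-c 8', while options already starting with '-' or '#' are passed
        # through to the scheduler unchanged.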
# Locate and load checks; `force=True` is not needed for normal
# invocations from the command line and has practically no effect, but
# it is needed to better emulate the behavior of running reframe's CLI
        # from within the unit tests, which call `main()` repeatedly.
checks_found = loader.load_all(force=True)
printer.verbose(f'Loaded {len(checks_found)} test(s)')
# Generate all possible test cases first; we will need them for
# resolving dependencies after filtering
testcases_all = generate_testcases(checks_found)
testcases = testcases_all
printer.verbose(f'Generated {len(testcases)} test case(s)')
# Filter test cases by name
if options.exclude_names:
for name in options.exclude_names:
testcases = filter(filters.have_not_name(name), testcases)
if options.names:
testcases = filter(filters.have_any_name(options.names), testcases)
testcases = list(testcases)
printer.verbose(
            f'Filtering test case(s) by name: {len(testcases)} remaining'
)
# Filter test cases by tags
for tag in options.exclude_tags:
testcases = filter(filters.have_not_tag(tag), testcases)
for tag in options.tags:
testcases = filter(filters.have_tag(tag), testcases)
testcases = list(testcases)
printer.verbose(
            f'Filtering test case(s) by tags: {len(testcases)} remaining'
)
# Filter test cases by maintainers
for maint in options.maintainers:
testcases = filter(filters.have_maintainer(maint), testcases)
# Filter test cases further
if options.gpu_only and options.cpu_only:
printer.error("options `--gpu-only' and `--cpu-only' "
"are mutually exclusive")
sys.exit(1)
if options.gpu_only:
testcases = filter(filters.have_gpu_only(), testcases)
elif options.cpu_only:
testcases = filter(filters.have_cpu_only(), testcases)
testcases = list(testcases)
printer.verbose(
            f'Filtering test case(s) by other attributes: '
f'{len(testcases)} remaining'
)
# Filter in failed cases
if options.failed:
if options.restore_session is None:
printer.error(
"the option '--failed' can only be used "
"in combination with the '--restore-session' option"
)
sys.exit(1)
def _case_failed(t):
rec = report.case(*t)
if not rec:
return False
return (rec['result'] == 'failure' or
rec['result'] == 'aborted')
testcases = list(filter(_case_failed, testcases))
printer.verbose(
f'Filtering successful test case(s): '
f'{len(testcases)} remaining'
)
if options.distribute:
node_map = getallnodes(options.distribute, parsed_job_options)
# Remove the job options that begin with '--nodelist' and '-w', so
# that they do not override those set from the distribute feature.
#
# NOTE: This is Slurm-specific. When support of distributing tests
# is added to other scheduler backends, this needs to be updated,
# too.
parsed_job_options = [
x for x in parsed_job_options
if (not x.startswith('-w') and not x.startswith('--nodelist'))
]
testcases = distribute_tests(testcases, node_map)
testcases_all = testcases
# Prepare for running
printer.debug('Building and validating the full test DAG')
testgraph, skipped_cases = dependencies.build_deps(testcases_all)
if skipped_cases:
# Some cases were skipped, so adjust testcases
testcases = list(set(testcases) - set(skipped_cases))
printer.verbose(
f'Filtering test case(s) due to unresolved dependencies: '
f'{len(testcases)} remaining'
)
dependencies.validate_deps(testgraph)
printer.debug('Full test DAG:')
printer.debug(dependencies.format_deps(testgraph))
restored_cases = []
if len(testcases) != len(testcases_all):
testgraph = dependencies.prune_deps(
testgraph, testcases,
max_depth=1 if options.restore_session is not None else None
)
printer.debug('Pruned test DAG')
printer.debug(dependencies.format_deps(testgraph))
if options.restore_session is not None:
testgraph, restored_cases = report.restore_dangling(testgraph)
testcases = dependencies.toposort(
testgraph,
is_subgraph=options.restore_session is not None
)
printer.verbose(f'Final number of test cases: {len(testcases)}')
# Disable hooks
for tc in testcases:
for h in options.hooks:
tc.check.disable_hook(h)
# Act on checks
if options.describe:
# Restore logging level
printer.setLevel(logging.INFO)
describe_checks(testcases, printer)
sys.exit(0)
if options.list or options.list_detailed:
concretized = (options.list == 'C' or
options.list_detailed == 'C')
detailed = options.list_detailed is not None
list_checks(testcases, printer, detailed, concretized)
sys.exit(0)
if options.list_tags:
list_tags(testcases, printer)
sys.exit(0)
if options.ci_generate:
list_checks(testcases, printer)
printer.info('[Generate CI]')
with open(options.ci_generate, 'wt') as fp:
ci.emit_pipeline(fp, testcases)
printer.info(
f' Gitlab pipeline generated successfully '
f'in {options.ci_generate!r}.\n'
)
sys.exit(0)
if not options.run:
printer.error("No action option specified. Available options:\n"
" - `-l'/`-L' for listing\n"
" - `-r' for running\n"
" - `--list-tags' for listing unique test tags\n"
" - `--ci-generate' for generating a CI pipeline\n"
f"Try `{argparser.prog} -h' for more options.")
sys.exit(1)
# Manipulate ReFrame's environment
if site_config.get('general/0/purge_environment'):
rt.modules_system.unload_all()
else:
for m in site_config.get('general/0/unload_modules'):
rt.modules_system.unload_module(**m)
# Load the environment for the current system
try:
printer.debug(f'Loading environment for current system')
runtime.loadenv(rt.system.preload_environ)
except errors.EnvironError as e:
printer.error("failed to load current system's environment; "
"please check your configuration")
printer.debug(str(e))
raise
def module_use(*paths):
try:
rt.modules_system.searchpath_add(*paths)
except errors.EnvironError as e:
printer.warning(f'could not add module paths correctly')
printer.debug(str(e))
def module_unuse(*paths):
try:
rt.modules_system.searchpath_remove(*paths)
except errors.EnvironError as e:
printer.warning(f'could not remove module paths correctly')
printer.debug(str(e))
printer.debug('(Un)using module paths from command line')
module_paths = {}
for d in options.module_paths:
if d.startswith('-'):
module_paths.setdefault('-', [])
module_paths['-'].append(d[1:])
elif d.startswith('+'):
module_paths.setdefault('+', [])
module_paths['+'].append(d[1:])
else:
module_paths.setdefault('x', [])
module_paths['x'].append(d)
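        # Added note: '+PATH' entries are queued for addition to the module
        # search path, '-PATH' entries for removal, and bare PATH entries
        # replace the current search path altogether (handled below).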
for op, paths in module_paths.items():
if op == '+':
module_use(*paths)
elif op == '-':
module_unuse(*paths)
else:
# First empty the current module path in a portable way
searchpath = [p for p in rt.modules_system.searchpath if p]
if searchpath:
rt.modules_system.searchpath_remove(*searchpath)
# Treat `A:B` syntax as well in this case
paths = itertools.chain(*(p.split(':') for p in paths))
module_use(*paths)
printer.debug('Loading user modules from command line')
for m in site_config.get('general/0/user_modules'):
try:
rt.modules_system.load_module(**m, force=True)
except errors.EnvironError as e:
printer.warning(
f'could not load module {m["name"]!r} correctly; '
f'skipping...'
)
printer.debug(str(e))
options.flex_alloc_nodes = options.flex_alloc_nodes or 'idle'
# Run the tests
# Setup the execution policy
if options.exec_policy == 'serial':
exec_policy = SerialExecutionPolicy()
elif options.exec_policy == 'async':
exec_policy = AsynchronousExecutionPolicy()
else:
# This should not happen, since choices are handled by
# argparser
printer.error("unknown execution policy `%s': Exiting...")
sys.exit(1)
exec_policy.force_local = options.force_local
exec_policy.strict_check = options.strict
exec_policy.skip_sanity_check = options.skip_sanity_check
exec_policy.skip_performance_check = options.skip_performance_check
exec_policy.keep_stage_files = site_config.get(
'general/0/keep_stage_files'
)
try:
errmsg = "invalid option for --flex-alloc-nodes: '{0}'"
sched_flex_alloc_nodes = int(options.flex_alloc_nodes)
if sched_flex_alloc_nodes <= 0:
raise errors.ConfigError(
errmsg.format(options.flex_alloc_nodes)
)
except ValueError:
sched_flex_alloc_nodes = options.flex_alloc_nodes
exec_policy.sched_flex_alloc_nodes = sched_flex_alloc_nodes
exec_policy.sched_options = parsed_job_options
if options.maxfail < 0:
raise errors.ConfigError(
f'--maxfail should be a non-negative integer: '
f'{options.maxfail!r}'
)
runner = Runner(exec_policy, printer, options.max_retries,
options.maxfail)
try:
time_start = time.time()
session_info['time_start'] = time.strftime(
'%FT%T%z', time.localtime(time_start),
)
runner.runall(testcases, restored_cases)
finally:
time_end = time.time()
session_info['time_end'] = time.strftime(
'%FT%T%z', time.localtime(time_end)
)
session_info['time_elapsed'] = time_end - time_start
# Print a retry report if we did any retries
if runner.stats.failed(run=0):
printer.info(runner.stats.retry_report())
# Print a failure report if we had failures in the last run
success = True
if runner.stats.failed():
success = False
runner.stats.print_failure_report(
printer, not options.distribute
)
if options.failure_stats:
runner.stats.print_failure_stats(printer)
if options.performance_report:
printer.info(runner.stats.performance_report())
# Generate the report for this session
report_file = os.path.normpath(
osext.expandvars(rt.get_option('general/0/report_file'))
)
basedir = os.path.dirname(report_file)
if basedir:
os.makedirs(basedir, exist_ok=True)
# Build final JSON report
run_stats = runner.stats.json()
session_info.update({
'num_cases': run_stats[0]['num_cases'],
'num_failures': run_stats[-1]['num_failures']
})
json_report = {
'session_info': session_info,
'runs': run_stats,
'restored_cases': []
}
if options.restore_session is not None:
for c in restored_cases:
json_report['restored_cases'].append(report.case(*c))
report_file = runreport.next_report_filename(report_file)
try:
with open(report_file, 'w') as fp:
jsonext.dump(json_report, fp, indent=2)
fp.write('\n')
printer.info(f'Run report saved in {report_file!r}')
except OSError as e:
printer.warning(
f'failed to generate report in {report_file!r}: {e}'
)
# Generate the junit xml report for this session
junit_report_file = rt.get_option('general/0/report_junit')
if junit_report_file:
# Expand variables in filename
junit_report_file = osext.expandvars(junit_report_file)
junit_xml = runreport.junit_xml_report(json_report)
try:
with open(junit_report_file, 'w') as fp:
runreport.junit_dump(junit_xml, fp)
except OSError as e:
printer.warning(
f'failed to generate report in {junit_report_file!r}: '
f'{e}'
)
if not success:
sys.exit(1)
sys.exit(0)
except (Exception, KeyboardInterrupt, errors.ReframeFatalError):
exc_info = sys.exc_info()
tb = ''.join(traceback.format_exception(*exc_info))
printer.error(f'run session stopped: {errors.what(*exc_info)}')
if errors.is_exit_request(*exc_info):
# Print stack traces for exit requests only when TOO verbose
printer.debug2(tb)
elif errors.is_severe(*exc_info):
printer.error(tb)
else:
printer.verbose(tb)
sys.exit(1)
finally:
try:
log_files = logging.log_files()
if site_config.get('general/0/save_log_files'):
log_files = logging.save_log_files(rt.output_prefix)
except OSError as e:
printer.error(f'could not save log file: {e}')
sys.exit(1)
finally:
if not restrict_logging():
printer.info(logfiles_message())
|
the-stack_0_24922
|
#!/usr/bin/env python3
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""Functions for putting examples into torch format."""
from collections import Counter
import torch
def vectorize(ex, model, single_answer=False):
"""Torchify a single example."""
args = model.args
word_dict = model.word_dict
feature_dict = model.feature_dict
# Index words
document = torch.LongTensor([word_dict[w] for w in ex['document']])
question = torch.LongTensor([word_dict[w] for w in ex['question']])
# Create extra features vector
if len(feature_dict) > 0:
features = torch.zeros(len(ex['document']), len(feature_dict))
else:
features = None
# f_{exact_match}
if args.use_in_question:
q_words_cased = {w for w in ex['question']}
q_words_uncased = {w.lower() for w in ex['question']}
q_lemma = {w for w in ex['qlemma']} if args.use_lemma else None
for i in range(len(ex['document'])):
if ex['document'][i] in q_words_cased:
features[i][feature_dict['in_question']] = 1.0
if ex['document'][i].lower() in q_words_uncased:
features[i][feature_dict['in_question_uncased']] = 1.0
if q_lemma and ex['lemma'][i] in q_lemma:
features[i][feature_dict['in_question_lemma']] = 1.0
# f_{token} (POS)
if args.use_pos:
for i, w in enumerate(ex['pos']):
f = 'pos=%s' % w
if f in feature_dict:
features[i][feature_dict[f]] = 1.0
# f_{token} (NER)
if args.use_ner:
for i, w in enumerate(ex['ner']):
f = 'ner=%s' % w
if f in feature_dict:
features[i][feature_dict[f]] = 1.0
# f_{token} (TF)
if args.use_tf:
counter = Counter([w.lower() for w in ex['document']])
l = len(ex['document'])
for i, w in enumerate(ex['document']):
features[i][feature_dict['tf']] = counter[w.lower()] * 1.0 / l
# Maybe return without target
if 'answers' not in ex:
return document, features, question, ex['id']
# ...or with target(s) (might still be empty if answers is empty)
if single_answer:
assert(len(ex['answers']) > 0)
start = torch.LongTensor(1).fill_(ex['answers'][0][0])
end = torch.LongTensor(1).fill_(ex['answers'][0][1])
else:
start = [a[0] for a in ex['answers']]
end = [a[1] for a in ex['answers']]
return document, features, question, start, end, ex['id']
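# Usage sketch (added illustration; the stub model and example are
# hypothetical): vectorize() only needs `model.args`, `model.word_dict` and
# `model.feature_dict`, so with all extra features disabled it can be
# exercised on a tokenized example directly:
#
#     ex = {'id': 'q1',
#           'document': ['the', 'cat', 'sat'],
#           'question': ['who', 'sat'],
#           'answers': [(2, 2)]}
#     doc, feats, q, start, end, ex_id = vectorize(ex, model,
#                                                  single_answer=True)
#
# Here `doc` and `q` are LongTensors of word indices, and `feats` is None
# because the feature dictionary is empty.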
def batchify(batch):
"""Gather a batch of individual examples into one batch."""
NUM_INPUTS = 3
NUM_TARGETS = 2
NUM_EXTRA = 1
ids = [ex[-1] for ex in batch]
docs = [ex[0] for ex in batch]
features = [ex[1] for ex in batch]
questions = [ex[2] for ex in batch]
# Batch documents and features
max_length = max([d.size(0) for d in docs])
x1 = torch.LongTensor(len(docs), max_length).zero_()
x1_mask = torch.ByteTensor(len(docs), max_length).fill_(1)
if features[0] is None:
x1_f = None
else:
x1_f = torch.zeros(len(docs), max_length, features[0].size(1))
for i, d in enumerate(docs):
x1[i, :d.size(0)].copy_(d)
x1_mask[i, :d.size(0)].fill_(0)
if x1_f is not None:
x1_f[i, :d.size(0)].copy_(features[i])
# Batch questions
max_length = max([q.size(0) for q in questions])
x2 = torch.LongTensor(len(questions), max_length).zero_()
x2_mask = torch.ByteTensor(len(questions), max_length).fill_(1)
for i, q in enumerate(questions):
x2[i, :q.size(0)].copy_(q)
x2_mask[i, :q.size(0)].fill_(0)
# Maybe return without targets
if len(batch[0]) == NUM_INPUTS + NUM_EXTRA:
return x1, x1_f, x1_mask, x2, x2_mask, ids
elif len(batch[0]) == NUM_INPUTS + NUM_EXTRA + NUM_TARGETS:
# ...Otherwise add targets
if torch.is_tensor(batch[0][3]):
y_s = torch.cat([ex[3] for ex in batch])
y_e = torch.cat([ex[4] for ex in batch])
else:
y_s = [ex[3] for ex in batch]
y_e = [ex[4] for ex in batch]
else:
raise RuntimeError('Incorrect number of inputs per example.')
return x1, x1_f, x1_mask, x2, x2_mask, y_s, y_e, ids
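# Usage sketch (added illustration; `dataset` is hypothetical): batchify()
# is intended as a collate function over vectorize() outputs, e.g.
#
#     loader = torch.utils.data.DataLoader(dataset, batch_size=32,
#                                          collate_fn=batchify)
#
# where `dataset[i]` returns the tuple produced by vectorize().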
|
the-stack_0_24927
|
import json
import numpy as np
import time
import scipy.sparse as sp
from scipy.sparse import csr_matrix
def construct_feed_dict(placeholders, node_features, support, labels, r_indices, c_indices,
dropout, is_train=True):
"""
Create feed dictionary.
"""
if not type(support[0]) == tuple:
support = [sparse_to_tuple(sup) for sup in support]
feed_dict = dict()
feed_dict.update({placeholders['node_features']: node_features})
feed_dict.update({placeholders['support'][i]: support[i] for i in range(len(support))})
feed_dict.update({placeholders['labels']: labels})
feed_dict.update({placeholders['row_indices']: r_indices})
feed_dict.update({placeholders['col_indices']: c_indices})
feed_dict.update({placeholders['dropout']: dropout})
feed_dict.update({placeholders['is_train']: is_train})
return feed_dict
def support_dropout(sup, do, edge_drop=False):
before = time.time()
sup = sp.tril(sup)
assert do > 0.0 and do < 1.0
n_nodes = sup.shape[0]
# nodes that I want to isolate
isolate = np.random.choice(range(n_nodes), int(n_nodes*do), replace=False)
nnz_rows, nnz_cols = sup.nonzero()
# mask the nodes that have been selected
mask = np.in1d(nnz_rows, isolate)
mask += np.in1d(nnz_cols, isolate)
assert mask.shape[0] == sup.data.shape[0]
sup.data[mask] = 0
sup.eliminate_zeros()
if edge_drop:
prob = np.random.uniform(0, 1, size=sup.data.shape)
remove = prob < do
sup.data[remove] = 0
sup.eliminate_zeros()
sup = sup + sup.transpose()
return sup
def write_log(data, logfile):
with open(logfile, 'w') as outfile:
json.dump(data, outfile)
def get_degree_supports(adj, k, adj_self_con=False, verbose=True):
if verbose:
print('Computing adj matrices up to {}th degree'.format(k))
supports = [sp.identity(adj.shape[0])]
if k == 0: # return Identity matrix (no message passing)
return supports
assert k > 0
supports = [sp.identity(adj.shape[0]), adj.astype(np.float64) + adj_self_con*sp.identity(adj.shape[0])]
prev_power = adj
for i in range(k-1):
pow = prev_power.dot(adj)
new_adj = ((pow) == 1).astype(np.float64)
new_adj.setdiag(0)
new_adj.eliminate_zeros()
supports.append(new_adj)
prev_power = pow
return supports
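# Illustrative example (added, not part of the original module): for k=2 the
# returned list is [I, A (+I if adj_self_con), A2], where A2 links nodes
# joined by a path of length two with the diagonal cleared, e.g.
#
#     adj = csr_matrix(np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]]))
#     supports = get_degree_supports(adj, 2, verbose=False)
#     len(supports)  # -> 3; supports[2] connects nodes 0 and 2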
def normalize_nonsym_adj(adj):
degree = np.asarray(adj.sum(1)).flatten()
# set zeros to inf to avoid dividing by zero
degree[degree == 0.] = np.inf
degree_inv_sqrt = 1. / np.sqrt(degree)
degree_inv_sqrt_mat = sp.diags([degree_inv_sqrt], [0])
degree_inv = degree_inv_sqrt_mat.dot(degree_inv_sqrt_mat)
adj_norm = degree_inv.dot(adj)
return adj_norm
def sparse_to_tuple(sparse_mx):
""" change of format for sparse matrix. This format is used
for the feed_dict where sparse matrices need to be linked to placeholders
representing sparse matrices. """
if not sp.isspmatrix_coo(sparse_mx):
sparse_mx = sparse_mx.tocoo()
coords = np.vstack((sparse_mx.row, sparse_mx.col)).transpose()
values = sparse_mx.data
shape = sparse_mx.shape
return coords, values, shape
class Graph(object):
"""docstring for Graph."""
def __init__(self, adj):
super(Graph, self).__init__()
self.adj = adj
self.n_nodes = adj.shape[0]
self.level = 0
def run_K_BFS(self, n, K):
"""
Returns a list of K edges, sampled using BFS starting from n
"""
visited = set()
edges = []
self.BFS(n, visited, K, edges)
assert len(edges) <= K
return edges
def BFS(self, n, visited, K, edges):
queue = [n]
while len(queue) > 0:
node = queue.pop(0)
if node not in visited:
visited.add(node)
neighs = list(self.adj[node].nonzero()[1])
for neigh in neighs:
if neigh not in visited:
edges.append((node, neigh))
queue.append(neigh)
if len(edges) == K:
return
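# Usage sketch (added illustration with a hypothetical adjacency matrix):
# run_K_BFS() returns at most K edges in breadth-first order from node n,
# e.g.
#
#     adj = csr_matrix(np.array([[0, 1, 1], [1, 0, 0], [1, 0, 0]]))
#     Graph(adj).run_K_BFS(0, K=2)   # -> [(0, 1), (0, 2)]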
|
the-stack_0_24928
|
import codecs, os.path
from setuptools import setup, find_packages
# use README.md as readme
def readme():
with open('README.md') as f:
return f.read()
# get __version__ from a file
def read(rel_path):
here = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(here, rel_path), 'r') as fp:
return fp.read()
def get_version(rel_path):
for line in read(rel_path).splitlines():
if line.startswith('__version__'):
delim = '"' if '"' in line else "'"
return line.split(delim)[1]
else:
raise RuntimeError("Unable to find version string.")
setup(
name='systemd-resolved-docker',
url='https://github.com/flaktack/systemd-resolved-docker',
license='MIT',
author='Zsombor Welker',
author_email='[email protected]',
install_requires=["docker", "dnslib", "systemd-python", "dbus-python", "pyroute2"],
description='systemd-resolved and docker DNS integration',
long_description=readme(),
long_description_content_type="text/markdown",
package_dir={
'': 'src',
},
packages=find_packages('src'),
entry_points={
'console_scripts': [
'systemd-resolved-docker=systemd_resolved_docker.cli:main',
],
},
excluded=['rpms/*'],
# extract version from source
version=get_version("src/systemd_resolved_docker/__init__.py"),
)
|
the-stack_0_24929
|
import xml.etree.ElementTree as ET
import argparse
import json
def parse_options():
parser = argparse.ArgumentParser()
    parser.add_argument(
        "-f", "--file",
        help="xml annotation file",
    )
    parser.add_argument(
        "-o", "--output_file",
        help="converted via json file",
    )
return parser.parse_args()
class Annotation:
"""
class to convert cvat xml annotations to via json annotations
args:
xml cvat file, with annotations made with "polyline" and "polygon"
return:
save a json file in via format
"""
def __init__(self, image_id, name, width, height, regions):
self.image_id = image_id
self.name = name
self.width = width
self.height = height
self.regions = regions
self.size = width*height
@classmethod
def from_xml(cls, image):
image_id = image.get('id')
name = image.get('name')
width = int(image.get('width'))
height = int(image.get('height'))
regions = cls.get_regions(image)
return cls(image_id, name, width, height, regions)
    @staticmethod
    def get_regions(image):
"""
Parse cvat polylines and polygons to via regions
"""
regions = []
for region in image.findall('polyline')+image.findall('polygon'):
clase = region.get('label')
points = region.get('points')
regions.append(Annotation.via_region(clase, points))
return regions
    @staticmethod
    def via_region(clase, points):
"""
Returns a dict with the region in via format
"""
all_points_x, all_points_y = Annotation.points_format(points)
return {
"region_attributes": {
"class": clase
},
"shape_attributes": {
"all_points_x": all_points_x,
"all_points_y": all_points_y,
"name": "polygon"
},
}
    @staticmethod
    def points_format(points):
"""
Change cvat format [x1,y1,x2,y2...] to [x1,x2,x3...][y1,y2,y3...]
"""
all_points_x = []
all_points_y = []
for point in points.split(";"):
x, y = point.split(",")
all_points_x.append(round(float(x)))
all_points_y.append(round(float(y)))
return all_points_x, all_points_y
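    # Added note: CVAT stores a shape as "x1,y1;x2,y2;...", so for example
    # Annotation.points_format("10.2,20.7;30.1,40.9") returns
    # ([10, 30], [21, 41]) - the rounded x and y coordinates.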
def get_dict(self):
"""
Returns a dict with the complete image in via format
"""
return {
"filename": self.name,
"dimension": [self.width, self.height],
"size": self.size,
"file_attributes:": {},
"regions": self.regions
}
def main():
args = parse_options()
root = ET.parse(args.file).getroot()
dataset = {}
for image in root.findall('image'):
annotation = Annotation.from_xml(image)
dataset[annotation.name] = annotation.get_dict()
with open(args.output_file, 'w') as out_file:
json.dump(dataset, out_file)
if __name__ == "__main__":
main()
|
the-stack_0_24930
|
from __future__ import absolute_import
from __future__ import print_function
from keras.optimizers import SGD, RMSprop, Adam, Adamax , Nadam
class OptimizerFactory(object):
@staticmethod
def create_optimizer(opt_type, lr, momentum=0, lr_decay=0.,
rho=0.9, epsilon=None, beta_1=0.9, beta_2=0.999,
clipnorm=10, clipvalue=100, amsgrad=False):
if opt_type == 'sgd':
return SGD(lr=lr, momentum=momentum, decay=lr_decay, nesterov=False,
clipnorm=clipnorm, clipvalue=clipvalue)
if opt_type == 'nsgd':
return SGD(lr=lr, momentum=momentum, decay=lr_decay, nesterov=True,
clipnorm=clipnorm, clipvalue=clipvalue)
if opt_type == 'rmsprop':
return RMSprop(lr=lr, rho=rho, epsilon=epsilon, decay=lr_decay,
clipnorm=clipnorm, clipvalue=clipvalue)
if opt_type == 'adam':
return Adam(lr=lr, beta_1=beta_1, beta_2=beta_2, epsilon=epsilon,
decay=lr_decay, amsgrad=amsgrad,
clipnorm=clipnorm, clipvalue=clipvalue)
if opt_type == 'nadam':
return Nadam(lr=lr, beta_1=beta_1, beta_2=beta_2,
epsilon=epsilon, schedule_decay=lr_decay,
clipnorm=clipnorm, clipvalue=clipvalue)
if opt_type == 'adamax':
return Adamax(lr=lr, beta_1=beta_1, beta_2=beta_2, epsilon=epsilon,
decay=lr_decay,
clipnorm=clipnorm, clipvalue=clipvalue)
@staticmethod
def filter_args(prefix=None, **kwargs):
if prefix is None:
p = ''
else:
p = prefix + '_'
valid_args = ('opt_type', 'lr', 'momentum', 'lr_decay',
'rho', 'epsilon', 'beta_1', 'beta_2',
'clipnorm', 'clipvalue')
return dict((k, kwargs[p+k])
for k in valid_args if p+k in kwargs)
@staticmethod
def add_argparse_args(parser, prefix=None):
if prefix is None:
p1 = '--'
p2 = ''
else:
p1 = '--' + prefix + '-'
p2 = prefix + '_'
parser.add_argument(p1+'optimizer', dest=(p2+'opt_type'), type=str.lower,
default='adam',
choices=['sgd','nsgd','rmsprop','adam','nadam','adamax'],
help=('Optimizers: SGD, '
'NSGD (SGD with Nesterov momentum), '
'RMSprop, Adam, Adamax, '
'Nadam (Adam with Nesterov momentum), '
'(default: %(default)s)'))
parser.add_argument(p1+'lr' , dest=(p2+'lr'),
default=0.002, type=float,
help=('Initial learning rate (default: %(default)s)'))
parser.add_argument(p1+'momentum', dest=(p2+'momentum'), default=0.6, type=float,
help=('Momentum (default: %(default)s)'))
parser.add_argument(p1+'lr-decay', dest=(p2+'lr_decay'), default=1e-6, type=float,
help=('Learning rate decay in SGD optimizer '
'(default: %(default)s)'))
parser.add_argument(p1+'rho', dest=(p2+'rho'), default=0.9, type=float,
help=('Rho in RMSprop optimizer (default: %(default)s)'))
parser.add_argument(p1+'epsilon', dest=(p2+'epsilon'), default=1e-8, type=float,
help=('Epsilon in RMSprop and Adam optimizers '
'(default: %(default)s)'))
parser.add_argument(p1+'amsgrad', dest=(p2+'amsgrad'), default=False,
action='store_true',
help=('AMSGrad variant of Adam'))
parser.add_argument(p1+'beta1', dest=(p2+'beta_1'), default=0.9, type=float,
help=('Beta_1 in Adam optimizers (default: %(default)s)'))
parser.add_argument(p1+'beta2', dest=(p2+'beta_2'), default=0.999, type=float,
help=('Beta_2 in Adam optimizers (default: %(default)s)'))
# parser.add_argument(p1+'schedule-decay', dest=(p2+'schedule_decay'),
# default=0.004, type=float,
# help=('Schedule decay in Nadam optimizer '
# '(default: %(default)s)'))
parser.add_argument(p1+'clipnorm', dest=(p2+'clipnorm'),
default=10,type=float,
help=('Clips the norm of the gradient '
'(default: %(default)s)'))
parser.add_argument(p1+'clipvalue', dest=(p2+'clipvalue'),
default=100,type=float,
help=('Clips the absolute value of the gradient '
'(default: %(default)s)'))
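# Usage sketch (added illustration; the 'opt' prefix and values are
# hypothetical): the three static methods are designed to be chained -
# expose the options on an argparse parser, filter the parsed namespace back
# into keyword arguments, and build the optimizer:
#
#     import argparse
#     parser = argparse.ArgumentParser()
#     OptimizerFactory.add_argparse_args(parser, prefix='opt')
#     args = parser.parse_args(['--opt-optimizer', 'adam', '--opt-lr', '0.001'])
#     kwargs = OptimizerFactory.filter_args(prefix='opt', **vars(args))
#     optimizer = OptimizerFactory.create_optimizer(**kwargs)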
|
the-stack_0_24932
|
import os
from pathlib import Path
from jina import Flow
from jina.parsers.helloworld import set_hw_parser
if __name__ == '__main__':
from helper import (
print_result,
write_html,
download_data,
index_generator,
query_generator,
)
from my_executors import MyEncoder, MyIndexer, MyEvaluator
else:
from .helper import (
print_result,
write_html,
download_data,
index_generator,
query_generator,
)
from .my_executors import MyEncoder, MyIndexer, MyEvaluator
cur_dir = os.path.dirname(os.path.abspath(__file__))
def hello_world(args):
"""
Runs Jina's Hello World.
Usage:
Use it via CLI :command:`jina hello-world`.
Description:
        It downloads the Fashion-MNIST dataset and :term:`indexes <Indexer>` 50,000 images.
        The index is stored in 4 *shards*. It randomly samples 128 unseen images as :term:`Queries<Searching>`.
Results are shown in a webpage.
More options can be found in :command:`jina hello-world --help`
:param args: Argparse object
"""
Path(args.workdir).mkdir(parents=True, exist_ok=True)
targets = {
'index-labels': {
'url': args.index_labels_url,
'filename': os.path.join(args.workdir, 'index-labels'),
},
'query-labels': {
'url': args.query_labels_url,
'filename': os.path.join(args.workdir, 'query-labels'),
},
'index': {
'url': args.index_data_url,
'filename': os.path.join(args.workdir, 'index-original'),
},
'query': {
'url': args.query_data_url,
'filename': os.path.join(args.workdir, 'query-original'),
},
}
# download the data
download_data(targets, args.download_proxy)
# reduce the network load by using `fp16`, or even `uint8`
os.environ['JINA_ARRAY_QUANT'] = 'fp16'
# now comes the real work
# load index flow from a YAML file
f = (
Flow()
.add(uses=MyEncoder, parallel=2)
.add(uses=MyIndexer, workspace=args.workdir)
.add(uses=MyEvaluator)
)
# run it!
with f:
f.index(
index_generator(num_docs=targets['index']['data'].shape[0], target=targets),
request_size=args.request_size,
)
f.post(
'/eval',
query_generator(
num_docs=args.num_query, target=targets, with_groundtruth=True
),
shuffle=True,
on_done=print_result,
request_size=args.request_size,
parameters={'top_k': args.top_k},
)
# write result to html
write_html(os.path.join(args.workdir, 'demo.html'))
if __name__ == '__main__':
args = set_hw_parser().parse_args()
hello_world(args)
|
the-stack_0_24934
|
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.utils.encoding import smart_text
from django.utils.translation import get_language, ugettext_lazy as _
from rest_framework import fields, serializers
from olympia.amo.utils import to_language
from olympia.api.utils import is_gate_active
from olympia.translations.models import Translation
class ReverseChoiceField(fields.ChoiceField):
"""
A ChoiceField that exposes the "human-readable" values of its choices,
while storing the "actual" corresponding value as normal.
This is useful when you want to expose string constants to clients while
storing integers in the database.
Note that the values in the `choices_dict` must be unique, since they are
used for both serialization and de-serialization.
"""
def __init__(self, *args, **kwargs):
self.reversed_choices = {v: k for k, v in kwargs['choices']}
super(ReverseChoiceField, self).__init__(*args, **kwargs)
def to_representation(self, value):
"""
Convert to representation by getting the "human-readable" value from
the "actual" one.
"""
value = self.choices.get(value, None)
return super(ReverseChoiceField, self).to_representation(value)
def to_internal_value(self, value):
"""
Convert to internal value by getting the "actual" value from the
"human-readable" one that is passed.
"""
try:
value = self.reversed_choices[value]
except KeyError:
self.fail('invalid_choice', input=value)
return super(ReverseChoiceField, self).to_internal_value(value)
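# Illustrative sketch (not part of the original module): exposing string
# constants to clients while storing integers, as described in the docstring
# above. The choices and serializer below are hypothetical.
def _reverse_choice_field_example():
    class ExampleSerializer(serializers.Serializer):
        # Clients read/write 'public'/'deleted'; 1/2 is what gets stored.
        status = ReverseChoiceField(choices=((1, 'public'), (2, 'deleted')))
    return ExampleSerializer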
class TranslationSerializerField(fields.Field):
"""
Django-rest-framework custom serializer field for our TranslatedFields.
In normal operation:
- When deserializing, in `to_internal_value`, it accepts a dictionary only.
- When serializing, a dict with all translations for the given
`field_name` on `obj`, with languages as the keys.
However, if the parent's serializer context contains a request that has
    a method 'GET', and a 'lang' parameter was passed, then only one
    translation is returned in that dict. If the requested lang is available
    that one is returned, otherwise the default locale is returned.
If the gate 'l10n_flat_input_output' is active then:
- When deserializing, in `to_internal_value`, it accepts both a string
or a dictionary. If a string is given, it'll be considered to be in the
default language.
- When serializing, its behavior depends on the parent's serializer
context:
If a request was included, and its method is 'GET', and a 'lang'
parameter was passed, then only returns one translation (letting the
TranslatedField figure out automatically which language to use).
Else, just returns a dict with all translations for the given
`field_name` on `obj`, with languages as the keys.
"""
default_error_messages = {
'min_length': _(u'The field must have a length of at least {num} '
u'characters.'),
'unknown_locale': _(u'The language code {lang_code} is invalid.'),
'no_dict': _(u'You must provide an object of {lang-code:value}.')
}
def __init__(self, *args, **kwargs):
self.min_length = kwargs.pop('min_length', None)
super().__init__(*args, **kwargs)
@property
def flat(self):
request = self.context.get('request', None)
return is_gate_active(request, 'l10n_flat_input_output')
def fetch_all_translations(self, obj, source, field):
# this property is set by amo.utils.attach_trans_dict
if trans_dict := getattr(obj, 'translations', None):
translations = trans_dict.get(field.id, [])
return {to_language(locale): value
for (locale, value) in translations}
else:
translations = field.__class__.objects.filter(
id=field.id, localized_string__isnull=False)
return {to_language(trans.locale): str(trans)
for trans in translations}
def fetch_single_translation(self, obj, source, field, requested_language):
return {to_language(field.locale): str(field)} if field else None
def get_attribute(self, obj):
source = self.source or self.field_name
try:
field = fields.get_attribute(obj, source.split('.'))
except AttributeError:
field = None
if not field:
return None
requested_language = None
request = self.context.get('request', None)
if request and request.method == 'GET' and 'lang' in request.GET:
requested_language = request.GET['lang']
if requested_language:
single = self.fetch_single_translation(obj, source, field,
requested_language)
return list(single.values())[0] if single and self.flat else single
else:
return self.fetch_all_translations(obj, source, field)
def to_representation(self, val):
return val
def to_internal_value(self, data):
if isinstance(data, str):
self.validate(data)
return data.strip()
elif isinstance(data, dict):
self.validate(data)
for key, value in data.items():
data[key] = value and value.strip()
return data
return str(data)
def validate(self, value):
if not self.flat and not isinstance(value, dict):
raise ValidationError(
self.error_messages['no_dict']
)
value_too_short = True
if isinstance(value, str):
if self.min_length and len(value.strip()) >= self.min_length:
value_too_short = False
else:
for locale, string in value.items():
if locale.lower() not in settings.LANGUAGES:
raise ValidationError(
self.error_messages['unknown_locale'].format(
lang_code=repr(locale)))
if self.min_length and string and (
len(string.strip()) >= self.min_length):
value_too_short = False
break
if self.min_length and value_too_short:
raise ValidationError(
self.error_messages['min_length'].format(num=self.min_length))
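# Illustrative sketch (not part of the original module): a serializer exposing
# a TranslatedField named `name`. Without a `lang` request parameter a GET
# returns every translation, e.g. {'name': {'en-US': 'Foo', 'fr': 'Fou'}};
# with `?lang=fr` only that single translation (or the default locale as a
# fallback) is returned. The serializer below is hypothetical.
def _translation_field_example():
    class ExampleSerializer(serializers.Serializer):
        name = TranslationSerializerField()
    return ExampleSerializer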
class ESTranslationSerializerField(TranslationSerializerField):
"""
Like TranslationSerializerField, but fetching the data from a dictionary
built from ES data that we previously attached on the object.
"""
suffix = '_translations'
_source = None
def get_source(self):
if self._source is None:
return None
return self._source + self.suffix
def set_source(self, val):
self._source = val
source = property(get_source, set_source)
def attach_translations(self, obj, data, source_name, target_name=None):
"""
Look for the translation of `source_name` in `data` and create a dict
with all translations for this field (which will look like
{'en-US': 'mytranslation'}) and attach it to a property on `obj`.
The property name is built with `target_name` and `cls.suffix`. If
`target_name` is None, `source_name` is used instead.
The suffix is necessary for two reasons:
1) The translations app won't let us set the dict on the real field
without making db queries
2) This also exactly matches how we store translations in ES, so we can
directly fetch the translations in the data passed to this method.
"""
if target_name is None:
target_name = source_name
target_key = '%s%s' % (target_name, self.suffix)
source_key = '%s%s' % (source_name, self.suffix)
target_translations = {v.get('lang', ''): v.get('string', '')
for v in data.get(source_key, {}) or {}}
setattr(obj, target_key, target_translations)
# Serializer might need the single translation in the current language,
# so fetch it and attach it directly under `target_name`. We need a
# fake Translation() instance to prevent SQL queries from being
# automatically made by the translations app.
translation = self.fetch_single_translation(
obj, target_name, target_translations, get_language())
if translation:
locale, value = list(translation.items())[0]
translation = Translation(localized_string=value, locale=locale)
setattr(obj, target_name, translation)
def fetch_all_translations(self, obj, source, field):
return field or None
def fetch_single_translation(self, obj, source, field, requested_language):
translations = self.fetch_all_translations(obj, source, field) or {}
locale = None
value = None
if requested_language in translations:
locale = requested_language
value = translations.get(requested_language)
else:
default_locale = getattr(
obj, 'default_locale', settings.LANGUAGE_CODE)
if default_locale in translations:
locale = default_locale
value = translations.get(default_locale)
return {locale: value} if locale and value else None
class SplitField(fields.Field):
"""
A field composed of two separate fields: one used for input, and another
used for output. Most commonly used to accept a primary key for input and
use a full serializer for output.
Example usage:
addon = SplitField(serializers.PrimaryKeyRelatedField(), AddonSerializer())
"""
label = None
def __init__(self, _input, output, **kwargs):
self.input = _input
self.output = output
kwargs['required'] = _input.required
fields.Field.__init__(self, source=_input.source, **kwargs)
def bind(self, field_name, parent):
fields.Field.bind(self, field_name, parent)
self.input.bind(field_name, parent)
self.output.bind(field_name, parent)
def get_read_only(self):
return self._read_only
def set_read_only(self, val):
self._read_only = val
self.input.read_only = val
self.output.read_only = val
read_only = property(get_read_only, set_read_only)
def get_value(self, data):
return self.input.get_value(data)
def to_internal_value(self, value):
return self.input.to_internal_value(value)
def get_attribute(self, obj):
return self.output.get_attribute(obj)
def to_representation(self, value):
return self.output.to_representation(value)
class SlugOrPrimaryKeyRelatedField(serializers.RelatedField):
"""
Combines SlugRelatedField and PrimaryKeyRelatedField. Takes a
`render_as` argument (either "pk" or "slug") to indicate how to
serialize.
"""
read_only = False
def __init__(self, *args, **kwargs):
self.render_as = kwargs.pop('render_as', 'pk')
if self.render_as not in ['pk', 'slug']:
raise ValueError("'render_as' must be one of 'pk' or 'slug', "
"not %r" % (self.render_as,))
self.slug_field = kwargs.pop('slug_field', 'slug')
super(SlugOrPrimaryKeyRelatedField, self).__init__(
*args, **kwargs)
def to_representation(self, obj):
if self.render_as == 'slug':
return getattr(obj, self.slug_field)
else:
return obj.pk
def to_internal_value(self, data):
try:
return self.queryset.get(pk=data)
except Exception:
try:
return self.queryset.get(**{self.slug_field: data})
except ObjectDoesNotExist:
msg = (_('Invalid pk or slug "%s" - object does not exist.') %
smart_text(data))
raise ValidationError(msg)
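# Illustrative sketch (not part of the original module): accept either a pk or
# a slug on input and render the slug on output. The queryset is assumed to be
# supplied by the caller; all names below are hypothetical.
def _slug_or_pk_field_example(queryset):
    class ExampleSerializer(serializers.Serializer):
        category = SlugOrPrimaryKeyRelatedField(
            render_as='slug', slug_field='slug', queryset=queryset)
    return ExampleSerializer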
|
the-stack_0_24935
|
# uncompyle6 version 3.2.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.15 (v2.7.15:ca079a3ea3, Apr 30 2018, 16:30:26) [MSC v.1500 64 bit (AMD64)]
# Embedded file name: lib.coginvasion.toon.ChatBalloon
from panda3d.core import VBase4, NodePath, DepthWriteAttrib, AntialiasAttrib
class ChatBalloon(NodePath):
TEXT_X_OFFSET = -0.05
TEXT_Y_OFFSET = -0.05
TEXT_Z_OFFSET = -(4.0 / 33.0)
TEXT_MIN_WIDTH = 1.2
TEXT_MIN_HEIGHT = 1.0
TEXT_GLYPH_SCALE = 1.0
TEXT_GLYPH_SHIFT = 0.1
BALLOON_X_PADDING = 0.55
BALLOON_Z_PADDING = 0.8
BUTTON_SCALE = 6
BUTTON_SHIFT = (0, 0, 0.6)
def __init__(self, model, modelWidth, modelHeight, textNode, foreground=VBase4(0, 0, 0, 1), background=VBase4(1, 1, 1, 1), reversed=False, button=None):
NodePath.__init__(self, 'chatBalloon')
self.model = model
self.modelWidth = modelWidth
self.modelHeight = modelHeight
self.textNode = textNode
self.foreground = foreground
self.background = background
self.button = button
self.textNode.setTextColor(self.foreground)
self.balloon = self.model.copyTo(self)
self.balloon.setColor(self.background)
self.balloon.setTransparency(self.background[3] < 1)
self.textNodePath = self.attachNewNode(self.textNode)
self.textNodePath.setTransparency(self.foreground[3] < 1)
self.textNodePath.setAttrib(DepthWriteAttrib.make(0))
middle = self.balloon.find('**/middle')
top = self.balloon.find('**/top')
self.textWidth = self.textNode.getWidth()
if self.textWidth < self.TEXT_MIN_WIDTH:
self.textWidth = self.TEXT_MIN_WIDTH
paddedWidth = self.textWidth + self.BALLOON_X_PADDING * 2
self.balloon.setSx(paddedWidth / modelWidth)
self.textHeight = textNode.getHeight()
if self.textHeight < self.TEXT_MIN_HEIGHT:
self.textHeight = self.TEXT_MIN_HEIGHT
paddedHeight = self.textHeight + self.BALLOON_Z_PADDING * 2
middle.setSz(paddedHeight - 1.5)
top.setZ(middle, 1)
if reversed:
self.balloon.setSx(-self.balloon.getSx())
self.balloon.setTwoSided(True)
self.width = paddedWidth
self.height = paddedHeight
self.center = self.balloon.getBounds().getCenter()
self.textNodePath.setPos(self.center)
self.textNodePath.setY(self.TEXT_Y_OFFSET)
self.textNodePath.setX(self.textNodePath, -(self.textWidth / 2))
if self.textWidth == self.TEXT_MIN_WIDTH:
centerX = (self.TEXT_MIN_WIDTH - self.textNode.getWidth()) / 2.0
self.textNodePath.setX(self.textNodePath, centerX)
self.textNodePath.setZ(top, -self.BALLOON_Z_PADDING + self.TEXT_Z_OFFSET)
if self.textHeight == self.TEXT_MIN_HEIGHT:
centerZ = (ChatBalloon.TEXT_MIN_HEIGHT - self.textNode.getHeight()) / 2.0
self.textNodePath.setZ(self.textNodePath, -centerZ)
self.textNodePath.setX(self.textNodePath, self.TEXT_X_OFFSET)
if self.button is not None:
self.buttonNodePath = button.copyTo(self)
self.buttonNodePath.setPos(self.textNodePath, self.textWidth, 0, -self.textHeight)
self.buttonNodePath.setPos(self.buttonNodePath, ChatBalloon.BUTTON_SHIFT)
self.buttonNodePath.setScale(ChatBalloon.BUTTON_SCALE)
else:
self.buttonNodePath = None
self.setAntialias(AntialiasAttrib.MMultisample)
return
def setForeground(self, foreground):
self.foreground = foreground
self.textNode.setTextColor(self.foreground)
def getForeground(self):
return self.foreground
def setBackground(self, background):
self.background = background
self.balloon.setColor(self.background)
def getBackground(self):
return self.background
def setButton(self, button):
if self.buttonNodePath is not None:
self.buttonNodePath.removeNode()
self.buttonNodePath = None
if button is not None:
self.buttonNodePath = button.copyTo(self)
self.buttonNodePath.setPos(self.textNodePath, self.textWidth, 0, -self.textHeight)
self.buttonNodePath.setPos(self.buttonNodePath, ChatBalloon.BUTTON_SHIFT)
self.buttonNodePath.setScale(ChatBalloon.BUTTON_SCALE)
return
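# Illustrative sketch (not part of the original module): the balloon geometry,
# its measured dimensions and the TextNode are assumed to be prepared by the
# caller (e.g. loaded from the game's chat balloon model); all names below are
# hypothetical.
def _exampleMakeBalloon(model, modelWidth, modelHeight, textNode, avatar):
    balloon = ChatBalloon(model, modelWidth, modelHeight, textNode,
                          foreground=VBase4(0, 0, 0, 1),
                          background=VBase4(1, 1, 1, 1))
    balloon.reparentTo(avatar)
    return balloon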
|
the-stack_0_24936
|
"""
This file is part of the Semantic Quality Benchmark for Word Embeddings Tool in Python (SeaQuBe).
Copyright (c) 2021 by Benjamin Manns
:author: Benjamin Manns
"""
from os.path import join, basename, dirname
import unittest
from seaqube.nlp.seaqube_model import SeaQuBeCompressLoader, BaseModelWrapper
from seaqube.nlp.tools import gensim_we_model_to_custom_we_model
class BaseFTGensimModel(BaseModelWrapper):
def get_config(self):
return dict(sg=self.model.sg, cbow_mean=self.model.cbow_mean, size=self.model.vector_size,
alpha=self.model.alpha, min_alpha=self.model.min_alpha, min_n=self.model.wv.min_n,
max_n=self.model.wv.max_n, window=self.model.window, min_count=self.model.vocabulary.min_count,
sample=self.model.vocabulary.sample, negative=self.model.negative, workers=self.model.workers,
epochs=self.define_epochs(), class_name=str(self))
def _wrap_nlp_model(self, model):
return gensim_we_model_to_custom_we_model(model)
class TestExampleBasicAugmentation(unittest.TestCase):
def test_example001(self):
# Import all Augmentation methods
from seaqube.augmentation.word import Active2PassiveAugmentation, EDAAugmentation, TranslationAugmentation, EmbeddingAugmentation
from seaqube.augmentation.char import QwertyAugmentation
from seaqube.augmentation.corpus import UnigramAugmentation
from seaqube.tools.io import load_json
# prepare corpus and sample data
text = 'The quick brown fox jumps over the lazy dog .'
corpus = load_json(join(dirname(__file__), "..", "examples", "sick_full_corpus.json"))
#print(corpus)
# set up all augmentations
        # an (experimental) active-to-passive voice transformer; it transforms one sentence / doc into another
a2p = Active2PassiveAugmentation()
# easy-data augmentation method implementation (random word swap, insertion, deletion and replacement with synonyms)
eda = EDAAugmentation(max_length=2)
        # translate text to another language and back (with Google Translator)
translate = TranslationAugmentation(max_length=2)
        # replace words with a similar one using another word embedding
embed = EmbeddingAugmentation(max_length=2)
# insert typos on text based on a qwerty-keyboard
qwerty = QwertyAugmentation(replace_rate=0.07, max_length=2)
        # based on the UDA algorithm; only the Unigram method is used, which replaces low-meaning words with other low-meaning words
        # this method needs a corpus, because it needs to detect low-meaning words
unigram = UnigramAugmentation(corpus=corpus, max_length=2)
## API - Usage
# Every augmentation object have the same possibility
# 1. augmenting a string - same syntax as NLPAUG (https://github.com/makcedward/nlpaug)
print(qwerty.augment(text))
# or
print(translate.augment(text))
# 2. augmenting a doc (token based text)
print(unigram.doc_augment(doc=corpus[0]))
# doc_augment can also handle text:
print(embed.doc_augment(text=text))
# 3. augmenting a whole corpus
print(eda(corpus[0:200]))
# 4. Active2Passive is still experimental:
a2p.doc_augment(doc=['someone', 'is', 'not', 'reading', 'the', 'email'])
## We want to apply a method on a corpus, train a model and measure the performance
# tidy up RAM
del unigram, embed, translate
corpus_augmented = eda(corpus[0:200]) # augment a small subset
# save on disk:
#save_json(corpus_augmented, "augmented_sick.json")
        # To use NLP models with our benchmark tool set, they must implement the 'BaseModelWrapper' interface.
        # We set up a class that implements the fasttext nlp model from the gensim package.
        # This is only needed to get the benchmark running
from gensim.models import FastText
class FTModelStd500V5(BaseFTGensimModel):
def define_epochs(self):
return 100
def define_model(self):
return FastText(sg=1, cbow_mean=1, size=300, alpha=0.025, min_alpha=0.0001, min_n=1, max_n=5,
window=5, min_count=1, sample=0.001, negative=5, workers=self.cpus - 1)
def define_training(self):
self.model.build_vocab(sentences=self.data, update=False)
self.model.train(sentences=self.data, total_examples=len(self.data), epochs=self.epochs)
model = FTModelStd500V5()
# train the model
# model.train_on_corpus(corpus_augmented)
# get a dumped model to store it on disk - or use it in another process
# model.get()
# dill_dumper(model.get(), "example_model.dill")
# or to save a compressed model:
# SeaQuBeCompressLoader.save_model_compressed(model.get(), "example_model_compressed.dill")
nlp = SeaQuBeCompressLoader.load_compressed_model(join(dirname(__file__), "..", "examples", "example_model_compressed.dill"), "example")
del model
from seaqube.benchmark.corpus4ir import WordCentroidSimilarityBenchmark
from seaqube.benchmark.wordanalogy import WordAnalogyBenchmark
from seaqube.benchmark.wordsimilarity import WordSimilarityBenchmark
from seaqube.benchmark.wordoutliers import WordOutliersBenchmark
wsb = WordSimilarityBenchmark(test_set='simlex999')
print(wsb(nlp.model)) # score=0.008905456556563954
wab = WordAnalogyBenchmark('google-analogies')
print(wab(nlp.model)) # score=0.0
wob = WordOutliersBenchmark('wikisem500')
print(wob(nlp.model)) # score=0.0
c4ir = WordCentroidSimilarityBenchmark(corpus[0:200]) # need the original corpus for setting up IR
print(c4ir(nlp.model))
def test_example_aug(self):
# Import all Augmentation methods
from seaqube.augmentation.word import Active2PassiveAugmentation, EDAAugmentation, TranslationAugmentation, \
EmbeddingAugmentation
from seaqube.augmentation.char import QwertyAugmentation
from seaqube.augmentation.corpus import UnigramAugmentation
# import some tools
from seaqube.tools.io import load_json
from os.path import join
# load example data
import json, urllib.request
data = urllib.request.urlopen(
"https://raw.githubusercontent.com/bees4ever/SeaQuBe/master/examples/sick_full_corpus.json").read()
corpus = json.loads(data)
text = 'The quick brown fox jumps over the lazy dog .'
        # an (experimental) active-to-passive voice transformer; it transforms one sentence / doc into another
a2p = Active2PassiveAugmentation()
# easy-data augmentation method implementation (random word swap, insertion, deletion and replacement with synonyms)
eda = EDAAugmentation(max_length=2)
        # translate text to another language and back (with Google Translator)
translate = TranslationAugmentation(max_length=2)
        # replace words with a similar one using another word embedding
embed = EmbeddingAugmentation(max_length=2)
# insert typos on text based on a qwerty-keyboard
qwerty = QwertyAugmentation(replace_rate=0.07, max_length=2)
        # based on the UDA algorithm; only the Unigram method is used, which replaces low-meaning words with other low-meaning words
        # this method needs a corpus, because it needs to detect low-meaning words
unigram = UnigramAugmentation(corpus=corpus, max_length=2)
# 1. augmenting a string - same syntax as NLPAUG (https://github.com/makcedward/nlpaug)
print(qwerty.augment(text))
# or
print(translate.augment(text))
# 2. augmenting a doc (token based text)
print(unigram.doc_augment(doc=corpus[0]))
# doc_augment can also handle text:
print(embed.doc_augment(text=text))
# 3. augmenting a whole corpus
print(eda(corpus[0:200]))
# 4. Active2Passive is still experimental:
a2p.doc_augment(doc=['someone', 'is', 'not', 'reading', 'the', 'email'])
def test_example_chain(self):
from seaqube.augmentation.char.qwerty import QwertyAugmentation
from seaqube.augmentation.corpus.unigram import UnigramAugmentation
from seaqube.augmentation.word.active2passive import Active2PassiveAugmentation
from seaqube.augmentation.word.eda import EDAAugmentation
from seaqube.augmentation.word.embedding import EmbeddingAugmentation
from seaqube.augmentation.word.translation import TranslationAugmentation
TEST_CORPUS = [['till', 'this', 'moment', 'i', 'never', 'knew', 'myself', '.'],
['seldom', ',', 'very', 'seldom', ',', 'does', 'complete', 'truth', 'belong', 'to', 'any',
'human',
'disclosure', ';', 'seldom', 'can', 'it', 'happen', 'that', 'something', 'is', 'not', 'a',
'little',
'disguised', 'or', 'a', 'little', 'mistaken', '.'],
['i', 'declare', 'after', 'all', 'there', 'is', 'no', 'enjoyment', 'like', 'reading', '!', 'how',
'much',
'sooner', 'one', 'tires', 'of', 'anything', 'than', 'of', 'a', 'book', '!', '”'],
['men', 'have', 'had', 'every', 'advantage', 'of', 'us', 'in', 'telling', 'their', 'own',
'story', '.',
'education', 'has', 'been', 'theirs', 'in', 'so', 'much', 'higher', 'a', 'degree'],
['i', 'wish', ',', 'as', 'well', 'as', 'everybody', 'else', ',', 'to', 'be', 'perfectly',
'happy', ';',
'but', ',', 'like', 'everybody', 'else', ',', 'it', 'must', 'be', 'in', 'my', 'own', 'way',
'.'],
['there', 'are', 'people', ',', 'who', 'the', 'more', 'you', 'do', 'for', 'them', ',', 'the',
'less',
'they', 'will', 'do', 'for', 'themselves', '.'],
['one', 'half', 'of', 'the', 'world', 'can', 'not', 'understand', 'the', 'pleasures', 'of',
'the',
'other', '.']]
from seaqube.augmentation.base import AugmentationStreamer
from seaqube.augmentation.reduction.unique_corpus import UniqueCorpusReduction
        # Here we set up an augmentation stream. Every document is passed through this augmentation chain line by line.
        # This means: a document _d_ will first be translated.
        # In the second step, this translated document is fed to Qwerty. Now, qwerty returns 2 documents.
        # These 2 documents will each be fed to EDA. EDA generates 4 augmented documents for each of the two inputs, i.e. one input line results in 8 output lines.
        # AugmentationStreamer can also reduce documents; here it reduces them using the unique reducer.
streamer = AugmentationStreamer(
[TranslationAugmentation(max_length=1), QwertyAugmentation(seed=424242, max_length=2),
EDAAugmentation(max_length=4)], reduction_chain=[UniqueCorpusReduction()])
augmented_doc = streamer([TEST_CORPUS[0]])
augmented_doc
len(augmented_doc) # after reducing documents can be less then 8
streamer(TEST_CORPUS) # apply the full corpus for the streamer
corpus = [['the', 'quick', 'brown', 'fox', 'jumps', 'over', 'the', 'lazy', 'dog']]
from seaqube.tools.chainer import CallOnOneChain
from seaqube.nlp.tools import unique_2d_list
pipe = CallOnOneChain([TranslationAugmentation(max_length=1),
UnigramAugmentation(corpus=TEST_CORPUS, seed=50, max_length=2, replace_threshold=0.9,
find_threshold=0.85),
QwertyAugmentation(seed=424242, max_length=2), unique_2d_list])
augmented_and_unique = pipe(TEST_CORPUS)
augmented_and_unique
len(augmented_and_unique) # 8 * 2 * 2 = 32, reducing makes it smaller
def test_example_nlp(self):
from seaqube.nlp.types import SeaQuBeWordEmbeddingsModel
        # Let's have a look at a context-based NLP model called Context2Vec
from seaqube.nlp.context2vec.context2vec import Context2Vec
# Import some seaqube tools:
from seaqube.nlp.tools import word_count_list
from seaqube.nlp.types import RawModelTinCan
from seaqube.nlp.seaqube_model import SeaQuBeNLPLoader, SeaQuBeCompressLoader
from seaqube.nlp.tools import tokenize_corpus
class SeaQuBeWordEmbeddingsModelC2V(SeaQuBeWordEmbeddingsModel):
def __init__(self, c2v: Context2Vec):
self.c2v = c2v
def vocabs(self):
return self.c2v.wv.vocabs
@property
def wv(self):
return self.c2v.wv
def word_vector(self, word):
return self.c2v.wv[word]
def matrix(self):
return self.c2v.wv.matrix
star_wars_cites = ["How you get so big eating food of this kind?", "'Spring the trap!'", "Same as always…",
"You came in that thing? You’re braver than I thought!", "Who’s scruffy looking?",
"Let the Wookiee win.", "The Emperor is not as forgiving as I am",
"I don’t know where you get your delusions, laserbrain.", "Shutting up, sir,",
"Boring conversation anyway…", ]
corpus = tokenize_corpus(star_wars_cites)
c2v = Context2Vec(epoch=3)
c2v.train(corpus)
seaC2V = SeaQuBeWordEmbeddingsModelC2V(c2v)
tin_can = RawModelTinCan(seaC2V, word_count_list(corpus))
SeaQuBeCompressLoader.save_model_compressed(tin_can, "c2v_small")
nlp = SeaQuBeNLPLoader.load_model_from_tin_can(tin_can, "c2v")
nlp("This is a test")
doc = list(nlp("This is a test"));
print(doc);
type(doc[0])
nlp("This is a test")[0].vector
nlp("This is a test").vector
nlp("This is a test").sif_vector
nlp("Is the Emperor a laserbrain").similarity("Boring conversation anyway…")
nlp("Is the Emperor a laserbrain").similarity("Boring conversation anyway…", vector="sif")
word = nlp("Is the Emperor a laserbrain?")[2]
word
word.similarity("Wookiee")
nlp.vocab()
def test_example_benchmark(self):
# import some tools
from seaqube.tools.io import load_json
from os.path import join
from seaqube.nlp.seaqube_model import SeaQuBeCompressLoader
main_path = join(dirname(__file__), "..", "examples")
nlp = SeaQuBeCompressLoader.load_compressed_model(join(main_path, "example_model_compressed.dill"), "example")
import json, urllib.request
data = urllib.request.urlopen(
"https://raw.githubusercontent.com/bees4ever/SeaQuBe/master/examples/sick_full_corpus.json").read()
corpus = json.loads(data)
## import tools
from seaqube.benchmark.wordanalogy import WordAnalogyBenchmark
from seaqube.benchmark.wordsimilarity import WordSimilarityBenchmark
from seaqube.benchmark.wordoutliers import WordOutliersBenchmark
from seaqube.benchmark.semantic_wordnet import SemanticWordnetBenchmark
        # We need to install `vec4ir`; this can be done through "SeaQuBe":
# from seaqube.benchmark.corpus4ir import WordCentroidSimilarityBenchmark
from seaqube import download;
download('vec4ir')
import vec4ir
# load module
from seaqube.benchmark.corpus4ir import WordCentroidSimilarityBenchmark
# perform semantical tests
wsb = WordSimilarityBenchmark(test_set='simlex999')
print(wsb(nlp.model)) # score=0.008905456556563954
wab = WordAnalogyBenchmark('google-analogies')
print(wab(nlp.model)) # score=0.0
wob = WordOutliersBenchmark('wikisem500')
print(wob(nlp.model)) # score=0.0
c4ir = WordCentroidSimilarityBenchmark(corpus[0:200]) # need the original corpus for setting up IR
print(c4ir(nlp.model))
        # The semantic wordnet benchmark needs a base of word pairs. These pairs can be generated easily:
vocabs = nlp.vocab()
vocabs = vocabs[0:200]
word_pairs, length = SemanticWordnetBenchmark.word_pairs_from_vocab_list(vocabs)
print("Pairs Length:", length)
swb = SemanticWordnetBenchmark(word_pairs, False)
print(swb(nlp.model))
def test_roberta_example(self):
from seaqube.nlp.roberta.seaberta import SeaBERTa
from seaqube.nlp.types import SeaQuBeWordEmbeddingsModel, SeaQuBeNLPModel2WV
import logging
logging.basicConfig(level=logging.DEBUG)
from seaqube.tools.io import load_json, save_json
from os.path import join
# Import some seaqube tools:
from seaqube.nlp.tools import word_count_list
from seaqube.nlp.types import RawModelTinCan
from seaqube.nlp.seaqube_model import SeaQuBeNLPLoader, SeaQuBeCompressLoader
from seaqube.nlp.tools import tokenize_corpus
yoda_cites = [
["fear", "is", "the", "path", "to", "the", "dark", "side", ".", "fear", "leads", "to", "anger", ".",
"anger", "leads", "to", "hate", ".", "hate", "leads", "to", "suffering", "."],
["once", "you", "start", "down", "the", "dark", "path", ",", "forever", "will", "it", "dominate", "your",
"destiny", ".", "consume", "you", ",", "it", "will", "."],
["always", "pass", "on", "what", "you", "have", "learned", "."],
["patience", "you", "must", "have", "my", "young", "padawan", "."],
["in", "a", "dark", "place", "we", "find", "ourselves", ",", "and", "a", "little", "more", "knowledge",
"lights", "our", "way", "."],
["death", "is", "a", "natural", "part", "of", "life", ".", "rejoice", "for", "those", "around", "you",
"who", "transform", "into", "the", "force", ".", "mourn", "them", "do", "not", ".", "miss", "them", "do",
"not", ".", "attachment", "leads", "to", "jealously", ".", "the", "shadow", "of", "greed", ",", "that",
"is", "."],
["powerful", "you", "have", "become", ",", "the", "dark", "side", "i", "sense", "in", "you", "."],
["train", "yourself", "to", "let", "go", "of", "everything", "you", "fear", "to", "lose", "."],
["feel", "the", "force", "!"], ["truly", "wonderful", "the", "mind", "of", "a", "child", "is", "."],
["do", "or", "do", "not", ".", "there", "is", "no", "try", "."],
["great", "warrior", ".", "wars", "not", "make", "one", "great", "."],
["size", "matters", "not", ".", "look", "at", "me", ".", "judge", "me", "by", "my", "size", ",", "do",
"you", "?", "hmm", "?", "hmm", ".", "and", "well", "you", "should", "not", ".", "for", "my", "ally", "is",
"the", "force", ",", "and", "a", "powerful", "ally", "it", "is", ".", "life", "creates", "it", ",",
"makes", "it", "grow", ".", "its", "energy", "surrounds", "us", "and", "binds", "us", ".", "luminous",
"beings", "are", "we", ",", "not", "this", "crude", "matter", ".", "you", "must", "feel", "the", "force",
"around", "you", ";", "here", ",", "between", "you", ",", "me", ",", "the", "tree", ",", "the", "rock",
",", "everywhere", ",", "yes", ".", "even", "between", "the", "land", "and", "the", "ship", "."],
["the", "dark", "side", "clouds", "everything", ".", "impossible", "to", "see", "the", "light", ",", "the",
"future", "is", "."], ["you", "will", "find", "only", "what", "you", "bring", "in", "."],
["to", "be", "jedi", "is", "to", "face", "the", "truth", ",", "and", "choose", ".", "give", "off", "light",
",", "or", "darkness", ",", "padawan", ".", "be", "a", "candle", ",", "or", "the", "night", "."],
["control", ",", "control", ",", "you", "must", "learn", "control", "!"],
["on", "many", "long", "journeys", "have", "i", "gone", ".", "and", "waited", ",", "too", ",", "for",
"others", "to", "return", "from", "journeys", "of", "their", "own", ".", "some", "return", ";", "some",
"are", "broken", ";", "some", "come", "back", "so", "different", "only", "their", "names", "remain", "."],
["in", "the", "end", ",", "cowards", "are", "those", "who", "follow", "the", "dark", "side", "."],
["difficult", "to", "see", ".", "always", "in", "motion", "is", "the", "future", "."],
["ready", "are", "you", "?", "what", "know", "you", "of", "ready", "?", "for", "eight", "hundred", "years",
"have", "i", "trained", "jedi", ".", "my", "own", "counsel", "will", "i", "keep", "on", "who", "is", "to",
"be", "trained", ".", "a", "jedi", "must", "have", "the", "deepest", "commitment", ",", "the", "most",
"serious", "mind", ".", "this", "one", "a", "long", "time", "have", "i", "watched", ".", "all", "his",
"life", "has", "he", "looked", "away\u2026", "to", "the", "future", ",", "to", "the", "horizon", ".",
"never", "his", "mind", "on", "where", "he", "was", ".", "hmm", "?", "what", "he", "was", "doing", ".",
"hmph", ".", "adventure", ".", "heh", ".", "excitement", ".", "heh", ".", "a", "jedi", "craves", "not",
"these", "things", ".", "you", "are", "reckless", "."],
["secret", ",", "shall", "i", "tell", "you", "?", "grand", "master", "of", "jedi", "order", "am", "i", ".",
"won", "this", "job", "in", "a", "raffle", "i", "did", ",", "think", "you", "?", "\u2018", "how", "did",
"you", "know", ",", "how", "did", "you", "know", ",", "master", "yoda", "?", "\u2019", "master", "yoda",
"knows", "these", "things", ".", "his", "job", "it", "is", "."],
["to", "answer", "power", "with", "power", ",", "the", "jedi", "way", "this", "is", "not", ".", "in",
"this", "war", ",", "a", "danger", "there", "is", ",", "of", "losing", "who", "we", "are", "."],
["many", "of", "the", "truths", "that", "we", "cling", "to", "depend", "on", "our", "point", "of", "view",
"."], ["named", "must", "your", "fear", "be", "before", "banish", "it", "you", "can", "."],
["you", "think", "yoda", "stops", "teaching", ",", "just", "because", "his", "student", "does", "not",
"want", "to", "hear", "?", "a", "teacher", "yoda", "is", ".", "yoda", "teaches", "like", "drunkards",
"drink", ",", "like", "killers", "kill", "."],
["do", "not", "assume", "anything", "obi-wan", ".", "clear", "your", "mind", "must", "be", "if", "you",
"are", "to", "discover", "the", "real", "villains", "behind", "this", "plot", "."],
["you", "will", "know", "(", "the", "good", "from", "the", "bad", ")", "when", "you", "are", "calm", ",",
"at", "peace", ".", "passive", ".", "a", "jedi", "uses", "the", "force", "for", "knowledge", "and",
"defense", ",", "never", "for", "attack", "."],
["soon", "will", "i", "rest", ",", "yes", ",", "forever", "sleep", ".", "earned", "it", "i", "have", ".",
"twilight", "is", "upon", "me", ",", "soon", "night", "must", "fall", "."],
["when", "you", "look", "at", "the", "dark", "side", ",", "careful", "you", "must", "be", ".", "for", "the",
"dark", "side", "looks", "back", "."],
["you", "will", "know", "(", "the", "good", "from", "the", "bad", ")", "when", "you", "are", "calm", ",",
"at", "peace", ".", "passive", ".", "a", "jedi", "uses", "the", "force", "for", "knowledge", "and",
"defense", ",", "never", "for", "attack", "."],
["smaller", "in", "number", "are", "we", ",", "but", "larger", "in", "mind", "."],
["your", "path", "you", "must", "decide", "."],
["always", "two", "there", "are", ",", "no", "more", ",", "no", "less", ".", "a", "master", "and", "an",
"apprentice", "."],
["no", "longer", "certain", ",", "that", "one", "ever", "does", "win", "a", "war", ",", "i", "am", ".",
"for", "in", "fighting", "the", "battles", ",", "the", "bloodshed", ",", "already", "lost", "we", "have",
".", "yet", ",", "open", "to", "us", "a", "path", "remains", ".", "that", "unknown", "to", "the", "sith",
"is", ".", "through", "this", "path", ",", "victory", "we", "may", "yet", "find", ".", "not", "victory",
"in", "the", "clone", "wars", ",", "but", "victory", "for", "all", "time", "."],
["if", "no", "mistake", "you", "have", "made", ",", "losing", "you", "are", ".", "a", "different", "game",
"you", "should", "play", "."],
["[", "luke", "skywalker", ":", "]", "i", "can", "\u2019", "t", "believe", "it", ".", "[", "yoda", ":", "]",
"that", "is", "why", "you", "fail", "."], ["happens", "to", "every", "guy", "sometimes", "this", "does"],
["adventure", ".", "excitement", ".", "a", "jedi", "craves", "not", "these", "things", "."],
["only", "the", "dark", "lord", "of", "the", "sith", "knows", "of", "our", "weakness", ".", "if",
"informed", "the", "senate", "is", ",", "multiply", "our", "adversaries", "will", "."]]
main_path = join(dirname(__file__), "..", "examples", "seaberta", "training")
train_params = {
"per_gpu_eval_batch_size": 4,
"do_eval": True,
"evaluate_during_training": False,
"line_by_line": False,
"should_continue": False,
"model_name_or_path": False,
"mlm": True,
"do_train": True,
"overwrite_output_dir": True,
"overwrite_cache": False,
"block_size": 512,
"eval_all_checkpoints": 2,
"server_ip": "",
"mlm_probability": 0.15,
"local_rank": -1, # NO GPU,
"no_cuda": False,
"fp16": False,
"fp16_opt_level": 'O1',
"max_steps": 10,
"warmup_steps": 10,
"learning_rate": 5e-5,
"per_gpu_train_batch_size": 4,
"gradient_accumulation_steps": 4,
"weight_decay": 0.01,
"adam_epsilon": 1e-6,
"max_grad_norm": 100.0,
"save_total_limit": 10,
"save_steps": 10,
"logging_steps": 2,
"seed": 0,
}
roberta = SeaBERTa(main_path, train_params)
roberta.train(yoda_cites)
roberta.load_trained_model()
class SeaQuBeWordEmbeddingsModelSeaBERTa(SeaQuBeWordEmbeddingsModel):
def __init__(self, seaberta: SeaBERTa):
self.seaberta = seaberta
def vocabs(self):
return self.seaberta.wv.vocabs
@property
def wv(self):
return self.seaberta.wv
def word_vector(self, word):
return self.seaberta.wv[word]
def matrix(self):
return self.seaberta.wv.matrix
def context_embedding(self, words, position):
return self.seaberta.context_embedding(words, position)
seaberta = SeaQuBeWordEmbeddingsModelSeaBERTa(roberta)
roberta.context_embedding(["t"], 0), roberta.context_embedding(["t"], 0).shape
self.assertEqual(type(roberta.wv.vocabs), list)
tin_can = RawModelTinCan(seaberta, word_count_list(yoda_cites))
nlp = SeaQuBeNLPLoader.load_model_from_tin_can(tin_can, "seaberta")
doc = nlp("Luke is a Jedi and yoda is a master Jedi!")
self.assertEqual(('jedi', 'jedi'), (doc[3].text, doc[9].text)) # both are the same word
from seaqube.tools.math import cosine
self.assertAlmostEqual(cosine(doc[3].vector, doc[9].vector), 0.9, delta=0.5)
|
the-stack_0_24937
|
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2021 Beartype authors.
# See "LICENSE" for further details.
'''
Project-wide **unmemoized class tester** (i.e., unmemoized and thus efficient
callable testing various properties of arbitrary classes) utilities.
This private submodule is *not* intended for importation by downstream callers.
'''
# ....................{ IMPORTS }....................
from beartype.roar._roarexc import _BeartypeUtilTypeException
from beartype._cave._cavefast import TestableTypes as TestableTypesTuple
from beartype._data.cls.datacls import TYPES_BUILTIN_FAKE
from beartype._data.mod.datamod import BUILTINS_MODULE_NAME
from beartype._util.utiltyping import (
TypeException,
TypeOrTupleTypes,
)
# ....................{ VALIDATORS }....................
def die_unless_type(
# Mandatory parameters.
cls: object,
# Optional parameters.
exception_cls: TypeException = _BeartypeUtilTypeException,
exception_prefix: str = '',
) -> None:
'''
Raise an exception of the passed type unless the passed object is a class.
Parameters
----------
cls : object
Object to be validated.
exception_cls : Type[Exception]
Type of exception to be raised. Defaults to
:exc:`_BeartypeUtilTypeException`.
exception_prefix : str, optional
Human-readable label prefixing the representation of this object in the
exception message. Defaults to the empty string.
Raises
----------
:exc:`exception_cls`
If this object is *not* a class.
'''
# If this object is *NOT* a class, raise an exception.
if not isinstance(cls, type):
assert isinstance(exception_cls, type), (
            f'{repr(exception_cls)} not exception class.')
assert isinstance(exception_prefix, str), (
            f'{repr(exception_prefix)} not string.')
raise exception_cls(f'{exception_prefix}{repr(cls)} not class.')
#FIXME: Unit test us up.
def die_unless_type_or_types(
# Mandatory parameters.
type_or_types: object,
# Optional parameters.
exception_cls: TypeException = _BeartypeUtilTypeException,
exception_prefix: str = '',
) -> None:
'''
Raise an exception of the passed type unless the passed object is either a
class *or* tuple of one or more classes.
Parameters
----------
type_or_types : object
Object to be validated.
exception_cls : Type[Exception]
Type of exception to be raised. Defaults to
:exc:`_BeartypeUtilTypeException`.
exception_prefix : str, optional
Human-readable label prefixing the representation of this object in the
exception message. Defaults to the empty string.
Raises
----------
:exc:`exception_cls`
If this object is neither a class *nor* tuple of one or more classes.
'''
# If this object is neither a class *NOR* tuple of one or more classes,
# raise an exception.
if not is_type_or_types(type_or_types):
assert isinstance(exception_cls, type), (
            f'{repr(exception_cls)} not exception class.')
assert isinstance(exception_prefix, str), (
            f'{repr(exception_prefix)} not string.')
# Exception message to be raised below.
exception_message = (
f'{exception_prefix}{repr(type_or_types)} neither '
f'class nor tuple of one or more classes'
)
# If this object is a tuple...
if isinstance(type_or_types, tuple):
# If this tuple is empty, note that.
if not type_or_types:
exception_message += ' (i.e., is empty tuple)'
# Else, this tuple is non-empty. In this case...
else:
# For the 0-based index of each tuple item and that item...
for cls_index, cls in enumerate(type_or_types):
# If this object is *NOT* a class...
if not isinstance(cls, type):
# Note this.
exception_message += (
f' (i.e., tuple item {cls_index} '
f'{repr(cls)} not class)'
)
# Halt iteration.
break
# Else, this object is a class. Continue to the next item.
# Else, this object is a non-tuple. In this case, the general-purpose
# exception message suffices.
# Raise this exception.
raise exception_cls(f'{exception_message}.')
# ....................{ TESTERS }....................
def is_type_or_types(type_or_types: object) -> bool:
'''
``True`` only if the passed object is either a class *or* tuple of one or
more classes.
Parameters
----------
type_or_types : object
Object to be inspected.
Returns
----------
bool
``True`` only if this object is either a class *or* tuple of one or
more classes.
'''
# Return true only if either...
return (
# This object is a class *OR*...
isinstance(type_or_types, type) or
(
# This object is a tuple *AND*...
isinstance(type_or_types, tuple) and
# This tuple is non-empty *AND*...
bool(type_or_types) and
# This tuple contains only classes.
all(isinstance(cls, type) for cls in type_or_types)
)
)
def is_type_builtin(cls: type) -> bool:
'''
``True`` only if the passed class is **builtin** (i.e., globally accessible
C-based type requiring *no* explicit importation).
This tester is intentionally *not* memoized (e.g., by the
:func:`callable_cached` decorator), as the implementation trivially reduces
to an efficient one-liner.
Parameters
----------
cls : type
Class to be inspected.
Returns
----------
bool
``True`` only if this class is builtin.
Raises
----------
_BeartypeUtilTypeException
If this object is *not* a class.
'''
# Avoid circular import dependencies.
from beartype._util.mod.utilmodule import (
get_object_type_module_name_or_none)
# If this object is *NOT* a type, raise an exception.
die_unless_type(cls)
# Else, this object is a type.
# If this type is a fake builtin (i.e., type that is *NOT* builtin but
# erroneously masquerading as being builtin), this type is *NOT* a builtin.
# In this case, silently reject this type.
if cls in TYPES_BUILTIN_FAKE:
return False
# Else, this type is *NOT* a fake builtin.
# Fully-qualified name of the module defining this type if this type is
# defined by a module *OR* "None" otherwise (i.e., if this type is
# dynamically defined in-memory).
cls_module_name = get_object_type_module_name_or_none(cls)
    # Return true only if this name is that of the "builtins" module
# declaring all builtin types.
return cls_module_name == BUILTINS_MODULE_NAME
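def _is_type_builtin_example() -> None:
    '''
    Illustrative sketch only (*not* part of the original submodule),
    exercising the tester defined above.
    '''
    class Muffin(object): pass
    # "int" lives in the "builtins" module and is accepted...
    assert is_type_builtin(int) is True
    # ...while a user-defined class is not.
    assert is_type_builtin(Muffin) is False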
def is_type_subclass(
cls: object, base_classes: TypeOrTupleTypes) -> bool:
'''
``True`` only if the passed object is a subclass of either the passed class
if passed one class *or* at least one of the passed classes if passed a
tuple of classes.
Caveats
----------
**This higher-level tester should always be called in lieu of the
lower-level** :func:`issubclass` **builtin,** which raises an undescriptive
exception when the first passed parameter is *not* a class: e.g.,
.. code-block:: python
>>> issubclass(object(), type)
TypeError: issubclass() arg 1 must be a class
This tester suffers no such deficits, instead safely returning ``False``
when the first passed parameter is *not* a class.
Parameters
----------
obj : object
Object to be inspected.
base_classes : TestableTypes
Class(es) to test whether this object is a subclass of defined as
either:
* A single class.
* A tuple of one or more classes.
Returns
----------
bool
``True`` only if this object is a subclass of these class(es).
'''
assert isinstance(base_classes, TestableTypesTuple), (
f'{repr(base_classes)} neither class nor tuple of classes.')
# One-liners for tremendous bravery.
return isinstance(cls, type) and issubclass(cls, base_classes)
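def _is_type_subclass_example() -> None:
    '''
    Illustrative sketch only (*not* part of the original submodule): unlike
    the :func:`issubclass` builtin, this tester safely returns ``False`` when
    passed a non-class first argument.
    '''
    assert is_type_subclass(bool, int) is True
    # issubclass(object(), type) would raise a TypeError here instead.
    assert is_type_subclass(object(), type) is False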
|
the-stack_0_24939
|
#!/usr/bin/env python3
# Copyright 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Routines to modify keyboard LED state."""
import ast
import fcntl
import logging
import os
import sys
import threading
# Constants from /usr/include/linux/kd.h.
KDSETLED = 0x4B32
LED_SCR = 1
LED_NUM = 2
LED_CAP = 4
# Resets LEDs to the default values (console_ioctl.h says that any higher-
# order bit than LED_CAP will do).
LED_RESET = 8
# Number of VTs on which to set the keyboard LEDs.
MAX_VTS = 8
# Set of FDs of all TTYs on which to set LEDs. Lazily initialized by SetLeds.
_tty_fds = None
_tty_fds_lock = threading.Lock()
def SetLeds(state):
"""Sets the current LEDs on VTs [0,MAX_VTS) to the given state.
Errors are ignored.
(We set the LED on all VTs because /dev/console may not work reliably under
the combination of X and autotest.)
Args:
    state: A bitwise OR of zero or more of LED_SCR, LED_NUM, and LED_CAP.
Returns:
True if able to set at least one LED, and False otherwise.
"""
global _tty_fds # pylint: disable=global-statement
with _tty_fds_lock:
if _tty_fds is None:
_tty_fds = []
for tty in range(MAX_VTS):
dev = '/dev/tty%d' % tty
try:
_tty_fds.append(os.open(dev, os.O_RDWR))
except Exception:
logging.exception('Unable to open %s', dev)
if not _tty_fds:
return False
for fd in _tty_fds:
try:
fcntl.ioctl(fd, KDSETLED, state)
except Exception:
pass
return True
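def _ExampleSetLeds():
  """Illustrative only (not part of the original file): light Num Lock and
  Caps Lock together, then restore the default LED state."""
  SetLeds(LED_NUM | LED_CAP)
  SetLeds(LED_RESET)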
class Blinker:
"""Blinks LEDs asynchronously according to a particular pattern.
Start() and Stop() are not thread-safe and must be invoked from the same
thread.
This can also be used as a context manager:
with leds.Blinker(...):
...do something that will take a while...
"""
thread = None
def __init__(self, pattern):
"""Constructs the blinker (but does not start it).
Args:
pattern: A list of tuples. Each element is (state, duration),
where state contains the LEDs that should be lit (a bitwise
OR of LED_SCR, LED_NUM, and/or LED_CAP). For example,
((LED_SCR|LED_NUM|LED_CAP, .2),
(0, .05))
would turn all LEDs on for .2 s, then all off for 0.05 s,
ad infinitum.
"""
self.pattern = pattern
self.done = threading.Event()
def Start(self):
"""Starts blinking in a separate thread until Stop is called.
May only be invoked once.
"""
assert not self.thread
self.thread = threading.Thread(target=self._Run)
self.thread.start()
def Stop(self):
"""Stops blinking."""
self.done.set()
if self.thread:
self.thread.join()
self.thread = None
def __enter__(self):
self.Start()
def __exit__(self, exc_type, exc_value, traceback):
del exc_type, exc_value, traceback # Unused.
self.Stop()
def _Run(self):
while True: # Repeat pattern forever
for state, duration in self.pattern:
if not SetLeds(state):
return # Failure, end this thread
self.done.wait(duration)
if self.done.is_set():
SetLeds(LED_RESET)
return
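def _ExampleBlinkUntilEnter():
  """Illustrative only (not part of the original file): blink Num Lock and
  Caps Lock alternately every half second until Enter is pressed."""
  with Blinker([(LED_NUM, 0.5), (LED_CAP, 0.5)]):
    sys.stdin.readline()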
def main():
"""Blinks the pattern in sys.argv[1] if present, or the famous theme from
William Tell otherwise.
"""
if len(sys.argv) > 1:
blinker = Blinker(ast.literal_eval(sys.argv[1]))
else:
DURATION_SCALE = .125
def Blip(state, duration=1):
return [(state, duration * .6 * DURATION_SCALE),
(0, duration * .4 * DURATION_SCALE)]
blinker = Blinker(
2 * (2 * Blip(LED_NUM) + Blip(LED_NUM, 2)) + 2 * Blip(LED_NUM) +
Blip(LED_CAP, 2) + Blip(LED_SCR, 2) + Blip(LED_CAP | LED_SCR, 2) +
2 * Blip(LED_NUM) + Blip(LED_NUM, 2) + 2 * Blip(LED_NUM) +
Blip(LED_CAP, 2) + 2 * Blip(LED_SCR) + Blip(LED_CAP, 2) +
Blip(LED_NUM, 2) + Blip(LED_CAP | LED_NUM, 2)
)
with blinker:
# Wait for newline, and then quit gracefully
sys.stdin.readline()
if __name__ == '__main__':
main()
|
the-stack_0_24943
|
"""Test script for successful basic authentication."""
import time
import json
import requests
ZOE_API_URI = 'http://127.0.0.1:5100/api/0.7/'
ZOE_AUTH = ('admin', 'admin')
TIMEOUT = 5
class TestZoeRest:
"""Test case class."""
id = None
def test_info(self, zoe_api_process):
"""Test info api endpoint."""
print('Test info api endpoint')
req = requests.get(ZOE_API_URI + 'info', timeout=TIMEOUT)
assert req.status_code == 200
def test_user(self, zoe_api_process):
"""Test user api endpoint."""
print('Test user api endpoint')
req = requests.get(ZOE_API_URI + 'user', auth=ZOE_AUTH, timeout=TIMEOUT)
assert req.status_code == 200
def test_list_all_executions(self, zoe_api_process):
"""Test list all executions api endpoint."""
print('Test list all executions api endpoint')
req = requests.get(ZOE_API_URI + 'execution', auth=ZOE_AUTH, timeout=TIMEOUT)
assert req.status_code == 200
# def test_7_delete_execution(self):
# """Test delete execution api endpoint."""
# print('Test delete execution api endpoint')
# req = requests.delete(self.__class__.uri + 'execution/delete/' + self.__class__.id, auth=self.__class__.auth)
# if req.status_code != 204:
# print('error message: {}'.format(req.json()['message']))
# self.assertEqual(req.status_code, 204)
def test_start_stop_execution(self, zoe_api_process, zoe_master_process):
"""Test start execution api endpoint."""
print('Test start execution api endpoint')
with open('integration_tests/zapp.json', encoding='utf-8') as data_file:
data = json.loads(data_file.read())
time.sleep(10) # wait for test Zoe to start and load the docker status
req = requests.post(ZOE_API_URI + 'execution', auth=ZOE_AUTH, json={"application": data, "name": "requests"}, timeout=TIMEOUT)
assert req.status_code == 201
exec_id = str(req.json()['execution_id'])
print('execution ID is {}'.format(exec_id))
print('Test execution details api endpoint')
while True:
req = requests.get(ZOE_API_URI + 'execution/' + exec_id, auth=ZOE_AUTH, timeout=TIMEOUT)
assert req.status_code == 200
if req.json()['status'] == 'running':
break
elif req.json()['status'] == 'error':
print(req.json()['error_message'])
assert False
print('Waiting for ZApp to start (status is {})...'.format(req.json()['status']))
time.sleep(1)
print('Test service details api endpoint')
req = requests.get(ZOE_API_URI + 'execution/' + exec_id, auth=ZOE_AUTH, timeout=TIMEOUT)
sid = req.json()["services"][0]
req = requests.get(ZOE_API_URI + 'service/' + str(sid), auth=ZOE_AUTH, timeout=TIMEOUT)
assert req.status_code == 200
print('Test terminate execution api endpoint')
time.sleep(10)
req = requests.delete(ZOE_API_URI + 'execution/' + exec_id, auth=ZOE_AUTH, timeout=TIMEOUT)
assert req.status_code == 204
time.sleep(4)
def test_zapp_validate(self, zoe_api_process):
"""Test ZApp validation endpoint"""
print("Test ZApp validation endpoint")
with open('integration_tests/zapp.json', encoding='utf-8') as data_file:
data = json.loads(data_file.read())
req = requests.post(ZOE_API_URI + 'zapp_validate', json=data, timeout=TIMEOUT)
if req.status_code != 200:
print('Error reason: {}, {}'.format(req.reason, req.text))
assert req.status_code == 200
def test_zapp_validate_fail(self, zoe_api_process):
"""Test ZApp validation endpoint"""
print("Test ZApp validation endpoint")
with open('integration_tests/zapp.json', encoding='utf-8') as data_file:
data = json.loads(data_file.read())
del data['version']
req = requests.post(ZOE_API_URI + 'zapp_validate', json=data, timeout=TIMEOUT)
assert req.status_code == 400
|
the-stack_0_24944
|
import json
import setuptools
kwargs = json.loads(
"""
{
"name": "ros-cdk-memcache",
"version": "1.0.3",
"description": "Aliyun SDK Copyright (C) Alibaba Cloud Computing All rights reserved. http://www.aliyun.com",
"license": "Apache-2.0",
"url": "https://github.com/aliyun/Resource-Orchestration-Service-Cloud-Development-Kit.git",
"long_description_content_type": "text/markdown",
"author": "ROS Development Team",
"bdist_wheel": {
"universal": true
},
"project_urls": {
"Source": "https://github.com/aliyun/Resource-Orchestration-Service-Cloud-Development-Kit.git"
},
"package_dir": {
"": "src"
},
"packages": [
"ros_cdk_memcache",
"ros_cdk_memcache._jsii"
],
"package_data": {
"ros_cdk_memcache._jsii": [
"[email protected]"
],
"ros_cdk_memcache": [
"py.typed"
]
},
"python_requires": ">=3.6",
"install_requires": [
"constructs>=3.0.4, <4.0.0",
"jsii>=1.49.0, <2.0.0",
"publication>=0.0.3",
"ros-cdk-core>=1.0.5, <2.0.0"
],
"classifiers": [
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Programming Language :: JavaScript",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Typing :: Typed",
"License :: OSI Approved",
"Programming Language :: Python :: 3"
],
"scripts": []
}
"""
)
with open("README.md", encoding="utf8") as fp:
kwargs["long_description"] = fp.read()
setuptools.setup(**kwargs)
|
the-stack_0_24945
|
"""Module contains api objects for executing and checking requests"""
from urllib.parse import urlencode
import allure
import attr
from .endpoints import Endpoints
from .methods import Methods
from .tools import attach_request_log
from ..steps.asserts import status_code_should_be, body_should_be
@attr.dataclass
class Request:
"""Request for a specific endpoint"""
method: Methods
endpoint: Endpoints
object_id: int = None
url_params: dict = {}
headers: dict = {}
data: dict = {}
@attr.dataclass
class ExpectedResponse:
"""Response to be expected. Checking the status code and body if present"""
status_code: int
body: dict = None
class APPApi:
"""APP api wrapper"""
__slots__ = ("_url",)
_api_prefix = ""
def __init__(self, url="http://localhost:8000"):
self._url = url
@property
def _base_url(self):
return f"{self._url}{self._api_prefix}"
def exec_request(self, request: Request, expected_response: ExpectedResponse):
"""
Execute HTTP request based on "request" argument.
        Assert response params and values based on "expected_response" argument.
"""
url = self.get_url_for_endpoint(
endpoint=request.endpoint,
method=request.method,
object_id=request.object_id,
)
url_params = request.url_params.copy()
step_name = f"Send {request.method.name} {url.replace(self._base_url, '')}"
if url_params:
step_name += f"?{urlencode(url_params)}"
with allure.step(step_name):
response = request.method.function(
url=url,
params=url_params,
json=request.data,
headers=request.headers,
)
attach_request_log(response)
status_code_should_be(
response=response, status_code=expected_response.status_code
)
if expected_response.body is not None:
body_should_be(response=response, expected_body=expected_response.body)
return response
def get_url_for_endpoint(
self, endpoint: Endpoints, method: Methods, object_id: int
):
"""
Return direct link for endpoint object
"""
if "{id}" in method.url_template:
if object_id is None:
raise ValueError(
"Request template requires 'id', but 'request.object_id' is None"
)
url = method.url_template.format(name=endpoint.path, id=object_id)
else:
url = method.url_template.format(name=endpoint.path)
return f"{self._base_url}{url}"
|
the-stack_0_24948
|
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import logging
from functools import partial
import numpy as np
import pytest
import torch
from torch.distributions import biject_to, constraints
import pyro
import pyro.distributions as dist
import pyro.optim as optim
from pyro.distributions.transforms import block_autoregressive, iterated
from pyro.infer import SVI, Trace_ELBO, TraceMeanField_ELBO
from pyro.infer.autoguide import (
AutoDiagonalNormal,
AutoIAFNormal,
AutoLaplaceApproximation,
AutoLowRankMultivariateNormal,
AutoMultivariateNormal,
)
from pyro.infer.autoguide.guides import AutoNormalizingFlow
from tests.common import assert_equal
from tests.integration_tests.test_conjugate_gaussian_models import GaussianChain
logger = logging.getLogger(__name__)
pytestmark = pytest.mark.stage("integration", "integration_batch_2")
# conjugate model to test AutoGuide logic from end-to-end (this has a non-mean-field posterior)
class AutoGaussianChain(GaussianChain):
# this is gross but we need to convert between different posterior factorizations
def compute_target(self, N):
self.target_auto_mus = torch.zeros(N + 1)
self.target_auto_diag_cov = torch.zeros(N + 1)
self.target_auto_mus[-1] = self.target_mus[N].item()
self.target_auto_diag_cov[-1] = 1.0 / self.lambda_posts[-1].item()
for n in range(N - 1, 0, -1):
self.target_auto_mus[n] += self.target_mus[n].item()
self.target_auto_mus[n] += (
self.target_kappas[n].item() * self.target_auto_mus[n + 1]
)
self.target_auto_diag_cov[n] += 1.0 / self.lambda_posts[n].item()
self.target_auto_diag_cov[n] += (
self.target_kappas[n].item() ** 2
) * self.target_auto_diag_cov[n + 1]
def test_multivariatate_normal_auto(self):
self.do_test_auto(3, reparameterized=True, n_steps=10001)
def do_test_auto(self, N, reparameterized, n_steps):
logger.debug("\nGoing to do AutoGaussianChain test...")
pyro.clear_param_store()
self.setUp()
self.setup_chain(N)
self.compute_target(N)
self.guide = AutoMultivariateNormal(self.model)
logger.debug(
"target auto_loc: {}".format(
self.target_auto_mus[1:].detach().cpu().numpy()
)
)
logger.debug(
"target auto_diag_cov: {}".format(
self.target_auto_diag_cov[1:].detach().cpu().numpy()
)
)
# TODO speed up with parallel num_particles > 1
adam = optim.Adam({"lr": 0.001, "betas": (0.95, 0.999)})
svi = SVI(self.model, self.guide, adam, loss=Trace_ELBO())
for k in range(n_steps):
loss = svi.step(reparameterized)
assert np.isfinite(loss), loss
if k % 1000 == 0 and k > 0 or k == n_steps - 1:
logger.debug(
"[step {}] guide mean parameter: {}".format(
k, self.guide.loc.detach().cpu().numpy()
)
)
L = self.guide.scale_tril
diag_cov = torch.mm(L, L.t()).diag()
logger.debug(
"[step {}] auto_diag_cov: {}".format(
k, diag_cov.detach().cpu().numpy()
)
)
assert_equal(
self.guide.loc.detach(),
self.target_auto_mus[1:],
prec=0.05,
msg="guide mean off",
)
assert_equal(
diag_cov,
self.target_auto_diag_cov[1:],
prec=0.07,
msg="guide covariance off",
)
@pytest.mark.parametrize(
"auto_class",
[
AutoDiagonalNormal,
AutoMultivariateNormal,
AutoLowRankMultivariateNormal,
AutoLaplaceApproximation,
],
)
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceMeanField_ELBO])
def test_auto_diagonal_gaussians(auto_class, Elbo):
n_steps = 3001
def model():
pyro.sample("x", dist.Normal(-0.2, 1.2))
pyro.sample("y", dist.Normal(0.2, 0.7))
if auto_class is AutoLowRankMultivariateNormal:
guide = auto_class(model, rank=1)
else:
guide = auto_class(model)
adam = optim.ClippedAdam(
{"lr": 0.01, "betas": (0.95, 0.999), "lrd": 0.1 ** (1 / n_steps)}
)
svi = SVI(model, guide, adam, loss=Elbo())
for k in range(n_steps):
loss = svi.step()
assert np.isfinite(loss), loss
if auto_class is AutoLaplaceApproximation:
guide = guide.laplace_approximation()
loc, scale = guide._loc_scale()
expected_loc = torch.tensor([-0.2, 0.2])
assert_equal(
loc.detach(),
expected_loc,
prec=0.05,
msg="\n".join(
[
"Incorrect guide loc. Expected:",
str(expected_loc.cpu().numpy()),
"Actual:",
str(loc.detach().cpu().numpy()),
]
),
)
expected_scale = torch.tensor([1.2, 0.7])
assert_equal(
scale.detach(),
expected_scale,
prec=0.08,
msg="\n".join(
[
"Incorrect guide scale. Expected:",
str(expected_scale.cpu().numpy()),
"Actual:",
str(scale.detach().cpu().numpy()),
]
),
)
@pytest.mark.parametrize(
"auto_class",
[
AutoDiagonalNormal,
AutoMultivariateNormal,
AutoLowRankMultivariateNormal,
AutoLaplaceApproximation,
],
)
def test_auto_transform(auto_class):
n_steps = 3500
def model():
pyro.sample("x", dist.LogNormal(0.2, 0.7))
if auto_class is AutoLowRankMultivariateNormal:
guide = auto_class(model, rank=1)
else:
guide = auto_class(model)
adam = optim.Adam({"lr": 0.001, "betas": (0.90, 0.999)})
svi = SVI(model, guide, adam, loss=Trace_ELBO())
for k in range(n_steps):
loss = svi.step()
assert np.isfinite(loss), loss
if auto_class is AutoLaplaceApproximation:
guide = guide.laplace_approximation()
loc, scale = guide._loc_scale()
assert_equal(loc.detach(), torch.tensor([0.2]), prec=0.04, msg="guide mean off")
assert_equal(
scale.detach(), torch.tensor([0.7]), prec=0.04, msg="guide covariance off"
)
@pytest.mark.parametrize(
"auto_class",
[
AutoDiagonalNormal,
AutoIAFNormal,
AutoMultivariateNormal,
AutoLowRankMultivariateNormal,
AutoLaplaceApproximation,
lambda m: AutoNormalizingFlow(m, partial(iterated, 2, block_autoregressive)),
],
)
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceMeanField_ELBO])
def test_auto_dirichlet(auto_class, Elbo):
num_steps = 2000
prior = torch.tensor([0.5, 1.0, 1.5, 3.0])
data = torch.tensor([0] * 4 + [1] * 2 + [2] * 5).long()
posterior = torch.tensor([4.5, 3.0, 6.5, 3.0])
def model(data):
p = pyro.sample("p", dist.Dirichlet(prior))
with pyro.plate("data_plate"):
pyro.sample("data", dist.Categorical(p).expand_by(data.shape), obs=data)
guide = auto_class(model)
svi = SVI(model, guide, optim.Adam({"lr": 0.003}), loss=Elbo())
for _ in range(num_steps):
loss = svi.step(data)
assert np.isfinite(loss), loss
expected_mean = posterior / posterior.sum()
if isinstance(guide, (AutoIAFNormal, AutoNormalizingFlow)):
loc = guide.transform(torch.zeros(guide.latent_dim))
else:
loc = guide.loc
actual_mean = biject_to(constraints.simplex)(loc)
assert_equal(
actual_mean,
expected_mean,
prec=0.2,
msg="".join(
[
"\nexpected {}".format(expected_mean.detach().cpu().numpy()),
"\n actual {}".format(actual_mean.detach().cpu().numpy()),
]
),
)
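# --- Illustrative sketch (not part of the original test module) ---
# Every test above follows the same recipe: define a model, wrap it in an
# autoguide, run SVI until the variational parameters settle, then inspect the
# guide. This is a minimal, hedged version of that recipe using only names
# already imported above; the step count and learning rate are arbitrary.
def _svi_recipe_sketch(n_steps=1000):
    pyro.clear_param_store()
    def model():
        pyro.sample("x", dist.Normal(0.0, 1.0))
    guide = AutoDiagonalNormal(model)
    svi = SVI(model, guide, optim.Adam({"lr": 0.01}), loss=Trace_ELBO())
    for _ in range(n_steps):
        loss = svi.step()
        assert np.isfinite(loss), loss
    # guide._loc_scale() returns the fitted variational loc/scale, as used in the tests above
    return guide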
|
the-stack_0_24949
|
# Copyright (c) 2017-2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Contains functions for working with "native" Python types as they correspond to types over the
Ledger API.
"""
from typing import Any, Dict, Mapping, Tuple
__all__ = ["to_record", "to_variant"]
VARIANT_KEYS = frozenset(["tag", "value"])
def to_record(obj: "Any") -> "Mapping[str, Any]":
"""
"Unflattens" fields of a dict to support nested records.
"""
from collections.abc import Mapping
if not isinstance(obj, Mapping):
raise ValueError("a mapping is required")
# pull out any specialized dotted-field mappings
reformatted = dict() # type: Dict[str, Any]
for key, value in obj.items():
k1, d, k2 = key.partition(".")
if d:
v = reformatted.get(k1)
if v is None:
v = dict()
reformatted[k1] = v
v[k2] = value
else:
reformatted[key] = value
return reformatted
def to_variant(obj: "Any") -> "Tuple[str, Any]":
"""
Return the constructor and value that is represented by the given object.
"""
from collections.abc import Mapping
if not isinstance(obj, Mapping):
raise ValueError(f"cannot coerce {obj!r} to a variant")
if VARIANT_KEYS == obj.keys():
return obj["tag"], obj["value"]
if len(obj) != 1:
raise ValueError(f"variants must be encoded as single-key dicts (got {obj!r} instead)")
key = list(obj)[0]
value = obj.get(key)
return key, value
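# --- Illustrative examples (not part of the original module) ---
# to_record "unflattens" dotted keys, and to_variant accepts either the
# single-key encoding or the explicit tag/value encoding:
def _examples():
    record = to_record({"party.name": "Alice", "party.id": 1, "amount": 10})
    assert record == {"party": {"name": "Alice", "id": 1}, "amount": 10}
    assert to_variant({"Cash": {"amount": 10}}) == ("Cash", {"amount": 10})
    assert to_variant({"tag": "Cash", "value": {"amount": 10}}) == ("Cash", {"amount": 10})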
|
the-stack_0_24950
|
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""
Python parse tree definitions.
This is a very concrete parse tree; we need to keep every token and
even the comments and whitespace between tokens.
There's also a pattern matching implementation here.
"""
# mypy: allow-untyped-defs
from typing import (
Any,
Callable,
Dict,
Iterator,
List,
Optional,
Text,
Tuple,
TypeVar,
Union,
Set,
Iterable,
Sequence,
)
from blib2to3.pgen2.grammar import Grammar
__author__ = "Guido van Rossum <[email protected]>"
import sys
from io import StringIO
HUGE: int = 0x7FFFFFFF # maximum repeat count, default max
_type_reprs: Dict[int, Union[Text, int]] = {}
def type_repr(type_num: int) -> Union[Text, int]:
global _type_reprs
if not _type_reprs:
from .pygram import python_symbols
# printing tokens is possible but not as useful
# from .pgen2 import token // token.__dict__.items():
for name in dir(python_symbols):
val = getattr(python_symbols, name)
if type(val) == int:
_type_reprs[val] = name
return _type_reprs.setdefault(type_num, type_num)
_P = TypeVar("_P")
NL = Union["Node", "Leaf"]
Context = Tuple[Text, Tuple[int, int]]
RawNode = Tuple[int, Optional[Text], Optional[Context], Optional[List[NL]]]
class Base(object):
"""
Abstract base class for Node and Leaf.
This provides some default functionality and boilerplate using the
template pattern.
A node may be a subnode of at most one parent.
"""
# Default values for instance variables
type: int # int: token number (< 256) or symbol number (>= 256)
parent: Optional["Node"] = None # Parent node pointer, or None
children: List[NL] # List of subnodes
was_changed: bool = False
was_checked: bool = False
def __new__(cls, *args, **kwds):
"""Constructor that prevents Base from being instantiated."""
assert cls is not Base, "Cannot instantiate Base"
return object.__new__(cls)
def __eq__(self, other: Any) -> bool:
"""
Compare two nodes for equality.
This calls the method _eq().
"""
if self.__class__ is not other.__class__:
return NotImplemented
return self._eq(other)
__hash__ = None # type: Any # For Py3 compatibility.
@property
def prefix(self) -> Text:
raise NotImplementedError
def _eq(self: _P, other: _P) -> bool:
"""
Compare two nodes for equality.
This is called by __eq__ and __ne__. It is only called if the two nodes
have the same type. This must be implemented by the concrete subclass.
Nodes should be considered equal if they have the same structure,
ignoring the prefix string and other context information.
"""
raise NotImplementedError
def clone(self: _P) -> _P:
"""
Return a cloned (deep) copy of self.
This must be implemented by the concrete subclass.
"""
raise NotImplementedError
def post_order(self) -> Iterator[NL]:
"""
Return a post-order iterator for the tree.
This must be implemented by the concrete subclass.
"""
raise NotImplementedError
def pre_order(self) -> Iterator[NL]:
"""
Return a pre-order iterator for the tree.
This must be implemented by the concrete subclass.
"""
raise NotImplementedError
def replace(self, new: Union[NL, List[NL]]) -> None:
"""Replace this node with a new one in the parent."""
assert self.parent is not None, str(self)
assert new is not None
if not isinstance(new, list):
new = [new]
l_children = []
found = False
for ch in self.parent.children:
if ch is self:
assert not found, (self.parent.children, self, new)
if new is not None:
l_children.extend(new)
found = True
else:
l_children.append(ch)
assert found, (self.children, self, new)
self.parent.children = l_children
self.parent.changed()
self.parent.invalidate_sibling_maps()
for x in new:
x.parent = self.parent
self.parent = None
def get_lineno(self) -> Optional[int]:
"""Return the line number which generated the invocant node."""
node = self
while not isinstance(node, Leaf):
if not node.children:
return None
node = node.children[0]
return node.lineno
def changed(self) -> None:
if self.was_changed:
return
if self.parent:
self.parent.changed()
self.was_changed = True
def remove(self) -> Optional[int]:
"""
Remove the node from the tree. Returns the position of the node in its
parent's children before it was removed.
"""
if self.parent:
for i, node in enumerate(self.parent.children):
if node is self:
del self.parent.children[i]
self.parent.changed()
self.parent.invalidate_sibling_maps()
self.parent = None
return i
return None
@property
def next_sibling(self) -> Optional[NL]:
"""
The node immediately following the invocant in their parent's children
        list. If the invocant does not have a next sibling, it is None.
"""
if self.parent is None:
return None
if self.parent.next_sibling_map is None:
self.parent.update_sibling_maps()
assert self.parent.next_sibling_map is not None
return self.parent.next_sibling_map[id(self)]
@property
def prev_sibling(self) -> Optional[NL]:
"""
The node immediately preceding the invocant in their parent's children
list. If the invocant does not have a previous sibling, it is None.
"""
if self.parent is None:
return None
if self.parent.prev_sibling_map is None:
self.parent.update_sibling_maps()
assert self.parent.prev_sibling_map is not None
return self.parent.prev_sibling_map[id(self)]
def leaves(self) -> Iterator["Leaf"]:
for child in self.children:
yield from child.leaves()
def depth(self) -> int:
if self.parent is None:
return 0
return 1 + self.parent.depth()
def get_suffix(self) -> Text:
"""
Return the string immediately following the invocant node. This is
effectively equivalent to node.next_sibling.prefix
"""
next_sib = self.next_sibling
if next_sib is None:
return ""
prefix = next_sib.prefix
return prefix
class Node(Base):
"""Concrete implementation for interior nodes."""
fixers_applied: Optional[List[Any]]
used_names: Optional[Set[Text]]
def __init__(
self,
type: int,
children: List[NL],
context: Optional[Any] = None,
prefix: Optional[Text] = None,
fixers_applied: Optional[List[Any]] = None,
) -> None:
"""
Initializer.
Takes a type constant (a symbol number >= 256), a sequence of
child nodes, and an optional context keyword argument.
As a side effect, the parent pointers of the children are updated.
"""
assert type >= 256, type
self.type = type
self.children = list(children)
for ch in self.children:
assert ch.parent is None, repr(ch)
ch.parent = self
self.invalidate_sibling_maps()
if prefix is not None:
self.prefix = prefix
if fixers_applied:
self.fixers_applied = fixers_applied[:]
else:
self.fixers_applied = None
def __repr__(self) -> Text:
"""Return a canonical string representation."""
assert self.type is not None
return "%s(%s, %r)" % (
self.__class__.__name__,
type_repr(self.type),
self.children,
)
def __str__(self) -> Text:
"""
Return a pretty string representation.
This reproduces the input source exactly.
"""
return "".join(map(str, self.children))
def _eq(self, other) -> bool:
"""Compare two nodes for equality."""
return (self.type, self.children) == (other.type, other.children)
def clone(self) -> "Node":
assert self.type is not None
"""Return a cloned (deep) copy of self."""
return Node(
self.type,
[ch.clone() for ch in self.children],
fixers_applied=self.fixers_applied,
)
def post_order(self) -> Iterator[NL]:
"""Return a post-order iterator for the tree."""
for child in self.children:
yield from child.post_order()
yield self
def pre_order(self) -> Iterator[NL]:
"""Return a pre-order iterator for the tree."""
yield self
for child in self.children:
yield from child.pre_order()
@property
def prefix(self) -> Text:
"""
The whitespace and comments preceding this node in the input.
"""
if not self.children:
return ""
return self.children[0].prefix
@prefix.setter
def prefix(self, prefix) -> None:
if self.children:
self.children[0].prefix = prefix
def set_child(self, i: int, child: NL) -> None:
"""
Equivalent to 'node.children[i] = child'. This method also sets the
child's parent attribute appropriately.
"""
child.parent = self
self.children[i].parent = None
self.children[i] = child
self.changed()
self.invalidate_sibling_maps()
def insert_child(self, i: int, child: NL) -> None:
"""
Equivalent to 'node.children.insert(i, child)'. This method also sets
the child's parent attribute appropriately.
"""
child.parent = self
self.children.insert(i, child)
self.changed()
self.invalidate_sibling_maps()
def append_child(self, child: NL) -> None:
"""
Equivalent to 'node.children.append(child)'. This method also sets the
child's parent attribute appropriately.
"""
child.parent = self
self.children.append(child)
self.changed()
self.invalidate_sibling_maps()
def invalidate_sibling_maps(self) -> None:
self.prev_sibling_map: Optional[Dict[int, Optional[NL]]] = None
self.next_sibling_map: Optional[Dict[int, Optional[NL]]] = None
def update_sibling_maps(self) -> None:
_prev: Dict[int, Optional[NL]] = {}
_next: Dict[int, Optional[NL]] = {}
self.prev_sibling_map = _prev
self.next_sibling_map = _next
previous: Optional[NL] = None
for current in self.children:
_prev[id(current)] = previous
_next[id(previous)] = current
previous = current
_next[id(current)] = None
class Leaf(Base):
"""Concrete implementation for leaf nodes."""
# Default values for instance variables
value: Text
fixers_applied: List[Any]
bracket_depth: int
opening_bracket: "Leaf"
used_names: Optional[Set[Text]]
_prefix = "" # Whitespace and comments preceding this token in the input
lineno: int = 0 # Line where this token starts in the input
column: int = 0 # Column where this token starts in the input
def __init__(
self,
type: int,
value: Text,
context: Optional[Context] = None,
prefix: Optional[Text] = None,
fixers_applied: List[Any] = [],
) -> None:
"""
Initializer.
Takes a type constant (a token number < 256), a string value, and an
optional context keyword argument.
"""
assert 0 <= type < 256, type
if context is not None:
self._prefix, (self.lineno, self.column) = context
self.type = type
self.value = value
if prefix is not None:
self._prefix = prefix
self.fixers_applied: Optional[List[Any]] = fixers_applied[:]
self.children = []
def __repr__(self) -> str:
"""Return a canonical string representation."""
from .pgen2.token import tok_name
assert self.type is not None
return "%s(%s, %r)" % (
self.__class__.__name__,
tok_name.get(self.type, self.type),
self.value,
)
def __str__(self) -> Text:
"""
Return a pretty string representation.
This reproduces the input source exactly.
"""
return self.prefix + str(self.value)
def _eq(self, other) -> bool:
"""Compare two nodes for equality."""
return (self.type, self.value) == (other.type, other.value)
def clone(self) -> "Leaf":
assert self.type is not None
"""Return a cloned (deep) copy of self."""
return Leaf(
self.type,
self.value,
(self.prefix, (self.lineno, self.column)),
fixers_applied=self.fixers_applied,
)
def leaves(self) -> Iterator["Leaf"]:
yield self
def post_order(self) -> Iterator["Leaf"]:
"""Return a post-order iterator for the tree."""
yield self
def pre_order(self) -> Iterator["Leaf"]:
"""Return a pre-order iterator for the tree."""
yield self
@property
def prefix(self) -> Text:
"""
The whitespace and comments preceding this token in the input.
"""
return self._prefix
@prefix.setter
def prefix(self, prefix) -> None:
self.changed()
self._prefix = prefix
def convert(gr: Grammar, raw_node: RawNode) -> NL:
"""
Convert raw node information to a Node or Leaf instance.
This is passed to the parser driver which calls it whenever a reduction of a
    grammar rule produces a new complete node, so that the tree is built
strictly bottom-up.
"""
type, value, context, children = raw_node
if children or type in gr.number2symbol:
# If there's exactly one child, return that child instead of
# creating a new node.
assert children is not None
if len(children) == 1:
return children[0]
return Node(type, children, context=context)
else:
return Leaf(type, value or "", context=context)
_Results = Dict[Text, NL]
class BasePattern(object):
"""
A pattern is a tree matching pattern.
It looks for a specific node type (token or symbol), and
optionally for a specific content.
This is an abstract base class. There are three concrete
subclasses:
- LeafPattern matches a single leaf node;
- NodePattern matches a single node (usually non-leaf);
- WildcardPattern matches a sequence of nodes of variable length.
"""
# Defaults for instance variables
type: Optional[int]
type = None # Node type (token if < 256, symbol if >= 256)
content: Any = None # Optional content matching pattern
name: Optional[Text] = None # Optional name used to store match in results dict
def __new__(cls, *args, **kwds):
"""Constructor that prevents BasePattern from being instantiated."""
assert cls is not BasePattern, "Cannot instantiate BasePattern"
return object.__new__(cls)
def __repr__(self) -> Text:
assert self.type is not None
args = [type_repr(self.type), self.content, self.name]
while args and args[-1] is None:
del args[-1]
return "%s(%s)" % (self.__class__.__name__, ", ".join(map(repr, args)))
def _submatch(self, node, results=None) -> bool:
raise NotImplementedError
def optimize(self) -> "BasePattern":
"""
A subclass can define this as a hook for optimizations.
Returns either self or another node with the same effect.
"""
return self
def match(self, node: NL, results: Optional[_Results] = None) -> bool:
"""
Does this pattern exactly match a node?
Returns True if it matches, False if not.
If results is not None, it must be a dict which will be
updated with the nodes matching named subpatterns.
Default implementation for non-wildcard patterns.
"""
if self.type is not None and node.type != self.type:
return False
if self.content is not None:
r: Optional[_Results] = None
if results is not None:
r = {}
if not self._submatch(node, r):
return False
if r:
assert results is not None
results.update(r)
if results is not None and self.name:
results[self.name] = node
return True
def match_seq(self, nodes: List[NL], results: Optional[_Results] = None) -> bool:
"""
Does this pattern exactly match a sequence of nodes?
Default implementation for non-wildcard patterns.
"""
if len(nodes) != 1:
return False
return self.match(nodes[0], results)
def generate_matches(self, nodes: List[NL]) -> Iterator[Tuple[int, _Results]]:
"""
Generator yielding all matches for this pattern.
Default implementation for non-wildcard patterns.
"""
r: _Results = {}
if nodes and self.match(nodes[0], r):
yield 1, r
class LeafPattern(BasePattern):
def __init__(
self,
type: Optional[int] = None,
content: Optional[Text] = None,
name: Optional[Text] = None,
) -> None:
"""
Initializer. Takes optional type, content, and name.
The type, if given must be a token type (< 256). If not given,
this matches any *leaf* node; the content may still be required.
The content, if given, must be a string.
If a name is given, the matching node is stored in the results
dict under that key.
"""
if type is not None:
assert 0 <= type < 256, type
if content is not None:
assert isinstance(content, str), repr(content)
self.type = type
self.content = content
self.name = name
def match(self, node: NL, results=None):
"""Override match() to insist on a leaf node."""
if not isinstance(node, Leaf):
return False
return BasePattern.match(self, node, results)
def _submatch(self, node, results=None):
"""
Match the pattern's content to the node's children.
This assumes the node type matches and self.content is not None.
Returns True if it matches, False if not.
If results is not None, it must be a dict which will be
updated with the nodes matching named subpatterns.
When returning False, the results dict may still be updated.
"""
return self.content == node.value
class NodePattern(BasePattern):
wildcards: bool = False
def __init__(
self,
type: Optional[int] = None,
content: Optional[Iterable[Text]] = None,
name: Optional[Text] = None,
) -> None:
"""
Initializer. Takes optional type, content, and name.
The type, if given, must be a symbol type (>= 256). If the
type is None this matches *any* single node (leaf or not),
except if content is not None, in which it only matches
non-leaf nodes that also match the content pattern.
The content, if not None, must be a sequence of Patterns that
must match the node's children exactly. If the content is
given, the type must not be None.
If a name is given, the matching node is stored in the results
dict under that key.
"""
if type is not None:
assert type >= 256, type
if content is not None:
assert not isinstance(content, str), repr(content)
newcontent = list(content)
for i, item in enumerate(newcontent):
assert isinstance(item, BasePattern), (i, item)
if isinstance(item, WildcardPattern):
self.wildcards = True
self.type = type
self.content = newcontent
self.name = name
def _submatch(self, node, results=None) -> bool:
"""
Match the pattern's content to the node's children.
This assumes the node type matches and self.content is not None.
Returns True if it matches, False if not.
If results is not None, it must be a dict which will be
updated with the nodes matching named subpatterns.
When returning False, the results dict may still be updated.
"""
if self.wildcards:
for c, r in generate_matches(self.content, node.children):
if c == len(node.children):
if results is not None:
results.update(r)
return True
return False
if len(self.content) != len(node.children):
return False
for subpattern, child in zip(self.content, node.children):
if not subpattern.match(child, results):
return False
return True
class WildcardPattern(BasePattern):
"""
A wildcard pattern can match zero or more nodes.
This has all the flexibility needed to implement patterns like:
.* .+ .? .{m,n}
(a b c | d e | f)
(...)* (...)+ (...)? (...){m,n}
except it always uses non-greedy matching.
"""
min: int
max: int
def __init__(
self,
content: Optional[Text] = None,
min: int = 0,
max: int = HUGE,
name: Optional[Text] = None,
) -> None:
"""
Initializer.
Args:
content: optional sequence of subsequences of patterns;
if absent, matches one node;
if present, each subsequence is an alternative [*]
min: optional minimum number of times to match, default 0
max: optional maximum number of times to match, default HUGE
name: optional name assigned to this match
[*] Thus, if content is [[a, b, c], [d, e], [f, g, h]] this is
equivalent to (a b c | d e | f g h); if content is None,
this is equivalent to '.' in regular expression terms.
The min and max parameters work as follows:
min=0, max=maxint: .*
min=1, max=maxint: .+
min=0, max=1: .?
min=1, max=1: .
If content is not None, replace the dot with the parenthesized
list of alternatives, e.g. (a b c | d e | f g h)*
"""
assert 0 <= min <= max <= HUGE, (min, max)
if content is not None:
f = lambda s: tuple(s)
wrapped_content = tuple(map(f, content)) # Protect against alterations
# Check sanity of alternatives
assert len(wrapped_content), repr(wrapped_content) # Can't have zero alternatives
for alt in wrapped_content:
                assert len(alt), repr(alt)  # Can't have empty alternatives
self.content = wrapped_content
self.min = min
self.max = max
self.name = name
def optimize(self) -> Any:
"""Optimize certain stacked wildcard patterns."""
subpattern = None
if (
self.content is not None
and len(self.content) == 1
and len(self.content[0]) == 1
):
subpattern = self.content[0][0]
if self.min == 1 and self.max == 1:
if self.content is None:
return NodePattern(name=self.name)
if subpattern is not None and self.name == subpattern.name:
return subpattern.optimize()
if (
self.min <= 1
and isinstance(subpattern, WildcardPattern)
and subpattern.min <= 1
and self.name == subpattern.name
):
return WildcardPattern(
subpattern.content,
self.min * subpattern.min,
self.max * subpattern.max,
subpattern.name,
)
return self
def match(self, node, results=None) -> bool:
"""Does this pattern exactly match a node?"""
return self.match_seq([node], results)
def match_seq(self, nodes, results=None) -> bool:
"""Does this pattern exactly match a sequence of nodes?"""
for c, r in self.generate_matches(nodes):
if c == len(nodes):
if results is not None:
results.update(r)
if self.name:
results[self.name] = list(nodes)
return True
return False
def generate_matches(self, nodes) -> Iterator[Tuple[int, _Results]]:
"""
Generator yielding matches for a sequence of nodes.
Args:
nodes: sequence of nodes
Yields:
(count, results) tuples where:
count: the match comprises nodes[:count];
results: dict containing named submatches.
"""
if self.content is None:
# Shortcut for special case (see __init__.__doc__)
for count in range(self.min, 1 + min(len(nodes), self.max)):
r = {}
if self.name:
r[self.name] = nodes[:count]
yield count, r
elif self.name == "bare_name":
yield self._bare_name_matches(nodes)
else:
# The reason for this is that hitting the recursion limit usually
# results in some ugly messages about how RuntimeErrors are being
# ignored. We only have to do this on CPython, though, because other
# implementations don't have this nasty bug in the first place.
if hasattr(sys, "getrefcount"):
save_stderr = sys.stderr
sys.stderr = StringIO()
try:
for count, r in self._recursive_matches(nodes, 0):
if self.name:
r[self.name] = nodes[:count]
yield count, r
except RuntimeError:
# We fall back to the iterative pattern matching scheme if the recursive
# scheme hits the recursion limit.
for count, r in self._iterative_matches(nodes):
if self.name:
r[self.name] = nodes[:count]
yield count, r
finally:
if hasattr(sys, "getrefcount"):
sys.stderr = save_stderr
def _iterative_matches(self, nodes) -> Iterator[Tuple[int, _Results]]:
"""Helper to iteratively yield the matches."""
nodelen = len(nodes)
if 0 >= self.min:
yield 0, {}
results = []
# generate matches that use just one alt from self.content
for alt in self.content:
for c, r in generate_matches(alt, nodes):
yield c, r
results.append((c, r))
# for each match, iterate down the nodes
while results:
new_results = []
for c0, r0 in results:
# stop if the entire set of nodes has been matched
if c0 < nodelen and c0 <= self.max:
for alt in self.content:
for c1, r1 in generate_matches(alt, nodes[c0:]):
if c1 > 0:
r = {}
r.update(r0)
r.update(r1)
yield c0 + c1, r
new_results.append((c0 + c1, r))
results = new_results
def _bare_name_matches(self, nodes) -> Tuple[int, _Results]:
"""Special optimized matcher for bare_name."""
count = 0
r = {} # type: _Results
done = False
max = len(nodes)
while not done and count < max:
done = True
for leaf in self.content:
if leaf[0].match(nodes[count], r):
count += 1
done = False
break
assert self.name is not None
r[self.name] = nodes[:count]
return count, r
def _recursive_matches(self, nodes, count) -> Iterator[Tuple[int, _Results]]:
"""Helper to recursively yield the matches."""
assert self.content is not None
if count >= self.min:
yield 0, {}
if count < self.max:
for alt in self.content:
for c0, r0 in generate_matches(alt, nodes):
for c1, r1 in self._recursive_matches(nodes[c0:], count + 1):
r = {}
r.update(r0)
r.update(r1)
yield c0 + c1, r
class NegatedPattern(BasePattern):
def __init__(self, content: Optional[Any] = None) -> None:
"""
Initializer.
The argument is either a pattern or None. If it is None, this
only matches an empty sequence (effectively '$' in regex
lingo). If it is not None, this matches whenever the argument
pattern doesn't have any matches.
"""
if content is not None:
assert isinstance(content, BasePattern), repr(content)
self.content = content
def match(self, node, results=None) -> bool:
# We never match a node in its entirety
return False
def match_seq(self, nodes, results=None) -> bool:
# We only match an empty sequence of nodes in its entirety
return len(nodes) == 0
def generate_matches(self, nodes) -> Iterator[Tuple[int, _Results]]:
if self.content is None:
# Return a match if there is an empty sequence
if len(nodes) == 0:
yield 0, {}
else:
# Return a match if the argument pattern has no matches
for c, r in self.content.generate_matches(nodes):
return
yield 0, {}
def generate_matches(
patterns: List[BasePattern], nodes: List[NL]
) -> Iterator[Tuple[int, _Results]]:
"""
Generator yielding matches for a sequence of patterns and nodes.
Args:
patterns: a sequence of patterns
nodes: a sequence of nodes
Yields:
(count, results) tuples where:
count: the entire sequence of patterns matches nodes[:count];
results: dict containing named submatches.
"""
if not patterns:
yield 0, {}
else:
p, rest = patterns[0], patterns[1:]
for c0, r0 in p.generate_matches(nodes):
if not rest:
yield c0, r0
else:
for c1, r1 in generate_matches(rest, nodes[c0:]):
r = {}
r.update(r0)
r.update(r1)
yield c0 + c1, r
_Convert = Callable[[Grammar, RawNode], Any]
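# --- Illustrative sketch (not part of the original module) ---
# A minimal example of building a tiny tree and matching it with the pattern
# classes above. The token/symbol numbers (1 and 300) are arbitrary values chosen
# only to satisfy the "< 256 is a token, >= 256 is a symbol" convention.
def _pattern_matching_sketch():
    name_leaf = Leaf(1, "x", context=("", (1, 0)))   # a leaf token
    expr_node = Node(300, [name_leaf])               # a symbol node wrapping it
    results = {}
    pattern = NodePattern(type=300, content=[LeafPattern(content="x", name="target")])
    matched = pattern.match(expr_node, results)
    # matched is True and results["target"] is the "x" leaf
    return matched, results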
|
the-stack_0_24955
|
from django.contrib.contenttypes.models import ContentType
from django.db.models import Q
from utilities.querysets import RestrictedQuerySet
class PrefixQuerySet(RestrictedQuerySet):
def annotate_tree(self):
"""
Annotate the number of parent and child prefixes for each Prefix. Raw SQL is needed for these subqueries
because we need to cast NULL VRF values to integers for comparison. (NULL != NULL).
"""
return self.extra(
select={
'parents': 'SELECT COUNT(U0."prefix") AS "c" '
'FROM "ipam_prefix" U0 '
'WHERE (U0."prefix" >> "ipam_prefix"."prefix" '
'AND COALESCE(U0."vrf_id", 0) = COALESCE("ipam_prefix"."vrf_id", 0))',
'children': 'SELECT COUNT(U1."prefix") AS "c" '
'FROM "ipam_prefix" U1 '
'WHERE (U1."prefix" << "ipam_prefix"."prefix" '
'AND COALESCE(U1."vrf_id", 0) = COALESCE("ipam_prefix"."vrf_id", 0))',
}
)
class VLANQuerySet(RestrictedQuerySet):
def get_for_device(self, device):
"""
Return all VLANs available to the specified Device.
"""
from .models import VLANGroup
# Find all relevant VLANGroups
q = Q()
if device.site.region:
q |= Q(
scope_type=ContentType.objects.get_by_natural_key('dcim', 'region'),
scope_id__in=device.site.region.get_ancestors(include_self=True)
)
if device.site.group:
q |= Q(
scope_type=ContentType.objects.get_by_natural_key('dcim', 'sitegroup'),
scope_id__in=device.site.group.get_ancestors(include_self=True)
)
q |= Q(
scope_type=ContentType.objects.get_by_natural_key('dcim', 'site'),
scope_id=device.site_id
)
if device.location:
q |= Q(
scope_type=ContentType.objects.get_by_natural_key('dcim', 'location'),
scope_id__in=device.location.get_ancestors(include_self=True)
)
if device.rack:
q |= Q(
scope_type=ContentType.objects.get_by_natural_key('dcim', 'rack'),
scope_id=device.rack_id
)
# Return all applicable VLANs
return self.filter(
Q(group__in=VLANGroup.objects.filter(q)) |
Q(site=device.site) |
Q(group__isnull=True, site__isnull=True) # Global VLANs
)
def get_for_virtualmachine(self, vm):
"""
Return all VLANs available to the specified VirtualMachine.
"""
from .models import VLANGroup
# Find all relevant VLANGroups
q = Q()
if vm.cluster.site:
if vm.cluster.site.region:
q |= Q(
scope_type=ContentType.objects.get_by_natural_key('dcim', 'region'),
scope_id__in=vm.cluster.site.region.get_ancestors(include_self=True)
)
if vm.cluster.site.group:
q |= Q(
scope_type=ContentType.objects.get_by_natural_key('dcim', 'sitegroup'),
scope_id__in=vm.cluster.site.group.get_ancestors(include_self=True)
)
q |= Q(
scope_type=ContentType.objects.get_by_natural_key('dcim', 'site'),
scope_id=vm.cluster.site_id
)
if vm.cluster.group:
q |= Q(
scope_type=ContentType.objects.get_by_natural_key('virtualization', 'clustergroup'),
scope_id=vm.cluster.group_id
)
q |= Q(
scope_type=ContentType.objects.get_by_natural_key('virtualization', 'cluster'),
scope_id=vm.cluster_id
)
vlan_groups = VLANGroup.objects.filter(q)
# Return all applicable VLANs
q = (
Q(group__in=vlan_groups) |
Q(group__isnull=True, site__isnull=True) # Global VLANs
)
if vm.cluster.site:
q |= Q(site=vm.cluster.site)
return self.filter(q)
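# --- Illustrative usage sketch (not part of the original module) ---
# These querysets are normally attached to the Prefix and VLAN models as their
# managers, so call sites look roughly like the function below. The manager
# wiring (Prefix.objects / VLAN.objects exposing these methods) is an assumption
# based on how NetBox typically hooks up RestrictedQuerySet subclasses.
def _usage_sketch(device, vm):
    from .models import Prefix, VLAN
    prefixes = Prefix.objects.annotate_tree()            # adds "parents"/"children" counts
    device_vlans = VLAN.objects.get_for_device(device)   # VLANs available to a Device
    vm_vlans = VLAN.objects.get_for_virtualmachine(vm)   # VLANs available to a VirtualMachine
    return prefixes, device_vlans, vm_vlans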
|
the-stack_0_24956
|
#! /usr/bin/env python
#
# Copyright (C) 2018 Raphael Vallat
DESCRIPTION = "Yet Another Spindle Algorithm"
LONG_DESCRIPTION = """YASA (Yet Another Spindle Algorithm): fast and robust detection of spindles, slow-waves, and rapid eye movements from sleep EEG recordings.
"""
DISTNAME = 'yasa'
MAINTAINER = 'Raphael Vallat'
MAINTAINER_EMAIL = '[email protected]'
URL = 'https://github.com/raphaelvallat/yasa/'
LICENSE = 'BSD (3-clause)'
DOWNLOAD_URL = 'https://github.com/raphaelvallat/yasa/'
VERSION = '0.5.1'
PACKAGE_DATA = {'yasa.data.icons': ['*.svg']}
INSTALL_REQUIRES = [
'numpy',
'scipy',
'pandas',
'matplotlib',
'seaborn',
'mne>=0.20.0',
'numba',
'outdated',
'antropy',
'scikit-learn',
'tensorpac>=0.6.5',
'pyriemann>=0.2.7',
'lspopt',
'ipywidgets',
'joblib'
]
PACKAGES = [
'yasa',
]
CLASSIFIERS = [
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'License :: OSI Approved :: BSD License',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS'
]
try:
from setuptools import setup
_has_setuptools = True
except ImportError:
from distutils.core import setup
if __name__ == "__main__":
setup(name=DISTNAME,
author=MAINTAINER,
author_email=MAINTAINER_EMAIL,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
install_requires=INSTALL_REQUIRES,
include_package_data=True,
packages=PACKAGES,
package_data=PACKAGE_DATA,
classifiers=CLASSIFIERS,
)
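# --- Illustrative note (not part of the original setup script) ---
# With this script in place the package installs in the usual way, e.g.:
#   pip install .        # or "pip install -e ." for a development install
# which pulls in the dependencies listed in INSTALL_REQUIRES above.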
|
the-stack_0_24957
|
# coding: utf-8
"""
Fulfillment API
Use the Fulfillment API to complete the process of packaging, addressing, handling, and shipping each order on behalf of the seller, in accordance with the payment method and timing specified at checkout. # noqa: E501
OpenAPI spec version: v1.19.10
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class EbayFulfillmentProgram(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'fulfilled_by': 'str'
}
attribute_map = {
'fulfilled_by': 'fulfilledBy'
}
def __init__(self, fulfilled_by=None): # noqa: E501
"""EbayFulfillmentProgram - a model defined in Swagger""" # noqa: E501
self._fulfilled_by = None
self.discriminator = None
if fulfilled_by is not None:
self.fulfilled_by = fulfilled_by
@property
def fulfilled_by(self):
"""Gets the fulfilled_by of this EbayFulfillmentProgram. # noqa: E501
The value returned in this field indicates the party that is handling fulfillment of the order line item. <br /><br />Valid value: <code>EBAY</code> # noqa: E501
:return: The fulfilled_by of this EbayFulfillmentProgram. # noqa: E501
:rtype: str
"""
return self._fulfilled_by
@fulfilled_by.setter
def fulfilled_by(self, fulfilled_by):
"""Sets the fulfilled_by of this EbayFulfillmentProgram.
The value returned in this field indicates the party that is handling fulfillment of the order line item. <br /><br />Valid value: <code>EBAY</code> # noqa: E501
:param fulfilled_by: The fulfilled_by of this EbayFulfillmentProgram. # noqa: E501
:type: str
"""
self._fulfilled_by = fulfilled_by
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(EbayFulfillmentProgram, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, EbayFulfillmentProgram):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
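# --- Illustrative example (not part of the generated module) ---
# A minimal round-trip through the generated model: construct, serialize, compare.
def _example_usage():
    program = EbayFulfillmentProgram(fulfilled_by="EBAY")
    assert program.to_dict() == {"fulfilled_by": "EBAY"}
    assert program == EbayFulfillmentProgram(fulfilled_by="EBAY")
    return program.to_str()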
|
the-stack_0_24959
|
_base_ = ['../_base_/datasets/charger_tape.py']
log_level = 'INFO'
load_from = None
ex_name = "c_litehrnet30_512_hm128_repr_v5"
resume_from = "/root/share/tf/mmpose_checkpoints/"+ex_name+"/epoch_5.pth"
# resume_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]
checkpoint_config = dict(interval=1)
evaluation = dict(interval=1, metric='acc', save_best='acc')
work_dir = "/root/share/tf/mmpose_checkpoints/"+ex_name+"/"
optimizer = dict(
type='Adam',
lr=5e-5,
)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[2, 3, 6])
total_epochs = 10
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
dict(type='NeptuneLoggerHook',
init_kwargs=dict(
run="CHAR-284",
project="tnowak/charger")
# mode="debug",
# project='charger',
# api_token="eyJhcGlfYWRkcmVzcyI6Imh0dHBzOi8vYXBwLm5lcHR1bmUuYWkiLCJhcGlfdXJsIjoiaHR0cHM6Ly9hcHAubmVwdHVuZS5haSIsImFwaV9rZXkiOiI4NGRmMmFkNi0wMWNjLTQxY2EtYjQ1OS01YjQ0YzRkYmFlNGIifQ==",
# name=ex_name,
# tags=["LiteHRNet", "512", "HM128", "aug", 'batch4']),
# description="repr losss as joint mse"
)
])
target_type = 'GaussianHeatmap'
channel_cfg = dict(
num_output_channels=4,
dataset_joints=4,
dataset_channel=[
[0, 1, 2, 3],
],
inference_channel=[
0, 1, 2, 3])
# model settings
model = dict(
type='TopDownCharger',
pretrained=None,
backbone=dict(
type='LiteHRNet',
in_channels=3,
extra=dict(
stem=dict(stem_channels=32, out_channels=32, expand_ratio=1),
num_stages=3,
stages_spec=dict(
num_modules=(3, 8, 3),
num_branches=(2, 3, 4),
num_blocks=(2, 2, 2),
module_type=('LITE', 'LITE', 'LITE'),
with_fuse=(True, True, True),
reduce_ratios=(8, 8, 8),
num_channels=(
(40, 80),
(40, 80, 160),
(40, 80, 160, 320),
)),
with_head=True,
)),
keypoint_head=dict(
type='TopdownHeatmapReprHead',
in_channels=40,
out_channels=channel_cfg['num_output_channels'],
num_deconv_layers=0,
extra=dict(final_conv_kernel=1, ),
loss_keypoint=dict(type='JointsMSELoss', use_target_weight=True)),
train_cfg=dict(),
test_cfg=dict(
flip_test=True,
post_process='default',
shift_heatmap=False,
target_type=target_type,
modulate_kernel=17,
use_udp=True))
data_cfg = dict(
image_size=[512, 512],
heatmap_size=[128, 128],
num_output_channels=channel_cfg['num_output_channels'],
num_joints=channel_cfg['dataset_joints'],
dataset_channel=channel_cfg['dataset_channel'],
inference_channel=channel_cfg['inference_channel'],
soft_nms=False,
nms_thr=1.0,
oks_thr=0.9,
vis_thr=0.2,
use_gt_bbox=False,
det_bbox_thr=0.0,
bbox_file='',
)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='TopDownGetRandomScaleRotation', rot_factor=5, scale_factor=0.7),
dict(
type="Albumentation",
transforms=[
dict(type="HueSaturationValue", p=0.7),
dict(type="GaussianBlur", p=0.3),
dict(type="MotionBlur", p=0.3),
dict(type="ColorJitter", p=0.4),
dict(type="GaussNoise", p=0.1),
dict(type="JpegCompression", p=0.6, quality_lower=80),
dict(type="RandomFog", p=0.1),
dict(type="RandomRain", p=0.1),
],
),
dict(type='TopDownAffine', use_udp=True),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(
type='TopDownGenerateTarget',
sigma=3,
encoding='UDP',
target_type=target_type),
dict(
type='Collect',
keys=['img', 'target', 'target_weight'],
meta_keys=[
'image_file', 'joints_3d', 'joints_3d_visible', 'center', 'scale', 'bbox',
'rotation', 'bbox_score', 'flip_pairs'
]),
]
val_pipeline = [
dict(type='LoadImageFromFile'),# channel_order='bgr'),
dict(type='TopDownAffine', use_udp=True),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(
type='TopDownGenerateTarget',
sigma=3,
encoding='UDP',
target_type=target_type),
dict(
type='Collect',
keys=['img', "target", "target_weight"],
meta_keys=[
'image_file', 'center', 'scale', 'rotation', 'bbox_score', 'bbox',
'flip_pairs'
]),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownAffine', use_udp=True),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(
type='Collect',
keys=['img'],
meta_keys=[
'image_file', 'center', 'scale', 'rotation', 'bbox_score',
'flip_pairs'
]),
]
data_root = '/root/share/tf/dataset/final_localization/corners_1.0'
data = dict(
samples_per_gpu=4,
workers_per_gpu=1,
val_dataloader=dict(samples_per_gpu=1),
test_dataloader=dict(samples_per_gpu=1),
train=dict(
type='TopDownChargerDataset',
ann_dir=f'{data_root}/annotations/',
img_prefix=f'{data_root}/images/',
data_cfg=data_cfg,
pipeline=train_pipeline,
dataset_info={{_base_.dataset_info}}),
val=dict(
type='TopDownChargerDataset',
ann_dir=f'{data_root}/val/annotations/',
img_prefix=f'{data_root}/val/images/',
data_cfg=data_cfg,
pipeline=val_pipeline,
dataset_info={{_base_.dataset_info}}),
test=dict(
type='TopDownChargerDataset',
ann_dir=f'{data_root}/val/annotations/',
img_prefix=f'{data_root}/val/images/',
data_cfg=data_cfg,
pipeline=val_pipeline,
dataset_info={{_base_.dataset_info}}),
)
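# --- Illustrative note (not part of the original config) ---
# This is an mmpose/mmcv-style config file: it is not imported directly but loaded
# by the training tools. A hedged sketch of how it is typically consumed (the
# config path below is an assumption about where this file is stored):
#
#   from mmcv import Config
#   cfg = Config.fromfile('configs/charger/c_litehrnet30_512_hm128_repr_v5.py')
#   print(cfg.model.backbone.type, cfg.data.samples_per_gpu)
#
# or from the command line:
#   python tools/train.py configs/charger/c_litehrnet30_512_hm128_repr_v5.py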
|
the-stack_0_24962
|
#!/usr/bin/env python
"""
_WorkQueue_t_
WorkQueue tests
"""
from __future__ import print_function
import os
import threading
import time
import unittest
import logging
from retry import retry
from WMCore.WMBase import getTestBase
from WMCore.ACDC.DataCollectionService import DataCollectionService
from WMCore.Configuration import Configuration
from WMCore.DAOFactory import DAOFactory
from WMCore.DataStructs.File import File as WMFile
from WMCore.DataStructs.Run import Run
from WMCore.Lexicon import sanitizeURL
from WMCore.ResourceControl.ResourceControl import ResourceControl
from WMCore.Services.DBS.DBSErrors import DBSReaderError
from WMCore.Services.UUIDLib import makeUUID
from WMCore.Services.WorkQueue.WorkQueue import WorkQueue as WorkQueueService
from WMCore.WMBS.Job import Job
from WMCore.WMSpec.StdSpecs.DQMHarvest import DQMHarvestWorkloadFactory
from WMCore.WMSpec.StdSpecs.StepChain import StepChainWorkloadFactory
from WMCore.WMSpec.StdSpecs.ReReco import ReRecoWorkloadFactory
from WMCore.WMSpec.WMWorkload import WMWorkload, WMWorkloadHelper
from WMCore.WorkQueue.WorkQueue import WorkQueue, globalQueue, localQueue
from WMCore.WorkQueue.WorkQueueExceptions import (WorkQueueWMSpecError, WorkQueueNoMatchingElements,
WorkQueueNoWorkError)
from WMCore.WorkQueue.DataStructs.WorkQueueElement import STATES
from WMQuality.Emulators import EmulatorSetup
from WMQuality.Emulators.DataBlockGenerator import Globals
from WMQuality.Emulators.PhEDExClient.MockPhEDExApi import PILEUP_DATASET
from WMQuality.Emulators.WMSpecGenerator.WMSpecGenerator import createConfig
from WMCore_t.WMSpec_t.samples.MultiTaskProductionWorkload \
import workload as MultiTaskProductionWorkload
from WMCore_t.WorkQueue_t.WorkQueueTestCase import WorkQueueTestCase
NBLOCKS_HICOMM = 47
NFILES_HICOMM = 72
NBLOCKS_COSMIC = 58
NFILES_COSMIC = 108
NFILES_COSMICRAW = 141
TOTAL_EVENTS=10000
def rerecoWorkload(workloadName, arguments, assignArgs=None):
factory = ReRecoWorkloadFactory()
wmspec = factory.factoryWorkloadConstruction(workloadName, arguments)
if assignArgs:
args = factory.getAssignTestArguments()
args.update(assignArgs)
wmspec.updateArguments(args)
return wmspec
def stepchainWorkload(workloadName, arguments):
factory = StepChainWorkloadFactory()
wmspec = factory.factoryWorkloadConstruction(workloadName, arguments)
return wmspec
def getFirstTask(wmspec):
"""Return the 1st top level task"""
return next(wmspec.taskIterator())
def syncQueues(queue, skipWMBS=False):
"""Sync parent & local queues and split work
Workaround having to wait for couchdb replication and splitting polling
"""
queue.backend.forceQueueSync()
time.sleep(2)
work = queue.processInboundWork()
queue.performQueueCleanupActions(skipWMBS=skipWMBS)
queue.backend.forceQueueSync()
# after replication need to wait a while to update result
time.sleep(2)
return work
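# --- Illustrative note (not part of the original test module) ---
# The tests below repeatedly use the same flow: queue a spec into the global
# queue, pull work into a local queue for some site resources, then call
# syncQueues() so couch replication and status updates settle before asserting.
# A hedged sketch of that flow (the queue objects are created in setUp below):
#
#   globalQueue.queueWork(spec.specUrl())
#   localQueue.pullWork({'T2_XX_SiteA': 10})
#   syncQueues(localQueue)
#   work = localQueue.getWork({'T2_XX_SiteA': 10}, {})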
class WorkQueueTest(WorkQueueTestCase):
"""
_WorkQueueTest_
For /MinimumBias/ComissioningHI-v1/RAW the dataset has 47 blocks with 72 files.
The PhEDEx emulator sets the block locations like:
17 at 'T2_XX_SiteA', 'T2_XX_SiteB', 'T2_XX_SiteC'
19 at 'T2_XX_SiteA', 'T2_XX_SiteB'
11 at 'T2_XX_SiteA' only
"""
def __init__(self, methodName='runTest'):
super(WorkQueueTest, self).__init__(methodName=methodName, mockDBS=True, mockPhEDEx=True)
def setupConfigCacheAndAgrs(self):
self.rerecoArgs = ReRecoWorkloadFactory.getTestArguments()
self.rerecoArgs["CouchDBName"] = self.configCacheDB
self.rerecoArgs["ConfigCacheID"] = createConfig(self.rerecoArgs["CouchDBName"])
self.mcArgs = StepChainWorkloadFactory.getTestArguments()
self.mcArgs["CouchDBName"] = self.configCacheDB
self.mcArgs['Step1']["ConfigCacheID"] = createConfig(self.mcArgs["CouchDBName"])
self.parentProcArgs = ReRecoWorkloadFactory.getTestArguments()
self.parentProcArgs.update(IncludeParents="True")
self.parentProcArgs.update(InputDataset="/Cosmics/ComissioningHI-PromptReco-v1/RECO")
self.parentProcArgs["CouchDBName"] = self.configCacheDB
self.parentProcArgs["ConfigCacheID"] = createConfig(self.parentProcArgs["CouchDBName"])
self.openRunningProcArgs = ReRecoWorkloadFactory.getTestArguments()
self.openRunningProcArgs.update(OpenRunningTimeout=10)
self.openRunningProcArgs["CouchDBName"] = self.configCacheDB
self.openRunningProcArgs["ConfigCacheID"] = createConfig(self.openRunningProcArgs["CouchDBName"])
self.pileupArgs = StepChainWorkloadFactory.getTestArguments()
self.pileupArgs['Step1'].update(MCPileup=PILEUP_DATASET)
self.pileupArgs['Step1'].update(InputDataset="/MinimumBias/ComissioningHI-v1/RAW",
RequestNumEvents=TOTAL_EVENTS,
SplittingAlgo="EventAwareLumiBased")
self.pileupArgs["CouchDBName"] = self.configCacheDB
self.pileupArgs['Step1']["ConfigCacheID"] = createConfig(self.pileupArgs["CouchDBName"])
self.pileupMcArgs = StepChainWorkloadFactory.getTestArguments()
self.pileupMcArgs['Step1'].update(MCPileup=PILEUP_DATASET)
self.pileupArgs['Step1'].update(RequestNumEvents=TOTAL_EVENTS)
self.pileupMcArgs["CouchDBName"] = self.configCacheDB
self.pileupMcArgs['Step1']["ConfigCacheID"] = createConfig(self.pileupMcArgs["CouchDBName"])
def setUp(self):
"""
If we dont have a wmspec file create one
"""
# undo any customizations
Globals.GlobalParams.resetParams()
# set up WMAgent config file for couchdb
self.configFile = EmulatorSetup.setupWMAgentConfig()
WorkQueueTestCase.setUp(self)
self.setupConfigCacheAndAgrs()
# Basic production Spec
self.spec = stepchainWorkload('testProduction', self.mcArgs)
self.spec.setSiteWhitelist(['T2_XX_SiteA', 'T2_XX_SiteB'])
getFirstTask(self.spec).addProduction(totalEvents=TOTAL_EVENTS)
self.spec.setSpecUrl(os.path.join(self.workDir, 'testworkflow.spec'))
self.spec.save(self.spec.specUrl())
# Production spec plus pileup
self.productionPileupSpec = stepchainWorkload('testProduction', self.pileupMcArgs)
self.productionPileupSpec.setSiteWhitelist(['T2_XX_SiteA', 'T2_XX_SiteB'])
getFirstTask(self.productionPileupSpec).addProduction(totalEvents=TOTAL_EVENTS)
self.productionPileupSpec.setSpecUrl(os.path.join(self.workDir, 'testworkflowPileupMc.spec'))
self.productionPileupSpec.save(self.productionPileupSpec.specUrl())
# Processing spec plus pileup
self.processingPileupSpec = stepchainWorkload('testProcessing', self.pileupArgs)
self.processingPileupSpec.setSiteWhitelist(['T2_XX_SiteA', 'T2_XX_SiteB', 'T2_XX_SiteC'])
getFirstTask(self.processingPileupSpec).addProduction(totalEvents=TOTAL_EVENTS)
self.processingPileupSpec.setSpecUrl(os.path.join(self.workDir, 'testworkflowPileup.spec'))
self.processingPileupSpec.save(self.processingPileupSpec.specUrl())
# ReReco spec with whitelist
self.whitelistSpec = rerecoWorkload('whitelistlistSpec', self.rerecoArgs)
self.whitelistSpec.setSpecUrl(os.path.join(self.workDir,
'testWhitelist.spec'))
getFirstTask(self.whitelistSpec).data.constraints.sites.whitelist = ['T2_XX_SiteB']
self.whitelistSpec.save(self.whitelistSpec.specUrl())
# ReReco spec with delay for running open
self.openRunningSpec = rerecoWorkload('openRunningSpec', self.openRunningProcArgs)
self.openRunningSpec.setSpecUrl(os.path.join(self.workDir,
'testOpenRunningSpec.spec'))
self.openRunningSpec.save(self.openRunningSpec.specUrl())
# Create queues
globalCouchUrl = "%s/%s" % (self.testInit.couchUrl, self.globalQDB)
logdbCouchUrl = "%s/%s" % (self.testInit.couchUrl, self.logDBName)
reqdbUrl = "%s/%s" % (self.testInit.couchUrl, self.requestDBName)
self.globalQueue = globalQueue(DbName=self.globalQDB,
InboxDbName=self.globalQInboxDB,
QueueURL=globalCouchUrl,
central_logdb_url=logdbCouchUrl,
log_reporter="WorkQueue_Unittest",
UnittestFlag=True,
RequestDBURL=reqdbUrl)
# self.midQueue = WorkQueue(SplitByBlock = False, # mid-level queue
# PopulateFilesets = False,
# ParentQueue = self.globalQueue,
# CacheDir = None)
        # ignore mid queue as it causes database duplications
# copy jobStateMachine couchDB configuration here since we don't want/need to pass whole configuration
jobCouchConfig = Configuration()
jobCouchConfig.section_("JobStateMachine")
jobCouchConfig.JobStateMachine.couchurl = os.environ["COUCHURL"]
jobCouchConfig.JobStateMachine.couchDBName = "testcouchdb"
jobCouchConfig.JobStateMachine.jobSummaryDBName = "wmagent_summary_test"
jobCouchConfig.JobStateMachine.summaryStatsDBName = "stat_summary_test"
# copy bossAir configuration here since we don't want/need to pass whole configuration
bossAirConfig = Configuration()
bossAirConfig.section_("BossAir")
bossAirConfig.BossAir.pluginDir = "WMCore.BossAir.Plugins"
bossAirConfig.BossAir.pluginNames = ["MockPlugin"]
bossAirConfig.BossAir.section_("MockPlugin")
bossAirConfig.BossAir.MockPlugin.fakeReport = os.path.join(getTestBase(),
'WMComponent_t/JobAccountant_t/fwjrs',
"MergeSuccess.pkl")
bossAirConfig.section_("Agent")
bossAirConfig.Agent.agentName = "TestAgent"
bossAirConfig.section_("JobStateMachine")
bossAirConfig.JobStateMachine.couchurl = os.environ["COUCHURL"]
bossAirConfig.JobStateMachine.couchDBName = "testcouchdb"
bossAirConfig.JobStateMachine.jobSummaryDBName = "wmagent_summary_test"
bossAirConfig.JobStateMachine.summaryStatsDBName = "stat_summary_test"
self.localQueue = localQueue(DbName=self.localQDB,
InboxDbName=self.localQInboxDB,
ParentQueueCouchUrl=globalCouchUrl,
ParentQueueInboxCouchDBName=self.globalQInboxDB,
JobDumpConfig=jobCouchConfig,
BossAirConfig=bossAirConfig,
CacheDir=self.workDir,
central_logdb_url=logdbCouchUrl,
log_reporter="WorkQueue_Unittest",
RequestDBURL=reqdbUrl)
self.localQueue2 = localQueue(DbName=self.localQDB2,
InboxDbName=self.localQInboxDB2,
ParentQueueCouchUrl=globalCouchUrl,
ParentQueueInboxCouchDBName=self.globalQInboxDB,
JobDumpConfig=jobCouchConfig,
BossAirConfig=bossAirConfig,
CacheDir=self.workDir,
central_logdb_url=logdbCouchUrl,
log_reporter="WorkQueue_Unittest",
RequestDBURL=reqdbUrl)
        # configuration for the Alerts messaging framework: work (alerts) and
        # control channel addresses to which alerts are sent
        # these are the destination addresses where AlertProcessor:Receiver listens
config = Configuration()
config.section_("Alert")
config.Alert.address = "tcp://127.0.0.1:5557"
config.Alert.controlAddr = "tcp://127.0.0.1:5559"
# standalone queue for unit tests
self.queue = WorkQueue(JobDumpConfig=jobCouchConfig,
BossAirConfig=bossAirConfig,
DbName=self.queueDB,
InboxDbName=self.queueInboxDB,
CacheDir=self.workDir,
config=config,
central_logdb_url=logdbCouchUrl,
log_reporter="WorkQueue_Unittest",
RequestDBURL=reqdbUrl)
# create relevant sites in wmbs
rc = ResourceControl()
site_se_mapping = {'T2_XX_SiteA': 'T2_XX_SiteA', 'T2_XX_SiteB': 'T2_XX_SiteB'}
for site, se in site_se_mapping.iteritems():
rc.insertSite(site, 100, 200, se, cmsName=site, plugin="MockPlugin")
daofactory = DAOFactory(package="WMCore.WMBS",
logger=threading.currentThread().logger,
dbinterface=threading.currentThread().dbi)
addLocation = daofactory(classname="Locations.New")
addLocation.execute(siteName=site, pnn=se)
def setupReReco(self, assignArgs=None, **kwargs):
# Sample Tier1 ReReco spec
self.rerecoArgs.update(kwargs)
processingSpec = rerecoWorkload('testProcessing', self.rerecoArgs, assignArgs=assignArgs)
processingSpec.setSpecUrl(os.path.join(self.workDir, 'testProcessing.spec'))
processingSpec.save(processingSpec.specUrl())
return processingSpec
def setupParentProcSpec(self, assignArgs=None, **kwargs):
# Sample Tier1 ReReco spec with parent
self.parentProcArgs.update(kwargs)
parentProcSpec = rerecoWorkload('testParentProcessing', self.parentProcArgs, assignArgs=assignArgs)
parentProcSpec.setSpecUrl(os.path.join(self.workDir, 'testParentProcessing.spec'))
parentProcSpec.save(parentProcSpec.specUrl())
return parentProcSpec
def setupHighPrioReReco(self, assignArgs=None, **kwargs):
# High priority ReReco spec
self.rerecoArgs.update(kwargs)
highPrioReReco = rerecoWorkload('highPrioSpec', self.rerecoArgs, assignArgs=assignArgs)
highPrioReReco.data.request.priority = 999998
highPrioReReco.setSpecUrl(os.path.join(self.workDir, 'highPrioSpec.spec'))
highPrioReReco.save(highPrioReReco.specUrl())
return highPrioReReco
def tearDown(self):
"""tearDown"""
super(WorkQueueTest, self).tearDown()
# Delete WMBSAgent config file
EmulatorSetup.deleteConfig(self.configFile)
def createWQReplication(self, parentQURL, childURL):
wqfilter = 'WorkQueue/queueFilter'
query_params = {'childUrl': childURL, 'parentUrl': sanitizeURL(parentQURL)['url']}
localQInboxURL = "%s_inbox" % childURL
replicatorDocs = []
replicatorDocs.append({'source': sanitizeURL(parentQURL)['url'], 'target': localQInboxURL,
'filter': wqfilter, 'query_params': query_params})
replicatorDocs.append({'source': sanitizeURL(localQInboxURL)['url'], 'target': parentQURL,
'filter': wqfilter, 'query_params': query_params})
for rp in replicatorDocs:
self.localCouchMonitor.couchServer.replicate(
rp['source'], rp['target'], filter=rp['filter'],
query_params=rp.get('query_params', False),
continuous=False)
return
def pullWorkWithReplication(self, localQ, resources):
localQ.pullWork(resources)
self.createWQReplication(localQ.params['ParentQueueCouchUrl'], localQ.params['QueueURL'])
def createResubmitSpec(self, serverUrl, couchDB, parentage=False):
"""
_createResubmitSpec_
Create a bogus resubmit workload.
"""
site = ["T1_US_FNAL"]
workload = WMWorkloadHelper(WMWorkload("TestWorkload"))
reco = workload.newTask("reco")
workload.setOwnerDetails(name="evansde77", group="DMWM")
workload.setSiteWhitelist(site)
# first task uses the input dataset
reco.addInputDataset(name="/PRIMARY/processed-v1/TIERONE",
primary="PRIMARY", processed="processed-v1", tier="TIERONE")
reco.data.input.splitting.algorithm = "File"
reco.data.input.splitting.include_parents = parentage
reco.setTaskType("Processing")
cmsRunReco = reco.makeStep("cmsRun1")
cmsRunReco.setStepType("CMSSW")
reco.applyTemplates()
cmsRunRecoHelper = cmsRunReco.getTypeHelper()
cmsRunRecoHelper.addOutputModule("outputRECO",
primaryDataset="PRIMARY",
processedDataset="processed-v2",
dataTier="TIERTWO",
lfnBase="/store/dunkindonuts",
mergedLFNBase="/store/kfc")
workload.setTrustLocationFlag(inputFlag=True, pileupFlag=False)
dcs = DataCollectionService(url=serverUrl, database=couchDB)
def getJob(workload):
job = Job()
job["task"] = workload.getTask("reco").getPathName()
job["workflow"] = workload.name()
job["location"] = site
job["owner"] = workload.getOwner().get("name")
job["group"] = workload.getOwner().get("group")
return job
testFileA = WMFile(lfn=makeUUID(), size=1024, events=1024, parents=['parent1'])
testFileA.setLocation(site)
testFileA.addRun(Run(1, 1, 2))
testFileB = WMFile(lfn=makeUUID(), size=1024, events=1024, parents=['parent2'])
testFileB.setLocation(site)
testFileB.addRun(Run(1, 3, 4))
testJobA = getJob(workload)
testJobA.addFile(testFileA)
testJobA.addFile(testFileB)
dcs.failedJobs([testJobA])
topLevelTask = workload.getTopLevelTask()[0]
workload.truncate("Resubmit_TestWorkload", topLevelTask.getPathName(),
serverUrl, couchDB)
return workload
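# Note on createResubmitSpec: it registers two failed files for the "reco"
# task in the ACDC data collection (via DataCollectionService.failedJobs) and
# then truncates the workload at its top-level task into a
# "Resubmit_TestWorkload" spec, so the queue can create work from the ACDC records.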
def testProduction(self):
"""
Enqueue and get work for a production WMSpec.
"""
specfile = self.spec.specUrl()
numUnit = 1
jobSlot = [10] * numUnit # array of jobs per block
total = sum(jobSlot)
for _ in range(numUnit):
self.queue.queueWork(specfile)
self.assertEqual(numUnit, len(self.queue))
# try to get work
work = self.queue.getWork({'SiteDoesNotExist': jobSlot[0]}, {})
self.assertEqual([], work) # not in whitelist
work = self.queue.getWork({'T2_XX_SiteA': 0}, {})
self.assertEqual([], work)
work = self.queue.getWork({'T2_XX_SiteA': jobSlot[0]}, {})
self.assertEqual(len(work), 1)
# no more work available
self.assertEqual(0, len(self.queue.getWork({'T2_XX_SiteA': total}, {})))
def testProductionMultiQueue(self):
"""Test production with multiple queueus"""
specfile = self.spec.specUrl()
numUnit = 1
jobSlot = [10] * numUnit # array of jobs per block
total = sum(jobSlot)
self.globalQueue.queueWork(specfile)
self.assertEqual(numUnit, len(self.globalQueue))
# pull work to localQueue2 - check local doesn't get any
numWork = self.localQueue2.pullWork({'T2_XX_SiteA': total})
self.assertEqual(numUnit, numWork)
self.assertEqual(0, self.localQueue.pullWork({'T2_XX_SiteA': total}))
syncQueues(self.localQueue)
syncQueues(self.localQueue2)
self.assertEqual(0, len(self.localQueue.status(status='Available')))
self.assertEqual(numUnit, len(self.localQueue2.status(status='Available')))
self.assertEqual(numUnit, len(self.globalQueue.status(status='Acquired')))
self.assertEqual(sanitizeURL(self.localQueue2.params['QueueURL'])['url'],
self.globalQueue.status()[0]['ChildQueueUrl'])
def testPriority(self):
"""
Test priority change functionality
"""
jobSlot = 10
totalSlices = 1
self.queue.queueWork(self.spec.specUrl())
self.queue.processInboundWork()
# priority change
self.queue.setPriority(50, self.spec.name())
# check the element priorities have been updated
self.assertEqual([x['Priority'] for x in self.queue.status(RequestName=self.spec.name())],
[50] * totalSlices)
self.assertRaises(RuntimeError, self.queue.setPriority, 50, 'blahhhhh')
# claim all work
work = self.queue.getWork({'T2_XX_SiteA': jobSlot}, {})
self.assertEqual(len(work), totalSlices)
# no more work available
self.assertEqual(0, len(self.queue.getWork({'T2_XX_SiteA': jobSlot}, {})))
def testProcessing(self):
"""
Enqueue and get work for a processing WMSpec.
"""
processingSpec = self.setupReReco(assignArgs={'SiteWhitelist': ["T2_XX_SiteA", "T2_XX_SiteB", "T2_XX_SiteC"]})
specfile = processingSpec.specUrl()
# Queue Work & check accepted
self.queue.queueWork(specfile)
self.queue.processInboundWork()
self.assertEqual(NBLOCKS_HICOMM, len(self.queue))
self.queue.updateLocationInfo()
# No resources
work = self.queue.getWork({}, {})
self.assertEqual(len(work), 0)
work = self.queue.getWork({'T2_XX_SiteA': 0, 'T2_XX_SiteB': 0}, {})
self.assertEqual(len(work), 0)
# Get the first bit of work available at site C
work = self.queue.getWork({'T2_XX_SiteC': 1}, {})
self.assertEqual(len(work), 1) # only one job slot requested, so one element
processedBlocks = len(work)
processedFiles = work[0]["NumOfFilesAdded"]
# Get the rest of the work available at site C
work = self.queue.getWork({'T2_XX_SiteC': 1000}, {})
processedBlocks += len(work)
for element in work:
processedFiles += element["NumOfFilesAdded"]
self.assertEqual(processedBlocks, 17)
self.assertEqual(processedFiles, 22)
# Get the rest of the work available at site B
work = self.queue.getWork({'T2_XX_SiteB': 1000}, {})
processedBlocks += len(work)
for element in work:
processedFiles += element["NumOfFilesAdded"]
self.assertEqual(processedBlocks, 17 + 19)
self.assertEqual(processedFiles, 22 + 35)
# Make sure no work left for B or C
work = self.queue.getWork({'T2_XX_SiteB': 1000, 'T2_XX_SiteC': 1000}, {})
self.assertEqual(len(work), 0)
# Make sure we get all the work when we include A
work = self.queue.getWork({'T2_XX_SiteA': 1000}, {})
processedBlocks += len(work)
for element in work:
processedFiles += element["NumOfFilesAdded"]
self.assertEqual(processedBlocks, NBLOCKS_HICOMM)
self.assertEqual(processedFiles, NFILES_HICOMM)
# Make sure no remaining work
work = self.queue.getWork({'T2_XX_SiteA': 1000, 'T2_XX_SiteB': 1000}, {})
self.assertEqual(len(work), 0) # no more work available
def testBlackList(self):
"""
Black list functionality
"""
blacklistSpec = self.setupReReco(assignArgs={'SiteWhitelist':["T2_XX_SiteB"], 'SiteBlacklist': ["T2_XX_SiteA"]})
specfile = blacklistSpec.specUrl()
# Queue Work & check accepted
self.queue.queueWork(specfile)
self.queue.processInboundWork()
self.assertEqual(NBLOCKS_HICOMM, len(self.queue))
self.queue.updateLocationInfo()
# T2_XX_SiteA is in blacklist, no work pulled
work = self.queue.getWork({'T2_XX_SiteA': 1000}, {})
self.assertEqual(len(work), 0)
# T2_XX_SiteB can run most blocks
work = self.queue.getWork({'T2_XX_SiteB': 1000}, {})
self.assertEqual(len(work), 17 + 19)
def testWhiteList(self):
"""
White list functionality
"""
specfile = self.whitelistSpec.specUrl()
# Queue Work & check accepted
self.queue.queueWork(specfile)
self.queue.processInboundWork()
self.assertEqual(NBLOCKS_HICOMM, len(self.queue))
self.queue.updateLocationInfo()
# Only SiteB in whitelist
work = self.queue.getWork({'T2_XX_SiteA': 1000}, {})
self.assertEqual(len(work), 0)
# Site B can run
work = self.queue.getWork({'T2_XX_SiteB': 1000, 'T2_XX_SiteAA': 1000}, {})
self.assertEqual(len(work), 17 + 19)
def testQueueChaining(self):
"""
Chain WorkQueues, pull work down and verify splitting
"""
self.assertEqual(0, len(self.globalQueue))
# check no work in local queue
self.assertEqual(0, len(self.localQueue.getWork({'T2_XX_SiteA': 1000}, {})))
# Add work to top most queue
processingSpec = self.setupReReco(assignArgs={'SiteWhitelist': ["T2_XX_SiteA", "T2_XX_SiteD"]})
self.globalQueue.queueWork(processingSpec.specUrl())
self.assertEqual(NBLOCKS_HICOMM, len(self.globalQueue))
# check work isn't passed down to site without subscription
self.assertEqual(self.localQueue.pullWork({'T2_XX_SiteD': 1000}), 0)
# put at correct site
self.globalQueue.updateLocationInfo()
# check work isn't passed down to the wrong agent
work = self.localQueue.getWork({'T2_XX_SiteD': 1000}, {}) # Not in subscription
self.assertEqual(0, len(work))
self.assertEqual(NBLOCKS_HICOMM, len(self.globalQueue))
# pull work down to the lowest queue
self.assertEqual(self.localQueue.pullWork({'T2_XX_SiteA': 1000}), NBLOCKS_HICOMM)
syncQueues(self.localQueue)
self.assertEqual(len(self.localQueue), NBLOCKS_HICOMM)
self.localQueue.updateLocationInfo()
work = self.localQueue.getWork({'T2_XX_SiteA': 1000},
{})
self.assertEqual(0, len(self.localQueue))
self.assertEqual(NBLOCKS_HICOMM, len(work))
# check work in local and subscription made
for x in work:
self.assertTrue(x['SubscriptionId'] > 0)
for x in self.localQueue.status():
self.assertTrue(x['SubscriptionId'] > 0)
# mark work done & check this passes upto the top level
self.localQueue.setStatus('Done', [x.id for x in work])
def testQueueChainingStatusUpdates(self):
"""Chain workQueues, pass work down and verify lifecycle"""
self.assertEqual(0, len(self.globalQueue))
self.assertEqual(0, len(self.localQueue.getWork({'T2_XX_SiteA': 1000}, {})))
# Add work to top most queue
processingSpec = self.setupReReco(assignArgs={'SiteWhitelist': ["T2_XX_SiteA", "T2_XX_SiteD"]})
self.globalQueue.queueWork(processingSpec.specUrl())
self.globalQueue.processInboundWork()
self.assertEqual(NBLOCKS_HICOMM, len(self.globalQueue))
# pull to local queue
self.globalQueue.updateLocationInfo()
self.assertEqual(self.localQueue.pullWork({'T2_XX_SiteA': 1000}), NBLOCKS_HICOMM)
syncQueues(self.localQueue) # Tell parent local has acquired
self.assertEqual(len(self.globalQueue.status('Acquired')), NBLOCKS_HICOMM)
self.assertEqual(len(self.localQueue.status('Available')), NBLOCKS_HICOMM)
# run work
self.globalQueue.updateLocationInfo()
work = self.localQueue.getWork({'T2_XX_SiteA': 1000},
{})
self.assertEqual(len(work), NBLOCKS_HICOMM)
# resend info
syncQueues(self.localQueue)
self.assertEqual(len(self.globalQueue.status('Running')), NBLOCKS_HICOMM)
self.assertEqual(len(self.localQueue.status('Running')), NBLOCKS_HICOMM)
# finish work locally and propagate to global
self.localQueue.doneWork([x.id for x in work])
# just update the elements but not using the result
for x in work:
self.localQueue.backend.updateElements(x.id, PercentComplete=100, PercentSuccess=99)
elements = self.localQueue.status('Done')
self.assertEqual(len(elements), len(work))
self.assertEqual([x['PercentComplete'] for x in elements],
[100] * len(work))
self.assertEqual([x['PercentSuccess'] for x in elements],
[99] * len(work))
syncQueues(self.localQueue, skipWMBS=True)
elements = self.globalQueue.status('Done')
self.assertEqual(len(elements), NBLOCKS_HICOMM)
self.assertEqual([x['PercentComplete'] for x in elements], [100] * NBLOCKS_HICOMM)
self.assertEqual([x['PercentSuccess'] for x in elements], [99] * NBLOCKS_HICOMM)
self.globalQueue.performQueueCleanupActions()
# global queue still has its elements since they won't be cleaned up until the request status is updated
self.assertEqual(NBLOCKS_HICOMM, len(self.globalQueue.status()))
elements = self.globalQueue.backend.getInboxElements('Done')
self.assertEqual(len(elements), 1)
self.assertEqual([x['PercentComplete'] for x in elements], [100])
self.assertEqual([x['PercentSuccess'] for x in elements], [99])
def testMultiTaskProduction(self):
"""
Test Multi top level task production spec.
The multiTaskProduction spec consists of 2 top-level tasks, with event sizes
of 1000 and 2000 respectively
"""
# TODO: needs more rigorous test on each element per task
# Basic production Spec
spec = MultiTaskProductionWorkload
for task in spec.taskIterator():
delattr(task.steps().data.application.configuration, 'configCacheUrl')
spec.setSpecUrl(os.path.join(self.workDir, 'multiTaskProduction.spec'))
spec.setOwnerDetails("evansde77", "DMWM", {'dn': 'MyDN'})
spec.save(spec.specUrl())
specfile = spec.specUrl()
numElements = 3
njobs = [10] * numElements # array of jobs per block
total = sum(njobs)
# Queue Work &njobs check accepted
self.queue.queueWork(specfile)
self.assertEqual(2, len(self.queue))
# try to get work
work = self.queue.getWork({'T2_XX_SiteA': 0},
{})
self.assertEqual([], work)
# check individual task whitelists obeyed when getting work
work = self.queue.getWork({'T2_XX_SiteA': total},
{})
self.assertEqual(len(work), 1)
work2 = self.queue.getWork({'T2_XX_SiteB': total},
{})
self.assertEqual(len(work2), 1)
work.extend(work2)
self.assertEqual(len(work), 2)
self.assertEqual(sum([x['Jobs'] for x in self.queue.status(status='Running')]),
total)
# check we have all tasks and no extra/missing ones
for task in spec.taskIterator():
# note: order of elements in work is undefined (both inserted simultaneously)
element = [x for x in work if x['Subscription']['workflow'].task == task.getPathName()]
if not element:
self.fail("Top level task %s not in wmbs" % task.getPathName())
element = element[0]
# check restrictions - only whitelist for now
whitelist = element['Subscription'].getWhiteBlackList()
whitelist = [x['site_name'] for x in whitelist if x['valid'] == 1]
self.assertEqual(sorted(task.siteWhitelist()), sorted(whitelist))
# no more work available
self.assertEqual(0, len(self.queue.getWork({'T2_XX_SiteA': total, 'T2_XX_SiteB': total},
{})))
try:
os.unlink(specfile)
except OSError:
pass
def testTeams(self):
"""
Team behaviour
"""
specfile = self.spec.specUrl()
self.globalQueue.queueWork(specfile, team='The A-Team')
self.globalQueue.processInboundWork()
self.assertEqual(1, len(self.globalQueue))
slots = {'T2_XX_SiteA': 1000, 'T2_XX_SiteB': 1000}
# Can't get work for wrong team
self.localQueue.params['Team'] = 'other'
self.assertEqual(self.localQueue.pullWork(slots), 0)
# and with correct team name
self.localQueue.params['Team'] = 'The A-Team'
self.assertEqual(self.localQueue.pullWork(slots), 1)
syncQueues(self.localQueue)
# when work leaves the queue in the agent it doesn't care about teams
self.localQueue.params['Team'] = 'other'
self.assertEqual(len(self.localQueue.getWork(slots, {})), 1)
self.assertEqual(0, len(self.globalQueue))
def testSplittingLargeInputs(self):
"""
_testSplittingLargeInputs_
Check that we can split large inputs and store the processed inputs
in the inbox element correctly.
"""
processingSpec = self.setupReReco(assignArgs={'SiteWhitelist': ["T2_XX_SiteA"]})
self.globalQueue.queueWork(processingSpec.specUrl())
inboxElement = self.globalQueue.backend.getInboxElements(elementIDs=[processingSpec.name()])
self.assertEqual(len(inboxElement[0]['ProcessedInputs']), NBLOCKS_HICOMM)
return
def testGlobalBlockSplitting(self):
"""Block splitting at global level"""
# force global queue to split work on block
self.globalQueue.params['SplittingMapping']['DatasetBlock']['name'] = 'Block'
self.globalQueue.params['SplittingMapping']['Block']['name'] = 'Block'
self.globalQueue.params['SplittingMapping']['Dataset']['name'] = 'Block'
# queue work, globally for block, pass down, report back -> complete
totalSpec = 1
totalBlocks = totalSpec * NBLOCKS_HICOMM
self.assertEqual(0, len(self.globalQueue))
for _ in range(totalSpec):
processingSpec = self.setupReReco(assignArgs={'SiteWhitelist': ["T2_XX_SiteA", "T2_XX_SiteB"]})
self.globalQueue.queueWork(processingSpec.specUrl())
self.globalQueue.processInboundWork()
self.assertEqual(totalBlocks, len(self.globalQueue))
# both blocks in global belong to same parent, but have different inputs
status = self.globalQueue.status()
self.assertEqual(status[0]['ParentQueueId'], status[1]['ParentQueueId'])
self.assertNotEqual(status[0]['Inputs'], status[1]['Inputs'])
# pull to local
# location info should already be added
# self.globalQueue.updateLocationInfo()
self.assertEqual(self.localQueue.pullWork({'T2_XX_SiteA': 1000}),
totalBlocks)
syncQueues(self.localQueue)
self.assertEqual(len(self.localQueue.status(status='Available')),
totalBlocks) # 2 in local
# self.localQueue.updateLocationInfo()
work = self.localQueue.getWork({'T2_XX_SiteA': 1000, 'T2_XX_SiteB': 1000},
{})
self.assertEqual(len(work), totalBlocks)
# both refer to same wmspec
self.assertEqual(work[0]['RequestName'], work[1]['RequestName'])
self.localQueue.doneWork([str(x.id) for x in work])
# elements in local deleted at end of update, only global ones left
self.assertEqual(len(self.localQueue.status(status='Done')),
totalBlocks)
syncQueues(self.localQueue)
self.assertEqual(len(self.localQueue.status(status='Done')),
totalBlocks)
self.assertEqual(len(self.globalQueue.status(status='Done')),
totalBlocks)
def testGlobalDatasetSplitting(self):
"""Dataset splitting at global level"""
dqmHarvArgs = DQMHarvestWorkloadFactory.getTestArguments()
dqmHarvArgs["DQMConfigCacheID"] = createConfig(dqmHarvArgs["CouchDBName"])
factory = DQMHarvestWorkloadFactory()
dqmWorkload = factory.factoryWorkloadConstruction('dqmTest', dqmHarvArgs)
dqmWorkload.setSpecUrl(os.path.join(self.workDir, 'dqmTest.spec'))
dqmWorkload.setSiteWhitelist('T2_XX_SiteA')
dqmWorkload.setTrustLocationFlag()
dqmWorkload.save(dqmWorkload.specUrl())
# force global queue to split work on Dataset
self.globalQueue.params['SplittingMapping']['DatasetBlock']['name'] = 'Dataset'
self.globalQueue.params['SplittingMapping']['Block']['name'] = 'Dataset'
self.globalQueue.params['SplittingMapping']['Dataset']['name'] = 'Dataset'
# queue work, globally for block, pass down, report back -> complete
totalSpec = 1
dummyTotalBlocks = totalSpec * NBLOCKS_HICOMM
self.assertEqual(0, len(self.globalQueue))
for _ in range(totalSpec):
self.globalQueue.queueWork(dqmWorkload.specUrl())
self.globalQueue.processInboundWork()
self.assertEqual(totalSpec, len(self.globalQueue))
# pull to local
# self.globalQueue.updateLocationInfo()
self.assertEqual(self.localQueue.pullWork({'T2_XX_SiteA': 1000}), totalSpec)
syncQueues(self.localQueue)
self.assertEqual(len(self.localQueue.status(status='Available')), totalSpec)
self.localQueue.updateLocationInfo()
work = self.localQueue.getWork({'T2_XX_SiteA': 1000}, {})
self.assertEqual(len(work), 0)
self.localQueue.doneWork([str(x.id) for x in work])
self.assertEqual(len(self.localQueue.status(status='Done')), totalSpec)
syncQueues(self.localQueue)
# elements are not deleted until the request status is changed
self.assertEqual(len(self.localQueue.status(status='Done')), totalSpec)
self.assertEqual(len(self.globalQueue.status(status='Done')), totalSpec)
def testResetWork(self):
"""Reset work in global to different child queue"""
# TODO: This test sometimes fails - I suspect a race condition (maybe a conflict in couch)
# Cancel code needs reworking so this will hopefully be fixed then
totalBlocks = NBLOCKS_HICOMM
processingSpec = self.setupReReco(assignArgs={'SiteWhitelist': ["T2_XX_SiteA", "T2_XX_SiteB"]})
self.globalQueue.queueWork(processingSpec.specUrl())
self.globalQueue.updateLocationInfo()
self.assertEqual(self.localQueue.pullWork({'T2_XX_SiteA': 1000}),
totalBlocks)
syncQueues(self.localQueue)
work = self.localQueue.getWork({'T2_XX_SiteA': 1000, 'T2_XX_SiteB': 1000},
{})
self.assertEqual(len(work), totalBlocks)
self.assertEqual(len(self.localQueue.status(status='Running')), totalBlocks)
syncQueues(self.localQueue)
self.assertEqual(len(self.globalQueue.status(status='Running')), totalBlocks)
# Re-assign work in global
self.globalQueue.resetWork([x.id for x in self.globalQueue.status(status='Running')])
# work should be canceled in local
# TODO: Note the work in local will be orphaned but not canceled
syncQueues(self.localQueue)
work_at_local = [x for x in self.globalQueue.status(status='Running') \
if x['ChildQueueUrl'] == sanitizeURL(self.localQueue.params['QueueURL'])['url']]
self.assertEqual(len(work_at_local), 0)
# now 2nd queue calls and acquires work
self.assertEqual(self.localQueue2.pullWork({'T2_XX_SiteA': 1000}),
totalBlocks)
syncQueues(self.localQueue2)
# check work in global assigned to local2
self.assertEqual(len(self.localQueue2.status(status='Available')), totalBlocks) # work in local2
work_at_local2 = [x for x in self.globalQueue.status(status='Acquired')
if x['ChildQueueUrl'] == sanitizeURL(self.localQueue2.params['QueueURL'])['url']]
self.assertEqual(len(work_at_local2), totalBlocks)
def testCancelWork(self):
"""Cancel work"""
processingSpec = self.setupReReco(assignArgs={"SiteWhitelist":["T2_XX_SiteA", "T2_XX_SiteB"]})
self.queue.queueWork(processingSpec.specUrl())
elements = len(self.queue)
self.queue.updateLocationInfo()
self.assertEqual(len(self.queue.status()), NBLOCKS_HICOMM)
work = self.queue.getWork({'T2_XX_SiteA': 1000, 'T2_XX_SiteB': 1000}, {})
self.assertEqual(len(self.queue), 0)
self.assertEqual(len(self.queue.status(status='Running')), elements)
ids = [x.id for x in work]
self.assertEqual(len(ids), NBLOCKS_HICOMM)
canceled = self.queue.cancelWork(ids)
self.assertEqual(sorted(canceled), sorted(ids))
self.assertEqual(len(self.queue.status()), NBLOCKS_HICOMM)
self.assertEqual(len(self.queue.status(status='Running')), NBLOCKS_HICOMM)
self.assertEqual(len(self.queue.statusInbox(status='Canceled')), 1)
# create a new request with one fake file
self.queue.queueWork(self.spec.specUrl())
self.assertEqual(len(self.queue), 1)
work = self.queue.getWork({'T2_XX_SiteA': 1000, 'T2_XX_SiteB': 1000}, {})
self.assertEqual(len(self.queue.status(status='Running')), len(self.queue.status()))
ids = [x.id for x in work]
canceled = self.queue.cancelWork(WorkflowName='testProduction')
self.assertEqual(canceled, ids)
self.assertEqual(len(self.queue), 0)
def testCancelWorkGlobal(self):
"""Cancel work in global queue"""
# queue to global & pull an element to local
processingSpec = self.setupReReco(assignArgs= {'SiteWhitelist': ["T2_XX_SiteA", "T2_XX_SiteB"]})
self.globalQueue.queueWork(processingSpec.specUrl())
self.assertEqual(self.localQueue.pullWork({'T2_XX_SiteA': 1}), 1)
syncQueues(self.localQueue)
# cancel in global and propagate to local
service = WorkQueueService(self.localQueue.backend.parentCouchUrlWithAuth)
service.cancelWorkflow(processingSpec.name())
# marked for cancel
self.assertEqual(len(self.globalQueue.status(status='CancelRequested')), NBLOCKS_HICOMM)
self.assertEqual(len(self.globalQueue.statusInbox(status='Acquired')), 1)
# will cancel element left in global, one sent to local queue stays CancelRequested
syncQueues(self.globalQueue)
self.assertEqual(len(self.globalQueue.status(status='CancelRequested')), 1)
self.assertEqual(len(self.globalQueue.status(status='Canceled')), NBLOCKS_HICOMM - 1)
self.assertEqual(len(self.globalQueue.statusInbox(status='CancelRequested')), 1)
# global parent stays CancelRequested till child queue cancels
syncQueues(self.globalQueue)
self.assertEqual(len(self.globalQueue.status(status='CancelRequested')), 1)
self.assertEqual(len(self.globalQueue.status(status='Canceled')), NBLOCKS_HICOMM - 1)
self.assertEqual(len(self.globalQueue.statusInbox(status='CancelRequested')), 1)
# during sync the local queue is synced with the global queue, but elements are not deleted until the workflow finishes
syncQueues(self.localQueue)
self.assertEqual(len(self.localQueue.statusInbox(status='Canceled')), 1) # inbox is synced
self.assertEqual(len(self.globalQueue.status(status='Canceled')), NBLOCKS_HICOMM)
self.assertEqual(len(self.globalQueue.statusInbox(status='CancelRequested')), 1)
syncQueues(self.globalQueue)
self.assertEqual(len(self.globalQueue.status(status='Canceled')), NBLOCKS_HICOMM)
self.assertEqual(len(self.globalQueue.statusInbox(status='Canceled')), 1)
syncQueues(self.localQueue)
# local canceled
# self.assertEqual(len(self.localQueue.status(status='Canceled')), 1)
# clear global
self.globalQueue.deleteWorkflows(processingSpec.name())
self.assertEqual(len(self.globalQueue.statusInbox()), 0)
### check cancel of work negotiating in agent works
self.globalQueue.queueWork(self.whitelistSpec.specUrl())
self.assertEqual(self.localQueue.pullWork({'T2_XX_SiteB': 1}), 1)
self.localQueue.backend.forceQueueSync()
time.sleep(2)
self.assertEqual(len(self.localQueue.statusInbox(status='Negotiating')), 1)
# now cancel
service.cancelWorkflow(self.whitelistSpec.name())
syncQueues(self.globalQueue)
self.localQueue.backend.forceQueueSync() # pull in cancellation
time.sleep(2)
self.assertEqual(len(self.globalQueue.status(status='Canceled')), 2 * NBLOCKS_HICOMM - 1)
self.assertEqual(len(self.localQueue.statusInbox(status='CancelRequested')), 1)
syncQueues(self.localQueue, skipWMBS=True)
self.assertEqual(len(self.localQueue.statusInbox(status='Canceled')), 2)
syncQueues(self.localQueue)
syncQueues(self.globalQueue)
self.assertEqual(len(self.localQueue.statusInbox(WorkflowName=self.whitelistSpec.name())), 1)
self.assertEqual(len(self.globalQueue.status(WorkflowName=self.whitelistSpec.name())), NBLOCKS_HICOMM)
self.assertEqual(len(self.globalQueue.statusInbox(status='Canceled')), 1)
# clear global
self.globalQueue.deleteWorkflows(self.whitelistSpec.name())
self.assertEqual(len(self.globalQueue.statusInbox()), 0)
def testInvalidSpecs(self):
"""Complain on invalid WMSpecs"""
# request != workflow name
self.assertRaises(WorkQueueWMSpecError, self.queue.queueWork,
self.setupReReco().specUrl(),
request='fail_this')
# invalid white list
mcspec = stepchainWorkload('testProductionInvalid', self.mcArgs)
getFirstTask(mcspec).setSiteWhitelist('ThisIsInvalid')
mcspec.setSpecUrl(os.path.join(self.workDir, 'testProductionInvalid.spec'))
mcspec.save(mcspec.specUrl())
self.assertRaises(WorkQueueWMSpecError, self.queue.queueWork, mcspec.specUrl())
getFirstTask(mcspec).setSiteWhitelist(['T2_XX_SiteB'])
self.queue.deleteWorkflows(mcspec.name())
# 0 events
getFirstTask(mcspec).addProduction(totalEvents=0)
getFirstTask(mcspec).setSiteWhitelist(['T2_XX_SiteB'])
mcspec.save(mcspec.specUrl())
self.assertRaises(WorkQueueNoWorkError, self.queue.queueWork, mcspec.specUrl())
# no dataset
processingSpec = rerecoWorkload('testProcessingInvalid', self.rerecoArgs)
getFirstTask(processingSpec).setSiteWhitelist(['T2_XX_SiteB'])
processingSpec.setSpecUrl(os.path.join(self.workDir,
'testProcessingInvalid.spec'))
processingSpec.save(processingSpec.specUrl())
getFirstTask(processingSpec).data.input.dataset = None
processingSpec.save(processingSpec.specUrl())
self.assertRaises(WorkQueueWMSpecError, self.queue.queueWork, processingSpec.specUrl())
# invalid dbs url
processingSpec = rerecoWorkload('testProcessingInvalid', self.rerecoArgs)
getFirstTask(processingSpec).setSiteWhitelist(['T2_XX_SiteB'])
processingSpec.setSpecUrl(os.path.join(self.workDir,
'testProcessingInvalid.spec'))
getFirstTask(processingSpec).data.input.dataset.dbsurl = 'wrongprot://dbs.example.com'
processingSpec.save(processingSpec.specUrl())
self.assertRaises(WorkQueueWMSpecError, self.queue.queueWork, processingSpec.specUrl())
self.queue.deleteWorkflows(processingSpec.name())
# invalid dataset name
processingSpec = rerecoWorkload('testProcessingInvalid', self.rerecoArgs)
getFirstTask(processingSpec).setSiteWhitelist(['T2_XX_SiteB'])
processingSpec.setSpecUrl(os.path.join(self.workDir,
'testProcessingInvalid.spec'))
getFirstTask(processingSpec).data.input.dataset.name = '/MinimumBias/FAKE-Filter-v1/RECO'
processingSpec.save(processingSpec.specUrl())
self.assertRaises(DBSReaderError, self.queue.queueWork, processingSpec.specUrl())
self.queue.deleteWorkflows(processingSpec.name())
# Can't have a slash in the primary dataset name - validation should fail
getFirstTask(processingSpec).data.input.dataset.primary = 'a/b'
processingSpec.save(processingSpec.specUrl())
self.assertRaises(DBSReaderError, self.queue.queueWork, processingSpec.specUrl())
self.queue.deleteWorkflows(processingSpec.name())
# dataset splitting with invalid run whitelist
processingSpec = rerecoWorkload('testProcessingInvalid', self.rerecoArgs)
getFirstTask(processingSpec).setSiteWhitelist(['T2_XX_SiteB'])
processingSpec.setSpecUrl(os.path.join(self.workDir,
'testProcessingInvalid.spec'))
processingSpec.setStartPolicy('Dataset')
processingSpec.setRunWhitelist([666]) # not in this dataset
processingSpec.save(processingSpec.specUrl())
self.assertRaises(DBSReaderError, self.queue.queueWork, processingSpec.specUrl())
self.queue.deleteWorkflows(processingSpec.name())
# block splitting with invalid run whitelist
processingSpec = rerecoWorkload('testProcessingInvalid', self.rerecoArgs)
getFirstTask(processingSpec).setSiteWhitelist(['T2_XX_SiteB'])
processingSpec.setSpecUrl(os.path.join(self.workDir,
'testProcessingInvalid.spec'))
processingSpec.setStartPolicy('Block')
processingSpec.setRunWhitelist([666]) # not in this dataset
processingSpec.save(processingSpec.specUrl())
self.assertRaises(DBSReaderError, self.queue.queueWork, processingSpec.specUrl())
self.queue.deleteWorkflows(processingSpec.name())
def testIgnoreDuplicates(self):
"""Ignore duplicate work"""
specfile = self.spec.specUrl()
self.globalQueue.queueWork(specfile)
self.assertEqual(1, len(self.globalQueue))
# queue work again
self.globalQueue.queueWork(specfile)
self.assertEqual(1, len(self.globalQueue))
def testConflicts(self):
"""Resolve conflicts between global & local queue"""
self.globalQueue.queueWork(self.spec.specUrl())
self.localQueue.pullWork({'T2_XX_SiteA': TOTAL_EVENTS})
self.localQueue.getWork({'T2_XX_SiteA': TOTAL_EVENTS},
{})
syncQueues(self.localQueue)
global_ids = [x.id for x in self.globalQueue.status()]
self.localQueue.backend.updateInboxElements(*global_ids, Status='Done', PercentComplete=69)
self.globalQueue.backend.updateElements(*global_ids, Status='Canceled')
self.localQueue.backend.forceQueueSync()
time.sleep(2)
self.assertForConflicts()
@retry(AssertionError, tries=3, delay=10)
def assertForConflicts(self):
"""
Make the assertions in a separate function so we can use retry
"""
global_ids = [x.id for x in self.globalQueue.status()]
self.localQueue.backend.fixConflicts()
self.localQueue.backend.forceQueueSync()
time.sleep(2)
self.assertEqual([x['Status'] for x in self.globalQueue.status(elementIDs=global_ids)],
['Canceled'])
self.assertEqual([x['PercentComplete'] for x in self.globalQueue.status(elementIDs=global_ids)],
[69])
self.assertEqual([x for x in self.localQueue.statusInbox()],
[x for x in self.globalQueue.status()])
def testDeleteWork(self):
"""Delete finished work
TODO: do emulate the reqmgr2 and change the status of request
so actually request gets deleted when performCleanupAction is run.
"""
self.globalQueue.queueWork(self.spec.specUrl())
self.assertEqual(self.localQueue.pullWork({'T2_XX_SiteA': TOTAL_EVENTS}), 1)
syncQueues(self.localQueue)
self.assertEqual(len(self.localQueue.getWork({'T2_XX_SiteA': TOTAL_EVENTS},
{})), 1)
syncQueues(self.localQueue)
self.localQueue.doneWork(WorkflowName=self.spec.name())
syncQueues(self.localQueue)
self.assertEqual(len(self.localQueue.status(WorkflowName=self.spec.name())),
1) # not deleted until request status is updated
self.assertEqual('Done',
self.globalQueue.status(WorkflowName=self.spec.name())[0]['Status'])
self.globalQueue.performQueueCleanupActions()
self.assertEqual('Done',
self.globalQueue.statusInbox(WorkflowName=self.spec.name())[0]['Status'])
self.assertEqual(len(self.globalQueue.status(WorkflowName=self.spec.name())),
1) # not deleted until request status is updated
self.globalQueue.deleteWorkflows(self.spec.name())
self.assertEqual(len(self.globalQueue.statusInbox(WorkflowName=self.spec.name())),
0)
syncQueues(self.localQueue)
self.assertEqual(len(self.localQueue.statusInbox(WorkflowName=self.spec.name())),
1) # not deleted until request status is updated
def testResubmissionWorkflow(self):
"""Test workflow resubmission via ACDC"""
acdcCouchDB = "workqueue_t_acdc"
self.testInit.setupCouch(acdcCouchDB, "GroupUser", "ACDC")
spec = self.createResubmitSpec(self.testInit.couchUrl,
acdcCouchDB)
spec.setSpecUrl(os.path.join(self.workDir, 'resubmissionWorkflow.spec'))
spec.setSiteWhitelist('T1_US_FNAL')
spec.save(spec.specUrl())
self.localQueue.params['Team'] = 'cmsdataops'
self.globalQueue.queueWork(spec.specUrl(), "Resubmit_TestWorkload", team="cmsdataops")
self.assertEqual(self.localQueue.pullWork({"T1_US_FNAL": 100}), 1)
syncQueues(self.localQueue)
self.assertEqual(len(self.localQueue.getWork({"T1_US_FNAL": 100}, {})), 1)
def testResubmissionWithParentsWorkflow(self):
"""Test workflow resubmission with parentage via ACDC"""
acdcCouchDB = "workqueue_t_acdc"
self.testInit.setupCouch(acdcCouchDB, "GroupUser", "ACDC")
spec = self.createResubmitSpec(self.testInit.couchUrl,
acdcCouchDB, parentage=True)
spec.setSpecUrl(os.path.join(self.workDir, 'resubmissionWorkflow.spec'))
spec.setSiteWhitelist('T1_US_FNAL')
spec.save(spec.specUrl())
self.localQueue.params['Team'] = 'cmsdataops'
self.globalQueue.queueWork(spec.specUrl(), "Resubmit_TestWorkload", team="cmsdataops")
self.localQueue.pullWork({"T1_US_FNAL": 100})
syncQueues(self.localQueue)
self.localQueue.getWork({"T1_US_FNAL": 100}, {})
def testResubmissionWorkflowSiteWhitelistLocations(self):
""" Test an ACDC workflow where we use the site whitelist as locations"""
acdcCouchDB = "workqueue_t_acdc"
self.testInit.setupCouch(acdcCouchDB, "GroupUser", "ACDC")
spec = self.createResubmitSpec(self.testInit.couchUrl,
acdcCouchDB)
spec.setSpecUrl(os.path.join(self.workDir, 'resubmissionWorkflow.spec'))
spec.setSiteWhitelist('T1_US_FNAL')
spec.setTrustLocationFlag(inputFlag=True, pileupFlag=False)
spec.save(spec.specUrl())
self.localQueue.params['Team'] = 'cmsdataops'
self.globalQueue.queueWork(spec.specUrl(), "Resubmit_TestWorkload", team="cmsdataops")
self.assertEqual(self.localQueue.pullWork({"T1_UK_RAL": 100}), 0)
self.assertEqual(self.localQueue.pullWork({"T1_US_FNAL": 100}), 1)
syncQueues(self.localQueue)
self.assertEqual(len(self.localQueue.getWork({"T1_US_FNAL": 100}, {})), 1)
def testThrottling(self):
"""Pull work only if all previous work processed in child"""
processingSpec = self.setupReReco(assignArgs={'SiteWhitelist': ["T2_XX_SiteA"]})
self.globalQueue.queueWork(processingSpec.specUrl())
self.assertEqual(NBLOCKS_HICOMM, len(self.globalQueue))
self.assertEqual(self.localQueue.pullWork({'T2_XX_SiteA': 1}), 1)
# further pull will fail till we replicate to child
# hopefully couch replication won't happen until we manually sync
self.assertEqual(self.localQueue.pullWork({'T2_XX_SiteA': 1}), 0)
self.assertEqual(NBLOCKS_HICOMM - 1, len(self.globalQueue))
self.assertEqual(0, len(self.localQueue))
syncQueues(self.localQueue)
self.assertEqual(1, len(self.localQueue))
# pull works again
self.assertEqual(self.localQueue.pullWork({'T2_XX_SiteA': 1}), 1)
def testSitesFromResourceControl(self):
"""Test sites from resource control"""
# Most tests pull work for specific sites (to give us control)
# In reality site list will come from resource control so test
# that here (just a simple check that we can get sites from rc)
self.globalQueue.queueWork(self.spec.specUrl())
self.assertEqual(self.localQueue.pullWork(), 1)
syncQueues(self.localQueue)
self.assertEqual(len(self.localQueue.status()), 1)
def testParentProcessing(self):
"""
Enqueue and get work for a processing WMSpec.
"""
parentProcSpec = self.setupParentProcSpec(assignArgs={'SiteWhitelist': ["T2_XX_SiteA", "T2_XX_SiteB"]})
specfile = parentProcSpec.specUrl()
# Queue Work & check accepted
self.queue.queueWork(specfile)
self.queue.processInboundWork()
self.assertEqual(NBLOCKS_COSMIC, len(self.queue))
self.queue.updateLocationInfo()
# No resources
work = self.queue.getWork({}, {})
self.assertEqual(len(work), 0)
work = self.queue.getWork({'T2_XX_SiteA': 0,
'T2_XX_SiteB': 0}, {})
self.assertEqual(len(work), 0)
# Get 1 work element when any resources free
work = self.queue.getWork({'T2_XX_SiteB': 1}, {})
self.assertEqual(len(work), 1)
processedFiles = work[0]["NumOfFilesAdded"]
# claim remaining work
work = self.queue.getWork({'T2_XX_SiteA': TOTAL_EVENTS, 'T2_XX_SiteB': TOTAL_EVENTS}, {})
self.assertEqual(len(work), NBLOCKS_COSMIC - 1)
for element in work:
processedFiles += element["NumOfFilesAdded"]
self.assertEqual(processedFiles, NFILES_COSMIC + NFILES_COSMICRAW)
# no more work available
self.assertEqual(0, len(self.queue.getWork({'T2_XX_SiteA': 1000}, {})))
def testWMBSInjectionStatus(self):
self.globalQueue.queueWork(self.spec.specUrl())
processingSpec = self.setupReReco(assignArgs={'SiteWhitelist': ["T2_XX_SiteA"]})
self.globalQueue.queueWork(processingSpec.specUrl())
# test globalqueue status (no parent queue case)
self.assertEqual(self.globalQueue.getWMBSInjectionStatus(),
[{'testProcessing': False}, {'testProduction': False}])
self.assertEqual(self.globalQueue.getWMBSInjectionStatus(self.spec.name()),
False)
# Amount of work varies, just make sure it's positive
self.assertGreater(self.localQueue.pullWork(), 0)
# test local queue status with parents (globalQueue is not synced yet)
self.assertEqual(self.localQueue.getWMBSInjectionStatus(),
[{'testProcessing': False}, {'testProduction': False}])
self.assertEqual(self.localQueue.getWMBSInjectionStatus(self.spec.name()),
False)
syncQueues(self.localQueue)
self.localQueue.processInboundWork()
self.localQueue.updateLocationInfo()
self.localQueue.getWork({'T2_XX_SiteA': 1000},
{})
self.assertEqual(self.localQueue.getWMBSInjectionStatus(),
[{'testProcessing': False}, {'testProduction': False}])
self.assertEqual(self.localQueue.getWMBSInjectionStatus(self.spec.name()),
False)
# update the parent status; it is still running-open since that is the default
self.localQueue.performQueueCleanupActions()
self.localQueue.backend.sendToParent(continuous=False)
self.assertEqual(self.localQueue.getWMBSInjectionStatus(),
[{'testProcessing': False}, {'testProduction': False}])
self.assertEqual(self.localQueue.getWMBSInjectionStatus(self.spec.name()),
False)
# close the global inbox elements, they won't be split anymore
self.globalQueue.closeWork(['testProcessing', 'testProduction'])
self.localQueue.getWMBSInjectionStatus()
time.sleep(1)
# There are too many jobs to pull down for testProcessing, so it still has elements not in WMBS
self.assertEqual(self.localQueue.getWMBSInjectionStatus(),
[{'testProcessing': False}, {'testProduction': True}])
self.assertEqual(self.localQueue.getWMBSInjectionStatus(self.spec.name()),
True)
# test not existing workflow
self.assertRaises(WorkQueueNoMatchingElements,
self.localQueue.getWMBSInjectionStatus,
"NotExistWorkflow")
def testEndPolicyNegotiating(self):
"""Test end policy processing of request before splitting"""
processingSpec = self.setupReReco(assignArgs={'SiteWhitelist': ["T2_XX_SiteA"]})
work = self.globalQueue.queueWork(processingSpec.specUrl())
self.assertEqual(work, NBLOCKS_HICOMM)
self.assertEqual(self.localQueue.pullWork({'T2_XX_SiteA': 1}), 1)
self.localQueue.backend.pullFromParent() # pull work into inbox (Negotiating state)
self.localQueue.processInboundWork()
syncQueues(self.localQueue)
self.assertEqual(self.localQueue.pullWork({'T2_XX_SiteA': 1}), 1)
# should print message but not raise an error
self.localQueue.performQueueCleanupActions(skipWMBS=True)
self.localQueue.backend.pullFromParent(continuous=False)
self.assertEqual(len(self.localQueue.statusInbox(Status='Negotiating')), 1)
self.assertEqual(len(self.localQueue), 1)
def testSiteStatus(self):
"""Check that we only pull work on sites in Normal status"""
processingSpec = self.setupReReco(assignArgs={'SiteWhitelist': ["T2_XX_SiteA", "T2_XX_SiteB"]})
self.globalQueue.queueWork(processingSpec.specUrl())
self.globalQueue.queueWork(self.spec.specUrl())
# acquire 1 element of a wf and then mark site as draining.
self.assertEqual(self.localQueue.pullWork({'T2_XX_SiteA': 1}), 1)
syncQueues(self.localQueue)
existing_wf = [x['RequestName'] for x in self.localQueue.statusInbox()]
self.assertEqual(1, len(existing_wf))
existing_wf = existing_wf[0]
bossAirConfig = Configuration()
bossAirConfig.section_("BossAir")
bossAirConfig.BossAir.pluginDir = "WMCore.BossAir.Plugins"
bossAirConfig.BossAir.pluginNames = []
bossAirConfig.section_("Agent")
bossAirConfig.Agent.agentName = "TestAgent"
bossAirConfig.section_("JobStateMachine")
bossAirConfig.JobStateMachine.couchurl = os.environ["COUCHURL"]
bossAirConfig.JobStateMachine.couchDBName = "testcouchdb"
rc = ResourceControl(bossAirConfig)
rc.changeSiteState('T2_XX_SiteA', 'Draining')
rc.changeSiteState('T2_XX_SiteB', 'Draining')
# pull more work, no work should be acquired
self.localQueue.pullWork()
syncQueues(self.localQueue)
for x in self.localQueue.statusInbox():
if x['RequestName'] != existing_wf:
self.fail('Got new wf %s for draining site' % x['RequestName'])
# wmbs injection for draining sites continues to work
self.assertTrue(self.localQueue.getWork({'T2_XX_SiteA': 10},
{}))
# re-enable site and get remainder of work
rc.changeSiteState('T2_XX_SiteA', 'Normal')
self.assertTrue(self.localQueue.pullWork())
syncQueues(self.localQueue)
self.assertTrue(self.localQueue.getWork({'T2_XX_SiteA': 100},
{}))
def test0eventBlock(self):
"""0 event blocks should be processed as usual"""
# use event splitting and 0 events so we get 0 jobs - verify this doesn't cause any problems
# FIXME: This does not work currently because we don't actually have 0 event blocks.
Globals.GlobalParams.setNumOfEventsPerFile(0)
processingSpec = self.setupReReco(assignArgs={'SiteWhitelist': ["T2_XX_SiteA"]})
processingSpec.setStartPolicy('Block', SliceType='NumberOfEvents')
processingSpec.save(processingSpec.specUrl())
self.globalQueue.queueWork(processingSpec.specUrl())
# all blocks pulled as each has 0 jobs
self.assertEqual(self.localQueue.pullWork({'T2_XX_SiteA': 1}), 1)
syncQueues(self.localQueue)
self.assertEqual(len(self.localQueue.status()), 1)
self.assertEqual(len(self.localQueue.getWork({'T2_XX_SiteA': 1},
{})), 1)
for element in self.localQueue.status():
# check files added and subscription made
self.assertEqual(element['NumOfFilesAdded'], 1)
self.assertTrue(element['SubscriptionId'] >= 0)
self.assertEqual(element['Jobs'], 1)
# complete workflow
self.localQueue.performQueueCleanupActions(skipWMBS=True)
self.localQueue.doneWork([str(x.id) for x in self.localQueue.status()])
self.assertEqual(len(self.localQueue.status(status='Done')), 1)
syncQueues(self.localQueue)
self.assertEqual(len(self.globalQueue.status(status='Done')), 1)
def testProcessingWithContinuousSplitting(self):
"""Test the open request handling in the WorkQueue"""
# Put normal work in
processingSpec = self.setupReReco(assignArgs={'SiteWhitelist': ["T2_XX_SiteA", "T2_XX_SiteB", "T2_XX_SiteC"]})
specfile = processingSpec.specUrl()
# Queue work with initial block count
logging.info("Queuing work for spec name: %s", processingSpec.name())
self.assertEqual(NBLOCKS_HICOMM, self.globalQueue.queueWork(specfile))
self.assertEqual(NBLOCKS_HICOMM, len(self.globalQueue))
# Try adding work, no change in blocks available. No work should be added
logging.info("Adding work - already added - for spec name: %s", processingSpec.name())
self.assertEqual(0, self.globalQueue.addWork(processingSpec.name()))
self.assertEqual(NBLOCKS_HICOMM, len(self.globalQueue))
# Now pull work from the global to the local queue
logging.info("Pulling 1 workqueue element from the parent queue")
self.localQueue.pullWork({'T2_XX_SiteA': 1})
syncQueues(self.localQueue)
self.assertEqual(len(self.localQueue), 1)
self.assertEqual(len(self.globalQueue), NBLOCKS_HICOMM - 1)
# This time pull work from the local queue into WMBS
logging.info("Getting 1 workqueue element from the local queue")
dummyWork = self.localQueue.getWork({'T2_XX_SiteA': 1000},
{})
syncQueues(self.localQueue)
syncQueues(self.globalQueue)
# FIXME: for some reason, it tries to reinsert all those elements again
# however, if we call it again, it won't retry anything
self.assertEqual(47, self.globalQueue.addWork(processingSpec.name()))
self.assertEqual(0, self.globalQueue.addWork(processingSpec.name()))
self.assertEqual(NBLOCKS_HICOMM - 1, len(self.globalQueue))
self.assertEqual(len(self.globalQueue.backend.getInboxElements(status="Running")), 1)
# Now pull the new work to the local queue
self.localQueue.pullWork({'T2_XX_SiteB': 1000, 'T2_XX_SiteC': 1000})
syncQueues(self.localQueue)
self.assertEqual(len(self.localQueue), 35)
self.assertEqual(len(self.globalQueue), NBLOCKS_HICOMM - 35 - 1)
# FIXME: for some reason, it tries to reinsert all those elements again
# however, if we call it again, it won't retry anything
self.assertEqual(47, self.globalQueue.addWork(processingSpec.name()))
self.assertEqual(0, self.globalQueue.addWork(processingSpec.name()))
return
def testProcessingWithPileup(self):
"""Test a full WorkQueue cycle in a request with pileup datasets"""
specfile = self.processingPileupSpec.specUrl()
# Queue work with initial block count
self.assertEqual(NBLOCKS_HICOMM, self.globalQueue.queueWork(specfile))
self.assertEqual(NBLOCKS_HICOMM, len(self.globalQueue))
# All blocks are in Site A, B, and C, but the pileup is only at C.
# We should not be able to pull all the work.
self.assertGreaterEqual(self.localQueue.pullWork({'T2_XX_SiteA': 1,
'T2_XX_SiteB': 1,
'T2_XX_SiteC': 1}), 3)
# The PhEDEx emulator will move the pileup blocks to site A
self.globalQueue.updateLocationInfo()
self.assertEqual(self.localQueue.pullWork({'T2_XX_SiteB': 1,
'T2_XX_SiteC': 1}), 0)
# Now try with just site A (no work)
self.assertEqual(self.localQueue.pullWork({'T2_XX_SiteA': 1}), 0)
syncQueues(self.localQueue)
self.assertGreaterEqual(len(self.localQueue), 3)
self.assertEqual(len(self.globalQueue), NBLOCKS_HICOMM - 3)
# Pull it to WMBS, first try with an impossible site
# The pileup was split again in the local queue so site A is not there
self.assertGreaterEqual(len(self.localQueue.getWork({'T2_XX_SiteA': 1,
'T2_XX_SiteB': 1,
'T2_XX_SiteC': 1}, {})), 2)
Globals.moveBlock({'%s#1' % PILEUP_DATASET: ['T2_XX_SiteA', 'T2_XX_SiteC'],
'%s#2' % PILEUP_DATASET: ['T2_XX_SiteA', 'T2_XX_SiteC']})
self.localQueue.updateLocationInfo()
# FIXME: this test gives different results in jenkins and in private tests
self.assertGreaterEqual(len(self.localQueue.getWork({'T2_XX_SiteA': 1}, {})), 0)
self.assertGreaterEqual(len(self.localQueue.status()), 3)
def testPileupOnProduction(self):
"""Test that we can split properly a Production workflow with pileup"""
specfile = self.productionPileupSpec.specUrl()
# Sanity check on queueWork only
self.assertEqual(1, self.globalQueue.queueWork(specfile))
self.assertEqual(1, len(self.globalQueue))
self.assertEqual(len(self.globalQueue.backend.getActivePileupData()), 1)
self.assertNotEqual(self.globalQueue.backend.getActivePileupData()[0]['dbs_url'], None)
def testPrioritiesWorkPolling(self):
"""Test how the priorities and current jobs in the queue affect the workqueue behavior
for acquiring and injecting work"""
# Queue a low prio workflow and a high prio workflow
highPrioReReco = self.setupHighPrioReReco(assignArgs={'SiteWhitelist': ["T2_XX_SiteA"]})
processingSpec = self.setupReReco(assignArgs={'SiteWhitelist': ["T2_XX_SiteA"]})
self.globalQueue.queueWork(processingSpec.specUrl())
self.globalQueue.queueWork(highPrioReReco.specUrl())
# Pull all into local queue
self.assertEqual(self.localQueue.pullWork({'T2_XX_SiteA': TOTAL_EVENTS}), 2 * NBLOCKS_HICOMM)
syncQueues(self.localQueue)
# Try pulling work into WMBS when "there is" a job of higher priority than the high prio workflow
self.assertEqual(len(self.localQueue.getWork({'T2_XX_SiteA': 1},
{'T2_XX_SiteA': {highPrioReReco.priority() + 1: 1}})),
0)
# Allow one more job slot
self.assertEqual(len(self.localQueue.getWork({'T2_XX_SiteA': 2},
{'T2_XX_SiteA': {highPrioReReco.priority() + 1: 1}})),
1)
# Allow 1 slot more and many slots occupied by low prio jobs
self.assertEqual(len(self.localQueue.getWork({'T2_XX_SiteA': 2},
{'T2_XX_SiteA': {1: 50}})),
1)
self.assertEqual(len(self.localQueue.backend.getElements(WorkflowName=highPrioReReco.name())),
NBLOCKS_HICOMM)
def testMonitorWorkQueue(self):
"""
Test several WorkQueue couch queries to monitor amount of work in the system
"""
# Run some bootstrap, same code as in the test above...
highPrioReReco = self.setupHighPrioReReco(assignArgs={'SiteWhitelist': ["T2_XX_SiteA", "T2_XX_SiteB"]})
processingSpec = self.setupReReco(assignArgs={'SiteWhitelist': ["T2_XX_SiteA", "T2_XX_SiteB"]})
self.globalQueue.queueWork(processingSpec.specUrl())
self.globalQueue.queueWork(highPrioReReco.specUrl())
initialStatus = ['Available', 'Negotiating', 'Acquired']
metrics = self.globalQueue.monitorWorkQueue(status=initialStatus)
time.sleep(1) # HACKY: query again to get the up-to-date views
metrics = self.globalQueue.monitorWorkQueue(status=initialStatus)
expectedMetrics = ('workByStatus', 'workByStatusAndPriority', 'workByAgentAndStatus',
'workByAgentAndPriority', 'uniqueJobsPerSiteAAA', 'possibleJobsPerSiteAAA',
'uniqueJobsPerSite', 'possibleJobsPerSite', 'total_query_time')
self.assertItemsEqual(metrics.keys(), expectedMetrics)
self.assertItemsEqual(metrics['workByStatus'].keys(), STATES)
self.assertEqual(metrics['workByStatus']['Available']['sum_jobs'], 678)
self.assertEqual(metrics['workByStatus']['Acquired'], {})
self.assertItemsEqual(metrics['workByStatusAndPriority'].keys(), STATES)
prios = [item['priority'] for item in metrics['workByStatusAndPriority']['Available']]
self.assertItemsEqual(prios, [8000, 999998])
self.assertEqual(metrics['workByStatusAndPriority']['Acquired'], [])
self.assertEqual(len(metrics['workByAgentAndStatus']), 1)
self.assertEqual(metrics['workByAgentAndStatus'][0]['agent_name'], 'AgentNotDefined')
self.assertEqual(metrics['workByAgentAndStatus'][0]['status'], 'Available')
self.assertEqual(len(metrics['workByAgentAndPriority']), 2)
self.assertEqual(metrics['workByAgentAndPriority'][0]['agent_name'], 'AgentNotDefined')
self.assertEqual([item['priority'] for item in metrics['workByAgentAndPriority']], [8000, 999998])
for met in ('uniqueJobsPerSiteAAA', 'possibleJobsPerSiteAAA', 'uniqueJobsPerSite', 'possibleJobsPerSite'):
self.assertItemsEqual(metrics[met].keys(), initialStatus)
self.assertEqual(len(metrics[met]['Available']), 2)
self.assertEqual(len(metrics[met]['Acquired']), 0)
self.assertItemsEqual(metrics[met]['Available'].keys(), ['T2_XX_SiteA', 'T2_XX_SiteB'])
self.assertTrue(metrics['total_query_time'] >= 0)
# Pull all into local queue (get them into Acquired status)
self.localQueue.pullWork({'T2_XX_SiteA': 500})
syncQueues(self.localQueue)
metrics = self.globalQueue.monitorWorkQueue(status=initialStatus)
time.sleep(1) # HACKY: query again to get the up-to-date views
metrics = self.globalQueue.monitorWorkQueue(status=initialStatus)
self.assertTrue(metrics['workByStatus']['Available']['sum_jobs'] < 200)
self.assertTrue(metrics['workByStatus']['Acquired']['sum_jobs'] >= 500)
self.assertEqual(len(metrics['workByStatusAndPriority']['Available']), 1)
self.assertEqual(len(metrics['workByStatusAndPriority']['Acquired']), 2)
self.assertEqual(metrics['workByStatusAndPriority']['Available'][0]['priority'], 8000)
prios = [item['priority'] for item in metrics['workByStatusAndPriority']['Acquired']]
self.assertItemsEqual(prios, [8000, 999998])
self.assertEqual(len(metrics['workByAgentAndStatus']), 2)
for elem in metrics['workByAgentAndStatus']:
if elem['status'] == 'Available':
self.assertEqual(elem['agent_name'], 'AgentNotDefined')
else: # in Acquired
self.assertTrue(elem['agent_name'] != 'AgentNotDefined')
self.assertEqual(len(metrics['workByAgentAndPriority']), 3)
prios = []
for item in metrics['workByAgentAndPriority']:
if item['agent_name'] != 'AgentNotDefined':
prios.append(item['priority'])
self.assertItemsEqual(prios, [8000, 999998])
for met in ('uniqueJobsPerSiteAAA', 'possibleJobsPerSiteAAA', 'uniqueJobsPerSite', 'possibleJobsPerSite'):
self.assertItemsEqual(metrics[met].keys(), initialStatus)
self.assertEqual(len(metrics[met]['Available']), 2)
self.assertEqual(len(metrics[met]['Acquired']), 2)
self.assertItemsEqual(metrics[met]['Available'].keys(), ['T2_XX_SiteA', 'T2_XX_SiteB'])
self.assertItemsEqual(metrics[met]['Acquired'].keys(), ['T2_XX_SiteA', 'T2_XX_SiteB'])
if __name__ == "__main__":
unittest.main()
|
the-stack_0_24963
|
from math import sqrt
from django.contrib.auth.models import User
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import connection
def is_gfk(content_field):
return isinstance(content_field, GenericForeignKey)
def query_has_where(query):
# NOTE: despite the name, this returns True when the compiled query has an
# *empty* WHERE clause, i.e. the queryset is unfiltered.
where, params = query.get_compiler(using='default').compile(query.where)
return where == ''
def query_as_sql(query):
return query.get_compiler(connection=connection).as_sql()
def sim_euclidean_distance(ratings_queryset, factor_a, factor_b):
rating_model = ratings_queryset.model
if isinstance(factor_a, User):
filter_field = 'user_id'
match_on = 'hashed'
lookup_a = factor_a.pk
lookup_b = factor_b.pk
else:
filter_field = 'hashed'
match_on = 'user_id'
lookup_a = rating_model(content_object=factor_a).generate_hash()
lookup_b = rating_model(content_object=factor_b).generate_hash()
sql = """
SELECT r1.score - r2.score AS diff
FROM
%(ratings_table)s AS r1
INNER JOIN
%(ratings_table)s AS r2
ON r1.%(match_on)s = r2.%(match_on)s
WHERE
r1.%(filter_field)s = '%(lookup_a)s' AND
r2.%(filter_field)s = '%(lookup_b)s'
%(queryset_filter)s
"""
rating_query = ratings_queryset.values_list('pk').query
if query_has_where(rating_query):
queryset_filter = ''
else:
q, p = query_as_sql(rating_query)
rating_qs_sql = q % p
queryset_filter = ' AND r1.id IN (%s)' % rating_qs_sql
params = {
'ratings_table': rating_model._meta.db_table,
'filter_field': filter_field,
'match_on': match_on,
'lookup_a': lookup_a,
'lookup_b': lookup_b,
'queryset_filter': queryset_filter
}
cursor = connection.cursor()
cursor.execute(sql % params)
sum_of_squares = 0
while True:
result = cursor.fetchone()
if result is None:
break
sum_of_squares += result[0] ** 2
return 1 / (1 + sum_of_squares)
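# The loop above accumulates sum((r1.score - r2.score)^2) over the items (or
# users) the two factors have in common, so the returned similarity is
# 1 / (1 + sum_of_squares): 1.0 for identical ratings, approaching 0 as the
# squared Euclidean distance grows.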
def sim_pearson_correlation(ratings_queryset, factor_a, factor_b):
rating_model = ratings_queryset.model
if isinstance(factor_a, User):
filter_field = 'user_id'
match_on = 'hashed'
lookup_a = factor_a.pk
lookup_b = factor_b.pk
else:
filter_field = 'hashed'
match_on = 'user_id'
lookup_a = rating_model(content_object=factor_a).generate_hash()
lookup_b = rating_model(content_object=factor_b).generate_hash()
sql = """
SELECT
SUM(r1.score) AS r1_sum,
SUM(r2.score) AS r2_sum,
SUM(r1.score*r1.score) AS r1_square_sum,
SUM(r2.score*r2.score) AS r2_square_sum,
SUM(r1.score*r2.score) AS p_sum,
COUNT(r1.id) AS sample_size
FROM
%(ratings_table)s AS r1
INNER JOIN
%(ratings_table)s AS r2
ON r1.%(match_on)s = r2.%(match_on)s
WHERE
r1.%(filter_field)s = '%(lookup_a)s' AND
r2.%(filter_field)s = '%(lookup_b)s'
%(queryset_filter)s
"""
rating_query = ratings_queryset.values_list('pk').query
if query_has_where(rating_query):
queryset_filter = ''
else:
q, p = query_as_sql(rating_query)
rating_qs_sql = q % p
queryset_filter = ' AND r1.id IN (%s)' % rating_qs_sql
params = {
'ratings_table': rating_model._meta.db_table,
'filter_field': filter_field,
'match_on': match_on,
'lookup_a': lookup_a,
'lookup_b': lookup_b,
'queryset_filter': queryset_filter
}
cursor = connection.cursor()
cursor.execute(sql % params)
result = cursor.fetchone()
if not result:
return 0
sum1, sum2, sum1_sq, sum2_sq, psum, sample_size = result
if sum1 is None or sum2 is None or sample_size == 0:
return 0
num = psum - (sum1 * sum2 / sample_size)
den = sqrt((sum1_sq - pow(sum1, 2) / sample_size) * (sum2_sq - pow(sum2, 2) / sample_size))
if den == 0:
return 0
return num / den
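# The SQL above gathers the sums needed for the Pearson correlation in a
# single pass; the value returned is
#     r = (p_sum - r1_sum*r2_sum/n) /
#         sqrt((r1_square_sum - r1_sum^2/n) * (r2_square_sum - r2_sum^2/n))
# which ranges from -1 (opposite rating patterns) through 0 (no correlation)
# to +1 (identical rating patterns).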
def top_matches(ratings_queryset, items, item, n=5, similarity=sim_pearson_correlation):
scores = [
(similarity(ratings_queryset, item, other), other)
for other in items if other != item
]
scores.sort()
scores.reverse()
return scores[:n]
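# Illustrative usage (model and variable names are hypothetical): rank the
# five users whose ratings correlate best with some_user's ratings:
#     top_matches(Rating.objects.all(), list(User.objects.all()), some_user, n=5)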
def recommendations(ratings_queryset, people, person, similarity=sim_pearson_correlation):
already_rated = ratings_queryset.filter(user=person).values_list('hashed')
totals = {}
sim_sums = {}
for other in people:
if other == person:
continue
sim = similarity(ratings_queryset, person, other)
if sim <= 0:
continue
items = ratings_queryset.filter(user=other).exclude(hashed__in=already_rated)
# now, score the items person hasn't rated yet
for item in items:
totals.setdefault(item.content_object, 0)
totals[item.content_object] += (item.score * sim)
sim_sums.setdefault(item.content_object, 0)
sim_sums[item.content_object] += sim
rankings = [(total / sim_sums[pk], pk) for pk, total in totals.items()]
rankings.sort()
rankings.reverse()
return rankings
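# Each candidate item ends up with a similarity-weighted average of the scores
# given by other users: sum(sim * score) / sum(sim), so highly similar users
# influence the recommendation more than weakly similar ones.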
def calculate_similar_items(ratings_queryset, num=10):
# get distinct items from the ratings queryset - this can be optimized
field = ratings_queryset.model._meta.get_field('content_object')
if is_gfk(field):
rated_ctypes = ratings_queryset.values_list('content_type', flat=True).distinct()
ctypes = ContentType.objects.filter(pk__in=rated_ctypes)
for ctype in ctypes:
ratings_subset = ratings_queryset.filter(content_type=ctype)
rating_ids = ratings_subset.values_list('object_id')
model_class = ctype.model_class()
queryset = model_class._default_manager.filter(pk__in=rating_ids)
_store_top_matches(ratings_queryset, queryset, num, True)
else:
rated_model = field.rel.to
rating_ids = ratings_queryset.values_list('content_object__pk')
queryset = rated_model._default_manager.filter(pk__in=rating_ids)
_store_top_matches(ratings_queryset, queryset, num, False)
def _store_top_matches(ratings_queryset, rated_queryset, num, is_gfk):
from ratings.models import SimilarItem
ctype = ContentType.objects.get_for_model(rated_queryset.model)
rated_queryset.values_list('pk') # fill cache
for item in rated_queryset.iterator():
matches = top_matches(ratings_queryset, rated_queryset, item, num)
for (score, match) in matches:
si, created = SimilarItem.objects.get_or_create(
content_type=ctype,
object_id=item.pk,
similar_content_type=ContentType.objects.get_for_model(match),
similar_object_id=match.pk)
if created or si.score != score:
si.score = score
si.save()
def recommended_items(ratings_queryset, user):
from ratings.models import SimilarItem
scores = {}
total_sim = {}
for item in ratings_queryset.filter(user=user):
similar_items = SimilarItem.objects.get_for_item(item.content_object)
for similar_item in similar_items:
actual = similar_item.similar_object
lookup_kwargs = ratings_queryset.model.lookup_kwargs(actual)
lookup_kwargs['user'] = user
if ratings_queryset.filter(**lookup_kwargs):
continue
scores.setdefault(actual, 0)
scores[actual] += similar_item.score * item.score
total_sim.setdefault(actual, 0)
total_sim[actual] += similar_item.score
rankings = [(score / total_sim[item], item) for item, score in scores.items()]
rankings.sort()
rankings.reverse()
return rankings
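# Hedged illustration (uncalled) of the weighted-average scoring used by
# recommendations() and recommended_items(), rewritten over plain dictionaries
# so the arithmetic can be followed without Django models; every name and
# number below is made up for the example.
def _weighted_scores_example():
    sims = {'alice': 0.9, 'bob': 0.4}        # similarity to two other raters
    ratings = {'alice': {'item1': 4, 'item2': 2}, 'bob': {'item1': 1}}
    totals, sim_sums = {}, {}
    for other, sim in sims.items():
        for item, score in ratings[other].items():
            totals[item] = totals.get(item, 0) + score * sim
            sim_sums[item] = sim_sums.get(item, 0) + sim
    rankings = [(total / sim_sums[item], item) for item, total in totals.items()]
    rankings.sort(reverse=True)
    return rankings   # approximately [(3.08, 'item1'), (2.0, 'item2')]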
|
the-stack_0_24966
|
#
"""
Various entropies.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from texar.losses.losses_utils import mask_and_reduce, reduce_dimensions
from texar.utils.shapes import get_rank
# pylint: disable=too-many-arguments
__all__ = [
"entropy_with_logits",
"sequence_entropy_with_logits"
]
def _get_entropy(logits):
probs = tf.nn.softmax(logits) + 1e-8
entropy = - probs * tf.log(probs)
entropy = tf.reduce_sum(entropy, -1)
return entropy
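# Hedged numpy cross-check of _get_entropy (uncalled): softmax the logits along
# the last axis, then H = -sum_i p_i * log(p_i), with the same 1e-8 offset as
# above. numpy is imported locally so the TF-only imports of this module stay
# untouched; the helper name is an illustrative addition.
def _entropy_numpy_reference(logits_np):
    import numpy as np
    shifted = logits_np - logits_np.max(axis=-1, keepdims=True)
    probs = np.exp(shifted) / np.exp(shifted).sum(axis=-1, keepdims=True) + 1e-8
    return -(probs * np.log(probs)).sum(axis=-1)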
def entropy_with_logits(logits,
rank=None,
average_across_batch=True,
average_across_remaining=False,
sum_over_batch=False,
sum_over_remaining=True):
"""Shannon entropy given logits.
Args:
logits: Unscaled log probabilities of shape
`[batch_size, d_2, ..., d_{rank-1}, distribution_dim]`
and of dtype `float32` or `float64`.
The rank of the tensor is optionally specified by the argument
:attr:`rank`.
The tensor is considered as having `[batch_size, .., d_{rank-1}]`
elements, each of which has a distribution of length `d_rank`
(i.e., `distribution_dim`). So the last dimension is always
summed out to compute the entropy.
rank (int, optional): The rank of :attr:`logits`.
If `None` (default), :attr:`rank` is inferred automatically from
:attr:`logits`. If the inferred rank is `None`, :attr:`rank` is
set to 2, i.e., assuming :attr:`logits` is of shape
`[batch_size, distribution_dim]`
average_across_batch (bool): If set, average the entropy across the
            batch dimension. Must not set :attr:`average_across_batch`
and :attr:`sum_over_batch` at the same time.
average_across_remaining (bool): If set, average the entropy across the
            remaining dimensions. Must not set :attr:`average_across_remaining`
and :attr:`sum_over_remaining` at the same time.
Used only when :attr:`logits` has rank >= 3.
sum_over_batch (bool): If set, sum the entropy across the
batch dimension. Must not set :attr:`average_across_batch`
and :attr:`sum_over_batch` at the same time.
sum_over_remaining (bool): If set, sum the entropy across the
remaining dimension. Must not set :attr:`average_across_remaining`
and :attr:`sum_over_remaining` at the same time.
Used only when :attr:`logits` has rank >= 3.
"""
entropy = _get_entropy(logits)
if rank is None:
rank = get_rank(logits)
if rank is None:
rank = 2
rank -= 1 # reduced last dimension
# Reduces
if average_across_batch and sum_over_batch:
raise ValueError("Only one of `average_across_batch` and "
"`sum_over_batch` can be set.")
if average_across_remaining and sum_over_remaining:
raise ValueError("Only one of `average_across_remaining` and "
"`sum_over_remaining` can be set.")
sum_axes, average_axes = [], []
if sum_over_batch:
sum_axes.append(0)
if average_across_batch:
average_axes.append(0)
if sum_over_remaining and rank >= 2:
sum_axes += list(range(1, rank))
if average_across_remaining and rank >= 2:
average_axes += list(range(1, rank))
entropy = reduce_dimensions(
entropy, average_axes=average_axes, sum_axes=sum_axes)
return entropy
def sequence_entropy_with_logits(logits,
rank=None,
sequence_length=None,
average_across_batch=True,
average_across_timesteps=False,
average_across_remaining=False,
sum_over_batch=False,
sum_over_timesteps=True,
sum_over_remaining=True,
time_major=False):
"""Shannon entropy given logits.
Args:
logits: Unscaled log probabilities of shape
`[batch_size, max_time, d_3, ..., d_{rank-1}, distribution_dim]`
and of dtype `float32` or `float64`.
The rank of the tensor is optionally specified by the argument
:attr:`rank`.
The tensor is considered as having `[batch_size, .., d_{rank-1}]`
elements, each of which has a distribution of length `d_rank`
(i.e., `distribution_dim`). So the last dimension is always
summed out to compute the entropy.
The batch and time dimensions are exchanged if :attr:`time_major`
is `True`.
rank (int, optional): The rank of :attr:`logits`.
If `None` (default), :attr:`rank` is inferred automatically from
:attr:`logits`. If the inferred rank is `None`, :attr:`rank` is
set to 3, i.e., assuming :attr:`logits` is of shape
`[batch_size, max_time, distribution_dim]`
sequence_length (optional): A Tensor of shape `[batch_size]`.
Time steps beyond the respective sequence lengths are
            masked out of the entropy.
average_across_timesteps (bool): If set, average the entropy across
the time dimension. Must not set :attr:`average_across_timesteps`
and :attr:`sum_over_timesteps` at the same time.
average_across_batch (bool): If set, average the entropy across the
            batch dimension. Must not set :attr:`average_across_batch`
and :attr:`sum_over_batch` at the same time.
average_across_remaining (bool): If set, average the entropy across the
            remaining dimensions. Must not set :attr:`average_across_remaining`
and :attr:`sum_over_remaining` at the same time.
Used only when :attr:`logits` has rank >= 4.
sum_over_timesteps (bool): If set, sum the entropy across the
time dimension. Must not set :attr:`average_across_timesteps`
and :attr:`sum_over_timesteps` at the same time.
sum_over_batch (bool): If set, sum the entropy across the
batch dimension. Must not set :attr:`average_across_batch`
and :attr:`sum_over_batch` at the same time.
sum_over_remaining (bool): If set, sum the entropy across the
remaining dimension. Must not set :attr:`average_across_remaining`
and :attr:`sum_over_remaining` at the same time.
Used only when :attr:`logits` has rank >= 4.
time_major (bool): The shape format of the inputs. If `True`,
:attr:`logits` must have shape `[max_time, batch_size, ...]`.
If `False` (default), it must have shape
`[batch_size, max_time, ...]`.
"""
entropy = _get_entropy(logits)
if rank is None:
rank = get_rank(logits)
if rank is None:
rank = 3
rank -= 1 # reduced last dimension
entropy = mask_and_reduce(
entropy,
sequence_length,
rank=rank,
average_across_batch=average_across_batch,
average_across_timesteps=average_across_timesteps,
average_across_remaining=average_across_remaining,
sum_over_batch=sum_over_batch,
sum_over_timesteps=sum_over_timesteps,
sum_over_remaining=sum_over_remaining,
time_major=time_major)
return entropy
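# Hedged usage sketch (uncalled), assuming a TF 1.x graph-mode session, which is
# what this module targets; the shapes and lengths below are illustrative only.
def _example_sequence_entropy():
    logits = tf.random_uniform([4, 10, 50])   # [batch_size, max_time, vocab]
    lengths = tf.constant([10, 7, 5, 10])     # valid time steps per example
    ent = sequence_entropy_with_logits(logits, sequence_length=lengths)
    with tf.Session() as sess:
        return sess.run(ent)                  # a scalar under the default flags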
|
the-stack_0_24967
|
"""
lambda_function.py is a script file intended to be executed by the AWS Lambda service as a cron job.
The idea is to check if the proxy is working and, if not, switch to the failover.
Data comes in like the following, or at least that is what the 'event' argument should look like:
{
"domain_test": "www.google.com",
"proxy_port": 3128
}
If the content of the event is null, the default values specified in the handler are used.
@return: A Dictionary if there was a failover, nothing otherwise
@author: Fauzi Gomez
@email: [email protected]
"""
import json
import boto3
import os
import random
import urllib.request
import urllib.parse
import socket
import http.client
outcome = {}
"""
function to get autoscaling groups
@var String name [Optional]: the name of the autoscaling group you want.
@return Dictionary containing the ASG looked up, or all of them if the variable is omitted
"""
def get_asg(name=None):
client = boto3.client('autoscaling')
if name != None and os.environ.get('vpc_name') != None:
ASG = client.describe_auto_scaling_groups(AutoScalingGroupNames=[name])['AutoScalingGroups']
del client
else:
ASG = client.describe_auto_scaling_groups()['AutoScalingGroups']
del client
return ASG
"""
function to get the vpc_id of a specific instance
@var list instance_info should contain the output of the function get_instances_info
@return String the vpc id of the instance in question
"""
def get_instance_vpc_id(instance_info):
for reservation in instance_info['Reservations']:
for instance in reservation['Instances']:
return instance['VpcId']
"""
function to get the instance ids of an autoscaling group
@var list autoscaling_group autoscaling groups from the function get_asg
@return list with the instance ids found
"""
def get_instances_id(autoscaling_group):
ids_list = []
for instances in autoscaling_group:
for instance in instances['Instances']:
ids_list.append(instance['InstanceId'])
return ids_list
"""
function to get a list of instances with a "Healthy" status according to an autoscaling group
@var list autoscaling_group autoscaling groups from the function get_asg
@return list containing the ids of the healthy instances
"""
def get_healthy_instances_id(autoscaling_group):
ids_list = []
for instances in autoscaling_group:
for instance in instances['Instances']:
if instance['HealthStatus'] == "Healthy":
ids_list.append(instance['InstanceId'])
return ids_list
"""
function to get the status of instances according to the instance information
@var list id_list list with the instance ids whose status you want to know
@return list status of the instances in question
"""
def get_instances_status(id_list):
client = boto3.client('ec2')
status = client.describe_instance_status(InstanceIds=id_list)
del client
return status
"""
function to get instance information based on an ENI id
@var string eni the eni id
@return dictionary with the result of describe_instances
"""
def get_instance_by_eni(eni):
client = boto3.client('ec2')
reservations = client.describe_instances(Filters=[{'Name':'network-interface.network-interface-id','Values': [eni]}])
del client
for reservation in reservations['Reservations']:
for instance in reservation['Instances']:
return instance
#return instance['Reservations'][0]['Instances'][0]
"""
function to get the eni ids of an instance
@var list instances_id list with the instance ids whose eni ids you want
@return list with the eni ids of the instances
"""
def get_instances_eni(instances_id):
reservations = get_instances_info(instances_id)
ni_id = []
for reservation in reservations['Reservations']:
for instance in reservation['Instances']:
for network_interfaces in instance['NetworkInterfaces']:
ni_id.append(network_interfaces['NetworkInterfaceId'])
return ni_id
"""
function to get the private ip of instances
@var list instances_id list with the instance ids whose private ips you want
@return list with the private ips of the instances in the variable
"""
def get_instances_priv_ip(instances_id):
reservations = get_instances_info(instances_id)
priv_ip = []
for reservation in reservations['Reservations']:
for instance in reservation['Instances']:
priv_ip.append(instance['PrivateIpAddress'])
return priv_ip
"""
function to get the information of instances
@var list id_list list with the instances id you want to get the description
@return list instances info
"""
def get_instances_info(id_list):
client = boto3.client('ec2')
instances = client.describe_instances(InstanceIds=id_list)
del client
return instances
"""
function to get a vpc based on its id
@var String vpc_id the id of the vpc you want to get
@return Dict containing the vpc data
"""
def get_vpc(vpc_id):
client = boto3.resource('ec2')
vpc = client.Vpc(vpc_id)
return vpc
"""
function to get route tables information
@var String vpc_id id of the VPC where the route belongs to
String name name of the route in question
@return Dict containing the information of the table
"""
def get_route_table(vpc_id,name):
client = boto3.client('ec2')
route_table = client.describe_route_tables(Filters=[{'Name':'tag:Name','Values': [name]},{'Name':'vpc-id','Values':[vpc_id]}])
del client
return route_table
"""
function to get the routing table id of a routing table
@var Dict route_table expected the value of the function get_route_table
@return String the routing table id
"""
def get_route_table_id(route_table):
for table in route_table['RouteTables']:
for association in table['Associations']:
return association['RouteTableId']
# There should always be one single RouteTables, otherwise we only care for the first one ?
#return route_table['RouteTables'][0]['RouteTableId']
"""
function to request an address from the internet. The whole lambda function is expected to be within a single VPC and
associated with the same subnets as the EKS workers, to fairly simulate traffic out to the internet from the
workers
@var String url address to attempt a request
@return Int with the HTTP response code
"""
def test_proxy(url):
response = ''
try:
"""
#url = 'https://%s' % (url)
#f = urllib.request.urlopen(url)
#print(f.read())
#response = f.getcode()
"""
        # The only reason we don't do https on checkups is that the proxy would log only the IP instead of the domain.
        # We want to know the domain this function is calling out to.
# conn = http.client.HTTPSConnection(url)
conn = http.client.HTTPConnection(url,timeout=3)
conn.request("GET", "/")
response = conn.getresponse().status
except Exception as e:
outcome['test_proxy'] = "The url '%s' check failed with the following message: '%s'" % (url,e)
#print(e)
if str(e) == 'HTTP Error 403: Forbidden':
response = 403
elif 'Connection refused' in str(e):
# if we get <urlopen error [Errno 111] Connection refused> then we know the squid is busted or something similar
response = 111
else:
# if anything else, then let's assume the current default GW is not working
response = 112
return response
"""
function that searches for a default gateway in a defined routing table
@var string rtid the id of the routing table to be queried
@return Dictionary with a boto response of the request
"""
def exist_default_gw(rtid):
client = boto3.client('ec2')
response = client.describe_route_tables(Filters=[{'Name': 'route.destination-cidr-block','Values':['0.0.0.0/0']}],RouteTableIds=[rtid])
del client
return response
"""
function to delete a default gateway of a routing table
@var String rtid routing table id where you want the default GW deleted
@return Int with the response of the attempt. HTTP code
"""
def del_default_gw(rtid):
client = boto3.client('ec2')
response = client.delete_route(DestinationCidrBlock='0.0.0.0/0',RouteTableId=rtid)
del client
return response
"""
function to set a default route to a routing table
@var String eni eni id that will be handling the traffic
String rtid routing table id you want to set the default GW
@return Int with the response of the attempt. HTTP code
"""
def set_default_gw(eni,rtid):
client = boto3.client('ec2')
#print(eni)
response = exist_default_gw(rtid)
if len(response['RouteTables']) > 0:
response = del_default_gw(rtid)
if response['ResponseMetadata'] and response['ResponseMetadata']['HTTPStatusCode'] == 200:
response = client.create_route(DestinationCidrBlock='0.0.0.0/0',NetworkInterfaceId=eni,RouteTableId=rtid)
if response['Return'] and response['ResponseMetadata']['HTTPStatusCode'] == 200:
outcome[rtid] = "Route %s default GW successfully changed to %s" % (rtid,eni)
#print("Route %s changed successfully" % rtid)
else:
outcome[eni] = response
#print(response)
else:
outcome[eni] = response
#print(response)
del client
return response
"""
function to check if a port is open for an address
@var String addr address you want to check for the port
Int port port to check
@return int 0 if open, a non-zero error code otherwise
"""
def check_port(addr,port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(5.0)
result = sock.connect_ex((addr, port))
sock.close
return result
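"""
Hedged local-debugging sketch (uncalled): the same TCP reachability test as
check_port, kept as a separate helper; 'example.com' and port 80 are
illustrative values, not anything the Lambda relies on
@return int 0 if the port answered, a non-zero error code otherwise
"""
def _debug_check_port(addr='example.com', port=80):
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.settimeout(5.0)
    try:
        return sock.connect_ex((addr, port))
    finally:
        sock.close()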
"""
function to get a hosted zone in route53 based on the comments
@var String comment what you are looking for
@return Dict with the hosted zone information
"""
def get_hosted_zone(comment):
client = boto3.client('route53')
zones = client.list_hosted_zones()
del client
for zone in zones['HostedZones']:
if comment in zone['Config']['Comment']:
return zone
"""
function to get the recordsets of a hosted zone
@var String zone_id id of the zone you want the recordsets
@return Dict with the recordsets
"""
def get_record_sets(zone_id):
client = boto3.client('route53')
record_sets = client.list_resource_record_sets(HostedZoneId=zone_id)
del client
return record_sets
"""
function to determine if a recordset exists in a hosted zone
@var list record_sets expected the value of the function get_record_sets
String name name of the record set you are looking for
@return Bool True if it exists, False otherwise
"""
def exist_record_set(record_sets, name):
for record_set in record_sets['ResourceRecordSets']:
if name in record_set['Name']:
return True
return False
"""
function to get a vpc based upon its name
@var string name name of the vpc you want
@return dictionary with the vpc description
"""
def get_vpc(name):
client = boto3.client('ec2')
vpcs = client.describe_vpcs(Filters=[{'Name':'tag:Name','Values':[name]}])
del client
for vpc in vpcs['Vpcs']:
return vpc
"""
function to set a recordset in a hosted zone
@var String zone_id the zone you want the recordset created
String name of the record you want created
String action ["CREATE", "UPSERT", "DELETE"]
String type type of the record ["A", "AAAA", "CNAME", ...]
Int ttl time to live for the record
String value where the record will point to
@return nothing
"""
def change_resource_record_sets(zone_id,name,action,type,ttl,value):
try:
client = boto3.client('route53')
response = client.change_resource_record_sets(
HostedZoneId=zone_id,
ChangeBatch= {
'Comment': '%s %s record' % (action, value),
'Changes': [
{
'Action': action,
'ResourceRecordSet': {
'Name': name,
'Type': type,
'TTL': ttl,
'ResourceRecords': [{'Value': value}]
}
}]
})
except Exception as e:
print(e)
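"""
Hedged example wrapper (never called by the Lambda): shows the argument order
expected by change_resource_record_sets; the zone id and address below are
placeholders for illustration only
"""
def _example_upsert_record(zone_id='Z123EXAMPLE', ip='10.0.0.12'):
    change_resource_record_sets(zone_id, 'cloud-proxy.internal.io', 'UPSERT', 'A', 300, ip)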
"""
function to find the instance attribute sourceDestinationCheck for a single instance
this must be set to false if we want our instance to serve as proxy and be
the default gateway for a specific route table
@var string instance_id the id of the instance we want to know the value of the attribute
@return boolean True or False based on the attribute value
"""
def get_sourceDestinationCheck_attr(instance_id):
client = boto3.client('ec2')
response = client.describe_instance_attribute(InstanceId=instance_id, Attribute='sourceDestCheck')
del client
return response['SourceDestCheck']['Value']
"""
function to set the attribute sourceDestinationCheck to false for a single instance
@var string instance_id the id of the instance whose attribute we want set to false
@return dictionary with the execution results
"""
def set_sourceDestinationCheck_attr(instance_id,value=False):
client = boto3.client('ec2')
response = client.modify_instance_attribute(InstanceId=instance_id, SourceDestCheck={'Value': value})
del client
return response
def lambda_handler(event, context):
if 'domain_test' in event:
domain = event['domain_test']
elif os.environ.get('domain_test') is not None:
domain = os.environ['domain_test']
else:
domain = 'gen3.org'
#print(domain)
http_code = test_proxy(domain)
#print (http_code)
#data = socket.gethostbyname_ex(domain)
#print ("\n\nThe IP Address of the Domain Name is: "+repr(data))
#conn = http.client.HTTPSConnection(domain)
#conn.request("GET", "/")
#r1 = conn.getresponse()
#print(r1.status, r1.reason)
if 'proxy_port' in event:
proxy_port = event['proxy_port']
elif os.environ.get('proxy_port') is not None:
proxy_port = os.environ['proxy_port']
else:
proxy_port = 3128
#return
#internal controls
statusCode = 200
if http_code != 200:
if os.environ.get('vpc_name') is not None:
autoscaling_group = get_asg("squid-auto-"+os.environ['vpc_name'])
instances_ids = get_healthy_instances_id(autoscaling_group)
if len(instances_ids) > 0:
for instance_id in instances_ids:
instance_priv_ip = get_instances_priv_ip([instance_id])
if get_sourceDestinationCheck_attr(instance_id):
set_sourceDestinationCheck_attr(instance_id)
outcome['sourceDestinationCheck'] = "sourceDestinationCheck set to false for %s" % instance_id
#print("sourceDestinationCheck set to false for %s" % instance_id)
if check_port(instance_priv_ip[0],proxy_port) == 0:
healthy_instance_eni = get_instances_eni([instance_id])
healthy_instance_priv_ip = get_instances_priv_ip([instance_id])
vpc_id = get_instance_vpc_id(get_instances_info([instance_id]))
eks_private_route_table_id = get_route_table_id(get_route_table(vpc_id,'eks_private'))
private_kube_route_table_id = get_route_table_id(get_route_table(vpc_id,'private_kube'))
try:
set_default_gw(healthy_instance_eni[0],eks_private_route_table_id)
                            outcome['eks_private'] = 'successfully changed the default route for eks_private routing table'
                            #print('successfully changed the default route for eks_private routing table')
except Exception as e:
statusCode = statusCode + 1
outcome['eks_private'] = e
#print(e)
try:
set_default_gw(healthy_instance_eni[0],private_kube_route_table_id)
                            outcome['private_kube'] = 'successfully changed the default route for private_kube routing table'
                            #print('successfully changed the default route for private_kube routing table')
except Exception as e:
statusCode = statusCode + 1
outcome['private_kube'] = e
#print(e)
zone = get_hosted_zone(os.environ['vpc_name'])
zone_id = zone['Id']
#print(zone_id)
record_sets = get_record_sets(zone_id)
if exist_record_set(record_sets,'cloud-proxy'):
try:
change_resource_record_sets(zone_id,'cloud-proxy.internal.io','UPSERT','A',300,instance_priv_ip[0])
outcome['cloud-proxy'] = "subdomain cloud-proxy.internal.io changed for %s with ip: %s" % (zone_id,instance_priv_ip[0])
except Exception as e:
statusCode = statusCode + 1
outcome['cloud-proxy'] = e
#outcome['cloud-proxy'] = "subdomain cloud-proxy.internal.io changed for %s" % zone_id
#print("subdomain cloud-proxy.internal.io changed for %s" % zone_id)
else:
try:
change_resource_record_sets(zone_id,'cloud-proxy.internal.io','CREATE','A',300,instance_priv_ip[0])
outcome['cloud-proxy'] = "subdomain cloud-proxy.internal.io created for %s with ip: %s" % (zone_id,instance_priv_ip[0])
except Exception as e:
statusCode = statusCode + 1
outcome['cloud-proxy'] = e
#print("subdomain cloud-proxy.internal.io created for %s" % zone_id)
if statusCode != 200:
                            outcome['message'] = 'Proxy switch partially successful'
else:
                            outcome['message'] = 'Proxy switch successful'
break
else:
outcome['message'] = 'No healthy instances found'
else:
outcome['message'] = 'ERROR: The VPC name has not been specified, cannot continue'
print(json.dumps(outcome))
return json.dumps(outcome)
else:
zone = get_hosted_zone(os.environ['vpc_name'])
zone_id = zone['Id']
record_sets = get_record_sets(zone_id)
if not exist_record_set(record_sets,'cloud-proxy'):
try:
vpc_id = get_vpc(os.environ['vpc_name'])
#print(vpc_id)
route_tables = get_route_table(vpc_id['VpcId'],'eks_private')
ip = ''
for route_table in route_tables['RouteTables']:
for route in route_table['Routes']:
if route['DestinationCidrBlock'] == '0.0.0.0/0':
ip = get_instance_by_eni(route['NetworkInterfaceId'])['PrivateIpAddress']
break
if ip:
change_resource_record_sets(zone_id,'cloud-proxy.internal.io','CREATE','A',300,ip)
outcome['cloud-proxy'] = "subdomain cloud-proxy.internal.io created for %s with ip: %s" % (zone_id,ip)
else:
statusCode = statusCode + 1
outcome['cloud-proxy'] = "subdomain cloud-proxy.internal.io could not be created for %s, no ip was found" % zone_id
except Exception as e:
statusCode = statusCode + 1
outcome['cloud-proxy'] = e
if statusCode != 200:
            outcome['message'] = 'Proxy switch partially successful'
else:
            outcome['message'] = 'Proxy switch successful'
print(json.dumps(outcome))
return json.dumps(outcome)
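"""
Hedged local test sketch (not part of the deployed Lambda): assumes AWS
credentials and the 'vpc_name' environment variable are available, then invokes
the handler with the same shape of event the cron trigger would send
"""
if __name__ == "__main__":
    sample_event = {"domain_test": "www.google.com", "proxy_port": 3128}
    print(lambda_handler(sample_event, None))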
|
the-stack_0_24968
|
#!/usr/bin/env python3
#
# This file is part of LiteX-Boards.
#
# Copyright (c) 2018-2019 Rohit Singh <[email protected]>
# Copyright (c) 2019 Florent Kermarrec <[email protected]>
# SPDX-License-Identifier: BSD-2-Clause
import os
import argparse
import sys
from migen import *
from litex_boards.platforms import nereid
from litex.soc.interconnect.csr import *
from litex.soc.integration.soc_core import *
from litex.soc.integration.soc_sdram import *
from litex.soc.integration.builder import *
from litex.soc.cores.clock import *
from litedram.modules import MT8KTF51264
from litedram.phy import s7ddrphy
from litepcie.phy.s7pciephy import S7PCIEPHY
from litepcie.core import LitePCIeEndpoint, LitePCIeMSI
from litepcie.frontend.dma import LitePCIeDMA
from litepcie.frontend.wishbone import LitePCIeWishboneBridge
from litepcie.software import generate_litepcie_software
# CRG ----------------------------------------------------------------------------------------------
class CRG(Module):
def __init__(self, platform, sys_clk_freq):
self.clock_domains.cd_sys = ClockDomain()
self.clock_domains.cd_sys4x = ClockDomain(reset_less=True)
self.clock_domains.cd_idelay = ClockDomain()
# Clk/Rst
clk100 = platform.request("clk100")
# PLL
self.submodules.pll = pll = S7PLL()
pll.register_clkin(clk100, 100e6)
pll.create_clkout(self.cd_sys, sys_clk_freq)
pll.create_clkout(self.cd_sys4x, 4*sys_clk_freq)
pll.create_clkout(self.cd_idelay, 200e6)
self.submodules.idelayctrl = S7IDELAYCTRL(self.cd_idelay)
# BaseSoC -----------------------------------------------------------------------------------------
class BaseSoC(SoCCore):
def __init__(self, platform, with_pcie=False, **kwargs):
sys_clk_freq = int(100e6)
# SoCCore ----------------------------------------------------------------------------------
SoCCore.__init__(self, platform, sys_clk_freq,
ident = "LiteX SoC on Nereid",
ident_version = True,
**kwargs)
# CRG --------------------------------------------------------------------------------------
self.submodules.crg = CRG(platform, sys_clk_freq)
self.add_csr("crg")
# DDR3 SDRAM -------------------------------------------------------------------------------
if not self.integrated_main_ram_size:
self.submodules.ddrphy = s7ddrphy.K7DDRPHY(platform.request("ddram"),
memtype = "DDR3",
nphases = 4,
sys_clk_freq = sys_clk_freq,
iodelay_clk_freq = 200e6)
self.add_csr("ddrphy")
self.add_sdram("sdram",
phy = self.ddrphy,
module = MT8KTF51264(sys_clk_freq, "1:4", speedgrade="800"),
origin = self.mem_map["main_ram"],
size = kwargs.get("max_sdram_size", 0x40000000),
l2_cache_size = kwargs.get("l2_size", 8192),
l2_cache_min_data_width = kwargs.get("min_l2_data_width", 128),
l2_cache_reverse = True
)
# PCIe -------------------------------------------------------------------------------------
if with_pcie:
# PHY
self.submodules.pcie_phy = S7PCIEPHY(platform, platform.request("pcie_x4"),
data_width = 128,
bar0_size = 0x20000)
platform.add_false_path_constraints(self.crg.cd_sys.clk, self.pcie_phy.cd_pcie.clk)
self.add_csr("pcie_phy")
# Endpoint
self.submodules.pcie_endpoint = LitePCIeEndpoint(self.pcie_phy, max_pending_requests=8)
# Wishbone bridge
self.submodules.pcie_bridge = LitePCIeWishboneBridge(self.pcie_endpoint,
base_address = self.mem_map["csr"])
self.add_wb_master(self.pcie_bridge.wishbone)
# DMA0
self.submodules.pcie_dma0 = LitePCIeDMA(self.pcie_phy, self.pcie_endpoint,
with_buffering = True, buffering_depth=1024,
with_loopback = True)
self.add_csr("pcie_dma0")
self.add_constant("DMA_CHANNELS", 1)
# MSI
self.submodules.pcie_msi = LitePCIeMSI()
self.add_csr("pcie_msi")
self.comb += self.pcie_msi.source.connect(self.pcie_phy.msi)
self.interrupts = {
"PCIE_DMA0_WRITER": self.pcie_dma0.writer.irq,
"PCIE_DMA0_READER": self.pcie_dma0.reader.irq,
}
for i, (k, v) in enumerate(sorted(self.interrupts.items())):
self.comb += self.pcie_msi.irqs[i].eq(v)
self.add_constant(k + "_INTERRUPT", i)
# Build --------------------------------------------------------------------------------------------
def main():
parser = argparse.ArgumentParser(description="LiteX SoC on Nereid")
parser.add_argument("--build", action="store_true", help="Build bitstream")
parser.add_argument("--with-pcie", action="store_true", help="Enable PCIe support")
parser.add_argument("--driver", action="store_true", help="Generate LitePCIe driver")
parser.add_argument("--load", action="store_true", help="Load bitstream")
builder_args(parser)
soc_sdram_args(parser)
args = parser.parse_args()
# Enforce arguments
args.csr_data_width = 32
platform = nereid.Platform()
soc = BaseSoC(platform, with_pcie=args.with_pcie, **soc_sdram_argdict(args))
builder = Builder(soc, **builder_argdict(args))
builder.build(run=args.build)
if args.driver:
generate_litepcie_software(soc, os.path.join(builder.output_dir, "driver"))
if args.load:
prog = soc.platform.create_programmer()
prog.load_bitstream(os.path.join(builder.gateware_dir, soc.build_name + ".bit"))
if __name__ == "__main__":
main()
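# Hedged usage notes: the flags come from the argparse setup above, but the file
# name and an installed FPGA toolchain are assumptions.
#   python3 nereid.py --build --with-pcie   # build the bitstream with PCIe
#   python3 nereid.py --driver              # generate the LitePCIe driver
#   python3 nereid.py --load                # load the bitstream onto the board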
|
the-stack_0_24969
|
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <[email protected]>
#
import copy
import time
import ctypes
import numpy
import scipy.linalg
from pyscf import lib
from pyscf import scf
from pyscf.lib import logger
from pyscf.ao2mo import _ao2mo
libri = lib.load_library('libri')
def density_fit(mf, auxbasis=None, with_df=None, only_dfj=False):
'''For the given SCF object, update the J, K matrix constructor with
corresponding density fitting integrals.
Args:
mf : an SCF object
Kwargs:
auxbasis : str or basis dict
Same format to the input attribute mol.basis. If auxbasis is
None, optimal auxiliary basis based on AO basis (if possible) or
even-tempered Gaussian basis will be used.
        only_dfj : bool
            Compute Coulomb integrals only and apply no approximation to the HF
            exchange. Same as RIJONX in ORCA.
Returns:
An SCF object with a modified J, K matrix constructor which uses density
fitting integrals to compute J and K
Examples:
>>> mol = gto.M(atom='H 0 0 0; F 0 0 1', basis='ccpvdz', verbose=0)
>>> mf = scf.density_fit(scf.RHF(mol))
>>> mf.scf()
-100.005306000435510
>>> mol.symmetry = 1
>>> mol.build(0, 0)
>>> mf = scf.density_fit(scf.UHF(mol))
>>> mf.scf()
-100.005306000435510
'''
from pyscf import df
from pyscf.scf import dhf
from pyscf.soscf import newton_ah
assert(isinstance(mf, scf.hf.SCF))
if with_df is None:
if isinstance(mf, dhf.UHF):
with_df = df.DF4C(mf.mol)
else:
with_df = df.DF(mf.mol)
with_df.max_memory = mf.max_memory
with_df.stdout = mf.stdout
with_df.verbose = mf.verbose
with_df.auxbasis = auxbasis
mf_class = mf.__class__
if isinstance(mf, _DFHF):
if mf.with_df is None:
mf.with_df = with_df
elif getattr(mf.with_df, 'auxbasis', None) != auxbasis:
#logger.warn(mf, 'DF might have been initialized twice.')
mf = copy.copy(mf)
mf.with_df = with_df
mf.only_dfj = only_dfj
return mf
class DFHF(_DFHF, mf_class):
__doc__ = '''
Density fitting SCF class
Attributes for density-fitting SCF:
auxbasis : str or basis dict
Same format to the input attribute mol.basis.
The default basis 'weigend+etb' means weigend-coulomb-fit basis
for light elements and even-tempered basis for heavy elements.
with_df : DF object
Set mf.with_df = None to switch off density fitting mode.
See also the documents of class %s for other SCF attributes.
''' % mf_class
def __init__(self, mf, df, only_dfj):
self.__dict__.update(mf.__dict__)
self._eri = None
self.with_df = df
self.only_dfj = only_dfj
self._keys = self._keys.union(['with_df', 'only_dfj'])
def get_jk(self, mol=None, dm=None, hermi=1, with_j=True, with_k=True,
omega=None):
if dm is None: dm = self.make_rdm1()
if self.with_df and self.only_dfj:
vj = vk = None
if with_j:
vj, vk = self.with_df.get_jk(dm, hermi, True, False,
self.direct_scf_tol, omega)
if with_k:
vk = mf_class.get_jk(self, mol, dm, hermi, False, True, omega)[1]
elif self.with_df:
vj, vk = self.with_df.get_jk(dm, hermi, with_j, with_k,
self.direct_scf_tol, omega)
else:
vj, vk = mf_class.get_jk(self, mol, dm, hermi, with_j, with_k, omega)
return vj, vk
# for pyscf 1.0, 1.1 compatibility
@property
def _cderi(self):
naux = self.with_df.get_naoaux()
return next(self.with_df.loop(blksize=naux))
@_cderi.setter
def _cderi(self, x):
self.with_df._cderi = x
@property
def auxbasis(self):
return getattr(self.with_df, 'auxbasis', None)
return DFHF(mf, with_df, only_dfj)
# 1. A tag to label the derived SCF class
# 2. A hook to register DF specific methods, such as nuc_grad_method.
class _DFHF(object):
def nuc_grad_method(self):
from pyscf.df.grad import rhf, uhf, rks, uks
if isinstance(self, (scf.uhf.UHF, scf.rohf.ROHF)):
if getattr(self, 'xc', None):
return uks.Gradients(self)
else:
return uhf.Gradients(self)
elif isinstance(self, scf.rhf.RHF):
if getattr(self, 'xc', None):
return rks.Gradients(self)
else:
return rhf.Gradients(self)
else:
raise NotImplementedError
Gradients = nuc_grad_method
def Hessian(self):
from pyscf.df.hessian import rhf, uhf, rks, uks
if isinstance(self, (scf.uhf.UHF, scf.rohf.ROHF)):
if getattr(self, 'xc', None):
return uks.Hessian(self)
else:
return uhf.Hessian(self)
elif isinstance(self, scf.rhf.RHF):
if getattr(self, 'xc', None):
return rks.Hessian(self)
else:
return rhf.Hessian(self)
else:
raise NotImplementedError
def method_not_implemented(self, *args, **kwargs):
raise NotImplementedError
NMR = method_not_implemented
NSR = method_not_implemented
Polarizability = method_not_implemented
RotationalGTensor = method_not_implemented
MP2 = method_not_implemented
CISD = method_not_implemented
CCSD = method_not_implemented
CASCI = method_not_implemented
CASSCF = method_not_implemented
def get_jk(dfobj, dm, hermi=1, with_j=True, with_k=True, direct_scf_tol=1e-13):
assert(with_j or with_k)
if (not with_k and not dfobj.mol.incore_anyway and
# 3-center integral tensor is not initialized
dfobj._cderi is None):
return get_j(dfobj, dm, hermi, direct_scf_tol), None
t0 = t1 = (time.clock(), time.time())
log = logger.Logger(dfobj.stdout, dfobj.verbose)
fmmm = _ao2mo.libao2mo.AO2MOmmm_bra_nr_s2
fdrv = _ao2mo.libao2mo.AO2MOnr_e2_drv
ftrans = _ao2mo.libao2mo.AO2MOtranse2_nr_s2
null = lib.c_null_ptr()
dms = numpy.asarray(dm)
dm_shape = dms.shape
nao = dm_shape[-1]
dms = dms.reshape(-1,nao,nao)
nset = dms.shape[0]
vj = 0
vk = numpy.zeros_like(dms)
if with_j:
idx = numpy.arange(nao)
dmtril = lib.pack_tril(dms + dms.conj().transpose(0,2,1))
dmtril[:,idx*(idx+1)//2+idx] *= .5
if not with_k:
for eri1 in dfobj.loop():
rho = numpy.einsum('ix,px->ip', dmtril, eri1)
vj += numpy.einsum('ip,px->ix', rho, eri1)
elif getattr(dm, 'mo_coeff', None) is not None:
#TODO: test whether dm.mo_coeff matching dm
mo_coeff = numpy.asarray(dm.mo_coeff, order='F')
mo_occ = numpy.asarray(dm.mo_occ)
nmo = mo_occ.shape[-1]
mo_coeff = mo_coeff.reshape(-1,nao,nmo)
mo_occ = mo_occ.reshape(-1,nmo)
if mo_occ.shape[0] * 2 == nset: # handle ROHF DM
mo_coeff = numpy.vstack((mo_coeff, mo_coeff))
mo_occa = numpy.array(mo_occ> 0, dtype=numpy.double)
mo_occb = numpy.array(mo_occ==2, dtype=numpy.double)
assert(mo_occa.sum() + mo_occb.sum() == mo_occ.sum())
mo_occ = numpy.vstack((mo_occa, mo_occb))
orbo = []
for k in range(nset):
c = numpy.einsum('pi,i->pi', mo_coeff[k][:,mo_occ[k]>0],
numpy.sqrt(mo_occ[k][mo_occ[k]>0]))
orbo.append(numpy.asarray(c, order='F'))
max_memory = dfobj.max_memory - lib.current_memory()[0]
blksize = max(4, int(min(dfobj.blockdim, max_memory*.3e6/8/nao**2)))
buf = numpy.empty((blksize*nao,nao))
for eri1 in dfobj.loop(blksize):
naux, nao_pair = eri1.shape
assert(nao_pair == nao*(nao+1)//2)
if with_j:
rho = numpy.einsum('ix,px->ip', dmtril, eri1)
vj += numpy.einsum('ip,px->ix', rho, eri1)
for k in range(nset):
nocc = orbo[k].shape[1]
if nocc > 0:
buf1 = buf[:naux*nocc]
fdrv(ftrans, fmmm,
buf1.ctypes.data_as(ctypes.c_void_p),
eri1.ctypes.data_as(ctypes.c_void_p),
orbo[k].ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(naux), ctypes.c_int(nao),
(ctypes.c_int*4)(0, nocc, 0, nao),
null, ctypes.c_int(0))
vk[k] += lib.dot(buf1.T, buf1)
t1 = log.timer_debug1('jk', *t1)
else:
#:vk = numpy.einsum('pij,jk->pki', cderi, dm)
#:vk = numpy.einsum('pki,pkj->ij', cderi, vk)
rargs = (ctypes.c_int(nao), (ctypes.c_int*4)(0, nao, 0, nao),
null, ctypes.c_int(0))
dms = [numpy.asarray(x, order='F') for x in dms]
max_memory = dfobj.max_memory - lib.current_memory()[0]
blksize = max(4, int(min(dfobj.blockdim, max_memory*.22e6/8/nao**2)))
buf = numpy.empty((2,blksize,nao,nao))
for eri1 in dfobj.loop(blksize):
naux, nao_pair = eri1.shape
if with_j:
rho = numpy.einsum('ix,px->ip', dmtril, eri1)
vj += numpy.einsum('ip,px->ix', rho, eri1)
for k in range(nset):
buf1 = buf[0,:naux]
fdrv(ftrans, fmmm,
buf1.ctypes.data_as(ctypes.c_void_p),
eri1.ctypes.data_as(ctypes.c_void_p),
dms[k].ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(naux), *rargs)
buf2 = lib.unpack_tril(eri1, out=buf[1])
vk[k] += lib.dot(buf1.reshape(-1,nao).T, buf2.reshape(-1,nao))
t1 = log.timer_debug1('jk', *t1)
if with_j: vj = lib.unpack_tril(vj, 1).reshape(dm_shape)
if with_k: vk = vk.reshape(dm_shape)
logger.timer(dfobj, 'df vj and vk', *t0)
return vj, vk
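# Hedged dense reference (uncalled) for the density-fitting contractions above,
# written with plain einsum on an unpacked 3-center tensor; it is meant only as
# a readable cross-check, and the shapes (naux, nao) are generic assumptions.
def _df_jk_reference(cderi_tril, dm):
    eri3c = lib.unpack_tril(cderi_tril)            # (naux, nao, nao)
    rho = numpy.einsum('Pij,ji->P', eri3c, dm)     # fitted density coefficients
    vj = numpy.einsum('P,Pij->ij', rho, eri3c)     # Coulomb matrix
    half = numpy.einsum('Pij,jk->Pik', eri3c, dm)
    vk = numpy.einsum('Pik,Pkj->ij', half, eri3c)  # exchange matrix
    return vj, vk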
def get_j(dfobj, dm, hermi=1, direct_scf_tol=1e-13):
from pyscf.scf import _vhf
from pyscf.scf import jk
from pyscf.df import addons
t0 = t1 = (time.clock(), time.time())
mol = dfobj.mol
if dfobj._vjopt is None:
dfobj.auxmol = auxmol = addons.make_auxmol(mol, dfobj.auxbasis)
opt = _vhf.VHFOpt(mol, 'int3c2e', 'CVHFnr3c2e_schwarz_cond')
opt.direct_scf_tol = direct_scf_tol
# q_cond part 1: the regular int2e (ij|ij) for mol's basis
opt.init_cvhf_direct(mol, 'int2e', 'CVHFsetnr_direct_scf')
mol_q_cond = lib.frompointer(opt._this.contents.q_cond, mol.nbas**2)
# Update q_cond to include the 2e-integrals (auxmol|auxmol)
j2c = auxmol.intor('int2c2e', hermi=1)
j2c_diag = numpy.sqrt(abs(j2c.diagonal()))
aux_loc = auxmol.ao_loc
aux_q_cond = [j2c_diag[i0:i1].max()
for i0, i1 in zip(aux_loc[:-1], aux_loc[1:])]
q_cond = numpy.hstack((mol_q_cond, aux_q_cond))
fsetqcond = _vhf.libcvhf.CVHFset_q_cond
fsetqcond(opt._this, q_cond.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(q_cond.size))
try:
opt.j2c = j2c = scipy.linalg.cho_factor(j2c, lower=True)
opt.j2c_type = 'cd'
except scipy.linalg.LinAlgError:
opt.j2c = j2c
opt.j2c_type = 'regular'
# jk.get_jk function supports 4-index integrals. Use bas_placeholder
# (l=0, nctr=1, 1 function) to hold the last index.
bas_placeholder = numpy.array([0, 0, 1, 1, 0, 0, 0, 0],
dtype=numpy.int32)
fakemol = mol + auxmol
fakemol._bas = numpy.vstack((fakemol._bas, bas_placeholder))
opt.fakemol = fakemol
dfobj._vjopt = opt
t1 = logger.timer_debug1(dfobj, 'df-vj init_direct_scf', *t1)
opt = dfobj._vjopt
fakemol = opt.fakemol
dm = numpy.asarray(dm, order='C')
dm_shape = dm.shape
nao = dm_shape[-1]
dm = dm.reshape(-1,nao,nao)
n_dm = dm.shape[0]
# First compute the density in auxiliary basis
# j3c = fauxe2(mol, auxmol)
# jaux = numpy.einsum('ijk,ji->k', j3c, dm)
# rho = numpy.linalg.solve(auxmol.intor('int2c2e'), jaux)
nbas = mol.nbas
nbas1 = mol.nbas + dfobj.auxmol.nbas
shls_slice = (0, nbas, 0, nbas, nbas, nbas1, nbas1, nbas1+1)
with lib.temporary_env(opt, prescreen='CVHFnr3c2e_vj_pass1_prescreen',
_dmcondname='CVHFsetnr_direct_scf_dm'):
jaux = jk.get_jk(fakemol, dm, ['ijkl,ji->kl']*n_dm, 'int3c2e',
aosym='s2ij', hermi=0, shls_slice=shls_slice,
vhfopt=opt)
# remove the index corresponding to bas_placeholder
jaux = numpy.array(jaux)[:,:,0]
t1 = logger.timer_debug1(dfobj, 'df-vj pass 1', *t1)
if opt.j2c_type == 'cd':
rho = scipy.linalg.cho_solve(opt.j2c, jaux.T)
else:
rho = scipy.linalg.solve(opt.j2c, jaux.T)
# transform rho to shape (:,1,naux), to adapt to 3c2e integrals (ij|k)
rho = rho.T[:,numpy.newaxis,:]
t1 = logger.timer_debug1(dfobj, 'df-vj solve ', *t1)
# Next compute the Coulomb matrix
# j3c = fauxe2(mol, auxmol)
# vj = numpy.einsum('ijk,k->ij', j3c, rho)
with lib.temporary_env(opt, prescreen='CVHFnr3c2e_vj_pass2_prescreen',
_dmcondname=None):
# CVHFnr3c2e_vj_pass2_prescreen requires custom dm_cond
aux_loc = dfobj.auxmol.ao_loc
dm_cond = [abs(rho[:,:,i0:i1]).max()
for i0, i1 in zip(aux_loc[:-1], aux_loc[1:])]
dm_cond = numpy.array(dm_cond)
fsetcond = _vhf.libcvhf.CVHFset_dm_cond
fsetcond(opt._this, dm_cond.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(dm_cond.size))
vj = jk.get_jk(fakemol, rho, ['ijkl,lk->ij']*n_dm, 'int3c2e',
aosym='s2ij', hermi=1, shls_slice=shls_slice,
vhfopt=opt)
t1 = logger.timer_debug1(dfobj, 'df-vj pass 2', *t1)
logger.timer(dfobj, 'df-vj', *t0)
return numpy.asarray(vj).reshape(dm_shape)
def r_get_jk(dfobj, dms, hermi=1, with_j=True, with_k=True):
'''Relativistic density fitting JK'''
t0 = (time.clock(), time.time())
mol = dfobj.mol
c1 = .5 / lib.param.LIGHT_SPEED
tao = mol.tmap()
ao_loc = mol.ao_loc_2c()
n2c = ao_loc[-1]
def fjk(dm):
dm = numpy.asarray(dm, dtype=numpy.complex128)
fmmm = libri.RIhalfmmm_r_s2_bra_noconj
fdrv = _ao2mo.libao2mo.AO2MOr_e2_drv
ftrans = libri.RItranse2_r_s2
vj = numpy.zeros_like(dm)
vk = numpy.zeros_like(dm)
fcopy = libri.RImmm_r_s2_transpose
rargs = (ctypes.c_int(n2c), (ctypes.c_int*4)(0, n2c, 0, 0),
tao.ctypes.data_as(ctypes.c_void_p),
ao_loc.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(mol.nbas))
dmll = numpy.asarray(dm[:n2c,:n2c], order='C')
dmls = numpy.asarray(dm[:n2c,n2c:], order='C') * c1
dmsl = numpy.asarray(dm[n2c:,:n2c], order='C') * c1
dmss = numpy.asarray(dm[n2c:,n2c:], order='C') * c1**2
for erill, eriss in dfobj.loop():
naux, nao_pair = erill.shape
buf = numpy.empty((naux,n2c,n2c), dtype=numpy.complex)
buf1 = numpy.empty((naux,n2c,n2c), dtype=numpy.complex)
fdrv(ftrans, fmmm,
buf.ctypes.data_as(ctypes.c_void_p),
erill.ctypes.data_as(ctypes.c_void_p),
dmll.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(naux), *rargs) # buf == (P|LL)
rho = numpy.einsum('kii->k', buf)
fdrv(ftrans, fcopy,
buf1.ctypes.data_as(ctypes.c_void_p),
erill.ctypes.data_as(ctypes.c_void_p),
dmll.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(naux), *rargs) # buf1 == (P|LL)
vk[:n2c,:n2c] += numpy.dot(buf1.reshape(-1,n2c).T,
buf.reshape(-1,n2c))
fdrv(ftrans, fmmm,
buf.ctypes.data_as(ctypes.c_void_p),
eriss.ctypes.data_as(ctypes.c_void_p),
dmls.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(naux), *rargs) # buf == (P|LS)
vk[:n2c,n2c:] += numpy.dot(buf1.reshape(-1,n2c).T,
buf.reshape(-1,n2c)) * c1
fdrv(ftrans, fmmm,
buf.ctypes.data_as(ctypes.c_void_p),
eriss.ctypes.data_as(ctypes.c_void_p),
dmss.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(naux), *rargs) # buf == (P|SS)
rho += numpy.einsum('kii->k', buf)
vj[:n2c,:n2c] += lib.unpack_tril(numpy.dot(rho, erill), 1)
vj[n2c:,n2c:] += lib.unpack_tril(numpy.dot(rho, eriss), 1) * c1**2
fdrv(ftrans, fcopy,
buf1.ctypes.data_as(ctypes.c_void_p),
eriss.ctypes.data_as(ctypes.c_void_p),
dmss.ctypes.data_as(ctypes.c_void_p),
                 ctypes.c_int(naux), *rargs) # buf1 == (P|SS)
vk[n2c:,n2c:] += numpy.dot(buf1.reshape(-1,n2c).T,
buf.reshape(-1,n2c)) * c1**2
if hermi != 1:
fdrv(ftrans, fmmm,
buf.ctypes.data_as(ctypes.c_void_p),
erill.ctypes.data_as(ctypes.c_void_p),
dmsl.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(naux), *rargs) # buf == (P|SL)
vk[n2c:,:n2c] += numpy.dot(buf1.reshape(-1,n2c).T,
buf.reshape(-1,n2c)) * c1
if hermi == 1:
vk[n2c:,:n2c] = vk[:n2c,n2c:].T.conj()
return vj, vk
if isinstance(dms, numpy.ndarray) and dms.ndim == 2:
vj, vk = fjk(dms)
else:
vjk = [fjk(dm) for dm in dms]
vj = numpy.array([x[0] for x in vjk])
vk = numpy.array([x[1] for x in vjk])
logger.timer(dfobj, 'vj and vk', *t0)
return vj, vk
if __name__ == '__main__':
import pyscf.gto
import pyscf.scf
mol = pyscf.gto.Mole()
mol.build(
verbose = 0,
atom = [["O" , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)] ],
basis = 'ccpvdz',
)
method = density_fit(pyscf.scf.RHF(mol), 'weigend')
method.max_memory = 0
energy = method.scf()
print(energy, -76.0259362997)
method = density_fit(pyscf.scf.DHF(mol), 'weigend')
energy = method.scf()
print(energy, -76.0807386770) # normal DHF energy is -76.0815679438127
method = density_fit(pyscf.scf.UKS(mol), 'weigend')
energy = method.scf()
print(energy, -75.8547753298)
mol.build(
verbose = 0,
atom = [["O" , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)] ],
basis = 'ccpvdz',
spin = 1,
charge = 1,
)
method = density_fit(pyscf.scf.UHF(mol), 'weigend')
energy = method.scf()
print(energy, -75.6310072359)
method = density_fit(pyscf.scf.RHF(mol), 'weigend')
energy = method.scf()
print(energy, -75.6265157244)
|
the-stack_0_24971
|
from __future__ import print_function, division
from sympy.core.add import Add
from sympy.core.compatibility import is_sequence
from sympy.core.containers import Tuple
from sympy.core.expr import Expr
from sympy.core.mul import Mul
from sympy.core.relational import Equality, Relational
from sympy.core.singleton import S
from sympy.core.symbol import Symbol, Dummy
from sympy.core.sympify import sympify
from sympy.functions.elementary.piecewise import (piecewise_fold,
Piecewise)
from sympy.logic.boolalg import BooleanFunction
from sympy.tensor.indexed import Idx
from sympy.sets.sets import Interval
from sympy.sets.fancysets import Range
from sympy.utilities import flatten
from sympy.utilities.iterables import sift
def _common_new(cls, function, *symbols, **assumptions):
"""Return either a special return value or the tuple,
(function, limits, orientation). This code is common to
both ExprWithLimits and AddWithLimits."""
function = sympify(function)
if hasattr(function, 'func') and isinstance(function, Equality):
lhs = function.lhs
rhs = function.rhs
return Equality(cls(lhs, *symbols, **assumptions), \
cls(rhs, *symbols, **assumptions))
if function is S.NaN:
return S.NaN
if symbols:
limits, orientation = _process_limits(*symbols)
for i, li in enumerate(limits):
if len(li) == 4:
function = function.subs(li[0], li[-1])
limits[i] = Tuple(*li[:-1])
else:
# symbol not provided -- we can still try to compute a general form
free = function.free_symbols
if len(free) != 1:
raise ValueError(
"specify dummy variables for %s" % function)
limits, orientation = [Tuple(s) for s in free], 1
# denest any nested calls
while cls == type(function):
limits = list(function.limits) + limits
function = function.function
# Any embedded piecewise functions need to be brought out to the
# top level. We only fold Piecewise that contain the integration
# variable.
reps = {}
symbols_of_integration = set([i[0] for i in limits])
for p in function.atoms(Piecewise):
if not p.has(*symbols_of_integration):
reps[p] = Dummy()
# mask off those that don't
function = function.xreplace(reps)
# do the fold
function = piecewise_fold(function)
# remove the masking
function = function.xreplace({v: k for k, v in reps.items()})
return function, limits, orientation
def _process_limits(*symbols):
"""Process the list of symbols and convert them to canonical limits,
storing them as Tuple(symbol, lower, upper). The orientation of
the function is also returned when the upper limit is missing
so (x, 1, None) becomes (x, None, 1) and the orientation is changed.
"""
limits = []
orientation = 1
for V in symbols:
if isinstance(V, (Relational, BooleanFunction)):
variable = V.atoms(Symbol).pop()
V = (variable, V.as_set())
if isinstance(V, Symbol) or getattr(V, '_diff_wrt', False):
if isinstance(V, Idx):
if V.lower is None or V.upper is None:
limits.append(Tuple(V))
else:
limits.append(Tuple(V, V.lower, V.upper))
else:
limits.append(Tuple(V))
continue
elif is_sequence(V, Tuple):
if len(V) == 2 and isinstance(V[1], Range):
lo = V[1].inf
hi = V[1].sup
dx = abs(V[1].step)
V = [V[0]] + [0, (hi - lo)//dx, dx*V[0] + lo]
V = sympify(flatten(V)) # a list of sympified elements
if isinstance(V[0], (Symbol, Idx)) or getattr(V[0], '_diff_wrt', False):
newsymbol = V[0]
if len(V) == 2 and isinstance(V[1], Interval): # 2 -> 3
# Interval
V[1:] = [V[1].start, V[1].end]
elif len(V) == 3:
# general case
if V[2] is None and not V[1] is None:
orientation *= -1
V = [newsymbol] + [i for i in V[1:] if i is not None]
if not isinstance(newsymbol, Idx) or len(V) == 3:
if len(V) == 4:
limits.append(Tuple(*V))
continue
if len(V) == 3:
if isinstance(newsymbol, Idx):
# Idx represents an integer which may have
# specified values it can take on; if it is
# given such a value, an error is raised here
# if the summation would try to give it a larger
# or smaller value than permitted. None and Symbolic
# values will not raise an error.
lo, hi = newsymbol.lower, newsymbol.upper
try:
if lo is not None and not bool(V[1] >= lo):
raise ValueError("Summation will set Idx value too low.")
except TypeError:
pass
try:
if hi is not None and not bool(V[2] <= hi):
raise ValueError("Summation will set Idx value too high.")
except TypeError:
pass
limits.append(Tuple(*V))
continue
if len(V) == 1 or (len(V) == 2 and V[1] is None):
limits.append(Tuple(newsymbol))
continue
elif len(V) == 2:
limits.append(Tuple(newsymbol, V[1]))
continue
raise ValueError('Invalid limits given: %s' % str(symbols))
return limits, orientation
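# Hedged illustration of _process_limits, kept as an uncalled helper: with a
# plain Symbol, the (symbol, lower, upper) specification is passed through as a
# single canonical Tuple and the orientation stays 1.
def _process_limits_example():
    from sympy.abc import x
    return _process_limits((x, 1, 3))   # ([(x, 1, 3)], 1)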
class ExprWithLimits(Expr):
__slots__ = ['is_commutative']
def __new__(cls, function, *symbols, **assumptions):
pre = _common_new(cls, function, *symbols, **assumptions)
if type(pre) is tuple:
function, limits, _ = pre
else:
return pre
# limits must have upper and lower bounds; the indefinite form
# is not supported. This restriction does not apply to AddWithLimits
if any(len(l) != 3 or None in l for l in limits):
raise ValueError('ExprWithLimits requires values for lower and upper bounds.')
obj = Expr.__new__(cls, **assumptions)
arglist = [function]
arglist.extend(limits)
obj._args = tuple(arglist)
obj.is_commutative = function.is_commutative # limits already checked
return obj
@property
def function(self):
"""Return the function applied across limits.
Examples
========
>>> from sympy import Integral
>>> from sympy.abc import x
>>> Integral(x**2, (x,)).function
x**2
See Also
========
limits, variables, free_symbols
"""
return self._args[0]
@property
def limits(self):
"""Return the limits of expression.
Examples
========
>>> from sympy import Integral
>>> from sympy.abc import x, i
>>> Integral(x**i, (i, 1, 3)).limits
((i, 1, 3),)
See Also
========
function, variables, free_symbols
"""
return self._args[1:]
@property
def variables(self):
"""Return a list of the limit variables.
>>> from sympy import Sum
>>> from sympy.abc import x, i
>>> Sum(x**i, (i, 1, 3)).variables
[i]
See Also
========
function, limits, free_symbols
as_dummy : Rename dummy variables
sympy.integrals.integrals.Integral.transform : Perform mapping on the dummy variable
"""
return [l[0] for l in self.limits]
@property
def bound_symbols(self):
"""Return only variables that are dummy variables.
Examples
========
>>> from sympy import Integral
>>> from sympy.abc import x, i, j, k
>>> Integral(x**i, (i, 1, 3), (j, 2), k).bound_symbols
[i, j]
See Also
========
function, limits, free_symbols
as_dummy : Rename dummy variables
sympy.integrals.integrals.Integral.transform : Perform mapping on the dummy variable
"""
return [l[0] for l in self.limits if len(l) != 1]
@property
def free_symbols(self):
"""
This method returns the symbols in the object, excluding those
that take on a specific value (i.e. the dummy symbols).
Examples
========
>>> from sympy import Sum
>>> from sympy.abc import x, y
>>> Sum(x, (x, y, 1)).free_symbols
{y}
"""
# don't test for any special values -- nominal free symbols
# should be returned, e.g. don't return set() if the
# function is zero -- treat it like an unevaluated expression.
function, limits = self.function, self.limits
isyms = function.free_symbols
for xab in limits:
if len(xab) == 1:
isyms.add(xab[0])
continue
# take out the target symbol
if xab[0] in isyms:
isyms.remove(xab[0])
# add in the new symbols
for i in xab[1:]:
isyms.update(i.free_symbols)
return isyms
@property
def is_number(self):
"""Return True if the Sum has no free symbols, else False."""
return not self.free_symbols
def _eval_interval(self, x, a, b):
limits = [(i if i[0] != x else (x, a, b)) for i in self.limits]
integrand = self.function
return self.func(integrand, *limits)
def _eval_subs(self, old, new):
"""
Perform substitutions over non-dummy variables
of an expression with limits. Also, can be used
to specify point-evaluation of an abstract antiderivative.
Examples
========
>>> from sympy import Sum, oo
>>> from sympy.abc import s, n
>>> Sum(1/n**s, (n, 1, oo)).subs(s, 2)
Sum(n**(-2), (n, 1, oo))
>>> from sympy import Integral
>>> from sympy.abc import x, a
>>> Integral(a*x**2, x).subs(x, 4)
Integral(a*x**2, (x, 4))
See Also
========
variables : Lists the integration variables
transform : Perform mapping on the dummy variable for integrals
change_index : Perform mapping on the sum and product dummy variables
"""
from sympy.core.function import AppliedUndef, UndefinedFunction
func, limits = self.function, list(self.limits)
# If one of the expressions we are replacing is used as a func index
# one of two things happens.
# - the old variable first appears as a free variable
# so we perform all free substitutions before it becomes
# a func index.
# - the old variable first appears as a func index, in
# which case we ignore. See change_index.
# Reorder limits to match standard mathematical practice for scoping
limits.reverse()
if not isinstance(old, Symbol) or \
old.free_symbols.intersection(self.free_symbols):
sub_into_func = True
for i, xab in enumerate(limits):
if 1 == len(xab) and old == xab[0]:
if new._diff_wrt:
xab = (new,)
else:
xab = (old, old)
limits[i] = Tuple(xab[0], *[l._subs(old, new) for l in xab[1:]])
if len(xab[0].free_symbols.intersection(old.free_symbols)) != 0:
sub_into_func = False
break
if isinstance(old, AppliedUndef) or isinstance(old, UndefinedFunction):
sy2 = set(self.variables).intersection(set(new.atoms(Symbol)))
sy1 = set(self.variables).intersection(set(old.args))
if not sy2.issubset(sy1):
raise ValueError(
"substitution can not create dummy dependencies")
sub_into_func = True
if sub_into_func:
func = func.subs(old, new)
else:
# old is a Symbol and a dummy variable of some limit
for i, xab in enumerate(limits):
if len(xab) == 3:
limits[i] = Tuple(xab[0], *[l._subs(old, new) for l in xab[1:]])
if old == xab[0]:
break
# simplify redundant limits (x, x) to (x, )
for i, xab in enumerate(limits):
if len(xab) == 2 and (xab[0] - xab[1]).is_zero:
limits[i] = Tuple(xab[0], )
# Reorder limits back to representation-form
limits.reverse()
return self.func(func, *limits)
@property
def has_finite_limits(self):
"""
Returns True if the limits are known to be finite, either by the
explicit bounds, assumptions on the bounds, or assumptions on the
variables. False if known to be infinite, based on the bounds.
None if not enough information is available to determine.
Examples
========
>>> from sympy import Sum, Integral, Product, oo, Symbol
>>> x = Symbol('x')
>>> Sum(x, (x, 1, 8)).has_finite_limits
True
>>> Integral(x, (x, 1, oo)).has_finite_limits
False
>>> M = Symbol('M')
>>> Sum(x, (x, 1, M)).has_finite_limits
>>> N = Symbol('N', integer=True)
>>> Product(x, (x, 1, N)).has_finite_limits
True
See Also
========
has_reversed_limits
"""
ret_None = False
for lim in self.limits:
if len(lim) == 3:
if any(l.is_infinite for l in lim[1:]):
# Any of the bounds are +/-oo
return False
elif any(l.is_infinite is None for l in lim[1:]):
# Maybe there are assumptions on the variable?
if lim[0].is_infinite is None:
ret_None = True
else:
if lim[0].is_infinite is None:
ret_None = True
if ret_None:
return None
return True
@property
def has_reversed_limits(self):
"""
Returns True if the limits are known to be in reversed order, either
by the explicit bounds, assumptions on the bounds, or assumptions on the
variables. False if known to be in normal order, based on the bounds.
None if not enough information is available to determine.
Examples
========
>>> from sympy import Sum, Integral, Product, oo, Symbol
>>> x = Symbol('x')
>>> Sum(x, (x, 8, 1)).has_reversed_limits
True
>>> Sum(x, (x, 1, oo)).has_reversed_limits
False
>>> M = Symbol('M')
>>> Integral(x, (x, 1, M)).has_reversed_limits
>>> N = Symbol('N', integer=True, positive=True)
>>> Sum(x, (x, 1, N)).has_reversed_limits
False
>>> Product(x, (x, 2, N)).has_reversed_limits
>>> Product(x, (x, 2, N)).subs(N, N + 2).has_reversed_limits
False
See Also
========
sympy.concrete.expr_with_intlimits.ExprWithIntLimits.has_empty_sequence
"""
ret_None = False
for lim in self.limits:
if len(lim) == 3:
var, a, b = lim
dif = b - a
if dif.is_extended_negative:
return True
elif dif.is_extended_nonnegative:
continue
else:
ret_None = True
else:
return None
if ret_None:
return None
return False
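# Hedged illustration (not part of the original module): the two properties above
# combine explicit bound values with symbol assumptions, mirroring the doctests
# in their docstrings. A rough sketch of the expected behaviour:
#
#     >>> from sympy import Sum, Symbol, oo
#     >>> x, M = Symbol('x'), Symbol('M')
#     >>> Sum(x, (x, 1, 8)).has_finite_limits        # explicit finite bounds
#     True
#     >>> Sum(x, (x, 1, M)).has_finite_limits        # unknown bound -> None
#     >>> Sum(x, (x, 8, 1)).has_reversed_limits      # upper bound below lower bound
#     True
#     >>> Sum(x, (x, 1, oo)).has_reversed_limits
#     False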
class AddWithLimits(ExprWithLimits):
r"""Represents unevaluated oriented additions.
Parent class for Integral and Sum.
"""
def __new__(cls, function, *symbols, **assumptions):
pre = _common_new(cls, function, *symbols, **assumptions)
if type(pre) is tuple:
function, limits, orientation = pre
else:
return pre
obj = Expr.__new__(cls, **assumptions)
arglist = [orientation*function] # orientation not used in ExprWithLimits
arglist.extend(limits)
obj._args = tuple(arglist)
obj.is_commutative = function.is_commutative # limits already checked
return obj
def _eval_adjoint(self):
if all([x.is_real for x in flatten(self.limits)]):
return self.func(self.function.adjoint(), *self.limits)
return None
def _eval_conjugate(self):
if all([x.is_real for x in flatten(self.limits)]):
return self.func(self.function.conjugate(), *self.limits)
return None
def _eval_transpose(self):
if all([x.is_real for x in flatten(self.limits)]):
return self.func(self.function.transpose(), *self.limits)
return None
def _eval_factor(self, **hints):
if 1 == len(self.limits):
summand = self.function.factor(**hints)
if summand.is_Mul:
out = sift(summand.args, lambda w: w.is_commutative \
and not set(self.variables) & w.free_symbols)
return Mul(*out[True])*self.func(Mul(*out[False]), \
*self.limits)
else:
summand = self.func(self.function, *self.limits[0:-1]).factor()
if not summand.has(self.variables[-1]):
return self.func(1, [self.limits[-1]]).doit()*summand
elif isinstance(summand, Mul):
return self.func(summand, self.limits[-1]).factor()
return self
def _eval_expand_basic(self, **hints):
from sympy.matrices.matrices import MatrixBase
summand = self.function.expand(**hints)
if summand.is_Add and summand.is_commutative:
return Add(*[self.func(i, *self.limits) for i in summand.args])
elif isinstance(summand, MatrixBase):
return summand.applyfunc(lambda x: self.func(x, *self.limits))
elif summand != self.function:
return self.func(summand, *self.limits)
return self
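# Hedged illustration (not part of the original module): _eval_expand_basic
# distributes the operation over a commutative Add in the summand, roughly:
#
#     >>> from sympy import Sum, symbols
#     >>> x, y = symbols('x y')
#     >>> Sum(x + y, (x, 1, 3)).expand()     # sketch of the expected shape
#     Sum(x, (x, 1, 3)) + Sum(y, (x, 1, 3))
#
# while _eval_factor pulls commutative factors that do not depend on the dummy
# variable out in front of the unevaluated sum or integral.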
|
the-stack_0_24972
|
#!/usr/bin/env python3
# coding: utf8
# /*##########################################################################
#
# Copyright (c) 2015-2019 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "31/01/2020"
__status__ = "stable"
import io
import sys
import os
import platform
import shutil
import logging
import glob
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("pyFAI.setup")
from distutils.command.clean import clean as Clean
from distutils.command.build import build as _build
try:
from setuptools import Command
from setuptools.command.build_py import build_py as _build_py
from setuptools.command.build_ext import build_ext
from setuptools.command.sdist import sdist
logger.info("Use setuptools")
except ImportError:
try:
from numpy.distutils.core import Command
except ImportError:
from distutils.core import Command
from distutils.command.build_py import build_py as _build_py
from distutils.command.build_ext import build_ext
from distutils.command.sdist import sdist
logger.info("Use distutils")
try:
import sphinx
import sphinx.util.console
sphinx.util.console.color_terminal = lambda: False
from sphinx.setup_command import BuildDoc
except ImportError:
sphinx = None
PROJECT = "pyFAI"
if "LANG" not in os.environ and sys.platform == "darwin" and sys.version_info[0] > 2:
print("""WARNING: the LANG environment variable is not defined,
an utf-8 LANG is mandatory to use setup.py, you may face unexpected UnicodeError.
export LANG=en_US.utf-8
export LC_ALL=en_US.utf-8
""")
def get_version():
"""Returns current version number from version.py file"""
dirname = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, dirname)
import version
sys.path = sys.path[1:]
return version.strictversion
def get_readme():
"""Returns content of README.rst file"""
dirname = os.path.dirname(os.path.abspath(__file__))
filename = os.path.join(dirname, "README.rst")
with io.open(filename, "r", encoding="utf-8") as fp:
long_description = fp.read()
return long_description
# double check classifiers on https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers = ["Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Programming Language :: Cython",
"Environment :: Console",
"Environment :: X11 Applications :: Qt",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Topic :: Software Development :: Libraries :: Python Modules",
"Operating System :: Microsoft :: Windows",
"Operating System :: Unix",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX",
"Topic :: Scientific/Engineering :: Physics"
]
# ########## #
# version.py #
# ########## #
class build_py(_build_py):
"""
Enhanced build_py which copies version.py to <PROJECT>._version.py
"""
def find_package_modules(self, package, package_dir):
modules = _build_py.find_package_modules(self, package, package_dir)
if package == PROJECT:
modules.append((PROJECT, '_version', 'version.py'))
return modules
########
# Test #
########
class PyTest(Command):
"""Command to start tests running the script: run_tests.py"""
user_options = []
description = "Execute the unittests"
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
import subprocess
errno = subprocess.call([sys.executable, 'run_tests.py'])
if errno != 0:
raise SystemExit(errno)
# ################### #
# build_doc command #
# ################### #
if sphinx is None:
class SphinxExpectedCommand(Command):
"""Command to inform that sphinx is missing"""
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
raise RuntimeError(
'Sphinx is required to build or test the documentation.\n'
'Please install Sphinx (http://www.sphinx-doc.org).')
class BuildMan(Command):
"""Command to build man pages"""
description = "Build man pages of the provided entry points"
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def entry_points_iterator(self):
"""Iterate other entry points available on the project."""
entry_points = self.distribution.entry_points
console_scripts = entry_points.get('console_scripts', [])
gui_scripts = entry_points.get('gui_scripts', [])
scripts = []
scripts.extend(console_scripts)
scripts.extend(gui_scripts)
for script in scripts:
# Remove ending extra dependencies
script = script.split("[")[0]
elements = script.split("=")
target_name = elements[0].strip()
elements = elements[1].split(":")
module_name = elements[0].strip()
function_name = elements[1].strip()
yield target_name, module_name, function_name
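    # Hedged illustration (not part of the original file): an entry point such as
    #     'pyFAI-average = pyFAI.app.average:main'
    # from the console_scripts list below is yielded by entry_points_iterator() as
    #     ('pyFAI-average', 'pyFAI.app.average', 'main')
    # which run() then turns into a small wrapper script passed to help2man.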
def run_targeted_script(self, target_name, script_name, env, log_output=False):
"""Execute targeted script using --help and --version to help checking
errors. help2man is not very helpful to do it for us.
:return: True is both return code are equal to 0
:rtype: bool
"""
import subprocess
if log_output:
extra_args = {}
else:
try:
# Python 3
from subprocess import DEVNULL
except ImportError:
# Python 2
import os
DEVNULL = open(os.devnull, 'wb')
extra_args = {'stdout': DEVNULL, 'stderr': DEVNULL}
succeeded = True
command_line = [sys.executable, script_name, "--help"]
if log_output:
logger.info("See the following execution of: %s", " ".join(command_line))
p = subprocess.Popen(command_line, env=env, **extra_args)
status = p.wait()
if log_output:
logger.info("Return code: %s", status)
succeeded = succeeded and status == 0
command_line = [sys.executable, script_name, "--version"]
if log_output:
logger.info("See the following execution of: %s", " ".join(command_line))
p = subprocess.Popen(command_line, env=env, **extra_args)
status = p.wait()
if log_output:
logger.info("Return code: %s", status)
succeeded = succeeded and status == 0
return succeeded
@staticmethod
def _write_script(target_name, lst_lines=None):
"""Write a script to a temporary file and return its name
        :param target_name: base of the script name
:param lst_lines: list of lines to be written in the script
:return: the actual filename of the script (for execution or removal)
"""
import tempfile
import stat
script_fid, script_name = tempfile.mkstemp(prefix="%s_" % target_name, text=True)
with os.fdopen(script_fid, 'wt') as script:
for line in lst_lines:
if not line.endswith("\n"):
line += "\n"
script.write(line)
# make it executable
mode = os.stat(script_name).st_mode
os.chmod(script_name, mode + stat.S_IEXEC)
return script_name
def get_synopsis(self, module_name, env, log_output=False):
"""Execute a script to retrieve the synopsis for help2man
:return: synopsis
:rtype: single line string
"""
import subprocess
script_name = None
synopsis = None
script = ["#!%s\n" % sys.executable,
"import logging",
"logging.basicConfig(level=logging.ERROR)",
"import %s as app" % module_name,
"print(app.__doc__)"]
try:
script_name = self._write_script(module_name, script)
command_line = [sys.executable, script_name]
p = subprocess.Popen(command_line, env=env, stdout=subprocess.PIPE)
status = p.wait()
if status != 0:
logger.warning("Error while getting synopsis for module '%s'.", module_name)
synopsis = p.stdout.read().decode("utf-8").strip()
if synopsis == 'None':
synopsis = None
finally:
# clean up the script
if script_name is not None:
os.remove(script_name)
return synopsis
def run(self):
build = self.get_finalized_command('build')
path = sys.path
path.insert(0, os.path.abspath(build.build_lib))
env = dict((str(k), str(v)) for k, v in os.environ.items())
env["PYTHONPATH"] = os.pathsep.join(path)
if not os.path.isdir("build/man"):
os.makedirs("build/man")
import subprocess
script_name = None
entry_points = self.entry_points_iterator()
for target_name, module_name, function_name in entry_points:
logger.info("Build man for entry-point target '%s'" % target_name)
            # help2man expects a single executable file from which to extract the help
# we create it, execute it, and delete it at the end
py3 = sys.version_info >= (3, 0)
try:
script = ["#!%s" % sys.executable,
"import logging",
"logging.basicConfig(level=logging.ERROR)",
"import %s as app" % module_name,
"app.%s()" % function_name]
script_name = self._write_script(target_name, script)
# execute help2man
man_file = "build/man/%s.1" % target_name
command_line = ["help2man", "-N", script_name, "-o", man_file]
synopsis = self.get_synopsis(module_name, env)
if synopsis:
command_line += ["-n", synopsis]
if not py3:
# Before Python 3.4, ArgParser --version was using
# stderr to print the version
command_line.append("--no-discard-stderr")
                    # As a result we don't know whether the documentation
                    # will contain spurious stderr output
succeeded = self.run_targeted_script(target_name, script_name, env, False)
if not succeeded:
logger.info("Error while generating man file for target '%s'.", target_name)
self.run_targeted_script(target_name, script_name, env, True)
raise RuntimeError("Fail to generate '%s' man documentation" % target_name)
p = subprocess.Popen(command_line, env=env)
status = p.wait()
if status != 0:
logger.info("Error while generating man file for target '%s'.", target_name)
self.run_targeted_script(target_name, script_name, env, True)
raise RuntimeError("Fail to generate '%s' man documentation" % target_name)
finally:
# clean up the script
if script_name is not None:
os.remove(script_name)
if sphinx is not None:
class BuildDocCommand(BuildDoc):
"""Command to build documentation using sphinx.
        The project should already have been built.
"""
def run(self):
# make sure the python path is pointing to the newly built
# code so that the documentation is built on this and not a
# previously installed version
build = self.get_finalized_command('build')
sys.path.insert(0, os.path.abspath(build.build_lib))
# # Copy .ui files to the path:
# dst = os.path.join(
# os.path.abspath(build.build_lib), "silx", "gui")
# if not os.path.isdir(dst):
# os.makedirs(dst)
# for i in os.listdir("gui"):
# if i.endswith(".ui"):
# src = os.path.join("gui", i)
# idst = os.path.join(dst, i)
# if not os.path.exists(idst):
# shutil.copy(src, idst)
# Build the Users Guide in HTML and TeX format
for builder in ['html', 'latex']:
self.builder = builder
self.builder_target_dir = os.path.join(self.build_dir, builder)
self.mkpath(self.builder_target_dir)
BuildDoc.run(self)
sys.path.pop(0)
else:
BuildDocCommand = SphinxExpectedCommand
# ################### #
# test_doc command #
# ################### #
if sphinx is not None:
class TestDocCommand(BuildDoc):
"""Command to test the documentation using sphynx doctest.
http://www.sphinx-doc.org/en/1.4.8/ext/doctest.html
"""
def run(self):
# make sure the python path is pointing to the newly built
# code so that the documentation is built on this and not a
# previously installed version
build = self.get_finalized_command('build')
sys.path.insert(0, os.path.abspath(build.build_lib))
# Build the Users Guide in HTML and TeX format
for builder in ['doctest']:
self.builder = builder
self.builder_target_dir = os.path.join(self.build_dir, builder)
self.mkpath(self.builder_target_dir)
BuildDoc.run(self)
sys.path.pop(0)
else:
TestDocCommand = SphinxExpectedCommand
# ############################# #
# numpy.distutils Configuration #
# ############################# #
def configuration(parent_package='', top_path=None):
"""Recursive construction of package info to be used in setup().
See http://docs.scipy.org/doc/numpy/reference/distutils.html#numpy.distutils.misc_util.Configuration
"""
try:
from numpy.distutils.misc_util import Configuration
except ImportError:
raise ImportError(
"To install this package, you must install numpy first\n"
"(See https://pypi.python.org/pypi/numpy)")
config = Configuration(None, parent_package, top_path)
config.set_options(
ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage(PROJECT)
return config
# ############## #
# Compiler flags #
# ############## #
class Build(_build):
"""Command to support more user options for the build."""
user_options = [
('no-openmp', None,
"do not use OpenMP for compiled extension modules"),
('openmp', None,
"use OpenMP for the compiled extension modules"),
('no-cython', None,
"do not compile Cython extension modules (use default compiled c-files)"),
('force-cython', None,
"recompile all Cython extension modules"),
]
user_options.extend(_build.user_options)
boolean_options = ['no-openmp', 'openmp', 'no-cython', 'force-cython']
boolean_options.extend(_build.boolean_options)
def initialize_options(self):
_build.initialize_options(self)
self.no_openmp = None
self.openmp = None
self.no_cython = None
self.force_cython = None
def finalize_options(self):
_build.finalize_options(self)
self.finalize_cython_options(min_version='0.21.1')
self.finalize_openmp_options()
def _parse_env_as_bool(self, key):
content = os.environ.get(key, "")
value = content.lower()
if value in ["1", "true", "yes", "y"]:
return True
if value in ["0", "false", "no", "n"]:
return False
if value in ["none", ""]:
return None
msg = "Env variable '%s' contains '%s'. But a boolean or an empty \
string was expected. Variable ignored."
logger.warning(msg, key, content)
return None
def finalize_openmp_options(self):
"""Check if extensions must be compiled with OpenMP.
The result is stored into the object.
"""
if self.openmp:
use_openmp = True
elif self.no_openmp:
use_openmp = False
else:
            env_with_openmp = self._parse_env_as_bool("WITH_OPENMP")
            if env_with_openmp is not None:
                use_openmp = env_with_openmp
else:
# Use it by default
use_openmp = True
if use_openmp:
if platform.system() == "Darwin":
# By default Xcode5 & XCode6 do not support OpenMP, Xcode4 is OK.
osx = tuple([int(i) for i in platform.mac_ver()[0].split(".")])
if osx >= (10, 8):
logger.warning("OpenMP support ignored. Your platform does not support it.")
use_openmp = False
# Remove attributes used by distutils parsing
# use 'use_openmp' instead
del self.no_openmp
del self.openmp
self.use_openmp = use_openmp
def finalize_cython_options(self, min_version=None):
"""
Check if cythonization must be used for the extensions.
The result is stored into the object.
"""
if self.force_cython:
use_cython = "force"
elif self.no_cython:
use_cython = "no"
else:
env_force_cython = self._parse_env_as_bool("FORCE_CYTHON")
env_with_cython = self._parse_env_as_bool("WITH_CYTHON")
if env_force_cython is True:
use_cython = "force"
elif env_with_cython is True:
use_cython = "yes"
elif env_with_cython is False:
use_cython = "no"
else:
# Use it by default
use_cython = "yes"
if use_cython in ["force", "yes"]:
try:
import Cython.Compiler.Version
if min_version and Cython.Compiler.Version.version < min_version:
msg = "Cython version is too old. At least version is %s \
expected. Cythonization is skipped."
logger.warning(msg, str(min_version))
use_cython = "no"
except ImportError:
msg = "Cython is not available. Cythonization is skipped."
logger.warning(msg)
use_cython = "no"
# Remove attribute used by distutils parsing
# use 'use_cython' and 'force_cython' instead
del self.no_cython
self.force_cython = use_cython == "force"
self.use_cython = use_cython in ["force", "yes"]
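# Hedged usage sketch (not part of the original file): the options declared by the
# Build command above can be driven either from the command line or from the
# environment, for example:
#
#     python setup.py build --no-openmp --force-cython
#     WITH_OPENMP=0 WITH_CYTHON=1 python setup.py build
#
# Both forms end up setting Build.use_openmp / Build.use_cython, which BuildExt
# below reads back in its finalize_options().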
class BuildExt(build_ext):
"""Handle extension compilation.
    Command-line arguments and environment variables can customize:
    - the use of Cython to cythonize files, otherwise the default pre-generated C files are used
    - building the extensions with OpenMP support (enabled by default)
- If building with MSVC, compiler flags are converted from gcc flags.
"""
COMPILE_ARGS_CONVERTER = {'-fopenmp': '/openmp'}
LINK_ARGS_CONVERTER = {'-fopenmp': ''}
description = 'Build extensions'
def finalize_options(self):
build_ext.finalize_options(self)
build_obj = self.distribution.get_command_obj("build")
self.use_openmp = build_obj.use_openmp
self.use_cython = build_obj.use_cython
self.force_cython = build_obj.force_cython
def patch_with_default_cythonized_files(self, ext):
"""Replace cython files by .c or .cpp files in extension's sources.
It replaces the *.pyx and *.py source files of the extensions
to either *.cpp or *.c source files.
No compilation is performed.
:param Extension ext: An extension to patch.
"""
new_sources = []
for source in ext.sources:
base, file_ext = os.path.splitext(source)
if file_ext in ('.pyx', '.py'):
if ext.language == 'c++':
cythonized = base + '.cpp'
else:
cythonized = base + '.c'
if not os.path.isfile(cythonized):
raise RuntimeError("Source file not found: %s. Cython is needed" % cythonized)
print("Use default cythonized file for %s" % source)
new_sources.append(cythonized)
else:
new_sources.append(source)
ext.sources = new_sources
def patch_extension(self, ext):
"""
Patch an extension according to requested Cython and OpenMP usage.
:param Extension ext: An extension
"""
        # Cythonize
if not self.use_cython:
self.patch_with_default_cythonized_files(ext)
else:
from Cython.Build import cythonize
patched_exts = cythonize(
[ext],
compiler_directives={'embedsignature': True,
'language_level': 3},
force=self.force_cython,
)
ext.sources = patched_exts[0].sources
# Remove OpenMP flags if OpenMP is disabled
if not self.use_openmp:
ext.extra_compile_args = [
f for f in ext.extra_compile_args if f != '-fopenmp']
ext.extra_link_args = [
f for f in ext.extra_link_args if f != '-fopenmp']
# Convert flags from gcc to MSVC if required
if self.compiler.compiler_type == 'msvc':
extra_compile_args = [self.COMPILE_ARGS_CONVERTER.get(f, f)
for f in ext.extra_compile_args]
# Avoid empty arg
ext.extra_compile_args = [arg for arg in extra_compile_args if arg]
extra_link_args = [self.LINK_ARGS_CONVERTER.get(f, f)
for f in ext.extra_link_args]
# Avoid empty arg
ext.extra_link_args = [arg for arg in extra_link_args if arg]
elif self.compiler.compiler_type == 'unix':
# Avoids runtime symbol collision for manylinux1 platform
# See issue #1070
extern = 'extern "C" ' if ext.language == 'c++' else ''
return_type = 'void' if sys.version_info[0] <= 2 else 'PyObject*'
ext.extra_compile_args.append('-fvisibility=hidden')
import numpy
numpy_version = [int(i) for i in numpy.version.short_version.split(".", 2)[:2]]
if numpy_version < [1, 16]:
ext.extra_compile_args.append(
'''-D'PyMODINIT_FUNC=%s__attribute__((visibility("default"))) %s ' ''' % (extern, return_type))
else:
ext.define_macros.append(
('PyMODINIT_FUNC',
'%s__attribute__((visibility("default"))) %s ' % (extern, return_type)))
def is_debug_interpreter(self):
"""
Returns true if the script is executed with a debug interpreter.
        This relies on non-standard checks: it does not work on Windows or
        macOS, but it has to work at least for Debian interpreters.
:rtype: bool
"""
if sys.version_info >= (3, 0):
# It is normalized on Python 3
# But it is not available on Windows CPython
if hasattr(sys, "abiflags"):
return "d" in sys.abiflags
else:
# It's a Python 2 interpreter
# pydebug is not available on Windows/Mac OS interpreters
if hasattr(sys, "pydebug"):
return sys.pydebug
        # We can't tell whether a debug interpreter is being used
return False
def patch_compiler(self):
"""
Patch the compiler to:
        - always compile extensions with debug symbols (-g)
- only compile asserts in debug mode (-DNDEBUG)
Plus numpy.distutils/setuptools/distutils inject a lot of duplicated
flags. This function tries to clean up default debug options.
"""
build_obj = self.distribution.get_command_obj("build")
if build_obj.debug:
debug_mode = build_obj.debug
else:
# Force debug_mode also when it uses python-dbg
# It is needed for Debian packaging
debug_mode = self.is_debug_interpreter()
if self.compiler.compiler_type == "unix":
args = list(self.compiler.compiler_so)
# clean up debug flags -g is included later in another way
must_be_cleaned = ["-DNDEBUG", "-g"]
args = filter(lambda x: x not in must_be_cleaned, args)
args = list(args)
# always insert symbols
args.append("-g")
# only strip asserts in release mode
if not debug_mode:
args.append('-DNDEBUG')
# patch options
self.compiler.compiler_so = list(args)
def build_extensions(self):
self.patch_compiler()
for ext in self.extensions:
self.patch_extension(ext)
build_ext.build_extensions(self)
################################################################################
# Clean command
################################################################################
class CleanCommand(Clean):
description = "Remove build artifacts from the source tree"
def expand(self, path_list):
"""Expand a list of path using glob magic.
        :param list[str] path_list: A list of paths which may contain glob magic
        :rtype: list[str]
        :returns: A list of paths without magic
"""
path_list2 = []
for path in path_list:
if glob.has_magic(path):
iterator = glob.iglob(path)
path_list2.extend(iterator)
else:
path_list2.append(path)
return path_list2
def find(self, path_list):
"""Find a file pattern if directories.
Could be done using "**/*.c" but it is only supported in Python 3.5.
:param list[str] path_list: A list of path which may contains magic
:rtype: list[str]
:returns: A list of path without magic
"""
import fnmatch
path_list2 = []
for pattern in path_list:
for root, _, filenames in os.walk('.'):
for filename in fnmatch.filter(filenames, pattern):
path_list2.append(os.path.join(root, filename))
return path_list2
def run(self):
Clean.run(self)
cython_files = self.find(["*.pyx"])
cythonized_files = [path.replace(".pyx", ".c") for path in cython_files]
cythonized_files += [path.replace(".pyx", ".cpp") for path in cython_files]
# really remove the directories
# and not only if they are empty
to_remove = [self.build_base]
to_remove = self.expand(to_remove)
to_remove += cythonized_files
if not self.dry_run:
for path in to_remove:
try:
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.remove(path)
logger.info("removing '%s'", path)
except OSError:
pass
################################################################################
# Source tree
################################################################################
class SourceDistWithCython(sdist):
"""
Force cythonization of the extensions before generating the source
distribution.
To provide the widest compatibility the cythonized files are provided
    without support of OpenMP.
"""
description = "Create a source distribution including cythonized files (tarball, zip file, etc.)"
def finalize_options(self):
sdist.finalize_options(self)
self.extensions = self.distribution.ext_modules
def run(self):
self.cythonize_extensions()
sdist.run(self)
def cythonize_extensions(self):
from Cython.Build import cythonize
cythonize(
self.extensions,
compiler_directives={'embedsignature': True,
'language_level': 3},
force=True,
)
################################################################################
# Debian source tree
################################################################################
class sdist_debian(sdist):
"""
Tailor made sdist for debian
* remove auto-generated doc
* remove cython generated .c files
* remove cython generated .cpp files
* remove .bat files
* include .l man files
"""
description = "Create a source distribution for Debian (tarball, zip file, etc.)"
@staticmethod
def get_debian_name():
import version
name = "%s_%s" % (PROJECT, version.debianversion)
return name
def prune_file_list(self):
sdist.prune_file_list(self)
to_remove = ["doc/build", "doc/pdf", "doc/html", "pylint", "epydoc"]
print("Removing files for debian")
for rm in to_remove:
self.filelist.exclude_pattern(pattern="*", anchor=False, prefix=rm)
# this is for Cython files specifically: remove C & html files
search_root = os.path.dirname(os.path.abspath(__file__))
for root, _, files in os.walk(search_root):
for afile in files:
if os.path.splitext(afile)[1].lower() == ".pyx":
base_file = os.path.join(root, afile)[len(search_root) + 1:-4]
self.filelist.exclude_pattern(pattern=base_file + ".c")
self.filelist.exclude_pattern(pattern=base_file + ".cpp")
self.filelist.exclude_pattern(pattern=base_file + ".html")
# do not include third_party/_local files
self.filelist.exclude_pattern(pattern="*", prefix="pyFAI/third_party/_local")
def make_distribution(self):
self.prune_file_list()
sdist.make_distribution(self)
dest = self.archive_files[0]
dirname, basename = os.path.split(dest)
base, ext = os.path.splitext(basename)
while ext in [".zip", ".tar", ".bz2", ".gz", ".Z", ".lz", ".orig"]:
base, ext = os.path.splitext(base)
if ext:
dest = "".join((base, ext))
else:
dest = base
# sp = dest.split("-")
# base = sp[:-1]
# nr = sp[-1]
debian_arch = os.path.join(dirname, self.get_debian_name() + ".orig.tar.gz")
os.rename(self.archive_files[0], debian_arch)
self.archive_files = [debian_arch]
print("Building debian .orig.tar.gz in %s" % self.archive_files[0])
#################
# PyFAI specific
#################
class PyFaiTestData(Command):
"""
Tailor made tarball with test data
"""
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def download_images(self):
"""
        Download all test images and return the list of their file names.
"""
root_dir = os.path.dirname(os.path.abspath(__file__))
test_dir = os.path.join(root_dir, PROJECT, "test")
sys.path.insert(0, test_dir)
from utilstest import UtilsTest
image_home = os.path.join(root_dir, "testimages")
testimages = os.path.join(image_home, "all_testimages.json")
UtilsTest.image_home = image_home
UtilsTest.testimages = testimages
if os.path.exists(testimages):
import json
with open(testimages) as f:
all_files = set(json.load(f))
else:
            raise RuntimeError("Please run 'python setup.py build test' to download all images")
return list(all_files)
def run(self):
datafiles = self.download_images()
dist = "dist"
arch = os.path.join(dist, PROJECT + "-testimages.tar.gz")
print("Building testdata tarball in %s" % arch)
if not os.path.isdir(dist):
os.mkdir(dist)
if os.path.exists(arch):
os.unlink(arch)
import tarfile
with tarfile.open(name=arch, mode='w:gz') as tarball:
for afile in datafiles:
tarball.add(os.path.join("testimages", afile), afile)
# ##### #
# setup #
# ##### #
def get_project_configuration(dry_run):
"""Returns project arguments for setup"""
# Use installed numpy version as minimal required version
# This is useful for wheels to advertise the numpy version they were built with
if dry_run:
numpy_requested_version = ""
else:
from numpy.version import version as numpy_version
numpy_requested_version = ">=%s" % numpy_version
logger.info("Install requires: numpy %s", numpy_requested_version)
install_requires = [
"numpy%s" % numpy_requested_version,
        # h5py was removed from the dependencies because it creates an issue with
        # Debian 8: pip is not aware that h5py is installed, so pkg_resources
        # checks the dependencies and in that case raises an exception
# FIXME we still have to investigate
# "h5py",
"fabio>=0.5",
"matplotlib",
"scipy",
"numexpr",
# for the use of pkg_resources on script launcher
"setuptools",
"silx>=0.10"]
setup_requires = [
"setuptools",
"numpy"]
package_data = {
'pyFAI.resources': [
# Add here all resources files
'calibration/*.D',
'gui/*.ui',
'gui/icons/*.svg',
'gui/icons/*.png',
'gui/images/*.svg',
'gui/images/*.png',
'gui/styles/*.json',
'openCL/*.cl',
]
}
gui_requires = ['PyQt5', 'h5py', 'hdf5plugin', 'PyOpenGL']
opencl_requires = ['pyopencl']
extras_require = {
'calib2': gui_requires, # Keep compatibility
'gui': gui_requires,
'opencl': opencl_requires,
'full': gui_requires + opencl_requires,
}
console_scripts = [
'check_calib = pyFAI.app.check_calib:main',
'detector2nexus = pyFAI.app.detector2nexus:main',
'diff_map = pyFAI.app.diff_map:main',
'diff_tomo = pyFAI.app.diff_tomo:main',
'eiger-mask = pyFAI.app.eiger_mask:main',
'MX-calibrate = pyFAI.app.mx_calibrate:main',
'pyFAI-average = pyFAI.app.average:main',
'pyFAI-benchmark = pyFAI.app.benchmark:main',
'pyFAI-calib = pyFAI.app.calib:main',
'pyFAI-calib2 = pyFAI.app.calib2:main',
'pyFAI-drawmask = pyFAI.app.drawmask:main',
'pyFAI-diffmap = pyFAI.app.diff_map:main',
'pyFAI-integrate = pyFAI.app.integrate:main',
'pyFAI-recalib = pyFAI.app.recalib:main',
'pyFAI-saxs = pyFAI.app.saxs:main',
'pyFAI-waxs = pyFAI.app.waxs:main',
]
entry_points = {
'console_scripts': console_scripts,
# 'gui_scripts': [],
}
cmdclass = dict(
build=Build,
build_py=build_py,
test=PyTest,
build_doc=BuildDocCommand,
test_doc=TestDocCommand,
build_ext=BuildExt,
build_man=BuildMan,
clean=CleanCommand,
sdist=SourceDistWithCython,
debian_src=sdist_debian,
testimages=PyFaiTestData,
)
if dry_run:
# DRY_RUN implies actions which do not require NumPy
#
        # They are required to succeed without NumPy, for example when
        # pip is used to install pyFAI while NumPy is not yet present in
        # the system.
setup_kwargs = {}
else:
config = configuration()
setup_kwargs = config.todict()
setup_kwargs.update(name=PROJECT,
version=get_version(),
url="https://github.com/silx-kit/pyFAI",
download_url="https://github.com/silx-kit/pyFAI/releases",
author="Jérôme Kieffer (algo) & Valentin Valls (gui)",
author_email="[email protected]",
classifiers=classifiers,
description='Python implementation of fast azimuthal integration',
long_description=get_readme(),
install_requires=install_requires,
setup_requires=setup_requires,
extras_require=extras_require,
cmdclass=cmdclass,
package_data=package_data,
zip_safe=False,
entry_points=entry_points,
)
return setup_kwargs
def setup_package():
"""Run setup(**kwargs)
Depending on the command, it either runs the complete setup which depends on numpy,
or a *dry run* setup with no dependency on numpy.
"""
# Check if action requires build/install
dry_run = len(sys.argv) == 1 or (len(sys.argv) >= 2 and (
'--help' in sys.argv[1:] or
sys.argv[1] in ('--help-commands', 'egg_info', '--version',
'clean', '--name')))
if dry_run:
# DRY_RUN implies actions which do not require dependencies, like NumPy
try:
from setuptools import setup
logger.info("Use setuptools.setup")
except ImportError:
from distutils.core import setup
logger.info("Use distutils.core.setup")
else:
try:
from setuptools import setup
except ImportError:
from numpy.distutils.core import setup
logger.info("Use numpy.distutils.setup")
setup_kwargs = get_project_configuration(dry_run)
setup(**setup_kwargs)
if __name__ == "__main__":
setup_package()
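# Hedged usage sketch (not part of the original file): with the cmdclass mapping
# defined above, typical invocations of this setup script look like:
#
#     python setup.py build            # compile extensions (OpenMP/Cython configurable)
#     python setup.py build_man        # generate man pages with help2man
#     python setup.py build_doc        # build the sphinx documentation
#     python setup.py debian_src       # create the Debian .orig.tar.gz
#     python setup.py testimages       # pack the test-data tarball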
|
the-stack_0_24973
|
import os
src_folders = ["stix_shifter_utils", "stix_shifter", "stix_shifter_modules"]
install_requires = set()
requirements_files = []
for src_folder in src_folders:
for r, d, f in os.walk(src_folder):
for file in f:
            if file == 'requirements.txt' and not os.path.isfile(os.path.join(r, 'SKIP.ME')):
requirements_files.append(os.path.join(r, file))
print('requirements_files: %s' % requirements_files)
for requirements_file in requirements_files:
with open(requirements_file) as f:
lines = f.readlines()
lines = [x.strip() for x in lines]
lines = list(filter(lambda s: len(s)>0, lines))
install_requires.update(lines)
install_requires = list(install_requires)
install_requires.sort()
print('install_requires: %s' % install_requires)
with open('requirements.txt', 'w') as out_file:
for item in install_requires:
out_file.write(item)
out_file.write('\n')
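# Hedged illustration (hypothetical paths, not part of the original script): given
#     stix_shifter_modules/splunk/requirements.txt   containing "requests"
#     stix_shifter_utils/requirements.txt            containing "requests" and "pyyaml"
# the deduplicated, sorted top-level requirements.txt written above would contain:
#     pyyaml
#     requests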
|
the-stack_0_24975
|
# -*- coding: utf-8 -*-
"""This module implements all CAD database manipulations using skill commands.
"""
from typing import List, Dict, Optional, Any, Tuple
import os
import shutil
import yaml
from jinja2 import Template
import bag.io
from .database import DbAccess
calibre_tmp = bag.io.read_resource(bag.__name__, os.path.join('virtuoso_files', 'calibreview_setup.pytemp'))
try:
import cybagoa
except ImportError:
cybagoa = None
def _dict_to_pcell_params(table):
"""Convert given parameter dictionary to pcell parameter list format.
Parameters
----------
table : dict[str, any]
the parameter dictionary.
Returns
-------
param_list : list[any]
the Pcell parameter list
"""
param_list = []
for key, val in table.items():
# python 2/3 compatibility: convert raw bytes to string.
val = bag.io.fix_string(val)
if isinstance(val, float):
param_list.append([key, "float", val])
elif isinstance(val, str):
# unicode string
param_list.append([key, "string", val])
        elif isinstance(val, bool):
            # bool must be checked before int because bool is a subclass of int
            param_list.append([key, "bool", val])
        elif isinstance(val, int):
            param_list.append([key, "int", val])
else:
raise Exception('Unsupported parameter %s with type: %s' % (key, type(val)))
return param_list
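# Hedged illustration (hypothetical parameter names, not part of the original
# module): the type checks above flatten a parameter table into Pcell
# [name, type, value] triples, roughly
#
#     _dict_to_pcell_params({'w': 1.0, 'nf': 2, 'model': 'nch'})
#     # -> [['w', 'float', 1.0], ['nf', 'int', 2], ['model', 'string', 'nch']]
#
# with the ordering following the dict's insertion order.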
def to_skill_list_str(pylist):
"""Convert given python list to a skill list string.
Parameters
----------
pylist : list[str]
a list of string.
Returns
-------
ans : str
a string representation of the equivalent skill list.
"""
content = ' '.join(('"%s"' % val for val in pylist))
return "'( %s )" % content
def _handle_reply(reply):
"""Process the given reply."""
if isinstance(reply, dict):
if reply.get('type') == 'error':
if 'data' not in reply:
raise Exception('Unknown reply format: %s' % reply)
raise VirtuosoException(reply['data'])
else:
try:
return reply['data']
except Exception:
raise Exception('Unknown reply format: %s' % reply)
else:
raise Exception('Unknown reply format: %s' % reply)
class VirtuosoException(Exception):
"""Exception raised when Virtuoso returns an error."""
def __init__(self, *args, **kwargs):
# noinspection PyArgumentList
Exception.__init__(self, *args, **kwargs)
class SkillInterface(DbAccess):
"""Skill interface between bag and Virtuoso.
This class sends all bag's database and simulation operations to
an external Virtuoso process, then get the result from it.
Parameters
----------
dealer : :class:`bag.interface.ZMQDealer`
the socket used to communicate with :class:`~bag.interface.SkillOceanServer`.
tmp_dir : string
temporary file directory for DbAccess.
db_config : dict[str, any]
the database configuration dictionary.
"""
def __init__(self, dealer, tmp_dir, db_config):
"""Initialize a new SkillInterface object.
"""
DbAccess.__init__(self, tmp_dir, db_config)
self.handler = dealer
self._rcx_jobs = {}
def close(self):
"""Terminate the database server gracefully.
"""
self.handler.send_obj(dict(type='exit'))
self.handler.close()
def _eval_skill(self, expr, input_files=None, out_file=None):
# type: (str, Optional[Dict[str, Any]], Optional[str]) -> str
"""Send a request to evaluate the given skill expression.
Because Virtuoso has a limit on the input/output data (< 4096 bytes),
if your input is large, you need to write it to a file and have
Virtuoso open the file to parse it. Similarly, if you expect a
large output, you need to make Virtuoso write the result to the
file, then read it yourself. The parameters input_files and
out_file help you achieve this functionality.
For example, if you need to evaluate "skill_fun(arg fname)", where
arg is a file containing the list [1 2 3], and fname is the output
file name, you will call this function with:
expr = "skill_fun({arg} {fname})"
input_files = { "arg": [1 2 3] }
out_file = "fname"
        the bag server will then create temporary files for arg and fname, write
the list [1 2 3] into the file for arg, call Virtuoso, then read
the output file fname and return the result.
Parameters
----------
expr : string
the skill expression to evaluate.
input_files : dict[string, any] or None
A dictionary of input files content.
out_file : string or None
the output file name argument in expr.
Returns
-------
result : str
a string representation of the result.
Raises
------
:class: `.VirtuosoException` :
if virtuoso encounters errors while evaluating the expression.
"""
request = dict(
type='skill',
expr=expr,
input_files=input_files,
out_file=out_file,
)
self.handler.send_obj(request)
reply = self.handler.recv_obj()
return _handle_reply(reply)
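    # Hedged illustration (not part of the original module), mirroring the docstring
    # example above: to evaluate skill_fun(arg fname), where arg holds a list written
    # to a temporary file and fname receives the output, one would call roughly:
    #
    #     result = self._eval_skill("skill_fun({arg} {fname})",
    #                               input_files={'arg': [1, 2, 3]},
    #                               out_file='fname')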
def parse_schematic_template(self, lib_name, cell_name):
"""Parse the given schematic template.
Parameters
----------
lib_name : str
name of the library.
cell_name : str
name of the cell.
Returns
-------
template : str
the content of the netlist structure file.
"""
cmd = 'parse_cad_sch( "%s" "%s" {netlist_info} )' % (lib_name, cell_name)
return self._eval_skill(cmd, out_file='netlist_info')
def get_cells_in_library(self, lib_name):
"""Get a list of cells in the given library.
Returns an empty list if the given library does not exist.
Parameters
----------
lib_name : str
the library name.
Returns
-------
cell_list : list[str]
a list of cells in the library
"""
cmd = 'get_cells_in_library_file( "%s" {cell_file} )' % lib_name
return self._eval_skill(cmd, out_file='cell_file').split()
def create_library(self, lib_name, lib_path=''):
"""Create a new library if one does not exist yet.
Parameters
----------
lib_name : string
the library name.
lib_path : string
directory to create the library in. If Empty, use default location.
"""
lib_path = lib_path or self.default_lib_path
tech_lib = self.db_config['schematic']['tech_lib']
return self._eval_skill('create_or_erase_library("%s" "%s" "%s" nil)' % (lib_name, tech_lib, lib_path))
def create_implementation(self, lib_name, template_list, change_list, lib_path=''):
"""Create implementation of a design in the CAD database.
Parameters
----------
lib_name : str
implementation library name.
template_list : list
a list of schematic templates to copy to the new library.
change_list :
a list of changes to be performed on each copied templates.
lib_path : str
directory to create the library in. If Empty, use default location.
"""
lib_path = lib_path or self.default_lib_path
tech_lib = self.db_config['schematic']['tech_lib']
if cybagoa is not None and self.db_config['schematic'].get('use_cybagoa', False):
cds_lib_path = os.environ.get('CDS_LIB_PATH', './cds.lib')
sch_name = 'schematic'
sym_name = 'symbol'
encoding = bag.io.get_encoding()
# release write locks
cell_view_list = []
for _, _, cell_name in template_list:
cell_view_list.append((cell_name, sch_name))
cell_view_list.append((cell_name, sym_name))
self.release_write_locks(lib_name, cell_view_list)
# create library in case it doesn't exist
self.create_library(lib_name, lib_path)
# write schematic
with cybagoa.PyOASchematicWriter(cds_lib_path, lib_name, encoding) as writer:
for temp_info, change_info in zip(template_list, change_list):
sch_cell = cybagoa.PySchCell(temp_info[0], temp_info[1], temp_info[2], encoding)
for old_pin, new_pin in change_info['pin_map']:
sch_cell.rename_pin(old_pin, new_pin)
for inst_name, rinst_list in change_info['inst_list']:
sch_cell.add_inst(inst_name, lib_name, rinst_list)
writer.add_sch_cell(sch_cell)
writer.create_schematics(sch_name, sym_name)
copy = 'nil'
else:
copy = "'t"
in_files = {'template_list': template_list,
'change_list': change_list}
sympin = to_skill_list_str(self.db_config['schematic']['sympin'])
ipin = to_skill_list_str(self.db_config['schematic']['ipin'])
opin = to_skill_list_str(self.db_config['schematic']['opin'])
iopin = to_skill_list_str(self.db_config['schematic']['iopin'])
simulators = to_skill_list_str(self.db_config['schematic']['simulators'])
cmd = ('create_concrete_schematic( "%s" "%s" "%s" {template_list} '
'{change_list} %s %s %s %s %s %s)' % (lib_name, tech_lib, lib_path,
sympin, ipin, opin, iopin, simulators, copy))
return self._eval_skill(cmd, input_files=in_files)
def configure_testbench(self, tb_lib, tb_cell):
"""Update testbench state for the given testbench.
This method fill in process-specific information for the given testbench.
Parameters
----------
tb_lib : str
testbench library name.
tb_cell : str
testbench cell name.
Returns
-------
cur_env : str
the current simulation environment.
envs : list[str]
a list of available simulation environments.
parameters : dict[str, str]
a list of testbench parameter values, represented as string.
"""
tb_config = self.db_config['testbench']
cmd = ('instantiate_testbench("{tb_cell}" "{targ_lib}" ' +
'"{config_libs}" "{config_views}" "{config_stops}" ' +
'"{default_corner}" "{corner_file}" {def_files} ' +
'"{tech_lib}" {result_file})')
cmd = cmd.format(tb_cell=tb_cell,
targ_lib=tb_lib,
config_libs=tb_config['config_libs'],
config_views=tb_config['config_views'],
config_stops=tb_config['config_stops'],
default_corner=tb_config['default_env'],
corner_file=tb_config['env_file'],
def_files=to_skill_list_str(tb_config['def_files']),
tech_lib=self.db_config['schematic']['tech_lib'],
result_file='{result_file}')
output = yaml.load(self._eval_skill(cmd, out_file='result_file'))
return tb_config['default_env'], output['corners'], output['parameters'], output['outputs']
def get_testbench_info(self, tb_lib, tb_cell):
"""Returns information about an existing testbench.
Parameters
----------
tb_lib : str
testbench library.
tb_cell : str
testbench cell.
Returns
-------
cur_envs : list[str]
the current simulation environments.
envs : list[str]
a list of available simulation environments.
parameters : dict[str, str]
a list of testbench parameter values, represented as string.
outputs : dict[str, str]
a list of testbench output expressions.
"""
cmd = 'get_testbench_info("{tb_lib}" "{tb_cell}" {result_file})'
cmd = cmd.format(tb_lib=tb_lib,
tb_cell=tb_cell,
result_file='{result_file}')
output = yaml.load(self._eval_skill(cmd, out_file='result_file'))
return output['enabled_corners'], output['corners'], output['parameters'], output['outputs']
def update_testbench(self, lib, cell, parameters, sim_envs, config_rules, env_parameters):
# type: (str, str, Dict[str, str], List[str], List[List[str]], List[List[Tuple[str, str]]]) -> None
"""Update the given testbench configuration.
Parameters
----------
lib : str
testbench library.
cell : str
testbench cell.
parameters : Dict[str, str]
testbench parameters.
sim_envs : List[str]
list of enabled simulation environments.
config_rules : List[List[str]]
config view mapping rules, list of (lib, cell, view) rules.
env_parameters : List[List[Tuple[str, str]]]
list of param/value list for each simulation environment.
"""
cmd = 'modify_testbench("%s" "%s" {conf_rules} {run_opts} {sim_envs} {params} {env_params})' % (lib, cell)
in_files = {'conf_rules': config_rules,
'run_opts': [],
'sim_envs': sim_envs,
'params': list(parameters.items()),
'env_params': list(zip(sim_envs, env_parameters)),
}
self._eval_skill(cmd, input_files=in_files)
def instantiate_layout_pcell(self, lib_name, cell_name, view_name,
inst_lib, inst_cell, params, pin_mapping):
"""Create a layout cell with a single pcell instance.
Parameters
----------
lib_name : str
layout library name.
cell_name : str
layout cell name.
view_name : str
layout view name, default is "layout".
inst_lib : str
pcell library name.
inst_cell : str
pcell cell name.
params : dict[str, any]
the parameter dictionary.
pin_mapping: dict[str, str]
the pin mapping dictionary.
"""
# create library in case it doesn't exist
self.create_library(lib_name)
# convert parameter dictionary to pcell params list format
param_list = _dict_to_pcell_params(params)
cmd = ('create_layout_with_pcell( "%s" "%s" "%s" "%s" "%s"'
'{params} {pin_mapping} )' % (lib_name, cell_name,
view_name, inst_lib, inst_cell))
in_files = {'params': param_list, 'pin_mapping': list(pin_mapping.items())}
return self._eval_skill(cmd, input_files=in_files)
def instantiate_layout(self, lib_name, view_name, via_tech, layout_list):
"""Create a batch of layouts.
Parameters
----------
lib_name : str
layout library name.
view_name : str
layout view name.
via_tech : str
via technology library name.
layout_list : list[any]
a list of layouts to create
"""
# create library in case it doesn't exist
self.create_library(lib_name)
# convert parameter dictionary to pcell params list format
new_layout_list = []
for info_list in layout_list:
new_inst_list = []
for inst in info_list[1]:
if 'params' in inst:
inst = inst.copy()
inst['params'] = _dict_to_pcell_params(inst['params'])
new_inst_list.append(inst)
new_info_list = info_list[:]
new_info_list[1] = new_inst_list
new_layout_list.append(new_info_list)
cmd = 'create_layout( "%s" "%s" "%s" {layout_list} )' % (lib_name, view_name, via_tech)
in_files = {'layout_list': new_layout_list}
return self._eval_skill(cmd, input_files=in_files)
def release_write_locks(self, lib_name, cell_view_list):
"""Release write locks from all the given cells.
Parameters
----------
lib_name : string
the library name.
cell_view_list : List[(string, string)]
list of cell/view name tuples.
"""
cmd = 'release_write_locks( "%s" {cell_view_list} )' % lib_name
in_files = {'cell_view_list': cell_view_list}
return self._eval_skill(cmd, input_files=in_files)
def create_schematic_from_netlist(self, netlist, lib_name, cell_name,
sch_view=None, **kwargs):
# type: (str, str, str, Optional[str], **kwargs) -> None
"""Create a schematic from a netlist.
This is mainly used to create extracted schematic from an extracted netlist.
Parameters
----------
netlist : str
the netlist file name.
lib_name : str
library name.
cell_name : str
cell_name
sch_view : Optional[str]
            schematic view name. The default value is implementation dependent.
**kwargs
additional implementation-dependent arguments.
"""
calview_config = self.db_config.get('calibreview', None)
use_calibreview = self.db_config.get('use_calibreview', True)
if calview_config is not None and use_calibreview:
# create calibre view from extraction netlist
cell_map = calview_config['cell_map']
sch_view = sch_view or calview_config['view_name']
# create calibre view config file
content = Template(calibre_tmp).render(netlist_file=netlist,
lib_name=lib_name,
cell_name=cell_name,
calibre_cellmap=cell_map,
view_name=sch_view)
with bag.io.open_temp(prefix='calview', dir=self.tmp_dir, delete=False) as f:
fname = f.name
f.write(content)
# delete old calibre view
cmd = 'delete_cellview( "%s" "%s" "%s" )' % (lib_name, cell_name, sch_view)
self._eval_skill(cmd)
# make extracted schematic
cmd = 'mgc_rve_load_setup_file( "%s" )' % fname
self._eval_skill(cmd)
else:
# get netlists to copy
netlist_dir = os.path.dirname(netlist)
netlist_files = self.checker.get_rcx_netlists(lib_name, cell_name)
if not netlist_files:
# some error checking. Shouldn't be needed but just in case
raise ValueError('RCX did not generate any netlists')
# copy netlists to a "netlist" subfolder in the CAD database
cell_dir = self.get_cell_directory(lib_name, cell_name)
targ_dir = os.path.join(cell_dir, 'netlist')
os.makedirs(targ_dir, exist_ok=True)
for fname in netlist_files:
shutil.copy(os.path.join(netlist_dir, fname), targ_dir)
# create symbolic link as aliases
symlink = os.path.join(targ_dir, 'netlist')
try:
os.remove(symlink)
except FileNotFoundError:
pass
os.symlink(netlist_files[0], symlink)
def get_cell_directory(self, lib_name, cell_name):
# type: (str, str) -> str
"""Returns the directory name of the given cell.
Parameters
----------
lib_name : str
library name.
cell_name : str
cell name.
Returns
-------
cell_dir : str
path to the cell directory.
"""
# use yaml.load to remove outermost quotation marks
lib_dir = yaml.load(self._eval_skill('get_lib_directory( "%s" )' % lib_name))
if not lib_dir:
raise ValueError('Library %s not found.' % lib_name)
return os.path.join(lib_dir, cell_name)
def create_verilog_view(self, verilog_file, lib_name, cell_name, **kwargs):
# type: (str, str, str, **kwargs) -> None
"""Create a verilog view for mix-signal simulation.
Parameters
----------
verilog_file : str
the verilog file name.
lib_name : str
library name.
cell_name : str
cell name.
**kwargs
additional implementation-dependent arguments.
"""
# delete old verilog view
cmd = 'delete_cellview( "%s" "%s" "verilog" )' % (lib_name, cell_name)
self._eval_skill(cmd)
cmd = 'schInstallHDL("%s" "%s" "verilog" "%s" t)' % (lib_name, cell_name, verilog_file)
self._eval_skill(cmd)
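# Hedged usage sketch (hypothetical library and cell names, not part of the original
# module): a SkillInterface instance is typically used through calls such as
#
#     cells = db.get_cells_in_library('mylib')
#     db.create_library('mylib_gen')
#     db.create_verilog_view('/tmp/top.v', 'mylib_gen', 'top_cell')
#
# where db is the SkillInterface instance constructed with a ZMQDealer socket.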
|
the-stack_0_24981
|
import inspect
import logging
import os
import warnings
from abc import ABCMeta, abstractmethod
from typing import Dict, Optional, Sequence, Type, Union
from uuid import uuid4
import numpy as np
import pyro
import rich
import torch
from anndata import AnnData
from scvi import settings
from scvi.data.anndata import AnnDataManager
from scvi.data.anndata._compat import manager_from_setup_dict
from scvi.data.anndata._constants import (
_MODEL_NAME_KEY,
_SCVI_UUID_KEY,
_SETUP_ARGS_KEY,
)
from scvi.data.anndata._utils import _assign_adata_uuid
from scvi.dataloaders import AnnDataLoader
from scvi.model._utils import parse_use_gpu_arg
from scvi.module.base import PyroBaseModuleClass
from scvi.utils import setup_anndata_dsp
from ._utils import _initialize_model, _load_saved_files, _validate_var_names
logger = logging.getLogger(__name__)
_UNTRAINED_WARNING_MESSAGE = "Trying to query inferred values from an untrained model. Please train the model first."
_SETUP_INPUTS_EXCLUDED_PARAMS = {"adata", "kwargs"}
class BaseModelMetaClass(ABCMeta):
"""
Metaclass for :class:`~scvi.model.base.BaseModelClass`.
Constructs model class-specific mappings for :class:`~scvi.data.anndata.AnnDataManager` instances.
``cls._setup_adata_manager_store`` maps from AnnData object UUIDs to :class:`~scvi.data.anndata.AnnDataManager` instances.
    This mapping is populated every time ``cls.setup_anndata()`` is called.
    ``cls._per_instance_manager_store`` maps from model instance UUIDs to AnnData UUID::class:`~scvi.data.anndata.AnnDataManager` mappings.
These :class:`~scvi.data.anndata.AnnDataManager` instances are tied to a single model instance and populated either
during model initialization or after running ``self._validate_anndata()``.
"""
def __init__(cls, name, bases, dct):
cls._setup_adata_manager_store: Dict[
str, Type[AnnDataManager]
] = dict() # Maps adata id to AnnDataManager instances.
cls._per_instance_manager_store: Dict[
str, Dict[str, Type[AnnDataManager]]
] = dict() # Maps model instance id to AnnDataManager mappings.
super().__init__(name, bases, dct)
class BaseModelClass(metaclass=BaseModelMetaClass):
"""Abstract class for scvi-tools models."""
def __init__(self, adata: Optional[AnnData] = None):
self.id = str(uuid4()) # Used for cls._manager_store keys.
if adata is not None:
self._adata = adata
self._adata_manager = self._get_most_recent_anndata_manager(
adata, required=True
)
self._register_manager_for_instance(self.adata_manager)
# Suffix registry instance variable with _ to include it when saving the model.
self.registry_ = self._adata_manager.registry
self.summary_stats = self._adata_manager.summary_stats
self.is_trained_ = False
self._model_summary_string = ""
self.train_indices_ = None
self.test_indices_ = None
self.validation_indices_ = None
self.history_ = None
self._data_loader_cls = AnnDataLoader
@property
def adata(self) -> AnnData:
"""Data attached to model instance."""
return self._adata
@adata.setter
def adata(self, adata: AnnData):
if adata is None:
raise ValueError("adata cannot be None.")
self._validate_anndata(adata)
self._adata = adata
self._adata_manager = self.get_anndata_manager(adata, required=True)
self.registry_ = self._adata_manager.registry
self.summary_stats = self._adata_manager.summary_stats
@property
def adata_manager(self) -> AnnDataManager:
"""Manager instance associated with self.adata."""
return self._adata_manager
def to_device(self, device: Union[str, int]):
"""
Move model to device.
Parameters
----------
device
Device to move model to. Options: 'cpu' for CPU, integer GPU index (eg. 0),
or 'cuda:X' where X is the GPU index (eg. 'cuda:0'). See torch.device for more info.
Examples
--------
>>> adata = scvi.data.synthetic_iid()
>>> model = scvi.model.SCVI(adata)
>>> model.to_device('cpu') # moves model to CPU
>>> model.to_device('cuda:0') # moves model to GPU 0
>>> model.to_device(0) # also moves model to GPU 0
"""
my_device = torch.device(device)
self.module.to(my_device)
@property
def device(self):
return self.module.device
@staticmethod
def _get_setup_method_args(**setup_locals) -> dict:
"""
Returns a dictionary organizing the arguments used to call ``setup_anndata``.
Must be called with ``**locals()`` at the start of the ``setup_anndata`` method
to avoid the inclusion of any extraneous variables.
"""
cls = setup_locals.pop("cls")
model_name = cls.__name__
setup_args = dict()
for k, v in setup_locals.items():
if k not in _SETUP_INPUTS_EXCLUDED_PARAMS:
setup_args[k] = v
return {_MODEL_NAME_KEY: model_name, _SETUP_ARGS_KEY: setup_args}
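    # Hedged illustration (not part of the original module): inside a subclass'
    # setup_anndata(cls, adata, batch_key=None, **kwargs), calling
    #     cls._get_setup_method_args(**locals())
    # returns a dict keyed by _MODEL_NAME_KEY (the subclass name) and
    # _SETUP_ARGS_KEY (here roughly {'batch_key': None}); 'adata' and 'kwargs'
    # are stripped via _SETUP_INPUTS_EXCLUDED_PARAMS, and 'cls' is popped above.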
@classmethod
def register_manager(cls, adata_manager: AnnDataManager):
"""
Registers an :class:`~scvi.data.anndata.AnnDataManager` instance with this model class.
"""
adata_id = adata_manager.adata_uuid
cls._setup_adata_manager_store[adata_id] = adata_manager
def _register_manager_for_instance(self, adata_manager: AnnDataManager):
"""
Registers an :class:`~scvi.data.anndata.AnnDataManager` instance with this model instance.
Creates a model-instance specific mapping in ``cls._per_instance_manager_store`` for this
:class:`~scvi.data.anndata.AnnDataManager` instance.
"""
if self.id not in self._per_instance_manager_store:
self._per_instance_manager_store[self.id] = dict()
adata_id = adata_manager.adata_uuid
instance_manager_store = self._per_instance_manager_store[self.id]
instance_manager_store[adata_id] = adata_manager
@classmethod
def _get_most_recent_anndata_manager(
cls, adata: AnnData, required: bool = False
) -> Optional[AnnDataManager]:
"""
Retrieves the :class:`~scvi.data.anndata.AnnDataManager` for a given AnnData object specific to this model class.
Checks for the most recent :class:`~scvi.data.anndata.AnnDataManager` created for the given AnnData object via
``setup_anndata()`` on model initialization. Unlike :meth:`scvi.model.base.BaseModelClass.get_anndata_manager`,
this method is not model instance specific and can be called before a model is fully initialized.
Parameters
----------
adata
AnnData object to find manager instance for.
required
If True, errors on missing manager. Otherwise, returns None when manager is missing.
"""
if _SCVI_UUID_KEY not in adata.uns:
if required:
raise ValueError(
f"Please set up your AnnData with {cls.__name__}.setup_anndata first."
)
return None
adata_id = adata.uns[_SCVI_UUID_KEY]
if adata_id not in cls._setup_adata_manager_store:
if required:
raise ValueError(
f"Please set up your AnnData with {cls.__name__}.setup_anndata first. "
"It appears the AnnData object has been setup with a different model."
)
return None
adata_manager = cls._setup_adata_manager_store[adata_id]
if adata_manager.adata is not adata:
raise ValueError(
"The provided AnnData object does not match the AnnData object "
"previously provided for setup. Did you make a copy?"
)
return adata_manager
def get_anndata_manager(
self, adata: AnnData, required: bool = False
) -> Optional[AnnDataManager]:
"""
Retrieves the :class:`~scvi.data.anndata.AnnDataManager` for a given AnnData object specific to this model instance.
Requires ``self.id`` has been set. Checks for an :class:`~scvi.data.anndata.AnnDataManager`
specific to this model instance.
Parameters
----------
adata
AnnData object to find manager instance for.
required
If True, errors on missing manager. Otherwise, returns None when manager is missing.
"""
cls = self.__class__
if _SCVI_UUID_KEY not in adata.uns:
if required:
raise ValueError(
f"Please set up your AnnData with {cls.__name__}.setup_anndata first."
)
return None
adata_id = adata.uns[_SCVI_UUID_KEY]
if self.id not in cls._per_instance_manager_store:
if required:
raise AssertionError(
"Unable to find instance specific manager store. "
"The model has likely not been initialized with an AnnData object."
)
return None
elif adata_id not in cls._per_instance_manager_store[self.id]:
if required:
raise AssertionError(
"Please call ``self._validate_anndata`` on this AnnData object."
)
return None
adata_manager = cls._per_instance_manager_store[self.id][adata_id]
if adata_manager.adata is not adata:
raise ValueError(
"The provided AnnData object does not match the AnnData object "
"previously provided for setup. Did you make a copy?"
)
return adata_manager
def get_from_registry(
self,
adata: AnnData,
registry_key: str,
) -> np.ndarray:
"""
Returns the object in AnnData associated with the key in the data registry.
AnnData object should be registered with the model prior to calling this function
via the ``self._validate_anndata`` method.
Parameters
----------
registry_key
key of object to get from data registry.
adata
AnnData to pull data from.
Returns
-------
The requested data as a NumPy array.
"""
adata_manager = self.get_anndata_manager(adata)
if adata_manager is None:
raise AssertionError(
"AnnData not registered with model. Call `self._validate_anndata` "
"prior to calling this function."
)
return adata_manager.get_from_registry(registry_key)
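# Usage sketch (hedged; the registry key is an assumption and depends on how
# ``setup_anndata`` was called -- "X" is commonly the count matrix):
#
#     adata = model._validate_anndata(adata)
#     counts = model.get_from_registry(adata, "X")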
def _make_data_loader(
self,
adata: AnnData,
indices: Optional[Sequence[int]] = None,
batch_size: Optional[int] = None,
shuffle: bool = False,
data_loader_class=None,
**data_loader_kwargs,
):
"""
Create an AnnDataLoader object for data iteration.
Parameters
----------
adata
AnnData object with equivalent structure to initial AnnData.
indices
Indices of cells in adata to use. If `None`, all cells are used.
batch_size
Minibatch size for data loading into model. Defaults to `scvi.settings.batch_size`.
shuffle
Whether observations are shuffled at each iteration.
data_loader_class
Class to use for the data loader. If `None`, defaults to ``self._data_loader_cls``.
data_loader_kwargs
Keyword arguments passed to the data loader class.
"""
adata_manager = self.get_anndata_manager(adata)
if adata_manager is None:
raise AssertionError(
"AnnDataManager not found. Call `self._validate_anndata` prior to calling this function."
)
adata = adata_manager.adata
if batch_size is None:
batch_size = settings.batch_size
if indices is None:
indices = np.arange(adata.n_obs)
if data_loader_class is None:
data_loader_class = self._data_loader_cls
if "num_workers" not in data_loader_kwargs:
data_loader_kwargs.update({"num_workers": settings.dl_num_workers})
dl = data_loader_class(
adata_manager,
shuffle=shuffle,
indices=indices,
batch_size=batch_size,
**data_loader_kwargs,
)
return dl
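# Usage sketch (hedged; batch size and indices are illustrative assumptions):
#
#     scdl = self._make_data_loader(adata=adata, indices=indices, batch_size=128)
#     for tensors in scdl:
#         ...  # each ``tensors`` is a dict of minibatch tensors keyed by registry keys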
def _validate_anndata(
self, adata: Optional[AnnData] = None, copy_if_view: bool = True
) -> AnnData:
"""Validate anndata has been properly registered, transfer if necessary."""
if adata is None:
adata = self.adata
if adata.is_view:
if copy_if_view:
logger.info("Received view of anndata, making copy.")
adata = adata.copy()
# Reassign AnnData UUID to produce a separate AnnDataManager.
_assign_adata_uuid(adata, overwrite=True)
else:
raise ValueError("Please run `adata = adata.copy()`")
adata_manager = self.get_anndata_manager(adata)
if adata_manager is None:
logger.info(
"Input AnnData not setup with scvi-tools. "
+ "attempting to transfer AnnData setup"
)
self._register_manager_for_instance(
self.adata_manager.transfer_setup(adata)
)
else:
# Case where correct AnnDataManager is found, replay registration as necessary.
adata_manager.validate()
return adata
def _check_if_trained(
self, warn: bool = True, message: str = _UNTRAINED_WARNING_MESSAGE
):
"""
Check if the model is trained.
If the model is not trained, emit a warning when `warn` is True, otherwise raise a RuntimeError.
"""
if not self.is_trained_:
if warn:
warnings.warn(message)
else:
raise RuntimeError(message)
@property
def is_trained(self):
return self.is_trained_
@property
def test_indices(self):
return self.test_indices_
@property
def train_indices(self):
return self.train_indices_
@property
def validation_indices(self):
return self.validation_indices_
@train_indices.setter
def train_indices(self, value):
self.train_indices_ = value
@test_indices.setter
def test_indices(self, value):
self.test_indices_ = value
@validation_indices.setter
def validation_indices(self, value):
self.validation_indices_ = value
@is_trained.setter
def is_trained(self, value):
self.is_trained_ = value
@property
def history(self):
"""Returns computed metrics during training."""
return self.history_
def _get_user_attributes(self):
"""Returns all the self attributes defined in a model class, e.g., self.is_trained_."""
attributes = inspect.getmembers(self, lambda a: not (inspect.isroutine(a)))
attributes = [
a for a in attributes if not (a[0].startswith("__") and a[0].endswith("__"))
]
attributes = [a for a in attributes if not a[0].startswith("_abc_")]
return attributes
def _get_init_params(self, locals):
"""
Returns the model init signature with associated passed in values.
Ignores the initial AnnData.
"""
init = self.__init__
sig = inspect.signature(init)
parameters = sig.parameters.values()
init_params = [p.name for p in parameters]
all_params = {p: locals[p] for p in locals if p in init_params}
all_params = {
k: v for (k, v) in all_params.items() if not isinstance(v, AnnData)
}
# not very efficient but explicit
# separates variable params (**kwargs) from non-variable params into two dicts
non_var_params = [p.name for p in parameters if p.kind != p.VAR_KEYWORD]
non_var_params = {k: v for (k, v) in all_params.items() if k in non_var_params}
var_params = [p.name for p in parameters if p.kind == p.VAR_KEYWORD]
var_params = {k: v for (k, v) in all_params.items() if k in var_params}
user_params = {"kwargs": var_params, "non_kwargs": non_var_params}
return user_params
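# Illustration (hedged, with a hypothetical ``__init__(self, adata, n_hidden=128,
# **model_kwargs)`` called as ``Model(adata, n_hidden=256, dropout_rate=0.1)``):
# the returned dict would be
#     {"non_kwargs": {"n_hidden": 256}, "kwargs": {"model_kwargs": {"dropout_rate": 0.1}}}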
@abstractmethod
def train(self):
"""Trains the model."""
def save(
self,
dir_path: str,
prefix: Optional[str] = None,
overwrite: bool = False,
save_anndata: bool = False,
**anndata_write_kwargs,
):
"""
Save the state of the model.
Neither the trainer optimizer state nor the trainer history are saved.
Model files are not expected to be reproducibly saved and loaded across versions
until we reach version 1.0.
Parameters
----------
dir_path
Path to a directory.
prefix
Prefix to prepend to saved file names.
overwrite
Overwrite existing data or not. If `False` and directory
already exists at `dir_path`, error will be raised.
save_anndata
If True, also saves the anndata
anndata_write_kwargs
Kwargs for :meth:`~anndata.AnnData.write`
"""
if not os.path.exists(dir_path) or overwrite:
os.makedirs(dir_path, exist_ok=overwrite)
else:
raise ValueError(
"{} already exists. Please provide an unexisting directory for saving.".format(
dir_path
)
)
file_name_prefix = prefix or ""
if save_anndata:
self.adata.write(
os.path.join(dir_path, f"{file_name_prefix}adata.h5ad"),
**anndata_write_kwargs,
)
model_save_path = os.path.join(dir_path, f"{file_name_prefix}model.pt")
# save the model state dict and the trainer state dict only
model_state_dict = self.module.state_dict()
var_names = self.adata.var_names.astype(str)
var_names = var_names.to_numpy()
# get all the user attributes
user_attributes = self._get_user_attributes()
# only save the public attributes with _ at the very end
user_attributes = {a[0]: a[1] for a in user_attributes if a[0][-1] == "_"}
torch.save(
dict(
model_state_dict=model_state_dict,
var_names=var_names,
attr_dict=user_attributes,
),
model_save_path,
)
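# Usage sketch (hedged; the directory name and model class are illustrative):
#
#     model.save("my_model/", overwrite=True, save_anndata=True)
#     model = MyModelClass.load("my_model/", adata=adata)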
@classmethod
def load(
cls,
dir_path: str,
prefix: Optional[str] = None,
adata: Optional[AnnData] = None,
use_gpu: Optional[Union[str, int, bool]] = None,
):
"""
Instantiate a model from the saved output.
Parameters
----------
dir_path
Path to saved outputs.
prefix
Prefix of saved file names.
adata
AnnData organized in the same way as data used to train model.
It is not necessary to run setup_anndata,
as AnnData is validated against the saved `scvi` setup dictionary.
If None, will check for and load anndata saved with the model.
use_gpu
Load model on default GPU if available (if None or True),
or index of GPU to use (if int), or name of GPU (if str), or use CPU (if False).
Returns
-------
Model with loaded state dictionaries.
Examples
--------
>>> model = ModelClass.load(save_path, adata=adata)  # use the model class that was used to save
>>> model.get_....
"""
load_adata = adata is None
use_gpu, device = parse_use_gpu_arg(use_gpu)
(
attr_dict,
var_names,
model_state_dict,
new_adata,
) = _load_saved_files(dir_path, load_adata, map_location=device, prefix=prefix)
adata = new_adata if new_adata is not None else adata
_validate_var_names(adata, var_names)
# Legacy support for old setup dict format.
if "scvi_setup_dict_" in attr_dict:
scvi_setup_dict = attr_dict.pop("scvi_setup_dict_")
cls.register_manager(manager_from_setup_dict(cls, adata, scvi_setup_dict))
else:
registry = attr_dict.pop("registry_")
if (
_MODEL_NAME_KEY in registry
and registry[_MODEL_NAME_KEY] != cls.__name__
):
raise ValueError(
"It appears you are loading a model from a different class."
)
if _SETUP_ARGS_KEY not in registry:
raise ValueError(
"Saved model does not contain original setup inputs. "
"Cannot load the original setup."
)
# Calling ``setup_anndata`` method with the original arguments passed into
# the saved model. This enables simple backwards compatibility in the case of
# newly introduced fields or parameters.
cls.setup_anndata(
adata, source_registry=registry, **registry[_SETUP_ARGS_KEY]
)
model = _initialize_model(cls, adata, attr_dict)
# some Pyro modules with AutoGuides may need one training step
try:
model.module.load_state_dict(model_state_dict)
except RuntimeError as err:
if isinstance(model.module, PyroBaseModuleClass):
old_history = model.history_.copy()
logger.info("Preparing underlying module for load")
model.train(max_steps=1)
model.history_ = old_history
pyro.clear_param_store()
model.module.load_state_dict(model_state_dict)
else:
raise err
model.to_device(device)
model.module.eval()
model._validate_anndata(adata)
return model
def __repr__(
self,
):
summary_string = self._model_summary_string
summary_string += "\nTraining status: {}".format(
"Trained" if self.is_trained_ else "Not Trained"
)
rich.print(summary_string)
return ""
@classmethod
@abstractmethod
@setup_anndata_dsp.dedent
def setup_anndata(
cls,
adata: AnnData,
*args,
**kwargs,
):
"""
%(summary)s.
Each model class deriving from this class provides parameters to this method
according to its needs. To operate correctly with the model initialization,
the implementation must call :meth:`~scvi.model.base.BaseModelClass.register_manager`
on a model-specific instance of :class:`~scvi.data.anndata.AnnDataManager`.
"""
@staticmethod
def view_setup_args(dir_path: str, prefix: Optional[str] = None) -> None:
"""
Print args used to setup a saved model.
Parameters
----------
dir_path
Path to saved outputs.
prefix
Prefix of saved file names.
"""
attr_dict = _load_saved_files(dir_path, False, prefix=prefix)[0]
# Legacy support for old setup dict format.
if "scvi_setup_dict_" in attr_dict:
raise NotImplementedError(
"Viewing setup args for pre v0.15.0 models is unsupported. "
"Load and resave the model to use this function."
)
registry = attr_dict.pop("registry_")
AnnDataManager.view_setup_method_args(registry)
def view_anndata_setup(
self, adata: Optional[AnnData] = None, hide_state_registries: bool = False
) -> None:
"""
Print summary of the setup for the initial AnnData or a given AnnData object.
Parameters
----------
adata
AnnData object setup with ``setup_anndata`` or
:meth:`~scvi.data.anndata.AnnDataManager.transfer_setup`.
hide_state_registries
If True, prints a shortened summary without details of each state registry.
"""
if adata is None:
adata = self.adata
try:
adata_manager = self.get_anndata_manager(adata, required=True)
except ValueError:
raise ValueError(
f"Given AnnData not setup with {self.__class__.__name__}. "
"Cannot view setup summary."
)
adata_manager.view_registry(hide_state_registries=hide_state_registries)
|
the-stack_0_24982
|
# Author: Bichen Wu ([email protected]) 08/25/2016
"""SqueezeDet model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import joblib
from utils import util
from easydict import EasyDict as edict
import numpy as np
import tensorflow as tf
from nn_skeleton import ModelSkeleton
class SqueezeDet(ModelSkeleton):
def __init__(self, mc, gpu_id=0):
with tf.device('/gpu:{}'.format(gpu_id)):
ModelSkeleton.__init__(self, mc)
self._add_forward_graph()
self._add_interpretation_graph()
self._add_loss_graph()
self._add_train_graph()
self._add_viz_graph()
def _add_forward_graph(self):
"""NN architecture."""
mc = self.mc
if mc.LOAD_PRETRAINED_MODEL:
assert tf.gfile.Exists(mc.PRETRAINED_MODEL_PATH), \
'Cannot find pretrained model at the given path:' \
' {}'.format(mc.PRETRAINED_MODEL_PATH)
self.caffemodel_weight = joblib.load(mc.PRETRAINED_MODEL_PATH)
conv1 = self._conv_layer(
'conv1', self.image_input, filters=64, size=3, stride=2,
padding='SAME', freeze=True)
pool1 = self._pooling_layer(
'pool1', conv1, size=3, stride=2, padding='SAME')
fire2 = self._fire_layer(
'fire2', pool1, s1x1=16, e1x1=64, e3x3=64, freeze=False)
fire3 = self._fire_layer(
'fire3', fire2, s1x1=16, e1x1=64, e3x3=64, freeze=False)
pool3 = self._pooling_layer(
'pool3', fire3, size=3, stride=2, padding='SAME')
fire4 = self._fire_layer(
'fire4', pool3, s1x1=32, e1x1=128, e3x3=128, freeze=False)
fire5 = self._fire_layer(
'fire5', fire4, s1x1=32, e1x1=128, e3x3=128, freeze=False)
pool5 = self._pooling_layer(
'pool5', fire5, size=3, stride=2, padding='SAME')
fire6 = self._fire_layer(
'fire6', pool5, s1x1=48, e1x1=192, e3x3=192, freeze=False)
fire7 = self._fire_layer(
'fire7', fire6, s1x1=48, e1x1=192, e3x3=192, freeze=False)
fire8 = self._fire_layer(
'fire8', fire7, s1x1=64, e1x1=256, e3x3=256, freeze=False)
fire9 = self._fire_layer(
'fire9', fire8, s1x1=64, e1x1=256, e3x3=256, freeze=False)
# Two extra fire modules with no pretrained weights (trained from scratch)
fire10 = self._fire_layer(
'fire10', fire9, s1x1=96, e1x1=384, e3x3=384, freeze=False)
fire11 = self._fire_layer(
'fire11', fire10, s1x1=96, e1x1=384, e3x3=384, freeze=False)
dropout11 = tf.nn.dropout(fire11, self.keep_prob, name='drop11')
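# Output layer size: for every anchor the detector predicts mc.CLASSES class scores,
# 1 confidence score, and 4 bounding-box deltas, i.e. CLASSES + 1 + 4 values per
# anchor per grid cell.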
num_output = mc.ANCHOR_PER_GRID * (mc.CLASSES + 1 + 4)
self.preds = self._conv_layer(
'conv12', dropout11, filters=num_output, size=3, stride=1,
padding='SAME', xavier=False, relu=False, stddev=0.0001)
def _fire_layer(self, layer_name, inputs, s1x1, e1x1, e3x3, stddev=0.01,
freeze=False):
"""Fire layer constructor.
Args:
layer_name: layer name
inputs: input tensor
s1x1: number of 1x1 filters in squeeze layer.
e1x1: number of 1x1 filters in expand layer.
e3x3: number of 3x3 filters in expand layer.
freeze: if true, do not train parameters in this layer.
Returns:
fire layer operation.
"""
sq1x1 = self._conv_layer(
layer_name+'/squeeze1x1', inputs, filters=s1x1, size=1, stride=1,
padding='SAME', stddev=stddev, freeze=freeze)
ex1x1 = self._conv_layer(
layer_name+'/expand1x1', sq1x1, filters=e1x1, size=1, stride=1,
padding='SAME', stddev=stddev, freeze=freeze)
ex3x3 = self._conv_layer(
layer_name+'/expand3x3', sq1x1, filters=e3x3, size=3, stride=1,
padding='SAME', stddev=stddev, freeze=freeze)
#return tf.concat([ex1x1, ex3x3], 3, name=layer_name+'/concat')
values = [ex1x1, ex3x3]
axis = 3
name = layer_name+'/concat'
try:
# try the old-style signature first: tf.concat(axis, values) (TensorFlow < 1.0)
return tf.concat(axis, values, name=name)
except TypeError:
# fall back to the new-style signature: tf.concat(values, axis) (TensorFlow >= 1.0)
return tf.concat(values, axis, name=name)
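# Note on fire modules: the two expand branches are concatenated along the channel
# axis, so e.g. _fire_layer('fire2', pool1, s1x1=16, e1x1=64, e3x3=64) yields
# 64 + 64 = 128 output channels.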
|
the-stack_0_24983
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = ['Click>=7.0', ]
setup_requirements = ['pytest-runner', ]
test_requirements = ['pytest>=3', ]
setup(
author="Pablo Delgado",
author_email='[email protected]',
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
description="Mesos framework logger",
entry_points={
'console_scripts': [
'frameworker=frameworker.cli:main',
],
},
install_requires=requirements,
license="MIT license",
long_description=readme + '\n\n' + history,
include_package_data=True,
keywords='frameworker',
name='frameworker',
packages=find_packages(include=['frameworker', 'frameworker.*']),
setup_requires=setup_requirements,
test_suite='tests',
tests_require=test_requirements,
url='https://github.com/pablete/frameworker',
version='0.1.1',
zip_safe=False,
)
|
the-stack_0_24984
|
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases() # NOQA
import argparse
import sys
import chainer
from chainer import optimizers
import gym
gym.undo_logger_setup() # NOQA
from gym import spaces
import gym.wrappers
import numpy as np
from chainerrl.agents.ddpg import DDPG
from chainerrl.agents.ddpg import DDPGModel
from chainerrl import experiments
from chainerrl import explorers
from chainerrl import misc
from chainerrl import policy
from chainerrl import q_functions
from chainerrl import replay_buffer
def main():
import logging
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument('--outdir', type=str, default='results',
help='Directory path to save output files.'
' If it does not exist, it will be created.')
parser.add_argument('--env', type=str, default='Humanoid-v1')
parser.add_argument('--seed', type=int, default=0,
help='Random seed [0, 2 ** 32)')
parser.add_argument('--gpu', type=int, default=0)
parser.add_argument('--final-exploration-steps',
type=int, default=10 ** 6)
parser.add_argument('--actor-lr', type=float, default=1e-4)
parser.add_argument('--critic-lr', type=float, default=1e-3)
parser.add_argument('--load', type=str, default='')
parser.add_argument('--steps', type=int, default=10 ** 7)
parser.add_argument('--n-hidden-channels', type=int, default=300)
parser.add_argument('--n-hidden-layers', type=int, default=3)
parser.add_argument('--replay-start-size', type=int, default=5000)
parser.add_argument('--n-update-times', type=int, default=1)
parser.add_argument('--target-update-interval',
type=int, default=1)
parser.add_argument('--target-update-method',
type=str, default='soft', choices=['hard', 'soft'])
parser.add_argument('--soft-update-tau', type=float, default=1e-2)
parser.add_argument('--update-interval', type=int, default=4)
parser.add_argument('--eval-n-runs', type=int, default=100)
parser.add_argument('--eval-interval', type=int, default=10 ** 5)
parser.add_argument('--gamma', type=float, default=0.995)
parser.add_argument('--minibatch-size', type=int, default=200)
parser.add_argument('--render', action='store_true')
parser.add_argument('--demo', action='store_true')
parser.add_argument('--use-bn', action='store_true', default=False)
parser.add_argument('--monitor', action='store_true')
parser.add_argument('--reward-scale-factor', type=float, default=1e-2)
args = parser.parse_args()
args.outdir = experiments.prepare_output_dir(
args, args.outdir, argv=sys.argv)
print('Output files are saved in {}'.format(args.outdir))
# Set a random seed used in ChainerRL
misc.set_random_seed(args.seed, gpus=(args.gpu,))
def clip_action_filter(a):
return np.clip(a, action_space.low, action_space.high)
def reward_filter(r):
return r * args.reward_scale_factor
def make_env(test):
env = gym.make(args.env)
# Use different random seeds for train and test envs
env_seed = 2 ** 32 - 1 - args.seed if test else args.seed
env.seed(env_seed)
if args.monitor:
env = gym.wrappers.Monitor(env, args.outdir)
if isinstance(env.action_space, spaces.Box):
misc.env_modifiers.make_action_filtered(env, clip_action_filter)
if not test:
misc.env_modifiers.make_reward_filtered(env, reward_filter)
if args.render and not test:
misc.env_modifiers.make_rendered(env)
return env
env = make_env(test=False)
timestep_limit = env.spec.tags.get(
'wrapper_config.TimeLimit.max_episode_steps')
obs_size = np.asarray(env.observation_space.shape).prod()
action_space = env.action_space
action_size = np.asarray(action_space.shape).prod()
if args.use_bn:
q_func = q_functions.FCBNLateActionSAQFunction(
obs_size, action_size,
n_hidden_channels=args.n_hidden_channels,
n_hidden_layers=args.n_hidden_layers,
normalize_input=True)
pi = policy.FCBNDeterministicPolicy(
obs_size, action_size=action_size,
n_hidden_channels=args.n_hidden_channels,
n_hidden_layers=args.n_hidden_layers,
min_action=action_space.low, max_action=action_space.high,
bound_action=True,
normalize_input=True)
else:
q_func = q_functions.FCSAQFunction(
obs_size, action_size,
n_hidden_channels=args.n_hidden_channels,
n_hidden_layers=args.n_hidden_layers)
pi = policy.FCDeterministicPolicy(
obs_size, action_size=action_size,
n_hidden_channels=args.n_hidden_channels,
n_hidden_layers=args.n_hidden_layers,
min_action=action_space.low, max_action=action_space.high,
bound_action=True)
model = DDPGModel(q_func=q_func, policy=pi)
opt_a = optimizers.Adam(alpha=args.actor_lr)
opt_c = optimizers.Adam(alpha=args.critic_lr)
opt_a.setup(model['policy'])
opt_c.setup(model['q_function'])
opt_a.add_hook(chainer.optimizer.GradientClipping(1.0), 'hook_a')
opt_c.add_hook(chainer.optimizer.GradientClipping(1.0), 'hook_c')
rbuf = replay_buffer.ReplayBuffer(5 * 10 ** 5)
def phi(obs):
return obs.astype(np.float32)
def random_action():
a = action_space.sample()
if isinstance(a, np.ndarray):
a = a.astype(np.float32)
return a
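# Exploration: additive Ornstein-Uhlenbeck noise whose sigma is 20% of each action
# dimension's range, a common heuristic for DDPG on continuous-control tasks.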
ou_sigma = (action_space.high - action_space.low) * 0.2
explorer = explorers.AdditiveOU(sigma=ou_sigma)
agent = DDPG(model, opt_a, opt_c, rbuf, gamma=args.gamma,
explorer=explorer, replay_start_size=args.replay_start_size,
target_update_method=args.target_update_method,
target_update_interval=args.target_update_interval,
update_interval=args.update_interval,
soft_update_tau=args.soft_update_tau,
n_times_update=args.n_update_times,
phi=phi, gpu=args.gpu, minibatch_size=args.minibatch_size)
if len(args.load) > 0:
agent.load(args.load)
eval_env = make_env(test=True)
if args.demo:
eval_stats = experiments.eval_performance(
env=eval_env,
agent=agent,
n_runs=args.eval_n_runs,
max_episode_len=timestep_limit)
print('n_runs: {} mean: {} median: {} stdev {}'.format(
args.eval_n_runs, eval_stats['mean'], eval_stats['median'],
eval_stats['stdev']))
else:
experiments.train_agent_with_evaluation(
agent=agent, env=env, steps=args.steps,
eval_env=eval_env,
eval_n_runs=args.eval_n_runs, eval_interval=args.eval_interval,
outdir=args.outdir,
max_episode_len=timestep_limit)
if __name__ == '__main__':
main()
|